Kernel: Implement new abstract MemoryRegion

MemoryBackedRegion now inherits from this and is used for private
anonymous mappings. This will make shared mappings and file-backed
mappings much easier to implement.
Bananymous 2023-09-29 16:18:23 +03:00
parent 48096b18c2
commit 4ee759aa3b
7 changed files with 319 additions and 54 deletions
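The abstraction the message describes is small: a subclass only has to say how a faulting page gets its contents and how the region clones into a new page table. A hypothetical file-backed subclass (a sketch only; FileBackedRegion is not part of this commit) would slot in like this:

// Hypothetical sketch, not part of this commit: illustrates why the
// abstract base makes file-backed mappings easier. Only the two pure
// virtuals below need new behavior.
class FileBackedRegion final : public MemoryRegion
{
public:
    // On first touch, read the page from the backing inode instead of
    // zero-filling it, then map it with the region's flags.
    virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t vaddr) override;
    // For Type::SHARED this would map the same physical pages instead
    // of copying them the way MemoryBackedRegion::clone does.
    virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable&) override;
};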

kernel/CMakeLists.txt

@@ -35,6 +35,8 @@ set(KERNEL_SOURCES
 kernel/Memory/GeneralAllocator.cpp
 kernel/Memory/Heap.cpp
 kernel/Memory/kmalloc.cpp
+kernel/Memory/MemoryBackedRegion.cpp
+kernel/Memory/MemoryRegion.cpp
 kernel/Memory/PhysicalRange.cpp
 kernel/Memory/VirtualRange.cpp
 kernel/Networking/E1000.cpp

kernel/include/kernel/Memory/MemoryBackedRegion.h

@@ -0,0 +1,29 @@
#pragma once

#include <kernel/Memory/MemoryRegion.h>

namespace Kernel
{

    class MemoryBackedRegion final : public MemoryRegion
    {
        BAN_NON_COPYABLE(MemoryBackedRegion);
        BAN_NON_MOVABLE(MemoryBackedRegion);

    public:
        static BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> create(PageTable&, size_t size, AddressRange, Type, PageTable::flags_t);
        ~MemoryBackedRegion();

        virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t vaddr) override;
        virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) override;

        // Copy data from a buffer into this region.
        // This can fail if the target page is not yet mapped and no free physical memory is available.
        BAN::ErrorOr<void> copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size);

    private:
        MemoryBackedRegion(PageTable&, size_t size, Type, PageTable::flags_t);
    };

}
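Typical use mirrors the call sites later in this commit (a minimal sketch; page_table, bytes, data and data_size are placeholder names, while the address window and flags are copied from the Process.cpp hunks below):

// Minimal usage sketch assembled from this commit's call sites.
auto region = TRY(MemoryBackedRegion::create(
    page_table,                                  // placeholder: the target PageTable
    bytes,                                       // region size in bytes
    { .start = 0x400000, .end = KERNEL_OFFSET }, // userspace window to reserve from
    MemoryRegion::Type::PRIVATE,                 // only PRIVATE is supported so far
    PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));
// Physical pages are allocated lazily; copy_data_to_region faults them in.
TRY(region->copy_data_to_region(0, data, data_size));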

kernel/include/kernel/Memory/MemoryRegion.h

@@ -0,0 +1,59 @@
#pragma once

#include <BAN/UniqPtr.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Memory/Types.h>
#include <stddef.h>

namespace Kernel
{

    struct AddressRange
    {
        vaddr_t start;
        vaddr_t end;
    };

    class MemoryRegion
    {
        BAN_NON_COPYABLE(MemoryRegion);
        BAN_NON_MOVABLE(MemoryRegion);

    public:
        enum Type : uint8_t
        {
            PRIVATE,
            SHARED
        };

    public:
        virtual ~MemoryRegion();

        bool contains(vaddr_t address) const;
        bool contains_fully(vaddr_t address, size_t size) const;
        bool overlaps(vaddr_t address, size_t size) const;

        size_t size() const { return m_size; }
        vaddr_t vaddr() const { return m_vaddr; }

        // Returns an error if no memory was available,
        // true if the page was successfully allocated,
        // false if the page was already allocated.
        virtual BAN::ErrorOr<bool> allocate_page_containing(vaddr_t address) = 0;
        virtual BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> clone(PageTable& new_page_table) = 0;

    protected:
        MemoryRegion(PageTable&, size_t size, Type type, PageTable::flags_t flags);
        BAN::ErrorOr<void> initialize(AddressRange);

    protected:
        PageTable& m_page_table;
        const size_t m_size;
        const Type m_type;
        const PageTable::flags_t m_flags;
        vaddr_t m_vaddr { 0 };
    };

}
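The three predicates are plain checks over the half-open interval [vaddr, vaddr + size). A self-contained illustration with the kernel types swapped for standard ones (nothing here is part of the commit):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Mirrors MemoryRegion::contains / overlaps from MemoryRegion.cpp below,
// with vaddr_t stood in for by uintptr_t.
struct Region { uintptr_t vaddr; size_t size; };

bool contains(const Region& r, uintptr_t addr)
{
    return r.vaddr <= addr && addr < r.vaddr + r.size;
}

bool overlaps(const Region& r, uintptr_t addr, size_t size)
{
    if (addr + size < r.vaddr)
        return false;
    if (addr >= r.vaddr + r.size)
        return false;
    return true;
}

int main()
{
    Region r { 0x400000, 0x2000 };                  // [0x400000, 0x402000)
    printf("%d\n", contains(r, 0x401fff));          // 1: last byte of the region
    printf("%d\n", contains(r, 0x402000));          // 0: one past the end
    printf("%d\n", overlaps(r, 0x3ff000, 0x2000));  // 1: straddles the start
}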

kernel/include/kernel/Process.h

@@ -7,7 +7,7 @@
 #include <kernel/Credentials.h>
 #include <kernel/FS/Inode.h>
 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/VirtualRange.h>
+#include <kernel/Memory/MemoryRegion.h>
 #include <kernel/OpenFileDescriptorSet.h>
 #include <kernel/SpinLock.h>
 #include <kernel/Terminal/TTY.h>
@@ -174,7 +174,7 @@ namespace Kernel
 OpenFileDescriptorSet m_open_file_descriptors;
 BAN::UniqPtr<LibELF::LoadableELF> m_loadable_elf;
-BAN::Vector<BAN::UniqPtr<VirtualRange>> m_mapped_ranges;
+BAN::Vector<BAN::UniqPtr<MemoryRegion>> m_mapped_regions;
 pid_t m_sid;
 pid_t m_pgrp;

kernel/Memory/MemoryBackedRegion.cpp

@@ -0,0 +1,124 @@
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
#include <kernel/LockGuard.h>

namespace Kernel
{

    BAN::ErrorOr<BAN::UniqPtr<MemoryBackedRegion>> MemoryBackedRegion::create(PageTable& page_table, size_t size, AddressRange address_range, Type type, PageTable::flags_t flags)
    {
        ASSERT(type == Type::PRIVATE);

        auto* region_ptr = new MemoryBackedRegion(page_table, size, type, flags);
        if (region_ptr == nullptr)
            return BAN::Error::from_errno(ENOMEM);
        auto region = BAN::UniqPtr<MemoryBackedRegion>::adopt(region_ptr);

        TRY(region->initialize(address_range));

        return region;
    }

    MemoryBackedRegion::MemoryBackedRegion(PageTable& page_table, size_t size, Type type, PageTable::flags_t flags)
        : MemoryRegion(page_table, size, type, flags)
    {
    }

    MemoryBackedRegion::~MemoryBackedRegion()
    {
        ASSERT(m_type == Type::PRIVATE);

        size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
        for (size_t i = 0; i < needed_pages; i++)
        {
            paddr_t paddr = m_page_table.physical_address_of(m_vaddr + i * PAGE_SIZE);
            if (paddr != 0)
                Heap::get().release_page(paddr);
        }
    }

    BAN::ErrorOr<bool> MemoryBackedRegion::allocate_page_containing(vaddr_t address)
    {
        ASSERT(m_type == Type::PRIVATE);
        ASSERT(contains(address));

        // Check if address is already mapped
        vaddr_t vaddr = address & PAGE_ADDR_MASK;
        if (m_page_table.physical_address_of(vaddr) != 0)
            return false;

        // Map new physical page to address
        paddr_t paddr = Heap::get().take_free_page();
        if (paddr == 0)
            return BAN::Error::from_errno(ENOMEM);
        m_page_table.map_page_at(paddr, vaddr, m_flags);

        // Zero out the new page
        if (&PageTable::current() == &m_page_table)
            memset((void*)vaddr, 0x00, PAGE_SIZE);
        else
        {
            LockGuard _(PageTable::current());
            ASSERT(PageTable::current().is_page_free(0));
            PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
            memset((void*)0, 0x00, PAGE_SIZE);
            PageTable::current().unmap_page(0);
        }

        return true;
    }

    BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>> MemoryBackedRegion::clone(PageTable& new_page_table)
    {
        ASSERT(&PageTable::current() == &m_page_table);

        auto result = TRY(MemoryBackedRegion::create(new_page_table, m_size, { .start = m_vaddr, .end = m_vaddr + m_size }, m_type, m_flags));

        for (size_t offset = 0; offset < m_size; offset += PAGE_SIZE)
        {
            paddr_t paddr = m_page_table.physical_address_of(m_vaddr + offset);
            if (paddr == 0)
                continue;
            TRY(result->copy_data_to_region(offset, (const uint8_t*)(m_vaddr + offset), PAGE_SIZE));
        }

        return BAN::UniqPtr<MemoryRegion>(BAN::move(result));
    }

    BAN::ErrorOr<void> MemoryBackedRegion::copy_data_to_region(size_t offset_into_region, const uint8_t* buffer, size_t buffer_size)
    {
        ASSERT(offset_into_region + buffer_size <= m_size);

        size_t written = 0;
        while (written < buffer_size)
        {
            vaddr_t write_vaddr = m_vaddr + offset_into_region + written;
            vaddr_t page_offset = write_vaddr % PAGE_SIZE;
            size_t bytes = BAN::Math::min<size_t>(buffer_size - written, PAGE_SIZE - page_offset);

            TRY(allocate_page_containing(write_vaddr));

            if (&PageTable::current() == &m_page_table)
                memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
            else
            {
                paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK);
                ASSERT(paddr);

                LockGuard _(PageTable::current());
                ASSERT(PageTable::current().is_page_free(0));

                PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
                memcpy((void*)page_offset, (void*)(buffer + written), bytes);
                PageTable::current().unmap_page(0);
            }

            written += bytes;
        }

        return {};
    }

}
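The only subtle part of copy_data_to_region is the chunking: each iteration writes at most up to the next page boundary, so a single write never spans two mappings of the scratch page at vaddr 0. The size math can be checked in isolation (standalone sketch, assuming PAGE_SIZE is 4096):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Standalone check of the per-iteration size math in copy_data_to_region.
int main()
{
    const size_t PAGE_SIZE = 4096;
    const size_t vaddr = 0x400000, offset_into_region = 100, buffer_size = 10000;

    size_t written = 0;
    while (written < buffer_size)
    {
        size_t write_vaddr = vaddr + offset_into_region + written;
        size_t page_offset = write_vaddr % PAGE_SIZE;
        // Clamp to the end of the buffer or the end of the current page.
        size_t bytes = std::min(buffer_size - written, PAGE_SIZE - page_offset);
        printf("chunk of %zu bytes at page offset %zu\n", bytes, page_offset);
        written += bytes;
    }
    // Prints 3996, 4096 and 1908: chunks never cross a page boundary.
}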

kernel/Memory/MemoryRegion.cpp

@@ -0,0 +1,50 @@
#include <kernel/Memory/MemoryRegion.h>

namespace Kernel
{

    MemoryRegion::MemoryRegion(PageTable& page_table, size_t size, Type type, PageTable::flags_t flags)
        : m_page_table(page_table)
        , m_size(size)
        , m_type(type)
        , m_flags(flags)
    {
    }

    MemoryRegion::~MemoryRegion()
    {
        if (m_vaddr)
            m_page_table.unmap_range(m_vaddr, m_size);
    }

    BAN::ErrorOr<void> MemoryRegion::initialize(AddressRange address_range)
    {
        size_t needed_pages = BAN::Math::div_round_up<size_t>(m_size, PAGE_SIZE);
        m_vaddr = m_page_table.reserve_free_contiguous_pages(needed_pages, address_range.start);
        if (m_vaddr == 0)
            return BAN::Error::from_errno(ENOMEM);
        if (m_vaddr + needed_pages * PAGE_SIZE > address_range.end)
            return BAN::Error::from_errno(ENOMEM);
        return {};
    }

    bool MemoryRegion::contains(vaddr_t address) const
    {
        return m_vaddr <= address && address < m_vaddr + m_size;
    }

    bool MemoryRegion::contains_fully(vaddr_t address, size_t size) const
    {
        return m_vaddr <= address && address + size < m_vaddr + m_size;
    }

    bool MemoryRegion::overlaps(vaddr_t address, size_t size) const
    {
        if (address + size < m_vaddr)
            return false;
        if (address >= m_vaddr + m_size)
            return false;
        return true;
    }

}
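initialize reserves ceil(size / PAGE_SIZE) virtual pages at or above address_range.start, then rejects the reservation if it would end past address_range.end. The rounding helper, checked standalone (assumption: BAN::Math::div_round_up behaves like the usual ceiling division):

#include <cstddef>
#include <cstdio>

// Assumed behavior of BAN::Math::div_round_up<size_t>.
constexpr size_t div_round_up(size_t value, size_t divisor)
{
    return (value + divisor - 1) / divisor;
}

int main()
{
    const size_t PAGE_SIZE = 4096;
    printf("%zu\n", div_round_up(1, PAGE_SIZE));    // 1 page
    printf("%zu\n", div_round_up(4096, PAGE_SIZE)); // still 1 page
    printf("%zu\n", div_round_up(4097, PAGE_SIZE)); // 2 pages
}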

kernel/Process.cpp

@@ -7,6 +7,7 @@
 #include <kernel/InterruptController.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/Heap.h>
+#include <kernel/Memory/MemoryBackedRegion.h>
 #include <kernel/Memory/PageTableScope.h>
 #include <kernel/Process.h>
 #include <kernel/Scheduler.h>
@@ -128,23 +129,23 @@ namespace Kernel
 if (auto rem = needed_bytes % PAGE_SIZE)
 	needed_bytes += PAGE_SIZE - rem;
-auto argv_range = MUST(VirtualRange::create_to_vaddr_range(
+auto argv_region = MUST(MemoryBackedRegion::create(
 	process->page_table(),
-	0x400000, KERNEL_OFFSET,
 	needed_bytes,
-	PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-	true
+	{ .start = 0x400000, .end = KERNEL_OFFSET },
+	MemoryRegion::Type::PRIVATE,
+	PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
 ));
-uintptr_t temp = argv_range->vaddr() + sizeof(char*) * 2;
-argv_range->copy_from(0, (uint8_t*)&temp, sizeof(char*));
+uintptr_t temp = argv_region->vaddr() + sizeof(char*) * 2;
+MUST(argv_region->copy_data_to_region(0, (const uint8_t*)&temp, sizeof(char*)));
 temp = 0;
-argv_range->copy_from(sizeof(char*), (uint8_t*)&temp, sizeof(char*));
+MUST(argv_region->copy_data_to_region(sizeof(char*), (const uint8_t*)&temp, sizeof(char*)));
-argv_range->copy_from(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size());
+MUST(argv_region->copy_data_to_region(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size()));
-MUST(process->m_mapped_ranges.push_back(BAN::move(argv_range)));
+MUST(process->m_mapped_regions.push_back(BAN::move(argv_region)));
 }
 process->m_userspace_info.argc = 1;
@@ -172,7 +173,8 @@ namespace Kernel
 Process::~Process()
 {
 ASSERT(m_threads.empty());
-ASSERT(m_mapped_ranges.empty());
+ASSERT(m_mapped_regions.empty());
+ASSERT(!m_loadable_elf);
 ASSERT(m_exit_status.waiting == 0);
 ASSERT(&PageTable::current() != m_page_table.ptr());
 }
@@ -205,7 +207,8 @@ namespace Kernel
 m_open_file_descriptors.close_all();
 // NOTE: We must unmap ranges while the page table is still alive
-m_mapped_ranges.clear();
+m_mapped_regions.clear();
+m_loadable_elf.clear();
 }
 void Process::on_thread_exit(Thread& thread)
@@ -318,10 +321,10 @@ namespace Kernel
 OpenFileDescriptorSet open_file_descriptors(m_credentials);
 TRY(open_file_descriptors.clone_from(m_open_file_descriptors));
-BAN::Vector<BAN::UniqPtr<VirtualRange>> mapped_ranges;
-TRY(mapped_ranges.reserve(m_mapped_ranges.size()));
-for (auto& mapped_range : m_mapped_ranges)
-	MUST(mapped_ranges.push_back(TRY(mapped_range->clone(*page_table))));
+BAN::Vector<BAN::UniqPtr<MemoryRegion>> mapped_regions;
+TRY(mapped_regions.reserve(m_mapped_regions.size()));
+for (auto& mapped_region : m_mapped_regions)
+	MUST(mapped_regions.push_back(TRY(mapped_region->clone(*page_table))));
 auto loadable_elf = TRY(m_loadable_elf->clone(*page_table));
@@ -330,7 +333,7 @@ namespace Kernel
 forked->m_working_directory = BAN::move(working_directory);
 forked->m_page_table = BAN::move(page_table);
 forked->m_open_file_descriptors = BAN::move(open_file_descriptors);
-forked->m_mapped_ranges = BAN::move(mapped_ranges);
+forked->m_mapped_regions = BAN::move(mapped_regions);
 forked->m_loadable_elf = BAN::move(loadable_elf);
 forked->m_is_userspace = m_is_userspace;
 forked->m_userspace_info = m_userspace_info;
@@ -373,7 +376,7 @@ namespace Kernel
 m_open_file_descriptors.close_cloexec();
-m_mapped_ranges.clear();
+m_mapped_regions.clear();
 m_loadable_elf.clear();
 m_loadable_elf = TRY(load_elf_for_exec(m_credentials, executable_path, m_working_directory, page_table()));
@@ -387,8 +390,8 @@ namespace Kernel
 ASSERT(&Process::current() == this);
 // allocate memory on the new process for arguments and environment
-auto create_range =
-	[&](const auto& container) -> BAN::UniqPtr<VirtualRange>
+auto create_region =
+	[&](BAN::Span<BAN::String> container) -> BAN::ErrorOr<BAN::UniqPtr<MemoryRegion>>
 {
 size_t bytes = sizeof(char*);
 for (auto& elem : container)
@@ -397,36 +400,36 @@ namespace Kernel
 if (auto rem = bytes % PAGE_SIZE)
 	bytes += PAGE_SIZE - rem;
-auto range = MUST(VirtualRange::create_to_vaddr_range(
+auto region = TRY(MemoryBackedRegion::create(
 	page_table(),
-	0x400000, KERNEL_OFFSET,
 	bytes,
-	PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-	true
+	{ .start = 0x400000, .end = KERNEL_OFFSET },
+	MemoryRegion::Type::PRIVATE,
+	PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
 ));
 size_t data_offset = sizeof(char*) * (container.size() + 1);
 for (size_t i = 0; i < container.size(); i++)
 {
-uintptr_t ptr_addr = range->vaddr() + data_offset;
-range->copy_from(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*));
-range->copy_from(data_offset, (const uint8_t*)container[i].data(), container[i].size());
+uintptr_t ptr_addr = region->vaddr() + data_offset;
+TRY(region->copy_data_to_region(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*)));
+TRY(region->copy_data_to_region(data_offset, (const uint8_t*)container[i].data(), container[i].size()));
 data_offset += container[i].size() + 1;
 }
 uintptr_t null = 0;
-range->copy_from(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*));
-return BAN::move(range);
+TRY(region->copy_data_to_region(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*)));
+return BAN::UniqPtr<MemoryRegion>(BAN::move(region));
 };
-auto argv_range = create_range(str_argv);
-m_userspace_info.argv = (char**)argv_range->vaddr();
-MUST(m_mapped_ranges.push_back(BAN::move(argv_range)));
+auto argv_region = MUST(create_region(str_argv.span()));
+m_userspace_info.argv = (char**)argv_region->vaddr();
+MUST(m_mapped_regions.push_back(BAN::move(argv_region)));
-auto envp_range = create_range(str_envp);
-m_userspace_info.envp = (char**)envp_range->vaddr();
-MUST(m_mapped_ranges.push_back(BAN::move(envp_range)));
+auto envp_region = MUST(create_region(str_envp.span()));
+m_userspace_info.envp = (char**)envp_region->vaddr();
+MUST(m_mapped_regions.push_back(BAN::move(envp_region)));
 m_userspace_info.argc = str_argv.size();
@@ -544,11 +547,11 @@ namespace Kernel
 	return true;
 }
-for (auto& mapped_range : m_mapped_ranges)
+for (auto& region : m_mapped_regions)
 {
-	if (!mapped_range->contains(address))
+	if (!region->contains(address))
 		continue;
-	TRY(mapped_range->allocate_page_for_demand_paging(address));
+	TRY(region->allocate_page_containing(address));
 	return true;
 }
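This hunk is where the abstraction pays off: the demand-paging path no longer cares what backs a region. Condensed into a sketch (handle_demand_page is a hypothetical name for the surrounding member function, not from this commit):

// Condensed sketch of the demand-paging flow in the hunk above.
BAN::ErrorOr<bool> Process::handle_demand_page(vaddr_t address)
{
	for (auto& region : m_mapped_regions)
	{
		if (!region->contains(address))
			continue;
		// First touch: allocate, map, and zero-fill the page; returns
		// false (without error) if the page was already mapped.
		TRY(region->allocate_page_containing(address));
		return true;
	}
	return false; // not in any region; the caller falls back to e.g. the ELF
}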
@@ -824,17 +827,17 @@ namespace Kernel
 if (args->len % PAGE_SIZE != 0)
 	return BAN::Error::from_errno(EINVAL);
-auto range = TRY(VirtualRange::create_to_vaddr_range(
+auto region = TRY(MemoryBackedRegion::create(
 	page_table(),
-	0x400000, KERNEL_OFFSET,
 	args->len,
-	PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-	false
+	{ .start = 0x400000, .end = KERNEL_OFFSET },
+	MemoryRegion::Type::PRIVATE,
+	PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
 ));
 LockGuard _(m_lock);
-TRY(m_mapped_ranges.push_back(BAN::move(range)));
-return m_mapped_ranges.back()->vaddr();
+TRY(m_mapped_regions.push_back(BAN::move(region)));
+return m_mapped_regions.back()->vaddr();
 }
 return BAN::Error::from_errno(ENOTSUP);
@@ -851,13 +854,10 @@ namespace Kernel
 LockGuard _(m_lock);
-for (size_t i = 0; i < m_mapped_ranges.size(); i++)
-{
-auto& range = m_mapped_ranges[i];
-if (vaddr + len < range->vaddr() || vaddr >= range->vaddr() + range->size())
-	continue;
-m_mapped_ranges.remove(i);
-}
+// FIXME: We should only unmap partial regions
+for (size_t i = 0; i < m_mapped_regions.size(); i++)
+	if (m_mapped_regions[i]->overlaps(vaddr, len))
+		m_mapped_regions.remove(i);
 return 0;
 }
@@ -1341,10 +1341,11 @@ namespace Kernel
 	return;
 // FIXME: should we allow cross mapping access?
-for (auto& mapped_range : m_mapped_ranges)
-	if (vaddr >= mapped_range->vaddr() && vaddr + size <= mapped_range->vaddr() + mapped_range->size())
+for (auto& mapped_region : m_mapped_regions)
+	if (mapped_region->contains_fully(vaddr, size))
 		return;
+// FIXME: elf should contain full range [vaddr, vaddr + size)
 if (m_loadable_elf->contains(vaddr))
 	return;