Kernel: mmap regions are now demand-paged

mmap no longer commits physical memory up front; physical pages are
allocated lazily, on first access to the mapped region.
This commit is contained in:
Bananymous
2023-09-28 21:07:14 +03:00
parent 15cd59b8ce
commit 245f58cc3a
7 changed files with 149 additions and 29 deletions

View File

@@ -5,26 +5,27 @@
namespace Kernel
{
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
VirtualRange* result_ptr = new VirtualRange(page_table);
VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages, false);
if (result_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_kmalloc = false;
auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_vaddr = vaddr;
result->m_size = size;
result->m_flags = flags;
ASSERT(page_table.reserve_range(vaddr, size));
size_t needed_pages = size / PAGE_SIZE;
if (!preallocate_pages)
return result;
size_t needed_pages = size / PAGE_SIZE;
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = Heap::get().take_free_page();
@@ -39,10 +40,12 @@ namespace Kernel
page_table.map_page_at(paddr, vaddr + i * PAGE_SIZE, flags);
}
result->set_zero();
return result;
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr_start > 0);
@@ -58,20 +61,22 @@ namespace Kernel
vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
if (vaddr == 0)
{
dprintln("no free {} byte area", size);
return BAN::Error::from_errno(ENOMEM);
}
ASSERT(vaddr + size <= vaddr_end);
LockGuard _(page_table);
page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
return create_to_vaddr(page_table, vaddr, size, flags);
return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_kmalloc(size_t size)
{
VirtualRange* result = new VirtualRange(PageTable::kernel());
VirtualRange* result = new VirtualRange(PageTable::kernel(), false, true);
ASSERT(result);
result->m_kmalloc = true;
result->m_size = size;
result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
result->m_vaddr = (vaddr_t)kmalloc(size);
@@ -81,11 +86,15 @@ namespace Kernel
return BAN::Error::from_errno(ENOMEM);
}
result->set_zero();
return BAN::UniqPtr<VirtualRange>::adopt(result);
}
VirtualRange::VirtualRange(PageTable& page_table)
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool kmalloc)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_kmalloc(kmalloc)
{ }
VirtualRange::~VirtualRange()
@@ -98,7 +107,11 @@ namespace Kernel
else
{
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
Heap::get().release_page(m_page_table.physical_address_of(vaddr() + offset));
{
paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
if (paddr)
Heap::get().release_page(paddr);
}
m_page_table.unmap_range(vaddr(), size());
}
}
@@ -107,12 +120,19 @@ namespace Kernel
{
ASSERT(&PageTable::current() == &m_page_table);
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags()));
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
LockGuard _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
{
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
}
m_page_table.map_page_at(result->m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)(vaddr() + offset), PAGE_SIZE);
}
@@ -121,11 +141,31 @@ namespace Kernel
return result;
}
BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t address)
{
ASSERT(!m_kmalloc);
ASSERT(!m_preallocated);
ASSERT(contains(address));
ASSERT(&PageTable::current() == &m_page_table);
vaddr_t vaddr = address & PAGE_ADDR_MASK;
ASSERT(m_page_table.physical_address_of(vaddr) == 0);
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(paddr, vaddr, m_flags);
memset((void*)vaddr, 0x00, PAGE_SIZE);
return {};
}
void VirtualRange::set_zero()
{
PageTable& page_table = PageTable::current();
if (&page_table == &m_page_table)
if (m_kmalloc || &page_table == &m_page_table)
{
memset((void*)vaddr(), 0, size());
return;
@@ -153,7 +193,7 @@ namespace Kernel
PageTable& page_table = PageTable::current();
if (&page_table == &m_page_table)
if (m_kmalloc || &page_table == &m_page_table)
{
memcpy((void*)(vaddr() + offset), buffer, bytes);
return;

View File

@@ -136,9 +136,9 @@ namespace Kernel
process->page_table(),
0x400000, KERNEL_OFFSET,
needed_bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
));
argv_range->set_zero();
uintptr_t temp = argv_range->vaddr() + sizeof(char*) * 2;
argv_range->copy_from(0, (uint8_t*)&temp, sizeof(char*));
@@ -437,9 +437,9 @@ namespace Kernel
page_table(),
0x400000, KERNEL_OFFSET,
bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
));
range->set_zero();
size_t data_offset = sizeof(char*) * (container.size() + 1);
for (size_t i = 0; i < container.size(); i++)
@@ -584,8 +584,7 @@ namespace Kernel
{
LockGuard _(m_lock);
auto range = MUST(VirtualRange::create_to_vaddr(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags));
range->set_zero();
auto range = MUST(VirtualRange::create_to_vaddr(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags, true));
range->copy_from(elf_program_header.p_vaddr % PAGE_SIZE, elf.data() + elf_program_header.p_offset, elf_program_header.p_filesz);
MUST(m_mapped_ranges.emplace_back(false, BAN::move(range)));
@@ -623,6 +622,29 @@ namespace Kernel
return {};
}
BAN::ErrorOr<bool> Process::allocate_page_for_demand_paging(vaddr_t addr)
{
ASSERT(&Process::current() == this);
LockGuard _(m_lock);
if (Thread::current().stack().contains(addr))
{
TRY(Thread::current().stack().allocate_page_for_demand_paging(addr));
return true;
}
for (auto& mapped_range : m_mapped_ranges)
{
if (!mapped_range.range->contains(addr))
continue;
TRY(mapped_range.range->allocate_page_for_demand_paging(addr));
return true;
}
return false;
}
BAN::ErrorOr<long> Process::open_file(BAN::StringView path, int flags, mode_t mode)
{
BAN::String absolute_path = TRY(absolute_path_of(path));
@@ -890,9 +912,9 @@ namespace Kernel
page_table(),
0x400000, KERNEL_OFFSET,
args->len,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
false
));
range->set_zero();
LockGuard _(m_lock);
TRY(m_mapped_ranges.emplace_back(true, BAN::move(range)));

View File

@@ -122,8 +122,8 @@ namespace Kernel
thread->m_is_userspace = true;
thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
thread->m_interrupt_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present, true));
thread->m_interrupt_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present, true));
thread->setup_exec();