Kernel: mmap regions are now demand paged

mmap no longer takes any physical memory up front; pages are allocated only
when the mapped memory is actually used.
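
In practice an anonymous mapping now only consumes physical pages for the parts of it a process actually touches. A minimal userspace sketch of the intended behaviour (plain POSIX mmap usage for illustration, not code from this commit):

// Illustrative only: standard POSIX mmap usage showing the effect of
// demand paging; this program is not part of the commit.
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

int main()
{
	// Map 16 MiB of anonymous memory. After this commit the kernel only
	// reserves the virtual range; no physical pages are allocated yet.
	const size_t size = 16 * 1024 * 1024;
	void* addr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (addr == MAP_FAILED)
	{
		perror("mmap");
		return 1;
	}

	// Touching a single byte faults in exactly one page, which the kernel
	// then backs with a fresh zeroed physical page.
	((char*)addr)[0] = 'A';

	munmap(addr, size);
	return 0;
}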
Bananymous
2023-09-28 21:07:14 +03:00
parent 15cd59b8ce
commit 245f58cc3a
7 changed files with 149 additions and 29 deletions


@@ -5,26 +5,27 @@
namespace Kernel
{
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
VirtualRange* result_ptr = new VirtualRange(page_table);
VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages, false);
if (result_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_kmalloc = false;
auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_vaddr = vaddr;
result->m_size = size;
result->m_flags = flags;
ASSERT(page_table.reserve_range(vaddr, size));
size_t needed_pages = size / PAGE_SIZE;
if (!preallocate_pages)
return result;
size_t needed_pages = size / PAGE_SIZE;
for (size_t i = 0; i < needed_pages; i++)
{
paddr_t paddr = Heap::get().take_free_page();
@@ -39,10 +40,12 @@ namespace Kernel
page_table.map_page_at(paddr, vaddr + i * PAGE_SIZE, flags);
}
result->set_zero();
return result;
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr_start > 0);
@@ -58,20 +61,22 @@ namespace Kernel
vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
if (vaddr == 0)
{
dprintln("no free {} byte area", size);
return BAN::Error::from_errno(ENOMEM);
}
ASSERT(vaddr + size <= vaddr_end);
LockGuard _(page_table);
page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
return create_to_vaddr(page_table, vaddr, size, flags);
return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_kmalloc(size_t size)
{
VirtualRange* result = new VirtualRange(PageTable::kernel());
VirtualRange* result = new VirtualRange(PageTable::kernel(), false, true);
ASSERT(result);
result->m_kmalloc = true;
result->m_size = size;
result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
result->m_vaddr = (vaddr_t)kmalloc(size);
@@ -81,11 +86,15 @@ namespace Kernel
return BAN::Error::from_errno(ENOMEM);
}
result->set_zero();
return BAN::UniqPtr<VirtualRange>::adopt(result);
}
VirtualRange::VirtualRange(PageTable& page_table)
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool kmalloc)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_kmalloc(kmalloc)
{ }
VirtualRange::~VirtualRange()
@@ -98,7 +107,11 @@ namespace Kernel
else
{
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
Heap::get().release_page(m_page_table.physical_address_of(vaddr() + offset));
{
paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
if (paddr)
Heap::get().release_page(paddr);
}
m_page_table.unmap_range(vaddr(), size());
}
}
@@ -107,12 +120,19 @@ namespace Kernel
{
ASSERT(&PageTable::current() == &m_page_table);
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags()));
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
LockGuard _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
{
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
}
m_page_table.map_page_at(result->m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)(vaddr() + offset), PAGE_SIZE);
}
@@ -121,11 +141,31 @@ namespace Kernel
return result;
}
BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t address)
{
ASSERT(!m_kmalloc);
ASSERT(!m_preallocated);
ASSERT(contains(address));
ASSERT(&PageTable::current() == &m_page_table);
vaddr_t vaddr = address & PAGE_ADDR_MASK;
ASSERT(m_page_table.physical_address_of(vaddr) == 0);
paddr_t paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(paddr, vaddr, m_flags);
memset((void*)vaddr, 0x00, PAGE_SIZE);
return {};
}
void VirtualRange::set_zero()
{
PageTable& page_table = PageTable::current();
if (&page_table == &m_page_table)
if (m_kmalloc || &page_table == &m_page_table)
{
memset((void*)vaddr(), 0, size());
return;
@@ -153,7 +193,7 @@ namespace Kernel
PageTable& page_table = PageTable::current();
if (&page_table == &m_page_table)
if (m_kmalloc || &page_table == &m_page_table)
{
memcpy((void*)(vaddr() + offset), buffer, bytes);
return;