Kernel: Big commit. Rewrite ELF loading code

We now load ELF files to VirtualRanges instead of using kmalloc.
We have only a fixed 1 MiB kmalloc for big allocations and this
allows loading files even when they don't fit in there.

This caused me to rewrite the whole ELF loading process since the
loaded ELF is not in memory mapped by every process.

Virtual ranges allow you to zero out the memory and to copy into
them from arbitrary byte buffers.
This commit is contained in:
Bananymous
2023-06-09 00:37:43 +03:00
parent 59b10c4d25
commit 801025ad7b
9 changed files with 206 additions and 49 deletions

View File

@@ -1,5 +1,4 @@
#include <kernel/Memory/GeneralAllocator.h>
#include <kernel/Memory/PageTableScope.h>
namespace Kernel
{

View File

@@ -8,11 +8,11 @@ namespace Kernel
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(&page_table != &PageTable::kernel());
VirtualRange* result = new VirtualRange(page_table);
ASSERT(result);
result->m_kmalloc = false;
result->m_size = size;
result->m_flags = flags;
MUST(result->m_physical_pages.reserve(size / PAGE_SIZE));
@@ -21,7 +21,7 @@ namespace Kernel
if (vaddr == 0)
{
vaddr = page_table.get_free_contiguous_pages(size / PAGE_SIZE);
vaddr = page_table.get_free_contiguous_pages(size / PAGE_SIZE, 0x400000);
ASSERT(vaddr);
}
@@ -43,8 +43,9 @@ namespace Kernel
VirtualRange* VirtualRange::create_kmalloc(size_t size)
{
VirtualRange* result = new VirtualRange(PageTable::kernel());
if (result == nullptr)
return nullptr;
ASSERT(result);
result->m_kmalloc = true;
result->m_size = size;
result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
result->m_vaddr = (vaddr_t)kmalloc(size);
@@ -53,6 +54,7 @@ namespace Kernel
delete result;
return nullptr;
}
return result;
}
@@ -62,7 +64,7 @@ namespace Kernel
VirtualRange::~VirtualRange()
{
if (&m_page_table == &PageTable::kernel())
if (m_kmalloc)
{
kfree((void*)m_vaddr);
return;
@@ -94,4 +96,82 @@ namespace Kernel
return result;
}
// Zero-fill the entire range, even when the owning page table is not
// the one currently active on this CPU.
void VirtualRange::set_zero()
{
PageTable& page_table = PageTable::current();
// Fast path: the range is visible in the active address space,
// so we can memset it directly through its own vaddr.
if (&page_table == &m_page_table)
{
memset((void*)vaddr(), 0, size());
return;
}
// Slow path: temporarily map each backing physical page at
// virtual address 0 in the *current* address space and zero it
// one page at a time. Page 0 is used as a scratch mapping slot;
// the lock keeps other users from touching it meanwhile.
page_table.lock();
ASSERT(page_table.is_page_free(0));
for (size_t i = 0; i < m_physical_pages.size(); i++)
{
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
// Flush the TLB entry for page 0 so the new mapping takes effect.
page_table.invalidate(0);
memset((void*)0, 0, PAGE_SIZE);
}
// Tear down the scratch mapping and flush it before releasing the lock.
page_table.unmap_page(0);
page_table.invalidate(0);
page_table.unlock();
}
// Copy `bytes` bytes from `buffer` into this range starting at `offset`,
// even when the owning page table is not the one currently active.
//
// FIX: the previous version copied the first page with an unclamped
// `PAGE_SIZE - off` length. When `bytes < PAGE_SIZE - off` this read past
// the end of `buffer` and underflowed the unsigned `bytes` counter,
// sending the loop off the end of m_physical_pages. The first page is now
// handled inside the loop with the copy length clamped to `bytes`.
void VirtualRange::copy_from(size_t offset, const uint8_t* buffer, size_t bytes)
{
if (bytes == 0)
return;
// NOTE: Handling overflow: each operand is validated separately so the
// sum below cannot wrap around.
ASSERT(offset <= size());
ASSERT(bytes <= size());
ASSERT(offset + bytes <= size());
PageTable& page_table = PageTable::current();
// Fast path: the range is visible in the active address space.
if (&page_table == &m_page_table)
{
memcpy((void*)(vaddr() + offset), buffer, bytes);
return;
}
// Slow path: temporarily map each backing physical page at virtual
// address 0 of the *current* address space and copy page by page.
page_table.lock();
ASSERT(page_table.is_page_free(0));
size_t off = offset % PAGE_SIZE;
size_t i = offset / PAGE_SIZE;
while (bytes > 0)
{
// Clamp to both the remaining bytes and the space left in this
// page (the first page may start mid-page, hence `off`).
size_t len = BAN::Math::min<size_t>(PAGE_SIZE - off, bytes);
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
page_table.invalidate(0);
memcpy((void*)off, buffer, len);
buffer += len;
bytes -= len;
off = 0; // only the first page can have a non-zero start offset
i++;
}
// Tear down the scratch mapping and flush it before releasing the lock.
page_table.unmap_page(0);
page_table.invalidate(0);
page_table.unlock();
}
}