Kernel: Add fast page to page table

Add "fast page" to KERNEL_OFFSET. This is always present in page
tables and only requires changing the page table entry to map. This
requires no interrupt handling since it should only be used for very short
operations like memcpy.

I used to map all temporary mappings to vaddr 0, but this approach is much
better. The C++ standard says that accessing a null pointer is undefined
behavior, and this change gets rid of that.

Also fixed some bugs I found along the way.
This commit is contained in:
2023-10-30 19:02:09 +02:00
parent 6e676ee8c5
commit 42772611ce
9 changed files with 193 additions and 134 deletions

View File

@@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
@@ -71,13 +72,10 @@ namespace Kernel
continue;
{
auto& page_table = PageTable::current();
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
page_table.map_page_at(pages[i], 0, PageTable::Flags::Present);
memcpy(page_buffer, (void*)0, PAGE_SIZE);
page_table.unmap_page(0);
CriticalScope _;
PageTable::map_fast_page(pages[i]);
memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
PageTable::unmap_fast_page();
}
if (auto ret = inode->write(i * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())
@@ -105,23 +103,8 @@ namespace Kernel
size_t file_offset = m_offset + (vaddr - m_vaddr);
size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);
BAN::ErrorOr<size_t> read_ret = 0;
// Zero out the new page
if (&PageTable::current() == &m_page_table)
read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
else
{
auto& page_table = PageTable::current();
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)0, bytes));
memset((void*)0, 0x00, PAGE_SIZE);
page_table.unmap_page(0);
}
ASSERT_EQ(&PageTable::current(), &m_page_table);
auto read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
if (read_ret.is_error())
{
@@ -158,15 +141,10 @@ namespace Kernel
TRY(m_inode->read(offset, BAN::ByteSpan(m_shared_data->page_buffer, bytes)));
auto& page_table = PageTable::current();
// TODO: check if this can cause deadlock?
LockGuard page_table_lock(page_table);
ASSERT(page_table.is_page_free(0));
page_table.map_page_at(pages[page_index], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, m_shared_data->page_buffer, PAGE_SIZE);
page_table.unmap_page(0);
CriticalScope _;
PageTable::map_fast_page(pages[page_index]);
memcpy(PageTable::fast_page_as_ptr(), m_shared_data->page_buffer, PAGE_SIZE);
PageTable::unmap_fast_page();
}
paddr_t paddr = pages[page_index];

View File

@@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
@@ -60,12 +61,10 @@ namespace Kernel
memset((void*)vaddr, 0x00, PAGE_SIZE);
else
{
LockGuard _(PageTable::current());
ASSERT(PageTable::current().is_page_free(0));
PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memset((void*)0, 0x00, PAGE_SIZE);
PageTable::current().unmap_page(0);
CriticalScope _;
PageTable::map_fast_page(paddr);
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
PageTable::unmap_fast_page();
}
return true;
@@ -105,15 +104,10 @@ namespace Kernel
memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
else
{
paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK);
ASSERT(paddr);
LockGuard _(PageTable::current());
ASSERT(PageTable::current().is_page_free(0));
PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)page_offset, (void*)(buffer + written), bytes);
PageTable::current().unmap_page(0);
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK));
memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes);
PageTable::unmap_fast_page();
}
written += bytes;

View File

@@ -1,3 +1,4 @@
#include <kernel/CriticalScope.h>
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
@@ -124,7 +125,6 @@ namespace Kernel
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
LockGuard _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
@@ -134,10 +134,12 @@ namespace Kernel
return BAN::Error::from_errno(ENOMEM);
result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
}
m_page_table.map_page_at(result->m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)(vaddr() + offset), PAGE_SIZE);
CriticalScope _;
PageTable::map_fast_page(result->m_page_table.physical_address_of(vaddr() + offset));
memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
PageTable::unmap_fast_page();
}
m_page_table.unmap_page(0);
return result;
}
@@ -172,14 +174,13 @@ namespace Kernel
return;
}
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
page_table.map_page_at(m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memset((void*)0, 0, PAGE_SIZE);
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + offset));
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
PageTable::unmap_fast_page();
}
page_table.unmap_page(0);
}
void VirtualRange::copy_from(size_t offset, const uint8_t* buffer, size_t bytes)
@@ -187,47 +188,34 @@ namespace Kernel
if (bytes == 0)
return;
// NOTE: Handling overflow
ASSERT(offset <= size());
ASSERT(bytes <= size());
ASSERT(offset + bytes <= size());
// Verify no overflow
ASSERT_LE(bytes, size());
ASSERT_LE(offset, size());
ASSERT_LE(offset, size() - bytes);
PageTable& page_table = PageTable::current();
if (m_kmalloc || &page_table == &m_page_table)
if (m_kmalloc || &PageTable::current() == &m_page_table)
{
memcpy((void*)(vaddr() + offset), buffer, bytes);
return;
}
LockGuard _(page_table);
ASSERT(page_table.is_page_free(0));
size_t off = offset % PAGE_SIZE;
size_t i = offset / PAGE_SIZE;
// NOTE: we map the first page separately since it needs extra calculations
page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)off, buffer, PAGE_SIZE - off);
buffer += PAGE_SIZE - off;
bytes -= PAGE_SIZE - off;
i++;
size_t page_offset = offset % PAGE_SIZE;
size_t page_index = offset / PAGE_SIZE;
while (bytes > 0)
{
size_t len = BAN::Math::min<size_t>(PAGE_SIZE, bytes);
{
CriticalScope _;
PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE));
memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, PAGE_SIZE - page_offset);
PageTable::unmap_fast_page();
}
page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, buffer, len);
buffer += len;
bytes -= len;
i++;
buffer += PAGE_SIZE - page_offset;
bytes -= PAGE_SIZE - page_offset;
page_offset = 0;
page_index++;
}
page_table.unmap_page(0);
}
}