Kernel: Add fast page to page table

Add "fast page" to KERNEL_OFFSET. This is always present in page
tables and only requires changing the page table entry to map. This
requires no interrupts since it should only be for very operations
like memcpy.

I used to put all temporary mappings at vaddr 0, but this is much
better: the C++ standard says that nullptr access is undefined
behavior, and this change gets rid of it.

Fixed some bugs I found along the way.
Bananymous 2023-10-30 19:02:09 +02:00
parent 4dbe15aa0e
commit 3bac19e518
9 changed files with 193 additions and 134 deletions
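
For illustration, the usage pattern this commit introduces looks roughly like this (a minimal sketch assembled from the hunks below; dst and paddr are placeholder variables, not names from the commit):

	// Sketch of the fast-page pattern. CriticalScope disables interrupts,
	// so nothing can reschedule and reuse the fast page while it is mapped.
	{
		CriticalScope _;
		PageTable::map_fast_page(paddr);                       // point the reserved PTE at paddr
		memcpy(dst, PageTable::fast_page_as_ptr(), PAGE_SIZE); // access the frame through the fixed vaddr
		PageTable::unmap_fast_page();                          // clear the PTE and flush the TLB entry
	}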

@@ -1,4 +1,5 @@
 #include <BAN/ScopeGuard.h>
+#include <kernel/CriticalScope.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/LockGuard.h>
 #include <LibELF/LoadableELF.h>
@@ -306,9 +307,12 @@ namespace LibELF
 				if (paddr == 0)
 					return BAN::Error::from_errno(ENOMEM);
-				m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-				memcpy((void*)0, (void*)(start + i * PAGE_SIZE), PAGE_SIZE);
-				m_page_table.unmap_page(0);
+				{
+					CriticalScope _;
+					PageTable::map_fast_page(paddr);
+					memcpy(PageTable::fast_page_as_ptr(), (void*)(start + i * PAGE_SIZE), PAGE_SIZE);
+					PageTable::unmap_fast_page();
+				}
 				new_page_table.map_page_at(paddr, start + i * PAGE_SIZE, flags);
 				elf->m_physical_page_count++;

@@ -1,6 +1,6 @@
-#include <BAN/Errors.h>
 #include <kernel/Arch.h>
 #include <kernel/CPUID.h>
+#include <kernel/InterruptController.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/kmalloc.h>
 #include <kernel/Memory/PageTable.h>
@@ -143,6 +143,8 @@ namespace Kernel
 		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
 		pml4[511] = s_global_pml4e;
 
+		prepare_fast_page();
+
 		// Map (phys_kernel_start -> phys_kernel_end) to (virt_kernel_start -> virt_kernel_end)
 		ASSERT((vaddr_t)g_kernel_start % PAGE_SIZE == 0);
 		map_range_at(
@@ -185,6 +187,76 @@ namespace Kernel
 		g_multiboot2_info = (multiboot2_info_t*)(multiboot2_vaddr + ((vaddr_t)g_multiboot2_info % PAGE_SIZE));
 	}
 
+	void PageTable::prepare_fast_page()
+	{
+		constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
+		constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
+		constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
+		constexpr uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
+		constexpr uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
+
+		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
+		ASSERT(!(pml4[pml4e] & Flags::Present));
+		pml4[pml4e] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;
+
+		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
+		ASSERT(!(pdpt[pdpte] & Flags::Present));
+		pdpt[pdpte] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;
+
+		uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
+		ASSERT(!(pd[pde] & Flags::Present));
+		pd[pde] = V2P(allocate_zeroed_page_aligned_page()) | Flags::ReadWrite | Flags::Present;
+
+		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
+		ASSERT(!(pt[pte] & Flags::Present));
+		pt[pte] = V2P(allocate_zeroed_page_aligned_page());
+	}
+
+	void PageTable::map_fast_page(paddr_t paddr)
+	{
+		ASSERT(s_kernel);
+		ASSERT_GE(paddr, 0);
+		ASSERT(!interrupts_enabled());
+
+		constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
+		constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
+		constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
+		constexpr uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
+		constexpr uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
+
+		uint64_t* pml4 = (uint64_t*)P2V(s_kernel->m_highest_paging_struct);
+		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
+		uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
+		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
+
+		ASSERT(!(pt[pte] & Flags::Present));
+		pt[pte] = paddr | Flags::ReadWrite | Flags::Present;
+
+		invalidate(fast_page());
+	}
+
+	void PageTable::unmap_fast_page()
+	{
+		ASSERT(s_kernel);
+		ASSERT(!interrupts_enabled());
+
+		constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
+		constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
+		constexpr uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
+		constexpr uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
+		constexpr uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
+
+		uint64_t* pml4 = (uint64_t*)P2V(s_kernel->m_highest_paging_struct);
+		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
+		uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
+		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
+
+		ASSERT(pt[pte] & Flags::Present);
+		pt[pte] = 0;
+
+		invalidate(fast_page());
+	}
+
 	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
 	{
 		LockGuard _(s_kernel->m_lock);
@@ -246,13 +318,16 @@ namespace Kernel
 	void PageTable::invalidate(vaddr_t vaddr)
 	{
 		ASSERT(vaddr % PAGE_SIZE == 0);
-		if (this == s_current)
-			asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
+		asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
 	}
 
 	void PageTable::unmap_page(vaddr_t vaddr)
 	{
-		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
+		ASSERT(vaddr);
+		ASSERT(vaddr != fast_page());
+		if (vaddr >= KERNEL_OFFSET)
+			ASSERT_GE(vaddr, (vaddr_t)g_kernel_start);
+		if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
 			Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel);
 
 		ASSERT(is_canonical(vaddr));
@@ -294,7 +369,11 @@ namespace Kernel
 	void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags)
 	{
-		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
+		ASSERT(vaddr);
+		ASSERT(vaddr != fast_page());
+		if (vaddr >= KERNEL_OFFSET)
+			ASSERT_GE(vaddr, (vaddr_t)g_kernel_start);
+		if ((vaddr >= KERNEL_OFFSET) != (this == s_kernel))
 			Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel);
 
 		ASSERT(is_canonical(vaddr));
@@ -361,12 +440,11 @@ namespace Kernel
 	{
 		ASSERT(is_canonical(vaddr));
+		ASSERT(vaddr);
 		ASSERT(paddr % PAGE_SIZE == 0);
 		ASSERT(vaddr % PAGE_SIZE == 0);
 
-		size_t first_page = vaddr / PAGE_SIZE;
-		size_t last_page = (vaddr + size - 1) / PAGE_SIZE;
-		size_t page_count = last_page - first_page + 1;
+		size_t page_count = range_page_count(vaddr, size);
 
 		LockGuard _(m_lock);
 		for (size_t page = 0; page < page_count; page++)
@@ -527,6 +605,8 @@ namespace Kernel
 	vaddr_t PageTable::reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address)
 	{
+		if (first_address >= KERNEL_OFFSET && first_address < (vaddr_t)g_kernel_start)
+			first_address = (vaddr_t)g_kernel_start;
 		if (size_t rem = first_address % PAGE_SIZE)
 			first_address += PAGE_SIZE - rem;
 		if (size_t rem = last_address % PAGE_SIZE)
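
As background, the pml4e/pdpte/pde/pte constants above are the standard x86-64 four-level address decomposition: bits 47..39, 38..30, 29..21 and 20..12 of the uncanonicalized virtual address, each a 9-bit table index. A standalone illustration (the KERNEL_OFFSET value here is an assumed example, not taken from this commit):

	#include <cstdint>
	#include <cstdio>

	int main()
	{
		// assumed example value for KERNEL_OFFSET; banan-os defines the real one elsewhere
		uint64_t uc_vaddr = 0xFFFFFF8000000000 & 0x0000FFFFFFFFFFFF; // uncanonicalize: keep low 48 bits
		printf("pml4e %llu\n", (unsigned long long)((uc_vaddr >> 39) & 0x1FF)); // bits 47..39
		printf("pdpte %llu\n", (unsigned long long)((uc_vaddr >> 30) & 0x1FF)); // bits 38..30
		printf("pde   %llu\n", (unsigned long long)((uc_vaddr >> 21) & 0x1FF)); // bits 29..21
		printf("pte   %llu\n", (unsigned long long)((uc_vaddr >> 12) & 0x1FF)); // bits 20..12
	}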

@@ -38,6 +38,7 @@ namespace Kernel
 		BAN::RefPtr<Inode> m_inode;
 		const off_t m_offset;
 
+		// FIXME: is this even synchronized?
 		BAN::RefPtr<SharedFileData> m_shared_data;
 	};

@@ -29,6 +29,24 @@ namespace Kernel
 		static PageTable& kernel();
 		static PageTable& current();
 
+		static void map_fast_page(paddr_t);
+		static void unmap_fast_page();
+		static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
+
+		// FIXME: implement sized checks, return span, etc
+		static void* fast_page_as_ptr(size_t offset = 0)
+		{
+			ASSERT(offset <= PAGE_SIZE);
+			return reinterpret_cast<void*>(fast_page() + offset);
+		}
+
+		template<typename T>
+		static T& fast_page_as(size_t offset = 0)
+		{
+			ASSERT(offset + sizeof(T) <= PAGE_SIZE);
+			return *reinterpret_cast<T*>(fast_page() + offset);
+		}
+
 		static bool is_valid_pointer(uintptr_t);
 
 		static BAN::ErrorOr<PageTable*> create_userspace();
@@ -64,7 +82,8 @@ namespace Kernel
 		uint64_t get_page_data(vaddr_t) const;
 		void initialize_kernel();
 		void map_kernel_memory();
-		void invalidate(vaddr_t);
+		void prepare_fast_page();
+		static void invalidate(vaddr_t);
 
 	private:
 		paddr_t m_highest_paging_struct { 0 };
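
A hypothetical caller of the typed accessor, mirroring how the ACPI code below reads SDT headers (sketch only; header_paddr is a placeholder, and interrupts are assumed to be disabled via CriticalScope as at the other call sites):

	CriticalScope _;
	PageTable::map_fast_page(header_paddr & PAGE_ADDR_MASK);
	// fast_page_as<T> asserts that offset + sizeof(T) fits within the page
	auto& header = PageTable::fast_page_as<const SDTHeader>(header_paddr % PAGE_SIZE);
	size_t header_length = header.length;
	PageTable::unmap_fast_page();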

@@ -117,33 +117,33 @@ namespace Kernel
 		if (rsdp->revision >= 2)
 		{
-			PageTable::kernel().map_page_at(rsdp->xsdt_address & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
-			const XSDT* xsdt = (const XSDT*)(rsdp->xsdt_address % PAGE_SIZE);
-			BAN::ScopeGuard _([xsdt] { PageTable::kernel().unmap_page(0); });
+			PageTable::map_fast_page(rsdp->xsdt_address & PAGE_ADDR_MASK);
+			auto& xsdt = PageTable::fast_page_as<const XSDT>(rsdp->xsdt_address % PAGE_SIZE);
+			BAN::ScopeGuard _([] { PageTable::unmap_fast_page(); });
 
-			if (memcmp(xsdt->signature, "XSDT", 4) != 0)
+			if (memcmp(xsdt.signature, "XSDT", 4) != 0)
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
-			if (!is_valid_std_header(xsdt))
+			if (!is_valid_std_header(&xsdt))
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
 
-			m_header_table_paddr = (paddr_t)xsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
+			m_header_table_paddr = rsdp->xsdt_address + offsetof(XSDT, entries);
 			m_entry_size = 8;
-			root_entry_count = (xsdt->length - sizeof(SDTHeader)) / 8;
+			root_entry_count = (xsdt.length - sizeof(SDTHeader)) / 8;
 		}
 		else
 		{
-			PageTable::kernel().map_page_at(rsdp->rsdt_address & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
-			const RSDT* rsdt = (const RSDT*)((vaddr_t)rsdp->rsdt_address % PAGE_SIZE);
-			BAN::ScopeGuard _([rsdt] { PageTable::kernel().unmap_page(0); });
+			PageTable::map_fast_page(rsdp->rsdt_address & PAGE_ADDR_MASK);
+			auto& rsdt = PageTable::fast_page_as<const RSDT>(rsdp->rsdt_address % PAGE_SIZE);
+			BAN::ScopeGuard _([] { PageTable::unmap_fast_page(); });
 
-			if (memcmp(rsdt->signature, "RSDT", 4) != 0)
+			if (memcmp(rsdt.signature, "RSDT", 4) != 0)
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
-			if (!is_valid_std_header(rsdt))
+			if (!is_valid_std_header(&rsdt))
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
 
-			m_header_table_paddr = (paddr_t)rsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
+			m_header_table_paddr = rsdp->rsdt_address + offsetof(RSDT, entries);
 			m_entry_size = 4;
-			root_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
+			root_entry_count = (rsdt.length - sizeof(SDTHeader)) / 4;
 		}
 
 		size_t needed_pages = range_page_count(m_header_table_paddr, root_entry_count * m_entry_size);
@@ -162,9 +162,9 @@ namespace Kernel
 		auto map_header =
 			[](paddr_t header_paddr) -> vaddr_t
 			{
-				PageTable::kernel().map_page_at(header_paddr & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
-				size_t header_length = ((SDTHeader*)(header_paddr % PAGE_SIZE))->length;
-				PageTable::kernel().unmap_page(0);
+				PageTable::map_fast_page(header_paddr & PAGE_ADDR_MASK);
+				size_t header_length = PageTable::fast_page_as<SDTHeader>(header_paddr % PAGE_SIZE).length;
+				PageTable::unmap_fast_page();
 
 				size_t needed_pages = range_page_count(header_paddr, header_length);
 				vaddr_t page_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);

@@ -1,3 +1,4 @@
+#include <kernel/CriticalScope.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/FileBackedRegion.h>
 #include <kernel/Memory/Heap.h>
@@ -71,13 +72,10 @@ namespace Kernel
 			continue;
 
 		{
-			auto& page_table = PageTable::current();
-			LockGuard _(page_table);
-			ASSERT(page_table.is_page_free(0));
-
-			page_table.map_page_at(pages[i], 0, PageTable::Flags::Present);
-			memcpy(page_buffer, (void*)0, PAGE_SIZE);
-			page_table.unmap_page(0);
+			CriticalScope _;
+			PageTable::map_fast_page(pages[i]);
+			memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
+			PageTable::unmap_fast_page();
 		}
 
 		if (auto ret = inode->write(i * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())
@@ -105,23 +103,8 @@ namespace Kernel
 		size_t file_offset = m_offset + (vaddr - m_vaddr);
 		size_t bytes = BAN::Math::min<size_t>(m_size - file_offset, PAGE_SIZE);
 
-		BAN::ErrorOr<size_t> read_ret = 0;
-
-		// Zero out the new page
-		if (&PageTable::current() == &m_page_table)
-			read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
-		else
-		{
-			auto& page_table = PageTable::current();
-
-			LockGuard _(page_table);
-			ASSERT(page_table.is_page_free(0));
-
-			page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-			read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)0, bytes));
-			memset((void*)0, 0x00, PAGE_SIZE);
-			page_table.unmap_page(0);
-		}
+		ASSERT_EQ(&PageTable::current(), &m_page_table);
+		auto read_ret = m_inode->read(file_offset, BAN::ByteSpan((uint8_t*)vaddr, bytes));
 
 		if (read_ret.is_error())
 		{

@@ -1,3 +1,4 @@
+#include <kernel/CriticalScope.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/Memory/MemoryBackedRegion.h>
@@ -60,12 +61,10 @@ namespace Kernel
 			memset((void*)vaddr, 0x00, PAGE_SIZE);
 		else
 		{
-			LockGuard _(PageTable::current());
-			ASSERT(PageTable::current().is_page_free(0));
-
-			PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-			memset((void*)0, 0x00, PAGE_SIZE);
-			PageTable::current().unmap_page(0);
+			CriticalScope _;
+			PageTable::map_fast_page(paddr);
+			memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
+			PageTable::unmap_fast_page();
 		}
 
 		return true;
@@ -105,15 +104,10 @@ namespace Kernel
 			memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
 		else
 		{
-			paddr_t paddr = m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK);
-			ASSERT(paddr);
-
-			LockGuard _(PageTable::current());
-			ASSERT(PageTable::current().is_page_free(0));
-
-			PageTable::current().map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-			memcpy((void*)page_offset, (void*)(buffer + written), bytes);
-			PageTable::current().unmap_page(0);
+			CriticalScope _;
+			PageTable::map_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK));
+			memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes);
+			PageTable::unmap_fast_page();
 		}
 
 		written += bytes;

@@ -1,3 +1,4 @@
+#include <kernel/CriticalScope.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/Memory/VirtualRange.h>
@@ -124,7 +125,6 @@ namespace Kernel
 		auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
 
 		LockGuard _(m_page_table);
-		ASSERT(m_page_table.is_page_free(0));
 		for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
 		{
 			if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
@@ -134,10 +134,12 @@ namespace Kernel
 					return BAN::Error::from_errno(ENOMEM);
 				result->m_page_table.map_page_at(paddr, vaddr() + offset, m_flags);
 			}
 
-			m_page_table.map_page_at(result->m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-			memcpy((void*)0, (void*)(vaddr() + offset), PAGE_SIZE);
+			CriticalScope _;
+			PageTable::map_fast_page(result->m_page_table.physical_address_of(vaddr() + offset));
+			memcpy(PageTable::fast_page_as_ptr(), (void*)(vaddr() + offset), PAGE_SIZE);
+			PageTable::unmap_fast_page();
 		}
-		m_page_table.unmap_page(0);
 
 		return result;
 	}
@@ -172,14 +174,13 @@ namespace Kernel
 			return;
 		}
 
-		LockGuard _(page_table);
-		ASSERT(page_table.is_page_free(0));
 		for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
 		{
-			page_table.map_page_at(m_page_table.physical_address_of(vaddr() + offset), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-			memset((void*)0, 0, PAGE_SIZE);
+			CriticalScope _;
+			PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + offset));
+			memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
+			PageTable::unmap_fast_page();
 		}
-		page_table.unmap_page(0);
 	}
 
 	void VirtualRange::copy_from(size_t offset, const uint8_t* buffer, size_t bytes)
@@ -187,47 +188,34 @@ namespace Kernel
 		if (bytes == 0)
 			return;
 
-		// NOTE: Handling overflow
-		ASSERT(offset <= size());
-		ASSERT(bytes <= size());
-		ASSERT(offset + bytes <= size());
+		// Verify no overflow
+		ASSERT_LE(bytes, size());
+		ASSERT_LE(offset, size());
+		ASSERT_LE(offset, size() - bytes);
 
-		PageTable& page_table = PageTable::current();
-
-		if (m_kmalloc || &page_table == &m_page_table)
+		if (m_kmalloc || &PageTable::current() == &m_page_table)
 		{
 			memcpy((void*)(vaddr() + offset), buffer, bytes);
 			return;
 		}
 
-		LockGuard _(page_table);
-		ASSERT(page_table.is_page_free(0));
-
-		size_t off = offset % PAGE_SIZE;
-		size_t i = offset / PAGE_SIZE;
-
-		// NOTE: we map the first page separately since it needs extra calculations
-		page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-		memcpy((void*)off, buffer, PAGE_SIZE - off);
-		buffer += PAGE_SIZE - off;
-		bytes -= PAGE_SIZE - off;
-		i++;
+		size_t page_offset = offset % PAGE_SIZE;
+		size_t page_index = offset / PAGE_SIZE;
 
 		while (bytes > 0)
 		{
-			size_t len = BAN::Math::min<size_t>(PAGE_SIZE, bytes);
-
-			page_table.map_page_at(m_page_table.physical_address_of(vaddr() + i * PAGE_SIZE), 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-			memcpy((void*)0, buffer, len);
-
-			buffer += len;
-			bytes -= len;
-			i++;
+			{
+				CriticalScope _;
+				PageTable::map_fast_page(m_page_table.physical_address_of(vaddr() + page_index * PAGE_SIZE));
+				memcpy(PageTable::fast_page_as_ptr(page_offset), buffer, PAGE_SIZE - page_offset);
+				PageTable::unmap_fast_page();
+			}
+
+			buffer += PAGE_SIZE - page_offset;
+			bytes -= PAGE_SIZE - page_offset;
+			page_offset = 0;
+			page_index++;
 		}
-		page_table.unmap_page(0);
 	}
 }

@@ -47,9 +47,9 @@ namespace Kernel
 			continue;
 
 		CriticalScope _;
-		page_table.map_page_at(cache.paddr, 0, PageTable::Flags::Present);
-		memcpy(buffer.data(), (void*)(page_cache_offset * m_sector_size), m_sector_size);
-		page_table.unmap_page(0);
+		PageTable::map_fast_page(cache.paddr);
+		memcpy(buffer.data(), PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), m_sector_size);
+		PageTable::unmap_fast_page();
 
 		return true;
 	}
@@ -82,9 +82,9 @@ namespace Kernel
 			{
 				CriticalScope _;
-				page_table.map_page_at(cache.paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-				memcpy((void*)(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
-				page_table.unmap_page(0);
+				PageTable::map_fast_page(cache.paddr);
+				memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
+				PageTable::unmap_fast_page();
 			}
 
 			cache.sector_mask |= 1 << page_cache_offset;
@@ -113,9 +113,9 @@ namespace Kernel
 		{
 			CriticalScope _;
-			page_table.map_page_at(cache.paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-			memcpy((void*)(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
-			page_table.unmap_page(0);
+			PageTable::map_fast_page(cache.paddr);
+			memcpy(PageTable::fast_page_as_ptr(page_cache_offset * m_sector_size), buffer.data(), m_sector_size);
+			PageTable::unmap_fast_page();
 		}
 
 		return {};
@@ -123,21 +123,16 @@ namespace Kernel
 	BAN::ErrorOr<void> DiskCache::sync()
 	{
-		ASSERT(&PageTable::current() == &PageTable::kernel());
-		auto& page_table = PageTable::kernel();
-
 		for (auto& cache : m_cache)
 		{
 			if (cache.dirty_mask == 0)
 				continue;
 
 			{
-				LockGuard _(page_table);
-				ASSERT(page_table.is_page_free(0));
-
-				page_table.map_page_at(cache.paddr, 0, PageTable::Flags::Present);
-				memcpy(m_sync_cache.data(), (void*)0, PAGE_SIZE);
-				page_table.unmap_page(0);
+				CriticalScope _;
+				PageTable::map_fast_page(cache.paddr);
+				memcpy(m_sync_cache.data(), PageTable::fast_page_as_ptr(), PAGE_SIZE);
+				PageTable::unmap_fast_page();
 			}
 
 			uint8_t sector_start = 0;