Kernel: Rename MMU to PageTable

This is a more descriptive name for what it actually represents.
Bananymous
2023-05-29 21:06:09 +03:00
parent fb17af4844
commit 5bb1f2a48c
21 changed files with 180 additions and 174 deletions
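
At call sites the rename is mechanical: the class, its scope guard, its flags enum, and the member holding a reference to it all change name while the API itself stays the same. A minimal before/after sketch, using only identifiers that appear in the diff below:

// Before the rename:
MMUScope _(mmu);
mmu.map_page_at(paddr, vaddr, MMU::Flags::ReadWrite | MMU::Flags::Present);

// After the rename:
PageTableScope _(page_table);
page_table.map_page_at(paddr, vaddr, PageTable::Flags::ReadWrite | PageTable::Flags::Present);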


@@ -1,11 +1,11 @@
#include <kernel/Memory/FixedWidthAllocator.h>
#include <kernel/Memory/MMUScope.h>
#include <kernel/Memory/PageTableScope.h>
namespace Kernel
{
FixedWidthAllocator::FixedWidthAllocator(MMU& mmu, uint32_t allocation_size)
: m_mmu(mmu)
FixedWidthAllocator::FixedWidthAllocator(PageTable& page_table, uint32_t allocation_size)
: m_page_table(page_table)
, m_allocation_size(BAN::Math::max(allocation_size, m_min_allocation_size))
{
ASSERT(BAN::Math::is_power_of_two(allocation_size));
@@ -40,9 +40,9 @@ namespace Kernel
if (page_vaddr == 0)
continue;
ASSERT(!m_mmu.is_page_free(page_vaddr));
Heap::get().release_page(m_mmu.physical_address_of(page_vaddr));
m_mmu.unmap_page(page_vaddr);
ASSERT(!m_page_table.is_page_free(page_vaddr));
Heap::get().release_page(m_page_table.physical_address_of(page_vaddr));
m_page_table.unmap_page(page_vaddr);
}
kfree((void*)m_nodes_page);
@@ -187,8 +187,8 @@ namespace Kernel
paddr_t page_paddr = Heap::get().take_free_page();
ASSERT(page_paddr);
page_vaddr = m_mmu.get_free_page();
m_mmu.map_page_at(page_paddr, page_vaddr, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
page_vaddr = m_page_table.get_free_page();
m_page_table.map_page_at(page_paddr, page_vaddr, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
}
bool FixedWidthAllocator::allocate_page_if_needed(vaddr_t vaddr, uint8_t flags)
@@ -204,7 +204,7 @@ namespace Kernel
}
// Page is not allocated so the vaddr must not be in use
ASSERT(m_mmu.is_page_free(vaddr));
ASSERT(m_page_table.is_page_free(vaddr));
// Allocate the vaddr on empty page
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
@@ -214,7 +214,7 @@ namespace Kernel
{
paddr_t paddr = Heap::get().take_free_page();
ASSERT(paddr);
m_mmu.map_page_at(paddr, vaddr, flags);
m_page_table.map_page_at(paddr, vaddr, flags);
page_begin = vaddr;
return true;
}
@@ -223,14 +223,14 @@ namespace Kernel
ASSERT_NOT_REACHED();
}
BAN::ErrorOr<FixedWidthAllocator*> FixedWidthAllocator::clone(MMU& new_mmu)
BAN::ErrorOr<FixedWidthAllocator*> FixedWidthAllocator::clone(PageTable& new_page_table)
{
FixedWidthAllocator* allocator = new FixedWidthAllocator(new_mmu, allocation_size());
FixedWidthAllocator* allocator = new FixedWidthAllocator(new_page_table, allocation_size());
if (allocator == nullptr)
return BAN::Error::from_errno(ENOMEM);
MMUScope _(m_mmu);
ASSERT(m_mmu.is_page_free(0));
PageTableScope _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (node* node = m_used_list; node; node = node->next)
{
@@ -238,14 +238,14 @@ namespace Kernel
vaddr_t vaddr = address_of_node(node);
vaddr_t page_begin = vaddr & PAGE_ADDR_MASK;
uint8_t flags = m_mmu.get_page_flags(page_begin);
uint8_t flags = m_page_table.get_page_flags(page_begin);
// Allocate and copy all data from this allocation to the new one
if (allocator->allocate_page_if_needed(page_begin, flags))
{
paddr_t paddr = new_mmu.physical_address_of(page_begin);
m_mmu.map_page_at(paddr, 0, MMU::Flags::ReadWrite | MMU::Flags::Present);
m_mmu.invalidate(0);
paddr_t paddr = new_page_table.physical_address_of(page_begin);
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(0);
memcpy((void*)0, (void*)page_begin, PAGE_SIZE);
}
@@ -254,8 +254,8 @@ namespace Kernel
allocator->allocate_node(new_node);
}
m_mmu.unmap_page(0);
m_mmu.invalidate(0);
m_page_table.unmap_page(0);
m_page_table.invalidate(0);
return allocator;
}
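
The clone() hunks above all rely on the same trick: to fill pages of a page table that is not currently active, the running table maps the destination's physical frame at virtual address 0 as a temporary window (clone() asserts that page 0 is free), copies the data, then tears the window down. A condensed sketch of that pattern, using only calls that appear above; src stands for the currently active table and dst for the clone target (names chosen for this sketch only, and in the real loop the window is unmapped once, after all pages are copied):

paddr_t paddr = dst.physical_address_of(page_begin);   // frame backing the clone's copy
src.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
src.invalidate(0);                                      // flush the stale TLB entry for vaddr 0
memcpy((void*)0, (void*)page_begin, PAGE_SIZE);         // old contents -> new frame
src.unmap_page(0);
src.invalidate(0);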


@@ -1,12 +1,12 @@
#include <kernel/Memory/GeneralAllocator.h>
#include <kernel/Memory/MMUScope.h>
#include <kernel/Memory/PageTableScope.h>
#include <kernel/Process.h>
namespace Kernel
{
GeneralAllocator::GeneralAllocator(MMU& mmu)
: m_mmu(mmu)
GeneralAllocator::GeneralAllocator(PageTable& page_table)
: m_page_table(page_table)
{ }
GeneralAllocator::~GeneralAllocator()
@@ -35,9 +35,9 @@ namespace Kernel
allocation.pages[i] = paddr;
}
allocation.address = m_mmu.get_free_contiguous_pages(needed_pages);
allocation.address = m_page_table.get_free_contiguous_pages(needed_pages);
for (size_t i = 0; i < needed_pages; i++)
m_mmu.map_page_at(allocation.pages[i], allocation.address + i * PAGE_SIZE, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
m_page_table.map_page_at(allocation.pages[i], allocation.address + i * PAGE_SIZE, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
MUST(m_allocations.push_back(BAN::move(allocation)));
return allocation.address;
@@ -50,7 +50,7 @@ namespace Kernel
if (it->address != address)
continue;
m_mmu.unmap_range(it->address, it->pages.size() * PAGE_SIZE);
m_page_table.unmap_range(it->address, it->pages.size() * PAGE_SIZE);
for (auto paddr : it->pages)
Heap::get().release_page(paddr);
@@ -62,24 +62,24 @@ namespace Kernel
return false;
}
BAN::ErrorOr<GeneralAllocator*> GeneralAllocator::clone(MMU& new_mmu)
BAN::ErrorOr<GeneralAllocator*> GeneralAllocator::clone(PageTable& new_page_table)
{
GeneralAllocator* allocator = new GeneralAllocator(new_mmu);
GeneralAllocator* allocator = new GeneralAllocator(new_page_table);
if (allocator == nullptr)
return BAN::Error::from_errno(ENOMEM);
MMUScope _(m_mmu);
ASSERT(m_mmu.is_page_free(0));
PageTableScope _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (auto& allocation : m_allocations)
{
Allocation new_allocation;
ASSERT(new_mmu.is_range_free(allocation.address, allocation.pages.size() * PAGE_SIZE));
ASSERT(new_page_table.is_range_free(allocation.address, allocation.pages.size() * PAGE_SIZE));
new_allocation.address = allocation.address;
MUST(new_allocation.pages.reserve(allocation.pages.size()));
uint8_t flags = m_mmu.get_page_flags(allocation.address);
uint8_t flags = m_page_table.get_page_flags(allocation.address);
for (size_t i = 0; i < allocation.pages.size(); i++)
{
paddr_t paddr = Heap::get().take_free_page();
@@ -88,17 +88,17 @@ namespace Kernel
vaddr_t vaddr = allocation.address + i * PAGE_SIZE;
MUST(new_allocation.pages.push_back(paddr));
new_mmu.map_page_at(paddr, vaddr, flags);
new_page_table.map_page_at(paddr, vaddr, flags);
m_mmu.map_page_at(paddr, 0, MMU::Flags::ReadWrite | MMU::Flags::Present);
m_mmu.invalidate(0);
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(0);
memcpy((void*)0, (void*)vaddr, PAGE_SIZE);
}
MUST(allocator->m_allocations.push_back(BAN::move(new_allocation)));
}
m_mmu.unmap_page(0);
m_mmu.invalidate(0);
m_page_table.unmap_page(0);
m_page_table.invalidate(0);
return allocator;
}
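
GeneralAllocator::clone() follows the same vaddr-0 window pattern, with two details visible above: the page flags are read once from the first page of the allocation and reused for every page, and each cloned page gets a brand-new frame from the heap, mapped at the same virtual address in the new table. Condensed from the loop body above (error handling and the surrounding loop omitted):

paddr_t paddr = Heap::get().take_free_page();                  // fresh frame for the clone
vaddr_t vaddr = allocation.address + i * PAGE_SIZE;
new_page_table.map_page_at(paddr, vaddr, flags);               // same vaddr, new frame
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(0);
memcpy((void*)0, (void*)vaddr, PAGE_SIZE);                     // copy current contents through the window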


@@ -1,6 +1,6 @@
#include <kernel/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MMU.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/multiboot.h>
namespace Kernel


@@ -1,6 +1,6 @@
#include <BAN/Assert.h>
#include <BAN/Math.h>
#include <kernel/Memory/MMU.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Memory/PhysicalRange.h>
extern uint8_t g_kernel_end[];
@@ -37,7 +37,7 @@ namespace Kernel
m_list_pages = BAN::Math::div_round_up<uint64_t>(m_total_pages * sizeof(node), PAGE_SIZE);
m_reservable_pages = m_total_pages - m_list_pages;
MMU::kernel().identity_map_range(m_start, m_list_pages * PAGE_SIZE, MMU::Flags::ReadWrite | MMU::Flags::Present);
PageTable::kernel().identity_map_range(m_start, m_list_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
// Initialize page list so that every page points to the next one
node* page_list = (node*)m_start;
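
For scale, assuming PAGE_SIZE is 4096 bytes and the free-list node is 8 bytes (neither value is shown in this hunk), the bookkeeping overhead works out as follows:

//   m_total_pages      = 100000
//   m_list_pages       = div_round_up(100000 * 8, 4096) = 196
//   m_reservable_pages = 100000 - 196 = 99804
// Only those 196 list pages are identity mapped with ReadWrite | Present.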


@@ -1,53 +1,53 @@
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MMUScope.h>
#include <kernel/Memory/PageTableScope.h>
#include <kernel/Memory/VirtualRange.h>
namespace Kernel
{
VirtualRange* VirtualRange::create(MMU& mmu, vaddr_t vaddr, size_t size, uint8_t flags)
VirtualRange* VirtualRange::create(PageTable& page_table, vaddr_t vaddr, size_t size, uint8_t flags)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(&mmu != &MMU::kernel());
ASSERT(&page_table != &PageTable::kernel());
VirtualRange* result = new VirtualRange(mmu);
VirtualRange* result = new VirtualRange(page_table);
ASSERT(result);
result->m_size = size;
result->m_flags = flags;
MUST(result->m_physical_pages.reserve(size / PAGE_SIZE));
mmu.lock();
page_table.lock();
if (vaddr == 0)
{
vaddr = mmu.get_free_contiguous_pages(size / PAGE_SIZE);
vaddr = page_table.get_free_contiguous_pages(size / PAGE_SIZE);
ASSERT(vaddr);
}
result->m_vaddr = vaddr;
ASSERT(mmu.is_range_free(vaddr, size));
ASSERT(page_table.is_range_free(vaddr, size));
for (size_t offset = 0; offset < size; offset += PAGE_SIZE)
{
paddr_t paddr = Heap::get().take_free_page();
ASSERT(paddr);
MUST(result->m_physical_pages.push_back(paddr));
mmu.map_page_at(paddr, vaddr + offset, flags);
page_table.map_page_at(paddr, vaddr + offset, flags);
}
mmu.unlock();
page_table.unlock();
return result;
}
VirtualRange* VirtualRange::create_kmalloc(size_t size)
{
VirtualRange* result = new VirtualRange(MMU::kernel());
VirtualRange* result = new VirtualRange(PageTable::kernel());
if (result == nullptr)
return nullptr;
result->m_size = size;
result->m_flags = MMU::Flags::ReadWrite | MMU::Flags::Present;
result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
result->m_vaddr = (vaddr_t)kmalloc(size);
if (result->m_vaddr == 0)
{
@@ -57,37 +57,37 @@ namespace Kernel
return result;
}
VirtualRange::VirtualRange(MMU& mmu)
: m_mmu(mmu)
VirtualRange::VirtualRange(PageTable& page_table)
: m_page_table(page_table)
{ }
VirtualRange::~VirtualRange()
{
if (&m_mmu == &MMU::kernel())
if (&m_page_table == &PageTable::kernel())
{
kfree((void*)m_vaddr);
return;
}
m_mmu.unmap_range(vaddr(), size());
m_page_table.unmap_range(vaddr(), size());
for (paddr_t page : m_physical_pages)
Heap::get().release_page(page);
}
VirtualRange* VirtualRange::clone(MMU& mmu)
VirtualRange* VirtualRange::clone(PageTable& page_table)
{
VirtualRange* result = create(mmu, vaddr(), size(), flags());
VirtualRange* result = create(page_table, vaddr(), size(), flags());
MMUScope _(m_mmu);
ASSERT(m_mmu.is_page_free(0));
PageTableScope _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (size_t i = 0; i < result->m_physical_pages.size(); i++)
{
m_mmu.map_page_at(result->m_physical_pages[i], 0, MMU::Flags::ReadWrite | MMU::Flags::Present);
m_mmu.invalidate(0);
m_page_table.map_page_at(result->m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(0);
memcpy((void*)0, (void*)(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
}
m_mmu.unmap_page(0);
m_mmu.invalidate(0);
m_page_table.unmap_page(0);
m_page_table.invalidate(0);
return result;
}
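
A usage sketch of the renamed VirtualRange API, assuming two per-process tables named current_table and forked_table (placeholder names for this sketch; error handling omitted):

// Passing vaddr == 0 lets create() pick a free contiguous region itself.
VirtualRange* range = VirtualRange::create(current_table, 0, 4 * PAGE_SIZE,
	PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);

// clone() duplicates the range into another address space, copying each page
// through the temporary vaddr-0 window shown above.
VirtualRange* copy = range->clone(forked_table);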