Kernel: rework the whole PageTable structure

We now have a page table structure for kernel memory, which is shared
between all processes.
This commit is contained in:
Bananymous
2023-07-05 23:41:35 +03:00
parent 60fe5a656c
commit 4086d7c3be
13 changed files with 215 additions and 165 deletions

View File

@@ -196,7 +196,6 @@ namespace Kernel
page_vaddr = m_page_table.get_free_page();
m_page_table.map_page_at(page_paddr, page_vaddr, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(page_vaddr);
}
bool FixedWidthAllocator::allocate_page_if_needed(vaddr_t vaddr, uint8_t flags)
@@ -251,7 +250,6 @@ namespace Kernel
{
paddr_t paddr = new_page_table.physical_address_of(page_begin);
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(0);
memcpy((void*)0, (void*)page_begin, PAGE_SIZE);
}
@@ -261,7 +259,6 @@ namespace Kernel
}
m_page_table.unmap_page(0);
m_page_table.invalidate(0);
m_page_table.unlock();

View File

@@ -109,14 +109,12 @@ namespace Kernel
new_page_table.map_page_at(paddr, vaddr, flags);
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(0);
memcpy((void*)0, (void*)vaddr, PAGE_SIZE);
}
MUST(allocator->m_allocations.push_back(BAN::move(new_allocation)));
}
m_page_table.unmap_page(0);
m_page_table.invalidate(0);
m_page_table.unlock();

View File

@@ -85,11 +85,9 @@ namespace Kernel
for (size_t i = 0; i < result->m_physical_pages.size(); i++)
{
m_page_table.map_page_at(result->m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
m_page_table.invalidate(0);
memcpy((void*)0, (void*)(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
}
m_page_table.unmap_page(0);
m_page_table.invalidate(0);
m_page_table.unlock();
@@ -112,11 +110,9 @@ namespace Kernel
for (size_t i = 0; i < m_physical_pages.size(); i++)
{
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
page_table.invalidate(0);
memset((void*)0, 0, PAGE_SIZE);
}
page_table.unmap_page(0);
page_table.invalidate(0);
page_table.unlock();
}
@@ -147,7 +143,6 @@ namespace Kernel
// NOTE: we map the first page separately since it needs extra calculations
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
page_table.invalidate(0);
memcpy((void*)off, buffer, PAGE_SIZE - off);
@@ -160,7 +155,6 @@ namespace Kernel
size_t len = BAN::Math::min<size_t>(PAGE_SIZE, bytes);
page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
page_table.invalidate(0);
memcpy((void*)0, buffer, len);
@@ -169,7 +163,6 @@ namespace Kernel
i++;
}
page_table.unmap_page(0);
page_table.invalidate(0);
page_table.unlock();
}