Kernel: MMU now takes flags when allocating pages

Bananymous 2023-03-01 20:15:58 +02:00
parent 7d84f290a1
commit 9756de02ef
5 changed files with 70 additions and 54 deletions
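In short, MMU::allocate_page() and MMU::allocate_range() now take an explicit flags argument built from the new MMU::Flags enum, and callers must always include Flags::Present (both implementations assert it). A condensed sketch of the updated usage, lifted from the APIC and VESA call sites further down in this diff (madt_addr and framebuffer are whatever those callers already have in scope):

#include <kernel/MMU.h>

// Kernel-only, writable mapping of a single ACPI table page:
MMU::get().allocate_page(madt_addr, MMU::Flags::ReadWrite | MMU::Flags::Present);

// Writable framebuffer range that user mode may also access:
MMU::get().allocate_range(framebuffer.addr, framebuffer.pitch * framebuffer.height,
    MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);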


@@ -7,9 +7,6 @@
#define MMU_DEBUG_PRINT 0
- #define PRESENT (1 << 0)
- #define READ_WRITE (1 << 1)
// bits 31-12 set
#define PAGE_MASK 0xfffff000
#define PAGE_SIZE 0x00001000
@@ -47,7 +44,7 @@ MMU::MMU()
for (int i = 0; i < 4; i++)
{
uint64_t* page_directory = allocate_page_aligned_page();
- m_highest_paging_struct[i] = (uint64_t)page_directory | PRESENT;
+ m_highest_paging_struct[i] = (uint64_t)page_directory | Flags::Present;
}
// create and identity map first 4 MiB
@@ -56,9 +53,9 @@ MMU::MMU()
{
uint64_t* page_table = allocate_page_aligned_page();
for (uint64_t j = 0; j < 512; j++)
- page_table[j] = (i << 21) | (j << 12) | READ_WRITE | PRESENT;
+ page_table[j] = (i << 21) | (j << 12) | Flags::ReadWrite | Flags::Present;
- page_directory1[i] = (uint64_t)page_table | READ_WRITE | PRESENT;
+ page_directory1[i] = (uint64_t)page_table | Flags::ReadWrite | Flags::Present;
}
// dont map first page (0 -> 4 KiB) so that nullptr dereference
@@ -70,35 +67,37 @@ MMU::MMU()
asm volatile("movl %0, %%cr3" :: "r"(m_highest_paging_struct));
}
- void MMU::allocate_page(uintptr_t address)
+ void MMU::allocate_page(uintptr_t address, uint8_t flags)
{
#if MMU_DEBUG_PRINT
dprintln("AllocatePage(0x{8H})", address & PAGE_MASK);
#endif
+ ASSERT(flags & Flags::Present);
uint32_t pdpte = (address & 0xC0000000) >> 30;
uint32_t pde = (address & 0x3FE00000) >> 21;
uint32_t pte = (address & 0x001FF000) >> 12;
uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
- if (!(page_directory[pde] & PRESENT))
+ if (!(page_directory[pde] & Flags::Present))
{
uint64_t* page_table = allocate_page_aligned_page();
- page_directory[pde] = (uint64_t)page_table | READ_WRITE | PRESENT;
+ page_directory[pde] = (uint64_t)page_table;
}
+ page_directory[pde] |= flags;
uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
- page_table[pte] = (address & PAGE_MASK) | READ_WRITE | PRESENT;
+ page_table[pte] = (address & PAGE_MASK) | Flags::ReadWrite | Flags::Present;
asm volatile("invlpg (%0)" :: "r"(address & PAGE_MASK) : "memory");
}
- void MMU::allocate_range(uintptr_t address, ptrdiff_t size)
+ void MMU::allocate_range(uintptr_t address, ptrdiff_t size, uint8_t flags)
{
uintptr_t s_page = address & PAGE_MASK;
uintptr_t e_page = (address + size - 1) & PAGE_MASK;
for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
- allocate_page(page);
+ allocate_page(page, flags);
}
void MMU::unallocate_page(uintptr_t address)
@@ -112,11 +111,11 @@ void MMU::unallocate_page(uintptr_t address)
uint32_t pte = (address & 0x001FF000) >> 12;
uint64_t* page_directory = (uint64_t*)(m_highest_paging_struct[pdpte] & PAGE_MASK);
- if (!(page_directory[pde] & PRESENT))
+ if (!(page_directory[pde] & Flags::Present))
return;
uint64_t* page_table = (uint64_t*)(page_directory[pde] & PAGE_MASK);
- if (!(page_table[pte] & PRESENT))
+ if (!(page_table[pte] & Flags::Present))
return;
page_table[pte] = 0;


@@ -2,15 +2,12 @@
#include <kernel/kmalloc.h>
#include <kernel/MMU.h>
- #define PRESENT (1 << 0)
- #define READ_WRITE (1 << 1)
#define PAGE_SIZE 0x1000
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define CLEANUP_STRUCTURE(s) \
for (uint64_t i = 0; i < 512; i++) \
- if (s[i] & PRESENT) \
+ if (s[i] & Flags::Present) \
goto cleanup_done; \
kfree(s)
@@ -42,17 +39,17 @@ MMU::MMU()
m_highest_paging_struct = allocate_page_aligned_page();
uint64_t* pdpt = allocate_page_aligned_page();
- m_highest_paging_struct[0] = (uint64_t)pdpt | READ_WRITE | PRESENT;
+ m_highest_paging_struct[0] = (uint64_t)pdpt | Flags::ReadWrite | Flags::Present;
uint64_t* pd = allocate_page_aligned_page();
- pdpt[0] = (uint64_t)pd | READ_WRITE | PRESENT;
+ pdpt[0] = (uint64_t)pd | Flags::ReadWrite | Flags::Present;
for (uint32_t i = 0; i < 2; i++)
{
uint64_t* pt = allocate_page_aligned_page();
for (uint64_t j = 0; j < 512; j++)
- pt[j] = (i << 21) | (j << 12) | READ_WRITE | PRESENT;
- pd[i] = (uint64_t)pt | READ_WRITE | PRESENT;
+ pt[j] = (i << 21) | (j << 12) | Flags::ReadWrite | Flags::Present;
+ pd[i] = (uint64_t)pt | Flags::ReadWrite | Flags::Present;
}
// Unmap 0 -> 4 KiB
@@ -68,17 +65,17 @@ MMU::~MMU()
uint64_t* pml4 = m_highest_paging_struct;
for (uint32_t pml4e = 0; pml4e < 512; pml4e++)
{
- if (!(pml4[pml4e] & PRESENT))
+ if (!(pml4[pml4e] & Flags::Present))
continue;
uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK);
for (uint32_t pdpte = 0; pdpte < 512; pdpte++)
{
- if (!(pdpt[pdpte] & PRESENT))
+ if (!(pdpt[pdpte] & Flags::Present))
continue;
uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
for (uint32_t pde = 0; pde < 512; pde++)
{
- if (!(pd[pde] & PRESENT))
+ if (!(pd[pde] & Flags::Present))
continue;
kfree((void*)(pd[pde] & PAGE_MASK));
}
@@ -89,10 +86,13 @@ MMU::~MMU()
kfree(pml4);
}
- void MMU::allocate_page(uintptr_t address)
+ void MMU::allocate_page(uintptr_t address, uint8_t flags)
{
ASSERT((address >> 48) == 0);
+ ASSERT(flags & Flags::Present);
+ bool should_invalidate = false;
address &= PAGE_MASK;
uint64_t pml4e = (address >> 39) & 0x1FF;
@@ -101,40 +101,49 @@ void MMU::allocate_page(uintptr_t address)
uint64_t pte = (address >> 12) & 0x1FF;
uint64_t* pml4 = m_highest_paging_struct;
- if (!(pml4[pml4e] & PRESENT))
+ if ((pml4[pml4e] & flags) != flags)
{
- uint64_t* pdpt = allocate_page_aligned_page();
- pml4[pml4e] = (uint64_t)pdpt | READ_WRITE | PRESENT;
+ if (!(pml4[pml4e] & Flags::Present))
+ pml4[pml4e] = (uint64_t)allocate_page_aligned_page();
+ pml4[pml4e] = (pml4[pml4e] & PAGE_MASK) | flags;
+ should_invalidate = true;
}
uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK);
- if (!(pdpt[pdpte] & PRESENT))
+ if ((pdpt[pdpte] & flags) != flags)
{
- uint64_t* pd = allocate_page_aligned_page();
- pdpt[pdpte] = (uint64_t)pd | READ_WRITE | PRESENT;
+ if (!(pdpt[pdpte] & Flags::Present))
+ pdpt[pdpte] = (uint64_t)allocate_page_aligned_page();
+ pdpt[pdpte] = (pdpt[pdpte] & PAGE_MASK) | flags;
+ should_invalidate = true;
}
uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
- if (!(pd[pde] & PRESENT))
+ if ((pd[pde] & flags) != flags)
{
- uint64_t* pt = allocate_page_aligned_page();
- pd[pde] = (uint64_t)pt | READ_WRITE | PRESENT;
+ if (!(pd[pde] & Flags::Present))
+ pd[pde] = (uint64_t)allocate_page_aligned_page();
+ pd[pde] = (pd[pde] & PAGE_MASK) | flags;
+ should_invalidate = true;
}
uint64_t* pt = (uint64_t*)(pd[pde] & PAGE_MASK);
- if (!(pt[pte] & PRESENT))
+ if ((pt[pte] & flags) != flags)
{
- pt[pte] = address | READ_WRITE | PRESENT;
- asm volatile("invlpg (%0)" :: "r"(address) : "memory");
+ pt[pte] = address | flags;
+ should_invalidate = true;
}
+ if (should_invalidate)
+ asm volatile("invlpg (%0)" :: "r"(address) : "memory");
}
- void MMU::allocate_range(uintptr_t address, ptrdiff_t size)
+ void MMU::allocate_range(uintptr_t address, ptrdiff_t size, uint8_t flags)
{
uintptr_t s_page = address & PAGE_MASK;
uintptr_t e_page = (address + size - 1) & PAGE_MASK;
for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
- allocate_page(page);
+ allocate_page(page, flags);
}
void MMU::unallocate_page(uintptr_t address)
@@ -149,19 +158,19 @@ void MMU::unallocate_page(uintptr_t address)
uint64_t pte = (address >> 12) & 0x1FF;
uint64_t* pml4 = m_highest_paging_struct;
- if (!(pml4[pml4e] & PRESENT))
+ if (!(pml4[pml4e] & Flags::Present))
return;
uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK);
- if (!(pdpt[pdpte] & PRESENT))
+ if (!(pdpt[pdpte] & Flags::Present))
return;
uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
- if (!(pd[pde] & PRESENT))
+ if (!(pd[pde] & Flags::Present))
return;
uint64_t* pt = (uint64_t*)(pd[pde] & PAGE_MASK);
- if (!(pt[pte] & PRESENT))
+ if (!(pt[pte] & Flags::Present))
return;
pt[pte] = 0;


@@ -5,6 +5,14 @@
class MMU
{
public:
+ enum Flags : uint8_t
+ {
+ Present = 1,
+ ReadWrite = 2,
+ UserSupervisor = 4,
+ };
+ public:
static void intialize();
static MMU& get();
@@ -12,8 +20,8 @@ public:
MMU();
~MMU();
- void allocate_page(uintptr_t);
- void allocate_range(uintptr_t, ptrdiff_t);
+ void allocate_page(uintptr_t, uint8_t);
+ void allocate_range(uintptr_t, ptrdiff_t, uint8_t);
void unallocate_page(uintptr_t);
void unallocate_range(uintptr_t, ptrdiff_t);
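Note that the values chosen for the new enum (Present = 1, ReadWrite = 2, UserSupervisor = 4) are exactly the x86 page-table entry bits that the removed PRESENT and READ_WRITE macros encoded (bit 0 = present, bit 1 = read/write, bit 2 = user/supervisor), so OR-ing a Flags value into a paging-structure entry yields the same bit pattern as before. A small sanity check one could place next to the enum (illustrative only, not part of this commit):

static_assert(MMU::Flags::Present == (1 << 0), "matches the PTE present bit");
static_assert(MMU::Flags::ReadWrite == (1 << 1), "matches the PTE read/write bit");
static_assert(MMU::Flags::UserSupervisor == (1 << 2), "matches the PTE user/supervisor bit");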


@@ -165,7 +165,7 @@ uintptr_t locate_madt(uintptr_t rsdp_addr)
if (rsdp->revision == 2)
{
uintptr_t xsdt_addr = rsdp->v2_xsdt_address;
- MMU::get().allocate_page(xsdt_addr);
+ MMU::get().allocate_page(xsdt_addr, MMU::Flags::ReadWrite | MMU::Flags::Present);
entry_address_base = xsdt_addr + sizeof(SDTHeader);
entry_address_mask = (uintptr_t)0xFFFFFFFFFFFFFFFF;
entry_count = (((const SDTHeader*)xsdt_addr)->length - sizeof(SDTHeader)) / 8;
@@ -175,7 +175,7 @@ uintptr_t locate_madt(uintptr_t rsdp_addr)
else
{
uintptr_t rsdt_addr = rsdp->rsdt_address;
- MMU::get().allocate_page(rsdt_addr);
+ MMU::get().allocate_page(rsdt_addr, MMU::Flags::ReadWrite | MMU::Flags::Present);
entry_address_base = rsdt_addr + sizeof(SDTHeader);
entry_address_mask = 0xFFFFFFFF;
entry_count = (((const SDTHeader*)rsdt_addr)->length - sizeof(SDTHeader)) / 4;
@@ -186,10 +186,10 @@ uintptr_t locate_madt(uintptr_t rsdp_addr)
for (uint32_t i = 0; i < entry_count; i++)
{
uintptr_t entry_addr_ptr = entry_address_base + i * entry_pointer_size;
- MMU::get().allocate_page(entry_addr_ptr);
+ MMU::get().allocate_page(entry_addr_ptr, MMU::Flags::ReadWrite | MMU::Flags::Present);
uintptr_t entry_addr = *(uintptr_t*)entry_addr_ptr & entry_address_mask;
- MMU::get().allocate_page(entry_addr);
+ MMU::get().allocate_page(entry_addr, MMU::Flags::ReadWrite | MMU::Flags::Present);
BAN::ScopeGuard _([&]() {
MMU::get().unallocate_page(entry_addr);
@@ -228,7 +228,7 @@ APIC* APIC::create()
return nullptr;
}
- MMU::get().allocate_page(madt_addr);
+ MMU::get().allocate_page(madt_addr, MMU::Flags::ReadWrite | MMU::Flags::Present);
const MADT* madt = (const MADT*)madt_addr;
@@ -279,10 +279,10 @@ APIC* APIC::create()
return nullptr;
}
- MMU::get().allocate_page(apic->m_local_apic);
+ MMU::get().allocate_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
for (auto& io_apic : apic->m_io_apics)
{
- MMU::get().allocate_page(io_apic.address);
+ MMU::get().allocate_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
}


@@ -34,7 +34,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
return nullptr;
}
- MMU::get().allocate_range(framebuffer.addr, framebuffer.pitch * framebuffer.height);
+ MMU::get().allocate_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
auto* driver = new VesaTerminalDriver(
framebuffer.width,