Kernel: Add some barebones functionality to map virtual addresses

This commit is contained in:
Bananymous 2023-04-19 23:51:36 +03:00
parent d38470c8e2
commit b3eeb6412f
4 changed files with 65 additions and 57 deletions

View File

@@ -90,54 +90,8 @@ MMU::~MMU()
void MMU::map_page(uintptr_t address, uint8_t flags)
{
ASSERT((address >> 48) == 0);
ASSERT(flags & Flags::Present);
bool should_invalidate = false;
address &= PAGE_MASK;
uint64_t pml4e = (address >> 39) & 0x1FF;
uint64_t pdpte = (address >> 30) & 0x1FF;
uint64_t pde = (address >> 21) & 0x1FF;
uint64_t pte = (address >> 12) & 0x1FF;
uint64_t* pml4 = m_highest_paging_struct;
if ((pml4[pml4e] & flags) != flags)
{
if (!(pml4[pml4e] & Flags::Present))
pml4[pml4e] = (uint64_t)allocate_page_aligned_page();
pml4[pml4e] = (pml4[pml4e] & PAGE_MASK) | flags;
should_invalidate = true;
}
uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK);
if ((pdpt[pdpte] & flags) != flags)
{
if (!(pdpt[pdpte] & Flags::Present))
pdpt[pdpte] = (uint64_t)allocate_page_aligned_page();
pdpt[pdpte] = (pdpt[pdpte] & PAGE_MASK) | flags;
should_invalidate = true;
}
uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
if ((pd[pde] & flags) != flags)
{
if (!(pd[pde] & Flags::Present))
pd[pde] = (uint64_t)allocate_page_aligned_page();
pd[pde] = (pd[pde] & PAGE_MASK) | flags;
should_invalidate = true;
}
uint64_t* pt = (uint64_t*)(pd[pde] & PAGE_MASK);
if ((pt[pte] & flags) != flags)
{
pt[pte] = address | flags;
should_invalidate = true;
}
if (should_invalidate)
asm volatile("invlpg (%0)" :: "r"(address) : "memory");
map_page_at(address, address, flags);
}
void MMU::map_range(uintptr_t address, ptrdiff_t size, uint8_t flags)
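With this change, map_page becomes the identity-mapped special case of the new map_page_at: the virtual address equals the physical address. A minimal caller sketch (illustrative only, not part of the diff; the address is arbitrary, and Flags::Present is required by the ASSERT at the top of map_page):

// Identity-map the 4 KiB page containing physical address 0x200000;
// afterwards the kernel can dereference 0x200000 directly.
MMU::get().map_page(0x200000, MMU::Flags::Present);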
@@ -195,3 +149,57 @@ void MMU::unmap_range(uintptr_t address, ptrdiff_t size)
for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
unmap_page(page);
}
void MMU::map_page_at(paddr_t paddr, vaddr_t vaddr, uint8_t flags)
{
ASSERT((paddr >> 48) == 0);
ASSERT((vaddr >> 48) == 0);
ASSERT((paddr & ~PAGE_MASK) == 0);
ASSERT((vaddr & ~PAGE_MASK) == 0);
ASSERT(flags & Flags::Present);
bool should_invalidate = false;
uint64_t pml4e = (vaddr >> 39) & 0x1FF;
uint64_t pdpte = (vaddr >> 30) & 0x1FF;
uint64_t pde = (vaddr >> 21) & 0x1FF;
uint64_t pte = (vaddr >> 12) & 0x1FF;
uint64_t* pml4 = m_highest_paging_struct;
if ((pml4[pml4e] & flags) != flags)
{
if (!(pml4[pml4e] & Flags::Present))
pml4[pml4e] = (uint64_t)allocate_page_aligned_page();
pml4[pml4e] = (pml4[pml4e] & PAGE_MASK) | flags;
should_invalidate = true;
}
uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK);
if ((pdpt[pdpte] & flags) != flags)
{
if (!(pdpt[pdpte] & Flags::Present))
pdpt[pdpte] = (uint64_t)allocate_page_aligned_page();
pdpt[pdpte] = (pdpt[pdpte] & PAGE_MASK) | flags;
should_invalidate = true;
}
uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
if ((pd[pde] & flags) != flags)
{
if (!(pd[pde] & Flags::Present))
pd[pde] = (uint64_t)allocate_page_aligned_page();
pd[pde] = (pd[pde] & PAGE_MASK) | flags;
should_invalidate = true;
}
uint64_t* pt = (uint64_t*)(pd[pde] & PAGE_MASK);
if ((pt[pte] & flags) != flags)
{
pt[pte] = paddr | flags;
should_invalidate = true;
}
if (should_invalidate)
asm volatile("movq %0, %%cr3" :: "r"(m_highest_paging_struct));
}
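Each level of the 4-level walk consumes 9 bits of the virtual address (512 entries per table), and the low 12 bits are the offset within the 4 KiB page; the >> 48 ASSERTs reject addresses that would need a sign-extended upper half. A worked decomposition with an illustrative value:

// Decompose a virtual address into its four table indices.
// 0x8080604000 == (1 << 39) | (2 << 30) | (3 << 21) | (4 << 12)
uint64_t vaddr = 0x8080604000;
uint64_t pml4e = (vaddr >> 39) & 0x1FF; // 1: index into the PML4
uint64_t pdpte = (vaddr >> 30) & 0x1FF; // 2: index into the PDPT
uint64_t pde   = (vaddr >> 21) & 0x1FF; // 3: index into the page directory
uint64_t pte   = (vaddr >> 12) & 0x1FF; // 4: index into the page table

Note that the flush strategy also differs: map_page used a targeted invlpg, while map_page_at reloads CR3, which flushes every non-global TLB entry. That is correct regardless of which table levels changed, but a much heavier hammer than invalidating a single page.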

View File

@@ -59,8 +59,8 @@ namespace Kernel::Memory
static void initialize();
static Heap& get();
paddr_t take_mapped_page(uint8_t);
void return_mapped_page(paddr_t);
paddr_t take_free_page();
void release_page(paddr_t);
private:
Heap() = default;
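With the MMU calls removed, the heap becomes a pure physical-frame allocator; mapping is now the caller's job. A sketch of the intended pairing with the new map_page_at (illustrative, not part of the diff; the virtual address is hypothetical and must stay page-aligned and below the 48-bit boundary to pass the ASSERTs):

// Grab a physical frame and map it at a caller-chosen virtual address.
paddr_t paddr = Heap::get().take_free_page();
vaddr_t vaddr = 0x40000000; // hypothetical page-aligned sub-48-bit address
MMU::get().map_page_at(paddr, vaddr, MMU::Flags::Present);

// ... use the memory at vaddr ...

MMU::get().unmap_page(vaddr);
Heap::get().release_page(paddr);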

View File

@@ -13,6 +13,9 @@ public:
UserSupervisor = 4,
};
using vaddr_t = uintptr_t;
using paddr_t = uintptr_t;
public:
static void initialize();
static MMU& get();
@@ -26,6 +29,8 @@ public:
void unmap_page(uintptr_t);
void unmap_range(uintptr_t, ptrdiff_t);
void map_page_at(paddr_t, vaddr_t, uint8_t);
private:
uint64_t* m_highest_paging_struct;
};
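The range variants presumably iterate the page-aligned span and delegate to the single-page functions, as the unmap_range loop above does. An illustrative call:

// Cover [0x200000, 0x200000 + 0x5000): five 4 KiB pages, with any
// missing intermediate tables allocated on the way down.
MMU::get().map_range(0x200000, 0x5000, MMU::Flags::Present);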

View File

@@ -158,26 +158,21 @@ namespace Kernel::Memory
dprintln("Total RAM {}.{} MB", total / (1 << 20), total % (1 << 20) * 1000 / (1 << 20));
}
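The fractional part printed here is thousandths of a MiB: total % (1 << 20) is the byte remainder below one MiB, scaled by 1000 before the divide. A worked example of the arithmetic:

uint64_t total = 100'000'000;                           // bytes of RAM
uint64_t whole = total / (1 << 20);                     // 95 MiB
uint64_t frac  = total % (1 << 20) * 1000 / (1 << 20);  // 367 thousandths
// dprintln would report "Total RAM 95.367 MB"

(The remainder is not zero-padded, so 5 thousandths would print as ".5" rather than ".005".)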
paddr_t Heap::take_mapped_page(uint8_t flags)
paddr_t Heap::take_free_page()
{
for (auto& range : m_physical_ranges)
{
if (paddr_t page = range.reserve_page(); page != PhysicalRange::invalid)
{
MMU::get().map_page(page, flags);
return page;
}
}
ASSERT_NOT_REACHED();
}
void Heap::return_mapped_page(paddr_t addr)
void Heap::release_page(paddr_t addr)
{
for (auto& range : m_physical_ranges)
{
if (range.contains(addr))
{
MMU::get().unmap_page(addr);
range.release_page(addr);
return;
}
}