From b3eeb6412f2578aac3a808f984268b1499e2d575 Mon Sep 17 00:00:00 2001 From: Bananymous Date: Wed, 19 Apr 2023 23:51:36 +0300 Subject: [PATCH] Kernel: Add some barebones functionality to map virtual addresses --- kernel/arch/x86_64/MMU.cpp | 102 +++++++++++++++------------- kernel/include/kernel/Memory/Heap.h | 4 +- kernel/include/kernel/Memory/MMU.h | 5 ++ kernel/kernel/Memory/Heap.cpp | 11 +-- 4 files changed, 65 insertions(+), 57 deletions(-) diff --git a/kernel/arch/x86_64/MMU.cpp b/kernel/arch/x86_64/MMU.cpp index b9121643..94c16f4f 100644 --- a/kernel/arch/x86_64/MMU.cpp +++ b/kernel/arch/x86_64/MMU.cpp @@ -90,54 +90,8 @@ MMU::~MMU() void MMU::map_page(uintptr_t address, uint8_t flags) { - ASSERT((address >> 48) == 0); - - ASSERT(flags & Flags::Present); - bool should_invalidate = false; - address &= PAGE_MASK; - - uint64_t pml4e = (address >> 39) & 0x1FF; - uint64_t pdpte = (address >> 30) & 0x1FF; - uint64_t pde = (address >> 21) & 0x1FF; - uint64_t pte = (address >> 12) & 0x1FF; - - uint64_t* pml4 = m_highest_paging_struct; - if ((pml4[pml4e] & flags) != flags) - { - if (!(pml4[pml4e] & Flags::Present)) - pml4[pml4e] = (uint64_t)allocate_page_aligned_page(); - pml4[pml4e] = (pml4[pml4e] & PAGE_MASK) | flags; - should_invalidate = true; - } - - uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK); - if ((pdpt[pdpte] & flags) != flags) - { - if (!(pdpt[pdpte] & Flags::Present)) - pdpt[pdpte] = (uint64_t)allocate_page_aligned_page(); - pdpt[pdpte] = (pdpt[pdpte] & PAGE_MASK) | flags; - should_invalidate = true; - } - - uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK); - if ((pd[pde] & flags) != flags) - { - if (!(pd[pde] & Flags::Present)) - pd[pde] = (uint64_t)allocate_page_aligned_page(); - pd[pde] = (pd[pde] & PAGE_MASK) | flags; - should_invalidate = true; - } - - uint64_t* pt = (uint64_t*)(pd[pde] & PAGE_MASK); - if ((pt[pte] & flags) != flags) - { - pt[pte] = address | flags; - should_invalidate = true; - } - - if (should_invalidate) - asm 
volatile("invlpg (%0)" :: "r"(address) : "memory"); + map_page_at(address, address, flags); } void MMU::map_range(uintptr_t address, ptrdiff_t size, uint8_t flags) @@ -195,3 +149,57 @@ void MMU::unmap_range(uintptr_t address, ptrdiff_t size) for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE) unmap_page(page); } + +void MMU::map_page_at(paddr_t paddr, vaddr_t vaddr, uint8_t flags) +{ + ASSERT((paddr >> 48) == 0); + ASSERT((vaddr >> 48) == 0); + + ASSERT((paddr & ~PAGE_MASK) == 0); + ASSERT((vaddr & ~PAGE_MASK) == 0);; + + ASSERT(flags & Flags::Present); + bool should_invalidate = false; + + uint64_t pml4e = (vaddr >> 39) & 0x1FF; + uint64_t pdpte = (vaddr >> 30) & 0x1FF; + uint64_t pde = (vaddr >> 21) & 0x1FF; + uint64_t pte = (vaddr >> 12) & 0x1FF; + + uint64_t* pml4 = m_highest_paging_struct; + if ((pml4[pml4e] & flags) != flags) + { + if (!(pml4[pml4e] & Flags::Present)) + pml4[pml4e] = (uint64_t)allocate_page_aligned_page(); + pml4[pml4e] = (pml4[pml4e] & PAGE_MASK) | flags; + should_invalidate = true; + } + + uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK); + if ((pdpt[pdpte] & flags) != flags) + { + if (!(pdpt[pdpte] & Flags::Present)) + pdpt[pdpte] = (uint64_t)allocate_page_aligned_page(); + pdpt[pdpte] = (pdpt[pdpte] & PAGE_MASK) | flags; + should_invalidate = true; + } + + uint64_t* pd = (uint64_t*)(pdpt[pdpte] & PAGE_MASK); + if ((pd[pde] & flags) != flags) + { + if (!(pd[pde] & Flags::Present)) + pd[pde] = (uint64_t)allocate_page_aligned_page(); + pd[pde] = (pd[pde] & PAGE_MASK) | flags; + should_invalidate = true; + } + + uint64_t* pt = (uint64_t*)(pd[pde] & PAGE_MASK); + if ((pt[pte] & flags) != flags) + { + pt[pte] = paddr | flags; + should_invalidate = true; + } + + if (should_invalidate) + asm volatile("movq %0, %%cr3" :: "r"(m_highest_paging_struct)); +} diff --git a/kernel/include/kernel/Memory/Heap.h b/kernel/include/kernel/Memory/Heap.h index 27786d89..f970d3b0 100644 --- a/kernel/include/kernel/Memory/Heap.h +++ 
b/kernel/include/kernel/Memory/Heap.h @@ -59,8 +59,8 @@ namespace Kernel::Memory static void initialize(); static Heap& get(); - paddr_t take_mapped_page(uint8_t); - void return_mapped_page(paddr_t); + paddr_t take_free_page(); + void release_page(paddr_t); private: Heap() = default; diff --git a/kernel/include/kernel/Memory/MMU.h b/kernel/include/kernel/Memory/MMU.h index a8503b7f..04552d09 100644 --- a/kernel/include/kernel/Memory/MMU.h +++ b/kernel/include/kernel/Memory/MMU.h @@ -13,6 +13,9 @@ public: UserSupervisor = 4, }; + using vaddr_t = uintptr_t; + using paddr_t = uintptr_t; + public: static void intialize(); static MMU& get(); @@ -26,6 +29,8 @@ public: void unmap_page(uintptr_t); void unmap_range(uintptr_t, ptrdiff_t); + void map_page_at(paddr_t, vaddr_t, uint8_t); + private: uint64_t* m_highest_paging_struct; }; diff --git a/kernel/kernel/Memory/Heap.cpp b/kernel/kernel/Memory/Heap.cpp index 2b869aee..caad448d 100644 --- a/kernel/kernel/Memory/Heap.cpp +++ b/kernel/kernel/Memory/Heap.cpp @@ -158,26 +158,21 @@ namespace Kernel::Memory dprintln("Total RAM {}.{} MB", total / (1 << 20), total % (1 << 20) * 1000 / (1 << 20)); } - paddr_t Heap::take_mapped_page(uint8_t flags) + paddr_t Heap::take_free_page() { for (auto& range : m_physical_ranges) - { if (paddr_t page = range.reserve_page(); page != PhysicalRange::invalid) - { - MMU::get().map_page(page, flags); return page; - } - } ASSERT_NOT_REACHED(); } - void Heap::return_mapped_page(paddr_t addr) + void Heap::release_page(paddr_t addr) { for (auto& range : m_physical_ranges) { if (range.contains(addr)) { - MMU::get().unmap_page(addr); + range.release_page(addr); return; } }