From 5494e2c12518c7be07646db4656aea492cc5f496 Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Tue, 18 Apr 2023 10:16:09 +0300
Subject: [PATCH] Kernel: Heap allows us to take free pages.

The API is kinda weird and will probably be reworked in the near future,
but this will work for now :)
---
 kernel/include/kernel/Memory/Heap.h | 11 +++++++---
 kernel/kernel/Memory/Heap.cpp       | 32 ++++++++++++++++++++++++++---
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/kernel/include/kernel/Memory/Heap.h b/kernel/include/kernel/Memory/Heap.h
index 063104c60b..27786d891c 100644
--- a/kernel/include/kernel/Memory/Heap.h
+++ b/kernel/include/kernel/Memory/Heap.h
@@ -22,9 +22,11 @@ namespace Kernel::Memory
 		paddr_t reserve_page();
 		void release_page(paddr_t);
 
-		paddr_t usable_start() const { return m_start + m_list_pages * PAGE_SIZE; }
-		paddr_t usable_end() const { return m_start + m_total_pages * PAGE_SIZE; }
-		uint64_t usable_pages() const { return m_reservable_pages; }
+		paddr_t start() const { return m_start; }
+		paddr_t end() const { return m_start + m_size; }
+		bool contains(paddr_t addr) const { return m_start <= addr && addr < m_start + m_size; }
+
+		size_t usable_memory() const { return m_reservable_pages * PAGE_SIZE; }
 
 	private:
 		struct node
@@ -57,6 +59,9 @@ namespace Kernel::Memory
 		static void initialize();
 		static Heap& get();
 
+		paddr_t take_mapped_page(uint8_t);
+		void return_mapped_page(paddr_t);
+
 	private:
 		Heap() = default;
 		void initialize_impl();
diff --git a/kernel/kernel/Memory/Heap.cpp b/kernel/kernel/Memory/Heap.cpp
index 32b45788c3..9c295e5b06 100644
--- a/kernel/kernel/Memory/Heap.cpp
+++ b/kernel/kernel/Memory/Heap.cpp
@@ -141,7 +141,7 @@ namespace Kernel::Memory
 			if (mmmt->type == 1)
 			{
 				PhysicalRange range(mmmt->base_addr, mmmt->length);
-				if (range.usable_pages() > 0)
+				if (range.usable_memory() > 0)
 					MUST(m_physical_ranges.push_back(range));
 			}
 
@@ -151,11 +151,37 @@ namespace Kernel::Memory
 		size_t total = 0;
 		for (auto& range : m_physical_ranges)
 		{
-			size_t bytes = range.usable_pages() * PAGE_SIZE;
-			dprintln("RAM {8H}->{8H}, {} pages ({}.{} MB)", range.usable_start(), range.usable_end(), range.usable_pages(), bytes / (1 << 20), bytes % (1 << 20) * 1000 / (1 << 20));
+			size_t bytes = range.usable_memory();
+			dprintln("RAM {8H}->{8H} ({}.{} MB)", range.start(), range.end(), bytes / (1 << 20), bytes % (1 << 20) * 1000 / (1 << 20));
 			total += bytes;
 		}
 		dprintln("Total RAM {}.{} MB", total / (1 << 20), total % (1 << 20) * 1000 / (1 << 20));
 	}
 
+	paddr_t Heap::take_mapped_page(uint8_t flags)
+	{
+		for (auto& range : m_physical_ranges)
+		{
+			if (paddr_t page = range.reserve_page(); page != PhysicalRange::invalid)
+			{
+				MMU::get().allocate_page(page, flags);
+				return page;
+			}
+		}
+		ASSERT_NOT_REACHED();
+	}
+
+	void Heap::return_mapped_page(paddr_t addr)
+	{
+		for (auto& range : m_physical_ranges)
+		{
+			if (range.contains(addr))
+			{
+				MMU::get().unallocate_page(addr);
+				return;
+			}
+		}
+		ASSERT_NOT_REACHED();
+	}
+
 }
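
A minimal sketch of a call site for the new Heap::take_mapped_page() / Heap::return_mapped_page() pair added by this patch. The flag value is a made-up placeholder (the real bits are defined by the MMU interface, not by this patch), and it assumes the page mapped by MMU::get().allocate_page() is reachable at the returned physical address:

    // Hypothetical call site; page_flags is an assumed placeholder value.
    using namespace Kernel::Memory;

    uint8_t page_flags = 0x03; // assumption: present + read/write bits as understood by MMU::allocate_page()

    // Reserve a free physical page from one of the heap's ranges and map it with the given flags.
    paddr_t page = Heap::get().take_mapped_page(page_flags);

    // ... use the page through whatever mapping allocate_page() established ...

    // Unmap the page again via MMU::unallocate_page().
    Heap::get().return_mapped_page(page);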