diff --git a/kernel/include/kernel/Memory/Heap.h b/kernel/include/kernel/Memory/Heap.h
index 063104c6..27786d89 100644
--- a/kernel/include/kernel/Memory/Heap.h
+++ b/kernel/include/kernel/Memory/Heap.h
@@ -22,9 +22,11 @@ namespace Kernel::Memory
 		paddr_t reserve_page();
 		void release_page(paddr_t);
 
-		paddr_t usable_start() const { return m_start + m_list_pages * PAGE_SIZE; }
-		paddr_t usable_end() const { return m_start + m_total_pages * PAGE_SIZE; }
-		uint64_t usable_pages() const { return m_reservable_pages; }
+		paddr_t start() const { return m_start; }
+		paddr_t end() const { return m_start + m_size; }
+		bool contains(paddr_t addr) const { return m_start <= addr && addr < m_start + m_size; }
+
+		size_t usable_memory() const { return m_reservable_pages * PAGE_SIZE; }
 
 	private:
 		struct node
@@ -57,6 +59,9 @@ namespace Kernel::Memory
 		static void initialize();
 		static Heap& get();
 
+		paddr_t take_mapped_page(uint8_t);
+		void return_mapped_page(paddr_t);
+
 	private:
 		Heap() = default;
 		void initialize_impl();
diff --git a/kernel/kernel/Memory/Heap.cpp b/kernel/kernel/Memory/Heap.cpp
index 32b45788..9c295e5b 100644
--- a/kernel/kernel/Memory/Heap.cpp
+++ b/kernel/kernel/Memory/Heap.cpp
@@ -141,7 +141,7 @@ namespace Kernel::Memory
 			if (mmmt->type == 1)
 			{
 				PhysicalRange range(mmmt->base_addr, mmmt->length);
-				if (range.usable_pages() > 0)
+				if (range.usable_memory() > 0)
 					MUST(m_physical_ranges.push_back(range));
 			}
 
@@ -151,11 +151,37 @@ namespace Kernel::Memory
 		size_t total = 0;
 		for (auto& range : m_physical_ranges)
 		{
-			size_t bytes = range.usable_pages() * PAGE_SIZE;
-			dprintln("RAM {8H}->{8H}, {} pages ({}.{} MB)", range.usable_start(), range.usable_end(), range.usable_pages(), bytes / (1 << 20), bytes % (1 << 20) * 1000 / (1 << 20));
+			size_t bytes = range.usable_memory();
+			dprintln("RAM {8H}->{8H} ({}.{} MB)", range.start(), range.end(), bytes / (1 << 20), bytes % (1 << 20) * 1000 / (1 << 20));
 			total += bytes;
 		}
 		dprintln("Total RAM {}.{} MB", total / (1 << 20), total % (1 << 20) * 1000 / (1 << 20));
 	}
 
+	paddr_t Heap::take_mapped_page(uint8_t flags)
+	{
+		for (auto& range : m_physical_ranges)
+		{
+			if (paddr_t page = range.reserve_page(); page != PhysicalRange::invalid)
+			{
+				MMU::get().allocate_page(page, flags);
+				return page;
+			}
+		}
+		ASSERT_NOT_REACHED();
+	}
+
+	void Heap::return_mapped_page(paddr_t addr)
+	{
+		for (auto& range : m_physical_ranges)
+		{
+			if (range.contains(addr))
+			{
+				MMU::get().unallocate_page(addr);
+				return;
+			}
+		}
+		ASSERT_NOT_REACHED();
+	}
+
 }
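
A minimal usage sketch of the new Heap API added above, not part of the patch. The flag value, the example_scratch_page helper, and the assumption that a freestanding memset and PAGE_SIZE are in scope are all hypothetical; allocate_page(page, flags) appears to map the page at its physical address, so the sketch writes through that address directly.

	#include <kernel/Memory/Heap.h> // assumed include path, matching the diff's file layout

	namespace Kernel::Memory
	{

		// Hypothetical caller: reserve one mapped page as a scratch buffer,
		// zero it, then hand it back to the physical range it came from.
		void example_scratch_page()
		{
			paddr_t page = Heap::get().take_mapped_page(0x03); // assumed Present|ReadWrite encoding
			memset(reinterpret_cast<void*>(page), 0, PAGE_SIZE);
			Heap::get().return_mapped_page(page);
		}

	}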