From 1aac3a0425862303cb32a0a5e12d04240b7cfcba Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Fri, 14 Apr 2023 23:23:31 +0300
Subject: [PATCH] Kernel: Heap implementation can now give free pages from all of RAM

---
 kernel/include/kernel/Memory/Heap.h | 19 ++++---
 kernel/kernel/Memory/Heap.cpp       | 79 ++++++++++++++++++++++-------
 2 files changed, 74 insertions(+), 24 deletions(-)

diff --git a/kernel/include/kernel/Memory/Heap.h b/kernel/include/kernel/Memory/Heap.h
index 80f85d254..063104c60 100644
--- a/kernel/include/kernel/Memory/Heap.h
+++ b/kernel/include/kernel/Memory/Heap.h
@@ -22,12 +22,19 @@ namespace Kernel::Memory
 		paddr_t reserve_page();
 		void release_page(paddr_t);
 
-		paddr_t start() const { return m_start + m_list_pages * PAGE_SIZE; }
-		paddr_t end() const { return m_start + m_total_pages * PAGE_SIZE; }
-		uint64_t pages() const { return m_reservable_pages; }
+		paddr_t usable_start() const { return m_start + m_list_pages * PAGE_SIZE; }
+		paddr_t usable_end() const { return m_start + m_total_pages * PAGE_SIZE; }
+		uint64_t usable_pages() const { return m_reservable_pages; }
 
 	private:
-		paddr_t page_address(uint64_t) const;
+		struct node
+		{
+			node* next;
+			node* prev;
+		};
+
+		paddr_t page_address(const node*) const;
+		node* node_address(paddr_t) const;
 
 	private:
 		paddr_t m_start { 0 };
@@ -37,8 +44,8 @@ namespace Kernel::Memory
 		uint64_t m_reservable_pages { 0 };
 		uint64_t m_list_pages { 0 };
 
-		uint64_t* m_free_list { nullptr };
-		uint64_t* m_used_list { nullptr };
+		node* m_free_list { nullptr };
+		node* m_used_list { nullptr };
 	};
 
 	class Heap
diff --git a/kernel/kernel/Memory/Heap.cpp b/kernel/kernel/Memory/Heap.cpp
index 812615ca5..8188a9aa1 100644
--- a/kernel/kernel/Memory/Heap.cpp
+++ b/kernel/kernel/Memory/Heap.cpp
@@ -21,44 +21,84 @@ namespace Kernel::Memory
 		if (auto rem = m_size % PAGE_SIZE)
 			m_size -= rem;
 
-		// FIXME: if total pages is just over multiple of (4096/sizeof(uint64_t)) we might make
+		// FIXME: if total pages is just over multiple of (PAGE_SIZE / sizeof(node)) we might make
 		//        couple of pages unallocatable
 		m_total_pages = m_size / PAGE_SIZE;
-		m_list_pages = BAN::Math::div_round_up(m_total_pages * sizeof(uint64_t), PAGE_SIZE);
+		m_list_pages = BAN::Math::div_round_up(m_total_pages * sizeof(node), PAGE_SIZE);
 		m_reservable_pages = m_total_pages - m_list_pages;
 
 		MMU::get().allocate_range(m_start, m_list_pages * PAGE_SIZE, MMU::Flags::Present);
 
-		// Initialize free list with every page pointing to the next one
-		uint64_t* list_ptr = (uint64_t*)m_start;
-		for (uint64_t i = 0; i < m_reservable_pages - 1; i++)
-		{
-			*list_ptr++ = i + 1;
-			//dprintln("{}/{}", i, m_reservable_pages);
-		}
-
-		*list_ptr = invalid;
-		m_free_list = (uint64_t*)m_start;
+		// Initialize page list so that every page points to the previous and next one
+		node* page_list = (node*)m_start;
+		for (uint64_t i = 0; i < m_reservable_pages; i++)
+			page_list[i] = { page_list + i - 1, page_list + i + 1 };
+		page_list[ 0 ].next = nullptr;
+		page_list[m_reservable_pages - 1].prev = nullptr;
+		m_free_list = page_list;
 		m_used_list = nullptr;
 	}
 
 	paddr_t PhysicalRange::reserve_page()
 	{
-		ASSERT_NOT_REACHED();
+		if (m_free_list == nullptr)
+			return invalid;
+
+		node* page = m_free_list;
+		ASSERT(page->next == nullptr);
+
+		// Detach page from top of the free list
+		m_free_list = m_free_list->prev ? m_free_list->prev : nullptr;
+		if (m_free_list)
+			m_free_list->next = nullptr;
+
+		// Add page to used list
+		if (m_used_list)
+			m_used_list->next = page;
+		page->prev = m_used_list;
+		m_used_list = page;
+
+		return page_address(page);
 	}
 
-	void PhysicalRange::release_page(paddr_t)
+	void PhysicalRange::release_page(paddr_t page_address)
 	{
-		ASSERT_NOT_REACHED();
+		ASSERT(m_used_list);
+
+		node* page = node_address(page_address);
+
+		// Detach page from used list
+		if (page->prev)
+			page->prev->next = page->next;
+		if (page->next)
+			page->next->prev = page->prev;
+		if (m_used_list == page)
+			m_used_list = page->prev;
+
+		// Add page to the top of free list
+		page->prev = m_free_list;
+		page->next = nullptr;
+		if (m_free_list)
+			m_free_list->next = page;
+		m_free_list = page;
 	}
 
-	paddr_t PhysicalRange::page_address(uint64_t page_index) const
+	paddr_t PhysicalRange::page_address(const node* page) const
 	{
-		ASSERT(page_index < m_reservable_pages);
+		ASSERT((paddr_t)page <= m_start + m_reservable_pages * sizeof(node));
+		uint64_t page_index = page - (node*)m_start;
 		return m_start + (page_index + m_list_pages) * PAGE_SIZE;
 	}
 
+	PhysicalRange::node* PhysicalRange::node_address(paddr_t page_address) const
+	{
+		ASSERT(page_address % PAGE_SIZE == 0);
+		ASSERT(m_start + m_list_pages * PAGE_SIZE <= page_address && page_address < m_start + m_size);
+		uint64_t page_offset = page_address - (m_start + m_list_pages * PAGE_SIZE);
+		return (node*)m_start + page_offset / PAGE_SIZE;
+	}
+
 
 	static Heap* s_instance = nullptr;
 
@@ -97,7 +137,10 @@
 		}
 
 		for (auto& range : m_physical_ranges)
-			dprintln("RAM {8H}->{8H}, {} pages ({}.{} MB)", range.start(), range.end(), range.pages(), range.pages() * PAGE_SIZE / (1 << 20), range.pages() * PAGE_SIZE % (1 << 20) * 100 / (1 << 20));
+		{
+			size_t bytes = range.usable_pages() * PAGE_SIZE;
+			dprintln("RAM {8H}->{8H}, {} pages ({}.{} MB)", range.usable_start(), range.usable_end(), range.usable_pages(), bytes / (1 << 20), bytes % (1 << 20) * 1000 / (1 << 20));
+		}
 	}
 }
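
Note (not part of the patch): a minimal, self-contained sketch of the index arithmetic behind page_address() and node_address() above. The node array occupies the first m_list_pages pages of the range, so the node at index i describes the physical page at m_start + (i + m_list_pages) * PAGE_SIZE. PAGE_SIZE, range_start and list_pages below are hypothetical example values, and the separate base pointer exists only to keep the sketch runnable in user space.

	// Sketch of the node <-> physical address mapping used by PhysicalRange.
	#include <cassert>
	#include <cstdint>

	struct node { node* next; node* prev; };

	constexpr uint64_t PAGE_SIZE   = 4096;
	constexpr uint64_t range_start = 0x00100000; // stand-in for m_start
	constexpr uint64_t list_pages  = 1;          // stand-in for m_list_pages

	// node pointer -> physical address of the page it tracks
	uint64_t page_address(const node* base, const node* page)
	{
		uint64_t page_index = page - base;
		return range_start + (page_index + list_pages) * PAGE_SIZE;
	}

	// physical address -> node tracking that page
	node* node_address(node* base, uint64_t paddr)
	{
		assert(paddr % PAGE_SIZE == 0);
		uint64_t page_offset = paddr - (range_start + list_pages * PAGE_SIZE);
		return base + page_offset / PAGE_SIZE;
	}

	int main()
	{
		node nodes[4] {};
		for (uint64_t i = 0; i < 4; i++)
		{
			uint64_t paddr = page_address(nodes, &nodes[i]);
			assert(paddr == range_start + (i + list_pages) * PAGE_SIZE);
			assert(node_address(nodes, paddr) == &nodes[i]);
		}
		return 0;
	}

In the kernel itself the node array is placed at m_start, so both conversions share the same base address; everything else follows the arithmetic in the patch.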