From 0fae2c730966b20f360c28bc5ddfb5394de65848 Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Sun, 8 Oct 2023 01:38:51 +0300
Subject: [PATCH] Kernel: Rework physical memory allocation

PhysicalRange is now a much simpler bitmap allocator. This makes
expanding the PhysicalRange API much easier.
---
 kernel/include/kernel/Memory/PhysicalRange.h |  34 ++---
 kernel/kernel/Memory/Heap.cpp                |  37 ++---
 kernel/kernel/Memory/PhysicalRange.cpp       | 142 +++++++------------
 3 files changed, 85 insertions(+), 128 deletions(-)

diff --git a/kernel/include/kernel/Memory/PhysicalRange.h b/kernel/include/kernel/Memory/PhysicalRange.h
index 212ce37f4f..76cca37f84 100644
--- a/kernel/include/kernel/Memory/PhysicalRange.h
+++ b/kernel/include/kernel/Memory/PhysicalRange.h
@@ -3,7 +3,6 @@
 #include
 #include
-#include
 
 namespace Kernel
 {
@@ -12,42 +11,37 @@ namespace Kernel
 	{
 	public:
 		PhysicalRange(paddr_t, size_t);
+
 		paddr_t reserve_page();
 		void release_page(paddr_t);
 
+		paddr_t reserve_contiguous_pages(size_t pages);
+		void release_contiguous_pages(paddr_t paddr, size_t pages);
+
 		paddr_t start() const { return m_paddr; }
 		paddr_t end() const { return m_paddr + m_size; }
 		bool contains(paddr_t addr) const { return m_paddr <= addr && addr < m_paddr + m_size; }
 
-		size_t usable_memory() const { return m_reservable_pages * PAGE_SIZE; }
+		size_t usable_memory() const { return m_data_pages * PAGE_SIZE; }
 
-		size_t used_pages() const { return m_used_pages; }
+		size_t used_pages() const { return m_data_pages - m_free_pages; }
 		size_t free_pages() const { return m_free_pages; }
 
	private:
-		struct node
-		{
-			node* next;
-			node* prev;
-		};
+		unsigned long long* ull_bitmap_ptr() { return (unsigned long long*)m_vaddr; }
 
-		paddr_t page_address(const node*) const;
-		node* node_address(paddr_t) const;
+		paddr_t paddr_for_bit(unsigned long long) const;
+		unsigned long long bit_for_paddr(paddr_t paddr) const;
 
	private:
-		paddr_t m_paddr { 0 };
+		const paddr_t m_paddr { 0 };
+		const size_t m_size { 0 };
+
 		vaddr_t m_vaddr { 0 };
-		size_t m_size { 0 };
 
-		uint64_t m_total_pages { 0 };
-		uint64_t m_reservable_pages { 0 };
-		uint64_t m_list_pages { 0 };
-
-		size_t m_used_pages { 0 };
+		const size_t m_bitmap_pages { 0 };
+		const size_t m_data_pages { 0 };
 		size_t m_free_pages { 0 };
-
-		node* m_free_list { nullptr };
-		node* m_used_list { nullptr };
 	};
 
 }
\ No newline at end of file
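Review note: the rewritten header keeps all allocator bookkeeping inside the range itself. The first m_bitmap_pages physical pages of the range hold a one-bit-per-page bitmap, and the remaining m_data_pages pages are the ones handed out; this is also why used_pages() can now be derived as m_data_pages - m_free_pages instead of being tracked separately. (The newly declared reserve_contiguous_pages()/release_contiguous_pages() have no definitions in this patch yet.) Below is a minimal standalone sketch of the bit-to-address mapping this layout implies; the page size and range values are hypothetical, not taken from the kernel:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main()
    {
        constexpr size_t page_size   = 0x1000;   // assumed 4 KiB pages
        const uintptr_t  range_start = 0x100000; // hypothetical, page aligned
        const size_t     range_size  = 0x800000; // hypothetical 8 MiB range

        const size_t total_pages  = range_size / page_size;
        // Tight sizing: one bit per page, rounded up to whole pages.
        const size_t bitmap_bytes = (total_pages + 7) / 8;
        const size_t bitmap_pages = (bitmap_bytes + page_size - 1) / page_size;
        const size_t data_pages   = total_pages - bitmap_pages;

        // Mirrors paddr_for_bit / bit_for_paddr: bit N tracks the N'th
        // data page, which lives right after the bitmap pages.
        auto paddr_for_bit = [&](size_t bit) {
            return range_start + (bitmap_pages + bit) * page_size;
        };
        auto bit_for_paddr = [&](uintptr_t paddr) {
            return (paddr - range_start) / page_size - bitmap_pages;
        };

        assert(paddr_for_bit(0) == range_start + bitmap_pages * page_size);
        assert(bit_for_paddr(paddr_for_bit(data_pages - 1)) == data_pages - 1);
        return 0;
    }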
diff --git a/kernel/kernel/Memory/Heap.cpp b/kernel/kernel/Memory/Heap.cpp
index 5e1499f7f6..a3d49bfccb 100644
--- a/kernel/kernel/Memory/Heap.cpp
+++ b/kernel/kernel/Memory/Heap.cpp
@@ -3,6 +3,8 @@
 #include
 #include
 
+extern uint8_t g_kernel_end[];
+
 namespace Kernel
 {
 
@@ -30,14 +32,22 @@ namespace Kernel
 		for (size_t i = 0; i < g_multiboot_info->mmap_length;)
 		{
 			multiboot_memory_map_t* mmmt = (multiboot_memory_map_t*)P2V(g_multiboot_info->mmap_addr + i);
 
 			if (mmmt->type == 1)
 			{
-				PhysicalRange range(mmmt->base_addr, mmmt->length);
-				if (range.usable_memory() > 0)
-					MUST(m_physical_ranges.push_back(range));
+				paddr_t start = mmmt->base_addr;
+				if (start < V2P(g_kernel_end))
+					start = V2P(g_kernel_end);
+				if (auto rem = start % PAGE_SIZE)
+					start += PAGE_SIZE - rem;
+
+				paddr_t end = mmmt->base_addr + mmmt->length;
+				if (auto rem = end % PAGE_SIZE)
+					end -= rem;
+
+				// PhysicalRange needs at least 2 pages
+				if (end > start + PAGE_SIZE)
+					MUST(m_physical_ranges.emplace_back(start, end - start));
 			}
 
 			i += mmmt->size + sizeof(uint32_t);
 		}
@@ -55,22 +65,17 @@
 	{
 		LockGuard _(m_lock);
 		for (auto& range : m_physical_ranges)
-			if (paddr_t page = range.reserve_page())
-				return page;
+			if (range.free_pages() >= 1)
+				return range.reserve_page();
 		return 0;
 	}
 
-	void Heap::release_page(paddr_t addr)
+	void Heap::release_page(paddr_t paddr)
 	{
 		LockGuard _(m_lock);
 		for (auto& range : m_physical_ranges)
-		{
-			if (range.contains(addr))
-			{
-				range.release_page(addr);
-				return;
-			}
-		}
+			if (range.contains(paddr))
+				return range.release_page(paddr);
 		ASSERT_NOT_REACHED();
 	}
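Review note: trimming of usable memory has moved out of PhysicalRange's constructor and into the mmap walk above. Each type-1 (available) region is clamped to start no lower than the end of the kernel image, then snapped inward to page boundaries: start rounds up, end rounds down, so the resulting range never extends past the region it came from, and a region is kept only if more than one page survives (the bitmap itself consumes the first page(s)). A small standalone sketch of that rounding, with an assumed 4 KiB page size and a hypothetical kernel end address:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        constexpr uint64_t page_size  = 0x1000;   // assumed 4 KiB pages
        constexpr uint64_t kernel_end = 0x254321; // hypothetical V2P(g_kernel_end)

        // hypothetical mmap entry
        uint64_t start = 0x100000;
        uint64_t end   = 0x8ff123;

        if (start < kernel_end)
            start = kernel_end;       // skip memory overlapping the kernel
        if (uint64_t rem = start % page_size)
            start += page_size - rem; // round start up to a page boundary
        if (uint64_t rem = end % page_size)
            end -= rem;               // round end down to a page boundary

        // keep the region only if at least 2 whole pages remain
        if (end > start + page_size)
            printf("range [%#llx, %#llx), %llu pages\n",
                   (unsigned long long)start, (unsigned long long)end,
                   (unsigned long long)((end - start) / page_size));
        return 0;
    }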
diff --git a/kernel/kernel/Memory/PhysicalRange.cpp b/kernel/kernel/Memory/PhysicalRange.cpp
index e0f9ff76c9..2e74a05463 100644
--- a/kernel/kernel/Memory/PhysicalRange.cpp
+++ b/kernel/kernel/Memory/PhysicalRange.cpp
@@ -3,123 +3,81 @@
 #include
 #include
 
-extern uint8_t g_kernel_end[];
-
 namespace Kernel
 {
 
-	PhysicalRange::PhysicalRange(paddr_t start, size_t size)
+	using ull = unsigned long long;
+
+	static constexpr ull ull_bits = sizeof(ull) * 8;
+
+	PhysicalRange::PhysicalRange(paddr_t paddr, size_t size)
+		: m_paddr(paddr)
+		, m_size(size)
+		, m_bitmap_pages(BAN::Math::div_round_up(size / PAGE_SIZE, 8))
+		, m_data_pages((size / PAGE_SIZE) - m_bitmap_pages)
+		, m_free_pages(m_data_pages)
 	{
-		// We can't use the memory ovelapping with kernel
-		if (start + size <= V2P(g_kernel_end))
-			return;
+		ASSERT(paddr % PAGE_SIZE == 0);
+		ASSERT(size % PAGE_SIZE == 0);
+		ASSERT(m_bitmap_pages < size / PAGE_SIZE);
 
-		// Align start to page boundary and after the kernel memory
-		m_paddr = BAN::Math::max(start, V2P(g_kernel_end));
-		if (auto rem = m_paddr % PAGE_SIZE)
-			m_paddr += PAGE_SIZE - rem;
-
-		if (size <= m_paddr - start)
-			return;
-
-		// Align size to page boundary
-		m_size = size - (m_paddr - start);
-		if (auto rem = m_size % PAGE_SIZE)
-			m_size -= rem;
-
-		// We need atleast 2 pages
-		m_total_pages = m_size / PAGE_SIZE;
-		if (m_total_pages <= 1)
-			return;
-
-		// FIXME: if total pages is just over multiple of (PAGE_SIZE / sizeof(node)) we might make
-		//        couple of pages unallocatable
-		m_list_pages = BAN::Math::div_round_up(m_total_pages * sizeof(node), PAGE_SIZE);
-		m_reservable_pages = m_total_pages - m_list_pages;
-
-		m_used_pages = 0;
-		m_free_pages = m_reservable_pages;
-
-		m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(m_list_pages, KERNEL_OFFSET);
+		m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(m_bitmap_pages, KERNEL_OFFSET);
 		ASSERT(m_vaddr);
-
-		PageTable::kernel().map_range_at(m_paddr, m_vaddr, m_list_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
+		PageTable::kernel().map_range_at(m_paddr, m_vaddr, size, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 
-		// Initialize page list so that every page points to the next one
-		node* page_list = (node*)m_vaddr;
+		memset((void*)m_vaddr, 0x00, m_bitmap_pages * PAGE_SIZE);
+		memset((void*)m_vaddr, 0xFF, m_data_pages / 8);
+		for (int i = 0; i < m_data_pages % 8; i++)
+			((uint8_t*)m_vaddr)[m_data_pages / 8] |= 1 << i;
 
-		for (uint64_t i = 0; i < m_reservable_pages; i++)
-			page_list[i] = { page_list + i - 1, page_list + i + 1 };
-		page_list[ 0 ].next = nullptr;
-		page_list[m_reservable_pages - 1].prev = nullptr;
+		dprintln("physical range needs {} pages for bitmap", m_bitmap_pages);
+	}
 
-		m_free_list = page_list;
-		m_used_list = nullptr;
+	paddr_t PhysicalRange::paddr_for_bit(ull bit) const
+	{
+		return m_paddr + (m_bitmap_pages + bit) * PAGE_SIZE;
+	}
+
+	ull PhysicalRange::bit_for_paddr(paddr_t paddr) const
+	{
+		return (paddr - m_paddr) / PAGE_SIZE - m_bitmap_pages;
 	}
 
 	paddr_t PhysicalRange::reserve_page()
 	{
-		if (m_free_list == nullptr)
-			return 0;
+		ASSERT(free_pages() > 0);
 
-		node* page = m_free_list;
-		ASSERT(page->next == nullptr);
+		ull ull_count = BAN::Math::div_round_up(m_data_pages, ull_bits);
 
-		// Detatch page from top of the free list
-		m_free_list = m_free_list->prev;
-		if (m_free_list)
-			m_free_list->next = nullptr;
+		for (ull i = 0; i < ull_count; i++)
+		{
+			if (ull_bitmap_ptr()[i] == 0)
+				continue;
 
-		// Add page to used list
-		if (m_used_list)
-			m_used_list->next = page;
-		page->prev = m_used_list;
-		m_used_list = page;
+			int lsb = __builtin_ctzll(ull_bitmap_ptr()[i]);
 
-		m_used_pages++;
-		m_free_pages--;
+			ull_bitmap_ptr()[i] &= ~(1ull << lsb);
+			m_free_pages--;
+			return paddr_for_bit(i * ull_bits + lsb);
+		}
 
-		return page_address(page);
+		ASSERT_NOT_REACHED();
 	}
 
-	void PhysicalRange::release_page(paddr_t page_address)
+	void PhysicalRange::release_page(paddr_t paddr)
 	{
-		ASSERT(m_used_list);
+		ASSERT(paddr % PAGE_SIZE == 0);
+		ASSERT(paddr - m_paddr <= m_size);
 
-		node* page = node_address(page_address);
-
-		// Detach page from used list
-		if (page->prev)
-			page->prev->next = page->next;
-		if (page->next)
-			page->next->prev = page->prev;
-		if (m_used_list == page)
-			m_used_list = page->prev;
+		ull full_bit = bit_for_paddr(paddr);
+		ull off = full_bit / ull_bits;
+		ull bit = full_bit % ull_bits;
+		ull mask = 1ull << bit;
 
-		// Add page to the top of free list
-		page->prev = m_free_list;
-		page->next = nullptr;
-		if (m_free_list)
-			m_free_list->next = page;
-		m_free_list = page;
+		ASSERT(!(ull_bitmap_ptr()[off] & mask));
+		ull_bitmap_ptr()[off] |= mask;
 
-		m_used_pages--;
 		m_free_pages++;
-	}
-
-	paddr_t PhysicalRange::page_address(const node* page) const
-	{
-		ASSERT((vaddr_t)page <= m_vaddr + m_reservable_pages * sizeof(node));
-		uint64_t page_index = page - (node*)m_vaddr;
-		return m_paddr + (page_index + m_list_pages) * PAGE_SIZE;
 	}
 
-	PhysicalRange::node* PhysicalRange::node_address(paddr_t page_address) const
-	{
-		ASSERT(page_address % PAGE_SIZE == 0);
-		ASSERT(m_paddr + m_list_pages * PAGE_SIZE <= page_address && page_address < m_paddr + m_size);
-		uint64_t page_offset = page_address - (m_paddr + m_list_pages * PAGE_SIZE);
-		return (node*)m_vaddr + page_offset / PAGE_SIZE;
-	}
-
 }
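Review note: the heart of the rework is the word-wide scan in reserve_page(). A set bit means a free page, so skipping zero words and taking __builtin_ctzll of the first non-zero 64-bit word yields the lowest free page in O(bitmap words) with no pointer chasing, and release_page() shrinks to a single bit set guarded by a double-free assert. A standalone sketch of the same scan over an in-memory bitmap (assumed values, no kernel dependencies):

    #include <cassert>

    using ull = unsigned long long;
    constexpr ull ull_bits = sizeof(ull) * 8;

    // Same scan as PhysicalRange::reserve_page(): bit set == page free.
    // Returns the index of the lowest free bit and clears it, or -1.
    long long reserve_bit(ull* bitmap, ull word_count)
    {
        for (ull i = 0; i < word_count; i++)
        {
            if (bitmap[i] == 0)
                continue;                         // all 64 pages here are in use
            int lsb = __builtin_ctzll(bitmap[i]); // index of lowest set bit
            bitmap[i] &= ~(1ull << lsb);          // mark that page reserved
            return (long long)(i * ull_bits + lsb);
        }
        return -1;                                // no free pages in this range
    }

    int main()
    {
        ull bitmap[2] = { 0, 0b1100 }; // only bits 66 and 67 (word 1, bits 2 and 3) are free
        assert(reserve_bit(bitmap, 2) == 66);
        assert(reserve_bit(bitmap, 2) == 67);
        assert(reserve_bit(bitmap, 2) == -1);
        return 0;
    }

One sizing observation, if I read the constructor right: m_bitmap_pages is div_round_up(size / PAGE_SIZE, 8), i.e. one bitmap page per eight pages of range, while one bit per page would already fit in that many bytes. The surplus bitmap pages are never handed out, so this trades some physical memory for simpler sizing math.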