From 8a9816d6e0d26395b88e71642f210565ad54f862 Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Sun, 8 Oct 2023 02:41:05 +0300
Subject: [PATCH] Kernel: Add API for getting contiguous physical pages

This will be used to create DMA regions.
---
 kernel/include/kernel/Memory/Heap.h          |  3 +
 kernel/include/kernel/Memory/PhysicalRange.h |  3 +
 kernel/kernel/Memory/Heap.cpp                | 19 ++++++
 kernel/kernel/Memory/PhysicalRange.cpp       | 71 +++++++++++++++++++-
 4 files changed, 95 insertions(+), 1 deletion(-)

diff --git a/kernel/include/kernel/Memory/Heap.h b/kernel/include/kernel/Memory/Heap.h
index 716f768e..219f7b8c 100644
--- a/kernel/include/kernel/Memory/Heap.h
+++ b/kernel/include/kernel/Memory/Heap.h
@@ -21,6 +21,9 @@ namespace Kernel
 		paddr_t take_free_page();
 		void release_page(paddr_t);
 
+		paddr_t take_free_contiguous_pages(size_t pages);
+		void release_contiguous_pages(paddr_t paddr, size_t pages);
+
 		size_t used_pages() const;
 		size_t free_pages() const;
 
diff --git a/kernel/include/kernel/Memory/PhysicalRange.h b/kernel/include/kernel/Memory/PhysicalRange.h
index 76cca37f..7875a7e0 100644
--- a/kernel/include/kernel/Memory/PhysicalRange.h
+++ b/kernel/include/kernel/Memory/PhysicalRange.h
@@ -29,10 +29,13 @@ namespace Kernel
 
 	private:
 		unsigned long long* ull_bitmap_ptr() { return (unsigned long long*)m_vaddr; }
+		const unsigned long long* ull_bitmap_ptr() const { return (const unsigned long long*)m_vaddr; }
 
 		paddr_t paddr_for_bit(unsigned long long) const;
 		unsigned long long bit_for_paddr(paddr_t paddr) const;
 
+		unsigned long long contiguous_bits_set(unsigned long long start, unsigned long long count) const;
+
 	private:
 		const paddr_t m_paddr { 0 };
 		const size_t m_size { 0 };
diff --git a/kernel/kernel/Memory/Heap.cpp b/kernel/kernel/Memory/Heap.cpp
index a3d49bfc..57ee9838 100644
--- a/kernel/kernel/Memory/Heap.cpp
+++ b/kernel/kernel/Memory/Heap.cpp
@@ -79,6 +79,25 @@ namespace Kernel
 		ASSERT_NOT_REACHED();
 	}
 
+	paddr_t Heap::take_free_contiguous_pages(size_t pages)
+	{
+		LockGuard _(m_lock);
+		for (auto& range : m_physical_ranges)
+			if (range.free_pages() >= pages)
+				if (paddr_t paddr = range.reserve_contiguous_pages(pages))
+					return paddr;
+		return 0;
+	}
+
+	void Heap::release_contiguous_pages(paddr_t paddr, size_t pages)
+	{
+		LockGuard _(m_lock);
+		for (auto& range : m_physical_ranges)
+			if (range.contains(paddr))
+				return range.release_contiguous_pages(paddr, pages);
+		ASSERT_NOT_REACHED();
+	}
+
 	size_t Heap::used_pages() const
 	{
 		LockGuard _(m_lock);
diff --git a/kernel/kernel/Memory/PhysicalRange.cpp b/kernel/kernel/Memory/PhysicalRange.cpp
index 2e74a054..99e95b94 100644
--- a/kernel/kernel/Memory/PhysicalRange.cpp
+++ b/kernel/kernel/Memory/PhysicalRange.cpp
@@ -27,7 +27,7 @@ namespace Kernel
 
 		memset((void*)m_vaddr, 0x00, m_bitmap_pages * PAGE_SIZE);
 		memset((void*)m_vaddr, 0xFF, m_data_pages / 8);
-		for (int i = 0; i < m_data_pages % 8; i++)
+		for (ull i = 0; i < m_data_pages % 8; i++)
 			((uint8_t*)m_vaddr)[m_data_pages / 8] |= 1 << i;
 
 		dprintln("physical range needs {} pages for bitmap", m_bitmap_pages);
@@ -43,6 +43,18 @@ namespace Kernel
 		return (paddr - m_paddr) / PAGE_SIZE - m_bitmap_pages;
 	}
 
+	ull PhysicalRange::contiguous_bits_set(ull start, ull count) const
+	{
+		for (ull i = 0; i < count; i++)
+		{
+			ull off = (start + i) / ull_bits;
+			ull bit = (start + i) % ull_bits;
+			if (!(ull_bitmap_ptr()[off] & (1ull << bit)))
+				return i;
+		}
+		return count;
+	}
+
 	paddr_t PhysicalRange::reserve_page()
 	{
 		ASSERT(free_pages() > 0);
@@ -80,4 +92,61 @@ namespace Kernel
 		m_free_pages++;
 	}
 
+	paddr_t PhysicalRange::reserve_contiguous_pages(size_t pages)
+	{
+		ASSERT(pages > 0);
+		ASSERT(free_pages() > 0);
+
+		if (pages == 1)
+			return reserve_page();
+
+		ull ull_count = BAN::Math::div_round_up(m_data_pages, ull_bits);
+
+		// NOTE: This feels kinda slow, but I don't want to be
+		//       doing premature optimization. This will be only
+		//       used when creating DMA regions.
+
+		for (ull i = 0; i < ull_count; i++)
+		{
+			if (ull_bitmap_ptr()[i] == 0)
+				continue;
+
+			for (ull bit = 0; bit < ull_bits;)
+			{
+				ull start = i * ull_bits + bit;
+				ull set_cnt = contiguous_bits_set(start, pages);
+				if (set_cnt == pages)
+				{
+					for (ull j = 0; j < pages; j++)
+						ull_bitmap_ptr()[(start + j) / ull_bits] &= ~(1ull << ((start + j) % ull_bits));
+					m_free_pages -= pages;
+					return paddr_for_bit(start);
+				}
+				bit += set_cnt + 1;
+			}
+		}
+
+		ASSERT_NOT_REACHED();
+	}
+
+	void PhysicalRange::release_contiguous_pages(paddr_t paddr, size_t pages)
+	{
+		ASSERT(paddr % PAGE_SIZE == 0);
+		ASSERT(paddr - m_paddr <= m_size);
+		ASSERT(pages > 0);
+
+		ull start_bit = bit_for_paddr(paddr);
+		for (size_t i = 0; i < pages; i++)
+		{
+			ull off = (start_bit + i) / ull_bits;
+			ull bit = (start_bit + i) % ull_bits;
+			ull mask = 1ull << bit;
+
+			ASSERT(!(ull_bitmap_ptr()[off] & mask));
+			ull_bitmap_ptr()[off] |= mask;
+		}
+
+		m_free_pages += pages;
+	}
+
 }
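
Usage sketch (not part of the patch, for context only): a rough outline of how the new
Heap API could back a DMA region. The helper names below and reaching the heap via
Heap::get() are illustrative assumptions; only take_free_contiguous_pages() and
release_contiguous_pages() are introduced by this change.

	// Hypothetical helper, not in the tree: allocate a physically contiguous
	// buffer whose base address can be programmed into a DMA engine.
	static paddr_t allocate_dma_buffer(size_t bytes)
	{
		const size_t pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;

		// New API: returns 0 when no contiguous run of 'pages' pages is free.
		paddr_t paddr = Heap::get().take_free_contiguous_pages(pages);
		if (paddr == 0)
			return 0;

		// The caller would still map [paddr, paddr + pages * PAGE_SIZE) into
		// virtual memory before the CPU touches the buffer.
		return paddr;
	}

	// Hypothetical counterpart: return the whole run to the allocator.
	static void free_dma_buffer(paddr_t paddr, size_t bytes)
	{
		const size_t pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		Heap::get().release_contiguous_pages(paddr, pages);
	}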