Kernel: Rewrite physical memory allocation with PageTable::fast_pages

Bananymous 2024-10-14 11:28:15 +03:00
parent 6a46a25f48
commit 55fbd09e45
3 changed files with 101 additions and 119 deletions
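
The core of the rewrite: instead of keeping the allocator's bitmap permanently mapped into kernel virtual memory, PhysicalRange now accesses it through the kernel's temporary fast-page mapping. A minimal sketch of that pattern, using the PageTable calls exactly as they appear in the hunks below (with_fast_page() maps a physical page at a fixed kernel virtual address for the duration of the callback, then unmaps it); the helper name is hypothetical:

    // Hypothetical helper mirroring the new constructor code: zero one
    // physical page without reserving any permanent virtual address space.
    static void zero_physical_page(paddr_t paddr)
    {
        PageTable::with_fast_page(paddr, [] {
            memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
        });
    }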

kernel/include/kernel/Memory/PhysicalRange.h View File

@@ -19,31 +19,17 @@ namespace Kernel
 		void release_contiguous_pages(paddr_t paddr, size_t pages);

 		paddr_t start() const { return m_paddr; }
-		paddr_t end() const { return m_paddr + m_size; }
-		bool contains(paddr_t addr) const { return m_paddr <= addr && addr < m_paddr + m_size; }
+		paddr_t end() const { return m_paddr + m_page_count * PAGE_SIZE; }
+		bool contains(paddr_t addr) const { return start() <= addr && addr < end(); }

-		size_t usable_memory() const { return m_data_pages * PAGE_SIZE; }
+		size_t usable_memory() const { return m_page_count * PAGE_SIZE; }
-		size_t used_pages() const { return m_data_pages - m_free_pages; }
+		size_t used_pages() const { return m_page_count - m_free_pages; }
 		size_t free_pages() const { return m_free_pages; }

-	private:
-		unsigned long long* ull_bitmap_ptr() { return (unsigned long long*)m_vaddr; }
-		const unsigned long long* ull_bitmap_ptr() const { return (const unsigned long long*)m_vaddr; }
-		paddr_t paddr_for_bit(unsigned long long) const;
-		unsigned long long bit_for_paddr(paddr_t paddr) const;
-		unsigned long long contiguous_bits_set(unsigned long long start, unsigned long long count) const;

 	private:
 		const paddr_t m_paddr { 0 };
-		const size_t m_size { 0 };
-		vaddr_t m_vaddr { 0 };
-		const size_t m_bitmap_pages { 0 };
-		const size_t m_data_pages { 0 };
+		const size_t m_page_count { 0 };
 		size_t m_free_pages { 0 };
 	};
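
The header shrinks to a single m_page_count plus the free-page counter; all bitmap helpers move out of the class. For a feel of the new sizing, a worked example assuming 4 KiB pages (the concrete PAGE_SIZE value is an assumption here, not taken from the diff):

    #include <cstddef>

    constexpr size_t page_size     = 4096;                         // assumed PAGE_SIZE
    constexpr size_t bits_per_page = page_size * 8;                // one bitmap page tracks 32768 pages
    constexpr size_t page_count    = (128ull << 20) / page_size;   // 128 MiB range -> 32768 pages
    constexpr size_t bitmap_pages  = (page_count + bits_per_page - 1) / bits_per_page;
    static_assert(bitmap_pages == 1); // a single self-reserved page tracks the whole range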

kernel/kernel/FileSystem/TmpFileSystem.cpp View File

@@ -191,15 +191,17 @@ namespace Kernel
 		paddr_t page_containing = find_indirect(m_data_pages, index_of_page, 2);

+		paddr_t paddr_to_free = 0;
 		PageTable::with_fast_page(page_containing, [&] {
 			auto& page_info = PageTable::fast_page_as_sized<PageInfo>(index_in_page);
 			ASSERT(page_info.flags() & PageInfo::Flags::Present);
-			Heap::get().release_page(page_info.paddr());
+			paddr_to_free = page_info.paddr();
 			m_used_pages--;
 			page_info.set_paddr(0);
 			page_info.set_flags(0);
 		});
+		Heap::get().release_page(paddr_to_free);
 	}

 	BAN::ErrorOr<size_t> TmpFileSystem::allocate_block()
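
The TmpFileSystem change hoists Heap::get().release_page() out of the with_fast_page() callback: the physical address is captured while the fast page is mapped, and the page is released only afterwards. The likely reason is visible in the next file: PhysicalRange::release_page() itself now uses with_fast_page(), so calling into the heap from inside the callback would nest two uses of the fast-page mechanism. The safe ordering, as a sketch:

    paddr_t paddr_to_free = 0;
    PageTable::with_fast_page(page_containing, [&] {
        // gather everything needed while the fast page is mapped...
        paddr_to_free = PageTable::fast_page_as_sized<PageInfo>(index_in_page).paddr();
    });
    // ...and only then call into subsystems that may use the fast page themselves
    Heap::get().release_page(paddr_to_free);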

kernel/kernel/Memory/PhysicalRange.cpp View File

@@ -1,81 +1,67 @@
 #include <BAN/Assert.h>
 #include <BAN/Math.h>
+#include <BAN/Optional.h>
 #include <kernel/Memory/PageTable.h>
 #include <kernel/Memory/PhysicalRange.h>

 namespace Kernel
 {
-	using ull = unsigned long long;
-	static constexpr ull ull_bits = sizeof(ull) * 8;
+	static constexpr size_t bits_per_page = PAGE_SIZE * 8;

 	PhysicalRange::PhysicalRange(paddr_t paddr, size_t size)
 		: m_paddr(paddr)
-		, m_size(size)
-		, m_bitmap_pages(BAN::Math::div_round_up<size_t>(size / PAGE_SIZE, PAGE_SIZE * 8))
-		, m_data_pages((size / PAGE_SIZE) - m_bitmap_pages)
-		, m_free_pages(m_data_pages)
+		, m_page_count(size / PAGE_SIZE)
+		, m_free_pages(m_page_count)
 	{
 		ASSERT(paddr % PAGE_SIZE == 0);
 		ASSERT(size % PAGE_SIZE == 0);
-		ASSERT(m_bitmap_pages < size / PAGE_SIZE);

-		m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(m_bitmap_pages, KERNEL_OFFSET);
-		ASSERT(m_vaddr);
-		PageTable::kernel().map_range_at(m_paddr, m_vaddr, m_bitmap_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);

-		memset((void*)m_vaddr, 0x00, m_bitmap_pages * PAGE_SIZE);
-		for (ull i = 0; i < m_data_pages / ull_bits; i++)
-			ull_bitmap_ptr()[i] = ~0ull;
-		if (m_data_pages % ull_bits)
+		const size_t bitmap_page_count = BAN::Math::div_round_up<size_t>(m_page_count, bits_per_page);
+		for (size_t i = 0; i < bitmap_page_count; i++)
 		{
-			ull off = m_data_pages / ull_bits;
-			ull bits = m_data_pages % ull_bits;
-			ull_bitmap_ptr()[off] = ~(~0ull << bits);
+			PageTable::with_fast_page(paddr + i * PAGE_SIZE, [] {
+				memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
+			});
 		}
-	}

-	paddr_t PhysicalRange::paddr_for_bit(ull bit) const
-	{
-		return m_paddr + (m_bitmap_pages + bit) * PAGE_SIZE;
-	}
-	ull PhysicalRange::bit_for_paddr(paddr_t paddr) const
-	{
-		return (paddr - m_paddr) / PAGE_SIZE - m_bitmap_pages;
-	}
-	ull PhysicalRange::contiguous_bits_set(ull start, ull count) const
-	{
-		for (ull i = 0; i < count; i++)
-		{
-			ull off = (start + i) / ull_bits;
-			ull bit = (start + i) % ull_bits;
-			if (!(ull_bitmap_ptr()[off] & (1ull << bit)))
-				return i;
-		}
-		return count;
+		ASSERT(reserve_contiguous_pages(bitmap_page_count) == m_paddr);
 	}

 	paddr_t PhysicalRange::reserve_page()
 	{
 		ASSERT(free_pages() > 0);

-		ull ull_count = BAN::Math::div_round_up<ull>(m_data_pages, ull_bits);
+		const size_t bitmap_page_count = BAN::Math::div_round_up<size_t>(m_page_count, bits_per_page);

-		for (ull i = 0; i < ull_count; i++)
+		for (size_t i = 0; i < bitmap_page_count; i++)
 		{
-			if (ull_bitmap_ptr()[i] == 0)
-				continue;
-			int lsb = __builtin_ctzll(ull_bitmap_ptr()[i]);
-			ull_bitmap_ptr()[i] &= ~(1ull << lsb);
-			m_free_pages--;
-			return paddr_for_bit(i * ull_bits + lsb);
+			BAN::Optional<size_t> page_matched_bit;

+			const paddr_t current_paddr = m_paddr + i * PAGE_SIZE;
+			PageTable::with_fast_page(current_paddr, [&page_matched_bit] {
+				for (size_t j = 0; j < PAGE_SIZE / sizeof(size_t); j++)
+				{
+					static_assert(sizeof(size_t) == sizeof(long));
+					const size_t current = PageTable::fast_page_as_sized<volatile size_t>(j);
+					if (current == BAN::numeric_limits<size_t>::max())
+						continue;
+					const int ctz = __builtin_ctzl(~current);
+					PageTable::fast_page_as_sized<volatile size_t>(j) = current | (static_cast<size_t>(1) << ctz);
+					page_matched_bit = j * sizeof(size_t) * 8 + ctz;
+					return;
+				}
+			});

+			if (page_matched_bit.has_value())
+			{
+				m_free_pages--;
+				const size_t matched_bit = (i * bits_per_page) + page_matched_bit.value();
+				ASSERT(matched_bit < m_page_count);
+				return m_paddr + matched_bit * PAGE_SIZE;
+			}
 		}

 		ASSERT_NOT_REACHED();
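
reserve_page() finds the first free page with a small bit trick: a set bit now means "used", so inverting the word turns the lowest clear bit into the lowest set bit, which __builtin_ctzl() then locates. A worked example (the all-ones case is skipped by the loop above, so ~current is never zero):

    constexpr unsigned long current = 0b0111;       // pages 0-2 used, page 3 free
    static_assert(__builtin_ctzl(~current) == 3);   // ~current = 0b...1000 -> index 3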
@@ -84,15 +70,21 @@ namespace Kernel
 	void PhysicalRange::release_page(paddr_t paddr)
 	{
 		ASSERT(paddr % PAGE_SIZE == 0);
-		ASSERT(paddr - m_paddr <= m_size);
+		ASSERT(paddr >= m_paddr);
+		ASSERT(paddr < m_paddr + m_page_count * PAGE_SIZE);

-		ull full_bit = bit_for_paddr(paddr);
-		ull off = full_bit / ull_bits;
-		ull bit = full_bit % ull_bits;
-		ull mask = 1ull << bit;
+		const size_t paddr_index = (paddr - m_paddr) / PAGE_SIZE;

-		ASSERT(!(ull_bitmap_ptr()[off] & mask));
-		ull_bitmap_ptr()[off] |= mask;
+		PageTable::with_fast_page(m_paddr + paddr_index / bits_per_page * PAGE_SIZE, [paddr_index] {
+			const size_t bitmap_bit = paddr_index % bits_per_page;
+			const size_t byte = bitmap_bit / 8;
+			const size_t bit = bitmap_bit % 8;
+			volatile uint8_t& bitmap_byte = PageTable::fast_page_as_sized<volatile uint8_t>(byte);
+			ASSERT(bitmap_byte & (1u << bit));
+			bitmap_byte = bitmap_byte & ~(1u << bit);
+		});

 		m_free_pages++;
 	}
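
release_page() locates the bit in two steps: paddr_index / bits_per_page selects which bitmap page to fast-map, and paddr_index % bits_per_page is the bit inside it. Worked through with assumed 4 KiB pages and a hypothetical page index:

    #include <cstddef>

    constexpr size_t bits_per_page = 4096 * 8;  // 32768
    constexpr size_t paddr_index   = 40000;     // hypothetical page index within the range
    static_assert(paddr_index / bits_per_page == 1);    // second bitmap page
    static_assert(paddr_index % bits_per_page == 7232); // bit within that page
    static_assert(7232 / 8 == 904 && 7232 % 8 == 0);    // -> byte 904, bit 0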
@@ -100,58 +92,60 @@ namespace Kernel
 	paddr_t PhysicalRange::reserve_contiguous_pages(size_t pages)
 	{
 		ASSERT(pages > 0);
-		ASSERT(free_pages() > 0);
+		ASSERT(pages <= free_pages());

 		if (pages == 1)
 			return reserve_page();

-		ull ull_count = BAN::Math::div_round_up<ull>(m_data_pages, ull_bits);

-		// NOTE: This feels kinda slow, but I don't want to be
-		//       doing premature optimization. This will be only
-		//       used when creating DMA regions.
-		for (ull i = 0; i < ull_count; i++)
-		{
-			if (ull_bitmap_ptr()[i] == 0)
-				continue;
-			for (ull bit = 0; bit < ull_bits;)
-			{
-				ull start = i * ull_bits + bit;
-				ull set_cnt = contiguous_bits_set(start, pages);
-				if (set_cnt == pages)
-				{
-					for (ull j = 0; j < pages; j++)
-						ull_bitmap_ptr()[(start + j) / ull_bits] &= ~(1ull << ((start + j) % ull_bits));
-					m_free_pages -= pages;
-					return paddr_for_bit(start);
-				}
-				bit += set_cnt + 1;
-			}
-		}
+		const auto bitmap_is_set =
+			[this](size_t buffer_bit) -> bool
+			{
+				const size_t page_index = buffer_bit / bits_per_page;
+				// byte/bit offsets within the fast-mapped bitmap page
+				const size_t byte = (buffer_bit % bits_per_page) / 8;
+				const size_t bit = buffer_bit % 8;
+				uint8_t current;
+				PageTable::with_fast_page(m_paddr + page_index * PAGE_SIZE, [&current, byte] {
+					current = PageTable::fast_page_as_sized<volatile uint8_t>(byte);
+				});
+				return current & (1u << bit);
+			};

+		const auto bitmap_set_bit =
+			[this](size_t buffer_bit) -> void
+			{
+				const size_t page_index = buffer_bit / bits_per_page;
+				// byte/bit offsets within the fast-mapped bitmap page
+				const size_t byte = (buffer_bit % bits_per_page) / 8;
+				const size_t bit = buffer_bit % 8;
+				PageTable::with_fast_page(m_paddr + page_index * PAGE_SIZE, [byte, bit] {
+					volatile uint8_t& current = PageTable::fast_page_as_sized<volatile uint8_t>(byte);
+					current = current | (1u << bit);
+				});
+			};

+		// FIXME: optimize this :)
+		for (size_t i = 0; i <= m_page_count - pages; i++)
+		{
+			bool all_unset = true;
+			for (size_t j = 0; j < pages && all_unset; j++)
+				if (bitmap_is_set(i + j))
+					all_unset = false;
+			if (!all_unset)
+				continue;
+			for (size_t j = 0; j < pages; j++)
+				bitmap_set_bit(i + j);
+			m_free_pages -= pages;
+			return m_paddr + i * PAGE_SIZE;
+		}

 		ASSERT_NOT_REACHED();
 		return 0;
 	}
 	void PhysicalRange::release_contiguous_pages(paddr_t paddr, size_t pages)
 	{
 		ASSERT(paddr % PAGE_SIZE == 0);
-		ASSERT(paddr - m_paddr <= m_size);
 		ASSERT(pages > 0);

-		ull start_bit = bit_for_paddr(paddr);
+		// FIXME: optimize this :)
 		for (size_t i = 0; i < pages; i++)
-		{
-			ull off = (start_bit + i) / ull_bits;
-			ull bit = (start_bit + i) % ull_bits;
-			ull mask = 1ull << bit;
-			ASSERT(!(ull_bitmap_ptr()[off] & mask));
-			ull_bitmap_ptr()[off] |= mask;
-		}
-		m_free_pages += pages;
+			release_page(paddr + i * PAGE_SIZE);
 	}
 }
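
Taken together: the bitmap now lives in the first pages of the range itself (zeroed by the constructor, then self-reserved via reserve_contiguous_pages()), and its polarity is flipped relative to the old code, where a set bit meant "free"; a set bit now means "used". The public interface is unchanged, so callers look the same before and after; a usage sketch with hypothetical caller code:

    PhysicalRange range(paddr, size);  // paddr/size from the firmware memory map

    const paddr_t page = range.reserve_page();
    // ... map `page` somewhere and use it ...
    range.release_page(page);

    const paddr_t dma = range.reserve_contiguous_pages(4); // e.g. backing a DMA region
    range.release_contiguous_pages(dma, 4);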