Kernel: Rework physical memory allocation
PhysicalRange is now a much simpler bitmap. This makes expanding the PhysicalRange API much easier.
commit 03d2bf4002
parent f071240b33
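In short: each range's first few pages now hold a bitmap with one bit per allocatable page (bit set = page free); reserving a page clears the lowest set bit of the first non-zero 64-bit word, and releasing it sets the bit back. Below is a minimal self-contained sketch of that scheme, for orientation only — SimpleBitmapRange, its fixed four-word bitmap, and the sample base address are made up here, not code from this commit:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Toy stand-in for the kernel's PhysicalRange: the bitmap lives in an
    // ordinary member array here instead of pages mapped from the range itself.
    struct SimpleBitmapRange
    {
        static constexpr size_t page_size = 4096;

        uint64_t bitmap[4] {};  // up to 4 * 64 = 256 pages tracked
        uintptr_t base;         // address of the first allocatable page
        size_t page_count;

        SimpleBitmapRange(uintptr_t base, size_t page_count)
            : base(base), page_count(page_count)
        {
            assert(page_count <= 256);
            for (size_t i = 0; i < page_count; i++)
                bitmap[i / 64] |= 1ull << (i % 64);  // bit set = page free
        }

        // Find the first non-zero word, clear its lowest set bit,
        // and return the matching page address (0 if the range is full).
        uintptr_t reserve_page()
        {
            for (size_t i = 0; i < 4; i++)
            {
                if (bitmap[i] == 0)
                    continue;
                int lsb = __builtin_ctzll(bitmap[i]);
                bitmap[i] &= ~(1ull << lsb);
                return base + (i * 64 + lsb) * page_size;
            }
            return 0;
        }

        // Setting an already-set bit would be a double free, hence the assert.
        void release_page(uintptr_t paddr)
        {
            size_t bit = (paddr - base) / page_size;
            assert(!(bitmap[bit / 64] & (1ull << (bit % 64))));
            bitmap[bit / 64] |= 1ull << (bit % 64);
        }
    };

    int main()
    {
        SimpleBitmapRange range(0x100000, 16);
        uintptr_t a = range.reserve_page();
        uintptr_t b = range.reserve_page();
        printf("%#lx %#lx\n", (unsigned long)a, (unsigned long)b);  // 0x100000 0x101000
        range.release_page(a);
    }

Compare this with reserve_page() and release_page() in the PhysicalRange implementation hunk near the end, which do the same thing against a bitmap stored in (and mapped from) the first m_bitmap_pages pages of the range itself.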
@@ -3,7 +3,6 @@
 #include <kernel/Memory/Types.h>
 
 #include <stddef.h>
-#include <stdint.h>
 
 namespace Kernel
 {
@@ -12,42 +11,37 @@ namespace Kernel
     {
     public:
        PhysicalRange(paddr_t, size_t);
 
        paddr_t reserve_page();
        void release_page(paddr_t);
 
+       paddr_t reserve_contiguous_pages(size_t pages);
+       void release_contiguous_pages(paddr_t paddr, size_t pages);
 
        paddr_t start() const { return m_paddr; }
        paddr_t end() const { return m_paddr + m_size; }
        bool contains(paddr_t addr) const { return m_paddr <= addr && addr < m_paddr + m_size; }
 
-       size_t usable_memory() const { return m_reservable_pages * PAGE_SIZE; }
+       size_t usable_memory() const { return m_data_pages * PAGE_SIZE; }
 
-       size_t used_pages() const { return m_used_pages; }
+       size_t used_pages() const { return m_data_pages - m_free_pages; }
        size_t free_pages() const { return m_free_pages; }
 
    private:
-       struct node
-       {
-           node* next;
-           node* prev;
-       };
+       unsigned long long* ull_bitmap_ptr() { return (unsigned long long*)m_vaddr; }
 
-       paddr_t page_address(const node*) const;
-       node* node_address(paddr_t) const;
+       paddr_t paddr_for_bit(unsigned long long) const;
+       unsigned long long bit_for_paddr(paddr_t paddr) const;
 
    private:
-       paddr_t m_paddr { 0 };
+       const paddr_t m_paddr { 0 };
+       const size_t m_size { 0 };
 
        vaddr_t m_vaddr { 0 };
-       size_t m_size { 0 };
 
-       uint64_t m_total_pages { 0 };
-       uint64_t m_reservable_pages { 0 };
-       uint64_t m_list_pages { 0 };
+       const size_t m_bitmap_pages { 0 };
+       const size_t m_data_pages { 0 };
 
-       size_t m_used_pages { 0 };
        size_t m_free_pages { 0 };
-
-       node* m_free_list { nullptr };
-       node* m_used_list { nullptr };
    };
 
 }
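As a worked example of the new bookkeeping (the m_bitmap_pages/m_data_pages initializers from the implementation hunk below, together with usable_memory() above), here the initializer-list arithmetic is replayed exactly as that hunk shows it, for a hypothetical 1 MiB range — the sample size is mine, the formulas are the diff's:

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        constexpr size_t PAGE_SIZE = 4096;
        constexpr size_t size = 1024 * 1024;                      // sample 1 MiB range
        constexpr size_t total_pages = size / PAGE_SIZE;          // 256
        constexpr size_t bitmap_pages = (total_pages + 7) / 8;    // div_round_up(256, 8) == 32
        constexpr size_t data_pages = total_pages - bitmap_pages; // 224
        printf("bitmap %zu pages, data %zu pages, usable_memory %zu bytes\n",
               bitmap_pages, data_pages, data_pages * PAGE_SIZE);
    }

used_pages() then needs no separate counter: it is just data_pages minus free_pages, which is why the old m_used_pages member disappears.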
@@ -3,6 +3,8 @@
 #include <kernel/Memory/PageTable.h>
 #include <kernel/multiboot.h>
 
+extern uint8_t g_kernel_end[];
+
 namespace Kernel
 {
 
@@ -30,14 +32,22 @@ namespace Kernel
        for (size_t i = 0; i < g_multiboot_info->mmap_length;)
        {
            multiboot_memory_map_t* mmmt = (multiboot_memory_map_t*)P2V(g_multiboot_info->mmap_addr + i);
 
            if (mmmt->type == 1)
            {
-               PhysicalRange range(mmmt->base_addr, mmmt->length);
-               if (range.usable_memory() > 0)
-                   MUST(m_physical_ranges.push_back(range));
+               paddr_t start = mmmt->base_addr;
+               if (start < V2P(g_kernel_end))
+                   start = V2P(g_kernel_end);
+               if (auto rem = start % PAGE_SIZE)
+                   start += PAGE_SIZE - rem;
+
+               paddr_t end = mmmt->base_addr + mmmt->length;
+               if (auto rem = end % PAGE_SIZE)
+                   end -= rem;
+
+               // Physical pages needs atleast 2 pages
+               if (end > start + PAGE_SIZE)
+                   MUST(m_physical_ranges.emplace_back(start, end - start));
            }
            i += mmmt->size + sizeof(uint32_t);
        }
 
@@ -55,22 +65,17 @@ namespace Kernel
    {
        LockGuard _(m_lock);
        for (auto& range : m_physical_ranges)
-           if (paddr_t page = range.reserve_page())
-               return page;
+           if (range.free_pages() >= 1)
+               return range.reserve_page();
        return 0;
    }
 
-   void Heap::release_page(paddr_t addr)
+   void Heap::release_page(paddr_t paddr)
    {
        LockGuard _(m_lock);
        for (auto& range : m_physical_ranges)
-       {
-           if (range.contains(addr))
-           {
-               range.release_page(addr);
-               return;
-           }
-       }
+           if (range.contains(paddr))
+               return range.release_page(paddr);
        ASSERT_NOT_REACHED();
    }
 
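One contract change worth spelling out in the Heap hunks above: reserve_page() used to return 0 when a range was exhausted, but the bitmap version asserts instead, so Heap now checks free_pages() before calling it. That check-then-reserve pair stays race-free only because both happen under the same m_lock, as in this rough standalone model of the pattern (std::mutex, std::vector, and the toy Range here are stand-ins, not kernel types):

    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct Range
    {
        size_t free = 1;
        // The kernel version would assert(free > 0) rather than be callable when empty.
        uintptr_t reserve_page() { free--; return 0x1000; }
    };

    uintptr_t take_free_page(std::vector<Range>& ranges, std::mutex& lock)
    {
        std::lock_guard<std::mutex> guard(lock);  // check and reserve under one lock
        for (auto& range : ranges)
            if (range.free >= 1)
                return range.reserve_page();
        return 0;  // no range had a free page
    }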
@@ -3,123 +3,81 @@
 #include <kernel/Memory/PageTable.h>
 #include <kernel/Memory/PhysicalRange.h>
 
-extern uint8_t g_kernel_end[];
-
 namespace Kernel
 {
 
-   PhysicalRange::PhysicalRange(paddr_t start, size_t size)
+   using ull = unsigned long long;
+
+   static constexpr ull ull_bits = sizeof(ull) * 8;
+
+   PhysicalRange::PhysicalRange(paddr_t paddr, size_t size)
+       : m_paddr(paddr)
+       , m_size(size)
+       , m_bitmap_pages(BAN::Math::div_round_up<size_t>(size / PAGE_SIZE, 8))
+       , m_data_pages((size / PAGE_SIZE) - m_bitmap_pages)
+       , m_free_pages(m_data_pages)
    {
-       // We can't use the memory ovelapping with kernel
-       if (start + size <= V2P(g_kernel_end))
-           return;
-
-       // Align start to page boundary and after the kernel memory
-       m_paddr = BAN::Math::max(start, V2P(g_kernel_end));
-       if (auto rem = m_paddr % PAGE_SIZE)
-           m_paddr += PAGE_SIZE - rem;
-
-       if (size <= m_paddr - start)
-           return;
-
-       // Align size to page boundary
-       m_size = size - (m_paddr - start);
-       if (auto rem = m_size % PAGE_SIZE)
-           m_size -= rem;
-
-       // We need atleast 2 pages
-       m_total_pages = m_size / PAGE_SIZE;
-       if (m_total_pages <= 1)
-           return;
-
-       // FIXME: if total pages is just over multiple of (PAGE_SIZE / sizeof(node)) we might make
-       //        couple of pages unallocatable
-       m_list_pages = BAN::Math::div_round_up<uint64_t>(m_total_pages * sizeof(node), PAGE_SIZE);
-       m_reservable_pages = m_total_pages - m_list_pages;
-
-       m_used_pages = 0;
-       m_free_pages = m_reservable_pages;
-
-       m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(m_list_pages, KERNEL_OFFSET);
+       ASSERT(paddr % PAGE_SIZE == 0);
+       ASSERT(size % PAGE_SIZE == 0);
+       ASSERT(m_bitmap_pages < size / PAGE_SIZE);
+
+       m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(m_bitmap_pages, KERNEL_OFFSET);
        ASSERT(m_vaddr);
-       PageTable::kernel().map_range_at(m_paddr, m_vaddr, m_list_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
+       PageTable::kernel().map_range_at(m_paddr, m_vaddr, size, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 
-       // Initialize page list so that every page points to the next one
-       node* page_list = (node*)m_vaddr;
-       for (uint64_t i = 0; i < m_reservable_pages; i++)
-           page_list[i] = { page_list + i - 1, page_list + i + 1 };
-       page_list[0].next = nullptr;
-       page_list[m_reservable_pages - 1].prev = nullptr;
-
-       m_free_list = page_list;
-       m_used_list = nullptr;
+       memset((void*)m_vaddr, 0x00, m_bitmap_pages * PAGE_SIZE);
+       memset((void*)m_vaddr, 0xFF, m_data_pages / 8);
+       for (int i = 0; i < m_data_pages % 8; i++)
+           ((uint8_t*)m_vaddr)[m_data_pages / 8] |= 1 << i;
+
+       dprintln("physical range needs {} pages for bitmap", m_bitmap_pages);
+   }
+
+   paddr_t PhysicalRange::paddr_for_bit(ull bit) const
+   {
+       return m_paddr + (m_bitmap_pages + bit) * PAGE_SIZE;
+   }
+
+   ull PhysicalRange::bit_for_paddr(paddr_t paddr) const
+   {
+       return (paddr - m_paddr) / PAGE_SIZE - m_bitmap_pages;
    }
 
    paddr_t PhysicalRange::reserve_page()
    {
-       if (m_free_list == nullptr)
-           return 0;
+       ASSERT(free_pages() > 0);
 
-       node* page = m_free_list;
-       ASSERT(page->next == nullptr);
+       ull ull_count = BAN::Math::div_round_up<ull>(m_data_pages, ull_bits);
 
-       // Detatch page from top of the free list
-       m_free_list = m_free_list->prev;
-       if (m_free_list)
-           m_free_list->next = nullptr;
-
-       // Add page to used list
-       if (m_used_list)
-           m_used_list->next = page;
-       page->prev = m_used_list;
-       m_used_list = page;
-
-       m_used_pages++;
-       m_free_pages--;
-
-       return page_address(page);
+       for (ull i = 0; i < ull_count; i++)
+       {
+           if (ull_bitmap_ptr()[i] == 0)
+               continue;
+
+           int lsb = __builtin_ctzll(ull_bitmap_ptr()[i]);
+
+           ull_bitmap_ptr()[i] &= ~(1ull << lsb);
+           m_free_pages--;
+           return paddr_for_bit(i * ull_bits + lsb);
+       }
+
+       ASSERT_NOT_REACHED();
    }
 
-   void PhysicalRange::release_page(paddr_t page_address)
+   void PhysicalRange::release_page(paddr_t paddr)
    {
-       ASSERT(m_used_list);
-
-       node* page = node_address(page_address);
-
-       // Detach page from used list
-       if (page->prev)
-           page->prev->next = page->next;
-       if (page->next)
-           page->next->prev = page->prev;
-       if (m_used_list == page)
-           m_used_list = page->prev;
-
-       // Add page to the top of free list
-       page->prev = m_free_list;
-       page->next = nullptr;
-       if (m_free_list)
-           m_free_list->next = page;
-       m_free_list = page;
-
-       m_used_pages--;
+       ASSERT(paddr % PAGE_SIZE == 0);
+       ASSERT(paddr - m_paddr <= m_size);
+
+       ull full_bit = bit_for_paddr(paddr);
+       ull off = full_bit / ull_bits;
+       ull bit = full_bit % ull_bits;
+       ull mask = 1ull << bit;
+
+       ASSERT(!(ull_bitmap_ptr()[off] & mask));
+       ull_bitmap_ptr()[off] |= mask;
        m_free_pages++;
    }
 
-   paddr_t PhysicalRange::page_address(const node* page) const
-   {
-       ASSERT((vaddr_t)page <= m_vaddr + m_reservable_pages * sizeof(node));
-       uint64_t page_index = page - (node*)m_vaddr;
-       return m_paddr + (page_index + m_list_pages) * PAGE_SIZE;
-   }
-
-   PhysicalRange::node* PhysicalRange::node_address(paddr_t page_address) const
-   {
-       ASSERT(page_address % PAGE_SIZE == 0);
-       ASSERT(m_paddr + m_list_pages * PAGE_SIZE <= page_address && page_address < m_paddr + m_size);
-       uint64_t page_offset = page_address - (m_paddr + m_list_pages * PAGE_SIZE);
-       return (node*)m_vaddr + page_offset / PAGE_SIZE;
-   }
-
 }
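The header hunk also declares reserve_contiguous_pages() and release_contiguous_pages(), but their definitions are not part of the hunks shown here. For illustration only, here is one plausible shape of a contiguous scan over such a bitmap — an assumption of mine, not the commit's implementation — which looks for `count` consecutive set (free) bits and clears them:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Standalone sketch: find `count` consecutive set (free) bits in `bitmap`,
    // which holds `total_bits` bits, clear them, and return the first bit index,
    // or SIZE_MAX if no run is long enough.
    size_t reserve_contiguous_bits(uint64_t* bitmap, size_t total_bits, size_t count)
    {
        size_t run = 0;
        for (size_t bit = 0; bit < total_bits; bit++)
        {
            bool free = bitmap[bit / 64] & (1ull << (bit % 64));
            run = free ? run + 1 : 0;
            if (run == count)
            {
                size_t first = bit + 1 - count;
                for (size_t i = first; i <= bit; i++)
                    bitmap[i / 64] &= ~(1ull << (i % 64));  // mark the run allocated
                return first;
            }
        }
        return SIZE_MAX;
    }

    int main()
    {
        uint64_t bitmap[2] = { ~0ull, ~0ull };  // 128 free pages
        size_t first = reserve_contiguous_bits(bitmap, 128, 4);
        printf("reserved run starting at bit %zu\n", first);  // 0
    }

Whatever the real definitions look like, the bit index returned would translate to a physical address through the same paddr_for_bit() mapping shown above.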