Kernel: Add barebones GeneralAllocator for >4096B

This commit is contained in:
Bananymous
2023-05-08 22:10:49 +03:00
parent f1667b398a
commit 512be884ed
8 changed files with 169 additions and 13 deletions

View File

@@ -0,0 +1,64 @@
#include <kernel/Memory/GeneralAllocator.h>
#include <kernel/Process.h>
namespace Kernel
{

	// Page-granularity allocator used by Process for requests larger than
	// the fixed-width allocators handle (> 4096 B). Each allocation owns a
	// set of physical pages mapped into a contiguous virtual range.
	GeneralAllocator::GeneralAllocator(MMU& mmu)
		: m_mmu(mmu)
	{ }

	GeneralAllocator::~GeneralAllocator()
	{
		// Tear down every outstanding allocation (unmaps its virtual range
		// and returns its physical pages to the heap).
		while (!m_allocations.empty())
			deallocate(m_allocations.front().address);
	}

	// Allocates at least `bytes` bytes (rounded up to whole pages).
	// Returns the start of the mapped virtual range, or 0 on failure.
	vaddr_t GeneralAllocator::allocate(size_t bytes)
	{
		size_t needed_pages = BAN::Math::div_round_up<size_t>(bytes, PAGE_SIZE);

		Allocation allocation;
		if (allocation.pages.resize(needed_pages, 0).is_error())
			return 0;

		// Take the physical pages first; on partial failure, release
		// everything taken so far so nothing leaks.
		for (size_t i = 0; i < needed_pages; i++)
		{
			paddr_t paddr = Heap::get().take_free_page();
			if (paddr == 0)
			{
				for (size_t j = 0; j < i; j++)
					Heap::get().release_page(allocation.pages[j]);
				return 0;
			}
			allocation.pages[i] = paddr;
		}

		allocation.address = m_mmu.get_free_contiguous_pages(needed_pages);
		if (allocation.address == 0)
		{
			// No contiguous virtual range available: release the physical
			// pages instead of leaking them.
			// NOTE(review): assumes 0 signals failure here, matching
			// take_free_page() above — confirm against MMU's contract.
			for (size_t i = 0; i < needed_pages; i++)
				Heap::get().release_page(allocation.pages[i]);
			return 0;
		}

		for (size_t i = 0; i < needed_pages; i++)
			m_mmu.map_page_at(allocation.pages[i], allocation.address + i * PAGE_SIZE, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);

		// Save the address before moving: reading allocation.address after
		// BAN::move(allocation) is use-after-move.
		vaddr_t address = allocation.address;
		MUST(m_allocations.push_back(BAN::move(allocation)));
		return address;
	}

	// Frees the allocation starting at `address`. Returns false when the
	// address does not belong to this allocator.
	bool GeneralAllocator::deallocate(vaddr_t address)
	{
		for (auto it = m_allocations.begin(); it != m_allocations.end(); it++)
		{
			if (it->address != address)
				continue;

			// Unmap the whole virtual range, then return each backing
			// physical page to the heap.
			m_mmu.unmap_range(it->address, it->pages.size() * PAGE_SIZE);
			for (auto paddr : it->pages)
				Heap::get().release_page(paddr);

			m_allocations.remove(it);
			return true;
		}
		return false;
	}

}

View File

@@ -118,6 +118,7 @@ namespace Kernel
{
ASSERT(m_threads.empty());
ASSERT(m_fixed_width_allocators.empty());
ASSERT(m_general_allocator == nullptr);
if (m_mmu)
{
MMU::get().load();
@@ -152,6 +153,11 @@ namespace Kernel
// NOTE: We must clear allocators while the mmu is still alive
m_fixed_width_allocators.clear();
if (m_general_allocator)
{
delete m_general_allocator;
m_general_allocator = nullptr;
}
dprintln("process {} exit", pid());
s_process_lock.lock();
@@ -385,6 +391,8 @@ namespace Kernel
// Allocates `bytes` for the process: small requests (<= PAGE_SIZE) go through
// the fixed-width allocators, larger ones through the (lazily created)
// GeneralAllocator. Returns ENOMEM on any failure.
// NOTE(review): this span is a rendered diff — both the removed and the added
// implementation of the fixed-width path appear below, and the "@@" hunk
// boundary elides at least one line (presumably the allocation_size
// computation). Do not treat it as a single compilable function.
BAN::ErrorOr<void*> Process::allocate(size_t bytes)
{
vaddr_t address = 0;
if (bytes <= PAGE_SIZE)
{
// Do fixed width allocation
@@ -393,18 +401,40 @@ namespace Kernel
LockGuard _(m_lock);
// (old implementation, removed by this commit)
for (auto& allocator : m_fixed_width_allocators)
if (allocator.allocation_size() == allocation_size && allocator.allocations() < allocator.max_allocations())
return (void*)allocator.allocate();
bool needs_new_allocator { true };
// (old implementation, removed by this commit)
MUST(m_fixed_width_allocators.emplace_back(mmu(), allocation_size));
return (void*)m_fixed_width_allocators.back().allocate();
// New implementation: reuse an existing allocator of the right size that
// still has capacity; otherwise create a new one (TRY instead of MUST so
// OOM propagates as an error rather than panicking).
for (auto& allocator : m_fixed_width_allocators)
{
if (allocator.allocation_size() == allocation_size && allocator.allocations() < allocator.max_allocations())
{
address = allocator.allocate();
needs_new_allocator = false;
}
}
if (needs_new_allocator)
{
TRY(m_fixed_width_allocators.emplace_back(mmu(), allocation_size));
address = m_fixed_width_allocators.back().allocate();
}
}
else
{
// TODO: Do general allocation
return BAN::Error::from_errno(ENOMEM);
LockGuard _(m_lock);
// Create the GeneralAllocator on first large allocation.
if (!m_general_allocator)
{
// NOTE(review): presumably kernel operator new is nothrow here,
// since the nullptr result is checked — confirm.
m_general_allocator = new GeneralAllocator(mmu());
if (m_general_allocator == nullptr)
return BAN::Error::from_errno(ENOMEM);
}
address = m_general_allocator->allocate(bytes);
}
// Both paths signal failure with address == 0.
if (address == 0)
return BAN::Error::from_errno(ENOMEM);
return (void*)address;
}
void Process::free(void* ptr)
@@ -422,6 +452,10 @@ namespace Kernel
return;
}
}
if (m_general_allocator && m_general_allocator->deallocate((vaddr_t)ptr))
return;
dwarnln("free called on pointer that was not allocated");
}