Kernel: Allocators are now stored in UniqPtr

This allows proper memory management; previously we had some memory leaks.
Bananymous 2023-06-04 01:25:57 +03:00
parent 51f4c0c750
commit 924fc2118c
6 changed files with 47 additions and 41 deletions
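For context, the commit replaces manually managed raw allocator pointers with BAN::UniqPtr ownership. Below is a minimal standalone sketch of that pattern; the UniqPtr here is a toy stand-in written only for illustration (it is not BAN::UniqPtr's actual implementation, and DummyAllocator is invented), but it models the same semantics the diff relies on: adopt() takes over a raw pointer, clear() frees it, and the destructor frees whatever is still held, so no code path can forget a delete.

// Toy sketch of unique-ownership semantics; not the real BAN::UniqPtr.
#include <cstdio>
#include <utility>

template<typename T>
class UniqPtr
{
public:
    UniqPtr() = default;
    UniqPtr(UniqPtr&& other) : m_ptr(other.m_ptr) { other.m_ptr = nullptr; }
    UniqPtr& operator=(UniqPtr&& other) { clear(); std::swap(m_ptr, other.m_ptr); return *this; }
    ~UniqPtr() { clear(); }

    // Mirrors the adopt() used in the diff: take ownership of a raw pointer.
    static UniqPtr adopt(T* ptr) { UniqPtr result; result.m_ptr = ptr; return result; }

    void clear() { delete m_ptr; m_ptr = nullptr; }
    T* ptr() const { return m_ptr; }
    T* operator->() const { return m_ptr; }
    explicit operator bool() const { return m_ptr != nullptr; }

private:
    T* m_ptr { nullptr };
};

struct DummyAllocator
{
    DummyAllocator()  { std::puts("allocator constructed"); }
    ~DummyAllocator() { std::puts("allocator destroyed"); }
};

int main()
{
    // Before: a raw pointer member meant every exit path had to call delete,
    // and a missed path leaked. With unique ownership the destructor runs
    // automatically when the owner goes out of scope (or on clear()).
    auto allocator = UniqPtr<DummyAllocator>::adopt(new DummyAllocator());
    if (allocator)
        std::puts("allocator is alive");
    return 0; // DummyAllocator is destroyed here without a manual delete
}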

View File

@@ -1,5 +1,6 @@
 #pragma once
+#include <BAN/UniqPtr.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/Memory/PageTable.h>

@@ -12,10 +13,10 @@ namespace Kernel
     BAN_NON_MOVABLE(FixedWidthAllocator);
 public:
-    FixedWidthAllocator(PageTable&, uint32_t);
+    static BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> create(PageTable&, uint32_t);
     ~FixedWidthAllocator();
-    BAN::ErrorOr<FixedWidthAllocator*> clone(PageTable&);
+    BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> clone(PageTable&);
     vaddr_t allocate();
     bool deallocate(vaddr_t);

@@ -26,6 +27,7 @@ namespace Kernel
     uint32_t max_allocations() const;
 private:
+    FixedWidthAllocator(PageTable&, uint32_t);
     bool allocate_page_if_needed(vaddr_t, uint8_t flags);
     struct node
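The header change above also makes the constructor private and exposes a static create() returning BAN::ErrorOr<BAN::UniqPtr<...>>, so an out-of-memory condition surfaces as an error the caller must handle instead of a null pointer; the same shape repeats for GeneralAllocator below. The following is a simplified sketch of that factory idiom. ErrorOr, TRY, ToyAllocator and make_allocator here are illustrative stand-ins built on the standard library, not the kernel's real BAN types, but the calling convention mirrors how the .cpp files later in this diff consume create() via TRY.

// Toy ErrorOr/TRY and a toy allocator; illustration only, not kernel code.
#include <cstdio>
#include <memory>
#include <new>
#include <optional>
#include <utility>

template<typename T>
struct ErrorOr
{
    std::optional<T> value;
    int error { 0 };

    bool is_error() const { return !value.has_value(); }
    T release_value() { return std::move(*value); }
};

// Simplified TRY with the same calling convention as the kernel's macro:
// propagate the error to the caller, otherwise yield the unwrapped value.
// (Uses a GCC/Clang statement expression.)
#define TRY(expr)                                   \
    ({ auto&& _result = (expr);                     \
       if (_result.is_error())                      \
           return { std::nullopt, _result.error };  \
       _result.release_value(); })

class ToyAllocator
{
public:
    // Factory reports allocation failure as an error instead of handing the
    // caller a raw pointer it must null-check and later remember to delete.
    static ErrorOr<std::unique_ptr<ToyAllocator>> create(unsigned allocation_size)
    {
        auto* allocator = new (std::nothrow) ToyAllocator(allocation_size);
        if (allocator == nullptr)
            return { std::nullopt, 12 /* ENOMEM */ };
        return { std::unique_ptr<ToyAllocator>(allocator), 0 };
    }

private:
    // Private constructor: the only way to get an instance is through create().
    explicit ToyAllocator(unsigned allocation_size) : m_allocation_size(allocation_size) { }
    unsigned m_allocation_size;
};

// Caller-side pattern, mirroring the Process code later in this diff.
ErrorOr<std::unique_ptr<ToyAllocator>> make_allocator()
{
    auto allocator = TRY(ToyAllocator::create(64));
    std::printf("allocator created\n");
    return { std::move(allocator), 0 };
}

int main()
{
    auto result = make_allocator();
    return result.is_error() ? 1 : 0;
}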

View File

@@ -1,6 +1,7 @@
 #pragma once
 #include <BAN/LinkedList.h>
+#include <BAN/UniqPtr.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/Memory/PageTable.h>

@@ -13,13 +14,16 @@ namespace Kernel
     BAN_NON_MOVABLE(GeneralAllocator);
 public:
-    GeneralAllocator(PageTable&);
+    static BAN::ErrorOr<BAN::UniqPtr<GeneralAllocator>> create(PageTable&);
     ~GeneralAllocator();
+    BAN::ErrorOr<BAN::UniqPtr<GeneralAllocator>> clone(PageTable&);
     vaddr_t allocate(size_t);
     bool deallocate(vaddr_t);
-    BAN::ErrorOr<GeneralAllocator*> clone(PageTable&);
+private:
+    GeneralAllocator(PageTable&);
 private:
     struct Allocation

View File

@@ -114,12 +114,12 @@ namespace Kernel
     BAN::String m_working_directory;
     BAN::Vector<Thread*> m_threads;
-    BAN::Vector<FixedWidthAllocator*> m_fixed_width_allocators;
-    GeneralAllocator* m_general_allocator;
+    BAN::Vector<BAN::UniqPtr<FixedWidthAllocator>> m_fixed_width_allocators;
+    BAN::UniqPtr<GeneralAllocator> m_general_allocator;
     userspace_entry_t m_userspace_entry;
-    PageTable* m_page_table { nullptr };
+    BAN::UniqPtr<PageTable> m_page_table;
     TTY* m_tty { nullptr };
 };

View File

@@ -3,6 +3,14 @@
 namespace Kernel
 {
+BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> FixedWidthAllocator::create(PageTable& page_table, uint32_t allocation_size)
+{
+    auto* allocator = new FixedWidthAllocator(page_table, allocation_size);
+    if (allocator == nullptr)
+        return BAN::Error::from_errno(ENOMEM);
+    return BAN::UniqPtr<FixedWidthAllocator>::adopt(allocator);
+}
 FixedWidthAllocator::FixedWidthAllocator(PageTable& page_table, uint32_t allocation_size)
     : m_page_table(page_table)
     , m_allocation_size(BAN::Math::max(allocation_size, m_min_allocation_size))

@@ -222,14 +230,11 @@ namespace Kernel
     ASSERT_NOT_REACHED();
 }
-BAN::ErrorOr<FixedWidthAllocator*> FixedWidthAllocator::clone(PageTable& new_page_table)
+BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> FixedWidthAllocator::clone(PageTable& new_page_table)
 {
-    FixedWidthAllocator* allocator = new FixedWidthAllocator(new_page_table, allocation_size());
-    if (allocator == nullptr)
-        return BAN::Error::from_errno(ENOMEM);
+    auto allocator = TRY(FixedWidthAllocator::create(new_page_table, allocation_size()));
     m_page_table.lock();
     ASSERT(m_page_table.is_page_free(0));
     for (node* node = m_used_list; node; node = node->next)

View File

@@ -1,8 +1,17 @@
 #include <kernel/Memory/GeneralAllocator.h>
+#include <kernel/Memory/PageTableScope.h>
 namespace Kernel
 {
+BAN::ErrorOr<BAN::UniqPtr<GeneralAllocator>> GeneralAllocator::create(PageTable& page_table)
+{
+    auto* allocator = new GeneralAllocator(page_table);
+    if (allocator == nullptr)
+        return BAN::Error::from_errno(ENOMEM);
+    return BAN::UniqPtr<GeneralAllocator>::adopt(allocator);
+}
 GeneralAllocator::GeneralAllocator(PageTable& page_table)
     : m_page_table(page_table)
 { }

@@ -60,11 +69,9 @@ namespace Kernel
     return false;
 }
-BAN::ErrorOr<GeneralAllocator*> GeneralAllocator::clone(PageTable& new_page_table)
+BAN::ErrorOr<BAN::UniqPtr<GeneralAllocator>> GeneralAllocator::clone(PageTable& new_page_table)
 {
-    GeneralAllocator* allocator = new GeneralAllocator(new_page_table);
-    if (allocator == nullptr)
-        return BAN::Error::from_errno(ENOMEM);
+    auto allocator = TRY(GeneralAllocator::create(new_page_table));
     m_page_table.lock();

View File

@@ -56,7 +56,7 @@
     auto* process = create_process();
     MUST(process->m_working_directory.push_back('/'));
-    process->m_page_table = MUST(PageTable::create_userspace());
+    process->m_page_table = BAN::UniqPtr<PageTable>::adopt(MUST(PageTable::create_userspace()));
     process->load_elf(*elf);
@@ -91,13 +91,9 @@
 {
     ASSERT(m_threads.empty());
     ASSERT(m_fixed_width_allocators.empty());
-    ASSERT(m_general_allocator == nullptr);
+    ASSERT(!m_general_allocator);
     ASSERT(m_mapped_ranges.empty());
-    if (m_page_table)
-    {
-        PageTable::kernel().load();
-        delete m_page_table;
-    }
+    ASSERT(&PageTable::current() != m_page_table.ptr());
     dprintln("process {} exit", pid());
 }
@@ -133,11 +129,7 @@
     // NOTE: We must clear allocators while the page table is still alive
     m_fixed_width_allocators.clear();
-    if (m_general_allocator)
-    {
-        delete m_general_allocator;
-        m_general_allocator = nullptr;
-    }
+    m_general_allocator.clear();
     s_process_lock.lock();
     for (size_t i = 0; i < s_processes.size(); i++)
@@ -163,7 +155,7 @@
 {
     Process* forked = create_process();
-    forked->m_page_table = MUST(PageTable::create_userspace());
+    forked->m_page_table = BAN::UniqPtr<PageTable>::adopt(MUST(PageTable::create_userspace()));
     LockGuard _(m_lock);
     forked->m_tty = m_tty;
@@ -180,7 +172,8 @@
     ASSERT(m_threads.front() == &Thread::current());
     for (auto& allocator : m_fixed_width_allocators)
-        MUST(forked->m_fixed_width_allocators.push_back(MUST(allocator->clone(forked->page_table()))));
+        if (allocator->allocations() > 0)
+            MUST(forked->m_fixed_width_allocators.push_back(MUST(allocator->clone(forked->page_table()))));
     if (m_general_allocator)
         forked->m_general_allocator = MUST(m_general_allocator->clone(forked->page_table()));
@@ -554,21 +547,20 @@
     bool needs_new_allocator { true };
-    for (auto* allocator : m_fixed_width_allocators)
+    for (auto& allocator : m_fixed_width_allocators)
     {
         if (allocator->allocation_size() == allocation_size && allocator->allocations() < allocator->max_allocations())
         {
             address = allocator->allocate();
             needs_new_allocator = false;
+            break;
         }
     }
     if (needs_new_allocator)
     {
-        auto* allocator = new FixedWidthAllocator(page_table(), allocation_size);
-        if (allocator == nullptr)
-            return BAN::Error::from_errno(ENOMEM);
-        TRY(m_fixed_width_allocators.push_back(allocator));
+        auto allocator = TRY(FixedWidthAllocator::create(page_table(), allocation_size));
+        TRY(m_fixed_width_allocators.push_back(BAN::move(allocator)));
         address = m_fixed_width_allocators.back()->allocate();
     }
 }
@@ -577,11 +569,7 @@
     LockGuard _(m_lock);
     if (!m_general_allocator)
-    {
-        m_general_allocator = new GeneralAllocator(page_table());
-        if (m_general_allocator == nullptr)
-            return BAN::Error::from_errno(ENOMEM);
-    }
+        m_general_allocator = TRY(GeneralAllocator::create(page_table()));
     address = m_general_allocator->allocate(bytes);
 }
@@ -597,7 +585,7 @@
     for (size_t i = 0; i < m_fixed_width_allocators.size(); i++)
     {
-        auto* allocator = m_fixed_width_allocators[i];
+        auto& allocator = m_fixed_width_allocators[i];
         if (allocator->deallocate((vaddr_t)ptr))
         {
             // TODO: This might be too much. Maybe we should only