Kernel: Fully remove sys_alloc and sys_free

I could delete the whole FixedWidthAllocator, as it is now obsolete.
GeneralAllocator is kept around for kmalloc, but kmalloc cannot
actually use it yet, since GeneralAllocator depends on SpinLock and
kmalloc has to work with interrupts disabled.
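
The kmalloc constraint is easiest to see in a minimal sketch (invented names, not banan-os code): on a single core, any allocator guarded by a spinning lock deadlocks the moment an interrupt handler calls into it while the lock is already held, so kmalloc has to work with interrupts disabled rather than on top of a SpinLock.

#include <atomic>
#include <cstddef>

struct SpinLock
{
    std::atomic_flag m_locked = ATOMIC_FLAG_INIT;
    void lock()   { while (m_locked.test_and_set(std::memory_order_acquire)); }
    void unlock() { m_locked.clear(std::memory_order_release); }
};

static SpinLock g_heap_lock;

void* kmalloc(size_t)
{
    g_heap_lock.lock();
    // If an interrupt fires here and its handler calls kmalloc again,
    // the handler spins forever: the lock can only be released by the
    // very code the handler preempted.
    void* ptr = nullptr; // placeholder for carving memory out of the heap
    g_heap_lock.unlock();
    return ptr;
}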
Bananymous 2023-09-23 03:53:30 +03:00
parent fc953df281
commit c0a89e8951
5 changed files with 59 additions and 491 deletions

kernel/CMakeLists.txt

@@ -32,7 +32,6 @@ set(KERNEL_SOURCES
kernel/Input/PS2Keymap.cpp
kernel/InterruptController.cpp
kernel/kernel.cpp
kernel/Memory/FixedWidthAllocator.cpp
kernel/Memory/GeneralAllocator.cpp
kernel/Memory/Heap.cpp
kernel/Memory/kmalloc.cpp

kernel/Memory/FixedWidthAllocator.h (deleted)

@@ -1,64 +0,0 @@
#pragma once
#include <BAN/Errors.h>
#include <BAN/UniqPtr.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
namespace Kernel
{
class FixedWidthAllocator
{
BAN_NON_COPYABLE(FixedWidthAllocator);
BAN_NON_MOVABLE(FixedWidthAllocator);
public:
static BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> create(PageTable&, uint32_t);
~FixedWidthAllocator();
BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> clone(PageTable&);
vaddr_t allocate();
bool deallocate(vaddr_t);
uint32_t allocation_size() const { return m_allocation_size; }
uint32_t allocations() const { return m_allocations; }
uint32_t max_allocations() const;
private:
FixedWidthAllocator(PageTable&, uint32_t);
BAN::ErrorOr<void> initialize();
bool allocate_page_if_needed(vaddr_t, uint8_t flags);
struct node
{
node* prev { nullptr };
node* next { nullptr };
bool allocated { false };
};
vaddr_t address_of_node(const node*) const;
node* node_from_address(vaddr_t) const;
void allocate_page_for_node_if_needed(const node*);
void allocate_node(node*);
void deallocate_node(node*);
private:
static constexpr uint32_t m_min_allocation_size = 16;
PageTable& m_page_table;
const uint32_t m_allocation_size;
vaddr_t m_nodes_page { 0 };
vaddr_t m_allocated_pages { 0 };
node* m_free_list { nullptr };
node* m_used_list { nullptr };
uint32_t m_allocations { 0 };
};
}
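
For reference, the interface deleted above hands out fixed-size slots of a single power-of-two width, tracking them with the intrusive node lists. A hypothetical call site (example() is invented for illustration; page_table is assumed to be an existing PageTable&):

BAN::ErrorOr<void> example(Kernel::PageTable& page_table)
{
    using namespace Kernel;
    auto allocator = TRY(FixedWidthAllocator::create(page_table, 64));
    vaddr_t addr = allocator->allocate();   // 0 once all max_allocations() slots are taken
    if (addr == 0)
        return BAN::Error::from_errno(ENOMEM);
    ASSERT(allocator->deallocate(addr));    // false for addresses the allocator does not own
    return {};
}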

kernel/Process.h

@@ -6,8 +6,6 @@
#include <BAN/Vector.h>
#include <kernel/Credentials.h>
#include <kernel/FS/Inode.h>
#include <kernel/Memory/FixedWidthAllocator.h>
#include <kernel/Memory/GeneralAllocator.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
#include <kernel/OpenFileDescriptorSet.h>
@@ -117,9 +115,6 @@ namespace Kernel
BAN::ErrorOr<long> sys_mmap(const sys_mmap_t&);
BAN::ErrorOr<long> sys_munmap(void* addr, size_t len);
BAN::ErrorOr<long> sys_alloc(size_t);
BAN::ErrorOr<long> sys_free(void*);
BAN::ErrorOr<long> sys_signal(int, void (*)(int));
BAN::ErrorOr<long> sys_raise(int signal);
static BAN::ErrorOr<long> sys_kill(pid_t pid, int signal);
@@ -181,9 +176,6 @@ namespace Kernel
BAN::Vector<BAN::UniqPtr<VirtualRange>> m_private_anonymous_mappings;
BAN::Vector<BAN::UniqPtr<FixedWidthAllocator>> m_fixed_width_allocators;
BAN::UniqPtr<GeneralAllocator> m_general_allocator;
vaddr_t m_signal_handlers[_SIGMAX + 1] { };
uint64_t m_signal_pending_mask { 0 };

kernel/Memory/FixedWidthAllocator.cpp (deleted)

@@ -1,288 +0,0 @@
#include <kernel/Memory/FixedWidthAllocator.h>
namespace Kernel
{
BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> FixedWidthAllocator::create(PageTable& page_table, uint32_t allocation_size)
{
auto* allocator_ptr = new FixedWidthAllocator(page_table, allocation_size);
if (allocator_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
auto allocator = BAN::UniqPtr<FixedWidthAllocator>::adopt(allocator_ptr);
TRY(allocator->initialize());
return allocator;
}
FixedWidthAllocator::FixedWidthAllocator(PageTable& page_table, uint32_t allocation_size)
: m_page_table(page_table)
, m_allocation_size(BAN::Math::max(allocation_size, m_min_allocation_size))
{
ASSERT(BAN::Math::is_power_of_two(allocation_size));
}
BAN::ErrorOr<void> FixedWidthAllocator::initialize()
{
m_nodes_page = (vaddr_t)kmalloc(PAGE_SIZE);
if (!m_nodes_page)
return BAN::Error::from_errno(ENOMEM);
m_allocated_pages = (vaddr_t)kmalloc(PAGE_SIZE);
if (!m_allocated_pages)
{
kfree((void*)m_nodes_page);
m_nodes_page = 0;
return BAN::Error::from_errno(ENOMEM);
}
memset((void*)m_nodes_page, 0, PAGE_SIZE);
memset((void*)m_allocated_pages, 0, PAGE_SIZE);
node* node_table = (node*)m_nodes_page;
for (uint32_t i = 0; i < PAGE_SIZE / sizeof(node); i++)
{
node_table[i].next = &node_table[i + 1];
node_table[i].prev = &node_table[i - 1];
}
node_table[0].prev = nullptr;
node_table[PAGE_SIZE / sizeof(node) - 1].next = nullptr;
m_free_list = node_table;
m_used_list = nullptr;
return {};
}
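
Note that the loop above briefly points node_table[0].prev at index -1 and the last node's next one past the end before the two fix-up lines repair the list ends. A sketch of an equivalent loop that never forms those transient out-of-range links:

for (uint32_t i = 0; i < PAGE_SIZE / sizeof(node); i++)
{
    node_table[i].prev = (i == 0) ? nullptr : &node_table[i - 1];
    node_table[i].next = (i + 1 == PAGE_SIZE / sizeof(node)) ? nullptr : &node_table[i + 1];
}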
FixedWidthAllocator::~FixedWidthAllocator()
{
if (m_nodes_page && m_allocated_pages)
{
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t page_vaddr = ((vaddr_t*)m_allocated_pages)[page_index];
if (page_vaddr == 0)
continue;
ASSERT(!m_page_table.is_page_free(page_vaddr));
Heap::get().release_page(m_page_table.physical_address_of(page_vaddr));
m_page_table.unmap_page(page_vaddr);
}
}
if (m_nodes_page)
kfree((void*)m_nodes_page);
if (m_allocated_pages)
kfree((void*)m_allocated_pages);
}
vaddr_t FixedWidthAllocator::allocate()
{
if (m_free_list == nullptr)
return 0;
node* node = m_free_list;
allocate_node(node);
allocate_page_for_node_if_needed(node);
return address_of_node(node);
}
bool FixedWidthAllocator::deallocate(vaddr_t address)
{
if (address % m_allocation_size)
return false;
if (m_allocations == 0)
return false;
node* node = node_from_address(address);
if (node == nullptr)
return false;
if (!node->allocated)
{
dwarnln("deallocate called on unallocated address");
return true;
}
deallocate_node(node);
return true;
}
void FixedWidthAllocator::allocate_node(node* node)
{
ASSERT(!node->allocated);
node->allocated = true;
if (node == m_free_list)
m_free_list = node->next;
if (node->prev)
node->prev->next = node->next;
if (node->next)
node->next->prev = node->prev;
node->next = m_used_list;
node->prev = nullptr;
if (m_used_list)
m_used_list->prev = node;
m_used_list = node;
m_allocations++;
}
void FixedWidthAllocator::deallocate_node(node* node)
{
ASSERT(node->allocated);
node->allocated = false;
if (node == m_used_list)
m_used_list = node->next;
if (node->prev)
node->prev->next = node->next;
if (node->next)
node->next->prev = node->prev;
node->next = m_free_list;
node->prev = nullptr;
if (m_free_list)
m_free_list->prev = node;
m_free_list = node;
m_allocations--;
}
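
allocate_node() and deallocate_node() are mirror images: each unlinks a node from one intrusive doubly linked list and pushes it onto the front of the other, differing only in which list is the source and which the destination. The shared pattern, factored into a standalone sketch (node has the same shape as the struct in the deleted header):

struct node { node* prev; node* next; bool allocated; };

static void move_to_front(node*& src_head, node*& dst_head, node* n)
{
    if (n == src_head)          // node may be the source list's head
        src_head = n->next;
    if (n->prev)                // unlink from its neighbours
        n->prev->next = n->next;
    if (n->next)
        n->next->prev = n->prev;
    n->prev = nullptr;          // push onto the front of the destination
    n->next = dst_head;
    if (dst_head)
        dst_head->prev = n;
    dst_head = n;
}

With move_to_front(m_free_list, m_used_list, n) plus the allocated flag and counter updates you get allocate_node(); swapping the two lists gives deallocate_node().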
uint32_t FixedWidthAllocator::max_allocations() const
{
return PAGE_SIZE / sizeof(node);
}
vaddr_t FixedWidthAllocator::address_of_node(const node* node) const
{
uint32_t index = node - (struct node*)m_nodes_page;
uint32_t page_index = index / (PAGE_SIZE / m_allocation_size);
ASSERT(page_index < PAGE_SIZE / sizeof(vaddr_t));
uint32_t offset = index % (PAGE_SIZE / m_allocation_size);
vaddr_t page_begin = ((vaddr_t*)m_allocated_pages)[page_index];
ASSERT(page_begin);
return page_begin + offset * m_allocation_size;
}
FixedWidthAllocator::node* FixedWidthAllocator::node_from_address(vaddr_t address) const
{
// TODO: This should probably be optimized from O(n), preferably to O(1),
// but I don't want to think about performance right now.
ASSERT(address % m_allocation_size == 0);
vaddr_t page_begin = address / PAGE_SIZE * PAGE_SIZE;
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t vaddr = ((vaddr_t*)m_allocated_pages)[page_index];
if (vaddr != page_begin)
continue;
uint32_t offset = (address - page_begin) / m_allocation_size;
node* result = (node*)m_nodes_page;
result += page_index * PAGE_SIZE / m_allocation_size;
result += offset;
ASSERT(address_of_node(result) == address);
return result;
}
return nullptr;
}
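
One way to realize the TODO above is a reverse map from page base address to page index, maintained wherever pages are assigned. m_page_index_of below is a hypothetical member (std::unordered_map used purely for illustration); this is a sketch of the idea, not code the allocator ever had:

std::unordered_map<vaddr_t, uint32_t> m_page_index_of; // hypothetical: page base -> page index

FixedWidthAllocator::node* FixedWidthAllocator::node_from_address(vaddr_t address) const
{
    ASSERT(address % m_allocation_size == 0);
    vaddr_t page_begin = address / PAGE_SIZE * PAGE_SIZE;
    auto it = m_page_index_of.find(page_begin);
    if (it == m_page_index_of.end())
        return nullptr;
    node* result = (node*)m_nodes_page
        + it->second * (PAGE_SIZE / m_allocation_size)   // nodes of earlier pages
        + (address - page_begin) / m_allocation_size;    // slot within this page
    ASSERT(address_of_node(result) == address);
    return result;
}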
void FixedWidthAllocator::allocate_page_for_node_if_needed(const node* node)
{
uint32_t index = node - (struct node*)m_nodes_page;
uint32_t page_index = index / (PAGE_SIZE / m_allocation_size);
ASSERT(page_index < PAGE_SIZE / sizeof(vaddr_t));
vaddr_t& page_vaddr = ((vaddr_t*)m_allocated_pages)[page_index];
if (page_vaddr)
return;
paddr_t page_paddr = Heap::get().take_free_page();
ASSERT(page_paddr);
page_vaddr = m_page_table.reserve_free_page(0x300000);
ASSERT(page_vaddr);
m_page_table.map_page_at(page_paddr, page_vaddr, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
}
bool FixedWidthAllocator::allocate_page_if_needed(vaddr_t vaddr, uint8_t flags)
{
ASSERT(vaddr % PAGE_SIZE == 0);
// Check if page is already allocated
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t page_begin = ((vaddr_t*)m_allocated_pages)[page_index];
if (vaddr == page_begin)
return false;
}
// Page is not allocated so the vaddr must not be in use
ASSERT(m_page_table.is_page_free(vaddr));
// Allocate the vaddr on empty page
for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
{
vaddr_t& page_begin = ((vaddr_t*)m_allocated_pages)[page_index];
if (page_begin == 0)
{
paddr_t paddr = Heap::get().take_free_page();
ASSERT(paddr);
m_page_table.map_page_at(paddr, vaddr, flags);
page_begin = vaddr;
return true;
}
}
ASSERT_NOT_REACHED();
}
BAN::ErrorOr<BAN::UniqPtr<FixedWidthAllocator>> FixedWidthAllocator::clone(PageTable& new_page_table)
{
auto allocator = TRY(FixedWidthAllocator::create(new_page_table, allocation_size()));
m_page_table.lock();
ASSERT(m_page_table.is_page_free(0));
for (node* node = m_used_list; node; node = node->next)
{
ASSERT(node->allocated);
vaddr_t vaddr = address_of_node(node);
vaddr_t page_begin = vaddr & PAGE_ADDR_MASK;
PageTable::flags_t flags = m_page_table.get_page_flags(page_begin);
// Allocate and copy all data from this allocation to the new one
if (allocator->allocate_page_if_needed(page_begin, flags))
{
paddr_t paddr = new_page_table.physical_address_of(page_begin);
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)page_begin, PAGE_SIZE);
}
// Now that we are sure the page is allocated, we can access the node
struct node* new_node = allocator->node_from_address(vaddr);
allocator->allocate_node(new_node);
}
m_page_table.unmap_page(0);
m_page_table.unlock();
return allocator;
}
}
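
The subtle part of clone() is the copy loop: the freshly allocated destination page is only mapped in new_page_table, so it is temporarily mapped at virtual address 0 of the current table (hence the earlier ASSERT that vaddr 0 is free) to make it reachable for memcpy. The pattern in isolation, with the roles spelled out:

paddr_t paddr = new_page_table.physical_address_of(page_begin);  // new process's copy of the page
m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
memcpy((void*)0, (void*)page_begin, PAGE_SIZE);                  // dst: scratch mapping, src: original page
m_page_table.unmap_page(0);                                      // done once, after the loop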

kernel/Process.cpp

@@ -117,11 +117,27 @@ namespace Kernel
{
PageTableScope _(process->page_table());
argv = (char**)MUST(process->sys_alloc(sizeof(char**) * 2));
argv[0] = (char*)MUST(process->sys_alloc(path.size() + 1));
memcpy(argv[0], path.data(), path.size());
argv[0][path.size()] = '\0';
argv[1] = nullptr;
size_t needed_bytes = sizeof(char*) * 2 + path.size() + 1;
if (auto rem = needed_bytes % PAGE_SIZE)
needed_bytes += PAGE_SIZE - rem;
auto argv_range = MUST(VirtualRange::create_to_vaddr_range(
process->page_table(),
0x400000, KERNEL_OFFSET,
needed_bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));
argv_range->set_zero();
uintptr_t temp = argv_range->vaddr() + sizeof(char*) * 2;
argv_range->copy_from(0, (uint8_t*)&temp, sizeof(char*));
temp = 0;
argv_range->copy_from(sizeof(char*), (uint8_t*)&temp, sizeof(char*));
argv_range->copy_from(sizeof(char*) * 2, (const uint8_t*)path.data(), path.size());
MUST(process->m_mapped_ranges.push_back(BAN::move(argv_range)));
}
process->m_userspace_info.argc = 1;
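
The replacement block packs the two-entry pointer table and the path string into one anonymous VirtualRange. For an assumed example path "/bin/sh" on a 64-bit build (sizeof(char*) == 8), the start of the range ends up laid out as:

offset  0: argv[0] = vaddr + 16   (first temp store)
offset  8: argv[1] = NULL         (second temp store)
offset 16: "/bin/sh"              (terminating NUL supplied by set_zero())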
@@ -149,8 +165,6 @@ namespace Kernel
Process::~Process()
{
ASSERT(m_threads.empty());
ASSERT(m_fixed_width_allocators.empty());
ASSERT(!m_general_allocator);
ASSERT(m_private_anonymous_mappings.empty());
ASSERT(m_mapped_ranges.empty());
ASSERT(m_exit_status.waiting == 0);
@@ -187,10 +201,6 @@ namespace Kernel
// NOTE: We must unmap ranges while the page table is still alive
m_private_anonymous_mappings.clear();
m_mapped_ranges.clear();
// NOTE: We must clear allocators while the page table is still alive
m_fixed_width_allocators.clear();
m_general_allocator.clear();
}
void Process::on_thread_exit(Thread& thread)
@@ -331,16 +341,6 @@ namespace Kernel
for (auto& mapped_range : m_mapped_ranges)
MUST(mapped_ranges.push_back(TRY(mapped_range->clone(*page_table))));
BAN::Vector<BAN::UniqPtr<FixedWidthAllocator>> fixed_width_allocators;
TRY(fixed_width_allocators.reserve(m_fixed_width_allocators.size()));
for (auto& allocator : m_fixed_width_allocators)
if (allocator->allocations() > 0)
MUST(fixed_width_allocators.push_back(TRY(allocator->clone(*page_table))));
BAN::UniqPtr<GeneralAllocator> general_allocator;
if (m_general_allocator)
general_allocator = TRY(m_general_allocator->clone(*page_table));
Process* forked = create_process(m_credentials, m_pid, m_sid, m_pgrp);
forked->m_controlling_terminal = m_controlling_terminal;
forked->m_working_directory = BAN::move(working_directory);
@@ -348,8 +348,6 @@ namespace Kernel
forked->m_open_file_descriptors = BAN::move(open_file_descriptors);
forked->m_private_anonymous_mappings = BAN::move(private_anonymous_mappings);
forked->m_mapped_ranges = BAN::move(mapped_ranges);
forked->m_fixed_width_allocators = BAN::move(fixed_width_allocators);
forked->m_general_allocator = BAN::move(general_allocator);
forked->m_is_userspace = m_is_userspace;
forked->m_userspace_info = m_userspace_info;
forked->m_has_called_exec = false;
@@ -390,8 +388,6 @@ namespace Kernel
m_open_file_descriptors.close_cloexec();
m_fixed_width_allocators.clear();
m_general_allocator.clear();
m_private_anonymous_mappings.clear();
m_mapped_ranges.clear();
@@ -409,27 +405,46 @@ namespace Kernel
ASSERT(&Process::current() == this);
// allocate memory on the new process for arguments and environment
{
LockGuard _(page_table());
m_userspace_info.argv = (char**)MUST(sys_alloc(sizeof(char**) * (str_argv.size() + 1)));
for (size_t i = 0; i < str_argv.size(); i++)
auto create_range =
[&](const auto& container) -> BAN::UniqPtr<VirtualRange>
{
m_userspace_info.argv[i] = (char*)MUST(sys_alloc(str_argv[i].size() + 1));
memcpy(m_userspace_info.argv[i], str_argv[i].data(), str_argv[i].size());
m_userspace_info.argv[i][str_argv[i].size()] = '\0';
}
m_userspace_info.argv[str_argv.size()] = nullptr;
size_t bytes = sizeof(char*);
for (auto& elem : container)
bytes += sizeof(char*) + elem.size() + 1;
m_userspace_info.envp = (char**)MUST(sys_alloc(sizeof(char**) * (str_envp.size() + 1)));
for (size_t i = 0; i < str_envp.size(); i++)
{
m_userspace_info.envp[i] = (char*)MUST(sys_alloc(str_envp[i].size() + 1));
memcpy(m_userspace_info.envp[i], str_envp[i].data(), str_envp[i].size());
m_userspace_info.envp[i][str_envp[i].size()] = '\0';
}
m_userspace_info.envp[str_envp.size()] = nullptr;
}
if (auto rem = bytes % PAGE_SIZE)
bytes += PAGE_SIZE - rem;
auto range = MUST(VirtualRange::create_to_vaddr_range(
page_table(),
0x400000, KERNEL_OFFSET,
bytes,
PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
));
range->set_zero();
size_t data_offset = sizeof(char*) * (container.size() + 1);
for (size_t i = 0; i < container.size(); i++)
{
uintptr_t ptr_addr = range->vaddr() + data_offset;
range->copy_from(sizeof(char*) * i, (const uint8_t*)&ptr_addr, sizeof(char*));
range->copy_from(data_offset, (const uint8_t*)container[i].data(), container[i].size());
data_offset += container[i].size() + 1;
}
uintptr_t null = 0;
range->copy_from(sizeof(char*) * container.size(), (const uint8_t*)&null, sizeof(char*));
return BAN::move(range);
};
auto argv_range = create_range(str_argv);
m_userspace_info.argv = (char**)argv_range->vaddr();
MUST(m_mapped_ranges.push_back(BAN::move(argv_range)));
auto envp_range = create_range(str_envp);
m_userspace_info.envp = (char**)envp_range->vaddr();
MUST(m_mapped_ranges.push_back(BAN::move(envp_range)));
m_userspace_info.argc = str_argv.size();
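
create_range generalizes the same trick to any container of strings: a (count + 1)-entry pointer table followed by the packed string bytes, with every terminating NUL supplied by the initial set_zero(). For an assumed str_argv of { "cat", "file.txt" }, data_offset starts at sizeof(char*) * 3 == 24 and the range is laid out as:

offset  0: argv[0] = vaddr + 24
offset  8: argv[1] = vaddr + 28   (data_offset advanced by "cat" plus NUL)
offset 16: NULL
offset 24: "cat"
offset 28: "file.txt"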
@@ -829,92 +844,6 @@ namespace Kernel
return 0;
}
static constexpr size_t allocator_size_for_allocation(size_t value)
{
if (value <= 256) {
if (value <= 64)
return 64;
else
return 256;
} else {
if (value <= 1024)
return 1024;
else
return 4096;
}
}
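
The nested branches pick the smallest of the four size classes 64, 256, 1024 and 4096 that fits the request, e.g. allocator_size_for_allocation(100) == 256. An equivalent scan over the classes, as a sketch:

static constexpr size_t size_classes[] { 64, 256, 1024, 4096 };

static constexpr size_t allocator_size_for_allocation(size_t value)
{
    for (size_t size : size_classes)
        if (value <= size)
            return size;
    ASSERT_NOT_REACHED(); // sys_alloc only calls this for value <= PAGE_SIZE
}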
BAN::ErrorOr<long> Process::sys_alloc(size_t bytes)
{
vaddr_t address = 0;
if (bytes <= PAGE_SIZE)
{
// Do fixed width allocation
size_t allocation_size = allocator_size_for_allocation(bytes);
ASSERT(bytes <= allocation_size);
ASSERT(allocation_size <= PAGE_SIZE);
LockGuard _(m_lock);
bool needs_new_allocator { true };
for (auto& allocator : m_fixed_width_allocators)
{
if (allocator->allocation_size() == allocation_size && allocator->allocations() < allocator->max_allocations())
{
address = allocator->allocate();
needs_new_allocator = false;
break;
}
}
if (needs_new_allocator)
{
auto allocator = TRY(FixedWidthAllocator::create(page_table(), allocation_size));
TRY(m_fixed_width_allocators.push_back(BAN::move(allocator)));
address = m_fixed_width_allocators.back()->allocate();
}
}
else
{
LockGuard _(m_lock);
if (!m_general_allocator)
m_general_allocator = TRY(GeneralAllocator::create(page_table(), 0x400000));
address = m_general_allocator->allocate(bytes);
}
if (address == 0)
return BAN::Error::from_errno(ENOMEM);
return address;
}
BAN::ErrorOr<long> Process::sys_free(void* ptr)
{
LockGuard _(m_lock);
for (size_t i = 0; i < m_fixed_width_allocators.size(); i++)
{
auto& allocator = m_fixed_width_allocators[i];
if (allocator->deallocate((vaddr_t)ptr))
{
// TODO: This might be too eager. Maybe we should only
// remove allocators when memory is low?
if (allocator->allocations() == 0)
m_fixed_width_allocators.remove(i);
return 0;
}
}
if (m_general_allocator && m_general_allocator->deallocate((vaddr_t)ptr))
return 0;
dwarnln("free called on pointer that was not allocated");
return BAN::Error::from_errno(EINVAL);
}
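
With sys_alloc and sys_free gone, userspace allocation presumably has to be built on the surviving sys_mmap/sys_munmap (still visible in the Process.h diff above). A page-granular sketch of the minimal userspace fallback, assumed rather than taken from banan-os's libc:

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

void* crude_alloc(size_t bytes)
{
    // Round up to whole pages; every allocation gets its own mapping,
    // which a real malloc would of course carve into smaller chunks.
    size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
    size_t size = (bytes + page_size - 1) / page_size * page_size;
    void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return ptr == MAP_FAILED ? nullptr : ptr;
}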
BAN::ErrorOr<long> Process::sys_termid(char* buffer) const
{
LockGuard _(m_lock);