Kernel/LibC: add mmap for private anonymous mappings

This will be used by userspace to request more memory. Currently the
kernel performs all userspace heap allocations on its behalf, which is not ideal.
This commit is contained in:
Bananymous
2023-09-22 15:41:05 +03:00
parent b9c779ff7e
commit af4af1cae9
7 changed files with 117 additions and 0 deletions

View File

@@ -15,6 +15,7 @@
#include <kernel/Terminal/TTY.h>
#include <kernel/Thread.h>
#include <sys/mman.h>
#include <termios.h>
namespace LibELF { class ELF; }
@@ -115,6 +116,9 @@ namespace Kernel
BAN::ErrorOr<long> sys_read_dir_entries(int fd, DirectoryEntryList* buffer, size_t buffer_size);
BAN::ErrorOr<long> sys_mmap(const sys_mmap_t&);
BAN::ErrorOr<long> sys_munmap(void* addr, size_t len);
BAN::ErrorOr<long> sys_alloc(size_t);
BAN::ErrorOr<long> sys_free(void*);
@@ -177,6 +181,8 @@ namespace Kernel
BAN::String m_working_directory;
BAN::Vector<Thread*> m_threads;
BAN::Vector<BAN::UniqPtr<VirtualRange>> m_private_anonymous_mappings;
BAN::Vector<BAN::UniqPtr<FixedWidthAllocator>> m_fixed_width_allocators;
BAN::UniqPtr<GeneralAllocator> m_general_allocator;

View File

@@ -159,6 +159,7 @@ namespace Kernel
ASSERT(m_threads.empty());
ASSERT(m_fixed_width_allocators.empty());
ASSERT(!m_general_allocator);
ASSERT(m_private_anonymous_mappings.empty());
ASSERT(m_mapped_ranges.empty());
ASSERT(m_exit_status.waiting == 0);
ASSERT(&PageTable::current() != m_page_table.ptr());
@@ -192,6 +193,7 @@ namespace Kernel
m_open_file_descriptors.close_all();
// NOTE: We must unmap ranges while the page table is still alive
m_private_anonymous_mappings.clear();
m_mapped_ranges.clear();
// NOTE: We must clear allocators while the page table is still alive
@@ -358,6 +360,11 @@ namespace Kernel
OpenFileDescriptorSet open_file_descriptors(m_credentials);
TRY(open_file_descriptors.clone_from(m_open_file_descriptors));
BAN::Vector<BAN::UniqPtr<VirtualRange>> private_anonymous_mappings;
TRY(private_anonymous_mappings.reserve(m_private_anonymous_mappings.size()));
for (auto& private_anonymous_mapping : m_private_anonymous_mappings)
MUST(private_anonymous_mappings.push_back(TRY(private_anonymous_mapping->clone(*page_table))));
BAN::Vector<BAN::UniqPtr<VirtualRange>> mapped_ranges;
TRY(mapped_ranges.reserve(m_mapped_ranges.size()));
for (auto& mapped_range : m_mapped_ranges)
@@ -378,6 +385,7 @@ namespace Kernel
forked->m_working_directory = BAN::move(working_directory);
forked->m_page_table = BAN::move(page_table);
forked->m_open_file_descriptors = BAN::move(open_file_descriptors);
forked->m_private_anonymous_mappings = BAN::move(private_anonymous_mappings);
forked->m_mapped_ranges = BAN::move(mapped_ranges);
forked->m_fixed_width_allocators = BAN::move(fixed_width_allocators);
forked->m_general_allocator = BAN::move(general_allocator);
@@ -428,6 +436,7 @@ namespace Kernel
m_fixed_width_allocators.clear();
m_general_allocator.clear();
m_private_anonymous_mappings.clear();
m_mapped_ranges.clear();
load_elf_to_memory(*elf);
@@ -811,6 +820,66 @@ namespace Kernel
return (long)buffer;
}
// Handle the mmap(2) syscall. Only MAP_ANONYMOUS | MAP_PRIVATE mappings with a
// kernel-chosen address are supported; everything else returns ENOTSUP.
// Returns the virtual address of the new mapping on success.
BAN::ErrorOr<long> Process::sys_mmap(const sys_mmap_t& args)
{
	// Reject prot bits outside PROT_READ | PROT_WRITE | PROT_EXEC
	// (PROT_NONE itself is allowed).
	if (args.prot != PROT_NONE && args.prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return BAN::Error::from_errno(EINVAL);

	// POSIX: a zero-length mapping is an error.
	if (args.len == 0)
		return BAN::Error::from_errno(EINVAL);

	// Translate prot bits into page table flags.
	// NOTE(review): these flags are currently not applied to the mapping below —
	// the range is always created RW|Present (set_zero() needs it writable).
	// TODO: remap with `flags` after zeroing so PROT_NONE/PROT_READ are honored.
	PageTable::flags_t flags = PageTable::Flags::UserSupervisor;
	if (args.prot & PROT_READ)
		flags |= PageTable::Flags::Present;
	if (args.prot & PROT_WRITE)
		flags |= PageTable::Flags::ReadWrite | PageTable::Flags::Present;
	if (args.prot & PROT_EXEC)
		flags |= PageTable::Flags::Execute | PageTable::Flags::Present;

	if (args.flags == (MAP_ANONYMOUS | MAP_PRIVATE))
	{
		// Address hints are not supported yet; the kernel picks the address.
		if (args.addr != nullptr)
			return BAN::Error::from_errno(ENOTSUP);
		// Anonymous mappings have no backing object, so no offset is meaningful.
		if (args.off != 0)
			return BAN::Error::from_errno(EINVAL);
		if (args.len % PAGE_SIZE != 0)
			return BAN::Error::from_errno(EINVAL);

		// Allocate a fresh virtual range anywhere in userspace
		// (above 0x400000, below the kernel mapping).
		auto range = TRY(VirtualRange::create_to_vaddr_range(
			page_table(),
			0x400000, KERNEL_OFFSET,
			args.len,
			PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present
		));
		// MAP_ANONYMOUS mappings must be zero-filled.
		range->set_zero();

		LockGuard _(m_lock);
		TRY(m_private_anonymous_mappings.push_back(BAN::move(range)));
		return m_private_anonymous_mappings.back()->vaddr();
	}

	return BAN::Error::from_errno(ENOTSUP);
}
// Handle the munmap(2) syscall. Removes (and thereby unmaps, via the
// VirtualRange destructor) every private anonymous mapping that overlaps
// [addr, addr + len). addr must be page aligned; len must be non-zero.
BAN::ErrorOr<long> Process::sys_munmap(void* addr, size_t len)
{
	if (len == 0)
		return BAN::Error::from_errno(EINVAL);

	vaddr_t vaddr = (vaddr_t)addr;
	if (vaddr % PAGE_SIZE != 0)
		return BAN::Error::from_errno(EINVAL);

	LockGuard _(m_lock);

	// NOTE: index is only advanced when nothing is removed; remove(i) shifts
	// the next element into slot i, and incrementing past it would skip it.
	for (size_t i = 0; i < m_private_anonymous_mappings.size();)
	{
		auto& mapping = m_private_anonymous_mappings[i];
		// [vaddr, vaddr + len) is half-open, so a range whose exclusive end
		// equals mapping->vaddr() does NOT overlap — hence <=, not <.
		if (vaddr + len <= mapping->vaddr() || vaddr >= mapping->vaddr() + mapping->size())
		{
			i++;
			continue;
		}
		m_private_anonymous_mappings.remove(i);
	}

	return 0;
}
static constexpr size_t allocator_size_for_allocation(size_t value)
{
if (value <= 256) {

View File

@@ -194,6 +194,12 @@ namespace Kernel
case SYS_SYNC:
ret = Process::current().sys_sync();
break;
case SYS_MMAP:
ret = Process::current().sys_mmap(*(const sys_mmap_t*)arg1);
break;
case SYS_MUNMAP:
ret = Process::current().sys_munmap((void*)arg1, (size_t)arg2);
break;
default:
dwarnln("Unknown syscall {}", syscall);
break;