Kernel: SYS_FORK can now fail instead of panicking on error

Bananymous 2023-07-19 17:47:12 +03:00
parent 650e1b4fc5
commit c0c0bbc1bf
8 changed files with 79 additions and 67 deletions
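
The theme of the change: allocation paths that used to return raw pointers or get wrapped in a panicking MUST() now return BAN::ErrorOr and are unwrapped with TRY(), so an out-of-memory condition during fork is reported to the caller instead of bringing the kernel down. A minimal sketch of that calling convention, using only BAN constructs that appear in the hunks below; the helper is illustrative and `return {};` is assumed to be the ErrorOr<void> success value:

    #include <BAN/UniqPtr.h>
    #include <BAN/Vector.h>
    #include <kernel/Memory/VirtualRange.h>

    namespace Kernel
    {

        // Illustrative helper, not part of this commit.
        static BAN::ErrorOr<void> allocate_scratch_ranges(BAN::Vector<BAN::UniqPtr<VirtualRange>>& out)
        {
            TRY(out.reserve(2));
            for (size_t i = 0; i < 2; i++)
            {
                // TRY() unwraps the value on success; on failure it returns the
                // BAN::Error (e.g. ENOMEM) to our caller instead of panicking.
                auto range = TRY(VirtualRange::create_kmalloc(PAGE_SIZE));
                TRY(out.push_back(BAN::move(range)));
            }
            return {};
        }

    }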

VirtualRange.h

@@ -1,7 +1,8 @@
 #pragma once
 
-#include <BAN/Vector.h>
 #include <BAN/NoCopyMove.h>
+#include <BAN/UniqPtr.h>
+#include <BAN/Vector.h>
 #include <kernel/Memory/PageTable.h>
 
 namespace Kernel
@@ -13,11 +14,11 @@ namespace Kernel
         BAN_NON_MOVABLE(VirtualRange);
 
     public:
-        static VirtualRange* create(PageTable&, vaddr_t, size_t, uint8_t flags);
-        static VirtualRange* create_kmalloc(size_t);
+        static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create(PageTable&, vaddr_t, size_t, uint8_t flags);
+        static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_kmalloc(size_t);
         ~VirtualRange();
 
-        VirtualRange* clone(PageTable&);
+        BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> clone(PageTable&);
 
         vaddr_t vaddr() const { return m_vaddr; }
         size_t size() const { return m_size; }

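The VirtualRange factories now hand ownership out as BAN::ErrorOr<BAN::UniqPtr<VirtualRange>>. A sketch of that factory shape for a made-up type (ExampleObject is not in the tree), mirroring how VirtualRange::create_kmalloc in VirtualRange.cpp further down wraps its raw allocation with BAN::UniqPtr::adopt(); the nullptr check mirrors the kernel's non-throwing operator new:

    #include <BAN/Errors.h>
    #include <BAN/UniqPtr.h>

    struct ExampleObject { int value { 0 }; };

    static BAN::ErrorOr<BAN::UniqPtr<ExampleObject>> create_example()
    {
        auto* result = new ExampleObject();
        if (result == nullptr)
            return BAN::Error::from_errno(ENOMEM);
        // adopt() takes over the raw pointer, so every caller that TRY()s this
        // factory receives an owning handle and never deletes manually.
        return BAN::UniqPtr<ExampleObject>::adopt(result);
    }
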
OpenFileDescriptorSet.h

@@ -17,6 +17,8 @@ namespace Kernel
         OpenFileDescriptorSet(const Credentials&);
         ~OpenFileDescriptorSet();
 
+        OpenFileDescriptorSet& operator=(OpenFileDescriptorSet&&);
+
         BAN::ErrorOr<void> clone_from(const OpenFileDescriptorSet&);
 
         BAN::ErrorOr<int> open(BAN::StringView absolute_path, int flags);

Process.h

@@ -140,7 +140,7 @@ namespace Kernel
         OpenFileDescriptorSet m_open_file_descriptors;
 
-        BAN::Vector<VirtualRange*> m_mapped_ranges;
+        BAN::Vector<BAN::UniqPtr<VirtualRange>> m_mapped_ranges;
 
         mutable RecursiveSpinLock m_lock;

Thread.h

@@ -2,6 +2,7 @@
 
 #include <BAN/NoCopyMove.h>
 #include <BAN/RefPtr.h>
+#include <BAN/UniqPtr.h>
 #include <kernel/Memory/VirtualRange.h>
 #include <sys/types.h>
@@ -69,18 +70,18 @@ namespace Kernel
         void validate_stack() const;
 
     private:
         static constexpr size_t m_kernel_stack_size = PAGE_SIZE * 1;
         static constexpr size_t m_userspace_stack_size = PAGE_SIZE * 2;
         static constexpr size_t m_interrupt_stack_size = PAGE_SIZE * 2;
-        VirtualRange* m_interrupt_stack { nullptr };
-        VirtualRange* m_stack { nullptr };
+        BAN::UniqPtr<VirtualRange> m_interrupt_stack;
+        BAN::UniqPtr<VirtualRange> m_stack;
         uintptr_t m_rip { 0 };
         uintptr_t m_rsp { 0 };
         const pid_t m_tid { 0 };
         State m_state { State::NotStarted };
         Process* m_process { nullptr };
         bool m_in_syscall { false };
         bool m_is_userspace { false };
 
         friend class Scheduler;
     };

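Holding the stacks as BAN::UniqPtr members means replacing a stack or destroying the Thread releases the old VirtualRange automatically, which is what lets the manual deletes in ~Thread() (in Thread.cpp below) disappear. A small sketch with a made-up owner type, again assuming `return {};` is the ErrorOr<void> success value:

    #include <BAN/UniqPtr.h>
    #include <kernel/Memory/VirtualRange.h>

    namespace Kernel
    {

        // Illustrative type, not part of the tree.
        class ExampleOwner
        {
        public:
            BAN::ErrorOr<void> initialize()
            {
                // Assigning a new range destroys whatever m_range held before.
                m_range = TRY(VirtualRange::create_kmalloc(PAGE_SIZE));
                return {};
            }
            // No user-written destructor: BAN::UniqPtr frees m_range when the
            // owner goes away, on success and error paths alike.
        private:
            BAN::UniqPtr<VirtualRange> m_range;
        };

    }
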
VirtualRange.cpp

@@ -4,7 +4,7 @@
 namespace Kernel
 {
 
-    VirtualRange* VirtualRange::create(PageTable& page_table, vaddr_t vaddr, size_t size, uint8_t flags)
+    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create(PageTable& page_table, vaddr_t vaddr, size_t size, uint8_t flags)
     {
         ASSERT(size % PAGE_SIZE == 0);
         ASSERT(vaddr % PAGE_SIZE == 0);
@@ -37,10 +37,10 @@ namespace Kernel
         }
         page_table.unlock();
 
-        return result;
+        return BAN::UniqPtr<VirtualRange>::adopt(result);
     }
 
-    VirtualRange* VirtualRange::create_kmalloc(size_t size)
+    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_kmalloc(size_t size)
     {
         VirtualRange* result = new VirtualRange(PageTable::kernel());
         ASSERT(result);
@@ -52,10 +52,10 @@ namespace Kernel
 
         if (result->m_vaddr == 0)
         {
             delete result;
-            return nullptr;
+            return BAN::UniqPtr<VirtualRange>();
         }
 
-        return result;
+        return BAN::UniqPtr<VirtualRange>::adopt(result);
     }
@@ -75,9 +75,9 @@ namespace Kernel
             Heap::get().release_page(page);
     }
 
-    VirtualRange* VirtualRange::clone(PageTable& page_table)
+    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::clone(PageTable& page_table)
     {
-        VirtualRange* result = create(page_table, vaddr(), size(), flags());
+        auto result = TRY(create(page_table, vaddr(), size(), flags()));
 
         m_page_table.lock();

OpenFileDescriptorSet.cpp

@@ -19,6 +19,13 @@ namespace Kernel
         close_all();
     }
 
+    OpenFileDescriptorSet& OpenFileDescriptorSet::operator=(OpenFileDescriptorSet&& other)
+    {
+        for (size_t i = 0; i < m_open_files.size(); i++)
+            m_open_files[i] = BAN::move(other.m_open_files[i]);
+        return *this;
+    }
+
     BAN::ErrorOr<void> OpenFileDescriptorSet::clone_from(const OpenFileDescriptorSet& other)
     {
         close_all();

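The move-assignment added above exists so that sys_fork (next file) can clone the parent's descriptors into a local set and only then commit them into the child with BAN::move(), a step with no failure path. A sketch of that usage; the helper is illustrative and headers are omitted:

    namespace Kernel
    {

        // Illustrative helper, not part of the commit.
        static BAN::ErrorOr<void> clone_then_commit(OpenFileDescriptorSet& target,
                                                    const OpenFileDescriptorSet& source,
                                                    const Credentials& credentials)
        {
            OpenFileDescriptorSet staged(credentials);
            TRY(staged.clone_from(source)); // the only step that can fail; target stays untouched
            target = BAN::move(staged);     // uses the new operator=(OpenFileDescriptorSet&&)
            return {};
        }

    }
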
Process.cpp

@@ -1,3 +1,4 @@
+#include <BAN/ScopeGuard.h>
 #include <BAN/StringView.h>
 #include <kernel/CriticalScope.h>
 #include <kernel/FS/VirtualFileSystem.h>
@@ -139,8 +140,6 @@ namespace Kernel
         m_open_file_descriptors.close_all();
 
         // NOTE: We must unmap ranges while the page table is still alive
-        for (auto* range : m_mapped_ranges)
-            delete range;
         m_mapped_ranges.clear();
 
         // NOTE: We must clear allocators while the page table is still alive
@@ -269,29 +268,43 @@ namespace Kernel
     BAN::ErrorOr<long> Process::sys_fork(uintptr_t rsp, uintptr_t rip)
     {
-        Process* forked = create_process(m_credentials);
-
-        forked->m_page_table = BAN::UniqPtr<PageTable>::adopt(MUST(PageTable::create_userspace()));
+        auto page_table = BAN::UniqPtr<PageTable>::adopt(TRY(PageTable::create_userspace()));
 
         LockGuard _(m_lock);
 
-        forked->m_tty = m_tty;
-        forked->m_working_directory = m_working_directory;
+        BAN::String working_directory;
+        TRY(working_directory.append(m_working_directory));
 
-        MUST(forked->m_open_file_descriptors.clone_from(m_open_file_descriptors));
+        OpenFileDescriptorSet open_file_descriptors(m_credentials);
+        TRY(open_file_descriptors.clone_from(m_open_file_descriptors));
 
-        forked->m_userspace_info = m_userspace_info;
+        BAN::Vector<BAN::UniqPtr<VirtualRange>> mapped_ranges;
+        TRY(mapped_ranges.reserve(m_mapped_ranges.size()));
+        for (auto& mapped_range : m_mapped_ranges)
+            MUST(mapped_ranges.push_back(TRY(mapped_range->clone(*page_table))));
 
-        for (auto* mapped_range : m_mapped_ranges)
-            MUST(forked->m_mapped_ranges.push_back(mapped_range->clone(forked->page_table())));
+        BAN::Vector<BAN::UniqPtr<FixedWidthAllocator>> fixed_width_allocators;
+        TRY(fixed_width_allocators.reserve(m_fixed_width_allocators.size()));
         for (auto& allocator : m_fixed_width_allocators)
             if (allocator->allocations() > 0)
-                MUST(forked->m_fixed_width_allocators.push_back(MUST(allocator->clone(forked->page_table()))));
+                MUST(fixed_width_allocators.push_back(TRY(allocator->clone(*page_table))));
 
+        BAN::UniqPtr<GeneralAllocator> general_allocator;
         if (m_general_allocator)
-            forked->m_general_allocator = MUST(m_general_allocator->clone(forked->page_table()));
+            general_allocator = TRY(m_general_allocator->clone(*page_table));
+
+        Process* forked = create_process(m_credentials);
+        forked->m_tty = m_tty;
+        forked->m_working_directory = BAN::move(working_directory);
+        forked->m_page_table = BAN::move(page_table);
+        forked->m_open_file_descriptors = BAN::move(open_file_descriptors);
+        forked->m_mapped_ranges = BAN::move(mapped_ranges);
+        forked->m_fixed_width_allocators = BAN::move(fixed_width_allocators);
+        forked->m_general_allocator = BAN::move(general_allocator);
+        forked->m_userspace_info = m_userspace_info;
 
         ASSERT(this == &Process::current());
+        // FIXME: this should be able to fail
         Thread* thread = MUST(Thread::current().clone(forked, rsp, rip));
         forked->add_thread(thread);
@@ -330,9 +343,6 @@ namespace Kernel
         m_fixed_width_allocators.clear();
         m_general_allocator.clear();
 
-        for (auto* range : m_mapped_ranges)
-            delete range;
-
         m_mapped_ranges.clear();
 
         load_elf_to_memory(*elf);
@@ -478,7 +488,7 @@ namespace Kernel
         {
             LockGuard _(m_lock);
 
-            MUST(m_mapped_ranges.push_back(VirtualRange::create(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags)));
+            MUST(m_mapped_ranges.push_back(MUST(VirtualRange::create(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags))));
             m_mapped_ranges.back()->set_zero();
             m_mapped_ranges.back()->copy_from(elf_program_header.p_vaddr % PAGE_SIZE, elf.data() + elf_program_header.p_offset, elf_program_header.p_filesz);
         }

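The restructured sys_fork above follows a stage-then-commit shape: every fallible step builds into locals with TRY(), and the child Process is only created and populated by BAN::move() once nothing can fail anymore, so an ENOMEM mid-fork no longer leaves a half-initialized process behind. The push_back calls can stay MUST() because the preceding reserve() already guaranteed capacity. A condensed sketch of that shape, written as if it were declared on Process (it is not a real method) and trimmed to two resources:

    namespace Kernel
    {

        BAN::ErrorOr<long> Process::sys_fork_sketch()
        {
            // Stage: all fallible work targets locals, so a TRY() failure just
            // destroys them and propagates the error to the caller.
            auto page_table = BAN::UniqPtr<PageTable>::adopt(TRY(PageTable::create_userspace()));
            BAN::Vector<BAN::UniqPtr<VirtualRange>> mapped_ranges;
            TRY(mapped_ranges.reserve(m_mapped_ranges.size()));
            for (auto& mapped_range : m_mapped_ranges)
                MUST(mapped_ranges.push_back(TRY(mapped_range->clone(*page_table)))); // capacity reserved above

            // Commit: nothing below can fail, so the child is never half-built.
            Process* forked = create_process(m_credentials);
            forked->m_page_table = BAN::move(page_table);
            forked->m_mapped_ranges = BAN::move(mapped_ranges);
            return 0;
        }

    }
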
Thread.cpp

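The hunks below combine the TRY() conversions with BAN::ScopeGuard: the raw Thread is still new-ed up front, so a guard deletes it on any early error return and is disabled once construction has fully succeeded. A standalone sketch of that pattern using a stand-in type (ExampleThread is not the kernel's Thread):

    #include <BAN/Errors.h>
    #include <BAN/ScopeGuard.h>
    #include <BAN/UniqPtr.h>
    #include <kernel/Memory/VirtualRange.h>

    namespace Kernel
    {

        struct ExampleThread
        {
            BAN::UniqPtr<VirtualRange> stack;
        };

        static BAN::ErrorOr<ExampleThread*> create_example_thread()
        {
            auto* thread = new ExampleThread();
            if (thread == nullptr) // the kernel's operator new reports failure with nullptr
                return BAN::Error::from_errno(ENOMEM);

            // If any TRY() below bails out, the guard still runs and frees the
            // partially initialized object, so the early returns cannot leak it.
            BAN::ScopeGuard thread_deleter([thread] { delete thread; });

            thread->stack = TRY(VirtualRange::create_kmalloc(PAGE_SIZE));

            // Every fallible step succeeded: cancel the cleanup and hand over ownership.
            thread_deleter.disable();
            return thread;
        }

    }
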
@@ -1,4 +1,5 @@
 #include <BAN/Errors.h>
+#include <BAN/ScopeGuard.h>
 #include <kernel/InterruptController.h>
 #include <kernel/Memory/kmalloc.h>
 #include <kernel/Memory/PageTableScope.h>
@@ -27,11 +28,10 @@ namespace Kernel
         Thread* thread = new Thread(s_next_tid++, process);
         if (thread == nullptr)
             return BAN::Error::from_errno(ENOMEM);
+        BAN::ScopeGuard thread_deleter([thread] { delete thread; });
 
         // Initialize stack and registers
-        thread->m_stack = VirtualRange::create_kmalloc(m_kernel_stack_size);
-        if (thread->m_stack == nullptr)
-            return BAN::Error::from_errno(ENOMEM);
+        thread->m_stack = TRY(VirtualRange::create_kmalloc(m_kernel_stack_size));
         thread->m_rsp = thread->stack_base() + thread->stack_size();
         thread->m_rip = (uintptr_t)entry;
@@ -40,6 +40,8 @@ namespace Kernel
         write_to_stack<sizeof(void*)>(thread->m_rsp, &Thread::on_exit);
         write_to_stack<sizeof(void*)>(thread->m_rsp, data);
 
+        thread_deleter.disable();
+
         return thread;
     }
@@ -51,26 +53,17 @@ namespace Kernel
         Thread* thread = new Thread(s_next_tid++, process);
         if (thread == nullptr)
             return BAN::Error::from_errno(ENOMEM);
+        BAN::ScopeGuard thread_deleter([thread] { delete thread; });
 
         thread->m_is_userspace = true;
 
-        // Allocate stack
-        thread->m_stack = VirtualRange::create(process->page_table(), 0, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-        if (thread->m_stack == nullptr)
-        {
-            delete thread;
-            return BAN::Error::from_errno(ENOMEM);
-        }
-
-        // Allocate interrupt stack
-        thread->m_interrupt_stack = VirtualRange::create(process->page_table(), 0, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-        if (thread->m_interrupt_stack == nullptr)
-        {
-            delete thread;
-            return BAN::Error::from_errno(ENOMEM);
-        }
+        thread->m_stack = TRY(VirtualRange::create(process->page_table(), 0, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
+        thread->m_interrupt_stack = TRY(VirtualRange::create(process->page_table(), 0, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
 
         thread->setup_exec();
 
+        thread_deleter.disable();
+
         return thread;
     }
@@ -91,12 +84,6 @@ namespace Kernel
 
     Thread::~Thread()
     {
-        if (m_stack)
-            delete m_stack;
-        m_stack = nullptr;
-        if (m_interrupt_stack)
-            delete m_interrupt_stack;
-        m_interrupt_stack = nullptr;
     }
@@ -107,10 +94,12 @@ namespace Kernel
         Thread* thread = new Thread(s_next_tid++, new_process);
         if (thread == nullptr)
             return BAN::Error::from_errno(ENOMEM);
+        BAN::ScopeGuard thread_deleter([thread] { delete thread; });
+
         thread->m_is_userspace = true;
 
-        thread->m_interrupt_stack = m_interrupt_stack->clone(new_process->page_table());
-        thread->m_stack = m_stack->clone(new_process->page_table());
+        thread->m_interrupt_stack = TRY(m_interrupt_stack->clone(new_process->page_table()));
+        thread->m_stack = TRY(m_stack->clone(new_process->page_table()));
 
         thread->m_state = State::Executing;
         thread->m_in_syscall = true;
@@ -118,6 +107,8 @@ namespace Kernel
         thread->m_rip = rip;
         thread->m_rsp = rsp;
 
+        thread_deleter.disable();
+
         return thread;
     }