From df260fe0e8741840f547a8b7e73e50cb85484a0d Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Fri, 24 May 2024 14:14:17 +0300
Subject: [PATCH] Kernel: Process::validate_pointer_access now maps the whole
 range

This fixes a bug where a userspace-provided address is not fully mapped
and the kernel tries to read/write it while using the PageTable fast
page.

In the future, userspace input should be copied on syscall entry so
that userspace cannot modify it during the syscall. Currently there is
a chance that userspace input passes kernel syscall validation and is
then modified by userspace before the value is actually used.
---
 kernel/include/kernel/Process.h |  1 +
 kernel/kernel/Process.cpp       | 25 ++++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/kernel/include/kernel/Process.h b/kernel/include/kernel/Process.h
index 226af759..97e9aec8 100644
--- a/kernel/include/kernel/Process.h
+++ b/kernel/include/kernel/Process.h
@@ -198,6 +198,7 @@ namespace Kernel
 		BAN::ErrorOr<long> block_until_exit(pid_t pid);
 
 		BAN::ErrorOr<void> validate_string_access(const char*);
+		BAN::ErrorOr<void> validate_pointer_access_check(const void*, size_t);
 		BAN::ErrorOr<void> validate_pointer_access(const void*, size_t);
 
 		uint64_t signal_pending_mask() const
diff --git a/kernel/kernel/Process.cpp b/kernel/kernel/Process.cpp
index 359d0403..80945b20 100644
--- a/kernel/kernel/Process.cpp
+++ b/kernel/kernel/Process.cpp
@@ -1857,7 +1857,7 @@ namespace Kernel
 		return validate_pointer_access(str, strlen(str) + 1);
 	}
 
-	BAN::ErrorOr<void> Process::validate_pointer_access(const void* ptr, size_t size)
+	BAN::ErrorOr<void> Process::validate_pointer_access_check(const void* ptr, size_t size)
 	{
 		ASSERT(&Process::current() == this);
 		auto& thread = Thread::current();
@@ -1894,4 +1894,27 @@ unauthorized_access:
 		return BAN::Error::from_errno(EINTR);
 	}
 
+	BAN::ErrorOr<void> Process::validate_pointer_access(const void* ptr, size_t size)
+	{
+		// TODO: This seems very slow as we loop over the range twice
+
+		TRY(validate_pointer_access_check(ptr, size));
+
+		const vaddr_t vaddr = reinterpret_cast<vaddr_t>(ptr);
+
+		// Make sure all of the pages are mapped here, so demand paging does not happen
+		// while processing syscall.
+		const vaddr_t page_start = vaddr & PAGE_ADDR_MASK;
+		const size_t page_count = range_page_count(vaddr, size);
+		for (size_t i = 0; i < page_count; i++)
+		{
+			const vaddr_t current = page_start + i * PAGE_SIZE;
+			if (page_table().get_page_flags(current) & PageTable::Flags::Present)
+				continue;
+			TRY(Process::allocate_page_for_demand_paging(current));
+		}
+
+		return {};
+	}
+
 }
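
A minimal usage sketch of why the whole range must be resident (not part of the patch; the handler name sys_example_sum, its signature, and the BAN::ErrorOr<long> return type are assumptions for illustration -- only validate_pointer_access() and TRY() come from the patched code):

	// Hypothetical syscall handler using the patched validate_pointer_access().
	BAN::ErrorOr<long> Process::sys_example_sum(const uint8_t* buffer, size_t size)
	{
		// Validate permissions and, after this patch, pre-map every page of
		// [buffer, buffer + size) so no demand-paging fault can fire later
		// in the syscall (e.g. while the PageTable fast page is in use).
		TRY(validate_pointer_access(buffer, size));

		// The kernel may now touch every byte of the range; before this
		// patch, an unmapped page inside the range could still fault here.
		long sum = 0;
		for (size_t i = 0; i < size; i++)
			sum += buffer[i];
		return sum;
	}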
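
A rough sketch of the "copy on syscall entry" direction mentioned in the commit message (hypothetical, not in this patch; copy_from_user_checked and the BAN::Vector-based copy are assumed helpers, not existing banan-os API):

	// Hypothetical helper: snapshot validated userspace input into kernel
	// memory so later userspace writes cannot change the data the syscall
	// actually operates on (closes the validate-then-use window).
	BAN::ErrorOr<BAN::Vector<uint8_t>> Process::copy_from_user_checked(const void* ptr, size_t size)
	{
		// Validate permissions and make sure every page is mapped; the
		// patched validate_pointer_access() already does both.
		TRY(validate_pointer_access(ptr, size));

		// Copy into a kernel-owned buffer and return it; the syscall would
		// then use only this copy. Assumes BAN::Vector::resize() reports
		// allocation failure through BAN::ErrorOr.
		BAN::Vector<uint8_t> copy;
		TRY(copy.resize(size));
		memcpy(copy.data(), ptr, size);
		return BAN::move(copy);
	}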