Compare commits

...

3 Commits

Author SHA1 Message Date
Bananymous df260fe0e8 Kernel: Process::validate_pointer_access now maps the whole range
This fixes a bug where a userspace-provided address is not fully mapped
and the kernel tries to read/write it while using the PageTable fast page.

In the future, userspace input should be copied on syscall entry, so
that userspace cannot modify the input during the syscall. Currently
there is a chance that userspace input passes kernel syscall validation
and userspace then modifies the input before the value is actually
used.
2024-05-24 14:14:17 +03:00
Bananymous 2be4fe8404 Kernel: Make PageTable::s_fast_page_lock non-recursive
This lock is only used in wrappers of PageTable. There is no possibility
of taking the lock outside of these wrappers.
2024-05-24 14:12:35 +03:00
Bananymous 7db7cfe20f BuildSystem: Only use kvm if user has rw access 2024-05-24 11:09:04 +03:00
5 changed files with 34 additions and 6 deletions

View File

@ -17,7 +17,7 @@ extern uint8_t g_userspace_end[];
namespace Kernel
{
RecursiveSpinLock PageTable::s_fast_page_lock;
SpinLock PageTable::s_fast_page_lock;
static PageTable* s_kernel = nullptr;
static bool s_has_nxe = false;
@ -214,7 +214,7 @@ namespace Kernel
ASSERT(s_kernel);
ASSERT(paddr);
SpinLockGuard _(s_fast_page_lock);
ASSERT(s_fast_page_lock.current_processor_has_lock());
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
@ -237,7 +237,7 @@ namespace Kernel
{
ASSERT(s_kernel);
SpinLockGuard _(s_fast_page_lock);
ASSERT(s_fast_page_lock.current_processor_has_lock());
constexpr vaddr_t uc_vaddr = uncanonicalize(fast_page());
constexpr uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;

View File

@ -130,7 +130,7 @@ namespace Kernel
private:
paddr_t m_highest_paging_struct { 0 };
mutable RecursiveSpinLock m_lock;
static RecursiveSpinLock s_fast_page_lock;
static SpinLock s_fast_page_lock;
};
static constexpr size_t range_page_count(vaddr_t start, size_t bytes)

View File

@ -198,6 +198,7 @@ namespace Kernel
BAN::ErrorOr<int> block_until_exit(pid_t pid);
BAN::ErrorOr<void> validate_string_access(const char*);
BAN::ErrorOr<void> validate_pointer_access_check(const void*, size_t);
BAN::ErrorOr<void> validate_pointer_access(const void*, size_t);
uint64_t signal_pending_mask() const

View File

@ -1857,7 +1857,7 @@ namespace Kernel
return validate_pointer_access(str, strlen(str) + 1);
}
BAN::ErrorOr<void> Process::validate_pointer_access(const void* ptr, size_t size)
BAN::ErrorOr<void> Process::validate_pointer_access_check(const void* ptr, size_t size)
{
ASSERT(&Process::current() == this);
auto& thread = Thread::current();
@ -1894,4 +1894,27 @@ unauthorized_access:
return BAN::Error::from_errno(EINTR);
}
// Validates that [ptr, ptr + size) passes the userspace access check and then
// eagerly maps every page in the range, so the kernel can read/write the
// buffer during the syscall without a demand-paging fault occurring mid-way
// (e.g. while the PageTable fast page is in use).
// Returns an error if the access check fails or a page cannot be allocated.
// NOTE(review): per the commit message, userspace can still modify the
// buffer *contents* after validation; copying input on syscall entry is a
// planned future fix.
BAN::ErrorOr<void> Process::validate_pointer_access(const void* ptr, size_t size)
{
// TODO: This seems very slow as we loop over the range twice
// First pass: range/permission validation only.
TRY(validate_pointer_access_check(ptr, size));
const vaddr_t vaddr = reinterpret_cast<vaddr_t>(ptr);
// Make sure all of the pages are mapped here, so demand paging does not happen
// while processing syscall.
// Second pass: walk the range page by page, pre-faulting any page that is
// not yet Present.
const vaddr_t page_start = vaddr & PAGE_ADDR_MASK;
const size_t page_count = range_page_count(vaddr, size);
for (size_t i = 0; i < page_count; i++)
{
const vaddr_t current = page_start + i * PAGE_SIZE;
// Already mapped; nothing to do for this page.
if (page_table().get_page_flags(current) & PageTable::Flags::Present)
continue;
// Allocate and map a backing page now instead of on first access.
TRY(Process::allocate_page_for_demand_paging(current));
}
return {};
}
}

View File

@ -61,7 +61,11 @@ run_bochs () {
}
if [[ -c /dev/kvm ]]; then
QEMU_ACCEL="-accel kvm"
if [[ -r /dev/kvm ]] && [[ -w /dev/kvm ]]; then
QEMU_ACCEL="-accel kvm"
else
echo "You don't have read/write permissions for /dev/kvm" >&2
fi
fi
if [[ $# -eq 0 ]]; then