Kernel: Add guard pages to kernel and userspace stacks

Author: Bananymous
Date:   2025-07-02 23:12:36 +03:00
Parent: e1319a06f2
Commit: 6084aae603

10 changed files with 47 additions and 35 deletions
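
A guard page is a single page reserved immediately below and above a stack but never mapped: the first access past either end of the stack then raises a page fault instead of silently corrupting whatever happens to live next to it in the address space. A minimal standalone sketch of the address arithmetic this commit introduces (hypothetical names, not the kernel's VirtualRange API):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t PAGE_SIZE = 4096;

// Hypothetical guarded stack range: the reservation includes one guard page
// on each side, but only the inner pages are ever mapped.
struct GuardedStack
{
    std::uintptr_t reserved_base; // includes the lower guard page
    std::size_t    reserved_size; // includes both guard pages

    std::uintptr_t usable_base() const { return reserved_base + PAGE_SIZE; }
    std::size_t    usable_size() const { return reserved_size - 2 * PAGE_SIZE; }

    // True for addresses inside the reservation but outside the usable
    // window, i.e. accesses that land on an unmapped guard page.
    bool hits_guard_page(std::uintptr_t addr) const
    {
        return addr >= reserved_base
            && addr <  reserved_base + reserved_size
            && (addr < usable_base() || addr >= usable_base() + usable_size());
    }
};

int main()
{
    // A 16-page stack wanted at 0x200000 grows to an 18-page reservation.
    GuardedStack stack{ 0x200000 - PAGE_SIZE, 16 * PAGE_SIZE + 2 * PAGE_SIZE };
    assert(stack.usable_base() == 0x200000);
    assert(stack.usable_size() == 16 * PAGE_SIZE);
    assert(stack.hits_guard_page(0x200000 - 1));              // underflow below the stack
    assert(stack.hits_guard_page(0x200000 + 16 * PAGE_SIZE)); // overflow past the top
    assert(!stack.hits_guard_page(0x200000));                 // bottom of the usable stack
}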


@@ -15,21 +15,21 @@ namespace Kernel
     public:
         // Create virtual range to fixed virtual address
-        static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages);
+        static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
         // Create virtual range to virtual address range
-        static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages);
+        static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
         ~VirtualRange();
         BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> clone(PageTable&);
-        vaddr_t vaddr() const { return m_vaddr; }
-        size_t size() const { return m_size; }
+        vaddr_t vaddr() const { return m_vaddr + (m_has_guard_pages ? PAGE_SIZE : 0); }
+        size_t size() const { return m_size - (m_has_guard_pages ? 2 * PAGE_SIZE : 0); }
         PageTable::flags_t flags() const { return m_flags; }
         paddr_t paddr_of(vaddr_t vaddr) const
         {
             ASSERT(vaddr % PAGE_SIZE == 0);
-            const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
+            const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
             ASSERT(index < m_paddrs.size());
             const paddr_t paddr = m_paddrs[index];
             ASSERT(paddr);
@@ -41,12 +41,13 @@ namespace Kernel
         BAN::ErrorOr<void> allocate_page_for_demand_paging(vaddr_t address);
     private:
-        VirtualRange(PageTable&, bool preallocated, vaddr_t, size_t, PageTable::flags_t);
+        VirtualRange(PageTable&, bool preallocated, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
         BAN::ErrorOr<void> initialize();
     private:
         PageTable& m_page_table;
         const bool m_preallocated;
+        const bool m_has_guard_pages;
         const vaddr_t m_vaddr;
         const size_t m_size;
         const PageTable::flags_t m_flags;


@@ -78,7 +78,7 @@ namespace Kernel
             KERNEL_OFFSET, UINTPTR_MAX,
             BAN::Math::div_round_up<size_t>(m_width * m_height * (BANAN_FB_BPP / 8), PAGE_SIZE) * PAGE_SIZE,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, false
         ));
         return {};


@@ -4,21 +4,30 @@
 namespace Kernel
 {
-    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages)
+    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
     {
         ASSERT(size % PAGE_SIZE == 0);
         ASSERT(vaddr % PAGE_SIZE == 0);
         ASSERT(vaddr > 0);
-        auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags));
+        if (add_guard_pages)
+        {
+            vaddr -= PAGE_SIZE;
+            size += 2 * PAGE_SIZE;
+        }
+        auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags));
         ASSERT(page_table.reserve_range(vaddr, size));
         TRY(result->initialize());
         return result;
     }
-    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages)
+    BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
     {
+        if (add_guard_pages)
+            size += 2 * PAGE_SIZE;
         ASSERT(size % PAGE_SIZE == 0);
         ASSERT(vaddr_start > 0);
         ASSERT(vaddr_start + size <= vaddr_end);
@@ -31,13 +40,13 @@ namespace Kernel
         ASSERT(vaddr_start < vaddr_end);
         ASSERT(vaddr_end - vaddr_start + 1 >= size / PAGE_SIZE);
-        vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
+        const vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
         if (vaddr == 0)
             return BAN::Error::from_errno(ENOMEM);
         ASSERT(vaddr >= vaddr_start);
         ASSERT(vaddr + size <= vaddr_end);
-        auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, vaddr, size, flags);
+        auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags);
         if (result_or_error.is_error())
         {
             page_table.unmap_range(vaddr, size);
@@ -50,9 +59,10 @@ namespace Kernel
         return result;
     }
-    VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
+    VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool has_guard_pages, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
         : m_page_table(page_table)
         , m_preallocated(preallocated)
+        , m_has_guard_pages(has_guard_pages)
         , m_vaddr(vaddr)
         , m_size(size)
         , m_flags(flags)
@@ -70,26 +80,26 @@ namespace Kernel
     BAN::ErrorOr<void> VirtualRange::initialize()
     {
-        TRY(m_paddrs.resize(m_size / PAGE_SIZE, 0));
+        TRY(m_paddrs.resize(size() / PAGE_SIZE, 0));
         if (!m_preallocated)
             return {};
-        const size_t page_count = m_size / PAGE_SIZE;
+        const size_t page_count = size() / PAGE_SIZE;
         for (size_t i = 0; i < page_count; i++)
         {
            m_paddrs[i] = Heap::get().take_free_page();
            if (m_paddrs[i] == 0)
                return BAN::Error::from_errno(ENOMEM);
-           m_page_table.map_page_at(m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
+           m_page_table.map_page_at(m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags);
         }
         if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
-            memset(reinterpret_cast<void*>(m_vaddr), 0, m_size);
+            memset(reinterpret_cast<void*>(vaddr()), 0, size());
         else
         {
-            const size_t page_count = m_size / PAGE_SIZE;
-            for (size_t i = 0; i < page_count; i++)
+            const size_t page_count = size() / PAGE_SIZE;
+            for (size_t i = m_has_guard_pages; i < page_count; i++)
            {
                PageTable::with_fast_page(m_paddrs[i], [&] {
                    memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
@@ -107,10 +117,10 @@ namespace Kernel
         SpinLockGuard _(m_lock);
-        auto result = TRY(create_to_vaddr(page_table, m_vaddr, m_size, m_flags, m_preallocated));
-        const size_t page_count = m_size / PAGE_SIZE;
-        for (size_t i = 0; i < page_count; i++)
+        auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), m_flags, m_preallocated, m_has_guard_pages));
+        const size_t page_count = size() / PAGE_SIZE;
+        for (size_t i = m_has_guard_pages; i < page_count; i++)
         {
            if (m_paddrs[i] == 0)
                continue;
@@ -119,11 +129,11 @@ namespace Kernel
                result->m_paddrs[i] = Heap::get().take_free_page();
                if (result->m_paddrs[i] == 0)
                    return BAN::Error::from_errno(ENOMEM);
-               result->m_page_table.map_page_at(result->m_paddrs[i], m_vaddr + i * PAGE_SIZE, m_flags);
+               result->m_page_table.map_page_at(result->m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags);
            }
            PageTable::with_fast_page(result->m_paddrs[i], [&] {
-               memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(m_vaddr + i * PAGE_SIZE), PAGE_SIZE);
+               memcpy(PageTable::fast_page_as_ptr(), reinterpret_cast<void*>(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
            });
         }
@@ -137,7 +147,7 @@ namespace Kernel
         ASSERT(contains(vaddr));
         ASSERT(&PageTable::current() == &m_page_table);
-        const size_t index = (vaddr - m_vaddr) / PAGE_SIZE;
+        const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
         ASSERT(m_paddrs[index] == 0);
         SpinLockGuard _(m_lock);
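
To make the new bookkeeping concrete, here is a worked example for the updated create_to_vaddr(), assuming 4 KiB pages and a hypothetical request for a 16-page stack at 0x300000 with add_guard_pages = true:

    requested by caller:  vaddr = 0x300000, size = 16 * PAGE_SIZE = 0x10000
    stored internally:    m_vaddr = 0x2FF000, m_size = 18 * PAGE_SIZE = 0x12000
    usable region:        vaddr() = 0x300000, size() = 0x10000  (0x300000 .. 0x30FFFF)
    guard pages:          0x2FF000 .. 0x2FFFFF and 0x310000 .. 0x310FFF, reserved but never mapped

Because the accessors subtract the guard pages back out, code that goes through vaddr(), size() and paddr_of() keeps seeing only the usable region; the guard pages exist solely as reserved-but-unmapped holes around it.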


@@ -35,7 +35,7 @@ namespace Kernel
             ~(uintptr_t)0,
             pending_packet_buffer_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, false
         ));
         ipv4_manager->m_arp_table = TRY(ARPTable::create());
         return ipv4_manager;


@@ -16,7 +16,7 @@ namespace Kernel
             BAN::numeric_limits<vaddr_t>::max(),
             buffer_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, false
         ));
         loopback->set_ipv4_address({ 127, 0, 0, 1 });
         loopback->set_netmask({ 255, 0, 0, 0 });


@@ -32,7 +32,7 @@ namespace Kernel
             ~(vaddr_t)0,
             s_recv_window_buffer_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, false
         ));
         socket->m_recv_window.scale_shift = PAGE_SIZE_SHIFT; // use PAGE_SIZE windows
         socket->m_send_window.buffer = TRY(VirtualRange::create_to_vaddr_range(
@@ -41,7 +41,7 @@ namespace Kernel
             ~(vaddr_t)0,
             s_send_window_buffer_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, false
         ));
         socket->m_thread = TRY(Thread::create_kernel(
             [](void* socket_ptr)


@@ -17,7 +17,7 @@ namespace Kernel
             ~(uintptr_t)0,
             packet_buffer_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, false
         ));
         return socket;
     }


@@ -29,7 +29,7 @@ namespace Kernel
             ~(uintptr_t)0,
             s_packet_buffer_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, false
         ));
         return socket;
     }


@@ -44,7 +44,8 @@ namespace Kernel
             PageTable::kernel(),
             KERNEL_OFFSET, static_cast<vaddr_t>(-1),
             16 * PAGE_SIZE,
-            PageTable::Flags::ReadWrite | PageTable::Flags::Present, true
+            PageTable::Flags::ReadWrite | PageTable::Flags::Present,
+            true, false
         ));
         auto pts_master = TRY(BAN::RefPtr<PseudoTerminalMaster>::create(BAN::move(pts_master_buffer), mode, uid, gid));
         DevFileSystem::get().remove_from_cache(pts_master);


@@ -84,7 +84,7 @@ namespace Kernel
             ~(uintptr_t)0,
             kernel_stack_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, true
         ));
         // Initialize stack for returning
@@ -124,7 +124,7 @@ namespace Kernel
             0x200000, USERSPACE_END,
             kernel_stack_size,
             PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, true
         ));
         thread->m_userspace_stack = TRY(VirtualRange::create_to_vaddr_range(
@@ -132,7 +132,7 @@ namespace Kernel
             0x200000, USERSPACE_END,
             userspace_stack_size,
             PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present,
-            true
+            true, true
         ));
         thread_deleter.disable();
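
With the stack ranges above now created with add_guard_pages = true, running off either end of a thread's stack lands in an unmapped page inside the stack's own reservation and faults there; how that fault is then reported is left to the kernel's existing page-fault handling, which this commit does not change. The fragment below is only an illustration of the kind of bug the guard pages are meant to catch (hypothetical userspace code, not part of this commit; it crashes by design):

#include <cstddef>

// Unbounded recursion: every call consumes at least sizeof(frame) bytes of
// stack, so the stack pointer eventually steps into the guard page below the
// stack and the next write faults instead of silently overwriting whatever
// used to sit there.
std::size_t overflow(std::size_t n)
{
    volatile char frame[1024] = {}; // force real per-call stack usage
    frame[0] = static_cast<char>(n);
    return overflow(n + 1) + frame[0];
}

int main()
{
    return static_cast<int>(overflow(0)); // faults once the guard page is reached
}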