Kernel: Remove unused features from VirtualRange

On-demand paging has not been used ever since I made the userspace
stack a normal MemoryRegion.
This commit is contained in:
2026-04-21 19:58:09 +03:00
parent eea0154f18
commit b74812d669
10 changed files with 53 additions and 136 deletions

View File

@@ -9,12 +9,6 @@
namespace Kernel namespace Kernel
{ {
struct AddressRange
{
vaddr_t start;
vaddr_t end;
};
class MemoryRegion class MemoryRegion
{ {
BAN_NON_COPYABLE(MemoryRegion); BAN_NON_COPYABLE(MemoryRegion);

View File

@@ -23,4 +23,10 @@ namespace Kernel
using vaddr_t = uintptr_t; using vaddr_t = uintptr_t;
using paddr_t = uint64_t; using paddr_t = uint64_t;
struct AddressRange
{
vaddr_t start;
vaddr_t end;
};
} }

View File

@@ -14,42 +14,27 @@ namespace Kernel
BAN_NON_MOVABLE(VirtualRange); BAN_NON_MOVABLE(VirtualRange);
public: public:
// Create virtual range to fixed virtual address static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, AddressRange address_range, size_t, PageTable::flags_t flags, bool add_guard_pages);
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
// Create virtual range to virtual address range
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
~VirtualRange(); ~VirtualRange();
vaddr_t vaddr() const { return m_vaddr + (m_has_guard_pages ? PAGE_SIZE : 0); } vaddr_t vaddr() const { return m_vaddr + (m_has_guard_pages ? PAGE_SIZE : 0); }
size_t size() const { return m_size - (m_has_guard_pages ? 2 * PAGE_SIZE : 0); } size_t size() const { return m_size - (m_has_guard_pages ? 2 * PAGE_SIZE : 0); }
PageTable::flags_t flags() const { return m_flags; } PageTable::flags_t flags() const { return m_flags; }
paddr_t paddr_of(vaddr_t vaddr) const paddr_t paddr_of(vaddr_t vaddr) const { return m_page_table.physical_address_of(vaddr & PAGE_ADDR_MASK); }
{
ASSERT(vaddr % PAGE_SIZE == 0);
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
ASSERT(index < m_paddrs.size());
const paddr_t paddr = m_paddrs[index];
ASSERT(paddr);
return paddr;
}
bool contains(vaddr_t address) const { return vaddr() <= address && address < vaddr() + size(); } bool contains(vaddr_t address) const { return vaddr() <= address && address < vaddr() + size(); }
BAN::ErrorOr<bool> allocate_page_for_demand_paging(vaddr_t address);
private: private:
VirtualRange(PageTable&, bool preallocated, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t); VirtualRange(PageTable&, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
BAN::ErrorOr<void> initialize(); BAN::ErrorOr<void> initialize();
private: private:
PageTable& m_page_table; PageTable& m_page_table;
const bool m_preallocated;
const bool m_has_guard_pages; const bool m_has_guard_pages;
const vaddr_t m_vaddr; const vaddr_t m_vaddr;
const size_t m_size; const size_t m_size;
const PageTable::flags_t m_flags; const PageTable::flags_t m_flags;
BAN::Vector<paddr_t> m_paddrs;
SpinLock m_lock; SpinLock m_lock;
friend class BAN::UniqPtr<VirtualRange>; friend class BAN::UniqPtr<VirtualRange>;

View File

@@ -84,10 +84,10 @@ namespace Kernel
m_video_buffer = TRY(VirtualRange::create_to_vaddr_range( m_video_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(), PageTable::kernel(),
KERNEL_OFFSET, UINTPTR_MAX, { KERNEL_OFFSET, UINTPTR_MAX },
BAN::Math::div_round_up<size_t>(m_width * m_height * (BANAN_FB_BPP / 8), PAGE_SIZE) * PAGE_SIZE, BAN::Math::div_round_up<size_t>(m_width * m_height * (BANAN_FB_BPP / 8), PAGE_SIZE) * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false false
)); ));
return {}; return {};

View File

@@ -1,67 +1,47 @@
#include <BAN/ScopeGuard.h>
#include <kernel/Memory/Heap.h> #include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h> #include <kernel/Memory/VirtualRange.h>
namespace Kernel namespace Kernel
{ {
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages) BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, AddressRange address_range, size_t size, PageTable::flags_t flags, bool add_guard_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
if (add_guard_pages)
{
vaddr -= PAGE_SIZE;
size += 2 * PAGE_SIZE;
}
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags));
ASSERT(page_table.reserve_range(vaddr, size));
TRY(result->initialize());
return result;
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
{ {
if (add_guard_pages) if (add_guard_pages)
size += 2 * PAGE_SIZE; size += 2 * PAGE_SIZE;
ASSERT(size % PAGE_SIZE == 0); ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr_start > 0);
ASSERT(vaddr_start + size <= vaddr_end);
// Align vaddr range to page boundaries // Align vaddr range to page boundaries
if (size_t rem = vaddr_start % PAGE_SIZE) if (const size_t rem = address_range.start % PAGE_SIZE)
vaddr_start += PAGE_SIZE - rem; address_range.start += PAGE_SIZE - rem;
if (size_t rem = vaddr_end % PAGE_SIZE) if (const size_t rem = address_range.end % PAGE_SIZE)
vaddr_end -= rem; address_range.end -= rem;
ASSERT(vaddr_start < vaddr_end);
ASSERT(vaddr_end - vaddr_start + 1 >= size / PAGE_SIZE);
const vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end); const vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, address_range.start, address_range.end);
if (vaddr == 0) if (vaddr == 0)
return BAN::Error::from_errno(ENOMEM); return BAN::Error::from_errno(ENOMEM);
ASSERT(vaddr >= vaddr_start);
ASSERT(vaddr + size <= vaddr_end);
auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags); BAN::ScopeGuard vaddr_cleaner([&page_table, vaddr, size] {
if (result_or_error.is_error())
{
page_table.unmap_range(vaddr, size); page_table.unmap_range(vaddr, size);
return result_or_error.release_error(); });
}
auto result = result_or_error.release_value(); auto result = TRY(BAN::UniqPtr<VirtualRange>::create(
page_table,
add_guard_pages,
vaddr,
size,
flags)
);
TRY(result->initialize()); TRY(result->initialize());
vaddr_cleaner.disable();
return result; return result;
} }
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool has_guard_pages, vaddr_t vaddr, size_t size, PageTable::flags_t flags) VirtualRange::VirtualRange(PageTable& page_table, bool has_guard_pages, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
: m_page_table(page_table) : m_page_table(page_table)
, m_preallocated(preallocated)
, m_has_guard_pages(has_guard_pages) , m_has_guard_pages(has_guard_pages)
, m_vaddr(vaddr) , m_vaddr(vaddr)
, m_size(size) , m_size(size)
@@ -71,70 +51,26 @@ namespace Kernel
VirtualRange::~VirtualRange() VirtualRange::~VirtualRange()
{ {
ASSERT(m_vaddr); ASSERT(m_vaddr);
m_page_table.unmap_range(m_vaddr, m_size); for (size_t off = 0; off < size(); off += PAGE_SIZE)
if (const auto paddr = m_page_table.physical_address_of(vaddr() + off))
for (paddr_t paddr : m_paddrs)
if (paddr != 0)
Heap::get().release_page(paddr); Heap::get().release_page(paddr);
m_page_table.unmap_range(m_vaddr, m_size);
} }
BAN::ErrorOr<void> VirtualRange::initialize() BAN::ErrorOr<void> VirtualRange::initialize()
{ {
TRY(m_paddrs.resize(size() / PAGE_SIZE, 0));
if (!m_preallocated)
return {};
const size_t page_count = size() / PAGE_SIZE; const size_t page_count = size() / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++) for (size_t i = 0; i < page_count; i++)
{ {
m_paddrs[i] = Heap::get().take_free_page(); const auto paddr = Heap::get().take_free_page();
if (m_paddrs[i] == 0) if (paddr == 0)
return BAN::Error::from_errno(ENOMEM); return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags); PageTable::with_fast_page(paddr, [] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
m_page_table.map_page_at(paddr, vaddr() + i * PAGE_SIZE, m_flags);
} }
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
memset(reinterpret_cast<void*>(vaddr()), 0, size());
else
{
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
PageTable::with_fast_page(m_paddrs[i], [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
}
}
return {}; return {};
} }
BAN::ErrorOr<bool> VirtualRange::allocate_page_for_demand_paging(vaddr_t vaddr)
{
ASSERT(contains(vaddr));
vaddr &= PAGE_ADDR_MASK;
if (m_preallocated)
return false;
SpinLockGuard _(m_lock);
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
if (m_paddrs[index])
return false;
m_paddrs[index] = Heap::get().take_free_page();
if (m_paddrs[index] == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(m_paddrs[index], []{
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});
m_page_table.map_page_at(m_paddrs[index], vaddr, m_flags);
return true;
}
} }

View File

@@ -14,11 +14,10 @@ namespace Kernel
loopback->m_buffer = TRY(VirtualRange::create_to_vaddr_range( loopback->m_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(), PageTable::kernel(),
KERNEL_OFFSET, { KERNEL_OFFSET, UINTPTR_MAX },
BAN::numeric_limits<vaddr_t>::max(),
buffer_size * buffer_count, buffer_size * buffer_count,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false false
)); ));
auto* thread = TRY(Thread::create_kernel([](void* loopback_ptr) { auto* thread = TRY(Thread::create_kernel([](void* loopback_ptr) {

View File

@@ -14,11 +14,10 @@ namespace Kernel
auto socket = TRY(BAN::RefPtr<UDPSocket>::create(network_layer, info)); auto socket = TRY(BAN::RefPtr<UDPSocket>::create(network_layer, info));
socket->m_packet_buffer = TRY(VirtualRange::create_to_vaddr_range( socket->m_packet_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(), PageTable::kernel(),
KERNEL_OFFSET, { KERNEL_OFFSET, UINTPTR_MAX },
~(uintptr_t)0,
packet_buffer_size, packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false false
)); ));
return socket; return socket;
} }

View File

@@ -50,11 +50,10 @@ namespace Kernel
auto socket = TRY(BAN::RefPtr<UnixDomainSocket>::create(socket_type, info)); auto socket = TRY(BAN::RefPtr<UnixDomainSocket>::create(socket_type, info));
socket->m_packet_buffer = TRY(VirtualRange::create_to_vaddr_range( socket->m_packet_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(), PageTable::kernel(),
KERNEL_OFFSET, { KERNEL_OFFSET, UINTPTR_MAX },
~(uintptr_t)0,
s_packet_buffer_size, s_packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false false
)); ));
return socket; return socket;
} }

View File

@@ -42,10 +42,10 @@ namespace Kernel
auto pts_master_buffer = TRY(VirtualRange::create_to_vaddr_range( auto pts_master_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(), PageTable::kernel(),
KERNEL_OFFSET, static_cast<vaddr_t>(-1), { KERNEL_OFFSET, UINTPTR_MAX },
16 * PAGE_SIZE, 16 * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false false
)); ));
auto pts_master = TRY(BAN::RefPtr<PseudoTerminalMaster>::create(BAN::move(pts_master_buffer), mode, uid, gid)); auto pts_master = TRY(BAN::RefPtr<PseudoTerminalMaster>::create(BAN::move(pts_master_buffer), mode, uid, gid));
DevFileSystem::get().remove_from_cache(pts_master); DevFileSystem::get().remove_from_cache(pts_master);

View File

@@ -171,11 +171,10 @@ namespace Kernel
// Initialize stack and registers // Initialize stack and registers
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range( thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(), PageTable::kernel(),
KERNEL_OFFSET, { KERNEL_OFFSET, UINTPTR_MAX },
~(uintptr_t)0,
kernel_stack_size, kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true true
)); ));
// Initialize stack for returning // Initialize stack for returning
@@ -208,10 +207,10 @@ namespace Kernel
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range( thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
page_table, page_table,
s_user_stack_addr_start, USERSPACE_END, { s_user_stack_addr_start, USERSPACE_END },
kernel_stack_size, kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true true
)); ));
auto userspace_stack = TRY(MemoryBackedRegion::create( auto userspace_stack = TRY(MemoryBackedRegion::create(
@@ -356,10 +355,10 @@ namespace Kernel
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range( thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
new_process->page_table(), new_process->page_table(),
s_user_stack_addr_start, USERSPACE_END, { s_user_stack_addr_start, USERSPACE_END },
kernel_stack_size, kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present, PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true true
)); ));
// NOTE: copy [sp, stack_end] so fork return works // NOTE: copy [sp, stack_end] so fork return works