Kernel: Remove unused features from VirtualRange

On-demand paging has not been used ever since I made the userspace stack
a normal MemoryRegion.
This commit is contained in:
2026-04-21 19:58:09 +03:00
parent eea0154f18
commit b74812d669
10 changed files with 53 additions and 136 deletions

View File

@@ -9,12 +9,6 @@
namespace Kernel
{
struct AddressRange
{
vaddr_t start;
vaddr_t end;
};
class MemoryRegion
{
BAN_NON_COPYABLE(MemoryRegion);

View File

@@ -23,4 +23,10 @@ namespace Kernel
using vaddr_t = uintptr_t;
using paddr_t = uint64_t;
struct AddressRange
{
vaddr_t start;
vaddr_t end;
};
}

View File

@@ -14,42 +14,27 @@ namespace Kernel
BAN_NON_MOVABLE(VirtualRange);
public:
// Create virtual range to fixed virtual address
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
// Create virtual range to virtual address range
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages);
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, AddressRange address_range, size_t, PageTable::flags_t flags, bool add_guard_pages);
~VirtualRange();
vaddr_t vaddr() const { return m_vaddr + (m_has_guard_pages ? PAGE_SIZE : 0); }
size_t size() const { return m_size - (m_has_guard_pages ? 2 * PAGE_SIZE : 0); }
PageTable::flags_t flags() const { return m_flags; }
paddr_t paddr_of(vaddr_t vaddr) const
{
ASSERT(vaddr % PAGE_SIZE == 0);
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
ASSERT(index < m_paddrs.size());
const paddr_t paddr = m_paddrs[index];
ASSERT(paddr);
return paddr;
}
paddr_t paddr_of(vaddr_t vaddr) const { return m_page_table.physical_address_of(vaddr & PAGE_ADDR_MASK); }
bool contains(vaddr_t address) const { return vaddr() <= address && address < vaddr() + size(); }
BAN::ErrorOr<bool> allocate_page_for_demand_paging(vaddr_t address);
private:
VirtualRange(PageTable&, bool preallocated, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
VirtualRange(PageTable&, bool has_guard_pages, vaddr_t, size_t, PageTable::flags_t);
BAN::ErrorOr<void> initialize();
private:
PageTable& m_page_table;
const bool m_preallocated;
const bool m_has_guard_pages;
const vaddr_t m_vaddr;
const size_t m_size;
const PageTable::flags_t m_flags;
BAN::Vector<paddr_t> m_paddrs;
SpinLock m_lock;
friend class BAN::UniqPtr<VirtualRange>;

View File

@@ -84,10 +84,10 @@ namespace Kernel
m_video_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET, UINTPTR_MAX,
{ KERNEL_OFFSET, UINTPTR_MAX },
BAN::Math::div_round_up<size_t>(m_width * m_height * (BANAN_FB_BPP / 8), PAGE_SIZE) * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
false
));
return {};

View File

@@ -1,67 +1,47 @@
#include <BAN/ScopeGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
namespace Kernel
{
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
{
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
if (add_guard_pages)
{
vaddr -= PAGE_SIZE;
size += 2 * PAGE_SIZE;
}
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags));
ASSERT(page_table.reserve_range(vaddr, size));
TRY(result->initialize());
return result;
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, PageTable::flags_t flags, bool preallocate_pages, bool add_guard_pages)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, AddressRange address_range, size_t size, PageTable::flags_t flags, bool add_guard_pages)
{
if (add_guard_pages)
size += 2 * PAGE_SIZE;
ASSERT(size % PAGE_SIZE == 0);
ASSERT(vaddr_start > 0);
ASSERT(vaddr_start + size <= vaddr_end);
// Align vaddr range to page boundaries
if (size_t rem = vaddr_start % PAGE_SIZE)
vaddr_start += PAGE_SIZE - rem;
if (size_t rem = vaddr_end % PAGE_SIZE)
vaddr_end -= rem;
ASSERT(vaddr_start < vaddr_end);
ASSERT(vaddr_end - vaddr_start + 1 >= size / PAGE_SIZE);
if (const size_t rem = address_range.start % PAGE_SIZE)
address_range.start += PAGE_SIZE - rem;
if (const size_t rem = address_range.end % PAGE_SIZE)
address_range.end -= rem;
const vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
const vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, address_range.start, address_range.end);
if (vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
ASSERT(vaddr >= vaddr_start);
ASSERT(vaddr + size <= vaddr_end);
auto result_or_error = BAN::UniqPtr<VirtualRange>::create(page_table, preallocate_pages, add_guard_pages, vaddr, size, flags);
if (result_or_error.is_error())
{
BAN::ScopeGuard vaddr_cleaner([&page_table, vaddr, size] {
page_table.unmap_range(vaddr, size);
return result_or_error.release_error();
}
});
auto result = result_or_error.release_value();
auto result = TRY(BAN::UniqPtr<VirtualRange>::create(
page_table,
add_guard_pages,
vaddr,
size,
flags)
);
TRY(result->initialize());
vaddr_cleaner.disable();
return result;
}
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool has_guard_pages, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
VirtualRange::VirtualRange(PageTable& page_table, bool has_guard_pages, vaddr_t vaddr, size_t size, PageTable::flags_t flags)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_has_guard_pages(has_guard_pages)
, m_vaddr(vaddr)
, m_size(size)
@@ -71,70 +51,26 @@ namespace Kernel
VirtualRange::~VirtualRange()
{
ASSERT(m_vaddr);
m_page_table.unmap_range(m_vaddr, m_size);
for (paddr_t paddr : m_paddrs)
if (paddr != 0)
for (size_t off = 0; off < size(); off += PAGE_SIZE)
if (const auto paddr = m_page_table.physical_address_of(vaddr() + off))
Heap::get().release_page(paddr);
m_page_table.unmap_range(m_vaddr, m_size);
}
BAN::ErrorOr<void> VirtualRange::initialize()
{
TRY(m_paddrs.resize(size() / PAGE_SIZE, 0));
if (!m_preallocated)
return {};
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
m_paddrs[i] = Heap::get().take_free_page();
if (m_paddrs[i] == 0)
const auto paddr = Heap::get().take_free_page();
if (paddr == 0)
return BAN::Error::from_errno(ENOMEM);
m_page_table.map_page_at(m_paddrs[i], vaddr() + i * PAGE_SIZE, m_flags);
PageTable::with_fast_page(paddr, [] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
m_page_table.map_page_at(paddr, vaddr() + i * PAGE_SIZE, m_flags);
}
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
memset(reinterpret_cast<void*>(vaddr()), 0, size());
else
{
const size_t page_count = size() / PAGE_SIZE;
for (size_t i = 0; i < page_count; i++)
{
PageTable::with_fast_page(m_paddrs[i], [&] {
memset(PageTable::fast_page_as_ptr(), 0, PAGE_SIZE);
});
}
}
return {};
}
BAN::ErrorOr<bool> VirtualRange::allocate_page_for_demand_paging(vaddr_t vaddr)
{
ASSERT(contains(vaddr));
vaddr &= PAGE_ADDR_MASK;
if (m_preallocated)
return false;
SpinLockGuard _(m_lock);
const size_t index = (vaddr - this->vaddr()) / PAGE_SIZE;
if (m_paddrs[index])
return false;
m_paddrs[index] = Heap::get().take_free_page();
if (m_paddrs[index] == 0)
return BAN::Error::from_errno(ENOMEM);
PageTable::with_fast_page(m_paddrs[index], []{
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});
m_page_table.map_page_at(m_paddrs[index], vaddr, m_flags);
return true;
}
}

View File

@@ -14,11 +14,10 @@ namespace Kernel
loopback->m_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET,
BAN::numeric_limits<vaddr_t>::max(),
{ KERNEL_OFFSET, UINTPTR_MAX },
buffer_size * buffer_count,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
false
));
auto* thread = TRY(Thread::create_kernel([](void* loopback_ptr) {

View File

@@ -14,11 +14,10 @@ namespace Kernel
auto socket = TRY(BAN::RefPtr<UDPSocket>::create(network_layer, info));
socket->m_packet_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET,
~(uintptr_t)0,
{ KERNEL_OFFSET, UINTPTR_MAX },
packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
false
));
return socket;
}

View File

@@ -50,11 +50,10 @@ namespace Kernel
auto socket = TRY(BAN::RefPtr<UnixDomainSocket>::create(socket_type, info));
socket->m_packet_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET,
~(uintptr_t)0,
{ KERNEL_OFFSET, UINTPTR_MAX },
s_packet_buffer_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
false
));
return socket;
}

View File

@@ -42,10 +42,10 @@ namespace Kernel
auto pts_master_buffer = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET, static_cast<vaddr_t>(-1),
{ KERNEL_OFFSET, UINTPTR_MAX },
16 * PAGE_SIZE,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, false
false
));
auto pts_master = TRY(BAN::RefPtr<PseudoTerminalMaster>::create(BAN::move(pts_master_buffer), mode, uid, gid));
DevFileSystem::get().remove_from_cache(pts_master);

View File

@@ -171,11 +171,10 @@ namespace Kernel
// Initialize stack and registers
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET,
~(uintptr_t)0,
{ KERNEL_OFFSET, UINTPTR_MAX },
kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true
true
));
// Initialize stack for returning
@@ -208,10 +207,10 @@ namespace Kernel
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
page_table,
s_user_stack_addr_start, USERSPACE_END,
{ s_user_stack_addr_start, USERSPACE_END },
kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true
true
));
auto userspace_stack = TRY(MemoryBackedRegion::create(
@@ -356,10 +355,10 @@ namespace Kernel
thread->m_kernel_stack = TRY(VirtualRange::create_to_vaddr_range(
new_process->page_table(),
s_user_stack_addr_start, USERSPACE_END,
{ s_user_stack_addr_start, USERSPACE_END },
kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true, true
true
));
// NOTE: copy [sp, stack_end] so fork return works