Kernel: Rename MMU::{un,}allocate... to MMU::{un,}map

This is a more appropriate name for the behaviour :D
Bananymous 2023-04-19 21:50:30 +03:00
parent a159c980ee
commit d38470c8e2
9 changed files with 33 additions and 33 deletions

View File

@@ -69,7 +69,7 @@ MMU::MMU()
 	asm volatile("movl %0, %%cr3" :: "r"(m_highest_paging_struct));
 }
-void MMU::allocate_page(uintptr_t address, uint8_t flags)
+void MMU::map_page(uintptr_t address, uint8_t flags)
 {
 #if MMU_DEBUG_PRINT
 	dprintln("AllocatePage(0x{8H})", address);
@@ -96,15 +96,15 @@ void MMU::allocate_page(uintptr_t address, uint8_t flags)
 	asm volatile("invlpg (%0)" :: "r"(address) : "memory");
 }
-void MMU::allocate_range(uintptr_t address, ptrdiff_t size, uint8_t flags)
+void MMU::map_range(uintptr_t address, ptrdiff_t size, uint8_t flags)
 {
 	uintptr_t s_page = address & PAGE_MASK;
 	uintptr_t e_page = (address + size - 1) & PAGE_MASK;
 	for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
-		allocate_page(page, flags);
+		map_page(page, flags);
 }
-void MMU::unallocate_page(uintptr_t address)
+void MMU::unmap_page(uintptr_t address)
 {
 #if MMU_DEBUG_PRINT
 	dprintln("UnAllocatePage(0x{8H})", address & PAGE_MASK);
@@ -129,10 +129,10 @@ void MMU::unallocate_page(uintptr_t address)
 	asm volatile("invlpg (%0)" :: "r"(address & PAGE_MASK) : "memory");
 }
-void MMU::unallocate_range(uintptr_t address, ptrdiff_t size)
+void MMU::unmap_range(uintptr_t address, ptrdiff_t size)
 {
 	uintptr_t s_page = address & PAGE_MASK;
 	uintptr_t e_page = (address + size - 1) & PAGE_MASK;
 	for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
-		unallocate_page(page);
+		unmap_page(page);
 }
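For reference, map_range() and unmap_range() above touch every page overlapped by the byte range [address, address + size). A minimal standalone sketch of the same arithmetic, assuming 4 KiB pages and PAGE_MASK = ~(PAGE_SIZE - 1) (both assumptions; the diff itself does not show these constants' values):

    #include <cstdint>
    #include <cstddef>
    #include <cstdio>

    // Assumed constants (not part of this commit): 4 KiB pages, mask selecting the page base.
    constexpr std::uintptr_t PAGE_SIZE = 0x1000;
    constexpr std::uintptr_t PAGE_MASK = ~(PAGE_SIZE - 1);

    int main()
    {
        std::uintptr_t  address = 0x1234; // hypothetical unaligned start address
        std::ptrdiff_t  size    = 0x2000; // hypothetical byte count

        // Same computation as map_range(): first and last page bases touched by the range.
        std::uintptr_t s_page = address & PAGE_MASK;              // 0x1000
        std::uintptr_t e_page = (address + size - 1) & PAGE_MASK; // 0x3000

        for (std::uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
            std::printf("would map page 0x%lx\n", (unsigned long)page);
    }

With these inputs the loop visits pages 0x1000, 0x2000 and 0x3000, i.e. an unaligned two-page request ends up mapping three pages.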

View File

@@ -88,7 +88,7 @@ MMU::~MMU()
 	kfree(pml4);
 }
-void MMU::allocate_page(uintptr_t address, uint8_t flags)
+void MMU::map_page(uintptr_t address, uint8_t flags)
 {
 	ASSERT((address >> 48) == 0);
@@ -140,15 +140,15 @@ void MMU::allocate_page(uintptr_t address, uint8_t flags)
 	asm volatile("invlpg (%0)" :: "r"(address) : "memory");
 }
-void MMU::allocate_range(uintptr_t address, ptrdiff_t size, uint8_t flags)
+void MMU::map_range(uintptr_t address, ptrdiff_t size, uint8_t flags)
 {
 	uintptr_t s_page = address & PAGE_MASK;
 	uintptr_t e_page = (address + size - 1) & PAGE_MASK;
 	for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
-		allocate_page(page, flags);
+		map_page(page, flags);
 }
-void MMU::unallocate_page(uintptr_t address)
+void MMU::unmap_page(uintptr_t address)
 {
 	ASSERT((address >> 48) == 0);
@@ -188,10 +188,10 @@ cleanup_done:
 	asm volatile("invlpg (%0)" :: "r"(address) : "memory");
 }
-void MMU::unallocate_range(uintptr_t address, ptrdiff_t size)
+void MMU::unmap_range(uintptr_t address, ptrdiff_t size)
 {
 	uintptr_t s_page = address & PAGE_MASK;
 	uintptr_t e_page = (address + size - 1) & PAGE_MASK;
 	for (uintptr_t page = s_page; page <= e_page; page += PAGE_SIZE)
-		unallocate_page(page);
+		unmap_page(page);
 }

View File

@@ -20,11 +20,11 @@ public:
 	MMU();
 	~MMU();
-	void allocate_page(uintptr_t, uint8_t);
-	void allocate_range(uintptr_t, ptrdiff_t, uint8_t);
-	void unallocate_page(uintptr_t);
-	void unallocate_range(uintptr_t, ptrdiff_t);
+	void map_page(uintptr_t, uint8_t);
+	void map_range(uintptr_t, ptrdiff_t, uint8_t);
+	void unmap_page(uintptr_t);
+	void unmap_range(uintptr_t, ptrdiff_t);
 private:
 	uint64_t* m_highest_paging_struct;
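With the renamed header above, call sites read as in the rest of this commit: get the MMU singleton, map a range with the desired flags, and unmap it when done. A minimal sketch of such a call site (the include path, address and size are hypothetical; the flag combination is one used elsewhere in this diff):

    #include <cstdint>
    #include <cstddef>
    #include <kernel/MMU.h> // assumed include path for the header above

    using namespace Kernel; // the MMU class appears to live in the Kernel namespace (see jump_userspace() below)

    void map_mmio_example()
    {
        constexpr uintptr_t mmio_base = 0xFEC00000; // hypothetical device address
        constexpr ptrdiff_t mmio_size = 0x1000;     // hypothetical size in bytes

        // Map the region, use it, then unmap it through the renamed interface.
        MMU::get().map_range(mmio_base, mmio_size, MMU::Flags::ReadWrite | MMU::Flags::Present);
        // ... access the device registers ...
        MMU::get().unmap_range(mmio_base, mmio_size);
    }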

View File

@@ -105,8 +105,8 @@ namespace Kernel
 		if (rsdp->revision >= 2)
 		{
 			const XSDT* xsdt = (const XSDT*)rsdp->xsdt_address;
-			MMU::get().allocate_page((uintptr_t)xsdt, MMU::Flags::Present);
-			BAN::ScopeGuard _([xsdt] { MMU::get().unallocate_page((uintptr_t)xsdt); });
+			MMU::get().map_page((uintptr_t)xsdt, MMU::Flags::Present);
+			BAN::ScopeGuard _([xsdt] { MMU::get().unmap_page((uintptr_t)xsdt); });
 			if (memcmp(xsdt->signature, "XSDT", 4) != 0)
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
@@ -120,8 +120,8 @@ namespace Kernel
 		else
 		{
 			const RSDT* rsdt = (const RSDT*)(uintptr_t)rsdp->rsdt_address;
-			MMU::get().allocate_page((uintptr_t)rsdt, MMU::Flags::Present);
-			BAN::ScopeGuard _([rsdt] { MMU::get().unallocate_page((uintptr_t)rsdt); });
+			MMU::get().map_page((uintptr_t)rsdt, MMU::Flags::Present);
+			BAN::ScopeGuard _([rsdt] { MMU::get().unmap_page((uintptr_t)rsdt); });
 			if (memcmp(rsdt->signature, "RSDT", 4) != 0)
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
@@ -133,13 +133,13 @@ namespace Kernel
 			m_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
 		}
-		MMU::get().allocate_range(m_header_table, m_entry_count * m_entry_size, MMU::Flags::Present);
+		MMU::get().map_range(m_header_table, m_entry_count * m_entry_size, MMU::Flags::Present);
 		for (uint32_t i = 0; i < m_entry_count; i++)
 		{
 			auto* header = get_header_from_index(i);
-			MMU::get().allocate_page((uintptr_t)header, MMU::Flags::Present);
-			MMU::get().allocate_range((uintptr_t)header, header->length, MMU::Flags::Present);
+			MMU::get().map_page((uintptr_t)header, MMU::Flags::Present);
+			MMU::get().map_range((uintptr_t)header, header->length, MMU::Flags::Present);
 		}
 		return {};

View File

@@ -144,10 +144,10 @@ APIC* APIC::create()
 		return nullptr;
 	}
-	MMU::get().allocate_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
+	MMU::get().map_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
 	for (auto& io_apic : apic->m_io_apics)
 	{
-		MMU::get().allocate_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
+		MMU::get().map_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
 		io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
 	}

View File

@@ -36,7 +36,7 @@ namespace Kernel::Memory
 		m_list_pages = BAN::Math::div_round_up<uint64_t>(m_total_pages * sizeof(node), PAGE_SIZE);
 		m_reservable_pages = m_total_pages - m_list_pages;
-		MMU::get().allocate_range(m_start, m_list_pages * PAGE_SIZE, MMU::Flags::ReadWrite | MMU::Flags::Present);
+		MMU::get().map_range(m_start, m_list_pages * PAGE_SIZE, MMU::Flags::ReadWrite | MMU::Flags::Present);
 		// Initialize page list so that every page points to the next one
 		node* page_list = (node*)m_start;
@@ -164,7 +164,7 @@ namespace Kernel::Memory
 		{
 			if (paddr_t page = range.reserve_page(); page != PhysicalRange::invalid)
 			{
-				MMU::get().allocate_page(page, flags);
+				MMU::get().map_page(page, flags);
 				return page;
 			}
 		}
@@ -177,7 +177,7 @@ namespace Kernel::Memory
 		{
 			if (range.contains(addr))
 			{
-				MMU::get().unallocate_page(addr);
+				MMU::get().unmap_page(addr);
 				return;
 			}
 		}

View File

@ -44,7 +44,7 @@ namespace Kernel
[](void* entry_func) [](void* entry_func)
{ {
Thread& current = Thread::current(); Thread& current = Thread::current();
MMU::get().allocate_range(current.stack_base(), current.stack_size(), MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present); MMU::get().map_range(current.stack_base(), current.stack_size(), MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
current.jump_userspace((uintptr_t)entry_func); current.jump_userspace((uintptr_t)entry_func);
ASSERT_NOT_REACHED(); ASSERT_NOT_REACHED();
}, (void*)entry }, (void*)entry

View File

@@ -34,7 +34,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
 		return nullptr;
 	}
-	MMU::get().allocate_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
+	MMU::get().map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
 	auto* driver = new VesaTerminalDriver(
 		framebuffer.width,
@@ -51,7 +51,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
 VesaTerminalDriver::~VesaTerminalDriver()
 {
-	MMU::get().unallocate_range(m_address, m_pitch * m_height);
+	MMU::get().unmap_range(m_address, m_pitch * m_height);
 }
 void VesaTerminalDriver::set_pixel(uint32_t offset, Color color)
void VesaTerminalDriver::set_pixel(uint32_t offset, Color color) void VesaTerminalDriver::set_pixel(uint32_t offset, Color color)

View File

@@ -212,8 +212,8 @@ static void jump_userspace()
 {
 	using namespace Kernel;
-	MMU::get().allocate_range((uintptr_t)&g_userspace_start, (uintptr_t)&g_userspace_end - (uintptr_t)&g_userspace_start, MMU::Flags::UserSupervisor | MMU::Flags::Present);
-	MMU::get().allocate_range((uintptr_t)&g_rodata_start, (uintptr_t)&g_rodata_end - (uintptr_t)&g_rodata_start, MMU::Flags::UserSupervisor | MMU::Flags::Present);
+	MMU::get().map_range((uintptr_t)&g_userspace_start, (uintptr_t)&g_userspace_end - (uintptr_t)&g_userspace_start, MMU::Flags::UserSupervisor | MMU::Flags::Present);
+	MMU::get().map_range((uintptr_t)&g_rodata_start, (uintptr_t)&g_rodata_end - (uintptr_t)&g_rodata_start, MMU::Flags::UserSupervisor | MMU::Flags::Present);
 	MUST(Process::create_userspace(userspace_entry));
 }