Kernel: PhysicalRange maps its nodes to kernel vaddr space

This keeps the lower half of the address space cleaner
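In short: the page-list nodes used to be identity mapped, so the list was read and written through its physical address, occupying a low virtual range. After this commit the same physical pages are mapped at a free kernel virtual address instead. A minimal before/after sketch built only from calls that appear in the diff below (flags abbreviates the ReadWrite | Present flag argument; hint stands for the page-aligned address just past the kernel image):

	// Before: node list accessed through its identity-mapped physical address
	PageTable::kernel().identity_map_range(m_start, m_list_pages * PAGE_SIZE, flags);
	node* page_list = (node*)m_start;

	// After: reserve a free virtual range in kernel space and map the
	// node list's physical pages there
	m_vaddr = PageTable::kernel().get_free_contiguous_pages(m_list_pages, hint);
	PageTable::kernel().map_range_at(m_paddr, m_vaddr, m_list_pages * PAGE_SIZE, flags);
	node* page_list = (node*)m_vaddr;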
Bananymous 2023-06-04 01:20:47 +03:00
parent 35e739dcdd
commit 37b93da650
2 changed files with 23 additions and 19 deletions

View File

@@ -15,9 +15,9 @@ namespace Kernel
 		paddr_t reserve_page();
 		void release_page(paddr_t);

-		paddr_t start() const { return m_start; }
-		paddr_t end() const { return m_start + m_size; }
-		bool contains(paddr_t addr) const { return m_start <= addr && addr < m_start + m_size; }
+		paddr_t start() const { return m_paddr; }
+		paddr_t end() const { return m_paddr + m_size; }
+		bool contains(paddr_t addr) const { return m_paddr <= addr && addr < m_paddr + m_size; }

 		size_t usable_memory() const { return m_reservable_pages * PAGE_SIZE; }
@@ -35,7 +35,8 @@ namespace Kernel
 		node* node_address(paddr_t) const;

 	private:
-		paddr_t m_start { 0 };
+		paddr_t m_paddr { 0 };
+		vaddr_t m_vaddr { 0 };
 		size_t m_size { 0 };
 		uint64_t m_total_pages { 0 };
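The header change is the crux: the old m_start field served two roles at once, physical base of the range and pointer for reaching the node list, which only works while the list is identity mapped. Splitting it lets the two diverge. A hedged summary of the intended meaning of each field (names from the diff, annotations mine):

	paddr_t m_paddr { 0 }; // physical base; start()/end()/contains() arithmetic stays physical
	vaddr_t m_vaddr { 0 }; // kernel-virtual base of the node list; the only base safe to dereference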

View File

@@ -15,15 +15,15 @@ namespace Kernel
 			return;

 		// Align start to page boundary and after the kernel memory
-		m_start = BAN::Math::max(start, V2P(g_kernel_end));
-		if (auto rem = m_start % PAGE_SIZE)
-			m_start += PAGE_SIZE - rem;
-		if (size <= m_start - start)
+		m_paddr = BAN::Math::max(start, V2P(g_kernel_end));
+		if (auto rem = m_paddr % PAGE_SIZE)
+			m_paddr += PAGE_SIZE - rem;
+		if (size <= m_paddr - start)
 			return;

 		// Align size to page boundary
-		m_size = size - (m_start - start);
+		m_size = size - (m_paddr - start);
 		if (auto rem = m_size % PAGE_SIZE)
 			m_size -= rem;
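The hunk above trims the supplied [start, start + size) region so that m_paddr is page aligned and past the kernel image, and m_size spans whole pages. A worked example with made-up numbers, assuming PAGE_SIZE = 0x1000 and V2P(g_kernel_end) = 0x100000:

	start   = 0x123456, size = 0x10000
	m_paddr = max(0x123456, 0x100000)          = 0x123456
	rem     = 0x123456 % 0x1000                = 0x456
	m_paddr += 0x1000 - 0x456                  -> 0x124000
	m_size  = 0x10000 - (0x124000 - 0x123456)  = 0xF456
	rem     = 0xF456 % 0x1000                  = 0x456
	m_size  -= 0x456                           -> 0xF000 (15 whole pages)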
@@ -40,12 +40,15 @@ namespace Kernel
 		m_used_pages = 0;
 		m_free_pages = m_reservable_pages;

-		PageTable::kernel().identity_map_range(m_start, m_list_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
+		PageTable::kernel().lock();
+		m_vaddr = PageTable::kernel().get_free_contiguous_pages(m_list_pages, ((vaddr_t)g_kernel_end + PAGE_SIZE - 1) & PAGE_ADDR_MASK);
+		ASSERT(m_vaddr);
+		PageTable::kernel().map_range_at(m_paddr, m_vaddr, m_list_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
+		PageTable::kernel().unlock();

 		// Initialize page list so that every page points to the next one
-		node* page_list = (node*)m_start;
-		ASSERT((paddr_t)&page_list[m_reservable_pages - 1] <= m_start + m_size);
+		node* page_list = (node*)m_vaddr;

 		for (uint64_t i = 0; i < m_reservable_pages; i++)
 			page_list[i] = { page_list + i - 1, page_list + i + 1 };
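Two details of the new mapping code are worth spelling out. The hint passed to get_free_contiguous_pages rounds g_kernel_end up to the next page boundary, so the search for free virtual pages starts just past the kernel image; assuming PAGE_SIZE = 0x1000, PAGE_ADDR_MASK = ~0xFFF, and a made-up g_kernel_end:

	g_kernel_end      = 0xC0123456
	+ PAGE_SIZE - 1   = 0xC0124455
	& PAGE_ADDR_MASK  = 0xC0124000   // first page boundary at or above g_kernel_end

The lock()/unlock() pair brackets both page-table calls, presumably so that no other caller can claim the returned virtual range between the lookup and the mapping.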
@@ -108,17 +111,17 @@ namespace Kernel
 	paddr_t PhysicalRange::page_address(const node* page) const
 	{
-		ASSERT((paddr_t)page <= m_start + m_reservable_pages * sizeof(node));
-		uint64_t page_index = page - (node*)m_start;
-		return m_start + (page_index + m_list_pages) * PAGE_SIZE;
+		ASSERT((vaddr_t)page <= m_vaddr + m_reservable_pages * sizeof(node));
+		uint64_t page_index = page - (node*)m_vaddr;
+		return m_paddr + (page_index + m_list_pages) * PAGE_SIZE;
 	}

 	PhysicalRange::node* PhysicalRange::node_address(paddr_t page_address) const
 	{
 		ASSERT(page_address % PAGE_SIZE == 0);
-		ASSERT(m_start + m_list_pages * PAGE_SIZE <= page_address && page_address < m_start + m_size);
-		uint64_t page_offset = page_address - (m_start + m_list_pages * PAGE_SIZE);
-		return (node*)m_start + page_offset / PAGE_SIZE;
+		ASSERT(m_paddr + m_list_pages * PAGE_SIZE <= page_address && page_address < m_paddr + m_size);
+		uint64_t page_offset = page_address - (m_paddr + m_list_pages * PAGE_SIZE);
+		return (node*)m_vaddr + page_offset / PAGE_SIZE;
 	}
 }
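After the change, page_address() and node_address() convert between the two bases: node pointers are m_vaddr-relative, while page addresses are m_paddr-relative and skip the first m_list_pages pages, which hold the list itself. The two helpers are inverses; a worked round trip with made-up values (PAGE_SIZE = 0x1000, m_paddr = 0x100000, m_vaddr = 0xC1000000, m_list_pages = 2):

	node_address(0x105000):
	  page_offset = 0x105000 - (0x100000 + 2 * 0x1000) = 0x3000
	  returns (node*)0xC1000000 + 3                     // the 4th node
	page_address((node*)0xC1000000 + 3):
	  page_index = 3
	  returns 0x100000 + (3 + 2) * 0x1000 = 0x105000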