forked from Bananymous/banan-os

Kernel: Change PageTable API

Getting free pages now reserves them, so you don't have to hold the page table lock :)

parent 926df2b276
commit 91f04ce250
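The caller-side effect of the change, as a minimal illustrative sketch (not part of the commit; `pages`, `base`, `paddr` and `flags` are placeholders, the API names follow the hunks below). Before, a free page was only found, so the caller had to hold the page table lock from the lookup until the mapping; now the reserve_* calls claim the pages before returning:

	// old API: find + map under a caller-held lock
	PageTable::kernel().lock();
	vaddr_t vaddr = PageTable::kernel().get_free_contiguous_pages(pages, base);
	PageTable::kernel().map_range_at(paddr, vaddr, pages * PAGE_SIZE, flags);
	PageTable::kernel().unlock();

	// new API: the returned pages come back already marked Flags::Reserved,
	// so nothing can steal them between the two calls
	vaddr_t vaddr = PageTable::kernel().reserve_free_contiguous_pages(pages, KERNEL_OFFSET);
	ASSERT(vaddr);
	PageTable::kernel().map_range_at(paddr, vaddr, pages * PAGE_SIZE, flags);

This is exactly the pattern visible in the Heap and allocator hunks below, where the lock()/unlock() pair around the old calls disappears.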
@@ -193,70 +193,69 @@ namespace Kernel
 	void PageTable::invalidate(vaddr_t vaddr)
 	{
 		ASSERT(vaddr % PAGE_SIZE == 0);
 		if (this == s_current)
 			asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
 	}

 	void PageTable::unmap_page(vaddr_t vaddr)
 	{
-		LockGuard _(m_lock);
-
+		vaddr &= PAGE_ADDR_MASK;
+
+		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
+			Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel);
+
+		ASSERT(is_canonical(vaddr));
+		vaddr_t uc_vaddr = uncanonicalize(vaddr);
+
+		ASSERT(vaddr % PAGE_SIZE == 0);
+
+		uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
+		uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
+		uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
+		uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
+
+		LockGuard _(m_lock);
+
 		if (is_page_free(vaddr))
 		{
 			dwarnln("unmapping unmapped page {8H}", vaddr);
 			return;
 		}

-		ASSERT(is_canonical(vaddr));
-		vaddr = uncanonicalize(vaddr);
-
-		uint64_t pml4e = (vaddr >> 39) & 0x1FF;
-		uint64_t pdpte = (vaddr >> 30) & 0x1FF;
-		uint64_t pde = (vaddr >> 21) & 0x1FF;
-		uint64_t pte = (vaddr >> 12) & 0x1FF;
-
 		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
 		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
 		uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
 		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);

 		pt[pte] = 0;
-		invalidate(canonicalize(vaddr));
+		invalidate(vaddr);
 	}

 	void PageTable::unmap_range(vaddr_t vaddr, size_t size)
 	{
-		LockGuard _(m_lock);
-
 		vaddr_t s_page = vaddr / PAGE_SIZE;
-		vaddr_t e_page = (vaddr + size - 1) / PAGE_SIZE;
-		for (vaddr_t page = s_page; page <= e_page; page++)
+		vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
+
+		LockGuard _(m_lock);
+		for (vaddr_t page = s_page; page < e_page; page++)
 			unmap_page(page * PAGE_SIZE);
 	}

 	void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags)
 	{
-		LockGuard _(m_lock);
-
 		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
 			Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel);

 		ASSERT(is_canonical(vaddr));
-		vaddr = uncanonicalize(vaddr);
+		vaddr_t uc_vaddr = uncanonicalize(vaddr);

 		ASSERT(paddr % PAGE_SIZE == 0);
 		ASSERT(vaddr % PAGE_SIZE == 0);
 		ASSERT(flags & Flags::Used);

-		uint64_t pml4e = (vaddr >> 39) & 0x1FF;
-		uint64_t pdpte = (vaddr >> 30) & 0x1FF;
-		uint64_t pde = (vaddr >> 21) & 0x1FF;
-		uint64_t pte = (vaddr >> 12) & 0x1FF;
+		uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
+		uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
+		uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
+		uint64_t pte = (uc_vaddr >> 12) & 0x1FF;

 		uint64_t extra_flags = 0;
 		if (s_has_nxe && !(flags & Flags::Execute))
@@ -265,7 +264,11 @@ namespace Kernel
 			extra_flags |= 1ull << 8;
+		if (flags & Flags::Reserved)
+			extra_flags |= Flags::Reserved;
-		flags_t uwr_flags = flags & 0b111;
+
+		// NOTE: we add present here, since it has to be available in higher level structures
+		flags_t uwr_flags = (flags & (Flags::UserSupervisor | Flags::ReadWrite)) | Flags::Present;
+
+		LockGuard _(m_lock);

 		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
 		if ((pml4[pml4e] & uwr_flags) != uwr_flags)
@@ -291,16 +294,17 @@ namespace Kernel
 			pd[pde] |= uwr_flags;
 		}

+		if (!(flags & Flags::Present))
+			uwr_flags &= ~Flags::Present;
+
 		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
 		pt[pte] = paddr | uwr_flags | extra_flags;

-		invalidate(canonicalize(vaddr));
+		invalidate(vaddr);
 	}

 	void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags)
 	{
-		LockGuard _(m_lock);
-
 		ASSERT(is_canonical(vaddr));

 		ASSERT(paddr % PAGE_SIZE == 0);
@@ -309,24 +313,26 @@ namespace Kernel
 		size_t first_page = vaddr / PAGE_SIZE;
 		size_t last_page = (vaddr + size - 1) / PAGE_SIZE;
 		size_t page_count = last_page - first_page + 1;
+
+		LockGuard _(m_lock);
 		for (size_t page = 0; page < page_count; page++)
 			map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags);
 	}

 	uint64_t PageTable::get_page_data(vaddr_t vaddr) const
 	{
-		LockGuard _(m_lock);
-
 		ASSERT(is_canonical(vaddr));
-		vaddr = uncanonicalize(vaddr);
+		vaddr_t uc_vaddr = uncanonicalize(vaddr);

 		ASSERT(vaddr % PAGE_SIZE == 0);

-		uint64_t pml4e = (vaddr >> 39) & 0x1FF;
-		uint64_t pdpte = (vaddr >> 30) & 0x1FF;
-		uint64_t pde = (vaddr >> 21) & 0x1FF;
-		uint64_t pte = (vaddr >> 12) & 0x1FF;
+		uint64_t pml4e = (uc_vaddr >> 39) & 0x1FF;
+		uint64_t pdpte = (uc_vaddr >> 30) & 0x1FF;
+		uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
+		uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
+
+		LockGuard _(m_lock);

 		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
 		if (!(pml4[pml4e] & Flags::Present))
 			return 0;
@@ -340,7 +346,7 @@ namespace Kernel
 			return 0;

 		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
-		if (!(pt[pte] & Flags::Present))
+		if (!(pt[pte] & Flags::Used))
 			return 0;

 		return pt[pte];
@@ -357,49 +363,90 @@ namespace Kernel
 		return (page_data & PAGE_ADDR_MASK) & ~(1ull << 63);
 	}

-	vaddr_t PageTable::get_free_page(vaddr_t first_address)
+	bool PageTable::reserve_page(vaddr_t vaddr, bool only_free)
 	{
 		LockGuard _(m_lock);
+		ASSERT(vaddr % PAGE_SIZE == 0);
+		if (only_free && !is_page_free(vaddr))
+			return false;
+		map_page_at(0, vaddr, Flags::Reserved);
+		return true;
+	}
+
+	bool PageTable::reserve_range(vaddr_t vaddr, size_t bytes, bool only_free)
+	{
+		if (size_t rem = bytes % PAGE_SIZE)
+			bytes += PAGE_SIZE - rem;
+		ASSERT(vaddr % PAGE_SIZE == 0);
+
+		LockGuard _(m_lock);
+		if (only_free && !is_range_free(vaddr, bytes))
+			return false;
+		for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE)
+			reserve_page(vaddr + offset);
+		return true;
+	}
+
+	vaddr_t PageTable::reserve_free_page(vaddr_t first_address, vaddr_t last_address)
+	{
+		if (size_t rem = first_address % PAGE_SIZE)
+			first_address += PAGE_SIZE - rem;
+		if (size_t rem = last_address % PAGE_SIZE)
+			last_address -= rem;

 		ASSERT(is_canonical(first_address));
-		vaddr_t vaddr = uncanonicalize(first_address);
+		ASSERT(is_canonical(last_address));
+		const vaddr_t uc_vaddr_start = uncanonicalize(first_address);
+		const vaddr_t uc_vaddr_end = uncanonicalize(last_address);

-		uint64_t pml4e = (vaddr >> 39) & 0x1FF;
-		uint64_t pdpte = (vaddr >> 30) & 0x1FF;
-		uint64_t pde = (vaddr >> 21) & 0x1FF;
-		uint64_t pte = (vaddr >> 12) & 0x1FF;
+		uint16_t pml4e = (uc_vaddr_start >> 39) & 0x1FF;
+		uint16_t pdpte = (uc_vaddr_start >> 30) & 0x1FF;
+		uint16_t pde = (uc_vaddr_start >> 21) & 0x1FF;
+		uint16_t pte = (uc_vaddr_start >> 12) & 0x1FF;
+
+		const uint16_t e_pml4e = (uc_vaddr_end >> 39) & 0x1FF;
+		const uint16_t e_pdpte = (uc_vaddr_end >> 30) & 0x1FF;
+		const uint16_t e_pde = (uc_vaddr_end >> 21) & 0x1FF;
+		const uint16_t e_pte = (uc_vaddr_end >> 12) & 0x1FF;
+
+		LockGuard _(m_lock);

 		// Try to find free page that can be mapped without
 		// allocations (page table with unused entries)
 		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
 		for (; pml4e < 512; pml4e++)
 		{
+			if (pml4e > e_pml4e)
+				break;
 			if (!(pml4[pml4e] & Flags::Present))
 				continue;
 			uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
 			for (; pdpte < 512; pdpte++)
 			{
+				if (pml4e == e_pml4e && pdpte > e_pdpte)
+					break;
 				if (!(pdpt[pdpte] & Flags::Present))
 					continue;
 				uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
 				for (; pde < 512; pde++)
 				{
+					if (pml4e == e_pml4e && pdpte == e_pdpte && pde > e_pde)
+						break;
 					if (!(pd[pde] & Flags::Present))
 						continue;
 					uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
 					for (; pte < 512; pte++)
 					{
+						if (pml4e == e_pml4e && pdpte == e_pdpte && pde == e_pde && pte >= e_pte)
+							break;
 						if (!(pt[pte] & Flags::Used))
 						{
 							vaddr_t vaddr = 0;
-							vaddr |= pml4e << 39;
-							vaddr |= pdpte << 30;
-							vaddr |= pde << 21;
-							vaddr |= pte << 12;
-							pt[pte] |= Flags::Reserved;
+							vaddr |= (uint64_t)pml4e << 39;
+							vaddr |= (uint64_t)pdpte << 30;
+							vaddr |= (uint64_t)pde << 21;
+							vaddr |= (uint64_t)pte << 12;
+							ASSERT(reserve_page(vaddr));
 							return canonicalize(vaddr);
 						}
 					}
@@ -407,46 +454,54 @@ namespace Kernel
 			}
 		}

-		// Find any free page page
-		vaddr = first_address;
-		while (is_canonical(vaddr))
+		// Find any free page
+		vaddr_t uc_vaddr = uc_vaddr_start;
+		while (uc_vaddr < uc_vaddr_end)
 		{
-			if (is_page_free(vaddr))
+			if (vaddr_t vaddr = canonicalize(uc_vaddr); is_page_free(vaddr))
 			{
-				map_page_at(0, vaddr, Flags::Reserved);
+				ASSERT(reserve_page(vaddr));
 				return vaddr;
 			}
-			if (vaddr > vaddr + PAGE_SIZE)
-				break;
-			vaddr += PAGE_SIZE;
+			uc_vaddr += PAGE_SIZE;
 		}

 		ASSERT_NOT_REACHED();
 	}

-	vaddr_t PageTable::get_free_contiguous_pages(size_t page_count, vaddr_t first_address)
+	vaddr_t PageTable::reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address)
 	{
-		if (first_address % PAGE_SIZE)
-			first_address = (first_address + PAGE_SIZE - 1) & PAGE_ADDR_MASK;
+		if (size_t rem = first_address % PAGE_SIZE)
+			first_address += PAGE_SIZE - rem;
+		if (size_t rem = last_address % PAGE_SIZE)
+			last_address -= rem;

 		ASSERT(is_canonical(first_address));
+		ASSERT(is_canonical(last_address));

 		LockGuard _(m_lock);

-		for (vaddr_t vaddr = first_address; is_canonical(vaddr); vaddr += PAGE_SIZE)
+		for (vaddr_t vaddr = first_address; vaddr < last_address;)
 		{
 			bool valid { true };
 			for (size_t page = 0; page < page_count; page++)
 			{
-				if (get_page_flags(vaddr + page * PAGE_SIZE) & Flags::Used)
+				if (!is_canonical(vaddr + page * PAGE_SIZE))
 				{
-					vaddr += page * PAGE_SIZE;
+					vaddr = canonicalize(uncanonicalize(vaddr) + page * PAGE_SIZE);
 					valid = false;
 					break;
 				}
+				if (!is_page_free(vaddr + page * PAGE_SIZE))
+				{
+					vaddr += (page + 1) * PAGE_SIZE;
+					valid = false;
+					break;
+				}
 			}
 			if (valid)
 			{
-				for (size_t page = 0; page < page_count; page++)
-					map_page_at(0, vaddr + page * PAGE_SIZE, Flags::Reserved);
+				ASSERT(reserve_range(vaddr, page_count * PAGE_SIZE));
 				return vaddr;
 			}
 		}
@@ -460,13 +515,13 @@ namespace Kernel
 		return !(get_page_flags(page) & Flags::Used);
 	}

-	bool PageTable::is_range_free(vaddr_t start, size_t size) const
+	bool PageTable::is_range_free(vaddr_t vaddr, size_t size) const
 	{
-		LockGuard _(m_lock);
-
-		vaddr_t first_page = start / PAGE_SIZE;
-		vaddr_t last_page = (start + size - 1) / PAGE_SIZE;
-		for (vaddr_t page = first_page; page <= last_page; page++)
+		vaddr_t s_page = vaddr / PAGE_SIZE;
+		vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
+
+		LockGuard _(m_lock);
+		for (vaddr_t page = s_page; page < e_page; page++)
 			if (!is_page_free(page * PAGE_SIZE))
 				return false;
 		return true;
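What a reservation means in the tables, sketched from the hunks above (illustrative only, not code from the commit): reserve_page() maps physical address 0 with Flags::Reserved and without Flags::Present, so the entry is claimed but not accessible, and the free-page scans skip it because they now test Flags::Used rather than Flags::Present. A later real mapping simply replaces the placeholder:

	// hypothetical call sites; vaddr and paddr are placeholders
	if (!PageTable::kernel().reserve_page(vaddr))   // only_free defaults to true,
		/* address already in use */;               // so this fails instead of clobbering
	// ...allocate backing memory, then map it over the Reserved entry
	PageTable::kernel().map_page_at(paddr, vaddr,
		PageTable::Flags::ReadWrite | PageTable::Flags::Present);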
@@ -43,8 +43,11 @@ namespace Kernel
 		bool is_page_free(vaddr_t) const;
 		bool is_range_free(vaddr_t, size_t bytes) const;

-		vaddr_t get_free_page(vaddr_t first_address = PAGE_SIZE);
-		vaddr_t get_free_contiguous_pages(size_t page_count, vaddr_t first_address = PAGE_SIZE);
+		bool reserve_page(vaddr_t, bool only_free = true);
+		bool reserve_range(vaddr_t, size_t bytes, bool only_free = true);
+
+		vaddr_t reserve_free_page(vaddr_t first_address, vaddr_t last_address = UINTPTR_MAX);
+		vaddr_t reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address = UINTPTR_MAX);

 		void load();
@@ -14,7 +14,11 @@ namespace Kernel
 		BAN_NON_MOVABLE(VirtualRange);

 	public:
-		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create(PageTable&, vaddr_t, size_t, uint8_t flags);
+		// Create virtual range to fixed virtual address
+		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, uint8_t flags);
+		// Create virtual range to virtual address range
+		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, uint8_t flags);
+		// Create virtual range in kernel memory with kmalloc
 		static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_kmalloc(size_t);
 		~VirtualRange();
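Usage of the split factory functions, as an illustrative sketch (the real call sites are in the Thread hunk at the end of this diff; 0x400000, range_size and flags are placeholders):

	// fixed placement: the caller names the exact virtual address
	auto fixed = TRY(VirtualRange::create_to_vaddr(page_table, 0x400000, range_size, flags));
	// ranged placement: the allocator picks a free spot inside [vaddr_start, vaddr_end)
	auto ranged = TRY(VirtualRange::create_to_vaddr_range(page_table, 0x300000, KERNEL_OFFSET, range_size, flags));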
@@ -6,8 +6,6 @@
 #define RSPD_SIZE 20
 #define RSPDv2_SIZE 36

-extern uint8_t g_kernel_end[];
-
 namespace Kernel
 {
@@ -136,7 +134,7 @@ namespace Kernel
 	}

 	size_t needed_pages = range_page_count(m_header_table_paddr, m_entry_count * m_entry_size);
-	m_header_table_vaddr = PageTable::kernel().get_free_contiguous_pages(needed_pages, (vaddr_t)g_kernel_end);
+	m_header_table_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
 	ASSERT(m_header_table_vaddr);

 	m_header_table_vaddr += m_header_table_paddr % PAGE_SIZE;
@@ -159,7 +157,7 @@ namespace Kernel
 	PageTable::kernel().unmap_page(0);

 	size_t needed_pages = range_page_count(header_paddr, header_length);
-	vaddr_t page_vaddr = PageTable::kernel().get_free_contiguous_pages(needed_pages, (vaddr_t)g_kernel_end);
+	vaddr_t page_vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
 	ASSERT(page_vaddr);

 	PageTable::kernel().map_range_at(
@@ -81,8 +81,6 @@ union RedirectionEntry
 	};
 };

-extern uint8_t g_kernel_end[];
-
 using namespace Kernel;

 APIC* APIC::create()
@@ -150,7 +148,7 @@ APIC* APIC::create()

 	// Map the local apic to kernel memory
 	{
-		vaddr_t vaddr = PageTable::kernel().get_free_page((vaddr_t)g_kernel_end);
+		vaddr_t vaddr = PageTable::kernel().reserve_free_page(KERNEL_OFFSET);
 		ASSERT(vaddr);
 		dprintln("lapic paddr {8H}", apic->m_local_apic_paddr);
 		apic->m_local_apic_vaddr = vaddr + (apic->m_local_apic_paddr % PAGE_SIZE);
@@ -165,7 +163,7 @@ APIC* APIC::create()
 	// Map io apics to kernel memory
 	for (auto& io_apic : apic->m_io_apics)
 	{
-		vaddr_t vaddr = PageTable::kernel().get_free_page((vaddr_t)g_kernel_end);
+		vaddr_t vaddr = PageTable::kernel().reserve_free_page(KERNEL_OFFSET);
 		ASSERT(vaddr);

 		io_apic.vaddr = vaddr + (io_apic.paddr % PAGE_SIZE);
@@ -212,7 +212,9 @@ namespace Kernel
 			paddr_t page_paddr = Heap::get().take_free_page();
 			ASSERT(page_paddr);

-			page_vaddr = m_page_table.get_free_page();
+			page_vaddr = m_page_table.reserve_free_page(0x300000);
 			ASSERT(page_vaddr);

 			m_page_table.map_page_at(page_paddr, page_vaddr, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 		}
@@ -44,7 +44,7 @@ namespace Kernel

 		m_page_table.lock();

-		allocation.address = m_page_table.get_free_contiguous_pages(needed_pages, m_first_vaddr);
+		allocation.address = m_page_table.reserve_free_contiguous_pages(needed_pages, m_first_vaddr);
 		ASSERT(allocation.address);

 		for (size_t i = 0; i < needed_pages; i++)
@@ -40,12 +40,10 @@ namespace Kernel
 		m_used_pages = 0;
 		m_free_pages = m_reservable_pages;

-		PageTable::kernel().lock();
-		m_vaddr = PageTable::kernel().get_free_contiguous_pages(m_list_pages, ((vaddr_t)g_kernel_end + PAGE_SIZE - 1) & PAGE_ADDR_MASK);
+		m_vaddr = PageTable::kernel().reserve_free_contiguous_pages(m_list_pages, KERNEL_OFFSET);
 		ASSERT(m_vaddr);

 		PageTable::kernel().map_range_at(m_paddr, m_vaddr, m_list_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
-		PageTable::kernel().unlock();

 		// Initialize page list so that every page points to the next one
 		node* page_list = (node*)m_vaddr;
@@ -1,14 +1,16 @@
+#include <BAN/ScopeGuard.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/Memory/VirtualRange.h>

 namespace Kernel
 {

-	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create(PageTable& page_table, vaddr_t vaddr, size_t size, uint8_t flags)
+	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr(PageTable& page_table, vaddr_t vaddr, size_t size, uint8_t flags)
 	{
 		ASSERT(size % PAGE_SIZE == 0);
 		ASSERT(vaddr % PAGE_SIZE == 0);
+		ASSERT(vaddr > 0);

 		VirtualRange* result_ptr = new VirtualRange(page_table);
 		if (result_ptr == nullptr)
@@ -16,25 +18,13 @@ namespace Kernel
 		auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);

 		result->m_kmalloc = false;
+		result->m_vaddr = vaddr;
 		result->m_size = size;
 		result->m_flags = flags;
+		TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));

-		page_table.lock();
-
-		if (vaddr == 0)
-		{
-			vaddr = page_table.get_free_contiguous_pages(size / PAGE_SIZE, 0x300000);
-			if (vaddr == 0)
-			{
-				derrorln("out of virtual memory");
-				return BAN::Error::from_errno(ENOMEM);
-			}
-		}
-
-		result->m_vaddr = vaddr;
-
-		ASSERT(page_table.is_range_free(vaddr, size));
+		ASSERT(page_table.reserve_range(vaddr, size));
+		BAN::ScopeGuard unmapper([vaddr, size, &page_table] { page_table.unmap_range(vaddr, size); });

-		TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));
 		for (size_t offset = 0; offset < size; offset += PAGE_SIZE)
@@ -50,7 +40,60 @@ namespace Kernel
 			page_table.map_page_at(paddr, vaddr + offset, flags);
 		}

-		page_table.unlock();
+		unmapper.disable();

 		return result;
 	}

+	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_to_vaddr_range(PageTable& page_table, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t size, uint8_t flags)
+	{
+		ASSERT(size % PAGE_SIZE == 0);
+		ASSERT(vaddr_start > 0);
+
+		// Align vaddr range to page boundaries
+		if (size_t rem = vaddr_start % PAGE_SIZE)
+			vaddr_start += PAGE_SIZE - rem;
+		if (size_t rem = vaddr_end % PAGE_SIZE)
+			vaddr_end -= rem;
+		ASSERT(vaddr_start < vaddr_end);
+		ASSERT(vaddr_end - vaddr_start + 1 >= size / PAGE_SIZE);
+
+		VirtualRange* result_ptr = new VirtualRange(page_table);
+		if (result_ptr == nullptr)
+			return BAN::Error::from_errno(ENOMEM);
+		auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
+
+		result->m_kmalloc = false;
+		result->m_size = size;
+		result->m_flags = flags;
+		TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));
+
+		vaddr_t vaddr = page_table.reserve_free_contiguous_pages(size / PAGE_SIZE, vaddr_start, vaddr_end);
+		if (vaddr == 0)
+			return BAN::Error::from_errno(ENOMEM);
+		result->m_vaddr = vaddr;
+
+		BAN::ScopeGuard unmapper([vaddr, size, &page_table] { page_table.unmap_range(vaddr, size); });
+		if (vaddr + size > vaddr_end)
+			return BAN::Error::from_errno(ENOMEM);
+
+		result->m_vaddr = vaddr;
+
+		TRY(result->m_physical_pages.reserve(size / PAGE_SIZE));
+		for (size_t offset = 0; offset < size; offset += PAGE_SIZE)
+		{
+			paddr_t paddr = Heap::get().take_free_page();
+			if (paddr == 0)
+			{
+				for (paddr_t release : result->m_physical_pages)
+					Heap::get().release_page(release);
+				return BAN::Error::from_errno(ENOMEM);
+			}
+			MUST(result->m_physical_pages.push_back(paddr));
+			page_table.map_page_at(paddr, vaddr + offset, flags);
+		}
+
+		unmapper.disable();
+
+		return result;
+	}
@@ -92,7 +135,7 @@ namespace Kernel

 	BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::clone(PageTable& page_table)
 	{
-		auto result = TRY(create(page_table, vaddr(), size(), flags()));
+		auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags()));

 		m_page_table.lock();
@@ -521,7 +521,7 @@ namespace Kernel

 	{
 		LockGuard _(m_lock);
-		MUST(m_mapped_ranges.push_back(MUST(VirtualRange::create(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags))));
+		MUST(m_mapped_ranges.push_back(MUST(VirtualRange::create_to_vaddr(page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags))));
 		m_mapped_ranges.back()->set_zero();
 		m_mapped_ranges.back()->copy_from(elf_program_header.p_vaddr % PAGE_SIZE, elf.data() + elf_program_header.p_offset, elf_program_header.p_filesz);
 	}
@@ -6,8 +6,6 @@

 using namespace Kernel;

-extern uint8_t g_kernel_end[];
-
 VesaTerminalDriver* VesaTerminalDriver::create()
 {
 	if (!(g_multiboot_info->flags & MULTIBOOT_FLAGS_FRAMEBUFFER))
@@ -42,7 +40,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
 	uint64_t last_page = BAN::Math::div_round_up<uint64_t>(framebuffer.addr + framebuffer.pitch * framebuffer.height, PAGE_SIZE);
 	uint64_t needed_pages = last_page - first_page + 1;

-	vaddr_t vaddr = PageTable::kernel().get_free_contiguous_pages(needed_pages, (vaddr_t)g_kernel_end);
+	vaddr_t vaddr = PageTable::kernel().reserve_free_contiguous_pages(needed_pages, KERNEL_OFFSET);
 	ASSERT(vaddr);

 	PageTable::kernel().map_range_at(framebuffer.addr, vaddr, needed_pages * PAGE_SIZE, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
@@ -104,8 +104,8 @@ namespace Kernel

 		thread->m_is_userspace = true;

-		thread->m_stack = TRY(VirtualRange::create(process->page_table(), 0, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
-		thread->m_interrupt_stack = TRY(VirtualRange::create(process->page_table(), 0, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
+		thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));
+		thread->m_interrupt_stack = TRY(VirtualRange::create_to_vaddr_range(process->page_table(), 0x300000, KERNEL_OFFSET, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present));

 		thread->setup_exec();