Kernel: Rename MMU to PageTable
This is a more descriptive name for what it actually represents.
This commit is contained in:
parent 7151bb86a8
commit e640344d7a

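Below is a minimal sketch of how the renamed API reads after this commit. It is illustrative only: the names come from the diff itself, while `some_paddr` and `some_size` are hypothetical placeholder values, not part of the commit.

	// Hedged sketch, not part of the commit. Shows the renamed entry points:
	// PageTable::initialize()/kernel()/current() replace the old MMU statics,
	// and userspace tables come from a fallible factory instead of `new MMU()`.
	void page_table_usage_sketch()
	{
		// Boot-time setup (previously MMU::initialize()):
		PageTable::initialize();

		// Kernel mappings (previously MMU::kernel().identity_map_range(...)):
		PageTable::kernel().identity_map_range(some_paddr, some_size,
			PageTable::Flags::ReadWrite | PageTable::Flags::Present);

		// Userspace page tables: the public constructor is gone; create_userspace()
		// returns BAN::ErrorOr<PageTable*> so allocation failure can be reported.
		PageTable* table = MUST(PageTable::create_userspace());
		table->load();              // switch to this address space (loads CR3)
		PageTable::kernel().load(); // switch back to the kernel address space
		delete table;
	}
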
@@ -68,7 +68,7 @@ if("${BANAN_ARCH}" STREQUAL "x86_64")
 arch/x86_64/GDT.cpp
 arch/x86_64/IDT.cpp
 arch/x86_64/interrupts.S
-arch/x86_64/MMU.cpp
+arch/x86_64/PageTable.cpp
 arch/x86_64/Thread.S
 )
 elseif("${BANAN_ARCH}" STREQUAL "i386")

@@ -2,7 +2,7 @@
 #include <kernel/Arch.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/kmalloc.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>

 #define CLEANUP_STRUCTURE(s) \
 do { \
@@ -17,25 +17,25 @@ extern uint8_t g_kernel_end[];
 namespace Kernel
 {

-static MMU* s_kernel = nullptr;
-static MMU* s_current = nullptr;
+static PageTable* s_kernel = nullptr;
+static PageTable* s_current = nullptr;

-void MMU::initialize()
+void PageTable::initialize()
 {
 ASSERT(s_kernel == nullptr);
-s_kernel = new MMU();
+s_kernel = new PageTable();
 ASSERT(s_kernel);
 s_kernel->initialize_kernel();
 s_kernel->load();
 }

-MMU& MMU::kernel()
+PageTable& PageTable::kernel()
 {
 ASSERT(s_kernel);
 return *s_kernel;
 }

-MMU& MMU::current()
+PageTable& PageTable::current()
 {
 ASSERT(s_current);
 return *s_current;
@@ -49,7 +49,7 @@ namespace Kernel
 return (uint64_t*)page;
 }

-void MMU::initialize_kernel()
+void PageTable::initialize_kernel()
 {
 m_highest_paging_struct = allocate_page_aligned_page();
 memset(m_highest_paging_struct, 0, PAGE_SIZE);
@@ -59,11 +59,8 @@ namespace Kernel
 identity_map_range(PAGE_SIZE, (uintptr_t)g_kernel_end, Flags::ReadWrite | Flags::Present);
 }

-MMU::MMU()
+BAN::ErrorOr<PageTable*> PageTable::create_userspace()
 {
-if (s_kernel == nullptr)
-return;
-
 // Here we copy the s_kernel paging structs since they are
 // global for every process

@@ -107,10 +104,14 @@ namespace Kernel
 }
 }

-m_highest_paging_struct = pml4;
+PageTable* result = new PageTable;
+if (result == nullptr)
+return BAN::Error::from_errno(ENOMEM);
+result->m_highest_paging_struct = pml4;
+return result;
 }

-MMU::~MMU()
+PageTable::~PageTable()
 {
 uint64_t* pml4 = m_highest_paging_struct;
 for (uint32_t pml4e = 0; pml4e < 512; pml4e++)
@@ -136,25 +137,25 @@ namespace Kernel
 kfree(pml4);
 }

-void MMU::load()
+void PageTable::load()
 {
 asm volatile("movq %0, %%cr3" :: "r"(m_highest_paging_struct));
 s_current = this;
 }

-void MMU::invalidate(vaddr_t vaddr)
+void PageTable::invalidate(vaddr_t vaddr)
 {
 ASSERT(this == s_current);
 asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
 }

-void MMU::identity_map_page(paddr_t address, flags_t flags)
+void PageTable::identity_map_page(paddr_t address, flags_t flags)
 {
 address &= PAGE_ADDR_MASK;
 map_page_at(address, address, flags);
 }

-void MMU::identity_map_range(paddr_t address, size_t size, flags_t flags)
+void PageTable::identity_map_range(paddr_t address, size_t size, flags_t flags)
 {
 LockGuard _(m_lock);

@@ -164,7 +165,7 @@ namespace Kernel
 identity_map_page(page * PAGE_SIZE, flags);
 }

-void MMU::unmap_page(vaddr_t address)
+void PageTable::unmap_page(vaddr_t address)
 {
 LockGuard _(m_lock);

@@ -197,7 +198,7 @@ namespace Kernel
 pml4[pml4e] = 0;
 }

-void MMU::unmap_range(vaddr_t address, size_t size)
+void PageTable::unmap_range(vaddr_t address, size_t size)
 {
 LockGuard _(m_lock);

@@ -207,7 +208,7 @@ namespace Kernel
 unmap_page(page * PAGE_SIZE);
 }

-void MMU::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags)
+void PageTable::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags)
 {
 LockGuard _(m_lock);

@@ -252,7 +253,7 @@ namespace Kernel
 pt[pte] = paddr | flags;
 }

-uint64_t MMU::get_page_data(vaddr_t address) const
+uint64_t PageTable::get_page_data(vaddr_t address) const
 {
 LockGuard _(m_lock);

@@ -283,17 +284,17 @@ namespace Kernel
 return pt[pte];
 }

-MMU::flags_t MMU::get_page_flags(vaddr_t addr) const
+PageTable::flags_t PageTable::get_page_flags(vaddr_t addr) const
 {
 return get_page_data(addr) & PAGE_FLAG_MASK;
 }

-paddr_t MMU::physical_address_of(vaddr_t addr) const
+paddr_t PageTable::physical_address_of(vaddr_t addr) const
 {
 return get_page_data(addr) & PAGE_ADDR_MASK;
 }

-vaddr_t MMU::get_free_page() const
+vaddr_t PageTable::get_free_page() const
 {
 LockGuard _(m_lock);

@@ -343,7 +344,7 @@ namespace Kernel
 ASSERT_NOT_REACHED();
 }

-vaddr_t MMU::get_free_contiguous_pages(size_t page_count) const
+vaddr_t PageTable::get_free_contiguous_pages(size_t page_count) const
 {
 LockGuard _(m_lock);

@@ -366,13 +367,13 @@ namespace Kernel
 ASSERT_NOT_REACHED();
 }

-bool MMU::is_page_free(vaddr_t page) const
+bool PageTable::is_page_free(vaddr_t page) const
 {
 ASSERT(page % PAGE_SIZE == 0);
 return !(get_page_flags(page) & Flags::Present);
 }

-bool MMU::is_range_free(vaddr_t start, size_t size) const
+bool PageTable::is_range_free(vaddr_t start, size_t size) const
 {
 LockGuard _(m_lock);

@@ -1,7 +1,7 @@
 #pragma once

 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>

 namespace Kernel
 {
@@ -12,10 +12,10 @@ namespace Kernel
 BAN_NON_MOVABLE(FixedWidthAllocator);

 public:
-FixedWidthAllocator(MMU&, uint32_t);
+FixedWidthAllocator(PageTable&, uint32_t);
 ~FixedWidthAllocator();

-BAN::ErrorOr<FixedWidthAllocator*> clone(MMU&);
+BAN::ErrorOr<FixedWidthAllocator*> clone(PageTable&);

 vaddr_t allocate();
 bool deallocate(vaddr_t);
@@ -44,7 +44,7 @@ namespace Kernel
 private:
 static constexpr uint32_t m_min_allocation_size = 16;

-MMU& m_mmu;
+PageTable& m_page_table;
 const uint32_t m_allocation_size;

 vaddr_t m_nodes_page { 0 };

@@ -2,7 +2,7 @@

 #include <BAN/LinkedList.h>
 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>

 namespace Kernel
 {
@@ -13,13 +13,13 @@ namespace Kernel
 BAN_NON_MOVABLE(GeneralAllocator);

 public:
-GeneralAllocator(MMU&);
+GeneralAllocator(PageTable&);
 ~GeneralAllocator();

 vaddr_t allocate(size_t);
 bool deallocate(vaddr_t);

-BAN::ErrorOr<GeneralAllocator*> clone(MMU&);
+BAN::ErrorOr<GeneralAllocator*> clone(PageTable&);

 private:
 struct Allocation
@@ -29,7 +29,7 @@ namespace Kernel
 };

 private:
-MMU& m_mmu;
+PageTable& m_page_table;
 BAN::LinkedList<Allocation> m_allocations;
 };

@@ -1,12 +1,13 @@
 #pragma once

+#include <BAN/Errors.h>
 #include <kernel/Memory/Types.h>
 #include <kernel/SpinLock.h>

 namespace Kernel
 {

-class MMU
+class PageTable
 {
 public:
 using flags_t = uint8_t;
@@ -19,12 +20,12 @@ namespace Kernel

 public:
 static void initialize();
-static MMU& kernel();
+static PageTable& kernel();

-static MMU& current();
+static PageTable& current();

-MMU();
-~MMU();
+static BAN::ErrorOr<PageTable*> create_userspace();
+~PageTable();

 void identity_map_page(paddr_t, flags_t);
 void identity_map_range(paddr_t, size_t bytes, flags_t);
@@ -50,6 +51,7 @@ namespace Kernel
 void unlock() const { m_lock.unlock(); }

 private:
+PageTable() = default;
 uint64_t get_page_data(vaddr_t) const;
 void initialize_kernel();

@@ -1,30 +1,30 @@
 #pragma once

 #include <kernel/CriticalScope.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>

 namespace Kernel
 {

-class MMUScope
+class PageTableScope
 {
 public:
-MMUScope(MMU& mmu)
-: m_old(MMU::current())
-, m_temp(mmu)
+PageTableScope(PageTable& page_table)
+: m_old(PageTable::current())
+, m_temp(page_table)
 {
 if (&m_old != &m_temp)
 m_temp.load();
 }
-~MMUScope()
+~PageTableScope()
 {
 if (&m_old != &m_temp)
 m_old.load();
 }
 private:
 CriticalScope m_scope;
-MMU& m_old;
-MMU& m_temp;
+PageTable& m_old;
+PageTable& m_temp;
 };

 }

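The PageTableScope guard above (the renamed MMUScope) loads the given page table in its constructor and restores the previously current one in its destructor. A simplified sketch of the pattern the allocators in this commit use with it, adapted from FixedWidthAllocator::clone(); `dst_table` and `page_begin` are placeholders for illustration:

	// Hedged sketch: copy one page from the current address space into the page
	// that `dst_table` maps at `page_begin`, by temporarily mapping the
	// destination's physical page at virtual address 0 in the source table.
	{
		PageTableScope _(m_page_table);
		ASSERT(m_page_table.is_page_free(0));

		paddr_t paddr = dst_table.physical_address_of(page_begin);
		m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		m_page_table.invalidate(0);
		memcpy((void*)0, (void*)page_begin, PAGE_SIZE);
		m_page_table.unmap_page(0);
		m_page_table.invalidate(0);
	} // PageTableScope restores the previously loaded page table here
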
@@ -2,8 +2,7 @@

 #include <BAN/Vector.h>
 #include <BAN/NoCopyMove.h>
-#include <kernel/Memory/MMU.h>
-#include <kernel/Memory/Types.h>
+#include <kernel/Memory/PageTable.h>

 namespace Kernel
 {
@@ -14,21 +13,21 @@ namespace Kernel
 BAN_NON_MOVABLE(VirtualRange);

 public:
-static VirtualRange* create(MMU&, vaddr_t, size_t, uint8_t flags);
+static VirtualRange* create(PageTable&, vaddr_t, size_t, uint8_t flags);
 static VirtualRange* create_kmalloc(size_t);
 ~VirtualRange();

-VirtualRange* clone(MMU& new_mmu);
+VirtualRange* clone(PageTable&);

 vaddr_t vaddr() const { return m_vaddr; }
 size_t size() const { return m_size; }
 uint8_t flags() const { return m_flags; }

 private:
-VirtualRange(MMU&);
+VirtualRange(PageTable&);

 private:
-MMU& m_mmu;
+PageTable& m_page_table;
 vaddr_t m_vaddr { 0 };
 size_t m_size { 0 };
 uint8_t m_flags { 0 };

@@ -7,7 +7,6 @@
 #include <kernel/Memory/FixedWidthAllocator.h>
 #include <kernel/Memory/GeneralAllocator.h>
 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/MMU.h>
 #include <kernel/Memory/VirtualRange.h>
 #include <kernel/SpinLock.h>
 #include <kernel/Terminal/TTY.h>
@@ -70,7 +69,7 @@ namespace Kernel

 static Process& current() { return Thread::current().process(); }

-MMU& mmu() { return m_mmu ? *m_mmu : MMU::kernel(); }
+PageTable& page_table() { return m_page_table ? *m_page_table : PageTable::kernel(); }

 private:
 Process(pid_t);
@@ -104,7 +103,7 @@ namespace Kernel
 BAN::Vector<FixedWidthAllocator*> m_fixed_width_allocators;
 GeneralAllocator* m_general_allocator;

-MMU* m_mmu { nullptr };
+PageTable* m_page_table { nullptr };
 TTY* m_tty { nullptr };
 };

@@ -1,7 +1,7 @@
 #include <BAN/ScopeGuard.h>
 #include <BAN/StringView.h>
 #include <kernel/ACPI.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>

 #define RSPD_SIZE 20
 #define RSPDv2_SIZE 36
@@ -105,8 +105,8 @@ namespace Kernel
 if (rsdp->revision >= 2)
 {
 const XSDT* xsdt = (const XSDT*)rsdp->xsdt_address;
-MMU::kernel().identity_map_page((uintptr_t)xsdt, MMU::Flags::Present);
-BAN::ScopeGuard _([xsdt] { MMU::kernel().unmap_page((uintptr_t)xsdt); });
+PageTable::kernel().identity_map_page((uintptr_t)xsdt, PageTable::Flags::Present);
+BAN::ScopeGuard _([xsdt] { PageTable::kernel().unmap_page((uintptr_t)xsdt); });

 if (memcmp(xsdt->signature, "XSDT", 4) != 0)
 return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
@@ -120,8 +120,8 @@ namespace Kernel
 else
 {
 const RSDT* rsdt = (const RSDT*)(uintptr_t)rsdp->rsdt_address;
-MMU::kernel().identity_map_page((uintptr_t)rsdt, MMU::Flags::Present);
-BAN::ScopeGuard _([rsdt] { MMU::kernel().unmap_page((uintptr_t)rsdt); });
+PageTable::kernel().identity_map_page((uintptr_t)rsdt, PageTable::Flags::Present);
+BAN::ScopeGuard _([rsdt] { PageTable::kernel().unmap_page((uintptr_t)rsdt); });

 if (memcmp(rsdt->signature, "RSDT", 4) != 0)
 return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
@@ -133,13 +133,13 @@ namespace Kernel
 m_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
 }

-MMU::kernel().identity_map_range(m_header_table, m_entry_count * m_entry_size, MMU::Flags::Present);
+PageTable::kernel().identity_map_range(m_header_table, m_entry_count * m_entry_size, PageTable::Flags::Present);

 for (uint32_t i = 0; i < m_entry_count; i++)
 {
 auto* header = get_header_from_index(i);
-MMU::kernel().identity_map_page((uintptr_t)header, MMU::Flags::Present);
-MMU::kernel().identity_map_range((uintptr_t)header, header->length, MMU::Flags::Present);
+PageTable::kernel().identity_map_page((uintptr_t)header, PageTable::Flags::Present);
+PageTable::kernel().identity_map_range((uintptr_t)header, header->length, PageTable::Flags::Present);
 }

 return {};

@@ -4,7 +4,7 @@
 #include <kernel/APIC.h>
 #include <kernel/CPUID.h>
 #include <kernel/IDT.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>

 #include <string.h>

@@ -146,10 +146,10 @@ APIC* APIC::create()
 return nullptr;
 }

-MMU::kernel().identity_map_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
+PageTable::kernel().identity_map_page(apic->m_local_apic, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 for (auto& io_apic : apic->m_io_apics)
 {
-MMU::kernel().identity_map_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
+PageTable::kernel().identity_map_page(io_apic.address, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
 }

@@ -1,11 +1,11 @@
 #include <kernel/Memory/FixedWidthAllocator.h>
-#include <kernel/Memory/MMUScope.h>
+#include <kernel/Memory/PageTableScope.h>

 namespace Kernel
 {

-FixedWidthAllocator::FixedWidthAllocator(MMU& mmu, uint32_t allocation_size)
-: m_mmu(mmu)
+FixedWidthAllocator::FixedWidthAllocator(PageTable& page_table, uint32_t allocation_size)
+: m_page_table(page_table)
 , m_allocation_size(BAN::Math::max(allocation_size, m_min_allocation_size))
 {
 ASSERT(BAN::Math::is_power_of_two(allocation_size));
@@ -40,9 +40,9 @@ namespace Kernel
 if (page_vaddr == 0)
 continue;

-ASSERT(!m_mmu.is_page_free(page_vaddr));
-Heap::get().release_page(m_mmu.physical_address_of(page_vaddr));
-m_mmu.unmap_page(page_vaddr);
+ASSERT(!m_page_table.is_page_free(page_vaddr));
+Heap::get().release_page(m_page_table.physical_address_of(page_vaddr));
+m_page_table.unmap_page(page_vaddr);
 }

 kfree((void*)m_nodes_page);
@@ -187,8 +187,8 @@ namespace Kernel
 paddr_t page_paddr = Heap::get().take_free_page();
 ASSERT(page_paddr);

-page_vaddr = m_mmu.get_free_page();
-m_mmu.map_page_at(page_paddr, page_vaddr, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
+page_vaddr = m_page_table.get_free_page();
+m_page_table.map_page_at(page_paddr, page_vaddr, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 }

 bool FixedWidthAllocator::allocate_page_if_needed(vaddr_t vaddr, uint8_t flags)
@@ -204,7 +204,7 @@ namespace Kernel
 }

 // Page is not allocated so the vaddr must not be in use
-ASSERT(m_mmu.is_page_free(vaddr));
+ASSERT(m_page_table.is_page_free(vaddr));

 // Allocate the vaddr on empty page
 for (uint32_t page_index = 0; page_index < PAGE_SIZE / sizeof(vaddr_t); page_index++)
@@ -214,7 +214,7 @@ namespace Kernel
 {
 paddr_t paddr = Heap::get().take_free_page();
 ASSERT(paddr);
-m_mmu.map_page_at(paddr, vaddr, flags);
+m_page_table.map_page_at(paddr, vaddr, flags);
 page_begin = vaddr;
 return true;
 }
@@ -223,14 +223,14 @@ namespace Kernel
 ASSERT_NOT_REACHED();
 }

-BAN::ErrorOr<FixedWidthAllocator*> FixedWidthAllocator::clone(MMU& new_mmu)
+BAN::ErrorOr<FixedWidthAllocator*> FixedWidthAllocator::clone(PageTable& new_page_table)
 {
-FixedWidthAllocator* allocator = new FixedWidthAllocator(new_mmu, allocation_size());
+FixedWidthAllocator* allocator = new FixedWidthAllocator(new_page_table, allocation_size());
 if (allocator == nullptr)
 return BAN::Error::from_errno(ENOMEM);

-MMUScope _(m_mmu);
-ASSERT(m_mmu.is_page_free(0));
+PageTableScope _(m_page_table);
+ASSERT(m_page_table.is_page_free(0));

 for (node* node = m_used_list; node; node = node->next)
 {
@@ -238,14 +238,14 @@ namespace Kernel

 vaddr_t vaddr = address_of_node(node);
 vaddr_t page_begin = vaddr & PAGE_ADDR_MASK;
-uint8_t flags = m_mmu.get_page_flags(page_begin);
+uint8_t flags = m_page_table.get_page_flags(page_begin);

 // Allocate and copy all data from this allocation to the new one
 if (allocator->allocate_page_if_needed(page_begin, flags))
 {
-paddr_t paddr = new_mmu.physical_address_of(page_begin);
-m_mmu.map_page_at(paddr, 0, MMU::Flags::ReadWrite | MMU::Flags::Present);
-m_mmu.invalidate(0);
+paddr_t paddr = new_page_table.physical_address_of(page_begin);
+m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
+m_page_table.invalidate(0);
 memcpy((void*)0, (void*)page_begin, PAGE_SIZE);
 }

@@ -254,8 +254,8 @@ namespace Kernel
 allocator->allocate_node(new_node);
 }

-m_mmu.unmap_page(0);
-m_mmu.invalidate(0);
+m_page_table.unmap_page(0);
+m_page_table.invalidate(0);

 return allocator;
 }

@@ -1,12 +1,12 @@
 #include <kernel/Memory/GeneralAllocator.h>
-#include <kernel/Memory/MMUScope.h>
+#include <kernel/Memory/PageTableScope.h>
 #include <kernel/Process.h>

 namespace Kernel
 {

-GeneralAllocator::GeneralAllocator(MMU& mmu)
-: m_mmu(mmu)
+GeneralAllocator::GeneralAllocator(PageTable& page_table)
+: m_page_table(page_table)
 { }

 GeneralAllocator::~GeneralAllocator()
@@ -35,9 +35,9 @@ namespace Kernel
 allocation.pages[i] = paddr;
 }

-allocation.address = m_mmu.get_free_contiguous_pages(needed_pages);
+allocation.address = m_page_table.get_free_contiguous_pages(needed_pages);
 for (size_t i = 0; i < needed_pages; i++)
-m_mmu.map_page_at(allocation.pages[i], allocation.address + i * PAGE_SIZE, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
+m_page_table.map_page_at(allocation.pages[i], allocation.address + i * PAGE_SIZE, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);

 MUST(m_allocations.push_back(BAN::move(allocation)));
 return allocation.address;
@@ -50,7 +50,7 @@ namespace Kernel
 if (it->address != address)
 continue;

-m_mmu.unmap_range(it->address, it->pages.size() * PAGE_SIZE);
+m_page_table.unmap_range(it->address, it->pages.size() * PAGE_SIZE);
 for (auto paddr : it->pages)
 Heap::get().release_page(paddr);

@@ -62,24 +62,24 @@ namespace Kernel
 return false;
 }

-BAN::ErrorOr<GeneralAllocator*> GeneralAllocator::clone(MMU& new_mmu)
+BAN::ErrorOr<GeneralAllocator*> GeneralAllocator::clone(PageTable& new_page_table)
 {
-GeneralAllocator* allocator = new GeneralAllocator(new_mmu);
+GeneralAllocator* allocator = new GeneralAllocator(new_page_table);
 if (allocator == nullptr)
 return BAN::Error::from_errno(ENOMEM);

-MMUScope _(m_mmu);
-ASSERT(m_mmu.is_page_free(0));
+PageTableScope _(m_page_table);
+ASSERT(m_page_table.is_page_free(0));

 for (auto& allocation : m_allocations)
 {
 Allocation new_allocation;
-ASSERT(new_mmu.is_range_free(allocation.address, allocation.pages.size() * PAGE_SIZE));
+ASSERT(new_page_table.is_range_free(allocation.address, allocation.pages.size() * PAGE_SIZE));

 new_allocation.address = allocation.address;
 MUST(new_allocation.pages.reserve(allocation.pages.size()));

-uint8_t flags = m_mmu.get_page_flags(allocation.address);
+uint8_t flags = m_page_table.get_page_flags(allocation.address);
 for (size_t i = 0; i < allocation.pages.size(); i++)
 {
 paddr_t paddr = Heap::get().take_free_page();
@@ -88,17 +88,17 @@ namespace Kernel
 vaddr_t vaddr = allocation.address + i * PAGE_SIZE;

 MUST(new_allocation.pages.push_back(paddr));
-new_mmu.map_page_at(paddr, vaddr, flags);
+new_page_table.map_page_at(paddr, vaddr, flags);

-m_mmu.map_page_at(paddr, 0, MMU::Flags::ReadWrite | MMU::Flags::Present);
-m_mmu.invalidate(0);
+m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
+m_page_table.invalidate(0);
 memcpy((void*)0, (void*)vaddr, PAGE_SIZE);
 }

 MUST(allocator->m_allocations.push_back(BAN::move(new_allocation)));
 }
-m_mmu.unmap_page(0);
-m_mmu.invalidate(0);
+m_page_table.unmap_page(0);
+m_page_table.invalidate(0);

 return allocator;
 }

@@ -1,6 +1,6 @@
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>
 #include <kernel/multiboot.h>

 namespace Kernel

@@ -1,6 +1,6 @@
 #include <BAN/Assert.h>
 #include <BAN/Math.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>
 #include <kernel/Memory/PhysicalRange.h>

 extern uint8_t g_kernel_end[];
@@ -37,7 +37,7 @@ namespace Kernel
 m_list_pages = BAN::Math::div_round_up<uint64_t>(m_total_pages * sizeof(node), PAGE_SIZE);
 m_reservable_pages = m_total_pages - m_list_pages;

-MMU::kernel().identity_map_range(m_start, m_list_pages * PAGE_SIZE, MMU::Flags::ReadWrite | MMU::Flags::Present);
+PageTable::kernel().identity_map_range(m_start, m_list_pages * PAGE_SIZE, PageTable::Flags::ReadWrite | PageTable::Flags::Present);

 // Initialize page list so that every page points to the next one
 node* page_list = (node*)m_start;

@@ -1,53 +1,53 @@
 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/MMUScope.h>
+#include <kernel/Memory/PageTableScope.h>
 #include <kernel/Memory/VirtualRange.h>

 namespace Kernel
 {

-VirtualRange* VirtualRange::create(MMU& mmu, vaddr_t vaddr, size_t size, uint8_t flags)
+VirtualRange* VirtualRange::create(PageTable& page_table, vaddr_t vaddr, size_t size, uint8_t flags)
 {
 ASSERT(size % PAGE_SIZE == 0);
 ASSERT(vaddr % PAGE_SIZE == 0);
-ASSERT(&mmu != &MMU::kernel());
+ASSERT(&page_table != &PageTable::kernel());

-VirtualRange* result = new VirtualRange(mmu);
+VirtualRange* result = new VirtualRange(page_table);
 ASSERT(result);

 result->m_size = size;
 result->m_flags = flags;
 MUST(result->m_physical_pages.reserve(size / PAGE_SIZE));

-mmu.lock();
+page_table.lock();

 if (vaddr == 0)
 {
-vaddr = mmu.get_free_contiguous_pages(size / PAGE_SIZE);
+vaddr = page_table.get_free_contiguous_pages(size / PAGE_SIZE);
 ASSERT(vaddr);
 }

 result->m_vaddr = vaddr;

-ASSERT(mmu.is_range_free(vaddr, size));
+ASSERT(page_table.is_range_free(vaddr, size));
 for (size_t offset = 0; offset < size; offset += PAGE_SIZE)
 {
 paddr_t paddr = Heap::get().take_free_page();
 ASSERT(paddr);
 MUST(result->m_physical_pages.push_back(paddr));
-mmu.map_page_at(paddr, vaddr + offset, flags);
+page_table.map_page_at(paddr, vaddr + offset, flags);
 }
-mmu.unlock();
+page_table.unlock();

 return result;
 }

 VirtualRange* VirtualRange::create_kmalloc(size_t size)
 {
-VirtualRange* result = new VirtualRange(MMU::kernel());
+VirtualRange* result = new VirtualRange(PageTable::kernel());
 if (result == nullptr)
 return nullptr;
 result->m_size = size;
-result->m_flags = MMU::Flags::ReadWrite | MMU::Flags::Present;
+result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
 result->m_vaddr = (vaddr_t)kmalloc(size);
 if (result->m_vaddr == 0)
 {
@@ -57,37 +57,37 @@ namespace Kernel
 return result;
 }

-VirtualRange::VirtualRange(MMU& mmu)
-: m_mmu(mmu)
+VirtualRange::VirtualRange(PageTable& page_table)
+: m_page_table(page_table)
 { }

 VirtualRange::~VirtualRange()
 {
-if (&m_mmu == &MMU::kernel())
+if (&m_page_table == &PageTable::kernel())
 {
 kfree((void*)m_vaddr);
 return;
 }

-m_mmu.unmap_range(vaddr(), size());
+m_page_table.unmap_range(vaddr(), size());
 for (paddr_t page : m_physical_pages)
 Heap::get().release_page(page);
 }

-VirtualRange* VirtualRange::clone(MMU& mmu)
+VirtualRange* VirtualRange::clone(PageTable& page_table)
 {
-VirtualRange* result = create(mmu, vaddr(), size(), flags());
+VirtualRange* result = create(page_table, vaddr(), size(), flags());

-MMUScope _(m_mmu);
-ASSERT(m_mmu.is_page_free(0));
+PageTableScope _(m_page_table);
+ASSERT(m_page_table.is_page_free(0));
 for (size_t i = 0; i < result->m_physical_pages.size(); i++)
 {
-m_mmu.map_page_at(result->m_physical_pages[i], 0, MMU::Flags::ReadWrite | MMU::Flags::Present);
-m_mmu.invalidate(0);
+m_page_table.map_page_at(result->m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
+m_page_table.invalidate(0);
 memcpy((void*)0, (void*)(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
 }
-m_mmu.unmap_page(0);
-m_mmu.invalidate(0);
+m_page_table.unmap_page(0);
+m_page_table.invalidate(0);

 return result;
 }

@@ -3,7 +3,7 @@
 #include <kernel/FS/VirtualFileSystem.h>
 #include <kernel/LockGuard.h>
 #include <kernel/Memory/Heap.h>
-#include <kernel/Memory/MMUScope.h>
+#include <kernel/Memory/PageTableScope.h>
 #include <kernel/Process.h>
 #include <kernel/Scheduler.h>
 #include <LibELF/ELF.h>
@@ -56,8 +56,7 @@ namespace Kernel

 auto* process = create_process();
 MUST(process->m_working_directory.push_back('/'));
-process->m_mmu = new MMU();
-ASSERT(process->m_mmu);
+process->m_page_table = MUST(PageTable::create_userspace());

 auto& elf_file_header = elf->file_header_native();
 for (size_t i = 0; i < elf_file_header.e_phnum; i++)
@@ -71,18 +70,18 @@ namespace Kernel
 case LibELF::PT_LOAD:
 {
 // TODO: Do some relocations or map kernel to higher half?
-ASSERT(process->mmu().is_range_free(elf_program_header.p_vaddr, elf_program_header.p_memsz));
-MMU::flags_t flags = MMU::Flags::UserSupervisor | MMU::Flags::Present;
+ASSERT(process->page_table().is_range_free(elf_program_header.p_vaddr, elf_program_header.p_memsz));
+uint8_t flags = PageTable::Flags::UserSupervisor | PageTable::Flags::Present;
 if (elf_program_header.p_flags & LibELF::PF_W)
-flags |= MMU::Flags::ReadWrite;
+flags |= PageTable::Flags::ReadWrite;
 size_t page_start = elf_program_header.p_vaddr / PAGE_SIZE;
 size_t page_end = BAN::Math::div_round_up<size_t>(elf_program_header.p_vaddr + elf_program_header.p_memsz, PAGE_SIZE);

 size_t page_count = page_end - page_start + 1;
-MUST(process->m_mapped_ranges.push_back(VirtualRange::create(process->mmu(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags)));
+MUST(process->m_mapped_ranges.push_back(VirtualRange::create(process->page_table(), page_start * PAGE_SIZE, page_count * PAGE_SIZE, flags)));

 {
-MMUScope _(process->mmu());
+PageTableScope _(process->page_table());
 memcpy((void*)elf_program_header.p_vaddr, elf->data() + elf_program_header.p_offset, elf_program_header.p_filesz);
 memset((void*)(elf_program_header.p_vaddr + elf_program_header.p_filesz), 0, elf_program_header.p_memsz - elf_program_header.p_filesz);
 }
@@ -95,7 +94,7 @@ namespace Kernel

 char** argv = nullptr;
 {
-MMUScope _(process->mmu());
+PageTableScope _(process->page_table());
 argv = (char**)MUST(process->allocate(sizeof(char**) * 1));
 argv[0] = (char*)MUST(process->allocate(path.size() + 1));
 memcpy(argv[0], path.data(), path.size());
@@ -122,10 +121,10 @@ namespace Kernel
 ASSERT(m_fixed_width_allocators.empty());
 ASSERT(m_general_allocator == nullptr);
 ASSERT(m_mapped_ranges.empty());
-if (m_mmu)
+if (m_page_table)
 {
-MMU::kernel().load();
-delete m_mmu;
+PageTable::kernel().load();
+delete m_page_table;
 }

 dprintln("process {} exit", pid());
@@ -155,12 +154,12 @@ namespace Kernel
 for (auto& open_fd : m_open_files)
 open_fd.inode = nullptr;

-// NOTE: We must unmap ranges while the mmu is still alive
+// NOTE: We must unmap ranges while the page table is still alive
 for (auto* range : m_mapped_ranges)
 delete range;
 m_mapped_ranges.clear();

-// NOTE: We must clear allocators while the mmu is still alive
+// NOTE: We must clear allocators while the page table is still alive
 m_fixed_width_allocators.clear();
 if (m_general_allocator)
 {
@@ -192,8 +191,7 @@ namespace Kernel
 {
 Process* forked = create_process();

-forked->m_mmu = new MMU();
-ASSERT(forked->m_mmu);
+forked->m_page_table = MUST(PageTable::create_userspace());

 LockGuard _(m_lock);
 forked->m_tty = m_tty;
@@ -202,16 +200,16 @@ namespace Kernel
 forked->m_open_files = m_open_files;

 for (auto* mapped_range : m_mapped_ranges)
-MUST(forked->m_mapped_ranges.push_back(mapped_range->clone(forked->mmu())));
+MUST(forked->m_mapped_ranges.push_back(mapped_range->clone(forked->page_table())));

 ASSERT(m_threads.size() == 1);
 ASSERT(m_threads.front() == &Thread::current());

 for (auto& allocator : m_fixed_width_allocators)
-MUST(forked->m_fixed_width_allocators.push_back(MUST(allocator->clone(forked->mmu()))));
+MUST(forked->m_fixed_width_allocators.push_back(MUST(allocator->clone(forked->page_table()))));

 if (m_general_allocator)
-forked->m_general_allocator = MUST(m_general_allocator->clone(forked->mmu()));
+forked->m_general_allocator = MUST(m_general_allocator->clone(forked->page_table()));

 Thread* thread = MUST(m_threads.front()->clone(forked, rsp, rip));
 forked->add_thread(thread);
@@ -482,7 +480,7 @@ namespace Kernel

 if (needs_new_allocator)
 {
-auto* allocator = new FixedWidthAllocator(mmu(), allocation_size);
+auto* allocator = new FixedWidthAllocator(page_table(), allocation_size);
 if (allocator == nullptr)
 return BAN::Error::from_errno(ENOMEM);
 TRY(m_fixed_width_allocators.push_back(allocator));
@@ -495,7 +493,7 @@ namespace Kernel

 if (!m_general_allocator)
 {
-m_general_allocator = new GeneralAllocator(mmu());
+m_general_allocator = new GeneralAllocator(page_table());
 if (m_general_allocator == nullptr)
 return BAN::Error::from_errno(ENOMEM);
 }

@@ -185,11 +185,11 @@ namespace Kernel

 if (current.has_process())
 {
-current.process().mmu().load();
+current.process().page_table().load();
 GDT::set_tss_stack(current.interrupt_stack_base() + current.interrupt_stack_size());
 }
 else
-MMU::kernel().load();
+PageTable::kernel().load();

 switch (current.state())
 {

@@ -1,6 +1,6 @@
 #include <BAN/Errors.h>
 #include <kernel/Debug.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>
 #include <kernel/multiboot.h>
 #include <kernel/Terminal/VesaTerminalDriver.h>

@@ -36,7 +36,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
 return nullptr;
 }

-MMU::kernel().identity_map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
+PageTable::kernel().identity_map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);

 auto* driver = new VesaTerminalDriver(
 framebuffer.width,
@@ -53,7 +53,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()

 VesaTerminalDriver::~VesaTerminalDriver()
 {
-MMU::kernel().unmap_range(m_address, m_pitch * m_height);
+PageTable::kernel().unmap_range(m_address, m_pitch * m_height);
 }

 void VesaTerminalDriver::set_pixel(uint32_t offset, Color color)

@@ -1,7 +1,7 @@
 #include <BAN/Errors.h>
 #include <kernel/InterruptController.h>
 #include <kernel/Memory/kmalloc.h>
-#include <kernel/Memory/MMUScope.h>
+#include <kernel/Memory/PageTableScope.h>
 #include <kernel/Process.h>
 #include <kernel/Scheduler.h>
 #include <kernel/Thread.h>
@@ -54,7 +54,7 @@ namespace Kernel
 thread->m_is_userspace = true;

 // Allocate stack
-thread->m_stack = VirtualRange::create(process->mmu(), 0, m_userspace_stack_size, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
+thread->m_stack = VirtualRange::create(process->page_table(), 0, m_userspace_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 if (thread->m_stack == nullptr)
 {
 delete thread;
@@ -62,7 +62,7 @@ namespace Kernel
 }

 // Allocate interrupt stack
-thread->m_interrupt_stack = VirtualRange::create(process->mmu(), 0, m_interrupt_stack_size, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
+thread->m_interrupt_stack = VirtualRange::create(process->page_table(), 0, m_interrupt_stack_size, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
 if (thread->m_interrupt_stack == nullptr)
 {
 delete thread;
@@ -85,7 +85,7 @@ namespace Kernel

 // Setup stack for returning
 {
-MMUScope _(process->mmu());
+PageTableScope _(process->page_table());
 write_to_stack<sizeof(void*)>(thread->m_rsp, thread);
 write_to_stack<sizeof(void*)>(thread->m_rsp, &Thread::on_exit);
 write_to_stack<sizeof(void*)>(thread->m_rsp, nullptr);
@@ -131,8 +131,8 @@ namespace Kernel
 return BAN::Error::from_errno(ENOMEM);
 thread->m_is_userspace = true;

-thread->m_interrupt_stack = m_interrupt_stack->clone(new_process->mmu());
-thread->m_stack = m_stack->clone(new_process->mmu());
+thread->m_interrupt_stack = m_interrupt_stack->clone(new_process->page_table());
+thread->m_stack = m_stack->clone(new_process->page_table());

 thread->m_state = State::Executing;
 thread->m_in_syscall = true;

@@ -10,7 +10,7 @@
 #include <kernel/kprint.h>
 #include <kernel/Memory/Heap.h>
 #include <kernel/Memory/kmalloc.h>
-#include <kernel/Memory/MMU.h>
+#include <kernel/Memory/PageTable.h>
 #include <kernel/multiboot.h>
 #include <kernel/PCI.h>
 #include <kernel/PIC.h>
@@ -128,8 +128,8 @@ extern "C" void kernel_main()
 IDT::initialize();
 dprintln("IDT initialized");

-MMU::initialize();
-dprintln("MMU initialized");
+PageTable::initialize();
+dprintln("PageTable initialized");

 TerminalDriver* terminal_driver = VesaTerminalDriver::create();
 ASSERT(terminal_driver);

@@ -8,20 +8,27 @@

 int main()
 {
-char* string = (char*)malloc(10);
-strcpy(string, "Hello");
+printf("userspace\n");

-printf("forking\n");
+FILE* fp = fopen("/usr/include/kernel/kprint.h", "r");
+if (fp == NULL)
+ERROR("fopen");
+
+char* buffer = (char*)malloc(128);
+fread(buffer, 1, 100, fp);

 pid_t pid = fork();
 if (pid == 0)
 {
-printf("child '%s'\n", string);
+while (size_t n_read = fread(buffer, 1, 127, fp))
+fwrite(buffer, 1, n_read, stdout);
+free(buffer);
+fclose(fp);
 return 0;
 }

-strcpy(string, "World");
-printf("parent '%s'\n", string);
+free(buffer);
+fclose(fp);

 return 0;
 }