Kernel: Rewrite paging and AP initialization
The initial paging step now only prepares the fast page for the heap; the actual page table initialization happens after the heap is initialized, which lets x86_64 avoid ever depending on kmalloc for pages. Processor stacks are now allocated through the PMM/VMM instead of using kmalloc'd identity-mapped memory.
This commit is contained in:
@@ -46,13 +46,22 @@ namespace Kernel
|
||||
};
|
||||
|
||||
public:
|
||||
static void initialize_pre_heap();
|
||||
static void initialize_post_heap();
|
||||
static void initialize_fast_page();
|
||||
static void initialize_and_load();
|
||||
|
||||
static void enable_cpu_features();
|
||||
|
||||
static PageTable& kernel();
|
||||
// Returns the PageTable whose address is stored in the per-CPU state
// (Processor::get_current_page_table()), i.e. the currently loaded table.
// NOTE(review): the cast is unchecked — assumes the stored value is
// always a valid PageTable*.
static PageTable& current() { return *reinterpret_cast<PageTable*>(Processor::get_current_page_table()); }
|
||||
|
||||
// Virtual address of the kernel "fast page" temporary-mapping slot.
// (Pre-rewrite version: the slot sat directly at KERNEL_OFFSET.)
static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
|
||||
// Virtual address of the kernel "fast page" — a single fixed page slot
// used for short-lived temporary mappings (see with_fast_page()).
// NOTE(review): the constants are per-architecture; confirm they match
// each arch's kernel virtual memory layout and stay page-aligned.
static constexpr vaddr_t fast_page()
{
#if ARCH(x86_64)
	return 0xffffffffbfe00000;
#elif ARCH(i686)
	return 0xffe00000;
#endif
}
|
||||
|
||||
template<with_fast_page_callback F>
|
||||
static void with_fast_page(paddr_t paddr, F callback)
|
||||
@@ -121,7 +130,6 @@ namespace Kernel
|
||||
vaddr_t reserve_free_contiguous_pages(size_t page_count, vaddr_t first_address, vaddr_t last_address = UINTPTR_MAX);
|
||||
|
||||
void load();
|
||||
void initial_load();
|
||||
|
||||
// Invalidates a single page: delegates to invalidate_range() with a
// page count of 1.
void invalidate_page(vaddr_t addr, bool send_smp_message) { invalidate_range(addr, 1, send_smp_message); }
|
||||
void invalidate_range(vaddr_t addr, size_t pages, bool send_smp_message);
|
||||
@@ -129,14 +137,14 @@ namespace Kernel
|
||||
// Acquires this page table's lock; returns the prior InterruptState so
// the caller can hand it back to unlock().
InterruptState lock() const { return m_lock.lock(); }
|
||||
// Releases the lock taken by lock(), passing back the saved InterruptState.
void unlock(InterruptState state) const { m_lock.unlock(state); }
|
||||
|
||||
// Physical address of the highest-level paging structure for this table.
// NOTE(review): presumably the value loaded into the page-table base
// register (CR3 on x86) — confirm against the arch load() implementation.
paddr_t paddr() const { return m_highest_paging_struct; }
|
||||
|
||||
void debug_dump();
|
||||
|
||||
private:
|
||||
PageTable() = default;
|
||||
uint64_t get_page_data(vaddr_t) const;
|
||||
void initialize_kernel();
|
||||
void map_kernel_memory();
|
||||
void prepare_fast_page();
|
||||
|
||||
static void map_fast_page(paddr_t);
|
||||
static void unmap_fast_page();
|
||||
|
||||
@@ -58,6 +58,8 @@ namespace Kernel
|
||||
static Processor& create(ProcessorID id);
|
||||
static Processor& initialize();
|
||||
|
||||
void allocate_stack();
|
||||
|
||||
// Reads the executing CPU's ProcessorID (m_id) from GS-relative
// per-processor storage.
static ProcessorID current_id() { return read_gs_sized<ProcessorID>(offsetof(Processor, m_id)); }
|
||||
// Reads the executing CPU's m_index from GS-relative per-processor storage.
static uint8_t current_index() { return read_gs_sized<uint8_t>(offsetof(Processor, m_index)); }
|
||||
static ProcessorID id_from_index(size_t index);
|
||||
@@ -100,11 +102,8 @@ namespace Kernel
|
||||
handle_smp_messages();
|
||||
}
|
||||
|
||||
// Lowest address of the executing CPU's stack, read from per-CPU storage
// via GS.
static uintptr_t current_stack_bottom() { return read_gs_sized<uintptr_t>(offsetof(Processor, m_stack)); }
|
||||
// One past the highest usable byte of the executing CPU's stack
// (bottom + s_stack_size).
static uintptr_t current_stack_top() { return current_stack_bottom() + s_stack_size; }
|
||||
|
||||
// Lowest address of this processor's stack allocation.
// (Pre-rewrite version: backed by the kmalloc'd m_stack pointer.)
uintptr_t stack_bottom() const { return reinterpret_cast<uintptr_t>(m_stack); }
|
||||
// One past the highest usable byte of this processor's stack.
// (Pre-rewrite version: computed from the kmalloc'd stack bottom.)
uintptr_t stack_top() const { return stack_bottom() + s_stack_size; }
|
||||
// Virtual address one past the top of this processor's VMM-allocated stack.
vaddr_t stack_top_vaddr() const { return m_stack_vaddr + s_stack_size; }
|
||||
// Physical address one past the top of this processor's stack.
// NOTE(review): adding s_stack_size to a base paddr is only valid if the
// stack is physically contiguous — with s_stack_size == PAGE_SIZE that
// holds for a single-page allocation; revisit if the size ever grows.
paddr_t stack_top_paddr() const { return m_stack_paddr + s_stack_size; }
|
||||
|
||||
// Records the given syscall stack address in this CPU's per-processor
// storage (m_thread_syscall_stack) via a GS-relative write.
static void set_thread_syscall_stack(vaddr_t vaddr) { write_gs_sized<vaddr_t>(offsetof(Processor, m_thread_syscall_stack), vaddr); }
|
||||
|
||||
@@ -215,8 +214,9 @@ namespace Kernel
|
||||
|
||||
Thread* m_sse_thread { nullptr };
|
||||
|
||||
static constexpr size_t s_stack_size { 4096 };
|
||||
void* m_stack { nullptr };
|
||||
static constexpr size_t s_stack_size { PAGE_SIZE };
|
||||
vaddr_t m_stack_vaddr { 0 };
|
||||
paddr_t m_stack_paddr { 0 };
|
||||
|
||||
GDT* m_gdt { nullptr };
|
||||
IDT* m_idt { nullptr };
|
||||
|
||||
Reference in New Issue
Block a user