Kernel: rework the whole PageTable structure
We now have a page table structure for kernel memory that is shared between all processes.
parent d99e704728
commit 86df258365
@@ -137,21 +137,6 @@ namespace IDT

	extern "C" void cpp_isr_handler(uint64_t isr, uint64_t error, const Registers* regs)
	{
		if (isr == ISR::PageFault)
		{
			using namespace Kernel;

			vaddr_t vaddr = regs->cr2 & PAGE_ADDR_MASK;

			if (!PageTable::kernel().is_page_free(vaddr))
			{
				auto paddr = kmalloc_paddr_of(vaddr);
				ASSERT(paddr.has_value());
				PageTable::current().map_page_at(paddr.value(), vaddr, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
				return;
			}
		}

		pid_t tid = Kernel::Scheduler::current_tid();
		pid_t pid = tid ? Kernel::Process::current().pid() : 0;

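The page-fault fix-up removed above becomes unnecessary with this rework: kernel mappings are built once and shared by every address space, so a kmalloc page can no longer be present in the kernel page table yet missing from the current process's. A minimal sketch of the sharing scheme that the PageTable.cpp hunks below implement; the KERNEL_OFFSET value, the flag constants, and the function signature are illustrative assumptions, only the s_global / map_kernel_memory structure comes from the diff:

#include <cstddef>
#include <cstdint>

// Assumed higher-half layout: kernel at -2 GiB. The diff itself only
// guarantees that the kernel half fits into a single PDPT.
constexpr uint64_t KERNEL_OFFSET = 0xFFFFFFFF80000000ull;

// One page directory maps 4096 * 512 * 512 bytes = 1 GiB, so the assumed
// 2 GiB kernel half needs two globally shared page directories.
constexpr size_t GLOBAL_PD_COUNT =
	(0xFFFFFFFFFFFFFFFFull - KERNEL_OFFSET + 1) / (4096ull * 512ull * 512ull);

// Physical addresses of the shared kernel page directories; allocated once
// in PageTable::initialize_kernel() and never freed.
static uint64_t s_global[GLOBAL_PD_COUNT] {};

constexpr uint64_t PRESENT    = 1ull << 0;
constexpr uint64_t READ_WRITE = 1ull << 1;

// Every PageTable owns its PML4 and one kernel PDPT, but the PDPT entries all
// point at the same s_global page directories, so a kernel mapping made
// through any address space is immediately visible in all of them.
void map_kernel_memory_sketch(uint64_t* pml4, uint64_t* kernel_pdpt, uint64_t kernel_pdpt_paddr)
{
	constexpr uint64_t pml4e = (KERNEL_OFFSET >> 39) & 0x1FF; // 511 for the assumed offset
	constexpr uint64_t pdpte = (KERNEL_OFFSET >> 30) & 0x1FF; // 510 for the assumed offset

	pml4[pml4e] = kernel_pdpt_paddr | READ_WRITE | PRESENT;
	for (uint64_t i = 0; pdpte + i < 512; i++)
		kernel_pdpt[pdpte + i] = s_global[i] | READ_WRITE | PRESENT;
}
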
@@ -4,14 +4,6 @@
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTable.h>

#define CLEANUP_STRUCTURE(s)				\
	do {									\
		for (uint64_t i = 0; i < 512; i++)	\
			if ((s)[i] & Flags::Present)	\
				return;						\
		kfree(s);							\
	} while (false)

extern uint8_t g_kernel_start[];
extern uint8_t g_kernel_end[];

@@ -21,6 +13,10 @@ namespace Kernel
	static PageTable* s_kernel = nullptr;
	static PageTable* s_current = nullptr;

	// Page Directories for kernel memory (KERNEL_OFFSET -> 0xFFFFFFFFFFFFFFFF)
	static paddr_t s_global[(0xFFFFFFFFFFFFFFFF - KERNEL_OFFSET + 1) / (4096ull * 512ull * 512ull)] { };
	static_assert(sizeof(s_global) / sizeof(*s_global) < 512);

	static constexpr inline bool is_canonical(uintptr_t addr)
	{
		constexpr uintptr_t mask = 0xFFFF800000000000;
@@ -63,7 +59,7 @@ namespace Kernel
		return *s_current;
	}

	static uint64_t* allocate_page_aligned_page()
	static uint64_t* allocate_zeroed_page_aligned_page()
	{
		void* page = kmalloc(PAGE_SIZE, PAGE_SIZE, true);
		ASSERT(page);
@@ -73,80 +69,77 @@ namespace Kernel

	void PageTable::initialize_kernel()
	{
		for (uint32_t i = 0; i < sizeof(s_global) / sizeof(*s_global); i++)
		{
			ASSERT(s_global[i] == 0);
			s_global[i] = V2P(allocate_zeroed_page_aligned_page());
		}
		map_kernel_memory();

		// Map (0 -> phys_kernel_end) to (KERNEL_OFFSET -> virt_kernel_end)
		m_highest_paging_struct = V2P(allocate_page_aligned_page());
		map_range_at(0, KERNEL_OFFSET, (uintptr_t)g_kernel_end - KERNEL_OFFSET, Flags::ReadWrite | Flags::Present);
	}

	BAN::ErrorOr<PageTable*> PageTable::create_userspace()
	{
		// Here we copy the s_kernel paging structs since they are
		// global for every process

		LockGuard _(s_kernel->m_lock);

		uint64_t* global_pml4 = (uint64_t*)P2V(s_kernel->m_highest_paging_struct);

		uint64_t* pml4 = allocate_page_aligned_page();
		for (uint32_t pml4e = 0; pml4e < 512; pml4e++)
		{
			if (!(global_pml4[pml4e] & Flags::Present))
				continue;

			uint64_t* global_pdpt = (uint64_t*)P2V(global_pml4[pml4e] & PAGE_ADDR_MASK);

			uint64_t* pdpt = allocate_page_aligned_page();
			pml4[pml4e] = V2P(pdpt) | (global_pml4[pml4e] & PAGE_FLAG_MASK);

			for (uint32_t pdpte = 0; pdpte < 512; pdpte++)
			{
				if (!(global_pdpt[pdpte] & Flags::Present))
					continue;

				uint64_t* global_pd = (uint64_t*)P2V(global_pdpt[pdpte] & PAGE_ADDR_MASK);

				uint64_t* pd = allocate_page_aligned_page();
				pdpt[pdpte] = V2P(pd) | (global_pdpt[pdpte] & PAGE_FLAG_MASK);

				for (uint32_t pde = 0; pde < 512; pde++)
				{
					if (!(global_pd[pde] & Flags::Present))
						continue;

					uint64_t* global_pt = (uint64_t*)P2V(global_pd[pde] & PAGE_ADDR_MASK);

					uint64_t* pt = allocate_page_aligned_page();
					pd[pde] = V2P(pt) | (global_pd[pde] & PAGE_FLAG_MASK);

					memcpy(pt, global_pt, PAGE_SIZE);
				}
			}
		}

		PageTable* result = new PageTable;
		if (result == nullptr)
		PageTable* page_table = new PageTable;
		if (page_table == nullptr)
			return BAN::Error::from_errno(ENOMEM);
		result->m_highest_paging_struct = V2P(pml4);
		return result;
		page_table->map_kernel_memory();
		return page_table;
	}

	void PageTable::map_kernel_memory()
	{
		// Verify that kernel memory fits to single page directory pointer table
		static_assert(0xFFFFFFFFFFFFFFFF - KERNEL_OFFSET < 4096ull * 512ull * 512ull * 512ull);

		ASSERT(m_highest_paging_struct == 0);
		m_highest_paging_struct = V2P(allocate_zeroed_page_aligned_page());

		constexpr uint64_t pml4e = (KERNEL_OFFSET >> 39) & 0x1FF;
		constexpr uint64_t pdpte = (KERNEL_OFFSET >> 30) & 0x1FF;

		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
		pml4[pml4e] = V2P(allocate_zeroed_page_aligned_page());
		pml4[pml4e] = (pml4[pml4e] & PAGE_ADDR_MASK) | (Flags::ReadWrite | Flags::Present);

		uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
		for (uint64_t i = 0; pdpte + i < 512; i++)
		{
			pdpt[pdpte + i] = V2P(allocate_zeroed_page_aligned_page());
			pdpt[pdpte + i] = s_global[i] | (Flags::ReadWrite | Flags::Present);
		}
	}

	PageTable::~PageTable()
	{
		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
		for (uint32_t pml4e = 0; pml4e < 512; pml4e++)
		for (uint64_t pml4e = 0; pml4e < 512; pml4e++)
		{
			if (!(pml4[pml4e] & Flags::Present))
				continue;
			uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
			for (uint32_t pdpte = 0; pdpte < 512; pdpte++)
			for (uint64_t pdpte = 0; pdpte < 512; pdpte++)
			{
				if (!(pdpt[pdpte] & Flags::Present))
					continue;
				uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
				for (uint32_t pde = 0; pde < 512; pde++)
				for (uint64_t pde = 0; pde < 512; pde++)
				{
					if (!(pd[pde] & Flags::Present))
						continue;

					vaddr_t vaddr = 0;
					vaddr |= pml4e << 39;
					vaddr |= pdpte << 30;
					vaddr |= pde   << 21;
					vaddr = canonicalize(vaddr);

					if (vaddr >= KERNEL_OFFSET)
						return;

					kfree((void*)P2V(pd[pde] & PAGE_ADDR_MASK));
				}
				kfree(pd);
@@ -164,24 +157,8 @@ namespace Kernel

	void PageTable::invalidate(vaddr_t vaddr)
	{
		ASSERT(this == s_current);
		asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
	}

	void PageTable::identity_map_page(paddr_t address, flags_t flags)
	{
		address &= PAGE_ADDR_MASK;
		map_page_at(address, address, flags);
	}

	void PageTable::identity_map_range(paddr_t address, size_t size, flags_t flags)
	{
		LockGuard _(m_lock);

		paddr_t s_page = address / PAGE_SIZE;
		paddr_t e_page = (address + size - 1) / PAGE_SIZE;
		for (paddr_t page = s_page; page <= e_page; page++)
			identity_map_page(page * PAGE_SIZE, flags);
		if (this == s_current)
			asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory");
	}

	void PageTable::unmap_page(vaddr_t vaddr)
@@ -190,6 +167,9 @@ namespace Kernel

		vaddr &= PAGE_ADDR_MASK;

		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
			Kernel::panic("unmapping {8H}, kernel: {}", vaddr, this == s_kernel);

		if (is_page_free(vaddr))
		{
			dwarnln("unmapping unmapped page {8H}", vaddr);
@@ -210,12 +190,7 @@ namespace Kernel
		uint64_t* pt   = (uint64_t*)P2V(pd[pde]     & PAGE_ADDR_MASK);

		pt[pte] = 0;
		CLEANUP_STRUCTURE(pt);
		pd[pde] = 0;
		CLEANUP_STRUCTURE(pd);
		pdpt[pdpte] = 0;
		CLEANUP_STRUCTURE(pdpt);
		pml4[pml4e] = 0;
		invalidate(canonicalize(vaddr));
	}

	void PageTable::unmap_range(vaddr_t vaddr, size_t size)
@@ -232,11 +207,14 @@ namespace Kernel
	{
		LockGuard _(m_lock);

		if (vaddr && (vaddr >= KERNEL_OFFSET) != (this == s_kernel))
			Kernel::panic("mapping {8H} to {8H}, kernel: {}", paddr, vaddr, this == s_kernel);

		ASSERT(is_canonical(vaddr));
		vaddr = uncanonicalize(vaddr);

		ASSERT(paddr % PAGE_SIZE == 0);
		ASSERT(vaddr % PAGE_SIZE == 0);;
		ASSERT(vaddr % PAGE_SIZE == 0);

		ASSERT(flags & Flags::Present);

@@ -249,7 +227,7 @@ namespace Kernel
		if ((pml4[pml4e] & flags) != flags)
		{
			if (!(pml4[pml4e] & Flags::Present))
				pml4[pml4e] = V2P(allocate_page_aligned_page());
				pml4[pml4e] = V2P(allocate_zeroed_page_aligned_page());
			pml4[pml4e] = (pml4[pml4e] & PAGE_ADDR_MASK) | flags;
		}

@@ -257,7 +235,7 @@ namespace Kernel
		if ((pdpt[pdpte] & flags) != flags)
		{
			if (!(pdpt[pdpte] & Flags::Present))
				pdpt[pdpte] = V2P(allocate_page_aligned_page());
				pdpt[pdpte] = V2P(allocate_zeroed_page_aligned_page());
			pdpt[pdpte] = (pdpt[pdpte] & PAGE_ADDR_MASK) | flags;
		}

@@ -265,12 +243,14 @@ namespace Kernel
		if ((pd[pde] & flags) != flags)
		{
			if (!(pd[pde] & Flags::Present))
				pd[pde] = V2P(allocate_page_aligned_page());
				pd[pde] = V2P(allocate_zeroed_page_aligned_page());
			pd[pde] = (pd[pde] & PAGE_ADDR_MASK) | flags;
		}

		uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
		pt[pte] = paddr | flags;

		invalidate(canonicalize(vaddr));
	}

	void PageTable::map_range_at(paddr_t paddr, vaddr_t vaddr, size_t size, flags_t flags)
@@ -332,29 +312,40 @@ namespace Kernel
		return get_page_data(addr) & PAGE_ADDR_MASK;
	}

	vaddr_t PageTable::get_free_page() const
	vaddr_t PageTable::get_free_page(vaddr_t first_address) const
	{
		LockGuard _(m_lock);

		if (size_t rem = first_address % PAGE_SIZE)
			first_address += PAGE_SIZE - rem;

		ASSERT(is_canonical(first_address));
		vaddr_t vaddr = uncanonicalize(first_address);

		uint64_t pml4e = (vaddr >> 39) & 0x1FF;
		uint64_t pdpte = (vaddr >> 30) & 0x1FF;
		uint64_t pde   = (vaddr >> 21) & 0x1FF;
		uint64_t pte   = (vaddr >> 12) & 0x1FF;

		// Try to find free page that can be mapped without
		// allocations (page table with unused entries)
		uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
		for (uint64_t pml4e = 0; pml4e < 512; pml4e++)
		for (; pml4e < 512; pml4e++)
		{
			if (!(pml4[pml4e] & Flags::Present))
				continue;
			uint64_t* pdpt = (uint64_t*)P2V(pml4[pml4e] & PAGE_ADDR_MASK);
			for (uint64_t pdpte = 0; pdpte < 512; pdpte++)
			for (; pdpte < 512; pdpte++)
			{
				if (!(pdpt[pdpte] & Flags::Present))
					continue;
				uint64_t* pd = (uint64_t*)P2V(pdpt[pdpte] & PAGE_ADDR_MASK);
				for (uint64_t pde = 0; pde < 512; pde++)
				for (; pde < 512; pde++)
				{
					if (!(pd[pde] & Flags::Present))
						continue;
					uint64_t* pt = (uint64_t*)P2V(pd[pde] & PAGE_ADDR_MASK);
					for (uint64_t pte = !(pml4e + pdpte + pde); pte < 512; pte++)
					for (; pte < 512; pte++)
					{
						if (!(pt[pte] & Flags::Present))
						{
@@ -371,11 +362,13 @@ namespace Kernel
		}

		// Find any free page page (except for page 0)
		vaddr_t vaddr = PAGE_SIZE;
		while ((vaddr >> 48) == 0)
		vaddr = first_address;
		while (is_canonical(vaddr))
		{
			if (!(get_page_flags(vaddr) & Flags::Present))
			if (is_page_free(vaddr))
				return vaddr;
			if (vaddr > vaddr + PAGE_SIZE)
				break;
			vaddr += PAGE_SIZE;
		}

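With identity_map_page() and identity_map_range() removed, code that needs a physical region visible in kernel space now reserves free virtual pages above the kernel image and maps the region there, carrying the sub-page offset along. The ACPI, APIC, and VESA hunks below all follow this pattern; a condensed sketch using the interface from this diff (map_physical_region is a hypothetical helper for illustration, not something the commit adds, and the exact flags_t spelling is assumed):

#include <kernel/Memory/PageTable.h>

extern uint8_t g_kernel_end[];

namespace Kernel
{

	static vaddr_t map_physical_region(paddr_t paddr, size_t bytes, PageTable::flags_t flags)
	{
		// Number of pages the byte range touches, rounded up generously.
		size_t pages = range_page_count(paddr, bytes);

		// Reserve unused virtual pages above the kernel image...
		vaddr_t vaddr = PageTable::kernel().get_free_contiguous_pages(pages, (vaddr_t)g_kernel_end);
		ASSERT(vaddr);

		// ...map the page-aligned physical range there...
		PageTable::kernel().map_range_at(paddr & PAGE_ADDR_MASK, vaddr, pages * PAGE_SIZE, flags);

		// ...and keep the offset of the object within its first page.
		return vaddr + (paddr % PAGE_SIZE);
	}

}
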
@@ -1,6 +1,7 @@
#pragma once

#include <BAN/Errors.h>
#include <BAN/Vector.h>
#include <kernel/Memory/Types.h>

namespace Kernel
{
@@ -93,9 +94,17 @@ namespace Kernel
		const SDTHeader* get_header_from_index(size_t);

	private:
		uintptr_t m_header_table = 0;
		paddr_t m_header_table_paddr = 0;
		vaddr_t m_header_table_vaddr = 0;
		uint32_t m_entry_size = 0;
		uint32_t m_entry_count = 0;

		struct MappedPage
		{
			Kernel::paddr_t paddr;
			Kernel::vaddr_t vaddr;
		};
		BAN::Vector<MappedPage> m_mapped_headers;
	};

}
@@ -2,6 +2,7 @@

#include <BAN/Vector.h>
#include <kernel/InterruptController.h>
#include <kernel/Memory/Types.h>

class APIC final : public InterruptController
{
@@ -15,6 +16,7 @@ private:
	void write_to_local_apic(ptrdiff_t, uint32_t);

private:
	~APIC() { ASSERT_NOT_REACHED(); }
	static APIC* create();
	friend class InterruptController;

@@ -34,7 +36,8 @@ private:
	struct IOAPIC
	{
		uint8_t id;
		uintptr_t address;
		Kernel::paddr_t paddr;
		Kernel::vaddr_t vaddr;
		uint32_t gsi_base;
		uint8_t max_redirs;

@@ -44,7 +47,8 @@ private:

private:
	BAN::Vector<Processor>	m_processors;
	uintptr_t				m_local_apic = 0;
	Kernel::paddr_t			m_local_apic_paddr = 0;
	Kernel::vaddr_t			m_local_apic_vaddr = 0;
	BAN::Vector<IOAPIC>		m_io_apics;
	uint8_t					m_irq_overrides[0x100] {};
};
@@ -27,9 +27,6 @@ namespace Kernel
		static BAN::ErrorOr<PageTable*> create_userspace();
		~PageTable();

		void identity_map_page(paddr_t, flags_t);
		void identity_map_range(paddr_t, size_t bytes, flags_t);

		void unmap_page(vaddr_t);
		void unmap_range(vaddr_t, size_t bytes);

@@ -42,10 +39,9 @@ namespace Kernel
		bool is_page_free(vaddr_t) const;
		bool is_range_free(vaddr_t, size_t bytes) const;

		vaddr_t get_free_page() const;
		vaddr_t get_free_page(vaddr_t first_address = PAGE_SIZE) const;
		vaddr_t get_free_contiguous_pages(size_t page_count, vaddr_t first_address = PAGE_SIZE) const;

		void invalidate(vaddr_t);
		void load();

		void lock() const { m_lock.lock(); }
@@ -57,10 +53,19 @@ namespace Kernel
		PageTable() = default;
		uint64_t get_page_data(vaddr_t) const;
		void initialize_kernel();
		void map_kernel_memory();
		void invalidate(vaddr_t);

	private:
		paddr_t						m_highest_paging_struct { 0 };
		mutable RecursiveSpinLock	m_lock;
	};

	static constexpr size_t range_page_count(vaddr_t start, size_t bytes)
	{
		size_t first_page = start / PAGE_SIZE;
		size_t last_page = BAN::Math::div_round_up<size_t>(start + bytes, PAGE_SIZE);
		return last_page - first_page + 1;
	}

}
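A quick worked example for the new range_page_count() helper, assuming 4 KiB pages as elsewhere in the diff:

// range_page_count(0x1FF0, 0x20):
//   first_page = 0x1FF0 / 0x1000                     = 1
//   last_page  = div_round_up(0x1FF0 + 0x20, 0x1000) = div_round_up(0x2010, 0x1000) = 3
//   result     = 3 - 1 + 1                           = 3
//
// The byte range [0x1FF0, 0x2010) touches pages 1 and 2, so the result covers
// every touched page plus one spare; since the callers below multiply it by
// PAGE_SIZE, mappings can come out one page larger than strictly needed,
// never smaller.
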
@@ -6,6 +6,8 @@
#define RSPD_SIZE	20
#define RSPDv2_SIZE	36

extern uint8_t g_kernel_end[];

namespace Kernel
{

@@ -104,42 +106,73 @@ namespace Kernel

		if (rsdp->revision >= 2)
		{
			const XSDT* xsdt = (const XSDT*)rsdp->xsdt_address;
			PageTable::kernel().identity_map_page((uintptr_t)xsdt, PageTable::Flags::Present);
			BAN::ScopeGuard _([xsdt] { PageTable::kernel().unmap_page((uintptr_t)xsdt); });
			PageTable::kernel().map_page_at(rsdp->xsdt_address & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
			const XSDT* xsdt = (const XSDT*)(rsdp->xsdt_address % PAGE_SIZE);
			BAN::ScopeGuard _([xsdt] { PageTable::kernel().unmap_page(0); });

			if (memcmp(xsdt->signature, "XSDT", 4) != 0)
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
			if (!is_valid_std_header(xsdt))
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);

			m_header_table = (uintptr_t)xsdt->entries;
			m_header_table_paddr = (paddr_t)xsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
			m_entry_size = 8;
			m_entry_count = (xsdt->length - sizeof(SDTHeader)) / 8;
		}
		else
		{
			const RSDT* rsdt = (const RSDT*)(uintptr_t)rsdp->rsdt_address;
			PageTable::kernel().identity_map_page((vaddr_t)rsdt, PageTable::Flags::Present);
			BAN::ScopeGuard _([rsdt] { PageTable::kernel().unmap_page((vaddr_t)rsdt); });
			PageTable::kernel().map_page_at(rsdp->rsdt_address & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
			const RSDT* rsdt = (const RSDT*)((vaddr_t)rsdp->rsdt_address % PAGE_SIZE);
			BAN::ScopeGuard _([rsdt] { PageTable::kernel().unmap_page(0); });

			if (memcmp(rsdt->signature, "RSDT", 4) != 0)
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
			if (!is_valid_std_header(rsdt))
				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);

			m_header_table = (uintptr_t)rsdt->entries;
			m_header_table_paddr = (paddr_t)rsdt->entries + (rsdp->rsdt_address & PAGE_ADDR_MASK);
			m_entry_size = 4;
			m_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
		}

		PageTable::kernel().identity_map_range(m_header_table, m_entry_count * m_entry_size, PageTable::Flags::Present);
		size_t needed_pages = range_page_count(m_header_table_paddr, m_entry_count * m_entry_size);
		m_header_table_vaddr = PageTable::kernel().get_free_contiguous_pages(needed_pages, (vaddr_t)g_kernel_end);
		ASSERT(m_header_table_vaddr);

		m_header_table_vaddr += m_header_table_paddr % PAGE_SIZE;

		PageTable::kernel().map_range_at(
			m_header_table_paddr & PAGE_ADDR_MASK,
			m_header_table_vaddr & PAGE_ADDR_MASK,
			needed_pages * PAGE_SIZE,
			PageTable::Flags::Present
		);

		for (uint32_t i = 0; i < m_entry_count; i++)
		{
			auto* header = get_header_from_index(i);
			PageTable::kernel().identity_map_page((uintptr_t)header, PageTable::Flags::Present);
			PageTable::kernel().identity_map_range((uintptr_t)header, header->length, PageTable::Flags::Present);
			paddr_t header_paddr = (m_entry_size == 4) ?
				((uint32_t*)m_header_table_vaddr)[i] :
				((uint64_t*)m_header_table_vaddr)[i];

			PageTable::kernel().map_page_at(header_paddr & PAGE_ADDR_MASK, 0, PageTable::Flags::Present);
			size_t header_length = ((SDTHeader*)(header_paddr % PAGE_SIZE))->length;
			PageTable::kernel().unmap_page(0);

			size_t needed_pages = range_page_count(header_paddr, header_length);
			vaddr_t page_vaddr = PageTable::kernel().get_free_contiguous_pages(needed_pages, (vaddr_t)g_kernel_end);
			ASSERT(page_vaddr);

			PageTable::kernel().map_range_at(
				header_paddr & PAGE_ADDR_MASK,
				page_vaddr,
				needed_pages * PAGE_SIZE,
				PageTable::Flags::Present
			);

			MUST(m_mapped_headers.push_back({
				.paddr = header_paddr,
				.vaddr = page_vaddr + (header_paddr % PAGE_SIZE)
			}));
		}

		return {};
@@ -161,8 +194,15 @@ namespace Kernel
		ASSERT(index < m_entry_count);
		ASSERT(m_entry_size == 4 || m_entry_size == 8);

		uintptr_t header_address = (m_entry_size == 4) ? ((uint32_t*)m_header_table)[index] : ((uint64_t*)m_header_table)[index];
		return (SDTHeader*)header_address;
		paddr_t header_paddr = (m_entry_size == 4) ?
			((uint32_t*)m_header_table_vaddr)[index] :
			((uint64_t*)m_header_table_vaddr)[index];

		for (const auto& page : m_mapped_headers)
			if (page.paddr == header_paddr)
				return (SDTHeader*)page.vaddr;

		ASSERT_NOT_REACHED();
	}

}
@@ -81,6 +81,8 @@ union RedirectionEntry
	};
};

extern uint8_t g_kernel_end[];

using namespace Kernel;

APIC* APIC::create()
@@ -101,7 +103,7 @@ APIC* APIC::create()
	}

	APIC* apic = new APIC;
	apic->m_local_apic = madt->local_apic;
	apic->m_local_apic_paddr = madt->local_apic;
	for (uint32_t i = 0x00; i <= 0xFF; i++)
		apic->m_irq_overrides[i] = i;

@@ -121,7 +123,7 @@ APIC* APIC::create()
			case 1:
				IOAPIC ioapic;
				ioapic.id			= entry->entry1.ioapic_id;
				ioapic.address		= entry->entry1.ioapic_address;
				ioapic.paddr		= entry->entry1.ioapic_address;
				ioapic.gsi_base		= entry->entry1.gsi_base;
				ioapic.max_redirs	= 0;
				MUST(apic->m_io_apics.push_back(ioapic));
@@ -130,7 +132,7 @@ APIC* APIC::create()
				apic->m_irq_overrides[entry->entry2.irq_source] = entry->entry2.gsi;
				break;
			case 5:
				apic->m_local_apic = entry->entry5.address;
				apic->m_local_apic_paddr = entry->entry5.address;
				break;
			default:
				dprintln("Unhandled madt entry, type {}", entry->type);
@@ -139,17 +141,40 @@ APIC* APIC::create()
		madt_entry_addr += entry->length;
	}

	if (apic->m_local_apic == 0 || apic->m_io_apics.empty())
	if (apic->m_local_apic_paddr == 0 || apic->m_io_apics.empty())
	{
		dprintln("MADT did not provide necessary information");
		delete apic;
		return nullptr;
	}

	PageTable::kernel().identity_map_page(apic->m_local_apic, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
	// Map the local apic to kernel memory
	{
		vaddr_t vaddr = PageTable::kernel().get_free_page((vaddr_t)g_kernel_end);
		ASSERT(vaddr);
		dprintln("lapic paddr {8H}", apic->m_local_apic_paddr);
		apic->m_local_apic_vaddr = vaddr + (apic->m_local_apic_paddr % PAGE_SIZE);
		dprintln("lapic vaddr {8H}", apic->m_local_apic_vaddr);
		PageTable::kernel().map_page_at(
			apic->m_local_apic_paddr & PAGE_ADDR_MASK,
			apic->m_local_apic_vaddr & PAGE_ADDR_MASK,
			PageTable::Flags::ReadWrite | PageTable::Flags::Present
		);
	}

	// Map io apics to kernel memory
	for (auto& io_apic : apic->m_io_apics)
	{
		PageTable::kernel().identity_map_page(io_apic.address, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		vaddr_t vaddr = PageTable::kernel().get_free_page((vaddr_t)g_kernel_end);
		ASSERT(vaddr);

		io_apic.vaddr = vaddr + (io_apic.paddr % PAGE_SIZE);

		PageTable::kernel().map_page_at(
			io_apic.paddr & PAGE_ADDR_MASK,
			io_apic.vaddr & PAGE_ADDR_MASK,
			PageTable::Flags::ReadWrite | PageTable::Flags::Present
		);
		io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
	}

@@ -171,24 +196,24 @@ APIC* APIC::create()

uint32_t APIC::read_from_local_apic(ptrdiff_t offset)
{
	return *(uint32_t*)(m_local_apic + offset);
	return *(uint32_t*)(m_local_apic_vaddr + offset);
}

void APIC::write_to_local_apic(ptrdiff_t offset, uint32_t data)
{
	*(uint32_t*)(m_local_apic + offset) = data;
	*(uint32_t*)(m_local_apic_vaddr + offset) = data;
}

uint32_t APIC::IOAPIC::read(uint8_t offset)
{
	volatile uint32_t* ioapic = (volatile uint32_t*)address;
	volatile uint32_t* ioapic = (volatile uint32_t*)vaddr;
	ioapic[0] = offset;
	return ioapic[4];
}

void APIC::IOAPIC::write(uint8_t offset, uint32_t data)
{
	volatile uint32_t* ioapic = (volatile uint32_t*)address;
	volatile uint32_t* ioapic = (volatile uint32_t*)vaddr;
	ioapic[0] = offset;
	ioapic[4] = data;
}

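The remaining hunks all make the same change: the explicit invalidate() calls that used to follow every map_page_at()/unmap_page() are dropped, because those functions now end with invalidate(canonicalize(vaddr)) themselves, and invalidate() only issues invlpg when the table being modified is the currently loaded one. A sketched before/after of a typical call site, with the surrounding allocator code abbreviated:

// Before: every mapping change needed a manual TLB shootdown of that address.
//   m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
//   m_page_table.invalidate(0);
//   memcpy((void*)0, (void*)vaddr, PAGE_SIZE);
//   m_page_table.unmap_page(0);
//   m_page_table.invalidate(0);
//
// After: PageTable invalidates internally (and only when this == s_current),
// so the call sites shrink to the mapping operations alone.
//   m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
//   memcpy((void*)0, (void*)vaddr, PAGE_SIZE);
//   m_page_table.unmap_page(0);
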
@@ -196,7 +196,6 @@ namespace Kernel

		page_vaddr = m_page_table.get_free_page();
		m_page_table.map_page_at(page_paddr, page_vaddr, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		m_page_table.invalidate(page_vaddr);
	}

	bool FixedWidthAllocator::allocate_page_if_needed(vaddr_t vaddr, uint8_t flags)
@@ -251,7 +250,6 @@ namespace Kernel
			{
				paddr_t paddr = new_page_table.physical_address_of(page_begin);
				m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
				m_page_table.invalidate(0);
				memcpy((void*)0, (void*)page_begin, PAGE_SIZE);
			}

@@ -261,7 +259,6 @@ namespace Kernel
		}

		m_page_table.unmap_page(0);
		m_page_table.invalidate(0);

		m_page_table.unlock();

@@ -109,14 +109,12 @@ namespace Kernel
				new_page_table.map_page_at(paddr, vaddr, flags);

				m_page_table.map_page_at(paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
				m_page_table.invalidate(0);
				memcpy((void*)0, (void*)vaddr, PAGE_SIZE);
			}

			MUST(allocator->m_allocations.push_back(BAN::move(new_allocation)));
		}
		m_page_table.unmap_page(0);
		m_page_table.invalidate(0);

		m_page_table.unlock();

@@ -85,11 +85,9 @@ namespace Kernel
		for (size_t i = 0; i < result->m_physical_pages.size(); i++)
		{
			m_page_table.map_page_at(result->m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			m_page_table.invalidate(0);
			memcpy((void*)0, (void*)(vaddr() + i * PAGE_SIZE), PAGE_SIZE);
		}
		m_page_table.unmap_page(0);
		m_page_table.invalidate(0);

		m_page_table.unlock();

@@ -112,11 +110,9 @@ namespace Kernel
		for (size_t i = 0; i < m_physical_pages.size(); i++)
		{
			page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			page_table.invalidate(0);
			memset((void*)0, 0, PAGE_SIZE);
		}
		page_table.unmap_page(0);
		page_table.invalidate(0);

		page_table.unlock();
	}
@@ -147,7 +143,6 @@ namespace Kernel

		// NOTE: we map the first page separately since it needs extra calculations
		page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		page_table.invalidate(0);

		memcpy((void*)off, buffer, PAGE_SIZE - off);

@@ -160,7 +155,6 @@ namespace Kernel
			size_t len = BAN::Math::min<size_t>(PAGE_SIZE, bytes);

			page_table.map_page_at(m_physical_pages[i], 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
			page_table.invalidate(0);

			memcpy((void*)0, buffer, len);

@@ -169,7 +163,6 @@ namespace Kernel
			i++;
		}
		page_table.unmap_page(0);
		page_table.invalidate(0);

		page_table.unlock();
	}

@@ -265,8 +265,6 @@ namespace Kernel
			io_write(ATA_PORT_LBA2, (uint8_t)(lba >> 16));
			io_write(ATA_PORT_COMMAND, ATA_COMMAND_READ_SECTORS);

			PIT::sleep(1);

			for (uint32_t sector = 0; sector < sector_count; sector++)
			{
				block_until_irq();

@@ -178,7 +178,6 @@ namespace Kernel
		ASSERT(page_table.is_page_free(0));

		page_table.map_page_at(this->paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		page_table.invalidate(0);

		for (size_t i = 0; i < PAGE_SIZE / device.sector_size(); i++)
		{
@@ -188,7 +187,6 @@ namespace Kernel
		}

		page_table.unmap_page(0);
		page_table.invalidate(0);

		page_table.unlock();

@@ -210,7 +208,6 @@ namespace Kernel
		ASSERT(page_table.is_page_free(0));

		page_table.map_page_at(this->paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		page_table.invalidate(0);

		// Sector not yet cached
		if (!(this->sector_mask & (1 << sector_offset)))
@@ -222,7 +219,6 @@ namespace Kernel
		memcpy(buffer, (const void*)(sector_offset * device.sector_size()), device.sector_size());

		page_table.unmap_page(0);
		page_table.invalidate(0);

		page_table.unlock();

@@ -244,14 +240,12 @@ namespace Kernel
		ASSERT(page_table.is_page_free(0));

		page_table.map_page_at(this->paddr, 0, PageTable::Flags::ReadWrite | PageTable::Flags::Present);
		page_table.invalidate(0);

		memcpy((void*)(sector_offset * device.sector_size()), buffer, device.sector_size());
		this->sector_mask |= 1 << sector_offset;
		this->dirty_mask |= 1 << sector_offset;

		page_table.unmap_page(0);
		page_table.invalidate(0);

		page_table.unlock();

@@ -6,6 +6,8 @@

using namespace Kernel;

extern uint8_t g_kernel_end[];

VesaTerminalDriver* VesaTerminalDriver::create()
{
	if (!(g_multiboot_info->flags & MULTIBOOT_FLAGS_FRAMEBUFFER))
@@ -36,14 +38,21 @@ VesaTerminalDriver* VesaTerminalDriver::create()
		return nullptr;
	}

	PageTable::kernel().identity_map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);
	uint64_t first_page = framebuffer.addr / PAGE_SIZE;
	uint64_t last_page = BAN::Math::div_round_up<uint64_t>(framebuffer.addr + framebuffer.pitch * framebuffer.height, PAGE_SIZE);
	uint64_t needed_pages = last_page - first_page + 1;

	vaddr_t vaddr = PageTable::kernel().get_free_contiguous_pages(needed_pages, (vaddr_t)g_kernel_end);
	ASSERT(vaddr);

	PageTable::kernel().map_range_at(framebuffer.addr, vaddr, needed_pages * PAGE_SIZE, PageTable::Flags::UserSupervisor | PageTable::Flags::ReadWrite | PageTable::Flags::Present);

	auto* driver = new VesaTerminalDriver(
		framebuffer.width,
		framebuffer.height,
		framebuffer.pitch,
		framebuffer.bpp,
		framebuffer.addr
		vaddr
	);
	driver->set_cursor_position(0, 0);
	driver->clear(TerminalColor::BLACK);

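The framebuffer hunk computes the page count inline with the same first_page/last_page formula that range_page_count() in PageTable.h encapsulates; presumably the helper could be called directly (a sketch, since the file already has using namespace Kernel):

	uint64_t needed_pages = range_page_count(framebuffer.addr, framebuffer.pitch * framebuffer.height);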