Kernel: MMU::get() is now MMU::kernel

MMU can now be locked with a RecursiveSpinLock. Scheduler now has
current_tid(), which works before the Scheduler is initialized. This
allows RecursiveSpinLock usage early in boot.

commit 869de7283f
parent a2ee543fa1
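
In effect (a compilable userspace sketch of the mechanism; names follow the diff, except the kernel uses pid_t for thread ids and a 0 sentinel, and the real lock shares the "is this thread safe?" FIXME visible in the SpinLock diff below):

	#include <atomic>
	#include <cassert>
	#include <cstdint>

	using tid_t = int32_t; // the kernel spells this pid_t

	// Stand-in for Scheduler::current_tid(): before the Scheduler singleton
	// exists it reports 0, i.e. the single boot "thread", instead of
	// crashing inside Thread::current().
	static tid_t current_tid() { return 0; }

	class RecursiveSpinLock
	{
	public:
		void lock()
		{
			// Re-entry by the current owner just deepens the lock.
			if (m_depth > 0 && m_locker == current_tid())
			{
				m_depth++;
				return;
			}
			// Anyone else spins until the flag is released.
			bool expected = false;
			while (!m_flag.compare_exchange_weak(expected, true))
				expected = false;
			m_locker = current_tid();
			m_depth = 1;
		}

		void unlock()
		{
			assert(m_depth > 0);
			if (--m_depth == 0)
			{
				m_locker = -1;
				m_flag.store(false);
			}
		}

	private:
		std::atomic<bool>  m_flag   { false };
		std::atomic<tid_t> m_locker { -1 };
		uint32_t           m_depth  { 0 };
	};

	int main()
	{
		RecursiveSpinLock lock;
		lock.lock();
		lock.lock();   // same tid: depth 2, no deadlock
		lock.unlock();
		lock.unlock(); // fully released
		return 0;
	}
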
@@ -1,4 +1,6 @@
 #include <BAN/Errors.h>
+#include <kernel/Arch.h>
+#include <kernel/LockGuard.h>
 #include <kernel/Memory/kmalloc.h>
 #include <kernel/Memory/MMU.h>
 
@@ -6,33 +8,34 @@
 #define PAGE_MASK (~FLAGS_MASK)
 
 #define CLEANUP_STRUCTURE(s)				\
+	do {									\
 		for (uint64_t i = 0; i < 512; i++)	\
-		if (s[i] & Flags::Present)		\
+			if ((s)[i] & Flags::Present)	\
 				return;						\
-	kfree(s)
-
+		kfree(s);							\
+	} while (false)
 
 extern uint8_t g_kernel_end[];
 
 namespace Kernel
 {
 	
-	static MMU* s_instance = nullptr;
+	static MMU* s_kernel = nullptr;
 	static MMU* s_current = nullptr;
 
 	void MMU::initialize()
 	{
-		ASSERT(s_instance == nullptr);
-		s_instance = new MMU();
-		ASSERT(s_instance);
-		s_instance->initialize_kernel();
-		s_instance->load();
+		ASSERT(s_kernel == nullptr);
+		s_kernel = new MMU();
+		ASSERT(s_kernel);
+		s_kernel->initialize_kernel();
+		s_kernel->load();
 	}
 
-	MMU& MMU::get()
+	MMU& MMU::kernel()
 	{
-		ASSERT(s_instance);
-		return *s_instance;
+		ASSERT(s_kernel);
+		return *s_kernel;
 	}
 
 	MMU& MMU::current()
@@ -61,13 +64,15 @@ namespace Kernel
 
 	MMU::MMU()
 	{
-		if (s_instance == nullptr)
+		if (s_kernel == nullptr)
 			return;
 		
-		// Here we copy the s_instances paging structs since they are
+		// Here we copy the s_kernel paging structs since they are
 		// global for every process
 
-		uint64_t* global_pml4 = s_instance->m_highest_paging_struct;
+		LockGuard _(s_kernel->m_lock);
+
+		uint64_t* global_pml4 = s_kernel->m_highest_paging_struct;
 
 		uint64_t* pml4 = allocate_page_aligned_page();
 		for (uint32_t pml4e = 0; pml4e < 512; pml4e++)
@@ -136,6 +141,10 @@ namespace Kernel
 
 	void MMU::load()
 	{
+		uintptr_t rsp;
+		read_rsp(rsp);
+		ASSERT(!is_page_free(rsp & PAGE_MASK));
+
 		asm volatile("movq %0, %%cr3" :: "r"(m_highest_paging_struct));
 		s_current = this;
 	}
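
The new assertion in load() guards against switching to an address space that does not map the currently executing stack: read_rsp() grabs the stack pointer and is_page_free() verifies that its page is mapped before the mov to cr3, so a bad page table fails loudly instead of faulting right after the switch. read_rsp itself is not shown in this diff; judging from the new <kernel/Arch.h> include it is presumably a macro along these lines (an assumption, not the actual header):

	// Hypothetical shape of read_rsp: store the current stack pointer
	// into the given lvalue.
	#define read_rsp(rsp) asm volatile("movq %%rsp, %0" : "=r"(rsp))
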
@@ -148,41 +157,39 @@ namespace Kernel
 
 	void MMU::identity_map_range(paddr_t address, size_t size, flags_t flags)
 	{
-		paddr_t s_page = address & PAGE_MASK;
-		paddr_t e_page = (address + size - 1) & PAGE_MASK;
-		for (paddr_t page = s_page; page <= e_page; page += PAGE_SIZE)
-			identity_map_page(page, flags);
+		LockGuard _(m_lock);
+
+		paddr_t s_page = address / PAGE_SIZE;
+		paddr_t e_page = (address + size - 1) / PAGE_SIZE;
+		for (paddr_t page = s_page; page <= e_page; page++)
+			identity_map_page(page * PAGE_SIZE, flags);
 	}
 
 	void MMU::unmap_page(vaddr_t address)
 	{
+		LockGuard _(m_lock);
+
 		ASSERT((address >> 48) == 0);
 
 		address &= PAGE_MASK;
 
+		if (is_page_free(address))
+		{
+			dwarnln("unmapping unmapped page {8H}", address);
+			return;
+		}
+
 		uint64_t pml4e = (address >> 39) & 0x1FF;
 		uint64_t pdpte = (address >> 30) & 0x1FF;
 		uint64_t pde   = (address >> 21) & 0x1FF;
 		uint64_t pte   = (address >> 12) & 0x1FF;
 		
 		uint64_t* pml4 = m_highest_paging_struct;
-		if (!(pml4[pml4e] & Flags::Present))
-			return;
-
 		uint64_t* pdpt = (uint64_t*)(pml4[pml4e] & PAGE_MASK);
-		if (!(pdpt[pdpte] & Flags::Present))
-			return;
-
 		uint64_t* pd   = (uint64_t*)(pdpt[pdpte] & PAGE_MASK);
-		if (!(pd[pde] & Flags::Present))
-			return;
-
 		uint64_t* pt   = (uint64_t*)(pd[pde]     & PAGE_MASK);
-		if (!(pt[pte] & Flags::Present))
-			return;
 
 		pt[pte] = 0;
-
 		CLEANUP_STRUCTURE(pt);
 		pd[pde] = 0;
 		CLEANUP_STRUCTURE(pd);
@@ -193,14 +200,18 @@ namespace Kernel
 
 	void MMU::unmap_range(vaddr_t address, size_t size)
 	{
-		vaddr_t s_page = address & PAGE_MASK;
-		vaddr_t e_page = (address + size - 1) & PAGE_MASK;
-		for (vaddr_t page = s_page; page <= e_page; page += PAGE_SIZE)
-			unmap_page(page);
+		LockGuard _(m_lock);
+
+		vaddr_t s_page = address / PAGE_SIZE;
+		vaddr_t e_page = (address + size - 1) / PAGE_SIZE;
+		for (vaddr_t page = s_page; page <= e_page; page++)
+			unmap_page(page * PAGE_SIZE);
 	}
 
 	void MMU::map_page_at(paddr_t paddr, vaddr_t vaddr, flags_t flags)
 	{
+		LockGuard _(m_lock);
+
 		ASSERT((paddr >> 48) == 0);
 		ASSERT((vaddr >> 48) == 0);
 
@@ -245,6 +256,8 @@ namespace Kernel
 
 	uint64_t MMU::get_page_data(vaddr_t address) const
 	{
+		LockGuard _(m_lock);
+
 		ASSERT((address >> 48) == 0);
 		ASSERT(address % PAGE_SIZE == 0);
 
@@ -284,6 +297,8 @@ namespace Kernel
 
 	vaddr_t MMU::get_free_page() const
 	{
+		LockGuard _(m_lock);
+
 		// Try to find free page that can be mapped without
 		// allocations (page table with unused entries)
 		vaddr_t* pml4 = m_highest_paging_struct;
@@ -332,6 +347,8 @@ namespace Kernel
 
 	vaddr_t MMU::get_free_contiguous_pages(size_t page_count) const
 	{
+		LockGuard _(m_lock);
+
 		for (vaddr_t address = PAGE_SIZE; !(address >> 48); address += PAGE_SIZE)
 		{
 			bool valid { true };
@@ -339,7 +356,7 @@ namespace Kernel
 			{
 				if (get_page_flags(address + page * PAGE_SIZE) & Flags::Present)
 				{
-					address += page;
+					address += page * PAGE_SIZE;
 					valid = false;
 					break;
 				}
@@ -359,8 +376,10 @@ namespace Kernel
 
 	bool MMU::is_range_free(vaddr_t start, size_t size) const
 	{
+		LockGuard _(m_lock);
+		
 		vaddr_t first_page = start / PAGE_SIZE;
-		vaddr_t last_page = BAN::Math::div_round_up<vaddr_t>(start + size, PAGE_SIZE);
+		vaddr_t last_page = (start + size - 1) / PAGE_SIZE;
 		for (vaddr_t page = first_page; page <= last_page; page++)
 			if (!is_page_free(page * PAGE_SIZE))
 				return false;

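Every public method of the MMU above now opens with LockGuard _(m_lock);. The guard class comes from the new <kernel/LockGuard.h> include, which is not part of this diff; a minimal RAII wrapper compatible with these call sites would look like this (a sketch under that assumption):

	// Scope-based guard over anything exposing lock()/unlock(). Because
	// m_lock is a RecursiveSpinLock, unmap_range() (guard at depth 1) can
	// call unmap_page() (guard at depth 2) on the same thread without
	// deadlocking.
	template<typename Lock>
	class LockGuard
	{
	public:
		explicit LockGuard(Lock& lock) : m_lock(lock) { m_lock.lock(); }
		~LockGuard() { m_lock.unlock(); }
		LockGuard(const LockGuard&) = delete;
		LockGuard& operator=(const LockGuard&) = delete;
	private:
		Lock& m_lock;
	};
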
@@ -1,6 +1,7 @@
 #pragma once
 
-#include <kernel/Memory/Heap.h>
+#include <kernel/Memory/Types.h>
+#include <kernel/SpinLock.h>
 
 namespace Kernel
 {
@@ -18,7 +19,7 @@ namespace Kernel
 
 	public:
 		static void initialize();
-		static MMU& get();
+		static MMU& kernel();
 
 		static MMU& current();
 
@@ -44,12 +45,16 @@ namespace Kernel
 
 		void load();
 
+		void lock() const { m_lock.lock(); }
+		void unlock() const { m_lock.unlock(); }
+
 	private:
 		uint64_t get_page_data(vaddr_t) const;
 		void initialize_kernel();
 
 	private:
-		uint64_t* m_highest_paging_struct;
+		uint64_t*					m_highest_paging_struct { nullptr };
+		mutable RecursiveSpinLock	m_lock;
 	};
 
 }

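Two details in this header: m_lock is mutable so the const query methods (get_page_data(), get_free_page(), is_range_free(), ...) can still take it, and the public lock()/unlock() pair lets a caller keep the MMU locked across several calls. A hypothetical caller (function name and flow are illustrative, not from this commit):

	// Reserve a free page and map it as one atomic step; the LockGuards
	// inside get_free_page() and map_page_at() only bump the recursion
	// depth while the outer lock is held.
	void map_one_page(Kernel::MMU& mmu, Kernel::paddr_t paddr)
	{
		mmu.lock();
		Kernel::vaddr_t vaddr = mmu.get_free_page();
		mmu.map_page_at(paddr, vaddr, Kernel::MMU::Flags::ReadWrite | Kernel::MMU::Flags::Present);
		mmu.unlock();
	}
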
@@ -68,7 +68,7 @@ namespace Kernel
 
 		static Process& current() { return Thread::current().process(); }
 
-		MMU& mmu() { return m_mmu ? *m_mmu : MMU::get(); }
+		MMU& mmu() { return m_mmu ? *m_mmu : MMU::kernel(); }
 
 	private:
 		Process(pid_t);

@@ -26,6 +26,7 @@ namespace Kernel
 		void unblock_threads(Semaphore*);
 
 		Thread& current_thread();
+		static pid_t current_tid();
 
 	private:
 		Scheduler() = default;

@@ -49,6 +49,8 @@ namespace Kernel
 		uintptr_t interrupt_stack_size() const { return m_interrupt_stack_size; }
 
 		static Thread& current() ;
+		static pid_t current_tid();
+
 		Process& process();
 		bool has_process() const { return m_process; }
 
@@ -105,8 +105,8 @@ namespace Kernel
 		if (rsdp->revision >= 2)
 		{
 			const XSDT* xsdt = (const XSDT*)rsdp->xsdt_address;
-			MMU::get().identity_map_page((uintptr_t)xsdt, MMU::Flags::Present);
-			BAN::ScopeGuard _([xsdt] { MMU::get().unmap_page((uintptr_t)xsdt); });
+			MMU::kernel().identity_map_page((uintptr_t)xsdt, MMU::Flags::Present);
+			BAN::ScopeGuard _([xsdt] { MMU::kernel().unmap_page((uintptr_t)xsdt); });
 
 			if (memcmp(xsdt->signature, "XSDT", 4) != 0)
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
@@ -120,8 +120,8 @@ namespace Kernel
 		else
 		{
 			const RSDT* rsdt = (const RSDT*)(uintptr_t)rsdp->rsdt_address;
-			MMU::get().identity_map_page((uintptr_t)rsdt, MMU::Flags::Present);
-			BAN::ScopeGuard _([rsdt] { MMU::get().unmap_page((uintptr_t)rsdt); });
+			MMU::kernel().identity_map_page((uintptr_t)rsdt, MMU::Flags::Present);
+			BAN::ScopeGuard _([rsdt] { MMU::kernel().unmap_page((uintptr_t)rsdt); });
 
 			if (memcmp(rsdt->signature, "RSDT", 4) != 0)
 				return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
@@ -133,13 +133,13 @@ namespace Kernel
 			m_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
 		}
 
-		MMU::get().identity_map_range(m_header_table, m_entry_count * m_entry_size, MMU::Flags::Present);
+		MMU::kernel().identity_map_range(m_header_table, m_entry_count * m_entry_size, MMU::Flags::Present);
 
 		for (uint32_t i = 0; i < m_entry_count; i++)
 		{
 			auto* header = get_header_from_index(i);
-			MMU::get().identity_map_page((uintptr_t)header, MMU::Flags::Present);
-			MMU::get().identity_map_range((uintptr_t)header, header->length, MMU::Flags::Present);
+			MMU::kernel().identity_map_page((uintptr_t)header, MMU::Flags::Present);
+			MMU::kernel().identity_map_range((uintptr_t)header, header->length, MMU::Flags::Present);
 		}
 
 		return {};

@@ -146,10 +146,10 @@ APIC* APIC::create()
 		return nullptr;
 	}
 
-	MMU::get().identity_map_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
+	MMU::kernel().identity_map_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
 	for (auto& io_apic : apic->m_io_apics)
 	{
-		MMU::get().identity_map_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
+		MMU::kernel().identity_map_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
 		io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
 	}
 
@@ -130,7 +130,7 @@ namespace Kernel
 		ASSERT(m_general_allocator == nullptr);
 		if (m_mmu)
 		{
-			MMU::get().load();
+			MMU::kernel().load();
 			delete m_mmu;
 		}
 		for (auto paddr : m_allocated_pages)

@@ -52,9 +52,17 @@ namespace Kernel
 		return m_current_thread ? *m_current_thread->thread : *m_idle_thread;
 	}
 
+	pid_t Scheduler::current_tid()
+	{
+		if (s_instance == nullptr)
+			return 0;
+		return Scheduler::get().current_thread().tid();
+	}
+
 	void Scheduler::reschedule()
 	{
 		VERIFY_CLI();
+
 		ASSERT(InterruptController::get().is_in_service(PIT_IRQ));
 		InterruptController::get().eoi(PIT_IRQ);
 
@@ -174,7 +182,7 @@ namespace Kernel
 			GDT::set_tss_stack(current.interrupt_stack_base() + current.interrupt_stack_size());
 		}
 		else
-			MMU::get().load();
+			MMU::kernel().load();
 
 		switch (current.state())
 		{

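The s_instance null check is what the commit message means by "works before the Scheduler is initialized": MMU::initialize() runs first and already takes the MMU lock, so current_tid() must not touch the Scheduler singleton before it exists. Roughly (boot order inferred from the commit message; the initialize() names are assumptions):

	// Assumed early-boot sequence: every RecursiveSpinLock taken before
	// the Scheduler exists sees Scheduler::current_tid() == 0.
	MMU::initialize();       // locks MMU::m_lock; current_tid() returns 0
	// ... drivers identity-map memory through MMU::kernel() ...
	Scheduler::initialize(); // assumed name; real tids from here on
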
@@ -1,5 +1,5 @@
+#include <kernel/Scheduler.h>
 #include <kernel/SpinLock.h>
-#include <kernel/Thread.h>
 
 namespace Kernel
 {
@@ -25,7 +25,7 @@ namespace Kernel
 	void RecursiveSpinLock::lock()
 	{
 		// FIXME: is this thread safe?
-		if (m_locker == Thread::current().tid())
+		if (m_locker == Scheduler::current_tid())
 		{
 			m_lock_depth++;
 		}
@@ -33,13 +33,15 @@ namespace Kernel
 		{
 			m_lock.lock();
 			ASSERT(m_locker == 0);
-			m_locker = Thread::current().tid();
+			m_locker = Scheduler::current_tid();
 			m_lock_depth = 1;
 		}
 	}
 
 	void RecursiveSpinLock::unlock()
 	{
+		ASSERT(m_lock_depth > 0);
+
 		m_lock_depth--;
 		if (m_lock_depth == 0)
 		{

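Net semantics of RecursiveSpinLock after this change, as a usage note:

	RecursiveSpinLock lock;
	lock.lock();      // m_locker = current tid, m_lock_depth = 1
	lock.lock();      // same tid: m_lock_depth = 2, no spinning
	lock.unlock();    // m_lock_depth = 1, still owned
	lock.unlock();    // m_lock_depth = 0, lock released
	// lock.unlock(); // would now trip the new ASSERT(m_lock_depth > 0)
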
@@ -36,7 +36,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
 		return nullptr;
 	}
 
-	MMU::get().identity_map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
+	MMU::kernel().identity_map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
 
 	auto* driver = new VesaTerminalDriver(
 		framebuffer.width,
@@ -53,7 +53,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
 
 VesaTerminalDriver::~VesaTerminalDriver()
 {
-	MMU::get().unmap_range(m_address, m_pitch * m_height);
+	MMU::kernel().unmap_range(m_address, m_pitch * m_height);
 }
 
 void VesaTerminalDriver::set_pixel(uint32_t offset, Color color)