Kernel: MMU::get() is now MMU::kernel
MMU can now be locked with a RecursiveSpinLock. Scheduler now has current_tid(), which works before the Scheduler is initialized. This allows RecursiveSpinLock usage early on.
This commit is contained in:
@@ -105,8 +105,8 @@ namespace Kernel
|
||||
if (rsdp->revision >= 2)
|
||||
{
|
||||
const XSDT* xsdt = (const XSDT*)rsdp->xsdt_address;
|
||||
MMU::get().identity_map_page((uintptr_t)xsdt, MMU::Flags::Present);
|
||||
BAN::ScopeGuard _([xsdt] { MMU::get().unmap_page((uintptr_t)xsdt); });
|
||||
MMU::kernel().identity_map_page((uintptr_t)xsdt, MMU::Flags::Present);
|
||||
BAN::ScopeGuard _([xsdt] { MMU::kernel().unmap_page((uintptr_t)xsdt); });
|
||||
|
||||
if (memcmp(xsdt->signature, "XSDT", 4) != 0)
|
||||
return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
|
||||
@@ -120,8 +120,8 @@ namespace Kernel
|
||||
else
|
||||
{
|
||||
const RSDT* rsdt = (const RSDT*)(uintptr_t)rsdp->rsdt_address;
|
||||
MMU::get().identity_map_page((uintptr_t)rsdt, MMU::Flags::Present);
|
||||
BAN::ScopeGuard _([rsdt] { MMU::get().unmap_page((uintptr_t)rsdt); });
|
||||
MMU::kernel().identity_map_page((uintptr_t)rsdt, MMU::Flags::Present);
|
||||
BAN::ScopeGuard _([rsdt] { MMU::kernel().unmap_page((uintptr_t)rsdt); });
|
||||
|
||||
if (memcmp(rsdt->signature, "RSDT", 4) != 0)
|
||||
return BAN::Error::from_error_code(ErrorCode::ACPI_RootInvalid);
|
||||
@@ -133,13 +133,13 @@ namespace Kernel
|
||||
m_entry_count = (rsdt->length - sizeof(SDTHeader)) / 4;
|
||||
}
|
||||
|
||||
MMU::get().identity_map_range(m_header_table, m_entry_count * m_entry_size, MMU::Flags::Present);
|
||||
MMU::kernel().identity_map_range(m_header_table, m_entry_count * m_entry_size, MMU::Flags::Present);
|
||||
|
||||
for (uint32_t i = 0; i < m_entry_count; i++)
|
||||
{
|
||||
auto* header = get_header_from_index(i);
|
||||
MMU::get().identity_map_page((uintptr_t)header, MMU::Flags::Present);
|
||||
MMU::get().identity_map_range((uintptr_t)header, header->length, MMU::Flags::Present);
|
||||
MMU::kernel().identity_map_page((uintptr_t)header, MMU::Flags::Present);
|
||||
MMU::kernel().identity_map_range((uintptr_t)header, header->length, MMU::Flags::Present);
|
||||
}
|
||||
|
||||
return {};
|
||||
|
||||
@@ -146,10 +146,10 @@ APIC* APIC::create()
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
MMU::get().identity_map_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
|
||||
MMU::kernel().identity_map_page(apic->m_local_apic, MMU::Flags::ReadWrite | MMU::Flags::Present);
|
||||
for (auto& io_apic : apic->m_io_apics)
|
||||
{
|
||||
MMU::get().identity_map_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
|
||||
MMU::kernel().identity_map_page(io_apic.address, MMU::Flags::ReadWrite | MMU::Flags::Present);
|
||||
io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
|
||||
}
|
||||
|
||||
|
||||
@@ -130,7 +130,7 @@ namespace Kernel
|
||||
ASSERT(m_general_allocator == nullptr);
|
||||
if (m_mmu)
|
||||
{
|
||||
MMU::get().load();
|
||||
MMU::kernel().load();
|
||||
delete m_mmu;
|
||||
}
|
||||
for (auto paddr : m_allocated_pages)
|
||||
|
||||
@@ -52,16 +52,24 @@ namespace Kernel
|
||||
return m_current_thread ? *m_current_thread->thread : *m_idle_thread;
|
||||
}
|
||||
|
||||
pid_t Scheduler::current_tid()
|
||||
{
|
||||
if (s_instance == nullptr)
|
||||
return 0;
|
||||
return Scheduler::get().current_thread().tid();
|
||||
}
|
||||
|
||||
void Scheduler::reschedule()
|
||||
{
|
||||
VERIFY_CLI();
|
||||
|
||||
ASSERT(InterruptController::get().is_in_service(PIT_IRQ));
|
||||
InterruptController::get().eoi(PIT_IRQ);
|
||||
|
||||
if (PIT::ms_since_boot() <= m_last_reschedule)
|
||||
return;
|
||||
m_last_reschedule = PIT::ms_since_boot();
|
||||
|
||||
|
||||
wake_threads();
|
||||
|
||||
if (save_current_thread())
|
||||
@@ -174,7 +182,7 @@ namespace Kernel
|
||||
GDT::set_tss_stack(current.interrupt_stack_base() + current.interrupt_stack_size());
|
||||
}
|
||||
else
|
||||
MMU::get().load();
|
||||
MMU::kernel().load();
|
||||
|
||||
switch (current.state())
|
||||
{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <kernel/Scheduler.h>
|
||||
#include <kernel/SpinLock.h>
|
||||
#include <kernel/Thread.h>
|
||||
|
||||
namespace Kernel
|
||||
{
|
||||
@@ -25,7 +25,7 @@ namespace Kernel
|
||||
void RecursiveSpinLock::lock()
|
||||
{
|
||||
// FIXME: is this thread safe?
|
||||
if (m_locker == Thread::current().tid())
|
||||
if (m_locker == Scheduler::current_tid())
|
||||
{
|
||||
m_lock_depth++;
|
||||
}
|
||||
@@ -33,13 +33,15 @@ namespace Kernel
|
||||
{
|
||||
m_lock.lock();
|
||||
ASSERT(m_locker == 0);
|
||||
m_locker = Thread::current().tid();
|
||||
m_locker = Scheduler::current_tid();
|
||||
m_lock_depth = 1;
|
||||
}
|
||||
}
|
||||
|
||||
void RecursiveSpinLock::unlock()
|
||||
{
|
||||
ASSERT(m_lock_depth > 0);
|
||||
|
||||
m_lock_depth--;
|
||||
if (m_lock_depth == 0)
|
||||
{
|
||||
|
||||
@@ -36,7 +36,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
MMU::get().identity_map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
|
||||
MMU::kernel().identity_map_range(framebuffer.addr, framebuffer.pitch * framebuffer.height, MMU::Flags::UserSupervisor | MMU::Flags::ReadWrite | MMU::Flags::Present);
|
||||
|
||||
auto* driver = new VesaTerminalDriver(
|
||||
framebuffer.width,
|
||||
@@ -53,7 +53,7 @@ VesaTerminalDriver* VesaTerminalDriver::create()
|
||||
|
||||
VesaTerminalDriver::~VesaTerminalDriver()
|
||||
{
|
||||
MMU::get().unmap_range(m_address, m_pitch * m_height);
|
||||
MMU::kernel().unmap_range(m_address, m_pitch * m_height);
|
||||
}
|
||||
|
||||
void VesaTerminalDriver::set_pixel(uint32_t offset, Color color)
|
||||
|
||||
Reference in New Issue
Block a user