Kernel: Move SpinLock definition to header and fix Scheduler locking

This patch allows inlining of spinlocks :)
Bananymous 2024-03-04 22:36:41 +02:00
parent 9c36d7c338
commit 418bc54f2b
5 changed files with 64 additions and 100 deletions
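The inlining claim follows from the definitions becoming visible in every translation unit: previously lock() and unlock() lived out-of-line in kernel/Lock/SpinLock.cpp, so every acquire and release paid a function call. A minimal sketch of the acquire/release spin-lock pattern the kernel code implements, with std::atomic and int standing in for BAN::Atomic and ProcessorID (illustrative only, not banan-os code):

    #include <atomic>

    // Toy spin lock mirroring the kernel's pattern (sketch, not kernel code).
    struct ToySpinLock
    {
        static constexpr int NO_OWNER = -1; // stand-in for PROCESSOR_NONE

        void lock(int cpu_id)
        {
            int expected = NO_OWNER;
            // Acquire ordering: accesses in the critical section cannot be
            // reordered before the successful exchange.
            while (!owner.compare_exchange_weak(expected, cpu_id, std::memory_order_acquire))
            {
                expected = NO_OWNER;     // compare_exchange rewrites 'expected' on failure
                __builtin_ia32_pause();  // same spin hint the kernel uses
            }
        }

        void unlock()
        {
            // Release ordering: writes in the critical section become visible
            // to whichever CPU acquires the lock next.
            owner.store(NO_OWNER, std::memory_order_release);
        }

        std::atomic<int> owner { NO_OWNER };
    };

With the body in a header, the compiler can fold this loop directly into each caller instead of emitting a call into SpinLock.cpp.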

kernel/CMakeLists.txt

@@ -42,7 +42,6 @@ set(KERNEL_SOURCES
kernel/Input/PS2/Mouse.cpp
kernel/InterruptController.cpp
kernel/kernel.cpp
-kernel/Lock/SpinLock.cpp
kernel/Memory/DMARegion.cpp
kernel/Memory/FileBackedRegion.cpp
kernel/Memory/Heap.cpp

kernel/Lock/SpinLock.h

@@ -1,5 +1,6 @@
#pragma once
+#include <BAN/Assert.h>
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <kernel/Processor.h>
@@ -17,11 +18,35 @@ namespace Kernel
public:
SpinLock() = default;
-InterruptState lock();
-void unlock(InterruptState state);
+InterruptState lock()
+{
+auto state = Processor::get_interrupt_state();
+Processor::set_interrupt_state(InterruptState::Disabled);
+auto id = Processor::current_id();
+ASSERT(m_locker != id);
+while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
+__builtin_ia32_pause();
+return state;
+}
+void unlock(InterruptState state)
+{
+ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
+ASSERT(m_locker == Processor::current_id());
+m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
+Processor::set_interrupt_state(state);
+}
+bool current_processor_has_lock() const
+{
+return m_locker == Processor::current_id();
+}
private:
BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
};
class RecursiveSpinLock
@@ -32,45 +57,44 @@ namespace Kernel
public:
RecursiveSpinLock() = default;
-InterruptState lock();
-void unlock(InterruptState state);
-private:
-BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
-uint32_t m_lock_depth { 0 };
-};
-class SpinLockUnsafe
-{
-BAN_NON_COPYABLE(SpinLockUnsafe);
-BAN_NON_MOVABLE(SpinLockUnsafe);
-public:
-SpinLockUnsafe() = default;
InterruptState lock()
{
-auto id = Processor::current_id();
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
-while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
-__builtin_ia32_pause();
+auto id = Processor::current_id();
+if (m_locker == id)
+ASSERT(m_lock_depth > 0);
+else
+{
+while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
+__builtin_ia32_pause();
+ASSERT(m_lock_depth == 0);
+}
+m_lock_depth++;
return state;
}
void unlock(InterruptState state)
{
-m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
+ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
+ASSERT(m_locker == Processor::current_id());
+ASSERT(m_lock_depth > 0);
+if (--m_lock_depth == 0)
+m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
-bool is_locked() const { return m_locker != PROCESSOR_NONE; }
+bool current_processor_has_lock() const
+{
+return m_locker == Processor::current_id();
+}
private:
BAN::Atomic<ProcessorID> m_locker { PROCESSOR_NONE };
+uint32_t m_lock_depth { 0 };
};
template<typename Lock>
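The template<typename Lock> line ending the hunk presumably introduces the RAII guard (SpinLockGuard) seen in the old Scheduler code below. Its definition is outside this hunk; a wrapper consistent with the lock()/unlock() signatures would look roughly like this (a sketch, not the actual implementation):

    // Hypothetical guard matching the header's lock()/unlock() contract.
    template<typename Lock>
    class SpinLockGuard
    {
        BAN_NON_COPYABLE(SpinLockGuard);
        BAN_NON_MOVABLE(SpinLockGuard);

    public:
        SpinLockGuard(Lock& lock)
            : m_lock(lock)
            , m_state(m_lock.lock()) // saves the interrupt state from before locking
        {}

        ~SpinLockGuard()
        {
            m_lock.unlock(m_state);  // restores that state on scope exit
        }

    private:
        Lock&          m_lock;       // declared first, so initialized first
        InterruptState m_state;
    };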

kernel/Scheduler.h

@@ -62,7 +62,7 @@ namespace Kernel
Semaphore* semaphore;
};
-SpinLockUnsafe m_lock;
+SpinLock m_lock;
Thread* m_idle_thread { nullptr };
BAN::LinkedList<SchedulerThread> m_active_threads;

kernel/Lock/SpinLock.cpp (deleted)

@ -1,61 +0,0 @@
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Scheduler.h>
// FIXME: try to move these to header
namespace Kernel
{
InterruptState SpinLock::lock()
{
auto id = Processor::current_id();
ASSERT(m_locker != id);
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
__builtin_ia32_pause();
return state;
}
void SpinLock::unlock(InterruptState state)
{
ASSERT(m_locker == Processor::current_id());
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
InterruptState RecursiveSpinLock::lock()
{
auto id = Processor::current_id();
auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Disabled);
if (id == m_locker)
ASSERT(m_lock_depth > 0);
else
{
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
__builtin_ia32_pause();
ASSERT(m_lock_depth == 0);
}
m_lock_depth++;
return state;
}
void RecursiveSpinLock::unlock(InterruptState state)
{
ASSERT(m_locker == Processor::current_id());
ASSERT(m_lock_depth > 0);
if (--m_lock_depth == 0)
m_locker.store(PROCESSOR_NONE, BAN::MemoryOrder::memory_order_release);
Processor::set_interrupt_state(state);
}
}
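This deleted translation unit is the FIXME being resolved: its definitions moved into the header essentially verbatim, apart from the new interrupt-state and ownership assertions. The depth counter is what separates RecursiveSpinLock from SpinLock; a hypothetical re-entry on one CPU (sketch, not kernel code):

    // A plain SpinLock would fail its ASSERT(m_locker != id) on the nested
    // lock(); RecursiveSpinLock counts depth instead.
    RecursiveSpinLock g_lock;

    void inner()
    {
        auto state = g_lock.lock();  // same CPU already owns it: depth 1 -> 2, no spin
        /* ... */
        g_lock.unlock(state);        // depth 2 -> 1; the lock stays held
    }

    void outer()
    {
        auto state = g_lock.lock();  // depth 0 -> 1: wins the CAS, interrupts disabled
        inner();                     // nested acquisition is safe
        g_lock.unlock(state);        // depth 1 -> 0: releases, restores interrupt state
    }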

kernel/Scheduler.cpp

@@ -64,7 +64,7 @@ namespace Kernel
auto state = m_lock.lock();
wake_threads();
if (save_current_thread())
-return m_lock.unlock(state);
+return Processor::set_interrupt_state(state);
advance_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
@@ -86,7 +86,7 @@ namespace Kernel
if (m_active_threads.empty() || &current_thread() != m_idle_thread)
return m_lock.unlock(state);
if (save_current_thread())
-return m_lock.unlock(state);
+return Processor::set_interrupt_state(state);
m_current_thread = m_active_threads.begin();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
@@ -94,7 +94,7 @@
void Scheduler::wake_threads()
{
-ASSERT(m_lock.is_locked());
+ASSERT(m_lock.current_processor_has_lock());
uint64_t current_time = SystemTimer::get().ms_since_boot();
while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
@@ -124,7 +124,7 @@
void Scheduler::advance_current_thread()
{
-ASSERT(m_lock.is_locked());
+ASSERT(m_lock.current_processor_has_lock());
if (m_active_threads.empty())
{
@@ -137,7 +137,7 @@
void Scheduler::remove_and_advance_current_thread()
{
-ASSERT(m_lock.is_locked());
+ASSERT(m_lock.current_processor_has_lock());
ASSERT(m_current_thread);
@@ -158,7 +158,7 @@
// after getting the rsp
ALWAYS_INLINE bool Scheduler::save_current_thread()
{
-ASSERT(m_lock.is_locked());
+ASSERT(m_lock.current_processor_has_lock());
uintptr_t rsp, rip;
push_callee_saved();
@@ -209,7 +209,7 @@
void Scheduler::execute_current_thread_locked()
{
-ASSERT(m_lock.is_locked());
+ASSERT(m_lock.current_processor_has_lock());
load_temp_stack();
PageTable::kernel().load();
execute_current_thread_stack_loaded();
@@ -218,7 +218,7 @@
NEVER_INLINE void Scheduler::execute_current_thread_stack_loaded()
{
-ASSERT(m_lock.is_locked());
+ASSERT(m_lock.current_processor_has_lock());
#if SCHEDULER_VERIFY_STACK
vaddr_t rsp;
@@ -281,7 +281,7 @@
void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
{
-ASSERT(m_lock.is_locked());
+ASSERT(m_lock.current_processor_has_lock());
if (save_current_thread())
return;
@@ -307,16 +307,18 @@
void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
{
-SpinLockGuard _(m_lock);
+auto state = m_lock.lock();
m_current_thread->semaphore = nullptr;
set_current_thread_sleeping_impl(wake_time);
+Processor::set_interrupt_state(state);
}
void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
{
-SpinLockGuard _(m_lock);
+auto state = m_lock.lock();
m_current_thread->semaphore = semaphore;
set_current_thread_sleeping_impl(wake_time);
+Processor::set_interrupt_state(state);
}
void Scheduler::unblock_threads(Semaphore* semaphore)
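The pattern behind the Scheduler changes: m_lock is now a plain, non-recursive SpinLock, and it is not released on every path through these functions. When save_current_thread() returns true, this thread was switched away from and later resumed, and the scheduler dropped m_lock in between, so a scope guard would unlock a lock the thread no longer holds. Hence the explicit lock() with a bare interrupt-state restore on the resume path. An annotated reading of the new set_current_thread_sleeping (comments are interpretation, not part of the commit):

    void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
    {
        // Cannot use SpinLockGuard: the matching unlock does not happen in this scope.
        auto state = m_lock.lock();
        m_current_thread->semaphore = nullptr;
        // Switches to another thread; m_lock is released during the switch, and
        // control returns here only once this thread is rescheduled.
        set_current_thread_sleeping_impl(wake_time);
        // On resume the lock is no longer ours to unlock; just restore interrupts.
        Processor::set_interrupt_state(state);
    }

The assertion change from is_locked() to current_processor_has_lock() is the same fix in miniature: on SMP, "somebody holds the lock" is too weak a guarantee; the invariant the scheduler needs is that this CPU holds it.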