Kernel: Optimize all SpinLocks. All locking operations are atomic

Bananymous 2024-01-30 12:39:37 +02:00
parent ca8e7b40bc
commit 95e861bcdd
2 changed files with 17 additions and 60 deletions
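The change replaces each lock's internal helper SpinLock with direct atomic ownership: a waiter claims the lock by compare-exchanging m_locker from -1 to its own tid. The diff does not show BAN::Atomic itself, so as a reading aid here is a minimal sketch of the acquisition primitive, assuming its compare_exchange behaves like std::atomic's compare_exchange_strong (the names below are illustrative, not from the repository):

#include <atomic>
#include <sys/types.h> // pid_t

// Atomically: if (locker == -1) { locker = tid; return true; } else return false;
static bool try_acquire(std::atomic<pid_t>& locker, pid_t tid)
{
	pid_t expected = -1;
	return locker.compare_exchange_strong(expected, tid);
}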

View File

@@ -35,9 +35,8 @@ namespace Kernel
 		bool is_locked() const;
 
 	private:
 		BAN::Atomic<pid_t> m_locker = -1;
 		BAN::Atomic<uint32_t> m_lock_depth = 0;
-		SpinLock m_lock;
 	};
 
 	class RecursivePrioritySpinLock
@@ -54,10 +53,9 @@ namespace Kernel
 		uint32_t lock_depth() const { return m_lock_depth; }
 
 	private:
-		pid_t m_locker = -1;
-		uint32_t m_queue_length = 0;
-		uint32_t m_lock_depth = 0;
-		SpinLock m_lock;
+		BAN::Atomic<pid_t> m_locker = -1;
+		BAN::Atomic<uint32_t> m_lock_depth = 0;
+		BAN::Atomic<uint32_t> m_queue_length = 0;
 	};
 
 }
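With the owner tid, recursion depth, and waiter count all held in BAN::Atomic members, the readers (is_locked(), lock_depth()) and the lock/unlock paths no longer need the removed SpinLock. A rough std::atomic stand-in for the new field layout, with the meaning of each field inferred from how lock()/unlock() use it (struct name is hypothetical):

#include <atomic>
#include <cstdint>
#include <sys/types.h>

struct PrioritySpinLockFields
{
	std::atomic<pid_t>    m_locker{ -1 };      // tid of the current owner, -1 when free
	std::atomic<uint32_t> m_lock_depth{ 0 };   // recursion depth held by the owner
	std::atomic<uint32_t> m_queue_length{ 0 }; // number of waiting kernel (priority) threads
};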

View File

@@ -25,40 +25,21 @@ namespace Kernel
 	void RecursiveSpinLock::lock()
 	{
 		pid_t tid = Scheduler::current_tid();
-		while (true)
+		if (m_locker != tid)
 		{
-			m_lock.lock();
-			if (m_locker == tid)
-			{
-				m_lock_depth++;
-				break;
-			}
-			if (m_locker == -1)
-			{
-				m_locker = tid;
-				m_lock_depth = 1;
-				break;
-			}
-			m_lock.unlock();
+			while (!m_locker.compare_exchange(-1, tid))
+				Scheduler::get().reschedule();
+			ASSERT(m_lock_depth == 0);
 		}
-		m_lock.unlock();
+		m_lock_depth++;
 	}
 
 	void RecursiveSpinLock::unlock()
 	{
-		m_lock.lock();
 		ASSERT(m_lock_depth > 0);
 		ASSERT(m_locker == Scheduler::current_tid());
-		m_lock_depth--;
-		if (m_lock_depth == 0)
+		if (--m_lock_depth == 0)
 			m_locker = -1;
-		m_lock.unlock();
 	}
 
 	bool RecursiveSpinLock::is_locked() const
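The same lock()/unlock() flow can be written with the standard library for illustration. The sketch below mirrors the new RecursiveSpinLock: spin with compare-exchange until the lock is free, track recursion depth, and clear the owner only on the outermost unlock. std::this_thread::yield() stands in for Scheduler::get().reschedule(), assert() for ASSERT, and the caller's tid is passed in instead of Scheduler::current_tid(); these substitutions are assumptions, not kernel code.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>
#include <sys/types.h>

class RecursiveSpinLockSketch
{
public:
	void lock(pid_t tid)
	{
		if (m_locker.load() != tid)
		{
			// Spin until the lock is free (-1) and we claim it in one atomic step.
			pid_t expected = -1;
			while (!m_locker.compare_exchange_strong(expected, tid))
			{
				expected = -1;             // compare_exchange rewrites expected on failure
				std::this_thread::yield(); // stand-in for Scheduler::get().reschedule()
			}
			assert(m_lock_depth.load() == 0);
		}
		m_lock_depth++; // first acquisition or recursive re-entry by the owner
	}

	void unlock(pid_t tid)
	{
		assert(m_lock_depth.load() > 0);
		assert(m_locker.load() == tid);
		if (--m_lock_depth == 0)
			m_locker.store(-1); // release only on the outermost unlock
	}

private:
	std::atomic<pid_t>    m_locker{ -1 };
	std::atomic<uint32_t> m_lock_depth{ 0 };
};

A thread that already owns the lock only bumps the depth counter; the owner field changes exactly once per outermost lock/unlock pair, which is why no separate guard lock is needed.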
@@ -71,38 +52,20 @@ namespace Kernel
 		pid_t tid = Scheduler::current_tid();
 
 		bool has_priority = !Thread::current().is_userspace();
 		if (has_priority)
-		{
-			m_lock.lock();
 			m_queue_length++;
-			m_lock.unlock();
-		}
 
-		while (true)
+		if (m_locker != tid)
 		{
-			m_lock.lock();
-			if (m_locker == tid)
-			{
-				m_lock_depth++;
-				break;
-			}
-			if (m_locker == -1 && (has_priority || m_queue_length == 0))
-			{
-				m_locker = tid;
-				m_lock_depth = 1;
-				break;
-			}
-			m_lock.unlock();
+			while (!((has_priority || m_queue_length == 0) && m_locker.compare_exchange(-1, tid)))
+				Scheduler::get().reschedule();
+			ASSERT(m_lock_depth == 0);
 		}
-		m_lock.unlock();
+		m_lock_depth++;
 	}
 
 	void RecursivePrioritySpinLock::unlock()
 	{
-		m_lock.lock();
 		ASSERT(m_lock_depth > 0);
 		ASSERT(m_locker == Scheduler::current_tid());
@@ -110,12 +73,8 @@ namespace Kernel
 		if (has_priority)
 			m_queue_length--;
-		m_lock_depth--;
-		if (m_lock_depth == 0)
+		if (--m_lock_depth == 0)
 			m_locker = -1;
-		m_lock.unlock();
 	}
 
 	bool RecursivePrioritySpinLock::is_locked() const
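The priority variant adds one gate on top of the same scheme: kernel (non-userspace) threads announce themselves in m_queue_length, and a thread without priority may only claim the lock while no priority waiter is queued. A sketch of that gating, with the same assumed substitutions as above (tid and has_priority passed in by the caller):

#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>
#include <sys/types.h>

class RecursivePrioritySpinLockSketch
{
public:
	void lock(pid_t tid, bool has_priority)
	{
		if (has_priority)
			m_queue_length++; // announce a priority waiter

		if (m_locker.load() != tid)
		{
			pid_t expected = -1;
			// Non-priority threads must also wait for the priority queue to drain.
			while (!((has_priority || m_queue_length.load() == 0)
					&& m_locker.compare_exchange_strong(expected, tid)))
			{
				expected = -1;
				std::this_thread::yield();
			}
			assert(m_lock_depth.load() == 0);
		}
		m_lock_depth++;
	}

	void unlock(pid_t tid, bool has_priority)
	{
		assert(m_lock_depth.load() > 0);
		assert(m_locker.load() == tid);
		if (has_priority)
			m_queue_length--;
		if (--m_lock_depth == 0)
			m_locker.store(-1);
	}

private:
	std::atomic<pid_t>    m_locker{ -1 };
	std::atomic<uint32_t> m_lock_depth{ 0 };
	std::atomic<uint32_t> m_queue_length{ 0 };
};

Favoring kernel threads this way keeps userspace lockers from starving kernel paths: as long as any kernel thread is counted in m_queue_length, userspace acquisitions back off and reschedule.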