Kernel: Optimize all SpinLocks — all locking operations are now atomic

This commit is contained in:
Bananymous 2024-01-30 12:39:37 +02:00
parent ca8e7b40bc
commit 95e861bcdd
2 changed files with 17 additions and 60 deletions

View File

@ -35,9 +35,8 @@ namespace Kernel
bool is_locked() const;
private:
BAN::Atomic<pid_t> m_locker = -1;
BAN::Atomic<uint32_t> m_lock_depth = 0;
SpinLock m_lock;
BAN::Atomic<pid_t> m_locker = -1;
BAN::Atomic<uint32_t> m_lock_depth = 0;
};
class RecursivePrioritySpinLock
@ -54,10 +53,9 @@ namespace Kernel
uint32_t lock_depth() const { return m_lock_depth; }
private:
pid_t m_locker = -1;
uint32_t m_queue_length = 0;
uint32_t m_lock_depth = 0;
SpinLock m_lock;
BAN::Atomic<pid_t> m_locker = -1;
BAN::Atomic<uint32_t> m_lock_depth = 0;
BAN::Atomic<uint32_t> m_queue_length = 0;
};
}

View File

@ -25,40 +25,21 @@ namespace Kernel
void RecursiveSpinLock::lock()
{
pid_t tid = Scheduler::current_tid();
while (true)
if (m_locker != tid)
{
m_lock.lock();
if (m_locker == tid)
{
m_lock_depth++;
break;
}
if (m_locker == -1)
{
m_locker = tid;
m_lock_depth = 1;
break;
}
m_lock.unlock();
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
ASSERT(m_lock_depth == 0);
}
m_lock.unlock();
m_lock_depth++;
}
void RecursiveSpinLock::unlock()
{
m_lock.lock();
ASSERT(m_lock_depth > 0);
ASSERT(m_locker == Scheduler::current_tid());
m_lock_depth--;
if (m_lock_depth == 0)
if (--m_lock_depth == 0)
m_locker = -1;
m_lock.unlock();
}
bool RecursiveSpinLock::is_locked() const
@ -71,38 +52,20 @@ namespace Kernel
pid_t tid = Scheduler::current_tid();
bool has_priority = !Thread::current().is_userspace();
if (has_priority)
{
m_lock.lock();
m_queue_length++;
m_lock.unlock();
}
while (true)
if (m_locker != tid)
{
m_lock.lock();
if (m_locker == tid)
{
m_lock_depth++;
break;
}
if (m_locker == -1 && (has_priority || m_queue_length == 0))
{
m_locker = tid;
m_lock_depth = 1;
break;
}
m_lock.unlock();
while (!((has_priority || m_queue_length == 0) && m_locker.compare_exchange(-1, tid)))
Scheduler::get().reschedule();
ASSERT(m_lock_depth == 0);
}
m_lock.unlock();
m_lock_depth++;
}
void RecursivePrioritySpinLock::unlock()
{
m_lock.lock();
ASSERT(m_lock_depth > 0);
ASSERT(m_locker == Scheduler::current_tid());
@ -110,12 +73,8 @@ namespace Kernel
if (has_priority)
m_queue_length--;
m_lock_depth--;
if (m_lock_depth == 0)
if (--m_lock_depth == 0)
m_locker = -1;
m_lock.unlock();
}
bool RecursivePrioritySpinLock::is_locked() const