Kernel: Locks allow locking after locker is invalid

SpinLock and RecursiveSpinLock now allow locking after the initial
locker has become invalid. This allows us to kill threads even if they
are holding internal locks.
Bananymous 2023-07-27 18:36:44 +03:00
parent f609170a6a
commit 2f52001c6d
3 changed files with 22 additions and 8 deletions
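
In isolation, the technique is: the lock stores the tid of its holder instead of a plain flag, and a contender may claim it not only when it is free (m_locker == -1) but also when the recorded holder no longer exists. Below is a minimal user-space sketch of that pattern; std::mutex stands in for the kernel's CriticalScope, an unordered_set for the scheduler's thread table, and is_valid_tid() is a hypothetical helper, so this illustrates the idea rather than reproducing the banan-os code.

	// User-space sketch of the lock-stealing pattern (hypothetical,
	// not the banan-os source). g_table_lock stands in for
	// CriticalScope, g_live_tids for the scheduler's thread table.
	#include <cassert>
	#include <mutex>
	#include <thread>
	#include <unordered_set>

	static std::mutex g_table_lock;
	static std::unordered_set<int> g_live_tids;

	// Hypothetical stand-in for Scheduler::is_valid_tid().
	static bool is_valid_tid(int tid)
	{
		return g_live_tids.count(tid) != 0;
	}

	class StealableSpinLock
	{
	public:
		void lock(int tid)
		{
			for (;;)
			{
				{
					std::lock_guard<std::mutex> guard(g_table_lock);
					assert(m_locker != tid); // not a recursive lock
					// Claim the lock if it is free OR its holder is gone.
					if (m_locker == -1 || !is_valid_tid(m_locker))
					{
						m_locker = tid;
						return;
					}
				}
				// Contended: yield (the kernel reschedules here instead).
				std::this_thread::yield();
			}
		}

		void unlock(int tid)
		{
			std::lock_guard<std::mutex> guard(g_table_lock);
			assert(m_locker == tid); // only the holder may unlock
			m_locker = -1;
		}

	private:
		int m_locker = -1; // tid of the holder, -1 when free
	};

The tradeoff is that a thread killed mid-critical-section can leave the protected data in an inconsistent state; the commit accepts that risk in exchange for being able to kill threads stuck holding internal locks.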


@@ -19,7 +19,7 @@ namespace Kernel
 		bool is_locked() const;
 
 	private:
-		volatile int m_lock = 0;
+		volatile pid_t m_locker = -1;
 	};
 
 	class RecursiveSpinLock
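
For context, the surrounding declaration in the header presumably now reads roughly as follows. Only m_locker and is_locked() are visible in the hunk above; lock(), unlock(), and the rest are inferred from the implementation below and are assumptions.

	// Hypothetical reconstruction of the SpinLock declaration after
	// this commit; not verbatim source.
	#pragma once

	#include <sys/types.h> // pid_t; the actual include may differ

	namespace Kernel
	{

		class SpinLock
		{
		public:
			void lock();
			void unlock();
			bool is_locked() const;

		private:
			volatile pid_t m_locker = -1; // tid of the holder, -1 when free
		};

	}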


@ -1,7 +1,6 @@
#pragma once #pragma once
#include <kernel/PCI.h> #include <kernel/PCI.h>
#include <kernel/SpinLock.h>
#include <kernel/Storage/StorageController.h> #include <kernel/Storage/StorageController.h>
namespace Kernel namespace Kernel


@@ -1,24 +1,39 @@
 #include <kernel/Scheduler.h>
 #include <kernel/SpinLock.h>
+#include <kernel/CriticalScope.h>
 
 namespace Kernel
 {
 
 	void SpinLock::lock()
 	{
-		while (__sync_lock_test_and_set(&m_lock, 1))
-			while (m_lock)
-				Scheduler::get().reschedule();
+		pid_t tid = Scheduler::current_tid();
+		while (true)
+		{
+			{
+				CriticalScope _;
+				ASSERT(m_locker != tid);
+				if (m_locker == -1 || !Scheduler::is_valid_tid(m_locker))
+				{
+					m_locker = tid;
+					break;
+				}
+			}
+			Scheduler::get().reschedule();
+		}
 	}
 
 	void SpinLock::unlock()
 	{
-		__sync_lock_release(&m_lock);
+		CriticalScope _;
+		ASSERT(m_locker == Scheduler::current_tid());
+		m_locker = -1;
 	}
 
 	bool SpinLock::is_locked() const
 	{
-		return m_lock;
+		CriticalScope _;
+		return m_locker != -1;
 	}
 
 	void RecursiveSpinLock::lock()
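
The read-check-write of m_locker is not a single atomic instruction, so it is wrapped in CriticalScope, which presumably disables interrupts for its lifetime. A hypothetical sketch of such an RAII guard, assuming x86 flags semantics and not reproducing the banan-os source:

	// Hypothetical interrupt-disabling RAII guard in the style of
	// CriticalScope, assuming x86.
	#include <cstdint>

	class InterruptGuard
	{
	public:
		InterruptGuard()
		{
			// Save RFLAGS, then disable interrupts.
			asm volatile("pushf; pop %0; cli" : "=r"(m_flags) :: "memory");
		}

		~InterruptGuard()
		{
			// Re-enable interrupts only if they were enabled before (IF bit).
			if (m_flags & 0x200)
				asm volatile("sti" ::: "memory");
		}

	private:
		uintptr_t m_flags;
	};

On a single CPU this makes the check-and-claim atomic with respect to the scheduler; on SMP disabling local interrupts alone would not be enough, and a real implementation would also need an atomic instruction or a per-lock spin.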
@@ -33,7 +48,7 @@ namespace Kernel
 					m_lock_depth++;
 					break;
 				}
-				if (m_locker == -1)
+				if (m_locker == -1 || !Scheduler::is_valid_tid(m_locker))
 				{
 					m_locker = tid;
 					m_lock_depth = 1;
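
Only a fragment of RecursiveSpinLock::lock() is visible in this hunk. Pieced together from the fragment and the SpinLock::lock() structure above, the whole function plausibly reads as follows; treat it as a reconstruction, not verbatim source.

	// Hypothetical reconstruction of RecursiveSpinLock::lock() after
	// this commit, inferred from the hunk fragments above.
	void RecursiveSpinLock::lock()
	{
		pid_t tid = Scheduler::current_tid();
		while (true)
		{
			{
				CriticalScope _;
				if (m_locker == tid)
				{
					// Already held by this thread: just increase the depth.
					m_lock_depth++;
					break;
				}
				if (m_locker == -1 || !Scheduler::is_valid_tid(m_locker))
				{
					// Free, or held by a thread that no longer exists: claim it.
					m_locker = tid;
					m_lock_depth = 1;
					break;
				}
			}
			Scheduler::get().reschedule();
		}
	}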