Kernel: Locks allow locking after locker is invalid
SpinLock and RecursiveSpinLock now allow locking after the initial locker has become invalid. This allows us to kill threads even if they are holding internal locks.
This commit is contained in:
@@ -19,7 +19,7 @@ namespace Kernel
 		bool is_locked() const;

 	private:
-		volatile int m_lock = 0;
+		volatile pid_t m_locker = -1;
 	};

 	class RecursiveSpinLock
@@ -1,7 +1,6 @@
 #pragma once

 #include <kernel/PCI.h>
 #include <kernel/SpinLock.h>
 #include <kernel/Storage/StorageController.h>

 namespace Kernel
(NOTE: the hunk header says one line was removed here, but the +/- markers were lost in extraction and the removed line cannot be identified from this view.)
@@ -1,24 +1,39 @@
 #include <kernel/Scheduler.h>
 #include <kernel/SpinLock.h>
+#include <kernel/CriticalScope.h>

 namespace Kernel
 {

 	void SpinLock::lock()
 	{
-		while (__sync_lock_test_and_set(&m_lock, 1))
-			while (m_lock)
-				Scheduler::get().reschedule();
+		pid_t tid = Scheduler::current_tid();
+		while (true)
+		{
+			{
+				CriticalScope _;
+				ASSERT(m_locker != tid);
+				if (m_locker == -1 || !Scheduler::is_valid_tid(m_locker))
+				{
+					m_locker = tid;
+					break;
+				}
+			}
+			Scheduler::get().reschedule();
+		}
 	}

 	void SpinLock::unlock()
 	{
-		__sync_lock_release(&m_lock);
+		CriticalScope _;
+		ASSERT(m_locker == Scheduler::current_tid());
+		m_locker = -1;
 	}

 	bool SpinLock::is_locked() const
 	{
-		return m_lock;
+		CriticalScope _;
+		return m_locker != -1;
 	}

 	void RecursiveSpinLock::lock()
@@ -33,7 +48,7 @@ namespace Kernel
 				m_lock_depth++;
 				break;
 			}
-			if (m_locker == -1)
+			if (m_locker == -1 || !Scheduler::is_valid_tid(m_locker))
 			{
 				m_locker = tid;
 				m_lock_depth = 1;
Reference in New Issue
Block a user