Kernel: Make RecursiveSpinLock thread safe

Also, SpinLock is now implemented with GCC builtins.
This commit is contained in:
Bananymous 2023-05-29 19:38:09 +03:00
parent 0c316ebfb2
commit dd3f34cb2c
4 changed files with 35 additions and 39 deletions

View File

@@ -69,7 +69,6 @@ if("${BANAN_ARCH}" STREQUAL "x86_64")
 	arch/x86_64/IDT.cpp
 	arch/x86_64/interrupts.S
 	arch/x86_64/MMU.cpp
-	arch/x86_64/SpinLock.S
 	arch/x86_64/Thread.S
 )
 elseif("${BANAN_ARCH}" STREQUAL "i386")

View File

@ -1,17 +0,0 @@
.global spinlock_lock_asm
spinlock_lock_asm:
lock; btsq $0, (%rdi)
jnc .done
.retry:
pause
testq $1, (%rdi)
jne .retry
lock; btsq $0, (%rdi)
jc .retry
.done:
ret
.global spinlock_unlock_asm
spinlock_unlock_asm:
movl $0, (%rdi)
ret

View File

@@ -19,7 +19,7 @@ namespace Kernel
 		bool is_locked() const;

 	private:
-		int m_lock = 0;
+		volatile int m_lock = 0;
 	};

 	class RecursiveSpinLock
@@ -34,7 +34,7 @@ namespace Kernel
 		bool is_locked() const;

 	private:
-		pid_t m_locker = 0;
+		pid_t m_locker = -1;
 		uint32_t m_lock_depth = 0;
 		SpinLock m_lock;
 	};

View File

@@ -4,17 +4,16 @@
 namespace Kernel
 {
-	extern "C" void spinlock_lock_asm(int*);
-	extern "C" void spinlock_unlock_asm(int*);
-
 	void SpinLock::lock()
 	{
-		spinlock_lock_asm(&m_lock);
+		while (__sync_lock_test_and_set(&m_lock, 1))
+			while (m_lock)
+				__builtin_ia32_pause();
 	}

 	void SpinLock::unlock()
 	{
-		spinlock_unlock_asm(&m_lock);
+		__sync_lock_release(&m_lock);
 	}

 	bool SpinLock::is_locked() const
@@ -24,35 +23,50 @@ namespace Kernel

 	void RecursiveSpinLock::lock()
 	{
-		// FIXME: is this thread safe?
-		if (m_locker == Scheduler::current_tid())
-		{
-			m_lock_depth++;
-		}
-		else
-		{
-			m_lock.lock();
-			ASSERT(m_locker == 0);
-			m_locker = Scheduler::current_tid();
-			m_lock_depth = 1;
-		}
+		pid_t tid = Scheduler::current_tid();
+
+		while (true)
+		{
+			// Wait for us to be the locker or the lock being free
+			while (m_locker != -1 && m_locker != tid)
+				__builtin_ia32_pause();
+
+			m_lock.lock();
+			if (m_locker == tid)
+			{
+				m_lock_depth++;
+				break;
+			}
+			if (m_locker == -1)
+			{
+				m_locker = tid;
+				m_lock_depth = 1;
+				break;
+			}
+			m_lock.unlock();
+		}
+
+		m_lock.unlock();
 	}

 	void RecursiveSpinLock::unlock()
 	{
+		m_lock.lock();
 		ASSERT(m_lock_depth > 0);
+		ASSERT(m_locker == Scheduler::current_tid());
 		m_lock_depth--;
 		if (m_lock_depth == 0)
-		{
-			m_locker = 0;
-			m_lock.unlock();
-		}
+			m_locker = -1;
+		m_lock.unlock();
 	}

 	bool RecursiveSpinLock::is_locked() const
 	{
-		return m_lock.is_locked();
+		return m_locker != -1;
 	}
 }