Kernel: SpinLocks now reschedule if they cannot acquire the lock
This allows us to avoid spinning idly while waiting for another (currently not executing) thread to release the lock. This API probably won't work once we get to SMP.
This commit is contained in:
parent
5032e79be3
commit
c0fe4756cb
|
@ -8,7 +8,7 @@ namespace Kernel
|
||||||
{
|
{
|
||||||
while (__sync_lock_test_and_set(&m_lock, 1))
|
while (__sync_lock_test_and_set(&m_lock, 1))
|
||||||
while (m_lock)
|
while (m_lock)
|
||||||
__builtin_ia32_pause();
|
Scheduler::get().reschedule();
|
||||||
}
|
}
|
||||||
|
|
||||||
void SpinLock::unlock()
|
void SpinLock::unlock()
|
||||||
|
@ -27,10 +27,6 @@ namespace Kernel
|
||||||
|
|
||||||
while (true)
|
while (true)
|
||||||
{
|
{
|
||||||
// Wait for us to be the locker or the lock being free
|
|
||||||
while (m_locker != -1 && m_locker != tid)
|
|
||||||
__builtin_ia32_pause();
|
|
||||||
|
|
||||||
m_lock.lock();
|
m_lock.lock();
|
||||||
if (m_locker == tid)
|
if (m_locker == tid)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue