Kernel: Rewrite ThreadBlocker
This gets rid of a very old bug where the kernel panics when a thread is being woken up and unblocked at the same time on different cores. The fix required adding a new lock to SchedulerQueue::Node and a cap on how many threads a ThreadBlocker can block simultaneously. I don't think I ever block more than five threads on the same ThreadBlocker, so this should be fine.
parent 41e1819072
commit 92e4078287
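To make the race concrete, here is a minimal userspace sketch of the scenario this commit serializes. It is illustrative only: std::mutex and std::thread stand in for the kernel's SpinLock and separate cores, and the names (Node, Blocker, wake_path) mirror the diff below but are not the kernel API. With the per-node blocker_lock held, the "check node->blocker, then deregister" sequence becomes atomic, so exactly one of the two racing wake paths performs the removal.

// Hypothetical, simplified re-creation of the race (not the kernel code):
// two "cores" race to wake the same blocked thread, one via the block
// timeout path and one via an explicit ThreadBlocker::unblock().
#include <cassert>
#include <cstddef>
#include <mutex>
#include <thread>

struct Blocker;

struct Node
{
	std::mutex blocker_lock;      // stands in for SpinLock blocker_lock
	Blocker* blocker { nullptr }; // which blocker this node is registered on
};

struct Blocker
{
	std::mutex lock;              // stands in for SpinLock m_lock
	Node* block_chain[32] {};     // fixed-capacity chain, as in this commit
	size_t block_chain_length { 0 };

	void remove_blocked_thread(Node* node)
	{
		std::scoped_lock _(lock);
		for (size_t i = 0; i < block_chain_length; i++)
		{
			if (block_chain[i] != node)
				continue;
			// shift the remaining entries down over the removed slot
			for (size_t j = i + 1; j < block_chain_length; j++)
				block_chain[j - 1] = block_chain[j];
			block_chain_length--;
		}
		node->blocker = nullptr;
	}
};

int main()
{
	Blocker blocker;
	Node node;
	node.blocker = &blocker;
	blocker.block_chain[blocker.block_chain_length++] = &node;

	// Without node.blocker_lock, both paths could pass the null check and
	// tear down the same registration concurrently; with it, the check and
	// the removal are one atomic step.
	auto wake_path = [&] {
		std::scoped_lock _(node.blocker_lock);
		if (node.blocker)
			node.blocker->remove_blocked_thread(&node);
	};

	std::thread core_a(wake_path); // timeout wakeup
	std::thread core_b(wake_path); // explicit unblock
	core_a.join();
	core_b.join();

	assert(node.blocker == nullptr); // exactly one path deregistered the node
	return 0;
}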
@@ -14,33 +14,12 @@ namespace Kernel
 	class BaseMutex;
 	class Thread;
 	class ThreadBlocker;
+	struct SchedulerQueueNode;
 
 	class SchedulerQueue
 	{
 	public:
-		struct Node
-		{
-			Node(Thread* thread)
-				: thread(thread)
-			{}
-
-			Thread* const thread;
-
-			Node* next { nullptr };
-			Node* prev { nullptr };
-
-			uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
-
-			ThreadBlocker* blocker { nullptr };
-			Node* block_chain_next { nullptr };
-			Node* block_chain_prev { nullptr };
-
-			ProcessorID processor_id { PROCESSOR_NONE };
-			bool blocked { false };
-
-			uint64_t last_start_ns { 0 };
-			uint64_t time_used_ns { 0 };
-		};
+		using Node = SchedulerQueueNode;
 
 	public:
 		void add_thread_to_back(Node*);
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <kernel/ProcessorID.h>
+#include <kernel/Lock/SpinLock.h>
+
+namespace Kernel
+{
+
+	class Thread;
+	class ThreadBlocker;
+
+	struct SchedulerQueueNode
+	{
+		SchedulerQueueNode(Thread* thread)
+			: thread(thread)
+		{}
+
+		Thread* const thread;
+
+		SchedulerQueueNode* next { nullptr };
+		SchedulerQueueNode* prev { nullptr };
+
+		uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
+
+		SpinLock blocker_lock;
+		ThreadBlocker* blocker { nullptr };
+
+		ProcessorID processor_id { PROCESSOR_NONE };
+		bool blocked { false };
+
+		uint64_t last_start_ns { 0 };
+		uint64_t time_used_ns { 0 };
+	};
+
+}
@@ -33,7 +33,9 @@ namespace Kernel
 
 	private:
 		SpinLock m_lock;
-		SchedulerQueue::Node* m_block_chain { nullptr };
+
+		SchedulerQueue::Node* m_block_chain[32] {};
+		size_t m_block_chain_length { 0 };
 
 		friend class Scheduler;
 	};
@@ -4,6 +4,7 @@
 #include <kernel/Lock/Mutex.h>
 #include <kernel/Process.h>
 #include <kernel/Scheduler.h>
+#include <kernel/SchedulerQueueNode.h>
 #include <kernel/Thread.h>
 #include <kernel/Timer/Timer.h>
 
@@ -307,8 +308,11 @@ namespace Kernel
 		while (!m_block_queue.empty() && current_ns >= m_block_queue.front()->wake_time_ns)
 		{
 			auto* node = m_block_queue.pop_front();
-			if (node->blocker)
-				node->blocker->remove_blocked_thread(node);
+			{
+				SpinLockGuard _(node->blocker_lock);
+				if (node->blocker)
+					node->blocker->remove_blocked_thread(node);
+			}
 			node->blocked = false;
 			update_most_loaded_node_queue(node, &m_run_queue);
 			m_run_queue.add_thread_to_back(node);
@@ -336,8 +340,11 @@ namespace Kernel
 			return;
 		if (node != m_current)
 			m_block_queue.remove_node(node);
-		if (node->blocker)
-			node->blocker->remove_blocked_thread(node);
+		{
+			SpinLockGuard _(node->blocker_lock);
+			if (node->blocker)
+				node->blocker->remove_blocked_thread(node);
+		}
 		node->blocked = false;
 		if (node != m_current)
 			m_run_queue.add_thread_to_back(node);
@@ -618,8 +625,13 @@ namespace Kernel
 
 		m_current->blocked = true;
 		m_current->wake_time_ns = wake_time_ns;
-		if (blocker)
-			blocker->add_thread_to_block_queue(m_current);
+
+		{
+			SpinLockGuard _(m_current->blocker_lock);
+			if (blocker)
+				blocker->add_thread_to_block_queue(m_current);
+		}
+
 		update_most_loaded_node_queue(m_current, &m_block_queue);
 
 		uint32_t lock_depth = 0;
@@ -642,10 +654,7 @@ namespace Kernel
 
 	void Scheduler::unblock_thread(Thread* thread)
 	{
-		auto state = Processor::get_interrupt_state();
-		Processor::set_interrupt_state(InterruptState::Disabled);
 		unblock_thread(thread->m_scheduler_node);
-		Processor::set_interrupt_state(state);
 	}
 
 	Thread& Scheduler::current_thread()
@@ -1,4 +1,5 @@
 #include <kernel/Processor.h>
+#include <kernel/SchedulerQueueNode.h>
 #include <kernel/ThreadBlocker.h>
 #include <kernel/Timer/Timer.h>
 
@@ -22,71 +23,60 @@ namespace Kernel
 
 	void ThreadBlocker::unblock()
 	{
-		SchedulerQueue::Node* block_chain;
+		decltype(m_block_chain) temp_block_chain;
+		size_t temp_block_chain_length { 0 };
 
 		{
 			SpinLockGuard _(m_lock);
-			block_chain = m_block_chain;
-			m_block_chain = nullptr;
+			for (size_t i = 0; i < m_block_chain_length; i++)
+				temp_block_chain[i] = m_block_chain[i];
+			temp_block_chain_length = m_block_chain_length;
+			m_block_chain_length = 0;
 		}
 
-		for (auto* node = block_chain; node;)
-		{
-			ASSERT(node->blocked);
-
-			auto* next = node->block_chain_next;
-			node->blocker = nullptr;
-			node->block_chain_next = nullptr;
-			node->block_chain_prev = nullptr;
-			Processor::scheduler().unblock_thread(node);
-			node = next;
-		}
+		for (size_t i = 0; i < temp_block_chain_length; i++)
+			Processor::scheduler().unblock_thread(temp_block_chain[i]);
 	}
 
 	void ThreadBlocker::add_thread_to_block_queue(SchedulerQueue::Node* node)
 	{
+		ASSERT(node->blocker_lock.current_processor_has_lock());
+
+		SpinLockGuard _(m_lock);
+
+		ASSERT(m_block_chain_length < sizeof(m_block_chain) / sizeof(m_block_chain[0]));
+
 		ASSERT(node);
 		ASSERT(node->blocked);
 		ASSERT(node->blocker == nullptr);
-		ASSERT(node->block_chain_prev == nullptr);
-		ASSERT(node->block_chain_next == nullptr);
 
-		SpinLockGuard _(m_lock);
+		for (size_t i = 0 ; i < m_block_chain_length; i++)
+			ASSERT(m_block_chain[i] != node);
+		m_block_chain[m_block_chain_length++] = node;
 
 		node->blocker = this;
-		node->block_chain_prev = nullptr;
-		node->block_chain_next = m_block_chain;
-		if (m_block_chain)
-			m_block_chain->block_chain_prev = node;
-		m_block_chain = node;
 	}
 
 	void ThreadBlocker::remove_blocked_thread(SchedulerQueue::Node* node)
 	{
+		ASSERT(node->blocker_lock.current_processor_has_lock());
+
+		SpinLockGuard _(m_lock);
+
 		ASSERT(node);
 		ASSERT(node->blocked);
 		ASSERT(node->blocker == this);
 
-		if (node == m_block_chain)
+		for (size_t i = 0 ; i < m_block_chain_length; i++)
 		{
-			ASSERT(node->block_chain_prev == nullptr);
-			m_block_chain = node->block_chain_next;
-			if (m_block_chain)
-				m_block_chain->block_chain_prev = nullptr;
-		}
-		else
-		{
-			ASSERT(node->block_chain_prev);
-			node->block_chain_prev->block_chain_next = node->block_chain_next;
-			if (node->block_chain_next)
-				node->block_chain_next->block_chain_prev = node->block_chain_prev;
+			if (m_block_chain[i] != node)
+				continue;
+			for (size_t j = i + 1; j < m_block_chain_length; j++)
+				m_block_chain[j - 1] = m_block_chain[j];
+			m_block_chain_length--;
 		}
 
 		node->blocker = nullptr;
-		node->block_chain_next = nullptr;
-		node->block_chain_prev = nullptr;
 	}
 
 }
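A note on the unblock() rewrite above: the block chain is copied and emptied while m_lock is held, and the actual wakeups run only after the guard goes out of scope. That ordering appears deliberate: unblock_thread() takes the node's blocker_lock and calls back into remove_blocked_thread(), which re-acquires m_lock, so waking threads while still holding m_lock would self-deadlock on a non-recursive spinlock. A minimal sketch of the pattern, with illustrative names (unblock_all, wake) that are assumptions, not the kernel API:

#include <cstddef>
#include <mutex>

struct Node;

struct Blocker
{
	std::mutex lock; // stands in for SpinLock m_lock
	Node* block_chain[32] {};
	size_t block_chain_length { 0 };

	template <typename WakeFn>
	void unblock_all(WakeFn wake)
	{
		Node* snapshot[32];
		size_t count = 0;

		// Snapshot and clear the chain under the blocker-wide lock...
		{
			std::scoped_lock _(lock);
			for (size_t i = 0; i < block_chain_length; i++)
				snapshot[i] = block_chain[i];
			count = block_chain_length;
			block_chain_length = 0;
		}

		// ...then wake outside of it, since each wakeup may re-enter
		// this blocker through remove_blocked_thread() and take `lock`.
		for (size_t i = 0; i < count; i++)
			wake(snapshot[i]);
	}
};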