Kernel Rework ThreadBlocker

I don't know why I thought the block chain had to be stored fully in the
ThreadBlocker; that did not even fix the problem I was trying to fix
when I last rewrote it. Roll back to a doubly linked list for the block chain
and now just check that the node is contained within the ThreadBlocker
before removing and after acquiring the ThreadBlocker's lock. Also there
is no need to have a separate lock for the node's blocker field. We can
just perform atomic reads and writes on it. We can still get a blocker
that the node is no longer part of, but this can be resolved with a
simple check. This patch reduces ThreadBlocker's size from over 200
bytes to just 12 bytes (plus 4 bytes of padding).
This commit is contained in:
2026-04-20 12:45:41 +03:00
parent 5b7b2d7ac3
commit 71649ffe09
4 changed files with 49 additions and 54 deletions

View File

@@ -22,8 +22,9 @@ namespace Kernel
uint64_t wake_time_ns { static_cast<uint64_t>(-1) };
SpinLock blocker_lock;
ThreadBlocker* blocker { nullptr };
BAN::Atomic<ThreadBlocker*> blocker { nullptr };
SchedulerQueueNode* block_chain_prev { nullptr };
SchedulerQueueNode* block_chain_next { nullptr };
ProcessorID processor_id { PROCESSOR_NONE };
bool blocked { false };

View File

@@ -15,7 +15,6 @@ namespace Kernel
void block_with_wake_time_ns(uint64_t wake_time_ns, BaseMutex*);
void unblock();
void block_with_timeout_ms(uint64_t timeout_ms, BaseMutex* mutex)
{
ASSERT(!BAN::Math::will_multiplication_overflow<uint64_t>(timeout_ms, 1'000'000));
@@ -29,14 +28,12 @@ namespace Kernel
private:
void add_thread_to_block_queue(SchedulerQueue::Node*);
void remove_blocked_thread(SchedulerQueue::Node*);
void remove_thread_from_block_queue(SchedulerQueue::Node*);
private:
SchedulerQueue::Node* m_block_chain { nullptr };
SpinLock m_lock;
SchedulerQueue::Node* m_block_chain[32] {};
size_t m_block_chain_length { 0 };
friend class Scheduler;
};

View File

@@ -307,15 +307,14 @@ namespace Kernel
void Scheduler::wake_up_sleeping_threads()
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
const uint64_t current_ns = SystemTimer::get().ns_since_boot();
while (!m_block_queue.empty() && current_ns >= m_block_queue.front()->wake_time_ns)
{
auto* node = m_block_queue.pop_front();
{
SpinLockGuard _(node->blocker_lock);
if (node->blocker)
node->blocker->remove_blocked_thread(node);
}
if (auto* blocker = node->blocker.load())
blocker->remove_thread_from_block_queue(node);
node->blocked = false;
update_most_loaded_node_queue(node, &m_run_queue);
m_run_queue.add_thread_to_back(node);
@@ -368,11 +367,8 @@ namespace Kernel
return;
if (node != m_current)
m_block_queue.remove_node(node);
{
SpinLockGuard _(node->blocker_lock);
if (node->blocker)
node->blocker->remove_blocked_thread(node);
}
if (auto* blocker = node->blocker.load())
blocker->remove_thread_from_block_queue(node);
node->blocked = false;
if (node != m_current)
m_run_queue.add_thread_to_back(node);
@@ -665,11 +661,8 @@ namespace Kernel
m_current->blocked = true;
m_current->wake_time_ns = wake_time_ns;
{
SpinLockGuard _(m_current->blocker_lock);
if (blocker)
blocker->add_thread_to_block_queue(m_current);
}
if (blocker)
blocker->add_thread_to_block_queue(m_current);
update_most_loaded_node_queue(m_current, &m_block_queue);

View File

@@ -23,60 +23,64 @@ namespace Kernel
void ThreadBlocker::unblock()
{
decltype(m_block_chain) temp_block_chain;
size_t temp_block_chain_length { 0 };
SpinLockGuard _(m_lock);
for (auto* node = m_block_chain; node;)
{
SpinLockGuard _(m_lock);
for (size_t i = 0; i < m_block_chain_length; i++)
temp_block_chain[i] = m_block_chain[i];
temp_block_chain_length = m_block_chain_length;
m_block_chain_length = 0;
auto* next = node->block_chain_next;
ASSERT(node->blocked);
ASSERT(node->blocker == this);
node->blocker.store(nullptr);
node->block_chain_prev = nullptr;
node->block_chain_next = nullptr;
Processor::scheduler().unblock_thread(node);
node = next;
}
for (size_t i = 0; i < temp_block_chain_length; i++)
Processor::scheduler().unblock_thread(temp_block_chain[i]);
m_block_chain = nullptr;
}
void ThreadBlocker::add_thread_to_block_queue(SchedulerQueue::Node* node)
{
ASSERT(node->blocker_lock.current_processor_has_lock());
SpinLockGuard _(m_lock);
ASSERT(m_block_chain_length < sizeof(m_block_chain) / sizeof(m_block_chain[0]));
ASSERT(node);
ASSERT(node->blocked);
ASSERT(node->blocker == nullptr);
for (size_t i = 0 ; i < m_block_chain_length; i++)
ASSERT(m_block_chain[i] != node);
m_block_chain[m_block_chain_length++] = node;
node->blocker.store(this);
node->block_chain_prev = nullptr;
node->block_chain_next = m_block_chain;
node->blocker = this;
if (m_block_chain)
m_block_chain->block_chain_prev = node;
m_block_chain = node;
}
void ThreadBlocker::remove_blocked_thread(SchedulerQueue::Node* node)
void ThreadBlocker::remove_thread_from_block_queue(SchedulerQueue::Node* node)
{
ASSERT(node->blocker_lock.current_processor_has_lock());
SpinLockGuard _(m_lock);
ASSERT(node);
// NOTE: this is possible if we got here while another
// core was doing an unblock on this blocker
if (node->blocker.load() != this)
return;
ASSERT(node->blocked);
ASSERT(node->blocker == this);
for (size_t i = 0 ; i < m_block_chain_length; i++)
{
if (m_block_chain[i] != node)
continue;
for (size_t j = i + 1; j < m_block_chain_length; j++)
m_block_chain[j - 1] = m_block_chain[j];
m_block_chain_length--;
}
if (node->block_chain_prev)
node->block_chain_prev->block_chain_next = node->block_chain_next;
if (node->block_chain_next)
node->block_chain_next->block_chain_prev = node->block_chain_prev;
node->blocker = nullptr;
if (node == m_block_chain)
m_block_chain = node->block_chain_next;
node->blocker.store(nullptr);
node->block_chain_prev = nullptr;
node->block_chain_next = nullptr;
}
}