Kernel: Make thread unblocking O(1)

This is still a bit broken: VirtualBox seems to freeze sometimes, but I
could not reproduce this on QEMU (with or without KVM) or on real hardware.
Bananymous 2024-07-24 00:31:01 +03:00
parent 9548c592a3
commit bb1738db8c
10 changed files with 211 additions and 199 deletions
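
How the O(1) unblocking works, as a minimal standalone sketch (not the kernel code itself): every thread's scheduler node carries an intrusive doubly-linked "block chain" through the ThreadBlocker it is waiting on, together with a blocked flag and the id of the processor that owns the node. Unblocking one thread is then plain pointer unlinking, and waking everything on a blocker is a walk of that blocker's own chain, so the scheduler never has to scan its block queue by tid or by blocker. The sketch below uses simplified stand-in types (Node, Blocker) instead of the kernel's SchedulerQueue::Node and ThreadBlocker, and leaves out the spin lock, the wake-time handling, and the SMP routing to the owning processor.

// Hypothetical, simplified sketch of the intrusive block chain; the real
// SchedulerQueue::Node / ThreadBlocker also track wake_time_ns and
// processor_id, and take a spin lock around every chain operation.
#include <cassert>

struct Blocker;

struct Node
{
    Blocker* blocker { nullptr };
    Node* block_chain_prev { nullptr };
    Node* block_chain_next { nullptr };
    bool blocked { false };
};

struct Blocker
{
    Node* block_chain { nullptr };

    // O(1): push the node at the head of this blocker's intrusive list.
    void add_thread_to_block_queue(Node* node)
    {
        assert(node->blocker == nullptr);
        node->blocker = this;
        node->block_chain_prev = nullptr;
        node->block_chain_next = block_chain;
        if (block_chain)
            block_chain->block_chain_prev = node;
        block_chain = node;
    }

    // O(1): unlink the node without scanning the list.
    void remove_blocked_thread(Node* node)
    {
        assert(node->blocker == this);
        if (node == block_chain)
            block_chain = node->block_chain_next;
        else
            node->block_chain_prev->block_chain_next = node->block_chain_next;
        if (node->block_chain_next)
            node->block_chain_next->block_chain_prev = node->block_chain_prev;
        node->blocker = nullptr;
        node->block_chain_prev = nullptr;
        node->block_chain_next = nullptr;
    }
};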

View File

@@ -1,13 +1,13 @@
 #pragma once

 #include <BAN/Atomic.h>
-#include <BAN/Formatter.h>
 #include <BAN/ForwardList.h>

 #include <kernel/Arch.h>
 #include <kernel/GDT.h>
 #include <kernel/IDT.h>
 #include <kernel/InterruptStack.h>
+#include <kernel/ProcessorID.h>
 #include <kernel/Scheduler.h>

 namespace Kernel
@@ -19,29 +19,6 @@ namespace Kernel
         Enabled,
     };

-    class ProcessorID
-    {
-    public:
-        using value_type = uint32_t;
-
-    public:
-        ProcessorID() = default;
-
-        uint32_t as_u32() const { return m_id; }
-
-        bool operator==(ProcessorID other) const { return m_id == other.m_id; }
-
-    private:
-        explicit ProcessorID(uint32_t id) : m_id(id) {}
-
-    private:
-        uint32_t m_id = static_cast<uint32_t>(-1);
-
-        friend class Processor;
-        friend class APIC;
-    };
-
-    constexpr ProcessorID PROCESSOR_NONE { };
-
 #if ARCH(x86_64) || ARCH(i686)
     class Processor
     {
@@ -66,9 +43,8 @@ namespace Kernel
                 uintptr_t vaddr;
                 size_t page_count;
             } flush_tlb;
-            Scheduler::NewThreadRequest new_thread;
-            Scheduler::UnblockRequest unblock_thread;
-            uintptr_t scheduler_preemption;
+            SchedulerQueue::Node* new_thread;
+            SchedulerQueue::Node* unblock_thread;
         };
     };
@@ -209,14 +185,3 @@ namespace Kernel

 #endif
 }
-
-namespace BAN::Formatter
-{
-
-    template<typename F>
-    void print_argument(F putc, Kernel::ProcessorID processor_id, const ValueFormat& format)
-    {
-        print_argument(putc, processor_id.as_u32(), format);
-    }
-
-}

View File

@@ -0,0 +1,42 @@
+#pragma once
+
+#include <BAN/Formatter.h>
+
+namespace Kernel
+{
+
+    class ProcessorID
+    {
+    public:
+        using value_type = uint32_t;
+
+    public:
+        ProcessorID() = default;
+
+        uint32_t as_u32() const { return m_id; }
+
+        bool operator==(ProcessorID other) const { return m_id == other.m_id; }
+
+    private:
+        explicit ProcessorID(uint32_t id) : m_id(id) {}
+
+    private:
+        uint32_t m_id = static_cast<uint32_t>(-1);
+
+        friend class Processor;
+        friend class APIC;
+    };
+
+    inline constexpr ProcessorID PROCESSOR_NONE { };
+
+}
+
+namespace BAN::Formatter
+{
+
+    template<typename F>
+    void print_argument(F putc, Kernel::ProcessorID processor_id, const ValueFormat& format)
+    {
+        print_argument(putc, processor_id.as_u32(), format);
+    }
+
+}

View File

@@ -4,6 +4,7 @@
 #include <BAN/ForwardList.h>
 #include <BAN/NoCopyMove.h>

 #include <kernel/InterruptStack.h>
+#include <kernel/ProcessorID.h>

 #include <sys/types.h>
@@ -22,13 +23,20 @@ namespace Kernel
                 : thread(thread)
             {}

+            Thread* const thread;
+
             Node* next { nullptr };
             Node* prev { nullptr };

-            Thread* thread;
-            ThreadBlocker* blocker { nullptr };
             uint64_t wake_time_ns { static_cast<uint64_t>(-1) };

+            ThreadBlocker* blocker { nullptr };
+            Node* block_chain_next { nullptr };
+            Node* block_chain_prev { nullptr };
+
+            ProcessorID processor_id { PROCESSOR_NONE };
+            bool blocked { false };
+
             uint64_t last_start_ns { 0 };
             uint64_t time_used_ns { 0 };
         };
@@ -58,22 +66,11 @@ namespace Kernel
         struct NewThreadRequest
         {
             SchedulerQueue::Node* node;
-            bool blocked;
         };

         struct UnblockRequest
         {
-            enum class Type
-            {
-                ThreadBlocker,
-                ThreadID,
-            };
-            Type type;
-            union
-            {
-                ThreadBlocker* blocker;
-                pid_t tid;
-            };
+            SchedulerQueue::Node* node;
         };
@@ -88,8 +85,7 @@ namespace Kernel
         BAN::ErrorOr<void> add_thread(Thread*);

         void block_current_thread(ThreadBlocker* thread_blocker, uint64_t wake_time_ns);
-        void unblock_threads(ThreadBlocker*);
-        void unblock_thread(pid_t tid);
+        void unblock_thread(Thread*);

         Thread& current_thread();
         Thread& idle_thread();
@@ -104,20 +100,17 @@ namespace Kernel
         void update_most_loaded_node_queue(SchedulerQueue::Node*, SchedulerQueue* target_queue);
         void remove_node_from_most_loaded(SchedulerQueue::Node*);

-        bool do_unblock(ThreadBlocker*);
-        bool do_unblock(pid_t);
         void do_load_balancing();

         class ProcessorID find_least_loaded_processor() const;

-        void handle_unblock_request(const UnblockRequest&);
-        void handle_new_thread_request(const NewThreadRequest&);
+        void add_thread(SchedulerQueue::Node*);
+        void unblock_thread(SchedulerQueue::Node*);

     private:
         SchedulerQueue m_run_queue;
         SchedulerQueue m_block_queue;
         SchedulerQueue::Node* m_current { nullptr };
-        bool m_current_will_block { false };

         uint32_t m_thread_count { 0 };
@@ -141,6 +134,7 @@ namespace Kernel
         Thread* m_idle_thread { nullptr };

+        friend class ThreadBlocker;
         friend class Processor;
     };

View File

@@ -3,8 +3,9 @@
 #include <BAN/NoCopyMove.h>
 #include <BAN/RefPtr.h>
 #include <BAN/UniqPtr.h>
-#include <kernel/Memory/VirtualRange.h>
 #include <kernel/InterruptStack.h>
+#include <kernel/Memory/VirtualRange.h>
+#include <kernel/ThreadBlocker.h>

 #include <signal.h>
 #include <sys/types.h>
@@ -96,25 +97,27 @@ namespace Kernel
         void on_exit();

     private:
-        static constexpr size_t m_kernel_stack_size = PAGE_SIZE * 64;
-        static constexpr size_t m_userspace_stack_size = PAGE_SIZE * 64;
+        static constexpr size_t m_kernel_stack_size { PAGE_SIZE * 64 };
+        static constexpr size_t m_userspace_stack_size { PAGE_SIZE * 64 };
         BAN::UniqPtr<VirtualRange> m_kernel_stack;
         BAN::UniqPtr<VirtualRange> m_userspace_stack;
         const pid_t m_tid { 0 };
         State m_state { State::NotStarted };
         Process* m_process { nullptr };
         bool m_is_userspace { false };
         bool m_delete_process { false };

+        SchedulerQueue::Node* m_scheduler_node { nullptr };
+
         InterruptStack m_interrupt_stack { };
         InterruptRegisters m_interrupt_registers { };

         uint64_t m_signal_pending_mask { 0 };
         uint64_t m_signal_block_mask { 0 };
         SpinLock m_signal_lock;
         static_assert(_SIGMAX < 64);

         BAN::Atomic<uint32_t> m_mutex_count { 0 };

 #if __enable_sse
         alignas(16) uint8_t m_sse_storage[512] {};

View File

@@ -1,5 +1,8 @@
 #pragma once

+#include <kernel/Lock/SpinLock.h>
+#include <kernel/Scheduler.h>
+
 namespace Kernel
 {
@@ -12,6 +15,16 @@ namespace Kernel
         void block_with_timeout_ns(uint64_t timeout_ns);
         void block_with_wake_time_ns(uint64_t wake_time_ns);
         void unblock();
+
+    private:
+        void add_thread_to_block_queue(SchedulerQueue::Node*);
+        void remove_blocked_thread(SchedulerQueue::Node*);
+
+    private:
+        SpinLock m_lock;
+        SchedulerQueue::Node* m_block_chain { nullptr };
+
+        friend class Scheduler;
     };

 }
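
For orientation, a hedged usage sketch of the public ThreadBlocker interface declared above. Only block_indefinite(), block_with_timeout_ns() and unblock() come from this header; the surrounding Event wrapper, its members, and the flag handling are illustrative only and deliberately ignore the race between checking the flag and blocking.

// Illustrative only: shows the call shape of the ThreadBlocker API above.
// A real implementation needs proper synchronization around m_signaled and
// must handle a signal arriving between the check and the block.
class Event
{
public:
    void wait()
    {
        while (!m_signaled)
            m_blocker.block_indefinite();                     // park the calling thread
    }

    bool wait_for_ms(uint64_t ms)
    {
        if (!m_signaled)
            m_blocker.block_with_timeout_ns(ms * 1'000'000);  // wake at the timeout at the latest
        return m_signaled;
    }

    void set()
    {
        m_signaled = true;
        m_blocker.unblock();                                  // wake every thread parked on the blocker
    }

private:
    Kernel::ThreadBlocker m_blocker;
    bool m_signaled { false };                                // hypothetical flag, not part of the commit
};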

View File

@@ -1616,8 +1616,7 @@ namespace Kernel
             if (signal)
             {
                 process.add_pending_signal(signal);
-                // FIXME: This feels hacky
-                Processor::scheduler().unblock_thread(process.m_threads.front()->tid());
+                Processor::scheduler().unblock_thread(process.m_threads.front());
             }
             return (pid > 0) ? BAN::Iteration::Break : BAN::Iteration::Continue;
         }

View File

@@ -238,10 +238,10 @@ namespace Kernel
                     asm volatile("invlpg (%0)" :: "r"(message->flush_tlb.vaddr + i * PAGE_SIZE) : "memory");
                 break;
             case SMPMessage::Type::NewThread:
-                processor.m_scheduler->handle_new_thread_request(message->new_thread);
+                processor.m_scheduler->add_thread(message->new_thread);
                 break;
             case SMPMessage::Type::UnblockThread:
-                processor.m_scheduler->handle_unblock_request(message->unblock_thread);
+                processor.m_scheduler->unblock_thread(message->unblock_thread);
                 break;
         }

View File

@@ -208,7 +208,7 @@
         ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);

         // If there are no other threads in run queue, reschedule can be no-op :)
-        if (m_run_queue.empty() && !m_current_will_block && current_thread().state() == Thread::State::Executing)
+        if (m_run_queue.empty() && (!m_current || !m_current->blocked) && current_thread().state() == Thread::State::Executing)
             return;

         if (m_current == nullptr)
@@ -230,18 +230,15 @@
                 m_current->thread->interrupt_stack() = *interrupt_stack;
                 m_current->thread->interrupt_registers() = *interrupt_registers;
                 m_current->time_used_ns += current_ns - m_current->last_start_ns;
-                add_current_to_most_loaded(m_current_will_block ? &m_block_queue : &m_run_queue);
-                if (!m_current_will_block)
+                add_current_to_most_loaded(m_current->blocked ? &m_block_queue : &m_run_queue);
+                if (!m_current->blocked)
                     m_run_queue.add_thread_to_back(m_current);
                 else
-                {
-                    m_current_will_block = false;
                     m_block_queue.add_thread_with_wake_time(m_current);
-                }
                 break;
             }
             case Thread::State::NotStarted:
-                ASSERT(!m_current_will_block);
+                ASSERT(!m_current->blocked);
                 m_current->time_used_ns = 0;
                 remove_node_from_most_loaded(m_current);
                 m_run_queue.add_thread_to_back(m_current);
@@ -306,6 +303,9 @@
         while (!m_block_queue.empty() && current_ns >= m_block_queue.front()->wake_time_ns)
         {
             auto* node = m_block_queue.pop_front();
+            if (node->blocker)
+                node->blocker->remove_blocked_thread(node);
+            node->blocked = false;
             update_most_loaded_node_queue(node, &m_run_queue);
             m_run_queue.add_thread_to_back(node);
         }
@@ -321,79 +321,44 @@
         }
     }

-    void Scheduler::handle_unblock_request(const UnblockRequest& request)
+    void Scheduler::unblock_thread(SchedulerQueue::Node* node)
     {
-        ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
+        auto state = Processor::get_interrupt_state();
+        Processor::set_interrupt_state(InterruptState::Disabled);

-        switch (request.type)
+        if (node->processor_id == Processor::current_id())
         {
-            case UnblockRequest::Type::ThreadBlocker:
-                do_unblock(request.blocker);
-                break;
-            case UnblockRequest::Type::ThreadID:
-                do_unblock(request.tid);
-                break;
-            default:
-                ASSERT_NOT_REACHED();
+            ASSERT(node->blocked);
+            m_block_queue.remove_node(node);
+            if (node->blocker)
+                node->blocker->remove_blocked_thread(node);
+            node->blocked = false;
+            m_run_queue.add_thread_to_back(node);
         }
-    }
-
-    void Scheduler::handle_new_thread_request(const NewThreadRequest& reqeuest)
-    {
-        ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
-
-        if (reqeuest.blocked)
-            m_block_queue.add_thread_with_wake_time(reqeuest.node);
         else
-            m_run_queue.add_thread_to_back(reqeuest.node);
+        {
+            Processor::send_smp_message(node->processor_id, {
+                .type = Processor::SMPMessage::Type::UnblockThread,
+                .unblock_thread = node
+            });
+        }
+
+        Processor::set_interrupt_state(state);
     }

-    bool Scheduler::do_unblock(ThreadBlocker* blocker)
+    void Scheduler::add_thread(SchedulerQueue::Node* node)
     {
-        ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
+        auto state = Processor::get_interrupt_state();
+        Processor::set_interrupt_state(InterruptState::Disabled);

-        // FIXME: This could _easily_ be O(1)
+        ASSERT(node->processor_id == Processor::current_id());

-        bool did_unblock = false;
-
-        if (m_current && m_current->blocker == blocker && m_current_will_block)
-        {
-            m_current_will_block = false;
-            did_unblock = true;
-        }
-
-        SchedulerQueue::Node* match;
-        while ((match = m_block_queue.remove_with_condition([blocker](const auto* node) { return node->blocker == blocker; })))
-        {
-            dprintln_if(DEBUG_SCHEDULER, "CPU {}: unblock blocker {} (tid {})", Processor::current_id(), blocker, match->thread->tid());
-            update_most_loaded_node_queue(match, &m_run_queue);
-            m_run_queue.add_thread_to_back(match);
-            did_unblock = true;
-        }
-
-        return did_unblock;
-    }
-
-    bool Scheduler::do_unblock(pid_t tid)
-    {
-        ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
-
-        // FIXME: This could _easily_ be O(1)
-
-        if (m_current && m_current->thread->tid() == tid && m_current_will_block)
-        {
-            m_current_will_block = false;
-            return true;
-        }
-
-        auto* match = m_block_queue.remove_with_condition([tid](const auto* node) { return node->thread->tid() == tid; });
-        if (match == nullptr)
-            return false;
-
-        dprintln_if(DEBUG_SCHEDULER, "CPU {}: unblock tid {}", Processor::current_id(), tid);
-        update_most_loaded_node_queue(match, &m_run_queue);
-        m_run_queue.add_thread_to_back(match);
-        return true;
+        if (!node->blocked)
+            m_run_queue.add_thread_to_back(node);
+        else
+            m_block_queue.add_thread_with_wake_time(node);
+
+        Processor::set_interrupt_state(state);
     }

     ProcessorID Scheduler::find_least_loaded_processor() const
@@ -561,12 +526,11 @@
             m_thread_count--;
         }

+        thread_info.node->processor_id = least_loaded_id;
+
         Processor::send_smp_message(least_loaded_id, {
             .type = Processor::SMPMessage::Type::NewThread,
-            .new_thread = {
-                .node = thread_info.node,
-                .blocked = thread_info.queue == &m_block_queue
-            }
+            .new_thread = thread_info.node
         });

         thread_info.node = nullptr;
@@ -601,22 +565,16 @@
         const size_t processor_index = s_next_processor_index++ % Processor::count();
         const auto processor_id = Processor::id_from_index(processor_index);

+        new_node->processor_id = processor_id;
+        thread->m_scheduler_node = new_node;
+
         if (processor_id == Processor::current_id())
-        {
-            auto state = Processor::get_interrupt_state();
-            Processor::set_interrupt_state(InterruptState::Disabled);
-            m_run_queue.add_thread_to_back(new_node);
-            m_thread_count++;
-            Processor::set_interrupt_state(state);
-        }
+            add_thread(new_node);
         else
         {
             Processor::send_smp_message(processor_id, {
                 .type = Processor::SMPMessage::Type::NewThread,
-                .new_thread = {
-                    .node = new_node,
-                    .blocked = false
-                }
+                .new_thread = new_node
             });
         }
@@ -628,48 +586,22 @@
         auto state = Processor::get_interrupt_state();
         Processor::set_interrupt_state(InterruptState::Disabled);

-        m_current->blocker = blocker;
+        ASSERT(!m_current->blocked);
+        m_current->blocked = true;
         m_current->wake_time_ns = wake_time_ns;
-        m_current_will_block = true;
+        if (blocker)
+            blocker->add_thread_to_block_queue(m_current);
         Processor::yield();

         Processor::set_interrupt_state(state);
     }

-    void Scheduler::unblock_threads(ThreadBlocker* blocker)
+    void Scheduler::unblock_thread(Thread* thread)
     {
         auto state = Processor::get_interrupt_state();
         Processor::set_interrupt_state(InterruptState::Disabled);

-        do_unblock(blocker);
-
-        Processor::broadcast_smp_message({
-            .type = Processor::SMPMessage::Type::UnblockThread,
-            .unblock_thread = {
-                .type = UnblockRequest::Type::ThreadBlocker,
-                .blocker = blocker
-            }
-        });
-
-        Processor::set_interrupt_state(state);
-    }
-
-    void Scheduler::unblock_thread(pid_t tid)
-    {
-        auto state = Processor::get_interrupt_state();
-        Processor::set_interrupt_state(InterruptState::Disabled);
-
-        if (!do_unblock(tid))
-        {
-            Processor::broadcast_smp_message({
-                .type = Processor::SMPMessage::Type::UnblockThread,
-                .unblock_thread = {
-                    .type = UnblockRequest::Type::ThreadID,
-                    .tid = tid
-                }
-            });
-        }
+        unblock_thread(thread->m_scheduler_node);

         Processor::set_interrupt_state(state);
     }

View File

@@ -401,7 +401,7 @@ namespace Kernel
         {
             m_signal_pending_mask |= mask;
             if (this != &Thread::current())
-                Processor::scheduler().unblock_thread(tid());
+                Processor::scheduler().unblock_thread(this);
             return true;
         }
         return false;

View File

@@ -7,7 +7,7 @@ namespace Kernel
     void ThreadBlocker::block_indefinite()
     {
-        Processor::scheduler().block_current_thread(this, ~static_cast<uint64_t>(0));
+        Processor::scheduler().block_current_thread(this, static_cast<uint64_t>(-1));
     }

     void ThreadBlocker::block_with_timeout_ns(uint64_t timeout_ns)
@@ -22,7 +22,71 @@ namespace Kernel

     void ThreadBlocker::unblock()
     {
-        Processor::scheduler().unblock_threads(this);
+        SchedulerQueue::Node* block_chain;
+
+        {
+            SpinLockGuard _(m_lock);
+            block_chain = m_block_chain;
+            m_block_chain = nullptr;
+        }
+
+        for (auto* node = block_chain; node;)
+        {
+            ASSERT(node->blocked);
+
+            auto* next = node->block_chain_next;
+
+            node->blocker = nullptr;
+            node->block_chain_next = nullptr;
+            node->block_chain_prev = nullptr;
+            Processor::scheduler().unblock_thread(node);
+
+            node = next;
+        }
     }

+    void ThreadBlocker::add_thread_to_block_queue(SchedulerQueue::Node* node)
+    {
+        ASSERT(node);
+        ASSERT(node->blocked);
+        ASSERT(node->blocker == nullptr);
+        ASSERT(node->block_chain_prev == nullptr);
+        ASSERT(node->block_chain_next == nullptr);
+
+        SpinLockGuard _(m_lock);
+
+        node->blocker = this;
+        node->block_chain_prev = nullptr;
+        node->block_chain_next = m_block_chain;
+        if (m_block_chain)
+            m_block_chain->block_chain_prev = node;
+        m_block_chain = node;
+    }
+
+    void ThreadBlocker::remove_blocked_thread(SchedulerQueue::Node* node)
+    {
+        SpinLockGuard _(m_lock);
+
+        ASSERT(node);
+        ASSERT(node->blocked);
+        ASSERT(node->blocker == this);
+
+        if (node == m_block_chain)
+        {
+            ASSERT(node->block_chain_prev == nullptr);
+            m_block_chain = node->block_chain_next;
+            if (m_block_chain)
+                m_block_chain->block_chain_prev = nullptr;
+        }
+        else
+        {
+            ASSERT(node->block_chain_prev);
+            node->block_chain_prev->block_chain_next = node->block_chain_next;
+            if (node->block_chain_next)
+                node->block_chain_next->block_chain_prev = node->block_chain_prev;
+        }
+
+        node->blocker = nullptr;
+        node->block_chain_next = nullptr;
+        node->block_chain_prev = nullptr;
+    }
+
 }