Kernel: Rewrite scheduler thread lists

Scheduler now has its own data structure, SchedulerQueue, which holds
the active and blocking thread lists. This removes the need for
BAN/Errors.h and makes the current thread a separate element instead
of an iterator into a linked list. This also makes it possible to have
a current_thread on each processor instead of a single global one in
the Scheduler.
Bananymous 2024-03-08 22:13:45 +02:00
parent 1a1f9b1cf2
commit e636dce919
5 changed files with 162 additions and 117 deletions
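The heart of the rewrite: a thread is given one heap-allocated SchedulerQueue::Node when it is registered, and every later state change (run, block, wake) just relinks that node between the two intrusive lists, so nothing on the scheduling hot path needs to allocate or can fail. Below is a minimal sketch of the pattern, written as if inside Kernel::Scheduler and using only the API visible in the diffs that follow; the sketch function names are hypothetical, not the kernel's:

BAN::ErrorOr<void> Scheduler::add_thread_sketch(Thread* thread)
{
	// The only allocation happens once, when the thread is registered.
	auto* node = new SchedulerQueue::Node(thread);
	if (node == nullptr)
		return BAN::Error::from_errno(ENOMEM);

	SpinLockGuard _(m_lock);
	m_active_threads.push_back(node); // relink only, cannot fail
	return {};
}

void Scheduler::block_sketch(SchedulerQueue::Node* node, Semaphore* semaphore, uint64_t wake_time)
{
	// Blocking moves the same node to the other list: no allocation,
	// no BAN::ErrorOr, so a context switch can never fail midway.
	node->semaphore = semaphore;
	node->wake_time = wake_time;
	m_blocking_threads.add_with_wake_time(node);
}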

Process.h

@@ -52,7 +52,8 @@ namespace Kernel
void exit(int status, int signal);
void add_thread(Thread*);
void on_thread_exit(Thread&);
// returns true if thread was the last one
bool on_thread_exit(Thread&);
pid_t sid() const { return m_sid; }
pid_t pgrp() const { return m_pgrp; }

Scheduler.h

@@ -1,6 +1,6 @@
#pragma once
#include <BAN/LinkedList.h>
#include <kernel/SchedulerQueue.h>
#include <kernel/Semaphore.h>
#include <kernel/Thread.h>
@@ -30,7 +30,6 @@ namespace Kernel
static pid_t current_tid();
[[noreturn]] void execute_current_thread();
[[noreturn]] void execute_current_thread_locked();
[[noreturn]] void delete_current_process_and_thread();
// This is no return if called on current thread
@@ -41,34 +40,22 @@ namespace Kernel
void set_current_thread_sleeping_impl(uint64_t wake_time);
void wake_threads();
[[nodiscard]] bool save_current_thread();
void remove_and_advance_current_thread();
void advance_current_thread();
[[noreturn]] void execute_current_thread_locked();
[[noreturn]] void execute_current_thread_stack_loaded();
BAN::ErrorOr<void> add_thread(Thread*);
private:
struct SchedulerThread
{
SchedulerThread(Thread* thread)
: thread(thread)
{}
Thread* thread;
uint64_t wake_time;
Semaphore* semaphore;
};
SpinLock m_lock;
Thread* m_idle_thread { nullptr };
BAN::LinkedList<SchedulerThread> m_active_threads;
BAN::LinkedList<SchedulerThread> m_sleeping_threads;
SchedulerQueue m_active_threads;
SchedulerQueue m_blocking_threads;
BAN::LinkedList<SchedulerThread>::iterator m_current_thread;
Thread* m_idle_thread { nullptr };
SchedulerQueue::Node* m_current_thread { nullptr };
friend class Process;
};

SchedulerQueue.h

@@ -0,0 +1,126 @@
#pragma once
#include <BAN/Assert.h>
#include <BAN/NoCopyMove.h>
#include <stdint.h>
namespace Kernel
{
class Thread;
class Semaphore;
class SchedulerQueue
{
BAN_NON_COPYABLE(SchedulerQueue);
BAN_NON_MOVABLE(SchedulerQueue);
public:
struct Node
{
Node(Thread* thread)
: thread(thread)
{}
Thread* thread;
uint64_t wake_time { 0 };
Semaphore* semaphore { nullptr };
private:
Node* next { nullptr };
friend class SchedulerQueue;
friend class Scheduler;
};
public:
SchedulerQueue() = default;
~SchedulerQueue() { ASSERT_NOT_REACHED(); }
bool empty() const { return m_front == nullptr; }
Node* pop_front()
{
ASSERT(!empty());
Node* node = m_front;
m_front = m_front->next;
if (m_front == nullptr)
m_back = nullptr;
node->next = nullptr;
return node;
}
void push_back(Node* node)
{
ASSERT(node);
node->next = nullptr;
(empty() ? m_front : m_back->next) = node;
m_back = node;
}
void add_with_wake_time(Node* node)
{
ASSERT(node);
node->next = nullptr;
if (empty() || node->wake_time >= m_back->wake_time)
{
push_back(node);
return;
}
if (node->wake_time < m_front->wake_time)
{
node->next = m_front;
m_front = node;
return;
}
Node* prev = m_front;
for (; node->wake_time >= prev->next->wake_time; prev = prev->next)
continue;
node->next = prev->next;
prev->next = node;
}
void remove_with_wake_time(SchedulerQueue& out, uint64_t current_time)
{
while (!empty() && m_front->wake_time <= current_time)
out.push_back(pop_front());
}
template<typename F>
void remove_with_condition(SchedulerQueue& out, F comp)
{
while (!empty() && comp(m_front))
out.push_back(pop_front());
if (empty())
return;
for (Node* prev = m_front; prev->next;)
{
Node* node = prev->next;
if (!comp(node))
prev = prev->next;
else
{
prev->next = node->next;
if (node == m_back)
m_back = prev;
out.push_back(node);
}
}
}
private:
Node* m_front { nullptr };
Node* m_back { nullptr };
};
}

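Since SchedulerQueue is brand new in this commit, a self-contained miniature may help illustrate the one invariant it relies on: add_with_wake_time() keeps the list sorted by wake_time, so remove_with_wake_time() only ever has to inspect the front. The Node and Queue types below are simplified stand-ins for illustration, not the kernel's:

#include <cstdint>
#include <cstdio>

struct Node
{
	uint64_t wake_time { 0 };
	Node* next { nullptr };
};

struct Queue
{
	Node* front { nullptr };
	Node* back { nullptr };

	// Mirrors SchedulerQueue::add_with_wake_time(): sorted insert into a
	// singly linked list that tracks both head and tail.
	void insert_sorted(Node* node)
	{
		node->next = nullptr;
		if (!front || node->wake_time >= back->wake_time)
		{
			(front ? back->next : front) = node;
			back = node;
			return;
		}
		if (node->wake_time < front->wake_time)
		{
			node->next = front;
			front = node;
			return;
		}
		Node* prev = front;
		while (node->wake_time >= prev->next->wake_time)
			prev = prev->next;
		node->next = prev->next;
		prev->next = node;
	}
};

int main()
{
	Node a { 30 }, b { 10 }, c { 20 };
	Queue q;
	q.insert_sorted(&a);
	q.insert_sorted(&b);
	q.insert_sorted(&c);
	for (Node* n = q.front; n; n = n->next)
		printf("%llu\n", (unsigned long long)n->wake_time); // prints 10, 20, 30
}

remove_with_condition() relies on the same relinking idea: matching nodes are unlinked from one list and appended to the other, preserving their relative order.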
Process.cpp

@@ -218,7 +218,7 @@ namespace Kernel
m_loadable_elf.clear();
}
void Process::on_thread_exit(Thread& thread)
bool Process::on_thread_exit(Thread& thread)
{
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
@@ -230,9 +230,7 @@
m_threads.clear();
thread.setup_process_cleanup();
// NOTE: This function is only called from scheduler when it is already locked
Scheduler::get().execute_current_thread_locked();
ASSERT_NOT_REACHED();
return true;
}
for (size_t i = 0; i < m_threads.size(); i++)
@@ -240,7 +238,7 @@
if (m_threads[i] == &thread)
{
m_threads.remove(i);
return;
return false;
}
}

Scheduler.cpp

@@ -42,7 +42,7 @@ namespace Kernel
ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
m_lock.lock();
ASSERT(!m_active_threads.empty());
m_current_thread = m_active_threads.begin();
advance_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
@@ -62,7 +62,7 @@ namespace Kernel
void Scheduler::timer_reschedule()
{
auto state = m_lock.lock();
wake_threads();
m_blocking_threads.remove_with_wake_time(m_active_threads, SystemTimer::get().ms_since_boot());
if (save_current_thread())
return Processor::set_interrupt_state(state);
advance_current_thread();
@@ -83,34 +83,22 @@ namespace Kernel
void Scheduler::reschedule_if_idling()
{
auto state = m_lock.lock();
if (m_active_threads.empty() || &current_thread() != m_idle_thread)
if (m_active_threads.empty() || m_current_thread)
return m_lock.unlock(state);
if (save_current_thread())
return Processor::set_interrupt_state(state);
m_current_thread = m_active_threads.begin();
advance_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
void Scheduler::wake_threads()
{
ASSERT(m_lock.current_processor_has_lock());
uint64_t current_time = SystemTimer::get().ms_since_boot();
while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
{
m_sleeping_threads.move_element_to_other_linked_list(
m_active_threads,
m_active_threads.end(),
m_sleeping_threads.begin()
);
}
}
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
{
auto* node = new SchedulerQueue::Node(thread);
if (node == nullptr)
return BAN::Error::from_errno(ENOMEM);
SpinLockGuard _(m_lock);
TRY(m_active_threads.emplace_back(thread));
m_active_threads.push_back(node);
return {};
}
@@ -126,32 +114,12 @@ namespace Kernel
{
ASSERT(m_lock.current_processor_has_lock());
if (m_active_threads.empty())
{
m_current_thread = {};
return;
}
if (!m_current_thread || ++m_current_thread == m_active_threads.end())
m_current_thread = m_active_threads.begin();
}
if (m_current_thread)
m_active_threads.push_back(m_current_thread);
m_current_thread = nullptr;
void Scheduler::remove_and_advance_current_thread()
{
ASSERT(m_lock.current_processor_has_lock());
ASSERT(m_current_thread);
if (m_active_threads.size() == 1)
{
m_active_threads.remove(m_current_thread);
m_current_thread = {};
}
else
{
auto temp = m_current_thread;
advance_current_thread();
m_active_threads.remove(temp);
}
if (!m_active_threads.empty())
m_current_thread = m_active_threads.pop_front();
}
// NOTE: this is declared always inline, so we don't corrupt the stack
@@ -189,11 +157,11 @@ namespace Kernel
ASSERT(thread->has_process());
delete &thread->process();
remove_and_advance_current_thread();
delete thread;
delete m_current_thread;
m_current_thread = nullptr;
advance_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
@@ -244,12 +212,14 @@ namespace Kernel
{
Thread* thread = m_current_thread->thread;
if (thread->has_process())
thread->process().on_thread_exit(*thread);
remove_and_advance_current_thread();
if (thread->process().on_thread_exit(*thread))
break;
delete thread;
delete m_current_thread;
m_current_thread = nullptr;
advance_current_thread();
current = &current_thread();
}
@@ -286,21 +256,11 @@ namespace Kernel
if (save_current_thread())
return;
auto it = m_sleeping_threads.begin();
for (; it != m_sleeping_threads.end(); it++)
if (wake_time <= it->wake_time)
break;
m_current_thread->wake_time = wake_time;
m_active_threads.move_element_to_other_linked_list(
m_sleeping_threads,
it,
m_current_thread
);
m_blocking_threads.add_with_wake_time(m_current_thread);
m_current_thread = nullptr;
m_current_thread = {};
advance_current_thread();
execute_current_thread_locked();
ASSERT_NOT_REACHED();
}
@@ -324,40 +284,13 @@ namespace Kernel
void Scheduler::unblock_threads(Semaphore* semaphore)
{
SpinLockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
{
if (it->semaphore == semaphore)
{
it = m_sleeping_threads.move_element_to_other_linked_list(
m_active_threads,
m_active_threads.end(),
it
);
}
else
{
it++;
}
}
m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->semaphore == semaphore; });
}
void Scheduler::unblock_thread(pid_t tid)
{
SpinLockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
{
if (it->thread->tid() == tid)
{
m_sleeping_threads.move_element_to_other_linked_list(
m_active_threads,
m_active_threads.end(),
it
);
return;
}
}
m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->thread->tid() == tid; });
}
}
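
The commit message's last point, a per-processor current_thread, falls out of m_current_thread now being a plain SchedulerQueue::Node* rather than an iterator into one shared BAN::LinkedList. A hedged sketch of where that could go (hypothetical names throughout, not part of this commit):

#include <cstddef>

// Stand-in for Kernel::SchedulerQueue::Node, for illustration only.
struct Node;

constexpr size_t MAX_PROCESSORS = 8; // assumed limit, purely illustrative

struct PerCPU
{
	Node* current_thread { nullptr }; // each CPU owns its running thread node
};

static PerCPU s_per_cpu[MAX_PROCESSORS];

// With a plain pointer the per-CPU accessor is trivial; the shared
// active/blocking queues stay protected by the scheduler's SpinLock.
Node*& current_thread_node(size_t cpu_id)
{
	return s_per_cpu[cpu_id].current_thread;
}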