From e636dce919cb934f02c6692feb867aa588630434 Mon Sep 17 00:00:00 2001
From: Bananymous
Date: Fri, 8 Mar 2024 22:13:45 +0200
Subject: [PATCH] Kernel: Rewrite scheduler thread lists

Scheduler now has its own data structure SchedulerQueue, which holds the
active and blocking thread lists. This removes the need for BAN/Errors.h
and makes the current thread a separate list element instead of an
iterator into a linked list. This makes it possible to have a
per-processor current_thread instead of a global one in Scheduler.
---
 kernel/include/kernel/Process.h        |   3 +-
 kernel/include/kernel/Scheduler.h      |  25 ++---
 kernel/include/kernel/SchedulerQueue.h | 126 +++++++++++++++++++++++++
 kernel/kernel/Process.cpp              |   8 +-
 kernel/kernel/Scheduler.cpp            | 117 +++++------------------
 5 files changed, 162 insertions(+), 117 deletions(-)
 create mode 100644 kernel/include/kernel/SchedulerQueue.h

diff --git a/kernel/include/kernel/Process.h b/kernel/include/kernel/Process.h
index 75d2434a2f..1927a23e27 100644
--- a/kernel/include/kernel/Process.h
+++ b/kernel/include/kernel/Process.h
@@ -52,7 +52,8 @@ namespace Kernel
 		void exit(int status, int signal);

 		void add_thread(Thread*);
-		void on_thread_exit(Thread&);
+		// returns true if thread was the last one
+		bool on_thread_exit(Thread&);

 		pid_t sid() const { return m_sid; }
 		pid_t pgrp() const { return m_pgrp; }
diff --git a/kernel/include/kernel/Scheduler.h b/kernel/include/kernel/Scheduler.h
index 0935675f15..95afc0da78 100644
--- a/kernel/include/kernel/Scheduler.h
+++ b/kernel/include/kernel/Scheduler.h
@@ -1,6 +1,6 @@
 #pragma once

-#include
+#include <kernel/SchedulerQueue.h>
 #include
 #include

@@ -30,7 +30,6 @@ namespace Kernel
 		static pid_t current_tid();

 		[[noreturn]] void execute_current_thread();
-		[[noreturn]] void execute_current_thread_locked();
 		[[noreturn]] void delete_current_process_and_thread();

 		// This is no return if called on current thread
@@ -41,34 +40,22 @@ namespace Kernel
 		void set_current_thread_sleeping_impl(uint64_t wake_time);

-		void wake_threads();
-
 		[[nodiscard]] bool save_current_thread();
-		void remove_and_advance_current_thread();
 		void advance_current_thread();
+		[[noreturn]] void execute_current_thread_locked();
 		[[noreturn]] void execute_current_thread_stack_loaded();

 		BAN::ErrorOr<void> add_thread(Thread*);

 	private:
-		struct SchedulerThread
-		{
-			SchedulerThread(Thread* thread)
-				: thread(thread)
-			{}
-
-			Thread* thread;
-			uint64_t wake_time;
-			Semaphore* semaphore;
-		};
-
 		SpinLock m_lock;

-		Thread* m_idle_thread { nullptr };
-		BAN::LinkedList<SchedulerThread> m_active_threads;
-		BAN::LinkedList<SchedulerThread> m_sleeping_threads;
+		SchedulerQueue m_active_threads;
+		SchedulerQueue m_blocking_threads;

-		BAN::LinkedList<SchedulerThread>::iterator m_current_thread;
+		Thread* m_idle_thread { nullptr };
+		SchedulerQueue::Node* m_current_thread { nullptr };

 		friend class Process;
 	};
diff --git a/kernel/include/kernel/SchedulerQueue.h b/kernel/include/kernel/SchedulerQueue.h
new file mode 100644
index 0000000000..c43455df3a
--- /dev/null
+++ b/kernel/include/kernel/SchedulerQueue.h
@@ -0,0 +1,126 @@
+#pragma once
+
+#include <BAN/Assert.h>
+#include <BAN/NoCopyMove.h>
+
+#include <stdint.h>
+
+namespace Kernel
+{
+
+	class Thread;
+	class Semaphore;
+
+	class SchedulerQueue
+	{
+		BAN_NON_COPYABLE(SchedulerQueue);
+		BAN_NON_MOVABLE(SchedulerQueue);
+
+	public:
+		struct Node
+		{
+			Node(Thread* thread)
+				: thread(thread)
+			{}
+
+			Thread* thread;
+			uint64_t wake_time { 0 };
+			Semaphore* semaphore { nullptr };
+
+		private:
+			Node* next { nullptr };
+			friend class SchedulerQueue;
+			friend class Scheduler;
+		};
+
+	public:
+		SchedulerQueue() = default;
+		~SchedulerQueue() { ASSERT_NOT_REACHED(); }
+
+		bool empty() const { return m_front == nullptr; }
+
+		Node* pop_front()
+		{
+			ASSERT(!empty());
+
+			Node* node = m_front;
+
+			m_front = m_front->next;
+			if (m_front == nullptr)
+				m_back = nullptr;
+
+			node->next = nullptr;
+
+			return node;
+		}
+
+		void push_back(Node* node)
+		{
+			ASSERT(node);
+			node->next = nullptr;
+
+			(empty() ? m_front : m_back->next) = node;
+			m_back = node;
+		}
+
+		void add_with_wake_time(Node* node)
+		{
+			ASSERT(node);
+			node->next = nullptr;
+
+			if (empty() || node->wake_time >= m_back->wake_time)
+			{
+				push_back(node);
+				return;
+			}
+
+			if (node->wake_time < m_front->wake_time)
+			{
+				node->next = m_front;
+				m_front = node;
+				return;
+			}
+
+			Node* prev = m_front;
+			for (; node->wake_time >= prev->next->wake_time; prev = prev->next)
+				continue;
+			node->next = prev->next;
+			prev->next = node;
+		}
+
+		void remove_with_wake_time(SchedulerQueue& out, uint64_t current_time)
+		{
+			while (!empty() && m_front->wake_time <= current_time)
+				out.push_back(pop_front());
+		}
+
+		template<typename F>
+		void remove_with_condition(SchedulerQueue& out, F comp)
+		{
+			while (!empty() && comp(m_front))
+				out.push_back(pop_front());
+
+			if (empty())
+				return;
+
+			for (Node* prev = m_front; prev->next;)
+			{
+				Node* node = prev->next;
+				if (!comp(node))
+					prev = prev->next;
+				else
+				{
+					prev->next = node->next;
+					if (node == m_back)
+						m_back = prev;
+					out.push_back(node);
+				}
+			}
+		}
+
+	private:
+		Node* m_front { nullptr };
+		Node* m_back { nullptr };
+	};
+
+}
diff --git a/kernel/kernel/Process.cpp b/kernel/kernel/Process.cpp
index adde8e70b1..722f95d823 100644
--- a/kernel/kernel/Process.cpp
+++ b/kernel/kernel/Process.cpp
@@ -218,7 +218,7 @@ namespace Kernel
 		m_loadable_elf.clear();
 	}

-	void Process::on_thread_exit(Thread& thread)
+	bool Process::on_thread_exit(Thread& thread)
 	{
 		ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);

@@ -230,9 +230,7 @@ namespace Kernel
 			m_threads.clear();
 			thread.setup_process_cleanup();

-			// NOTE: This function is only called from scheduler when it is already locked
-			Scheduler::get().execute_current_thread_locked();
-			ASSERT_NOT_REACHED();
+			return true;
 		}

 		for (size_t i = 0; i < m_threads.size(); i++)
@@ -240,7 +238,7 @@ namespace Kernel
 			if (m_threads[i] == &thread)
 			{
 				m_threads.remove(i);
-				return;
+				return false;
 			}
 		}
diff --git a/kernel/kernel/Scheduler.cpp b/kernel/kernel/Scheduler.cpp
index 90c1a4080e..7535b15bf8 100644
--- a/kernel/kernel/Scheduler.cpp
+++ b/kernel/kernel/Scheduler.cpp
@@ -42,7 +42,7 @@ namespace Kernel
 		ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
 		m_lock.lock();
 		ASSERT(!m_active_threads.empty());
-		m_current_thread = m_active_threads.begin();
+		advance_current_thread();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
@@ -62,7 +62,7 @@ namespace Kernel
 	void Scheduler::timer_reschedule()
 	{
 		auto state = m_lock.lock();
-		wake_threads();
+		m_blocking_threads.remove_with_wake_time(m_active_threads, SystemTimer::get().ms_since_boot());
 		if (save_current_thread())
 			return Processor::set_interrupt_state(state);
 		advance_current_thread();
@@ -83,34 +83,22 @@ namespace Kernel
 	void Scheduler::reschedule_if_idling()
 	{
 		auto state = m_lock.lock();
-		if (m_active_threads.empty() || &current_thread() != m_idle_thread)
+		if (m_active_threads.empty() || m_current_thread)
 			return m_lock.unlock(state);
 		if (save_current_thread())
 			return Processor::set_interrupt_state(state);
-		m_current_thread = m_active_threads.begin();
+		advance_current_thread();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}

-	void Scheduler::wake_threads()
-	{
-		ASSERT(m_lock.current_processor_has_lock());
-
-		uint64_t current_time = SystemTimer::get().ms_since_boot();
-		while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
-		{
-			m_sleeping_threads.move_element_to_other_linked_list(
-				m_active_threads,
-				m_active_threads.end(),
-				m_sleeping_threads.begin()
-			);
-		}
-	}
-
 	BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
 	{
+		auto* node = new SchedulerQueue::Node(thread);
+		if (node == nullptr)
+			return BAN::Error::from_errno(ENOMEM);
 		SpinLockGuard _(m_lock);
-		TRY(m_active_threads.emplace_back(thread));
+		m_active_threads.push_back(node);
 		return {};
 	}

@@ -126,32 +114,12 @@
 	{
 		ASSERT(m_lock.current_processor_has_lock());

-		if (m_active_threads.empty())
-		{
-			m_current_thread = {};
-			return;
-		}
-		if (!m_current_thread || ++m_current_thread == m_active_threads.end())
-			m_current_thread = m_active_threads.begin();
-	}
+		if (m_current_thread)
+			m_active_threads.push_back(m_current_thread);
+		m_current_thread = nullptr;

-	void Scheduler::remove_and_advance_current_thread()
-	{
-		ASSERT(m_lock.current_processor_has_lock());
-
-		ASSERT(m_current_thread);
-
-		if (m_active_threads.size() == 1)
-		{
-			m_active_threads.remove(m_current_thread);
-			m_current_thread = {};
-		}
-		else
-		{
-			auto temp = m_current_thread;
-			advance_current_thread();
-			m_active_threads.remove(temp);
-		}
+		if (!m_active_threads.empty())
+			m_current_thread = m_active_threads.pop_front();
 	}

 	// NOTE: this is declared always inline, so we don't corrupt the stack
@@ -189,11 +157,11 @@

 		ASSERT(thread->has_process());
 		delete &thread->process();
-
-		remove_and_advance_current_thread();
-
 		delete thread;

+		delete m_current_thread;
+		m_current_thread = nullptr;
+		advance_current_thread();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
@@ -244,12 +212,14 @@
 		{
 			Thread* thread = m_current_thread->thread;
 			if (thread->has_process())
-				thread->process().on_thread_exit(*thread);
-
-			remove_and_advance_current_thread();
+				if (thread->process().on_thread_exit(*thread))
+					break;

 			delete thread;

+			delete m_current_thread;
+			m_current_thread = nullptr;
+			advance_current_thread();
 			current = &current_thread();
 		}

@@ -286,21 +256,11 @@
 		if (save_current_thread())
 			return;

-		auto it = m_sleeping_threads.begin();
-		for (; it != m_sleeping_threads.end(); it++)
-			if (wake_time <= it->wake_time)
-				break;
-
 		m_current_thread->wake_time = wake_time;
-		m_active_threads.move_element_to_other_linked_list(
-			m_sleeping_threads,
-			it,
-			m_current_thread
-		);
+		m_blocking_threads.add_with_wake_time(m_current_thread);
+		m_current_thread = nullptr;

-		m_current_thread = {};
 		advance_current_thread();
-
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
@@ -324,40 +284,13 @@
 	void Scheduler::unblock_threads(Semaphore* semaphore)
 	{
 		SpinLockGuard _(m_lock);
-
-		for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
-		{
-			if (it->semaphore == semaphore)
-			{
-				it = m_sleeping_threads.move_element_to_other_linked_list(
-					m_active_threads,
-					m_active_threads.end(),
-					it
-				);
-			}
-			else
-			{
-				it++;
-			}
-		}
+		m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->semaphore == semaphore; });
 	}

 	void Scheduler::unblock_thread(pid_t tid)
 	{
 		SpinLockGuard _(m_lock);
-
-		for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
-		{
-			if (it->thread->tid() == tid)
-			{
-				m_sleeping_threads.move_element_to_other_linked_list(
-					m_active_threads,
-					m_active_threads.end(),
-					it
-				);
-				return;
-			}
-		}
+		m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->thread->tid() == tid; });
 	}

 }
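
Not part of the patch itself: below is a minimal user-space sketch of the wake-time ordering that the new SchedulerQueue provides, trimmed to the operations used by timer_reschedule() and set_current_thread_sleeping_impl(). Thread and Semaphore are simplified stand-ins, Node's private/friend machinery is dropped, and ASSERT is replaced with <cassert>; this is an illustration of the data structure, not the kernel code.

#include <cassert>
#include <cstdint>
#include <iostream>

// Simplified stand-ins for the kernel's Thread/Semaphore types (hypothetical).
struct Thread { int tid; };
struct Semaphore {};

// Intrusive singly linked queue, mirroring the patch's SchedulerQueue shape.
class SchedulerQueue
{
public:
	struct Node
	{
		explicit Node(Thread* thread) : thread(thread) {}
		Thread* thread;
		uint64_t wake_time { 0 };
		Semaphore* semaphore { nullptr };
		Node* next { nullptr };
	};

	bool empty() const { return m_front == nullptr; }

	// Detach and return the first node; the caller owns it afterwards.
	Node* pop_front()
	{
		assert(!empty());
		Node* node = m_front;
		m_front = m_front->next;
		if (m_front == nullptr)
			m_back = nullptr;
		node->next = nullptr;
		return node;
	}

	void push_back(Node* node)
	{
		node->next = nullptr;
		(empty() ? m_front : m_back->next) = node;
		m_back = node;
	}

	// Insert while keeping the list sorted by wake_time (earliest first).
	void add_with_wake_time(Node* node)
	{
		node->next = nullptr;
		if (empty() || node->wake_time >= m_back->wake_time)
			return push_back(node);
		if (node->wake_time < m_front->wake_time)
		{
			node->next = m_front;
			m_front = node;
			return;
		}
		Node* prev = m_front;
		while (node->wake_time >= prev->next->wake_time)
			prev = prev->next;
		node->next = prev->next;
		prev->next = node;
	}

	// Move every node whose wake_time has passed into 'out' (the active queue).
	void remove_with_wake_time(SchedulerQueue& out, uint64_t current_time)
	{
		while (!empty() && m_front->wake_time <= current_time)
			out.push_back(pop_front());
	}

private:
	Node* m_front { nullptr };
	Node* m_back { nullptr };
};

int main()
{
	Thread t1 { 1 }, t2 { 2 }, t3 { 3 };
	SchedulerQueue::Node n1(&t1), n2(&t2), n3(&t3);
	n1.wake_time = 30; n2.wake_time = 10; n3.wake_time = 20;

	SchedulerQueue blocking, active;
	blocking.add_with_wake_time(&n1);
	blocking.add_with_wake_time(&n2);
	blocking.add_with_wake_time(&n3);

	// Wake everything due at time 25 -> tids 2 and 3, in wake order.
	blocking.remove_with_wake_time(active, 25);
	while (!active.empty())
		std::cout << "woke tid " << active.pop_front()->thread->tid << '\n';
}

Built with any hosted C++17 compiler this prints the tids in wake order (2, then 3), which is what remove_with_wake_time feeds into m_active_threads on a timer tick in the patched Scheduler::timer_reschedule().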