Compare commits

...

9 Commits

Author SHA1 Message Date
Bananymous e65bc040af Kernel: Now all active processors are used in scheduling
When a timer reschedule happens, an IPI is broadcast to all
processors so that each of them performs a reschedule!
2024-03-09 23:53:50 +02:00
Bananymous 89ca4c8a8b Kernel: Implement IPI broadcasting 2024-03-09 23:53:38 +02:00
Bananymous 2323a55517 Kernel: Debug lock is locked while dumping stack trace 2024-03-09 23:52:06 +02:00
Bananymous 45d6caa1d0 Kernel: APs now start their idle threads when scheduler is started 2024-03-09 23:51:40 +02:00
Bananymous 55d2a64f54 Kernel: Map interrupt handlers for all processors
This doesn't mean that processors will actually handle the irqs
2024-03-09 23:50:57 +02:00
Bananymous 2420886c2c Kernel: Move current and idle thread to Processor 2024-03-08 23:39:29 +02:00
Bananymous e636dce919 Kernel: Rewrite scheduler thread lists
Scheduler now has its own data structure, SchedulerQueue, which holds the
active and blocking thread lists. This removes the need for BAN/Errors.h
and makes the current thread a separate element instead of an iterator
into a linked list. This makes it possible to have a current_thread on
each processor instead of a global one in Scheduler.
2024-03-08 22:13:45 +02:00
Bananymous 1a1f9b1cf2 Kernel: Fix {read,write}_gs_sized input operands to work always 2024-03-08 22:12:33 +02:00
Bananymous 54d0cb47cd BAN: Update ASSERT_NOT_REACHED message 2024-03-08 22:11:39 +02:00
17 changed files with 263 additions and 162 deletions
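Read together, the two newest commits give the reschedule path its SMP shape: the processor that takes the timer interrupt broadcasts an IPI, and every other processor answers the IPI by rescheduling itself. A condensed sketch of that path, pieced together from the hunks below (simplified, not verbatim kernel code):

    // Processor that received the timer tick (Scheduler.cpp hunk below)
    void Scheduler::timer_reschedule()
    {
        // poke every other processor so they reschedule too
        InterruptController::get().broadcast_ipi();
        // ...then wake expired sleepers and switch to the next thread locally...
    }

    // Every processor's common interrupt handler (IDT.cpp hunk below):
    // IRQ_IPI (vector IRQ_VECTOR_BASE + 32 = 0x40) is not a device interrupt,
    // it goes straight to the scheduler of the processor that received it.
    if (irq == IRQ_IPI)
        Scheduler::get().reschedule();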

View File

@@ -8,6 +8,7 @@
 		? __ban_assertion_failed(__FILE__ ":" __ban_assert_stringify(__LINE__), "ASSERT(" #cond ") failed") \
 		: (void)0)
 
-#define ASSERT_NOT_REACHED() ASSERT(false)
+#define ASSERT_NOT_REACHED() \
+	__ban_assertion_failed(__FILE__ ":" __ban_assert_stringify(__LINE__), "ASSERT_NOT_REACHED() reached")
 
 [[noreturn]] void __ban_assertion_failed(const char* location, const char* msg);

View File

@@ -10,7 +10,7 @@
 #include <kernel/Timer/PIT.h>
 
 #define ISR_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
-#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
+#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31) X(32)
 
 namespace Kernel
 {
@@ -310,10 +310,12 @@ done:
 		else
 		{
 			InterruptController::get().eoi(irq);
-			if (s_interruptables[irq])
-				s_interruptables[irq]->handle_irq();
+			if (irq == IRQ_IPI)
+				Scheduler::get().reschedule();
+			else if (auto* handler = s_interruptables[irq])
+				handler->handle_irq();
 			else
-				dprintln("no handler for irq 0x{2H}\n", irq);
+				dprintln("no handler for irq 0x{2H}", irq);
 		}
 
 		Scheduler::get().reschedule_if_idling();
@@ -356,7 +358,7 @@ done:
 	extern "C" void syscall_asm();
 
-	IDT* IDT::create(bool is_bsb)
+	IDT* IDT::create()
 	{
 		auto* idt = new IDT();
 		ASSERT(idt);
@@ -367,12 +369,8 @@ done:
 		ISR_LIST_X
 #undef X
 
-		// FIXME: distribute IRQs more evenly?
 #define X(num) idt->register_interrupt_handler(IRQ_VECTOR_BASE + num, irq ## num);
-		if (is_bsb)
-		{
 		IRQ_LIST_X
-		}
 #undef X
 
 		idt->register_syscall_handler(0x80, syscall_asm);

View File

@@ -174,6 +174,7 @@ irq 28
 irq 29
 irq 30
 irq 31
+irq 32
 
 // arguments in RAX, RBX, RCX, RDX, RSI, RDI
 // System V ABI: RDI, RSI, RDX, RCX, R8, R9

View File

@@ -19,6 +19,8 @@ namespace Kernel
 		virtual BAN::Optional<uint8_t> get_free_irq() override;
 
 		virtual void initialize_multiprocessor() override;
+		virtual void broadcast_ipi() override;
+		virtual void enable() override;
 
 	private:
 		uint32_t read_from_local_apic(ptrdiff_t);

View File

@@ -7,6 +7,7 @@
 #include <stdint.h>
 
 constexpr uint8_t IRQ_VECTOR_BASE = 0x20;
+constexpr uint8_t IRQ_IPI = 32;
 
 namespace Kernel
 {
@@ -34,7 +35,7 @@ namespace Kernel
 		BAN_NON_MOVABLE(IDT);
 
 	public:
-		static IDT* create(bool is_bsb);
+		static IDT* create();
 
 		[[noreturn]] static void force_triple_fault();

View File

@@ -21,6 +21,8 @@ namespace Kernel
 		static InterruptController& get();
 
 		virtual void initialize_multiprocessor() = 0;
+		virtual void broadcast_ipi() = 0;
+		virtual void enable() = 0;
 
 		virtual BAN::ErrorOr<void> reserve_irq(uint8_t irq) = 0;
 		virtual BAN::Optional<uint8_t> get_free_irq() = 0;

View File

@@ -17,6 +17,8 @@ namespace Kernel
 		virtual BAN::Optional<uint8_t> get_free_irq() override;
 
 		virtual void initialize_multiprocessor() override;
+		virtual void broadcast_ipi() override {}
+		virtual void enable() override {}
 
 		static void remap();
 		static void mask_all();

View File

@@ -52,7 +52,8 @@ namespace Kernel
 		void exit(int status, int signal);
 
 		void add_thread(Thread*);
-		void on_thread_exit(Thread&);
+		// returns true if thread was the last one
+		bool on_thread_exit(Thread&);
 
 		pid_t sid() const { return m_sid; }
 		pid_t pgrp() const { return m_pgrp; }

View File

@@ -5,6 +5,7 @@
 #include <kernel/Arch.h>
 #include <kernel/GDT.h>
 #include <kernel/IDT.h>
+#include <kernel/SchedulerQueue.h>
 
 namespace Kernel
 {
@@ -27,6 +28,7 @@ namespace Kernel
 	public:
 		static Processor& create(ProcessorID id);
 		static Processor& initialize();
+		static void allocate_idle_thread();
 
 		static ProcessorID current_id() { return read_gs_sized<ProcessorID>(offsetof(Processor, m_id)); }
@@ -62,6 +64,10 @@
 		static void* get_current_page_table() { return read_gs_ptr(offsetof(Processor, m_current_page_table)); }
 		static void set_current_page_table(void* page_table) { write_gs_ptr(offsetof(Processor, m_current_page_table), page_table); }
 
+		static Thread* idle_thread() { return reinterpret_cast<Thread*>(read_gs_ptr(offsetof(Processor, m_idle_thread))); }
+		static SchedulerQueue::Node* get_current_thread() { return reinterpret_cast<SchedulerQueue::Node*>(read_gs_ptr(offsetof(Processor, m_current_thread))); }
+		static void set_current_thread(SchedulerQueue::Node* thread) { write_gs_ptr(offsetof(Processor, m_current_thread), thread); }
+
 	private:
 		Processor() = default;
 		~Processor() { ASSERT_NOT_REACHED(); }
@@ -69,7 +75,7 @@
 		template<typename T>
 		static T read_gs_sized(uintptr_t offset) requires(sizeof(T) <= 8)
 		{
-			#define __ASM_INPUT(operation) operation " %%gs:(%[offset]), %[result]" : [result]"=rm"(result) : [offset]"rm"(offset)
+			#define __ASM_INPUT(operation) operation " %%gs:%a[offset], %[result]" : [result]"=r"(result) : [offset]"ir"(offset)
 			T result;
 			if constexpr(sizeof(T) == 8)
 				asm volatile(__ASM_INPUT("movq"));
@@ -86,7 +92,7 @@
 		template<typename T>
 		static void write_gs_sized(uintptr_t offset, T value) requires(sizeof(T) <= 8)
 		{
-			#define __ASM_INPUT(operation) operation " %[value], %%gs:(%[offset])" :: [value]"rm"(value), [offset]"rm"(offset) : "memory"
+			#define __ASM_INPUT(operation) operation " %[value], %%gs:%a[offset]" :: [value]"r"(value), [offset]"ir"(offset) : "memory"
 			if constexpr(sizeof(T) == 8)
 				asm volatile(__ASM_INPUT("movq"));
 			if constexpr(sizeof(T) == 4)
@@ -112,6 +118,9 @@
 		GDT* m_gdt { nullptr };
 		IDT* m_idt { nullptr };
 
+		Thread* m_idle_thread { nullptr };
+		SchedulerQueue::Node* m_current_thread { nullptr };
+
 		void* m_current_page_table { nullptr };
 
 		friend class BAN::Array<Processor, 0xFF>;
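The __ASM_INPUT change above is the fix from commit 1a1f9b1cf2. With the old [offset]"rm" constraint the compiler was free to pass the offset as a memory operand, which cannot appear inside the %%gs:(%[offset]) addressing expression, so the template only assembled when a register happened to be chosen. The %a modifier prints the operand as an address and "ir" also accepts an immediate, so a constant offsetof(...) value folds directly into the gs-relative displacement. A standalone illustration of the fixed pattern (the helper name is made up, not kernel code):

    #include <cstdint>

    // Roughly what read_gs_sized<uint64_t>(offsetof(...)) expands to after the fix:
    // an immediate offset becomes e.g.  movq %gs:16, %rax
    // and a register offset becomes     movq %gs:(%rdi), %rax
    static inline uint64_t read_gs_u64(uintptr_t offset)
    {
        uint64_t result;
        asm volatile("movq %%gs:%a[offset], %[result]"
            : [result]"=r"(result)
            : [offset]"ir"(offset));
        return result;
    }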

View File

@@ -1,6 +1,6 @@
 #pragma once
 
-#include <BAN/LinkedList.h>
+#include <kernel/SchedulerQueue.h>
 #include <kernel/Semaphore.h>
 #include <kernel/Thread.h>
@@ -12,6 +12,7 @@ namespace Kernel
 	public:
 		static BAN::ErrorOr<void> initialize();
 		static Scheduler& get();
+		static bool is_started();
 
 		[[noreturn]] void start();
@@ -30,7 +31,6 @@
 		static pid_t current_tid();
 
 		[[noreturn]] void execute_current_thread();
-		[[noreturn]] void execute_current_thread_locked();
 		[[noreturn]] void delete_current_process_and_thread();
 
 		// This is no return if called on current thread
@@ -39,36 +39,21 @@
 	private:
 		Scheduler() = default;
 
-		void set_current_thread_sleeping_impl(uint64_t wake_time);
+		void set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time);
 
-		void wake_threads();
 		[[nodiscard]] bool save_current_thread();
-		void remove_and_advance_current_thread();
 		void advance_current_thread();
+		[[noreturn]] void execute_current_thread_locked();
 		[[noreturn]] void execute_current_thread_stack_loaded();
 
 		BAN::ErrorOr<void> add_thread(Thread*);
 
 	private:
-		struct SchedulerThread
-		{
-			SchedulerThread(Thread* thread)
-				: thread(thread)
-			{}
-
-			Thread* thread;
-			uint64_t wake_time;
-			Semaphore* semaphore;
-		};
-
 		SpinLock m_lock;
 
-		Thread* m_idle_thread { nullptr };
-		BAN::LinkedList<SchedulerThread> m_active_threads;
-		BAN::LinkedList<SchedulerThread> m_sleeping_threads;
-		BAN::LinkedList<SchedulerThread>::iterator m_current_thread;
+		SchedulerQueue m_active_threads;
+		SchedulerQueue m_blocking_threads;
 
 		friend class Process;
 	};

View File

@@ -0,0 +1,126 @@
+#pragma once
+
+#include <BAN/Assert.h>
+#include <BAN/NoCopyMove.h>
+
+#include <stdint.h>
+
+namespace Kernel
+{
+
+	class Thread;
+	class Semaphore;
+
+	class SchedulerQueue
+	{
+		BAN_NON_COPYABLE(SchedulerQueue);
+		BAN_NON_MOVABLE(SchedulerQueue);
+
+	public:
+		struct Node
+		{
+			Node(Thread* thread)
+				: thread(thread)
+			{}
+
+			Thread* thread;
+			uint64_t wake_time { 0 };
+			Semaphore* semaphore { nullptr };
+
+		private:
+			Node* next { nullptr };
+			friend class SchedulerQueue;
+			friend class Scheduler;
+		};
+
+	public:
+		SchedulerQueue() = default;
+		~SchedulerQueue() { ASSERT_NOT_REACHED(); }
+
+		bool empty() const { return m_front == nullptr; }
+
+		Node* pop_front()
+		{
+			ASSERT(!empty());
+
+			Node* node = m_front;
+
+			m_front = m_front->next;
+			if (m_front == nullptr)
+				m_back = nullptr;
+
+			node->next = nullptr;
+			return node;
+		}
+
+		void push_back(Node* node)
+		{
+			ASSERT(node);
+			node->next = nullptr;
+
+			(empty() ? m_front : m_back->next) = node;
+			m_back = node;
+		}
+
+		void add_with_wake_time(Node* node)
+		{
+			ASSERT(node);
+			node->next = nullptr;
+
+			if (empty() || node->wake_time >= m_back->wake_time)
+			{
+				push_back(node);
+				return;
+			}
+
+			if (node->wake_time < m_front->wake_time)
+			{
+				node->next = m_front;
+				m_front = node;
+				return;
+			}
+
+			Node* prev = m_front;
+			for (; node->wake_time >= prev->next->wake_time; prev = prev->next)
+				continue;
+			node->next = prev->next;
+			prev->next = node;
+		}
+
+		void remove_with_wake_time(SchedulerQueue& out, uint64_t current_time)
+		{
+			while (!empty() && m_front->wake_time <= current_time)
+				out.push_back(pop_front());
+		}
+
+		template<typename F>
+		void remove_with_condition(SchedulerQueue& out, F comp)
+		{
+			while (!empty() && comp(m_front))
+				out.push_back(pop_front());
+
+			if (empty())
+				return;
+
+			for (Node* prev = m_front; prev->next;)
+			{
+				Node* node = prev->next;
+				if (!comp(node))
+					prev = prev->next;
+				else
+				{
+					prev->next = node->next;
+					if (node == m_back)
+						m_back = prev;
+					out.push_back(node);
+				}
+			}
+		}
+
+	private:
+		Node* m_front { nullptr };
+		Node* m_back { nullptr };
+	};
+
+}
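SchedulerQueue is an intrusive singly linked list: the scheduler allocates one Node per thread up front, and moving a thread between the active and blocking queues is pure pointer relinking, so it can happen under the scheduler's spinlock without touching the heap. add_with_wake_time keeps the blocking queue sorted, which lets remove_with_wake_time drain expired sleepers by looking only at the front. A standalone toy version of the same idea (simplified stand-in, not the kernel's types):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct Node { int tid; uint64_t wake_time = 0; Node* next = nullptr; };

    struct Queue
    {
        Node* front = nullptr;
        Node* back = nullptr;

        bool empty() const { return front == nullptr; }

        Node* pop_front()
        {
            Node* n = front;
            front = n->next;
            if (!front)
                back = nullptr;
            n->next = nullptr;
            return n;
        }

        void push_back(Node* n)
        {
            n->next = nullptr;
            (empty() ? front : back->next) = n;
            back = n;
        }

        // insert keeping the queue sorted by wake_time
        void add_with_wake_time(Node* n)
        {
            n->next = nullptr;
            if (empty() || n->wake_time >= back->wake_time)
                return push_back(n);
            if (n->wake_time < front->wake_time)
            {
                n->next = front;
                front = n;
                return;
            }
            Node* prev = front;
            while (n->wake_time >= prev->next->wake_time)
                prev = prev->next;
            n->next = prev->next;
            prev->next = n;
        }

        // move every node whose wake_time has passed into `out`
        void remove_with_wake_time(Queue& out, uint64_t now)
        {
            while (!empty() && front->wake_time <= now)
                out.push_back(pop_front());
        }
    };

    int main()
    {
        Queue active, blocking;
        Node a { 1 }, b { 2 }, c { 3 };
        a.wake_time = 300; b.wake_time = 100; c.wake_time = 200;

        Node* nodes[] = { &a, &b, &c };
        for (Node* n : nodes)
            blocking.add_with_wake_time(n);

        blocking.remove_with_wake_time(active, 250); // wakes tid 2 and tid 3
        for (Node* n = active.front; n; n = n->next)
            printf("woke tid %d\n", n->tid);
        assert(blocking.front == &a);
    }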

View File

@@ -215,9 +215,8 @@ namespace Kernel
 			io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
 		}
 
-		// Mask all interrupts
-		uint32_t sivr = apic->read_from_local_apic(LAPIC_SIV_REG);
-		apic->write_to_local_apic(LAPIC_SIV_REG, sivr | 0x1FF);
+		// Enable local apic
+		apic->write_to_local_apic(LAPIC_SIV_REG, apic->read_from_local_apic(LAPIC_SIV_REG) | 0x1FF);
 
 		return apic;
 	}
@@ -313,6 +312,28 @@
 		dprintln("{} processors started", *g_ap_running_count);
 	}
 
+	void APIC::broadcast_ipi()
+	{
+		write_to_local_apic(LAPIC_ICR_HI_REG, (read_from_local_apic(LAPIC_ICR_HI_REG) & 0x00FFFFFF) | 0xFF000000);
+		write_to_local_apic(LAPIC_ICR_LO_REG,
+			(read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_reserved_mask)
+			| ICR_LO_delivery_mode_fixed
+			| ICR_LO_destination_mode_physical
+			| ICR_LO_level_assert
+			| ICR_LO_trigger_mode_level
+			| ICR_LO_destination_shorthand_all_excluding_self
+			| (IRQ_VECTOR_BASE + IRQ_IPI)
+		);
+		while ((read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_delivery_status_send_pending) == ICR_LO_delivery_status_send_pending)
+			__builtin_ia32_pause();
+	}
+
+	void APIC::enable()
+	{
+		write_to_local_apic(LAPIC_SIV_REG, read_from_local_apic(LAPIC_SIV_REG) | 0x1FF);
+	}
+
 	uint32_t APIC::read_from_local_apic(ptrdiff_t offset)
 	{
 		return MMIO::read32(m_local_apic_vaddr + offset);
@@ -370,7 +391,8 @@
 		redir.vector = IRQ_VECTOR_BASE + irq;
 		redir.mask = 0;
-		redir.destination = m_processors.front().apic_id;
+		// FIXME: distribute IRQs more evenly?
+		redir.destination = Kernel::Processor::bsb_id();
 
 		ioapic->write(IOAPIC_REDIRS + gsi * 2, redir.lo_dword);
 		ioapic->write(IOAPIC_REDIRS + gsi * 2 + 1, redir.hi_dword);
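broadcast_ipi programs the local APIC's interrupt command register: the high ICR dword sets the destination field to 0xFF, the low dword carries the IPI vector (IRQ_VECTOR_BASE + IRQ_IPI = 0x40) together with the delivery options, including the "all excluding self" shorthand, and the final loop waits for the delivery-status bit to clear. The ICR_LO_* constants themselves are not part of this diff; an assumed set of values following the Intel SDM's xAPIC ICR layout (the kernel's real definitions may differ) would be:

    // Assumed from the Intel SDM xAPIC ICR bit layout; not taken from this diff.
    constexpr uint32_t ICR_LO_delivery_mode_fixed                      = 0b000 << 8;
    constexpr uint32_t ICR_LO_destination_mode_physical                = 0 << 11;
    constexpr uint32_t ICR_LO_delivery_status_send_pending             = 1 << 12;
    constexpr uint32_t ICR_LO_level_assert                             = 1 << 14;
    constexpr uint32_t ICR_LO_trigger_mode_level                       = 1 << 15;
    constexpr uint32_t ICR_LO_destination_shorthand_all_excluding_self = 0b11 << 18;
    constexpr uint32_t ICR_LO_reserved_mask                            = 0xFFF32000; // reserved bits, preserved on write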

View File

@@ -25,6 +25,8 @@ namespace Debug
 			uintptr_t rip;
 		};
 
+		SpinLockGuard _(s_debug_lock);
+
 		stackframe* frame = (stackframe*)__builtin_frame_address(0);
 		if (!frame)
 		{

View File

@@ -218,7 +218,7 @@ namespace Kernel
 		m_loadable_elf.clear();
 	}
 
-	void Process::on_thread_exit(Thread& thread)
+	bool Process::on_thread_exit(Thread& thread)
 	{
 		ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
@@ -230,9 +230,7 @@
 			m_threads.clear();
 			thread.setup_process_cleanup();
 
-			// NOTE: This function is only called from scheduler when it is already locked
-			Scheduler::get().execute_current_thread_locked();
-			ASSERT_NOT_REACHED();
+			return true;
 		}
 
 		for (size_t i = 0; i < m_threads.size(); i++)
@@ -240,7 +238,7 @@
 			if (m_threads[i] == &thread)
 			{
 				m_threads.remove(i);
-				return;
+				return false;
 			}
 		}

View File

@@ -1,7 +1,6 @@
 #include <kernel/Memory/kmalloc.h>
 #include <kernel/Processor.h>
-
-#include <kernel/Debug.h>
+#include <kernel/Thread.h>
 
 namespace Kernel
 {
@@ -43,7 +42,7 @@
 		processor.m_gdt = GDT::create();
 		ASSERT(processor.m_gdt);
 
-		processor.m_idt = IDT::create(id == s_bsb_id);
+		processor.m_idt = IDT::create();
 		ASSERT(processor.m_idt);
 
 		return processor;
@@ -67,4 +66,11 @@
 		return processor;
 	}
 
+	void Processor::allocate_idle_thread()
+	{
+		ASSERT(idle_thread() == nullptr);
+		auto* idle_thread = MUST(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
+		write_gs_ptr(offsetof(Processor, m_idle_thread), idle_thread);
+	}
+
 }

View File

@@ -15,6 +15,7 @@ namespace Kernel
 	extern "C" [[noreturn]] void continue_thread(uintptr_t rsp, uintptr_t rip);
 
 	static Scheduler* s_instance = nullptr;
+	static BAN::Atomic<bool> s_started { false };
 
 	ALWAYS_INLINE static void load_temp_stack()
 	{
@@ -24,10 +25,9 @@
 	BAN::ErrorOr<void> Scheduler::initialize()
 	{
 		ASSERT(s_instance == nullptr);
-		Scheduler* scheduler = new Scheduler();
-		ASSERT(scheduler);
-		scheduler->m_idle_thread = TRY(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
-		s_instance = scheduler;
+		s_instance = new Scheduler();
+		ASSERT(s_instance);
+		Processor::allocate_idle_thread();
 
 		return {};
 	}
@@ -41,15 +41,21 @@
 	{
 		ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
 		m_lock.lock();
-		ASSERT(!m_active_threads.empty());
-		m_current_thread = m_active_threads.begin();
+		s_started = true;
+		advance_current_thread();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
 
+	bool Scheduler::is_started()
+	{
+		return s_started;
+	}
+
 	Thread& Scheduler::current_thread()
 	{
-		return m_current_thread ? *m_current_thread->thread : *m_idle_thread;
+		auto* current = Processor::get_current_thread();
+		return current ? *current->thread : *Processor::idle_thread();
 	}
 
 	pid_t Scheduler::current_tid()
@@ -61,8 +67,12 @@
 	void Scheduler::timer_reschedule()
 	{
+		// Broadcast IPI to all other processors for them
+		// to perform reschedule
+		InterruptController::get().broadcast_ipi();
+
 		auto state = m_lock.lock();
-		wake_threads();
+		m_blocking_threads.remove_with_wake_time(m_active_threads, SystemTimer::get().ms_since_boot());
 
 		if (save_current_thread())
 			return Processor::set_interrupt_state(state);
 		advance_current_thread();
@@ -83,34 +93,22 @@
 	void Scheduler::reschedule_if_idling()
 	{
 		auto state = m_lock.lock();
-		if (m_active_threads.empty() || &current_thread() != m_idle_thread)
+		if (m_active_threads.empty() || Processor::get_current_thread())
 			return m_lock.unlock(state);
 
 		if (save_current_thread())
 			return Processor::set_interrupt_state(state);
-		m_current_thread = m_active_threads.begin();
+		advance_current_thread();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
 
-	void Scheduler::wake_threads()
-	{
-		ASSERT(m_lock.current_processor_has_lock());
-
-		uint64_t current_time = SystemTimer::get().ms_since_boot();
-		while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
-		{
-			m_sleeping_threads.move_element_to_other_linked_list(
-				m_active_threads,
-				m_active_threads.end(),
-				m_sleeping_threads.begin()
-			);
-		}
-	}
-
 	BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
 	{
+		auto* node = new SchedulerQueue::Node(thread);
+		if (node == nullptr)
+			return BAN::Error::from_errno(ENOMEM);
+
 		SpinLockGuard _(m_lock);
-		TRY(m_active_threads.emplace_back(thread));
+		m_active_threads.push_back(node);
 		return {};
 	}
@@ -126,32 +124,12 @@
 	{
 		ASSERT(m_lock.current_processor_has_lock());
 
-		if (m_active_threads.empty())
-		{
-			m_current_thread = {};
-			return;
-		}
-		if (!m_current_thread || ++m_current_thread == m_active_threads.end())
-			m_current_thread = m_active_threads.begin();
-	}
-
-	void Scheduler::remove_and_advance_current_thread()
-	{
-		ASSERT(m_lock.current_processor_has_lock());
-		ASSERT(m_current_thread);
-
-		if (m_active_threads.size() == 1)
-		{
-			m_active_threads.remove(m_current_thread);
-			m_current_thread = {};
-		}
-		else
-		{
-			auto temp = m_current_thread;
-			advance_current_thread();
-			m_active_threads.remove(temp);
-		}
+		if (auto* current = Processor::get_current_thread())
+			m_active_threads.push_back(current);
+		Processor::set_current_thread(nullptr);
+
+		if (!m_active_threads.empty())
+			Processor::set_current_thread(m_active_threads.pop_front());
 	}
 
 	// NOTE: this is declared always inline, so we don't corrupt the stack
@@ -185,15 +163,14 @@
 		load_temp_stack();
 		PageTable::kernel().load();
 
-		Thread* thread = m_current_thread->thread;
-
-		ASSERT(thread->has_process());
-		delete &thread->process();
-
-		remove_and_advance_current_thread();
-		delete thread;
+		auto* current = Processor::get_current_thread();
+		ASSERT(current);
+
+		delete &current->thread->process();
+		delete current->thread;
+		delete current;
+
+		Processor::set_current_thread(nullptr);
+		advance_current_thread();
 
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
@@ -242,14 +219,16 @@
 		while (current->state() == Thread::State::Terminated)
 		{
-			Thread* thread = m_current_thread->thread;
-			if (thread->has_process())
-				thread->process().on_thread_exit(*thread);
-
-			remove_and_advance_current_thread();
-
-			delete thread;
+			auto* node = Processor::get_current_thread();
+			if (node->thread->has_process())
+				if (node->thread->process().on_thread_exit(*node->thread))
+					break;
+
+			delete node->thread;
+			delete node;
+
+			Processor::set_current_thread(nullptr);
+			advance_current_thread();
 
 			current = &current_thread();
 		}
@@ -279,28 +258,20 @@
 		ASSERT_NOT_REACHED();
 	}
 
-	void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
+	void Scheduler::set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time)
 	{
 		ASSERT(m_lock.current_processor_has_lock());
 
 		if (save_current_thread())
 			return;
 
-		auto it = m_sleeping_threads.begin();
-		for (; it != m_sleeping_threads.end(); it++)
-			if (wake_time <= it->wake_time)
-				break;
-
-		m_current_thread->wake_time = wake_time;
-		m_active_threads.move_element_to_other_linked_list(
-			m_sleeping_threads,
-			it,
-			m_current_thread
-		);
-		m_current_thread = {};
+		auto* current = Processor::get_current_thread();
+		current->semaphore = semaphore;
+		current->wake_time = wake_time;
+		m_blocking_threads.add_with_wake_time(current);
+		Processor::set_current_thread(nullptr);
 
 		advance_current_thread();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
@@ -308,56 +279,27 @@
 	void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
 	{
 		auto state = m_lock.lock();
-		m_current_thread->semaphore = nullptr;
-		set_current_thread_sleeping_impl(wake_time);
+		set_current_thread_sleeping_impl(nullptr, wake_time);
 		Processor::set_interrupt_state(state);
 	}
 
 	void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
 	{
 		auto state = m_lock.lock();
-		m_current_thread->semaphore = semaphore;
-		set_current_thread_sleeping_impl(wake_time);
+		set_current_thread_sleeping_impl(semaphore, wake_time);
 		Processor::set_interrupt_state(state);
 	}
 
 	void Scheduler::unblock_threads(Semaphore* semaphore)
 	{
 		SpinLockGuard _(m_lock);
-
-		for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
-		{
-			if (it->semaphore == semaphore)
-			{
-				it = m_sleeping_threads.move_element_to_other_linked_list(
-					m_active_threads,
-					m_active_threads.end(),
-					it
-				);
-			}
-			else
-			{
-				it++;
-			}
-		}
+		m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->semaphore == semaphore; });
 	}
 
 	void Scheduler::unblock_thread(pid_t tid)
 	{
 		SpinLockGuard _(m_lock);
-
-		for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
-		{
-			if (it->thread->tid() == tid)
-			{
-				m_sleeping_threads.move_element_to_other_linked_list(
-					m_active_threads,
-					m_active_threads.end(),
-					it
-				);
-				return;
-			}
-		}
+		m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->thread->tid() == tid; });
 	}
 
 }

View File

@@ -213,9 +213,12 @@ extern "C" void ap_main()
 	Processor::initialize();
 	PageTable::kernel().initial_load();
 
+	Processor::allocate_idle_thread();
+	InterruptController::get().enable();
+
 	dprintln("ap{} initialized", Processor::current_id());
 
-	for (;;)
-		asm volatile("hlt");
+	while (!Scheduler::is_started())
+		__builtin_ia32_pause();
+	Scheduler::get().start();
 }