Compare commits


No commits in common. "e65bc040af3ef27c2535eb327f261ffc4523d4e3" and "23a2f8b90314b9597640e5bc2ed8fc2b6277e8f7" have entirely different histories.

17 changed files with 162 additions and 263 deletions

View File

@@ -8,7 +8,6 @@
 		? __ban_assertion_failed(__FILE__ ":" __ban_assert_stringify(__LINE__), "ASSERT(" #cond ") failed") \
 		: (void)0)
 
-#define ASSERT_NOT_REACHED() \
-	__ban_assertion_failed(__FILE__ ":" __ban_assert_stringify(__LINE__), "ASSERT_NOT_REACHED() reached")
+#define ASSERT_NOT_REACHED() ASSERT(false)
 
 [[noreturn]] void __ban_assertion_failed(const char* location, const char* msg);
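For reference, the left side builds the failure location with the standard two-level stringify trick (__FILE__ ":" __ban_assert_stringify(__LINE__)). A minimal, self-contained sketch of that pattern outside the kernel (hypothetical names, hosted environment):

// Minimal sketch of the file:line stringification used by the ASSERT macro above
// (illustrative names, not the kernel's).
#include <cstdio>
#include <cstdlib>

#define MY_STRINGIFY_HELPER(x) #x
#define MY_STRINGIFY(x) MY_STRINGIFY_HELPER(x) // two levels so __LINE__ expands first

[[noreturn]] static void my_assertion_failed(const char* location, const char* msg)
{
    std::fprintf(stderr, "%s: %s\n", location, msg);
    std::abort();
}

#define MY_ASSERT(cond)                                              \
    ((cond)                                                          \
        ? (void)0                                                    \
        : my_assertion_failed(__FILE__ ":" MY_STRINGIFY(__LINE__),   \
                              "ASSERT(" #cond ") failed"))

int main()
{
    MY_ASSERT(1 + 1 == 2); // passes
    MY_ASSERT(0 == 1);     // prints e.g. "example.cpp:25: ASSERT(0 == 1) failed" and aborts
}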

View File

@@ -10,7 +10,7 @@
 #include <kernel/Timer/PIT.h>
 
 #define ISR_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
-#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31) X(32)
+#define IRQ_LIST_X X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
 
 namespace Kernel
 {
@@ -310,12 +310,10 @@ done:
 		else
 		{
 			InterruptController::get().eoi(irq);
-			if (irq == IRQ_IPI)
-				Scheduler::get().reschedule();
-			else if (auto* handler = s_interruptables[irq])
-				handler->handle_irq();
+			if (s_interruptables[irq])
+				s_interruptables[irq]->handle_irq();
 			else
-				dprintln("no handler for irq 0x{2H}", irq);
+				dprintln("no handler for irq 0x{2H}\n", irq);
 		}
 
 		Scheduler::get().reschedule_if_idling();
@@ -358,7 +356,7 @@ done:
 	extern "C" void syscall_asm();
 
-	IDT* IDT::create()
+	IDT* IDT::create(bool is_bsb)
 	{
 		auto* idt = new IDT();
 		ASSERT(idt);
@@ -369,8 +367,12 @@ done:
 		ISR_LIST_X
#undef X
 
+		// FIXME: distribute IRQs more evenly?
#define X(num) idt->register_interrupt_handler(IRQ_VECTOR_BASE + num, irq ## num);
-		IRQ_LIST_X
+		if (is_bsb)
+		{
+			IRQ_LIST_X
+		}
#undef X
 
 		idt->register_syscall_handler(0x80, syscall_asm);
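The ISR_LIST_X / IRQ_LIST_X defines above are X-macro lists: each use site defines X(num), expands the list, and undefines X, so a single list drives both handler generation and registration. A minimal self-contained sketch of the pattern (hypothetical handler names, not from this diff):

// X-macro sketch: one list, expanded twice with different X(num) definitions.
#include <cstdio>

#define HANDLER_LIST_X X(0) X(1) X(2) X(3)

// First expansion: generate one function per entry (handler0 .. handler3).
#define X(num) void handler##num() { std::printf("handler %d\n", num); }
HANDLER_LIST_X
#undef X

// Second expansion: generate a registration table from the same list.
void (*const handlers[])() = {
#define X(num) handler##num,
    HANDLER_LIST_X
#undef X
};

int main()
{
    for (auto* handler : handlers)
        handler(); // prints "handler 0" .. "handler 3"
}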

View File

@@ -174,7 +174,6 @@ irq 28
 irq 29
 irq 30
 irq 31
-irq 32
 
 // arguments in RAX, RBX, RCX, RDX, RSI, RDI
 // System V ABI: RDI, RSI, RDX, RCX, R8, R9

View File

@@ -19,8 +19,6 @@ namespace Kernel
 		virtual BAN::Optional<uint8_t> get_free_irq() override;
 
 		virtual void initialize_multiprocessor() override;
-		virtual void broadcast_ipi() override;
-		virtual void enable() override;
 
 	private:
 		uint32_t read_from_local_apic(ptrdiff_t);

View File

@@ -7,7 +7,6 @@
 #include <stdint.h>
 
 constexpr uint8_t IRQ_VECTOR_BASE = 0x20;
-constexpr uint8_t IRQ_IPI = 32;
 
 namespace Kernel
 {
@@ -35,7 +34,7 @@ namespace Kernel
 		BAN_NON_MOVABLE(IDT);
 
 	public:
-		static IDT* create();
+		static IDT* create(bool is_bsb);
 
 		[[noreturn]] static void force_triple_fault();

View File

@ -21,8 +21,6 @@ namespace Kernel
static InterruptController& get(); static InterruptController& get();
virtual void initialize_multiprocessor() = 0; virtual void initialize_multiprocessor() = 0;
virtual void broadcast_ipi() = 0;
virtual void enable() = 0;
virtual BAN::ErrorOr<void> reserve_irq(uint8_t irq) = 0; virtual BAN::ErrorOr<void> reserve_irq(uint8_t irq) = 0;
virtual BAN::Optional<uint8_t> get_free_irq() = 0; virtual BAN::Optional<uint8_t> get_free_irq() = 0;

View File

@ -17,8 +17,6 @@ namespace Kernel
virtual BAN::Optional<uint8_t> get_free_irq() override; virtual BAN::Optional<uint8_t> get_free_irq() override;
virtual void initialize_multiprocessor() override; virtual void initialize_multiprocessor() override;
virtual void broadcast_ipi() override {}
virtual void enable() override {}
static void remap(); static void remap();
static void mask_all(); static void mask_all();

View File

@@ -52,8 +52,7 @@ namespace Kernel
 		void exit(int status, int signal);
 
 		void add_thread(Thread*);
-		// returns true if thread was the last one
-		bool on_thread_exit(Thread&);
+		void on_thread_exit(Thread&);
 
 		pid_t sid() const { return m_sid; }
 		pid_t pgrp() const { return m_pgrp; }

View File

@@ -5,7 +5,6 @@
 #include <kernel/Arch.h>
 #include <kernel/GDT.h>
 #include <kernel/IDT.h>
-#include <kernel/SchedulerQueue.h>
 
 namespace Kernel
 {
@@ -28,7 +27,6 @@ namespace Kernel
 	public:
 		static Processor& create(ProcessorID id);
 		static Processor& initialize();
-		static void allocate_idle_thread();
 
 		static ProcessorID current_id() { return read_gs_sized<ProcessorID>(offsetof(Processor, m_id)); }
@@ -64,10 +62,6 @@ namespace Kernel
 		static void* get_current_page_table() { return read_gs_ptr(offsetof(Processor, m_current_page_table)); }
 		static void set_current_page_table(void* page_table) { write_gs_ptr(offsetof(Processor, m_current_page_table), page_table); }
 
-		static Thread* idle_thread() { return reinterpret_cast<Thread*>(read_gs_ptr(offsetof(Processor, m_idle_thread))); }
-		static SchedulerQueue::Node* get_current_thread() { return reinterpret_cast<SchedulerQueue::Node*>(read_gs_ptr(offsetof(Processor, m_current_thread))); }
-		static void set_current_thread(SchedulerQueue::Node* thread) { write_gs_ptr(offsetof(Processor, m_current_thread), thread); }
-
 	private:
 		Processor() = default;
 		~Processor() { ASSERT_NOT_REACHED(); }
@@ -75,7 +69,7 @@ namespace Kernel
 		template<typename T>
 		static T read_gs_sized(uintptr_t offset) requires(sizeof(T) <= 8)
 		{
-#define __ASM_INPUT(operation) operation " %%gs:%a[offset], %[result]" : [result]"=r"(result) : [offset]"ir"(offset)
+#define __ASM_INPUT(operation) operation " %%gs:(%[offset]), %[result]" : [result]"=rm"(result) : [offset]"rm"(offset)
 			T result;
 			if constexpr(sizeof(T) == 8)
 				asm volatile(__ASM_INPUT("movq"));
@@ -92,7 +86,7 @@ namespace Kernel
 		template<typename T>
 		static void write_gs_sized(uintptr_t offset, T value) requires(sizeof(T) <= 8)
 		{
-#define __ASM_INPUT(operation) operation " %[value], %%gs:%a[offset]" :: [value]"r"(value), [offset]"ir"(offset) : "memory"
+#define __ASM_INPUT(operation) operation " %[value], %%gs:(%[offset])" :: [value]"rm"(value), [offset]"rm"(offset) : "memory"
 			if constexpr(sizeof(T) == 8)
 				asm volatile(__ASM_INPUT("movq"));
 			if constexpr(sizeof(T) == 4)
@@ -118,9 +112,6 @@ namespace Kernel
 		GDT* m_gdt { nullptr };
 		IDT* m_idt { nullptr };
 
-		Thread* m_idle_thread { nullptr };
-		SchedulerQueue::Node* m_current_thread { nullptr };
-
 		void* m_current_page_table { nullptr };
 
 		friend class BAN::Array<Processor, 0xFF>;
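The read_gs_sized / write_gs_sized helpers implement per-CPU storage: each core's GS base points at its own Processor, so a field is read at gs:offset. The hunks above swap an absolute-displacement operand (%a[offset] with an "ir" constraint) for a register-indirect one ((%[offset])). A rough, self-contained sketch of the same idea, assuming the GS base is already set up and using a register-only constraint and illustrative names:

// GS-relative per-CPU load on x86_64 (sketch; field names are illustrative).
#include <cstdint>
#include <cstddef>

struct PerCpu
{
    uint32_t id;
    void* current_page_table;
};

static inline uint32_t read_gs_u32(uintptr_t offset)
{
    uint32_t result;
    // movl %gs:(%reg), %reg -- the offset register is added to this CPU's GS base
    asm volatile("movl %%gs:(%[offset]), %[result]"
        : [result]"=r"(result)
        : [offset]"r"(offset));
    return result;
}

static inline uint32_t current_cpu_id()
{
    return read_gs_u32(offsetof(PerCpu, id));
}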

View File

@@ -1,6 +1,6 @@
 #pragma once
 
-#include <kernel/SchedulerQueue.h>
+#include <BAN/LinkedList.h>
 #include <kernel/Semaphore.h>
 #include <kernel/Thread.h>
@@ -12,7 +12,6 @@ namespace Kernel
 	public:
 		static BAN::ErrorOr<void> initialize();
 		static Scheduler& get();
-		static bool is_started();
 
 		[[noreturn]] void start();
@@ -31,6 +30,7 @@ namespace Kernel
 		static pid_t current_tid();
 
 		[[noreturn]] void execute_current_thread();
+		[[noreturn]] void execute_current_thread_locked();
 		[[noreturn]] void delete_current_process_and_thread();
 
 		// This is no return if called on current thread
@@ -39,21 +39,36 @@ namespace Kernel
 	private:
 		Scheduler() = default;
 
-		void set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time);
+		void set_current_thread_sleeping_impl(uint64_t wake_time);
+
+		void wake_threads();
 
 		[[nodiscard]] bool save_current_thread();
+		void remove_and_advance_current_thread();
 		void advance_current_thread();
 
-		[[noreturn]] void execute_current_thread_locked();
 		[[noreturn]] void execute_current_thread_stack_loaded();
 
 		BAN::ErrorOr<void> add_thread(Thread*);
 
 	private:
+		struct SchedulerThread
+		{
+			SchedulerThread(Thread* thread)
+				: thread(thread)
+			{}
+
+			Thread* thread;
+			uint64_t wake_time;
+			Semaphore* semaphore;
+		};
+
 		SpinLock m_lock;
-		SchedulerQueue m_active_threads;
-		SchedulerQueue m_blocking_threads;
+		Thread* m_idle_thread { nullptr };
+		BAN::LinkedList<SchedulerThread> m_active_threads;
+		BAN::LinkedList<SchedulerThread> m_sleeping_threads;
+		BAN::LinkedList<SchedulerThread>::iterator m_current_thread;
 
 		friend class Process;
 	};

View File

@@ -1,126 +0,0 @@
#pragma once
#include <BAN/Assert.h>
#include <BAN/NoCopyMove.h>
#include <stdint.h>
namespace Kernel
{
class Thread;
class Semaphore;
class SchedulerQueue
{
BAN_NON_COPYABLE(SchedulerQueue);
BAN_NON_MOVABLE(SchedulerQueue);
public:
struct Node
{
Node(Thread* thread)
: thread(thread)
{}
Thread* thread;
uint64_t wake_time { 0 };
Semaphore* semaphore { nullptr };
private:
Node* next { nullptr };
friend class SchedulerQueue;
friend class Scheduler;
};
public:
SchedulerQueue() = default;
~SchedulerQueue() { ASSERT_NOT_REACHED(); }
bool empty() const { return m_front == nullptr; }
Node* pop_front()
{
ASSERT(!empty());
Node* node = m_front;
m_front = m_front->next;
if (m_front == nullptr)
m_back = nullptr;
node->next = nullptr;
return node;
}
void push_back(Node* node)
{
ASSERT(node);
node->next = nullptr;
(empty() ? m_front : m_back->next) = node;
m_back = node;
}
void add_with_wake_time(Node* node)
{
ASSERT(node);
node->next = nullptr;
if (empty() || node->wake_time >= m_back->wake_time)
{
push_back(node);
return;
}
if (node->wake_time < m_front->wake_time)
{
node->next = m_front;
m_front = node;
return;
}
Node* prev = m_front;
for (; node->wake_time >= prev->next->wake_time; prev = prev->next)
continue;
node->next = prev->next;
prev->next = node;
}
void remove_with_wake_time(SchedulerQueue& out, uint64_t current_time)
{
while (!empty() && m_front->wake_time <= current_time)
out.push_back(pop_front());
}
template<typename F>
void remove_with_condition(SchedulerQueue& out, F comp)
{
while (!empty() && comp(m_front))
out.push_back(pop_front());
if (empty())
return;
for (Node* prev = m_front; prev->next;)
{
Node* node = prev->next;
if (!comp(node))
prev = prev->next;
else
{
prev->next = node->next;
if (node == m_back)
m_back = prev;
out.push_back(node);
}
}
}
private:
Node* m_front { nullptr };
Node* m_back { nullptr };
};
}

View File

@@ -215,8 +215,9 @@ namespace Kernel
 			io_apic.max_redirs = io_apic.read(IOAPIC_MAX_REDIRS);
 		}
 
-		// Enable local apic
-		apic->write_to_local_apic(LAPIC_SIV_REG, apic->read_from_local_apic(LAPIC_SIV_REG) | 0x1FF);
+		// Mask all interrupts
+		uint32_t sivr = apic->read_from_local_apic(LAPIC_SIV_REG);
+		apic->write_to_local_apic(LAPIC_SIV_REG, sivr | 0x1FF);
 
 		return apic;
 	}
@@ -312,28 +313,6 @@ namespace Kernel
 		dprintln("{} processors started", *g_ap_running_count);
 	}
 
-	void APIC::broadcast_ipi()
-	{
-		write_to_local_apic(LAPIC_ICR_HI_REG, (read_from_local_apic(LAPIC_ICR_HI_REG) & 0x00FFFFFF) | 0xFF000000);
-		write_to_local_apic(LAPIC_ICR_LO_REG,
-			(read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_reserved_mask)
-			| ICR_LO_delivery_mode_fixed
-			| ICR_LO_destination_mode_physical
-			| ICR_LO_level_assert
-			| ICR_LO_trigger_mode_level
-			| ICR_LO_destination_shorthand_all_excluding_self
-			| (IRQ_VECTOR_BASE + IRQ_IPI)
-		);
-		while ((read_from_local_apic(LAPIC_ICR_LO_REG) & ICR_LO_delivery_status_send_pending) == ICR_LO_delivery_status_send_pending)
-			__builtin_ia32_pause();
-	}
-
-	void APIC::enable()
-	{
-		write_to_local_apic(LAPIC_SIV_REG, read_from_local_apic(LAPIC_SIV_REG) | 0x1FF);
-	}
-
 	uint32_t APIC::read_from_local_apic(ptrdiff_t offset)
 	{
 		return MMIO::read32(m_local_apic_vaddr + offset);
@@ -391,8 +370,7 @@ namespace Kernel
 		redir.vector = IRQ_VECTOR_BASE + irq;
 		redir.mask = 0;
-		// FIXME: distribute IRQs more evenly?
-		redir.destination = Kernel::Processor::bsb_id();
+		redir.destination = m_processors.front().apic_id;
 
 		ioapic->write(IOAPIC_REDIRS + gsi * 2, redir.lo_dword);
 		ioapic->write(IOAPIC_REDIRS + gsi * 2 + 1, redir.hi_dword);

View File

@@ -25,8 +25,6 @@ namespace Debug
 			uintptr_t rip;
 		};
 
-		SpinLockGuard _(s_debug_lock);
-
 		stackframe* frame = (stackframe*)__builtin_frame_address(0);
 		if (!frame)
 		{

View File

@@ -218,7 +218,7 @@ namespace Kernel
 		m_loadable_elf.clear();
 	}
 
-	bool Process::on_thread_exit(Thread& thread)
+	void Process::on_thread_exit(Thread& thread)
 	{
 		ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
@@ -230,7 +230,9 @@ namespace Kernel
 			m_threads.clear();
 			thread.setup_process_cleanup();
-			return true;
+			// NOTE: This function is only called from scheduler when it is already locked
+			Scheduler::get().execute_current_thread_locked();
+			ASSERT_NOT_REACHED();
 		}
 
 		for (size_t i = 0; i < m_threads.size(); i++)
@@ -238,7 +240,7 @@ namespace Kernel
 			if (m_threads[i] == &thread)
 			{
 				m_threads.remove(i);
-				return false;
+				return;
 			}
 		}

View File

@@ -1,6 +1,7 @@
 #include <kernel/Memory/kmalloc.h>
 #include <kernel/Processor.h>
-#include <kernel/Thread.h>
+#include <kernel/Debug.h>
 
 namespace Kernel
 {
@@ -42,7 +43,7 @@ namespace Kernel
 		processor.m_gdt = GDT::create();
 		ASSERT(processor.m_gdt);
 
-		processor.m_idt = IDT::create();
+		processor.m_idt = IDT::create(id == s_bsb_id);
 		ASSERT(processor.m_idt);
 
 		return processor;
@@ -66,11 +67,4 @@ namespace Kernel
 		return processor;
 	}
 
-	void Processor::allocate_idle_thread()
-	{
-		ASSERT(idle_thread() == nullptr);
-		auto* idle_thread = MUST(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
-		write_gs_ptr(offsetof(Processor, m_idle_thread), idle_thread);
-	}
-
 }

View File

@@ -15,7 +15,6 @@ namespace Kernel
 	extern "C" [[noreturn]] void continue_thread(uintptr_t rsp, uintptr_t rip);
 
 	static Scheduler* s_instance = nullptr;
-	static BAN::Atomic<bool> s_started { false };
 
 	ALWAYS_INLINE static void load_temp_stack()
 	{
@@ -25,9 +24,10 @@ namespace Kernel
 	BAN::ErrorOr<void> Scheduler::initialize()
 	{
 		ASSERT(s_instance == nullptr);
-		s_instance = new Scheduler();
-		ASSERT(s_instance);
-		Processor::allocate_idle_thread();
+		Scheduler* scheduler = new Scheduler();
+		ASSERT(scheduler);
+		scheduler->m_idle_thread = TRY(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
+		s_instance = scheduler;
 
 		return {};
 	}
@@ -41,21 +41,15 @@ namespace Kernel
 	{
 		ASSERT(Processor::get_interrupt_state() == InterruptState::Disabled);
 		m_lock.lock();
-		s_started = true;
-		advance_current_thread();
+		ASSERT(!m_active_threads.empty());
+		m_current_thread = m_active_threads.begin();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
 
-	bool Scheduler::is_started()
-	{
-		return s_started;
-	}
-
 	Thread& Scheduler::current_thread()
 	{
-		auto* current = Processor::get_current_thread();
-		return current ? *current->thread : *Processor::idle_thread();
+		return m_current_thread ? *m_current_thread->thread : *m_idle_thread;
 	}
 
 	pid_t Scheduler::current_tid()
@@ -67,12 +61,8 @@ namespace Kernel
 	void Scheduler::timer_reschedule()
 	{
-		// Broadcast IPI to all other processors for them
-		// to perform reschedule
-		InterruptController::get().broadcast_ipi();
-
 		auto state = m_lock.lock();
-		m_blocking_threads.remove_with_wake_time(m_active_threads, SystemTimer::get().ms_since_boot());
+		wake_threads();
 
 		if (save_current_thread())
 			return Processor::set_interrupt_state(state);
 		advance_current_thread();
@@ -93,22 +83,34 @@ namespace Kernel
 	void Scheduler::reschedule_if_idling()
 	{
 		auto state = m_lock.lock();
-		if (m_active_threads.empty() || Processor::get_current_thread())
+		if (m_active_threads.empty() || &current_thread() != m_idle_thread)
 			return m_lock.unlock(state);
 
 		if (save_current_thread())
 			return Processor::set_interrupt_state(state);
-		advance_current_thread();
+		m_current_thread = m_active_threads.begin();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
 
+	void Scheduler::wake_threads()
+	{
+		ASSERT(m_lock.current_processor_has_lock());
+
+		uint64_t current_time = SystemTimer::get().ms_since_boot();
+		while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
+		{
+			m_sleeping_threads.move_element_to_other_linked_list(
+				m_active_threads,
+				m_active_threads.end(),
+				m_sleeping_threads.begin()
+			);
+		}
+	}
+
 	BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
 	{
-		auto* node = new SchedulerQueue::Node(thread);
-		if (node == nullptr)
-			return BAN::Error::from_errno(ENOMEM);
-
 		SpinLockGuard _(m_lock);
-		m_active_threads.push_back(node);
+		TRY(m_active_threads.emplace_back(thread));
 		return {};
 	}
@@ -124,12 +126,32 @@
 	{
 		ASSERT(m_lock.current_processor_has_lock());
 
-		if (auto* current = Processor::get_current_thread())
-			m_active_threads.push_back(current);
-		Processor::set_current_thread(nullptr);
-
-		if (!m_active_threads.empty())
-			Processor::set_current_thread(m_active_threads.pop_front());
+		if (m_active_threads.empty())
+		{
+			m_current_thread = {};
+			return;
+		}
+		if (!m_current_thread || ++m_current_thread == m_active_threads.end())
+			m_current_thread = m_active_threads.begin();
+	}
+
+	void Scheduler::remove_and_advance_current_thread()
+	{
+		ASSERT(m_lock.current_processor_has_lock());
+		ASSERT(m_current_thread);
+
+		if (m_active_threads.size() == 1)
+		{
+			m_active_threads.remove(m_current_thread);
+			m_current_thread = {};
+		}
+		else
+		{
+			auto temp = m_current_thread;
+			advance_current_thread();
+			m_active_threads.remove(temp);
+		}
 	}
 	// NOTE: this is declared always inline, so we don't corrupt the stack
@@ -163,14 +185,15 @@
 		load_temp_stack();
 		PageTable::kernel().load();
 
-		auto* current = Processor::get_current_thread();
-		ASSERT(current);
-
-		delete &current->thread->process();
-		delete current->thread;
-		delete current;
-		Processor::set_current_thread(nullptr);
-
-		advance_current_thread();
+		Thread* thread = m_current_thread->thread;
+
+		ASSERT(thread->has_process());
+		delete &thread->process();
+
+		remove_and_advance_current_thread();
+
+		delete thread;
+
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
 
@@ -219,16 +242,14 @@
 		while (current->state() == Thread::State::Terminated)
 		{
-			auto* node = Processor::get_current_thread();
-			if (node->thread->has_process())
-				if (node->thread->process().on_thread_exit(*node->thread))
-					break;
-
-			delete node->thread;
-			delete node;
-			Processor::set_current_thread(nullptr);
-			advance_current_thread();
+			Thread* thread = m_current_thread->thread;
+			if (thread->has_process())
+				thread->process().on_thread_exit(*thread);
+
+			remove_and_advance_current_thread();
+
+			delete thread;
 
 			current = &current_thread();
 		}
@@ -258,20 +279,28 @@
 		ASSERT_NOT_REACHED();
 	}
 
-	void Scheduler::set_current_thread_sleeping_impl(Semaphore* semaphore, uint64_t wake_time)
+	void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
 	{
 		ASSERT(m_lock.current_processor_has_lock());
 
 		if (save_current_thread())
 			return;
 
-		auto* current = Processor::get_current_thread();
-		current->semaphore = semaphore;
-		current->wake_time = wake_time;
-		m_blocking_threads.add_with_wake_time(current);
-		Processor::set_current_thread(nullptr);
+		auto it = m_sleeping_threads.begin();
+		for (; it != m_sleeping_threads.end(); it++)
+			if (wake_time <= it->wake_time)
+				break;
+
+		m_current_thread->wake_time = wake_time;
+		m_active_threads.move_element_to_other_linked_list(
+			m_sleeping_threads,
+			it,
+			m_current_thread
+		);
+		m_current_thread = {};
 
 		advance_current_thread();
 		execute_current_thread_locked();
 		ASSERT_NOT_REACHED();
 	}
@@ -279,27 +308,56 @@
 	void Scheduler::set_current_thread_sleeping(uint64_t wake_time)
 	{
 		auto state = m_lock.lock();
-		set_current_thread_sleeping_impl(nullptr, wake_time);
+		m_current_thread->semaphore = nullptr;
+		set_current_thread_sleeping_impl(wake_time);
 		Processor::set_interrupt_state(state);
 	}
 
 	void Scheduler::block_current_thread(Semaphore* semaphore, uint64_t wake_time)
 	{
 		auto state = m_lock.lock();
-		set_current_thread_sleeping_impl(semaphore, wake_time);
+		m_current_thread->semaphore = semaphore;
+		set_current_thread_sleeping_impl(wake_time);
 		Processor::set_interrupt_state(state);
 	}
 
 	void Scheduler::unblock_threads(Semaphore* semaphore)
 	{
 		SpinLockGuard _(m_lock);
-		m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->semaphore == semaphore; });
+		for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
+		{
+			if (it->semaphore == semaphore)
+			{
+				it = m_sleeping_threads.move_element_to_other_linked_list(
+					m_active_threads,
+					m_active_threads.end(),
+					it
+				);
+			}
+			else
+			{
+				it++;
+			}
+		}
 	}
 
 	void Scheduler::unblock_thread(pid_t tid)
 	{
 		SpinLockGuard _(m_lock);
-		m_blocking_threads.remove_with_condition(m_active_threads, [&](auto* node) { return node->thread->tid() == tid; });
+		for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
+		{
+			if (it->thread->tid() == tid)
+			{
+				m_sleeping_threads.move_element_to_other_linked_list(
+					m_active_threads,
+					m_active_threads.end(),
+					it
+				);
+				return;
+			}
+		}
 	}
 
 }
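The reworked scheduler moves entries between m_active_threads and m_sleeping_threads with BAN::LinkedList::move_element_to_other_linked_list, which relinks an existing node in front of a given position in the destination list and, as used in unblock_threads, yields the iterator following the moved element. The closest standard-library analogue is std::list::splice; a small sketch of the wake-up move under that assumed analogy (not the kernel's API):

// Wake-up move sketched with std::list::splice as a stand-in for
// BAN::LinkedList::move_element_to_other_linked_list.
#include <cstdint>
#include <list>

struct SchedulerThread
{
    int tid;
    uint64_t wake_time;
};

void wake_threads(std::list<SchedulerThread>& sleeping,
                  std::list<SchedulerThread>& active,
                  uint64_t current_time)
{
    // The sleeping list is kept sorted by wake_time, so only the front needs checking.
    while (!sleeping.empty() && sleeping.front().wake_time <= current_time)
    {
        // Relink the front node onto the end of the active list: no allocation, no copy.
        active.splice(active.end(), sleeping, sleeping.begin());
    }
}

Because splice (like the kernel's relinking helper) never allocates, threads can be moved between the queues while the scheduler spin lock is held.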

View File

@@ -213,12 +213,9 @@ extern "C" void ap_main()
 	Processor::initialize();
 	PageTable::kernel().initial_load();
 
-	Processor::allocate_idle_thread();
-	InterruptController::get().enable();
-
 	dprintln("ap{} initialized", Processor::current_id());
 
-	while (!Scheduler::is_started())
-		__builtin_ia32_pause();
-	Scheduler::get().start();
+	for (;;)
+		asm volatile("hlt");
 }