Kernel: Improve multithreading support
We can now use an arbitrary BAN::Function<void(...)> as the thread entry point. I also implemented multithreading for i386, since it was not done in the initial multithreading commit.
parent 777ede328e
commit 5b5e620d8a
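To illustrate what the new API enables, here is a rough sketch based on the signatures added in this diff; the header paths and the construction of a BAN::Function from a plain function pointer are assumptions, not shown in the commit:

```cpp
// Sketch only: include paths and BAN::Function construction from a plain
// function pointer are assumed, not part of this commit.
#include <BAN/Function.h>
#include <kernel/Scheduler.h>

static volatile int g_counter = 0;

static void worker(int increment, int iterations)
{
	// Thread arguments must be integral or pointer types; this is enforced
	// by the static_assert in Thread's templated constructor.
	for (int i = 0; i < iterations; i++)
		g_counter += increment;
}

static void spawn_example()
{
	BAN::Function<void(int, int)> entry = worker;
	Kernel::Scheduler::get().add_thread(entry, 2, 100);
}
```

Because the Thread constructor copies the function object into the Thread itself (see the Thread.h/Thread.cpp changes below), `entry` does not have to outlive the `add_thread()` call.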
@@ -0,0 +1,38 @@
+# uint32_t read_rip()
+.global read_rip
+read_rip:
+	popl %eax
+	jmp *%eax
+
+exit_thread_trampoline:
+	addl $16, %esp
+	popl %eax
+	pushl $0x696969
+	pushl %eax
+	ret
+
+# void start_thread(uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t rsp, uint32_t rbp, uint32_t rip)
+.global start_thread
+start_thread:
+	movl %esp, %eax
+	movl 28(%eax), %ecx
+	movl 24(%eax), %ebp
+	movl 20(%eax), %esp
+
+	pushl 16(%eax)
+	pushl 12(%eax)
+	pushl 8(%eax)
+	pushl 4(%eax)
+	pushl $exit_thread_trampoline
+
+	sti
+	jmp *%ecx
+
+# void continue_thread(uint32_t rsp, uint32_t rbp, uint32_t rip)
+.global continue_thread
+continue_thread:
+	movl 12(%esp), %ecx
+	movl 8(%esp), %ebp
+	movl 4(%esp), %esp
+	movl $0, %eax
+	jmp *%ecx
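For reference, this is how the i386 routines above are declared and driven from C++ (the prototypes match the ones added to Scheduler.cpp later in this commit). With the i386 System V / cdecl convention every argument lives on the caller's stack, which is why start_thread indexes off the incoming stack pointer:

```cpp
#include <cstdint>

// Offsets seen inside start_thread after `movl %esp, %eax`:
//   4(%eax)  arg0          20(%eax)  rsp  (new thread's stack pointer)
//   8(%eax)  arg1          24(%eax)  rbp  (new thread's frame pointer)
//   12(%eax) arg2          28(%eax)  rip  (thread entry point)
//   16(%eax) arg3
// start_thread switches to the new stack, re-pushes arg3..arg0 (cdecl order)
// plus exit_thread_trampoline as the fake return address, enables interrupts
// and jumps to rip.
extern "C" void start_thread(uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                             uintptr_t rsp, uintptr_t rbp, uintptr_t rip);

// Restores a paused thread: loads its saved esp/ebp, clears eax and jumps to rip.
extern "C" void continue_thread(uintptr_t rsp, uintptr_t rbp, uintptr_t rip);

// Returns its own return address, i.e. the address of the instruction
// following the call site.
extern "C" uintptr_t read_rip();
```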
@@ -8,4 +8,5 @@ $(ARCHDIR)/boot.o \
 $(ARCHDIR)/IDT.o \
 $(ARCHDIR)/MMU.o \
 $(ARCHDIR)/SpinLock.o \
+$(ARCHDIR)/Thread.o \
@@ -0,0 +1,32 @@
+# uint64_t read_rip()
+.global read_rip
+read_rip:
+	popq %rax
+	jmp *%rax
+
+.global get_thread_at_exit
+get_thread_at_exit:
+	movq 8(%rdi), %rax
+	ret
+
+exit_thread_trampoline:
+	movq 8(%rsp), %rdi
+	ret
+
+# void start_thread(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t rsp, uint64_t rbp, uint64_t rip)
+.global start_thread
+start_thread:
+	movq 8(%rsp), %rcx
+	movq %r8, %rsp
+	movq %r9, %rbp
+	pushq $exit_thread_trampoline
+	sti
+	jmp *%rcx
+
+# void continue_thread(uint64_t rsp, uint64_t rbp, uint64_t rip)
+.global continue_thread
+continue_thread:
+	movq %rdi, %rsp
+	movq %rsi, %rbp
+	movq $0, %rax
+	jmp *%rdx
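The x86_64 version needs almost no stack traffic because the System V AMD64 ABI passes the first six integer arguments in registers. A quick map of what the code above receives (same prototypes as on i386; repeated here only to carry the register comments):

```cpp
#include <cstdint>

// start_thread(arg0, arg1, arg2, arg3, rsp, rbp, rip):
//   arg0..arg3 -> %rdi, %rsi, %rdx, %rcx
//   rsp        -> %r8   (installed as the new stack pointer)
//   rbp        -> %r9   (installed as the new frame pointer)
//   rip        -> 8(%rsp), the 7th argument spilled to the caller's stack
//                 (loaded into %rcx before the jump, so %rcx no longer
//                 holds arg3 when the entry point runs)
extern "C" void start_thread(uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                             uintptr_t rsp, uintptr_t rbp, uintptr_t rip);

// continue_thread(rsp, rbp, rip):
//   rsp -> %rdi, rbp -> %rsi, rip -> %rdx; %rax is zeroed so that the
//   resumed thread observes read_rip() returning 0.
extern "C" void continue_thread(uintptr_t rsp, uintptr_t rbp, uintptr_t rip);
```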
@@ -9,4 +9,5 @@ $(ARCHDIR)/IDT.o \
 $(ARCHDIR)/interrupts.o \
 $(ARCHDIR)/MMU.o \
 $(ARCHDIR)/SpinLock.o \
+$(ARCHDIR)/Thread.o \
@@ -13,16 +13,26 @@ namespace Kernel
 		BAN_NON_MOVABLE(Scheduler);
 
 	public:
-		static void Initialize();
-		static Scheduler& Get();
+		static void initialize();
+		static Scheduler& get();
 
-		const Thread& CurrentThread() const;
+		const Thread& current_thread() const;
 
-		void AddThread(void(*)());
-		void Switch();
-		void Start();
+		template<typename... Args>
+		void add_thread(const BAN::Function<void(Args...)>& func, Args... args)
+		{
+			uintptr_t flags;
+			asm volatile("pushf; pop %0" : "=r"(flags));
+			asm volatile("cli");
+			MUST(m_threads.emplace_back(func, BAN::forward<Args>(args)...));
+			if (flags & (1 << 9))
+				asm volatile("sti");
+		}
 
-		static constexpr size_t ms_between_switch = 4;
+		void switch_thread();
+		void start();
+
+		static constexpr size_t ms_between_switch = 1;
 
 	private:
 		Scheduler() {}
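add_thread() has to guard the thread list against the timer interrupt, so it saves (E/R)FLAGS, executes cli, and only executes sti afterwards if the Interrupt Flag (bit 9) was set to begin with. The same idiom, factored into a hypothetical RAII guard purely for illustration (not part of BAN or this commit):

```cpp
#include <cstdint>

class InterruptGuard
{
public:
	InterruptGuard()
	{
		// Save FLAGS and disable interrupts for the critical section.
		asm volatile("pushf; pop %0" : "=r"(m_flags));
		asm volatile("cli");
	}
	~InterruptGuard()
	{
		// Bit 9 of (E/R)FLAGS is the Interrupt Flag; only re-enable
		// interrupts if they were enabled when the guard was created.
		if (m_flags & (1 << 9))
			asm volatile("sti");
	}
private:
	uintptr_t m_flags { 0 };
};
```

Checking the saved flags instead of unconditionally executing sti avoids enabling interrupts in contexts where they were deliberately off.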
@@ -1,6 +1,7 @@
 #pragma once
 
-#include <BAN/Memory.h>
+#include <BAN/Function.h>
+#include <BAN/NoCopyMove.h>
 
 namespace Kernel
 {
@@ -20,28 +21,45 @@ namespace Kernel
 		};
 
 	public:
-		Thread(void(*)());
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpmf-conversions"
+		template<typename... Args>
+		Thread(const BAN::Function<void(Args...)>& func, Args... args)
+			: Thread((uintptr_t)(void*)&BAN::Function<void(Args...)>::operator(), (uintptr_t)&func, ((uintptr_t)args)...)
+		{
+			static_assert(((BAN::is_integral_v<Args> || BAN::is_pointer_v<Args>) && ...));
+		}
+#pragma GCC diagnostic pop
 
 		~Thread();
 
 		uint32_t id() const { return m_id; }
 
-		void set_rip(uintptr_t rip) { m_rip = rip; }
 		void set_rsp(uintptr_t rsp) { m_rsp = rsp; }
+		void set_rbp(uintptr_t rbp) { m_rbp = rbp; }
+		void set_rip(uintptr_t rip) { m_rip = rip; }
 		void set_state(State state) { m_state = state; }
-		uintptr_t rip() const { return m_rip; }
 		uintptr_t rsp() const { return m_rsp; }
+		uintptr_t rbp() const { return m_rbp; }
+		uintptr_t rip() const { return m_rip; }
 		State state() const { return m_state; }
 
+		const uintptr_t* args() const { return m_args; }
+
 	private:
-		static void on_exit();
+		Thread(uintptr_t rip, uintptr_t func, uintptr_t arg1 = 0, uintptr_t arg2 = 0, uintptr_t arg3 = 0);
+		void on_exit();
 
 	private:
 		void* m_stack_base = nullptr;
 		State m_state = State::NotStarted;
+		uintptr_t m_args[4] = {};
 		uintptr_t m_rip = 0;
+		uintptr_t m_rbp = 0;
 		uintptr_t m_rsp = 0;
 		const uint32_t m_id = 0;
+
+		alignas(max_align_t) uint8_t m_function[BAN::Function<void()>::size()] { 0 };
 	};
 
 }
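The templated constructor relies on a GCC extension: casting a pointer-to-member-function to a plain pointer, which is exactly what -Wpmf-conversions warns about (hence the pragmas). Under the Itanium/System V C++ ABI a non-virtual member function is effectively a free function whose first parameter is the object pointer, which is the arg0 slot the Thread stores. A minimal user-space sketch of the same trick (illustrative names, GCC only):

```cpp
#include <cstdio>

struct Callable
{
	void call(int x) { std::printf("called with %d\n", x); }
};

int main()
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpmf-conversions"
	// Extract a plain function pointer from the member function; the object
	// pointer becomes the explicit first argument.
	using fn_t = void (*)(Callable*, int);
	fn_t fn = (fn_t)(void*)&Callable::call;
#pragma GCC diagnostic pop

	Callable c;
	fn(&c, 42); // behaves like c.call(42)
	return 0;
}
```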
@@ -7,18 +7,9 @@ namespace Kernel
 	static Scheduler* s_instance = nullptr;
 
+	extern "C" void start_thread(uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t rsp, uintptr_t rbp, uintptr_t rip);
+	extern "C" void continue_thread(uintptr_t rsp, uintptr_t rbp, uintptr_t rip);
 	extern "C" uintptr_t read_rip();
-
-	asm(
-		".global read_rip;"
-		"read_rip:"
-#if ARCH(x86_64)
-		"popq %rax;"
-		"jmp *%rax"
-#else
-		"popl %eax;"
-		"jmp *%eax"
-#endif
-	);
 
 	void Scheduler::initialize()
 	{
@@ -37,13 +28,14 @@ namespace Kernel
 		return *m_current_iterator;
 	}
 
-	//void Scheduler::AddThread(const BAN::Function<void()>& function)
-	//{
-	//	MUST(m_threads.EmplaceBack(function));
-	//}
-
 	void Scheduler::switch_thread()
 	{
+		uintptr_t rsp, rbp, rip;
+		if (!(rip = read_rip()))
+			return;
+		read_rsp(rsp);
+		read_rbp(rbp);
+
 		static uint8_t cnt = 0;
 		if (cnt++ % ms_between_switch)
 			return;
@@ -63,6 +55,8 @@ namespace Kernel
 		Thread& current = *m_current_iterator;
 		Thread& next = *next_iterator;
 
+		ASSERT(next.state() == Thread::State::Paused || next.state() == Thread::State::NotStarted);
+
 		if (current.state() == Thread::State::Done)
 		{
 			// NOTE: this does not invalidate the next/next_iterator
@@ -71,21 +65,11 @@ namespace Kernel
 			m_current_iterator = decltype(m_threads)::iterator();
 		}
 
-		uintptr_t rip = read_rip();
-		if (rip == 0)
-			return;
-
-		uintptr_t rsp;
-#if ARCH(x86_64)
-		asm volatile("movq %%rsp, %0" : "=r"(rsp));
-#else
-		asm volatile("movl %%esp, %0" : "=r"(rsp));
-#endif
-
 		if (m_current_iterator)
 		{
-			current.set_rip(rip);
 			current.set_rsp(rsp);
+			current.set_rbp(rbp);
+			current.set_rip(rip);
 			current.set_state(Thread::State::Paused);
 		}
 
@@ -93,33 +77,16 @@ namespace Kernel
 
 		if (next.state() == Thread::State::NotStarted)
 		{
-			InterruptController::Get().EOI(PIT_IRQ);
+			InterruptController::get().eoi(PIT_IRQ);
 			next.set_state(Thread::State::Running);
-			asm volatile(
-#if ARCH(x86_64)
-				"movq %0, %%rsp;"
-#else
-				"movl %0, %%esp;"
-#endif
-				"sti;"
-				"jmp *%1;"
-				:: "r"(next.rsp()), "r"(next.rip())
-			);
+			const uintptr_t* args = next.args();
+			start_thread(args[0], args[1], args[2], args[3], next.rsp(), next.rbp(), next.rip());
 		}
 		else if (next.state() == Thread::State::Paused)
 		{
 			next.set_state(Thread::State::Running);
-			asm volatile(
-#if ARCH(x86_64)
-				"movq %0, %%rsp;"
-				"movq $0, %%rax;"
-#else
-				"movl %0, %%esp;"
-				"movl $0, %%eax;"
-#endif
-				"jmp *%1;"
-				:: "r"(next.rsp()), "r"(next.rip())
-			);
+			BOCHS_BREAK();
+			continue_thread(next.rsp(), next.rbp(), next.rip());
 		}
 
 		ASSERT(false);
@@ -134,16 +101,11 @@ namespace Kernel
 		Thread& current = *m_current_iterator;
 		ASSERT(current.state() == Thread::State::NotStarted);
 		current.set_state(Thread::State::Running);
-		asm volatile(
-#if ARCH(x86_64)
-			"movq %0, %%rsp;"
-#else
-			"movl %0, %%esp;"
-#endif
-			"sti;"
-			"jmp *%1;"
-			:: "r"(current.rsp()), "r"(current.rip())
-		);
+
+		const uintptr_t* args = current.args();
+		start_thread(args[0], args[1], args[2], args[3], current.rsp(), current.rbp(), current.rip());
+
+		ASSERT(false);
 	}
 
 }
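The save/restore dance in switch_thread() above hinges on read_rip(): it returns its own return address, so the scheduler can later make the thread resume at exactly that point. continue_thread() jumps back there with rax/eax forced to 0, so the "second return" of read_rip() is distinguishable from the first. Simplified control flow (a sketch, not the literal kernel code):

```cpp
#include <cstdint>

extern "C" uintptr_t read_rip();
extern "C" void continue_thread(uintptr_t rsp, uintptr_t rbp, uintptr_t rip);

void switch_thread_sketch()
{
	// First pass: read_rip() returns the address of the instruction after
	// the call, i.e. the resume point for this thread.
	uintptr_t rip = read_rip();
	if (rip == 0)
		return; // Second pass: we arrived here via continue_thread() with
		        // rax = 0, meaning this thread was just switched back in.

	// ... save rip/rsp/rbp into the current Thread, pick the next thread ...
	// continue_thread(next.rsp(), next.rbp(), next.rip()) then jumps into the
	// middle of that thread's earlier switch_thread() call, where the branch
	// above sees 0 and simply returns into the interrupted code.
}
```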
@@ -14,23 +14,37 @@ namespace Kernel
 
 	static constexpr size_t thread_stack_size = PAGE_SIZE;
 
-	template<typename T>
+	template<size_t size, typename T>
 	static void write_to_stack(uintptr_t& rsp, const T& value)
 	{
-		rsp -= sizeof(T);
-		*(T*)rsp = value;
+		rsp -= size;
+		memcpy((void*)rsp, (void*)&value, size);
 	}
 
-	Thread::Thread(void(*function)())
+	Thread::Thread(uintptr_t rip, uintptr_t func, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
 		: m_id(s_next_id++)
 	{
 		m_stack_base = kmalloc(thread_stack_size, PAGE_SIZE);
 		ASSERT(m_stack_base);
 
-		m_rip = (uintptr_t)function;
-		m_rsp = (uintptr_t)m_stack_base + thread_stack_size;
-		write_to_stack(m_rsp, this);
-		write_to_stack(m_rsp, &Thread::on_exit);
+		m_rbp = (uintptr_t)m_stack_base + thread_stack_size;
+		m_rsp = m_rbp;
+		m_rip = rip;
+		m_args[1] = arg1;
+		m_args[2] = arg2;
+		m_args[3] = arg3;
+
+		// NOTE: in System V ABI arg0 is the pointer to 'this'
+		// we copy the function object to Thread object
+		// so we can ensure the lifetime of it. We store
+		// it as raw bytes so that Thread can be non-templated.
+		// This requires BAN::Function to be trivially copyable
+		// but for now it should be.
+		memcpy(m_function, (void*)func, sizeof(m_function));
+		m_args[0] = (uintptr_t)m_function;
+
+		write_to_stack<sizeof(void*)>(m_rsp, this);
+		write_to_stack<sizeof(void*)>(m_rsp, &Thread::on_exit);
 	}
 
 	Thread::~Thread()
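The rewritten write_to_stack() pushes raw bytes with memcpy instead of `*(T*)rsp = value`, which sidesteps alignment and strict-aliasing concerns on an arbitrary stack address. A standalone user-space sketch of the same helper (hypothetical test code, not part of the kernel):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

template<size_t size, typename T>
static void write_to_stack(uintptr_t& rsp, const T& value)
{
	// Grow the stack downwards and copy the value byte-wise.
	rsp -= size;
	memcpy((void*)rsp, (void*)&value, size);
}

static void fake_on_exit() {}

int main()
{
	alignas(16) static uint8_t stack[128];
	uintptr_t rsp = (uintptr_t)stack + sizeof(stack);

	int dummy = 42;
	void* object_ptr = &dummy;                      // stands in for 'this'
	uintptr_t exit_hook = (uintptr_t)&fake_on_exit; // stands in for &Thread::on_exit

	// Same order as the Thread constructor: 'this' first, exit handler on top.
	write_to_stack<sizeof(void*)>(rsp, object_ptr);
	write_to_stack<sizeof(void*)>(rsp, exit_hook);

	std::printf("pushed %zu bytes, stack top now at %p\n",
	            (size_t)(sizeof(stack) - (rsp - (uintptr_t)stack)), (void*)rsp);
	return 0;
}
```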
@@ -40,13 +54,7 @@ namespace Kernel
 
 	void Thread::on_exit()
 	{
-		Thread* thread = nullptr;
-#if ARCH(x86_64)
-		asm volatile("movq (%%rsp), %0" : "=r"(thread));
-#else
-		asm volatile("movl (%%esp), %0" : "=r"(thread));
-#endif
-		thread->m_state = State::Done;
+		m_state = State::Done;
 		for (;;) asm volatile("hlt");
 	}
 
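Finally, on_exit() becoming a regular member function only works because of how the stack was prepared: the constructor leaves [&Thread::on_exit][this] at the top of the fresh stack, and the exit_thread_trampoline added in the assembly above turns that into an ordinary call of on_exit() with the right object pointer. A descriptive walkthrough of the exit path (comments only, based on the code in this commit):

```cpp
// 1. start_thread pushes exit_thread_trampoline before jumping to the entry
//    point, so when BAN::Function<...>::operator() eventually returns, it
//    "returns" into the trampoline instead of falling off the stack.
//
// 2. i386 trampoline:  addl $16, %esp   discard the four pushed arguments
//                      popl %eax        eax = &Thread::on_exit (pushed by the ctor)
//                      pushl $0x696969  dummy return address (never used)
//                      pushl %eax; ret  tail-call on_exit with 'this' left as
//                                       the first stack argument
//
// 3. x86_64 trampoline: movq 8(%rsp), %rdi   rdi = the Thread* the ctor pushed
//                       ret                  returns into &Thread::on_exit
//
// 4. on_exit() then runs with a valid 'this', sets m_state = State::Done and
//    halts until the scheduler reaps the thread on the next switch.
```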