Kernel: Rewrite whole scheduler
The previous context-saving code was very hacky and dependent on compiler behaviour that was not consistent. We now always use iret for context saving, which makes everything much cleaner.
This commit is contained in:
@@ -4,36 +4,19 @@ read_ip:
|
||||
popq %rax
|
||||
jmp *%rax
|
||||
|
||||
# Thread-exit landing pad: start_thread (below) pushes this label's address
# as a fake return address, so a thread entry function that returns lands
# here. Body not visible in this diff hunk — presumably invokes the
# scheduler's on-exit handler; confirm against the full source file.
exit_thread_trampoline:
|
||||
# void start_kernel_thread()
# Entry shim for a freshly created kernel thread. The scheduler prepares
# the stack with the layout shown below before jumping here.
# NOTE(review): this hunk interleaves removed and added diff lines, so the
# instruction sequence shown may not be the final code — confirm against
# the full file.
.global start_kernel_thread
start_kernel_thread:
	# STACK LAYOUT
	# on_exit arg
	# on_exit func
	# entry arg
	# entry func

	movq 8(%rsp), %rdi	# rdi = entry arg (SysV AMD64 first argument)
	ret			# NOTE(review): a bare ret here makes the two lines below unreachable — almost certainly a removed-line artifact of the diff view; verify
	movq 0(%rsp), %rsi	# rsi = entry func
	call *%rsi		# entry(arg)
|
||||
|
||||
# void start_thread(uint64_t sp, uint64_t ip)
# First-time entry into a new thread.
# In:  rdi = prepared stack pointer (top of stack holds the entry
#            function's argument), rsi = thread entry point.
# Never returns to the caller.
.global start_thread
start_thread:
	movq %rdi, %rsp			# switch onto the new thread's stack
	popq %rdi			# pop the entry function's argument into arg0
	movq $0, %rbp			# zero the frame pointer so backtraces terminate here
	pushq $exit_thread_trampoline	# fake return address: a returning entry func lands in the exit path
	sti				# re-enable interrupts before running the thread
	jmp *%rsi			# enter the thread body
|
||||
|
||||
# void continue_thread(uint64_t sp, uint64_t ip)
# Resume a previously suspended thread.
# In:  rdi = the thread's saved stack pointer, rsi = resume address.
.global continue_thread
continue_thread:
	movq %rdi, %rsp		# switch to the thread's saved stack
	movq $0, %rax		# NOTE(review): presumably the resumed context observes rax == 0 as a return/flag value — confirm against callers
	jmp *%rsi		# jump back into the thread
|
||||
|
||||
# void thread_userspace_trampoline(uint64_t sp, uint64_t ip, int argc, char** argv, char** envp)
# Drop from kernel mode to ring 3 by hand-building an interrupt-return
# frame (SS, RSP, RFLAGS, CS, RIP) on the current stack and executing
# iretq. Also stages argc/argv/envp in the SysV argument registers for
# the userspace entry point.
.global thread_userspace_trampoline
thread_userspace_trampoline:
	pushq $0x23		# SS  — user data selector; looks like 0x20 | RPL 3 — confirm against this kernel's GDT layout
	pushq %rdi		# RSP — user stack pointer (arg: sp)
	pushfq			# RFLAGS — reuse the current flags
	pushq $0x1B		# CS  — user code selector; looks like 0x18 | RPL 3 — confirm against GDT
	pushq %rsi		# RIP — userspace entry point (arg: ip)
	movq %rdx, %rdi		# argc -> userspace arg0
	movq %rcx, %rsi		# argv -> userspace arg1
	movq %r8, %rdx		# envp -> userspace arg2
	iretq			# pop the frame above and enter ring 3; does not return
	# NOTE(review): everything below iretq is unreachable as shown —
	# almost certainly removed-line artifacts of this diff view; confirm
	# against the full file.
	movq 24(%rsp), %rdi
	movq 16(%rsp), %rsi
	call *%rsi
|
||||
|
||||
Reference in New Issue
Block a user