Kernel: Fix sse state saving

This was broken when I added SMP support. This patch makes the SSE
handling somewhat inefficient, as the state is now saved and restored on
every interrupt, but at least it works correctly again. I'll have to
look into how SSE state handling can be optimized nicely under SMP. A
simple approach would be to pin each thread to a specific processor and
do pretty much what I had before, but with the SSE-owning thread tracked
per processor rather than in a static global.
This commit is contained in:
Bananymous 2024-07-16 23:15:11 +03:00
parent 7a0fb9a57f
commit 4b917390ac
4 changed files with 25 additions and 66 deletions

View File

@ -8,7 +8,8 @@
"${workspaceFolder}/userspace/libraries/*/include" "${workspaceFolder}/userspace/libraries/*/include"
], ],
"defines": [ "defines": [
"__arch=x86_64" "__arch=x86_64",
"__enable_sse=1"
], ],
"compilerPath": "${workspaceFolder}/toolchain/local/bin/x86_64-banan_os-gcc", "compilerPath": "${workspaceFolder}/toolchain/local/bin/x86_64-banan_os-gcc",
"cStandard": "c17", "cStandard": "c17",
@ -24,7 +25,8 @@
], ],
"defines": [ "defines": [
"__arch=x86_64", "__arch=x86_64",
"__is_kernel" "__is_kernel",
"__enable_sse=1"
], ],
"compilerPath": "${workspaceFolder}/toolchain/local/bin/x86_64-banan_os-gcc", "compilerPath": "${workspaceFolder}/toolchain/local/bin/x86_64-banan_os-gcc",
"cStandard": "c17", "cStandard": "c17",

View File

@ -82,7 +82,6 @@ namespace Kernel
#if __enable_sse #if __enable_sse
void save_sse(); void save_sse();
void load_sse(); void load_sse();
static Thread* sse_thread();
#endif #endif
void add_mutex() { m_mutex_count++; } void add_mutex() { m_mutex_count++; }

View File

@ -173,6 +173,10 @@ namespace Kernel
if (tid) if (tid)
{ {
#if __enable_sse
Thread::current().save_sse();
#endif
if (isr == ISR::PageFault) if (isr == ISR::PageFault)
{ {
// Check if stack is OOB // Check if stack is OOB
@ -218,31 +222,6 @@ namespace Kernel
} }
} }
} }
#if __enable_sse
else if (isr == ISR::DeviceNotAvailable)
{
#if ARCH(x86_64)
asm volatile(
"movq %cr0, %rax;"
"andq $~(1 << 3), %rax;"
"movq %rax, %cr0;"
);
#elif ARCH(i686)
asm volatile(
"movl %cr0, %eax;"
"andl $~(1 << 3), %eax;"
"movl %eax, %cr0;"
);
#endif
if (auto* current = &Thread::current(); current != Thread::sse_thread())
{
if (auto* sse = Thread::sse_thread())
sse->save_sse();
current->load_sse();
}
goto done;
}
#endif
} }
Debug::s_debug_lock.lock(); Debug::s_debug_lock.lock();
@ -334,7 +313,11 @@ namespace Kernel
ASSERT(Thread::current().state() != Thread::State::Terminated); ASSERT(Thread::current().state() != Thread::State::Terminated);
done: done:
#if __enable_sse
Thread::current().load_sse();
#else
return; return;
#endif
} }
extern "C" void cpp_yield_handler(InterruptStack* interrupt_stack, InterruptRegisters* interrupt_registers) extern "C" void cpp_yield_handler(InterruptStack* interrupt_stack, InterruptRegisters* interrupt_registers)
@ -357,6 +340,10 @@ done:
asm volatile("cli; 1: hlt; jmp 1b"); asm volatile("cli; 1: hlt; jmp 1b");
} }
#if __enable_sse
Thread::current().save_sse();
#endif
if (!InterruptController::get().is_in_service(irq)) if (!InterruptController::get().is_in_service(irq))
dprintln("spurious irq 0x{2H}", irq); dprintln("spurious irq 0x{2H}", irq);
else else
@ -377,6 +364,10 @@ done:
Scheduler::get().reschedule_if_idling(); Scheduler::get().reschedule_if_idling();
ASSERT(Thread::current().state() != Thread::State::Terminated); ASSERT(Thread::current().state() != Thread::State::Terminated);
#if __enable_sse
Thread::current().load_sse();
#endif
} }
void IDT::register_interrupt_handler(uint8_t index, void (*handler)()) void IDT::register_interrupt_handler(uint8_t index, void (*handler)())

View File

@ -113,33 +113,8 @@ namespace Kernel
: m_tid(tid), m_process(process) : m_tid(tid), m_process(process)
{ {
#if __enable_sse #if __enable_sse
#if ARCH(x86_64) // initializes sse storage to valid state
uintptr_t cr0;
asm volatile(
"movq %%cr0, %%rax;"
"movq %%rax, %[cr0];"
"andq $~(1 << 3), %%rax;"
"movq %%rax, %%cr0;"
: [cr0]"=r"(cr0)
:: "rax"
);
save_sse(); save_sse();
asm volatile("movq %0, %%cr0" :: "r"(cr0));
#elif ARCH(i686)
uintptr_t cr0;
asm volatile(
"movl %%cr0, %%eax;"
"movl %%eax, %[cr0];"
"andl $~(1 << 3), %%eax;"
"movl %%eax, %%cr0;"
: [cr0]"=r"(cr0)
:: "eax"
);
save_sse();
asm volatile("movl %0, %%cr0" :: "r"(cr0));
#else
#error
#endif
#endif #endif
} }
@ -482,8 +457,6 @@ namespace Kernel
} }
#if __enable_sse #if __enable_sse
static Thread* s_sse_thread = nullptr;
void Thread::save_sse() void Thread::save_sse()
{ {
asm volatile("fxsave %0" :: "m"(m_sse_storage)); asm volatile("fxsave %0" :: "m"(m_sse_storage));
@ -492,12 +465,6 @@ namespace Kernel
void Thread::load_sse() void Thread::load_sse()
{ {
asm volatile("fxrstor %0" :: "m"(m_sse_storage)); asm volatile("fxrstor %0" :: "m"(m_sse_storage));
s_sse_thread = this;
}
Thread* Thread::sse_thread()
{
return s_sse_thread;
} }
#endif #endif