Kernel: Fix signal generation

Interrupts must be enabled when a signal kills the process, because
process teardown performs mutex locking. Signals are now checked only
when returning to userspace, in the same place where the userspace
segments are loaded.
This commit is contained in:
2026-04-04 22:36:54 +03:00
parent 47d85eb281
commit a29681a524
7 changed files with 166 additions and 143 deletions

View File

@@ -1,20 +1,20 @@
.macro maybe_load_kernel_segments, n .macro intr_header, n
testb $3, \n(%esp) pushal
jz 1f; jnp 1f testb $3, \n+8*4(%esp)
jz 1f
movw $0x10, %ax movw $0x10, %ax
movw %ax, %ds movw %ax, %ds
movw %ax, %es movw %ax, %es
movw %ax, %fs movw %ax, %fs
movw $0x28, %ax movw $0x28, %ax
movw %ax, %gs movw %ax, %gs
1: 1: cld
.endm .endm
.macro maybe_load_userspace_segments, n .macro intr_footer, n
testb $3, \n(%esp) testb $3, \n+8*4(%esp)
jz 1f; jnp 1f jz 1f
call cpp_check_signal
movw $(0x20 | 3), %bx movw $(0x20 | 3), %bx
movw %bx, %ds movw %bx, %ds
movw %bx, %es movw %bx, %es
@@ -22,14 +22,11 @@
movw %bx, %fs movw %bx, %fs
movw $(0x38 | 3), %bx movw $(0x38 | 3), %bx
movw %bx, %gs movw %bx, %gs
1: 1: popal
.endm .endm
isr_stub: isr_stub:
pushal intr_header 12
maybe_load_kernel_segments 44
cld
movl %cr0, %eax; pushl %eax movl %cr0, %eax; pushl %eax
movl %cr2, %eax; pushl %eax movl %cr2, %eax; pushl %eax
movl %cr3, %eax; pushl %eax movl %cr3, %eax; pushl %eax
@@ -57,15 +54,12 @@ isr_stub:
movl %ebp, %esp movl %ebp, %esp
addl $24, %esp addl $24, %esp
maybe_load_userspace_segments 44 intr_footer 12
popal
addl $8, %esp addl $8, %esp
iret iret
irq_stub: irq_stub:
pushal intr_header 12
maybe_load_kernel_segments 44
cld
movl 32(%esp), %edi # interrupt number movl 32(%esp), %edi # interrupt number
@@ -78,16 +72,13 @@ irq_stub:
movl %ebp, %esp movl %ebp, %esp
maybe_load_userspace_segments 44 intr_footer 12
popal
addl $8, %esp addl $8, %esp
iret iret
.global asm_ipi_handler .global asm_ipi_handler
asm_ipi_handler: asm_ipi_handler:
pushal intr_header 4
maybe_load_kernel_segments 36
cld
movl %esp, %ebp movl %esp, %ebp
andl $-16, %esp andl $-16, %esp
@@ -96,15 +87,12 @@ asm_ipi_handler:
movl %ebp, %esp movl %ebp, %esp
maybe_load_userspace_segments 36 intr_footer 4
popal
iret iret
.global asm_timer_handler .global asm_timer_handler
asm_timer_handler: asm_timer_handler:
pushal intr_header 4
maybe_load_kernel_segments 36
cld
movl %esp, %ebp movl %esp, %ebp
andl $-16, %esp andl $-16, %esp
@@ -113,8 +101,7 @@ asm_timer_handler:
movl %ebp, %esp movl %ebp, %esp
maybe_load_userspace_segments 36 intr_footer 4
popal
iret iret
.macro isr n .macro isr n

View File

@@ -1,12 +1,4 @@
.macro swapgs_if_necessary, n .macro intr_header, n
testb $3, \n(%rsp)
jz 1f; jnp 1f
swapgs
1:
.endm
.macro pushaq, n
swapgs_if_necessary \n
pushq %rax pushq %rax
pushq %rcx pushq %rcx
pushq %rdx pushq %rdx
@@ -22,10 +14,18 @@
pushq %r13 pushq %r13
pushq %r14 pushq %r14
pushq %r15 pushq %r15
testb $3, \n+15*8(%rsp)
jz 1f
swapgs
1: cld
.endm .endm
.macro popaq, n .macro intr_footer, n
popq %r15 testb $3, \n+15*8(%rsp)
jz 1f
call cpp_check_signal
swapgs
1: popq %r15
popq %r14 popq %r14
popq %r13 popq %r13
popq %r12 popq %r12
@@ -40,12 +40,10 @@
popq %rdx popq %rdx
popq %rcx popq %rcx
popq %rax popq %rax
swapgs_if_necessary \n
.endm .endm
isr_stub: isr_stub:
pushaq 24 intr_header 24
cld
movq %cr0, %rax; pushq %rax movq %cr0, %rax; pushq %rax
movq %cr2, %rax; pushq %rax movq %cr2, %rax; pushq %rax
movq %cr3, %rax; pushq %rax movq %cr3, %rax; pushq %rax
@@ -58,33 +56,33 @@ isr_stub:
call cpp_isr_handler call cpp_isr_handler
addq $32, %rsp addq $32, %rsp
popaq 24 intr_footer 24
addq $16, %rsp addq $16, %rsp
iretq iretq
irq_stub: irq_stub:
pushaq 24 intr_header 24
cld xorq %rbp, %rbp
movq 120(%rsp), %rdi # irq number movq 120(%rsp), %rdi # irq number
call cpp_irq_handler call cpp_irq_handler
popaq 24 intr_footer 24
addq $16, %rsp addq $16, %rsp
iretq iretq
.global asm_ipi_handler .global asm_ipi_handler
asm_ipi_handler: asm_ipi_handler:
pushaq 8 intr_header 8
cld xorq %rbp, %rbp
call cpp_ipi_handler call cpp_ipi_handler
popaq 8 intr_footer 8
iretq iretq
.global asm_timer_handler .global asm_timer_handler
asm_timer_handler: asm_timer_handler:
pushaq 8 intr_header 8
cld xorq %rbp, %rbp
call cpp_timer_handler call cpp_timer_handler
popaq 8 intr_footer 8
iretq iretq
.macro isr n .macro isr n

View File

@@ -56,12 +56,10 @@ namespace Kernel
// Returns true, if thread is going to trigger signal // Returns true, if thread is going to trigger signal
bool is_interrupted_by_signal(bool skip_stop_and_cont = false) const; bool is_interrupted_by_signal(bool skip_stop_and_cont = false) const;
// Returns true if pending signal can be added to thread
bool can_add_signal_to_execute() const;
bool will_execute_signal() const;
// Returns true if handled signal had SA_RESTART // Returns true if handled signal had SA_RESTART
bool handle_signal(int signal = 0, const siginfo_t& signal_info = {}); bool handle_signal_if_interrupted();
void add_signal(int signal, const siginfo_t& info); bool handle_signal(int signal, const siginfo_t&);
void add_signal(int signal, const siginfo_t&);
void set_suspend_signal_mask(uint64_t sigmask); void set_suspend_signal_mask(uint64_t sigmask);
static bool is_stopping_signal(int signal); static bool is_stopping_signal(int signal);
@@ -153,6 +151,15 @@ namespace Kernel
bool currently_on_alternate_stack() const; bool currently_on_alternate_stack() const;
struct signal_handle_info_t
{
vaddr_t signal_handler;
vaddr_t signal_stack_top;
bool has_sa_restart;
};
signal_handle_info_t remove_signal_and_get_info(int signal);
void handle_signal_impl(int signal, const siginfo_t&, vaddr_t signal_handler, vaddr_t signal_stack_top);
private: private:
// NOTE: this is the first member to force it being last destructed // NOTE: this is the first member to force it being last destructed
// {kernel,userspace}_stack has to be destroyed before page table // {kernel,userspace}_stack has to be destroyed before page table

View File

@@ -221,7 +221,13 @@ namespace Kernel
if (result.is_error()) if (result.is_error())
{ {
dwarnln("Demand paging: {}", result.error()); dwarnln("Demand paging: {}", result.error());
// TODO: this is too strict, we should maybe do SIGBUS and
// SIGKILL only on recursive exceptions
Processor::set_interrupt_state(InterruptState::Enabled);
Thread::current().handle_signal(SIGKILL, {}); Thread::current().handle_signal(SIGKILL, {});
Processor::set_interrupt_state(InterruptState::Disabled);
return; return;
} }
@@ -321,7 +327,7 @@ namespace Kernel
Debug::s_debug_lock.unlock(InterruptState::Disabled); Debug::s_debug_lock.unlock(InterruptState::Disabled);
if (tid && Thread::current().is_userspace()) if (tid && GDT::is_user_segment(interrupt_stack->cs))
{ {
// TODO: Confirm and fix the exception to signal mappings // TODO: Confirm and fix the exception to signal mappings
@@ -366,7 +372,9 @@ namespace Kernel
break; break;
} }
Processor::set_interrupt_state(InterruptState::Enabled);
Thread::current().handle_signal(signal_info.si_signo, signal_info); Thread::current().handle_signal(signal_info.si_signo, signal_info);
Processor::set_interrupt_state(InterruptState::Disabled);
} }
else else
{ {
@@ -401,10 +409,6 @@ namespace Kernel
Process::update_alarm_queue(); Process::update_alarm_queue();
Processor::scheduler().timer_interrupt(); Processor::scheduler().timer_interrupt();
auto& current_thread = Thread::current();
if (current_thread.can_add_signal_to_execute())
current_thread.handle_signal();
} }
extern "C" void cpp_irq_handler(uint32_t irq) extern "C" void cpp_irq_handler(uint32_t irq)
@@ -428,15 +432,18 @@ namespace Kernel
else else
dprintln("no handler for irq 0x{2H}", irq); dprintln("no handler for irq 0x{2H}", irq);
auto& current_thread = Thread::current();
if (current_thread.can_add_signal_to_execute())
current_thread.handle_signal();
Processor::scheduler().reschedule_if_idle(); Processor::scheduler().reschedule_if_idle();
ASSERT(Thread::current().state() != Thread::State::Terminated); ASSERT(Thread::current().state() != Thread::State::Terminated);
} }
extern "C" void cpp_check_signal()
{
Processor::set_interrupt_state(InterruptState::Enabled);
Thread::current().handle_signal_if_interrupted();
Processor::set_interrupt_state(InterruptState::Disabled);
}
void IDT::register_interrupt_handler(uint8_t index, void (*handler)(), uint8_t ist) void IDT::register_interrupt_handler(uint8_t index, void (*handler)(), uint8_t ist)
{ {
auto& desc = m_idt[index]; auto& desc = m_idt[index];
@@ -476,7 +483,6 @@ namespace Kernel
IRQ_LIST_X IRQ_LIST_X
#undef X #undef X
extern "C" void asm_yield_handler();
extern "C" void asm_ipi_handler(); extern "C" void asm_ipi_handler();
extern "C" void asm_timer_handler(); extern "C" void asm_timer_handler();
#if ARCH(i686) #if ARCH(i686)

View File

@@ -303,6 +303,8 @@ namespace Kernel
void Process::exit(int status, int signal) void Process::exit(int status, int signal)
{ {
ASSERT(Processor::get_interrupt_state() == InterruptState::Enabled);
bool expected = false; bool expected = false;
if (!m_is_exiting.compare_exchange(expected, true)) if (!m_is_exiting.compare_exchange(expected, true))
{ {
@@ -310,9 +312,6 @@ namespace Kernel
ASSERT_NOT_REACHED(); ASSERT_NOT_REACHED();
} }
const auto state = Processor::get_interrupt_state();
Processor::set_interrupt_state(InterruptState::Enabled);
if (m_parent) if (m_parent)
{ {
Process* parent_process = nullptr; Process* parent_process = nullptr;
@@ -369,8 +368,6 @@ namespace Kernel
while (m_threads.size() > 1) while (m_threads.size() > 1)
Processor::yield(); Processor::yield();
Processor::set_interrupt_state(state);
Thread::current().on_exit(); Thread::current().on_exit();
ASSERT_NOT_REACHED(); ASSERT_NOT_REACHED();
@@ -2951,7 +2948,7 @@ namespace Kernel
for (;;) for (;;)
{ {
while (Thread::current().will_exit_because_of_signal()) while (Thread::current().will_exit_because_of_signal())
Thread::current().handle_signal(); Thread::current().handle_signal_if_interrupted();
SpinLockGuard guard(m_signal_lock); SpinLockGuard guard(m_signal_lock);
if (!m_stopped) if (!m_stopped)

View File

@@ -94,13 +94,11 @@ namespace Kernel
Process::current().wait_while_stopped(); Process::current().wait_while_stopped();
Processor::set_interrupt_state(InterruptState::Disabled); if (Thread::current().handle_signal_if_interrupted())
if (ret.is_error() && ret.error().get_error_code() == EINTR && is_restartable_syscall(syscall))
ret = BAN::Error::from_errno(ERESTART);
auto& current_thread = Thread::current(); Processor::set_interrupt_state(InterruptState::Disabled);
if (current_thread.can_add_signal_to_execute())
if (current_thread.handle_signal())
if (ret.is_error() && ret.error().get_error_code() == EINTR && is_restartable_syscall(syscall))
ret = BAN::Error::from_errno(ERESTART);
ASSERT(Kernel::Thread::current().state() == Kernel::Thread::State::Executing); ASSERT(Kernel::Thread::current().state() == Kernel::Thread::State::Executing);

View File

@@ -572,19 +572,6 @@ namespace Kernel
return false; return false;
} }
bool Thread::can_add_signal_to_execute() const
{
return is_interrupted_by_signal() && m_mutex_count == 0;
}
bool Thread::will_execute_signal() const
{
if (!is_userspace() || m_state != State::Executing)
return false;
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(kernel_stack_top() - sizeof(InterruptStack));
return interrupt_stack.ip == (uintptr_t)signal_trampoline;
}
bool Thread::will_exit_because_of_signal() const bool Thread::will_exit_because_of_signal() const
{ {
const uint64_t full_pending_mask = m_signal_pending_mask | process().signal_pending_mask(); const uint64_t full_pending_mask = m_signal_pending_mask | process().signal_pending_mask();
@@ -596,21 +583,17 @@ namespace Kernel
return false; return false;
} }
bool Thread::handle_signal(int signal, const siginfo_t& _signal_info) bool Thread::handle_signal_if_interrupted()
{ {
ASSERT(&Thread::current() == this); int signal;
ASSERT(is_userspace()); siginfo_t signal_info;
signal_handle_info_t handle_info;
auto state = m_signal_lock.lock();
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(kernel_stack_top() - sizeof(InterruptStack));
ASSERT(GDT::is_user_segment(interrupt_stack.cs));
auto signal_info = _signal_info;
if (signal == 0)
{ {
const uint64_t process_signal_pending_mask = process().signal_pending_mask(); SpinLockGuard _1(m_signal_lock);
SpinLockGuard _2(m_process->m_signal_lock);
const uint64_t process_signal_pending_mask = m_process->m_signal_pending_mask;
const uint64_t full_pending_mask = m_signal_pending_mask | process_signal_pending_mask; const uint64_t full_pending_mask = m_signal_pending_mask | process_signal_pending_mask;
for (signal = _SIGMIN; signal <= _SIGMAX; signal++) for (signal = _SIGMIN; signal <= _SIGMAX; signal++)
{ {
@@ -618,42 +601,77 @@ namespace Kernel
if ((full_pending_mask & mask) && !(m_signal_block_mask & mask)) if ((full_pending_mask & mask) && !(m_signal_block_mask & mask))
break; break;
} }
ASSERT(signal <= _SIGMAX); if (signal > _SIGMAX)
return false;
if (process_signal_pending_mask & (1ull << signal)) if (process_signal_pending_mask & (1ull << signal))
signal_info = process().m_signal_infos[signal]; signal_info = m_process->m_signal_infos[signal];
else else
signal_info = m_signal_infos[signal]; signal_info = m_signal_infos[signal];
}
else handle_info = remove_signal_and_get_info(signal);
{
ASSERT(signal >= _SIGMIN);
ASSERT(signal <= _SIGMAX);
} }
vaddr_t signal_handler; handle_signal_impl(
bool has_sa_restart; signal,
signal_info,
handle_info.signal_handler,
handle_info.signal_stack_top
);
return handle_info.has_sa_restart;
}
bool Thread::handle_signal(int signal, const siginfo_t& signal_info)
{
ASSERT(&Thread::current() == this);
ASSERT(is_userspace());
ASSERT(signal >= _SIGMIN);
ASSERT(signal <= _SIGMAX);
signal_handle_info_t handle_info;
{
SpinLockGuard _1(m_signal_lock);
SpinLockGuard _2(m_process->m_signal_lock);
handle_info = remove_signal_and_get_info(signal);
}
handle_signal_impl(
signal,
signal_info,
handle_info.signal_handler,
handle_info.signal_stack_top
);
return handle_info.has_sa_restart;
}
Thread::signal_handle_info_t Thread::remove_signal_and_get_info(int signal)
{
ASSERT(m_signal_lock.current_processor_has_lock());
ASSERT(m_process->m_signal_lock.current_processor_has_lock());
ASSERT(signal >= _SIGMIN);
ASSERT(signal <= _SIGMAX);
auto& handler = m_process->m_signal_handlers[signal];
const vaddr_t signal_handler = (handler.sa_flags & SA_SIGINFO)
? reinterpret_cast<vaddr_t>(handler.sa_sigaction)
: reinterpret_cast<vaddr_t>(handler.sa_handler);
const bool has_sa_restart = !!(handler.sa_flags & SA_RESTART);
vaddr_t signal_stack_top = 0; vaddr_t signal_stack_top = 0;
if (m_signal_alt_stack.ss_flags != SS_DISABLE && (handler.sa_flags & SA_ONSTACK) && !currently_on_alternate_stack())
signal_stack_top = reinterpret_cast<vaddr_t>(m_signal_alt_stack.ss_sp) + m_signal_alt_stack.ss_size;
{ if (handler.sa_flags & SA_RESETHAND)
SpinLockGuard _(m_process->m_signal_lock); handler = { .sa_handler = SIG_DFL, .sa_mask = 0, .sa_flags = 0 };
auto& handler = m_process->m_signal_handlers[signal];
signal_handler = (handler.sa_flags & SA_SIGINFO)
? reinterpret_cast<vaddr_t>(handler.sa_sigaction)
: reinterpret_cast<vaddr_t>(handler.sa_handler);
has_sa_restart = !!(handler.sa_flags & SA_RESTART);
const auto& alt_stack = m_signal_alt_stack;
if (alt_stack.ss_flags != SS_DISABLE && (handler.sa_flags & SA_ONSTACK) && !currently_on_alternate_stack())
signal_stack_top = reinterpret_cast<vaddr_t>(alt_stack.ss_sp) + alt_stack.ss_size;
if (handler.sa_flags & SA_RESETHAND)
handler = { .sa_handler = SIG_DFL, .sa_mask = 0, .sa_flags = 0 };
}
m_signal_pending_mask &= ~(1ull << signal); m_signal_pending_mask &= ~(1ull << signal);
process().remove_pending_signal(signal); m_process->m_signal_pending_mask &= ~(1ull << signal);
if (m_signal_suspend_mask.has_value()) if (m_signal_suspend_mask.has_value())
{ {
@@ -661,7 +679,21 @@ namespace Kernel
m_signal_suspend_mask.clear(); m_signal_suspend_mask.clear();
} }
m_signal_lock.unlock(state); return {
.signal_handler = signal_handler,
.signal_stack_top = signal_stack_top,
.has_sa_restart = has_sa_restart,
};
}
void Thread::handle_signal_impl(int signal, const siginfo_t& signal_info, vaddr_t signal_handler, vaddr_t signal_stack_top)
{
ASSERT(this == &Thread::current());
ASSERT(is_userspace());
ASSERT(Processor::get_interrupt_state() == InterruptState::Enabled);
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(kernel_stack_top() - sizeof(InterruptStack));
ASSERT(GDT::is_user_segment(interrupt_stack.cs));
if (signal_handler == (vaddr_t)SIG_IGN) if (signal_handler == (vaddr_t)SIG_IGN)
; ;
@@ -696,10 +728,8 @@ namespace Kernel
{ {
if (m_process->page_table().get_page_flags(pages[i]) & PageTable::Flags::Present) if (m_process->page_table().get_page_flags(pages[i]) & PageTable::Flags::Present)
continue; continue;
Processor::set_interrupt_state(InterruptState::Enabled);
if (auto ret = m_process->allocate_page_for_demand_paging(pages[i], true, false); ret.is_error() || !ret.value()) if (auto ret = m_process->allocate_page_for_demand_paging(pages[i], true, false); ret.is_error() || !ret.value())
m_process->exit(128 + SIGSEGV, SIGSEGV); m_process->exit(128 + SIGSEGV, SIGSEGV);
Processor::set_interrupt_state(InterruptState::Disabled);
} }
} }
@@ -711,11 +741,13 @@ namespace Kernel
write_to_stack(interrupt_stack.sp, interrupt_stack.flags); write_to_stack(interrupt_stack.sp, interrupt_stack.flags);
{ {
signal_info.si_signo = signal;
signal_info.si_addr = reinterpret_cast<void*>(interrupt_stack.ip);
interrupt_stack.sp -= sizeof(siginfo_t); interrupt_stack.sp -= sizeof(siginfo_t);
*reinterpret_cast<siginfo_t*>(interrupt_stack.sp) = signal_info;
auto& stack_signal_info = *reinterpret_cast<siginfo_t*>(interrupt_stack.sp);
stack_signal_info = signal_info;
stack_signal_info.si_signo = signal;
stack_signal_info.si_addr = reinterpret_cast<void*>(interrupt_stack.ip);
static_assert(sizeof(siginfo_t) % sizeof(uintptr_t) == 0); static_assert(sizeof(siginfo_t) % sizeof(uintptr_t) == 0);
} }
@@ -752,8 +784,6 @@ namespace Kernel
panic("Executing unhandled signal {}", signal); panic("Executing unhandled signal {}", signal);
} }
} }
return has_sa_restart;
} }
void Thread::add_signal(int signal, const siginfo_t& info) void Thread::add_signal(int signal, const siginfo_t& info)