Kernel/LibC: Implement sigaltstack

parent def236b7cd
commit 0dfe0b7023

@@ -34,4 +34,6 @@ signal_trampoline:
	addl $8, %esp
	popf

	movl (%esp), %esp

	ret

@@ -59,5 +59,7 @@ signal_trampoline:
	addq $16, %rsp
	popfq

	movq (%rsp), %rsp

	// return over red-zone
	ret $128

@@ -195,6 +195,7 @@ namespace Kernel
		BAN::ErrorOr<long> sys_sigprocmask(int how, const sigset_t* set, sigset_t* oset);
		BAN::ErrorOr<long> sys_sigsuspend(const sigset_t* set);
		BAN::ErrorOr<long> sys_sigwait(const sigset_t* set, int* sig);
		BAN::ErrorOr<long> sys_sigaltstack(const stack_t* ss, stack_t* oss);

		BAN::ErrorOr<long> sys_futex(int op, const uint32_t* addr, uint32_t val, const timespec* abstime);
		BAN::ErrorOr<long> sys_yield();

@@ -63,6 +63,8 @@ namespace Kernel
		void add_signal(int signal);
		void set_suspend_signal_mask(uint64_t sigmask);

		BAN::ErrorOr<void> sigaltstack(const stack_t* ss, stack_t* oss);

		// blocks current thread and returns either on unblock, eintr, spuriously or after timeout
		// if mutex is not nullptr, it will be atomically freed before blocking and automatically locked on wake
		BAN::ErrorOr<void> sleep_or_eintr_ns(uint64_t ns);

@@ -140,6 +142,8 @@ namespace Kernel
		static void on_exit_trampoline(Thread*);
		void on_exit();

		bool currently_on_alternate_stack() const;

	private:
		// NOTE: this is the first member to force it being last destructed
		// {kernel,userspace}_stack has to be destroyed before page table

@@ -164,6 +168,7 @@ namespace Kernel
		uint64_t m_signal_block_mask { 0 };
		BAN::Optional<uint64_t> m_signal_suspend_mask;
		SpinLock m_signal_lock;
		stack_t m_signal_alt_stack { nullptr, 0, SS_DISABLE };
		static_assert(_SIGMAX < 64);

		mutable SpinLock m_cpu_time_lock;

@@ -2701,6 +2701,19 @@ namespace Kernel
		}
	}

	BAN::ErrorOr<long> Process::sys_sigaltstack(const stack_t* ss, stack_t* oss)
	{
		LockGuard _(m_process_lock);
		if (ss != nullptr)
			TRY(validate_pointer_access(ss, sizeof(stack_t), false));
		if (oss != nullptr)
			TRY(validate_pointer_access(oss, sizeof(stack_t), true));

		TRY(Thread::current().sigaltstack(ss, oss));

		return 0;
	}

	BAN::ErrorOr<long> Process::sys_futex(int op, const uint32_t* addr, uint32_t val, const timespec* abstime)
	{
		const vaddr_t vaddr = reinterpret_cast<vaddr_t>(addr);

@@ -92,14 +92,14 @@ namespace Kernel
		if (ret.is_error() && ret.error().is_kernel_error())
			Kernel::panic("Kernel error while returning to userspace {}", ret.error());

		Processor::set_interrupt_state(InterruptState::Disabled);

		auto& current_thread = Thread::current();
		if (current_thread.can_add_signal_to_execute())
			if (current_thread.handle_signal())
				if (ret.is_error() && ret.error().get_error_code() == EINTR && is_restartable_syscall(syscall))
					ret = BAN::Error::from_errno(ERESTART);

		Processor::set_interrupt_state(InterruptState::Disabled);

		ASSERT(Kernel::Thread::current().state() == Kernel::Thread::State::Executing);

		if (ret.is_error())

@@ -507,6 +507,7 @@ namespace Kernel
		ASSERT(is_userspace());

		auto state = m_signal_lock.lock();
		ASSERT(state == InterruptState::Disabled);

		auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(kernel_stack_top() - sizeof(InterruptStack));
		ASSERT(GDT::is_user_segment(interrupt_stack.cs));

@@ -530,6 +531,7 @@ namespace Kernel

		vaddr_t signal_handler;
		bool has_sa_restart;
		vaddr_t signal_stack_top = 0;
		{
			SpinLockGuard _(m_process->m_signal_lock);

@@ -542,6 +544,10 @@ namespace Kernel
				handler.sa_handler = SIG_DFL;

			has_sa_restart = !!(handler.sa_flags & SA_RESTART);

			const auto& alt_stack = m_signal_alt_stack;
			if (alt_stack.ss_flags != SS_DISABLE && (handler.sa_flags & SA_ONSTACK) && !currently_on_alternate_stack())
				signal_stack_top = reinterpret_cast<vaddr_t>(alt_stack.ss_sp) + alt_stack.ss_size;
		}

		m_signal_pending_mask &= ~(1ull << signal);

@@ -553,6 +559,8 @@ namespace Kernel
			m_signal_suspend_mask.clear();
		}

		m_signal_lock.unlock(state);

		if (signal_handler == (vaddr_t)SIG_IGN)
			;
		else if (signal_handler != (vaddr_t)SIG_DFL)

@@ -561,7 +569,32 @@ namespace Kernel
#if ARCH(x86_64)
			interrupt_stack.sp -= 128; // skip possible red-zone
#endif

			{
				// Make sure stack is allocated

				const vaddr_t pages[3] {
					(interrupt_stack.sp - sizeof(uintptr_t)) & PAGE_ADDR_MASK,
					(signal_stack_top - 4 * sizeof(uintptr_t)) & PAGE_ADDR_MASK,
					(signal_stack_top - 1 * sizeof(uintptr_t)) & PAGE_ADDR_MASK,
				};

				for (size_t i = 0; i < 3; i++)
				{
					if (m_process->page_table().get_page_flags(pages[i]) & PageTable::Flags::Present)
						continue;
					Processor::set_interrupt_state(InterruptState::Enabled);
					if (auto ret = m_process->allocate_page_for_demand_paging(pages[i], true, false); ret.is_error() || !ret.value())
						m_process->exit(128 + SIGSEGV, SIGSEGV);
					Processor::set_interrupt_state(InterruptState::Disabled);
				}
			}

			write_to_stack(interrupt_stack.sp, interrupt_stack.ip);
			const vaddr_t old_stack = interrupt_stack.sp;
			if (signal_stack_top)
				interrupt_stack.sp = signal_stack_top;
			write_to_stack(interrupt_stack.sp, old_stack);
			write_to_stack(interrupt_stack.sp, interrupt_stack.flags);
			write_to_stack(interrupt_stack.sp, signal);
			write_to_stack(interrupt_stack.sp, signal_handler);

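As a reading aid (not part of the commit), the frame that handle_signal now builds for the userspace trampoline, and that the trampoline hunks at the top of this diff unwind, is roughly the following. The field order comes straight from the write_to_stack calls above; anything beyond that is inferred and marked as such:

```c
/*
 * Sketch of the signal frame, as inferred from this diff (stack grows down).
 *
 * On the interrupted (user) stack, after the optional 128-byte red-zone skip
 * on x86_64:
 *
 *     [ saved interrupt_stack.ip ]   <- old_stack points here
 *
 * On the handler stack (the alternate stack top when SA_ONSTACK applies and
 * the thread is not already on it, otherwise the same user stack):
 *
 *     [ old_stack               ]
 *     [ saved flags             ]
 *     [ signal number           ]
 *     [ signal handler address  ]    <- interrupt_stack.sp when the
 *                                       trampoline starts running
 *
 * The trampoline entry is not part of this diff; presumably it calls the
 * handler, after which the tail shown in the first two hunks discards the
 * handler address and signal (addl $8 / addq $16), restores flags
 * (popf / popfq), reloads old_stack into the stack pointer, and returns to
 * the saved ip. On x86_64, `ret $128` also steps back over the reserved
 * red zone.
 */
```
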
@@ -582,7 +615,6 @@ namespace Kernel
			case SIGTRAP:
			case SIGXCPU:
			case SIGXFSZ:
				m_signal_lock.unlock(state);
				process().exit(128 + signal, signal | 0x80);
				ASSERT_NOT_REACHED();

@@ -598,7 +630,6 @@ namespace Kernel
			case SIGPOLL:
			case SIGPROF:
			case SIGVTALRM:
				m_signal_lock.unlock(state);
				process().exit(128 + signal, signal);
				ASSERT_NOT_REACHED();

@@ -620,8 +651,6 @@ namespace Kernel
			}
		}

		m_signal_lock.unlock(state);

		return has_sa_restart;
	}

@@ -657,6 +686,46 @@ namespace Kernel
		m_signal_block_mask = sigmask;
	}

	bool Thread::currently_on_alternate_stack() const
	{
		ASSERT(m_signal_lock.current_processor_has_lock());

		if (m_signal_alt_stack.ss_flags == SS_ONSTACK)
			return false;

		const vaddr_t stack_bottom = reinterpret_cast<vaddr_t>(m_signal_alt_stack.ss_sp);
		const vaddr_t stack_top = stack_bottom + m_signal_alt_stack.ss_size;
		const vaddr_t sp = m_interrupt_stack.sp;
		return stack_bottom <= sp && sp <= stack_top;
	}

	BAN::ErrorOr<void> Thread::sigaltstack(const stack_t* ss, stack_t* oss)
	{
		SpinLockGuard _(m_signal_lock);

		const bool on_alt_stack = currently_on_alternate_stack();

		if (oss)
		{
			*oss = m_signal_alt_stack;
			if (on_alt_stack)
				oss->ss_flags = SS_ONSTACK;
		}

		if (ss)
		{
			if (on_alt_stack)
				return BAN::Error::from_errno(EPERM);
			if (ss->ss_flags && ss->ss_flags != SS_DISABLE)
				return BAN::Error::from_errno(EINVAL);
			if (ss->ss_size < MINSIGSTKSZ)
				return BAN::Error::from_errno(ENOMEM);
			m_signal_alt_stack = *ss;
		}

		return {};
	}

	BAN::ErrorOr<void> Thread::sleep_or_eintr_ns(uint64_t ns)
	{
		if (is_interrupted_by_signal())

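The checks above follow the usual POSIX rules: a null ss only queries the current configuration, oss reports SS_ONSTACK while the thread is executing on the alternate stack, and EPERM, EINVAL, and ENOMEM cover changing the stack while on it, invalid ss_flags, and an undersized stack. A minimal query sketch from userspace (hypothetical application code, not part of this commit; report_altstack is an illustrative name):

```c
#include <signal.h>
#include <stdio.h>

/* Print the current alternate-stack configuration without changing it. */
static void report_altstack(void)
{
	stack_t current;
	if (sigaltstack(NULL, &current) != 0)
		return;
	if (current.ss_flags & SS_DISABLE)
		printf("no alternate stack installed\n");
	else if (current.ss_flags & SS_ONSTACK)
		printf("currently running on the alternate stack\n");
	else
		printf("alternate stack at %p, %zu bytes\n", current.ss_sp, current.ss_size);
}
```
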
@@ -132,8 +132,9 @@ struct sigevent
#define SA_SIGINFO 0x010
#define SA_NOCLDWAIT 0x020
#define SA_NODEFER 0x040
#define SS_ONSTACK 0x080
#define SS_DISABLE 0x100

#define SS_ONSTACK 1
#define SS_DISABLE 2

#define MINSIGSTKSZ 4096
#define SIGSTKSZ 4096

@@ -92,6 +92,7 @@ __BEGIN_DECLS
	O(SYS_SIGPROCMASK, sigprocmask) \
	O(SYS_SIGSUSPEND, sigsuspend) \
	O(SYS_SIGWAIT, sigwait) \
	O(SYS_SIGALTSTACK, sigaltstack) \
	O(SYS_SETITIMER, setitimer) \
	O(SYS_POSIX_OPENPT, posix_openpt) \
	O(SYS_PTSNAME, ptsname) \

@@ -176,3 +176,10 @@ int sigwait(const sigset_t* __restrict set, int* __restrict sig)
		return errno;
	return 0;
}

int sigaltstack(const stack_t* __restrict ss, stack_t* __restrict oss)
{
	if (syscall(SYS_SIGALTSTACK, ss, oss) == -1)
		return errno;
	return 0;
}

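With the syscall wired through libc, the conventional way to use it is to install an alternate stack and then register a handler with SA_ONSTACK, so that for example a SIGSEGV caused by stack overflow can still run its handler. A usage sketch (hypothetical application code, assuming the standard sigaction/sigemptyset interfaces; note that this wrapper, like sigwait above, returns the error number rather than -1, so the example simply tests for a nonzero return):

```c
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* Runs on the alternate stack, so this works even after the main stack overflows. */
	(void)sig;
	const char msg[] = "caught SIGSEGV on the alternate stack\n";
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(1);
}

int main(void)
{
	/* Install an alternate signal stack... */
	static char stack_mem[SIGSTKSZ];
	stack_t ss;
	ss.ss_sp = stack_mem;
	ss.ss_size = sizeof(stack_mem);
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) != 0)
		return 1;

	/* ...and ask for the SIGSEGV handler to run on it. */
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGSEGV, &sa, NULL) != 0)
		return 1;

	/* ...then run code that might overflow the main stack. */
	return 0;
}
```
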