Kernel: Signals are not queued anymore

POSIX doesn't require signal queuing unless sigqueue() is used, which we don't support.
Each process now also has its own pending signal mask in addition to the per-thread one.
Bananymous 2023-07-29 16:54:31 +03:00
parent 925df39107
commit acf125c853
5 changed files with 61 additions and 35 deletions
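
To make the bookkeeping concrete, here is a minimal, self-contained sketch of the mask-based scheme this commit switches to: one bit per signal number, a pending mask and a block mask, and the lowest pending unblocked signal handled first. The SignalState struct, the SIGMIN/SIGMAX constants, and main() are illustrative stand-ins only; in the kernel these masks live on Thread and Process as shown in the diff below.

// Minimal sketch (not kernel code): pending/blocked signals as 64-bit masks.
// One bit per signal number; without sigqueue(), repeated signals may collapse,
// so a set bit is all the state that is needed.
#include <cassert>
#include <cstdint>

constexpr int SIGMIN = 1;
constexpr int SIGMAX = 32; // illustrative; the kernel asserts _SIGMAX < 64

struct SignalState
{
    uint64_t pending_mask { 0 }; // signals waiting to be handled
    uint64_t block_mask { 0 };   // signals currently blocked

    // Mark a signal pending unless it is blocked; mirrors Thread::add_signal().
    bool add_signal(int signal)
    {
        uint64_t mask = 1ull << signal;
        if (block_mask & mask)
            return false;
        pending_mask |= mask;
        return true;
    }

    // Any deliverable signal? Mirrors Thread::has_signal_to_execute().
    bool has_signal_to_execute() const
    {
        return pending_mask & ~block_mask;
    }

    // Pick the lowest-numbered deliverable signal and clear its bit,
    // like the signal == 0 path of Thread::handle_signal().
    int take_next_signal()
    {
        for (int signal = SIGMIN; signal <= SIGMAX; signal++)
        {
            uint64_t mask = 1ull << signal;
            if ((pending_mask & mask) && !(block_mask & mask))
            {
                pending_mask &= ~mask;
                return signal;
            }
        }
        return 0; // nothing deliverable
    }
};

int main()
{
    SignalState state;
    state.add_signal(2);
    state.add_signal(2); // duplicate collapses into the same bit
    assert(state.has_signal_to_execute());
    assert(state.take_next_signal() == 2);
    assert(!state.has_signal_to_execute());
}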

View File

@@ -160,6 +160,7 @@ namespace Kernel
     BAN::UniqPtr<GeneralAllocator> m_general_allocator;
     vaddr_t m_signal_handlers[_SIGMAX + 1] { };
+    uint64_t m_signal_pending_mask { 0 };
     bool m_is_userspace { false };
     userspace_info_t m_userspace_info;

View File

@@ -1,6 +1,5 @@
 #pragma once
-#include <BAN/CircularQueue.h>
 #include <BAN/NoCopyMove.h>
 #include <BAN/RefPtr.h>
 #include <BAN/UniqPtr.h>
@@ -50,8 +49,8 @@ namespace Kernel
     bool has_signal_to_execute() const;
     void set_signal_done(int signal);
-    void handle_next_signal();
-    void queue_signal(int signal);
+    void handle_signal(int signal = 0);
+    bool add_signal(int signal);
     void set_return_rsp(uintptr_t& rsp) { m_return_rsp = &rsp; }
     void set_return_rip(uintptr_t& rip) { m_return_rip = &rip; }
@@ -109,8 +108,8 @@ namespace Kernel
     uintptr_t* m_return_rsp { nullptr };
     uintptr_t* m_return_rip { nullptr };
-    BAN::CircularQueue<int, 10> m_signal_queue;
-    uint64_t m_signal_mask { 0 };
+    uint64_t m_signal_pending_mask { 0 };
+    uint64_t m_signal_block_mask { 0 };
     int m_handling_signal { 0 };
     static_assert(_SIGMAX < 64);

View File

@@ -845,14 +845,14 @@ namespace Kernel
         return Process::current().sys_raise(signal);
     LockGuard process_guard(s_process_lock);
-    CriticalScope _;
     for (auto* process : s_processes)
     {
         if (process->pid() == pid)
         {
-            if (signal)
-                process->m_threads.front()->queue_signal(signal);
+            if (signal == 0)
+                return 0;
+            CriticalScope _;
+            process->m_signal_pending_mask |= 1ull << signal;
             return 0;
         }
     }
@@ -864,14 +864,21 @@ namespace Kernel
 {
     if (signal < _SIGMIN || signal > _SIGMAX)
         return BAN::Error::from_errno(EINVAL);
-    ASSERT(m_threads.size() == 1);
+    ASSERT(this == &Process::current());
     CriticalScope _;
+    // FIXME: support raise with signal blocked
     Thread& current = Thread::current();
-    current.queue_signal(signal);
-    current.handle_next_signal();
-    return 0;
+    if (current.add_signal(signal))
+    {
+        current.handle_signal(signal);
+        return 0;
+    }
+    ASSERT_NOT_REACHED();
 }

 BAN::ErrorOr<long> Process::sys_tcsetpgrp(int fd, pid_t pgid)
 {
     LockGuard _(m_lock);

View File

@@ -248,7 +248,7 @@ namespace Kernel
             start_thread(current->rsp(), current->rip());
         case Thread::State::Executing:
             while (current->has_signal_to_execute() && current->state() == Thread::State::Executing)
-                current->handle_next_signal();
+                current->handle_signal();
             // fall through
         case Thread::State::Terminating:
             continue_thread(current->rsp(), current->rip());

View File

@@ -183,7 +183,7 @@ namespace Kernel
     // Setup stack for returning
     {
         // FIXME: don't use PageTableScope
-        PageTableScope _(m_process->page_table());
+        PageTableScope _(process().page_table());
         write_to_stack(m_rsp, this);
         write_to_stack(m_rsp, &Thread::on_exit);
         write_to_stack(m_rsp, nullptr);
@@ -204,12 +204,13 @@ namespace Kernel
     m_rsp = stack_base() + stack_size();
     m_rip = (uintptr_t)entry;
-    m_signal_mask = ~0ull;
+    m_signal_pending_mask = 0;
+    m_signal_block_mask = ~0ull;
     // Setup stack for returning
     {
         // FIXME: don't use PageTableScope
-        PageTableScope _(m_process->page_table());
+        PageTableScope _(process().page_table());
         write_to_stack(m_rsp, this);
         write_to_stack(m_rsp, &Thread::on_exit);
         write_to_stack(m_rsp, m_process);
@@ -218,7 +219,10 @@ namespace Kernel
 bool Thread::has_signal_to_execute() const
 {
-    return !m_signal_queue.empty() && !m_handling_signal;
+    if (!m_process || m_handling_signal)
+        return false;
+    uint64_t full_pending_mask = m_signal_pending_mask | m_process->m_signal_pending_mask;
+    return full_pending_mask & ~m_signal_block_mask;
 }

 void Thread::set_signal_done(int signal)
@@ -232,26 +236,40 @@ namespace Kernel
     m_handling_signal = 0;
 }

-void Thread::handle_next_signal()
+void Thread::handle_signal(int signal)
 {
     ASSERT(!interrupts_enabled());
-    ASSERT(!m_signal_queue.empty());
     ASSERT(&Thread::current() == this);
     ASSERT(is_userspace());
-    int signal = m_signal_queue.front();
-    ASSERT(signal >= _SIGMIN && signal <= _SIGMAX);
-    m_signal_queue.pop();
+    if (signal == 0)
+    {
+        uint64_t full_pending_mask = m_signal_pending_mask | process().m_signal_pending_mask;
+        for (signal = _SIGMIN; signal <= _SIGMAX; signal++)
+        {
+            uint64_t mask = 1ull << signal;
+            if ((full_pending_mask & mask) && !(m_signal_block_mask & mask))
+                break;
+        }
+        ASSERT(signal <= _SIGMAX);
+    }
+    else
+    {
+        uint64_t full_pending_mask = m_signal_pending_mask | process().m_signal_pending_mask;
+        uint64_t mask = 1ull << signal;
+        ASSERT(full_pending_mask & mask);
+        ASSERT(!(m_signal_block_mask & mask));
+    }
     uintptr_t& return_rsp = this->return_rsp();
     uintptr_t& return_rip = this->return_rip();
     vaddr_t signal_handler = process().m_signal_handlers[signal];
-    // Skip masked and ignored signals
-    if (m_signal_mask & (1ull << signal))
-        ;
-    else if (signal_handler == (vaddr_t)SIG_IGN)
+    m_signal_pending_mask &= ~(1ull << signal);
+    process().m_signal_pending_mask &= ~(1ull << signal);
+    if (signal_handler == (vaddr_t)SIG_IGN)
         ;
     else if (signal_handler != (vaddr_t)SIG_DFL)
     {
@@ -321,17 +339,18 @@ namespace Kernel
     }
 }

-void Thread::queue_signal(int signal)
+bool Thread::add_signal(int signal)
 {
     ASSERT(!interrupts_enabled());
-    if (m_signal_queue.full())
+    uint64_t mask = 1ull << signal;
+    if (!(m_signal_block_mask & mask))
     {
-        dwarnln("Signal queue full");
-        return;
-    }
-    m_signal_queue.push(signal);
+        m_signal_pending_mask |= mask;
         if (this != &Thread::current())
             Scheduler::get().unblock_thread(tid());
+        return true;
+    }
+    return false;
 }

 void Thread::validate_stack() const