/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
* Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
* Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002, Angelo Mottola, a.mottola@libero.it.
*
* Distributed under the terms of the MIT License.
*/
#include <ksignal.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <OS.h>
#include <KernelExport.h>
#include <cpu.h>
#include <core_dump.h>
#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <sem.h>
#include <syscall_restart.h>
#include <syscall_utils.h>
#include <team.h>
#include <thread.h>
#include <tracing.h>
#include <user_debugger.h>
#include <user_thread.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#ifdef TRACE_SIGNAL
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
#define BLOCKABLE_SIGNALS \
(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP) \
| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD) \
| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
#define STOP_SIGNALS \
(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define CONTINUE_SIGNALS \
(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD))
#define DEFAULT_IGNORE_SIGNALS \
(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
| SIGNAL_TO_MASK(SIGCONT) \
| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
#define NON_DEFERRABLE_SIGNALS \
(KILL_SIGNALS \
| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD) \
| SIGNAL_TO_MASK(SIGILL) \
| SIGNAL_TO_MASK(SIGFPE) \
| SIGNAL_TO_MASK(SIGSEGV))
static const struct {
const char* name;
int32 priority;
} kSignalInfos[__MAX_SIGNO + 1] = {
{"NONE", -1},
{"HUP", 0},
{"INT", 0},
{"QUIT", 0},
{"ILL", 0},
{"CHLD", 0},
{"ABRT", 0},
{"PIPE", 0},
{"FPE", 0},
{"KILL", 100},
{"STOP", 0},
{"SEGV", 0},
{"CONT", 0},
{"TSTP", 0},
{"ALRM", 0},
{"TERM", 0},
{"TTIN", 0},
{"TTOU", 0},
{"USR1", 0},
{"USR2", 0},
{"WINCH", 0},
{"KILLTHR", 100},
{"TRAP", 0},
{"POLL", 0},
{"PROF", 0},
{"SYS", 0},
{"URG", 0},
{"VTALRM", 0},
{"XCPU", 0},
{"XFSZ", 0},
{"SIGBUS", 0},
{"SIGRESERVED1", 0},
{"SIGRESERVED2", 0},
{"SIGRT1", 8},
{"SIGRT2", 7},
{"SIGRT3", 6},
{"SIGRT4", 5},
{"SIGRT5", 4},
{"SIGRT6", 3},
{"SIGRT7", 2},
{"SIGRT8", 1},
{"invalid 41", 0},
{"invalid 42", 0},
{"invalid 43", 0},
{"invalid 44", 0},
{"invalid 45", 0},
{"invalid 46", 0},
{"invalid 47", 0},
{"invalid 48", 0},
{"invalid 49", 0},
{"invalid 50", 0},
{"invalid 51", 0},
{"invalid 52", 0},
{"invalid 53", 0},
{"invalid 54", 0},
{"invalid 55", 0},
{"invalid 56", 0},
{"invalid 57", 0},
{"invalid 58", 0},
{"invalid 59", 0},
{"invalid 60", 0},
{"invalid 61", 0},
{"invalid 62", 0},
{"CANCEL_THREAD", 0},
{"CONTINUE_THREAD", 0}
};
static inline const char*
signal_name(uint32 number)
{
return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
}
struct SignalHandledCaller {
SignalHandledCaller(Signal* signal)
:
fSignal(signal)
{
}
~SignalHandledCaller()
{
Done();
}
void Done()
{
if (fSignal != NULL) {
fSignal->Handled();
fSignal = NULL;
}
}
private:
Signal* fSignal;
};
/*! Creates a counter with the given limit.
The limit defines the maximum the counter may reach. Since the
BReferenceable's reference count is used, it is assumed that the owning
team holds a reference and the reference count is one greater than the
counter value.
\param limit The maximum allowed value the counter may have. When
\code < 0 \endcode, the value is not limited.
*/
QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
:
fLimit(limit)
{
}
/*! Increments the counter, if the limit allows it.
\return \c true, if incrementing the counter succeeded, \c false otherwise.
*/
bool
QueuedSignalsCounter::Increment()
{
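// no limit set -- the counter can always be incremented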
if (fLimit < 0) {
AcquireReference();
return true;
}
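// fReferenceCount is the queued-signal count plus one (the reference held by
// the owning team), so a previous value above fLimit means the limit has
// already been reached.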
if (atomic_add(&fReferenceCount, 1) > fLimit) {
ReleaseReference();
return false;
}
return true;
}
Signal::Signal()
:
fCounter(NULL),
fPending(false)
{
}
Signal::Signal(const Signal& other)
:
fCounter(NULL),
fNumber(other.fNumber),
fSignalCode(other.fSignalCode),
fErrorCode(other.fErrorCode),
fSendingProcess(other.fSendingProcess),
fSendingUser(other.fSendingUser),
fStatus(other.fStatus),
fPollBand(other.fPollBand),
fAddress(other.fAddress),
fUserValue(other.fUserValue),
fPending(false)
{
}
Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
pid_t sendingProcess)
:
fCounter(NULL),
fNumber(number),
fSignalCode(signalCode),
fErrorCode(errorCode),
fSendingProcess(sendingProcess),
fSendingUser(getuid()),
fStatus(0),
fPollBand(0),
fAddress(NULL),
fPending(false)
{
fUserValue.sival_ptr = NULL;
}
Signal::~Signal()
{
if (fCounter != NULL)
fCounter->ReleaseReference();
}
/*! Creates a queuable clone of the given signal.
Also enforces the current team's signal queuing limit.
\param signal The signal to clone.
\param queuingRequired If \c true, the function will return an error code
when creating the clone fails for any reason. Otherwise, the function
will set \a _signalToQueue to \c NULL, but still return \c B_OK.
\param _signalToQueue Return parameter. Set to the clone of the signal.
\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
\c B_OK, when creating the signal clone succeeds, another error code,
when it fails.
*/
status_t
Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
Signal*& _signalToQueue)
{
_signalToQueue = NULL;
if (!are_interrupts_enabled())
return queuingRequired ? B_BAD_VALUE : B_OK;
QueuedSignalsCounter* counter
= thread_get_current_thread()->team->QueuedSignalsCounter();
if (!counter->Increment())
return queuingRequired ? EAGAIN : B_OK;
Signal* signalToQueue = new(std::nothrow) Signal(signal);
if (signalToQueue == NULL) {
counter->Decrement();
return queuingRequired ? B_NO_MEMORY : B_OK;
}
signalToQueue->fCounter = counter;
_signalToQueue = signalToQueue;
return B_OK;
}
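/* Usage sketch (illustrative; it mirrors send_signal_to_thread() and
send_signal_to_team() further below): create the queuable clone first, then
hand the reference over to one of the send_signal_to_*_locked() functions,
which take ownership of it.

	Signal* signalToQueue = NULL;
	status_t error = Signal::CreateQueuable(signal,
		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
	if (error != B_OK)
		return error;
*/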
void
Signal::SetTo(uint32 number)
{
Team* team = thread_get_current_thread()->team;
fNumber = number;
fSignalCode = SI_USER;
fErrorCode = 0;
fSendingProcess = team->id;
fSendingUser = team->effective_uid;
fStatus = 0;
fPollBand = 0;
fAddress = NULL;
fUserValue.sival_ptr = NULL;
}
int32
Signal::Priority() const
{
return kSignalInfos[fNumber].priority;
}
void
Signal::Handled()
{
ReleaseReference();
}
void
Signal::LastReferenceReleased()
{
if (are_interrupts_enabled())
delete this;
else
deferred_delete(this);
}
PendingSignals::PendingSignals()
:
fQueuedSignalsMask(0),
fUnqueuedSignalsMask(0)
{
}
PendingSignals::~PendingSignals()
{
Clear();
}
/*! Returns the priority of the pending non-blocked signal with the
highest priority.
\param nonBlocked The mask with the non-blocked signals.
\return The priority of the highest priority non-blocked signal, or, if all
signals are blocked, \c -1.
*/
int32
PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
{
Signal* queuedSignal;
int32 unqueuedSignal;
return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
}
void
PendingSignals::Clear()
{
while (Signal* signal = fQueuedSignals.RemoveHead())
signal->Handled();
fQueuedSignalsMask = 0;
fUnqueuedSignalsMask = 0;
}
/*! Adds a signal.
Takes over the reference to the signal from the caller.
*/
void
PendingSignals::AddSignal(Signal* signal)
{
int32 priority = signal->Priority();
Signal* otherSignal = NULL;
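// insert the signal after the last queued signal with the same or a higher
// priority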
for (SignalList::Iterator it = fQueuedSignals.GetIterator();
(otherSignal = it.Next()) != NULL;) {
if (priority > otherSignal->Priority())
break;
}
fQueuedSignals.InsertBefore(otherSignal, signal);
signal->SetPending(true);
fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
}
void
PendingSignals::RemoveSignal(Signal* signal)
{
signal->SetPending(false);
fQueuedSignals.Remove(signal);
_UpdateQueuedSignalMask();
}
void
PendingSignals::RemoveSignals(sigset_t mask)
{
if ((fQueuedSignalsMask & mask) != 0) {
for (SignalList::Iterator it = fQueuedSignals.GetIterator();
Signal* signal = it.Next();) {
if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
it.Remove();
signal->SetPending(false);
signal->Handled();
}
}
fQueuedSignalsMask &= ~mask;
}
fUnqueuedSignalsMask &= ~mask;
}
/*! Removes and returns the pending non-blocked signal with the highest
priority.
The caller gets a reference to the returned signal, if any.
\param nonBlocked The mask of non-blocked signals.
\param buffer If the signal is not queued this buffer is returned. In this
case the method acquires a reference to \a buffer, so that the caller
gets a reference also in this case.
\return The removed signal or \c NULL, if all signals are blocked.
*/
Signal*
PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
{
Signal* queuedSignal;
int32 unqueuedSignal;
if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
return NULL;
if (queuedSignal != NULL) {
fQueuedSignals.Remove(queuedSignal);
queuedSignal->SetPending(false);
_UpdateQueuedSignalMask();
return queuedSignal;
}
fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
buffer.SetTo(unqueuedSignal);
buffer.AcquireReference();
return &buffer;
}
/*! Determines the pending non-blocked signal with the
highest priority.
\param nonBlocked The mask with the non-blocked signals.
\param _queuedSignal If the found signal is a queued signal, the variable
will be set to that signal, otherwise to \c NULL.
\param _unqueuedSignal If the found signal is an unqueued signal, the
variable is set to that signal's number, otherwise to \c -1.
\return The priority of the highest priority non-blocked signal, or, if all
signals are blocked, \c -1.
*/
int32
PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
Signal*& _queuedSignal, int32& _unqueuedSignal) const
{
Signal* queuedSignal = NULL;
int32 queuedPriority = -1;
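// The queued signals are sorted by descending priority, so the first
// non-blocked one is the highest priority queued signal.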
if ((fQueuedSignalsMask & nonBlocked) != 0) {
for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
Signal* signal = it.Next();) {
if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
queuedPriority = signal->Priority();
queuedSignal = signal;
break;
}
}
}
int32 unqueuedSignal = -1;
int32 unqueuedPriority = -1;
sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
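// find the unqueued pending signal with the highest priority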
if (unqueuedSignals != 0) {
int32 signal = 1;
while (unqueuedSignals != 0) {
sigset_t mask = SIGNAL_TO_MASK(signal);
if ((unqueuedSignals & mask) != 0) {
int32 priority = kSignalInfos[signal].priority;
if (priority > unqueuedPriority) {
unqueuedSignal = signal;
unqueuedPriority = priority;
}
unqueuedSignals &= ~mask;
}
signal++;
}
}
if (queuedPriority >= unqueuedPriority) {
_queuedSignal = queuedSignal;
_unqueuedSignal = -1;
return queuedPriority;
}
_queuedSignal = NULL;
_unqueuedSignal = unqueuedSignal;
return unqueuedPriority;
}
void
PendingSignals::_UpdateQueuedSignalMask()
{
sigset_t mask = 0;
for (SignalList::Iterator it = fQueuedSignals.GetIterator();
Signal* signal = it.Next();) {
mask |= SIGNAL_TO_MASK(signal->Number());
}
fQueuedSignalsMask = mask;
}
#if SIGNAL_TRACING
namespace SignalTracing {
class HandleSignal : public AbstractTraceEntry {
public:
HandleSignal(uint32 signal)
:
fSignal(signal)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("signal handle: %" B_PRIu32 " (%s)" , fSignal,
signal_name(fSignal));
}
private:
uint32 fSignal;
};
class ExecuteSignalHandler : public AbstractTraceEntry {
public:
ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
:
fSignal(signal),
fHandler((void*)handler->sa_handler)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
"handler: %p", fSignal, signal_name(fSignal), fHandler);
}
private:
uint32 fSignal;
void* fHandler;
};
class SendSignal : public AbstractTraceEntry {
public:
SendSignal(pid_t target, uint32 signal, uint32 flags)
:
fTarget(target),
fSignal(signal),
fFlags(flags)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
signal_name(fSignal), fFlags);
}
private:
pid_t fTarget;
uint32 fSignal;
uint32 fFlags;
};
class SigAction : public AbstractTraceEntry {
public:
SigAction(uint32 signal, const struct sigaction* act)
:
fSignal(signal),
fAction(*act)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("signal action: signal: %" B_PRIu32 " (%s), "
"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
fSignal, signal_name(fSignal), fAction.sa_handler,
fAction.sa_flags, (uint64)fAction.sa_mask);
}
private:
uint32 fSignal;
struct sigaction fAction;
};
class SigProcMask : public AbstractTraceEntry {
public:
SigProcMask(int how, sigset_t mask)
:
fHow(how),
fMask(mask),
fOldMask(thread_get_current_thread()->sig_block_mask)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
const char* how = "invalid";
switch (fHow) {
case SIG_BLOCK:
how = "block";
break;
case SIG_UNBLOCK:
how = "unblock";
break;
case SIG_SETMASK:
how = "set";
break;
}
out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
(long long)fMask, (long long)fOldMask);
}
private:
int fHow;
sigset_t fMask;
sigset_t fOldMask;
};
class SigSuspend : public AbstractTraceEntry {
public:
SigSuspend(sigset_t mask)
:
fMask(mask),
fOldMask(thread_get_current_thread()->sig_block_mask)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("signal suspend: %#llx, old mask: %#llx",
(long long)fMask, (long long)fOldMask);
}
private:
sigset_t fMask;
sigset_t fOldMask;
};
class SigSuspendDone : public AbstractTraceEntry {
public:
SigSuspendDone()
:
fSignals(thread_get_current_thread()->ThreadPendingSignals())
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("signal suspend done: %#" B_PRIx32, fSignals);
}
private:
uint32 fSignals;
};
}
# define T(x) new(std::nothrow) SignalTracing::x
#else
# define T(x)
#endif
/*! Updates the given thread's Thread::flags field according to what signals are
pending.
The caller must hold \c team->signal_lock.
*/
static void
update_thread_signals_flag(Thread* thread)
{
sigset_t mask = ~thread->sig_block_mask;
if ((thread->AllPendingSignals() & mask) != 0)
atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
else
atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}
/*! Updates the current thread's Thread::flags field according to what signals
are pending.
The caller must hold \c team->signal_lock.
*/
static void
update_current_thread_signals_flag()
{
update_thread_signals_flag(thread_get_current_thread());
}
/*! Updates all of the given team's threads' Thread::flags fields according to
what signals are pending.
The caller must hold \c signal_lock.
*/
static void
update_team_threads_signal_flag(Team* team)
{
for (Thread* thread = team->thread_list.First(); thread != NULL;
thread = team->thread_list.GetNext(thread)) {
update_thread_signals_flag(thread);
}
}
/*! Notifies the user debugger about a signal to be handled by the given thread.
The caller must not hold any locks.
\param thread The current thread.
\param signal The signal to be handled.
\param handler The installed signal handler for the signal.
\param deadly Indicates whether the signal is deadly.
\return \c true, if the signal shall be handled, \c false, if it shall be
ignored.
*/
static bool
notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
bool deadly)
{
uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
thread->debug_info.ignore_signals_once &= ~signalMask;
return true;
}
if ((thread->debug_info.ignore_signals & signalMask) != 0)
return true;
threadDebugInfoLocker.Unlock();
siginfo_t info;
info.si_signo = signal->Number();
info.si_code = signal->SignalCode();
info.si_errno = signal->ErrorCode();
info.si_pid = signal->SendingProcess();
info.si_uid = signal->SendingUser();
info.si_addr = signal->Address();
info.si_status = signal->Status();
info.si_band = signal->PollBand();
info.si_value = signal->UserValue();
return user_debug_handle_signal(signal->Number(), &handler, &info, deadly);
}
/*! Removes and returns the highest priority non-blocked signal that
is pending in the given thread or its team.
After dequeuing the signal, the Thread::flags fields of the affected threads
are updated.
The caller gets a reference to the returned signal, if any.
The caller must hold \c team->signal_lock.
\param thread The thread.
\param nonBlocked The mask of non-blocked signals.
\param buffer If the signal is not queued this buffer is returned. In this
case the method acquires a reference to \a buffer, so that the caller
gets a reference also in this case.
\return The removed signal or \c NULL, if all signals are blocked.
*/
static Signal*
dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
Signal& buffer)
{
Team* team = thread->team;
Signal* signal;
if (team->HighestPendingSignalPriority(nonBlocked)
> thread->HighestPendingSignalPriority(nonBlocked)) {
signal = team->DequeuePendingSignal(nonBlocked, buffer);
update_team_threads_signal_flag(team);
} else {
signal = thread->DequeuePendingSignal(nonBlocked, buffer);
update_thread_signals_flag(thread);
}
return signal;
}
static status_t
setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
sigset_t signalMask)
{
signal_frame_data frameData;
frameData.info.si_signo = signal->Number();
frameData.info.si_code = signal->SignalCode();
frameData.info.si_errno = signal->ErrorCode();
frameData.info.si_pid = signal->SendingProcess();
frameData.info.si_uid = signal->SendingUser();
frameData.info.si_addr = signal->Address();
frameData.info.si_status = signal->Status();
frameData.info.si_band = signal->PollBand();
frameData.info.si_value = signal->UserValue();
frameData.context.uc_link = thread->user_signal_context;
frameData.context.uc_sigmask = signalMask;
frameData.user_data = action->sa_userdata;
frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
frameData.handler = frameData.siginfo_handler
? (void*)action->sa_sigaction : (void*)action->sa_handler;
frameData.thread_flags = atomic_and(&thread->flags,
~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
memcpy(frameData.syscall_restart_parameters,
thread->syscall_restart.parameters,
sizeof(frameData.syscall_restart_parameters));
frameData.commpage_address = thread->team->commpage_address;
return arch_setup_signal_frame(thread, action, &frameData);
}
/*! Handles the current thread's pending signals: the thread is killed, a
signal handler is prepared, or whatever the signal demands.
The function will not return, when a deadly signal is encountered. The
function will suspend the thread indefinitely, when a stop signal is
encountered.
Interrupts must be enabled.
\param thread The current thread.
*/
void
handle_signals(Thread* thread)
{
Team* team = thread->team;
TeamLocker teamLocker(team);
InterruptsSpinLocker locker(thread->team->signal_lock);
sigset_t nonBlockedMask = ~thread->sig_block_mask;
sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
arch_cpu_enable_user_access();
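// If the thread currently defers signals (and neither a non-deferrable signal
// nor a sigsuspend() is pending), only record the signals for the userland
// code and return.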
if (thread->user_thread->defer_signals > 0
&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
&& thread->sigsuspend_original_unblocked_mask == 0) {
thread->user_thread->pending_signals = signalMask;
arch_cpu_disable_user_access();
return;
}
thread->user_thread->pending_signals = 0;
arch_cpu_disable_user_access();
uint32 restartFlags = atomic_and(&thread->flags,
~THREAD_FLAGS_DONT_RESTART_SYSCALL);
bool alwaysRestart
= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
bool restart = alwaysRestart
|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
bool initialIteration = true;
while (true) {
if (initialIteration) {
initialIteration = false;
} else {
teamLocker.Lock();
locker.Lock();
signalMask = thread->AllPendingSignals() & nonBlockedMask;
}
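// Unless a kill signal is pending, check whether the thread shall stop for
// a core dump or for debugging.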
if ((signalMask & KILL_SIGNALS) == 0) {
if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
!= 0) {
locker.Unlock();
teamLocker.Unlock();
core_dump_trap_thread();
continue;
}
if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
!= 0) {
locker.Unlock();
teamLocker.Unlock();
user_debug_stop_thread();
continue;
}
}
if ((signalMask & nonBlockedMask) == 0)
break;
Signal stackSignal;
Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
stackSignal);
ASSERT(signal != NULL);
SignalHandledCaller signalHandledCaller(signal);
locker.Unlock();
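// get the team's action for the signal (non-standard kernel signals get
// the default action)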
struct sigaction handler;
if (signal->Number() <= MAX_SIGNAL_NUMBER) {
handler = team->SignalActionFor(signal->Number());
} else {
handler.sa_handler = SIG_DFL;
handler.sa_flags = 0;
}
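// If the handler is a one-shot handler, reset it to the default action.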
if ((handler.sa_flags & SA_ONESHOT) != 0
&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
}
T(HandleSignal(signal->Number()));
teamLocker.Unlock();
bool debugSignal = (~atomic_get(&team->debug_info.flags)
& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
== 0;
TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
kSignalInfos[signal->Number()].name));
if (handler.sa_handler == SIG_IGN) {
if (debugSignal)
notify_debugger(thread, signal, handler, false);
continue;
} else if (handler.sa_handler == SIG_DFL) {
if (signal->Number() >= SIGNAL_REALTIME_MIN
&& signal->Number() <= SIGNAL_REALTIME_MAX) {
if (debugSignal)
notify_debugger(thread, signal, handler, false);
continue;
}
bool killTeam = false;
switch (signal->Number()) {
case SIGCHLD:
case SIGWINCH:
case SIGURG:
if (debugSignal)
notify_debugger(thread, signal, handler, false);
continue;
case SIGNAL_DEBUG_THREAD:
continue;
case SIGNAL_CANCEL_THREAD:
handler.sa_handler = thread->cancel_function;
handler.sa_flags = 0;
handler.sa_mask = 0;
handler.sa_userdata = NULL;
restart = false;
break;
case SIGNAL_CONTINUE_THREAD:
restart = false;
atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
continue;
case SIGCONT:
if (debugSignal
&& !notify_debugger(thread, signal, handler, false))
continue;
if (thread == team->main_thread) {
team->LockTeamAndParent(false);
team_set_job_control_state(team,
JOB_CONTROL_STATE_CONTINUED, signal);
team->UnlockTeamAndParent();
}
continue;
case SIGSTOP:
case SIGTSTP:
case SIGTTIN:
case SIGTTOU:
{
if (debugSignal
&& !notify_debugger(thread, signal, handler, false))
continue;
team->LockProcessGroup();
AutoLocker<ProcessGroup> groupLocker(team->group, true);
if (signal->Number() != SIGSTOP
&& team->group->IsOrphaned()) {
continue;
}
if (thread == team->main_thread) {
team->LockTeamAndParent(false);
team_set_job_control_state(team,
JOB_CONTROL_STATE_STOPPED, signal);
Team* parentTeam = team->parent;
struct sigaction& parentHandler
= parentTeam->SignalActionFor(SIGCHLD);
if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
team->id);
childSignal.SetStatus(signal->Number());
childSignal.SetSendingUser(signal->SendingUser());
send_signal_to_team(parentTeam, childSignal, 0);
}
team->UnlockTeamAndParent();
}
groupLocker.Unlock();
locker.Lock();
bool resume = (thread->AllPendingSignals()
& (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
locker.Unlock();
if (!resume)
thread_suspend();
continue;
}
case SIGSEGV:
case SIGBUS:
case SIGFPE:
case SIGILL:
case SIGTRAP:
case SIGABRT:
case SIGKILL:
case SIGQUIT:
case SIGPOLL:
case SIGPROF:
case SIGSYS:
case SIGVTALRM:
case SIGXCPU:
case SIGXFSZ:
default:
TRACE(("Shutting down team %" B_PRId32 " due to signal %"
B_PRIu32 " received in thread %" B_PRIu32 " \n",
team->id, signal->Number(), thread->id));
killTeam = true;
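// fall through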
case SIGKILLTHR:
if (debugSignal && signal->Number() != SIGKILL
&& signal->Number() != SIGKILLTHR
&& !notify_debugger(thread, signal, handler, true)) {
continue;
}
if (killTeam || thread == team->main_thread) {
teamLocker.Lock();
if (!team->exit.initialized) {
team->exit.reason = CLD_KILLED;
team->exit.signal = signal->Number();
team->exit.signaling_user = signal->SendingUser();
team->exit.status = 0;
team->exit.initialized = true;
}
teamLocker.Unlock();
if (thread != team->main_thread) {
Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
team->id);
send_signal_to_thread_id(team->id, childSignal, 0);
}
}
signalHandledCaller.Done();
thread_exit();
}
}
if (debugSignal && !notify_debugger(thread, signal, handler, false))
continue;
if (!restart
|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
}
T(ExecuteSignalHandler(signal->Number(), &handler));
TRACE(("### Setting up custom signal handler frame...\n"));
locker.Lock();
sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
? ~thread->sigsuspend_original_unblocked_mask
: thread->sig_block_mask;
thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
if ((handler.sa_flags & SA_NOMASK) == 0) {
thread->sig_block_mask
|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
}
update_current_thread_signals_flag();
locker.Unlock();
setup_signal_frame(thread, &handler, signal, oldBlockMask);
thread->sigsuspend_original_unblocked_mask = 0;
return;
}
if (thread->sigsuspend_original_unblocked_mask != 0) {
restart = true;
atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
} else if (!restart) {
atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
}
}
/*! Checks whether the given signal is blocked by the given team (i.e. by all of
its threads).
The caller must hold the team's lock and \c signal_lock.
*/
bool
is_team_signal_blocked(Team* team, int signal)
{
sigset_t mask = SIGNAL_TO_MASK(signal);
for (Thread* thread = team->thread_list.First(); thread != NULL;
thread = team->thread_list.GetNext(thread)) {
if ((thread->sig_block_mask & mask) == 0)
return false;
}
return true;
}
/*! Determines the stack the current thread is using at the given
stack pointer.
Fills in \a stack with either the signal stack or the thread's user stack.
\param address A stack pointer address to be used to determine the used
stack.
\param stack Filled in by the function.
*/
void
signal_get_user_stack(addr_t address, stack_t* stack)
{
Thread* thread = thread_get_current_thread();
if (thread->signal_stack_enabled && address >= thread->signal_stack_base
&& address < thread->signal_stack_base + thread->signal_stack_size) {
stack->ss_sp = (void*)thread->signal_stack_base;
stack->ss_size = thread->signal_stack_size;
} else {
stack->ss_sp = (void*)thread->user_stack_base;
stack->ss_size = thread->user_stack_size;
}
stack->ss_flags = 0;
}
/*! Returns whether any non-blocked signal is pending for the given thread.
The caller must hold \c team->signal_lock.
\param thread The current thread.
*/
static bool
has_signals_pending(Thread* thread)
{
return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
}
/*! Checks whether the current team has permission to send a signal to the
target team.
\param team The target team.
*/
static bool
has_permission_to_signal(Team* team)
{
uid_t currentUser = thread_get_current_thread()->team->effective_uid;
return currentUser == 0 || currentUser == team->effective_uid;
}
/*! Delivers a signal to the given thread, but doesn't handle it -- it just
makes sure the thread gets the signal, i.e. unblocks it if needed.
The caller must hold \c team->signal_lock.
\param thread The thread the signal shall be delivered to.
\param signalNumber The number of the signal to be delivered. If \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
\param signal If non-NULL the signal to be queued (has number
\a signalNumber in this case). The caller transfers an object reference
to this function. If \c NULL an unqueued signal will be delivered to the
thread.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
Signal* signal, uint32 flags)
{
ASSERT(signal == NULL || signalNumber == signal->Number());
T(SendSignal(thread->id, signalNumber, flags));
BReference<Signal> signalReference(signal, true);
if ((flags & B_CHECK_PERMISSION) != 0) {
if (!has_permission_to_signal(thread->team))
return EPERM;
}
if (signalNumber == 0)
return B_OK;
if (thread->team == team_get_kernel_team()) {
thread_continue(thread);
return B_OK;
}
if (signal != NULL)
thread->AddPendingSignal(signal);
else
thread->AddPendingSignal(signalNumber);
signalReference.Detach();
switch (signalNumber) {
case SIGKILL:
{
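// If sent to a thread other than the team's main thread, also deliver
// SIGKILLTHR to the main thread, so the whole team is killed.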
Thread* mainThread = thread->team->main_thread;
if (mainThread != NULL && mainThread != thread) {
mainThread->AddPendingSignal(SIGKILLTHR);
thread->going_to_suspend = false;
SpinLocker locker(mainThread->scheduler_lock);
if (mainThread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(mainThread);
else
thread_interrupt(mainThread, true);
locker.Unlock();
update_thread_signals_flag(mainThread);
}
}
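// fall through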
case SIGKILLTHR:
{
thread->going_to_suspend = false;
SpinLocker locker(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
else
thread_interrupt(thread, true);
break;
}
case SIGNAL_DEBUG_THREAD:
{
thread->going_to_suspend = false;
SpinLocker locker(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
else
thread_interrupt(thread, false);
break;
}
case SIGNAL_CONTINUE_THREAD:
{
thread->going_to_suspend = false;
SpinLocker locker(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
break;
}
case SIGCONT:
{
thread->going_to_suspend = false;
SpinLocker locker(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(thread);
else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
thread_interrupt(thread, false);
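// remove any pending stop signals -- SIGCONT discards them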
thread->RemovePendingSignals(STOP_SIGNALS);
break;
}
default:
if ((thread->AllPendingSignals()
& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
!= 0) {
SpinLocker locker(thread->scheduler_lock);
thread_interrupt(thread, false);
}
break;
}
update_thread_signals_flag(thread);
return B_OK;
}
/*! Delivers the given signal to the given thread.
\param thread The thread the signal shall be sent to.
\param signal The signal to be delivered. If the signal's number is \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
The given object will be copied. The caller retains ownership.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
woken up, the scheduler will be invoked. If set that will not be
done explicitly, but rescheduling can still happen, e.g. when the
current thread's time slice runs out.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
{
Signal* signalToQueue = NULL;
status_t error = Signal::CreateQueuable(signal,
(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
if (error != B_OK)
return error;
InterruptsReadSpinLocker teamLocker(thread->team_lock);
SpinLocker locker(thread->team->signal_lock);
error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
flags);
if (error != B_OK)
return error;
locker.Unlock();
teamLocker.Unlock();
if ((flags & B_DO_NOT_RESCHEDULE) == 0)
scheduler_reschedule_if_necessary();
return B_OK;
}
/*! Delivers the given signal to the thread with the given ID.
\param threadID The ID of the thread the signal shall be sent to.
\param signal The signal to be delivered. If the signal's number is \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
The given object will be copied. The caller retains ownership.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
woken up, the scheduler will be invoked. If set that will not be
done explicitly, but rescheduling can still happen, e.g. when the
current thread's time slice runs out.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
{
Thread* thread = Thread::Get(threadID);
if (thread == NULL)
return B_BAD_THREAD_ID;
BReference<Thread> threadReference(thread, true);
return send_signal_to_thread(thread, signal, flags);
}
/*! Delivers a signal to the given team, but doesn't handle it -- it just makes
sure the team gets the signal, i.e. wakes up its threads if necessary.
The caller must hold \c signal_lock.
\param team The team the signal shall be sent to.
\param signalNumber The number of the signal to be delivered. If \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
\param signal If non-NULL the signal to be queued (has number
\a signalNumber in this case). The caller transfers an object reference
to this function. If \c NULL an unqueued signal will be delivered to the
thread.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
woken up, the scheduler will be invoked. If set that will not be
done explicitly, but rescheduling can still happen, e.g. when the
current thread's time slice runs out.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
uint32 flags)
{
ASSERT(signal == NULL || signalNumber == signal->Number());
T(SendSignal(team->id, signalNumber, flags));
BReference<Signal> signalReference(signal, true);
if ((flags & B_CHECK_PERMISSION) != 0) {
if (!has_permission_to_signal(team))
return EPERM;
}
if (signalNumber == 0)
return B_OK;
if (team == team_get_kernel_team()) {
return EPERM;
}
if (signal != NULL)
team->AddPendingSignal(signal);
else
team->AddPendingSignal(signalNumber);
signalReference.Detach();
switch (signalNumber) {
case SIGKILL:
case SIGKILLTHR:
{
Thread* mainThread = team->main_thread;
if (mainThread != NULL) {
mainThread->AddPendingSignal(signalNumber);
mainThread->going_to_suspend = false;
SpinLocker _(mainThread->scheduler_lock);
if (mainThread->state == B_THREAD_SUSPENDED)
scheduler_enqueue_in_run_queue(mainThread);
else
thread_interrupt(mainThread, true);
}
break;
}
case SIGCONT:
for (Thread* thread = team->thread_list.First(); thread != NULL;
thread = team->thread_list.GetNext(thread)) {
thread->going_to_suspend = false;
SpinLocker _(thread->scheduler_lock);
if (thread->state == B_THREAD_SUSPENDED) {
scheduler_enqueue_in_run_queue(thread);
} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
!= 0) {
thread_interrupt(thread, false);
}
thread->RemovePendingSignals(STOP_SIGNALS);
}
team->RemovePendingSignals(STOP_SIGNALS);
break;
case SIGSTOP:
case SIGTSTP:
case SIGTTIN:
case SIGTTOU:
for (Thread* thread = team->thread_list.First(); thread != NULL;
thread = team->thread_list.GetNext(thread)) {
thread->AddPendingSignal(signalNumber);
}
if (signal != NULL) {
team->RemovePendingSignal(signal);
signalReference.SetTo(signal, true);
} else
team->RemovePendingSignal(signalNumber);
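// fall through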
default:
for (Thread* thread = team->thread_list.First(); thread != NULL;
thread = team->thread_list.GetNext(thread)) {
sigset_t nonBlocked = ~thread->sig_block_mask
| SIGNAL_TO_MASK(SIGCHLD);
if ((thread->AllPendingSignals() & nonBlocked) != 0) {
SpinLocker _(thread->scheduler_lock);
thread_interrupt(thread, false);
}
}
break;
}
update_team_threads_signal_flag(team);
return B_OK;
}
/*! Delivers the given signal to the given team.
\param team The team the signal shall be sent to.
\param signal The signal to be delivered. If the signal's number is \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
The given object will be copied. The caller retains ownership.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
woken up, the scheduler will be invoked. If set that will not be
done explicitly, but rescheduling can still happen, e.g. when the
current thread's time slice runs out.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
{
Signal* signalToQueue = NULL;
status_t error = Signal::CreateQueuable(signal,
(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
if (error != B_OK)
return error;
InterruptsSpinLocker locker(team->signal_lock);
error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
flags);
locker.Unlock();
if ((flags & B_DO_NOT_RESCHEDULE) == 0)
scheduler_reschedule_if_necessary();
return error;
}
/*! Delivers the given signal to the team with the given ID.
\param teamID The ID of the team the signal shall be sent to.
\param signal The signal to be delivered. If the signal's number is \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
The given object will be copied. The caller retains ownership.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
woken up, the scheduler will be invoked. If set that will not be
done explicitly, but rescheduling can still happen, e.g. when the
current thread's time slice runs out.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
{
Team* team = Team::Get(teamID);
if (team == NULL)
return B_BAD_TEAM_ID;
BReference<Team> teamReference(team, true);
return send_signal_to_team(team, signal, flags);
}
/*! Delivers the given signal to all teams of the given process group.
The caller must hold the process group's lock. Interrupts must be enabled.
\param group The process group the signal shall be sent to.
\param signal The signal to be delivered. If the signal's number is \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
The given object will be copied. The caller retains ownership.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
woken up, the scheduler will be invoked. If set that will not be
done explicitly, but rescheduling can still happen, e.g. when the
current thread's time slice runs out.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
uint32 flags)
{
T(SendSignal(-group->id, signal.Number(), flags));
bool firstTeam = true;
for (Team* team = group->teams.First(); team != NULL; team = group->teams.GetNext(team)) {
status_t error = send_signal_to_team(team, signal,
flags | B_DO_NOT_RESCHEDULE);
if (firstTeam) {
if (error != B_OK)
return error;
firstTeam = false;
}
}
if ((flags & B_DO_NOT_RESCHEDULE) == 0)
scheduler_reschedule_if_necessary();
return B_OK;
}
/*! Delivers the given signal to the process group with the given ID.
The caller must not hold any process group, team, or thread lock. Interrupts
must be enabled.
\param groupID The ID of the process group the signal shall be sent to.
\param signal The signal to be delivered. If the signal's number is \c 0, no
actual signal will be delivered. Only delivery checks will be performed.
The given object will be copied. The caller retains ownership.
\param flags A bitwise combination of any number of the following:
- \c B_CHECK_PERMISSION: Check the caller's permission to send the
target thread the signal.
- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
woken up, the scheduler will be invoked. If set that will not be
done explicitly, but rescheduling can still happen, e.g. when the
current thread's time slice runs out.
\return \c B_OK, when the signal was delivered successfully, another error
code otherwise.
*/
status_t
send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
{
ProcessGroup* group = ProcessGroup::Get(groupID);
if (group == NULL)
return B_BAD_TEAM_ID;
BReference<ProcessGroup> groupReference(group, true);
T(SendSignal(-group->id, signal.Number(), flags));
AutoLocker<ProcessGroup> groupLocker(group);
status_t error = send_signal_to_process_group_locked(group, signal,
flags | B_DO_NOT_RESCHEDULE);
if (error != B_OK)
return error;
groupLocker.Unlock();
if ((flags & B_DO_NOT_RESCHEDULE) == 0)
scheduler_reschedule_if_necessary();
return B_OK;
}
static status_t
send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
uint32 flags)
{
if (signalNumber > MAX_SIGNAL_NUMBER)
return B_BAD_VALUE;
Thread* thread = thread_get_current_thread();
Signal signal(signalNumber,
(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
B_OK, thread->team->id);
signal.SetUserValue(userValue);
if (id > 0)
return send_signal_to_thread_id(id, signal, flags);
if (id == 0)
return send_signal_to_thread(thread, signal, flags);
if (id == -1) {
return send_signal_to_team_id(thread->team->id, signal, flags);
}
return send_signal_to_process_group(-id, signal, flags);
}
int
send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
{
union sigval userValue;
userValue.sival_ptr = NULL;
return send_signal_internal(id, signalNumber, userValue, flags);
}
int
send_signal(pid_t threadID, uint signal)
{
return send_signal_etc(threadID, signal, 0);
}
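/* Usage sketch (illustrative only): a kernel component could deliver SIGINT to
a thread by ID without forcing an immediate reschedule; "someThreadID" is a
placeholder, not an identifier used in this file.

	status_t error = send_signal_etc(someThreadID, SIGINT, B_DO_NOT_RESCHEDULE);
	if (error != B_OK)
		dprintf("sending SIGINT failed: %s\n", strerror(error));
*/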
static int
sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
{
Thread* thread = thread_get_current_thread();
InterruptsSpinLocker _(thread->team->signal_lock);
sigset_t oldMask = thread->sig_block_mask;
if (set != NULL) {
T(SigProcMask(how, *set));
switch (how) {
case SIG_BLOCK:
thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
break;
case SIG_UNBLOCK:
thread->sig_block_mask &= ~*set;
break;
case SIG_SETMASK:
thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
break;
default:
return B_BAD_VALUE;
}
update_current_thread_signals_flag();
}
if (oldSet != NULL)
*oldSet = oldMask;
return B_OK;
}
int
sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
{
RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
}
/*! Sets the team's action for the given signal and optionally returns the
previously installed action in \a oldAction.
*/
static status_t
sigaction_internal(int signal, const struct sigaction* act,
struct sigaction* oldAction)
{
if (signal < 1 || signal > MAX_SIGNAL_NUMBER
|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
return B_BAD_VALUE;
Team* team = thread_get_current_thread()->team;
TeamLocker teamLocker(team);
struct sigaction& teamHandler = team->SignalActionFor(signal);
if (oldAction) {
*oldAction = teamHandler;
}
if (act) {
T(SigAction(signal, act));
teamHandler = *act;
teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
}
if ((act && act->sa_handler == SIG_IGN)
|| (act && act->sa_handler == SIG_DFL
&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
InterruptsSpinLocker locker(team->signal_lock);
team->RemovePendingSignal(signal);
for (Thread* thread = team->thread_list.First(); thread != NULL;
thread = team->thread_list.GetNext(thread)) {
thread->RemovePendingSignal(signal);
}
}
return B_OK;
}
int
sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
{
RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
}
/*! Waits for one of the signals in \a set to become pending and returns the
signal in \a info.
The \c flags and \c timeout combination must either define an infinite
timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
*/
static status_t
sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
bigtime_t timeout)
{
sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
flags |= B_CAN_INTERRUPT;
bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
Thread* thread = thread_get_current_thread();
InterruptsSpinLocker locker(thread->team->signal_lock);
bool timedOut = false;
status_t error = B_OK;
while (!timedOut) {
sigset_t pendingSignals = thread->AllPendingSignals();
if ((pendingSignals & KILL_SIGNALS) != 0)
return B_INTERRUPTED;
if ((pendingSignals & requestedSignals) != 0) {
Signal stackSignal;
Signal* signal = dequeue_thread_or_team_signal(thread,
requestedSignals, stackSignal);
ASSERT(signal != NULL);
SignalHandledCaller signalHandledCaller(signal);
locker.Unlock();
info->si_signo = signal->Number();
info->si_code = signal->SignalCode();
info->si_errno = signal->ErrorCode();
info->si_pid = signal->SendingProcess();
info->si_uid = signal->SendingUser();
info->si_addr = signal->Address();
info->si_status = signal->Status();
info->si_band = signal->PollBand();
info->si_value = signal->UserValue();
return B_OK;
}
if (!canWait)
return B_WOULD_BLOCK;
sigset_t blockedSignals = thread->sig_block_mask;
if ((pendingSignals & ~blockedSignals) != 0) {
return B_INTERRUPTED;
}
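// Unblock the requested signals while waiting, so that they can wake us up;
// the original mask is restored below.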
thread->sig_block_mask = blockedSignals & ~requestedSignals;
while (!has_signals_pending(thread)) {
thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
NULL);
locker.Unlock();
if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
error = thread_block_with_timeout(flags, timeout);
if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
error = B_WOULD_BLOCK;
timedOut = true;
locker.Lock();
break;
}
} else
thread_block();
locker.Lock();
}
thread->sig_block_mask = blockedSignals;
update_current_thread_signals_flag();
}
return error;
}
/*! Replaces the current thread's signal block mask with the given mask and
waits until a non-blocked signal becomes pending.
Before returning, the original signal block mask is reinstantiated.
*/
static status_t
sigsuspend_internal(const sigset_t* _mask)
{
sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
T(SigSuspend(mask));
Thread* thread = thread_get_current_thread();
InterruptsSpinLocker locker(thread->team->signal_lock);
sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
update_current_thread_signals_flag();
while (!has_signals_pending(thread)) {
thread_prepare_to_block(thread, B_CAN_INTERRUPT,
THREAD_BLOCK_TYPE_SIGNAL, NULL);
locker.Unlock();
thread_block();
locker.Lock();
}
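// Record the unblocked signals of the original mask, so that handle_signals()
// can restore the mask after the signal handler has run.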
thread->sigsuspend_original_unblocked_mask = ~oldMask;
T(SigSuspendDone());
return B_INTERRUPTED;
}
static status_t
sigpending_internal(sigset_t* set)
{
Thread* thread = thread_get_current_thread();
if (set == NULL)
return B_BAD_VALUE;
InterruptsSpinLocker locker(thread->team->signal_lock);
*set = thread->AllPendingSignals() & thread->sig_block_mask;
return B_OK;
}
/*! Sends a signal to the specified target.
\param id Specifies the ID of the target:
- \code id > 0 \endcode: If \a toThread is \c true, the target is the
thread with ID \a id, otherwise the team with the ID \a id.
- \code id == 0 \endcode: If toThread is \c true, the target is the
current thread, otherwise the current team.
- \code id == -1 \endcode: The targets are all teams the current team has
permission to send signals to. Currently not implemented correctly.
- \code id < -1 \endcode: The target is the process group with ID
\c -id.
\param signalNumber The signal number. \c 0 to just perform checks, but not
actually send any signal.
\param userUserValue A user value to be associated with the signal. Might be
ignored unless signal queuing is forced. Can be \c NULL.
\param flags A bitwise or of any number of the following:
- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
instead of falling back to unqueued signals, when queuing isn't
possible.
- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
\c thread_id rather than a \c team_id. Ignored when the \a id is
\code < 0 \endcode -- then the target is a process group.
\return \c B_OK on success, another error code otherwise.
*/
status_t
_user_send_signal(int32 id, uint32 signalNumber,
const union sigval* userUserValue, uint32 flags)
{
flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
flags |= B_CHECK_PERMISSION;
union sigval userValue;
if (userUserValue != NULL) {
if (!IS_USER_ADDRESS(userUserValue)
|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
!= B_OK) {
return B_BAD_ADDRESS;
}
} else
userValue.sival_ptr = NULL;
if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
return send_signal_internal(id, signalNumber, userValue, flags);
if (signalNumber > MAX_SIGNAL_NUMBER)
return B_BAD_VALUE;
Thread* thread = thread_get_current_thread();
Signal signal(signalNumber,
(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
B_OK, thread->team->id);
signal.SetUserValue(userValue);
return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
signal, flags);
}
status_t
_user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
sigset_t set, oldSet;
status_t status;
if ((userSet != NULL && (!IS_USER_ADDRESS(userSet)
|| user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK))
|| (userOldSet != NULL && (!IS_USER_ADDRESS(userOldSet)
|| user_memcpy(&oldSet, userOldSet, sizeof(sigset_t)) < B_OK)))
return B_BAD_ADDRESS;
status = sigprocmask_internal(how, userSet ? &set : NULL,
userOldSet ? &oldSet : NULL);
if (status >= B_OK && userOldSet != NULL
&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
return B_BAD_ADDRESS;
return status;
}
status_t
_user_sigaction(int signal, const struct sigaction *userAction,
struct sigaction *userOldAction)
{
struct sigaction act, oact;
status_t status;
if ((userAction != NULL && (!IS_USER_ADDRESS(userAction)
|| user_memcpy(&act, userAction, sizeof(struct sigaction)) < B_OK))
|| (userOldAction != NULL && (!IS_USER_ADDRESS(userOldAction)
|| user_memcpy(&oact, userOldAction, sizeof(struct sigaction))
< B_OK)))
return B_BAD_ADDRESS;
status = sigaction_internal(signal, userAction ? &act : NULL,
userOldAction ? &oact : NULL);
if (status >= B_OK && userOldAction != NULL
&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
return B_BAD_ADDRESS;
return status;
}
status_t
_user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
bigtime_t timeout)
{
sigset_t set;
if (userSet == NULL || !IS_USER_ADDRESS(userSet)
|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
return B_BAD_ADDRESS;
}
if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
return B_BAD_ADDRESS;
syscall_restart_handle_timeout_pre(flags, timeout);
flags |= B_CAN_INTERRUPT;
siginfo_t info;
status_t status = sigwait_internal(&set, &info, flags, timeout);
if (status == B_OK) {
if (userInfo != NULL)
status = user_memcpy(userInfo, &info, sizeof(info));
} else if (status == B_INTERRUPTED) {
Thread* thread = thread_get_current_thread();
atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
}
return syscall_restart_handle_timeout_post(status, timeout);
}
status_t
_user_sigsuspend(const sigset_t *userMask)
{
sigset_t mask;
if (userMask == NULL)
return B_BAD_VALUE;
if (!IS_USER_ADDRESS(userMask)
|| user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK) {
return B_BAD_ADDRESS;
}
return sigsuspend_internal(&mask);
}
status_t
_user_sigpending(sigset_t *userSet)
{
sigset_t set;
int status;
if (userSet == NULL)
return B_BAD_VALUE;
if (!IS_USER_ADDRESS(userSet))
return B_BAD_ADDRESS;
status = sigpending_internal(&set);
if (status == B_OK
&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
return B_BAD_ADDRESS;
return status;
}
status_t
_user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
{
Thread *thread = thread_get_current_thread();
struct stack_t newStack, oldStack;
bool onStack = false;
if ((newUserStack != NULL && (!IS_USER_ADDRESS(newUserStack)
|| user_memcpy(&newStack, newUserStack, sizeof(stack_t)) < B_OK))
|| (oldUserStack != NULL && (!IS_USER_ADDRESS(oldUserStack)
|| user_memcpy(&oldStack, oldUserStack, sizeof(stack_t)) < B_OK)))
return B_BAD_ADDRESS;
if (thread->signal_stack_enabled) {
onStack = arch_on_signal_stack(thread);
}
if (oldUserStack != NULL) {
oldStack.ss_sp = (void *)thread->signal_stack_base;
oldStack.ss_size = thread->signal_stack_size;
oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
| (onStack ? SS_ONSTACK : 0);
}
if (newUserStack != NULL) {
if ((newStack.ss_flags & ~SS_DISABLE) != 0)
return B_BAD_VALUE;
if ((newStack.ss_flags & SS_DISABLE) == 0) {
if (newStack.ss_size < MINSIGSTKSZ)
return B_NO_MEMORY;
if (onStack)
return B_NOT_ALLOWED;
if (!IS_USER_ADDRESS(newStack.ss_sp))
return B_BAD_VALUE;
thread->signal_stack_base = (addr_t)newStack.ss_sp;
thread->signal_stack_size = newStack.ss_size;
thread->signal_stack_enabled = true;
} else
thread->signal_stack_enabled = false;
}
if (oldUserStack != NULL
&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
return B_BAD_ADDRESS;
return B_OK;
}
/*! Restores the environment of a function that was interrupted by a signal
handler call.
This syscall is invoked when a signal handler function returns. It
deconstructs the signal handler frame and restores the stack and register
state of the function that was interrupted by a signal. The syscall is
therefore somewhat unusual, since it does not return to the calling
function, but to someplace else. In case the signal interrupted a syscall,
it will appear as if the syscall just returned. That is also the reason, why
this syscall returns an int64, since it needs to return the value the
interrupted syscall returns, which is potentially 64 bits wide.
\param userSignalFrameData The signal frame data created for the signal
handler. Potentially some data (e.g. registers) have been modified by
the signal handler.
\return In case the signal interrupted a syscall, the return value of that
syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
the value might need to be tailored such that after a return to userland
the restored environment is identical to the interrupted one (unless
explicitly modified). E.g. for x86 to achieve that, the return value
must contain the eax|edx values of the interrupted environment.
*/
int64
_user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
{
syscall_64_bit_return_value();
Thread *thread = thread_get_current_thread();
signal_frame_data signalFrameData;
if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
|| user_memcpy(&signalFrameData, userSignalFrameData,
sizeof(signalFrameData)) != B_OK) {
dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
"copy signal frame data (%p) from userland. Killing thread...\n",
thread->id, userSignalFrameData);
kill_thread(thread->id);
return B_BAD_ADDRESS;
}
InterruptsSpinLocker locker(thread->team->signal_lock);
thread->sig_block_mask
= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
update_current_thread_signals_flag();
locker.Unlock();
atomic_and(&thread->flags,
~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
atomic_or(&thread->flags, signalFrameData.thread_flags
& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
memcpy(thread->syscall_restart.parameters,
signalFrameData.syscall_restart_parameters,
sizeof(thread->syscall_restart.parameters));
thread->user_signal_context = signalFrameData.context.uc_link;
if (thread->user_signal_context != NULL
&& !IS_USER_ADDRESS(thread->user_signal_context)) {
thread->user_signal_context = NULL;
}
return arch_restore_signal_frame(&signalFrameData);
}