Diffstat (limited to 'linux-user/signal.c')
 linux-user/signal.c | 915 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 641 insertions(+), 274 deletions(-)
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 602b631b92..05dc4afb52 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -18,81 +18,72 @@
*/
#include "qemu/osdep.h"
#include "qemu/bitops.h"
+#include "gdbstub/user.h"
+#include "hw/core/tcg-cpu-ops.h"
+
#include <sys/ucontext.h>
#include <sys/resource.h>
#include "qemu.h"
-#include "qemu-common.h"
+#include "user-internals.h"
+#include "strace.h"
+#include "loader.h"
#include "trace.h"
#include "signal-common.h"
+#include "host-signal.h"
+#include "user/safe-syscall.h"
+#include "tcg/tcg.h"
-struct target_sigaltstack target_sigaltstack_used = {
- .ss_sp = 0,
- .ss_size = 0,
- .ss_flags = TARGET_SS_DISABLE,
-};
+/* target_siginfo_t must fit in gdbstub's siginfo save area. */
+QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);
static struct target_sigaction sigact_table[TARGET_NSIG];
static void host_signal_handler(int host_signum, siginfo_t *info,
void *puc);
-static uint8_t host_to_target_signal_table[_NSIG] = {
- [SIGHUP] = TARGET_SIGHUP,
- [SIGINT] = TARGET_SIGINT,
- [SIGQUIT] = TARGET_SIGQUIT,
- [SIGILL] = TARGET_SIGILL,
- [SIGTRAP] = TARGET_SIGTRAP,
- [SIGABRT] = TARGET_SIGABRT,
-/* [SIGIOT] = TARGET_SIGIOT,*/
- [SIGBUS] = TARGET_SIGBUS,
- [SIGFPE] = TARGET_SIGFPE,
- [SIGKILL] = TARGET_SIGKILL,
- [SIGUSR1] = TARGET_SIGUSR1,
- [SIGSEGV] = TARGET_SIGSEGV,
- [SIGUSR2] = TARGET_SIGUSR2,
- [SIGPIPE] = TARGET_SIGPIPE,
- [SIGALRM] = TARGET_SIGALRM,
- [SIGTERM] = TARGET_SIGTERM,
-#ifdef SIGSTKFLT
- [SIGSTKFLT] = TARGET_SIGSTKFLT,
+/* Fallback addresses into sigtramp page. */
+abi_ulong default_sigreturn;
+abi_ulong default_rt_sigreturn;
+
+/*
+ * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
+ * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
+ * Signal number 0 is reserved for use as kill(pid, 0), to test whether
+ * a process exists without sending it a signal.
+ */
+#ifdef __SIGRTMAX
+QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
- [SIGCHLD] = TARGET_SIGCHLD,
- [SIGCONT] = TARGET_SIGCONT,
- [SIGSTOP] = TARGET_SIGSTOP,
- [SIGTSTP] = TARGET_SIGTSTP,
- [SIGTTIN] = TARGET_SIGTTIN,
- [SIGTTOU] = TARGET_SIGTTOU,
- [SIGURG] = TARGET_SIGURG,
- [SIGXCPU] = TARGET_SIGXCPU,
- [SIGXFSZ] = TARGET_SIGXFSZ,
- [SIGVTALRM] = TARGET_SIGVTALRM,
- [SIGPROF] = TARGET_SIGPROF,
- [SIGWINCH] = TARGET_SIGWINCH,
- [SIGIO] = TARGET_SIGIO,
- [SIGPWR] = TARGET_SIGPWR,
- [SIGSYS] = TARGET_SIGSYS,
- /* next signals stay the same */
- /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
- host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
- To fix this properly we need to do manual signal delivery multiplexed
- over a single host signal. */
- [__SIGRTMIN] = __SIGRTMAX,
- [__SIGRTMAX] = __SIGRTMIN,
+static uint8_t host_to_target_signal_table[_NSIG] = {
+#define MAKE_SIG_ENTRY(sig) [sig] = TARGET_##sig,
+ MAKE_SIGNAL_LIST
+#undef MAKE_SIG_ENTRY
};
-static uint8_t target_to_host_signal_table[_NSIG];
+static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
+
+/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
- if (sig < 0 || sig >= _NSIG)
+ if (sig < 1) {
return sig;
+ }
+ if (sig >= _NSIG) {
+ return TARGET_NSIG + 1;
+ }
return host_to_target_signal_table[sig];
}
+/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
- if (sig < 0 || sig >= _NSIG)
+ if (sig < 1) {
return sig;
+ }
+ if (sig > TARGET_NSIG) {
+ return _NSIG;
+ }
return target_to_host_signal_table[sig];
}
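Both converters clamp rather than fail: an out-of-range host signal maps to TARGET_NSIG + 1 and an unmapped target signal maps to _NSIG, each one past its valid range, so callers can reject them with a single bounds check. A standalone sketch of that contract, using toy table sizes as stand-ins for QEMU's _NSIG and TARGET_NSIG:

    #include <assert.h>

    enum { TOY_HOST_NSIG = 65, TOY_TARGET_NSIG = 64 };   /* stand-in sizes */
    static int toy_h2t[TOY_HOST_NSIG];                   /* 0 = unmapped */

    static int toy_host_to_target(int sig)
    {
        if (sig < 1) {
            return sig;                    /* signal 0 passes through */
        }
        if (sig >= TOY_HOST_NSIG) {
            return TOY_TARGET_NSIG + 1;    /* one past the valid range */
        }
        return toy_h2t[sig];
    }

    int main(void)
    {
        toy_h2t[2] = 2;                                        /* SIGINT-like */
        assert(toy_host_to_target(2) == 2);                    /* mapped */
        assert(toy_host_to_target(0) == 0);                    /* kill(pid, 0) */
        assert(toy_host_to_target(99) == TOY_TARGET_NSIG + 1); /* clamped */
        return 0;
    }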
@@ -113,11 +104,15 @@ static inline int target_sigismember(const target_sigset_t *set, int signum)
void host_to_target_sigset_internal(target_sigset_t *d,
const sigset_t *s)
{
- int i;
+ int host_sig, target_sig;
target_sigemptyset(d);
- for (i = 1; i <= TARGET_NSIG; i++) {
- if (sigismember(s, i)) {
- target_sigaddset(d, host_to_target_signal(i));
+ for (host_sig = 1; host_sig < _NSIG; host_sig++) {
+ target_sig = host_to_target_signal(host_sig);
+ if (target_sig < 1 || target_sig > TARGET_NSIG) {
+ continue;
+ }
+ if (sigismember(s, host_sig)) {
+ target_sigaddset(d, target_sig);
}
}
}
@@ -135,11 +130,15 @@ void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
void target_to_host_sigset_internal(sigset_t *d,
const target_sigset_t *s)
{
- int i;
+ int host_sig, target_sig;
sigemptyset(d);
- for (i = 1; i <= TARGET_NSIG; i++) {
- if (target_sigismember(s, i)) {
- sigaddset(d, target_to_host_signal(i));
+ for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
+ host_sig = target_to_host_signal(target_sig);
+ if (host_sig < 1 || host_sig >= _NSIG) {
+ continue;
+ }
+ if (target_sigismember(s, target_sig)) {
+ sigaddset(d, host_sig);
}
}
}
@@ -176,7 +175,7 @@ void target_to_host_old_sigset(sigset_t *sigset,
int block_signals(void)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
sigset_t set;
/* It's OK to block everything including SIGSEGV, because we won't
@@ -186,19 +185,19 @@ int block_signals(void)
sigfillset(&set);
sigprocmask(SIG_SETMASK, &set, 0);
- return atomic_xchg(&ts->signal_pending, 1);
+ return qatomic_xchg(&ts->signal_pending, 1);
}
/* Wrapper for sigprocmask function
* Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
- * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
+ * are host signal set, not guest ones. Returns -QEMU_ERESTARTSYS if
* a signal was already pending and the syscall must be restarted, or
* 0 on success.
* If set is NULL, this is guaranteed not to fail.
*/
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
if (oldset) {
*oldset = ts->signal_mask;
@@ -208,7 +207,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
int i;
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
switch (how) {
@@ -236,29 +235,31 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
return 0;
}
-#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
* caller is assumed to have called block_signals() already.
*/
void set_sigmask(const sigset_t *set)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
ts->signal_mask = *set;
}
-#endif
/* sigaltstack management */
int on_sig_stack(unsigned long sp)
{
- return (sp - target_sigaltstack_used.ss_sp
- < target_sigaltstack_used.ss_size);
+ TaskState *ts = get_task_state(thread_cpu);
+
+ return (sp - ts->sigaltstack_used.ss_sp
+ < ts->sigaltstack_used.ss_size);
}
int sas_ss_flags(unsigned long sp)
{
- return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
+ TaskState *ts = get_task_state(thread_cpu);
+
+ return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
@@ -267,17 +268,65 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
/*
* This is the X/Open sanctioned signal stack switching.
*/
+ TaskState *ts = get_task_state(thread_cpu);
+
if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
- return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
+ return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
}
return sp;
}
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
- __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
+ TaskState *ts = get_task_state(thread_cpu);
+
+ __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
__put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
- __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
+ __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
+}
+
+abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
+{
+ TaskState *ts = get_task_state(thread_cpu);
+ size_t minstacksize = TARGET_MINSIGSTKSZ;
+ target_stack_t ss;
+
+#if defined(TARGET_PPC64)
+ /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
+ struct image_info *image = ts->info;
+ if (get_ppc64_abi(image) > 1) {
+ minstacksize = 4096;
+ }
+#endif
+
+ __get_user(ss.ss_sp, &uss->ss_sp);
+ __get_user(ss.ss_size, &uss->ss_size);
+ __get_user(ss.ss_flags, &uss->ss_flags);
+
+ if (on_sig_stack(get_sp_from_cpustate(env))) {
+ return -TARGET_EPERM;
+ }
+
+ switch (ss.ss_flags) {
+ default:
+ return -TARGET_EINVAL;
+
+ case TARGET_SS_DISABLE:
+ ss.ss_size = 0;
+ ss.ss_sp = 0;
+ break;
+
+ case TARGET_SS_ONSTACK:
+ case 0:
+ if (ss.ss_size < minstacksize) {
+ return -TARGET_ENOMEM;
+ }
+ break;
+ }
+
+ ts->sigaltstack_used.ss_sp = ss.ss_sp;
+ ts->sigaltstack_used.ss_size = ss.ss_size;
+ return 0;
}
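target_restore_altstack() mirrors the kernel's sigaltstack checks: EPERM while running on the alternate stack, EINVAL for bad flags, ENOMEM below the minimum size. For reference, the same rules as seen by a normal (non-emulated) program; plain POSIX, independent of QEMU:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        stack_t ss = {
            .ss_sp = malloc(SIGSTKSZ),
            .ss_size = SIGSTKSZ,
            .ss_flags = 0,
        };

        if (sigaltstack(&ss, NULL) != 0) {        /* valid: accepted */
            perror("sigaltstack");
            return 1;
        }

        ss.ss_size = 1;                           /* below MINSIGSTKSZ */
        if (sigaltstack(&ss, NULL) != 0 && errno == ENOMEM) {
            printf("undersized stack rejected with ENOMEM\n");
        }

        ss.ss_flags = 123;                        /* bogus flags */
        if (sigaltstack(&ss, NULL) != 0 && errno == EINVAL) {
            printf("bad flags rejected with EINVAL\n");
        }
        return 0;
    }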
/* siginfo conversion */
@@ -332,8 +381,12 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
case TARGET_SIGCHLD:
tinfo->_sifields._sigchld._pid = info->si_pid;
tinfo->_sifields._sigchld._uid = info->si_uid;
- tinfo->_sifields._sigchld._status
- = host_to_target_waitstatus(info->si_status);
+ if (si_code == CLD_EXITED) {
+ tinfo->_sifields._sigchld._status = info->si_status;
+ } else {
+ tinfo->_sifields._sigchld._status
+ = host_to_target_signal(info->si_status & 0x7f)
+ | (info->si_status & ~0x7f);
+ }
tinfo->_sifields._sigchld._utime = info->si_utime;
tinfo->_sifields._sigchld._stime = info->si_stime;
si_type = QEMU_SI_CHLD;
@@ -359,8 +412,8 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
-void tswap_siginfo(target_siginfo_t *tinfo,
- const target_siginfo_t *info)
+static void tswap_siginfo(target_siginfo_t *tinfo,
+ const target_siginfo_t *info)
{
int si_type = extract32(info->si_code, 16, 16);
int si_code = sextract32(info->si_code, 0, 16);
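host_to_target_siginfo_noswap() parks the QEMU_SI_* type tag in the upper 16 bits of si_code (the deposit32 above), and tswap_siginfo() peels it back off here; the sign extension matters because kernel si_code values such as SI_TKILL are negative. A self-contained round trip using plain shifts in place of the deposit32/extract32/sextract32 helpers (the tag value 4 is illustrative, not a real QEMU_SI_* constant):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int16_t si_code = -6;            /* e.g. SI_TKILL */
        uint16_t si_type = 4;            /* illustrative QEMU_SI_* tag */

        /* deposit32(si_code, 16, 16, si_type) */
        int32_t packed = (int32_t)((uint32_t)si_type << 16 |
                                   (uint16_t)si_code);

        assert(((uint32_t)packed >> 16) == 4);     /* extract32(.., 16, 16) */
        assert((int16_t)(packed & 0xffff) == -6);  /* sextract32(.., 0, 16) */
        return 0;
    }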
@@ -442,26 +495,6 @@ void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
-static int fatal_signal (int sig)
-{
- switch (sig) {
- case TARGET_SIGCHLD:
- case TARGET_SIGURG:
- case TARGET_SIGWINCH:
- /* Ignored by default. */
- return 0;
- case TARGET_SIGCONT:
- case TARGET_SIGSTOP:
- case TARGET_SIGTSTP:
- case TARGET_SIGTTIN:
- case TARGET_SIGTTOU:
- /* Job control signals. */
- return 0;
- default:
- return 1;
- }
-}
-
/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
@@ -479,50 +512,110 @@ static int core_dump_signal(int sig)
}
}
-void signal_init(void)
+static void signal_table_init(void)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
- struct sigaction act;
- struct sigaction oact;
- int i, j;
- int host_sig;
+ int hsig, tsig, count;
+
+ /*
+ * Signals are supported starting from TARGET_SIGRTMIN and going up
+ * until we run out of host realtime signals. Glibc uses the lower 2
+ * RT signals and (hopefully) nobody uses the upper ones.
+ * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
+ * To fix this properly we would need to do manual signal delivery
+ * multiplexed over a single host signal.
+ * Attempts to configure "missing" signals via sigaction will be
+ * silently ignored.
+ *
+ * Remap the target SIGABRT, so that we can distinguish host abort
+ * from guest abort. When the guest registers a signal handler or
+ * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest
+ * arrives at dump_core_and_abort(), we will map back to host SIGABRT
+ * so that the parent (native or emulated) sees the correct signal.
+ * Finally, also map host to guest SIGABRT so that the emulated
+ * parent sees the correct mapping from wait status.
+ */
+
+ hsig = SIGRTMIN;
+ host_to_target_signal_table[SIGABRT] = 0;
+ host_to_target_signal_table[hsig++] = TARGET_SIGABRT;
- /* generate signal conversion tables */
- for(i = 1; i < _NSIG; i++) {
- if (host_to_target_signal_table[i] == 0)
- host_to_target_signal_table[i] = i;
+ for (tsig = TARGET_SIGRTMIN;
+ hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
+ hsig++, tsig++) {
+ host_to_target_signal_table[hsig] = tsig;
+ }
+
+ /* Invert the mapping that has already been assigned. */
+ for (hsig = 1; hsig < _NSIG; hsig++) {
+ tsig = host_to_target_signal_table[hsig];
+ if (tsig) {
+ assert(target_to_host_signal_table[tsig] == 0);
+ target_to_host_signal_table[tsig] = hsig;
+ }
+ }
+
+ host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;
+
+ /* Map everything else out-of-bounds. */
+ for (hsig = 1; hsig < _NSIG; hsig++) {
+ if (host_to_target_signal_table[hsig] == 0) {
+ host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
+ }
}
- for(i = 1; i < _NSIG; i++) {
- j = host_to_target_signal_table[i];
- target_to_host_signal_table[j] = i;
+ for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
+ if (target_to_host_signal_table[tsig] == 0) {
+ target_to_host_signal_table[tsig] = _NSIG;
+ count++;
+ }
}
+ trace_signal_table_init(count);
+}
+
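On a typical glibc host (SIGABRT == 6, SIGRTMIN == 34, SIGRTMAX == 64) with TARGET_SIGABRT == 6 and TARGET_SIGRTMIN == 32, the net effect of the loops above is: guest SIGABRT is raised as host signal 34, host SIGABRT still reports as guest SIGABRT, and guest RT signals occupy host 35 upward until the host runs out. A reduced, runnable model of just the remapping logic (constants are illustrative, not QEMU's):

    #include <assert.h>

    enum { H_NSIG = 65, T_NSIG = 64, H_ABRT = 6, H_RTMIN = 34, H_RTMAX = 64,
           T_ABRT = 6, T_RTMIN = 32 };
    static int h2t[H_NSIG], t2h[T_NSIG + 1];

    int main(void)
    {
        int hsig = H_RTMIN, tsig;

        h2t[H_ABRT] = 0;              /* keep host SIGABRT out of the invert */
        h2t[hsig++] = T_ABRT;         /* guest abort -> first host RT signal */
        for (tsig = T_RTMIN; hsig <= H_RTMAX && tsig <= T_NSIG; hsig++, tsig++) {
            h2t[hsig] = tsig;
        }
        for (hsig = 1; hsig < H_NSIG; hsig++) {
            if (h2t[hsig]) {
                t2h[h2t[hsig]] = hsig;
            }
        }
        h2t[H_ABRT] = T_ABRT;         /* host abort still reads as guest abort */

        assert(t2h[T_ABRT] == H_RTMIN);       /* raise path: 6 -> 34 */
        assert(h2t[H_ABRT] == T_ABRT);        /* wait-status path: 6 -> 6 */
        assert(h2t[H_RTMIN + 1] == T_RTMIN);  /* 35 -> 32 */
        return 0;
    }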
+void signal_init(void)
+{
+ TaskState *ts = get_task_state(thread_cpu);
+ struct sigaction act, oact;
+
+ /* initialize signal conversion tables */
+ signal_table_init();
+
/* Set the signal mask from the host mask. */
sigprocmask(0, 0, &ts->signal_mask);
- /* set all host signal handlers. ALL signals are blocked during
- the handlers to serialize them. */
- memset(sigact_table, 0, sizeof(sigact_table));
-
sigfillset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
act.sa_sigaction = host_signal_handler;
- for(i = 1; i <= TARGET_NSIG; i++) {
- host_sig = target_to_host_signal(i);
- sigaction(host_sig, NULL, &oact);
- if (oact.sa_sigaction == (void *)SIG_IGN) {
- sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
- } else if (oact.sa_sigaction == (void *)SIG_DFL) {
- sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
+
+ /*
+ * A parent process may configure ignored signals, but all other
+ * signals are default. For any target signals that have no host
+ * mapping, set to ignore. For all core_dump_signal, install our
+ * host signal handler so that we may invoke dump_core_and_abort.
+ * This includes SIGSEGV and SIGBUS, which also need our signal
+ * handler for paging and exceptions.
+ */
+ for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
+ int hsig = target_to_host_signal(tsig);
+ abi_ptr thand = TARGET_SIG_IGN;
+
+ if (hsig >= _NSIG) {
+ continue;
}
- /* If there's already a handler installed then something has
- gone horribly wrong, so don't even try to handle that case. */
- /* Install some handlers for our own use. We need at least
- SIGSEGV and SIGBUS, to detect exceptions. We can not just
- trap all signals because it affects syscall interrupt
- behavior. But do trap all default-fatal signals. */
- if (fatal_signal (i))
- sigaction(host_sig, &act, NULL);
+
+ /* Since we force-remap SIGABRT, we cannot probe and install in one step. */
+ if (tsig == TARGET_SIGABRT) {
+ sigaction(SIGABRT, NULL, &oact);
+ sigaction(hsig, &act, NULL);
+ } else {
+ struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
+ sigaction(hsig, iact, &oact);
+ }
+
+ if (oact.sa_sigaction != (void *)SIG_IGN) {
+ thand = TARGET_SIG_DFL;
+ }
+ sigact_table[tsig - 1]._sa_handler = thand;
}
}
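The probe-only call sigaction(sig, NULL, &oact) used above reads the disposition inherited from the parent without changing it, which is how a pre-existing SIG_IGN survives into sigact_table. A standalone illustration:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        struct sigaction oact;

        /* NULL new action: query only, nothing is installed. */
        sigaction(SIGPIPE, NULL, &oact);
        printf("SIGPIPE: %s\n",
               oact.sa_handler == SIG_IGN ? "inherited as ignored"
                                          : "default or handled");
        return 0;
    }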
@@ -533,15 +626,30 @@ void signal_init(void)
void force_sig(int sig)
{
CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
- target_siginfo_t info;
+ target_siginfo_t info = {};
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._kill._pid = 0;
info._sifields._kill._uid = 0;
- queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
+ queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
+}
+
+/*
+ * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
+ * 'force' part is handled in process_pending_signals().
+ */
+void force_sig_fault(int sig, int code, abi_ulong addr)
+{
+ CPUState *cpu = thread_cpu;
+ target_siginfo_t info = {};
+
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = code;
+ info._sifields._sigfault._addr = addr;
+ queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
}
/* Force a SIGSEGV if we couldn't write to memory trying to set
@@ -559,20 +667,80 @@ void force_sigsegv(int oldsig)
}
force_sig(TARGET_SIGSEGV);
}
-
#endif
+void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type, bool maperr, uintptr_t ra)
+{
+ const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+
+ if (tcg_ops->record_sigsegv) {
+ tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
+ }
+
+ force_sig_fault(TARGET_SIGSEGV,
+ maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
+ addr);
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit_restore(cpu, ra);
+}
+
+void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type, uintptr_t ra)
+{
+ const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+
+ if (tcg_ops->record_sigbus) {
+ tcg_ops->record_sigbus(cpu, addr, access_type, ra);
+ }
+
+ force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit_restore(cpu, ra);
+}
+
/* abort execution with signal */
-static void QEMU_NORETURN dump_core_and_abort(int target_sig)
+static G_NORETURN
+void die_with_signal(int host_sig)
{
- CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
- TaskState *ts = (TaskState *)cpu->opaque;
+ struct sigaction act = {
+ .sa_handler = SIG_DFL,
+ };
+
+ /*
+ * The proper exit code for dying from an uncaught signal is -<signal>.
+ * The kernel doesn't allow exit() or _exit() to pass a negative value.
+ * To get the proper exit code we need to actually die from an uncaught
+ * signal. Here the default signal handler is installed; we send
+ * the signal and wait for it to arrive.
+ */
+ sigfillset(&act.sa_mask);
+ sigaction(host_sig, &act, NULL);
+
+ kill(getpid(), host_sig);
+
+ /* Make sure the signal isn't masked (reusing the mask inside of act). */
+ sigdelset(&act.sa_mask, host_sig);
+ sigsuspend(&act.sa_mask);
+
+ /* unreachable */
+ _exit(EXIT_FAILURE);
+}
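The difference is visible to a waiting parent: _exit(128 + sig) would set the normal-exit bits, while actually dying from the signal sets the termination bits that WIFSIGNALED() reports. A quick demonstration (plain POSIX, independent of QEMU):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            signal(SIGTERM, SIG_DFL);   /* default disposition, as above */
            kill(getpid(), SIGTERM);    /* die from the uncaught signal */
            pause();                    /* wait for delivery */
            _exit(1);                   /* unreachable */
        }
        int status;
        waitpid(pid, &status, 0);
        if (WIFSIGNALED(status)) {
            printf("killed by signal %d\n", WTERMSIG(status));  /* 15 */
        }
        return 0;
    }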
+
+static G_NORETURN
+void dump_core_and_abort(CPUArchState *env, int target_sig)
+{
+ CPUState *cpu = env_cpu(env);
+ TaskState *ts = get_task_state(cpu);
int host_sig, core_dumped = 0;
- struct sigaction act;
- host_sig = target_to_host_signal(target_sig);
- trace_user_force_sig(env, target_sig, host_sig);
+ /* On exit, undo the remapping of SIGABRT. */
+ if (target_sig == TARGET_SIGABRT) {
+ host_sig = SIGABRT;
+ } else {
+ host_sig = target_to_host_signal(target_sig);
+ }
+ trace_user_dump_core_and_abort(env, target_sig, host_sig);
gdb_signalled(env, target_sig);
/* dump core if supported by target binary format */
@@ -592,37 +760,17 @@ static void QEMU_NORETURN dump_core_and_abort(int target_sig)
target_sig, strsignal(host_sig), "core dumped" );
}
- /* The proper exit code for dying from an uncaught signal is
- * -<signal>. The kernel doesn't allow exit() or _exit() to pass
- * a negative value. To get the proper exit code we need to
- * actually die from an uncaught signal. Here the default signal
- * handler is installed, we send ourself a signal and we wait for
- * it to arrive. */
- sigfillset(&act.sa_mask);
- act.sa_handler = SIG_DFL;
- act.sa_flags = 0;
- sigaction(host_sig, &act, NULL);
-
- /* For some reason raise(host_sig) doesn't send the signal when
- * statically linked on x86-64. */
- kill(getpid(), host_sig);
-
- /* Make sure the signal isn't masked (just reuse the mask inside
- of act) */
- sigdelset(&act.sa_mask, host_sig);
- sigsuspend(&act.sa_mask);
-
- /* unreachable */
- abort();
+ preexit_cleanup(env, 128 + target_sig);
+ die_with_signal(host_sig);
}
/* queue a signal so that it will be sent to the virtual CPU as soon
as possible */
-int queue_signal(CPUArchState *env, int sig, int si_type,
- target_siginfo_t *info)
+void queue_signal(CPUArchState *env, int sig, int si_type,
+ target_siginfo_t *info)
{
- CPUState *cpu = ENV_GET_CPU(env);
- TaskState *ts = cpu->opaque;
+ CPUState *cpu = env_cpu(env);
+ TaskState *ts = get_task_state(cpu);
trace_user_queue_signal(env, sig);
@@ -631,68 +779,255 @@ int queue_signal(CPUArchState *env, int sig, int si_type,
ts->sync_signal.info = *info;
ts->sync_signal.pending = sig;
/* signal that a new signal is pending */
- atomic_set(&ts->signal_pending, 1);
- return 1; /* indicates that the signal was queued */
+ qatomic_set(&ts->signal_pending, 1);
}
-#ifndef HAVE_SAFE_SYSCALL
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
- /* Default version: never rewind */
+ host_sigcontext *uc = (host_sigcontext *)puc;
+ uintptr_t pcreg = host_signal_pc(uc);
+
+ if (pcreg > (uintptr_t)safe_syscall_start
+ && pcreg < (uintptr_t)safe_syscall_end) {
+ host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
+ }
}
-#endif
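safe_syscall_start and safe_syscall_end bracket a short per-arch assembly sequence that re-checks signal_pending and only then traps into the host kernel; rewinding the PC to safe_syscall_start therefore re-runs the check, so a signal landing between the check and the syscall can never be lost. A toy C model of that window (the real thing is assembly; the names here are stand-ins):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int signal_pending;

    static long toy_safe_syscall(void)
    {
        /* safe_syscall_start: a signal handler rewinds the PC to here, */
        /* so this check is re-executed after any interruption.         */
        if (atomic_load(&signal_pending)) {
            return -1;                 /* -QEMU_ERESTARTSYS in QEMU */
        }
        return 42;                     /* the host syscall would go here */
        /* safe_syscall_end */
    }

    int main(void)
    {
        printf("%ld\n", toy_safe_syscall());    /* 42 */
        atomic_store(&signal_pending, 1);
        printf("%ld\n", toy_safe_syscall());    /* -1: restart path */
        return 0;
    }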
-static void host_signal_handler(int host_signum, siginfo_t *info,
- void *puc)
+static G_NORETURN
+void die_from_signal(siginfo_t *info)
+{
+ char sigbuf[4], codebuf[12];
+ const char *sig, *code = NULL;
+
+ switch (info->si_signo) {
+ case SIGSEGV:
+ sig = "SEGV";
+ switch (info->si_code) {
+ case SEGV_MAPERR:
+ code = "MAPERR";
+ break;
+ case SEGV_ACCERR:
+ code = "ACCERR";
+ break;
+ }
+ break;
+ case SIGBUS:
+ sig = "BUS";
+ switch (info->si_code) {
+ case BUS_ADRALN:
+ code = "ADRALN";
+ break;
+ case BUS_ADRERR:
+ code = "ADRERR";
+ break;
+ }
+ break;
+ case SIGILL:
+ sig = "ILL";
+ switch (info->si_code) {
+ case ILL_ILLOPC:
+ code = "ILLOPC";
+ break;
+ case ILL_ILLOPN:
+ code = "ILLOPN";
+ break;
+ case ILL_ILLADR:
+ code = "ILLADR";
+ break;
+ case ILL_PRVOPC:
+ code = "PRVOPC";
+ break;
+ case ILL_PRVREG:
+ code = "PRVREG";
+ break;
+ case ILL_COPROC:
+ code = "COPROC";
+ break;
+ }
+ break;
+ case SIGFPE:
+ sig = "FPE";
+ switch (info->si_code) {
+ case FPE_INTDIV:
+ code = "INTDIV";
+ break;
+ case FPE_INTOVF:
+ code = "INTOVF";
+ break;
+ }
+ break;
+ case SIGTRAP:
+ sig = "TRAP";
+ break;
+ default:
+ snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
+ sig = sigbuf;
+ break;
+ }
+ if (code == NULL) {
+ snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
+ code = codebuf;
+ }
+
+ error_report("QEMU internal SIG%s {code=%s, addr=%p}",
+ sig, code, info->si_addr);
+ die_with_signal(info->si_signo);
+}
+
+static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
+ host_sigcontext *uc)
{
- CPUArchState *env = thread_cpu->env_ptr;
- CPUState *cpu = ENV_GET_CPU(env);
- TaskState *ts = cpu->opaque;
+ uintptr_t host_addr = (uintptr_t)info->si_addr;
+ /*
+ * Convert forcefully to guest address space: addresses outside
+ * reserved_va are still valid to report via SEGV_MAPERR.
+ */
+ bool is_valid = h2g_valid(host_addr);
+ abi_ptr guest_addr = h2g_nocheck(host_addr);
+ uintptr_t pc = host_signal_pc(uc);
+ bool is_write = host_signal_write(info, uc);
+ MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
+ bool maperr;
+
+ /* If this was a write to a TB protected page, restart. */
+ if (is_write
+ && is_valid
+ && info->si_code == SEGV_ACCERR
+ && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
+ pc, guest_addr)) {
+ return;
+ }
- int sig;
+ /*
+ * If the access was not on behalf of the guest, i.e. not from within
+ * the executable mapping of the generated code buffer, then it is a
+ * host bug.
+ */
+ if (access_type != MMU_INST_FETCH
+ && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
+ die_from_signal(info);
+ }
+
+ maperr = true;
+ if (is_valid && info->si_code == SEGV_ACCERR) {
+ /*
+ * With reserved_va, the whole address space is PROT_NONE,
+ * which means that we may get ACCERR when we want MAPERR.
+ */
+ if (page_get_flags(guest_addr) & PAGE_VALID) {
+ maperr = false;
+ } else {
+ info->si_code = SEGV_MAPERR;
+ }
+ }
+
+ sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
+ cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
+}
+
+static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
+ host_sigcontext *uc)
+{
+ uintptr_t pc = host_signal_pc(uc);
+ bool is_write = host_signal_write(info, uc);
+ MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
+
+ /*
+ * If the access was not on behalf of the guest, i.e. not from within
+ * the executable mapping of the generated code buffer, then it is a
+ * host bug.
+ */
+ if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
+ die_from_signal(info);
+ }
+
+ if (info->si_code == BUS_ADRALN) {
+ uintptr_t host_addr = (uintptr_t)info->si_addr;
+ abi_ptr guest_addr = h2g_nocheck(host_addr);
+
+ sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
+ cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
+ }
+ return pc;
+}
+
+static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
+{
+ CPUState *cpu = thread_cpu;
+ CPUArchState *env = cpu_env(cpu);
+ TaskState *ts = get_task_state(cpu);
target_siginfo_t tinfo;
- ucontext_t *uc = puc;
+ host_sigcontext *uc = puc;
struct emulated_sigtable *k;
+ int guest_sig;
+ uintptr_t pc = 0;
+ bool sync_sig = false;
+ void *sigmask;
- /* the CPU emulator uses some host signals to detect exceptions,
- we forward to it some signals */
- if ((host_signum == SIGSEGV || host_signum == SIGBUS)
- && info->si_code > 0) {
- if (cpu_signal_handler(host_signum, info, puc))
+ /*
+ * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
+ * handling wrt signal blocking and unwinding. Non-spoofed SIGILL,
+ * SIGFPE, SIGTRAP are always host bugs.
+ */
+ if (info->si_code > 0) {
+ switch (host_sig) {
+ case SIGSEGV:
+ /* Only returns on handle_sigsegv_accerr_write success. */
+ host_sigsegv_handler(cpu, info, uc);
return;
+ case SIGBUS:
+ pc = host_sigbus_handler(cpu, info, uc);
+ sync_sig = true;
+ break;
+ case SIGILL:
+ case SIGFPE:
+ case SIGTRAP:
+ die_from_signal(info);
+ }
}
/* get target signal number */
- sig = host_to_target_signal(host_signum);
- if (sig < 1 || sig > TARGET_NSIG)
+ guest_sig = host_to_target_signal(host_sig);
+ if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
return;
- trace_user_host_signal(env, host_signum, sig);
-
- rewind_if_in_safe_syscall(puc);
+ }
+ trace_user_host_signal(env, host_sig, guest_sig);
host_to_target_siginfo_noswap(&tinfo, info);
- k = &ts->sigtab[sig - 1];
+ k = &ts->sigtab[guest_sig - 1];
k->info = tinfo;
- k->pending = sig;
+ k->pending = guest_sig;
ts->signal_pending = 1;
- /* Block host signals until target signal handler entered. We
+ /*
+ * For synchronous signals, unwind the cpu state to the faulting
+ * insn and then exit back to the main loop so that the signal
+ * is delivered immediately.
+ */
+ if (sync_sig) {
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit_restore(cpu, pc);
+ }
+
+ rewind_if_in_safe_syscall(puc);
+
+ /*
+ * Block host signals until the target signal handler is entered. We
* can't block SIGSEGV or SIGBUS while we're executing guest
* code in case the guest code provokes one in the window between
* now and it getting out to the main loop. Signals will be
* unblocked again in process_pending_signals().
*
- * WARNING: we cannot use sigfillset() here because the uc_sigmask
+ * WARNING: we cannot use sigfillset() here because the sigmask
* field is a kernel sigset_t, which is much smaller than the
* libc sigset_t which sigfillset() operates on. Using sigfillset()
* would write 0xff bytes off the end of the kernel sigset_t and
* trash other data in the surrounding context structure.
- * We can't use sizeof(uc->uc_sigmask) either, because the libc
- * headers define the struct field with the wrong (too large) type.
*/
- memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
- sigdelset(&uc->uc_sigmask, SIGSEGV);
- sigdelset(&uc->uc_sigmask, SIGBUS);
+ sigmask = host_signal_mask(uc);
+ memset(sigmask, 0xff, SIGSET_T_SIZE);
+ sigdelset(sigmask, SIGSEGV);
+ sigdelset(sigmask, SIGBUS);
/* interrupt the virtual CPU as soon as possible */
cpu_exit(thread_cpu);
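The size mismatch behind that WARNING is easy to inspect on the host: glibc pads sigset_t to hold 1024 signals, while the Linux kernel's sigset_t in the signal context holds only 64 bits, hence the fixed SIGSET_T_SIZE memset instead of sigfillset(). A quick check (glibc/Linux assumed):

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        /* 128 bytes on glibc vs 8 bytes actually read by the kernel;  */
        /* sigfillset() on the in-context mask would overrun by 120.   */
        printf("libc sigset_t:   %zu bytes\n", sizeof(sigset_t));
        printf("kernel sigset_t: %zu bytes\n", (size_t)8);
        return 0;
    }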
@@ -700,92 +1035,66 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
-abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
+abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
+ CPUArchState *env)
{
- int ret;
- struct target_sigaltstack oss;
+ target_stack_t oss, *uoss = NULL;
+ abi_long ret = -TARGET_EFAULT;
- /* XXX: test errors */
- if(uoss_addr)
- {
- __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
- __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
- __put_user(sas_ss_flags(sp), &oss.ss_flags);
+ if (uoss_addr) {
+ /* Verify writability now, but do not alter user memory yet. */
+ if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
+ goto out;
+ }
+ target_save_altstack(&oss, env);
}
- if(uss_addr)
- {
- struct target_sigaltstack *uss;
- struct target_sigaltstack ss;
- size_t minstacksize = TARGET_MINSIGSTKSZ;
+ if (uss_addr) {
+ target_stack_t *uss;
-#if defined(TARGET_PPC64)
- /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
- struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
- if (get_ppc64_abi(image) > 1) {
- minstacksize = 4096;
- }
-#endif
-
- ret = -TARGET_EFAULT;
if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
goto out;
}
- __get_user(ss.ss_sp, &uss->ss_sp);
- __get_user(ss.ss_size, &uss->ss_size);
- __get_user(ss.ss_flags, &uss->ss_flags);
- unlock_user_struct(uss, uss_addr, 0);
-
- ret = -TARGET_EPERM;
- if (on_sig_stack(sp))
- goto out;
-
- ret = -TARGET_EINVAL;
- if (ss.ss_flags != TARGET_SS_DISABLE
- && ss.ss_flags != TARGET_SS_ONSTACK
- && ss.ss_flags != 0)
+ ret = target_restore_altstack(uss, env);
+ if (ret) {
goto out;
-
- if (ss.ss_flags == TARGET_SS_DISABLE) {
- ss.ss_size = 0;
- ss.ss_sp = 0;
- } else {
- ret = -TARGET_ENOMEM;
- if (ss.ss_size < minstacksize) {
- goto out;
- }
- }
-
- target_sigaltstack_used.ss_sp = ss.ss_sp;
- target_sigaltstack_used.ss_size = ss.ss_size;
+ }
}
if (uoss_addr) {
- ret = -TARGET_EFAULT;
- if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
- goto out;
+ memcpy(uoss, &oss, sizeof(oss));
+ unlock_user_struct(uoss, uoss_addr, 1);
+ uoss = NULL;
}
-
ret = 0;
-out:
+
+ out:
+ if (uoss) {
+ unlock_user_struct(uoss, uoss_addr, 0);
+ }
return ret;
}
/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
- struct target_sigaction *oact)
+ struct target_sigaction *oact, abi_ulong ka_restorer)
{
struct target_sigaction *k;
- struct sigaction act1;
int host_sig;
int ret = 0;
- if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
+ trace_signal_do_sigaction_guest(sig, TARGET_NSIG);
+
+ if (sig < 1 || sig > TARGET_NSIG) {
+ return -TARGET_EINVAL;
+ }
+
+ if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
return -TARGET_EINVAL;
}
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
k = &sigact_table[sig - 1];
@@ -799,34 +1108,58 @@ int do_sigaction(int sig, const struct target_sigaction *act,
oact->sa_mask = k->sa_mask;
}
if (act) {
- /* FIXME: This is not threadsafe. */
__get_user(k->_sa_handler, &act->_sa_handler);
__get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
__get_user(k->sa_restorer, &act->sa_restorer);
#endif
+#ifdef TARGET_ARCH_HAS_KA_RESTORER
+ k->ka_restorer = ka_restorer;
+#endif
/* To be swapped in target_to_host_sigset. */
k->sa_mask = act->sa_mask;
/* we update the host linux signal state */
host_sig = target_to_host_signal(sig);
+ trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
+ if (host_sig > SIGRTMAX) {
+ /* we don't have enough host signals to map all target signals */
+ qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
+ sig);
+ /*
+ * We don't return an error here because some programs try to
+ * register a handler for all possible rt signals even if they
+ * don't need it. An error here would abort them, whereas it is
+ * harmless to simply not have the signal available until later.
+ * This is the case for golang; see
+ * https://github.com/golang/go/issues/33746.
+ * So we silently ignore the error.
+ */
+ return 0;
+ }
if (host_sig != SIGSEGV && host_sig != SIGBUS) {
+ struct sigaction act1;
+
sigfillset(&act1.sa_mask);
act1.sa_flags = SA_SIGINFO;
- if (k->sa_flags & TARGET_SA_RESTART)
- act1.sa_flags |= SA_RESTART;
- /* NOTE: it is important to update the host kernel signal
- ignore state to avoid getting unexpected interrupted
- syscalls */
if (k->_sa_handler == TARGET_SIG_IGN) {
+ /*
+ * It is important to update the host kernel signal ignore
+ * state to avoid getting unexpected interrupted syscalls.
+ */
act1.sa_sigaction = (void *)SIG_IGN;
} else if (k->_sa_handler == TARGET_SIG_DFL) {
- if (fatal_signal (sig))
+ if (core_dump_signal(sig)) {
act1.sa_sigaction = host_signal_handler;
- else
+ } else {
act1.sa_sigaction = (void *)SIG_DFL;
+ }
} else {
act1.sa_sigaction = host_signal_handler;
+ if (k->sa_flags & TARGET_SA_RESTART) {
+ act1.sa_flags |= SA_RESTART;
+ }
}
ret = sigaction(host_sig, &act1, NULL);
}
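SA_RESTART is only forwarded when a real guest handler is installed because it changes host-side semantics: after the handler returns, a slow host syscall either resumes (flag set) or fails with EINTR (flag clear). The observable difference, in plain POSIX:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void on_alrm(int sig) { /* interrupt the read() below */ }

    int main(void)
    {
        struct sigaction sa = { .sa_handler = on_alrm, .sa_flags = 0 };
        char c;

        sigemptyset(&sa.sa_mask);
        sigaction(SIGALRM, &sa, NULL);  /* without SA_RESTART */
        alarm(1);
        if (read(STDIN_FILENO, &c, 1) < 0 && errno == EINTR) {
            printf("read() interrupted: EINTR\n");
        }
        /* With sa.sa_flags = SA_RESTART, the read() would resume instead. */
        return 0;
    }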
@@ -837,18 +1170,30 @@ int do_sigaction(int sig, const struct target_sigaction *act,
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
struct emulated_sigtable *k)
{
- CPUState *cpu = ENV_GET_CPU(cpu_env);
+ CPUState *cpu = env_cpu(cpu_env);
abi_ulong handler;
sigset_t set;
+ target_siginfo_t unswapped;
target_sigset_t target_old_set;
struct target_sigaction *sa;
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
trace_user_handle_signal(cpu_env, sig);
/* dequeue signal */
k->pending = 0;
- sig = gdb_handlesig(cpu, sig);
+ /*
+ * Writes out siginfo values byteswapped according to the target.
+ * It also strips the si_type from si_code, making it correct for
+ * the target. We must hold on to the original unswapped copy for
+ * strace below, because si_type is still required there.
+ */
+ if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
+ unswapped = k->info;
+ }
+ tswap_siginfo(&k->info, &k->info);
+
+ sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
if (!sig) {
sa = NULL;
handler = TARGET_SIG_IGN;
@@ -857,8 +1202,8 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
handler = sa->_sa_handler;
}
- if (do_strace) {
- print_taken_signal(sig, &k->info);
+ if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
+ print_taken_signal(sig, &unswapped);
}
if (handler == TARGET_SIG_DFL) {
@@ -869,12 +1214,12 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
sig != TARGET_SIGURG &&
sig != TARGET_SIGWINCH &&
sig != TARGET_SIGCONT) {
- dump_core_and_abort(sig);
+ dump_core_and_abort(cpu_env, sig);
}
} else if (handler == TARGET_SIG_IGN) {
/* ignore sig */
} else if (handler == TARGET_SIG_ERR) {
- dump_core_and_abort(sig);
+ dump_core_and_abort(cpu_env, sig);
} else {
/* compute the blocked signals during the handler execution */
sigset_t *blocked_set;
@@ -922,14 +1267,13 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
void process_pending_signals(CPUArchState *cpu_env)
{
- CPUState *cpu = ENV_GET_CPU(cpu_env);
+ CPUState *cpu = env_cpu(cpu_env);
int sig;
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
sigset_t set;
sigset_t *blocked_set;
- while (atomic_read(&ts->signal_pending)) {
- /* FIXME: This is not threadsafe. */
+ while (qatomic_read(&ts->signal_pending)) {
sigfillset(&set);
sigprocmask(SIG_SETMASK, &set, 0);
@@ -972,7 +1316,7 @@ void process_pending_signals(CPUArchState *cpu_env)
* of unblocking might cause us to take another host signal which
* will set signal_pending again).
*/
- atomic_set(&ts->signal_pending, 0);
+ qatomic_set(&ts->signal_pending, 0);
ts->in_sigsuspend = 0;
set = ts->signal_mask;
sigdelset(&set, SIGSEGV);
@@ -981,3 +1325,26 @@ void process_pending_signals(CPUArchState *cpu_env)
}
ts->in_sigsuspend = 0;
}
+
+int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
+ target_ulong sigsize)
+{
+ TaskState *ts = get_task_state(thread_cpu);
+ sigset_t *host_set = &ts->sigsuspend_mask;
+ target_sigset_t *target_sigset;
+
+ if (sigsize != sizeof(*target_sigset)) {
+ /* Like the kernel, we enforce correct size sigsets */
+ return -TARGET_EINVAL;
+ }
+
+ target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
+ if (!target_sigset) {
+ return -TARGET_EFAULT;
+ }
+ target_to_host_sigset(host_set, target_sigset);
+ unlock_user(target_sigset, sigset, 0);
+
+ *pset = host_set;
+ return 0;
+}
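This helper only validates the mask's size and converts it; the caller still has to hand the result to a host primitive that unblocks and waits in one atomic step. The race that atomicity closes, shown with plain POSIX sigsuspend() (independent of QEMU):

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t got_usr1;

    static void on_usr1(int sig) { got_usr1 = 1; }

    int main(void)
    {
        sigset_t block, wait_mask;
        struct sigaction sa = { .sa_handler = on_usr1 };

        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &wait_mask);

        raise(SIGUSR1);              /* pends: USR1 is blocked, no race */
        if (!got_usr1) {
            sigsuspend(&wait_mask);  /* atomically unblock and wait */
        }
        printf("got SIGUSR1: %d\n", (int)got_usr1);   /* 1 */
        return 0;
    }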