Diffstat (limited to 'linux-user')
-rw-r--r--linux-user/aarch64/Makefile.vdso15
-rw-r--r--linux-user/aarch64/cpu_loop.c62
-rw-r--r--linux-user/aarch64/meson.build11
-rw-r--r--linux-user/aarch64/signal.c277
-rw-r--r--linux-user/aarch64/target_cpu.h5
-rw-r--r--linux-user/aarch64/target_flat.h1
-rw-r--r--linux-user/aarch64/target_mman.h22
-rw-r--r--linux-user/aarch64/target_prctl.h227
-rw-r--r--linux-user/aarch64/target_proc.h1
-rw-r--r--linux-user/aarch64/target_resource.h1
-rw-r--r--linux-user/aarch64/target_signal.h20
-rw-r--r--linux-user/aarch64/target_structs.h59
-rw-r--r--linux-user/aarch64/target_syscall.h26
-rwxr-xr-xlinux-user/aarch64/vdso-be.sobin0 -> 3224 bytes
-rwxr-xr-xlinux-user/aarch64/vdso-le.sobin0 -> 3224 bytes
-rw-r--r--linux-user/aarch64/vdso.S75
-rw-r--r--linux-user/aarch64/vdso.ld72
-rw-r--r--linux-user/alpha/cpu_loop.c81
-rw-r--r--linux-user/alpha/signal.c44
-rw-r--r--linux-user/alpha/target_elf.h2
-rw-r--r--linux-user/alpha/target_mman.h36
-rw-r--r--linux-user/alpha/target_prctl.h1
-rw-r--r--linux-user/alpha/target_proc.h67
-rw-r--r--linux-user/alpha/target_resource.h21
-rw-r--r--linux-user/alpha/target_signal.h2
-rw-r--r--linux-user/alpha/target_syscall.h1
-rw-r--r--linux-user/arm/Makefile.vdso17
-rw-r--r--linux-user/arm/cpu_loop.c218
-rw-r--r--linux-user/arm/meson.build12
-rw-r--r--linux-user/arm/nwfpe/double_cpdo.c4
-rw-r--r--linux-user/arm/nwfpe/fpa11_cpdt.c4
-rw-r--r--linux-user/arm/signal.c504
-rw-r--r--linux-user/arm/target_cpu.h6
-rw-r--r--linux-user/arm/target_flat.h1
-rw-r--r--linux-user/arm/target_mman.h12
-rw-r--r--linux-user/arm/target_prctl.h1
-rw-r--r--linux-user/arm/target_proc.h101
-rw-r--r--linux-user/arm/target_resource.h1
-rw-r--r--linux-user/arm/target_signal.h20
-rw-r--r--linux-user/arm/target_structs.h60
-rw-r--r--linux-user/arm/target_syscall.h3
-rw-r--r--linux-user/arm/vdso-asmoffset.h3
-rwxr-xr-xlinux-user/arm/vdso-be.sobin0 -> 2648 bytes
-rwxr-xr-xlinux-user/arm/vdso-le.sobin0 -> 2648 bytes
-rw-r--r--linux-user/arm/vdso.S174
-rw-r--r--linux-user/arm/vdso.ld67
-rw-r--r--linux-user/cpu_loop-common.h14
-rw-r--r--linux-user/cris/cpu_loop.c29
-rw-r--r--linux-user/cris/signal.c33
-rw-r--r--linux-user/cris/target_mman.h13
-rw-r--r--linux-user/cris/target_prctl.h1
-rw-r--r--linux-user/cris/target_proc.h1
-rw-r--r--linux-user/cris/target_resource.h1
-rw-r--r--linux-user/cris/target_signal.h20
-rw-r--r--linux-user/cris/target_structs.h59
-rw-r--r--linux-user/cris/target_syscall.h1
-rw-r--r--linux-user/elfload.c2554
-rw-r--r--linux-user/exit.c11
-rw-r--r--linux-user/fd-trans.c263
-rw-r--r--linux-user/fd-trans.h1
-rw-r--r--linux-user/flat.h5
-rw-r--r--linux-user/flatload.c311
-rw-r--r--linux-user/gen-vdso-elfn.c.inc314
-rw-r--r--linux-user/gen-vdso.c223
-rw-r--r--linux-user/generic/signal.h15
-rw-r--r--linux-user/generic/target_errno_defs.h17
-rw-r--r--linux-user/generic/target_flat.h (renamed from linux-user/target_flat.h)0
-rw-r--r--linux-user/generic/target_mman.h163
-rw-r--r--linux-user/generic/target_prctl_unalign.h27
-rw-r--r--linux-user/generic/target_resource.h38
-rw-r--r--linux-user/generic/target_structs.h (renamed from linux-user/nios2/target_structs.h)8
-rw-r--r--linux-user/hexagon/cpu_loop.c31
-rw-r--r--linux-user/hexagon/signal.c40
-rw-r--r--linux-user/hexagon/target_elf.h20
-rw-r--r--linux-user/hexagon/target_mman.h14
-rw-r--r--linux-user/hexagon/target_prctl.h1
-rw-r--r--linux-user/hexagon/target_proc.h1
-rw-r--r--linux-user/hexagon/target_resource.h1
-rw-r--r--linux-user/hexagon/target_signal.h15
-rw-r--r--linux-user/hexagon/target_structs.h55
-rw-r--r--linux-user/host/aarch64/hostdep.h38
-rw-r--r--linux-user/host/aarch64/safe-syscall.inc.S75
-rw-r--r--linux-user/host/arm/hostdep.h38
-rw-r--r--linux-user/host/arm/safe-syscall.inc.S90
-rw-r--r--linux-user/host/i386/hostdep.h38
-rw-r--r--linux-user/host/i386/safe-syscall.inc.S100
-rw-r--r--linux-user/host/ia64/hostdep.h15
-rw-r--r--linux-user/host/mips/hostdep.h15
-rw-r--r--linux-user/host/ppc/hostdep.h15
-rw-r--r--linux-user/host/ppc64/hostdep.h38
-rw-r--r--linux-user/host/ppc64/safe-syscall.inc.S96
-rw-r--r--linux-user/host/riscv32/hostdep.h11
-rw-r--r--linux-user/host/riscv64/hostdep.h34
-rw-r--r--linux-user/host/riscv64/safe-syscall.inc.S77
-rw-r--r--linux-user/host/s390/hostdep.h15
-rw-r--r--linux-user/host/s390x/hostdep.h38
-rw-r--r--linux-user/host/s390x/safe-syscall.inc.S90
-rw-r--r--linux-user/host/sparc/hostdep.h15
-rw-r--r--linux-user/host/sparc64/hostdep.h15
-rw-r--r--linux-user/host/x32/hostdep.h15
-rw-r--r--linux-user/host/x86_64/hostdep.h38
-rw-r--r--linux-user/host/x86_64/safe-syscall.inc.S91
-rw-r--r--linux-user/hppa/Makefile.vdso11
-rw-r--r--linux-user/hppa/cpu_loop.c63
-rw-r--r--linux-user/hppa/meson.build5
-rw-r--r--linux-user/hppa/signal.c85
-rw-r--r--linux-user/hppa/target_elf.h2
-rw-r--r--linux-user/hppa/target_mman.h35
-rw-r--r--linux-user/hppa/target_prctl.h1
-rw-r--r--linux-user/hppa/target_proc.h26
-rw-r--r--linux-user/hppa/target_resource.h1
-rw-r--r--linux-user/hppa/target_signal.h3
-rw-r--r--linux-user/hppa/target_syscall.h3
-rw-r--r--linux-user/hppa/vdso-asmoffset.h12
-rw-r--r--linux-user/hppa/vdso.S165
-rw-r--r--linux-user/hppa/vdso.ld77
-rwxr-xr-xlinux-user/hppa/vdso.sobin0 -> 2104 bytes
-rw-r--r--linux-user/i386/Makefile.vdso11
-rw-r--r--linux-user/i386/cpu_loop.c175
-rw-r--r--linux-user/i386/meson.build7
-rw-r--r--linux-user/i386/signal.c320
-rw-r--r--linux-user/i386/target_elf.h2
-rw-r--r--linux-user/i386/target_mman.h17
-rw-r--r--linux-user/i386/target_prctl.h1
-rw-r--r--linux-user/i386/target_proc.h1
-rw-r--r--linux-user/i386/target_resource.h1
-rw-r--r--linux-user/i386/target_signal.h20
-rw-r--r--linux-user/i386/target_structs.h59
-rw-r--r--linux-user/i386/target_syscall.h1
-rw-r--r--linux-user/i386/vdso-asmoffset.h6
-rw-r--r--linux-user/i386/vdso.S143
-rw-r--r--linux-user/i386/vdso.ld76
-rwxr-xr-xlinux-user/i386/vdso.sobin0 -> 2672 bytes
-rw-r--r--linux-user/include/host/aarch64/host-signal.h87
-rw-r--r--linux-user/include/host/arm/host-signal.h43
-rw-r--r--linux-user/include/host/i386/host-signal.h38
-rw-r--r--linux-user/include/host/loongarch64/host-signal.h93
-rw-r--r--linux-user/include/host/mips/host-signal.h75
-rw-r--r--linux-user/include/host/ppc/host-signal.h39
-rw-r--r--linux-user/include/host/ppc64/host-signal.h41
-rw-r--r--linux-user/include/host/riscv/host-signal.h71
-rw-r--r--linux-user/include/host/s390x/host-signal.h138
-rw-r--r--linux-user/include/host/sparc64/host-signal.h64
-rw-r--r--linux-user/include/host/x86_64/host-signal.h37
-rw-r--r--linux-user/include/special-errno.h32
-rw-r--r--linux-user/ioctls.h37
-rw-r--r--linux-user/linux_loop.h2
-rw-r--r--linux-user/linuxload.c147
-rw-r--r--linux-user/loader.h69
-rw-r--r--linux-user/loongarch64/Makefile.vdso11
-rw-r--r--linux-user/loongarch64/cpu_loop.c109
-rw-r--r--linux-user/loongarch64/meson.build4
-rw-r--r--linux-user/loongarch64/signal.c454
-rw-r--r--linux-user/loongarch64/sockbits.h11
-rw-r--r--linux-user/loongarch64/syscall_nr.h (renamed from linux-user/nios2/syscall_nr.h)57
-rw-r--r--linux-user/loongarch64/target_cpu.h34
-rw-r--r--linux-user/loongarch64/target_elf.h12
-rw-r--r--linux-user/loongarch64/target_errno_defs.h12
-rw-r--r--linux-user/loongarch64/target_fcntl.h11
-rw-r--r--linux-user/loongarch64/target_mman.h12
-rw-r--r--linux-user/loongarch64/target_prctl.h1
-rw-r--r--linux-user/loongarch64/target_proc.h1
-rw-r--r--linux-user/loongarch64/target_resource.h11
-rw-r--r--linux-user/loongarch64/target_signal.h13
-rw-r--r--linux-user/loongarch64/target_structs.h11
-rw-r--r--linux-user/loongarch64/target_syscall.h41
-rw-r--r--linux-user/loongarch64/termbits.h11
-rw-r--r--linux-user/loongarch64/vdso-asmoffset.h8
-rw-r--r--linux-user/loongarch64/vdso.S130
-rw-r--r--linux-user/loongarch64/vdso.ld73
-rwxr-xr-xlinux-user/loongarch64/vdso.sobin0 -> 3560 bytes
-rw-r--r--linux-user/m68k/cpu_loop.c53
-rw-r--r--linux-user/m68k/signal.c59
-rw-r--r--linux-user/m68k/target_cpu.h2
-rw-r--r--linux-user/m68k/target_flat.h1
-rw-r--r--linux-user/m68k/target_mman.h6
-rw-r--r--linux-user/m68k/target_prctl.h1
-rw-r--r--linux-user/m68k/target_proc.h16
-rw-r--r--linux-user/m68k/target_resource.h1
-rw-r--r--linux-user/m68k/target_signal.h20
-rw-r--r--linux-user/m68k/target_structs.h59
-rw-r--r--linux-user/m68k/target_syscall.h1
-rw-r--r--linux-user/main.c294
-rw-r--r--linux-user/meson.build23
-rw-r--r--linux-user/microblaze/cpu_loop.c92
-rw-r--r--linux-user/microblaze/signal.c30
-rw-r--r--linux-user/microblaze/target_flat.h1
-rw-r--r--linux-user/microblaze/target_mman.h12
-rw-r--r--linux-user/microblaze/target_prctl.h1
-rw-r--r--linux-user/microblaze/target_proc.h1
-rw-r--r--linux-user/microblaze/target_resource.h1
-rw-r--r--linux-user/microblaze/target_signal.h20
-rw-r--r--linux-user/microblaze/target_structs.h59
-rw-r--r--linux-user/microblaze/target_syscall.h1
-rw-r--r--linux-user/mips/cpu_loop.c191
-rw-r--r--linux-user/mips/signal.c51
-rw-r--r--linux-user/mips/target_elf.h3
-rw-r--r--linux-user/mips/target_mman.h29
-rw-r--r--linux-user/mips/target_prctl.h88
-rw-r--r--linux-user/mips/target_proc.h1
-rw-r--r--linux-user/mips/target_resource.h24
-rw-r--r--linux-user/mips/target_signal.h2
-rw-r--r--linux-user/mips/target_syscall.h7
-rw-r--r--linux-user/mips64/target_mman.h1
-rw-r--r--linux-user/mips64/target_prctl.h1
-rw-r--r--linux-user/mips64/target_proc.h1
-rw-r--r--linux-user/mips64/target_resource.h1
-rw-r--r--linux-user/mips64/target_signal.h3
-rw-r--r--linux-user/mips64/target_syscall.h7
-rw-r--r--linux-user/mmap.c1488
-rw-r--r--linux-user/nios2/cpu_loop.c155
-rw-r--r--linux-user/nios2/signal.c231
-rw-r--r--linux-user/nios2/sockbits.h1
-rw-r--r--linux-user/nios2/target_cpu.h48
-rw-r--r--linux-user/nios2/target_elf.h14
-rw-r--r--linux-user/nios2/target_errno_defs.h7
-rw-r--r--linux-user/nios2/target_fcntl.h11
-rw-r--r--linux-user/nios2/target_signal.h22
-rw-r--r--linux-user/nios2/target_syscall.h38
-rw-r--r--linux-user/nios2/termbits.h1
-rw-r--r--linux-user/openrisc/cpu_loop.c48
-rw-r--r--linux-user/openrisc/signal.c24
-rw-r--r--linux-user/openrisc/target_mman.h11
-rw-r--r--linux-user/openrisc/target_prctl.h1
-rw-r--r--linux-user/openrisc/target_proc.h1
-rw-r--r--linux-user/openrisc/target_resource.h1
-rw-r--r--linux-user/openrisc/target_signal.h25
-rw-r--r--linux-user/openrisc/target_structs.h59
-rw-r--r--linux-user/openrisc/target_syscall.h1
-rw-r--r--linux-user/ppc/Makefile.vdso20
-rw-r--r--linux-user/ppc/cpu_loop.c169
-rw-r--r--linux-user/ppc/meson.build12
-rw-r--r--linux-user/ppc/signal.c124
-rw-r--r--linux-user/ppc/target_mman.h29
-rw-r--r--linux-user/ppc/target_prctl.h1
-rw-r--r--linux-user/ppc/target_proc.h1
-rw-r--r--linux-user/ppc/target_resource.h1
-rw-r--r--linux-user/ppc/target_signal.h20
-rw-r--r--linux-user/ppc/target_syscall.h7
-rw-r--r--linux-user/ppc/vdso-32.ld70
-rwxr-xr-xlinux-user/ppc/vdso-32.sobin0 -> 3020 bytes
-rw-r--r--linux-user/ppc/vdso-64.ld68
-rwxr-xr-xlinux-user/ppc/vdso-64.sobin0 -> 3896 bytes
-rwxr-xr-xlinux-user/ppc/vdso-64le.sobin0 -> 3896 bytes
-rw-r--r--linux-user/ppc/vdso-asmoffset.h20
-rw-r--r--linux-user/ppc/vdso.S239
-rw-r--r--linux-user/qemu.h45
-rw-r--r--linux-user/riscv/Makefile.vdso15
-rw-r--r--linux-user/riscv/cpu_loop.c49
-rw-r--r--linux-user/riscv/meson.build7
-rw-r--r--linux-user/riscv/signal.c42
-rw-r--r--linux-user/riscv/syscall32_nr.h1
-rw-r--r--linux-user/riscv/syscall64_nr.h1
-rw-r--r--linux-user/riscv/target_elf.h3
-rw-r--r--linux-user/riscv/target_mman.h11
-rw-r--r--linux-user/riscv/target_prctl.h1
-rw-r--r--linux-user/riscv/target_proc.h37
-rw-r--r--linux-user/riscv/target_resource.h1
-rw-r--r--linux-user/riscv/target_signal.h14
-rw-r--r--linux-user/riscv/target_structs.h47
-rw-r--r--linux-user/riscv/target_syscall.h4
-rwxr-xr-xlinux-user/riscv/vdso-32.sobin0 -> 2980 bytes
-rwxr-xr-xlinux-user/riscv/vdso-64.sobin0 -> 3944 bytes
-rw-r--r--linux-user/riscv/vdso-asmoffset.h9
-rw-r--r--linux-user/riscv/vdso.S187
-rw-r--r--linux-user/riscv/vdso.ld74
-rw-r--r--linux-user/s390x/Makefile.vdso11
-rw-r--r--linux-user/s390x/cpu_loop.c34
-rw-r--r--linux-user/s390x/meson.build6
-rw-r--r--linux-user/s390x/signal.c48
-rw-r--r--linux-user/s390x/target_mman.h21
-rw-r--r--linux-user/s390x/target_prctl.h1
-rw-r--r--linux-user/s390x/target_proc.h109
-rw-r--r--linux-user/s390x/target_resource.h1
-rw-r--r--linux-user/s390x/target_signal.h17
-rw-r--r--linux-user/s390x/target_syscall.h1
-rw-r--r--linux-user/s390x/vdso-asmoffset.h2
-rw-r--r--linux-user/s390x/vdso.S61
-rw-r--r--linux-user/s390x/vdso.ld72
-rwxr-xr-xlinux-user/s390x/vdso.sobin0 -> 3464 bytes
-rw-r--r--linux-user/safe-syscall.S30
-rw-r--r--linux-user/safe-syscall.h154
-rw-r--r--linux-user/semihost.c48
-rw-r--r--linux-user/sh4/cpu_loop.c19
-rw-r--r--linux-user/sh4/signal.c64
-rw-r--r--linux-user/sh4/target_flat.h1
-rw-r--r--linux-user/sh4/target_mman.h8
-rw-r--r--linux-user/sh4/target_prctl.h1
-rw-r--r--linux-user/sh4/target_proc.h1
-rw-r--r--linux-user/sh4/target_resource.h1
-rw-r--r--linux-user/sh4/target_signal.h20
-rw-r--r--linux-user/sh4/target_structs.h59
-rw-r--r--linux-user/sh4/target_syscall.h1
-rw-r--r--linux-user/sh4/termbits.h206
-rw-r--r--linux-user/signal-common.h88
-rw-r--r--linux-user/signal.c672
-rw-r--r--linux-user/sparc/cpu_loop.c231
-rw-r--r--linux-user/sparc/signal.c74
-rw-r--r--linux-user/sparc/target_cpu.h17
-rw-r--r--linux-user/sparc/target_mman.h35
-rw-r--r--linux-user/sparc/target_prctl.h1
-rw-r--r--linux-user/sparc/target_proc.h16
-rw-r--r--linux-user/sparc/target_resource.h17
-rw-r--r--linux-user/sparc/target_signal.h7
-rw-r--r--linux-user/sparc/target_syscall.h7
-rw-r--r--linux-user/strace.c960
-rw-r--r--linux-user/strace.h4
-rw-r--r--linux-user/strace.list115
-rw-r--r--linux-user/syscall.c3273
-rw-r--r--linux-user/syscall_defs.h2436
-rw-r--r--linux-user/syscall_types.h11
-rw-r--r--linux-user/thunk.c481
-rw-r--r--linux-user/trace-events2
-rw-r--r--linux-user/uaccess.c4
-rw-r--r--linux-user/uname.c7
-rw-r--r--linux-user/uname.h2
-rw-r--r--linux-user/user-internals.h45
-rw-r--r--linux-user/user-mmap.h37
-rw-r--r--linux-user/vm86.c18
-rw-r--r--linux-user/x86_64/Makefile.vdso11
-rw-r--r--linux-user/x86_64/meson.build4
-rw-r--r--linux-user/x86_64/target_elf.h2
-rw-r--r--linux-user/x86_64/target_mman.h16
-rw-r--r--linux-user/x86_64/target_prctl.h1
-rw-r--r--linux-user/x86_64/target_proc.h1
-rw-r--r--linux-user/x86_64/target_resource.h1
-rw-r--r--linux-user/x86_64/target_signal.h21
-rw-r--r--linux-user/x86_64/target_structs.h36
-rw-r--r--linux-user/x86_64/target_syscall.h1
-rw-r--r--linux-user/x86_64/vdso.S78
-rw-r--r--linux-user/x86_64/vdso.ld73
-rwxr-xr-xlinux-user/x86_64/vdso.sobin0 -> 2968 bytes
-rw-r--r--linux-user/xtensa/cpu_loop.c39
-rw-r--r--linux-user/xtensa/signal.c90
-rw-r--r--linux-user/xtensa/target_mman.h29
-rw-r--r--linux-user/xtensa/target_prctl.h1
-rw-r--r--linux-user/xtensa/target_proc.h1
-rw-r--r--linux-user/xtensa/target_resource.h1
-rw-r--r--linux-user/xtensa/target_signal.h19
-rw-r--r--linux-user/xtensa/target_structs.h2
340 files changed, 15878 insertions, 9789 deletions
diff --git a/linux-user/aarch64/Makefile.vdso b/linux-user/aarch64/Makefile.vdso
new file mode 100644
index 0000000000..599958116b
--- /dev/null
+++ b/linux-user/aarch64/Makefile.vdso
@@ -0,0 +1,15 @@
+include $(BUILD_DIR)/tests/tcg/aarch64-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/aarch64
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so
+
+LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \
+ -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld
+
+$(SUBDIR)/vdso-be.so: vdso.S vdso.ld
+ $(CC) -o $@ $(LDFLAGS) -mbig-endian $<
+
+$(SUBDIR)/vdso-le.so: vdso.S vdso.ld
+ $(CC) -o $@ $(LDFLAGS) -mlittle-endian $<
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index 034b737435..71cdc8be50 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -26,6 +25,7 @@
#include "qemu/guest-random.h"
#include "semihosting/common-semi.h"
#include "target/arm/syndrome.h"
+#include "target/arm/cpu-features.h"
#define get_user_code_u32(x, gaddr, env) \
({ abi_long __r = get_user_u32((x), (gaddr)); \
@@ -79,7 +79,7 @@
void cpu_loop(CPUARMState *env)
{
CPUState *cs = env_cpu(env);
- int trapnr, ec, fsc, si_code;
+ int trapnr, ec, fsc, si_code, si_signo;
abi_long ret;
for (;;) {
@@ -90,6 +90,8 @@ void cpu_loop(CPUARMState *env)
switch (trapnr) {
case EXCP_SWI:
+ /* On syscall, PSTATE.ZA is preserved, PSTATE.SM is cleared. */
+ aarch64_set_svcr(env, 0, R_SVCR_SM_MASK);
ret = do_syscall(env,
env->xregs[8],
env->xregs[0],
@@ -99,9 +101,9 @@ void cpu_loop(CPUARMState *env)
env->xregs[4],
env->xregs[5],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->pc -= 4;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->xregs[0] = ret;
}
break;
@@ -113,35 +115,49 @@ void cpu_loop(CPUARMState *env)
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- /* We should only arrive here with EC in {DATAABORT, INSNABORT}. */
ec = syn_get_ec(env->exception.syndrome);
- assert(ec == EC_DATAABORT || ec == EC_INSNABORT);
-
- /* Both EC have the same format for FSC, or close enough. */
- fsc = extract32(env->exception.syndrome, 0, 6);
- switch (fsc) {
- case 0x04 ... 0x07: /* Translation fault, level {0-3} */
- si_code = TARGET_SEGV_MAPERR;
- break;
- case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
- case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
- si_code = TARGET_SEGV_ACCERR;
+ switch (ec) {
+ case EC_DATAABORT:
+ case EC_INSNABORT:
+ /* Both EC have the same format for FSC, or close enough. */
+ fsc = extract32(env->exception.syndrome, 0, 6);
+ switch (fsc) {
+ case 0x04 ... 0x07: /* Translation fault, level {0-3} */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_MAPERR;
+ break;
+ case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
+ case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_ACCERR;
+ break;
+ case 0x11: /* Synchronous Tag Check Fault */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_MTESERR;
+ break;
+ case 0x21: /* Alignment fault */
+ si_signo = TARGET_SIGBUS;
+ si_code = TARGET_BUS_ADRALN;
+ break;
+ default:
+ g_assert_not_reached();
+ }
break;
- case 0x11: /* Synchronous Tag Check Fault */
- si_code = TARGET_SEGV_MTESERR;
+ case EC_PCALIGNMENT:
+ si_signo = TARGET_SIGBUS;
+ si_code = TARGET_BUS_ADRALN;
break;
default:
g_assert_not_reached();
}
-
- force_sig_fault(TARGET_SIGSEGV, si_code, env->exception.vaddress);
+ force_sig_fault(si_signo, si_code, env->exception.vaddress);
break;
case EXCP_DEBUG:
case EXCP_BKPT:
force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
case EXCP_SEMIHOST:
- env->xregs[0] = do_common_semihosting(cs);
+ do_common_semihosting(cs);
env->pc += 4;
break;
case EXCP_YIELD:
@@ -173,7 +189,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
struct image_info *info = ts->info;
int i;
@@ -188,7 +204,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
}
env->pc = regs->pc;
env->xregs[31] = regs->sp;
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
env->cp15.sctlr_el[1] |= SCTLR_E0E;
for (i = 1; i < 4; ++i) {
env->cp15.sctlr_el[i] |= SCTLR_EE;
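
The abort decoding added above is, on its own, a compact mapping from the ESR exception class and fault status code to a POSIX signal. Below is a minimal sketch of that mapping in plain C with hypothetical names; the MTE tag-check case is omitted (SEGV_MTESERR is not in generic headers) and the default arm stands in for cases the emulation asserts cannot occur. Case ranges are the GCC/Clang extension the patch itself uses.

#include <signal.h>
#include <stdint.h>

struct fault_sig { int sig; int code; };

/* Sketch of the syndrome-to-signal mapping performed in cpu_loop() above. */
static struct fault_sig decode_abort(uint32_t syndrome)
{
    uint32_t ec = syndrome >> 26;       /* exception class, ESR_ELx[31:26] */
    uint32_t fsc = syndrome & 0x3f;     /* fault status code, ESR_ELx[5:0] */

    if (ec == 0x22) {                   /* PC alignment fault */
        return (struct fault_sig){ SIGBUS, BUS_ADRALN };
    }
    /* Data and instruction aborts share the FSC layout. */
    switch (fsc) {
    case 0x04 ... 0x07:                 /* translation fault, level 0-3 */
        return (struct fault_sig){ SIGSEGV, SEGV_MAPERR };
    case 0x09 ... 0x0b:                 /* access flag fault, level 1-3 */
    case 0x0d ... 0x0f:                 /* permission fault, level 1-3 */
        return (struct fault_sig){ SIGSEGV, SEGV_ACCERR };
    case 0x21:                          /* alignment fault */
        return (struct fault_sig){ SIGBUS, BUS_ADRALN };
    default:                            /* unreachable in the emulation */
        return (struct fault_sig){ SIGSEGV, SEGV_MAPERR };
    }
}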
diff --git a/linux-user/aarch64/meson.build b/linux-user/aarch64/meson.build
new file mode 100644
index 0000000000..248c578d15
--- /dev/null
+++ b/linux-user/aarch64/meson.build
@@ -0,0 +1,11 @@
+# TARGET_BIG_ENDIAN is defined to 'n' for little-endian; which means it
+# is always true as far as source_set.apply() is concerned. Always build
+# both header files and include the right one via #if.
+
+vdso_be_inc = gen_vdso.process('vdso-be.so',
+ extra_args: ['-r', '__kernel_rt_sigreturn'])
+
+vdso_le_inc = gen_vdso.process('vdso-le.so',
+ extra_args: ['-r', '__kernel_rt_sigreturn'])
+
+linux_user_ss.add(when: 'TARGET_AARCH64', if_true: [vdso_be_inc, vdso_le_inc])
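
As the comment explains, both byte orders are generated unconditionally and the consumer selects one at compile time. A hypothetical consumer, only to illustrate the pattern; the include file names here are illustrative, not the real gen-vdso output:

#if TARGET_BIG_ENDIAN
#include "vdso-be.c.inc"    /* image built with -mbig-endian */
#else
#include "vdso-le.c.inc"    /* image built with -mlittle-endian */
#endif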
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 49025648cb..bc7a13800d 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -21,6 +21,7 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
+#include "target/arm/cpu-features.h"
struct target_sigcontext {
uint64_t fault_address;
@@ -78,7 +79,8 @@ struct target_extra_context {
struct target_sve_context {
struct target_aarch64_ctx head;
uint16_t vl;
- uint16_t reserved[3];
+ uint16_t flags;
+ uint16_t reserved[2];
/* The actual SVE data immediately follows. It is laid out
* according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
* the original struct pointer.
@@ -101,6 +103,24 @@ struct target_sve_context {
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
(TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
+#define TARGET_SVE_SIG_FLAG_SM 1
+
+#define TARGET_ZA_MAGIC 0x54366345
+
+struct target_za_context {
+ struct target_aarch64_ctx head;
+ uint16_t vl;
+ uint16_t reserved[3];
+ /* The actual ZA data immediately follows. */
+};
+
+#define TARGET_ZA_SIG_REGS_OFFSET \
+ QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
+ (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
+ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
@@ -109,7 +129,6 @@ struct target_rt_sigframe {
struct target_rt_frame_record {
uint64_t fp;
uint64_t lr;
- uint32_t tramp[2];
};
static void target_setup_general_frame(struct target_rt_sigframe *sf,
@@ -148,7 +167,7 @@ static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
for (i = 0; i < 32; i++) {
uint64_t *q = aa64_vfp_qreg(env, i);
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
__put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
__put_user(q[1], &fpsimd->vregs[i * 2]);
#else
@@ -174,13 +193,17 @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
}
static void target_setup_sve_record(struct target_sve_context *sve,
- CPUARMState *env, int vq, int size)
+ CPUARMState *env, int size)
{
- int i, j;
+ int i, j, vq = sve_vq(env);
+ memset(sve, 0, sizeof(*sve));
__put_user(TARGET_SVE_MAGIC, &sve->head.magic);
__put_user(size, &sve->head.size);
__put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
+ __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
+ }
/* Note that SVE regs are stored as a byte stream, with each byte element
* at a subsequent address. This corresponds to a little-endian store
@@ -201,6 +224,35 @@ static void target_setup_sve_record(struct target_sve_context *sve,
}
}
+static void target_setup_za_record(struct target_za_context *za,
+ CPUARMState *env, int size)
+{
+ int vq = sme_vq(env);
+ int vl = vq * TARGET_SVE_VQ_BYTES;
+ int i, j;
+
+ memset(za, 0, sizeof(*za));
+ __put_user(TARGET_ZA_MAGIC, &za->head.magic);
+ __put_user(size, &za->head.size);
+ __put_user(vl, &za->vl);
+
+ if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+ return;
+ }
+ assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
+
+ /*
+ * Note that ZA vectors are stored as a byte stream,
+ * with each byte element at a subsequent address.
+ */
+ for (i = 0; i < vl; ++i) {
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+ for (j = 0; j < vq * 2; ++j) {
+ __put_user_e(env->zarray[i].d[j], z + j, le);
+ }
+ }
+}
+
static void target_restore_general_frame(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -234,7 +286,7 @@ static void target_restore_fpsimd_record(CPUARMState *env,
for (i = 0; i < 32; i++) {
uint64_t *q = aa64_vfp_qreg(env, i);
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
__get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
__get_user(q[1], &fpsimd->vregs[i * 2]);
#else
@@ -244,12 +296,50 @@ static void target_restore_fpsimd_record(CPUARMState *env,
}
}
-static void target_restore_sve_record(CPUARMState *env,
- struct target_sve_context *sve, int vq)
+static bool target_restore_sve_record(CPUARMState *env,
+ struct target_sve_context *sve,
+ int size, int *svcr)
{
- int i, j;
+ int i, j, vl, vq, flags;
+ bool sm;
- /* Note that SVE regs are stored as a byte stream, with each byte element
+ __get_user(vl, &sve->vl);
+ __get_user(flags, &sve->flags);
+
+ sm = flags & TARGET_SVE_SIG_FLAG_SM;
+
+ /* The cpu must support Streaming or Non-streaming SVE. */
+ if (sm
+ ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
+ : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+ return false;
+ }
+
+ /*
+ * Note that we cannot use sve_vq() because that depends on the
+ * current setting of PSTATE.SM, not the state to be restored.
+ */
+ vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;
+
+ /* Reject mismatched VL. */
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
+ return false;
+ }
+
+ /* Accept empty record -- used to clear PSTATE.SM. */
+ if (size <= sizeof(*sve)) {
+ return true;
+ }
+
+ /* Reject non-empty but incomplete record. */
+ if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
+ return false;
+ }
+
+ *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
+
+ /*
+ * Note that SVE regs are stored as a byte stream, with each byte element
* at a subsequent address. This corresponds to a little-endian load
* of our 64-bit hunks.
*/
@@ -271,6 +361,46 @@ static void target_restore_sve_record(CPUARMState *env,
}
}
}
+ return true;
+}
+
+static bool target_restore_za_record(CPUARMState *env,
+ struct target_za_context *za,
+ int size, int *svcr)
+{
+ int i, j, vl, vq;
+
+ if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ return false;
+ }
+
+ __get_user(vl, &za->vl);
+ vq = sme_vq(env);
+
+ /* Reject mismatched VL. */
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
+ return false;
+ }
+
+ /* Accept empty record -- used to clear PSTATE.ZA. */
+ if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+ return true;
+ }
+
+ /* Reject non-empty but incomplete record. */
+ if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
+ return false;
+ }
+
+ *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
+
+ for (i = 0; i < vl; ++i) {
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+ for (j = 0; j < vq * 2; ++j) {
+ __get_user_e(env->zarray[i].d[j], z + j, le);
+ }
+ }
+ return true;
}
static int target_restore_sigframe(CPUARMState *env,
@@ -279,10 +409,12 @@ static int target_restore_sigframe(CPUARMState *env,
struct target_aarch64_ctx *ctx, *extra = NULL;
struct target_fpsimd_context *fpsimd = NULL;
struct target_sve_context *sve = NULL;
+ struct target_za_context *za = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
- bool err = false;
- int vq = 0, sve_size = 0;
+ int sve_size = 0;
+ int za_size = 0;
+ int svcr = 0;
target_restore_general_frame(env, sf);
@@ -295,8 +427,7 @@ static int target_restore_sigframe(CPUARMState *env,
switch (magic) {
case 0:
if (size != 0) {
- err = true;
- goto exit;
+ goto err;
}
if (used_extra) {
ctx = NULL;
@@ -308,42 +439,46 @@ static int target_restore_sigframe(CPUARMState *env,
case TARGET_FPSIMD_MAGIC:
if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
- err = true;
- goto exit;
+ goto err;
}
fpsimd = (struct target_fpsimd_context *)ctx;
break;
case TARGET_SVE_MAGIC:
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
- vq = (env->vfp.zcr_el[1] & 0xf) + 1;
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
- if (!sve && size == sve_size) {
- sve = (struct target_sve_context *)ctx;
- break;
- }
+ if (sve || size < sizeof(struct target_sve_context)) {
+ goto err;
}
- err = true;
- goto exit;
+ sve = (struct target_sve_context *)ctx;
+ sve_size = size;
+ break;
+
+ case TARGET_ZA_MAGIC:
+ if (za || size < sizeof(struct target_za_context)) {
+ goto err;
+ }
+ za = (struct target_za_context *)ctx;
+ za_size = size;
+ break;
case TARGET_EXTRA_MAGIC:
if (extra || size != sizeof(struct target_extra_context)) {
- err = true;
- goto exit;
+ goto err;
}
__get_user(extra_datap,
&((struct target_extra_context *)ctx)->datap);
__get_user(extra_size,
&((struct target_extra_context *)ctx)->size);
extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
+ if (!extra) {
+ return 1;
+ }
break;
default:
/* Unknown record -- we certainly didn't generate it.
* Did we in fact get out of sync?
*/
- err = true;
- goto exit;
+ goto err;
}
ctx = (void *)ctx + size;
}
@@ -352,17 +487,26 @@ static int target_restore_sigframe(CPUARMState *env,
if (fpsimd) {
target_restore_fpsimd_record(env, fpsimd);
} else {
- err = true;
+ goto err;
}
/* SVE data, if present, overwrites FPSIMD data. */
- if (sve) {
- target_restore_sve_record(env, sve, vq);
+ if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
+ goto err;
+ }
+ if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
+ goto err;
}
+ if (env->svcr != svcr) {
+ env->svcr = svcr;
+ arm_rebuild_hflags(env);
+ }
+ unlock_user(extra, extra_datap, 0);
+ return 0;
- exit:
+ err:
unlock_user(extra, extra_datap, 0);
- return err;
+ return 1;
}
static abi_ulong get_sigframe(struct target_sigaction *ka,
@@ -424,7 +568,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
.total_size = offsetof(struct target_rt_sigframe,
uc.tuc_mcontext.__reserved),
};
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
+ int sve_size = 0, za_size = 0;
struct target_rt_sigframe *frame;
struct target_rt_frame_record *fr;
abi_ulong frame_addr, return_addr;
@@ -434,11 +579,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
&layout);
/* SVE state needs saving only if it exists. */
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
- vq = (env->vfp.zcr_el[1] & 0xf) + 1;
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
+ cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
sve_ofs = alloc_sigframe_space(sve_size, &layout);
}
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ /* ZA state needs saving only if it is enabled. */
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
+ } else {
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
+ }
+ za_ofs = alloc_sigframe_space(za_size, &layout);
+ }
if (layout.extra_ofs) {
/* Reserve space for the extra end marker. The standard end marker
@@ -461,9 +615,9 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
layout.total_size = MAX(layout.total_size,
sizeof(struct target_rt_sigframe));
- /* Reserve space for the return code. On a real system this would
- * be within the VDSO. So, despite the name this is not a "real"
- * record within the frame.
+ /*
+ * Reserve space for the standard frame unwind pair: fp, lr.
+ * Despite the name this is not a "real" record within the frame.
*/
fr_ofs = layout.total_size;
layout.total_size += sizeof(struct target_rt_frame_record);
@@ -485,7 +639,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
target_setup_end_record((void *)frame + layout.extra_end_ofs);
}
if (sve_ofs) {
- target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
+ target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
+ }
+ if (za_ofs) {
+ target_setup_za_record((void *)frame + za_ofs, env, za_size);
}
/* Set up the stack frame for unwinding. */
@@ -496,15 +653,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
if (ka->sa_flags & TARGET_SA_RESTORER) {
return_addr = ka->sa_restorer;
} else {
- /*
- * mov x8,#__NR_rt_sigreturn; svc #0
- * Since these are instructions they need to be put as little-endian
- * regardless of target default or current CPU endianness.
- */
- __put_user_e(0xd2801168, &fr->tramp[0], le);
- __put_user_e(0xd4000001, &fr->tramp[1], le);
- return_addr = frame_addr + fr_ofs
- + offsetof(struct target_rt_frame_record, tramp);
+ return_addr = default_rt_sigreturn;
}
env->xregs[0] = usig;
env->xregs[29] = frame_addr + fr_ofs;
@@ -517,8 +666,11 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
env->btype = 2;
}
+ /* Invoke the signal handler with both SM and ZA disabled. */
+ aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
+
if (info) {
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
}
@@ -565,15 +717,32 @@ long do_rt_sigreturn(CPUARMState *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
long do_sigreturn(CPUARMState *env)
{
return do_rt_sigreturn(env);
}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
+ assert(tramp != NULL);
+
+ /*
+ * mov x8,#__NR_rt_sigreturn; svc #0
+ * Since these are instructions they need to be put as little-endian
+ * regardless of target default or current CPU endianness.
+ */
+ __put_user_e(0xd2801168, &tramp[0], le);
+ __put_user_e(0xd4000001, &tramp[1], le);
+
+ default_rt_sigreturn = sigtramp_page;
+ unlock_user(tramp, sigtramp_page, 8);
+}
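
The restore path above walks the kernel's extensible {magic, size} record list in uc_mcontext.__reserved, and a guest program sees exactly the same layout. A minimal sketch of walking that list given a ucontext_t, assuming aarch64 Linux headers that provide the *_MAGIC constants (ZA_MAGIC only appears in recent ones, so it is left to the default arm here):

#include <stdio.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

/* Sketch: enumerate the context records laid out in the signal frame. */
static void dump_records(ucontext_t *uc)
{
    struct _aarch64_ctx *ctx =
        (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

    while (ctx->magic != 0) {
        switch (ctx->magic) {
        case FPSIMD_MAGIC:
            puts("fpsimd record (always present)");
            break;
        case SVE_MAGIC:
            puts("sve record (streaming mode if SVE_SIG_FLAG_SM is set)");
            break;
        case EXTRA_MAGIC:
            puts("records continue in an out-of-line buffer");
            break;
        default:
            printf("unknown record 0x%x, skipped by size\n", ctx->magic);
            break;
        }
        ctx = (struct _aarch64_ctx *)((char *)ctx + ctx->size);
    }
}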
diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h
index 97a477bd3e..f90359faf2 100644
--- a/linux-user/aarch64/target_cpu.h
+++ b/linux-user/aarch64/target_cpu.h
@@ -34,10 +34,13 @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags)
static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
{
- /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
+ /*
+ * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
* different from AArch32 Linux, which uses TPIDRRO.
*/
env->cp15.tpidr_el[0] = newtls;
+ /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */
+ env->cp15.tpidr2_el0 = 0;
}
static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
diff --git a/linux-user/aarch64/target_flat.h b/linux-user/aarch64/target_flat.h
new file mode 100644
index 0000000000..bc83224cea
--- /dev/null
+++ b/linux-user/aarch64/target_flat.h
@@ -0,0 +1 @@
+#include "../generic/target_flat.h"
diff --git a/linux-user/aarch64/target_mman.h b/linux-user/aarch64/target_mman.h
new file mode 100644
index 0000000000..69ec5d5739
--- /dev/null
+++ b/linux-user/aarch64/target_mman.h
@@ -0,0 +1,22 @@
+#ifndef AARCH64_TARGET_MMAN_H
+#define AARCH64_TARGET_MMAN_H
+
+#define TARGET_PROT_BTI 0x10
+#define TARGET_PROT_MTE 0x20
+
+/*
+ * arch/arm64/include/asm/processor.h:
+ *
+ * TASK_UNMAPPED_BASE DEFAULT_MAP_WINDOW / 4
+ * DEFAULT_MAP_WINDOW DEFAULT_MAP_WINDOW_64
+ * DEFAULT_MAP_WINDOW_64 UL(1) << VA_BITS_MIN
+ * VA_BITS_MIN 48 (unless explicitly configured smaller)
+ */
+#define TASK_UNMAPPED_BASE (1ull << (48 - 2))
+
+/* arch/arm64/include/asm/elf.h */
+#define ELF_ET_DYN_BASE TARGET_PAGE_ALIGN((1ull << 48) / 3 * 2)
+
+#include "../generic/target_mman.h"
+
+#endif
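
The two constants follow the kernel formulas quoted in the comments; with the assumed 48-bit VA space they work out as below. A hypothetical compile-time check, not part of the patch:

#include <assert.h>

/* TASK_UNMAPPED_BASE = DEFAULT_MAP_WINDOW / 4 = 2^48 / 4 = 2^46 (64 TiB) */
static_assert((1ull << (48 - 2)) == 0x400000000000ull,
              "task unmapped base is 2^46");

/* ELF_ET_DYN_BASE is the page-aligned two-thirds point of the 48-bit space */
static_assert((1ull << 48) / 3 * 2 == 0xaaaaaaaaaaaaull,
              "ET_DYN base before page alignment");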
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
new file mode 100644
index 0000000000..aa8e203c15
--- /dev/null
+++ b/linux-user/aarch64/target_prctl.h
@@ -0,0 +1,227 @@
+/*
+ * AArch64 specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef AARCH64_TARGET_PRCTL_H
+#define AARCH64_TARGET_PRCTL_H
+
+#include "target/arm/cpu-features.h"
+
+static abi_long do_prctl_sve_get_vl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ /* PSTATE.SM is always unset on syscall entry. */
+ return sve_vq(env) * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_sve_get_vl do_prctl_sve_get_vl
+
+static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
+{
+ /*
+ * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
+ * i.e. VL=8192, even though the current architectural maximum is VQ=16.
+ */
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env))
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
+ uint32_t vq, old_vq;
+
+ /* PSTATE.SM is always unset on syscall entry. */
+ old_vq = sve_vq(env);
+
+ /*
+ * Bound the value of arg2, so that we know that it fits into
+ * the 4-bit field in ZCR_EL1. Rely on the hflags rebuild to
+ * sort out the length supported by the cpu.
+ */
+ vq = MAX(arg2 / 16, 1);
+ vq = MIN(vq, ARM_MAX_VQ);
+ env->vfp.zcr_el[1] = vq - 1;
+ arm_rebuild_hflags(env);
+
+ vq = sve_vq(env);
+ if (vq < old_vq) {
+ aarch64_sve_narrow_vq(env, vq);
+ }
+ return vq * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_sve_set_vl do_prctl_sve_set_vl
+
+static abi_long do_prctl_sme_get_vl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ if (cpu_isar_feature(aa64_sme, cpu)) {
+ return sme_vq(env) * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_sme_get_vl do_prctl_sme_get_vl
+
+static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
+{
+ /*
+ * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
+ * i.e. VL=8192, even though the architectural maximum is VQ=16.
+ */
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
+ int vq, old_vq;
+
+ old_vq = sme_vq(env);
+
+ /*
+ * Bound the value of vq, so that we know that it fits into
+ * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared
+ * on syscall entry, we are not modifying the current SVE
+ * vector length.
+ */
+ vq = MAX(arg2 / 16, 1);
+ vq = MIN(vq, 16);
+ env->vfp.smcr_el[1] =
+ FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);
+
+ /* Delay rebuilding hflags until we know if ZA must change. */
+ vq = sve_vqm1_for_el_sm(env, 0, true) + 1;
+
+ if (vq != old_vq) {
+ /*
+ * PSTATE.ZA state is cleared on any change to SVL.
+ * We need not call arm_rebuild_hflags because PSTATE.SM was
+ * cleared on syscall entry, so this hasn't changed VL.
+ */
+ env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
+ arm_rebuild_hflags(env);
+ }
+ return vq * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_sme_set_vl do_prctl_sme_set_vl
+
+static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
+ PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
+ int ret = 0;
+ Error *err = NULL;
+
+ if (arg2 == 0) {
+ arg2 = all;
+ } else if (arg2 & ~all) {
+ return -TARGET_EINVAL;
+ }
+ if (arg2 & PR_PAC_APIAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apia,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APIBKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apib,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APDAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apda,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APDBKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apdb,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APGAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apga,
+ sizeof(ARMPACKey), &err);
+ }
+ if (ret != 0) {
+ /*
+ * Some unknown failure in the crypto. The best
+ * we can do is log it and fail the syscall.
+ * The real syscall cannot fail this way.
+ */
+ qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
+ error_get_pretty(err));
+ error_free(err);
+ return -TARGET_EIO;
+ }
+ return 0;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_reset_keys do_prctl_reset_keys
+
+static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
+{
+ abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ valid_mask |= PR_MTE_TCF_MASK;
+ valid_mask |= PR_MTE_TAG_MASK;
+ }
+
+ if (arg2 & ~valid_mask) {
+ return -TARGET_EINVAL;
+ }
+ env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;
+
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ /*
+ * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
+ *
+ * The kernel has a per-cpu configuration for the sysadmin,
+ * /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred,
+ * which qemu does not implement.
+ *
+ * Because there is no performance difference between the modes, and
+ * because SYNC is most useful for debugging MTE errors, choose SYNC
+ * as the preferred mode. With this preference, and the way the API
+ * uses only two bits, there is no way for the program to select
+ * ASYMM mode.
+ */
+ unsigned tcf = 0;
+ if (arg2 & PR_MTE_TCF_SYNC) {
+ tcf = 1;
+ } else if (arg2 & PR_MTE_TCF_ASYNC) {
+ tcf = 2;
+ }
+ env->cp15.sctlr_el[1] = deposit64(env->cp15.sctlr_el[1], 38, 2, tcf);
+
+ /*
+ * Write PR_MTE_TAG to GCR_EL1[Exclude].
+ * Note that the syscall uses an include mask,
+ * and hardware uses an exclude mask -- invert.
+ */
+ env->cp15.gcr_el1 =
+ deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
+ arm_rebuild_hflags(env);
+ }
+ return 0;
+}
+#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl
+
+static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ abi_long ret = 0;
+
+ if (env->tagged_addr_enable) {
+ ret |= PR_TAGGED_ADDR_ENABLE;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ /* See do_prctl_set_tagged_addr_ctrl. */
+ ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
+ ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
+ }
+ return ret;
+}
+#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
+
+#endif /* AARCH64_TARGET_PRCTL_H */
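
From the guest's point of view these handlers implement the ordinary Linux prctl interface. A small usage sketch, assuming kernel headers recent enough to define the PR_SVE_* constants (glibc's sys/prctl.h pulls them in via linux/prctl.h):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
    int vl = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);
    if (vl < 0) {
        perror("PR_SVE_GET_VL");            /* EINVAL if SVE is absent */
        return 1;
    }
    printf("current SVE VL: %d bytes\n", vl & PR_SVE_VL_LEN_MASK);

    /* Request a 32-byte (256-bit) vector length; the kernel -- and the
     * emulation above -- clamps to what the cpu actually supports. */
    vl = prctl(PR_SVE_SET_VL, 32, 0, 0, 0);
    if (vl >= 0) {
        printf("new SVE VL: %d bytes\n", vl & PR_SVE_VL_LEN_MASK);
    }
    return 0;
}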
diff --git a/linux-user/aarch64/target_proc.h b/linux-user/aarch64/target_proc.h
new file mode 100644
index 0000000000..907df4dcd2
--- /dev/null
+++ b/linux-user/aarch64/target_proc.h
@@ -0,0 +1 @@
+#include "../arm/target_proc.h"
diff --git a/linux-user/aarch64/target_resource.h b/linux-user/aarch64/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/aarch64/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
index 18013e1b23..40e399d990 100644
--- a/linux-user/aarch64/target_signal.h
+++ b/linux-user/aarch64/target_signal.h
@@ -1,28 +1,12 @@
#ifndef AARCH64_TARGET_SIGNAL_H
#define AARCH64_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* AARCH64_TARGET_SIGNAL_H */
diff --git a/linux-user/aarch64/target_structs.h b/linux-user/aarch64/target_structs.h
index 7c748344ca..3a06f373c3 100644
--- a/linux-user/aarch64/target_structs.h
+++ b/linux-user/aarch64/target_structs.h
@@ -1,58 +1 @@
-/*
- * ARM AArch64 specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef AARCH64_TARGET_STRUCTS_H
-#define AARCH64_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
index 76f6c3391d..c055133725 100644
--- a/linux-user/aarch64/target_syscall.h
+++ b/linux-user/aarch64/target_syscall.h
@@ -8,39 +8,15 @@ struct target_pt_regs {
uint64_t pstate;
};
-#if defined(TARGET_WORDS_BIGENDIAN)
+#if TARGET_BIG_ENDIAN
#define UNAME_MACHINE "aarch64_be"
#else
#define UNAME_MACHINE "aarch64"
#endif
#define UNAME_MINIMUM_RELEASE "3.8.0"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
-#define TARGET_PR_SVE_SET_VL 50
-#define TARGET_PR_SVE_GET_VL 51
-
-#define TARGET_PR_PAC_RESET_KEYS 54
-# define TARGET_PR_PAC_APIAKEY (1 << 0)
-# define TARGET_PR_PAC_APIBKEY (1 << 1)
-# define TARGET_PR_PAC_APDAKEY (1 << 2)
-# define TARGET_PR_PAC_APDBKEY (1 << 3)
-# define TARGET_PR_PAC_APGAKEY (1 << 4)
-
-#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55
-#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56
-# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0)
-/* MTE tag check fault modes */
-# define TARGET_PR_MTE_TCF_SHIFT 1
-# define TARGET_PR_MTE_TCF_NONE (0UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_SYNC (1UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_ASYNC (2UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_MASK (3UL << TARGET_PR_MTE_TCF_SHIFT)
-/* MTE tag inclusion mask */
-# define TARGET_PR_MTE_TAG_SHIFT 3
-# define TARGET_PR_MTE_TAG_MASK (0xffffUL << TARGET_PR_MTE_TAG_SHIFT)
-
#endif /* AARCH64_TARGET_SYSCALL_H */
diff --git a/linux-user/aarch64/vdso-be.so b/linux-user/aarch64/vdso-be.so
new file mode 100755
index 0000000000..808206ade8
--- /dev/null
+++ b/linux-user/aarch64/vdso-be.so
Binary files differ
diff --git a/linux-user/aarch64/vdso-le.so b/linux-user/aarch64/vdso-le.so
new file mode 100755
index 0000000000..941aaf2993
--- /dev/null
+++ b/linux-user/aarch64/vdso-le.so
Binary files differ
diff --git a/linux-user/aarch64/vdso.S b/linux-user/aarch64/vdso.S
new file mode 100644
index 0000000000..a0ac1487b0
--- /dev/null
+++ b/linux-user/aarch64/vdso.S
@@ -0,0 +1,75 @@
+/*
+ * aarch64 linux replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+
+/* ??? These are in include/elf.h, which is not ready for inclusion in asm. */
+#define NT_GNU_PROPERTY_TYPE_0 5
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1U << 1)
+
+#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT \
+ (GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC)
+
+ .section .note.gnu.property
+ .align 3
+ .long 2f - 1f
+ .long 6f - 3f
+ .long NT_GNU_PROPERTY_TYPE_0
+1: .string "GNU"
+2: .align 3
+3: .long GNU_PROPERTY_AARCH64_FEATURE_1_AND
+ .long 5f - 4f
+4: .long GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
+5: .align 3
+6:
+
+ .text
+
+.macro endf name
+ .globl \name
+ .type \name, @function
+ .size \name, . - \name
+.endm
+
+.macro vdso_syscall name, nr
+\name:
+ bti c
+ mov x8, #\nr
+ svc #0
+ ret
+endf \name
+.endm
+
+ .cfi_startproc
+
+vdso_syscall __kernel_gettimeofday, __NR_gettimeofday
+vdso_syscall __kernel_clock_gettime, __NR_clock_gettime
+vdso_syscall __kernel_clock_getres, __NR_clock_getres
+
+ .cfi_endproc
+
+
+/*
+ * TODO: The kernel makes a big deal of turning off the .cfi directives,
+ * because they cause libgcc to crash, but that's because they're wrong.
+ *
+ * For now, elide the unwind info for __kernel_rt_sigreturn and rely on
+ * the libgcc fallback routine as we have always done. This requires
+ * that the code sequence used be exact.
+ *
+ * Add a nop as a spacer to ensure that unwind does not pick up the
+ * unwind info from the preceding syscall.
+ */
+ nop
+__kernel_rt_sigreturn:
+ /* No BTI C insn here -- we arrive via RET. */
+ mov x8, #__NR_rt_sigreturn
+ svc #0
+endf __kernel_rt_sigreturn
diff --git a/linux-user/aarch64/vdso.ld b/linux-user/aarch64/vdso.ld
new file mode 100644
index 0000000000..4c12f33352
--- /dev/null
+++ b/linux-user/aarch64/vdso.ld
@@ -0,0 +1,72 @@
+/*
+ * Linker script for linux aarch64 replacement vdso.
+ *
+ * Copyright 2021 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_2.6.39 {
+ global:
+ __kernel_rt_sigreturn;
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+
+ local: *;
+ };
+}
+
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS;
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ /*
+ * We can't prelink to any address without knowing something about
+ * the virtual memory space of the host, since that leaks over into
+ * the available memory space of the guest.
+ */
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ .data : {
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load =0xd503201f
+}
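
Guest programs never call these entry points by hard-coded address; libc locates the image through the auxiliary vector and binds clock_gettime() and friends to the __kernel_* symbols exported under the LINUX_2.6.39 version node above. A small sketch of the discovery step only, using the standard getauxval() interface:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
    unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
    if (vdso) {
        printf("vDSO ELF image mapped at 0x%lx\n", vdso);
        /* libc parses this ELF image and resolves __kernel_clock_gettime,
         * __kernel_gettimeofday, __kernel_clock_getres from it. */
    } else {
        puts("no vDSO provided");
    }
    return 0;
}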
diff --git a/linux-user/alpha/cpu_loop.c b/linux-user/alpha/cpu_loop.c
index 1b00a81385..2ea039aa71 100644
--- a/linux-user/alpha/cpu_loop.c
+++ b/linux-user/alpha/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -27,8 +26,7 @@
void cpu_loop(CPUAlphaState *env)
{
CPUState *cs = env_cpu(env);
- int trapnr;
- target_siginfo_t info;
+ int trapnr, si_code;
abi_long sysret;
while (1) {
@@ -54,35 +52,12 @@ void cpu_loop(CPUAlphaState *env)
fprintf(stderr, "External interrupt. Exit\n");
exit(EXIT_FAILURE);
break;
- case EXCP_MMFAULT:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
- ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
- info._sifields._sigfault._addr = env->trap_arg0;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- case EXCP_UNALIGN:
- info.si_signo = TARGET_SIGBUS;
- info.si_errno = 0;
- info.si_code = TARGET_BUS_ADRALN;
- info._sifields._sigfault._addr = env->trap_arg0;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case EXCP_OPCDEC:
do_sigill:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLOPC;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc);
break;
case EXCP_ARITH:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_FLTINV;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_FLTINV, env->pc);
break;
case EXCP_FEN:
/* No-op. Linux simply re-enables the FPU. */
@@ -91,20 +66,10 @@ void cpu_loop(CPUAlphaState *env)
switch (env->error_code) {
case 0x80:
/* BPT */
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
+ goto do_sigtrap_brkpt;
case 0x81:
/* BUGCHK */
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = 0;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
+ goto do_sigtrap_unk;
case 0x83:
/* CALLSYS */
trapnr = env->ir[IR_V0];
@@ -113,11 +78,11 @@ void cpu_loop(CPUAlphaState *env)
env->ir[IR_A2], env->ir[IR_A3],
env->ir[IR_A4], env->ir[IR_A5],
0, 0);
- if (sysret == -TARGET_ERESTARTSYS) {
+ if (sysret == -QEMU_ERESTARTSYS) {
env->pc -= 4;
break;
}
- if (sysret == -TARGET_QEMU_ESIGRETURN) {
+ if (sysret == -QEMU_ESIGRETURN) {
break;
}
/* Syscall writes 0 to V0 to bypass error check, similar
@@ -145,47 +110,43 @@ void cpu_loop(CPUAlphaState *env)
abort();
case 0xAA:
/* GENTRAP */
- info.si_signo = TARGET_SIGFPE;
switch (env->ir[IR_A0]) {
case TARGET_GEN_INTOVF:
- info.si_code = TARGET_FPE_INTOVF;
+ si_code = TARGET_FPE_INTOVF;
break;
case TARGET_GEN_INTDIV:
- info.si_code = TARGET_FPE_INTDIV;
+ si_code = TARGET_FPE_INTDIV;
break;
case TARGET_GEN_FLTOVF:
- info.si_code = TARGET_FPE_FLTOVF;
+ si_code = TARGET_FPE_FLTOVF;
break;
case TARGET_GEN_FLTUND:
- info.si_code = TARGET_FPE_FLTUND;
+ si_code = TARGET_FPE_FLTUND;
break;
case TARGET_GEN_FLTINV:
- info.si_code = TARGET_FPE_FLTINV;
+ si_code = TARGET_FPE_FLTINV;
break;
case TARGET_GEN_FLTINE:
- info.si_code = TARGET_FPE_FLTRES;
+ si_code = TARGET_FPE_FLTRES;
break;
case TARGET_GEN_ROPRAND:
- info.si_code = 0;
+ si_code = TARGET_FPE_FLTUNK;
break;
default:
- info.si_signo = TARGET_SIGTRAP;
- info.si_code = 0;
- break;
+ goto do_sigtrap_unk;
}
- info.si_errno = 0;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGFPE, si_code, env->pc);
break;
default:
goto do_sigill;
}
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ do_sigtrap_brkpt:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
+ break;
+ do_sigtrap_unk:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_UNK, env->pc);
break;
case EXCP_INTERRUPT:
/* Just indicate that signals should be handled asap. */
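
The GENTRAP path above is easiest to exercise from a guest with an integer division by zero: Alpha has no hardware divide, and the compiler-supplied division routine is expected to issue the GENTRAP PALcall with GEN_INTDIV on a zero divisor (an assumption about the guest toolchain's default behavior). A hedged test sketch:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uc)
{
    (void)sig;
    (void)uc;
    /* Expect the FPE_INTDIV mapping added in the hunk above. */
    _exit(si->si_code == FPE_INTDIV ? 0 : 1);
}

int main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGFPE, &sa, NULL);

    volatile long num = 1, den = 0;
    return (int)(num / den);    /* division routine should raise GENTRAP */
}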
diff --git a/linux-user/alpha/signal.c b/linux-user/alpha/signal.c
index 3a820f616b..896c2c148a 100644
--- a/linux-user/alpha/signal.c
+++ b/linux-user/alpha/signal.c
@@ -55,13 +55,11 @@ struct target_ucontext {
struct target_sigframe {
struct target_sigcontext sc;
- unsigned int retcode[3];
};
struct target_rt_sigframe {
target_siginfo_t info;
struct target_ucontext uc;
- unsigned int retcode[3];
};
#define INSN_MOV_R30_R16 0x47fe0410
@@ -142,12 +140,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
if (ka->ka_restorer) {
r26 = ka->ka_restorer;
} else {
- __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
- __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
- &frame->retcode[1]);
- __put_user(INSN_CALLSYS, &frame->retcode[2]);
- /* imb() */
- r26 = frame_addr + offsetof(struct target_sigframe, retcode);
+ r26 = default_sigreturn;
}
unlock_user_struct(frame, frame_addr, 1);
@@ -180,7 +173,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
goto give_sigsegv;
}
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
__put_user(0, &frame->uc.tuc_flags);
__put_user(0, &frame->uc.tuc_link);
@@ -196,12 +189,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
if (ka->ka_restorer) {
r26 = ka->ka_restorer;
} else {
- __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
- __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
- &frame->retcode[1]);
- __put_user(INSN_CALLSYS, &frame->retcode[2]);
- /* imb(); */
- r26 = frame_addr + offsetof(struct target_rt_sigframe, retcode);
+ r26 = default_rt_sigreturn;
}
if (err) {
@@ -237,11 +225,11 @@ long do_sigreturn(CPUAlphaState *env)
restore_sigcontext(env, sc);
unlock_user_struct(sc, sc_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUAlphaState *env)
@@ -261,11 +249,29 @@ long do_rt_sigreturn(CPUAlphaState *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6 * 4, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ __put_user(INSN_MOV_R30_R16, &tramp[0]);
+ __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, &tramp[1]);
+ __put_user(INSN_CALLSYS, &tramp[2]);
+
+ default_rt_sigreturn = sigtramp_page + 3 * 4;
+ __put_user(INSN_MOV_R30_R16, &tramp[3]);
+ __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, &tramp[4]);
+ __put_user(INSN_CALLSYS, &tramp[5]);
+
+ unlock_user(tramp, sigtramp_page, 6 * 4);
}
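
For orientation, the guest-visible layout that setup_sigtramp() establishes is sketched below; the mnemonics are an annotation of the INSN_* constants used above (r30 is the Alpha stack pointer, r16 the first argument register, r0 the syscall-number register), not text from the patch.

    /*
     * Sigtramp page layout (sketch):
     *
     *   default_sigreturn    = page + 0x00
     *     0x00  mov  sp, a0                  ; INSN_MOV_R30_R16
     *     0x04  ldi  v0, __NR_sigreturn      ; INSN_LDI_R0 + NR
     *     0x08  callsys                      ; INSN_CALLSYS
     *
     *   default_rt_sigreturn = page + 0x0c
     *     0x0c  mov  sp, a0
     *     0x10  ldi  v0, __NR_rt_sigreturn
     *     0x14  callsys
     */
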
diff --git a/linux-user/alpha/target_elf.h b/linux-user/alpha/target_elf.h
index 344e9f4d39..b77d638f6d 100644
--- a/linux-user/alpha/target_elf.h
+++ b/linux-user/alpha/target_elf.h
@@ -9,6 +9,6 @@
#define ALPHA_TARGET_ELF_H
static inline const char *cpu_get_model(uint32_t eflags)
{
- return "any";
+ return "ev67";
}
#endif
diff --git a/linux-user/alpha/target_mman.h b/linux-user/alpha/target_mman.h
new file mode 100644
index 0000000000..8edfe2b88c
--- /dev/null
+++ b/linux-user/alpha/target_mman.h
@@ -0,0 +1,36 @@
+#ifndef ALPHA_TARGET_MMAN_H
+#define ALPHA_TARGET_MMAN_H
+
+#define TARGET_MAP_ANONYMOUS 0x10
+#define TARGET_MAP_FIXED 0x100
+#define TARGET_MAP_GROWSDOWN 0x01000
+#define TARGET_MAP_DENYWRITE 0x02000
+#define TARGET_MAP_EXECUTABLE 0x04000
+#define TARGET_MAP_LOCKED 0x08000
+#define TARGET_MAP_NORESERVE 0x10000
+#define TARGET_MAP_POPULATE 0x20000
+#define TARGET_MAP_NONBLOCK 0x40000
+#define TARGET_MAP_STACK 0x80000
+#define TARGET_MAP_HUGETLB 0x100000
+#define TARGET_MAP_FIXED_NOREPLACE 0x200000
+
+#define TARGET_MADV_DONTNEED 6
+
+#define TARGET_MS_ASYNC 1
+#define TARGET_MS_SYNC 2
+#define TARGET_MS_INVALIDATE 4
+
+/*
+ * arch/alpha/include/asm/processor.h:
+ *
+ * TASK_UNMAPPED_BASE TASK_SIZE / 2
+ * TASK_SIZE 0x40000000000UL
+ */
+#define TASK_UNMAPPED_BASE 0x20000000000ull
+
+/* arch/alpha/include/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
+#include "../generic/target_mman.h"
+
+#endif
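
The TASK_UNMAPPED_BASE value above is just the quoted kernel formula evaluated; a compile-time restatement of the arithmetic (a sketch, not part of the patch):

    /* TASK_SIZE / 2, per the arch/alpha comment quoted above. */
    _Static_assert(0x40000000000ull / 2 == 0x20000000000ull,
                   "Alpha TASK_UNMAPPED_BASE == TASK_SIZE / 2");
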
diff --git a/linux-user/alpha/target_prctl.h b/linux-user/alpha/target_prctl.h
new file mode 100644
index 0000000000..5629ddbf39
--- /dev/null
+++ b/linux-user/alpha/target_prctl.h
@@ -0,0 +1 @@
+#include "../generic/target_prctl_unalign.h"
diff --git a/linux-user/alpha/target_proc.h b/linux-user/alpha/target_proc.h
new file mode 100644
index 0000000000..dac37dffc9
--- /dev/null
+++ b/linux-user/alpha/target_proc.h
@@ -0,0 +1,67 @@
+/*
+ * Alpha specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef ALPHA_TARGET_PROC_H
+#define ALPHA_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int max_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ unsigned long cpu_mask;
+ char model[32];
+ const char *p, *q;
+ int t;
+
+ p = object_class_get_name(OBJECT_CLASS(CPU_GET_CLASS(env_cpu(cpu_env))));
+ q = strchr(p, '-');
+ t = q - p;
+ assert(t < sizeof(model));
+ memcpy(model, p, t);
+ model[t] = 0;
+
+ t = sched_getaffinity(getpid(), sizeof(cpu_mask), (cpu_set_t *)&cpu_mask);
+ if (t < 0) {
+ if (num_cpus >= sizeof(cpu_mask) * 8) {
+ cpu_mask = -1;
+ } else {
+ cpu_mask = (1UL << num_cpus) - 1;
+ }
+ }
+
+ dprintf(fd,
+ "cpu\t\t\t: Alpha\n"
+ "cpu model\t\t: %s\n"
+ "cpu variation\t\t: 0\n"
+ "cpu revision\t\t: 0\n"
+ "cpu serial number\t: JA00000000\n"
+ "system type\t\t: QEMU\n"
+ "system variation\t: QEMU_v" QEMU_VERSION "\n"
+ "system revision\t\t: 0\n"
+ "system serial number\t: AY00000000\n"
+ "cycle frequency [Hz]\t: 250000000\n"
+ "timer frequency [Hz]\t: 250.00\n"
+ "page size [bytes]\t: %d\n"
+ "phys. address bits\t: %d\n"
+ "max. addr. space #\t: 255\n"
+ "BogoMIPS\t\t: 2500.00\n"
+ "kernel unaligned acc\t: 0 (pc=0,va=0)\n"
+ "user unaligned acc\t: 0 (pc=0,va=0)\n"
+ "platform string\t\t: AlphaServer QEMU user-mode VM\n"
+ "cpus detected\t\t: %d\n"
+ "cpus active\t\t: %d\n"
+ "cpu active mask\t\t: %016lx\n"
+ "L1 Icache\t\t: n/a\n"
+ "L1 Dcache\t\t: n/a\n"
+ "L2 cache\t\t: n/a\n"
+ "L3 cache\t\t: n/a\n",
+ model, TARGET_PAGE_SIZE, TARGET_PHYS_ADDR_SPACE_BITS,
+ max_cpus, num_cpus, cpu_mask);
+
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* ALPHA_TARGET_PROC_H */
diff --git a/linux-user/alpha/target_resource.h b/linux-user/alpha/target_resource.h
new file mode 100644
index 0000000000..c9b082faee
--- /dev/null
+++ b/linux-user/alpha/target_resource.h
@@ -0,0 +1,21 @@
+#ifndef ALPHA_TARGET_RESOURCE_H
+#define ALPHA_TARGET_RESOURCE_H
+
+#include "../generic/target_resource.h"
+
+#undef TARGET_RLIM_INFINITY
+#define TARGET_RLIM_INFINITY 0x7fffffffffffffffull
+
+#undef TARGET_RLIMIT_NOFILE
+#define TARGET_RLIMIT_NOFILE 6
+
+#undef TARGET_RLIMIT_AS
+#define TARGET_RLIMIT_AS 7
+
+#undef TARGET_RLIMIT_NPROC
+#define TARGET_RLIMIT_NPROC 8
+
+#undef TARGET_RLIMIT_MEMLOCK
+#define TARGET_RLIMIT_MEMLOCK 9
+
+#endif
diff --git a/linux-user/alpha/target_signal.h b/linux-user/alpha/target_signal.h
index 250642913e..bbb06e5463 100644
--- a/linux-user/alpha/target_signal.h
+++ b/linux-user/alpha/target_signal.h
@@ -62,7 +62,6 @@ typedef struct target_sigaltstack {
#define TARGET_SA_SIGINFO 0x00000040
#define TARGET_MINSIGSTKSZ 4096
-#define TARGET_SIGSTKSZ 16384
/* From <asm/gentrap.h>. */
#define TARGET_GEN_INTOVF -1 /* integer overflow */
@@ -93,6 +92,7 @@ typedef struct target_sigaltstack {
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_KA_RESTORER
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
diff --git a/linux-user/alpha/target_syscall.h b/linux-user/alpha/target_syscall.h
index 03091bf0a8..fda3a49f29 100644
--- a/linux-user/alpha/target_syscall.h
+++ b/linux-user/alpha/target_syscall.h
@@ -63,7 +63,6 @@ struct target_pt_regs {
#define TARGET_UAC_NOPRINT 1
#define TARGET_UAC_NOFIX 2
#define TARGET_UAC_SIGBUS 4
-#define TARGET_MINSIGSTKSZ 4096
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
diff --git a/linux-user/arm/Makefile.vdso b/linux-user/arm/Makefile.vdso
new file mode 100644
index 0000000000..2d098a5748
--- /dev/null
+++ b/linux-user/arm/Makefile.vdso
@@ -0,0 +1,17 @@
+include $(BUILD_DIR)/tests/tcg/arm-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/arm
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so
+
+# Adding -use-blx disables unneeded interworking without actually using blx.
+LDFLAGS = -nostdlib -shared -Wl,-use-blx \
+ -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \
+ -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld
+
+$(SUBDIR)/vdso-be.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS) -mbig-endian $<
+
+$(SUBDIR)/vdso-le.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS) -mlittle-endian $<
diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c
index ae09adcb95..db1a41e27f 100644
--- a/linux-user/arm/cpu_loop.c
+++ b/linux-user/arm/cpu_loop.c
@@ -18,13 +18,13 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "elf.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
#include "semihosting/common-semi.h"
+#include "target/arm/syndrome.h"
#define get_user_code_u32(x, gaddr, env) \
({ abi_long __r = get_user_u32((x), (gaddr)); \
@@ -74,10 +74,71 @@
put_user_u16(__x, (gaddr)); \
})
-/* Commpage handling -- there is no commpage for AArch64 */
+/*
+ * Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
+ * Must be called with mmap_lock.
+ * We get the PC of the entry address - which is as good as anything;
+ * on a real kernel, what you get depends on which mode it uses.
+ */
+static void *atomic_mmu_lookup(CPUArchState *env, uint32_t addr, int size)
+{
+ int need_flags = PAGE_READ | PAGE_WRITE_ORG | PAGE_VALID;
+ int page_flags;
+
+ /* Enforce guest required alignment. */
+ if (unlikely(addr & (size - 1))) {
+ force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
+ return NULL;
+ }
+
+ page_flags = page_get_flags(addr);
+ if (unlikely((page_flags & need_flags) != need_flags)) {
+ force_sig_fault(TARGET_SIGSEGV,
+ page_flags & PAGE_VALID ?
+ TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
+ return NULL;
+ }
+
+ return g2h(env_cpu(env), addr);
+}
+
+/*
+ * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
+ * Input:
+ * r0 = oldval
+ * r1 = newval
+ * r2 = pointer to target value
+ *
+ * Output:
+ * r0 = 0 if *ptr was changed, non-0 if no exchange happened
+ * C set if *ptr was changed, clear if no exchange happened
+ */
+static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
+{
+ uint32_t oldval, newval, val, addr, cpsr, *host_addr;
+
+ /* Swap if host != guest endianness, for the host cmpxchg below */
+ oldval = tswap32(env->regs[0]);
+ newval = tswap32(env->regs[1]);
+ addr = env->regs[2];
+
+ mmap_lock();
+ host_addr = atomic_mmu_lookup(env, addr, 4);
+ if (!host_addr) {
+ mmap_unlock();
+ return;
+ }
+
+ val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
+ mmap_unlock();
+
+ cpsr = (val == oldval) * CPSR_C;
+ cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
+ env->regs[0] = cpsr ? 0 : -1;
+}
/*
- * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
+ * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
* Input:
* r0 = pointer to oldval
* r1 = pointer to newval
@@ -94,57 +155,58 @@ static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
uint64_t oldval, newval, val;
uint32_t addr, cpsr;
+ uint64_t *host_addr;
- /* Based on the 32 bit code in do_kernel_trap */
-
- /* XXX: This only works between threads, not between processes.
- It's probably possible to implement this with native host
- operations. However things like ldrex/strex are much harder so
- there's not much point trying. */
- start_exclusive();
- cpsr = cpsr_read(env);
- addr = env->regs[2];
-
- if (get_user_u64(oldval, env->regs[0])) {
- env->exception.vaddress = env->regs[0];
- goto segv;
- };
-
- if (get_user_u64(newval, env->regs[1])) {
- env->exception.vaddress = env->regs[1];
+ addr = env->regs[0];
+ if (get_user_u64(oldval, addr)) {
goto segv;
- };
+ }
- if (get_user_u64(val, addr)) {
- env->exception.vaddress = addr;
+ addr = env->regs[1];
+ if (get_user_u64(newval, addr)) {
goto segv;
}
- if (val == oldval) {
- val = newval;
+ mmap_lock();
+ addr = env->regs[2];
+ host_addr = atomic_mmu_lookup(env, addr, 8);
+ if (!host_addr) {
+ mmap_unlock();
+ return;
+ }
- if (put_user_u64(val, addr)) {
- env->exception.vaddress = addr;
- goto segv;
- };
+ /* Swap if host != guest endianness, for the host cmpxchg below */
+ oldval = tswap64(oldval);
+ newval = tswap64(newval);
- env->regs[0] = 0;
- cpsr |= CPSR_C;
+#ifdef CONFIG_ATOMIC64
+ val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
+ cpsr = (val == oldval) * CPSR_C;
+#else
+ /*
+ * This only works between threads, not between processes, but since
+ * the host has no 64-bit cmpxchg, it is the best that we can do.
+ */
+ start_exclusive();
+ val = *host_addr;
+ if (val == oldval) {
+ *host_addr = newval;
+ cpsr = CPSR_C;
} else {
- env->regs[0] = -1;
- cpsr &= ~CPSR_C;
+ cpsr = 0;
}
- cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
end_exclusive();
+#endif
+ mmap_unlock();
+
+ cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
+ env->regs[0] = cpsr ? 0 : -1;
return;
-segv:
- end_exclusive();
- /* We get the PC of the entry address - which is as good as anything,
- on a real kernel what you get depends on which mode it uses. */
- /* XXX: check env->error_code */
- force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
- env->exception.vaddress);
+ segv:
+ force_sig_fault(TARGET_SIGSEGV,
+ page_get_flags(addr) & PAGE_VALID ?
+ TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
}
/* Handle a jump to the kernel code page. */
@@ -152,36 +214,13 @@ static int
do_kernel_trap(CPUARMState *env)
{
uint32_t addr;
- uint32_t cpsr;
- uint32_t val;
switch (env->regs[15]) {
case 0xffff0fa0: /* __kernel_memory_barrier */
- /* ??? No-op. Will need to do better for SMP. */
+ smp_mb();
break;
case 0xffff0fc0: /* __kernel_cmpxchg */
- /* XXX: This only works between threads, not between processes.
- It's probably possible to implement this with native host
- operations. However things like ldrex/strex are much harder so
- there's not much point trying. */
- start_exclusive();
- cpsr = cpsr_read(env);
- addr = env->regs[2];
- /* FIXME: This should SEGV if the access fails. */
- if (get_user_u32(val, addr))
- val = ~env->regs[0];
- if (val == env->regs[0]) {
- val = env->regs[1];
- /* FIXME: Check for segfaults. */
- put_user_u32(val, addr);
- env->regs[0] = 0;
- cpsr |= CPSR_C;
- } else {
- env->regs[0] = -1;
- cpsr &= ~CPSR_C;
- }
- cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
- end_exclusive();
+ arm_kernel_cmpxchg32_helper(env);
break;
case 0xffff0fe0: /* __kernel_get_tls */
env->regs[0] = cpu_get_tls(env);
@@ -196,7 +235,7 @@ do_kernel_trap(CPUARMState *env)
/* Jump back to the caller. */
addr = env->regs[14];
if (addr & 1) {
- env->thumb = 1;
+ env->thumb = true;
addr &= ~1;
}
env->regs[15] = addr;
@@ -224,7 +263,7 @@ static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
{
- TaskState *ts = env_cpu(env)->opaque;
+ TaskState *ts = get_task_state(env_cpu(env));
int rc = EmulateAll(opcode, &ts->fpa, env);
int raise, enabled;
@@ -280,7 +319,7 @@ static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
void cpu_loop(CPUARMState *env)
{
CPUState *cs = env_cpu(env);
- int trapnr;
+ int trapnr, si_signo, si_code;
unsigned int n, insn;
abi_ulong ret;
@@ -322,7 +361,7 @@ void cpu_loop(CPUARMState *env)
break;
case EXCP_SWI:
{
- env->eabi = 1;
+ env->eabi = true;
/* system call */
if (env->thumb) {
/* Thumb is always EABI style with syscall number in r7 */
@@ -348,7 +387,7 @@ void cpu_loop(CPUARMState *env)
* > 0xfffff and are handled below as out-of-range.
*/
n ^= ARM_SYSCALL_BASE;
- env->eabi = 0;
+ env->eabi = false;
}
}
@@ -406,16 +445,16 @@ void cpu_loop(CPUARMState *env)
env->regs[4],
env->regs[5],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->regs[15] -= env->thumb ? 2 : 4;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->regs[0] = ret;
}
}
}
break;
case EXCP_SEMIHOST:
- env->regs[0] = do_common_semihosting(cs);
+ do_common_semihosting(cs);
env->regs[15] += env->thumb ? 2 : 4;
break;
case EXCP_INTERRUPT:
@@ -423,9 +462,30 @@ void cpu_loop(CPUARMState *env)
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- /* XXX: check env->error_code */
- force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
- env->exception.vaddress);
+ /* For user-only we don't set TTBCR_EAE, so look at the FSR. */
+ switch (env->exception.fsr & 0x1f) {
+ case 0x1: /* Alignment */
+ si_signo = TARGET_SIGBUS;
+ si_code = TARGET_BUS_ADRALN;
+ break;
+ case 0x3: /* Access flag fault, level 1 */
+ case 0x6: /* Access flag fault, level 2 */
+ case 0x9: /* Domain fault, level 1 */
+ case 0xb: /* Domain fault, level 2 */
+ case 0xd: /* Permission fault, level 1 */
+ case 0xf: /* Permission fault, level 2 */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_ACCERR;
+ break;
+ case 0x5: /* Translation fault, level 1 */
+ case 0x7: /* Translation fault, level 2 */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_MAPERR;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ force_sig_fault(si_signo, si_code, env->exception.vaddress);
break;
case EXCP_DEBUG:
case EXCP_BKPT:
@@ -454,7 +514,7 @@ void cpu_loop(CPUARMState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
int i;
@@ -463,7 +523,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
for(i = 0; i < 16; i++) {
env->regs[i] = regs->uregs[i];
}
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
/* Enable BE8. */
if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
&& (info->elf_flags & EF_ARM_BE8)) {
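
The rewritten __kernel_cmpxchg and __kernel_memory_barrier handlers above implement the commpage helpers described in the kernel's Documentation/arm/kernel_user_helpers.rst. For context, this is how a guest normally reaches them -- a sketch following the kernel documentation's calling convention, assuming the standard helper addresses 0xffff0fc0 and 0xffff0fa0:

    /* Guest-side use of the kuser helpers emulated above (sketch). */
    typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    typedef void (kuser_dmb_t)(void);

    #define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)
    #define __kuser_dmb     (*(kuser_dmb_t *)0xffff0fa0)

    static int atomic_add_return(volatile int *ptr, int val)
    {
        int old;

        do {
            old = *ptr;
        } while (__kuser_cmpxchg(old, old + val, ptr)); /* 0 means exchanged */

        __kuser_dmb();
        return old + val;
    }
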
diff --git a/linux-user/arm/meson.build b/linux-user/arm/meson.build
index 5a93c925cf..c4bb9af5b8 100644
--- a/linux-user/arm/meson.build
+++ b/linux-user/arm/meson.build
@@ -5,3 +5,15 @@ syscall_nr_generators += {
arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
output: '@BASENAME@_nr.h')
}
+
+# TARGET_BIG_ENDIAN is defined to 'n' for little-endian, which means it
+# is always true as far as source_set.apply() is concerned.  Always build
+# both header files and include the right one via #if.
+
+vdso_be_inc = gen_vdso.process('vdso-be.so',
+ extra_args: ['-s', 'sigreturn_codes'])
+
+vdso_le_inc = gen_vdso.process('vdso-le.so',
+ extra_args: ['-s', 'sigreturn_codes'])
+
+linux_user_ss.add(when: 'TARGET_ARM', if_true: [vdso_be_inc, vdso_le_inc])
diff --git a/linux-user/arm/nwfpe/double_cpdo.c b/linux-user/arm/nwfpe/double_cpdo.c
index 1cef380852..d45ece2e2f 100644
--- a/linux-user/arm/nwfpe/double_cpdo.c
+++ b/linux-user/arm/nwfpe/double_cpdo.c
@@ -150,7 +150,7 @@ unsigned int DoubleCPDO(const unsigned int opcode)
case MNF_CODE:
{
unsigned int *p = (unsigned int*)&rFm;
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
p[0] ^= 0x80000000;
#else
p[1] ^= 0x80000000;
@@ -162,7 +162,7 @@ unsigned int DoubleCPDO(const unsigned int opcode)
case ABS_CODE:
{
unsigned int *p = (unsigned int*)&rFm;
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
p[0] &= 0x7fffffff;
#else
p[1] &= 0x7fffffff;
diff --git a/linux-user/arm/nwfpe/fpa11_cpdt.c b/linux-user/arm/nwfpe/fpa11_cpdt.c
index c32b0c2faa..fee525937c 100644
--- a/linux-user/arm/nwfpe/fpa11_cpdt.c
+++ b/linux-user/arm/nwfpe/fpa11_cpdt.c
@@ -44,7 +44,7 @@ void loadDouble(const unsigned int Fn, target_ulong addr)
unsigned int *p;
p = (unsigned int*)&fpa11->fpreg[Fn].fDouble;
fpa11->fType[Fn] = typeDouble;
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
/* FIXME - handle failure of get_user() */
get_user_u32(p[0], addr); /* sign & exponent */
get_user_u32(p[1], addr + 4);
@@ -147,7 +147,7 @@ void storeDouble(const unsigned int Fn, target_ulong addr)
default: val = fpa11->fpreg[Fn].fDouble;
}
/* FIXME - handle put_user() failures */
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
put_user_u32(p[0], addr); /* msw */
put_user_u32(p[1], addr + 4); /* lsw */
#else
diff --git a/linux-user/arm/signal.c b/linux-user/arm/signal.c
index ed144f9455..8db1c4b233 100644
--- a/linux-user/arm/signal.c
+++ b/linux-user/arm/signal.c
@@ -21,6 +21,8 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
+#include "target/arm/cpu-features.h"
+#include "vdso-asmoffset.h"
struct target_sigcontext {
abi_ulong trap_no;
@@ -46,15 +48,7 @@ struct target_sigcontext {
abi_ulong fault_address;
};
-struct target_ucontext_v1 {
- abi_ulong tuc_flags;
- abi_ulong tuc_link;
- target_stack_t tuc_stack;
- struct target_sigcontext tuc_mcontext;
- target_sigset_t tuc_sigmask; /* mask last for extensibility */
-};
-
-struct target_ucontext_v2 {
+struct target_ucontext {
abi_ulong tuc_flags;
abi_ulong tuc_link;
target_stack_t tuc_stack;
@@ -98,68 +92,35 @@ struct target_iwmmxt_sigframe {
#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a
-struct sigframe_v1
-{
- struct target_sigcontext sc;
- abi_ulong extramask[TARGET_NSIG_WORDS-1];
- abi_ulong retcode[4];
-};
-
-struct sigframe_v2
-{
- struct target_ucontext_v2 uc;
- abi_ulong retcode[4];
-};
-
-struct rt_sigframe_v1
+struct sigframe
{
- abi_ulong pinfo;
- abi_ulong puc;
- struct target_siginfo info;
- struct target_ucontext_v1 uc;
+ struct target_ucontext uc;
abi_ulong retcode[4];
};
-struct rt_sigframe_v2
+struct rt_sigframe
{
struct target_siginfo info;
- struct target_ucontext_v2 uc;
- abi_ulong retcode[4];
+ struct sigframe sig;
};
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
+QEMU_BUILD_BUG_ON(offsetof(struct sigframe, retcode[3])
+ != SIGFRAME_RC3_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(struct rt_sigframe, sig.retcode[3])
+ != RT_SIGFRAME_RC3_OFFSET);
-/*
- * For Thumb syscalls, we pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
-#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
-
-static const abi_ulong retcodes[4] = {
- SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
- SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
-};
+static abi_ptr sigreturn_fdpic_tramp;
/*
- * Stub needed to make sure the FD register (r9) contains the right
- * value.
+ * Up to 3 words of 'retcode' in the sigframe are code,
+ * with retcode[3] being used by fdpic for the function descriptor.
+ * This code is not actually executed, but is retained for ABI compat.
+ *
+ * We will create a table of 8 retcode variants in the sigtramp page.
+ * Let each table entry use 3 words.
*/
-static const unsigned long sigreturn_fdpic_codes[3] = {
- 0xe59fc004, /* ldr r12, [pc, #4] to read function descriptor */
- 0xe59c9004, /* ldr r9, [r12, #4] to setup GOT */
- 0xe59cf000 /* ldr pc, [r12] to jump into restorer */
-};
-
-static const unsigned long sigreturn_fdpic_thumb_codes[3] = {
- 0xc008f8df, /* ldr r12, [pc, #8] to read function descriptor */
- 0x9004f8dc, /* ldr r9, [r12, #4] to setup GOT */
- 0xf000f8dc /* ldr pc, [r12] to jump into restorer */
-};
+#define RETCODE_WORDS 3
+#define RETCODE_BYTES (RETCODE_WORDS * 4)
static inline int valid_user_regs(CPUARMState *regs)
{
@@ -206,16 +167,19 @@ get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
return (sp - framesize) & ~7;
}
+static void write_arm_sigreturn(uint32_t *rc, int syscall);
+static void write_arm_fdpic_sigreturn(uint32_t *rc, int ofs);
+
static int
-setup_return(CPUARMState *env, struct target_sigaction *ka,
- abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
+setup_return(CPUARMState *env, struct target_sigaction *ka, int usig,
+ struct sigframe *frame, abi_ulong sp_addr)
{
abi_ulong handler = 0;
abi_ulong handler_fdpic_GOT = 0;
abi_ulong retcode;
-
- int thumb;
- int is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info);
+ bool is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info);
+ bool is_rt = ka->sa_flags & TARGET_SA_SIGINFO;
+ bool thumb;
if (is_fdpic) {
/* In FDPIC mode, ka->_sa_handler points to a function
@@ -230,7 +194,6 @@ setup_return(CPUARMState *env, struct target_sigaction *ka,
} else {
handler = ka->_sa_handler;
}
-
thumb = handler & 1;
uint32_t cpsr = cpsr_read(env);
@@ -247,46 +210,39 @@ setup_return(CPUARMState *env, struct target_sigaction *ka,
cpsr &= ~CPSR_E;
}
+ /* Our vdso default_sigreturn label is a table of entry points. */
+ retcode = default_sigreturn + (is_fdpic * 2 + is_rt) * 8;
+
+ /*
+ * Put the sigreturn code on the stack no matter which return
+ * mechanism we use in order to remain ABI compliant.
+ * Because this is about ABI, always use the A32 instructions,
+ * despite the fact that our actual vdso trampoline is T16.
+ */
+ if (is_fdpic) {
+ write_arm_fdpic_sigreturn(frame->retcode,
+ is_rt ? RT_SIGFRAME_RC3_OFFSET
+ : SIGFRAME_RC3_OFFSET);
+ } else {
+ write_arm_sigreturn(frame->retcode,
+ is_rt ? TARGET_NR_rt_sigreturn
+ : TARGET_NR_sigreturn);
+ }
+
if (ka->sa_flags & TARGET_SA_RESTORER) {
if (is_fdpic) {
- /* For FDPIC we ensure that the restorer is called with a
- * correct r9 value. For that we need to write code on
- * the stack that sets r9 and jumps back to restorer
- * value.
- */
- if (thumb) {
- __put_user(sigreturn_fdpic_thumb_codes[0], rc);
- __put_user(sigreturn_fdpic_thumb_codes[1], rc + 1);
- __put_user(sigreturn_fdpic_thumb_codes[2], rc + 2);
- __put_user((abi_ulong)ka->sa_restorer, rc + 3);
- } else {
- __put_user(sigreturn_fdpic_codes[0], rc);
- __put_user(sigreturn_fdpic_codes[1], rc + 1);
- __put_user(sigreturn_fdpic_codes[2], rc + 2);
- __put_user((abi_ulong)ka->sa_restorer, rc + 3);
- }
-
- retcode = rc_addr + thumb;
+ /* Place the function descriptor in slot 3. */
+ __put_user((abi_ulong)ka->sa_restorer, &frame->retcode[3]);
} else {
retcode = ka->sa_restorer;
}
- } else {
- unsigned int idx = thumb;
-
- if (ka->sa_flags & TARGET_SA_SIGINFO) {
- idx += 2;
- }
-
- __put_user(retcodes[idx], rc);
-
- retcode = rc_addr + thumb;
}
env->regs[0] = usig;
if (is_fdpic) {
env->regs[9] = handler_fdpic_GOT;
}
- env->regs[13] = frame_addr;
+ env->regs[13] = sp_addr;
env->regs[14] = retcode;
env->regs[15] = handler & (thumb ? ~1 : ~3);
cpsr_write(env, cpsr, CPSR_IT | CPSR_T | CPSR_E, CPSRWriteByInstr);
@@ -294,7 +250,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka,
return 0;
}
-static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
+static abi_ulong *setup_sigframe_vfp(abi_ulong *regspace, CPUARMState *env)
{
int i;
struct target_vfp_sigframe *vfpframe;
@@ -311,8 +267,7 @@ static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
return (abi_ulong*)(vfpframe+1);
}
-static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
- CPUARMState *env)
+static abi_ulong *setup_sigframe_iwmmxt(abi_ulong *regspace, CPUARMState *env)
{
int i;
struct target_iwmmxt_sigframe *iwmmxtframe;
@@ -331,15 +286,15 @@ static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
return (abi_ulong*)(iwmmxtframe+1);
}
-static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
- target_sigset_t *set, CPUARMState *env)
+static void setup_sigframe(struct target_ucontext *uc,
+ target_sigset_t *set, CPUARMState *env)
{
struct target_sigaltstack stack;
int i;
abi_ulong *regspace;
/* Clear all the bits of the ucontext we don't use. */
- memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
+ memset(uc, 0, offsetof(struct target_ucontext, tuc_mcontext));
memset(&stack, 0, sizeof(stack));
target_save_altstack(&stack, env);
@@ -349,10 +304,10 @@ static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
/* Save coprocessor signal frame. */
regspace = uc->tuc_regspace;
if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
- regspace = setup_sigframe_v2_vfp(regspace, env);
+ regspace = setup_sigframe_vfp(regspace, env);
}
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- regspace = setup_sigframe_v2_iwmmxt(regspace, env);
+ regspace = setup_sigframe_iwmmxt(regspace, env);
}
/* Write terminating magic word */
@@ -363,114 +318,23 @@ static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
}
}
-/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
-static void setup_frame_v1(int usig, struct target_sigaction *ka,
- target_sigset_t *set, CPUARMState *regs)
-{
- struct sigframe_v1 *frame;
- abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
- int i;
-
- trace_user_setup_frame(regs, frame_addr);
- if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
- goto sigsegv;
- }
-
- setup_sigcontext(&frame->sc, regs, set->sig[0]);
-
- for(i = 1; i < TARGET_NSIG_WORDS; i++) {
- __put_user(set->sig[i], &frame->extramask[i - 1]);
- }
-
- if (setup_return(regs, ka, frame->retcode, frame_addr, usig,
- frame_addr + offsetof(struct sigframe_v1, retcode))) {
- goto sigsegv;
- }
-
- unlock_user_struct(frame, frame_addr, 1);
- return;
-sigsegv:
- unlock_user_struct(frame, frame_addr, 1);
- force_sigsegv(usig);
-}
-
-static void setup_frame_v2(int usig, struct target_sigaction *ka,
- target_sigset_t *set, CPUARMState *regs)
-{
- struct sigframe_v2 *frame;
- abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
-
- trace_user_setup_frame(regs, frame_addr);
- if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
- goto sigsegv;
- }
-
- setup_sigframe_v2(&frame->uc, set, regs);
-
- if (setup_return(regs, ka, frame->retcode, frame_addr, usig,
- frame_addr + offsetof(struct sigframe_v2, retcode))) {
- goto sigsegv;
- }
-
- unlock_user_struct(frame, frame_addr, 1);
- return;
-sigsegv:
- unlock_user_struct(frame, frame_addr, 1);
- force_sigsegv(usig);
-}
-
void setup_frame(int usig, struct target_sigaction *ka,
target_sigset_t *set, CPUARMState *regs)
{
- if (get_osversion() >= 0x020612) {
- setup_frame_v2(usig, ka, set, regs);
- } else {
- setup_frame_v1(usig, ka, set, regs);
- }
-}
-
-/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
-static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
- target_siginfo_t *info,
- target_sigset_t *set, CPUARMState *env)
-{
- struct rt_sigframe_v1 *frame;
- abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
- struct target_sigaltstack stack;
- int i;
- abi_ulong info_addr, uc_addr;
+ struct sigframe *frame;
+ abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
- trace_user_setup_rt_frame(env, frame_addr);
+ trace_user_setup_frame(regs, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
goto sigsegv;
}
- info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
- __put_user(info_addr, &frame->pinfo);
- uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
- __put_user(uc_addr, &frame->puc);
- tswap_siginfo(&frame->info, info);
+ setup_sigframe(&frame->uc, set, regs);
- /* Clear all the bits of the ucontext we don't use. */
- memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
-
- memset(&stack, 0, sizeof(stack));
- target_save_altstack(&stack, env);
- memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
-
- setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
- for(i = 0; i < TARGET_NSIG_WORDS; i++) {
- __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
- }
-
- if (setup_return(env, ka, frame->retcode, frame_addr, usig,
- frame_addr + offsetof(struct rt_sigframe_v1, retcode))) {
+ if (setup_return(regs, ka, usig, frame, frame_addr)) {
goto sigsegv;
}
- env->regs[1] = info_addr;
- env->regs[2] = uc_addr;
-
unlock_user_struct(frame, frame_addr, 1);
return;
sigsegv:
@@ -478,11 +342,11 @@ sigsegv:
force_sigsegv(usig);
}
-static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
- target_siginfo_t *info,
- target_sigset_t *set, CPUARMState *env)
+void setup_rt_frame(int usig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set, CPUARMState *env)
{
- struct rt_sigframe_v2 *frame;
+ struct rt_sigframe *frame;
abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
abi_ulong info_addr, uc_addr;
@@ -491,14 +355,13 @@ static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
goto sigsegv;
}
- info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
- uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
- tswap_siginfo(&frame->info, info);
+ info_addr = frame_addr + offsetof(struct rt_sigframe, info);
+ uc_addr = frame_addr + offsetof(struct rt_sigframe, sig.uc);
+ frame->info = *info;
- setup_sigframe_v2(&frame->uc, set, env);
+ setup_sigframe(&frame->sig.uc, set, env);
- if (setup_return(env, ka, frame->retcode, frame_addr, usig,
- frame_addr + offsetof(struct rt_sigframe_v2, retcode))) {
+ if (setup_return(env, ka, usig, &frame->sig, frame_addr)) {
goto sigsegv;
}
@@ -512,17 +375,6 @@ sigsegv:
force_sigsegv(usig);
}
-void setup_rt_frame(int usig, struct target_sigaction *ka,
- target_siginfo_t *info,
- target_sigset_t *set, CPUARMState *env)
-{
- if (get_osversion() >= 0x020612) {
- setup_rt_frame_v2(usig, ka, info, set, env);
- } else {
- setup_rt_frame_v1(usig, ka, info, set, env);
- }
-}
-
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
@@ -553,55 +405,7 @@ restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
return err;
}
-static long do_sigreturn_v1(CPUARMState *env)
-{
- abi_ulong frame_addr;
- struct sigframe_v1 *frame = NULL;
- target_sigset_t set;
- sigset_t host_set;
- int i;
-
- /*
- * Since we stacked the signal on a 64-bit boundary,
- * then 'sp' should be word aligned here. If it's
- * not, then the user is trying to mess with us.
- */
- frame_addr = env->regs[13];
- trace_user_do_sigreturn(env, frame_addr);
- if (frame_addr & 7) {
- goto badframe;
- }
-
- if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
- goto badframe;
- }
-
- __get_user(set.sig[0], &frame->sc.oldmask);
- for(i = 1; i < TARGET_NSIG_WORDS; i++) {
- __get_user(set.sig[i], &frame->extramask[i - 1]);
- }
-
- target_to_host_sigset_internal(&host_set, &set);
- set_sigmask(&host_set);
-
- if (restore_sigcontext(env, &frame->sc)) {
- goto badframe;
- }
-
-#if 0
- /* Send SIGTRAP if we're single-stepping */
- if (ptrace_cancel_bpt(current))
- send_sig(SIGTRAP, current, 1);
-#endif
- unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
-
-badframe:
- force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
-}
-
-static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
+static abi_ulong *restore_sigframe_vfp(CPUARMState *env, abi_ulong *regspace)
{
int i;
abi_ulong magic, sz;
@@ -631,8 +435,8 @@ static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
return (abi_ulong*)(vfpframe + 1);
}
-static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
- abi_ulong *regspace)
+static abi_ulong *restore_sigframe_iwmmxt(CPUARMState *env,
+ abi_ulong *regspace)
{
int i;
abi_ulong magic, sz;
@@ -656,9 +460,9 @@ static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
return (abi_ulong*)(iwmmxtframe + 1);
}
-static int do_sigframe_return_v2(CPUARMState *env,
- target_ulong context_addr,
- struct target_ucontext_v2 *uc)
+static int do_sigframe_return(CPUARMState *env,
+ target_ulong context_addr,
+ struct target_ucontext *uc)
{
sigset_t host_set;
abi_ulong *regspace;
@@ -666,19 +470,20 @@ static int do_sigframe_return_v2(CPUARMState *env,
target_to_host_sigset(&host_set, &uc->tuc_sigmask);
set_sigmask(&host_set);
- if (restore_sigcontext(env, &uc->tuc_mcontext))
+ if (restore_sigcontext(env, &uc->tuc_mcontext)) {
return 1;
+ }
/* Restore coprocessor signal frame */
regspace = uc->tuc_regspace;
if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
- regspace = restore_sigframe_v2_vfp(env, regspace);
+ regspace = restore_sigframe_vfp(env, regspace);
if (!regspace) {
return 1;
}
}
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- regspace = restore_sigframe_v2_iwmmxt(env, regspace);
+ regspace = restore_sigframe_iwmmxt(env, regspace);
if (!regspace) {
return 1;
}
@@ -695,10 +500,10 @@ static int do_sigframe_return_v2(CPUARMState *env,
return 0;
}
-static long do_sigreturn_v2(CPUARMState *env)
+long do_sigreturn(CPUARMState *env)
{
abi_ulong frame_addr;
- struct sigframe_v2 *frame = NULL;
+ struct sigframe *frame = NULL;
/*
* Since we stacked the signal on a 64-bit boundary,
@@ -715,36 +520,25 @@ static long do_sigreturn_v2(CPUARMState *env)
goto badframe;
}
- if (do_sigframe_return_v2(env,
- frame_addr
- + offsetof(struct sigframe_v2, uc),
- &frame->uc)) {
+ if (do_sigframe_return(env,
+ frame_addr + offsetof(struct sigframe, uc),
+ &frame->uc)) {
goto badframe;
}
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
-}
-
-long do_sigreturn(CPUARMState *env)
-{
- if (get_osversion() >= 0x020612) {
- return do_sigreturn_v2(env);
- } else {
- return do_sigreturn_v1(env);
- }
+ return -QEMU_ESIGRETURN;
}
-static long do_rt_sigreturn_v1(CPUARMState *env)
+long do_rt_sigreturn(CPUARMState *env)
{
abi_ulong frame_addr;
- struct rt_sigframe_v1 *frame = NULL;
- sigset_t host_set;
+ struct rt_sigframe *frame = NULL;
/*
* Since we stacked the signal on a 64-bit boundary,
@@ -761,70 +555,92 @@ static long do_rt_sigreturn_v1(CPUARMState *env)
goto badframe;
}
- target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
- set_sigmask(&host_set);
-
- if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
+ if (do_sigframe_return(env,
+ frame_addr + offsetof(struct rt_sigframe, sig.uc),
+ &frame->sig.uc)) {
goto badframe;
}
- target_restore_altstack(&frame->uc.tuc_stack, env);
-
-#if 0
- /* Send SIGTRAP if we're single-stepping */
- if (ptrace_cancel_bpt(current))
- send_sig(SIGTRAP, current, 1);
-#endif
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
-static long do_rt_sigreturn_v2(CPUARMState *env)
-{
- abi_ulong frame_addr;
- struct rt_sigframe_v2 *frame = NULL;
+/*
+ * EABI syscalls pass the number via r7.
+ * Note that the kernel still adds the OABI syscall number to the trap,
+ * presumably for backward ABI compatibility with unwinders.
+ */
+#define ARM_MOV_R7_IMM(X) (0xe3a07000 | (X))
+#define ARM_SWI_SYS(X) (0xef000000 | (X) | ARM_SYSCALL_BASE)
- /*
- * Since we stacked the signal on a 64-bit boundary,
- * then 'sp' should be word aligned here. If it's
- * not, then the user is trying to mess with us.
- */
- frame_addr = env->regs[13];
- trace_user_do_rt_sigreturn(env, frame_addr);
- if (frame_addr & 7) {
- goto badframe;
- }
+#define THUMB_MOVS_R7_IMM(X) (0x2700 | (X))
+#define THUMB_SWI_SYS 0xdf00
- if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
- goto badframe;
- }
+static void write_arm_sigreturn(uint32_t *rc, int syscall)
+{
+ __put_user(ARM_MOV_R7_IMM(syscall), rc);
+ __put_user(ARM_SWI_SYS(syscall), rc + 1);
+ /* Wrote 8 of 12 bytes */
+}
- if (do_sigframe_return_v2(env,
- frame_addr
- + offsetof(struct rt_sigframe_v2, uc),
- &frame->uc)) {
- goto badframe;
- }
+static void write_thm_sigreturn(uint32_t *rc, int syscall)
+{
+ __put_user(THUMB_SWI_SYS << 16 | THUMB_MOVS_R7_IMM(syscall), rc);
+ /* Wrote 4 of 12 bytes */
+}
- unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+/*
+ * Stub needed to make sure the FD register (r9) contains the right value.
+ * Use the same instruction sequence as the kernel.
+ */
+static void write_arm_fdpic_sigreturn(uint32_t *rc, int ofs)
+{
+ assert(ofs <= 0xfff);
+ __put_user(0xe59d3000 | ofs, rc + 0); /* ldr r3, [sp, #ofs] */
+ __put_user(0xe8930908, rc + 1); /* ldm r3, { r3, r9 } */
+ __put_user(0xe12fff13, rc + 2); /* bx r3 */
+ /* Wrote 12 of 12 bytes */
+}
-badframe:
- unlock_user_struct(frame, frame_addr, 0);
- force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+static void write_thm_fdpic_sigreturn(void *vrc, int ofs)
+{
+ uint16_t *rc = vrc;
+
+ assert((ofs & ~0x3fc) == 0);
+ __put_user(0x9b00 | (ofs >> 2), rc + 0); /* ldr r3, [sp, #ofs] */
+ __put_user(0xcb0c, rc + 1); /* ldm r3, { r2, r3 } */
+ __put_user(0x4699, rc + 2); /* mov r9, r3 */
+ __put_user(0x4710, rc + 3); /* bx r2 */
+ /* Wrote 8 of 12 bytes */
}
-long do_rt_sigreturn(CPUARMState *env)
+void setup_sigtramp(abi_ulong sigtramp_page)
{
- if (get_osversion() >= 0x020612) {
- return do_rt_sigreturn_v2(env);
- } else {
- return do_rt_sigreturn_v1(env);
- }
+ uint32_t total_size = 8 * RETCODE_BYTES;
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, total_size, 0);
+
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ write_arm_sigreturn(&tramp[0 * RETCODE_WORDS], TARGET_NR_sigreturn);
+ write_thm_sigreturn(&tramp[1 * RETCODE_WORDS], TARGET_NR_sigreturn);
+ write_arm_sigreturn(&tramp[2 * RETCODE_WORDS], TARGET_NR_rt_sigreturn);
+ write_thm_sigreturn(&tramp[3 * RETCODE_WORDS], TARGET_NR_rt_sigreturn);
+
+ sigreturn_fdpic_tramp = sigtramp_page + 4 * RETCODE_BYTES;
+ write_arm_fdpic_sigreturn(tramp + 4 * RETCODE_WORDS,
+ offsetof(struct sigframe, retcode[3]));
+ write_thm_fdpic_sigreturn(tramp + 5 * RETCODE_WORDS,
+ offsetof(struct sigframe, retcode[3]));
+ write_arm_fdpic_sigreturn(tramp + 6 * RETCODE_WORDS,
+ offsetof(struct rt_sigframe, sig.retcode[3]));
+ write_thm_fdpic_sigreturn(tramp + 7 * RETCODE_WORDS,
+ offsetof(struct rt_sigframe, sig.retcode[3]));
+
+ unlock_user(tramp, sigtramp_page, total_size);
}
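
As a worked example of the ARM_MOV_R7_IMM/ARM_SWI_SYS encodings written by write_arm_sigreturn() -- assuming the usual 32-bit syscall numbers, TARGET_NR_sigreturn == 119 and TARGET_NR_rt_sigreturn == 173, and ARM_SYSCALL_BASE == 0x900000 (assumptions, not quoted from the patch) -- the emitted A32 words come out as the sequences the kernel has historically used:

    /* Worked encodings under the assumptions stated above (illustrative). */
    #define MOV_R7_IMM(X)  (0xe3a07000u | (X))
    #define SWI_SYS(X)     (0xef000000u | (X) | 0x900000u)

    _Static_assert(MOV_R7_IMM(119) == 0xe3a07077u, "mov r7, #119");
    _Static_assert(SWI_SYS(119)    == 0xef900077u, "swi 0x900077");
    _Static_assert(MOV_R7_IMM(173) == 0xe3a070adu, "mov r7, #173");
    _Static_assert(SWI_SYS(173)    == 0xef9000adu, "swi 0x9000ad");
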
diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h
index 709d19bc9e..f6383a7cd1 100644
--- a/linux-user/arm/target_cpu.h
+++ b/linux-user/arm/target_cpu.h
@@ -30,13 +30,13 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
* the high addresses. Restrict linux-user to the
* cached write-back RAM in the system map.
*/
- return 0x80000000ul;
+ return 0x7ffffffful;
} else {
/*
* We need to be able to map the commpage.
- * See validate_guest_space in linux-user/elfload.c.
+ * See init_guest_commpage in linux-user/elfload.c.
*/
- return 0xffff0000ul;
+ return 0xfffffffful;
}
}
#define MAX_RESERVED_VA arm_max_reserved_va
diff --git a/linux-user/arm/target_flat.h b/linux-user/arm/target_flat.h
new file mode 100644
index 0000000000..bc83224cea
--- /dev/null
+++ b/linux-user/arm/target_flat.h
@@ -0,0 +1 @@
+#include "../generic/target_flat.h"
diff --git a/linux-user/arm/target_mman.h b/linux-user/arm/target_mman.h
new file mode 100644
index 0000000000..51005da869
--- /dev/null
+++ b/linux-user/arm/target_mman.h
@@ -0,0 +1,12 @@
+/*
+ * arch/arm/include/asm/memory.h
+ * TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
+ * TASK_SIZE CONFIG_PAGE_OFFSET
+ * CONFIG_PAGE_OFFSET 0xC0000000 (default in Kconfig)
+ */
+#define TASK_UNMAPPED_BASE 0x40000000
+
+/* arch/arm/include/asm/elf.h */
+#define ELF_ET_DYN_BASE 0x00400000
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/arm/target_prctl.h b/linux-user/arm/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/arm/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/arm/target_proc.h b/linux-user/arm/target_proc.h
new file mode 100644
index 0000000000..ac75af9ca6
--- /dev/null
+++ b/linux-user/arm/target_proc.h
@@ -0,0 +1,101 @@
+/*
+ * Arm specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef ARM_TARGET_PROC_H
+#define ARM_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ ARMCPU *cpu = env_archcpu(cpu_env);
+ int arch, midr_rev, midr_part, midr_var, midr_impl;
+ target_ulong elf_hwcap = get_elf_hwcap();
+ target_ulong elf_hwcap2 = get_elf_hwcap2();
+ const char *elf_name;
+ int num_cpus, len_part, len_var;
+
+#if TARGET_BIG_ENDIAN
+# define END_SUFFIX "b"
+#else
+# define END_SUFFIX "l"
+#endif
+
+ arch = 8;
+ elf_name = "v8" END_SUFFIX;
+ midr_rev = FIELD_EX32(cpu->midr, MIDR_EL1, REVISION);
+ midr_part = FIELD_EX32(cpu->midr, MIDR_EL1, PARTNUM);
+ midr_var = FIELD_EX32(cpu->midr, MIDR_EL1, VARIANT);
+ midr_impl = FIELD_EX32(cpu->midr, MIDR_EL1, IMPLEMENTER);
+ len_part = 3;
+ len_var = 1;
+
+#ifndef TARGET_AARCH64
+ /* For simplicity, treat ARMv8 as an arm64 kernel with CONFIG_COMPAT. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ arch = 7;
+ midr_var = (cpu->midr >> 16) & 0x7f;
+ len_var = 2;
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ elf_name = "armv7m" END_SUFFIX;
+ } else {
+ elf_name = "armv7" END_SUFFIX;
+ }
+ } else {
+ midr_part = cpu->midr >> 4;
+ len_part = 7;
+ if (arm_feature(&cpu->env, ARM_FEATURE_V6)) {
+ arch = 6;
+ elf_name = "armv6" END_SUFFIX;
+ } else if (arm_feature(&cpu->env, ARM_FEATURE_V5)) {
+ arch = 5;
+ elf_name = "armv5t" END_SUFFIX;
+ } else {
+ arch = 4;
+ elf_name = "armv4" END_SUFFIX;
+ }
+ }
+ }
+#endif
+
+#undef END_SUFFIX
+
+ num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ for (int i = 0; i < num_cpus; i++) {
+ dprintf(fd,
+ "processor\t: %d\n"
+ "model name\t: ARMv%d Processor rev %d (%s)\n"
+ "BogoMIPS\t: 100.00\n"
+ "Features\t:",
+ i, arch, midr_rev, elf_name);
+
+ for (target_ulong j = elf_hwcap; j ; j &= j - 1) {
+ dprintf(fd, " %s", elf_hwcap_str(ctz64(j)));
+ }
+ for (target_ulong j = elf_hwcap2; j ; j &= j - 1) {
+ dprintf(fd, " %s", elf_hwcap2_str(ctz64(j)));
+ }
+
+ dprintf(fd, "\n"
+ "CPU implementer\t: 0x%02x\n"
+ "CPU architecture: %d\n"
+ "CPU variant\t: 0x%0*x\n",
+ midr_impl, arch, len_var, midr_var);
+ if (arch >= 7) {
+ dprintf(fd, "CPU part\t: 0x%0*x\n", len_part, midr_part);
+ }
+ dprintf(fd, "CPU revision\t: %d\n\n", midr_rev);
+ }
+
+ if (arch < 8) {
+ dprintf(fd, "Hardware\t: QEMU v%s %s\n", QEMU_VERSION,
+ cpu->dtb_compatible ? : "");
+ dprintf(fd, "Revision\t: 0000\n");
+ dprintf(fd, "Serial\t\t: 0000000000000000\n");
+ }
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* ARM_TARGET_PROC_H */
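
Because HAVE_ARCH_PROC_CPUINFO is defined, guest opens of /proc/cpuinfo are serviced by open_cpuinfo() above rather than falling through to the host file, so an emulated binary sees Arm-style per-CPU records. Checking that from inside the guest is plain stdio; nothing below is specific to this patch:

    /* Guest-side check: dump the synthesized /proc/cpuinfo. */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/cpuinfo", "r");

        if (!f) {
            perror("/proc/cpuinfo");
            return 1;
        }
        while (fgets(line, sizeof(line), f)) {
            fputs(line, stdout);
        }
        fclose(f);
        return 0;
    }
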
diff --git a/linux-user/arm/target_resource.h b/linux-user/arm/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/arm/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h
index 0998dd6dfa..0e6351d9f7 100644
--- a/linux-user/arm/target_signal.h
+++ b/linux-user/arm/target_signal.h
@@ -1,25 +1,9 @@
#ifndef ARM_TARGET_SIGNAL_H
#define ARM_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* ARM_TARGET_SIGNAL_H */
diff --git a/linux-user/arm/target_structs.h b/linux-user/arm/target_structs.h
index 339b070bf1..3a06f373c3 100644
--- a/linux-user/arm/target_structs.h
+++ b/linux-user/arm/target_structs.h
@@ -1,59 +1 @@
-/*
- * ARM specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef ARM_TARGET_STRUCTS_H
-#define ARM_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
- abi_ulong __unused1;
- abi_ulong shm_dtime; /* time of last shmdt() */
- abi_ulong __unused2;
- abi_ulong shm_ctime; /* time of last change by shmctl() */
- abi_ulong __unused3;
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-struct target_oabi_flock64 {
- abi_short l_type;
- abi_short l_whence;
- abi_llong l_start;
- abi_llong l_len;
- abi_int l_pid;
-} QEMU_PACKED;
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/arm/target_syscall.h b/linux-user/arm/target_syscall.h
index e870ed7a54..412ad434cf 100644
--- a/linux-user/arm/target_syscall.h
+++ b/linux-user/arm/target_syscall.h
@@ -18,7 +18,7 @@ struct target_pt_regs {
#define ARM_NR_set_tls (ARM_NR_BASE + 5)
#define ARM_NR_get_tls (ARM_NR_BASE + 6)
-#if defined(TARGET_WORDS_BIGENDIAN)
+#if TARGET_BIG_ENDIAN
#define UNAME_MACHINE "armv5teb"
#else
#define UNAME_MACHINE "armv5tel"
@@ -27,7 +27,6 @@ struct target_pt_regs {
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/arm/vdso-asmoffset.h b/linux-user/arm/vdso-asmoffset.h
new file mode 100644
index 0000000000..252a95c46e
--- /dev/null
+++ b/linux-user/arm/vdso-asmoffset.h
@@ -0,0 +1,3 @@
+/* offsetof(struct sigframe, retcode[3]) */
+#define SIGFRAME_RC3_OFFSET 756
+#define RT_SIGFRAME_RC3_OFFSET 884
diff --git a/linux-user/arm/vdso-be.so b/linux-user/arm/vdso-be.so
new file mode 100755
index 0000000000..69cafbb956
--- /dev/null
+++ b/linux-user/arm/vdso-be.so
Binary files differ
diff --git a/linux-user/arm/vdso-le.so b/linux-user/arm/vdso-le.so
new file mode 100755
index 0000000000..ad05a12518
--- /dev/null
+++ b/linux-user/arm/vdso-le.so
Binary files differ
diff --git a/linux-user/arm/vdso.S b/linux-user/arm/vdso.S
new file mode 100644
index 0000000000..b3bb6491dc
--- /dev/null
+++ b/linux-user/arm/vdso.S
@@ -0,0 +1,174 @@
+/*
+ * arm linux replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+#include "vdso-asmoffset.h"
+
+/*
+ * All supported cpus have T16 instructions: at least arm4t.
+ *
+ * We support user-user with m-profile cpus as an extension, because it
+ * is useful for testing gcc, which requires we avoid A32 instructions.
+ */
+ .thumb
+ .arch armv4t
+ .eabi_attribute Tag_FP_arch, 0
+ .eabi_attribute Tag_ARM_ISA_use, 0
+
+ .text
+
+.macro raw_syscall n
+ .ifne \n < 0x100
+ mov r7, #\n
+ .elseif \n < 0x1ff
+ mov r7, #0xff
+ add r7, #(\n - 0xff)
+ .else
+ .err
+ .endif
+ swi #0
+.endm
+
+.macro fdpic_thunk ofs
+ ldr r3, [sp, #\ofs]
+ ldmia r2, {r2, r3}
+ mov r9, r3
+ bx r2
+.endm
+
+.macro endf name
+ .globl \name
+ .type \name, %function
+ .size \name, . - \name
+.endm
+
+/*
+ * We must save/restore r7 for the EABI syscall number.
+ * While we're doing that, we might as well save LR to get a free return,
+ * and a branch that is interworking back to ARMv5.
+ */
+
+.macro SYSCALL name, nr
+\name:
+ .cfi_startproc
+ push {r7, lr}
+ .cfi_adjust_cfa_offset 8
+ .cfi_offset r7, -8
+ .cfi_offset lr, -4
+ raw_syscall \nr
+ pop {r7, pc}
+ .cfi_endproc
+endf \name
+.endm
+
+SYSCALL __vdso_clock_gettime, __NR_clock_gettime
+SYSCALL __vdso_clock_gettime64, __NR_clock_gettime64
+SYSCALL __vdso_clock_getres, __NR_clock_getres
+SYSCALL __vdso_gettimeofday, __NR_gettimeofday
+
+
+/*
+ * We, like the real kernel, use a table of sigreturn trampolines.
+ * Unlike the real kernel, we do not attempt to pack this into as
+ * few bytes as possible -- simply use 8 bytes per slot.
+ *
+ * Within each slot, use the exact same code sequence as the kernel,
+ * lest we trip up someone doing code inspection.
+ */
+
+.macro slot n
+ .balign 8
+ .org sigreturn_codes + 8 * \n
+.endm
+
+.macro cfi_fdpic_r9 ofs
+ /*
+ * fd = *(r13 + ofs)
+ * r9 = *(fd + 4)
+ *
+ * DW_CFA_expression r9, length (7),
+ * DW_OP_breg13, ofs, DW_OP_deref,
+ * DW_OP_plus_uconst, 4, DW_OP_deref
+ */
+ .cfi_escape 0x10, 9, 7, 0x7d, (\ofs & 0x7f) + 0x80, (\ofs >> 7), 0x06, 0x23, 4, 0x06
+.endm
+
+.macro cfi_fdpic_pc ofs
+ /*
+ * fd = *(r13 + ofs)
+ * pc = *fd
+ *
+ * DW_CFA_expression lr (14), length (5),
+ * DW_OP_breg13, ofs, DW_OP_deref, DW_OP_deref
+ */
+ .cfi_escape 0x10, 14, 5, 0x7d, (\ofs & 0x7f) + 0x80, (\ofs >> 7), 0x06, 0x06
+.endm
+
+/*
+ * Start the unwind info at least one instruction before the signal
+ * trampoline, because the unwinder will assume we are returning
+ * after a call site.
+ */
+ .cfi_startproc simple
+ .cfi_signal_frame
+ .cfi_return_column 15
+
+ .cfi_def_cfa sp, 32 + 64
+ .cfi_offset r0, -16 * 4
+ .cfi_offset r1, -15 * 4
+ .cfi_offset r2, -14 * 4
+ .cfi_offset r3, -13 * 4
+ .cfi_offset r4, -12 * 4
+ .cfi_offset r5, -11 * 4
+ .cfi_offset r6, -10 * 4
+ .cfi_offset r7, -9 * 4
+ .cfi_offset r8, -8 * 4
+ .cfi_offset r9, -7 * 4
+ .cfi_offset r10, -6 * 4
+ .cfi_offset r11, -5 * 4
+ .cfi_offset r12, -4 * 4
+ .cfi_offset r13, -3 * 4
+ .cfi_offset r14, -2 * 4
+ .cfi_offset r15, -1 * 4
+
+ nop
+
+ .balign 16
+sigreturn_codes:
+ /* [EO]ABI sigreturn */
+ slot 0
+ raw_syscall __NR_sigreturn
+
+ .cfi_def_cfa_offset 160 + 64
+
+ /* [EO]ABI rt_sigreturn */
+ slot 1
+ raw_syscall __NR_rt_sigreturn
+
+ .cfi_endproc
+
+ /* FDPIC sigreturn */
+ .cfi_startproc
+ cfi_fdpic_pc SIGFRAME_RC3_OFFSET
+ cfi_fdpic_r9 SIGFRAME_RC3_OFFSET
+
+ slot 2
+ fdpic_thunk SIGFRAME_RC3_OFFSET
+ .cfi_endproc
+
+ /* FDPIC rt_sigreturn */
+ .cfi_startproc
+ cfi_fdpic_pc RT_SIGFRAME_RC3_OFFSET
+ cfi_fdpic_r9 RT_SIGFRAME_RC3_OFFSET
+
+ slot 3
+ fdpic_thunk RT_SIGFRAME_RC3_OFFSET
+ .cfi_endproc
+
+ .balign 16
+endf sigreturn_codes
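
The raw_syscall macro above works around the 8-bit immediate limit of the T16 mov: numbers below 0x100 are loaded into r7 directly, while larger numbers (up to 0x1fe) are split into 0xff plus an 8-bit add. For example __NR_clock_gettime64 -- 403 on 32-bit Linux, an assumption here rather than something the patch states -- expands to mov r7, #0xff; add r7, #148; swi #0. A small C restatement of the split:

    /* Sketch of the raw_syscall immediate split used by the vdso. */
    #include <assert.h>

    static void check_split(int nr)
    {
        assert(nr < 0x1ff);           /* same bound the .macro enforces */
        if (nr >= 0x100) {
            int add = nr - 0xff;      /* mov r7, #0xff ; add r7, #add */
            assert(add > 0 && add <= 0xff);
        }                             /* else: mov r7, #nr */
    }

    int main(void)
    {
        check_split(403);             /* 403 = 0xff + 148 */
        return 0;
    }
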
diff --git a/linux-user/arm/vdso.ld b/linux-user/arm/vdso.ld
new file mode 100644
index 0000000000..3b00adf27a
--- /dev/null
+++ b/linux-user/arm/vdso.ld
@@ -0,0 +1,67 @@
+/*
+ * Linker script for linux arm replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_2.6 {
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ __vdso_clock_gettime64;
+
+ local: *;
+ };
+}
+
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ .data : {
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load
+}
diff --git a/linux-user/cpu_loop-common.h b/linux-user/cpu_loop-common.h
index 8828af28a4..e644d2ef90 100644
--- a/linux-user/cpu_loop-common.h
+++ b/linux-user/cpu_loop-common.h
@@ -21,17 +21,11 @@
#define CPU_LOOP_COMMON_H
#include "exec/log.h"
+#include "special-errno.h"
-#define EXCP_DUMP(env, fmt, ...) \
-do { \
- CPUState *cs = env_cpu(env); \
- fprintf(stderr, fmt , ## __VA_ARGS__); \
- cpu_dump_state(cs, stderr, 0); \
- if (qemu_log_separate()) { \
- qemu_log(fmt, ## __VA_ARGS__); \
- log_cpu_state(cs, 0); \
- } \
-} while (0)
+void target_exception_dump(CPUArchState *env, const char *fmt, int code);
+#define EXCP_DUMP(env, fmt, code) \
+ target_exception_dump(env, fmt, code)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs);
#endif
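
EXCP_DUMP keeps its macro form, so existing call sites compile unchanged, but the stderr/qemu_log boilerplate now lives in one shared target_exception_dump() helper instead of being expanded into every per-target cpu_loop. A typical call site looks like the following (the format string is an illustrative example, not quoted from this patch):

    /* Hypothetical call site in a per-target cpu_loop(). */
    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    abort();
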
diff --git a/linux-user/cris/cpu_loop.c b/linux-user/cris/cpu_loop.c
index b9085619c4..04c9086b6d 100644
--- a/linux-user/cris/cpu_loop.c
+++ b/linux-user/cris/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -28,8 +27,7 @@ void cpu_loop(CPUCRISState *env)
{
CPUState *cs = env_cpu(env);
int trapnr, ret;
- target_siginfo_t info;
-
+
while (1) {
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
@@ -37,19 +35,9 @@ void cpu_loop(CPUCRISState *env)
process_queued_cpu_work(cs);
switch (trapnr) {
- case 0xaa:
- {
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->pregs[PR_EDA];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
- break;
case EXCP_INTERRUPT:
- /* just indicate that signals should be handled asap */
- break;
+ /* just indicate that signals should be handled asap */
+ break;
case EXCP_BREAK:
ret = do_syscall(env,
env->regs[9],
@@ -60,17 +48,14 @@ void cpu_loop(CPUCRISState *env)
env->pregs[7],
env->pregs[11],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->pc -= 2;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->regs[10] = ret;
}
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
@@ -87,7 +72,7 @@ void cpu_loop(CPUCRISState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
env->regs[0] = regs->r0;
diff --git a/linux-user/cris/signal.c b/linux-user/cris/signal.c
index 2c39bdf727..4f532b2903 100644
--- a/linux-user/cris/signal.c
+++ b/linux-user/cris/signal.c
@@ -97,6 +97,14 @@ static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
return sp - framesize;
}
+static void setup_sigreturn(uint16_t *retcode)
+{
+ /* This is movu.w __NR_sigreturn, r9; break 13; */
+ __put_user(0x9c5f, retcode + 0);
+ __put_user(TARGET_NR_sigreturn, retcode + 1);
+ __put_user(0xe93d, retcode + 2);
+}
+
void setup_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUCRISState *env)
{
@@ -112,14 +120,8 @@ void setup_frame(int sig, struct target_sigaction *ka,
/*
* The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
* use this trampoline anymore but it sets it up for GDB.
- * In QEMU, using the trampoline simplifies things a bit so we use it.
- *
- * This is movu.w __NR_sigreturn, r9; break 13;
*/
- __put_user(0x9c5f, frame->retcode+0);
- __put_user(TARGET_NR_sigreturn,
- frame->retcode + 1);
- __put_user(0xe93d, frame->retcode + 2);
+ setup_sigreturn(frame->retcode);
/* Save the mask. */
__put_user(set->sig[0], &frame->sc.oldmask);
@@ -135,7 +137,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
env->regs[10] = sig;
env->pc = (unsigned long) ka->_sa_handler;
/* Link SRP so the guest returns through the trampoline. */
- env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
+ env->pregs[PR_SRP] = default_sigreturn;
unlock_user_struct(frame, frame_addr, 1);
return;
@@ -175,10 +177,10 @@ long do_sigreturn(CPUCRISState *env)
restore_sigcontext(&frame->sc, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUCRISState *env)
@@ -187,3 +189,14 @@ long do_rt_sigreturn(CPUCRISState *env)
qemu_log_mask(LOG_UNIMP, "do_rt_sigreturn: not implemented\n");
return -TARGET_ENOSYS;
}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ setup_sigreturn(tramp);
+
+ unlock_user(tramp, sigtramp_page, 6);
+}
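
The trampoline written by setup_sigreturn() is three little-endian 16-bit words, six bytes in total. A standalone illustration of the byte layout (the 0x77 argument is a placeholder; the real value is TARGET_NR_sigreturn):

    #include <stdint.h>
    #include <stdio.h>

    /* Serialize "movu.w <nr>, r9; break 13" as the CRIS code above does,
     * but into a plain host buffer with explicit little-endian stores. */
    static void encode_cris_sigreturn(uint8_t buf[6], uint16_t nr)
    {
        buf[0] = 0x5f; buf[1] = 0x9c;          /* movu.w ..., r9 */
        buf[2] = nr & 0xff; buf[3] = nr >> 8;  /* immediate: sigreturn nr */
        buf[4] = 0x3d; buf[5] = 0xe9;          /* break 13 */
    }

    int main(void)
    {
        uint8_t buf[6];

        encode_cris_sigreturn(buf, 0x77);      /* placeholder number */
        for (int i = 0; i < 6; i++) {
            printf("%02x ", buf[i]);
        }
        printf("\n");
        return 0;
    }
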
diff --git a/linux-user/cris/target_mman.h b/linux-user/cris/target_mman.h
new file mode 100644
index 0000000000..9ace8ac292
--- /dev/null
+++ b/linux-user/cris/target_mman.h
@@ -0,0 +1,13 @@
+/*
+ * arch/cris/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+ *
+ * arch/cris/include/arch-v32/arch/processor.h
+ * TASK_SIZE 0xb0000000
+ */
+#define TASK_UNMAPPED_BASE TARGET_PAGE_ALIGN(0xb0000000 / 3)
+
+/* arch/cris/include/uapi/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2)
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/cris/target_prctl.h b/linux-user/cris/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/cris/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/cris/target_proc.h b/linux-user/cris/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/cris/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/cris/target_resource.h b/linux-user/cris/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/cris/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/cris/target_signal.h b/linux-user/cris/target_signal.h
index 495a142896..ab0653fcdc 100644
--- a/linux-user/cris/target_signal.h
+++ b/linux-user/cris/target_signal.h
@@ -1,25 +1,9 @@
#ifndef CRIS_TARGET_SIGNAL_H
#define CRIS_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* CRIS_TARGET_SIGNAL_H */
diff --git a/linux-user/cris/target_structs.h b/linux-user/cris/target_structs.h
index f949d2331e..3a06f373c3 100644
--- a/linux-user/cris/target_structs.h
+++ b/linux-user/cris/target_structs.h
@@ -1,58 +1 @@
-/*
- * CRIS specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef CRIS_TARGET_STRUCTS_H
-#define CRIS_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/cris/target_syscall.h b/linux-user/cris/target_syscall.h
index 19e1281403..0b5ebf1f02 100644
--- a/linux-user/cris/target_syscall.h
+++ b/linux-user/cris/target_syscall.h
@@ -39,7 +39,6 @@ struct target_pt_regs {
};
#define TARGET_CLONE_BACKWARDS2
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 5f9e2141ad..f9461d2844 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -2,11 +2,15 @@
#include "qemu/osdep.h"
#include <sys/param.h>
+#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/shm.h>
#include "qemu.h"
+#include "user/tswap-target.h"
+#include "user/guest-base.h"
#include "user-internals.h"
+#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
@@ -16,7 +20,15 @@
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
+#include "qemu/lockable.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "target_signal.h"
+#include "tcg/debuginfo.h"
+
+#ifdef TARGET_ARM
+#include "target/arm/cpu-features.h"
+#endif
#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
@@ -28,6 +40,19 @@
#undef ELF_ARCH
#endif
+#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
+#endif
+
+typedef struct {
+ const uint8_t *image;
+ const uint32_t *relocs;
+ unsigned image_size;
+ unsigned reloc_count;
+ unsigned sigreturn_ofs;
+ unsigned rt_sigreturn_ofs;
+} VdsoImageInfo;
+
#define ELF_OSABI ELFOSABI_SYSV
/* from personality.h */
@@ -103,7 +128,7 @@ int info_is_fdpic(struct image_info *info)
#define ELIBBAD 80
#endif
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
#define ELF_DATA ELFDATA2MSB
#else
#define ELF_DATA ELFDATA2LSB
@@ -128,19 +153,6 @@ typedef abi_int target_pid_t;
#ifdef TARGET_I386
-#define ELF_PLATFORM get_elf_platform()
-
-static const char *get_elf_platform(void)
-{
- static char elf_platform[] = "i386";
- int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
- if (family > 6)
- family = 6;
- if (family >= 3)
- elf_platform[1] = '0' + family;
- return elf_platform;
-}
-
#define ELF_HWCAP get_elf_hwcap()
static uint32_t get_elf_hwcap(void)
@@ -151,11 +163,11 @@ static uint32_t get_elf_hwcap(void)
}
#ifdef TARGET_X86_64
-#define ELF_START_MMAP 0x2aaaaab000ULL
-
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_X86_64
+#define ELF_PLATFORM "x86_64"
+
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
regs->rax = 0;
@@ -204,10 +216,29 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
(*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}
+#if ULONG_MAX > UINT32_MAX
+#define INIT_GUEST_COMMPAGE
+static bool init_guest_commpage(void)
+{
+ /*
+ * The vsyscall page is at a high negative address aka kernel space,
+ * which means that we cannot actually allocate it with target_mmap.
+ * We still should be able to use page_set_flags, unless the user
+ * has specified -R reserved_va, which would trigger an assert().
+ */
+ if (reserved_va != 0 &&
+ TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
+ error_report("Cannot allocate vsyscall page");
+ exit(EXIT_FAILURE);
+ }
+ page_set_flags(TARGET_VSYSCALL_PAGE,
+ TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
+ PAGE_EXEC | PAGE_VALID);
+ return true;
+}
+#endif
#else
-#define ELF_START_MMAP 0x80000000
-
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
@@ -219,6 +250,22 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_386
+#define ELF_PLATFORM get_elf_platform()
+#define EXSTACK_DEFAULT true
+
+static const char *get_elf_platform(void)
+{
+ static char elf_platform[] = "i386";
+ int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
+ if (family > 6) {
+ family = 6;
+ }
+ if (family >= 3) {
+ elf_platform[1] = '0' + family;
+ }
+ return elf_platform;
+}
+
static inline void init_thread(struct target_pt_regs *regs,
struct image_info *infop)
{
@@ -265,22 +312,36 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
(*regs)[15] = tswapreg(env->regs[R_ESP]);
(*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}
-#endif
+
+/*
+ * i386 is the only target which supplies AT_SYSINFO for the vdso.
+ * All others only supply AT_SYSINFO_EHDR.
+ */
+#define DLINFO_ARCH_ITEMS (vdso_info != NULL)
+#define ARCH_DLINFO \
+ do { \
+ if (vdso_info) { \
+ NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry); \
+ } \
+ } while (0)
+
+#endif /* TARGET_X86_64 */
+
+#define VDSO_HEADER "vdso.c.inc"
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
-#endif
+#endif /* TARGET_I386 */
#ifdef TARGET_ARM
#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */
-#define ELF_START_MMAP 0x80000000
-
#define ELF_ARCH EM_ARM
#define ELF_CLASS ELFCLASS32
+#define EXSTACK_DEFAULT true
static inline void init_thread(struct target_pt_regs *regs,
struct image_info *infop)
@@ -376,6 +437,12 @@ enum
ARM_HWCAP_ARM_VFPD32 = 1 << 19,
ARM_HWCAP_ARM_LPAE = 1 << 20,
ARM_HWCAP_ARM_EVTSTRM = 1 << 21,
+ ARM_HWCAP_ARM_FPHP = 1 << 22,
+ ARM_HWCAP_ARM_ASIMDHP = 1 << 23,
+ ARM_HWCAP_ARM_ASIMDDP = 1 << 24,
+ ARM_HWCAP_ARM_ASIMDFHM = 1 << 25,
+ ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
+ ARM_HWCAP_ARM_I8MM = 1 << 27,
};
enum {
@@ -384,17 +451,36 @@ enum {
ARM_HWCAP2_ARM_SHA1 = 1 << 2,
ARM_HWCAP2_ARM_SHA2 = 1 << 3,
ARM_HWCAP2_ARM_CRC32 = 1 << 4,
+ ARM_HWCAP2_ARM_SB = 1 << 5,
+ ARM_HWCAP2_ARM_SSBS = 1 << 6,
};
/* The commpage only exists for 32 bit kernels */
-#define ARM_COMMPAGE (intptr_t)0xffff0f00u
+#define HI_COMMPAGE (intptr_t)0xffff0f00u
static bool init_guest_commpage(void)
{
- void *want = g2h_untagged(ARM_COMMPAGE & -qemu_host_page_size);
- void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ int host_page_size = qemu_real_host_page_size();
+ abi_ptr commpage;
+ void *want;
+ void *addr;
+
+ /*
+     * M-profile allocates a maximum of 2GB of address space, so it can
+     * never allocate the commpage.  Skip it.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ return true;
+ }
+
+ commpage = HI_COMMPAGE & -host_page_size;
+ want = g2h_untagged(commpage);
+ addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE |
+ (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE),
+ -1, 0);
if (addr == MAP_FAILED) {
perror("Allocating guest commpage");
@@ -407,17 +493,20 @@ static bool init_guest_commpage(void)
/* Set kernel helper versions; rest of page is 0. */
__put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
- if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
+ if (mprotect(addr, host_page_size, PROT_READ)) {
perror("Protecting guest commpage");
exit(EXIT_FAILURE);
}
+
+ page_set_flags(commpage, commpage | (host_page_size - 1),
+ PAGE_READ | PAGE_EXEC | PAGE_VALID);
return true;
}
#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()
-static uint32_t get_elf_hwcap(void)
+uint32_t get_elf_hwcap(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
@@ -455,23 +544,86 @@ static uint32_t get_elf_hwcap(void)
}
}
GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
+ /*
+ * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
+ * isar_feature function for both. The kernel reports them as two hwcaps.
+ */
+ GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
+ GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
+ GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
+ GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
+ GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
+ GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);
return hwcaps;
}
-static uint32_t get_elf_hwcap2(void)
+uint64_t get_elf_hwcap2(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
- uint32_t hwcaps = 0;
+ uint64_t hwcaps = 0;
GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
+ GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
+ GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
return hwcaps;
}
+const char *elf_hwcap_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP_ARM_SWP )] = "swp",
+ [__builtin_ctz(ARM_HWCAP_ARM_HALF )] = "half",
+ [__builtin_ctz(ARM_HWCAP_ARM_THUMB )] = "thumb",
+ [__builtin_ctz(ARM_HWCAP_ARM_26BIT )] = "26bit",
+ [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
+ [__builtin_ctz(ARM_HWCAP_ARM_FPA )] = "fpa",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFP )] = "vfp",
+ [__builtin_ctz(ARM_HWCAP_ARM_EDSP )] = "edsp",
+ [__builtin_ctz(ARM_HWCAP_ARM_JAVA )] = "java",
+ [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT )] = "iwmmxt",
+ [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH )] = "crunch",
+ [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE )] = "thumbee",
+ [__builtin_ctz(ARM_HWCAP_ARM_NEON )] = "neon",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPv3 )] = "vfpv3",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
+ [__builtin_ctz(ARM_HWCAP_ARM_TLS )] = "tls",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPv4 )] = "vfpv4",
+ [__builtin_ctz(ARM_HWCAP_ARM_IDIVA )] = "idiva",
+ [__builtin_ctz(ARM_HWCAP_ARM_IDIVT )] = "idivt",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPD32 )] = "vfpd32",
+ [__builtin_ctz(ARM_HWCAP_ARM_LPAE )] = "lpae",
+ [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM )] = "evtstrm",
+ [__builtin_ctz(ARM_HWCAP_ARM_FPHP )] = "fphp",
+ [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP )] = "asimdhp",
+ [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP )] = "asimddp",
+ [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
+ [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
+ [__builtin_ctz(ARM_HWCAP_ARM_I8MM )] = "i8mm",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
+const char *elf_hwcap2_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP2_ARM_AES )] = "aes",
+ [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
+ [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
+ [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
+ [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
+ [__builtin_ctz(ARM_HWCAP2_ARM_SB )] = "sb",
+ [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
#undef GET_FEATURE
#undef GET_FEATURE_ID
@@ -479,9 +631,9 @@ static uint32_t get_elf_hwcap2(void)
static const char *get_elf_platform(void)
{
- CPUARMState *env = thread_cpu->env_ptr;
+ CPUARMState *env = cpu_env(thread_cpu);
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
# define END "b"
#else
# define END "l"
@@ -508,11 +660,10 @@ static const char *get_elf_platform(void)
#else
/* 64 bit ARM definitions */
-#define ELF_START_MMAP 0x80000000
#define ELF_ARCH EM_AARCH64
#define ELF_CLASS ELFCLASS64
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM "aarch64_be"
#else
# define ELF_PLATFORM "aarch64"
@@ -599,6 +750,32 @@ enum {
ARM_HWCAP2_A64_RNG = 1 << 16,
ARM_HWCAP2_A64_BTI = 1 << 17,
ARM_HWCAP2_A64_MTE = 1 << 18,
+ ARM_HWCAP2_A64_ECV = 1 << 19,
+ ARM_HWCAP2_A64_AFP = 1 << 20,
+ ARM_HWCAP2_A64_RPRES = 1 << 21,
+ ARM_HWCAP2_A64_MTE3 = 1 << 22,
+ ARM_HWCAP2_A64_SME = 1 << 23,
+ ARM_HWCAP2_A64_SME_I16I64 = 1 << 24,
+ ARM_HWCAP2_A64_SME_F64F64 = 1 << 25,
+ ARM_HWCAP2_A64_SME_I8I32 = 1 << 26,
+ ARM_HWCAP2_A64_SME_F16F32 = 1 << 27,
+ ARM_HWCAP2_A64_SME_B16F32 = 1 << 28,
+ ARM_HWCAP2_A64_SME_F32F32 = 1 << 29,
+ ARM_HWCAP2_A64_SME_FA64 = 1 << 30,
+ ARM_HWCAP2_A64_WFXT = 1ULL << 31,
+ ARM_HWCAP2_A64_EBF16 = 1ULL << 32,
+ ARM_HWCAP2_A64_SVE_EBF16 = 1ULL << 33,
+ ARM_HWCAP2_A64_CSSC = 1ULL << 34,
+ ARM_HWCAP2_A64_RPRFM = 1ULL << 35,
+ ARM_HWCAP2_A64_SVE2P1 = 1ULL << 36,
+ ARM_HWCAP2_A64_SME2 = 1ULL << 37,
+ ARM_HWCAP2_A64_SME2P1 = 1ULL << 38,
+ ARM_HWCAP2_A64_SME_I16I32 = 1ULL << 39,
+ ARM_HWCAP2_A64_SME_BI32I32 = 1ULL << 40,
+ ARM_HWCAP2_A64_SME_B16B16 = 1ULL << 41,
+ ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42,
+ ARM_HWCAP2_A64_MOPS = 1ULL << 43,
+ ARM_HWCAP2_A64_HBC = 1ULL << 44,
};
#define ELF_HWCAP get_elf_hwcap()
@@ -607,7 +784,7 @@ enum {
#define GET_FEATURE_ID(feat, hwcap) \
do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
-static uint32_t get_elf_hwcap(void)
+uint32_t get_elf_hwcap(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
@@ -629,12 +806,14 @@ static uint32_t get_elf_hwcap(void)
GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+ GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT);
GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
+ GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT);
GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
@@ -645,10 +824,10 @@ static uint32_t get_elf_hwcap(void)
return hwcaps;
}
-static uint32_t get_elf_hwcap2(void)
+uint64_t get_elf_hwcap2(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
- uint32_t hwcaps = 0;
+ uint64_t hwcaps = 0;
GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
@@ -668,19 +847,129 @@ static uint32_t get_elf_hwcap2(void)
GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
+ GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3);
+ GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
+ ARM_HWCAP2_A64_SME_F32F32 |
+ ARM_HWCAP2_A64_SME_B16F32 |
+ ARM_HWCAP2_A64_SME_F16F32 |
+ ARM_HWCAP2_A64_SME_I8I32));
+ GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
+ GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
+ GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
+ GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
+ GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);
return hwcaps;
}
+const char *elf_hwcap_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd",
+ [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
+ [__builtin_ctz(ARM_HWCAP_A64_AES )] = "aes",
+ [__builtin_ctz(ARM_HWCAP_A64_PMULL )] = "pmull",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA1 )] = "sha1",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA2 )] = "sha2",
+ [__builtin_ctz(ARM_HWCAP_A64_CRC32 )] = "crc32",
+ [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
+ [__builtin_ctz(ARM_HWCAP_A64_FPHP )] = "fphp",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
+ [__builtin_ctz(ARM_HWCAP_A64_CPUID )] = "cpuid",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
+ [__builtin_ctz(ARM_HWCAP_A64_JSCVT )] = "jscvt",
+ [__builtin_ctz(ARM_HWCAP_A64_FCMA )] = "fcma",
+ [__builtin_ctz(ARM_HWCAP_A64_LRCPC )] = "lrcpc",
+ [__builtin_ctz(ARM_HWCAP_A64_DCPOP )] = "dcpop",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA3 )] = "sha3",
+ [__builtin_ctz(ARM_HWCAP_A64_SM3 )] = "sm3",
+ [__builtin_ctz(ARM_HWCAP_A64_SM4 )] = "sm4",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA512 )] = "sha512",
+ [__builtin_ctz(ARM_HWCAP_A64_SVE )] = "sve",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
+ [__builtin_ctz(ARM_HWCAP_A64_DIT )] = "dit",
+ [__builtin_ctz(ARM_HWCAP_A64_USCAT )] = "uscat",
+ [__builtin_ctz(ARM_HWCAP_A64_ILRCPC )] = "ilrcpc",
+ [__builtin_ctz(ARM_HWCAP_A64_FLAGM )] = "flagm",
+ [__builtin_ctz(ARM_HWCAP_A64_SSBS )] = "ssbs",
+ [__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb",
+ [__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca",
+ [__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
+const char *elf_hwcap2_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL )] = "svepmull",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3 )] = "svesha3",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVESM4 )] = "svesm4",
+ [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2 )] = "flagm2",
+ [__builtin_ctz(ARM_HWCAP2_A64_FRINT )] = "frint",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM )] = "svei8mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM )] = "svef32mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM )] = "svef64mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16 )] = "svebf16",
+ [__builtin_ctz(ARM_HWCAP2_A64_I8MM )] = "i8mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_BF16 )] = "bf16",
+ [__builtin_ctz(ARM_HWCAP2_A64_DGH )] = "dgh",
+ [__builtin_ctz(ARM_HWCAP2_A64_RNG )] = "rng",
+ [__builtin_ctz(ARM_HWCAP2_A64_BTI )] = "bti",
+ [__builtin_ctz(ARM_HWCAP2_A64_MTE )] = "mte",
+ [__builtin_ctz(ARM_HWCAP2_A64_ECV )] = "ecv",
+ [__builtin_ctz(ARM_HWCAP2_A64_AFP )] = "afp",
+ [__builtin_ctz(ARM_HWCAP2_A64_RPRES )] = "rpres",
+ [__builtin_ctz(ARM_HWCAP2_A64_MTE3 )] = "mte3",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME )] = "sme",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "smei8i32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "smefa64",
+ [__builtin_ctz(ARM_HWCAP2_A64_WFXT )] = "wfxt",
+ [__builtin_ctzll(ARM_HWCAP2_A64_EBF16 )] = "ebf16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16 )] = "sveebf16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_CSSC )] = "cssc",
+ [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM )] = "rprfm",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1 )] = "sve2p1",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME2 )] = "sme2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1 )] = "sme2p1",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops",
+ [__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
#undef GET_FEATURE_ID
#endif /* not TARGET_AARCH64 */
+
+#if TARGET_BIG_ENDIAN
+# define VDSO_HEADER "vdso-be.c.inc"
+#else
+# define VDSO_HEADER "vdso-le.c.inc"
+#endif
+
#endif /* TARGET_ARM */
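
The elf_hwcap_str()/elf_hwcap2_str() tables map a bit index to a feature name via __builtin_ctz designated initializers, and get_elf_hwcap()/get_elf_hwcap2() lose their static qualifier so other files can consume them. A rough, hypothetical consumer (not the QEMU code itself) that turns the hwcap word into a /proc/cpuinfo-style feature list:

    #include <stdint.h>
    #include <stdio.h>

    /* Provided by the definitions above; link against them. */
    extern uint32_t get_elf_hwcap(void);
    extern const char *elf_hwcap_str(uint32_t bit);

    static void print_features(void)
    {
        uint32_t hwcap = get_elf_hwcap();

        printf("Features\t:");
        for (uint32_t bit = 0; bit < 32; bit++) {
            if (hwcap & (1u << bit)) {
                const char *name = elf_hwcap_str(bit);
                printf(" %s", name ? name : "(unknown)");
            }
        }
        printf("\n");
    }
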
#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64
-#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
| HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
@@ -692,7 +981,6 @@ static uint32_t get_elf_hwcap2(void)
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_SPARCV9
#else
-#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
| HWCAP_SPARC_MULDIV)
#define ELF_CLASS ELFCLASS32
@@ -714,9 +1002,8 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_PPC
#define ELF_MACHINE PPC_ELF_MACHINE
-#define ELF_START_MMAP 0x80000000
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
+#if defined(TARGET_PPC64)
#define elf_check_arch(x) ( (x) == EM_PPC64 )
@@ -725,6 +1012,7 @@ static inline void init_thread(struct target_pt_regs *regs,
#else
#define ELF_CLASS ELFCLASS32
+#define EXSTACK_DEFAULT true
#endif
@@ -777,6 +1065,8 @@ enum {
QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
+ QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
+ QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
};
#define ELF_HWCAP get_elf_hwcap()
@@ -834,6 +1124,8 @@ static uint32_t get_elf_hwcap2(void)
QEMU_PPC_FEATURE2_VEC_CRYPTO);
GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
+ GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
+ QEMU_PPC_FEATURE2_MMA);
#undef GET_FEATURE
#undef GET_FEATURE2
@@ -868,7 +1160,7 @@ static uint32_t get_elf_hwcap2(void)
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
_regs->gpr[1] = infop->start_stack;
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
+#if defined(TARGET_PPC64)
if (get_ppc64_abi(infop) < 2) {
uint64_t val;
get_user_u64(val, infop->entry + 8);
@@ -899,22 +1191,126 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
(*regs)[33] = tswapreg(env->msr);
(*regs)[35] = tswapreg(env->ctr);
(*regs)[36] = tswapreg(env->lr);
- (*regs)[37] = tswapreg(env->xer);
+ (*regs)[37] = tswapreg(cpu_read_xer(env));
- for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
- ccr |= env->crf[i] << (32 - ((i + 1) * 4));
- }
+ ccr = ppc_get_cr(env);
(*regs)[38] = tswapreg(ccr);
}
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
+#ifndef TARGET_PPC64
+# define VDSO_HEADER "vdso-32.c.inc"
+#elif TARGET_BIG_ENDIAN
+# define VDSO_HEADER "vdso-64.c.inc"
+#else
+# define VDSO_HEADER "vdso-64le.c.inc"
+#endif
+
#endif
-#ifdef TARGET_MIPS
+#ifdef TARGET_LOONGARCH64
+
+#define ELF_CLASS ELFCLASS64
+#define ELF_ARCH EM_LOONGARCH
+#define EXSTACK_DEFAULT true
+
+#define elf_check_arch(x) ((x) == EM_LOONGARCH)
+
+#define VDSO_HEADER "vdso.c.inc"
+
+static inline void init_thread(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+    /* Set crmd PG, DA = 1, 0 */
+ regs->csr.crmd = 2 << 3;
+ regs->csr.era = infop->entry;
+ regs->regs[3] = infop->start_stack;
+}
+
+/* See linux kernel: arch/loongarch/include/asm/elf.h */
+#define ELF_NREG 45
+typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
+
+enum {
+ TARGET_EF_R0 = 0,
+ TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
+ TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
+};
+
+static void elf_core_copy_regs(target_elf_gregset_t *regs,
+ const CPULoongArchState *env)
+{
+ int i;
+
+ (*regs)[TARGET_EF_R0] = 0;
+
+ for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
+ (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
+ }
+
+ (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
+ (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
+}
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#define ELF_HWCAP get_elf_hwcap()
-#define ELF_START_MMAP 0x80000000
+/* See arch/loongarch/include/uapi/asm/hwcap.h */
+enum {
+ HWCAP_LOONGARCH_CPUCFG = (1 << 0),
+ HWCAP_LOONGARCH_LAM = (1 << 1),
+ HWCAP_LOONGARCH_UAL = (1 << 2),
+ HWCAP_LOONGARCH_FPU = (1 << 3),
+ HWCAP_LOONGARCH_LSX = (1 << 4),
+ HWCAP_LOONGARCH_LASX = (1 << 5),
+ HWCAP_LOONGARCH_CRC32 = (1 << 6),
+ HWCAP_LOONGARCH_COMPLEX = (1 << 7),
+ HWCAP_LOONGARCH_CRYPTO = (1 << 8),
+ HWCAP_LOONGARCH_LVZ = (1 << 9),
+ HWCAP_LOONGARCH_LBT_X86 = (1 << 10),
+ HWCAP_LOONGARCH_LBT_ARM = (1 << 11),
+ HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
+};
+
+static uint32_t get_elf_hwcap(void)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ hwcaps |= HWCAP_LOONGARCH_CRC32;
+
+ if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
+ hwcaps |= HWCAP_LOONGARCH_UAL;
+ }
+
+ if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
+ hwcaps |= HWCAP_LOONGARCH_FPU;
+ }
+
+ if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
+ hwcaps |= HWCAP_LOONGARCH_LAM;
+ }
+
+ if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
+ hwcaps |= HWCAP_LOONGARCH_LSX;
+ }
+
+ if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
+ hwcaps |= HWCAP_LOONGARCH_LASX;
+ }
+
+ return hwcaps;
+}
+
+#define ELF_PLATFORM "loongarch"
+
+#endif /* TARGET_LOONGARCH64 */
+
+#ifdef TARGET_MIPS
#ifdef TARGET_MIPS64
#define ELF_CLASS ELFCLASS64
@@ -922,8 +1318,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
#define ELF_CLASS ELFCLASS32
#endif
#define ELF_ARCH EM_MIPS
-
-#define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS)
+#define EXSTACK_DEFAULT true
#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
@@ -931,6 +1326,37 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif
+#define ELF_BASE_PLATFORM get_elf_base_platform()
+
+#define MATCH_PLATFORM_INSN(_flags, _base_platform) \
+ do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
+ { return _base_platform; } } while (0)
+
+static const char *get_elf_base_platform(void)
+{
+ MIPSCPU *cpu = MIPS_CPU(thread_cpu);
+
+    /* 64 bit ISAs go first */
+ MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
+ MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
+ MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
+ MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
+ MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
+ MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
+ MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");
+
+ /* 32 bit ISAs */
+ MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
+ MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
+ MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
+ MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
+ MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");
+
+ /* Fallback */
+ return "mips";
+}
+#undef MATCH_PLATFORM_INSN
+
static inline void init_thread(struct target_pt_regs *regs,
struct image_info *infop)
{
@@ -1043,8 +1469,6 @@ static uint32_t get_elf_hwcap(void)
#ifdef TARGET_MICROBLAZE
-#define ELF_START_MMAP 0x80000000
-
#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
#define ELF_CLASS ELFCLASS32
@@ -1083,67 +1507,8 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env
#endif /* TARGET_MICROBLAZE */
-#ifdef TARGET_NIOS2
-
-#define ELF_START_MMAP 0x80000000
-
-#define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)
-
-#define ELF_CLASS ELFCLASS32
-#define ELF_ARCH EM_ALTERA_NIOS2
-
-static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
-{
- regs->ea = infop->entry;
- regs->sp = infop->start_stack;
- regs->estatus = 0x3;
-}
-
-#define ELF_EXEC_PAGESIZE 4096
-
-#define USE_ELF_CORE_DUMP
-#define ELF_NREG 49
-typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
-
-/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
-static void elf_core_copy_regs(target_elf_gregset_t *regs,
- const CPUNios2State *env)
-{
- int i;
-
- (*regs)[0] = -1;
- for (i = 1; i < 8; i++) /* r0-r7 */
- (*regs)[i] = tswapreg(env->regs[i + 7]);
-
- for (i = 8; i < 16; i++) /* r8-r15 */
- (*regs)[i] = tswapreg(env->regs[i - 8]);
-
- for (i = 16; i < 24; i++) /* r16-r23 */
- (*regs)[i] = tswapreg(env->regs[i + 7]);
- (*regs)[24] = -1; /* R_ET */
- (*regs)[25] = -1; /* R_BT */
- (*regs)[26] = tswapreg(env->regs[R_GP]);
- (*regs)[27] = tswapreg(env->regs[R_SP]);
- (*regs)[28] = tswapreg(env->regs[R_FP]);
- (*regs)[29] = tswapreg(env->regs[R_EA]);
- (*regs)[30] = -1; /* R_SSTATUS */
- (*regs)[31] = tswapreg(env->regs[R_RA]);
-
- (*regs)[32] = tswapreg(env->regs[R_PC]);
-
- (*regs)[33] = -1; /* R_STATUS */
- (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);
-
- for (i = 35; i < 49; i++) /* ... */
- (*regs)[i] = -1;
-}
-
-#endif /* TARGET_NIOS2 */
-
#ifdef TARGET_OPENRISC
-#define ELF_START_MMAP 0x08000000
-
#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
@@ -1180,8 +1545,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs,
#ifdef TARGET_SH4
-#define ELF_START_MMAP 0x80000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_SH
@@ -1262,8 +1625,6 @@ static uint32_t get_elf_hwcap(void)
#ifdef TARGET_CRIS
-#define ELF_START_MMAP 0x80000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_CRIS
@@ -1279,8 +1640,6 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_M68K
-#define ELF_START_MMAP 0x80000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_68K
@@ -1330,8 +1689,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *e
#ifdef TARGET_ALPHA
-#define ELF_START_MMAP (0x30000000000ULL)
-
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_ALPHA
@@ -1349,8 +1706,6 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_S390X
-#define ELF_START_MMAP (0x20000000000ULL)
-
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
@@ -1362,7 +1717,7 @@ static inline void init_thread(struct target_pt_regs *regs,
#define GET_FEATURE(_feat, _hwcap) \
do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)
-static uint32_t get_elf_hwcap(void)
+uint32_t get_elf_hwcap(void)
{
/*
* Let's assume we always have esan3 and zarch.
@@ -1380,14 +1735,47 @@ static uint32_t get_elf_hwcap(void)
}
GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT);
+ GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2);
return hwcap;
}
+const char *elf_hwcap_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [HWCAP_S390_NR_ESAN3] = "esan3",
+ [HWCAP_S390_NR_ZARCH] = "zarch",
+ [HWCAP_S390_NR_STFLE] = "stfle",
+ [HWCAP_S390_NR_MSA] = "msa",
+ [HWCAP_S390_NR_LDISP] = "ldisp",
+ [HWCAP_S390_NR_EIMM] = "eimm",
+ [HWCAP_S390_NR_DFP] = "dfp",
+ [HWCAP_S390_NR_HPAGE] = "edat",
+ [HWCAP_S390_NR_ETF3EH] = "etf3eh",
+ [HWCAP_S390_NR_HIGH_GPRS] = "highgprs",
+ [HWCAP_S390_NR_TE] = "te",
+ [HWCAP_S390_NR_VXRS] = "vx",
+ [HWCAP_S390_NR_VXRS_BCD] = "vxd",
+ [HWCAP_S390_NR_VXRS_EXT] = "vxe",
+ [HWCAP_S390_NR_GS] = "gs",
+ [HWCAP_S390_NR_VXRS_EXT2] = "vxe2",
+ [HWCAP_S390_NR_VXRS_PDE] = "vxp",
+ [HWCAP_S390_NR_SORT] = "sort",
+ [HWCAP_S390_NR_DFLT] = "dflt",
+ [HWCAP_S390_NR_NNPA] = "nnpa",
+ [HWCAP_S390_NR_PCI_MIO] = "pcimio",
+ [HWCAP_S390_NR_SIE] = "sie",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
regs->psw.addr = infop->entry;
- regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
+ regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+ PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \
+ PSW_MASK_32;
regs->gprs[15] = infop->start_stack;
}
@@ -1424,17 +1812,20 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs,
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
+#define VDSO_HEADER "vdso.c.inc"
+
#endif /* TARGET_S390X */
#ifdef TARGET_RISCV
-#define ELF_START_MMAP 0x80000000
#define ELF_ARCH EM_RISCV
#ifdef TARGET_RISCV32
#define ELF_CLASS ELFCLASS32
+#define VDSO_HEADER "vdso-32.c.inc"
#else
#define ELF_CLASS ELFCLASS64
+#define VDSO_HEADER "vdso-64.c.inc"
#endif
#define ELF_HWCAP get_elf_hwcap()
@@ -1444,9 +1835,10 @@ static uint32_t get_elf_hwcap(void)
#define MISA_BIT(EXT) (1 << (EXT - 'A'))
RISCVCPU *cpu = RISCV_CPU(thread_cpu);
uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A')
- | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C');
+ | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C')
+ | MISA_BIT('V');
- return cpu->env.misa & mask;
+ return cpu->env.misa_ext & mask;
#undef MISA_BIT
}
@@ -1463,32 +1855,63 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_HPPA
-#define ELF_START_MMAP 0x80000000
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_PARISC
#define ELF_PLATFORM "PARISC"
#define STACK_GROWS_DOWN 0
#define STACK_ALIGNMENT 64
+#define VDSO_HEADER "vdso.c.inc"
+
static inline void init_thread(struct target_pt_regs *regs,
struct image_info *infop)
{
regs->iaoq[0] = infop->entry;
regs->iaoq[1] = infop->entry + 4;
regs->gr[23] = 0;
- regs->gr[24] = infop->arg_start;
- regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong);
+ regs->gr[24] = infop->argv;
+ regs->gr[25] = infop->argc;
/* The top-of-stack contains a linkage buffer. */
regs->gr[30] = infop->start_stack + 64;
regs->gr[31] = infop->entry;
}
+#define LO_COMMPAGE 0
+
+static bool init_guest_commpage(void)
+{
+ /* If reserved_va, then we have already mapped 0 page on the host. */
+ if (!reserved_va) {
+ void *want, *addr;
+
+ want = g2h_untagged(LO_COMMPAGE);
+ addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE,
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0);
+ if (addr == MAP_FAILED) {
+ perror("Allocating guest commpage");
+ exit(EXIT_FAILURE);
+ }
+ if (addr != want) {
+ return false;
+ }
+ }
+
+ /*
+ * On Linux, page zero is normally marked execute only + gateway.
+ * Normal read or write is supposed to fail (thus PROT_NONE above),
+ * but specific offsets have kernel code mapped to raise permissions
+ * and implement syscalls. Here, simply mark the page executable.
+ * Special case the entry points during translation (see do_page_zero).
+ */
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
+ PAGE_EXEC | PAGE_VALID);
+ return true;
+}
+
#endif /* TARGET_HPPA */
#ifdef TARGET_XTENSA
-#define ELF_START_MMAP 0x20000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
@@ -1499,6 +1922,15 @@ static inline void init_thread(struct target_pt_regs *regs,
regs->windowstart = 1;
regs->areg[1] = infop->start_stack;
regs->pc = infop->entry;
+ if (info_is_fdpic(infop)) {
+ regs->areg[4] = infop->loadmap_addr;
+ regs->areg[5] = infop->interpreter_loadmap_addr;
+ if (infop->interpreter_loadmap_addr) {
+ regs->areg[6] = infop->interpreter_pt_dynamic_addr;
+ } else {
+ regs->areg[6] = infop->pt_dynamic_addr;
+ }
+ }
}
/* See linux kernel: arch/xtensa/include/asm/elf.h. */
@@ -1545,8 +1977,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs,
#ifdef TARGET_HEXAGON
-#define ELF_START_MMAP 0x20000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_HEXAGON
@@ -1559,6 +1989,10 @@ static inline void init_thread(struct target_pt_regs *regs,
#endif /* TARGET_HEXAGON */
+#ifndef ELF_BASE_PLATFORM
+#define ELF_BASE_PLATFORM (NULL)
+#endif
+
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif
@@ -1594,6 +2028,10 @@ static inline void init_thread(struct target_pt_regs *regs,
#define bswaptls(ptr) bswap32s(ptr)
#endif
+#ifndef EXSTACK_DEFAULT
+#define EXSTACK_DEFAULT false
+#endif
+
#include "elf.h"
/* We must delay the following stanzas until after "elf.h". */
@@ -1648,15 +2086,6 @@ struct exec
#define ZMAGIC 0413
#define QMAGIC 0314
-/* Necessary parameters */
-#define TARGET_ELF_EXEC_PAGESIZE \
- (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
- TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
-#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
-#define TARGET_ELF_PAGESTART(_v) ((_v) & \
- ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
-#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
-
#define DLINFO_ITEMS 16
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
@@ -1745,7 +2174,8 @@ static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
-static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
+static void load_symbols(struct elfhdr *hdr, const ImageSource *src,
+ abi_ulong load_bias);
/* Verify the portions of EHDR within E_IDENT for the target.
This can be performed before bswapping the entire header. */
@@ -1869,17 +2299,28 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
struct image_info *info)
{
abi_ulong size, error, guard;
+ int prot;
size = guest_stack_size;
if (size < STACK_LOWER_LIMIT) {
size = STACK_LOWER_LIMIT;
}
- guard = TARGET_PAGE_SIZE;
- if (guard < qemu_real_host_page_size) {
- guard = qemu_real_host_page_size;
+
+ if (STACK_GROWS_DOWN) {
+ guard = TARGET_PAGE_SIZE;
+ if (guard < qemu_real_host_page_size()) {
+ guard = qemu_real_host_page_size();
+ }
+ } else {
+ /* no guard page for hppa target where stack grows upwards. */
+ guard = 0;
}
- error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
+ prot = PROT_READ | PROT_WRITE;
+ if (info->exec_stack) {
+ prot |= PROT_EXEC;
+ }
+ error = target_mmap(0, size + guard, prot,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (error == -1) {
perror("mmap stack");
@@ -1892,59 +2333,81 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
info->stack_limit = error + guard;
return info->stack_limit + size - sizeof(void *);
} else {
- target_mprotect(error + size, guard, PROT_NONE);
info->stack_limit = error + size;
return error;
}
}
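
EXSTACK_DEFAULT and info->exec_stack together decide whether the guest stack is mapped with PROT_EXEC. How exec_stack gets set is outside this hunk; a hypothetical sketch of the usual PT_GNU_STACK rule (honor the segment's PF_X flag, fall back to the per-target default) could look like:

    /* Hypothetical helper, not taken from this patch: scan the program
     * headers for PT_GNU_STACK and honor PF_X, else use EXSTACK_DEFAULT. */
    static bool elf_stack_is_exec(const struct elf_phdr *phdr, int phnum)
    {
        for (int i = 0; i < phnum; i++) {
            if (phdr[i].p_type == PT_GNU_STACK) {
                return (phdr[i].p_flags & PF_X) != 0;
            }
        }
        return EXSTACK_DEFAULT;
    }
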
-/* Map and zero the bss. We need to explicitly zero any fractional pages
- after the data section (i.e. bss). */
-static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
+/**
+ * zero_bss:
+ *
+ * Map and zero the bss. We need to explicitly zero any fractional pages
+ * after the data section (i.e. bss). Return false on mapping failure.
+ */
+static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss,
+ int prot, Error **errp)
{
- uintptr_t host_start, host_map_start, host_end;
-
- last_bss = TARGET_PAGE_ALIGN(last_bss);
-
- /* ??? There is confusion between qemu_real_host_page_size and
- qemu_host_page_size here and elsewhere in target_mmap, which
- may lead to the end of the data section mapping from the file
- not being mapped. At least there was an explicit test and
- comment for that here, suggesting that "the file size must
- be known". The comment probably pre-dates the introduction
- of the fstat system call in target_mmap which does in fact
- find out the size. What isn't clear is if the workaround
- here is still actually needed. For now, continue with it,
- but merge it with the "normal" mmap that would allocate the bss. */
-
- host_start = (uintptr_t) g2h_untagged(elf_bss);
- host_end = (uintptr_t) g2h_untagged(last_bss);
- host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
-
- if (host_map_start < host_end) {
- void *p = mmap((void *)host_map_start, host_end - host_map_start,
- prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (p == MAP_FAILED) {
- perror("cannot mmap brk");
- exit(-1);
- }
+ abi_ulong align_bss;
+
+ /* We only expect writable bss; the code segment shouldn't need this. */
+ if (!(prot & PROT_WRITE)) {
+ error_setg(errp, "PT_LOAD with non-writable bss");
+ return false;
}
- /* Ensure that the bss page(s) are valid */
- if ((page_get_flags(last_bss-1) & prot) != prot) {
- page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
+ align_bss = TARGET_PAGE_ALIGN(start_bss);
+ end_bss = TARGET_PAGE_ALIGN(end_bss);
+
+ if (start_bss < align_bss) {
+ int flags = page_get_flags(start_bss);
+
+ if (!(flags & PAGE_BITS)) {
+ /*
+ * The whole address space of the executable was reserved
+ * at the start, therefore all pages will be VALID.
+ * But assuming there are no PROT_NONE PT_LOAD segments,
+ * a PROT_NONE page means no data all bss, and we can
+ * simply extend the new anon mapping back to the start
+ * of the page of bss.
+ */
+ align_bss -= TARGET_PAGE_SIZE;
+ } else {
+ /*
+ * The start of the bss shares a page with something.
+ * The only thing that we expect is the data section,
+ * which would already be marked writable.
+ * Overlapping the RX code segment seems malformed.
+ */
+ if (!(flags & PAGE_WRITE)) {
+ error_setg(errp, "PT_LOAD with bss overlapping "
+ "non-writable page");
+ return false;
+ }
+
+ /* The page is already mapped and writable. */
+ memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
+ }
}
- if (host_start < host_map_start) {
- memset((void *)host_start, 0, host_map_start - host_start);
+ if (align_bss < end_bss &&
+ target_mmap(align_bss, end_bss - align_bss, prot,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
+ error_setg_errno(errp, errno, "Error mapping bss");
+ return false;
}
+ return true;
}
-#ifdef TARGET_ARM
+#if defined(TARGET_ARM)
static int elf_is_fdpic(struct elfhdr *exec)
{
return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
}
+#elif defined(TARGET_XTENSA)
+static int elf_is_fdpic(struct elfhdr *exec)
+{
+ return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
+}
#else
/* Default implementation, always false. */
static int elf_is_fdpic(struct elfhdr *exec)
@@ -1981,7 +2444,8 @@ static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong s
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
struct elfhdr *exec,
struct image_info *info,
- struct image_info *interp_info)
+ struct image_info *interp_info,
+ struct image_info *vdso_info)
{
abi_ulong sp;
abi_ulong u_argc, u_argv, u_envp, u_auxv;
@@ -1989,8 +2453,8 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
int i;
abi_ulong u_rand_bytes;
uint8_t k_rand_bytes[16];
- abi_ulong u_platform;
- const char *k_platform;
+ abi_ulong u_platform, u_base_platform;
+ const char *k_platform, *k_base_platform;
const int n = sizeof(elf_addr_t);
sp = p;
@@ -2012,6 +2476,22 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
}
}
+ u_base_platform = 0;
+ k_base_platform = ELF_BASE_PLATFORM;
+ if (k_base_platform) {
+ size_t len = strlen(k_base_platform) + 1;
+ if (STACK_GROWS_DOWN) {
+ sp -= (len + n - 1) & ~(n - 1);
+ u_base_platform = sp;
+ /* FIXME - check return value of memcpy_to_target() for failure */
+ memcpy_to_target(sp, k_base_platform, len);
+ } else {
+ memcpy_to_target(sp, k_base_platform, len);
+ u_base_platform = sp;
+ sp += len + 1;
+ }
+ }
+
u_platform = 0;
k_platform = ELF_PLATFORM;
if (k_platform) {
@@ -2053,8 +2533,15 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
}
size = (DLINFO_ITEMS + 1) * 2;
- if (k_platform)
+ if (k_base_platform) {
+ size += 2;
+ }
+ if (k_platform) {
+ size += 2;
+ }
+ if (vdso_info) {
size += 2;
+ }
#ifdef DLINFO_ARCH_ITEMS
size += DLINFO_ARCH_ITEMS * 2;
#endif
@@ -2080,8 +2567,10 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
u_envp = u_argv + (argc + 1) * n;
u_auxv = u_envp + (envc + 1) * n;
info->saved_auxv = u_auxv;
- info->arg_start = u_argv;
- info->arg_end = u_argv + argc * n;
+ info->argc = argc;
+ info->envc = envc;
+ info->argv = u_argv;
+ info->envp = u_envp;
/* This is correct because Linux defines
* elf_addr_t as Elf32_Off / Elf64_Off
@@ -2104,13 +2593,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
- if ((info->alignment & ~qemu_host_page_mask) != 0) {
- /* Target doesn't support host page size alignment */
- NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
- } else {
- NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
- qemu_host_page_size)));
- }
+ NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
NEW_AUX_ENT(AT_ENTRY, info->entry);
@@ -2128,9 +2611,15 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
#endif
+ if (u_base_platform) {
+ NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform);
+ }
if (u_platform) {
NEW_AUX_ENT(AT_PLATFORM, u_platform);
}
+ if (vdso_info) {
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_info->load_addr);
+ }
NEW_AUX_ENT (AT_NULL, 0);
#undef NEW_AUX_ENT
@@ -2160,320 +2649,367 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
return sp;
}
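
From the guest's side the new entries are visible through the regular auxv interface. A small guest-side check, assuming a libc with getauxval (AT_BASE_PLATFORM reads as empty on targets without ELF_BASE_PLATFORM):

    #include <stdio.h>
    #include <elf.h>
    #include <sys/auxv.h>

    int main(void)
    {
        unsigned long plat = getauxval(AT_PLATFORM);
        unsigned long base = getauxval(AT_BASE_PLATFORM);

        printf("AT_PLATFORM      = %s\n", plat ? (const char *)plat : "(none)");
        printf("AT_BASE_PLATFORM = %s\n", base ? (const char *)base : "(none)");
        printf("AT_PAGESZ        = %lu\n", getauxval(AT_PAGESZ));
        return 0;
    }
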
-#ifndef ARM_COMMPAGE
-#define ARM_COMMPAGE 0
+#if defined(HI_COMMPAGE)
+#define LO_COMMPAGE -1
+#elif defined(LO_COMMPAGE)
+#define HI_COMMPAGE 0
+#else
+#define HI_COMMPAGE 0
+#define LO_COMMPAGE -1
+#ifndef INIT_GUEST_COMMPAGE
#define init_guest_commpage() true
#endif
+#endif
-static void pgb_fail_in_use(const char *image_name)
+/**
+ * pgb_try_mmap:
+ * @addr: host start address
+ * @addr_last: host last address
+ * @keep: do not unmap the probe region
+ *
+ * Return 1 if [@addr, @addr_last] is not mapped in the host,
+ * return 0 if it is not available to map, and -1 on mmap error.
+ * If @keep, the region is left mapped on success, otherwise unmapped.
+ */
+static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep)
{
- error_report("%s: requires virtual address space that is in use "
- "(omit the -B option or choose a different value)",
- image_name);
- exit(EXIT_FAILURE);
+ size_t size = addr_last - addr + 1;
+ void *p = mmap((void *)addr, size, PROT_NONE,
+ MAP_ANONYMOUS | MAP_PRIVATE |
+ MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0);
+ int ret;
+
+ if (p == MAP_FAILED) {
+ return errno == EEXIST ? 0 : -1;
+ }
+ ret = p == (void *)addr;
+ if (!keep || !ret) {
+ munmap(p, size);
+ }
+ return ret;
}
-static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
- abi_ulong guest_hiaddr, long align)
+/**
+ * pgb_try_mmap_skip_brk:
+ * @addr: host start address
+ * @addr_last: host last address
+ * @brk: host brk
+ * @keep: do not unmap the probe region
+ *
+ * Like pgb_try_mmap, but additionally reserve some memory following brk.
+ */
+static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last,
+ uintptr_t brk, bool keep)
{
- const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
- void *addr, *test;
+ uintptr_t brk_last = brk + 16 * MiB - 1;
- if (!QEMU_IS_ALIGNED(guest_base, align)) {
- fprintf(stderr, "Requested guest base %p does not satisfy "
- "host minimum alignment (0x%lx)\n",
- (void *)guest_base, align);
- exit(EXIT_FAILURE);
+ /* Do not map anything close to the host brk. */
+ if (addr <= brk_last && brk <= addr_last) {
+ return 0;
}
+ return pgb_try_mmap(addr, addr_last, keep);
+}
- /* Sanity check the guest binary. */
- if (reserved_va) {
- if (guest_hiaddr > reserved_va) {
- error_report("%s: requires more than reserved virtual "
- "address space (0x%" PRIx64 " > 0x%lx)",
- image_name, (uint64_t)guest_hiaddr, reserved_va);
- exit(EXIT_FAILURE);
+/**
+ * pgb_try_mmap_set:
+ * @ga: set of guest addrs
+ * @base: guest_base
+ * @brk: host brk
+ *
+ * Return true if all @ga can be mapped by the host at @base.
+ * On success, retain the mapping at index 0 for reserved_va.
+ */
+
+typedef struct PGBAddrs {
+ uintptr_t bounds[3][2]; /* start/last pairs */
+ int nbounds;
+} PGBAddrs;
+
+static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk)
+{
+ for (int i = ga->nbounds - 1; i >= 0; --i) {
+ if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base,
+ ga->bounds[i][1] + base,
+ brk, i == 0 && reserved_va) <= 0) {
+ return false;
}
+ }
+ return true;
+}
+
+/**
+ * pgb_addr_set:
+ * @ga: output set of guest addrs
+ * @guest_loaddr: guest image low address
+ * @guest_hiaddr: guest image high address
+ * @try_identity: create for identity mapping
+ *
+ * Fill in @ga with the image, COMMPAGE and NULL page.
+ */
+static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr,
+ abi_ulong guest_hiaddr, bool try_identity)
+{
+ int n;
+
+ /*
+ * With a low commpage, or a guest mapped very low,
+ * we may not be able to use the identity map.
+ */
+ if (try_identity) {
+ if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) {
+ return false;
+ }
+ if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) {
+ return false;
+ }
+ }
+
+ memset(ga, 0, sizeof(*ga));
+ n = 0;
+
+ if (reserved_va) {
+ ga->bounds[n][0] = try_identity ? mmap_min_addr : 0;
+ ga->bounds[n][1] = reserved_va;
+ n++;
+ /* LO_COMMPAGE and NULL handled by reserving from 0. */
} else {
-#if HOST_LONG_BITS < TARGET_ABI_BITS
- if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
- error_report("%s: requires more virtual address space "
- "than the host can provide (0x%" PRIx64 ")",
- image_name, (uint64_t)guest_hiaddr - guest_base);
- exit(EXIT_FAILURE);
+ /* Add any LO_COMMPAGE or NULL page. */
+ if (LO_COMMPAGE != -1) {
+ ga->bounds[n][0] = 0;
+ ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1;
+ n++;
+ } else if (!try_identity) {
+ ga->bounds[n][0] = 0;
+ ga->bounds[n][1] = TARGET_PAGE_SIZE - 1;
+ n++;
+ }
+
+ /* Add the guest image for ET_EXEC. */
+ if (guest_loaddr) {
+ ga->bounds[n][0] = guest_loaddr;
+ ga->bounds[n][1] = guest_hiaddr;
+ n++;
}
-#endif
}
/*
- * Expand the allocation to the entire reserved_va.
- * Exclude the mmap_min_addr hole.
+ * Temporarily disable
+ * "comparison is always false due to limited range of data type"
+     * due to comparison between unsigned and (possibly) 0.
*/
- if (reserved_va) {
- guest_loaddr = (guest_base >= mmap_min_addr ? 0
- : mmap_min_addr - guest_base);
- guest_hiaddr = reserved_va;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+
+ /* Add any HI_COMMPAGE not covered by reserved_va. */
+ if (reserved_va < HI_COMMPAGE) {
+ ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask();
+ ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1;
+ n++;
+ }
+
+#pragma GCC diagnostic pop
+
+ ga->nbounds = n;
+ return true;
+}
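To make the bounds set concrete, the following illustrates what pgb_addr_set() produces in two common configurations (an explanatory aside inferred from the code above, not part of the patch):

/*
 * With reserved_va: normally a single range spanning the whole reserved
 * region (the NULL page and any LO_COMMPAGE fall inside it), plus a
 * HI_COMMPAGE range only if the commpage lies above reserved_va:
 *
 *     ga->bounds[0] = { 0 (mmap_min_addr for identity), reserved_va };
 *     ga->nbounds   = 1;
 *
 * Without reserved_va, an ET_EXEC image on a target with a high commpage
 * needs up to three disjoint ranges, all of which must be free at the
 * chosen guest_base:
 *
 *     ga->bounds[0] = { 0, TARGET_PAGE_SIZE - 1 };             NULL page
 *     ga->bounds[1] = { guest_loaddr, guest_hiaddr };          main image
 *     ga->bounds[2] = { HI_COMMPAGE & host page mask,
 *                       HI_COMMPAGE + TARGET_PAGE_SIZE - 1 };  commpage
 *     ga->nbounds   = 3;
 */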
+
+static void pgb_fail_in_use(const char *image_name)
+{
+ error_report("%s: requires virtual address space that is in use "
+ "(omit the -B option or choose a different value)",
+ image_name);
+ exit(EXIT_FAILURE);
+}
+
+static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr,
+ uintptr_t guest_hiaddr, uintptr_t align)
+{
+ PGBAddrs ga;
+ uintptr_t brk = (uintptr_t)sbrk(0);
+
+ if (!QEMU_IS_ALIGNED(guest_base, align)) {
+ fprintf(stderr, "Requested guest base %p does not satisfy "
+ "host minimum alignment (0x%" PRIxPTR ")\n",
+ (void *)guest_base, align);
+ exit(EXIT_FAILURE);
}
- /* Reserve the address space for the binary, or reserved_va. */
- test = g2h_untagged(guest_loaddr);
- addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
- if (test != addr) {
+ if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base)
+ || !pgb_try_mmap_set(&ga, guest_base, brk)) {
pgb_fail_in_use(image_name);
}
}
/**
- * pgd_find_hole_fallback: potential mmap address
- * @guest_size: size of available space
- * @brk: location of break
- * @align: memory alignment
+ * pgb_find_fallback:
*
- * This is a fallback method for finding a hole in the host address
- * space if we don't have the benefit of being able to access
- * /proc/self/map. It can potentially take a very long time as we can
- * only dumbly iterate up the host address space seeing if the
- * allocation would work.
+ * This is a fallback method for finding holes in the host address space
+ * if we don't have the benefit of being able to access /proc/self/maps.
+ * It can potentially take a very long time as we can only dumbly iterate
+ * up the host address space seeing if the allocation would work.
*/
-static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk,
- long align, uintptr_t offset)
+static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align,
+ uintptr_t brk)
{
- uintptr_t base;
+ /* TODO: come up with a better estimate of how much to skip. */
+ uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB;
- /* Start (aligned) at the bottom and work our way up */
- base = ROUND_UP(mmap_min_addr, align);
-
- while (true) {
- uintptr_t align_start, end;
- align_start = ROUND_UP(base, align);
- end = align_start + guest_size + offset;
-
- /* if brk is anywhere in the range give ourselves some room to grow. */
- if (align_start <= brk && brk < end) {
- base = brk + (16 * MiB);
- continue;
- } else if (align_start + guest_size < align_start) {
- /* we have run out of space */
+ for (uintptr_t base = skip; ; base += skip) {
+ base = ROUND_UP(base, align);
+ if (pgb_try_mmap_set(ga, base, brk)) {
+ return base;
+ }
+ if (base >= -skip) {
return -1;
- } else {
- int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE |
- MAP_FIXED_NOREPLACE;
- void * mmap_start = mmap((void *) align_start, guest_size,
- PROT_NONE, flags, -1, 0);
- if (mmap_start != MAP_FAILED) {
- munmap(mmap_start, guest_size);
- if (mmap_start == (void *) align_start) {
- return (uintptr_t) mmap_start + offset;
- }
- }
- base += qemu_host_page_size;
}
}
}
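As a rough cost estimate for this scan under the skip sizes chosen above: on a 32-bit host the 1 MiB step means at most 4 GiB / 1 MiB = 4096 candidate bases are probed before base >= -skip detects that the next step would wrap, while on a 64-bit host the 1 GiB step usually finds a free slot within a handful of probes, although the theoretical worst case is much larger — which is what the TODO above is about.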
-/* Return value for guest_base, or -1 if no hole found. */
-static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
- long align, uintptr_t offset)
+static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base,
+ IntervalTreeRoot *root)
{
- GSList *maps, *iter;
- uintptr_t this_start, this_end, next_start, brk;
- intptr_t ret = -1;
-
- assert(QEMU_IS_ALIGNED(guest_loaddr, align));
-
- maps = read_self_maps();
-
- /* Read brk after we've read the maps, which will malloc. */
- brk = (uintptr_t)sbrk(0);
+ for (int i = ga->nbounds - 1; i >= 0; --i) {
+ uintptr_t s = base + ga->bounds[i][0];
+ uintptr_t l = base + ga->bounds[i][1];
+ IntervalTreeNode *n;
+
+ if (l < s) {
+ /* Wraparound. Skip to advance S to mmap_min_addr. */
+ return mmap_min_addr - s;
+ }
- if (!maps) {
- ret = pgd_find_hole_fallback(guest_size, brk, align, offset);
- return ret == -1 ? -1 : ret - guest_loaddr;
+ n = interval_tree_iter_first(root, s, l);
+ if (n != NULL) {
+ /* Conflict. Skip to advance S to LAST + 1. */
+ return n->last - s + 1;
+ }
}
+ return 0; /* success */
+}
- /* The first hole is before the first map entry. */
- this_start = mmap_min_addr;
-
- for (iter = maps; iter;
- this_start = next_start, iter = g_slist_next(iter)) {
- uintptr_t align_start, hole_size;
-
- this_end = ((MapInfo *)iter->data)->start;
- next_start = ((MapInfo *)iter->data)->end;
- align_start = ROUND_UP(this_start + offset, align);
+static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root,
+ uintptr_t align, uintptr_t brk)
+{
+ uintptr_t last = mmap_min_addr;
+ uintptr_t base, skip;
- /* Skip holes that are too small. */
- if (align_start >= this_end) {
- continue;
- }
- hole_size = this_end - align_start;
- if (hole_size < guest_size) {
- continue;
+ while (true) {
+ base = ROUND_UP(last, align);
+ if (base < last) {
+ return -1;
}
- /* If this hole contains brk, give ourselves some room to grow. */
- if (this_start <= brk && brk < this_end) {
- hole_size -= guest_size;
- if (sizeof(uintptr_t) == 8 && hole_size >= 1 * GiB) {
- align_start += 1 * GiB;
- } else if (hole_size >= 16 * MiB) {
- align_start += 16 * MiB;
- } else {
- align_start = (this_end - guest_size) & -align;
- if (align_start < this_start) {
- continue;
- }
- }
+ skip = pgb_try_itree(ga, base, root);
+ if (skip == 0) {
+ break;
}
- /* Record the lowest successful match. */
- if (ret < 0) {
- ret = align_start - guest_loaddr;
- }
- /* If this hole contains the identity map, select it. */
- if (align_start <= guest_loaddr &&
- guest_loaddr + guest_size <= this_end) {
- ret = 0;
- }
- /* If this hole ends above the identity map, stop looking. */
- if (this_end >= guest_loaddr) {
- break;
+ last = base + skip;
+ if (last < base) {
+ return -1;
}
}
- free_self_maps(maps);
- return ret;
+ /*
+ * We've chosen 'base' based on holes in the interval tree,
+ * but we don't yet know if it is a valid host address.
+ * Because it is the first matching hole, if the host addresses
+ * are invalid we know there are no further matches.
+ */
+ return pgb_try_mmap_set(ga, base, brk) ? base : -1;
}
-static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
- abi_ulong orig_hiaddr, long align)
+static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr,
+ uintptr_t guest_hiaddr, uintptr_t align)
{
- uintptr_t loaddr = orig_loaddr;
- uintptr_t hiaddr = orig_hiaddr;
- uintptr_t offset = 0;
- uintptr_t addr;
-
- if (hiaddr != orig_hiaddr) {
- error_report("%s: requires virtual address space that the "
- "host cannot provide (0x%" PRIx64 ")",
- image_name, (uint64_t)orig_hiaddr);
- exit(EXIT_FAILURE);
- }
-
- loaddr &= -align;
- if (ARM_COMMPAGE) {
- /*
- * Extend the allocation to include the commpage.
- * For a 64-bit host, this is just 4GiB; for a 32-bit host we
- * need to ensure there is space bellow the guest_base so we
- * can map the commpage in the place needed when the address
- * arithmetic wraps around.
- */
- if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
- hiaddr = (uintptr_t) 4 << 30;
- } else {
- offset = -(ARM_COMMPAGE & -align);
+ IntervalTreeRoot *root;
+ uintptr_t brk, ret;
+ PGBAddrs ga;
+
+ /* Try the identity map first. */
+ if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) {
+ brk = (uintptr_t)sbrk(0);
+ if (pgb_try_mmap_set(&ga, 0, brk)) {
+ guest_base = 0;
+ return;
}
}
- addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
- if (addr == -1) {
- /*
- * If ARM_COMMPAGE, there *might* be a non-consecutive allocation
- * that can satisfy both. But as the normal arm32 link base address
- * is ~32k, and we extend down to include the commpage, making the
- * overhead only ~96k, this is unlikely.
- */
- error_report("%s: Unable to allocate %#zx bytes of "
- "virtual address space", image_name,
- (size_t)(hiaddr - loaddr));
- exit(EXIT_FAILURE);
- }
-
- guest_base = addr;
-}
-
-static void pgb_dynamic(const char *image_name, long align)
-{
/*
- * The executable is dynamic and does not require a fixed address.
- * All we need is a commpage that satisfies align.
- * If we do not need a commpage, leave guest_base == 0.
+ * Rebuild the address set for non-identity map.
+ * This differs in the mapping of the guest NULL page.
*/
- if (ARM_COMMPAGE) {
- uintptr_t addr, commpage;
+ pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false);
- /* 64-bit hosts should have used reserved_va. */
- assert(sizeof(uintptr_t) == 4);
+ root = read_self_maps();
+ /* Read brk after we've read the maps, which will malloc. */
+ brk = (uintptr_t)sbrk(0);
+
+ if (!root) {
+ ret = pgb_find_fallback(&ga, align, brk);
+ } else {
/*
- * By putting the commpage at the first hole, that puts guest_base
- * just above that, and maximises the positive guest addresses.
+ * Reserve the area close to the host brk.
+ * This will be freed with the rest of the tree.
*/
- commpage = ARM_COMMPAGE & -align;
- addr = pgb_find_hole(commpage, -commpage, align, 0);
- assert(addr != -1);
- guest_base = addr;
- }
-}
+ IntervalTreeNode *b = g_new0(IntervalTreeNode, 1);
+ b->start = brk;
+ b->last = brk + 16 * MiB - 1;
+ interval_tree_insert(b, root);
-static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
- abi_ulong guest_hiaddr, long align)
-{
- int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
- void *addr, *test;
-
- if (guest_hiaddr > reserved_va) {
- error_report("%s: requires more than reserved virtual "
- "address space (0x%" PRIx64 " > 0x%lx)",
- image_name, (uint64_t)guest_hiaddr, reserved_va);
- exit(EXIT_FAILURE);
+ ret = pgb_find_itree(&ga, root, align, brk);
+ free_self_maps(root);
}
- /* Widen the "image" to the entire reserved address space. */
- pgb_static(image_name, 0, reserved_va, align);
+ if (ret == -1) {
+ int w = TARGET_LONG_BITS / 4;
- /* osdep.h defines this as 0 if it's missing */
- flags |= MAP_FIXED_NOREPLACE;
+ error_report("%s: Unable to find a guest_base to satisfy all "
+ "guest address mapping requirements", image_name);
- /* Reserve the memory on the host. */
- assert(guest_base != 0);
- test = g2h_untagged(0);
- addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
- if (addr == MAP_FAILED || addr != test) {
- error_report("Unable to reserve 0x%lx bytes of virtual address "
- "space at %p (%s) for use as guest address space (check your"
- "virtual memory ulimit setting, min_mmap_addr or reserve less "
- "using -R option)", reserved_va, test, strerror(errno));
+ for (int i = 0; i < ga.nbounds; ++i) {
+ error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n",
+ w, (uint64_t)ga.bounds[i][0],
+ w, (uint64_t)ga.bounds[i][1]);
+ }
exit(EXIT_FAILURE);
}
+ guest_base = ret;
}
void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
abi_ulong guest_hiaddr)
{
/* In order to use host shmat, we must be able to honor SHMLBA. */
- uintptr_t align = MAX(SHMLBA, qemu_host_page_size);
+ uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE);
+
+ /* Sanity check the guest binary. */
+ if (reserved_va) {
+ if (guest_hiaddr > reserved_va) {
+ error_report("%s: requires more than reserved virtual "
+ "address space (0x%" PRIx64 " > 0x%lx)",
+ image_name, (uint64_t)guest_hiaddr, reserved_va);
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ if (guest_hiaddr != (uintptr_t)guest_hiaddr) {
+ error_report("%s: requires more virtual address space "
+ "than the host can provide (0x%" PRIx64 ")",
+ image_name, (uint64_t)guest_hiaddr + 1);
+ exit(EXIT_FAILURE);
+ }
+ }
if (have_guest_base) {
- pgb_have_guest_base(image_name, guest_loaddr, guest_hiaddr, align);
- } else if (reserved_va) {
- pgb_reserved_va(image_name, guest_loaddr, guest_hiaddr, align);
- } else if (guest_loaddr) {
- pgb_static(image_name, guest_loaddr, guest_hiaddr, align);
+ pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align);
} else {
- pgb_dynamic(image_name, align);
+ pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align);
}
/* Reserve and initialize the commpage. */
if (!init_guest_commpage()) {
- /*
- * With have_guest_base, the user has selected the address and
- * we are trying to work with that. Otherwise, we have selected
- * free space and init_guest_commpage must succeeded.
- */
- assert(have_guest_base);
- pgb_fail_in_use(image_name);
+ /* We have already probed for the commpage being free. */
+ g_assert_not_reached();
}
assert(QEMU_IS_ALIGNED(guest_base, align));
@@ -2541,10 +3077,9 @@ static bool parse_elf_property(const uint32_t *data, int *off, int datasz,
}
/* Process NT_GNU_PROPERTY_TYPE_0. */
-static bool parse_elf_properties(int image_fd,
+static bool parse_elf_properties(const ImageSource *src,
struct image_info *info,
const struct elf_phdr *phdr,
- char bprm_buf[BPRM_BUF_SIZE],
Error **errp)
{
union {
@@ -2572,14 +3107,8 @@ static bool parse_elf_properties(int image_fd,
return false;
}
- if (phdr->p_offset + n <= BPRM_BUF_SIZE) {
- memcpy(&note, bprm_buf + phdr->p_offset, n);
- } else {
- ssize_t len = pread(image_fd, &note, n, phdr->p_offset);
- if (len != n) {
- error_setg_errno(errp, errno, "Error reading file header");
- return false;
- }
+ if (!imgsrc_read(&note, phdr->p_offset, n, src, errp)) {
+ return false;
}
/*
@@ -2625,29 +3154,34 @@ static bool parse_elf_properties(int image_fd,
}
}
-/* Load an ELF image into the address space.
-
- IMAGE_NAME is the filename of the image, to use in error messages.
- IMAGE_FD is the open file descriptor for the image.
-
- BPRM_BUF is a copy of the beginning of the file; this of course
- contains the elf file header at offset 0. It is assumed that this
- buffer is sufficiently aligned to present no problems to the host
- in accessing data at aligned offsets within the buffer.
-
- On return: INFO values will be filled in, as necessary or available. */
+/**
+ * load_elf_image: Load an ELF image into the address space.
+ * @image_name: the filename of the image, to use in error messages.
+ * @src: the ImageSource from which to read.
+ * @info: info collected from the loaded image.
+ * @ehdr: the ELF header, not yet bswapped.
+ * @pinterp_name: record any PT_INTERP string found.
+ *
+ * On return: @info values will be filled in, as necessary or available.
+ */
-static void load_elf_image(const char *image_name, int image_fd,
- struct image_info *info, char **pinterp_name,
- char bprm_buf[BPRM_BUF_SIZE])
+static void load_elf_image(const char *image_name, const ImageSource *src,
+ struct image_info *info, struct elfhdr *ehdr,
+ char **pinterp_name)
{
- struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
- struct elf_phdr *phdr;
+ g_autofree struct elf_phdr *phdr = NULL;
abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
- int i, retval, prot_exec;
+ int i, prot_exec;
Error *err = NULL;
- /* First of all, some simple consistency checks */
+ /*
+ * First of all, some simple consistency checks.
+ * Note that we rely on the bswapped ehdr staying in bprm_buf,
+ * for later use by load_elf_binary and create_elf_tables.
+ */
+ if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) {
+ goto exit_errmsg;
+ }
if (!elf_check_ident(ehdr)) {
error_setg(&err, "Invalid ELF image for this architecture");
goto exit_errmsg;
@@ -2658,15 +3192,11 @@ static void load_elf_image(const char *image_name, int image_fd,
goto exit_errmsg;
}
- i = ehdr->e_phnum * sizeof(struct elf_phdr);
- if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
- phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
- } else {
- phdr = (struct elf_phdr *) alloca(i);
- retval = pread(image_fd, phdr, i, ehdr->e_phoff);
- if (retval != i) {
- goto exit_read;
- }
+ phdr = imgsrc_read_alloc(ehdr->e_phoff,
+ ehdr->e_phnum * sizeof(struct elf_phdr),
+ src, &err);
+ if (phdr == NULL) {
+ goto exit_errmsg;
}
bswap_phdr(phdr, ehdr->e_phnum);
@@ -2681,14 +3211,15 @@ static void load_elf_image(const char *image_name, int image_fd,
*/
loaddr = -1, hiaddr = 0;
info->alignment = 0;
+ info->exec_stack = EXSTACK_DEFAULT;
for (i = 0; i < ehdr->e_phnum; ++i) {
struct elf_phdr *eppnt = phdr + i;
if (eppnt->p_type == PT_LOAD) {
- abi_ulong a = eppnt->p_vaddr - eppnt->p_offset;
+ abi_ulong a = eppnt->p_vaddr & TARGET_PAGE_MASK;
if (a < loaddr) {
loaddr = a;
}
- a = eppnt->p_vaddr + eppnt->p_memsz;
+ a = eppnt->p_vaddr + eppnt->p_memsz - 1;
if (a > hiaddr) {
hiaddr = a;
}
@@ -2702,17 +3233,10 @@ static void load_elf_image(const char *image_name, int image_fd,
goto exit_errmsg;
}
- interp_name = g_malloc(eppnt->p_filesz);
-
- if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
- memcpy(interp_name, bprm_buf + eppnt->p_offset,
- eppnt->p_filesz);
- } else {
- retval = pread(image_fd, interp_name, eppnt->p_filesz,
- eppnt->p_offset);
- if (retval != eppnt->p_filesz) {
- goto exit_read;
- }
+ interp_name = imgsrc_read_alloc(eppnt->p_offset, eppnt->p_filesz,
+ src, &err);
+ if (interp_name == NULL) {
+ goto exit_errmsg;
}
if (interp_name[eppnt->p_filesz - 1] != 0) {
error_setg(&err, "Invalid PT_INTERP entry");
@@ -2720,28 +3244,17 @@ static void load_elf_image(const char *image_name, int image_fd,
}
*pinterp_name = g_steal_pointer(&interp_name);
} else if (eppnt->p_type == PT_GNU_PROPERTY) {
- if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
+ if (!parse_elf_properties(src, info, eppnt, &err)) {
goto exit_errmsg;
}
+ } else if (eppnt->p_type == PT_GNU_STACK) {
+ info->exec_stack = eppnt->p_flags & PF_X;
}
}
- if (pinterp_name != NULL) {
- /*
- * This is the main executable.
- *
- * Reserve extra space for brk.
- * We hold on to this space while placing the interpreter
- * and the stack, lest they be placed immediately after
- * the data segment and block allocation from the brk.
- *
- * 16MB is chosen as "large enough" without being so large
- * as to allow the result to not fit with a 32-bit guest on
- * a 32-bit host.
- */
- info->reserve_brk = 16 * MiB;
- hiaddr += info->reserve_brk;
+ load_addr = loaddr;
+ if (pinterp_name != NULL) {
if (ehdr->e_type == ET_EXEC) {
/*
* Make sure that the low address does not conflict with
@@ -2749,31 +3262,55 @@ static void load_elf_image(const char *image_name, int image_fd,
*/
probe_guest_base(image_name, loaddr, hiaddr);
} else {
+ abi_ulong align;
+
/*
* The binary is dynamic, but we still need to
* select guest_base. In this case we pass a size.
*/
probe_guest_base(image_name, 0, hiaddr - loaddr);
+
+ /*
+ * Avoid collision with the loader by providing a different
+ * default load address.
+ */
+ load_addr += elf_et_dyn_base;
+
+ /*
+ * TODO: Better support for mmap alignment is desirable.
+ * Since we do not have complete control over the guest
+ * address space, we prefer the kernel to choose some address
+ * rather than force the use of LOAD_ADDR via MAP_FIXED.
+ * But without MAP_FIXED we cannot guarantee alignment,
+ * only suggest it.
+ */
+ align = pow2ceil(info->alignment);
+ if (align) {
+ load_addr &= -align;
+ }
}
}
/*
* Reserve address space for all of this.
*
- * In the case of ET_EXEC, we supply MAP_FIXED so that we get
- * exactly the address range that is required.
+ * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get
+ * exactly the address range that is required. Without reserved_va,
+ * the guest address space is not isolated. We have attempted to avoid
+ * conflict with the host program itself via probe_guest_base, but using
+ * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check.
*
* Otherwise this is ET_DYN, and we are searching for a location
* that can hold the memory space required. If the image is
- * pre-linked, LOADDR will be non-zero, and the kernel should
+ * pre-linked, LOAD_ADDR will be non-zero, and the kernel should
* honor that address if it happens to be free.
*
* In both cases, we will overwrite pages in this range with mappings
* from the executable.
*/
- load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
+ load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
- (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
+ (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0),
-1, 0);
if (load_addr == -1) {
goto exit_mmap;
@@ -2808,7 +3345,8 @@ static void load_elf_image(const char *image_name, int image_fd,
info->end_code = 0;
info->start_data = -1;
info->end_data = 0;
- info->brk = 0;
+ /* Usual start for brk is after all sections of the main executable. */
+ info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias);
info->elf_flags = ehdr->e_flags;
prot_exec = PROT_EXEC;
@@ -2834,7 +3372,7 @@ static void load_elf_image(const char *image_name, int image_fd,
for (i = 0; i < ehdr->e_phnum; i++) {
struct elf_phdr *eppnt = phdr + i;
if (eppnt->p_type == PT_LOAD) {
- abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
+ abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
int elf_prot = 0;
if (eppnt->p_flags & PF_R) {
@@ -2848,8 +3386,8 @@ static void load_elf_image(const char *image_name, int image_fd,
}
vaddr = load_bias + eppnt->p_vaddr;
- vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
- vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
+ vaddr_po = vaddr & ~TARGET_PAGE_MASK;
+ vaddr_ps = vaddr & TARGET_PAGE_MASK;
vaddr_ef = vaddr + eppnt->p_filesz;
vaddr_em = vaddr + eppnt->p_memsz;
@@ -2859,30 +3397,18 @@ static void load_elf_image(const char *image_name, int image_fd,
* but no backing file segment.
*/
if (eppnt->p_filesz != 0) {
- vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
- error = target_mmap(vaddr_ps, vaddr_len, elf_prot,
- MAP_PRIVATE | MAP_FIXED,
- image_fd, eppnt->p_offset - vaddr_po);
-
+ error = imgsrc_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
+ elf_prot, MAP_PRIVATE | MAP_FIXED,
+ src, eppnt->p_offset - vaddr_po);
if (error == -1) {
goto exit_mmap;
}
+ }
- /*
- * If the load segment requests extra zeros (e.g. bss), map it.
- */
- if (eppnt->p_filesz < eppnt->p_memsz) {
- zero_bss(vaddr_ef, vaddr_em, elf_prot);
- }
- } else if (eppnt->p_memsz != 0) {
- vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_memsz + vaddr_po);
- error = target_mmap(vaddr_ps, vaddr_len, elf_prot,
- MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
- -1, 0);
-
- if (error == -1) {
- goto exit_mmap;
- }
+ /* If the load segment requests extra zeros (e.g. bss), map it. */
+ if (vaddr_ef < vaddr_em &&
+ !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) {
+ goto exit_errmsg;
}
/* Find the full program boundaries. */
@@ -2902,26 +3428,14 @@ static void load_elf_image(const char *image_name, int image_fd,
info->end_data = vaddr_ef;
}
}
- if (vaddr_em > info->brk) {
- info->brk = vaddr_em;
- }
#ifdef TARGET_MIPS
} else if (eppnt->p_type == PT_MIPS_ABIFLAGS) {
Mips_elf_abiflags_v0 abiflags;
- if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) {
- error_setg(&err, "Invalid PT_MIPS_ABIFLAGS entry");
+
+ if (!imgsrc_read(&abiflags, eppnt->p_offset, sizeof(abiflags),
+ src, &err)) {
goto exit_errmsg;
}
- if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
- memcpy(&abiflags, bprm_buf + eppnt->p_offset,
- sizeof(Mips_elf_abiflags_v0));
- } else {
- retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0),
- eppnt->p_offset);
- if (retval != sizeof(Mips_elf_abiflags_v0)) {
- goto exit_read;
- }
- }
bswap_mips_abiflags(&abiflags);
info->fp_abi = abiflags.fp_abi;
#endif
@@ -2934,21 +3448,16 @@ static void load_elf_image(const char *image_name, int image_fd,
}
if (qemu_log_enabled()) {
- load_symbols(ehdr, image_fd, load_bias);
+ load_symbols(ehdr, src, load_bias);
}
+ debuginfo_report_elf(image_name, src->fd, load_bias);
+
mmap_unlock();
- close(image_fd);
+ close(src->fd);
return;
- exit_read:
- if (retval >= 0) {
- error_setg(&err, "Incomplete read of file header");
- } else {
- error_setg_errno(&err, errno, "Error reading file header");
- }
- goto exit_errmsg;
exit_mmap:
error_setg_errno(&err, errno, "Error mapping file");
goto exit_errmsg;
@@ -2960,6 +3469,8 @@ static void load_elf_image(const char *image_name, int image_fd,
static void load_elf_interp(const char *filename, struct image_info *info,
char bprm_buf[BPRM_BUF_SIZE])
{
+ struct elfhdr ehdr;
+ ImageSource src;
int fd, retval;
Error *err = NULL;
@@ -2977,18 +3488,65 @@ static void load_elf_interp(const char *filename, struct image_info *info,
exit(-1);
}
- if (retval < BPRM_BUF_SIZE) {
- memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
+ src.fd = fd;
+ src.cache = bprm_buf;
+ src.cache_size = retval;
+
+ load_elf_image(filename, &src, info, &ehdr, NULL);
+}
+
+#ifdef VDSO_HEADER
+#include VDSO_HEADER
+#define vdso_image_info() &vdso_image_info
+#else
+#define vdso_image_info() NULL
+#endif
+
+static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso)
+{
+ ImageSource src;
+ struct elfhdr ehdr;
+ abi_ulong load_bias, load_addr;
+
+ src.fd = -1;
+ src.cache = vdso->image;
+ src.cache_size = vdso->image_size;
+
+ load_elf_image("<internal-vdso>", &src, info, &ehdr, NULL);
+ load_addr = info->load_addr;
+ load_bias = info->load_bias;
+
+ /*
+ * We need to relocate the VDSO image. The one built into the kernel
+ * is built for a fixed address. The one built for QEMU is not, since
+ * that requires close control of the guest address space.
+ * We pre-processed the image to locate all of the addresses that need
+ * to be updated.
+ */
+ for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) {
+ abi_ulong *addr = g2h_untagged(load_addr + vdso->relocs[i]);
+ *addr = tswapal(tswapal(*addr) + load_bias);
}
- load_elf_image(filename, fd, info, NULL, bprm_buf);
+ /* Install signal trampolines, if present. */
+ if (vdso->sigreturn_ofs) {
+ default_sigreturn = load_addr + vdso->sigreturn_ofs;
+ }
+ if (vdso->rt_sigreturn_ofs) {
+ default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs;
+ }
+
+ /* Remove write from VDSO segment. */
+ target_mprotect(info->start_data, info->end_data - info->start_data,
+ PROT_READ | PROT_EXEC);
}
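For reference while reading load_elf_vdso(), the descriptor it consumes can be sketched as below. The layout is inferred from the accesses above; the authoritative definition is generated elsewhere in this patch, and the field types here are approximations.

typedef struct {
    const uint8_t *image;        /* prelinked ELF image embedded in QEMU */
    unsigned image_size;         /* size of that image in bytes */
    unsigned reloc_count;        /* number of entries in relocs[] */
    const unsigned *relocs;      /* image offsets of words needing +load_bias */
    unsigned sigreturn_ofs;      /* trampoline offset, 0 if absent */
    unsigned rt_sigreturn_ofs;   /* trampoline offset, 0 if absent */
} VdsoImageInfoSketch;           /* the real type is VdsoImageInfo */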
static int symfind(const void *s0, const void *s1)
{
- target_ulong addr = *(target_ulong *)s0;
struct elf_sym *sym = (struct elf_sym *)s1;
+ __typeof(sym->st_value) addr = *(uint64_t *)s0;
int result = 0;
+
if (addr < sym->st_value) {
result = -1;
} else if (addr >= sym->st_value + sym->st_size) {
@@ -2997,7 +3555,7 @@ static int symfind(const void *s0, const void *s1)
return result;
}
-static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
+static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr)
{
#if ELF_CLASS == ELFCLASS32
struct elf_sym *syms = s->disas_symtab.elf32;
@@ -3016,7 +3574,7 @@ static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
return "";
}
-/* FIXME: This should use elf_ops.h */
+/* FIXME: This should use elf_ops.h.inc */
static int symcmp(const void *s0, const void *s1)
{
struct elf_sym *sym0 = (struct elf_sym *)s0;
@@ -3027,19 +3585,20 @@ static int symcmp(const void *s0, const void *s1)
}
/* Best attempt to load symbols from this ELF object. */
-static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
+static void load_symbols(struct elfhdr *hdr, const ImageSource *src,
+ abi_ulong load_bias)
{
int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
- uint64_t segsz;
- struct elf_shdr *shdr;
+ g_autofree struct elf_shdr *shdr = NULL;
char *strings = NULL;
- struct syminfo *s = NULL;
- struct elf_sym *new_syms, *syms = NULL;
+ struct elf_sym *syms = NULL;
+ struct elf_sym *new_syms;
+ uint64_t segsz;
shnum = hdr->e_shnum;
- i = shnum * sizeof(struct elf_shdr);
- shdr = (struct elf_shdr *)alloca(i);
- if (pread(fd, shdr, i, hdr->e_shoff) != i) {
+ shdr = imgsrc_read_alloc(hdr->e_shoff, shnum * sizeof(struct elf_shdr),
+ src, NULL);
+ if (shdr == NULL) {
return;
}
@@ -3057,31 +3616,33 @@ static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
found:
/* Now know where the strtab and symtab are. Snarf them. */
- s = g_try_new(struct syminfo, 1);
- if (!s) {
- goto give_up;
- }
segsz = shdr[str_idx].sh_size;
- s->disas_strtab = strings = g_try_malloc(segsz);
- if (!strings ||
- pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) {
+ strings = g_try_malloc(segsz);
+ if (!strings) {
goto give_up;
}
-
- segsz = shdr[sym_idx].sh_size;
- syms = g_try_malloc(segsz);
- if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) {
+ if (!imgsrc_read(strings, shdr[str_idx].sh_offset, segsz, src, NULL)) {
goto give_up;
}
+ segsz = shdr[sym_idx].sh_size;
if (segsz / sizeof(struct elf_sym) > INT_MAX) {
- /* Implausibly large symbol table: give up rather than ploughing
- * on with the number of symbols calculation overflowing
+ /*
+ * Implausibly large symbol table: give up rather than ploughing
+ * on with the number of symbols calculation overflowing.
*/
goto give_up;
}
nsyms = segsz / sizeof(struct elf_sym);
+ syms = g_try_malloc(segsz);
+ if (!syms) {
+ goto give_up;
+ }
+ if (!imgsrc_read(syms, shdr[sym_idx].sh_offset, segsz, src, NULL)) {
+ goto give_up;
+ }
+
for (i = 0; i < nsyms; ) {
bswap_sym(syms + i);
/* Throw away entries which we do not need. */
@@ -3106,10 +3667,12 @@ static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
goto give_up;
}
- /* Attempt to free the storage associated with the local symbols
- that we threw away. Whether or not this has any effect on the
- memory allocation depends on the malloc implementation and how
- many symbols we managed to discard. */
+ /*
+ * Attempt to free the storage associated with the local symbols
+ * that we threw away. Whether or not this has any effect on the
+ * memory allocation depends on the malloc implementation and how
+ * many symbols we managed to discard.
+ */
new_syms = g_try_renew(struct elf_sym, syms, nsyms);
if (new_syms == NULL) {
goto give_up;
@@ -3118,20 +3681,23 @@ static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
qsort(syms, nsyms, sizeof(*syms), symcmp);
- s->disas_num_syms = nsyms;
+ {
+ struct syminfo *s = g_new(struct syminfo, 1);
+
+ s->disas_strtab = strings;
+ s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
- s->disas_symtab.elf32 = syms;
+ s->disas_symtab.elf32 = syms;
#else
- s->disas_symtab.elf64 = syms;
+ s->disas_symtab.elf64 = syms;
#endif
- s->lookup_symbol = lookup_symbolxx;
- s->next = syminfos;
- syminfos = s;
-
+ s->lookup_symbol = lookup_symbolxx;
+ s->next = syminfos;
+ syminfos = s;
+ }
return;
-give_up:
- g_free(s);
+ give_up:
g_free(strings);
g_free(syms);
}
@@ -3173,8 +3739,14 @@ uint32_t get_elf_eflags(int fd)
int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
{
- struct image_info interp_info;
- struct elfhdr elf_ex;
+ /*
+ * We need a copy of the elf header for passing to create_elf_tables.
+ * We will have overwritten the original when we re-use bprm->buf
+ * while loading the interpreter. Allocate the storage for this now
+ * and let load_elf_image do any swapping that may be required.
+ */
+ struct elfhdr ehdr;
+ struct image_info interp_info, vdso_info;
char *elf_interpreter = NULL;
char *scratch;
@@ -3183,15 +3755,7 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
#endif
- info->start_mmap = (abi_ulong)ELF_START_MMAP;
-
- load_elf_image(bprm->filename, bprm->fd, info,
- &elf_interpreter, bprm->buf);
-
- /* ??? We need a copy of the elf header for passing to create_elf_tables.
- If we do nothing, we'll have overwritten this when we re-use bprm->buf
- when we load the interpreter. */
- elf_ex = *(struct elfhdr *)bprm->buf;
+ load_elf_image(bprm->filename, &bprm->src, info, &ehdr, &elf_interpreter);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
@@ -3230,6 +3794,19 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
if (elf_interpreter) {
load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
+ /*
+ * While unusual because of ELF_ET_DYN_BASE, if we are unlucky
+ * with the mappings, the interpreter can be loaded above but
+ * near the main executable, which can leave very little room
+ * for the heap.
+ * If less than 16 MiB is left between the current brk and the
+ * interpreter, use the end of the interpreter instead.
+ */
+ if (interp_info.brk > info->brk &&
+ interp_info.load_bias - info->brk < 16 * MiB) {
+ info->brk = interp_info.brk;
+ }
+
/* If the program interpreter is one of these two, then assume
an iBCS2 image. Otherwise assume a native linux image. */
@@ -3241,16 +3818,38 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
and some applications "depend" upon this behavior. Since
we do not have the power to recompile these, we emulate
the SVr4 behavior. Sigh. */
- target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC,
+ MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0);
}
#ifdef TARGET_MIPS
info->interp_fp_abi = interp_info.fp_abi;
#endif
}
- bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
- info, (elf_interpreter ? &interp_info : NULL));
+ /*
+ * Load a vdso if available, which will amongst other things contain the
+ * signal trampolines. Otherwise, allocate a separate page for them.
+ */
+ const VdsoImageInfo *vdso = vdso_image_info();
+ if (vdso) {
+ load_elf_vdso(&vdso_info, vdso);
+ info->vdso = vdso_info.load_bias;
+ } else if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
+ abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (tramp_page == -1) {
+ return -errno;
+ }
+
+ setup_sigtramp(tramp_page);
+ target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
+ }
+
+ bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info,
+ elf_interpreter ? &interp_info : NULL,
+ vdso ? &vdso_info : NULL);
info->start_stack = bprm->p;
/* If we have an interpreter, set that as the program's entry point.
@@ -3267,21 +3866,12 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
bprm->core_dump = &elf_core_dump;
#endif
- /*
- * If we reserved extra space for brk, release it now.
- * The implementation of do_brk in syscalls.c expects to be able
- * to mmap pages in this space.
- */
- if (info->reserve_brk) {
- abi_ulong start_brk = HOST_PAGE_ALIGN(info->brk);
- abi_ulong end_brk = HOST_PAGE_ALIGN(info->brk + info->reserve_brk);
- target_munmap(start_brk, end_brk - start_brk);
- }
-
return 0;
}
#ifdef USE_ELF_CORE_DUMP
+#include "exec/translate-all.h"
+
/*
* Definitions to generate Intel SVR4-like core files.
* These mostly have the same names as the SVR4 types with "target_elf_"
@@ -3321,18 +3911,6 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
* Example for ARM target is provided in this file.
*/
-/* An ELF note in memory */
-struct memelfnote {
- const char *name;
- size_t namesz;
- size_t namesz_rounded;
- int type;
- size_t datasz;
- size_t datasz_rounded;
- void *data;
- size_t notesz;
-};
-
struct target_elf_siginfo {
abi_int si_signo; /* signal number */
abi_int si_code; /* extra code */
@@ -3372,77 +3950,6 @@ struct target_elf_prpsinfo {
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
-/* Here is the structure in which status of each thread is captured. */
-struct elf_thread_status {
- QTAILQ_ENTRY(elf_thread_status) ets_link;
- struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
-#if 0
- elf_fpregset_t fpu; /* NT_PRFPREG */
- struct task_struct *thread;
- elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
-#endif
- struct memelfnote notes[1];
- int num_notes;
-};
-
-struct elf_note_info {
- struct memelfnote *notes;
- struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
- struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
-
- QTAILQ_HEAD(, elf_thread_status) thread_list;
-#if 0
- /*
- * Current version of ELF coredump doesn't support
- * dumping fp regs etc.
- */
- elf_fpregset_t *fpu;
- elf_fpxregset_t *xfpu;
- int thread_status_size;
-#endif
- int notes_size;
- int numnote;
-};
-
-struct vm_area_struct {
- target_ulong vma_start; /* start vaddr of memory region */
- target_ulong vma_end; /* end vaddr of memory region */
- abi_ulong vma_flags; /* protection etc. flags for the region */
- QTAILQ_ENTRY(vm_area_struct) vma_link;
-};
-
-struct mm_struct {
- QTAILQ_HEAD(, vm_area_struct) mm_mmap;
- int mm_count; /* number of mappings */
-};
-
-static struct mm_struct *vma_init(void);
-static void vma_delete(struct mm_struct *);
-static int vma_add_mapping(struct mm_struct *, target_ulong,
- target_ulong, abi_ulong);
-static int vma_get_mapping_count(const struct mm_struct *);
-static struct vm_area_struct *vma_first(const struct mm_struct *);
-static struct vm_area_struct *vma_next(struct vm_area_struct *);
-static abi_ulong vma_dump_size(const struct vm_area_struct *);
-static int vma_walker(void *priv, target_ulong start, target_ulong end,
- unsigned long flags);
-
-static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
-static void fill_note(struct memelfnote *, const char *, int,
- unsigned int, void *);
-static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
-static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
-static void fill_auxv_note(struct memelfnote *, const TaskState *);
-static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
-static size_t note_size(const struct memelfnote *);
-static void free_note_info(struct elf_note_info *);
-static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
-static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
-
-static int dump_write(int, const void *, size_t);
-static int write_note(struct memelfnote *, int);
-static int write_note_info(struct elf_note_info *, int);
-
#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
@@ -3485,145 +3992,66 @@ static inline void bswap_note(struct elf_note *en) { }
#endif /* BSWAP_NEEDED */
/*
- * Minimal support for linux memory regions. These are needed
- * when we are finding out what memory exactly belongs to
- * emulated process. No locks needed here, as long as
- * thread that received the signal is stopped.
- */
-
-static struct mm_struct *vma_init(void)
-{
- struct mm_struct *mm;
-
- if ((mm = g_malloc(sizeof (*mm))) == NULL)
- return (NULL);
-
- mm->mm_count = 0;
- QTAILQ_INIT(&mm->mm_mmap);
-
- return (mm);
-}
-
-static void vma_delete(struct mm_struct *mm)
-{
- struct vm_area_struct *vma;
-
- while ((vma = vma_first(mm)) != NULL) {
- QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
- g_free(vma);
- }
- g_free(mm);
-}
-
-static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
- target_ulong end, abi_ulong flags)
-{
- struct vm_area_struct *vma;
-
- if ((vma = g_malloc0(sizeof (*vma))) == NULL)
- return (-1);
-
- vma->vma_start = start;
- vma->vma_end = end;
- vma->vma_flags = flags;
-
- QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
- mm->mm_count++;
-
- return (0);
-}
-
-static struct vm_area_struct *vma_first(const struct mm_struct *mm)
-{
- return (QTAILQ_FIRST(&mm->mm_mmap));
-}
-
-static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
-{
- return (QTAILQ_NEXT(vma, vma_link));
-}
-
-static int vma_get_mapping_count(const struct mm_struct *mm)
-{
- return (mm->mm_count);
-}
-
-/*
* Calculate file (dump) size of given memory region.
*/
-static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
+static size_t vma_dump_size(target_ulong start, target_ulong end,
+ unsigned long flags)
{
- /* if we cannot even read the first page, skip it */
- if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
- return (0);
+ /* The area must be readable. */
+ if (!(flags & PAGE_READ)) {
+ return 0;
+ }
/*
* Usually we don't dump executable pages as they contain
* non-writable code that debugger can read directly from
- * target library etc. However, thread stacks are marked
- * also executable so we read in first page of given region
- * and check whether it contains elf header. If there is
- * no elf header, we dump it.
+ * target library etc. If there is no elf header, we dump it.
*/
- if (vma->vma_flags & PROT_EXEC) {
- char page[TARGET_PAGE_SIZE];
-
- if (copy_from_user(page, vma->vma_start, sizeof (page))) {
- return 0;
- }
- if ((page[EI_MAG0] == ELFMAG0) &&
- (page[EI_MAG1] == ELFMAG1) &&
- (page[EI_MAG2] == ELFMAG2) &&
- (page[EI_MAG3] == ELFMAG3)) {
- /*
- * Mappings are possibly from ELF binary. Don't dump
- * them.
- */
- return (0);
- }
+ if (!(flags & PAGE_WRITE_ORG) &&
+ (flags & PAGE_EXEC) &&
+ memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) {
+ return 0;
}
- return (vma->vma_end - vma->vma_start);
+ return end - start;
}
-static int vma_walker(void *priv, target_ulong start, target_ulong end,
- unsigned long flags)
+static size_t size_note(const char *name, size_t datasz)
{
- struct mm_struct *mm = (struct mm_struct *)priv;
+ size_t namesz = strlen(name) + 1;
- vma_add_mapping(mm, start, end, flags);
- return (0);
+ namesz = ROUND_UP(namesz, 4);
+ datasz = ROUND_UP(datasz, 4);
+
+ return sizeof(struct elf_note) + namesz + datasz;
}
-static void fill_note(struct memelfnote *note, const char *name, int type,
- unsigned int sz, void *data)
+static void *fill_note(void **pptr, int type, const char *name, size_t datasz)
{
- unsigned int namesz;
+ void *ptr = *pptr;
+ struct elf_note *n = ptr;
+ size_t namesz = strlen(name) + 1;
- namesz = strlen(name) + 1;
- note->name = name;
- note->namesz = namesz;
- note->namesz_rounded = roundup(namesz, sizeof (int32_t));
- note->type = type;
- note->datasz = sz;
- note->datasz_rounded = roundup(sz, sizeof (int32_t));
+ n->n_namesz = namesz;
+ n->n_descsz = datasz;
+ n->n_type = type;
+ bswap_note(n);
- note->data = data;
+ ptr += sizeof(*n);
+ memcpy(ptr, name, namesz);
- /*
- * We calculate rounded up note size here as specified by
- * ELF document.
- */
- note->notesz = sizeof (struct elf_note) +
- note->namesz_rounded + note->datasz_rounded;
+ namesz = ROUND_UP(namesz, 4);
+ datasz = ROUND_UP(datasz, 4);
+
+ *pptr = ptr + namesz + datasz;
+ return ptr + namesz;
}
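A quick worked example of the two note helpers above (illustrative only): for a "CORE" note, namesz = strlen("CORE") + 1 = 5, padded to 8, so

    size_note("CORE", descsz) == sizeof(struct elf_note) + 8 + ROUND_UP(descsz, 4);

and fill_note() writes the elf_note header plus the padded name at *pptr, advances *pptr past the padded descriptor as well, and returns the address at which the caller memcpys descsz bytes of payload.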
static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
uint32_t flags)
{
- (void) memset(elf, 0, sizeof(*elf));
+ memcpy(elf->e_ident, ELFMAG, SELFMAG);
- (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
elf->e_ident[EI_CLASS] = ELF_CLASS;
elf->e_ident[EI_DATA] = ELF_DATA;
elf->e_ident[EI_VERSION] = EV_CURRENT;
@@ -3641,95 +4069,79 @@ static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
bswap_ehdr(elf);
}
-static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
+static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset)
{
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
- phdr->p_vaddr = 0;
- phdr->p_paddr = 0;
phdr->p_filesz = sz;
- phdr->p_memsz = 0;
- phdr->p_flags = 0;
- phdr->p_align = 0;
bswap_phdr(phdr, 1);
}
-static size_t note_size(const struct memelfnote *note)
-{
- return (note->notesz);
-}
-
-static void fill_prstatus(struct target_elf_prstatus *prstatus,
- const TaskState *ts, int signr)
+static void fill_prstatus_note(void *data, const TaskState *ts,
+ CPUState *cpu, int signr)
{
- (void) memset(prstatus, 0, sizeof (*prstatus));
- prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
- prstatus->pr_pid = ts->ts_tid;
- prstatus->pr_ppid = getppid();
- prstatus->pr_pgrp = getpgrp();
- prstatus->pr_sid = getsid(0);
-
- bswap_prstatus(prstatus);
+ /*
+ * Because note memory is only aligned to 4, and target_elf_prstatus
+ * may well have higher alignment requirements, fill locally and
+ * memcpy to the destination afterward.
+ */
+ struct target_elf_prstatus prstatus = {
+ .pr_info.si_signo = signr,
+ .pr_cursig = signr,
+ .pr_pid = ts->ts_tid,
+ .pr_ppid = getppid(),
+ .pr_pgrp = getpgrp(),
+ .pr_sid = getsid(0),
+ };
+
+ elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu));
+ bswap_prstatus(&prstatus);
+ memcpy(data, &prstatus, sizeof(prstatus));
}
-static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
+static void fill_prpsinfo_note(void *data, const TaskState *ts)
{
+ /*
+ * Because note memory is only aligned to 4, and target_elf_prpsinfo
+ * may well have higher alignment requirements, fill locally and
+ * memcpy to the destination afterward.
+ */
+ struct target_elf_prpsinfo psinfo = {
+ .pr_pid = getpid(),
+ .pr_ppid = getppid(),
+ .pr_pgrp = getpgrp(),
+ .pr_sid = getsid(0),
+ .pr_uid = getuid(),
+ .pr_gid = getgid(),
+ };
char *base_filename;
- unsigned int i, len;
-
- (void) memset(psinfo, 0, sizeof (*psinfo));
+ size_t len;
len = ts->info->env_strings - ts->info->arg_strings;
- if (len >= ELF_PRARGSZ)
- len = ELF_PRARGSZ - 1;
- if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_strings, len)) {
- return -EFAULT;
- }
- for (i = 0; i < len; i++)
- if (psinfo->pr_psargs[i] == 0)
- psinfo->pr_psargs[i] = ' ';
- psinfo->pr_psargs[len] = 0;
-
- psinfo->pr_pid = getpid();
- psinfo->pr_ppid = getppid();
- psinfo->pr_pgrp = getpgrp();
- psinfo->pr_sid = getsid(0);
- psinfo->pr_uid = getuid();
- psinfo->pr_gid = getgid();
+ len = MIN(len, ELF_PRARGSZ);
+ memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len);
+ for (size_t i = 0; i < len; i++) {
+ if (psinfo.pr_psargs[i] == 0) {
+ psinfo.pr_psargs[i] = ' ';
+ }
+ }
base_filename = g_path_get_basename(ts->bprm->filename);
/*
* Using strncpy here is fine: at max-length,
* this field is not NUL-terminated.
*/
- (void) strncpy(psinfo->pr_fname, base_filename,
- sizeof(psinfo->pr_fname));
-
+ strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname));
g_free(base_filename);
- bswap_psinfo(psinfo);
- return (0);
+
+ bswap_psinfo(&psinfo);
+ memcpy(data, &psinfo, sizeof(psinfo));
}
-static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
+static void fill_auxv_note(void *data, const TaskState *ts)
{
- elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
- elf_addr_t orig_auxv = auxv;
- void *ptr;
- int len = ts->info->auxv_len;
-
- /*
- * Auxiliary vector is stored in target process stack. It contains
- * {type, value} pairs that we need to dump into note. This is not
- * strictly necessary but we do it here for sake of completeness.
- */
-
- /* read in whole auxv vector and copy it to memelfnote */
- ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
- if (ptr != NULL) {
- fill_note(note, "CORE", NT_AUXV, len, ptr);
- unlock_user(ptr, auxv, len);
- }
+ memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len);
}
/*
@@ -3753,27 +4165,9 @@ static int dump_write(int fd, const void *ptr, size_t size)
{
const char *bufp = (const char *)ptr;
ssize_t bytes_written, bytes_left;
- struct rlimit dumpsize;
- off_t pos;
bytes_written = 0;
- getrlimit(RLIMIT_CORE, &dumpsize);
- if ((pos = lseek(fd, 0, SEEK_CUR))==-1) {
- if (errno == ESPIPE) { /* not a seekable stream */
- bytes_left = size;
- } else {
- return pos;
- }
- } else {
- if (dumpsize.rlim_cur <= pos) {
- return -1;
- } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
- bytes_left = size;
- } else {
- size_t limit_left=dumpsize.rlim_cur - pos;
- bytes_left = limit_left >= size ? size : limit_left ;
- }
- }
+ bytes_left = size;
/*
* In normal conditions, single write(2) should do but
@@ -3795,135 +4189,76 @@ static int dump_write(int fd, const void *ptr, size_t size)
return (0);
}
-static int write_note(struct memelfnote *men, int fd)
+static int wmr_page_unprotect_regions(void *opaque, target_ulong start,
+ target_ulong end, unsigned long flags)
{
- struct elf_note en;
+ if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) {
+ size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size());
- en.n_namesz = men->namesz;
- en.n_type = men->type;
- en.n_descsz = men->datasz;
-
- bswap_note(&en);
-
- if (dump_write(fd, &en, sizeof(en)) != 0)
- return (-1);
- if (dump_write(fd, men->name, men->namesz_rounded) != 0)
- return (-1);
- if (dump_write(fd, men->data, men->datasz_rounded) != 0)
- return (-1);
-
- return (0);
+ while (1) {
+ page_unprotect(start, 0);
+ if (end - start <= step) {
+ break;
+ }
+ start += step;
+ }
+ }
+ return 0;
}
-static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
-{
- CPUState *cpu = env_cpu((CPUArchState *)env);
- TaskState *ts = (TaskState *)cpu->opaque;
- struct elf_thread_status *ets;
-
- ets = g_malloc0(sizeof (*ets));
- ets->num_notes = 1; /* only prstatus is dumped */
- fill_prstatus(&ets->prstatus, ts, 0);
- elf_core_copy_regs(&ets->prstatus.pr_reg, env);
- fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
- &ets->prstatus);
+typedef struct {
+ unsigned count;
+ size_t size;
+} CountAndSizeRegions;
- QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
-
- info->notes_size += note_size(&ets->notes[0]);
-}
-
-static void init_note_info(struct elf_note_info *info)
+static int wmr_count_and_size_regions(void *opaque, target_ulong start,
+ target_ulong end, unsigned long flags)
{
- /* Initialize the elf_note_info structure so that it is at
- * least safe to call free_note_info() on it. Must be
- * called before calling fill_note_info().
- */
- memset(info, 0, sizeof (*info));
- QTAILQ_INIT(&info->thread_list);
-}
+ CountAndSizeRegions *css = opaque;
-static int fill_note_info(struct elf_note_info *info,
- long signr, const CPUArchState *env)
-{
-#define NUMNOTES 3
- CPUState *cpu = env_cpu((CPUArchState *)env);
- TaskState *ts = (TaskState *)cpu->opaque;
- int i;
-
- info->notes = g_new0(struct memelfnote, NUMNOTES);
- if (info->notes == NULL)
- return (-ENOMEM);
- info->prstatus = g_malloc0(sizeof (*info->prstatus));
- if (info->prstatus == NULL)
- return (-ENOMEM);
- info->psinfo = g_malloc0(sizeof (*info->psinfo));
- if (info->prstatus == NULL)
- return (-ENOMEM);
-
- /*
- * First fill in status (and registers) of current thread
- * including process info & aux vector.
- */
- fill_prstatus(info->prstatus, ts, signr);
- elf_core_copy_regs(&info->prstatus->pr_reg, env);
- fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
- sizeof (*info->prstatus), info->prstatus);
- fill_psinfo(info->psinfo, ts);
- fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
- sizeof (*info->psinfo), info->psinfo);
- fill_auxv_note(&info->notes[2], ts);
- info->numnote = 3;
-
- info->notes_size = 0;
- for (i = 0; i < info->numnote; i++)
- info->notes_size += note_size(&info->notes[i]);
-
- /* read and fill status of all threads */
- cpu_list_lock();
- CPU_FOREACH(cpu) {
- if (cpu == thread_cpu) {
- continue;
- }
- fill_thread_info(info, (CPUArchState *)cpu->env_ptr);
- }
- cpu_list_unlock();
-
- return (0);
+ css->count++;
+ css->size += vma_dump_size(start, end, flags);
+ return 0;
}
-static void free_note_info(struct elf_note_info *info)
+typedef struct {
+ struct elf_phdr *phdr;
+ off_t offset;
+} FillRegionPhdr;
+
+static int wmr_fill_region_phdr(void *opaque, target_ulong start,
+ target_ulong end, unsigned long flags)
{
- struct elf_thread_status *ets;
+ FillRegionPhdr *d = opaque;
+ struct elf_phdr *phdr = d->phdr;
- while (!QTAILQ_EMPTY(&info->thread_list)) {
- ets = QTAILQ_FIRST(&info->thread_list);
- QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
- g_free(ets);
- }
+ phdr->p_type = PT_LOAD;
+ phdr->p_vaddr = start;
+ phdr->p_paddr = 0;
+ phdr->p_filesz = vma_dump_size(start, end, flags);
+ phdr->p_offset = d->offset;
+ d->offset += phdr->p_filesz;
+ phdr->p_memsz = end - start;
+ phdr->p_flags = (flags & PAGE_READ ? PF_R : 0)
+ | (flags & PAGE_WRITE_ORG ? PF_W : 0)
+ | (flags & PAGE_EXEC ? PF_X : 0);
+ phdr->p_align = ELF_EXEC_PAGESIZE;
- g_free(info->prstatus);
- g_free(info->psinfo);
- g_free(info->notes);
+ bswap_phdr(phdr, 1);
+ d->phdr = phdr + 1;
+ return 0;
}
-static int write_note_info(struct elf_note_info *info, int fd)
+static int wmr_write_region(void *opaque, target_ulong start,
+ target_ulong end, unsigned long flags)
{
- struct elf_thread_status *ets;
- int i, error = 0;
-
- /* write prstatus, psinfo and auxv for current thread */
- for (i = 0; i < info->numnote; i++)
- if ((error = write_note(&info->notes[i], fd)) != 0)
- return (error);
+ int fd = *(int *)opaque;
+ size_t size = vma_dump_size(start, end, flags);
- /* write prstatus for each thread */
- QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
- if ((error = write_note(&ets->notes[0], fd)) != 0)
- return (error);
+ if (!size) {
+ return 0;
}
-
- return (0);
+ return dump_write(fd, g2h_untagged(start), size);
}
/*
@@ -3972,147 +4307,128 @@ static int write_note_info(struct elf_note_info *info, int fd)
static int elf_core_dump(int signr, const CPUArchState *env)
{
const CPUState *cpu = env_cpu((CPUArchState *)env);
- const TaskState *ts = (const TaskState *)cpu->opaque;
- struct vm_area_struct *vma = NULL;
- g_autofree char *corefile = NULL;
- struct elf_note_info info;
- struct elfhdr elf;
- struct elf_phdr phdr;
+ const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
struct rlimit dumpsize;
- struct mm_struct *mm = NULL;
- off_t offset = 0, data_offset = 0;
- int segs = 0;
+ CountAndSizeRegions css;
+ off_t offset, note_offset, data_offset;
+ size_t note_size;
+ int cpus, ret;
int fd = -1;
+ CPUState *cpu_iter;
- init_note_info(&info);
+ if (prctl(PR_GET_DUMPABLE) == 0) {
+ return 0;
+ }
- errno = 0;
- getrlimit(RLIMIT_CORE, &dumpsize);
- if (dumpsize.rlim_cur == 0)
+ if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
return 0;
+ }
- corefile = core_dump_filename(ts);
+ cpu_list_lock();
+ mmap_lock();
- if ((fd = open(corefile, O_WRONLY | O_CREAT,
- S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
- return (-errno);
+ /* By unprotecting, we merge vmas that might be split. */
+ walk_memory_regions(NULL, wmr_page_unprotect_regions);
/*
* Walk through target process memory mappings and
- * set up structure containing this information. After
- * this point vma_xxx functions can be used.
+ * set up structure containing this information.
*/
- if ((mm = vma_init()) == NULL)
- goto out;
-
- walk_memory_regions(mm, vma_walker);
- segs = vma_get_mapping_count(mm);
+ memset(&css, 0, sizeof(css));
+ walk_memory_regions(&css, wmr_count_and_size_regions);
- /*
- * Construct valid coredump ELF header. We also
- * add one more segment for notes.
- */
- fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
- if (dump_write(fd, &elf, sizeof (elf)) != 0)
- goto out;
+ cpus = 0;
+ CPU_FOREACH(cpu_iter) {
+ cpus++;
+ }
- /* fill in the in-memory version of notes */
- if (fill_note_info(&info, signr, env) < 0)
- goto out;
+ offset = sizeof(struct elfhdr);
+ offset += (css.count + 1) * sizeof(struct elf_phdr);
+ note_offset = offset;
- offset += sizeof (elf); /* elf header */
- offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
+ offset += size_note("CORE", ts->info->auxv_len);
+ offset += size_note("CORE", sizeof(struct target_elf_prpsinfo));
+ offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus;
+ note_size = offset - note_offset;
+ data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE);
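Putting the offset arithmetic above together, the resulting core file layout is (illustrative):

/*
 *   0             struct elfhdr
 *   then          css.count + 1 program headers: one PT_NOTE plus one
 *                 PT_LOAD per dumped region
 *   note_offset   NT_AUXV, NT_PRPSINFO, and one NT_PRSTATUS per cpu
 *   data_offset   note_offset + note_size rounded up to ELF_EXEC_PAGESIZE;
 *                 the dumped memory regions follow from here
 */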
- /* write out notes program header */
- fill_elf_note_phdr(&phdr, info.notes_size, offset);
+ /* Do not dump if the corefile size exceeds the limit. */
+ if (dumpsize.rlim_cur != RLIM_INFINITY
+ && dumpsize.rlim_cur < data_offset + css.size) {
+ errno = 0;
+ goto out;
+ }
- offset += info.notes_size;
- if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
+ {
+ g_autofree char *corefile = core_dump_filename(ts);
+ fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ }
+ if (fd < 0) {
goto out;
+ }
/*
- * ELF specification wants data to start at page boundary so
- * we align it here.
+ * There is a fair amount of alignment padding within the notes
+ * as well as preceding the process memory. Allocate a zeroed
+ * block to hold it all. Write all of the headers directly into
+ * this buffer and then write it out as a block.
*/
- data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+ {
+ g_autofree void *header = g_malloc0(data_offset);
+ FillRegionPhdr frp;
+ void *hptr, *dptr;
+
+ /* Create elf file header. */
+ hptr = header;
+ fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0);
+ hptr += sizeof(struct elfhdr);
+
+ /* Create elf program headers. */
+ fill_elf_note_phdr(hptr, note_size, note_offset);
+ hptr += sizeof(struct elf_phdr);
+
+ frp.phdr = hptr;
+ frp.offset = data_offset;
+ walk_memory_regions(&frp, wmr_fill_region_phdr);
+ hptr = frp.phdr;
+
+ /* Create the notes. */
+ dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len);
+ fill_auxv_note(dptr, ts);
+
+ dptr = fill_note(&hptr, NT_PRPSINFO, "CORE",
+ sizeof(struct target_elf_prpsinfo));
+ fill_prpsinfo_note(dptr, ts);
+
+ CPU_FOREACH(cpu_iter) {
+ dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
+ sizeof(struct target_elf_prstatus));
+ fill_prstatus_note(dptr, ts, cpu_iter,
+ cpu_iter == cpu ? signr : 0);
+ }
- /*
- * Write program headers for memory regions mapped in
- * the target process.
- */
- for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
- (void) memset(&phdr, 0, sizeof (phdr));
-
- phdr.p_type = PT_LOAD;
- phdr.p_offset = offset;
- phdr.p_vaddr = vma->vma_start;
- phdr.p_paddr = 0;
- phdr.p_filesz = vma_dump_size(vma);
- offset += phdr.p_filesz;
- phdr.p_memsz = vma->vma_end - vma->vma_start;
- phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
- if (vma->vma_flags & PROT_WRITE)
- phdr.p_flags |= PF_W;
- if (vma->vma_flags & PROT_EXEC)
- phdr.p_flags |= PF_X;
- phdr.p_align = ELF_EXEC_PAGESIZE;
-
- bswap_phdr(&phdr, 1);
- if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
+ if (dump_write(fd, header, data_offset) < 0) {
goto out;
}
}
/*
- * Next we write notes just after program headers. No
- * alignment needed here.
+ * Finally write process memory into the corefile as well.
*/
- if (write_note_info(&info, fd) < 0)
- goto out;
-
- /* align data to page boundary */
- if (lseek(fd, data_offset, SEEK_SET) != data_offset)
+ if (walk_memory_regions(&fd, wmr_write_region) < 0) {
goto out;
-
- /*
- * Finally we can dump process memory into corefile as well.
- */
- for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
- abi_ulong addr;
- abi_ulong end;
-
- end = vma->vma_start + vma_dump_size(vma);
-
- for (addr = vma->vma_start; addr < end;
- addr += TARGET_PAGE_SIZE) {
- char page[TARGET_PAGE_SIZE];
- int error;
-
- /*
- * Read in page from target process memory and
- * write it to coredump file.
- */
- error = copy_from_user(page, addr, sizeof (page));
- if (error != 0) {
- (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
- addr);
- errno = -error;
- goto out;
- }
- if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
- goto out;
- }
}
+ errno = 0;
out:
- free_note_info(&info);
- if (mm != NULL)
- vma_delete(mm);
- (void) close(fd);
-
- if (errno != 0)
- return (-errno);
- return (0);
+ ret = -errno;
+ mmap_unlock();
+ cpu_list_unlock();
+ if (fd >= 0) {
+ close(fd);
+ }
+ return ret;
}
#endif /* USE_ELF_CORE_DUMP */
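
The rewritten core-dump path above sizes the whole file before opening it: ELF header, program headers, notes, then the page-aligned memory regions, so the headers can be written from one zeroed buffer. Below is a standalone sketch of that offset arithmetic only; the sizes are made-up stand-ins for sizeof(struct elfhdr) and friends, and size_note() mirrors the generic 4-byte-aligned ELF note layout rather than QEMU's actual helper.

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>

    #define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    /* One ELF note: three 4-byte header words, then the name and the
     * descriptor, each padded to a 4-byte boundary. */
    static uint64_t size_note(const char *name, uint64_t descsz)
    {
        uint64_t namesz = strlen(name) + 1;
        return 3 * 4 + ROUND_UP(namesz, 4) + ROUND_UP(descsz, 4);
    }

    int main(void)
    {
        /* Stand-in sizes; the real code uses sizeof(struct elfhdr) etc. */
        uint64_t ehdr_size = 64, phdr_size = 56, page_size = 4096;
        uint64_t regions = 3, cpus = 2;
        uint64_t auxv_len = 368, prpsinfo_sz = 136, prstatus_sz = 392;

        /* Headers first: ELF header, one phdr per region, one phdr for notes. */
        uint64_t offset = ehdr_size + (regions + 1) * phdr_size;
        uint64_t note_offset = offset;

        /* Then the notes: auxv, prpsinfo, and one prstatus per cpu. */
        offset += size_note("CORE", auxv_len);
        offset += size_note("CORE", prpsinfo_sz);
        offset += size_note("CORE", prstatus_sz) * cpus;
        uint64_t note_size = offset - note_offset;

        /* Memory regions start on the next page boundary. */
        uint64_t data_offset = ROUND_UP(offset, page_size);

        printf("notes at %" PRIu64 " (%" PRIu64 " bytes), data at %" PRIu64 "\n",
               note_offset, note_size, data_offset);
        return 0;
    }
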
diff --git a/linux-user/exit.c b/linux-user/exit.c
index fa6ef0b9b4..1ff8fe4f07 100644
--- a/linux-user/exit.c
+++ b/linux-user/exit.c
@@ -17,12 +17,11 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
-#include "exec/gdbstub.h"
+#include "tcg/perf.h"
+#include "gdbstub/syscalls.h"
#include "qemu.h"
#include "user-internals.h"
-#ifdef CONFIG_GPROF
-#include <sys/gmon.h>
-#endif
+#include "qemu/plugin.h"
#ifdef CONFIG_GCOV
extern void __gcov_dump(void);
@@ -30,12 +29,10 @@ extern void __gcov_dump(void);
void preexit_cleanup(CPUArchState *env, int code)
{
-#ifdef CONFIG_GPROF
- _mcleanup();
-#endif
#ifdef CONFIG_GCOV
__gcov_dump();
#endif
gdb_exit(code);
qemu_plugin_user_exit();
+ perf_exit();
}
diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c
index 6941089959..c04a97c73a 100644
--- a/linux-user/fd-trans.c
+++ b/linux-user/fd-trans.c
@@ -138,6 +138,9 @@ enum {
QEMU_IFLA_PROP_LIST,
QEMU_IFLA_ALT_IFNAME,
QEMU_IFLA_PERM_ADDRESS,
+ QEMU_IFLA_PROTO_DOWN_REASON,
+ QEMU_IFLA_PARENT_DEV_NAME,
+ QEMU_IFLA_PARENT_DEV_BUS_NAME,
QEMU___IFLA_MAX
};
@@ -179,6 +182,8 @@ enum {
QEMU_IFLA_BRPORT_BACKUP_PORT,
QEMU_IFLA_BRPORT_MRP_RING_OPEN,
QEMU_IFLA_BRPORT_MRP_IN_OPEN,
+ QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
QEMU___IFLA_BRPORT_MAX
};
@@ -268,6 +273,37 @@ enum {
QEMU___RTA_MAX
};
+enum {
+ QEMU_IFLA_VF_STATS_RX_PACKETS,
+ QEMU_IFLA_VF_STATS_TX_PACKETS,
+ QEMU_IFLA_VF_STATS_RX_BYTES,
+ QEMU_IFLA_VF_STATS_TX_BYTES,
+ QEMU_IFLA_VF_STATS_BROADCAST,
+ QEMU_IFLA_VF_STATS_MULTICAST,
+ QEMU_IFLA_VF_STATS_PAD,
+ QEMU_IFLA_VF_STATS_RX_DROPPED,
+ QEMU_IFLA_VF_STATS_TX_DROPPED,
+ QEMU__IFLA_VF_STATS_MAX,
+};
+
+enum {
+ QEMU_IFLA_VF_UNSPEC,
+ QEMU_IFLA_VF_MAC,
+ QEMU_IFLA_VF_VLAN,
+ QEMU_IFLA_VF_TX_RATE,
+ QEMU_IFLA_VF_SPOOFCHK,
+ QEMU_IFLA_VF_LINK_STATE,
+ QEMU_IFLA_VF_RATE,
+ QEMU_IFLA_VF_RSS_QUERY_EN,
+ QEMU_IFLA_VF_STATS,
+ QEMU_IFLA_VF_TRUST,
+ QEMU_IFLA_VF_IB_NODE_GUID,
+ QEMU_IFLA_VF_IB_PORT_GUID,
+ QEMU_IFLA_VF_VLAN_LIST,
+ QEMU_IFLA_VF_BROADCAST,
+ QEMU__IFLA_VF_MAX,
+};
+
TargetFdTrans **target_fd_trans;
QemuMutex target_fd_trans_lock;
unsigned int target_fd_max;
@@ -573,6 +609,8 @@ static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
/* uint32_t */
case QEMU_IFLA_BRPORT_COST:
case QEMU_IFLA_BRPORT_BACKUP_PORT:
+ case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT:
+ case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT:
u32 = NLA_DATA(nlattr);
*u32 = tswap32(*u32);
break;
@@ -805,6 +843,145 @@ static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
return 0;
}
+static abi_long host_to_target_data_vlan_list_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ struct ifla_vf_vlan_info *vlan_info;
+
+ switch (nlattr->nla_type) {
+ /* struct ifla_vf_vlan_info */
+ case IFLA_VF_VLAN_INFO:
+ vlan_info = NLA_DATA(nlattr);
+ vlan_info->vf = tswap32(vlan_info->vf);
+ vlan_info->vlan = tswap32(vlan_info->vlan);
+ vlan_info->qos = tswap32(vlan_info->qos);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VLAN LIST type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_vf_stats_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint64_t *u64;
+
+ switch (nlattr->nla_type) {
+ /* uint64_t */
+ case QEMU_IFLA_VF_STATS_RX_PACKETS:
+ case QEMU_IFLA_VF_STATS_TX_PACKETS:
+ case QEMU_IFLA_VF_STATS_RX_BYTES:
+ case QEMU_IFLA_VF_STATS_TX_BYTES:
+ case QEMU_IFLA_VF_STATS_BROADCAST:
+ case QEMU_IFLA_VF_STATS_MULTICAST:
+ case QEMU_IFLA_VF_STATS_PAD:
+ case QEMU_IFLA_VF_STATS_RX_DROPPED:
+ case QEMU_IFLA_VF_STATS_TX_DROPPED:
+ u64 = NLA_DATA(nlattr);
+ *u64 = tswap64(*u64);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VF STATS type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_vfinfo_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ struct ifla_vf_mac *mac;
+ struct ifla_vf_vlan *vlan;
+ struct ifla_vf_vlan_info *vlan_info;
+ struct ifla_vf_spoofchk *spoofchk;
+ struct ifla_vf_rate *rate;
+ struct ifla_vf_link_state *link_state;
+ struct ifla_vf_rss_query_en *rss_query_en;
+ struct ifla_vf_trust *trust;
+ struct ifla_vf_guid *guid;
+
+ switch (nlattr->nla_type) {
+ /* struct ifla_vf_mac */
+ case QEMU_IFLA_VF_MAC:
+ mac = NLA_DATA(nlattr);
+ mac->vf = tswap32(mac->vf);
+ break;
+ /* struct ifla_vf_broadcast */
+ case QEMU_IFLA_VF_BROADCAST:
+ break;
+ /* struct ifla_vf_vlan */
+ case QEMU_IFLA_VF_VLAN:
+ vlan = NLA_DATA(nlattr);
+ vlan->vf = tswap32(vlan->vf);
+ vlan->vlan = tswap32(vlan->vlan);
+ vlan->qos = tswap32(vlan->qos);
+ break;
+ /* struct ifla_vf_vlan_info */
+ case QEMU_IFLA_VF_TX_RATE:
+ vlan_info = NLA_DATA(nlattr);
+ vlan_info->vf = tswap32(vlan_info->vf);
+ vlan_info->vlan = tswap32(vlan_info->vlan);
+ vlan_info->qos = tswap32(vlan_info->qos);
+ break;
+ /* struct ifla_vf_spoofchk */
+ case QEMU_IFLA_VF_SPOOFCHK:
+ spoofchk = NLA_DATA(nlattr);
+ spoofchk->vf = tswap32(spoofchk->vf);
+ spoofchk->setting = tswap32(spoofchk->setting);
+ break;
+ /* struct ifla_vf_rate */
+ case QEMU_IFLA_VF_RATE:
+ rate = NLA_DATA(nlattr);
+ rate->vf = tswap32(rate->vf);
+ rate->min_tx_rate = tswap32(rate->min_tx_rate);
+ rate->max_tx_rate = tswap32(rate->max_tx_rate);
+ break;
+ /* struct ifla_vf_link_state */
+ case QEMU_IFLA_VF_LINK_STATE:
+ link_state = NLA_DATA(nlattr);
+ link_state->vf = tswap32(link_state->vf);
+ link_state->link_state = tswap32(link_state->link_state);
+ break;
+ /* struct ifla_vf_rss_query_en */
+ case QEMU_IFLA_VF_RSS_QUERY_EN:
+ rss_query_en = NLA_DATA(nlattr);
+ rss_query_en->vf = tswap32(rss_query_en->vf);
+ rss_query_en->setting = tswap32(rss_query_en->setting);
+ break;
+ /* struct ifla_vf_trust */
+ case QEMU_IFLA_VF_TRUST:
+ trust = NLA_DATA(nlattr);
+ trust->vf = tswap32(trust->vf);
+ trust->setting = tswap32(trust->setting);
+ break;
+ /* struct ifla_vf_guid */
+ case QEMU_IFLA_VF_IB_NODE_GUID:
+ case QEMU_IFLA_VF_IB_PORT_GUID:
+ guid = NLA_DATA(nlattr);
+ guid->vf = tswap32(guid->vf);
+ guid->guid = tswap32(guid->guid);
+ break;
+ /* nested */
+ case QEMU_IFLA_VF_VLAN_LIST:
+ return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_vlan_list_nlattr);
+ case QEMU_IFLA_VF_STATS:
+ return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_vf_stats_nlattr);
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VFINFO type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
uint32_t *u32;
@@ -818,9 +995,12 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
case QEMU_IFLA_ADDRESS:
case QEMU_IFLA_BROADCAST:
case QEMU_IFLA_PERM_ADDRESS:
+ case QEMU_IFLA_PHYS_PORT_ID:
/* string */
case QEMU_IFLA_IFNAME:
case QEMU_IFLA_QDISC:
+ case QEMU_IFLA_PARENT_DEV_NAME:
+ case QEMU_IFLA_PARENT_DEV_BUS_NAME:
break;
/* uint8_t */
case QEMU_IFLA_OPERSTATE:
@@ -939,6 +1119,10 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
NULL,
host_to_target_data_xdp_nlattr);
+ case QEMU_IFLA_VFINFO_LIST:
+ return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ NULL,
+ host_to_target_data_vfinfo_nlattr);
default:
qemu_log_mask(LOG_UNIMP, "Unknown host QEMU_IFLA type: %d\n",
rtattr->rta_type);
@@ -1100,6 +1284,49 @@ static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
+static abi_long target_to_host_for_each_nlattr(struct nlattr *nlattr,
+ size_t len,
+ abi_long (*target_to_host_nlattr)
+ (struct nlattr *))
+{
+ unsigned short aligned_nla_len;
+ abi_long ret;
+
+ while (len > sizeof(struct nlattr)) {
+ if (tswap16(nlattr->nla_len) < sizeof(struct rtattr) ||
+ tswap16(nlattr->nla_len) > len) {
+ break;
+ }
+ nlattr->nla_len = tswap16(nlattr->nla_len);
+ nlattr->nla_type = tswap16(nlattr->nla_type);
+ ret = target_to_host_nlattr(nlattr);
+ if (ret < 0) {
+ return ret;
+ }
+
+ aligned_nla_len = NLA_ALIGN(nlattr->nla_len);
+ if (aligned_nla_len >= len) {
+ break;
+ }
+ len -= aligned_nla_len;
+ nlattr = (struct nlattr *)(((char *)nlattr) + aligned_nla_len);
+ }
+ return 0;
+}
+
+static abi_long target_to_host_data_inet6_nlattr(struct nlattr *nlattr)
+{
+ switch (nlattr->nla_type) {
+ /* uint8_t */
+ case QEMU_IFLA_INET6_ADDR_GEN_MODE:
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown target AF_INET6 type: %d\n",
+ nlattr->nla_type);
+ }
+ return 0;
+}
+
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
size_t len,
abi_long (*target_to_host_rtattr)
@@ -1130,16 +1357,35 @@ static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
return 0;
}
+static abi_long target_to_host_data_spec_nlattr(struct nlattr *nlattr)
+{
+ switch (nlattr->nla_type & NLA_TYPE_MASK) {
+ case AF_INET6:
+ return target_to_host_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
+ target_to_host_data_inet6_nlattr);
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown target AF_SPEC type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
uint32_t *u32;
- switch (rtattr->rta_type) {
+ switch (rtattr->rta_type & NLA_TYPE_MASK) {
/* uint32_t */
+ case QEMU_IFLA_MTU:
+ case QEMU_IFLA_TXQLEN:
case QEMU_IFLA_EXT_MASK:
u32 = RTA_DATA(rtattr);
*u32 = tswap32(*u32);
break;
+ case QEMU_IFLA_AF_SPEC:
+ return target_to_host_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ target_to_host_data_spec_nlattr);
default:
qemu_log_mask(LOG_UNIMP, "Unknown target QEMU_IFLA type: %d\n",
rtattr->rta_type);
@@ -1438,7 +1684,7 @@ TargetFdTrans target_signalfd_trans = {
.host_to_target_data = host_to_target_data_signalfd,
};
-static abi_long swap_data_eventfd(void *buf, size_t len)
+static abi_long swap_data_u64(void *buf, size_t len)
{
uint64_t *counter = buf;
int i;
@@ -1456,13 +1702,16 @@ static abi_long swap_data_eventfd(void *buf, size_t len)
}
TargetFdTrans target_eventfd_trans = {
- .host_to_target_data = swap_data_eventfd,
- .target_to_host_data = swap_data_eventfd,
+ .host_to_target_data = swap_data_u64,
+ .target_to_host_data = swap_data_u64,
+};
+
+TargetFdTrans target_timerfd_trans = {
+ .host_to_target_data = swap_data_u64,
};
-#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
- (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
- defined(__NR_inotify_init1))
+#if defined(CONFIG_INOTIFY) && (defined(TARGET_NR_inotify_init) || \
+ defined(TARGET_NR_inotify_init1))
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
struct inotify_event *ev;
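
The VFINFO and VF_STATS handling added above relies on host_to_target_for_each_nlattr() to walk nested netlink attributes: each is a struct nlattr header followed by a payload, padded to 4 bytes. A self-contained sketch of the same walk is below, using only the kernel's linux/netlink.h definitions; the callback and the dummy buffer are invented for illustration, not QEMU code.

    #include <stdio.h>
    #include <string.h>
    #include <linux/netlink.h>      /* struct nlattr, NLA_ALIGN, NLA_HDRLEN */

    /* Visit every attribute in a buffer of 'len' bytes, calling cb on each. */
    static int for_each_nlattr(struct nlattr *nla, size_t len,
                               int (*cb)(struct nlattr *))
    {
        while (len >= sizeof(struct nlattr)) {
            unsigned short alen;

            if (nla->nla_len < sizeof(struct nlattr) || nla->nla_len > len) {
                break;                  /* malformed attribute: stop walking */
            }
            if (cb(nla) < 0) {
                return -1;
            }
            alen = NLA_ALIGN(nla->nla_len);
            if (alen >= len) {
                break;                  /* that was the last attribute */
            }
            len -= alen;
            nla = (struct nlattr *)((char *)nla + alen);
        }
        return 0;
    }

    static int print_attr(struct nlattr *nla)
    {
        printf("type %d, payload %d bytes\n",
               (int)nla->nla_type, (int)(nla->nla_len - NLA_HDRLEN));
        return 0;
    }

    int main(void)
    {
        /* Two dummy attributes, each with a 4-byte payload. */
        _Alignas(4) unsigned char buf[2 * (NLA_HDRLEN + 4)];
        struct nlattr *a = (struct nlattr *)buf;

        memset(buf, 0, sizeof(buf));
        a->nla_len = NLA_HDRLEN + 4;
        a->nla_type = 1;
        a = (struct nlattr *)(buf + NLA_ALIGN(a->nla_len));
        a->nla_len = NLA_HDRLEN + 4;
        a->nla_type = 2;

        return for_each_nlattr((struct nlattr *)buf, sizeof(buf), print_attr);
    }
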
diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h
index 1b9fa2041c..910faaf237 100644
--- a/linux-user/fd-trans.h
+++ b/linux-user/fd-trans.h
@@ -130,6 +130,7 @@ extern TargetFdTrans target_netlink_route_trans;
extern TargetFdTrans target_netlink_audit_trans;
extern TargetFdTrans target_signalfd_trans;
extern TargetFdTrans target_eventfd_trans;
+extern TargetFdTrans target_timerfd_trans;
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
(defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
defined(__NR_inotify_init1))
diff --git a/linux-user/flat.h b/linux-user/flat.h
index ed518e2013..e374b73e26 100644
--- a/linux-user/flat.h
+++ b/linux-user/flat.h
@@ -12,11 +12,8 @@
#define FLAT_VERSION 0x00000004L
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-#define MAX_SHARED_LIBS (4)
-#else
+/* QEMU doesn't support bflt shared libraries */
#define MAX_SHARED_LIBS (1)
-#endif
/*
* To make everything easier to port and manage cross platform
diff --git a/linux-user/flatload.c b/linux-user/flatload.c
index e4c2f89a22..04d8138d12 100644
--- a/linux-user/flatload.c
+++ b/linux-user/flatload.c
@@ -29,8 +29,6 @@
* JAN/99 -- coded full program relocation (gerg@snapgear.com)
*/
-/* ??? ZFLAT and shared library support is currently disabled. */
-
/****************************************************************************/
#include "qemu/osdep.h"
@@ -64,10 +62,6 @@ struct lib_info {
short loaded; /* Has this library been loaded? */
};
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-static int load_flat_shared_library(int id, struct lib_info *p);
-#endif
-
struct linux_binprm;
/****************************************************************************/
@@ -108,153 +102,6 @@ static int target_pread(int fd, abi_ulong ptr, abi_ulong len,
unlock_user(buf, ptr, len);
return ret;
}
-/****************************************************************************/
-
-#ifdef CONFIG_BINFMT_ZFLAT
-
-#include <linux/zlib.h>
-
-#define LBUFSIZE 4000
-
-/* gzip flag byte */
-#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
-#define COMMENT 0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
-#define RESERVED 0xC0 /* bit 6,7: reserved */
-
-static int decompress_exec(
- struct linux_binprm *bprm,
- unsigned long offset,
- char *dst,
- long len,
- int fd)
-{
- unsigned char *buf;
- z_stream strm;
- loff_t fpos;
- int ret, retval;
-
- DBG_FLT("decompress_exec(offset=%x,buf=%x,len=%x)\n",(int)offset, (int)dst, (int)len);
-
- memset(&strm, 0, sizeof(strm));
- strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
- if (strm.workspace == NULL) {
- DBG_FLT("binfmt_flat: no memory for decompress workspace\n");
- return -ENOMEM;
- }
- buf = kmalloc(LBUFSIZE, GFP_KERNEL);
- if (buf == NULL) {
- DBG_FLT("binfmt_flat: no memory for read buffer\n");
- retval = -ENOMEM;
- goto out_free;
- }
-
- /* Read in first chunk of data and parse gzip header. */
- fpos = offset;
- ret = bprm->file->f_op->read(bprm->file, buf, LBUFSIZE, &fpos);
-
- strm.next_in = buf;
- strm.avail_in = ret;
- strm.total_in = 0;
-
- retval = -ENOEXEC;
-
- /* Check minimum size -- gzip header */
- if (ret < 10) {
- DBG_FLT("binfmt_flat: file too small?\n");
- goto out_free_buf;
- }
-
- /* Check gzip magic number */
- if ((buf[0] != 037) || ((buf[1] != 0213) && (buf[1] != 0236))) {
- DBG_FLT("binfmt_flat: unknown compression magic?\n");
- goto out_free_buf;
- }
-
- /* Check gzip method */
- if (buf[2] != 8) {
- DBG_FLT("binfmt_flat: unknown compression method?\n");
- goto out_free_buf;
- }
- /* Check gzip flags */
- if ((buf[3] & ENCRYPTED) || (buf[3] & CONTINUATION) ||
- (buf[3] & RESERVED)) {
- DBG_FLT("binfmt_flat: unknown flags?\n");
- goto out_free_buf;
- }
-
- ret = 10;
- if (buf[3] & EXTRA_FIELD) {
- ret += 2 + buf[10] + (buf[11] << 8);
- if (unlikely(LBUFSIZE == ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (EXTRA)?\n");
- goto out_free_buf;
- }
- }
- if (buf[3] & ORIG_NAME) {
- for (; ret < LBUFSIZE && (buf[ret] != 0); ret++)
- ;
- if (unlikely(LBUFSIZE == ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (ORIG_NAME)?\n");
- goto out_free_buf;
- }
- }
- if (buf[3] & COMMENT) {
- for (; ret < LBUFSIZE && (buf[ret] != 0); ret++)
- ;
- if (unlikely(LBUFSIZE == ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (COMMENT)?\n");
- goto out_free_buf;
- }
- }
-
- strm.next_in += ret;
- strm.avail_in -= ret;
-
- strm.next_out = dst;
- strm.avail_out = len;
- strm.total_out = 0;
-
- if (zlib_inflateInit2(&strm, -MAX_WBITS) != Z_OK) {
- DBG_FLT("binfmt_flat: zlib init failed?\n");
- goto out_free_buf;
- }
-
- while ((ret = zlib_inflate(&strm, Z_NO_FLUSH)) == Z_OK) {
- ret = bprm->file->f_op->read(bprm->file, buf, LBUFSIZE, &fpos);
- if (ret <= 0)
- break;
- if (is_error(ret)) {
- break;
- }
- len -= ret;
-
- strm.next_in = buf;
- strm.avail_in = ret;
- strm.total_in = 0;
- }
-
- if (ret < 0) {
- DBG_FLT("binfmt_flat: decompression failed (%d), %s\n",
- ret, strm.msg);
- goto out_zlib;
- }
-
- retval = 0;
-out_zlib:
- zlib_inflateEnd(&strm);
-out_free_buf:
- kfree(buf);
-out_free:
- kfree(strm.workspace);
-out:
- return retval;
-}
-
-#endif /* CONFIG_BINFMT_ZFLAT */
/****************************************************************************/
@@ -268,40 +115,7 @@ calc_reloc(abi_ulong r, struct lib_info *p, int curid, int internalp)
abi_ulong text_len;
abi_ulong start_code;
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-#error needs checking
- if (r == 0)
- id = curid; /* Relocs of 0 are always self referring */
- else {
- id = (r >> 24) & 0xff; /* Find ID for this reloc */
- r &= 0x00ffffff; /* Trim ID off here */
- }
- if (id >= MAX_SHARED_LIBS) {
- fprintf(stderr, "BINFMT_FLAT: reference 0x%x to shared library %d\n",
- (unsigned) r, id);
- goto failed;
- }
- if (curid != id) {
- if (internalp) {
- fprintf(stderr, "BINFMT_FLAT: reloc address 0x%x not "
- "in same module (%d != %d)\n",
- (unsigned) r, curid, id);
- goto failed;
- } else if (!p[id].loaded && is_error(load_flat_shared_library(id, p))) {
- fprintf(stderr, "BINFMT_FLAT: failed to load library %d\n", id);
- goto failed;
- }
- /* Check versioning information (i.e. time stamps) */
- if (p[id].build_date && p[curid].build_date
- && p[curid].build_date < p[id].build_date) {
- fprintf(stderr, "BINFMT_FLAT: library %d is younger than %d\n",
- id, curid);
- goto failed;
- }
- }
-#else
id = 0;
-#endif
start_brk = p[id].start_brk;
start_data = p[id].start_data;
@@ -425,12 +239,10 @@ static int load_flat_file(struct linux_binprm * bprm,
if (rev == OLD_FLAT_VERSION && flat_old_ram_flag(flags))
flags = FLAT_FLAG_RAM;
-#ifndef CONFIG_BINFMT_ZFLAT
if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) {
- fprintf(stderr, "Support for ZFLAT executables is not enabled\n");
+ fprintf(stderr, "ZFLAT executables are not supported\n");
return -ENOEXEC;
}
-#endif
/*
* calculate the extra space we need to map in
@@ -448,7 +260,7 @@ static int load_flat_file(struct linux_binprm * bprm,
* Allocate the address space.
*/
probe_guest_base(bprm->filename, 0,
- text_len + data_len + extra + indx_len);
+ text_len + data_len + extra + indx_len - 1);
/*
* there are a couple of cases here, the separate code/data
@@ -463,7 +275,7 @@ static int load_flat_file(struct linux_binprm * bprm,
DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
textpos = target_mmap(0, text_len, PROT_READ|PROT_EXEC,
- MAP_PRIVATE, bprm->fd, 0);
+ MAP_PRIVATE, bprm->src.fd, 0);
if (textpos == -1) {
fprintf(stderr, "Unable to mmap process text\n");
return -1;
@@ -483,17 +295,9 @@ static int load_flat_file(struct linux_binprm * bprm,
(int)(data_len + bss_len + stack_len), (int)datapos);
fpos = ntohl(hdr->data_start);
-#ifdef CONFIG_BINFMT_ZFLAT
- if (flags & FLAT_FLAG_GZDATA) {
- result = decompress_exec(bprm, fpos, (char *) datapos,
- data_len + (relocs * sizeof(abi_ulong)))
- } else
-#endif
- {
- result = target_pread(bprm->fd, datapos,
- data_len + (relocs * sizeof(abi_ulong)),
- fpos);
- }
+ result = target_pread(bprm->src.fd, datapos,
+ data_len + (relocs * sizeof(abi_ulong)),
+ fpos);
if (result < 0) {
fprintf(stderr, "Unable to read data+bss\n");
return result;
@@ -515,38 +319,12 @@ static int load_flat_file(struct linux_binprm * bprm,
datapos = realdatastart + indx_len;
reloc = (textpos + ntohl(hdr->reloc_start) + indx_len);
-#ifdef CONFIG_BINFMT_ZFLAT
-#error code needs checking
- /*
- * load it all in and treat it like a RAM load from now on
- */
- if (flags & FLAT_FLAG_GZIP) {
- result = decompress_exec(bprm, sizeof (struct flat_hdr),
- (((char *) textpos) + sizeof (struct flat_hdr)),
- (text_len + data_len + (relocs * sizeof(unsigned long))
- - sizeof (struct flat_hdr)),
- 0);
- memmove((void *) datapos, (void *) realdatastart,
- data_len + (relocs * sizeof(unsigned long)));
- } else if (flags & FLAT_FLAG_GZDATA) {
- fpos = 0;
- result = bprm->file->f_op->read(bprm->file,
- (char *) textpos, text_len, &fpos);
- if (!is_error(result)) {
- result = decompress_exec(bprm, text_len, (char *) datapos,
- data_len + (relocs * sizeof(unsigned long)), 0);
- }
- }
- else
-#endif
- {
- result = target_pread(bprm->fd, textpos,
- text_len, 0);
- if (result >= 0) {
- result = target_pread(bprm->fd, datapos,
- data_len + (relocs * sizeof(abi_ulong)),
- ntohl(hdr->data_start));
- }
+ result = target_pread(bprm->src.fd, textpos,
+ text_len, 0);
+ if (result >= 0) {
+ result = target_pread(bprm->src.fd, datapos,
+ data_len + (relocs * sizeof(abi_ulong)),
+ ntohl(hdr->data_start));
}
if (result < 0) {
fprintf(stderr, "Unable to read code+data+bss\n");
@@ -678,44 +456,6 @@ static int load_flat_file(struct linux_binprm * bprm,
/****************************************************************************/
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-
-/*
- * Load a shared library into memory. The library gets its own data
- * segment (including bss) but not argv/argc/environ.
- */
-
-static int load_flat_shared_library(int id, struct lib_info *libs)
-{
- struct linux_binprm bprm;
- int res;
- char buf[16];
-
- /* Create the file name */
- sprintf(buf, "/lib/lib%d.so", id);
-
- /* Open the file up */
- bprm.filename = buf;
- bprm.file = open_exec(bprm.filename);
- res = PTR_ERR(bprm.file);
- if (IS_ERR(bprm.file))
- return res;
-
- res = prepare_binprm(&bprm);
-
- if (!is_error(res)) {
- res = load_flat_file(&bprm, libs, id, NULL);
- }
- if (bprm.file) {
- allow_write_access(bprm.file);
- fput(bprm.file);
- bprm.file = NULL;
- }
- return(res);
-}
-
-#endif /* CONFIG_BINFMT_SHARED_FLAT */
-
int load_flt_binary(struct linux_binprm *bprm, struct image_info *info)
{
struct lib_info libinfo[MAX_SHARED_LIBS];
@@ -755,15 +495,15 @@ int load_flt_binary(struct linux_binprm *bprm, struct image_info *info)
/* Update data segment pointers for all libraries */
for (i=0; i<MAX_SHARED_LIBS; i++) {
if (libinfo[i].loaded) {
- abi_ulong p;
- p = libinfo[i].start_data;
+ abi_ulong seg;
+ seg = libinfo[i].start_data;
for (j=0; j<MAX_SHARED_LIBS; j++) {
- p -= 4;
+ seg -= 4;
/* FIXME - handle put_user() failures */
if (put_user_ual(libinfo[j].loaded
? libinfo[j].start_data
: UNLOADED_LIB,
- p))
+ seg))
return -EFAULT;
}
}
@@ -780,7 +520,7 @@ int load_flt_binary(struct linux_binprm *bprm, struct image_info *info)
/* Enforce final stack alignment of 16 bytes. This is sufficient
for all current targets, and excess alignment is harmless. */
stack_len = bprm->envc + bprm->argc + 2;
- stack_len += flat_argvp_envp_on_stack() ? 2 : 0; /* arvg, argp */
+ stack_len += flat_argvp_envp_on_stack() ? 2 : 0; /* argv, argp */
stack_len += 1; /* argc */
stack_len *= sizeof(abi_ulong);
sp -= (sp - stack_len) & 15;
@@ -793,25 +533,12 @@ int load_flt_binary(struct linux_binprm *bprm, struct image_info *info)
*/
start_addr = libinfo[0].entry;
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-#error here
- for (i = MAX_SHARED_LIBS-1; i>0; i--) {
- if (libinfo[i].loaded) {
- /* Push previous first to call address */
- --sp;
- if (put_user_ual(start_addr, sp))
- return -EFAULT;
- start_addr = libinfo[i].entry;
- }
- }
-#endif
-
/* Stash our initial stack pointer into the mm structure */
info->start_code = libinfo[0].start_code;
- info->end_code = libinfo[0].start_code = libinfo[0].text_len;
+ info->end_code = libinfo[0].start_code + libinfo[0].text_len;
info->start_data = libinfo[0].start_data;
info->end_data = libinfo[0].end_data;
- info->start_brk = libinfo[0].start_brk;
+ info->brk = libinfo[0].start_brk;
info->start_stack = sp;
info->stack_limit = libinfo[0].start_brk;
info->entry = start_addr;
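
Two details of the flatload changes above are easy to miss: end_code is now start_code + text_len (the old line assigned instead of adding), and the stack base is shifted so that, after reserving stack_len bytes of argument and environment slots, the resulting pointer sits on a 16-byte boundary. A quick standalone check of that arithmetic with made-up values follows; abi_ulong is approximated by uint64_t.

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        /* Made-up values standing in for the loader's state. */
        uint64_t start_code = 0x40000000, text_len = 0x1234;
        uint64_t argc = 3, envc = 10, argvp_envp_on_stack = 1;
        uint64_t sp = 0x7ffffff4;           /* deliberately not 16-byte aligned */

        /* Corrected computation: the old code assigned instead of adding. */
        uint64_t end_code = start_code + text_len;

        /* Slots for the argv/envp pointers, their NULL terminators,
         * optionally the argv/envp base pointers, and argc itself. */
        uint64_t stack_len = envc + argc + 2;
        stack_len += argvp_envp_on_stack ? 2 : 0;
        stack_len += 1;
        stack_len *= sizeof(uint64_t);

        /* Shift sp so that sp - stack_len lands on a 16-byte boundary. */
        sp -= (sp - stack_len) & 15;

        printf("end_code=0x%" PRIx64 ", argument block base=0x%" PRIx64 "\n",
               end_code, sp - stack_len);
        return 0;
    }
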
diff --git a/linux-user/gen-vdso-elfn.c.inc b/linux-user/gen-vdso-elfn.c.inc
new file mode 100644
index 0000000000..95856eb839
--- /dev/null
+++ b/linux-user/gen-vdso-elfn.c.inc
@@ -0,0 +1,314 @@
+/*
+ * Post-process a vdso elf image for inclusion into qemu.
+ * Elf size specialization.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+static void elfN(bswap_ehdr)(ElfN(Ehdr) *ehdr)
+{
+ bswaps(&ehdr->e_type); /* Object file type */
+ bswaps(&ehdr->e_machine); /* Architecture */
+ bswaps(&ehdr->e_version); /* Object file version */
+ bswaps(&ehdr->e_entry); /* Entry point virtual address */
+ bswaps(&ehdr->e_phoff); /* Program header table file offset */
+ bswaps(&ehdr->e_shoff); /* Section header table file offset */
+ bswaps(&ehdr->e_flags); /* Processor-specific flags */
+ bswaps(&ehdr->e_ehsize); /* ELF header size in bytes */
+ bswaps(&ehdr->e_phentsize); /* Program header table entry size */
+ bswaps(&ehdr->e_phnum); /* Program header table entry count */
+ bswaps(&ehdr->e_shentsize); /* Section header table entry size */
+ bswaps(&ehdr->e_shnum); /* Section header table entry count */
+ bswaps(&ehdr->e_shstrndx); /* Section header string table index */
+}
+
+static void elfN(bswap_phdr)(ElfN(Phdr) *phdr)
+{
+ bswaps(&phdr->p_type); /* Segment type */
+ bswaps(&phdr->p_flags); /* Segment flags */
+ bswaps(&phdr->p_offset); /* Segment file offset */
+ bswaps(&phdr->p_vaddr); /* Segment virtual address */
+ bswaps(&phdr->p_paddr); /* Segment physical address */
+ bswaps(&phdr->p_filesz); /* Segment size in file */
+ bswaps(&phdr->p_memsz); /* Segment size in memory */
+ bswaps(&phdr->p_align); /* Segment alignment */
+}
+
+static void elfN(bswap_shdr)(ElfN(Shdr) *shdr)
+{
+ bswaps(&shdr->sh_name);
+ bswaps(&shdr->sh_type);
+ bswaps(&shdr->sh_flags);
+ bswaps(&shdr->sh_addr);
+ bswaps(&shdr->sh_offset);
+ bswaps(&shdr->sh_size);
+ bswaps(&shdr->sh_link);
+ bswaps(&shdr->sh_info);
+ bswaps(&shdr->sh_addralign);
+ bswaps(&shdr->sh_entsize);
+}
+
+static void elfN(bswap_sym)(ElfN(Sym) *sym)
+{
+ bswaps(&sym->st_name);
+ bswaps(&sym->st_value);
+ bswaps(&sym->st_size);
+ bswaps(&sym->st_shndx);
+}
+
+static void elfN(bswap_dyn)(ElfN(Dyn) *dyn)
+{
+ bswaps(&dyn->d_tag); /* Dynamic type tag */
+ bswaps(&dyn->d_un.d_ptr); /* Dynamic ptr or val, in union */
+}
+
+static void elfN(search_symtab)(ElfN(Shdr) *shdr, unsigned sym_idx,
+ void *buf, bool need_bswap)
+{
+ unsigned str_idx = shdr[sym_idx].sh_link;
+ ElfN(Sym) *sym = buf + shdr[sym_idx].sh_offset;
+ unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*sym);
+ const char *str = buf + shdr[str_idx].sh_offset;
+
+ for (unsigned i = 0; i < sym_n; ++i) {
+ const char *name;
+
+ if (need_bswap) {
+ elfN(bswap_sym)(sym + i);
+ }
+ name = str + sym[i].st_name;
+
+ if (sigreturn_sym && strcmp(sigreturn_sym, name) == 0) {
+ sigreturn_addr = sym[i].st_value;
+ }
+ if (rt_sigreturn_sym && strcmp(rt_sigreturn_sym, name) == 0) {
+ rt_sigreturn_addr = sym[i].st_value;
+ }
+ }
+}
+
+static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
+{
+ ElfN(Ehdr) *ehdr = buf;
+ ElfN(Phdr) *phdr;
+ ElfN(Shdr) *shdr;
+ unsigned phnum, shnum;
+ unsigned dynamic_ofs = 0;
+ unsigned dynamic_addr = 0;
+ unsigned symtab_idx = 0;
+ unsigned dynsym_idx = 0;
+ unsigned first_segsz = 0;
+ int errors = 0;
+
+ if (need_bswap) {
+ elfN(bswap_ehdr)(ehdr);
+ }
+
+ phnum = ehdr->e_phnum;
+ phdr = buf + ehdr->e_phoff;
+ if (need_bswap) {
+ for (unsigned i = 0; i < phnum; ++i) {
+ elfN(bswap_phdr)(phdr + i);
+ }
+ }
+
+ shnum = ehdr->e_shnum;
+ shdr = buf + ehdr->e_shoff;
+ if (need_bswap) {
+ for (unsigned i = 0; i < shnum; ++i) {
+ elfN(bswap_shdr)(shdr + i);
+ }
+ }
+ for (unsigned i = 0; i < shnum; ++i) {
+ switch (shdr[i].sh_type) {
+ case SHT_SYMTAB:
+ symtab_idx = i;
+ break;
+ case SHT_DYNSYM:
+ dynsym_idx = i;
+ break;
+ }
+ }
+
+ /*
+ * Validate that the VDSO is created as we expect: that PT_PHDR,
+ * PT_DYNAMIC, and PT_NOTE are located in a writable data segment.
+ * PHDR and DYNAMIC require relocation, and NOTE will get the
+ * linux version number.
+ */
+ for (unsigned i = 0; i < phnum; ++i) {
+ if (phdr[i].p_type != PT_LOAD) {
+ continue;
+ }
+ if (first_segsz != 0) {
+ fprintf(stderr, "Multiple LOAD segments\n");
+ errors++;
+ }
+ if (phdr[i].p_offset != 0) {
+ fprintf(stderr, "LOAD segment does not cover EHDR\n");
+ errors++;
+ }
+ if (phdr[i].p_vaddr != 0) {
+ fprintf(stderr, "LOAD segment not loaded at address 0\n");
+ errors++;
+ }
+ first_segsz = phdr[i].p_filesz;
+ if (first_segsz < ehdr->e_phoff + phnum * sizeof(*phdr)) {
+ fprintf(stderr, "LOAD segment does not cover PHDRs\n");
+ errors++;
+ }
+ if ((phdr[i].p_flags & (PF_R | PF_W)) != (PF_R | PF_W)) {
+ fprintf(stderr, "LOAD segment is not read-write\n");
+ errors++;
+ }
+ }
+ for (unsigned i = 0; i < phnum; ++i) {
+ const char *which;
+
+ switch (phdr[i].p_type) {
+ case PT_PHDR:
+ which = "PT_PHDR";
+ break;
+ case PT_NOTE:
+ which = "PT_NOTE";
+ break;
+ case PT_DYNAMIC:
+ dynamic_ofs = phdr[i].p_offset;
+ dynamic_addr = phdr[i].p_vaddr;
+ which = "PT_DYNAMIC";
+ break;
+ default:
+ continue;
+ }
+ if (first_segsz < phdr[i].p_vaddr + phdr[i].p_filesz) {
+ fprintf(stderr, "LOAD segment does not cover %s\n", which);
+ errors++;
+ }
+ }
+ if (errors) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* Relocate the program headers. */
+ for (unsigned i = 0; i < phnum; ++i) {
+ output_reloc(outf, buf, &phdr[i].p_vaddr);
+ output_reloc(outf, buf, &phdr[i].p_paddr);
+ }
+
+ /* Relocate the DYNAMIC entries. */
+ if (dynamic_addr) {
+ ElfN(Dyn) *dyn = buf + dynamic_ofs;
+ __typeof(dyn->d_tag) tag;
+
+ do {
+
+ if (need_bswap) {
+ elfN(bswap_dyn)(dyn);
+ }
+ tag = dyn->d_tag;
+
+ switch (tag) {
+ case DT_HASH:
+ case DT_SYMTAB:
+ case DT_STRTAB:
+ case DT_VERDEF:
+ case DT_VERSYM:
+ case DT_PLTGOT:
+ case DT_ADDRRNGLO ... DT_ADDRRNGHI:
+ /* These entries store an address in the entry. */
+ output_reloc(outf, buf, &dyn->d_un.d_val);
+ break;
+
+ case DT_NULL:
+ case DT_STRSZ:
+ case DT_SONAME:
+ case DT_DEBUG:
+ case DT_FLAGS:
+ case DT_FLAGS_1:
+ case DT_SYMBOLIC:
+ case DT_BIND_NOW:
+ case DT_VERDEFNUM:
+ case DT_VALRNGLO ... DT_VALRNGHI:
+ /* These entries store an integer in the entry. */
+ break;
+
+ case DT_SYMENT:
+ if (dyn->d_un.d_val != sizeof(ElfN(Sym))) {
+ fprintf(stderr, "VDSO has incorrect dynamic symbol size\n");
+ errors++;
+ }
+ break;
+
+ case DT_REL:
+ case DT_RELSZ:
+ case DT_RELA:
+ case DT_RELASZ:
+ /*
+ * These entries indicate that the VDSO was built incorrectly.
+ * It should not have any real relocations.
+ * ??? The RISC-V toolchain will emit these even when there
+ * are no relocations. Validate zeros.
+ */
+ if (dyn->d_un.d_val != 0) {
+ fprintf(stderr, "VDSO has dynamic relocations\n");
+ errors++;
+ }
+ break;
+ case DT_RELENT:
+ case DT_RELAENT:
+ case DT_TEXTREL:
+ /* These entries store an integer in the entry. */
+ /* Should not be required; see above. */
+ break;
+
+ case DT_NEEDED:
+ case DT_VERNEED:
+ case DT_PLTREL:
+ case DT_JMPREL:
+ case DT_RPATH:
+ case DT_RUNPATH:
+ fprintf(stderr, "VDSO has external dependencies\n");
+ errors++;
+ break;
+
+ case PT_LOPROC + 3:
+ if (ehdr->e_machine == EM_PPC64) {
+ break; /* DT_PPC64_OPT: integer bitmask */
+ }
+ goto do_default;
+
+ default:
+ do_default:
+ /* This is probably something target specific. */
+ fprintf(stderr, "VDSO has unknown DYNAMIC entry (%lx)\n",
+ (unsigned long)tag);
+ errors++;
+ break;
+ }
+ dyn++;
+ } while (tag != DT_NULL);
+ if (errors) {
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ /* Relocate the dynamic symbol table. */
+ if (dynsym_idx) {
+ ElfN(Sym) *sym = buf + shdr[dynsym_idx].sh_offset;
+ unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*sym);
+
+ for (unsigned i = 0; i < sym_n; ++i) {
+ output_reloc(outf, buf, &sym[i].st_value);
+ }
+ }
+
+ /* Search both dynsym and symtab for the signal return symbols. */
+ if (dynsym_idx) {
+ elfN(search_symtab)(shdr, dynsym_idx, buf, need_bswap);
+ }
+ if (symtab_idx) {
+ elfN(search_symtab)(shdr, symtab_idx, buf, need_bswap);
+ }
+}
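
gen-vdso-elfn.c.inc does not relocate anything itself; it only emits the byte offsets of every image slot that holds a virtual address (program header vaddr/paddr fields, address-valued DYNAMIC entries, dynamic symbol values). At load time the consumer only needs to add the chosen base address at each recorded offset. A minimal sketch of that consumer side is below, assuming 8-byte slots as for a 64-bit vdso; the function name is illustrative, not the one QEMU's loader uses.

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>

    /* Add 'base' to every 8-byte image slot named by a relocation offset,
     * as a loader would for a 64-bit vdso image. */
    static void apply_vdso_relocs(uint8_t *image, const unsigned *relocs,
                                  unsigned count, uint64_t base)
    {
        for (unsigned i = 0; i < count; i++) {
            uint64_t v;

            memcpy(&v, image + relocs[i], sizeof(v));
            v += base;
            memcpy(image + relocs[i], &v, sizeof(v));
        }
    }

    int main(void)
    {
        uint8_t image[32] = { 0 };
        uint64_t vaddr = 0x1000;            /* address the linker stored */
        unsigned relocs[] = { 8 };          /* offset of that slot */

        memcpy(image + relocs[0], &vaddr, sizeof(vaddr));
        apply_vdso_relocs(image, relocs, 1, 0x7f0000000000ull);

        memcpy(&vaddr, image + relocs[0], sizeof(vaddr));
        printf("slot now 0x%" PRIx64 "\n", vaddr);
        return 0;
    }
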
diff --git a/linux-user/gen-vdso.c b/linux-user/gen-vdso.c
new file mode 100644
index 0000000000..31e333be80
--- /dev/null
+++ b/linux-user/gen-vdso.c
@@ -0,0 +1,223 @@
+/*
+ * Post-process a vdso elf image for inclusion into qemu.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <endian.h>
+#include <unistd.h>
+#include "elf.h"
+
+
+#define bswap_(p) _Generic(*(p), \
+ uint16_t: __builtin_bswap16, \
+ uint32_t: __builtin_bswap32, \
+ uint64_t: __builtin_bswap64, \
+ int16_t: __builtin_bswap16, \
+ int32_t: __builtin_bswap32, \
+ int64_t: __builtin_bswap64)
+#define bswaps(p) (*(p) = bswap_(p)(*(p)))
+
+static void output_reloc(FILE *outf, void *buf, void *loc)
+{
+ fprintf(outf, " 0x%08tx,\n", loc - buf);
+}
+
+static const char *sigreturn_sym;
+static const char *rt_sigreturn_sym;
+
+static unsigned sigreturn_addr;
+static unsigned rt_sigreturn_addr;
+
+#define N 32
+#define elfN(x) elf32_##x
+#define ElfN(x) Elf32_##x
+#include "gen-vdso-elfn.c.inc"
+#undef N
+#undef elfN
+#undef ElfN
+
+#define N 64
+#define elfN(x) elf64_##x
+#define ElfN(x) Elf64_##x
+#include "gen-vdso-elfn.c.inc"
+#undef N
+#undef elfN
+#undef ElfN
+
+
+int main(int argc, char **argv)
+{
+ FILE *inf, *outf;
+ long total_len;
+ const char *prefix = "vdso";
+ const char *inf_name;
+ const char *outf_name = NULL;
+ unsigned char *buf;
+ bool need_bswap;
+
+ while (1) {
+ int opt = getopt(argc, argv, "o:p:r:s:");
+ if (opt < 0) {
+ break;
+ }
+ switch (opt) {
+ case 'o':
+ outf_name = optarg;
+ break;
+ case 'p':
+ prefix = optarg;
+ break;
+ case 'r':
+ rt_sigreturn_sym = optarg;
+ break;
+ case 's':
+ sigreturn_sym = optarg;
+ break;
+ default:
+ usage:
+ fprintf(stderr, "usage: [-p prefix] [-r rt-sigreturn-name] "
+ "[-s sigreturn-name] -o output-file input-file\n");
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (optind >= argc || outf_name == NULL) {
+ goto usage;
+ }
+ inf_name = argv[optind];
+
+ /*
+ * Open the input and output files.
+ */
+ inf = fopen(inf_name, "rb");
+ if (inf == NULL) {
+ goto perror_inf;
+ }
+ outf = fopen(outf_name, "w");
+ if (outf == NULL) {
+ goto perror_outf;
+ }
+
+ /*
+ * Read the input file into a buffer.
+ * We expect the vdso to be small, on the order of one page,
+ * so we do not expect a partial read.
+ */
+ fseek(inf, 0, SEEK_END);
+ total_len = ftell(inf);
+ fseek(inf, 0, SEEK_SET);
+
+ buf = malloc(total_len);
+ if (buf == NULL) {
+ goto perror_inf;
+ }
+
+ errno = 0;
+ if (fread(buf, 1, total_len, inf) != total_len) {
+ if (errno) {
+ goto perror_inf;
+ }
+ fprintf(stderr, "%s: incomplete read\n", inf_name);
+ return EXIT_FAILURE;
+ }
+ fclose(inf);
+
+ /*
+ * Write out the vdso image now, before we make local changes.
+ */
+
+ fprintf(outf,
+ "/* Automatically generated from linux-user/gen-vdso.c. */\n"
+ "\n"
+ "static const uint8_t %s_image[] = {",
+ prefix);
+ for (long i = 0; i < total_len; ++i) {
+ if (i % 12 == 0) {
+ fputs("\n ", outf);
+ }
+ fprintf(outf, " 0x%02x,", buf[i]);
+ }
+ fprintf(outf, "\n};\n\n");
+
+ /*
+ * Identify which elf flavor we're processing.
+ * The first 16 bytes of the file are e_ident.
+ */
+
+ if (buf[EI_MAG0] != ELFMAG0 || buf[EI_MAG1] != ELFMAG1 ||
+ buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) {
+ fprintf(stderr, "%s: not an elf file\n", inf_name);
+ return EXIT_FAILURE;
+ }
+ switch (buf[EI_DATA]) {
+ case ELFDATA2LSB:
+ need_bswap = BYTE_ORDER != LITTLE_ENDIAN;
+ break;
+ case ELFDATA2MSB:
+ need_bswap = BYTE_ORDER != BIG_ENDIAN;
+ break;
+ default:
+ fprintf(stderr, "%s: invalid elf EI_DATA (%u)\n",
+ inf_name, buf[EI_DATA]);
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * We need to relocate the VDSO image. The one built into the kernel
+ * is built for a fixed address. The one we built for QEMU is not,
+ * since that requires close control of the guest address space.
+ *
+ * Output relocation addresses as we go.
+ */
+
+ fprintf(outf, "static const unsigned %s_relocs[] = {\n", prefix);
+
+ switch (buf[EI_CLASS]) {
+ case ELFCLASS32:
+ elf32_process(outf, buf, need_bswap);
+ break;
+ case ELFCLASS64:
+ elf64_process(outf, buf, need_bswap);
+ break;
+ default:
+ fprintf(stderr, "%s: invalid elf EI_CLASS (%u)\n",
+ inf_name, buf[EI_CLASS]);
+ return EXIT_FAILURE;
+ }
+
+ fprintf(outf, "};\n\n"); /* end vdso_relocs. */
+
+ fprintf(outf, "static const VdsoImageInfo %s_image_info = {\n", prefix);
+ fprintf(outf, " .image = %s_image,\n", prefix);
+ fprintf(outf, " .relocs = %s_relocs,\n", prefix);
+ fprintf(outf, " .image_size = sizeof(%s_image),\n", prefix);
+ fprintf(outf, " .reloc_count = ARRAY_SIZE(%s_relocs),\n", prefix);
+ fprintf(outf, " .sigreturn_ofs = 0x%x,\n", sigreturn_addr);
+ fprintf(outf, " .rt_sigreturn_ofs = 0x%x,\n", rt_sigreturn_addr);
+ fprintf(outf, "};\n");
+
+ /*
+ * Everything should have gone well.
+ */
+ if (fclose(outf)) {
+ goto perror_outf;
+ }
+ return EXIT_SUCCESS;
+
+ perror_inf:
+ perror(inf_name);
+ return EXIT_FAILURE;
+
+ perror_outf:
+ perror(outf_name);
+ return EXIT_FAILURE;
+}
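
The bswaps() macro above uses C11 _Generic to pick the matching GCC/Clang __builtin_bswapNN for whatever field width it is handed, which is what lets the shared elfN() code swap Elf32 and Elf64 headers alike. A small standalone demonstration of the same pattern (values are arbitrary):

    #include <stdio.h>
    #include <inttypes.h>

    /* Same shape as in gen-vdso.c: dispatch on the pointee type. */
    #define bswap_(p) _Generic(*(p),              \
            uint16_t: __builtin_bswap16,          \
            uint32_t: __builtin_bswap32,          \
            uint64_t: __builtin_bswap64)
    #define bswaps(p) (*(p) = bswap_(p)(*(p)))

    int main(void)
    {
        uint16_t half = 0x1234;
        uint32_t word = 0x11223344;
        uint64_t quad = 0x1122334455667788ull;

        bswaps(&half);
        bswaps(&word);
        bswaps(&quad);

        printf("%04" PRIx16 " %08" PRIx32 " %016" PRIx64 "\n", half, word, quad);
        return 0;
    }
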
diff --git a/linux-user/generic/signal.h b/linux-user/generic/signal.h
index 943bc1a1e2..6fd05b77bb 100644
--- a/linux-user/generic/signal.h
+++ b/linux-user/generic/signal.h
@@ -55,6 +55,21 @@
#define TARGET_SIG_UNBLOCK 1 /* for unblocking signals */
#define TARGET_SIG_SETMASK 2 /* for setting the signal mask */
+/* this struct defines an alternate stack used during signal handling */
+typedef struct target_sigaltstack {
+ abi_ulong ss_sp;
+ abi_int ss_flags;
+ abi_ulong ss_size;
+} target_stack_t;
+
+/*
+ * sigaltstack controls
+ */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
/* mask for all SS_xxx flags */
diff --git a/linux-user/generic/target_errno_defs.h b/linux-user/generic/target_errno_defs.h
index 17d85e0b61..c2f9d403e7 100644
--- a/linux-user/generic/target_errno_defs.h
+++ b/linux-user/generic/target_errno_defs.h
@@ -147,21 +147,4 @@
#define TARGET_ERFKILL 132 /* Operation not possible due to RF-kill */
#define TARGET_EHWPOISON 133 /* Memory page has hardware error */
-/* QEMU internal, not visible to the guest. This is returned when a
- * system call should be restarted, to tell the main loop that it
- * should wind the guest PC backwards so it will re-execute the syscall
- * after handling any pending signals. They match with the ones the guest
- * kernel uses for the same purpose.
- */
-#define TARGET_ERESTARTSYS 512 /* Restart system call (if SA_RESTART) */
-
-/* QEMU internal, not visible to the guest. This is returned by the
- * do_sigreturn() code after a successful sigreturn syscall, to indicate
- * that it has correctly set the guest registers and so the main loop
- * should not touch them. We use the value the guest would use for
- * ERESTART_NOINTR (which is kernel internal) to guarantee that we won't
- * clash with a valid guest errno now or in the future.
- */
-#define TARGET_QEMU_ESIGRETURN 513 /* Return from signal */
-
#endif
diff --git a/linux-user/target_flat.h b/linux-user/generic/target_flat.h
index 8fe189ea6f..8fe189ea6f 100644
--- a/linux-user/target_flat.h
+++ b/linux-user/generic/target_flat.h
diff --git a/linux-user/generic/target_mman.h b/linux-user/generic/target_mman.h
new file mode 100644
index 0000000000..ec76a91b46
--- /dev/null
+++ b/linux-user/generic/target_mman.h
@@ -0,0 +1,163 @@
+#ifndef LINUX_USER_TARGET_MMAN_H
+#define LINUX_USER_TARGET_MMAN_H
+
+/* These are defined in linux/mman.h */
+#define TARGET_MAP_SHARED 0x01
+#define TARGET_MAP_PRIVATE 0x02
+#define TARGET_MAP_SHARED_VALIDATE 0x03
+
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#ifndef TARGET_MAP_GROWSDOWN
+#define TARGET_MAP_GROWSDOWN 0x0100
+#endif
+#ifndef TARGET_MAP_DENYWRITE
+#define TARGET_MAP_DENYWRITE 0x0800
+#endif
+#ifndef TARGET_MAP_EXECUTABLE
+#define TARGET_MAP_EXECUTABLE 0x1000
+#endif
+#ifndef TARGET_MAP_LOCKED
+#define TARGET_MAP_LOCKED 0x2000
+#endif
+#ifndef TARGET_MAP_NORESERVE
+#define TARGET_MAP_NORESERVE 0x4000
+#endif
+
+/* Defined in asm-generic/mman-common.h */
+#ifndef TARGET_PROT_SEM
+#define TARGET_PROT_SEM 0x08
+#endif
+
+#ifndef TARGET_MAP_TYPE
+#define TARGET_MAP_TYPE 0x0f
+#endif
+#ifndef TARGET_MAP_FIXED
+#define TARGET_MAP_FIXED 0x10
+#endif
+#ifndef TARGET_MAP_ANONYMOUS
+#define TARGET_MAP_ANONYMOUS 0x20
+#endif
+#ifndef TARGET_MAP_POPULATE
+#define TARGET_MAP_POPULATE 0x008000
+#endif
+#ifndef TARGET_MAP_NONBLOCK
+#define TARGET_MAP_NONBLOCK 0x010000
+#endif
+#ifndef TARGET_MAP_STACK
+#define TARGET_MAP_STACK 0x020000
+#endif
+#ifndef TARGET_MAP_HUGETLB
+#define TARGET_MAP_HUGETLB 0x040000
+#endif
+#ifndef TARGET_MAP_SYNC
+#define TARGET_MAP_SYNC 0x080000
+#endif
+#ifndef TARGET_MAP_FIXED_NOREPLACE
+#define TARGET_MAP_FIXED_NOREPLACE 0x100000
+#endif
+#ifndef TARGET_MAP_UNINITIALIZED
+#define TARGET_MAP_UNINITIALIZED 0x4000000
+#endif
+
+#ifndef TARGET_MADV_NORMAL
+#define TARGET_MADV_NORMAL 0
+#endif
+
+#ifndef TARGET_MADV_RANDOM
+#define TARGET_MADV_RANDOM 1
+#endif
+
+#ifndef TARGET_MADV_SEQUENTIAL
+#define TARGET_MADV_SEQUENTIAL 2
+#endif
+
+#ifndef TARGET_MADV_WILLNEED
+#define TARGET_MADV_WILLNEED 3
+#endif
+
+#ifndef TARGET_MADV_DONTNEED
+#define TARGET_MADV_DONTNEED 4
+#endif
+
+#ifndef TARGET_MADV_FREE
+#define TARGET_MADV_FREE 8
+#endif
+
+#ifndef TARGET_MADV_REMOVE
+#define TARGET_MADV_REMOVE 9
+#endif
+
+#ifndef TARGET_MADV_DONTFORK
+#define TARGET_MADV_DONTFORK 10
+#endif
+
+#ifndef TARGET_MADV_DOFORK
+#define TARGET_MADV_DOFORK 11
+#endif
+
+#ifndef TARGET_MADV_MERGEABLE
+#define TARGET_MADV_MERGEABLE 12
+#endif
+
+#ifndef TARGET_MADV_UNMERGEABLE
+#define TARGET_MADV_UNMERGEABLE 13
+#endif
+
+#ifndef TARGET_MADV_HUGEPAGE
+#define TARGET_MADV_HUGEPAGE 14
+#endif
+
+#ifndef TARGET_MADV_NOHUGEPAGE
+#define TARGET_MADV_NOHUGEPAGE 15
+#endif
+
+#ifndef TARGET_MADV_DONTDUMP
+#define TARGET_MADV_DONTDUMP 16
+#endif
+
+#ifndef TARGET_MADV_DODUMP
+#define TARGET_MADV_DODUMP 17
+#endif
+
+#ifndef TARGET_MADV_WIPEONFORK
+#define TARGET_MADV_WIPEONFORK 18
+#endif
+
+#ifndef TARGET_MADV_KEEPONFORK
+#define TARGET_MADV_KEEPONFORK 19
+#endif
+
+#ifndef TARGET_MADV_COLD
+#define TARGET_MADV_COLD 20
+#endif
+
+#ifndef TARGET_MADV_PAGEOUT
+#define TARGET_MADV_PAGEOUT 21
+#endif
+
+#ifndef TARGET_MADV_POPULATE_READ
+#define TARGET_MADV_POPULATE_READ 22
+#endif
+
+#ifndef TARGET_MADV_POPULATE_WRITE
+#define TARGET_MADV_POPULATE_WRITE 23
+#endif
+
+#ifndef TARGET_MADV_DONTNEED_LOCKED
+#define TARGET_MADV_DONTNEED_LOCKED 24
+#endif
+
+
+#ifndef TARGET_MS_ASYNC
+#define TARGET_MS_ASYNC 1
+#endif
+
+#ifndef TARGET_MS_INVALIDATE
+#define TARGET_MS_INVALIDATE 2
+#endif
+
+#ifndef TARGET_MS_SYNC
+#define TARGET_MS_SYNC 4
+#endif
+
+#endif
diff --git a/linux-user/generic/target_prctl_unalign.h b/linux-user/generic/target_prctl_unalign.h
new file mode 100644
index 0000000000..bc3b83af2a
--- /dev/null
+++ b/linux-user/generic/target_prctl_unalign.h
@@ -0,0 +1,27 @@
+/*
+ * Generic prctl unalign functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef GENERIC_TARGET_PRCTL_UNALIGN_H
+#define GENERIC_TARGET_PRCTL_UNALIGN_H
+
+static abi_long do_prctl_get_unalign(CPUArchState *env, target_long arg2)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t res = PR_UNALIGN_NOPRINT;
+ if (cs->prctl_unalign_sigbus) {
+ res |= PR_UNALIGN_SIGBUS;
+ }
+ return put_user_u32(res, arg2);
+}
+#define do_prctl_get_unalign do_prctl_get_unalign
+
+static abi_long do_prctl_set_unalign(CPUArchState *env, target_long arg2)
+{
+ env_cpu(env)->prctl_unalign_sigbus = arg2 & PR_UNALIGN_SIGBUS;
+ return 0;
+}
+#define do_prctl_set_unalign do_prctl_set_unalign
+
+#endif /* GENERIC_TARGET_PRCTL_UNALIGN_H */
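
These handlers back PR_GET_UNALIGN/PR_SET_UNALIGN for the targets that include this header. From the guest's point of view it is the ordinary prctl(2) interface; a minimal native example of the same two calls follows (on hosts whose architecture does not implement PR_SET_UNALIGN the first call simply fails, typically with EINVAL).

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/prctl.h>

    int main(void)
    {
        unsigned int mode = 0;

        /* Ask for SIGBUS on unaligned access; fails where unsupported. */
        if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0) {
            printf("PR_SET_UNALIGN: %s\n", strerror(errno));
            return 0;
        }
        /* Read the setting back, as do_prctl_get_unalign() reports it. */
        if (prctl(PR_GET_UNALIGN, &mode, 0, 0, 0) != 0) {
            printf("PR_GET_UNALIGN: %s\n", strerror(errno));
            return 0;
        }
        printf("unaligned access mode: %s\n",
               (mode & PR_UNALIGN_SIGBUS) ? "SIGBUS" : "silent fixup");
        return 0;
    }
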
diff --git a/linux-user/generic/target_resource.h b/linux-user/generic/target_resource.h
new file mode 100644
index 0000000000..37d3eb09b3
--- /dev/null
+++ b/linux-user/generic/target_resource.h
@@ -0,0 +1,38 @@
+/*
+ * Target definitions of RLIMIT_* constants. These may be overridden by an
+ * architecture specific header if needed.
+ */
+
+#ifndef GENERIC_TARGET_RESOURCE_H
+#define GENERIC_TARGET_RESOURCE_H
+
+struct target_rlimit {
+ abi_ulong rlim_cur;
+ abi_ulong rlim_max;
+};
+
+struct target_rlimit64 {
+ abi_ullong rlim_cur;
+ abi_ullong rlim_max;
+};
+
+#define TARGET_RLIM_INFINITY ((abi_ulong)-1)
+
+#define TARGET_RLIMIT_CPU 0
+#define TARGET_RLIMIT_FSIZE 1
+#define TARGET_RLIMIT_DATA 2
+#define TARGET_RLIMIT_STACK 3
+#define TARGET_RLIMIT_CORE 4
+#define TARGET_RLIMIT_RSS 5
+#define TARGET_RLIMIT_NPROC 6
+#define TARGET_RLIMIT_NOFILE 7
+#define TARGET_RLIMIT_MEMLOCK 8
+#define TARGET_RLIMIT_AS 9
+#define TARGET_RLIMIT_LOCKS 10
+#define TARGET_RLIMIT_SIGPENDING 11
+#define TARGET_RLIMIT_MSGQUEUE 12
+#define TARGET_RLIMIT_NICE 13
+#define TARGET_RLIMIT_RTPRIO 14
+#define TARGET_RLIMIT_RTTIME 15
+
+#endif
diff --git a/linux-user/nios2/target_structs.h b/linux-user/generic/target_structs.h
index daa2886f98..09ff858b6e 100644
--- a/linux-user/nios2/target_structs.h
+++ b/linux-user/generic/target_structs.h
@@ -1,7 +1,7 @@
/*
- * Nios2 specific structures for linux-user
+ * Generic structures for linux-user
*
- * Copyright (c) 2016 Marek Vasut <marex@denx.de>
+ * Copyright (c) 2013 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -16,8 +16,8 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef NIOS2_TARGET_STRUCTS_H
-#define NIOS2_TARGET_STRUCTS_H
+#ifndef GENERIC_TARGET_STRUCTS_H
+#define GENERIC_TARGET_STRUCTS_H
struct target_ipc_perm {
abi_int __key; /* Key. */
diff --git a/linux-user/hexagon/cpu_loop.c b/linux-user/hexagon/cpu_loop.c
index bee2a9e4ea..7f1499ed28 100644
--- a/linux-user/hexagon/cpu_loop.c
+++ b/linux-user/hexagon/cpu_loop.c
@@ -28,8 +28,7 @@
void cpu_loop(CPUHexagonState *env)
{
CPUState *cs = env_cpu(env);
- int trapnr, signum, sigcode;
- target_ulong sigaddr;
+ int trapnr;
target_ulong syscallnum;
target_ulong ret;
@@ -39,10 +38,6 @@ void cpu_loop(CPUHexagonState *env)
cpu_exec_end(cs);
process_queued_cpu_work(cs);
- signum = 0;
- sigcode = 0;
- sigaddr = 0;
-
switch (trapnr) {
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
@@ -59,37 +54,23 @@ void cpu_loop(CPUHexagonState *env)
env->gpr[4],
env->gpr[5],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->gpr[HEX_REG_PC] -= 4;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->gpr[0] = ret;
}
break;
- case HEX_EXCP_FETCH_NO_UPAGE:
- case HEX_EXCP_PRIV_NO_UREAD:
- case HEX_EXCP_PRIV_NO_UWRITE:
- signum = TARGET_SIGSEGV;
- sigcode = TARGET_SEGV_MAPERR;
- break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
break;
+ case EXCP_DEBUG:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, 0);
+ break;
default:
EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
trapnr);
exit(EXIT_FAILURE);
}
-
- if (signum) {
- target_siginfo_t info = {
- .si_signo = signum,
- .si_errno = 0,
- .si_code = sigcode,
- ._sifields._sigfault._addr = sigaddr
- };
- queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
- }
-
process_pending_signals(env);
}
}
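
The reworked hexagon loop follows the usual linux-user convention: a syscall result of -QEMU_ERESTARTSYS winds the PC back over the 4-byte trap so the syscall re-executes once pending signals are handled, and -QEMU_ESIGRETURN means sigreturn already restored the registers, so the return value must not be written back. Below is a standalone sketch of that dispatch shape, reusing the 512/513 values that target_errno_defs.h previously defined; the cpu_state struct and do_syscall_stub are invented for illustration, not QEMU code.

    #include <stdio.h>
    #include <stdint.h>

    /* Same values the removed TARGET_* definitions used (512/513). */
    enum { QEMU_ERESTARTSYS = 512, QEMU_ESIGRETURN = 513 };

    struct cpu_state {
        uint32_t pc;
        int64_t r0;             /* syscall return register */
    };

    /* Invented stub: asks to be restarted once, then succeeds. */
    static int64_t do_syscall_stub(struct cpu_state *cpu, int num)
    {
        static int first = 1;

        (void)cpu;
        (void)num;
        if (first) {
            first = 0;
            return -QEMU_ERESTARTSYS;
        }
        return 42;
    }

    static void handle_trap(struct cpu_state *cpu)
    {
        int64_t ret = do_syscall_stub(cpu, 0);

        if (ret == -QEMU_ERESTARTSYS) {
            cpu->pc -= 4;       /* re-execute the trap instruction */
        } else if (ret != -QEMU_ESIGRETURN) {
            cpu->r0 = ret;      /* normal return value */
        }                       /* else: sigreturn already set the registers */
    }

    int main(void)
    {
        struct cpu_state cpu = { .pc = 0x1004, .r0 = 0 };

        handle_trap(&cpu);      /* restarted: pc rewound to 0x1000 */
        printf("pc=0x%x r0=%lld\n", cpu.pc, (long long)cpu.r0);
        handle_trap(&cpu);      /* completes with r0 = 42 */
        printf("pc=0x%x r0=%lld\n", cpu.pc, (long long)cpu.r0);
        return 0;
    }
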
diff --git a/linux-user/hexagon/signal.c b/linux-user/hexagon/signal.c
index c7f0bf6b92..492b51f155 100644
--- a/linux-user/hexagon/signal.c
+++ b/linux-user/hexagon/signal.c
@@ -39,15 +39,12 @@ struct target_sigcontext {
target_ulong m0;
target_ulong m1;
target_ulong usr;
- target_ulong p3_0;
target_ulong gp;
target_ulong ugp;
target_ulong pc;
target_ulong cause;
target_ulong badva;
- target_ulong pad1;
- target_ulong pad2;
- target_ulong pad3;
+ target_ulong pred[NUM_PREGS];
};
struct target_ucontext {
@@ -118,10 +115,14 @@ static void setup_sigcontext(struct target_sigcontext *sc, CPUHexagonState *env)
__put_user(env->gpr[HEX_REG_M0], &sc->m0);
__put_user(env->gpr[HEX_REG_M1], &sc->m1);
__put_user(env->gpr[HEX_REG_USR], &sc->usr);
- __put_user(env->gpr[HEX_REG_P3_0], &sc->p3_0);
__put_user(env->gpr[HEX_REG_GP], &sc->gp);
__put_user(env->gpr[HEX_REG_UGP], &sc->ugp);
__put_user(env->gpr[HEX_REG_PC], &sc->pc);
+
+ int i;
+ for (i = 0; i < NUM_PREGS; i++) {
+ __put_user(env->pred[i], &(sc->pred[i]));
+ }
}
static void setup_ucontext(struct target_ucontext *uc,
@@ -161,7 +162,12 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
}
setup_ucontext(&frame->uc, env, set);
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
+ /*
+ * The on-stack signal trampoline is no longer executed;
+ * however, the libgcc signal frame unwinding code checks
+ * for the presence of these two numeric magic values.
+ */
install_sigtramp(frame->tramp);
env->gpr[HEX_REG_PC] = ka->_sa_handler;
@@ -171,8 +177,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
frame_addr + offsetof(struct target_rt_sigframe, info);
env->gpr[HEX_REG_R02] =
frame_addr + offsetof(struct target_rt_sigframe, uc);
- env->gpr[HEX_REG_LR] =
- frame_addr + offsetof(struct target_rt_sigframe, tramp);
+ env->gpr[HEX_REG_LR] = default_rt_sigreturn;
return;
@@ -226,10 +231,14 @@ static void restore_sigcontext(CPUHexagonState *env,
__get_user(env->gpr[HEX_REG_M0], &sc->m0);
__get_user(env->gpr[HEX_REG_M1], &sc->m1);
__get_user(env->gpr[HEX_REG_USR], &sc->usr);
- __get_user(env->gpr[HEX_REG_P3_0], &sc->p3_0);
__get_user(env->gpr[HEX_REG_GP], &sc->gp);
__get_user(env->gpr[HEX_REG_UGP], &sc->ugp);
__get_user(env->gpr[HEX_REG_PC], &sc->pc);
+
+ int i;
+ for (i = 0; i < NUM_PREGS; i++) {
+ __get_user(env->pred[i], &(sc->pred[i]));
+ }
}
static void restore_ucontext(CPUHexagonState *env, struct target_ucontext *uc)
@@ -264,10 +273,21 @@ long do_rt_sigreturn(CPUHexagonState *env)
target_restore_altstack(&frame->uc.uc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
return 0;
}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 4 * 2, 0);
+ assert(tramp != NULL);
+
+ default_rt_sigreturn = sigtramp_page;
+ install_sigtramp(tramp);
+
+ unlock_user(tramp, sigtramp_page, 4 * 2);
+}
diff --git a/linux-user/hexagon/target_elf.h b/linux-user/hexagon/target_elf.h
index b4e9f40527..36056fc9f0 100644
--- a/linux-user/hexagon/target_elf.h
+++ b/linux-user/hexagon/target_elf.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,7 +20,10 @@
static inline const char *cpu_get_model(uint32_t eflags)
{
- /* For now, treat anything newer than v5 as a v67 */
+ static char buf[32];
+ int err;
+
+ /* For now, treat anything newer than v5 as a v73 */
/* FIXME - Disable instructions that are newer than the specified arch */
if (eflags == 0x04 || /* v5 */
eflags == 0x05 || /* v55 */
@@ -30,11 +33,18 @@ static inline const char *cpu_get_model(uint32_t eflags)
eflags == 0x65 || /* v65 */
eflags == 0x66 || /* v66 */
eflags == 0x67 || /* v67 */
- eflags == 0x8067 /* v67t */
+ eflags == 0x8067 || /* v67t */
+ eflags == 0x68 || /* v68 */
+ eflags == 0x69 || /* v69 */
+ eflags == 0x71 || /* v71 */
+ eflags == 0x8071 || /* v71t */
+ eflags == 0x73 /* v73 */
) {
- return "v67";
+ return "v73";
}
- return "unknown";
+
+ err = snprintf(buf, sizeof(buf), "unknown (0x%x)", eflags);
+ return err >= 0 && err < sizeof(buf) ? buf : "unknown";
}
#endif
diff --git a/linux-user/hexagon/target_mman.h b/linux-user/hexagon/target_mman.h
new file mode 100644
index 0000000000..e6b5e2ca36
--- /dev/null
+++ b/linux-user/hexagon/target_mman.h
@@ -0,0 +1,14 @@
+/*
+ * arch/hexagon/include/asm/processor.h
+ * TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+ *
+ * arch/hexagon/include/asm/mem-layout.h
+ * TASK_SIZE PAGE_OFFSET
+ * PAGE_OFFSET 0xc0000000
+ */
+#define TASK_UNMAPPED_BASE 0x40000000
+
+/* arch/hexagon/include/asm/elf.h */
+#define ELF_ET_DYN_BASE 0x08000000
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/hexagon/target_prctl.h b/linux-user/hexagon/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/hexagon/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/hexagon/target_proc.h b/linux-user/hexagon/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/hexagon/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/hexagon/target_resource.h b/linux-user/hexagon/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/hexagon/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/hexagon/target_signal.h b/linux-user/hexagon/target_signal.h
index 345cf1cbb8..68fb71312e 100644
--- a/linux-user/hexagon/target_signal.h
+++ b/linux-user/hexagon/target_signal.h
@@ -18,17 +18,8 @@
#ifndef HEXAGON_TARGET_SIGNAL_H
#define HEXAGON_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-
#include "../generic/signal.h"
-#endif /* TARGET_SIGNAL_H */
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
+#endif /* HEXAGON_TARGET_SIGNAL_H */
diff --git a/linux-user/hexagon/target_structs.h b/linux-user/hexagon/target_structs.h
index c217d9442a..3a06f373c3 100644
--- a/linux-user/hexagon/target_structs.h
+++ b/linux-user/hexagon/target_structs.h
@@ -1,54 +1 @@
-/*
- * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * Hexagon specific structures for linux-user
- */
-#ifndef HEXAGON_TARGET_STRUCTS_H
-#define HEXAGON_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
- abi_ulong __unused1;
- abi_ulong shm_dtime; /* time of last shmdt() */
- abi_ulong __unused2;
- abi_ulong shm_ctime; /* time of last change by shmctl() */
- abi_ulong __unused3;
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/host/aarch64/hostdep.h b/linux-user/host/aarch64/hostdep.h
deleted file mode 100644
index a8d41a21ad..0000000000
--- a/linux-user/host/aarch64/hostdep.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef AARCH64_HOSTDEP_H
-#define AARCH64_HOSTDEP_H
-
-/* We have a safe-syscall.inc.S */
-#define HAVE_SAFE_SYSCALL
-
-#ifndef __ASSEMBLER__
-
-/* These are defined by the safe-syscall.inc.S file */
-extern char safe_syscall_start[];
-extern char safe_syscall_end[];
-
-/* Adjust the signal context to rewind out of safe-syscall if we're in it */
-static inline void rewind_if_in_safe_syscall(void *puc)
-{
- ucontext_t *uc = puc;
- __u64 *pcreg = &uc->uc_mcontext.pc;
-
- if (*pcreg > (uintptr_t)safe_syscall_start
- && *pcreg < (uintptr_t)safe_syscall_end) {
- *pcreg = (uintptr_t)safe_syscall_start;
- }
-}
-
-#endif /* __ASSEMBLER__ */
-
-#endif
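
Every per-host hostdep.h removed in this series carried the same rewind logic, differing only in how the PC is located inside the host ucontext (uc_mcontext.pc here, gregs[REG_EIP]/gregs[REG_RIP], gp_regs[PT_NIP], psw.addr and __gregs[REG_PC] in the variants below). A host-neutral sketch of that shared pattern, written against the start/end markers exported by safe-syscall.inc.S; how the consolidated code actually obtains the PC is not shown in this diff, so the parameter below is an assumption for illustration:

    /* Sketch of the rewind pattern common to the deleted hostdep.h files.
     * The caller passes a pointer to the interrupted-PC slot taken from the
     * host signal ucontext (host-specific; see the deleted variants). */
    #include <stdint.h>

    extern char safe_syscall_start[];   /* defined by safe-syscall.inc.S */
    extern char safe_syscall_end[];

    static inline void rewind_if_in_safe_syscall_sketch(uintptr_t *pcreg)
    {
        if (*pcreg > (uintptr_t)safe_syscall_start &&
            *pcreg < (uintptr_t)safe_syscall_end) {
            /* The syscall insn is the last one in the window, so nothing
             * has committed yet; restart at the signal_pending check. */
            *pcreg = (uintptr_t)safe_syscall_start;
        }
    }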
diff --git a/linux-user/host/aarch64/safe-syscall.inc.S b/linux-user/host/aarch64/safe-syscall.inc.S
deleted file mode 100644
index bc1f5a9792..0000000000
--- a/linux-user/host/aarch64/safe-syscall.inc.S
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * safe-syscall.inc.S : host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- * This is intended to be included by linux-user/safe-syscall.S
- *
- * Written by Richard Henderson <rth@twiddle.net>
- * Copyright (C) 2016 Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
- .global safe_syscall_base
- .global safe_syscall_start
- .global safe_syscall_end
- .type safe_syscall_base, #function
- .type safe_syscall_start, #function
- .type safe_syscall_end, #function
-
- /* This is the entry point for making a system call. The calling
- * convention here is that of a C varargs function with the
- * first argument an 'int *' to the signal_pending flag, the
- * second one the system call number (as a 'long'), and all further
- * arguments being syscall arguments (also 'long').
- * We return a long which is the syscall's return value, which
- * may be negative-errno on failure. Conversion to the
- * -1-and-errno-set convention is done by the calling wrapper.
- */
-safe_syscall_base:
- .cfi_startproc
- /* The syscall calling convention isn't the same as the
- * C one:
- * we enter with x0 == *signal_pending
- * x1 == syscall number
- * x2 ... x7, (stack) == syscall arguments
- * and return the result in x0
- * and the syscall instruction needs
- * x8 == syscall number
- * x0 ... x6 == syscall arguments
- * and returns the result in x0
- * Shuffle everything around appropriately.
- */
- mov x9, x0 /* signal_pending pointer */
- mov x8, x1 /* syscall number */
- mov x0, x2 /* syscall arguments */
- mov x1, x3
- mov x2, x4
- mov x3, x5
- mov x4, x6
- mov x5, x7
- ldr x6, [sp]
-
- /* This next sequence of code works in conjunction with the
- * rewind_if_safe_syscall_function(). If a signal is taken
- * and the interrupted PC is anywhere between 'safe_syscall_start'
- * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
- * The code sequence must therefore be able to cope with this, and
- * the syscall instruction must be the final one in the sequence.
- */
-safe_syscall_start:
- /* if signal_pending is non-zero, don't do the call */
- ldr w10, [x9]
- cbnz w10, 0f
- svc 0x0
-safe_syscall_end:
- /* code path for having successfully executed the syscall */
- ret
-
-0:
- /* code path when we didn't execute the syscall */
- mov x0, #-TARGET_ERESTARTSYS
- ret
- .cfi_endproc
-
- .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/arm/hostdep.h b/linux-user/host/arm/hostdep.h
deleted file mode 100644
index 9276fe6ceb..0000000000
--- a/linux-user/host/arm/hostdep.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef ARM_HOSTDEP_H
-#define ARM_HOSTDEP_H
-
-/* We have a safe-syscall.inc.S */
-#define HAVE_SAFE_SYSCALL
-
-#ifndef __ASSEMBLER__
-
-/* These are defined by the safe-syscall.inc.S file */
-extern char safe_syscall_start[];
-extern char safe_syscall_end[];
-
-/* Adjust the signal context to rewind out of safe-syscall if we're in it */
-static inline void rewind_if_in_safe_syscall(void *puc)
-{
- ucontext_t *uc = puc;
- unsigned long *pcreg = &uc->uc_mcontext.arm_pc;
-
- if (*pcreg > (uintptr_t)safe_syscall_start
- && *pcreg < (uintptr_t)safe_syscall_end) {
- *pcreg = (uintptr_t)safe_syscall_start;
- }
-}
-
-#endif /* __ASSEMBLER__ */
-
-#endif
diff --git a/linux-user/host/arm/safe-syscall.inc.S b/linux-user/host/arm/safe-syscall.inc.S
deleted file mode 100644
index 88c4958504..0000000000
--- a/linux-user/host/arm/safe-syscall.inc.S
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * safe-syscall.inc.S : host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- * This is intended to be included by linux-user/safe-syscall.S
- *
- * Written by Richard Henderson <rth@twiddle.net>
- * Copyright (C) 2016 Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
- .global safe_syscall_base
- .global safe_syscall_start
- .global safe_syscall_end
- .type safe_syscall_base, %function
-
- .cfi_sections .debug_frame
-
- .text
- .syntax unified
- .arm
- .align 2
-
- /* This is the entry point for making a system call. The calling
- * convention here is that of a C varargs function with the
- * first argument an 'int *' to the signal_pending flag, the
- * second one the system call number (as a 'long'), and all further
- * arguments being syscall arguments (also 'long').
- * We return a long which is the syscall's return value, which
- * may be negative-errno on failure. Conversion to the
- * -1-and-errno-set convention is done by the calling wrapper.
- */
-safe_syscall_base:
- .fnstart
- .cfi_startproc
- mov r12, sp /* save entry stack */
- push { r4, r5, r6, r7, r8, lr }
- .save { r4, r5, r6, r7, r8, lr }
- .cfi_adjust_cfa_offset 24
- .cfi_rel_offset r4, 0
- .cfi_rel_offset r5, 4
- .cfi_rel_offset r6, 8
- .cfi_rel_offset r7, 12
- .cfi_rel_offset r8, 16
- .cfi_rel_offset lr, 20
-
- /* The syscall calling convention isn't the same as the C one:
- * we enter with r0 == *signal_pending
- * r1 == syscall number
- * r2, r3, [sp+0] ... [sp+12] == syscall arguments
- * and return the result in r0
- * and the syscall instruction needs
- * r7 == syscall number
- * r0 ... r6 == syscall arguments
- * and returns the result in r0
- * Shuffle everything around appropriately.
- * Note the 16 bytes that we pushed to save registers.
- */
- mov r8, r0 /* copy signal_pending */
- mov r7, r1 /* syscall number */
- mov r0, r2 /* syscall args */
- mov r1, r3
- ldm r12, { r2, r3, r4, r5, r6 }
-
- /* This next sequence of code works in conjunction with the
- * rewind_if_safe_syscall_function(). If a signal is taken
- * and the interrupted PC is anywhere between 'safe_syscall_start'
- * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
- * The code sequence must therefore be able to cope with this, and
- * the syscall instruction must be the final one in the sequence.
- */
-safe_syscall_start:
- /* if signal_pending is non-zero, don't do the call */
- ldr r12, [r8] /* signal_pending */
- tst r12, r12
- bne 1f
- swi 0
-safe_syscall_end:
- /* code path for having successfully executed the syscall */
- pop { r4, r5, r6, r7, r8, pc }
-
-1:
- /* code path when we didn't execute the syscall */
- ldr r0, =-TARGET_ERESTARTSYS
- pop { r4, r5, r6, r7, r8, pc }
- .fnend
- .cfi_endproc
-
- .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/i386/hostdep.h b/linux-user/host/i386/hostdep.h
deleted file mode 100644
index 073be74d87..0000000000
--- a/linux-user/host/i386/hostdep.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef I386_HOSTDEP_H
-#define I386_HOSTDEP_H
-
-/* We have a safe-syscall.inc.S */
-#define HAVE_SAFE_SYSCALL
-
-#ifndef __ASSEMBLER__
-
-/* These are defined by the safe-syscall.inc.S file */
-extern char safe_syscall_start[];
-extern char safe_syscall_end[];
-
-/* Adjust the signal context to rewind out of safe-syscall if we're in it */
-static inline void rewind_if_in_safe_syscall(void *puc)
-{
- ucontext_t *uc = puc;
- greg_t *pcreg = &uc->uc_mcontext.gregs[REG_EIP];
-
- if (*pcreg > (uintptr_t)safe_syscall_start
- && *pcreg < (uintptr_t)safe_syscall_end) {
- *pcreg = (uintptr_t)safe_syscall_start;
- }
-}
-
-#endif /* __ASSEMBLER__ */
-
-#endif
diff --git a/linux-user/host/i386/safe-syscall.inc.S b/linux-user/host/i386/safe-syscall.inc.S
deleted file mode 100644
index 9e58fc6504..0000000000
--- a/linux-user/host/i386/safe-syscall.inc.S
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * safe-syscall.inc.S : host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- * This is intended to be included by linux-user/safe-syscall.S
- *
- * Written by Richard Henderson <rth@twiddle.net>
- * Copyright (C) 2016 Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
- .global safe_syscall_base
- .global safe_syscall_start
- .global safe_syscall_end
- .type safe_syscall_base, @function
-
- /* This is the entry point for making a system call. The calling
- * convention here is that of a C varargs function with the
- * first argument an 'int *' to the signal_pending flag, the
- * second one the system call number (as a 'long'), and all further
- * arguments being syscall arguments (also 'long').
- * We return a long which is the syscall's return value, which
- * may be negative-errno on failure. Conversion to the
- * -1-and-errno-set convention is done by the calling wrapper.
- */
-safe_syscall_base:
- .cfi_startproc
- push %ebp
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset ebp, 0
- push %esi
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset esi, 0
- push %edi
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset edi, 0
- push %ebx
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset ebx, 0
-
- /* The syscall calling convention isn't the same as the C one:
- * we enter with 0(%esp) == return address
- * 4(%esp) == *signal_pending
- * 8(%esp) == syscall number
- * 12(%esp) ... 32(%esp) == syscall arguments
- * and return the result in eax
- * and the syscall instruction needs
- * eax == syscall number
- * ebx, ecx, edx, esi, edi, ebp == syscall arguments
- * and returns the result in eax
- * Shuffle everything around appropriately.
- * Note the 16 bytes that we pushed to save registers.
- */
- mov 12+16(%esp), %ebx /* the syscall arguments */
- mov 16+16(%esp), %ecx
- mov 20+16(%esp), %edx
- mov 24+16(%esp), %esi
- mov 28+16(%esp), %edi
- mov 32+16(%esp), %ebp
-
- /* This next sequence of code works in conjunction with the
- * rewind_if_safe_syscall_function(). If a signal is taken
- * and the interrupted PC is anywhere between 'safe_syscall_start'
- * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
- * The code sequence must therefore be able to cope with this, and
- * the syscall instruction must be the final one in the sequence.
- */
-safe_syscall_start:
- /* if signal_pending is non-zero, don't do the call */
- mov 4+16(%esp), %eax /* signal_pending */
- cmpl $0, (%eax)
- jnz 1f
- mov 8+16(%esp), %eax /* syscall number */
- int $0x80
-safe_syscall_end:
- /* code path for having successfully executed the syscall */
- pop %ebx
- .cfi_remember_state
- .cfi_adjust_cfa_offset -4
- .cfi_restore ebx
- pop %edi
- .cfi_adjust_cfa_offset -4
- .cfi_restore edi
- pop %esi
- .cfi_adjust_cfa_offset -4
- .cfi_restore esi
- pop %ebp
- .cfi_adjust_cfa_offset -4
- .cfi_restore ebp
- ret
-
-1:
- /* code path when we didn't execute the syscall */
- .cfi_restore_state
- mov $-TARGET_ERESTARTSYS, %eax
- jmp safe_syscall_end
- .cfi_endproc
-
- .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/ia64/hostdep.h b/linux-user/host/ia64/hostdep.h
deleted file mode 100644
index 263bf7658e..0000000000
--- a/linux-user/host/ia64/hostdep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef IA64_HOSTDEP_H
-#define IA64_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/mips/hostdep.h b/linux-user/host/mips/hostdep.h
deleted file mode 100644
index ba111d75c3..0000000000
--- a/linux-user/host/mips/hostdep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef MIPS_HOSTDEP_H
-#define MIPS_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/ppc/hostdep.h b/linux-user/host/ppc/hostdep.h
deleted file mode 100644
index 23d8bd9d47..0000000000
--- a/linux-user/host/ppc/hostdep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef PPC_HOSTDEP_H
-#define PPC_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/ppc64/hostdep.h b/linux-user/host/ppc64/hostdep.h
deleted file mode 100644
index 98979ad917..0000000000
--- a/linux-user/host/ppc64/hostdep.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef PPC64_HOSTDEP_H
-#define PPC64_HOSTDEP_H
-
-/* We have a safe-syscall.inc.S */
-#define HAVE_SAFE_SYSCALL
-
-#ifndef __ASSEMBLER__
-
-/* These are defined by the safe-syscall.inc.S file */
-extern char safe_syscall_start[];
-extern char safe_syscall_end[];
-
-/* Adjust the signal context to rewind out of safe-syscall if we're in it */
-static inline void rewind_if_in_safe_syscall(void *puc)
-{
- ucontext_t *uc = puc;
- unsigned long *pcreg = &uc->uc_mcontext.gp_regs[PT_NIP];
-
- if (*pcreg > (uintptr_t)safe_syscall_start
- && *pcreg < (uintptr_t)safe_syscall_end) {
- *pcreg = (uintptr_t)safe_syscall_start;
- }
-}
-
-#endif /* __ASSEMBLER__ */
-
-#endif
diff --git a/linux-user/host/ppc64/safe-syscall.inc.S b/linux-user/host/ppc64/safe-syscall.inc.S
deleted file mode 100644
index 875133173b..0000000000
--- a/linux-user/host/ppc64/safe-syscall.inc.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * safe-syscall.inc.S : host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- * This is intended to be included by linux-user/safe-syscall.S
- *
- * Written by Richard Henderson <rth@twiddle.net>
- * Copyright (C) 2016 Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
- .global safe_syscall_base
- .global safe_syscall_start
- .global safe_syscall_end
- .type safe_syscall_base, @function
-
- .text
-
- /* This is the entry point for making a system call. The calling
- * convention here is that of a C varargs function with the
- * first argument an 'int *' to the signal_pending flag, the
- * second one the system call number (as a 'long'), and all further
- * arguments being syscall arguments (also 'long').
- * We return a long which is the syscall's return value, which
- * may be negative-errno on failure. Conversion to the
- * -1-and-errno-set convention is done by the calling wrapper.
- */
-#if _CALL_ELF == 2
-safe_syscall_base:
- .cfi_startproc
- .localentry safe_syscall_base,0
-#else
- .section ".opd","aw"
- .align 3
-safe_syscall_base:
- .quad .L.safe_syscall_base,.TOC.@tocbase,0
- .previous
-.L.safe_syscall_base:
- .cfi_startproc
-#endif
- /* We enter with r3 == *signal_pending
- * r4 == syscall number
- * r5 ... r10 == syscall arguments
- * and return the result in r3
- * and the syscall instruction needs
- * r0 == syscall number
- * r3 ... r8 == syscall arguments
- * and returns the result in r3
- * Shuffle everything around appropriately.
- */
- std 14, 16(1) /* Preserve r14 in SP+16 */
- .cfi_offset 14, 16
- mr 14, 3 /* signal_pending */
- mr 0, 4 /* syscall number */
- mr 3, 5 /* syscall arguments */
- mr 4, 6
- mr 5, 7
- mr 6, 8
- mr 7, 9
- mr 8, 10
-
- /* This next sequence of code works in conjunction with the
- * rewind_if_safe_syscall_function(). If a signal is taken
- * and the interrupted PC is anywhere between 'safe_syscall_start'
- * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
- * The code sequence must therefore be able to cope with this, and
- * the syscall instruction must be the final one in the sequence.
- */
-safe_syscall_start:
- /* if signal_pending is non-zero, don't do the call */
- lwz 12, 0(14)
- cmpwi 0, 12, 0
- bne- 0f
- sc
-safe_syscall_end:
- /* code path when we did execute the syscall */
- ld 14, 16(1) /* restore r14 to its original value */
- bnslr+
-
- /* syscall failed; return negative errno */
- neg 3, 3
- blr
-
- /* code path when we didn't execute the syscall */
-0: addi 3, 0, -TARGET_ERESTARTSYS
- ld 14, 16(1) /* restore r14 to its original value */
- blr
- .cfi_endproc
-
-#if _CALL_ELF == 2
- .size safe_syscall_base, .-safe_syscall_base
-#else
- .size safe_syscall_base, .-.L.safe_syscall_base
- .size .L.safe_syscall_base, .-.L.safe_syscall_base
-#endif
diff --git a/linux-user/host/riscv32/hostdep.h b/linux-user/host/riscv32/hostdep.h
deleted file mode 100644
index adf9edbf2d..0000000000
--- a/linux-user/host/riscv32/hostdep.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef RISCV32_HOSTDEP_H
-#define RISCV32_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/riscv64/hostdep.h b/linux-user/host/riscv64/hostdep.h
deleted file mode 100644
index 865f0fb9ff..0000000000
--- a/linux-user/host/riscv64/hostdep.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef RISCV64_HOSTDEP_H
-#define RISCV64_HOSTDEP_H
-
-/* We have a safe-syscall.inc.S */
-#define HAVE_SAFE_SYSCALL
-
-#ifndef __ASSEMBLER__
-
-/* These are defined by the safe-syscall.inc.S file */
-extern char safe_syscall_start[];
-extern char safe_syscall_end[];
-
-/* Adjust the signal context to rewind out of safe-syscall if we're in it */
-static inline void rewind_if_in_safe_syscall(void *puc)
-{
- ucontext_t *uc = puc;
- unsigned long *pcreg = &uc->uc_mcontext.__gregs[REG_PC];
-
- if (*pcreg > (uintptr_t)safe_syscall_start
- && *pcreg < (uintptr_t)safe_syscall_end) {
- *pcreg = (uintptr_t)safe_syscall_start;
- }
-}
-
-#endif /* __ASSEMBLER__ */
-
-#endif
diff --git a/linux-user/host/riscv64/safe-syscall.inc.S b/linux-user/host/riscv64/safe-syscall.inc.S
deleted file mode 100644
index 9ca3fbfd1e..0000000000
--- a/linux-user/host/riscv64/safe-syscall.inc.S
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * safe-syscall.inc.S : host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- * This is intended to be included by linux-user/safe-syscall.S
- *
- * Written by Richard Henderson <rth@twiddle.net>
- * Copyright (C) 2018 Linaro, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
- .global safe_syscall_base
- .global safe_syscall_start
- .global safe_syscall_end
- .type safe_syscall_base, @function
- .type safe_syscall_start, @function
- .type safe_syscall_end, @function
-
- /*
- * This is the entry point for making a system call. The calling
- * convention here is that of a C varargs function with the
- * first argument an 'int *' to the signal_pending flag, the
- * second one the system call number (as a 'long'), and all further
- * arguments being syscall arguments (also 'long').
- * We return a long which is the syscall's return value, which
- * may be negative-errno on failure. Conversion to the
- * -1-and-errno-set convention is done by the calling wrapper.
- */
-safe_syscall_base:
- .cfi_startproc
- /*
- * The syscall calling convention is nearly the same as C:
- * we enter with a0 == *signal_pending
- * a1 == syscall number
- * a2 ... a7 == syscall arguments
- * and return the result in a0
- * and the syscall instruction needs
- * a7 == syscall number
- * a0 ... a5 == syscall arguments
- * and returns the result in a0
- * Shuffle everything around appropriately.
- */
- mv t0, a0 /* signal_pending pointer */
- mv t1, a1 /* syscall number */
- mv a0, a2 /* syscall arguments */
- mv a1, a3
- mv a2, a4
- mv a3, a5
- mv a4, a6
- mv a5, a7
- mv a7, t1
-
- /*
- * This next sequence of code works in conjunction with the
- * rewind_if_safe_syscall_function(). If a signal is taken
- * and the interrupted PC is anywhere between 'safe_syscall_start'
- * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
- * The code sequence must therefore be able to cope with this, and
- * the syscall instruction must be the final one in the sequence.
- */
-safe_syscall_start:
- /* If signal_pending is non-zero, don't do the call */
- lw t1, 0(t0)
- bnez t1, 0f
- scall
-safe_syscall_end:
- /* code path for having successfully executed the syscall */
- ret
-
-0:
- /* code path when we didn't execute the syscall */
- li a0, -TARGET_ERESTARTSYS
- ret
- .cfi_endproc
-
- .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/s390/hostdep.h b/linux-user/host/s390/hostdep.h
deleted file mode 100644
index afcba5a16a..0000000000
--- a/linux-user/host/s390/hostdep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef S390_HOSTDEP_H
-#define S390_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/s390x/hostdep.h b/linux-user/host/s390x/hostdep.h
deleted file mode 100644
index 4f0171f36f..0000000000
--- a/linux-user/host/s390x/hostdep.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef S390X_HOSTDEP_H
-#define S390X_HOSTDEP_H
-
-/* We have a safe-syscall.inc.S */
-#define HAVE_SAFE_SYSCALL
-
-#ifndef __ASSEMBLER__
-
-/* These are defined by the safe-syscall.inc.S file */
-extern char safe_syscall_start[];
-extern char safe_syscall_end[];
-
-/* Adjust the signal context to rewind out of safe-syscall if we're in it */
-static inline void rewind_if_in_safe_syscall(void *puc)
-{
- ucontext_t *uc = puc;
- unsigned long *pcreg = &uc->uc_mcontext.psw.addr;
-
- if (*pcreg > (uintptr_t)safe_syscall_start
- && *pcreg < (uintptr_t)safe_syscall_end) {
- *pcreg = (uintptr_t)safe_syscall_start;
- }
-}
-
-#endif /* __ASSEMBLER__ */
-
-#endif
diff --git a/linux-user/host/s390x/safe-syscall.inc.S b/linux-user/host/s390x/safe-syscall.inc.S
deleted file mode 100644
index 414b44ad38..0000000000
--- a/linux-user/host/s390x/safe-syscall.inc.S
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * safe-syscall.inc.S : host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- * This is intended to be included by linux-user/safe-syscall.S
- *
- * Written by Richard Henderson <rth@twiddle.net>
- * Copyright (C) 2016 Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
- .global safe_syscall_base
- .global safe_syscall_start
- .global safe_syscall_end
- .type safe_syscall_base, @function
-
- /* This is the entry point for making a system call. The calling
- * convention here is that of a C varargs function with the
- * first argument an 'int *' to the signal_pending flag, the
- * second one the system call number (as a 'long'), and all further
- * arguments being syscall arguments (also 'long').
- * We return a long which is the syscall's return value, which
- * may be negative-errno on failure. Conversion to the
- * -1-and-errno-set convention is done by the calling wrapper.
- */
-safe_syscall_base:
- .cfi_startproc
- stmg %r6,%r15,48(%r15) /* save all call-saved registers */
- .cfi_offset %r15,-40
- .cfi_offset %r14,-48
- .cfi_offset %r13,-56
- .cfi_offset %r12,-64
- .cfi_offset %r11,-72
- .cfi_offset %r10,-80
- .cfi_offset %r9,-88
- .cfi_offset %r8,-96
- .cfi_offset %r7,-104
- .cfi_offset %r6,-112
- lgr %r1,%r15
- lg %r0,8(%r15) /* load eos */
- aghi %r15,-160
- .cfi_adjust_cfa_offset 160
- stg %r1,0(%r15) /* store back chain */
- stg %r0,8(%r15) /* store eos */
-
- /* The syscall calling convention isn't the same as the
- * C one:
- * we enter with r2 == *signal_pending
- * r3 == syscall number
- * r4, r5, r6, (stack) == syscall arguments
- * and return the result in r2
- * and the syscall instruction needs
- * r1 == syscall number
- * r2 ... r7 == syscall arguments
- * and returns the result in r2
- * Shuffle everything around appropriately.
- */
- lgr %r8,%r2 /* signal_pending pointer */
- lgr %r1,%r3 /* syscall number */
- lgr %r2,%r4 /* syscall args */
- lgr %r3,%r5
- lgr %r4,%r6
- lmg %r5,%r7,320(%r15)
-
- /* This next sequence of code works in conjunction with the
- * rewind_if_safe_syscall_function(). If a signal is taken
- * and the interrupted PC is anywhere between 'safe_syscall_start'
- * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
- * The code sequence must therefore be able to cope with this, and
- * the syscall instruction must be the final one in the sequence.
- */
-safe_syscall_start:
- /* if signal_pending is non-zero, don't do the call */
- icm %r0,15,0(%r8)
- jne 2f
- svc 0
-safe_syscall_end:
-
-1: lg %r15,0(%r15) /* load back chain */
- .cfi_remember_state
- .cfi_adjust_cfa_offset -160
- lmg %r6,%r15,48(%r15) /* load saved registers */
- br %r14
- .cfi_restore_state
-2: lghi %r2, -TARGET_ERESTARTSYS
- j 1b
- .cfi_endproc
-
- .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/sparc/hostdep.h b/linux-user/host/sparc/hostdep.h
deleted file mode 100644
index 391ad923cf..0000000000
--- a/linux-user/host/sparc/hostdep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef SPARC_HOSTDEP_H
-#define SPARC_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/sparc64/hostdep.h b/linux-user/host/sparc64/hostdep.h
deleted file mode 100644
index ce3968fca0..0000000000
--- a/linux-user/host/sparc64/hostdep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef SPARC64_HOSTDEP_H
-#define SPARC64_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/x32/hostdep.h b/linux-user/host/x32/hostdep.h
deleted file mode 100644
index 2c2d6d37da..0000000000
--- a/linux-user/host/x32/hostdep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef X32_HOSTDEP_H
-#define X32_HOSTDEP_H
-
-#endif
diff --git a/linux-user/host/x86_64/hostdep.h b/linux-user/host/x86_64/hostdep.h
deleted file mode 100644
index a4fefb5114..0000000000
--- a/linux-user/host/x86_64/hostdep.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * hostdep.h : things which are dependent on the host architecture
- *
- * * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef X86_64_HOSTDEP_H
-#define X86_64_HOSTDEP_H
-
-/* We have a safe-syscall.inc.S */
-#define HAVE_SAFE_SYSCALL
-
-#ifndef __ASSEMBLER__
-
-/* These are defined by the safe-syscall.inc.S file */
-extern char safe_syscall_start[];
-extern char safe_syscall_end[];
-
-/* Adjust the signal context to rewind out of safe-syscall if we're in it */
-static inline void rewind_if_in_safe_syscall(void *puc)
-{
- ucontext_t *uc = puc;
- greg_t *pcreg = &uc->uc_mcontext.gregs[REG_RIP];
-
- if (*pcreg > (uintptr_t)safe_syscall_start
- && *pcreg < (uintptr_t)safe_syscall_end) {
- *pcreg = (uintptr_t)safe_syscall_start;
- }
-}
-
-#endif /* __ASSEMBLER__ */
-
-#endif
diff --git a/linux-user/host/x86_64/safe-syscall.inc.S b/linux-user/host/x86_64/safe-syscall.inc.S
deleted file mode 100644
index f36992daa3..0000000000
--- a/linux-user/host/x86_64/safe-syscall.inc.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * safe-syscall.inc.S : host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- * This is intended to be included by linux-user/safe-syscall.S
- *
- * Copyright (C) 2015 Timothy Edward Baldwin <T.E.Baldwin99@members.leeds.ac.uk>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
- .global safe_syscall_base
- .global safe_syscall_start
- .global safe_syscall_end
- .type safe_syscall_base, @function
-
- /* This is the entry point for making a system call. The calling
- * convention here is that of a C varargs function with the
- * first argument an 'int *' to the signal_pending flag, the
- * second one the system call number (as a 'long'), and all further
- * arguments being syscall arguments (also 'long').
- * We return a long which is the syscall's return value, which
- * may be negative-errno on failure. Conversion to the
- * -1-and-errno-set convention is done by the calling wrapper.
- */
-safe_syscall_base:
- .cfi_startproc
- /* This saves a frame pointer and aligns the stack for the syscall.
- * (It's unclear if the syscall ABI has the same stack alignment
- * requirements as the userspace function call ABI, but better safe than
- * sorry. Appendix A2 of http://www.x86-64.org/documentation/abi.pdf
- * does not list any ABI differences regarding stack alignment.)
- */
- push %rbp
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset rbp, 0
-
- /* The syscall calling convention isn't the same as the
- * C one:
- * we enter with rdi == *signal_pending
- * rsi == syscall number
- * rdx, rcx, r8, r9, (stack), (stack) == syscall arguments
- * and return the result in rax
- * and the syscall instruction needs
- * rax == syscall number
- * rdi, rsi, rdx, r10, r8, r9 == syscall arguments
- * and returns the result in rax
- * Shuffle everything around appropriately.
- * Note that syscall will trash rcx and r11.
- */
- mov %rsi, %rax /* syscall number */
- mov %rdi, %rbp /* signal_pending pointer */
- /* and the syscall arguments */
- mov %rdx, %rdi
- mov %rcx, %rsi
- mov %r8, %rdx
- mov %r9, %r10
- mov 16(%rsp), %r8
- mov 24(%rsp), %r9
-
- /* This next sequence of code works in conjunction with the
- * rewind_if_safe_syscall_function(). If a signal is taken
- * and the interrupted PC is anywhere between 'safe_syscall_start'
- * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
- * The code sequence must therefore be able to cope with this, and
- * the syscall instruction must be the final one in the sequence.
- */
-safe_syscall_start:
- /* if signal_pending is non-zero, don't do the call */
- cmpl $0, (%rbp)
- jnz 1f
- syscall
-safe_syscall_end:
- /* code path for having successfully executed the syscall */
- pop %rbp
- .cfi_remember_state
- .cfi_def_cfa_offset 8
- .cfi_restore rbp
- ret
-
-1:
- /* code path when we didn't execute the syscall */
- .cfi_restore_state
- mov $-TARGET_ERESTARTSYS, %rax
- pop %rbp
- .cfi_def_cfa_offset 8
- .cfi_restore rbp
- ret
- .cfi_endproc
-
- .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/hppa/Makefile.vdso b/linux-user/hppa/Makefile.vdso
new file mode 100644
index 0000000000..f4537ae716
--- /dev/null
+++ b/linux-user/hppa/Makefile.vdso
@@ -0,0 +1,11 @@
+include $(BUILD_DIR)/tests/tcg/hppa-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/hppa
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso.so
+
+$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ -nostdlib -shared -Wl,-h,linux-vdso32.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both \
+ -Wl,-T,$(SUBDIR)/vdso.ld $<
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
index 81607a9b27..d5232f37fe 100644
--- a/linux-user/hppa/cpu_loop.c
+++ b/linux-user/hppa/cpu_loop.c
@@ -110,7 +110,6 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
void cpu_loop(CPUHPPAState *env)
{
CPUState *cs = env_cpu(env);
- target_siginfo_t info;
abi_ulong ret;
int trapnr;
@@ -133,8 +132,8 @@ void cpu_loop(CPUHPPAState *env)
env->iaoq_f = env->gr[31];
env->iaoq_b = env->gr[31] + 4;
break;
- case -TARGET_ERESTARTSYS:
- case -TARGET_QEMU_ESIGRETURN:
+ case -QEMU_ERESTARTSYS:
+ case -QEMU_ESIGRETURN:
break;
}
break;
@@ -144,58 +143,44 @@ void cpu_loop(CPUHPPAState *env)
env->iaoq_f = env->gr[31];
env->iaoq_b = env->gr[31] + 4;
break;
- case EXCP_ITLB_MISS:
- case EXCP_DTLB_MISS:
- case EXCP_NA_ITLB_MISS:
- case EXCP_NA_DTLB_MISS:
case EXCP_IMP:
- case EXCP_DMP:
- case EXCP_DMB:
- case EXCP_PAGE_REF:
- case EXCP_DMAR:
- case EXCP_DMPI:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_ACCERR;
- info._sifields._sigfault._addr = env->cr[CR_IOR];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- case EXCP_UNALIGN:
- info.si_signo = TARGET_SIGBUS;
- info.si_errno = 0;
- info.si_code = 0;
- info._sifields._sigfault._addr = env->cr[CR_IOR];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, env->iaoq_f);
break;
case EXCP_ILL:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f);
+ break;
case EXCP_PRIV_OPR:
+ /* check for glibc ABORT_INSTRUCTION "iitlbp %r0,(%sr0, %r0)" */
+ if (env->cr[CR_IIR] == 0x04000000) {
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f);
+ } else {
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->iaoq_f);
+ }
+ break;
case EXCP_PRIV_REG:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLOPN;
- info._sifields._sigfault._addr = env->iaoq_f;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVREG, env->iaoq_f);
break;
case EXCP_OVERFLOW:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->iaoq_f);
+ break;
case EXCP_COND:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_CONDTRAP, env->iaoq_f);
+ break;
case EXCP_ASSIST:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = 0;
- info._sifields._sigfault._addr = env->iaoq_f;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGFPE, 0, env->iaoq_f);
+ break;
+ case EXCP_BREAK:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f & ~3);
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f);
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
default:
- g_assert_not_reached();
+ EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
+ abort();
}
process_pending_signals(env);
}
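
The hunk above replaces the open-coded target_siginfo_t setup with force_sig_fault(sig, code, addr), the three-argument form used throughout the new cases. As a rough sketch of what each call does, inferred from the queue_signal() sequences deleted in this same hunk (the real helper lives in the common linux-user signal code and locates the current CPU itself; env is passed explicitly here only to keep the sketch self-contained):

    /* Illustrative expansion of force_sig_fault(), modelled on the deleted
     * queue_signal() pattern above; not the canonical implementation. */
    static void force_sig_fault_sketch(CPUArchState *env, int sig,
                                       int code, abi_ulong addr)
    {
        target_siginfo_t info = { 0 };

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = code;
        info._sifields._sigfault._addr = addr;
        queue_signal(env, sig, QEMU_SI_FAULT, &info);
    }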
diff --git a/linux-user/hppa/meson.build b/linux-user/hppa/meson.build
index 4709508a09..aa2d9a87a6 100644
--- a/linux-user/hppa/meson.build
+++ b/linux-user/hppa/meson.build
@@ -3,3 +3,8 @@ syscall_nr_generators += {
arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
output: '@BASENAME@_nr.h')
}
+
+vdso_inc = gen_vdso.process('vdso.so',
+ extra_args: [ '-r', '__kernel_sigtramp_rt' ])
+
+linux_user_ss.add(when: 'TARGET_HPPA', if_true: vdso_inc)
diff --git a/linux-user/hppa/signal.c b/linux-user/hppa/signal.c
index c2fbc26ebb..682ba25922 100644
--- a/linux-user/hppa/signal.c
+++ b/linux-user/hppa/signal.c
@@ -21,11 +21,12 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
+#include "vdso-asmoffset.h"
struct target_sigcontext {
abi_ulong sc_flags;
abi_ulong sc_gr[32];
- uint64_t sc_fr[32];
+ abi_ullong sc_fr[32];
abi_ulong sc_iasq[2];
abi_ulong sc_iaoq[2];
abi_ulong sc_sar;
@@ -41,31 +42,34 @@ struct target_ucontext {
};
struct target_rt_sigframe {
- abi_uint tramp[9];
+ abi_uint tramp[2]; /* syscall restart return address */
target_siginfo_t info;
struct target_ucontext uc;
/* hidden location of upper halves of pa2.0 64-bit gregs */
};
+QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe) != sizeof_rt_sigframe);
+QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.tuc_mcontext)
+ != offsetof_sigcontext);
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_gr)
+ != offsetof_sigcontext_gr);
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_fr)
+ != offsetof_sigcontext_fr);
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_iaoq)
+ != offsetof_sigcontext_iaoq);
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_sar)
+ != offsetof_sigcontext_sar);
+
+
static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
{
- int flags = 0;
int i;
- /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
-
- if (env->iaoq_f < TARGET_PAGE_SIZE) {
- /* In the gateway page, executing a syscall. */
- flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
- __put_user(env->gr[31], &sc->sc_iaoq[0]);
- __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
- } else {
- __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
- __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
- }
+ __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
+ __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
__put_user(0, &sc->sc_iasq[0]);
__put_user(0, &sc->sc_iasq[1]);
- __put_user(flags, &sc->sc_flags);
+ __put_user(0, &sc->sc_flags);
__put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
for (i = 1; i < 32; ++i) {
@@ -82,7 +86,7 @@ static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
{
- target_ulong psw;
+ abi_ulong psw;
int i;
__get_user(psw, &sc->sc_gr[0]);
@@ -101,10 +105,6 @@ static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
__get_user(env->cr[CR_SAR], &sc->sc_sar);
}
-/* No, this doesn't look right, but it's copied straight from the kernel. */
-#define PARISC_RT_SIGFRAME_SIZE32 \
- ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
-
void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPUArchState *env)
@@ -112,13 +112,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
abi_ulong frame_addr, sp, haddr;
struct target_rt_sigframe *frame;
int i;
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
sp = get_sp_from_cpustate(env);
if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
sp = (ts->sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
}
- frame_addr = QEMU_ALIGN_UP(sp, 64);
+ frame_addr = QEMU_ALIGN_UP(sp, SIGFRAME);
sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
trace_user_setup_rt_frame(env, frame_addr);
@@ -127,7 +127,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
goto give_sigsegv;
}
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
frame->uc.tuc_flags = 0;
frame->uc.tuc_link = 0;
@@ -139,14 +139,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
setup_sigcontext(&frame->uc.tuc_mcontext, env);
- __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
- __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
- __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
- __put_user(0x08000240, frame->tramp + 3); /* nop */
-
unlock_user_struct(frame, frame_addr, 1);
- env->gr[2] = h2g(frame->tramp);
+ env->gr[2] = default_rt_sigreturn;
env->gr[30] = sp;
env->gr[26] = sig;
env->gr[25] = h2g(&frame->info);
@@ -155,19 +150,21 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
haddr = ka->_sa_handler;
if (haddr & 2) {
/* Function descriptor. */
- target_ulong *fdesc, dest;
+ abi_ptr *fdesc, dest;
haddr &= -4;
- if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
+ fdesc = lock_user(VERIFY_READ, haddr, 2 * sizeof(abi_ptr), 1);
+ if (!fdesc) {
goto give_sigsegv;
}
__get_user(dest, fdesc);
__get_user(env->gr[19], fdesc + 1);
- unlock_user_struct(fdesc, haddr, 1);
+ unlock_user(fdesc, haddr, 0);
haddr = dest;
}
env->iaoq_f = haddr;
env->iaoq_b = haddr + 4;
+ env->psw_n = 0;
return;
give_sigsegv:
@@ -191,9 +188,29 @@ long do_rt_sigreturn(CPUArchState *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6*4, 0);
+ abi_ulong SIGFRAME_CONTEXT_REGS32;
+ assert(tramp != NULL);
+
+ SIGFRAME_CONTEXT_REGS32 = offsetof(struct target_rt_sigframe, uc.tuc_mcontext);
+ SIGFRAME_CONTEXT_REGS32 -= PARISC_RT_SIGFRAME_SIZE32;
+
+ __put_user(SIGFRAME_CONTEXT_REGS32, tramp + 0);
+ __put_user(0x08000240, tramp + 1); /* nop - b/c dwarf2 unwind routines */
+ __put_user(0x34190000, tramp + 2); /* ldi 0, %r25 (in_syscall=0) */
+ __put_user(0x3414015a, tramp + 3); /* ldi __NR_rt_sigreturn, %r20 */
+ __put_user(0xe4008200, tramp + 4); /* ble 0x100(%sr2, %r0) */
+ __put_user(0x08000240, tramp + 5); /* nop */
+
+ default_rt_sigreturn = (sigtramp_page + 8) | 3;
+ unlock_user(tramp, sigtramp_page, 6*4);
}
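
A short worked note on the return address installed above: sigtramp_page + 8 skips tramp[0] (the context-offset word consumed by unwinders) and tramp[1] (the leading nop), so the handler returns straight to the "ldi 0, %r25" at tramp[2]; the "| 3" in the low bits follows the PA-RISC convention of encoding the privilege level in branch targets (user level 3), an assumption drawn from the architecture rather than from this patch:

    /* Worked example: where default_rt_sigreturn points. */
    #include <stdint.h>

    static uint32_t rt_sigreturn_for(uint32_t sigtramp_page)
    {
        return (sigtramp_page + 2 * 4)   /* skip tramp[0] and tramp[1] */
               | 3;                      /* user privilege level (assumed) */
    }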
diff --git a/linux-user/hppa/target_elf.h b/linux-user/hppa/target_elf.h
index 82b4e9535e..19cae8bd65 100644
--- a/linux-user/hppa/target_elf.h
+++ b/linux-user/hppa/target_elf.h
@@ -9,6 +9,6 @@
#define HPPA_TARGET_ELF_H
static inline const char *cpu_get_model(uint32_t eflags)
{
- return "any";
+ return "hppa";
}
#endif
diff --git a/linux-user/hppa/target_mman.h b/linux-user/hppa/target_mman.h
new file mode 100644
index 0000000000..ccda46e842
--- /dev/null
+++ b/linux-user/hppa/target_mman.h
@@ -0,0 +1,35 @@
+#ifndef HPPA_TARGET_MMAN_H
+#define HPPA_TARGET_MMAN_H
+
+#define TARGET_MAP_TYPE 0x2b
+#define TARGET_MAP_FIXED 0x04
+#define TARGET_MAP_ANONYMOUS 0x10
+#define TARGET_MAP_GROWSDOWN 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+#define TARGET_MAP_UNINITIALIZED 0
+
+#define TARGET_MADV_MERGEABLE 65
+#define TARGET_MADV_UNMERGEABLE 66
+#define TARGET_MADV_HUGEPAGE 67
+#define TARGET_MADV_NOHUGEPAGE 68
+#define TARGET_MADV_DONTDUMP 69
+#define TARGET_MADV_DODUMP 70
+#define TARGET_MADV_WIPEONFORK 71
+#define TARGET_MADV_KEEPONFORK 72
+
+#define TARGET_MS_SYNC 1
+#define TARGET_MS_ASYNC 2
+#define TARGET_MS_INVALIDATE 4
+
+/* arch/parisc/include/asm/processor.h: DEFAULT_MAP_BASE32 */
+#define TASK_UNMAPPED_BASE 0x40000000
+
+/* arch/parisc/include/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
+#include "../generic/target_mman.h"
+
+#endif
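
For reference, the two defines above resolve to a dynamic-load base of 0x41000000; a trivial compile-time check:

    /* Quick arithmetic check of ELF_ET_DYN_BASE as defined above. */
    _Static_assert(0x40000000 + 0x01000000 == 0x41000000,
                   "hppa ELF_ET_DYN_BASE is 0x41000000");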
diff --git a/linux-user/hppa/target_prctl.h b/linux-user/hppa/target_prctl.h
new file mode 100644
index 0000000000..5629ddbf39
--- /dev/null
+++ b/linux-user/hppa/target_prctl.h
@@ -0,0 +1 @@
+#include "../generic/target_prctl_unalign.h"
diff --git a/linux-user/hppa/target_proc.h b/linux-user/hppa/target_proc.h
new file mode 100644
index 0000000000..9340c3b6af
--- /dev/null
+++ b/linux-user/hppa/target_proc.h
@@ -0,0 +1,26 @@
+/*
+ * HPPA specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HPPA_TARGET_PROC_H
+#define HPPA_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int i, num_cpus;
+
+ num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ for (i = 0; i < num_cpus; i++) {
+ dprintf(fd, "processor\t: %d\n", i);
+ dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
+ dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
+ dprintf(fd, "capabilities\t: os32\n");
+ dprintf(fd, "model\t\t: 9000/778/B160L - "
+ "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
+ }
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* HPPA_TARGET_PROC_H */
diff --git a/linux-user/hppa/target_resource.h b/linux-user/hppa/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/hppa/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/hppa/target_signal.h b/linux-user/hppa/target_signal.h
index 7f525362e9..190bb3d653 100644
--- a/linux-user/hppa/target_signal.h
+++ b/linux-user/hppa/target_signal.h
@@ -64,11 +64,12 @@ typedef struct target_sigaltstack {
#define TARGET_SA_NOCLDWAIT 0x00000080
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
/* mask for all SS_xxx flags */
#define TARGET_SS_FLAG_BITS TARGET_SS_AUTODISARM
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* HPPA_TARGET_SIGNAL_H */
diff --git a/linux-user/hppa/target_syscall.h b/linux-user/hppa/target_syscall.h
index 0018bcb5c4..9a8f8ca628 100644
--- a/linux-user/hppa/target_syscall.h
+++ b/linux-user/hppa/target_syscall.h
@@ -22,9 +22,10 @@ struct target_pt_regs {
#define UNAME_MACHINE "parisc"
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
+#define TARGET_DEFAULT_STACK_SIZE 80 * 1024 * 1024UL
+
#endif /* HPPA_TARGET_SYSCALL_H */
diff --git a/linux-user/hppa/vdso-asmoffset.h b/linux-user/hppa/vdso-asmoffset.h
new file mode 100644
index 0000000000..c8b40c0332
--- /dev/null
+++ b/linux-user/hppa/vdso-asmoffset.h
@@ -0,0 +1,12 @@
+#define sizeof_rt_sigframe 584
+#define offsetof_sigcontext 160
+#define offsetof_sigcontext_gr 0x4
+#define offsetof_sigcontext_fr 0x88
+#define offsetof_sigcontext_iaoq 0x190
+#define offsetof_sigcontext_sar 0x198
+
+/* arch/parisc/include/asm/rt_sigframe.h */
+#define SIGFRAME 64
+#define FUNCTIONCALLFRAME 48
+#define PARISC_RT_SIGFRAME_SIZE32 \
+ (((sizeof_rt_sigframe) + FUNCTIONCALLFRAME + SIGFRAME) & -SIGFRAME)
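
Plugging the constants above into the macro gives the concrete layout that signal.c and vdso.S rely on; a short check of the arithmetic:

    /* PARISC_RT_SIGFRAME_SIZE32 = (584 + 48 + 64) & -64 = 696 & ~63 = 640,
     * so the signal frame occupies 640 bytes, and
     * SIGFRAME_CONTEXT_REGS32 = offsetof_sigcontext - 640 = 160 - 640 = -480,
     * i.e. the sigcontext sits 480 bytes below the stack pointer that
     * setup_rt_frame() hands to the handler (gr[30] = frame + 640). */
    _Static_assert(((584 + 48 + 64) & -64) == 640,
                   "PARISC_RT_SIGFRAME_SIZE32");
    _Static_assert(160 - 640 == -480, "SIGFRAME_CONTEXT_REGS32");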
diff --git a/linux-user/hppa/vdso.S b/linux-user/hppa/vdso.S
new file mode 100644
index 0000000000..5be14d2f70
--- /dev/null
+++ b/linux-user/hppa/vdso.S
@@ -0,0 +1,165 @@
+/*
+ * hppa linux kernel vdso replacement.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+#include "vdso-asmoffset.h"
+
+ .text
+
+
+/*
+ * arch/parisc/kernel/vdso32/sigtramp.S:
+ * Gdb expects the trampoline is on the stack and the pc is offset from
+ * a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline
+ * is not on the stack, we need a new variant with different offsets and
+ * data to tell gdb where to find the signal context on the stack.
+ *
+ * Here we put the offset to the context data at the start of the trampoline
+ * region and offset the first trampoline by 2 instructions. Please do
+ * not change the trampoline as the code in gdb depends on the following
+ * instruction sequence exactly.
+ */
+
+/* arch/parisc/kernel/asm-offsets.c */
+#define SIGFRAME_CONTEXT_REGS32 \
+ (offsetof_sigcontext - PARISC_RT_SIGFRAME_SIZE32)
+
+ .align 64
+ .word SIGFRAME_CONTEXT_REGS32
+
+/*
+ * All that said, we can provide a proper unwind record, which means that
+ * GDB should not actually need the offset magic.
+ *
+ * The return address that arrived here, from the inner frame, is
+ * not marked as a signal frame and so the unwinder still tries to
+ * subtract 1 to examine the presumed call insn. Thus we must
+ * extend the unwind info to a nop before the start.
+ */
+
+ .cfi_startproc simple
+ .cfi_signal_frame
+
+ /* Compare pa32_fallback_frame_state from libgcc. */
+
+ /*
+ * Place the CFA at the start of sigcontext for convenience.
+ * The previous CFA will be restored from the saved stack pointer.
+ */
+ .cfi_def_cfa 30, -PARISC_RT_SIGFRAME_SIZE32 + offsetof_sigcontext
+
+ /* Record save offset of general registers. */
+ .cfi_offset 1, offsetof_sigcontext_gr + 1 * 4
+ .cfi_offset 2, offsetof_sigcontext_gr + 2 * 4
+ .cfi_offset 3, offsetof_sigcontext_gr + 3 * 4
+ .cfi_offset 4, offsetof_sigcontext_gr + 4 * 4
+ .cfi_offset 5, offsetof_sigcontext_gr + 5 * 4
+ .cfi_offset 6, offsetof_sigcontext_gr + 6 * 4
+ .cfi_offset 7, offsetof_sigcontext_gr + 7 * 4
+ .cfi_offset 8, offsetof_sigcontext_gr + 8 * 4
+ .cfi_offset 9, offsetof_sigcontext_gr + 9 * 4
+ .cfi_offset 10, offsetof_sigcontext_gr + 10 * 4
+ .cfi_offset 11, offsetof_sigcontext_gr + 11 * 4
+ .cfi_offset 12, offsetof_sigcontext_gr + 12 * 4
+ .cfi_offset 13, offsetof_sigcontext_gr + 13 * 4
+ .cfi_offset 14, offsetof_sigcontext_gr + 14 * 4
+ .cfi_offset 15, offsetof_sigcontext_gr + 15 * 4
+ .cfi_offset 16, offsetof_sigcontext_gr + 16 * 4
+ .cfi_offset 17, offsetof_sigcontext_gr + 17 * 4
+ .cfi_offset 18, offsetof_sigcontext_gr + 18 * 4
+ .cfi_offset 19, offsetof_sigcontext_gr + 19 * 4
+ .cfi_offset 20, offsetof_sigcontext_gr + 20 * 4
+ .cfi_offset 21, offsetof_sigcontext_gr + 21 * 4
+ .cfi_offset 22, offsetof_sigcontext_gr + 22 * 4
+ .cfi_offset 23, offsetof_sigcontext_gr + 23 * 4
+ .cfi_offset 24, offsetof_sigcontext_gr + 24 * 4
+ .cfi_offset 25, offsetof_sigcontext_gr + 25 * 4
+ .cfi_offset 26, offsetof_sigcontext_gr + 26 * 4
+ .cfi_offset 27, offsetof_sigcontext_gr + 27 * 4
+ .cfi_offset 28, offsetof_sigcontext_gr + 28 * 4
+ .cfi_offset 29, offsetof_sigcontext_gr + 29 * 4
+ .cfi_offset 30, offsetof_sigcontext_gr + 30 * 4
+ .cfi_offset 31, offsetof_sigcontext_gr + 31 * 4
+
+ /* Record save offset of fp registers, left and right halves. */
+ .cfi_offset 32, offsetof_sigcontext_fr + 4 * 8
+ .cfi_offset 33, offsetof_sigcontext_fr + 4 * 8 + 4
+ .cfi_offset 34, offsetof_sigcontext_fr + 5 * 8
+ .cfi_offset 35, offsetof_sigcontext_fr + 5 * 8 + 4
+ .cfi_offset 36, offsetof_sigcontext_fr + 6 * 8
+ .cfi_offset 37, offsetof_sigcontext_fr + 6 * 8 + 4
+ .cfi_offset 38, offsetof_sigcontext_fr + 7 * 8
+ .cfi_offset 39, offsetof_sigcontext_fr + 7 * 8 + 4
+ .cfi_offset 40, offsetof_sigcontext_fr + 8 * 8
+ .cfi_offset 41, offsetof_sigcontext_fr + 8 * 8 + 4
+ .cfi_offset 42, offsetof_sigcontext_fr + 9 * 8
+ .cfi_offset 43, offsetof_sigcontext_fr + 9 * 8 + 4
+ .cfi_offset 44, offsetof_sigcontext_fr + 10 * 8
+ .cfi_offset 45, offsetof_sigcontext_fr + 10 * 8 + 4
+ .cfi_offset 46, offsetof_sigcontext_fr + 11 * 8
+ .cfi_offset 47, offsetof_sigcontext_fr + 11 * 8 + 4
+ .cfi_offset 48, offsetof_sigcontext_fr + 12 * 8
+ .cfi_offset 49, offsetof_sigcontext_fr + 12 * 8 + 4
+ .cfi_offset 50, offsetof_sigcontext_fr + 13 * 8
+ .cfi_offset 51, offsetof_sigcontext_fr + 13 * 8 + 4
+ .cfi_offset 52, offsetof_sigcontext_fr + 14 * 8
+ .cfi_offset 53, offsetof_sigcontext_fr + 14 * 8 + 4
+ .cfi_offset 54, offsetof_sigcontext_fr + 15 * 8
+ .cfi_offset 55, offsetof_sigcontext_fr + 15 * 8 + 4
+ .cfi_offset 56, offsetof_sigcontext_fr + 16 * 8
+ .cfi_offset 57, offsetof_sigcontext_fr + 16 * 8 + 4
+ .cfi_offset 58, offsetof_sigcontext_fr + 17 * 8
+ .cfi_offset 59, offsetof_sigcontext_fr + 17 * 8 + 4
+ .cfi_offset 60, offsetof_sigcontext_fr + 18 * 8
+ .cfi_offset 61, offsetof_sigcontext_fr + 18 * 8 + 4
+ .cfi_offset 62, offsetof_sigcontext_fr + 19 * 8
+ .cfi_offset 63, offsetof_sigcontext_fr + 19 * 8 + 4
+ .cfi_offset 64, offsetof_sigcontext_fr + 20 * 8
+ .cfi_offset 65, offsetof_sigcontext_fr + 20 * 8 + 4
+ .cfi_offset 66, offsetof_sigcontext_fr + 21 * 8
+ .cfi_offset 67, offsetof_sigcontext_fr + 21 * 8 + 4
+ .cfi_offset 68, offsetof_sigcontext_fr + 22 * 8
+ .cfi_offset 69, offsetof_sigcontext_fr + 22 * 8 + 4
+ .cfi_offset 70, offsetof_sigcontext_fr + 23 * 8
+ .cfi_offset 71, offsetof_sigcontext_fr + 23 * 8 + 4
+ .cfi_offset 72, offsetof_sigcontext_fr + 24 * 8
+ .cfi_offset 73, offsetof_sigcontext_fr + 24 * 8 + 4
+ .cfi_offset 74, offsetof_sigcontext_fr + 25 * 8
+ .cfi_offset 75, offsetof_sigcontext_fr + 25 * 8 + 4
+ .cfi_offset 76, offsetof_sigcontext_fr + 26 * 8
+ .cfi_offset 77, offsetof_sigcontext_fr + 26 * 8 + 4
+ .cfi_offset 78, offsetof_sigcontext_fr + 27 * 8
+ .cfi_offset 79, offsetof_sigcontext_fr + 27 * 8 + 4
+ .cfi_offset 80, offsetof_sigcontext_fr + 28 * 8
+ .cfi_offset 81, offsetof_sigcontext_fr + 28 * 8 + 4
+ .cfi_offset 82, offsetof_sigcontext_fr + 29 * 8
+ .cfi_offset 83, offsetof_sigcontext_fr + 29 * 8 + 4
+ .cfi_offset 84, offsetof_sigcontext_fr + 30 * 8
+ .cfi_offset 85, offsetof_sigcontext_fr + 30 * 8 + 4
+ .cfi_offset 86, offsetof_sigcontext_fr + 31 * 8
+ .cfi_offset 87, offsetof_sigcontext_fr + 31 * 8 + 4
+
+ /* Record save offset of %sar */
+ .cfi_offset 88, offsetof_sigcontext_sar
+
+ /* Record save offset of return address, iaoq[0]. */
+ .cfi_return_column 89
+ .cfi_offset 89, offsetof_sigcontext_iaoq
+
+ nop
+
+__kernel_sigtramp_rt:
+ ldi 0, %r25
+ ldi __NR_rt_sigreturn, %r20
+ be,l 0x100(%sr2, %r0), %sr0, %r31
+ nop
+
+ .cfi_endproc
+ .size __kernel_sigtramp_rt, . - __kernel_sigtramp_rt
+ .type __kernel_sigtramp_rt, @function
+ .globl __kernel_sigtramp_rt
diff --git a/linux-user/hppa/vdso.ld b/linux-user/hppa/vdso.ld
new file mode 100644
index 0000000000..b17ad974f3
--- /dev/null
+++ b/linux-user/hppa/vdso.ld
@@ -0,0 +1,77 @@
+/*
+ * Linker script for linux hppa vdso.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+VERSION {
+ /*
+ * The kernel's vdso32.lds.S attempts to export
+ * __kernel_sigtramp_rt32
+ * __kernel_restart_syscall32
+ * except that those symbols don't exist. The actual symbols are
+ * __kernel_sigtramp_rt
+ * __kernel_restart_syscall
+ * which means that nothing is exported at all.
+ * QEMU handles syscall restart internally, so we don't
+ * need to implement __kernel_restart_syscall at all.
+ */
+ LINUX_5.18 {
+ local: *;
+ };
+}
+
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS;
+ dynamic PT_DYNAMIC FLAGS(4);
+ note PT_NOTE FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+SECTIONS {
+ . = SIZEOF_HEADERS;
+
+ /* The following, including the FILEHDRS and PHDRS, are modified
+ when we relocate the binary. We want them to be initially
+ writable for the relocation; we'll force them read-only after. */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ .data : {
+ /* There ought not be any real read-write data.
+ But since we manipulated the segment layout,
+ we have to put these sections somewhere. */
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load
+}
diff --git a/linux-user/hppa/vdso.so b/linux-user/hppa/vdso.so
new file mode 100755
index 0000000000..e1ddd70c37
--- /dev/null
+++ b/linux-user/hppa/vdso.so
Binary files differ
diff --git a/linux-user/i386/Makefile.vdso b/linux-user/i386/Makefile.vdso
new file mode 100644
index 0000000000..95bc616f6d
--- /dev/null
+++ b/linux-user/i386/Makefile.vdso
@@ -0,0 +1,11 @@
+include $(BUILD_DIR)/tests/tcg/i386-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/i386
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso.so
+
+$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ -m32 -nostdlib -shared -Wl,-h,linux-gate.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both \
+ -Wl,-T,$(SUBDIR)/vdso.ld $<
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
index f6a1cc632b..92beb6830c 100644
--- a/linux-user/i386/cpu_loop.c
+++ b/linux-user/i386/cpu_loop.c
@@ -18,8 +18,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
+#include "qemu/timer.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
@@ -47,7 +47,7 @@ static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
}
static uint64_t *idt_table;
-#ifdef TARGET_X86_64
+
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
uint64_t addr, unsigned int sel)
{
@@ -60,8 +60,10 @@ static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
p[2] = tswap32(addr >> 32);
p[3] = 0;
}
+
+#ifdef TARGET_X86_64
/* only dpl matters as we do only user space emulation */
-static void set_idt(int n, unsigned int dpl)
+static void set_idt(int n, unsigned int dpl, bool is64)
{
set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
@@ -78,23 +80,16 @@ static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
}
/* only dpl matters as we do only user space emulation */
-static void set_idt(int n, unsigned int dpl)
+static void set_idt(int n, unsigned int dpl, bool is64)
{
- set_gate(idt_table + n, 0, dpl, 0, 0);
+ if (is64) {
+ set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
+ } else {
+ set_gate(idt_table + n, 0, dpl, 0, 0);
+ }
}
#endif
-static void gen_signal(CPUX86State *env, int sig, int code, abi_ptr addr)
-{
- target_siginfo_t info = {
- .si_signo = sig,
- .si_code = code,
- ._sifields._sigfault._addr = addr
- };
-
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-}
-
#ifdef TARGET_X86_64
static bool write_ok_or_segv(CPUX86State *env, abi_ptr addr, size_t len)
{
@@ -107,7 +102,7 @@ static bool write_ok_or_segv(CPUX86State *env, abi_ptr addr, size_t len)
}
env->error_code = PG_ERROR_W_MASK | PG_ERROR_U_MASK;
- gen_signal(env, TARGET_SIGSEGV, TARGET_SEGV_MAPERR, addr);
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, addr);
return false;
}
@@ -148,7 +143,7 @@ static void emulate_vsyscall(CPUX86State *env)
}
/*
- * Validate the the pointer arguments.
+ * Validate the pointer arguments.
*/
switch (syscall) {
case TARGET_NR_gettimeofday:
@@ -180,8 +175,8 @@ static void emulate_vsyscall(CPUX86State *env)
ret = do_syscall(env, syscall, env->regs[R_EDI], env->regs[R_ESI],
env->regs[R_EDX], env->regs[10], env->regs[8],
env->regs[9], 0, 0);
- g_assert(ret != -TARGET_ERESTARTSYS);
- g_assert(ret != -TARGET_QEMU_ESIGRETURN);
+ g_assert(ret != -QEMU_ERESTARTSYS);
+ g_assert(ret != -QEMU_ESIGRETURN);
if (ret == -TARGET_EFAULT) {
goto sigsegv;
}
@@ -193,16 +188,25 @@ static void emulate_vsyscall(CPUX86State *env)
return;
sigsegv:
- /* Like force_sig(SIGSEGV). */
- gen_signal(env, TARGET_SIGSEGV, TARGET_SI_KERNEL, 0);
+ force_sig(TARGET_SIGSEGV);
}
#endif
+static bool maybe_handle_vm86_trap(CPUX86State *env, int trapnr)
+{
+#ifndef TARGET_X86_64
+ if (env->eflags & VM_MASK) {
+ handle_vm86_trap(env, trapnr);
+ return true;
+ }
+#endif
+ return false;
+}
+
void cpu_loop(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
int trapnr;
- abi_ulong pc;
abi_ulong ret;
for(;;) {
@@ -213,6 +217,9 @@ void cpu_loop(CPUX86State *env)
switch(trapnr) {
case 0x80:
+#ifndef TARGET_X86_64
+ case EXCP_SYSCALL:
+#endif
/* linux syscall from int $0x80 */
ret = do_syscall(env,
env->regs[R_EAX],
@@ -223,15 +230,15 @@ void cpu_loop(CPUX86State *env)
env->regs[R_EDI],
env->regs[R_EBP],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->eip -= 2;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->regs[R_EAX] = ret;
}
break;
-#ifndef TARGET_ABI32
+#ifdef TARGET_X86_64
case EXCP_SYSCALL:
- /* linux syscall from syscall instruction */
+ /* linux syscall from syscall instruction. */
ret = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EDI],
@@ -241,110 +248,111 @@ void cpu_loop(CPUX86State *env)
env->regs[8],
env->regs[9],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->eip -= 2;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->regs[R_EAX] = ret;
}
break;
-#endif
-#ifdef TARGET_X86_64
case EXCP_VSYSCALL:
emulate_vsyscall(env);
break;
#endif
case EXCP0B_NOSEG:
case EXCP0C_STACK:
- gen_signal(env, TARGET_SIGBUS, TARGET_SI_KERNEL, 0);
+ force_sig(TARGET_SIGBUS);
break;
case EXCP0D_GPF:
/* XXX: potential problem if ABI32 */
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_fault(env);
+ if (maybe_handle_vm86_trap(env, trapnr)) {
break;
}
-#endif
- gen_signal(env, TARGET_SIGSEGV, TARGET_SI_KERNEL, 0);
+ force_sig(TARGET_SIGSEGV);
break;
case EXCP0E_PAGE:
- gen_signal(env, TARGET_SIGSEGV,
- (env->error_code & 1 ?
- TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR),
- env->cr[2]);
+ force_sig_fault(TARGET_SIGSEGV,
+ (env->error_code & PG_ERROR_P_MASK ?
+ TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR),
+ env->cr[2]);
break;
case EXCP00_DIVZ:
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_trap(env, trapnr);
+ if (maybe_handle_vm86_trap(env, trapnr)) {
break;
}
-#endif
- gen_signal(env, TARGET_SIGFPE, TARGET_FPE_INTDIV, env->eip);
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->eip);
break;
case EXCP01_DB:
- case EXCP03_INT3:
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_trap(env, trapnr);
+ if (maybe_handle_vm86_trap(env, trapnr)) {
break;
}
-#endif
- if (trapnr == EXCP01_DB) {
- gen_signal(env, TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->eip);
- } else {
- gen_signal(env, TARGET_SIGTRAP, TARGET_SI_KERNEL, 0);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->eip);
+ break;
+ case EXCP03_INT3:
+ if (maybe_handle_vm86_trap(env, trapnr)) {
+ break;
}
+ force_sig(TARGET_SIGTRAP);
break;
case EXCP04_INTO:
case EXCP05_BOUND:
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_trap(env, trapnr);
+ if (maybe_handle_vm86_trap(env, trapnr)) {
break;
}
-#endif
- gen_signal(env, TARGET_SIGSEGV, TARGET_SI_KERNEL, 0);
+ force_sig(TARGET_SIGSEGV);
break;
case EXCP06_ILLOP:
- gen_signal(env, TARGET_SIGILL, TARGET_ILL_ILLOPN, env->eip);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->eip);
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
case EXCP_DEBUG:
- gen_signal(env, TARGET_SIGTRAP, TARGET_TRAP_BRKPT, 0);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->eip);
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
break;
default:
- pc = env->segs[R_CS].base + env->eip;
- EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
- (long)pc, trapnr);
+ EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
abort();
}
process_pending_signals(env);
}
}
+static void target_cpu_free(void *obj)
+{
+ target_munmap(cpu_env(obj)->gdt.base,
+ sizeof(uint64_t) * TARGET_GDT_ENTRIES);
+ g_free(obj);
+}
+
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
+ CPUState *cpu = env_cpu(env);
+ bool is64 = (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) != 0;
+ int i;
+
+ OBJECT(cpu)->free = target_cpu_free;
env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
env->hflags |= HF_PE_MASK | HF_CPL_MASK;
if (env->features[FEAT_1_EDX] & CPUID_SSE) {
env->cr[4] |= CR4_OSFXSR_MASK;
env->hflags |= HF_OSFXSR_MASK;
}
-#ifndef TARGET_ABI32
+
/* enable 64 bit mode if possible */
- if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
+ if (is64) {
+ env->cr[4] |= CR4_PAE_MASK;
+ env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
+ env->hflags |= HF_LMA_MASK;
+ }
+#ifndef TARGET_ABI32
+ else {
fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
exit(EXIT_FAILURE);
}
- env->cr[4] |= CR4_PAE_MASK;
- env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
- env->hflags |= HF_LMA_MASK;
#endif
/* flags setup : we activate the IRQs by default as in user mode */
@@ -383,27 +391,12 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
idt_table = g2h_untagged(env->idt.base);
- set_idt(0, 0);
- set_idt(1, 0);
- set_idt(2, 0);
- set_idt(3, 3);
- set_idt(4, 3);
- set_idt(5, 0);
- set_idt(6, 0);
- set_idt(7, 0);
- set_idt(8, 0);
- set_idt(9, 0);
- set_idt(10, 0);
- set_idt(11, 0);
- set_idt(12, 0);
- set_idt(13, 0);
- set_idt(14, 0);
- set_idt(15, 0);
- set_idt(16, 0);
- set_idt(17, 0);
- set_idt(18, 0);
- set_idt(19, 0);
- set_idt(0x80, 3);
+ for (i = 0; i < 20; i++) {
+ set_idt(i, 0, is64);
+ }
+ set_idt(3, 3, is64);
+ set_idt(4, 3, is64);
+ set_idt(0x80, 3, is64);
/* linux segment setup */
{
diff --git a/linux-user/i386/meson.build b/linux-user/i386/meson.build
index ee523019a5..d42fc6cbc9 100644
--- a/linux-user/i386/meson.build
+++ b/linux-user/i386/meson.build
@@ -3,3 +3,10 @@ syscall_nr_generators += {
arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
output: '@BASENAME@_nr.h')
}
+
+vdso_inc = gen_vdso.process('vdso.so', extra_args: [
+ '-s', '__kernel_sigreturn',
+ '-r', '__kernel_rt_sigreturn'
+ ])
+
+linux_user_ss.add(when: 'TARGET_I386', if_true: vdso_inc)
diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c
index 3b4b55fc0a..990048f42a 100644
--- a/linux-user/i386/signal.c
+++ b/linux-user/i386/signal.c
@@ -21,9 +21,14 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
+#include "user/tswap-target.h"
/* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
+#define TARGET_FP_XSTATE_MAGIC1 0x46505853U /* FPXS */
+#define TARGET_FP_XSTATE_MAGIC2 0x46505845U /* FPXE */
+#define TARGET_FP_XSTATE_MAGIC2_SIZE 4
+
struct target_fpreg {
uint16_t significand[4];
uint16_t exponent;
@@ -39,29 +44,16 @@ struct target_xmmreg {
uint32_t element[4];
};
-struct target_fpstate_32 {
- /* Regular FPU environment */
- uint32_t cw;
- uint32_t sw;
- uint32_t tag;
- uint32_t ipoff;
- uint32_t cssel;
- uint32_t dataoff;
- uint32_t datasel;
- struct target_fpreg st[8];
- uint16_t status;
- uint16_t magic; /* 0xffff = regular FPU data only */
-
- /* FXSR FPU environment */
- uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
- uint32_t mxcsr;
- uint32_t reserved;
- struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
- struct target_xmmreg xmm[8];
- uint32_t padding[56];
+struct target_fpx_sw_bytes {
+ uint32_t magic1;
+ uint32_t extended_size;
+ uint64_t xfeatures;
+ uint32_t xstate_size;
+ uint32_t reserved[7];
};
+QEMU_BUILD_BUG_ON(sizeof(struct target_fpx_sw_bytes) != 12*4);
-struct target_fpstate_64 {
+struct target_fpstate_fxsave {
/* FXSAVE format */
uint16_t cw;
uint16_t sw;
@@ -73,13 +65,41 @@ struct target_fpstate_64 {
uint32_t mxcsr_mask;
uint32_t st_space[32];
uint32_t xmm_space[64];
- uint32_t reserved[24];
+ uint32_t hw_reserved[12];
+ struct target_fpx_sw_bytes sw_reserved;
+ uint8_t xfeatures[];
+};
+#define TARGET_FXSAVE_SIZE sizeof(struct target_fpstate_fxsave)
+QEMU_BUILD_BUG_ON(TARGET_FXSAVE_SIZE != 512);
+QEMU_BUILD_BUG_ON(offsetof(struct target_fpstate_fxsave, sw_reserved) != 464);
+
+struct target_fpstate_32 {
+ /* Regular FPU environment */
+ uint32_t cw;
+ uint32_t sw;
+ uint32_t tag;
+ uint32_t ipoff;
+ uint32_t cssel;
+ uint32_t dataoff;
+ uint32_t datasel;
+ struct target_fpreg st[8];
+ uint16_t status;
+ uint16_t magic; /* 0xffff = regular FPU data only */
+ struct target_fpstate_fxsave fxsave;
};
+/*
+ * For simplicity, setup_frame aligns struct target_fpstate_32 to
+ * 16 bytes, so ensure that the FXSAVE area is also aligned.
+ */
+QEMU_BUILD_BUG_ON(offsetof(struct target_fpstate_32, fxsave) & 15);
+
#ifndef TARGET_X86_64
# define target_fpstate target_fpstate_32
+# define TARGET_FPSTATE_FXSAVE_OFFSET offsetof(struct target_fpstate_32, fxsave)
#else
-# define target_fpstate target_fpstate_64
+# define target_fpstate target_fpstate_fxsave
+# define TARGET_FPSTATE_FXSAVE_OFFSET 0
#endif
struct target_sigcontext_32 {
@@ -163,10 +183,25 @@ struct sigframe {
abi_ulong pretcode;
int sig;
struct target_sigcontext sc;
- struct target_fpstate fpstate;
+ /*
+ * The actual fpstate is placed after retcode[] below, to make
+ * room for the variable-sized xsave data. The older unused fpstate
+ * has to be kept to avoid changing the offset of extramask[], which
+ * is part of the ABI.
+ */
+ struct target_fpstate fpstate_unused;
abi_ulong extramask[TARGET_NSIG_WORDS-1];
char retcode[8];
+
+ /*
+ * This field will be 16-byte aligned in memory. Applying QEMU_ALIGNED
+ * to it ensures that the base of the frame has an appropriate alignment
+ * too.
+ */
+ struct target_fpstate fpstate QEMU_ALIGNED(8);
};
+#define TARGET_SIGFRAME_FXSAVE_OFFSET ( \
+ offsetof(struct sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET)
struct rt_sigframe {
abi_ulong pretcode;
@@ -175,9 +210,21 @@ struct rt_sigframe {
abi_ulong puc;
struct target_siginfo info;
struct target_ucontext uc;
- struct target_fpstate fpstate;
char retcode[8];
+ struct target_fpstate fpstate QEMU_ALIGNED(8);
};
+#define TARGET_RT_SIGFRAME_FXSAVE_OFFSET ( \
+ offsetof(struct rt_sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET)
+
+/*
+ * Verify that vdso-asmoffset.h constants match.
+ */
+#include "i386/vdso-asmoffset.h"
+
+QEMU_BUILD_BUG_ON(offsetof(struct sigframe, sc.eip)
+ != SIGFRAME_SIGCONTEXT_eip);
+QEMU_BUILD_BUG_ON(offsetof(struct rt_sigframe, uc.tuc_mcontext.eip)
+ != RT_SIGFRAME_SIGCONTEXT_eip);
#else
@@ -185,16 +232,51 @@ struct rt_sigframe {
abi_ulong pretcode;
struct target_ucontext uc;
struct target_siginfo info;
- struct target_fpstate fpstate;
+ struct target_fpstate fpstate QEMU_ALIGNED(16);
};
-
+#define TARGET_RT_SIGFRAME_FXSAVE_OFFSET ( \
+ offsetof(struct rt_sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET)
#endif
/*
* Set up a signal frame.
*/
-/* XXX: save x87 state */
+static void xsave_sigcontext(CPUX86State *env, struct target_fpstate_fxsave *fxsave,
+ abi_ulong fxsave_addr)
+{
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+ /* fxsave_addr must be 16 byte aligned for fxsave */
+ assert(!(fxsave_addr & 0xf));
+
+ cpu_x86_fxsave(env, fxsave_addr);
+ __put_user(0, &fxsave->sw_reserved.magic1);
+ } else {
+ uint32_t xstate_size = xsave_area_size(env->xcr0, false);
+ uint32_t xfeatures_size = xstate_size - TARGET_FXSAVE_SIZE;
+
+ /*
+ * extended_size is the offset from fpstate_addr to right after the end
+ * of the extended save states. On 32-bit that includes the legacy
+ * FSAVE area.
+ */
+ uint32_t extended_size = TARGET_FPSTATE_FXSAVE_OFFSET
+ + xstate_size + TARGET_FP_XSTATE_MAGIC2_SIZE;
+
+ /* fxsave_addr must be 64 byte aligned for xsave */
+ assert(!(fxsave_addr & 0x3f));
+
+ /* Zero the header, XSAVE *adds* features to an existing save state. */
+ memset(fxsave->xfeatures, 0, 64);
+ cpu_x86_xsave(env, fxsave_addr);
+ __put_user(TARGET_FP_XSTATE_MAGIC1, &fxsave->sw_reserved.magic1);
+ __put_user(extended_size, &fxsave->sw_reserved.extended_size);
+ __put_user(env->xcr0, &fxsave->sw_reserved.xfeatures);
+ __put_user(xstate_size, &fxsave->sw_reserved.xstate_size);
+ __put_user(TARGET_FP_XSTATE_MAGIC2, (uint32_t *) &fxsave->xfeatures[xfeatures_size]);
+ }
+}
+
static void setup_sigcontext(struct target_sigcontext *sc,
struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
abi_ulong fpstate_addr)
@@ -226,13 +308,14 @@ static void setup_sigcontext(struct target_sigcontext *sc,
cpu_x86_fsave(env, fpstate_addr, 1);
fpstate->status = fpstate->sw;
- magic = 0xffff;
+ if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) {
+ magic = 0xffff;
+ } else {
+ xsave_sigcontext(env, &fpstate->fxsave,
+ fpstate_addr + TARGET_FPSTATE_FXSAVE_OFFSET);
+ magic = 0;
+ }
__put_user(magic, &fpstate->magic);
- __put_user(fpstate_addr, &sc->fpstate);
-
- /* non-iBCS2 extensions.. */
- __put_user(mask, &sc->oldmask);
- __put_user(env->cr[2], &sc->cr2);
#else
__put_user(env->regs[R_EDI], &sc->rdi);
__put_user(env->regs[R_ESI], &sc->rsi);
@@ -262,15 +345,14 @@ static void setup_sigcontext(struct target_sigcontext *sc,
__put_user((uint16_t)0, &sc->fs);
__put_user(env->segs[R_SS].selector, &sc->ss);
- __put_user(mask, &sc->oldmask);
- __put_user(env->cr[2], &sc->cr2);
-
- /* fpstate_addr must be 16 byte aligned for fxsave */
- assert(!(fpstate_addr & 0xf));
+ xsave_sigcontext(env, fpstate, fpstate_addr);
+#endif
- cpu_x86_fxsave(env, fpstate_addr);
__put_user(fpstate_addr, &sc->fpstate);
-#endif
+
+ /* non-iBCS2 extensions.. */
+ __put_user(mask, &sc->oldmask);
+ __put_user(env->cr[2], &sc->cr2);
}
/*
@@ -278,7 +360,7 @@ static void setup_sigcontext(struct target_sigcontext *sc,
*/
static inline abi_ulong
-get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
+get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t fxsave_offset)
{
unsigned long esp;
@@ -302,14 +384,34 @@ get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
#endif
}
-#ifndef TARGET_X86_64
- return (esp - frame_size) & -8ul;
-#else
- return ((esp - frame_size) & (~15ul)) - 8;
-#endif
+ if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) {
+ return (esp - (fxsave_offset + TARGET_FXSAVE_SIZE)) & -8ul;
+ } else if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+ return ((esp - TARGET_FXSAVE_SIZE) & -16ul) - fxsave_offset;
+ } else {
+ size_t xstate_size =
+ xsave_area_size(env->xcr0, false) + TARGET_FP_XSTATE_MAGIC2_SIZE;
+ return ((esp - xstate_size) & -64ul) - fxsave_offset;
+ }
}
#ifndef TARGET_X86_64
+static void install_sigtramp(void *tramp)
+{
+ /* This is popl %eax ; movl $syscall,%eax ; int $0x80 */
+ __put_user(0xb858, (uint16_t *)(tramp + 0));
+ __put_user(TARGET_NR_sigreturn, (int32_t *)(tramp + 2));
+ __put_user(0x80cd, (uint16_t *)(tramp + 6));
+}
+
+static void install_rt_sigtramp(void *tramp)
+{
+ /* This is movl $syscall,%eax ; int $0x80 */
+ __put_user(0xb8, (uint8_t *)(tramp + 0));
+ __put_user(TARGET_NR_rt_sigreturn, (int32_t *)(tramp + 1));
+ __put_user(0x80cd, (uint16_t *)(tramp + 5));
+}
+
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
void setup_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUX86State *env)
@@ -318,7 +420,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
struct sigframe *frame;
int i;
- frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ frame_addr = get_sigframe(ka, env, TARGET_SIGFRAME_FXSAVE_OFFSET);
trace_user_setup_frame(env, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
@@ -329,7 +431,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
frame_addr + offsetof(struct sigframe, fpstate));
- for(i = 1; i < TARGET_NSIG_WORDS; i++) {
+ for (i = 1; i < TARGET_NSIG_WORDS; i++) {
__put_user(set->sig[i], &frame->extramask[i - 1]);
}
@@ -338,16 +440,9 @@ void setup_frame(int sig, struct target_sigaction *ka,
if (ka->sa_flags & TARGET_SA_RESTORER) {
__put_user(ka->sa_restorer, &frame->pretcode);
} else {
- uint16_t val16;
- abi_ulong retcode_addr;
- retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
- __put_user(retcode_addr, &frame->pretcode);
- /* This is popl %eax ; movl $,%eax ; int $0x80 */
- val16 = 0xb858;
- __put_user(val16, (uint16_t *)(frame->retcode+0));
- __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
- val16 = 0x80cd;
- __put_user(val16, (uint16_t *)(frame->retcode+6));
+ /* This is no longer used, but is retained for ABI compatibility. */
+ install_sigtramp(frame->retcode);
+ __put_user(default_sigreturn, &frame->pretcode);
}
/* Set up registers for signal handler */
@@ -381,7 +476,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
struct rt_sigframe *frame;
int i;
- frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ frame_addr = get_sigframe(ka, env, TARGET_RT_SIGFRAME_FXSAVE_OFFSET);
trace_user_setup_rt_frame(env, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
@@ -396,40 +491,38 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
__put_user(addr, &frame->puc);
#endif
if (ka->sa_flags & TARGET_SA_SIGINFO) {
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
}
/* Create the ucontext. */
- __put_user(0, &frame->uc.tuc_flags);
+ if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+ __put_user(1, &frame->uc.tuc_flags);
+ } else {
+ __put_user(0, &frame->uc.tuc_flags);
+ }
__put_user(0, &frame->uc.tuc_link);
target_save_altstack(&frame->uc.tuc_stack, env);
setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
- for(i = 0; i < TARGET_NSIG_WORDS; i++) {
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
}
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
-#ifndef TARGET_X86_64
if (ka->sa_flags & TARGET_SA_RESTORER) {
__put_user(ka->sa_restorer, &frame->pretcode);
} else {
- uint16_t val16;
- addr = frame_addr + offsetof(struct rt_sigframe, retcode);
- __put_user(addr, &frame->pretcode);
- /* This is movl $,%eax ; int $0x80 */
- __put_user(0xb8, (char *)(frame->retcode+0));
- __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
- val16 = 0x80cd;
- __put_user(val16, (uint16_t *)(frame->retcode+5));
- }
+#ifdef TARGET_X86_64
+ /* For x86_64, SA_RESTORER is required ABI. */
+ goto give_sigsegv;
#else
- /* XXX: Would be slightly better to return -EFAULT here if test fails
- assert(ka->sa_flags & TARGET_SA_RESTORER); */
- __put_user(ka->sa_restorer, &frame->pretcode);
+ /* This is no longer used, but is retained for ABI compatibility. */
+ install_rt_sigtramp(frame->retcode);
+ __put_user(default_rt_sigreturn, &frame->pretcode);
#endif
+ }
/* Set up registers for signal handler */
env->regs[R_ESP] = frame_addr;
@@ -460,10 +553,37 @@ give_sigsegv:
force_sigsegv(sig);
}
+static int xrstor_sigcontext(CPUX86State *env, struct target_fpstate_fxsave *fxsave,
+ abi_ulong fxsave_addr)
+{
+ if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+ uint32_t extended_size = tswapl(fxsave->sw_reserved.extended_size);
+ uint32_t xstate_size = tswapl(fxsave->sw_reserved.xstate_size);
+ uint32_t xfeatures_size = xstate_size - TARGET_FXSAVE_SIZE;
+
+ /* Linux checks MAGIC2 using xstate_size, not extended_size. */
+ if (tswapl(fxsave->sw_reserved.magic1) == TARGET_FP_XSTATE_MAGIC1 &&
+ extended_size >= TARGET_FPSTATE_FXSAVE_OFFSET + xstate_size + TARGET_FP_XSTATE_MAGIC2_SIZE) {
+ if (!access_ok(env_cpu(env), VERIFY_READ, fxsave_addr,
+ extended_size - TARGET_FPSTATE_FXSAVE_OFFSET)) {
+ return 1;
+ }
+ if (tswapl(*(uint32_t *) &fxsave->xfeatures[xfeatures_size]) == TARGET_FP_XSTATE_MAGIC2) {
+ cpu_x86_xrstor(env, fxsave_addr);
+ return 0;
+ }
+ }
+ /* fall through to fxrstor */
+ }
+
+ cpu_x86_fxrstor(env, fxsave_addr);
+ return 0;
+}
+
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
- unsigned int err = 0;
+ int err = 1;
abi_ulong fpstate_addr;
unsigned int tmpflags;
@@ -514,20 +634,28 @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
fpstate_addr = tswapl(sc->fpstate);
if (fpstate_addr != 0) {
- if (!access_ok(env_cpu(env), VERIFY_READ, fpstate_addr,
- sizeof(struct target_fpstate))) {
- goto badframe;
+ struct target_fpstate *fpstate;
+ if (!lock_user_struct(VERIFY_READ, fpstate, fpstate_addr,
+ sizeof(struct target_fpstate))) {
+ return err;
}
#ifndef TARGET_X86_64
- cpu_x86_frstor(env, fpstate_addr, 1);
+ if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) {
+ cpu_x86_frstor(env, fpstate_addr, 1);
+ err = 0;
+ } else {
+ err = xrstor_sigcontext(env, &fpstate->fxsave,
+ fpstate_addr + TARGET_FPSTATE_FXSAVE_OFFSET);
+ }
#else
- cpu_x86_fxrstor(env, fpstate_addr);
+ err = xrstor_sigcontext(env, fpstate, fpstate_addr);
#endif
+ unlock_user_struct(fpstate, fpstate_addr, 0);
+ } else {
+ err = 0;
}
return err;
-badframe:
- return 1;
}
/* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
@@ -556,12 +684,12 @@ long do_sigreturn(CPUX86State *env)
if (restore_sigcontext(env, &frame->sc))
goto badframe;
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
#endif
@@ -585,10 +713,26 @@ long do_rt_sigreturn(CPUX86State *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+#ifndef TARGET_X86_64
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ install_sigtramp(tramp);
+
+ default_rt_sigreturn = sigtramp_page + 8;
+ install_rt_sigtramp(tramp + 8);
+
+ unlock_user(tramp, sigtramp_page, 2 * 8);
}
+#endif
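
For reference, the frame-placement rules introduced in get_sigframe() above reduce to three alignment cases: 8-byte alignment when FXSR is absent, 16-byte alignment of the fxsave area when only FXSR is present, and 64-byte alignment of the xsave area (plus the trailing MAGIC2 word) when XSAVE is available, in each case stepping back by the fxsave offset within the frame. Below is a minimal standalone sketch of that arithmetic; the names place_frame, sp, have_fxsr, have_xsave and the 836-byte xsave size are illustrative stand-ins, not QEMU definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sizes; in the patch these come from TARGET_FXSAVE_SIZE
       and xsave_area_size(env->xcr0, false) + TARGET_FP_XSTATE_MAGIC2_SIZE. */
    #define FXSAVE_SIZE  512u
    #define XSTATE_SIZE  (832u + 4u)   /* example xsave area + MAGIC2 word */

    /* Mirror of the three cases in get_sigframe(): align the save area,
       then step back by the offset of the fxsave region within the frame. */
    static uint32_t place_frame(uint32_t sp, uint32_t fxsave_offset,
                                int have_fxsr, int have_xsave)
    {
        if (!have_fxsr) {
            return (sp - (fxsave_offset + FXSAVE_SIZE)) & -8u;
        } else if (!have_xsave) {
            return ((sp - FXSAVE_SIZE) & -16u) - fxsave_offset;
        } else {
            return ((sp - XSTATE_SIZE) & -64u) - fxsave_offset;
        }
    }

    int main(void)
    {
        /* The 0x1c0 fxsave offset is only an example frame layout. */
        printf("%#x\n", (unsigned)place_frame(0xbffff000u, 0x1c0, 1, 1));
        return 0;
    }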
diff --git a/linux-user/i386/target_elf.h b/linux-user/i386/target_elf.h
index 1c6142e7da..238a9aba73 100644
--- a/linux-user/i386/target_elf.h
+++ b/linux-user/i386/target_elf.h
@@ -9,6 +9,6 @@
#define I386_TARGET_ELF_H
static inline const char *cpu_get_model(uint32_t eflags)
{
- return "qemu32";
+ return "max";
}
#endif
diff --git a/linux-user/i386/target_mman.h b/linux-user/i386/target_mman.h
new file mode 100644
index 0000000000..e3b8e1eaa6
--- /dev/null
+++ b/linux-user/i386/target_mman.h
@@ -0,0 +1,17 @@
+/*
+ * arch/x86/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
+ * __TASK_UNMAPPED_BASE(S) PAGE_ALIGN(S / 3)
+ *
+ * arch/x86/include/asm/page_32_types.h:
+ * TASK_SIZE_LOW TASK_SIZE
+ * TASK_SIZE __PAGE_OFFSET
+ * __PAGE_OFFSET CONFIG_PAGE_OFFSET
+ * CONFIG_PAGE_OFFSET 0xc0000000 (default in Kconfig)
+ */
+#define TASK_UNMAPPED_BASE 0x40000000
+
+/* arch/x86/include/asm/elf.h */
+#define ELF_ET_DYN_BASE 0x00400000
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/i386/target_prctl.h b/linux-user/i386/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/i386/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/i386/target_proc.h b/linux-user/i386/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/i386/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/i386/target_resource.h b/linux-user/i386/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/i386/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h
index 50361af874..9315cba241 100644
--- a/linux-user/i386/target_signal.h
+++ b/linux-user/i386/target_signal.h
@@ -1,25 +1,9 @@
#ifndef I386_TARGET_SIGNAL_H
#define I386_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* I386_TARGET_SIGNAL_H */
diff --git a/linux-user/i386/target_structs.h b/linux-user/i386/target_structs.h
index e22847fd20..3a06f373c3 100644
--- a/linux-user/i386/target_structs.h
+++ b/linux-user/i386/target_structs.h
@@ -1,58 +1 @@
-/*
- * i386 specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef I386_TARGET_STRUCTS_H
-#define I386_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/i386/target_syscall.h b/linux-user/i386/target_syscall.h
index ed356b3908..aaade06b13 100644
--- a/linux-user/i386/target_syscall.h
+++ b/linux-user/i386/target_syscall.h
@@ -150,7 +150,6 @@ struct target_vm86plus_struct {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/i386/vdso-asmoffset.h b/linux-user/i386/vdso-asmoffset.h
new file mode 100644
index 0000000000..4e5ee0dd49
--- /dev/null
+++ b/linux-user/i386/vdso-asmoffset.h
@@ -0,0 +1,6 @@
+/*
+ * offsetof(struct sigframe, sc.eip)
+ * offsetof(struct rt_sigframe, uc.tuc_mcontext.eip)
+ */
+#define SIGFRAME_SIGCONTEXT_eip 64
+#define RT_SIGFRAME_SIGCONTEXT_eip 220
diff --git a/linux-user/i386/vdso.S b/linux-user/i386/vdso.S
new file mode 100644
index 0000000000..e7a1f333a1
--- /dev/null
+++ b/linux-user/i386/vdso.S
@@ -0,0 +1,143 @@
+/*
+ * i386 linux replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+#include "vdso-asmoffset.h"
+
+.macro endf name
+ .globl \name
+ .type \name, @function
+ .size \name, . - \name
+.endm
+
+.macro vdso_syscall1 name, nr
+\name:
+ .cfi_startproc
+ mov %ebx, %edx
+ .cfi_register %ebx, %edx
+ mov 4(%esp), %ebx
+ mov $\nr, %eax
+ int $0x80
+ mov %edx, %ebx
+ ret
+ .cfi_endproc
+endf \name
+.endm
+
+.macro vdso_syscall2 name, nr
+\name:
+ .cfi_startproc
+ mov %ebx, %edx
+ .cfi_register %ebx, %edx
+ mov 4(%esp), %ebx
+ mov 8(%esp), %ecx
+ mov $\nr, %eax
+ int $0x80
+ mov %edx, %ebx
+ ret
+ .cfi_endproc
+endf \name
+.endm
+
+.macro vdso_syscall3 name, nr
+\name:
+ .cfi_startproc
+ push %ebx
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset %ebx, 0
+ mov 8(%esp), %ebx
+ mov 12(%esp), %ecx
+ mov 16(%esp), %edx
+ mov $\nr, %eax
+ int $0x80
+ pop %ebx
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore %ebx
+ ret
+ .cfi_endproc
+endf \name
+.endm
+
+__kernel_vsyscall:
+ .cfi_startproc
+ int $0x80
+ ret
+ .cfi_endproc
+endf __kernel_vsyscall
+
+vdso_syscall2 __vdso_clock_gettime, __NR_clock_gettime
+vdso_syscall2 __vdso_clock_gettime64, __NR_clock_gettime64
+vdso_syscall2 __vdso_clock_getres, __NR_clock_getres
+vdso_syscall2 __vdso_gettimeofday, __NR_gettimeofday
+vdso_syscall1 __vdso_time, __NR_time
+vdso_syscall3 __vdso_getcpu, __NR_getcpu
+
+/*
+ * Signal return handlers.
+ */
+
+ .cfi_startproc simple
+ .cfi_signal_frame
+
+/*
+ * For convenience, put the cfa just above eip in sigcontext, and count
+ * offsets backward from there. Re-compute the cfa in the two contexts
+ * we have for signal unwinding. This is far simpler than the
+ * DW_CFA_expression form that the kernel uses, and is equally correct.
+ */
+
+ .cfi_def_cfa %esp, SIGFRAME_SIGCONTEXT_eip + 4
+
+ .cfi_offset %eip, -4
+ /* err, -8 */
+ /* trapno, -12 */
+ .cfi_offset %eax, -16
+ .cfi_offset %ecx, -20
+ .cfi_offset %edx, -24
+ .cfi_offset %ebx, -28
+ .cfi_offset %esp, -32
+ .cfi_offset %ebp, -36
+ .cfi_offset %esi, -40
+ .cfi_offset %edi, -44
+
+/*
+ * While this frame is marked as a signal frame, that only applies to how
+ * the return address is handled for the outer frame. The return address
+ * that arrived here, from the inner frame, is not marked as a signal frame
+ * and so the unwinder still tries to subtract 1 to examine the presumed
+ * call insn. Thus we must extend the unwind info to a nop before the start.
+ */
+ nop
+
+__kernel_sigreturn:
+ popl %eax /* pop sig */
+ .cfi_adjust_cfa_offset -4
+ movl $__NR_sigreturn, %eax
+ int $0x80
+endf __kernel_sigreturn
+
+ .cfi_def_cfa_offset RT_SIGFRAME_SIGCONTEXT_eip + 4
+ nop
+
+__kernel_rt_sigreturn:
+ movl $__NR_rt_sigreturn, %eax
+ int $0x80
+endf __kernel_rt_sigreturn
+
+ .cfi_endproc
+
+/*
+ * TODO: Add elf notes. E.g.
+ *
+ * #include <linux/elfnote.h>
+ * ELFNOTE_START(Linux, 0, "a")
+ * .long LINUX_VERSION_CODE
+ * ELFNOTE_END
+ *
+ * but what version number would we set for QEMU?
+ */
diff --git a/linux-user/i386/vdso.ld b/linux-user/i386/vdso.ld
new file mode 100644
index 0000000000..326b7a8f98
--- /dev/null
+++ b/linux-user/i386/vdso.ld
@@ -0,0 +1,76 @@
+/*
+ * Linker script for linux i386 replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+ENTRY(__kernel_vsyscall)
+
+VERSION {
+ LINUX_2.6 {
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_time;
+ __vdso_clock_getres;
+ __vdso_clock_gettime64;
+ __vdso_getcpu;
+ };
+
+ LINUX_2.5 {
+ global:
+ __kernel_vsyscall;
+ __kernel_sigreturn;
+ __kernel_rt_sigreturn;
+ local: *;
+ };
+}
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ .data : {
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load =0x90909090
+}
diff --git a/linux-user/i386/vdso.so b/linux-user/i386/vdso.so
new file mode 100755
index 0000000000..bdece5dfcf
--- /dev/null
+++ b/linux-user/i386/vdso.so
Binary files differ
diff --git a/linux-user/include/host/aarch64/host-signal.h b/linux-user/include/host/aarch64/host-signal.h
new file mode 100644
index 0000000000..be079684a2
--- /dev/null
+++ b/linux-user/include/host/aarch64/host-signal.h
@@ -0,0 +1,87 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef AARCH64_HOST_SIGNAL_H
+#define AARCH64_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
+#ifndef ESR_MAGIC
+#define ESR_MAGIC 0x45535201
+struct esr_context {
+ struct _aarch64_ctx head;
+ uint64_t esr;
+};
+#endif
+
+static inline struct _aarch64_ctx *first_ctx(host_sigcontext *uc)
+{
+ return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
+}
+
+static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
+{
+ return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
+}
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.pc;
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.pc = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ struct _aarch64_ctx *hdr;
+ uint32_t insn;
+
+ /* Find the esr_context, which has the WnR bit in it */
+ for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) {
+ if (hdr->magic == ESR_MAGIC) {
+ struct esr_context const *ec = (struct esr_context const *)hdr;
+ uint64_t esr = ec->esr;
+
+ /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */
+ return extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
+ }
+ }
+
+ /*
+ * Fall back to parsing instructions; will only be needed
+ * for really ancient (pre-3.16) kernels.
+ */
+ insn = *(uint32_t *)host_signal_pc(uc);
+
+ return (insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */
+ || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */
+ || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */
+ || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */
+ || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */
+ || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */
+ || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */
+ /* Ignore bits 10, 11 & 21, controlling indexing. */
+ || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */
+ || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */
+ /* Ignore bits 23 & 24, controlling indexing. */
+ || (insn & 0x3a400000) == 0x28000000; /* C3.3.7,14-16 */
+}
+
+#endif
diff --git a/linux-user/include/host/arm/host-signal.h b/linux-user/include/host/arm/host-signal.h
new file mode 100644
index 0000000000..faba496d24
--- /dev/null
+++ b/linux-user/include/host/arm/host-signal.h
@@ -0,0 +1,43 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ARM_HOST_SIGNAL_H
+#define ARM_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.arm_pc;
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.arm_pc = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ /*
+ * In the FSR, bit 11 is WnR, assuming a v6 or
+ * later processor. On v5 we will always report
+ * this as a read, which will fail later.
+ */
+ uint32_t fsr = uc->uc_mcontext.error_code;
+ return extract32(fsr, 11, 1);
+}
+
+#endif
diff --git a/linux-user/include/host/i386/host-signal.h b/linux-user/include/host/i386/host-signal.h
new file mode 100644
index 0000000000..e2b64f077f
--- /dev/null
+++ b/linux-user/include/host/i386/host-signal.h
@@ -0,0 +1,38 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef I386_HOST_SIGNAL_H
+#define I386_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.gregs[REG_EIP];
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.gregs[REG_EIP] = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
+ && (uc->uc_mcontext.gregs[REG_ERR] & 0x2);
+}
+
+#endif
diff --git a/linux-user/include/host/loongarch64/host-signal.h b/linux-user/include/host/loongarch64/host-signal.h
new file mode 100644
index 0000000000..d33c3fc03e
--- /dev/null
+++ b/linux-user/include/host/loongarch64/host-signal.h
@@ -0,0 +1,93 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef LOONGARCH64_HOST_SIGNAL_H
+#define LOONGARCH64_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.__pc;
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.__pc = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ const uint32_t *pinsn = (const uint32_t *)host_signal_pc(uc);
+ uint32_t insn = pinsn[0];
+
+ /* Detect store by reading the instruction at the program counter. */
+ switch ((insn >> 26) & 0b111111) {
+ case 0b001000: /* {ll,sc}.[wd] */
+ switch ((insn >> 24) & 0b11) {
+ case 0b01: /* sc.w */
+ case 0b11: /* sc.d */
+ return true;
+ }
+ break;
+ case 0b001001: /* {ld,st}ox4.[wd] ({ld,st}ptr.[wd]) */
+ switch ((insn >> 24) & 0b11) {
+ case 0b01: /* stox4.w (stptr.w) */
+ case 0b11: /* stox4.d (stptr.d) */
+ return true;
+ }
+ break;
+ case 0b001010: /* {ld,st}.* family */
+ switch ((insn >> 22) & 0b1111) {
+ case 0b0100: /* st.b */
+ case 0b0101: /* st.h */
+ case 0b0110: /* st.w */
+ case 0b0111: /* st.d */
+ case 0b1101: /* fst.s */
+ case 0b1111: /* fst.d */
+ return true;
+ }
+ break;
+ case 0b001110: /* indexed, atomic, bounds-checking memory operations */
+ switch ((insn >> 15) & 0b11111111111) {
+ case 0b00000100000: /* stx.b */
+ case 0b00000101000: /* stx.h */
+ case 0b00000110000: /* stx.w */
+ case 0b00000111000: /* stx.d */
+ case 0b00001110000: /* fstx.s */
+ case 0b00001111000: /* fstx.d */
+ case 0b00011101100: /* fstgt.s */
+ case 0b00011101101: /* fstgt.d */
+ case 0b00011101110: /* fstle.s */
+ case 0b00011101111: /* fstle.d */
+ case 0b00011111000: /* stgt.b */
+ case 0b00011111001: /* stgt.h */
+ case 0b00011111010: /* stgt.w */
+ case 0b00011111011: /* stgt.d */
+ case 0b00011111100: /* stle.b */
+ case 0b00011111101: /* stle.h */
+ case 0b00011111110: /* stle.w */
+ case 0b00011111111: /* stle.d */
+ case 0b00011000000 ... 0b00011100011: /* am* insns */
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+#endif
diff --git a/linux-user/include/host/mips/host-signal.h b/linux-user/include/host/mips/host-signal.h
new file mode 100644
index 0000000000..0dbc5cecfd
--- /dev/null
+++ b/linux-user/include/host/mips/host-signal.h
@@ -0,0 +1,75 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef MIPS_HOST_SIGNAL_H
+#define MIPS_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.pc;
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.pc = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+#if defined(__mips16) || defined(__mips_micromips)
+#error "Unsupported encoding"
+#endif
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ uint32_t insn = *(uint32_t *)host_signal_pc(uc);
+
+ /* Detect all store instructions at program counter. */
+ switch ((insn >> 26) & 077) {
+ case 050: /* SB */
+ case 051: /* SH */
+ case 052: /* SWL */
+ case 053: /* SW */
+ case 054: /* SDL */
+ case 055: /* SDR */
+ case 056: /* SWR */
+ case 070: /* SC */
+ case 071: /* SWC1 */
+ case 074: /* SCD */
+ case 075: /* SDC1 */
+ case 077: /* SD */
+#if !defined(__mips_isa_rev) || __mips_isa_rev < 6
+ case 072: /* SWC2 */
+ case 076: /* SDC2 */
+#endif
+ return true;
+ case 023: /* COP1X */
+ /*
+ * Required in all versions of MIPS64 since
+ * MIPS64r1 and subsequent versions of MIPS32r2.
+ */
+ switch (insn & 077) {
+ case 010: /* SWXC1 */
+ case 011: /* SDXC1 */
+ case 015: /* SUXC1 */
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+#endif
diff --git a/linux-user/include/host/ppc/host-signal.h b/linux-user/include/host/ppc/host-signal.h
new file mode 100644
index 0000000000..de25c803f5
--- /dev/null
+++ b/linux-user/include/host/ppc/host-signal.h
@@ -0,0 +1,39 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2022 Linaro Ltd.
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_HOST_SIGNAL_H
+#define PPC_HOST_SIGNAL_H
+
+#include <asm/ptrace.h>
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.regs->nip;
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.regs->nip = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ return uc->uc_mcontext.regs->trap != 0x400
+ && (uc->uc_mcontext.regs->dsisr & 0x02000000);
+}
+
+#endif
diff --git a/linux-user/include/host/ppc64/host-signal.h b/linux-user/include/host/ppc64/host-signal.h
new file mode 100644
index 0000000000..c4ea866472
--- /dev/null
+++ b/linux-user/include/host/ppc64/host-signal.h
@@ -0,0 +1,41 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_HOST_SIGNAL_H
+#define PPC_HOST_SIGNAL_H
+
+/* Needed for PT_* constants */
+#include <asm/ptrace.h>
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.gp_regs[PT_NIP];
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.gp_regs[PT_NIP] = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ return uc->uc_mcontext.gp_regs[PT_TRAP] != 0x400
+ && (uc->uc_mcontext.gp_regs[PT_DSISR] & 0x02000000);
+}
+
+#endif
diff --git a/linux-user/include/host/riscv/host-signal.h b/linux-user/include/host/riscv/host-signal.h
new file mode 100644
index 0000000000..decacb2325
--- /dev/null
+++ b/linux-user/include/host/riscv/host-signal.h
@@ -0,0 +1,71 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef RISCV_HOST_SIGNAL_H
+#define RISCV_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.__gregs[REG_PC];
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.__gregs[REG_PC] = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ /*
+ * Detect store by reading the instruction at the program counter.
+ * Do not read more than 16 bits, because we have not yet determined
+ * the size of the instruction.
+ */
+ const uint16_t *pinsn = (const uint16_t *)host_signal_pc(uc);
+ uint16_t insn = pinsn[0];
+
+ /* 16-bit instructions */
+ switch (insn & 0xe003) {
+ case 0xa000: /* c.fsd */
+ case 0xc000: /* c.sw */
+ case 0xe000: /* c.sd (rv64) / c.fsw (rv32) */
+ case 0xa002: /* c.fsdsp */
+ case 0xc002: /* c.swsp */
+ case 0xe002: /* c.sdsp (rv64) / c.fswsp (rv32) */
+ return true;
+ }
+
+ /* 32-bit instructions, major opcodes */
+ switch (insn & 0x7f) {
+ case 0x23: /* store */
+ case 0x27: /* store-fp */
+ return true;
+ case 0x2f: /* amo */
+ /*
+ * The AMO function code is in bits 25-31, unread as yet.
+ * The AMO functions are LR (read), SC (write), and the
+ * rest are all read-modify-write.
+ */
+ insn = pinsn[1];
+ return (insn >> 11) != 2; /* LR */
+ }
+
+ return false;
+}
+
+#endif
diff --git a/linux-user/include/host/s390x/host-signal.h b/linux-user/include/host/s390x/host-signal.h
new file mode 100644
index 0000000000..e6d3ec26dc
--- /dev/null
+++ b/linux-user/include/host/s390x/host-signal.h
@@ -0,0 +1,138 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef S390_HOST_SIGNAL_H
+#define S390_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.psw.addr;
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.psw.addr = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ uint16_t *pinsn = (uint16_t *)host_signal_pc(uc);
+
+ /*
+ * ??? On linux, the non-rt signal handler has 4 (!) arguments instead
+ * of the normal 2 arguments. The 4th argument contains the "Translation-
+ * Exception Identification for DAT Exceptions" from the hardware (aka
+ * "int_parm_long"), which does in fact contain the is_write value.
+ * The rt signal handler, as far as I can tell, does not give this value
+ * at all. Not that we could get to it from here even if it were.
+ * So fall back to parsing instructions. Treat read-modify-write ones as
+ * writes, which is not fully correct, but for tracking self-modifying code
+ * this is better than treating them as reads. Checking si_addr page flags
+ * might be a viable improvement, albeit a racy one.
+ */
+ /* ??? This is not even close to complete. */
+ switch (pinsn[0] >> 8) {
+ case 0x50: /* ST */
+ case 0x42: /* STC */
+ case 0x40: /* STH */
+ case 0x44: /* EX */
+ case 0xba: /* CS */
+ case 0xbb: /* CDS */
+ return true;
+ case 0xc4: /* RIL format insns */
+ switch (pinsn[0] & 0xf) {
+ case 0xf: /* STRL */
+ case 0xb: /* STGRL */
+ case 0x7: /* STHRL */
+ return true;
+ }
+ break;
+ case 0xc6: /* RIL-b format insns */
+ switch (pinsn[0] & 0xf) {
+ case 0x0: /* EXRL */
+ return true;
+ }
+ break;
+ case 0xc8: /* SSF format insns */
+ switch (pinsn[0] & 0xf) {
+ case 0x2: /* CSST */
+ return true;
+ }
+ break;
+ case 0xe3: /* RXY format insns */
+ switch (pinsn[2] & 0xff) {
+ case 0x50: /* STY */
+ case 0x24: /* STG */
+ case 0x72: /* STCY */
+ case 0x70: /* STHY */
+ case 0x8e: /* STPQ */
+ case 0x3f: /* STRVH */
+ case 0x3e: /* STRV */
+ case 0x2f: /* STRVG */
+ return true;
+ }
+ break;
+ case 0xe6:
+ switch (pinsn[2] & 0xff) {
+ case 0x09: /* VSTEBRH */
+ case 0x0a: /* VSTEBRG */
+ case 0x0b: /* VSTEBRF */
+ case 0x0e: /* VSTBR */
+ case 0x0f: /* VSTER */
+ case 0x3f: /* VSTRLR */
+ return true;
+ }
+ break;
+ case 0xe7:
+ switch (pinsn[2] & 0xff) {
+ case 0x08: /* VSTEB */
+ case 0x09: /* VSTEH */
+ case 0x0a: /* VSTEG */
+ case 0x0b: /* VSTEF */
+ case 0x0e: /* VST */
+ case 0x1a: /* VSCEG */
+ case 0x1b: /* VSCEF */
+ case 0x3e: /* VSTM */
+ case 0x3f: /* VSTL */
+ return true;
+ }
+ break;
+ case 0xeb: /* RSY format insns */
+ switch (pinsn[2] & 0xff) {
+ case 0x14: /* CSY */
+ case 0x30: /* CSG */
+ case 0x31: /* CDSY */
+ case 0x3e: /* CDSG */
+ case 0xe4: /* LANG */
+ case 0xe6: /* LAOG */
+ case 0xe7: /* LAXG */
+ case 0xe8: /* LAAG */
+ case 0xea: /* LAALG */
+ case 0xf4: /* LAN */
+ case 0xf6: /* LAO */
+ case 0xf7: /* LAX */
+ case 0xfa: /* LAAL */
+ case 0xf8: /* LAA */
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+#endif
diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h
new file mode 100644
index 0000000000..64957c2bca
--- /dev/null
+++ b/linux-user/include/host/sparc64/host-signal.h
@@ -0,0 +1,64 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SPARC64_HOST_SIGNAL_H
+#define SPARC64_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is struct sigcontext. */
+typedef struct sigcontext host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *sc)
+{
+ return sc->sigc_regs.tpc;
+}
+
+static inline void host_signal_set_pc(host_sigcontext *sc, uintptr_t pc)
+{
+ sc->sigc_regs.tpc = pc;
+ sc->sigc_regs.tnpc = pc + 4;
+}
+
+static inline void *host_signal_mask(host_sigcontext *sc)
+{
+ return &sc->sigc_mask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ uint32_t insn = *(uint32_t *)host_signal_pc(uc);
+
+ if ((insn >> 30) == 3) {
+ switch ((insn >> 19) & 0x3f) {
+ case 0x05: /* stb */
+ case 0x15: /* stba */
+ case 0x06: /* sth */
+ case 0x16: /* stha */
+ case 0x04: /* st */
+ case 0x14: /* sta */
+ case 0x07: /* std */
+ case 0x17: /* stda */
+ case 0x0e: /* stx */
+ case 0x1e: /* stxa */
+ case 0x24: /* stf */
+ case 0x34: /* stfa */
+ case 0x27: /* stdf */
+ case 0x37: /* stdfa */
+ case 0x26: /* stqf */
+ case 0x36: /* stqfa */
+ case 0x25: /* stfsr */
+ case 0x3c: /* casa */
+ case 0x3e: /* casxa */
+ return true;
+ }
+ }
+ return false;
+}
+
+#endif
diff --git a/linux-user/include/host/x86_64/host-signal.h b/linux-user/include/host/x86_64/host-signal.h
new file mode 100644
index 0000000000..5a7627fedc
--- /dev/null
+++ b/linux-user/include/host/x86_64/host-signal.h
@@ -0,0 +1,37 @@
+/*
+ * host-signal.h: signal info dependent on the host architecture
+ *
+ * Copyright (C) 2021 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef X86_64_HOST_SIGNAL_H
+#define X86_64_HOST_SIGNAL_H
+
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
+{
+ return uc->uc_mcontext.gregs[REG_RIP];
+}
+
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
+{
+ uc->uc_mcontext.gregs[REG_RIP] = pc;
+}
+
+static inline void *host_signal_mask(host_sigcontext *uc)
+{
+ return &uc->uc_sigmask;
+}
+
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
+{
+ return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
+ && (uc->uc_mcontext.gregs[REG_ERR] & 0x2);
+}
+
+#endif
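
On x86_64 no instruction parsing is needed: trap number 0xe is the page-fault exception, and bit 1 of the page-fault error code is set when the access was a write. A tiny sketch of that test on plain integer values; the REG_TRAPNO/REG_ERR indices themselves come from the host's <sys/ucontext.h> on glibc:

    #include <stdbool.h>
    #include <stdio.h>

    #define PF_TRAPNO    0xe   /* #PF exception vector */
    #define PF_ERR_WRITE 0x2   /* error-code bit 1: write access */

    static bool is_write_fault(long trapno, long err)
    {
        return trapno == PF_TRAPNO && (err & PF_ERR_WRITE);
    }

    int main(void)
    {
        printf("%d\n", is_write_fault(0xe, 0x6));  /* user write fault -> 1 */
        printf("%d\n", is_write_fault(0xe, 0x4));  /* user read fault  -> 0 */
        return 0;
    }
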
diff --git a/linux-user/include/special-errno.h b/linux-user/include/special-errno.h
new file mode 100644
index 0000000000..4120455baa
--- /dev/null
+++ b/linux-user/include/special-errno.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU internal errno values for implementing user-only POSIX.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2021 Linaro, Ltd.
+ */
+
+#ifndef SPECIAL_ERRNO_H
+#define SPECIAL_ERRNO_H
+
+/*
+ * All of these are QEMU internal, not visible to the guest.
+ * They should be chosen so as to not overlap with any host
+ * or guest errno.
+ */
+
+/*
+ * This is returned when a system call should be restarted, to tell the
+ * main loop that it should wind the guest PC backwards so it will
+ * re-execute the syscall after handling any pending signals.
+ */
+#define QEMU_ERESTARTSYS 512
+
+/*
+ * This is returned after a successful sigreturn syscall, to indicate
+ * that it has correctly set the guest registers and so the main loop
+ * should not touch them.
+ */
+#define QEMU_ESIGRETURN 513
+
+#endif /* SPECIAL_ERRNO_H */
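
As the comments say, both values are internal: do_syscall() returns them negated, and each target's cpu_loop only needs two extra checks before writing the result register (the LoongArch loop added later in this patch is one concrete instance). A schematic, self-contained sketch of that dispatch with stand-in names:

    #include <stdio.h>

    #define QEMU_ERESTARTSYS 512
    #define QEMU_ESIGRETURN  513

    /* Stand-in for do_syscall(); nr==1 simulates an interrupted syscall. */
    static long do_syscall_stub(long nr) { return nr == 1 ? -QEMU_ERESTARTSYS : 42; }

    static void handle_syscall(long nr, long *pc, long *ret_reg)
    {
        long ret = do_syscall_stub(nr);

        if (ret == -QEMU_ERESTARTSYS) {
            *pc -= 4;            /* wind back: re-execute after signals */
        } else if (ret != -QEMU_ESIGRETURN) {
            *ret_reg = ret;      /* sigreturn already set the registers */
        }
    }

    int main(void)
    {
        long pc = 0x1004, a0 = 0;
        handle_syscall(1, &pc, &a0);   /* restart: pc steps back, a0 untouched */
        printf("pc=%#lx a0=%ld\n", pc, a0);
        handle_syscall(2, &pc, &a0);   /* normal: result lands in a0 */
        printf("pc=%#lx a0=%ld\n", pc, a0);
        return 0;
    }
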
diff --git a/linux-user/ioctls.h b/linux-user/ioctls.h
index 7193c3b226..d508d0c04a 100644
--- a/linux-user/ioctls.h
+++ b/linux-user/ioctls.h
@@ -96,9 +96,7 @@
IOCTL(BLKROGET, IOC_R, MK_PTR(TYPE_INT))
IOCTL(BLKRRPART, 0, TYPE_NULL)
IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
-#ifdef BLKGETSIZE64
IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))
-#endif
IOCTL(BLKFLSBUF, 0, TYPE_NULL)
IOCTL(BLKRASET, 0, TYPE_INT)
IOCTL(BLKRAGET, IOC_R, MK_PTR(TYPE_LONG))
@@ -107,33 +105,15 @@
IOCTL_SPECIAL(BLKPG, IOC_W, do_ioctl_blkpg,
MK_PTR(MK_STRUCT(STRUCT_blkpg_ioctl_arg)))
-#ifdef BLKDISCARD
IOCTL(BLKDISCARD, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2)))
-#endif
-#ifdef BLKIOMIN
IOCTL(BLKIOMIN, IOC_R, MK_PTR(TYPE_INT))
-#endif
-#ifdef BLKIOOPT
IOCTL(BLKIOOPT, IOC_R, MK_PTR(TYPE_INT))
-#endif
-#ifdef BLKALIGNOFF
IOCTL(BLKALIGNOFF, IOC_R, MK_PTR(TYPE_INT))
-#endif
-#ifdef BLKPBSZGET
IOCTL(BLKPBSZGET, IOC_R, MK_PTR(TYPE_INT))
-#endif
-#ifdef BLKDISCARDZEROES
IOCTL(BLKDISCARDZEROES, IOC_R, MK_PTR(TYPE_INT))
-#endif
-#ifdef BLKSECDISCARD
IOCTL(BLKSECDISCARD, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2)))
-#endif
-#ifdef BLKROTATIONAL
IOCTL(BLKROTATIONAL, IOC_R, MK_PTR(TYPE_SHORT))
-#endif
-#ifdef BLKZEROOUT
IOCTL(BLKZEROOUT, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2)))
-#endif
IOCTL(FDMSGON, 0, TYPE_NULL)
IOCTL(FDMSGOFF, 0, TYPE_NULL)
@@ -149,17 +129,22 @@
IOCTL(FDTWADDLE, 0, TYPE_NULL)
IOCTL(FDEJECT, 0, TYPE_NULL)
-#ifdef FIBMAP
IOCTL(FIBMAP, IOC_W | IOC_R, MK_PTR(TYPE_LONG))
-#endif
#ifdef FICLONE
IOCTL(FICLONE, IOC_W, TYPE_INT)
IOCTL(FICLONERANGE, IOC_W, MK_PTR(MK_STRUCT(STRUCT_file_clone_range)))
#endif
+#ifdef FIFREEZE
+ IOCTL(FIFREEZE, IOC_W | IOC_R, TYPE_INT)
+#endif
+#ifdef FITHAW
+ IOCTL(FITHAW, IOC_W | IOC_R, TYPE_INT)
+#endif
+#ifdef FITRIM
+ IOCTL(FITRIM, IOC_W | IOC_R, MK_PTR(MK_STRUCT(STRUCT_fstrim_range)))
+#endif
-#ifdef FIGETBSZ
IOCTL(FIGETBSZ, IOC_R, MK_PTR(TYPE_LONG))
-#endif
#ifdef CONFIG_FIEMAP
IOCTL_SPECIAL(FS_IOC_FIEMAP, IOC_W | IOC_R, do_ioctl_fs_ioc_fiemap,
MK_PTR(MK_STRUCT(STRUCT_fiemap)))
@@ -637,6 +622,10 @@
IOCTL(LOOP_SET_STATUS64, IOC_W, MK_PTR(MK_STRUCT(STRUCT_loop_info64)))
IOCTL(LOOP_GET_STATUS64, IOC_R, MK_PTR(MK_STRUCT(STRUCT_loop_info64)))
IOCTL(LOOP_CHANGE_FD, 0, TYPE_INT)
+ IOCTL(LOOP_SET_CAPACITY, 0, TYPE_INT)
+ IOCTL(LOOP_SET_DIRECT_IO, 0, TYPE_INT)
+ IOCTL(LOOP_SET_BLOCK_SIZE, 0, TYPE_INT)
+ IOCTL(LOOP_CONFIGURE, IOC_W, MK_PTR(MK_STRUCT(STRUCT_loop_config)))
IOCTL(LOOP_CTL_ADD, 0, TYPE_INT)
IOCTL(LOOP_CTL_REMOVE, 0, TYPE_INT)
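
LOOP_CONFIGURE, newly forwarded above, attaches a backing file and sets the loop parameters in a single ioctl. A host-side usage sketch of the underlying kernel interface, assuming headers and a kernel new enough to provide LOOP_CONFIGURE (Linux 5.8+) and hypothetical device/file names:

    #include <fcntl.h>
    #include <linux/loop.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        int loopfd  = open("/dev/loop7", O_RDWR);   /* hypothetical free device */
        int backing = open("disk.img", O_RDWR);     /* hypothetical backing file */
        if (loopfd < 0 || backing < 0) {
            perror("open");
            return 1;
        }

        struct loop_config cfg;
        memset(&cfg, 0, sizeof(cfg));
        cfg.fd = backing;
        cfg.block_size = 4096;

        if (ioctl(loopfd, LOOP_CONFIGURE, &cfg) < 0) {
            perror("LOOP_CONFIGURE");
            return 1;
        }
        puts("loop device configured");
        return 0;
    }
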
diff --git a/linux-user/linux_loop.h b/linux-user/linux_loop.h
index c69fea11e4..f80b96f1ff 100644
--- a/linux-user/linux_loop.h
+++ b/linux-user/linux_loop.h
@@ -96,6 +96,8 @@ struct loop_info64 {
#define LOOP_CHANGE_FD 0x4C06
#define LOOP_SET_CAPACITY 0x4C07
#define LOOP_SET_DIRECT_IO 0x4C08
+#define LOOP_SET_BLOCK_SIZE 0x4C09
+#define LOOP_CONFIGURE 0x4C0A
/* /dev/loop-control interface */
#define LOOP_CTL_ADD 0x4C80
diff --git a/linux-user/linuxload.c b/linux-user/linuxload.c
index 2ed5fc45ed..37f132be4a 100644
--- a/linux-user/linuxload.c
+++ b/linux-user/linuxload.c
@@ -3,7 +3,9 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
+#include "user-mmap.h"
#include "loader.h"
+#include "qapi/error.h"
#define NGROUPS 32
@@ -37,7 +39,7 @@ static int prepare_binprm(struct linux_binprm *bprm)
int mode;
int retval;
- if (fstat(bprm->fd, &st) < 0) {
+ if (fstat(bprm->src.fd, &st) < 0) {
return -errno;
}
@@ -67,7 +69,7 @@ static int prepare_binprm(struct linux_binprm *bprm)
bprm->e_gid = st.st_gid;
}
- retval = read(bprm->fd, bprm->buf, BPRM_BUF_SIZE);
+ retval = read(bprm->src.fd, bprm->buf, BPRM_BUF_SIZE);
if (retval < 0) {
perror("prepare_binprm");
exit(-1);
@@ -76,6 +78,10 @@ static int prepare_binprm(struct linux_binprm *bprm)
/* Make sure the rest of the loader won't read garbage. */
memset(bprm->buf + retval, 0, BPRM_BUF_SIZE - retval);
}
+
+ bprm->src.cache = bprm->buf;
+ bprm->src.cache_size = retval;
+
return retval;
}
@@ -83,7 +89,7 @@ static int prepare_binprm(struct linux_binprm *bprm)
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
abi_ulong stringp, int push_ptr)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
int n = sizeof(abi_ulong);
abi_ulong envp;
abi_ulong argv;
@@ -92,6 +98,11 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
envp = sp;
sp -= (argc + 1) * n;
argv = sp;
+ ts->info->envp = envp;
+ ts->info->envc = envc;
+ ts->info->argv = argv;
+ ts->info->argc = argc;
+
if (push_ptr) {
/* FIXME - handle put_user() failures */
sp -= n;
@@ -99,19 +110,22 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
sp -= n;
put_user_ual(argv, sp);
}
+
sp -= n;
/* FIXME - handle put_user() failures */
put_user_ual(argc, sp);
- ts->info->arg_start = stringp;
+
+ ts->info->arg_strings = stringp;
while (argc-- > 0) {
/* FIXME - handle put_user() failures */
put_user_ual(stringp, argv);
argv += n;
stringp += target_strlen(stringp) + 1;
}
- ts->info->arg_end = stringp;
/* FIXME - handle put_user() failures */
put_user_ual(0, argv);
+
+ ts->info->env_strings = stringp;
while (envc-- > 0) {
/* FIXME - handle put_user() failures */
put_user_ual(stringp, envp);
@@ -130,7 +144,7 @@ int loader_exec(int fdexec, const char *filename, char **argv, char **envp,
{
int retval;
- bprm->fd = fdexec;
+ bprm->src.fd = fdexec;
bprm->filename = (char *)filename;
bprm->argc = count(argv);
bprm->argv = argv;
@@ -139,29 +153,112 @@ int loader_exec(int fdexec, const char *filename, char **argv, char **envp,
retval = prepare_binprm(bprm);
- if (retval >= 0) {
- if (bprm->buf[0] == 0x7f
- && bprm->buf[1] == 'E'
- && bprm->buf[2] == 'L'
- && bprm->buf[3] == 'F') {
- retval = load_elf_binary(bprm, infop);
+ if (retval < 4) {
+ return -ENOEXEC;
+ }
+ if (bprm->buf[0] == 0x7f
+ && bprm->buf[1] == 'E'
+ && bprm->buf[2] == 'L'
+ && bprm->buf[3] == 'F') {
+ retval = load_elf_binary(bprm, infop);
#if defined(TARGET_HAS_BFLT)
- } else if (bprm->buf[0] == 'b'
- && bprm->buf[1] == 'F'
- && bprm->buf[2] == 'L'
- && bprm->buf[3] == 'T') {
- retval = load_flt_binary(bprm, infop);
+ } else if (bprm->buf[0] == 'b'
+ && bprm->buf[1] == 'F'
+ && bprm->buf[2] == 'L'
+ && bprm->buf[3] == 'T') {
+ retval = load_flt_binary(bprm, infop);
#endif
- } else {
- return -ENOEXEC;
- }
+ } else {
+ return -ENOEXEC;
}
-
- if (retval >= 0) {
- /* success. Initialize important registers */
- do_init_thread(regs, infop);
+ if (retval < 0) {
return retval;
}
- return retval;
+ /* Success. Initialize important registers. */
+ do_init_thread(regs, infop);
+ return 0;
+}
+
+bool imgsrc_read(void *dst, off_t offset, size_t len,
+ const ImageSource *img, Error **errp)
+{
+ ssize_t ret;
+
+ if (offset + len <= img->cache_size) {
+ memcpy(dst, img->cache + offset, len);
+ return true;
+ }
+
+ if (img->fd < 0) {
+ error_setg(errp, "read past end of buffer");
+ return false;
+ }
+
+ ret = pread(img->fd, dst, len, offset);
+ if (ret == len) {
+ return true;
+ }
+ if (ret < 0) {
+ error_setg_errno(errp, errno, "Error reading file header");
+ } else {
+ error_setg(errp, "Incomplete read of file header");
+ }
+ return false;
+}
+
+void *imgsrc_read_alloc(off_t offset, size_t len,
+ const ImageSource *img, Error **errp)
+{
+ void *alloc = g_malloc(len);
+ bool ok = imgsrc_read(alloc, offset, len, img, errp);
+
+ if (!ok) {
+ g_free(alloc);
+ alloc = NULL;
+ }
+ return alloc;
+}
+
+abi_long imgsrc_mmap(abi_ulong start, abi_ulong len, int prot,
+ int flags, const ImageSource *src, abi_ulong offset)
+{
+ const int prot_write = PROT_READ | PROT_WRITE;
+ abi_long ret;
+ void *haddr;
+
+ assert(flags == (MAP_PRIVATE | MAP_FIXED));
+
+ if (src->fd >= 0) {
+ return target_mmap(start, len, prot, flags, src->fd, offset);
+ }
+
+ /*
+ * This case is for the vdso; we don't expect bad images.
+ * The mmap may extend beyond the end of the image, especially
+ * to the end of the page. Zero fill.
+ */
+ assert(offset < src->cache_size);
+
+ ret = target_mmap(start, len, prot_write, flags | MAP_ANON, -1, 0);
+ if (ret == -1) {
+ return ret;
+ }
+
+ haddr = lock_user(VERIFY_WRITE, start, len, 0);
+ assert(haddr != NULL);
+ if (offset + len <= src->cache_size) {
+ memcpy(haddr, src->cache + offset, len);
+ } else {
+ size_t rest = src->cache_size - offset;
+ memcpy(haddr, src->cache + offset, rest);
+ memset(haddr + rest, 0, len - rest);
+ }
+ unlock_user(haddr, start, len);
+
+ if (prot != prot_write) {
+ target_mprotect(start, len, prot);
+ }
+
+ return ret;
}
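
imgsrc_read() prefers the in-memory cache (the BPRM buffer set up in prepare_binprm, or the embedded vdso image) and only falls back to pread() when a real file descriptor is present. A simplified standalone mirror of that decision, using a local struct in place of the real ImageSource:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for ImageSource, cache-backed only (fd < 0). */
    typedef struct {
        const unsigned char *cache;
        unsigned int cache_size;
        int fd;
    } MiniImageSource;

    static bool mini_read(void *dst, unsigned off, unsigned len,
                          const MiniImageSource *img)
    {
        if (off + len <= img->cache_size) {
            memcpy(dst, img->cache + off, len);
            return true;
        }
        /* The real code would pread(img->fd, ...) here; with fd < 0
           a read past the cached bytes is simply an error. */
        return false;
    }

    int main(void)
    {
        unsigned char image[] = { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0 };
        MiniImageSource src = { image, sizeof(image), -1 };
        unsigned char ident[4];

        if (mini_read(ident, 0, sizeof(ident), &src)) {
            printf("magic: %02x %c%c%c\n", ident[0], ident[1], ident[2], ident[3]);
        }
        if (!mini_read(ident, 6, 4, &src)) {
            puts("read past end of cached buffer rejected");
        }
        return 0;
    }
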
diff --git a/linux-user/loader.h b/linux-user/loader.h
index f375ee0679..e102e6f410 100644
--- a/linux-user/loader.h
+++ b/linux-user/loader.h
@@ -18,6 +18,48 @@
#ifndef LINUX_USER_LOADER_H
#define LINUX_USER_LOADER_H
+typedef struct {
+ const void *cache;
+ unsigned int cache_size;
+ int fd;
+} ImageSource;
+
+/**
+ * imgsrc_read: Read from ImageSource
+ * @dst: destination for read
+ * @offset: offset within file for read
+ * @len: size of the read
+ * @img: ImageSource to read from
+ * @errp: Error details.
+ *
+ * Read into @dst, using the cache when possible.
+ */
+bool imgsrc_read(void *dst, off_t offset, size_t len,
+ const ImageSource *img, Error **errp);
+
+/**
+ * imgsrc_read_alloc: Read from ImageSource
+ * @offset: offset within file for read
+ * @len: size of the read
+ * @img: ImageSource to read from
+ * @errp: Error details.
+ *
+ * Read into newly allocated memory, using the cache when possible.
+ */
+void *imgsrc_read_alloc(off_t offset, size_t len,
+ const ImageSource *img, Error **errp);
+
+/**
+ * imgsrc_mmap: Map from ImageSource
+ *
+ * If @src has a file descriptor, pass on to target_mmap. Otherwise,
+ * this is "mapping" from a host buffer, which resolves to memcpy.
+ * Therefore, flags must be MAP_PRIVATE | MAP_FIXED; the argument is
+ * retained for clarity.
+ */
+abi_long imgsrc_mmap(abi_ulong start, abi_ulong len, int prot,
+ int flags, const ImageSource *src, abi_ulong offset);
+
/*
* Read a good amount of data initially, to hopefully get all the
* program headers loaded.
@@ -29,15 +71,15 @@
* used when loading binaries.
*/
struct linux_binprm {
- char buf[BPRM_BUF_SIZE] __attribute__((aligned));
- abi_ulong p;
- int fd;
- int e_uid, e_gid;
- int argc, envc;
- char **argv;
- char **envp;
- char *filename; /* Name of binary */
- int (*core_dump)(int, const CPUArchState *); /* coredump routine */
+ char buf[BPRM_BUF_SIZE] __attribute__((aligned));
+ ImageSource src;
+ abi_ulong p;
+ int e_uid, e_gid;
+ int argc, envc;
+ char **argv;
+ char **envp;
+ char *filename; /* Name of binary */
+ int (*core_dump)(int, const CPUArchState *); /* coredump routine */
};
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop);
@@ -56,4 +98,13 @@ abi_long memcpy_to_target(abi_ulong dest, const void *src,
extern unsigned long guest_stack_size;
+#if defined(TARGET_S390X) || defined(TARGET_AARCH64) || defined(TARGET_ARM)
+uint32_t get_elf_hwcap(void);
+const char *elf_hwcap_str(uint32_t bit);
+#endif
+#if defined(TARGET_AARCH64) || defined(TARGET_ARM)
+uint64_t get_elf_hwcap2(void);
+const char *elf_hwcap2_str(uint32_t bit);
+#endif
+
#endif /* LINUX_USER_LOADER_H */
diff --git a/linux-user/loongarch64/Makefile.vdso b/linux-user/loongarch64/Makefile.vdso
new file mode 100644
index 0000000000..369de13344
--- /dev/null
+++ b/linux-user/loongarch64/Makefile.vdso
@@ -0,0 +1,11 @@
+include $(BUILD_DIR)/tests/tcg/loongarch64-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/loongarch64
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso.so
+
+$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both \
+ -Wl,--no-warn-rwx-segments -Wl,-T,$(SUBDIR)/vdso.ld $<
diff --git a/linux-user/loongarch64/cpu_loop.c b/linux-user/loongarch64/cpu_loop.c
new file mode 100644
index 0000000000..73d7b6796a
--- /dev/null
+++ b/linux-user/loongarch64/cpu_loop.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU LoongArch user cpu_loop.
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "user-internals.h"
+#include "cpu_loop-common.h"
+#include "signal-common.h"
+
+void cpu_loop(CPULoongArchState *env)
+{
+ CPUState *cs = env_cpu(env);
+ int trapnr, si_code;
+ abi_long ret;
+
+ for (;;) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+
+ switch (trapnr) {
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCCODE_SYS:
+ env->pc += 4;
+ ret = do_syscall(env, env->gpr[11],
+ env->gpr[4], env->gpr[5],
+ env->gpr[6], env->gpr[7],
+ env->gpr[8], env->gpr[9],
+ -1, -1);
+ if (ret == -QEMU_ERESTARTSYS) {
+ env->pc -= 4;
+ break;
+ }
+ if (ret == -QEMU_ESIGRETURN) {
+ /*
+ * Returning from a successful sigreturn syscall.
+ * Avoid clobbering register state.
+ */
+ break;
+ }
+ env->gpr[4] = ret;
+ break;
+ case EXCCODE_INE:
+ force_sig_fault(TARGET_SIGILL, 0, env->pc);
+ break;
+ case EXCCODE_FPE:
+ si_code = TARGET_FPE_FLTUNK;
+ if (GET_FP_CAUSE(env->fcsr0) & FP_INVALID) {
+ si_code = TARGET_FPE_FLTINV;
+ } else if (GET_FP_CAUSE(env->fcsr0) & FP_DIV0) {
+ si_code = TARGET_FPE_FLTDIV;
+ } else if (GET_FP_CAUSE(env->fcsr0) & FP_OVERFLOW) {
+ si_code = TARGET_FPE_FLTOVF;
+ } else if (GET_FP_CAUSE(env->fcsr0) & FP_UNDERFLOW) {
+ si_code = TARGET_FPE_FLTUND;
+ } else if (GET_FP_CAUSE(env->fcsr0) & FP_INEXACT) {
+ si_code = TARGET_FPE_FLTRES;
+ }
+ force_sig_fault(TARGET_SIGFPE, si_code, env->pc);
+ break;
+ case EXCP_DEBUG:
+ case EXCCODE_BRK:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
+ break;
+ case EXCCODE_BCE:
+ force_sig_fault(TARGET_SIGSYS, TARGET_SI_KERNEL, env->pc);
+ break;
+
+ /*
+ * Begin with LSX and LASX disabled, then enable on the first trap.
+ * In this way we can tell if the unit is in use. This is used to
+ * choose the layout of any signal frame.
+ */
+ case EXCCODE_SXD:
+ env->CSR_EUEN |= R_CSR_EUEN_SXE_MASK;
+ break;
+ case EXCCODE_ASXD:
+ env->CSR_EUEN |= R_CSR_EUEN_ASXE_MASK;
+ break;
+
+ case EXCP_ATOMIC:
+ cpu_exec_step_atomic(cs);
+ break;
+ default:
+ EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
+ exit(EXIT_FAILURE);
+ }
+ process_pending_signals(env);
+ }
+}
+
+void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ env->gpr[i] = regs->regs[i];
+ }
+ env->pc = regs->csr.era;
+}
diff --git a/linux-user/loongarch64/meson.build b/linux-user/loongarch64/meson.build
new file mode 100644
index 0000000000..17896535f0
--- /dev/null
+++ b/linux-user/loongarch64/meson.build
@@ -0,0 +1,4 @@
+vdso_inc = gen_vdso.process('vdso.so',
+ extra_args: ['-r', '__vdso_rt_sigreturn'])
+
+linux_user_ss.add(when: 'TARGET_LOONGARCH64', if_true: vdso_inc)
diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c
new file mode 100644
index 0000000000..1a322f9697
--- /dev/null
+++ b/linux-user/loongarch64/signal.c
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch emulation of Linux signals
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "user-internals.h"
+#include "signal-common.h"
+#include "linux-user/trace.h"
+#include "target/loongarch/internals.h"
+#include "target/loongarch/vec.h"
+#include "vdso-asmoffset.h"
+
+/* FP context was used */
+#define SC_USED_FP (1 << 0)
+
+struct target_sigcontext {
+ abi_ulong sc_pc;
+ abi_ulong sc_regs[32];
+ abi_uint sc_flags;
+ abi_ulong sc_extcontext[0] QEMU_ALIGNED(16);
+};
+
+QEMU_BUILD_BUG_ON(sizeof(struct target_sigcontext) != sizeof_sigcontext);
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_pc)
+ != offsetof_sigcontext_pc);
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_regs)
+ != offsetof_sigcontext_gr);
+
+#define FPU_CTX_MAGIC 0x46505501
+#define FPU_CTX_ALIGN 8
+struct target_fpu_context {
+ abi_ulong regs[32];
+ abi_ulong fcc;
+ abi_uint fcsr;
+} QEMU_ALIGNED(FPU_CTX_ALIGN);
+
+QEMU_BUILD_BUG_ON(offsetof(struct target_fpu_context, regs)
+ != offsetof_fpucontext_fr);
+
+#define LSX_CTX_MAGIC 0x53580001
+#define LSX_CTX_ALIGN 16
+struct target_lsx_context {
+ abi_ulong regs[2 * 32];
+ abi_ulong fcc;
+ abi_uint fcsr;
+} QEMU_ALIGNED(LSX_CTX_ALIGN);
+
+#define LASX_CTX_MAGIC 0x41535801
+#define LASX_CTX_ALIGN 32
+struct target_lasx_context {
+ abi_ulong regs[4 * 32];
+ abi_ulong fcc;
+ abi_uint fcsr;
+} QEMU_ALIGNED(LASX_CTX_ALIGN);
+
+#define CONTEXT_INFO_ALIGN 16
+struct target_sctx_info {
+ abi_uint magic;
+ abi_uint size;
+ abi_ulong padding;
+} QEMU_ALIGNED(CONTEXT_INFO_ALIGN);
+
+QEMU_BUILD_BUG_ON(sizeof(struct target_sctx_info) != sizeof_sctx_info);
+
+struct target_ucontext {
+ abi_ulong tuc_flags;
+ abi_ptr tuc_link;
+ target_stack_t tuc_stack;
+ target_sigset_t tuc_sigmask;
+ uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
+ struct target_sigcontext tuc_mcontext;
+};
+
+struct target_rt_sigframe {
+ struct target_siginfo rs_info;
+ struct target_ucontext rs_uc;
+};
+
+QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe)
+ != sizeof_rt_sigframe);
+QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, rs_uc.tuc_mcontext)
+ != offsetof_sigcontext);
+
+/*
+ * These two structures are not present in guest memory; they are
+ * private to the signal implementation, but largely copied from the
+ * kernel's signal implementation.
+ */
+struct ctx_layout {
+ void *haddr;
+ abi_ptr gaddr;
+ unsigned int size;
+};
+
+struct extctx_layout {
+ unsigned long size;
+ unsigned int flags;
+ struct ctx_layout fpu;
+ struct ctx_layout lsx;
+ struct ctx_layout lasx;
+ struct ctx_layout end;
+};
+
+static abi_ptr extframe_alloc(struct extctx_layout *extctx,
+ struct ctx_layout *sctx, unsigned size,
+ unsigned align, abi_ptr orig_sp)
+{
+ abi_ptr sp = orig_sp;
+
+ sp -= sizeof(struct target_sctx_info) + size;
+ align = MAX(align, CONTEXT_INFO_ALIGN);
+ sp = ROUND_DOWN(sp, align);
+ sctx->gaddr = sp;
+
+ size = orig_sp - sp;
+ sctx->size = size;
+ extctx->size += size;
+
+ return sp;
+}
+
+static abi_ptr setup_extcontext(CPULoongArchState *env,
+ struct extctx_layout *extctx, abi_ptr sp)
+{
+ memset(extctx, 0, sizeof(struct extctx_layout));
+
+ /* Grow down, alloc "end" context info first. */
+ sp = extframe_alloc(extctx, &extctx->end, 0, CONTEXT_INFO_ALIGN, sp);
+
+ /* For qemu, there is no lazy fp context switch, so fp state is always present. */
+ extctx->flags = SC_USED_FP;
+
+ if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
+ sp = extframe_alloc(extctx, &extctx->lasx,
+ sizeof(struct target_lasx_context), LASX_CTX_ALIGN, sp);
+ } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
+ sp = extframe_alloc(extctx, &extctx->lsx,
+ sizeof(struct target_lsx_context), LSX_CTX_ALIGN, sp);
+ } else {
+ sp = extframe_alloc(extctx, &extctx->fpu,
+ sizeof(struct target_fpu_context), FPU_CTX_ALIGN, sp);
+ }
+
+ return sp;
+}
+
+static void setup_sigframe(CPULoongArchState *env,
+ struct target_sigcontext *sc,
+ struct extctx_layout *extctx)
+{
+ struct target_sctx_info *info;
+ int i;
+
+ __put_user(extctx->flags, &sc->sc_flags);
+ __put_user(env->pc, &sc->sc_pc);
+ __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; ++i) {
+ __put_user(env->gpr[i], &sc->sc_regs[i]);
+ }
+
+ /*
+ * Set extension context
+ */
+
+ if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
+ struct target_lasx_context *lasx_ctx;
+ info = extctx->lasx.haddr;
+
+ __put_user(LASX_CTX_MAGIC, &info->magic);
+ __put_user(extctx->lasx.size, &info->size);
+
+ lasx_ctx = (struct target_lasx_context *)(info + 1);
+
+ for (i = 0; i < 32; ++i) {
+ __put_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
+ __put_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
+ __put_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
+ __put_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
+ }
+ __put_user(read_fcc(env), &lasx_ctx->fcc);
+ __put_user(env->fcsr0, &lasx_ctx->fcsr);
+ } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
+ struct target_lsx_context *lsx_ctx;
+ info = extctx->lsx.haddr;
+
+ __put_user(LSX_CTX_MAGIC, &info->magic);
+ __put_user(extctx->lsx.size, &info->size);
+
+ lsx_ctx = (struct target_lsx_context *)(info + 1);
+
+ for (i = 0; i < 32; ++i) {
+ __put_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
+ __put_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
+ }
+ __put_user(read_fcc(env), &lsx_ctx->fcc);
+ __put_user(env->fcsr0, &lsx_ctx->fcsr);
+ } else {
+ struct target_fpu_context *fpu_ctx;
+ info = extctx->fpu.haddr;
+
+ __put_user(FPU_CTX_MAGIC, &info->magic);
+ __put_user(extctx->fpu.size, &info->size);
+
+ fpu_ctx = (struct target_fpu_context *)(info + 1);
+
+ for (i = 0; i < 32; ++i) {
+ __put_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
+ }
+ __put_user(read_fcc(env), &fpu_ctx->fcc);
+ __put_user(env->fcsr0, &fpu_ctx->fcsr);
+ }
+
+ /*
+ * Set end context
+ */
+ info = extctx->end.haddr;
+ __put_user(0, &info->magic);
+ __put_user(0, &info->size);
+}
+
+static bool parse_extcontext(struct extctx_layout *extctx, abi_ptr frame)
+{
+ memset(extctx, 0, sizeof(*extctx));
+
+ while (1) {
+ abi_uint magic, size;
+
+ if (get_user_u32(magic, frame) || get_user_u32(size, frame + 4)) {
+ return false;
+ }
+
+ switch (magic) {
+ case 0: /* END */
+ extctx->end.gaddr = frame;
+ extctx->end.size = size;
+ extctx->size += size;
+ return true;
+
+ case FPU_CTX_MAGIC:
+ if (size < (sizeof(struct target_sctx_info) +
+ sizeof(struct target_fpu_context))) {
+ return false;
+ }
+ extctx->fpu.gaddr = frame;
+ extctx->fpu.size = size;
+ extctx->size += size;
+ break;
+ case LSX_CTX_MAGIC:
+ if (size < (sizeof(struct target_sctx_info) +
+ sizeof(struct target_lsx_context))) {
+ return false;
+ }
+ extctx->lsx.gaddr = frame;
+ extctx->lsx.size = size;
+ extctx->size += size;
+ break;
+ case LASX_CTX_MAGIC:
+ if (size < (sizeof(struct target_sctx_info) +
+ sizeof(struct target_lasx_context))) {
+ return false;
+ }
+ extctx->lasx.gaddr = frame;
+ extctx->lasx.size = size;
+ extctx->size += size;
+ break;
+ default:
+ return false;
+ }
+
+ frame += size;
+ }
+}
+
+static void restore_sigframe(CPULoongArchState *env,
+ struct target_sigcontext *sc,
+ struct extctx_layout *extctx)
+{
+ int i;
+ abi_ulong fcc;
+
+ __get_user(env->pc, &sc->sc_pc);
+ for (i = 1; i < 32; ++i) {
+ __get_user(env->gpr[i], &sc->sc_regs[i]);
+ }
+
+ if (extctx->lasx.haddr) {
+ struct target_lasx_context *lasx_ctx =
+ extctx->lasx.haddr + sizeof(struct target_sctx_info);
+
+ for (i = 0; i < 32; ++i) {
+ __get_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
+ __get_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
+ __get_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
+ __get_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
+ }
+ __get_user(fcc, &lasx_ctx->fcc);
+ write_fcc(env, fcc);
+ __get_user(env->fcsr0, &lasx_ctx->fcsr);
+ restore_fp_status(env);
+ } else if (extctx->lsx.haddr) {
+ struct target_lsx_context *lsx_ctx =
+ extctx->lsx.haddr + sizeof(struct target_sctx_info);
+
+ for (i = 0; i < 32; ++i) {
+ __get_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
+ __get_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
+ }
+ __get_user(fcc, &lsx_ctx->fcc);
+ write_fcc(env, fcc);
+ __get_user(env->fcsr0, &lsx_ctx->fcsr);
+ restore_fp_status(env);
+ } else if (extctx->fpu.haddr) {
+ struct target_fpu_context *fpu_ctx =
+ extctx->fpu.haddr + sizeof(struct target_sctx_info);
+
+ for (i = 0; i < 32; ++i) {
+ __get_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
+ }
+ __get_user(fcc, &fpu_ctx->fcc);
+ write_fcc(env, fcc);
+ __get_user(env->fcsr0, &fpu_ctx->fcsr);
+ restore_fp_status(env);
+ }
+}
+
+/*
+ * Determine which stack to use.
+ */
+static abi_ptr get_sigframe(struct target_sigaction *ka,
+ CPULoongArchState *env,
+ struct extctx_layout *extctx)
+{
+ abi_ulong sp;
+
+ sp = target_sigsp(get_sp_from_cpustate(env), ka);
+ sp = ROUND_DOWN(sp, 16);
+ sp = setup_extcontext(env, extctx, sp);
+ sp -= sizeof(struct target_rt_sigframe);
+
+ assert(QEMU_IS_ALIGNED(sp, 16));
+
+ return sp;
+}
+
+void setup_rt_frame(int sig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set, CPULoongArchState *env)
+{
+ struct target_rt_sigframe *frame;
+ struct extctx_layout extctx;
+ abi_ptr frame_addr;
+ int i;
+
+ frame_addr = get_sigframe(ka, env, &extctx);
+ trace_user_setup_rt_frame(env, frame_addr);
+
+ frame = lock_user(VERIFY_WRITE, frame_addr,
+ sizeof(*frame) + extctx.size, 0);
+ if (!frame) {
+ force_sigsegv(sig);
+ return;
+ }
+
+ if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
+ extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
+ extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
+ } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
+ extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
+ extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
+ } else {
+ extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
+ extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
+ }
+
+ frame->rs_info = *info;
+
+ __put_user(0, &frame->rs_uc.tuc_flags);
+ __put_user(0, &frame->rs_uc.tuc_link);
+ target_save_altstack(&frame->rs_uc.tuc_stack, env);
+
+ setup_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);
+
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
+ __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
+ }
+
+ env->gpr[4] = sig;
+ env->gpr[5] = frame_addr + offsetof(struct target_rt_sigframe, rs_info);
+ env->gpr[6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc);
+ env->gpr[3] = frame_addr;
+ env->gpr[1] = default_rt_sigreturn;
+
+ env->pc = ka->_sa_handler;
+ unlock_user(frame, frame_addr, sizeof(*frame) + extctx.size);
+}
+
+long do_rt_sigreturn(CPULoongArchState *env)
+{
+ struct target_rt_sigframe *frame;
+ struct extctx_layout extctx;
+ abi_ulong frame_addr;
+ sigset_t blocked;
+
+ frame_addr = env->gpr[3];
+ trace_user_do_rt_sigreturn(env, frame_addr);
+
+ if (!parse_extcontext(&extctx, frame_addr + sizeof(*frame))) {
+ goto badframe;
+ }
+
+ frame = lock_user(VERIFY_READ, frame_addr,
+ sizeof(*frame) + extctx.size, 1);
+ if (!frame) {
+ goto badframe;
+ }
+
+ if (extctx.lasx.gaddr) {
+ extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
+ } else if (extctx.lsx.gaddr) {
+ extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
+ } else if (extctx.fpu.gaddr) {
+ extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
+ }
+
+ target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
+ set_sigmask(&blocked);
+
+ restore_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);
+
+ target_restore_altstack(&frame->rs_uc.tuc_stack, env);
+
+ unlock_user(frame, frame_addr, 0);
+ return -QEMU_ESIGRETURN;
+
+ badframe:
+ force_sig(TARGET_SIGSEGV);
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
+ assert(tramp != NULL);
+
+ __put_user(0x03822c0b, tramp + 0); /* ori a7, zero, 0x8b */
+ __put_user(0x002b0000, tramp + 1); /* syscall 0 */
+
+ default_rt_sigreturn = sigtramp_page;
+ unlock_user(tramp, sigtramp_page, 8);
+}
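
The extended context area behind the sigcontext is a chain of (magic, size) records ending with a zero magic, which is exactly what parse_extcontext() walks and setup_sigframe() writes. A host-side sketch that walks such a chain built in a plain byte buffer (hypothetical offsets, little-endian 32-bit fields for simplicity):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define FPU_CTX_MAGIC 0x46505501u

    /* Walk (magic, size) records until the zero-magic END record. */
    static void walk_records(const uint8_t *buf, size_t buflen)
    {
        size_t off = 0;

        while (off + 8 <= buflen) {
            uint32_t magic, size;
            memcpy(&magic, buf + off, 4);
            memcpy(&size, buf + off + 4, 4);

            if (magic == 0) {
                printf("END record at offset %zu\n", off);
                return;
            }
            printf("record magic=%#x size=%u at offset %zu\n",
                   (unsigned)magic, (unsigned)size, off);
            off += size;
        }
        puts("malformed context: no END record");
    }

    int main(void)
    {
        uint8_t frame[64] = { 0 };
        uint32_t magic = FPU_CTX_MAGIC, size = 48, zero = 0, endsz = 16;

        memcpy(frame, &magic, 4);        /* FPU record header */
        memcpy(frame + 4, &size, 4);
        memcpy(frame + 48, &zero, 4);    /* END record header */
        memcpy(frame + 52, &endsz, 4);

        walk_records(frame, sizeof(frame));
        return 0;
    }
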
diff --git a/linux-user/loongarch64/sockbits.h b/linux-user/loongarch64/sockbits.h
new file mode 100644
index 0000000000..1cffcae120
--- /dev/null
+++ b/linux-user/loongarch64/sockbits.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_SOCKBITS_H
+#define LOONGARCH_TARGET_SOCKBITS_H
+
+#include "../generic/sockbits.h"
+
+#endif
diff --git a/linux-user/nios2/syscall_nr.h b/linux-user/loongarch64/syscall_nr.h
index 11a37b32e8..be00915adf 100644
--- a/linux-user/nios2/syscall_nr.h
+++ b/linux-user/loongarch64/syscall_nr.h
@@ -3,10 +3,9 @@
* Do not modify.
* This file is generated by scripts/gensyscalls.sh
*/
-#ifndef LINUX_USER_NIOS2_SYSCALL_NR_H
-#define LINUX_USER_NIOS2_SYSCALL_NR_H
+#ifndef LINUX_USER_LOONGARCH_SYSCALL_NR_H
+#define LINUX_USER_LOONGARCH_SYSCALL_NR_H
-#define TARGET_NR_cacheflush (TARGET_NR_arch_specific_syscall)
#define TARGET_NR_io_setup 0
#define TARGET_NR_io_destroy 1
#define TARGET_NR_io_submit 2
@@ -32,7 +31,7 @@
#define TARGET_NR_epoll_pwait 22
#define TARGET_NR_dup 23
#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl64 25
+#define TARGET_NR_fcntl 25
#define TARGET_NR_inotify_init1 26
#define TARGET_NR_inotify_add_watch 27
#define TARGET_NR_inotify_rm_watch 28
@@ -45,15 +44,14 @@
#define TARGET_NR_unlinkat 35
#define TARGET_NR_symlinkat 36
#define TARGET_NR_linkat 37
-#define TARGET_NR_renameat 38
#define TARGET_NR_umount2 39
#define TARGET_NR_mount 40
#define TARGET_NR_pivot_root 41
#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs64 43
-#define TARGET_NR_fstatfs64 44
-#define TARGET_NR_truncate64 45
-#define TARGET_NR_ftruncate64 46
+#define TARGET_NR_statfs 43
+#define TARGET_NR_fstatfs 44
+#define TARGET_NR_truncate 45
+#define TARGET_NR_ftruncate 46
#define TARGET_NR_fallocate 47
#define TARGET_NR_faccessat 48
#define TARGET_NR_chdir 49
@@ -69,7 +67,7 @@
#define TARGET_NR_pipe2 59
#define TARGET_NR_quotactl 60
#define TARGET_NR_getdents64 61
-#define TARGET_NR_llseek 62
+#define TARGET_NR_lseek 62
#define TARGET_NR_read 63
#define TARGET_NR_write 64
#define TARGET_NR_readv 65
@@ -78,7 +76,7 @@
#define TARGET_NR_pwrite64 68
#define TARGET_NR_preadv 69
#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile64 71
+#define TARGET_NR_sendfile 71
#define TARGET_NR_pselect6 72
#define TARGET_NR_ppoll 73
#define TARGET_NR_signalfd4 74
@@ -86,8 +84,6 @@
#define TARGET_NR_splice 76
#define TARGET_NR_tee 77
#define TARGET_NR_readlinkat 78
-#define TARGET_NR_fstatat64 79
-#define TARGET_NR_fstat64 80
#define TARGET_NR_sync 81
#define TARGET_NR_fsync 82
#define TARGET_NR_fdatasync 83
@@ -170,8 +166,6 @@
#define TARGET_NR_uname 160
#define TARGET_NR_sethostname 161
#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrlimit 163
-#define TARGET_NR_setrlimit 164
#define TARGET_NR_getrusage 165
#define TARGET_NR_umask 166
#define TARGET_NR_prctl 167
@@ -229,8 +223,8 @@
#define TARGET_NR_keyctl 219
#define TARGET_NR_clone 220
#define TARGET_NR_execve 221
-#define TARGET_NR_mmap2 222
-#define TARGET_NR_fadvise64_64 223
+#define TARGET_NR_mmap 222
+#define TARGET_NR_fadvise64 223
#define TARGET_NR_swapon 224
#define TARGET_NR_swapoff 225
#define TARGET_NR_mprotect 226
@@ -287,26 +281,6 @@
#define TARGET_NR_io_pgetevents 292
#define TARGET_NR_rseq 293
#define TARGET_NR_kexec_file_load 294
-#define TARGET_NR_clock_gettime64 403
-#define TARGET_NR_clock_settime64 404
-#define TARGET_NR_clock_adjtime64 405
-#define TARGET_NR_clock_getres_time64 406
-#define TARGET_NR_clock_nanosleep_time64 407
-#define TARGET_NR_timer_gettime64 408
-#define TARGET_NR_timer_settime64 409
-#define TARGET_NR_timerfd_gettime64 410
-#define TARGET_NR_timerfd_settime64 411
-#define TARGET_NR_utimensat_time64 412
-#define TARGET_NR_pselect6_time64 413
-#define TARGET_NR_ppoll_time64 414
-#define TARGET_NR_io_pgetevents_time64 416
-#define TARGET_NR_recvmmsg_time64 417
-#define TARGET_NR_mq_timedsend_time64 418
-#define TARGET_NR_mq_timedreceive_time64 419
-#define TARGET_NR_semtimedop_time64 420
-#define TARGET_NR_rt_sigtimedwait_time64 421
-#define TARGET_NR_futex_time64 422
-#define TARGET_NR_sched_rr_get_interval_time64 423
#define TARGET_NR_pidfd_send_signal 424
#define TARGET_NR_io_uring_setup 425
#define TARGET_NR_io_uring_enter 426
@@ -318,6 +292,7 @@
#define TARGET_NR_fsmount 432
#define TARGET_NR_fspick 433
#define TARGET_NR_pidfd_open 434
+#define TARGET_NR_clone3 435
#define TARGET_NR_close_range 436
#define TARGET_NR_openat2 437
#define TARGET_NR_pidfd_getfd 438
@@ -325,9 +300,13 @@
#define TARGET_NR_process_madvise 440
#define TARGET_NR_epoll_pwait2 441
#define TARGET_NR_mount_setattr 442
+#define TARGET_NR_quotactl_fd 443
#define TARGET_NR_landlock_create_ruleset 444
#define TARGET_NR_landlock_add_rule 445
#define TARGET_NR_landlock_restrict_self 446
-#define TARGET_NR_syscalls 447
+#define TARGET_NR_process_mrelease 448
+#define TARGET_NR_futex_waitv 449
+#define TARGET_NR_set_mempolicy_home_node 450
+#define TARGET_NR_syscalls 451
-#endif /* LINUX_USER_NIOS2_SYSCALL_NR_H */
+#endif /* LINUX_USER_LOONGARCH_SYSCALL_NR_H */
diff --git a/linux-user/loongarch64/target_cpu.h b/linux-user/loongarch64/target_cpu.h
new file mode 100644
index 0000000000..a29af66156
--- /dev/null
+++ b/linux-user/loongarch64/target_cpu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch specific CPU ABI and functions for linux-user
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_CPU_H
+#define LOONGARCH_TARGET_CPU_H
+
+static inline void cpu_clone_regs_child(CPULoongArchState *env,
+ target_ulong newsp, unsigned flags)
+{
+ if (newsp) {
+ env->gpr[3] = newsp;
+ }
+ env->gpr[4] = 0;
+}
+
+static inline void cpu_clone_regs_parent(CPULoongArchState *env,
+ unsigned flags)
+{
+}
+
+static inline void cpu_set_tls(CPULoongArchState *env, target_ulong newtls)
+{
+ env->gpr[2] = newtls;
+}
+
+static inline abi_ulong get_sp_from_cpustate(CPULoongArchState *state)
+{
+ return state->gpr[3];
+}
+#endif
diff --git a/linux-user/loongarch64/target_elf.h b/linux-user/loongarch64/target_elf.h
new file mode 100644
index 0000000000..95c3f05a46
--- /dev/null
+++ b/linux-user/loongarch64/target_elf.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_ELF_H
+#define LOONGARCH_TARGET_ELF_H
+static inline const char *cpu_get_model(uint32_t eflags)
+{
+ return "la464";
+}
+#endif
diff --git a/linux-user/loongarch64/target_errno_defs.h b/linux-user/loongarch64/target_errno_defs.h
new file mode 100644
index 0000000000..c198b8aca9
--- /dev/null
+++ b/linux-user/loongarch64/target_errno_defs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_ERRNO_DEFS_H
+#define LOONGARCH_TARGET_ERRNO_DEFS_H
+
+/* Target uses generic errno */
+#include "../generic/target_errno_defs.h"
+
+#endif
diff --git a/linux-user/loongarch64/target_fcntl.h b/linux-user/loongarch64/target_fcntl.h
new file mode 100644
index 0000000000..99bf586854
--- /dev/null
+++ b/linux-user/loongarch64/target_fcntl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_FCNTL_H
+#define LOONGARCH_TARGET_FCNTL_H
+
+#include "../generic/fcntl.h"
+
+#endif
diff --git a/linux-user/loongarch64/target_mman.h b/linux-user/loongarch64/target_mman.h
new file mode 100644
index 0000000000..8c2a3d5596
--- /dev/null
+++ b/linux-user/loongarch64/target_mman.h
@@ -0,0 +1,12 @@
+/*
+ * arch/loongarch/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+ * TASK_SIZE64 0x1UL << (... ? VA_BITS : ...)
+ */
+#define TASK_UNMAPPED_BASE \
+ TARGET_PAGE_ALIGN((1ull << TARGET_VIRT_ADDR_SPACE_BITS) / 3)
+
+/* arch/loongarch/include/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2)
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/loongarch64/target_prctl.h b/linux-user/loongarch64/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/loongarch64/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/loongarch64/target_proc.h b/linux-user/loongarch64/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/loongarch64/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/loongarch64/target_resource.h b/linux-user/loongarch64/target_resource.h
new file mode 100644
index 0000000000..0f86bf24ee
--- /dev/null
+++ b/linux-user/loongarch64/target_resource.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_RESOURCE_H
+#define LOONGARCH_TARGET_RESOURCE_H
+
+#include "../generic/target_resource.h"
+
+#endif
diff --git a/linux-user/loongarch64/target_signal.h b/linux-user/loongarch64/target_signal.h
new file mode 100644
index 0000000000..ad3aaffcb4
--- /dev/null
+++ b/linux-user/loongarch64/target_signal.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_SIGNAL_H
+#define LOONGARCH_TARGET_SIGNAL_H
+
+#include "../generic/signal.h"
+
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
+#endif /* LOONGARCH_TARGET_SIGNAL_H */
diff --git a/linux-user/loongarch64/target_structs.h b/linux-user/loongarch64/target_structs.h
new file mode 100644
index 0000000000..6041441e15
--- /dev/null
+++ b/linux-user/loongarch64/target_structs.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_STRUCTS_H
+#define LOONGARCH_TARGET_STRUCTS_H
+
+#include "../generic/target_structs.h"
+
+#endif
diff --git a/linux-user/loongarch64/target_syscall.h b/linux-user/loongarch64/target_syscall.h
new file mode 100644
index 0000000000..39f229bb9c
--- /dev/null
+++ b/linux-user/loongarch64/target_syscall.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_SYSCALL_H
+#define LOONGARCH_TARGET_SYSCALL_H
+
+#include "qemu/units.h"
+
+/*
+ * this struct defines the way the registers are stored on the
+ * stack during a system call.
+ */
+
+struct target_pt_regs {
+ /* Saved main processor registers. */
+ target_ulong regs[32];
+
+ /* Saved special registers. */
+ struct {
+ target_ulong era;
+ target_ulong badv;
+ target_ulong crmd;
+ target_ulong prmd;
+ target_ulong euen;
+ target_ulong ecfg;
+ target_ulong estat;
+ } csr;
+ target_ulong orig_a0;
+ target_ulong __last[0];
+};
+
+#define UNAME_MACHINE "loongarch64"
+#define UNAME_MINIMUM_RELEASE "5.19.0"
+
+#define TARGET_MCL_CURRENT 1
+#define TARGET_MCL_FUTURE 2
+#define TARGET_MCL_ONFAULT 4
+
+#endif
diff --git a/linux-user/loongarch64/termbits.h b/linux-user/loongarch64/termbits.h
new file mode 100644
index 0000000000..d425db8748
--- /dev/null
+++ b/linux-user/loongarch64/termbits.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_TARGET_TERMBITS_H
+#define LOONGARCH_TARGET_TERMBITS_H
+
+#include "../generic/termbits.h"
+
+#endif
diff --git a/linux-user/loongarch64/vdso-asmoffset.h b/linux-user/loongarch64/vdso-asmoffset.h
new file mode 100644
index 0000000000..60d113822f
--- /dev/null
+++ b/linux-user/loongarch64/vdso-asmoffset.h
@@ -0,0 +1,8 @@
+#define sizeof_rt_sigframe 0x240
+#define sizeof_sigcontext 0x110
+#define sizeof_sctx_info 0x10
+
+#define offsetof_sigcontext 0x130
+#define offsetof_sigcontext_pc 0
+#define offsetof_sigcontext_gr 8
+#define offsetof_fpucontext_fr 0
diff --git a/linux-user/loongarch64/vdso.S b/linux-user/loongarch64/vdso.S
new file mode 100644
index 0000000000..780a5fda12
--- /dev/null
+++ b/linux-user/loongarch64/vdso.S
@@ -0,0 +1,130 @@
+/*
+ * Loongarch64 linux replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include "vdso-asmoffset.h"
+
+
+ .text
+
+.macro endf name
+ .globl \name
+ .type \name, @function
+ .size \name, . - \name
+.endm
+
+.macro vdso_syscall name, nr
+\name:
+ li.w $a7, \nr
+ syscall 0
+ jr $ra
+endf \name
+.endm
+
+ .cfi_startproc
+
+vdso_syscall __vdso_gettimeofday, __NR_gettimeofday
+vdso_syscall __vdso_clock_gettime, __NR_clock_gettime
+vdso_syscall __vdso_clock_getres, __NR_clock_getres
+vdso_syscall __vdso_getcpu, __NR_getcpu
+
+ .cfi_endproc
+
+/*
+ * Start the unwind info at least one instruction before the signal
+ * trampoline, because the unwinder will assume we are returning
+ * after a call site.
+ */
+
+ .cfi_startproc simple
+ .cfi_signal_frame
+
+#define B_GR offsetof_sigcontext_gr
+#define B_FR sizeof_sigcontext + sizeof_sctx_info + offsetof_fpucontext_fr
+
+ .cfi_def_cfa 2, offsetof_sigcontext
+
+ /* Return address */
+ .cfi_return_column 64
+ .cfi_offset 64, offsetof_sigcontext_pc /* pc */
+
+ /* Integer registers */
+ .cfi_offset 1, B_GR + 1 * 8
+ .cfi_offset 2, B_GR + 2 * 8
+ .cfi_offset 3, B_GR + 3 * 8
+ .cfi_offset 4, B_GR + 4 * 8
+ .cfi_offset 5, B_GR + 5 * 8
+ .cfi_offset 6, B_GR + 6 * 8
+ .cfi_offset 7, B_GR + 7 * 8
+ .cfi_offset 8, B_GR + 8 * 8
+ .cfi_offset 9, B_GR + 9 * 8
+ .cfi_offset 10, B_GR + 10 * 8
+ .cfi_offset 11, B_GR + 11 * 8
+ .cfi_offset 12, B_GR + 12 * 8
+ .cfi_offset 13, B_GR + 13 * 8
+ .cfi_offset 14, B_GR + 14 * 8
+ .cfi_offset 15, B_GR + 15 * 8
+ .cfi_offset 16, B_GR + 16 * 8
+ .cfi_offset 17, B_GR + 17 * 8
+ .cfi_offset 18, B_GR + 18 * 8
+ .cfi_offset 19, B_GR + 19 * 8
+ .cfi_offset 20, B_GR + 20 * 8
+ .cfi_offset 21, B_GR + 21 * 8
+ .cfi_offset 22, B_GR + 22 * 8
+ .cfi_offset 23, B_GR + 23 * 8
+ .cfi_offset 24, B_GR + 24 * 8
+ .cfi_offset 25, B_GR + 25 * 8
+ .cfi_offset 26, B_GR + 26 * 8
+ .cfi_offset 27, B_GR + 27 * 8
+ .cfi_offset 28, B_GR + 28 * 8
+ .cfi_offset 29, B_GR + 29 * 8
+ .cfi_offset 30, B_GR + 30 * 8
+ .cfi_offset 31, B_GR + 31 * 8
+
+ /* Floating point registers */
+ .cfi_offset 32, B_FR + 0
+ .cfi_offset 33, B_FR + 1 * 8
+ .cfi_offset 34, B_FR + 2 * 8
+ .cfi_offset 35, B_FR + 3 * 8
+ .cfi_offset 36, B_FR + 4 * 8
+ .cfi_offset 37, B_FR + 5 * 8
+ .cfi_offset 38, B_FR + 6 * 8
+ .cfi_offset 39, B_FR + 7 * 8
+ .cfi_offset 40, B_FR + 8 * 8
+ .cfi_offset 41, B_FR + 9 * 8
+ .cfi_offset 42, B_FR + 10 * 8
+ .cfi_offset 43, B_FR + 11 * 8
+ .cfi_offset 44, B_FR + 12 * 8
+ .cfi_offset 45, B_FR + 13 * 8
+ .cfi_offset 46, B_FR + 14 * 8
+ .cfi_offset 47, B_FR + 15 * 8
+ .cfi_offset 48, B_FR + 16 * 8
+ .cfi_offset 49, B_FR + 17 * 8
+ .cfi_offset 50, B_FR + 18 * 8
+ .cfi_offset 51, B_FR + 19 * 8
+ .cfi_offset 52, B_FR + 20 * 8
+ .cfi_offset 53, B_FR + 21 * 8
+ .cfi_offset 54, B_FR + 22 * 8
+ .cfi_offset 55, B_FR + 23 * 8
+ .cfi_offset 56, B_FR + 24 * 8
+ .cfi_offset 57, B_FR + 25 * 8
+ .cfi_offset 58, B_FR + 26 * 8
+ .cfi_offset 59, B_FR + 27 * 8
+ .cfi_offset 60, B_FR + 28 * 8
+ .cfi_offset 61, B_FR + 29 * 8
+ .cfi_offset 62, B_FR + 30 * 8
+ .cfi_offset 63, B_FR + 31 * 8
+
+ nop
+
+__vdso_rt_sigreturn:
+ li.w $a7, __NR_rt_sigreturn
+ syscall 0
+ .cfi_endproc
+endf __vdso_rt_sigreturn
diff --git a/linux-user/loongarch64/vdso.ld b/linux-user/loongarch64/vdso.ld
new file mode 100644
index 0000000000..682446ed0c
--- /dev/null
+++ b/linux-user/loongarch64/vdso.ld
@@ -0,0 +1,73 @@
+/*
+ * Linker script for linux loongarch64 replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_5.10 {
+ global:
+ __vdso_getcpu;
+ __vdso_clock_getres;
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_rt_sigreturn;
+
+ local: *;
+ };
+}
+
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS;
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ /*
+ * We can't prelink to any address without knowing something about
+ * the virtual memory space of the host, since that leaks over into
+ * the available memory space of the guest.
+ */
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ .data : {
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load =0xd503201f
+}
diff --git a/linux-user/loongarch64/vdso.so b/linux-user/loongarch64/vdso.so
new file mode 100755
index 0000000000..bfaa26f2bf
--- /dev/null
+++ b/linux-user/loongarch64/vdso.so
Binary files differ
diff --git a/linux-user/m68k/cpu_loop.c b/linux-user/m68k/cpu_loop.c
index ebf32be78f..f79b8e4ab0 100644
--- a/linux-user/m68k/cpu_loop.c
+++ b/linux-user/m68k/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -29,7 +28,6 @@ void cpu_loop(CPUM68KState *env)
CPUState *cs = env_cpu(env);
int trapnr;
unsigned int n;
- target_siginfo_t info;
for(;;) {
cpu_exec_start(cs);
@@ -38,39 +36,25 @@ void cpu_loop(CPUM68KState *env)
process_queued_cpu_work(cs);
switch(trapnr) {
- case EXCP_HALT_INSN:
- /* Semihosing syscall. */
- env->pc += 4;
- do_m68k_semihosting(env, env->dregs[0]);
- break;
case EXCP_ILLEGAL:
case EXCP_LINEA:
case EXCP_LINEF:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLOPN;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc);
break;
case EXCP_CHK:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_INTOVF;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ case EXCP_TRAPCC:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->mmu.ar);
break;
case EXCP_DIV0:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_INTDIV;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->mmu.ar);
+ break;
+ case EXCP_TRACE:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_TRACE, env->mmu.ar);
break;
case EXCP_TRAP0:
{
abi_long ret;
n = env->dregs[0];
- env->pc += 2;
ret = do_syscall(env,
n,
env->dregs[1],
@@ -80,9 +64,9 @@ void cpu_loop(CPUM68KState *env)
env->dregs[5],
env->aregs[0],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->pc -= 2;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->dregs[0] = ret;
}
}
@@ -90,21 +74,12 @@ void cpu_loop(CPUM68KState *env)
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
- case EXCP_ACCESS:
- {
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->mmu.ar;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
+ case EXCP_TRAP0 + 1 ... EXCP_TRAP0 + 14:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP, env->pc);
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ case EXCP_TRAP15:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
@@ -120,7 +95,7 @@ void cpu_loop(CPUM68KState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
env->pc = regs->pc;
diff --git a/linux-user/m68k/signal.c b/linux-user/m68k/signal.c
index 4f8eb6f727..77555781aa 100644
--- a/linux-user/m68k/signal.c
+++ b/linux-user/m68k/signal.c
@@ -39,7 +39,6 @@ struct target_sigframe
int sig;
int code;
abi_ulong psc;
- char retcode[8];
abi_ulong extramask[TARGET_NSIG_WORDS-1];
struct target_sigcontext sc;
};
@@ -76,7 +75,6 @@ struct target_rt_sigframe
int sig;
abi_ulong pinfo;
abi_ulong puc;
- char retcode[8];
struct target_siginfo info;
struct target_ucontext uc;
};
@@ -130,7 +128,6 @@ void setup_frame(int sig, struct target_sigaction *ka,
{
struct target_sigframe *frame;
abi_ulong frame_addr;
- abi_ulong retcode_addr;
abi_ulong sc_addr;
int i;
@@ -152,16 +149,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
}
/* Set up to return from userspace. */
-
- retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
- __put_user(retcode_addr, &frame->pretcode);
-
- /* moveq #,d0; trap #0 */
-
- __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
- (uint32_t *)(frame->retcode));
-
- /* Set up to return from userspace */
+ __put_user(default_sigreturn, &frame->pretcode);
env->aregs[7] = frame_addr;
env->pc = ka->_sa_handler;
@@ -288,7 +276,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
{
struct target_rt_sigframe *frame;
abi_ulong frame_addr;
- abi_ulong retcode_addr;
abi_ulong info_addr;
abi_ulong uc_addr;
int err = 0;
@@ -308,7 +295,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
__put_user(uc_addr, &frame->puc);
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
/* Create the ucontext */
@@ -320,22 +307,12 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
if (err)
goto give_sigsegv;
- for(i = 0; i < TARGET_NSIG_WORDS; i++) {
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
}
/* Set up to return from userspace. */
-
- retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
- __put_user(retcode_addr, &frame->pretcode);
-
- /* moveq #,d0; notb d0; trap #0 */
-
- __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
- (uint32_t *)(frame->retcode + 0));
- __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
-
- /* Set up to return from userspace */
+ __put_user(default_rt_sigreturn, &frame->pretcode);
env->aregs[7] = frame_addr;
env->pc = ka->_sa_handler;
@@ -376,11 +353,11 @@ long do_sigreturn(CPUM68KState *env)
restore_sigcontext(env, &frame->sc);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUM68KState *env)
@@ -404,10 +381,30 @@ long do_rt_sigreturn(CPUM68KState *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ void *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 4 + 6, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+
+ /* moveq #,d0; trap #0 */
+ __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), (uint32_t *)tramp);
+
+ default_rt_sigreturn = sigtramp_page + 4;
+
+ /* moveq #,d0; notb d0; trap #0 */
+ __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
+ (uint32_t *)(tramp + 4));
+ __put_user(0x4e40, (uint16_t *)(tramp + 8));
+
+ unlock_user(tramp, sigtramp_page, 4 + 6);
}
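
The two trampoline words pack m68k instructions: moveq #n,%d0 is 0x70 in the high byte of the instruction word plus an 8-bit immediate, and trap #0 is 0x4e40. Because moveq only takes a signed 8-bit immediate, the rt variant stores the complemented number and flips it with not.b, the same trick used by the kernel's own trampoline. A small sketch of the arithmetic, assuming the usual m68k numbers sigreturn=119 and rt_sigreturn=173:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_sigreturn    119   /* assumed m68k syscall numbers */
    #define NR_rt_sigreturn 173

    int main(void)
    {
        /* moveq #119,%d0 ; trap #0  -> one 32-bit word */
        uint32_t tramp = 0x70004e40 + (NR_sigreturn << 16);

        /* moveq #(173^0xff),%d0 ; not.b %d0  -> 32-bit word, then trap #0 */
        uint32_t rt_hi = 0x70004600 + ((NR_rt_sigreturn ^ 0xff) << 16);
        uint16_t rt_lo = 0x4e40;

        printf("sigreturn tramp:    %08x\n", tramp);
        printf("rt_sigreturn tramp: %08x %04x\n", rt_hi, rt_lo);
        printf("decoded rt number:  %u\n", ((rt_hi >> 16) & 0xff) ^ 0xffu);
        return 0;
    }
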
diff --git a/linux-user/m68k/target_cpu.h b/linux-user/m68k/target_cpu.h
index c3f288dfe8..4b40c09a8d 100644
--- a/linux-user/m68k/target_cpu.h
+++ b/linux-user/m68k/target_cpu.h
@@ -37,7 +37,7 @@ static inline void cpu_clone_regs_parent(CPUM68KState *env, unsigned flags)
static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
ts->tp_value = newtls;
}
diff --git a/linux-user/m68k/target_flat.h b/linux-user/m68k/target_flat.h
new file mode 100644
index 0000000000..bc83224cea
--- /dev/null
+++ b/linux-user/m68k/target_flat.h
@@ -0,0 +1 @@
+#include "../generic/target_flat.h"
diff --git a/linux-user/m68k/target_mman.h b/linux-user/m68k/target_mman.h
new file mode 100644
index 0000000000..20cfe750c5
--- /dev/null
+++ b/linux-user/m68k/target_mman.h
@@ -0,0 +1,6 @@
+/* arch/m68k/include/asm/processor.h */
+#define TASK_UNMAPPED_BASE 0xC0000000
+/* arch/m68k/include/asm/elf.h */
+#define ELF_ET_DYN_BASE 0xD0000000
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/m68k/target_prctl.h b/linux-user/m68k/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/m68k/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/m68k/target_proc.h b/linux-user/m68k/target_proc.h
new file mode 100644
index 0000000000..3df8f28e22
--- /dev/null
+++ b/linux-user/m68k/target_proc.h
@@ -0,0 +1,16 @@
+/*
+ * M68K specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef M68K_TARGET_PROC_H
+#define M68K_TARGET_PROC_H
+
+static int open_hardware(CPUArchState *cpu_env, int fd)
+{
+ dprintf(fd, "Model:\t\tqemu-m68k\n");
+ return 0;
+}
+#define HAVE_ARCH_PROC_HARDWARE
+
+#endif /* M68K_TARGET_PROC_H */
diff --git a/linux-user/m68k/target_resource.h b/linux-user/m68k/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/m68k/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h
index d096544ef8..6e0f4b74e3 100644
--- a/linux-user/m68k/target_signal.h
+++ b/linux-user/m68k/target_signal.h
@@ -1,25 +1,9 @@
#ifndef M68K_TARGET_SIGNAL_H
#define M68K_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* M68K_TARGET_SIGNAL_H */
diff --git a/linux-user/m68k/target_structs.h b/linux-user/m68k/target_structs.h
index e373d481e1..3a06f373c3 100644
--- a/linux-user/m68k/target_structs.h
+++ b/linux-user/m68k/target_structs.h
@@ -1,58 +1 @@
-/*
- * m68k specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef M68K_TARGET_STRUCTS_H
-#define M68K_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/m68k/target_syscall.h b/linux-user/m68k/target_syscall.h
index 23359a6299..8d4ddbd76c 100644
--- a/linux-user/m68k/target_syscall.h
+++ b/linux-user/m68k/target_syscall.h
@@ -20,7 +20,6 @@ struct target_pt_regs {
#define UNAME_MACHINE "m68k"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/main.c b/linux-user/main.c
index 16def5215d..94e4c47f05 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -18,10 +18,9 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
+#include "qemu/help-texts.h"
#include "qemu/units.h"
#include "qemu/accel.h"
-#include "sysemu/tcg.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>
@@ -39,9 +38,11 @@
#include "qemu/help_option.h"
#include "qemu/module.h"
#include "qemu/plugin.h"
+#include "user/guest-base.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
-#include "tcg/tcg.h"
+#include "gdbstub/user.h"
+#include "tcg/startup.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "qemu/guest-random.h"
@@ -54,6 +55,12 @@
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
+#include "tcg/perf.h"
+#include "exec/page-vary.h"
+
+#ifdef CONFIG_SEMIHOSTING
+#include "semihosting/semihost.h"
+#endif
#ifndef AT_FLAGS_PRESERVE_ARGV0
#define AT_FLAGS_PRESERVE_ARGV0_BIT 0
@@ -61,8 +68,9 @@
#endif
char *exec_path;
+char real_exec_path[PATH_MAX];
-int singlestep;
+static bool opt_one_insn_per_tb;
static const char *argv0;
static const char *gdbstub;
static envlist_t *envlist;
@@ -85,6 +93,7 @@ static bool enable_strace;
* Used to support command line arguments overriding environment variables.
*/
static int last_log_mask;
+static const char *last_log_filename;
/*
* When running 32-on-64 we should make sure we can fit all of the possible
@@ -102,11 +111,9 @@ static int last_log_mask;
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
(TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable
- of type abi_ulong and expect it to fit. Avoid the last page. */
-# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK)
+# define MAX_RESERVED_VA(CPU) 0xfffffffful
# else
-# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
# endif
# else
# define MAX_RESERVED_VA(CPU) 0
@@ -120,10 +127,14 @@ static void usage(int exitcode);
static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
+#if !defined(TARGET_DEFAULT_STACK_SIZE)
/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
we allocate a bigger stack. Need a better solution, for example
by remapping the process stack directly at the right place */
-unsigned long guest_stack_size = 8 * 1024 * 1024UL;
+#define TARGET_DEFAULT_STACK_SIZE 8 * 1024 * 1024UL
+#endif
+
+unsigned long guest_stack_size = TARGET_DEFAULT_STACK_SIZE;
/***********************************************************/
/* Helper routines for implementing atomic operations. */
@@ -134,10 +145,15 @@ void fork_start(void)
start_exclusive();
mmap_fork_start();
cpu_list_lock();
+ qemu_plugin_user_prefork_lock();
+ gdbserver_fork_start();
}
-void fork_end(int child)
+void fork_end(pid_t pid)
{
+ bool child = pid == 0;
+
+ qemu_plugin_user_postfork(child);
mmap_fork_end(child);
if (child) {
CPUState *cpu, *next_cpu;
@@ -145,18 +161,21 @@ void fork_end(int child)
Discard information about the parent threads. */
CPU_FOREACH_SAFE(cpu, next_cpu) {
if (cpu != thread_cpu) {
- QTAILQ_REMOVE_RCU(&cpus, cpu, node);
+ QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
}
}
qemu_init_cpu_list();
- gdbserver_fork(thread_cpu);
- /* qemu_init_cpu_list() takes care of reinitializing the
- * exclusive state, so we don't need to end_exclusive() here.
- */
+ get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id();
} else {
cpu_list_unlock();
- end_exclusive();
}
+ gdbserver_fork_end(thread_cpu, pid);
+ /*
+ * qemu_init_cpu_list() reinitialized the child exclusive state, but we
+ * also need to keep current_cpu consistent, so call end_exclusive() for
+ * both child and parent.
+ */
+ end_exclusive();
}
__thread CPUState *thread_cpu;
@@ -190,19 +209,33 @@ void stop_all_tasks(void)
/* Assumes contents are already zeroed. */
void init_task_state(TaskState *ts)
{
+ long ticks_per_sec;
+ struct timespec bt;
+
ts->used = 1;
ts->sigaltstack_used = (struct target_sigaltstack) {
.ss_sp = 0,
.ss_size = 0,
.ss_flags = TARGET_SS_DISABLE,
};
+
+ /* Capture task start time relative to system boot */
+
+ ticks_per_sec = sysconf(_SC_CLK_TCK);
+
+ if ((ticks_per_sec > 0) && !clock_gettime(CLOCK_BOOTTIME, &bt)) {
+ /* start_boottime is expressed in clock ticks */
+ ts->start_boottime = bt.tv_sec * (uint64_t) ticks_per_sec;
+ ts->start_boottime += bt.tv_nsec * (uint64_t) ticks_per_sec /
+ NANOSECONDS_PER_SECOND;
+ }
}
CPUArchState *cpu_copy(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
CPUState *new_cpu = cpu_create(cpu_type);
- CPUArchState *new_env = new_cpu->env_ptr;
+ CPUArchState *new_env = cpu_env(new_cpu);
CPUBreakpoint *bp;
/* Reset non arch specific state */
@@ -210,6 +243,14 @@ CPUArchState *cpu_copy(CPUArchState *env)
new_cpu->tcg_cflags = cpu->tcg_cflags;
memcpy(new_env, env, sizeof(CPUArchState));
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
+ new_env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ memcpy(g2h_untagged(new_env->gdt.base), g2h_untagged(env->gdt.base),
+ sizeof(uint64_t) * TARGET_GDT_ENTRIES);
+ OBJECT(new_cpu)->free = OBJECT(cpu)->free;
+#endif
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
@@ -243,7 +284,7 @@ static void handle_arg_dfilter(const char *arg)
static void handle_arg_log_filename(const char *arg)
{
- qemu_set_log_filename(arg, &error_fatal);
+ last_log_filename = arg;
}
static void handle_arg_set_env(const char *arg)
@@ -297,11 +338,11 @@ static void handle_arg_ld_prefix(const char *arg)
static void handle_arg_pagesize(const char *arg)
{
- qemu_host_page_size = atoi(arg);
- if (qemu_host_page_size == 0 ||
- (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
- fprintf(stderr, "page size must be a power of two\n");
- exit(EXIT_FAILURE);
+ unsigned size, want = qemu_real_host_page_size();
+
+ if (qemu_strtoui(arg, NULL, 10, &size) || size != want) {
+ warn_report("Deprecated page size option cannot "
+ "change host page size (%u)", want);
}
}
@@ -324,10 +365,7 @@ static void handle_arg_cpu(const char *arg)
{
cpu_model = strdup(arg);
if (cpu_model == NULL || is_help_option(cpu_model)) {
- /* XXX: implement xxx_cpu_list for targets that still miss it */
-#if defined(cpu_list)
- cpu_list();
-#endif
+ list_cpus();
exit(EXIT_FAILURE);
}
}
@@ -342,7 +380,9 @@ static void handle_arg_reserved_va(const char *arg)
{
char *p;
int shift = 0;
- reserved_va = strtoul(arg, &p, 0);
+ unsigned long val;
+
+ val = strtoul(arg, &p, 0);
switch (*p) {
case 'k':
case 'K':
@@ -356,10 +396,10 @@ static void handle_arg_reserved_va(const char *arg)
break;
}
if (shift) {
- unsigned long unshifted = reserved_va;
+ unsigned long unshifted = val;
p++;
- reserved_va <<= shift;
- if (reserved_va >> shift != unshifted) {
+ val <<= shift;
+ if (val >> shift != unshifted) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
@@ -368,11 +408,13 @@ static void handle_arg_reserved_va(const char *arg)
fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
exit(EXIT_FAILURE);
}
+ /* The representation is size - 1, with 0 remaining "default". */
+ reserved_va = val ? val - 1 : 0;
}
-static void handle_arg_singlestep(const char *arg)
+static void handle_arg_one_insn_per_tb(const char *arg)
{
- singlestep = 1;
+ opt_one_insn_per_tb = true;
}
static void handle_arg_strace(const char *arg)
@@ -399,6 +441,16 @@ static void handle_arg_abi_call0(const char *arg)
}
#endif
+static void handle_arg_perfmap(const char *arg)
+{
+ perf_enable_perfmap();
+}
+
+static void handle_arg_jitdump(const char *arg)
+{
+ perf_enable_jitdump();
+}
+
static QemuPluginList plugins = QTAILQ_HEAD_INITIALIZER(plugins);
#ifdef CONFIG_PLUGIN
@@ -450,9 +502,10 @@ static const struct qemu_argument arg_table[] = {
{"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
"logfile", "write logs to 'logfile' (default stderr)"},
{"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
- "pagesize", "set the host page size to 'pagesize'"},
- {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
- "", "run in singlestep mode"},
+ "pagesize", "deprecated change to host page size"},
+ {"one-insn-per-tb",
+ "QEMU_ONE_INSN_PER_TB", false, handle_arg_one_insn_per_tb,
+ "", "run with one guest instruction per emulated TB"},
{"strace", "QEMU_STRACE", false, handle_arg_strace,
"", "log system calls"},
{"seed", "QEMU_RAND_SEED", true, handle_arg_seed,
@@ -469,6 +522,10 @@ static const struct qemu_argument arg_table[] = {
{"xtensa-abi-call0", "QEMU_XTENSA_ABI_CALL0", false, handle_arg_abi_call0,
"", "assume CALL0 Xtensa ABI"},
#endif
+ {"perfmap", "QEMU_PERFMAP", false, handle_arg_perfmap,
+ "", "Generate a /tmp/perf-${pid}.map file for perf"},
+ {"jitdump", "QEMU_JITDUMP", false, handle_arg_jitdump,
+ "", "Generate a jit-${pid}.dump file for perf"},
{NULL, NULL, false, NULL, NULL, NULL}
};
@@ -629,7 +686,7 @@ int main(int argc, char **argv, char **envp)
int i;
int ret;
int execfd;
- int log_mask;
+ int host_page_size;
unsigned long max_reserved_va;
bool preserve_argv0;
@@ -640,8 +697,16 @@ int main(int argc, char **argv, char **envp)
envlist = envlist_create();
- /* add current environment into the list */
+ /*
+ * add current environment into the list
+ * envlist_setenv adds to the front of the list; to preserve environ
+ * order add from back to front
+ */
for (wrk = environ; *wrk != NULL; wrk++) {
+ continue;
+ }
+ while (wrk != environ) {
+ wrk--;
(void) envlist_setenv(envlist, *wrk);
}
@@ -651,7 +716,8 @@ int main(int argc, char **argv, char **envp)
struct rlimit lim;
if (getrlimit(RLIMIT_STACK, &lim) == 0
&& lim.rlim_cur != RLIM_INFINITY
- && lim.rlim_cur == (target_long)lim.rlim_cur) {
+ && lim.rlim_cur == (target_long)lim.rlim_cur
+ && lim.rlim_cur > guest_stack_size) {
guest_stack_size = lim.rlim_cur;
}
}
@@ -663,11 +729,9 @@ int main(int argc, char **argv, char **envp)
optind = parse_args(argc, argv);
- log_mask = last_log_mask | (enable_strace ? LOG_STRACE : 0);
- if (log_mask) {
- qemu_log_needs_buffers();
- qemu_set_log(log_mask);
- }
+ qemu_set_log_filename_flags(last_log_filename,
+ last_log_mask | (enable_strace * LOG_STRACE),
+ &error_fatal);
if (!trace_init_backends()) {
exit(1);
@@ -700,6 +764,11 @@ int main(int argc, char **argv, char **envp)
}
}
+ /* Resolve executable file name to full path name */
+ if (realpath(exec_path, real_exec_path)) {
+ exec_path = real_exec_path;
+ }
+
/*
* get binfmt_misc flags
*/
@@ -719,15 +788,28 @@ int main(int argc, char **argv, char **envp)
}
cpu_type = parse_cpu_option(cpu_model);
- /* init tcg before creating CPUs and to get qemu_host_page_size */
+ /* init tcg before creating CPUs */
{
- AccelClass *ac = ACCEL_GET_CLASS(current_accel());
+ AccelState *accel = current_accel();
+ AccelClass *ac = ACCEL_GET_CLASS(accel);
accel_init_interfaces(ac);
+ object_property_set_bool(OBJECT(accel), "one-insn-per-tb",
+ opt_one_insn_per_tb, &error_abort);
ac->init_machine(NULL);
}
+
+ /*
+ * Finalize page size before creating CPUs.
+ * This will do nothing if !TARGET_PAGE_BITS_VARY.
+ * The most efficient setting is to match the host.
+ */
+ host_page_size = qemu_real_host_page_size();
+ set_preferred_target_page_bits(ctz32(host_page_size));
+ finalize_target_page_bits();
+
cpu = cpu_create(cpu_type);
- env = cpu->env_ptr;
+ env = cpu_env(cpu);
cpu_reset(cpu);
thread_cpu = cpu;
@@ -739,18 +821,64 @@ int main(int argc, char **argv, char **envp)
*/
max_reserved_va = MAX_RESERVED_VA(cpu);
if (reserved_va != 0) {
+ if ((reserved_va + 1) % host_page_size) {
+ char *s = size_to_str(host_page_size);
+ fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
+ g_free(s);
+ exit(EXIT_FAILURE);
+ }
if (max_reserved_va && reserved_va > max_reserved_va) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
} else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
- /*
- * reserved_va must be aligned with the host page size
- * as it is used with mmap()
- */
- reserved_va = max_reserved_va & qemu_host_page_mask;
+ /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
+ reserved_va = max_reserved_va;
+ }
+
+ /*
+ * Temporarily disable
+ * "comparison is always false due to limited range of data type"
+ * due to comparison between (possible) uint64_t and uintptr_t.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+
+ /*
+ * Select an initial value for task_unmapped_base that is in range.
+ */
+ if (reserved_va) {
+ if (TASK_UNMAPPED_BASE < reserved_va) {
+ task_unmapped_base = TASK_UNMAPPED_BASE;
+ } else {
+ /* The most common default formula is TASK_SIZE / 3. */
+ task_unmapped_base = TARGET_PAGE_ALIGN(reserved_va / 3);
+ }
+ } else if (TASK_UNMAPPED_BASE < UINTPTR_MAX) {
+ task_unmapped_base = TASK_UNMAPPED_BASE;
+ } else {
+ /* 32-bit host: pick something medium size. */
+ task_unmapped_base = 0x10000000;
+ }
+ mmap_next_start = task_unmapped_base;
+
+ /* Similarly for elf_et_dyn_base. */
+ if (reserved_va) {
+ if (ELF_ET_DYN_BASE < reserved_va) {
+ elf_et_dyn_base = ELF_ET_DYN_BASE;
+ } else {
+ /* The most common default formula is TASK_SIZE / 3 * 2. */
+ elf_et_dyn_base = TARGET_PAGE_ALIGN(reserved_va / 3) * 2;
+ }
+ } else if (ELF_ET_DYN_BASE < UINTPTR_MAX) {
+ elf_et_dyn_base = ELF_ET_DYN_BASE;
+ } else {
+ /* 32-bit host: pick something medium size. */
+ elf_et_dyn_base = 0x18000000;
}
+#pragma GCC diagnostic pop
+
{
Error *err = NULL;
if (seed_optarg != NULL) {
@@ -778,7 +906,7 @@ int main(int argc, char **argv, char **envp)
if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
unsigned long tmp;
if (fscanf(fp, "%lu", &tmp) == 1 && tmp != 0) {
- mmap_min_addr = tmp;
+ mmap_min_addr = MAX(tmp, host_page_size);
qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n",
mmap_min_addr);
}
@@ -791,7 +919,7 @@ int main(int argc, char **argv, char **envp)
* If we're in a chroot with no /proc, fall back to 1 page.
*/
if (mmap_min_addr == 0) {
- mmap_min_addr = qemu_host_page_size;
+ mmap_min_addr = host_page_size;
qemu_log_mask(CPU_LOG_PAGE,
"host mmap_min_addr=0x%lx (fallback)\n",
mmap_min_addr);
@@ -801,11 +929,7 @@ int main(int argc, char **argv, char **envp)
* Prepare copy of argv vector for target.
*/
target_argc = argc - optind;
- target_argv = calloc(target_argc + 1, sizeof (char *));
- if (target_argv == NULL) {
- (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
- exit(EXIT_FAILURE);
- }
+ target_argv = g_new0(char *, target_argc + 1);
/*
* If argv0 is specified (using '-0' switch) we replace
@@ -844,21 +968,34 @@ int main(int argc, char **argv, char **envp)
g_free(target_environ);
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
- qemu_log("guest_base %p\n", (void *)guest_base);
- log_page_dump("binary load");
-
- qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
- qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
- qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", info->start_code);
- qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", info->start_data);
- qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
- qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", info->start_stack);
- qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
- qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
- qemu_log("argv_start 0x" TARGET_ABI_FMT_lx "\n", info->arg_start);
- qemu_log("env_start 0x" TARGET_ABI_FMT_lx "\n",
- info->arg_end + (abi_ulong)sizeof(abi_ulong));
- qemu_log("auxv_start 0x" TARGET_ABI_FMT_lx "\n", info->saved_auxv);
+ FILE *f = qemu_log_trylock();
+ if (f) {
+ fprintf(f, "guest_base %p\n", (void *)guest_base);
+ fprintf(f, "page layout changed following binary load\n");
+ page_dump(f);
+
+ fprintf(f, "end_code 0x" TARGET_ABI_FMT_lx "\n",
+ info->end_code);
+ fprintf(f, "start_code 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_code);
+ fprintf(f, "start_data 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_data);
+ fprintf(f, "end_data 0x" TARGET_ABI_FMT_lx "\n",
+ info->end_data);
+ fprintf(f, "start_stack 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_stack);
+ fprintf(f, "brk 0x" TARGET_ABI_FMT_lx "\n",
+ info->brk);
+ fprintf(f, "entry 0x" TARGET_ABI_FMT_lx "\n",
+ info->entry);
+ fprintf(f, "argv_start 0x" TARGET_ABI_FMT_lx "\n",
+ info->argv);
+ fprintf(f, "env_start 0x" TARGET_ABI_FMT_lx "\n",
+ info->envp);
+ fprintf(f, "auxv_start 0x" TARGET_ABI_FMT_lx "\n",
+ info->saved_auxv);
+ qemu_log_unlock(f);
+ }
}
target_set_brk(info->brk);
@@ -868,7 +1005,7 @@ int main(int argc, char **argv, char **envp)
/* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
generating the prologue until now so that the prologue can take
the real value of GUEST_BASE into account. */
- tcg_prologue_init(tcg_ctx);
+ tcg_prologue_init();
target_cpu_copy_regs(env, regs);
@@ -878,8 +1015,13 @@ int main(int argc, char **argv, char **envp)
gdbstub);
exit(EXIT_FAILURE);
}
- gdb_handlesig(cpu, 0);
+ gdb_handlesig(cpu, 0, NULL, NULL, 0);
}
+
+#ifdef CONFIG_SEMIHOSTING
+ qemu_semihosting_guestfd_init();
+#endif
+
cpu_loop(env);
/* never exits */
return 0;
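
Among the main.c changes, init_task_state() now records the task's start time in clock ticks since boot (used when linux-user later synthesizes the guest's /proc/self/stat starttime field). A self-contained sketch of that calculation using only POSIX/Linux interfaces; the nanoseconds-per-second constant is written out here rather than taken from QEMU's headers:

    /* Sketch of the start_boottime capture added to init_task_state() above:
     * elapsed time since boot, expressed in scheduler clock ticks. */
    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define NSEC_PER_SEC 1000000000ULL  /* stands in for NANOSECONDS_PER_SECOND */

    static uint64_t boottime_ticks(void)
    {
        long ticks_per_sec = sysconf(_SC_CLK_TCK);
        struct timespec bt;
        uint64_t ticks = 0;

        if (ticks_per_sec > 0 && clock_gettime(CLOCK_BOOTTIME, &bt) == 0) {
            ticks = bt.tv_sec * (uint64_t)ticks_per_sec;
            ticks += bt.tv_nsec * (uint64_t)ticks_per_sec / NSEC_PER_SEC;
        }
        return ticks;
    }

    int main(void)
    {
        printf("boot ticks: %llu\n", (unsigned long long)boottime_ticks());
        return 0;
    }
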
diff --git a/linux-user/meson.build b/linux-user/meson.build
index 9549f81682..bc41e8c3bc 100644
--- a/linux-user/meson.build
+++ b/linux-user/meson.build
@@ -1,3 +1,12 @@
+if not have_linux_user
+ subdir_done()
+endif
+
+linux_user_ss = ss.source_set()
+
+common_user_inc += include_directories('include/host/' / host_arch)
+common_user_inc += include_directories('include')
+
linux_user_ss.add(files(
'elfload.c',
'exit.c',
@@ -5,33 +14,43 @@ linux_user_ss.add(files(
'linuxload.c',
'main.c',
'mmap.c',
- 'safe-syscall.S',
'signal.c',
'strace.c',
'syscall.c',
+ 'thunk.c',
'uaccess.c',
'uname.c',
))
linux_user_ss.add(rt)
+linux_user_ss.add(libdw)
linux_user_ss.add(when: 'TARGET_HAS_BFLT', if_true: files('flatload.c'))
linux_user_ss.add(when: 'TARGET_I386', if_true: files('vm86.c'))
linux_user_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', if_true: files('semihost.c'))
-
syscall_nr_generators = {}
+gen_vdso_exe = executable('gen-vdso', 'gen-vdso.c',
+ native: true, build_by_default: false)
+gen_vdso = generator(gen_vdso_exe, output: '@BASENAME@.c.inc',
+ arguments: ['-o', '@OUTPUT@', '@EXTRA_ARGS@', '@INPUT@'])
+
+subdir('aarch64')
subdir('alpha')
subdir('arm')
subdir('hppa')
subdir('i386')
+subdir('loongarch64')
subdir('m68k')
subdir('microblaze')
subdir('mips64')
subdir('mips')
subdir('ppc')
+subdir('riscv')
subdir('s390x')
subdir('sh4')
subdir('sparc')
subdir('x86_64')
subdir('xtensa')
+
+specific_ss.add_all(when: 'CONFIG_LINUX_USER', if_true: linux_user_ss)
diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c
index 52222eb93f..212e62d0a6 100644
--- a/linux-user/microblaze/cpu_loop.c
+++ b/linux-user/microblaze/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -26,10 +25,9 @@
void cpu_loop(CPUMBState *env)
{
+ int trapnr, ret, si_code, sig;
CPUState *cs = env_cpu(env);
- int trapnr, ret;
- target_siginfo_t info;
-
+
while (1) {
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
@@ -37,19 +35,9 @@ void cpu_loop(CPUMBState *env)
process_queued_cpu_work(cs);
switch (trapnr) {
- case 0xaa:
- {
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = 0;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
- break;
case EXCP_INTERRUPT:
- /* just indicate that signals should be handled asap */
- break;
+ /* just indicate that signals should be handled asap */
+ break;
case EXCP_SYSCALL:
/* Return address is 4 bytes after the call. */
env->regs[14] += 4;
@@ -63,10 +51,10 @@ void cpu_loop(CPUMBState *env)
env->regs[9],
env->regs[10],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
/* Wind back to before the syscall. */
env->pc -= 4;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->regs[3] = ret;
}
/* All syscall exits result in guest r14 being equal to the
@@ -77,6 +65,7 @@ void cpu_loop(CPUMBState *env)
*/
env->regs[14] = env->pc;
break;
+
case EXCP_HW_EXCP:
env->regs[17] = env->pc + 4;
if (env->iflags & D_FLAG) {
@@ -84,42 +73,47 @@ void cpu_loop(CPUMBState *env)
env->pc -= 4;
/* FIXME: if branch was immed, replay the imm as well. */
}
-
env->iflags &= ~(IMM_FLAG | D_FLAG);
-
switch (env->esr & 31) {
- case ESR_EC_DIVZERO:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_FLTDIV;
- info._sifields._sigfault._addr = 0;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- case ESR_EC_FPU:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- if (env->fsr & FSR_IO) {
- info.si_code = TARGET_FPE_FLTINV;
- }
- if (env->fsr & FSR_DZ) {
- info.si_code = TARGET_FPE_FLTDIV;
- }
- info._sifields._sigfault._addr = 0;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- default:
- fprintf(stderr, "Unhandled hw-exception: 0x%x\n",
- env->esr & ESR_EC_MASK);
- cpu_dump_state(cs, stderr, 0);
- exit(EXIT_FAILURE);
- break;
+ case ESR_EC_DIVZERO:
+ sig = TARGET_SIGFPE;
+ si_code = TARGET_FPE_INTDIV;
+ break;
+ case ESR_EC_FPU:
+ /*
+ * Note that the kernel passes along fsr as si_code
+ * if there's no recognized bit set. Possibly this
+ * implies that si_code is 0, but follow the structure.
+ */
+ sig = TARGET_SIGFPE;
+ si_code = env->fsr;
+ if (si_code & FSR_IO) {
+ si_code = TARGET_FPE_FLTINV;
+ } else if (si_code & FSR_OF) {
+ si_code = TARGET_FPE_FLTOVF;
+ } else if (si_code & FSR_UF) {
+ si_code = TARGET_FPE_FLTUND;
+ } else if (si_code & FSR_DZ) {
+ si_code = TARGET_FPE_FLTDIV;
+ } else if (si_code & FSR_DO) {
+ si_code = TARGET_FPE_FLTRES;
+ }
+ break;
+ case ESR_EC_PRIVINSN:
+ sig = SIGILL;
+ si_code = ILL_PRVOPC;
+ break;
+ default:
+ fprintf(stderr, "Unhandled hw-exception: 0x%x\n",
+ env->esr & ESR_EC_MASK);
+ cpu_dump_state(cs, stderr, 0);
+ exit(EXIT_FAILURE);
}
+ force_sig_fault(sig, si_code, env->pc);
break;
+
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
diff --git a/linux-user/microblaze/signal.c b/linux-user/microblaze/signal.c
index b822679d18..f6d47d76ff 100644
--- a/linux-user/microblaze/signal.c
+++ b/linux-user/microblaze/signal.c
@@ -147,7 +147,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
return;
}
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
__put_user(0, &frame->uc.tuc_flags);
__put_user(0, &frame->uc.tuc_link);
@@ -161,17 +161,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
/* Kernel does not use SA_RESTORER. */
- /* addi r12, r0, __NR_sigreturn */
- __put_user(0x31800000U | TARGET_NR_rt_sigreturn, frame->tramp + 0);
- /* brki r14, 0x8 */
- __put_user(0xb9cc0008U, frame->tramp + 1);
-
/*
* Return from sighandler will jump to the tramp.
* Negative 8 offset because return is rtsd r15, 8
*/
- env->regs[15] =
- frame_addr + offsetof(struct target_rt_sigframe, tramp) - 8;
+ env->regs[15] = default_rt_sigreturn - 8;
/* Set up registers for signal handler */
env->regs[1] = frame_addr;
@@ -213,10 +207,26 @@ long do_rt_sigreturn(CPUMBState *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
+ assert(tramp != NULL);
+
+ /*
+ * addi r12, r0, __NR_rt_sigreturn
+ * brki r14, 0x8
+ */
+ __put_user(0x31800000U | TARGET_NR_rt_sigreturn, tramp);
+ __put_user(0xb9cc0008U, tramp + 1);
+
+ default_rt_sigreturn = sigtramp_page;
+ unlock_user(tramp, sigtramp_page, 8);
}
diff --git a/linux-user/microblaze/target_flat.h b/linux-user/microblaze/target_flat.h
new file mode 100644
index 0000000000..bc83224cea
--- /dev/null
+++ b/linux-user/microblaze/target_flat.h
@@ -0,0 +1 @@
+#include "../generic/target_flat.h"
diff --git a/linux-user/microblaze/target_mman.h b/linux-user/microblaze/target_mman.h
new file mode 100644
index 0000000000..6b3dd54f89
--- /dev/null
+++ b/linux-user/microblaze/target_mman.h
@@ -0,0 +1,12 @@
+/*
+ * arch/microblaze/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+ * TASK_SIZE CONFIG_KERNEL_START
+ * CONFIG_KERNEL_START 0xc0000000 (default in Kconfig)
+ */
+#define TASK_UNMAPPED_BASE 0x48000000
+
+/* arch/microblaze/include/uapi/asm/elf.h */
+#define ELF_ET_DYN_BASE 0x08000000
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/microblaze/target_prctl.h b/linux-user/microblaze/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/microblaze/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/microblaze/target_proc.h b/linux-user/microblaze/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/microblaze/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/microblaze/target_resource.h b/linux-user/microblaze/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/microblaze/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h
index 1c326296de..7dc5c45f00 100644
--- a/linux-user/microblaze/target_signal.h
+++ b/linux-user/microblaze/target_signal.h
@@ -1,24 +1,8 @@
#ifndef MICROBLAZE_TARGET_SIGNAL_H
#define MICROBLAZE_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* MICROBLAZE_TARGET_SIGNAL_H */
diff --git a/linux-user/microblaze/target_structs.h b/linux-user/microblaze/target_structs.h
index d08f6a53a8..3a06f373c3 100644
--- a/linux-user/microblaze/target_structs.h
+++ b/linux-user/microblaze/target_structs.h
@@ -1,58 +1 @@
-/*
- * MicroBlaze specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef MICROBLAZE_TARGET_STRUCTS_H
-#define MICROBLAZE_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/microblaze/target_syscall.h b/linux-user/microblaze/target_syscall.h
index 7f653db34f..43362a1664 100644
--- a/linux-user/microblaze/target_syscall.h
+++ b/linux-user/microblaze/target_syscall.h
@@ -49,7 +49,6 @@ struct target_pt_regs {
};
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c
index cb03fb066b..462387a073 100644
--- a/linux-user/mips/cpu_loop.c
+++ b/linux-user/mips/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -40,36 +39,32 @@ enum {
BRK_DIVZERO = 7
};
-static int do_break(CPUMIPSState *env, target_siginfo_t *info,
- unsigned int code)
+static void do_tr_or_bp(CPUMIPSState *env, unsigned int code, bool trap)
{
- int ret = -1;
+ target_ulong pc = env->active_tc.PC;
switch (code) {
case BRK_OVERFLOW:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, pc);
+ break;
case BRK_DIVZERO:
- info->si_signo = TARGET_SIGFPE;
- info->si_errno = 0;
- info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
- queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);
- ret = 0;
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, pc);
break;
default:
- info->si_signo = TARGET_SIGTRAP;
- info->si_errno = 0;
- queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);
- ret = 0;
+ if (trap) {
+ force_sig(TARGET_SIGTRAP);
+ } else {
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, pc);
+ }
break;
}
-
- return ret;
}
void cpu_loop(CPUMIPSState *env)
{
CPUState *cs = env_cpu(env);
- target_siginfo_t info;
- int trapnr;
+ int trapnr, si_code;
+ unsigned int code;
abi_long ret;
# ifdef TARGET_ABI_MIPSO32
unsigned int syscall_num;
@@ -141,11 +136,11 @@ done_syscall:
env->active_tc.gpr[8], env->active_tc.gpr[9],
env->active_tc.gpr[10], env->active_tc.gpr[11]);
# endif /* O32 */
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->active_tc.PC -= 4;
break;
}
- if (ret == -TARGET_QEMU_ESIGRETURN) {
+ if (ret == -QEMU_ESIGRETURN) {
/* Returning from a successful sigreturn syscall.
Avoid clobbering register state. */
break;
@@ -158,162 +153,57 @@ done_syscall:
}
env->active_tc.gpr[2] = ret;
break;
- case EXCP_TLBL:
- case EXCP_TLBS:
- case EXCP_AdEL:
- case EXCP_AdES:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->CP0_BadVAddr;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case EXCP_CpU:
case EXCP_RI:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = 0;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ case EXCP_DSPDIS:
+ force_sig(TARGET_SIGILL);
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- case EXCP_DSPDIS:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLOPC;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT,
+ env->active_tc.PC);
break;
case EXCP_FPE:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_FLTUNK;
+ si_code = TARGET_FPE_FLTUNK;
if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {
- info.si_code = TARGET_FPE_FLTINV;
+ si_code = TARGET_FPE_FLTINV;
} else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_DIV0) {
- info.si_code = TARGET_FPE_FLTDIV;
+ si_code = TARGET_FPE_FLTDIV;
} else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_OVERFLOW) {
- info.si_code = TARGET_FPE_FLTOVF;
+ si_code = TARGET_FPE_FLTOVF;
} else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_UNDERFLOW) {
- info.si_code = TARGET_FPE_FLTUND;
+ si_code = TARGET_FPE_FLTUND;
} else if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INEXACT) {
- info.si_code = TARGET_FPE_FLTRES;
+ si_code = TARGET_FPE_FLTRES;
}
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGFPE, si_code, env->active_tc.PC);
+ break;
+ case EXCP_OVERFLOW:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->active_tc.PC);
break;
/* The code below was inspired by the MIPS Linux kernel trap
* handling code in arch/mips/kernel/traps.c.
*/
case EXCP_BREAK:
- {
- abi_ulong trap_instr;
- unsigned int code;
-
- if (env->hflags & MIPS_HFLAG_M16) {
- if (env->insn_flags & ASE_MICROMIPS) {
- /* microMIPS mode */
- ret = get_user_u16(trap_instr, env->active_tc.PC);
- if (ret != 0) {
- goto error;
- }
-
- if ((trap_instr >> 10) == 0x11) {
- /* 16-bit instruction */
- code = trap_instr & 0xf;
- } else {
- /* 32-bit instruction */
- abi_ulong instr_lo;
-
- ret = get_user_u16(instr_lo,
- env->active_tc.PC + 2);
- if (ret != 0) {
- goto error;
- }
- trap_instr = (trap_instr << 16) | instr_lo;
- code = ((trap_instr >> 6) & ((1 << 20) - 1));
- /* Unfortunately, microMIPS also suffers from
- the old assembler bug... */
- if (code >= (1 << 10)) {
- code >>= 10;
- }
- }
- } else {
- /* MIPS16e mode */
- ret = get_user_u16(trap_instr, env->active_tc.PC);
- if (ret != 0) {
- goto error;
- }
- code = (trap_instr >> 6) & 0x3f;
- }
- } else {
- ret = get_user_u32(trap_instr, env->active_tc.PC);
- if (ret != 0) {
- goto error;
- }
-
- /* As described in the original Linux kernel code, the
- * below checks on 'code' are to work around an old
- * assembly bug.
- */
- code = ((trap_instr >> 6) & ((1 << 20) - 1));
- if (code >= (1 << 10)) {
- code >>= 10;
- }
- }
-
- if (do_break(env, &info, code) != 0) {
- goto error;
- }
+ /*
+ * As described in the original Linux kernel code, the below
+ * checks on 'code' are to work around an old assembly bug.
+ */
+ code = env->error_code;
+ if (code >= (1 << 10)) {
+ code >>= 10;
}
+ do_tr_or_bp(env, code, false);
break;
case EXCP_TRAP:
- {
- abi_ulong trap_instr;
- unsigned int code = 0;
-
- if (env->hflags & MIPS_HFLAG_M16) {
- /* microMIPS mode */
- abi_ulong instr[2];
-
- ret = get_user_u16(instr[0], env->active_tc.PC) ||
- get_user_u16(instr[1], env->active_tc.PC + 2);
-
- trap_instr = (instr[0] << 16) | instr[1];
- } else {
- ret = get_user_u32(trap_instr, env->active_tc.PC);
- }
-
- if (ret != 0) {
- goto error;
- }
-
- /* The immediate versions don't provide a code. */
- if (!(trap_instr & 0xFC000000)) {
- if (env->hflags & MIPS_HFLAG_M16) {
- /* microMIPS mode */
- code = ((trap_instr >> 12) & ((1 << 4) - 1));
- } else {
- code = ((trap_instr >> 6) & ((1 << 10) - 1));
- }
- }
-
- if (do_break(env, &info, code) != 0) {
- goto error;
- }
- }
+ do_tr_or_bp(env, env->error_code, true);
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
break;
default:
-error:
EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
abort();
}
@@ -324,7 +214,7 @@ error:
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
int i;
@@ -402,7 +292,10 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
env->CP0_Status |= (1 << CP0St_FR);
env->hflags |= MIPS_HFLAG_F64;
}
- } else if (!prog_req.fre && !prog_req.frdefault &&
+ } else if (prog_req.fr1) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ env->hflags |= MIPS_HFLAG_F64;
+ } else if (!prog_req.fre && !prog_req.frdefault &&
!prog_req.fr1 && !prog_req.single && !prog_req.soft) {
fprintf(stderr, "qemu: Can't find a matching FPU mode\n");
exit(1);
diff --git a/linux-user/mips/signal.c b/linux-user/mips/signal.c
index d174b3453c..d69a5d73dd 100644
--- a/linux-user/mips/signal.c
+++ b/linux-user/mips/signal.c
@@ -87,10 +87,8 @@ struct target_rt_sigframe {
};
/* Install trampoline to jump back from signal handler */
-static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
+static void install_sigtramp(uint32_t *tramp, unsigned int syscall)
{
- int err = 0;
-
/*
* Set up the return code ...
*
@@ -100,7 +98,6 @@ static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
__put_user(0x24020000 + syscall, tramp + 0);
__put_user(0x0000000c , tramp + 1);
- return err;
}
static inline void setup_sigcontext(CPUMIPSState *regs,
@@ -212,8 +209,6 @@ void setup_frame(int sig, struct target_sigaction * ka,
goto give_sigsegv;
}
- install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
-
setup_sigcontext(regs, &frame->sf_sc);
for(i = 0; i < TARGET_NSIG_WORDS; i++) {
@@ -234,7 +229,7 @@ void setup_frame(int sig, struct target_sigaction * ka,
regs->active_tc.gpr[ 5] = 0;
regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
regs->active_tc.gpr[29] = frame_addr;
- regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
+ regs->active_tc.gpr[31] = default_sigreturn;
/* The original kernel code sets CP0_EPC to the handler
* since it returns to userland using eret
* we cannot do this here, and we must set PC directly */
@@ -286,11 +281,11 @@ long do_sigreturn(CPUMIPSState *regs)
/* I am not sure this is right, but it seems to work
* maybe a problem with nested signals ? */
regs->CP0_EPC = 0;
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
# endif /* O32 */
@@ -308,9 +303,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
goto give_sigsegv;
}
- install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
-
- tswap_siginfo(&frame->rs_info, info);
+ frame->rs_info = *info;
__put_user(0, &frame->rs_uc.tuc_flags);
__put_user(0, &frame->rs_uc.tuc_link);
@@ -318,7 +311,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
- for(i = 0; i < TARGET_NSIG_WORDS; i++) {
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
}
@@ -338,11 +331,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
env->active_tc.gpr[ 6] = frame_addr
+ offsetof(struct target_rt_sigframe, rs_uc);
env->active_tc.gpr[29] = frame_addr;
- env->active_tc.gpr[31] = frame_addr
- + offsetof(struct target_rt_sigframe, rs_code);
- /* The original kernel code sets CP0_EPC to the handler
- * since it returns to userland using eret
- * we cannot do this here, and we must set PC directly */
+ env->active_tc.gpr[31] = default_rt_sigreturn;
+
+ /*
+ * The original kernel code sets CP0_EPC to the handler
+ * since it returns to userland using eret
+ * we cannot do this here, and we must set PC directly
+ */
env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
mips_set_hflags_isa_mode_from_pc(env);
unlock_user_struct(frame, frame_addr, 1);
@@ -376,9 +371,25 @@ long do_rt_sigreturn(CPUMIPSState *env)
/* I am not sure this is right, but it seems to work
* maybe a problem with nested signals ? */
env->CP0_EPC = 0;
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
+ assert(tramp != NULL);
+
+#ifdef TARGET_ARCH_HAS_SETUP_FRAME
+ default_sigreturn = sigtramp_page;
+ install_sigtramp(tramp, TARGET_NR_sigreturn);
+#endif
+
+ default_rt_sigreturn = sigtramp_page + 8;
+ install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn);
+
+ unlock_user(tramp, sigtramp_page, 2 * 8);
}
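
As on m68k and microblaze, the MIPS frames now return through a shared sigtramp page: setup_frame() and setup_rt_frame() load $ra from default_sigreturn / default_rt_sigreturn rather than pointing it at the on-stack sf_code/rs_code words. A sketch of the 2 x 8-byte layout written by setup_sigtramp(), with placeholder o32-style syscall numbers standing in for the real TARGET_NR_* values:

    /* Sketch of the MIPS sigtramp page laid out by setup_sigtramp() above:
     * two 8-byte stubs of the form "li v0, NR ; syscall". */
    #include <stdint.h>
    #include <stdio.h>

    #define NR_SIGRETURN     4119  /* placeholder */
    #define NR_RT_SIGRETURN  4193  /* placeholder */

    static void install_stub(uint32_t *tramp, unsigned int nr)
    {
        tramp[0] = 0x24020000 + nr;  /* addiu $v0, $zero, nr  (li v0, nr) */
        tramp[1] = 0x0000000c;       /* syscall */
    }

    int main(void)
    {
        uint32_t page[4];

        install_stub(&page[0], NR_SIGRETURN);     /* default_sigreturn     */
        install_stub(&page[2], NR_RT_SIGRETURN);  /* default_rt_sigreturn  */

        for (int i = 0; i < 4; i++) {
            printf("%08x\n", (unsigned)page[i]);
        }
        return 0;
    }
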
diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h
index a98c9bd6ad..b965e86b2b 100644
--- a/linux-user/mips/target_elf.h
+++ b/linux-user/mips/target_elf.h
@@ -15,6 +15,9 @@ static inline const char *cpu_get_model(uint32_t eflags)
if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) {
return "R5900";
}
+ if (eflags & EF_MIPS_NAN2008) {
+ return "P5600";
+ }
return "24Kf";
}
#endif
diff --git a/linux-user/mips/target_mman.h b/linux-user/mips/target_mman.h
new file mode 100644
index 0000000000..b84fe1e8a8
--- /dev/null
+++ b/linux-user/mips/target_mman.h
@@ -0,0 +1,29 @@
+#ifndef MIPS_TARGET_MMAN_H
+#define MIPS_TARGET_MMAN_H
+
+#define TARGET_PROT_SEM 0x10
+
+#define TARGET_MAP_NORESERVE 0x0400
+#define TARGET_MAP_ANONYMOUS 0x0800
+#define TARGET_MAP_GROWSDOWN 0x1000
+#define TARGET_MAP_DENYWRITE 0x2000
+#define TARGET_MAP_EXECUTABLE 0x4000
+#define TARGET_MAP_LOCKED 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+
+/*
+ * arch/mips/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+ */
+#define TASK_UNMAPPED_BASE \
+ TARGET_PAGE_ALIGN((1ull << TARGET_VIRT_ADDR_SPACE_BITS) / 3)
+
+/* arch/mips/include/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2)
+
+#include "../generic/target_mman.h"
+
+#endif
diff --git a/linux-user/mips/target_prctl.h b/linux-user/mips/target_prctl.h
new file mode 100644
index 0000000000..e028333db9
--- /dev/null
+++ b/linux-user/mips/target_prctl.h
@@ -0,0 +1,88 @@
+/*
+ * MIPS specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef MIPS_TARGET_PRCTL_H
+#define MIPS_TARGET_PRCTL_H
+
+static abi_long do_prctl_get_fp_mode(CPUArchState *env)
+{
+ abi_long ret = 0;
+
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ ret |= PR_FP_MODE_FR;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+ ret |= PR_FP_MODE_FRE;
+ }
+ return ret;
+}
+#define do_prctl_get_fp_mode do_prctl_get_fp_mode
+
+static abi_long do_prctl_set_fp_mode(CPUArchState *env, abi_long arg2)
+{
+ bool old_fr = env->CP0_Status & (1 << CP0St_FR);
+ bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
+ bool new_fr = arg2 & PR_FP_MODE_FR;
+ bool new_fre = arg2 & PR_FP_MODE_FRE;
+ const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+
+ /* If nothing to change, return right away, successfully. */
+ if (old_fr == new_fr && old_fre == new_fre) {
+ return 0;
+ }
+ /* Check the value is valid */
+ if (arg2 & ~known_bits) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ /* Setting FRE without FR is not supported. */
+ if (new_fre && !new_fr) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
+ /* FR1 is not supported */
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
+ && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
+ /* cannot set FR=0 */
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
+ /* Cannot set FRE=1 */
+ return -TARGET_EOPNOTSUPP;
+ }
+
+ int i;
+ fpr_t *fpr = env->active_fpu.fpr;
+ for (i = 0; i < 32 ; i += 2) {
+ if (!old_fr && new_fr) {
+ fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
+ } else if (old_fr && !new_fr) {
+ fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
+ }
+ }
+
+ if (new_fr) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ env->hflags |= MIPS_HFLAG_F64;
+ } else {
+ env->CP0_Status &= ~(1 << CP0St_FR);
+ env->hflags &= ~MIPS_HFLAG_F64;
+ }
+ if (new_fre) {
+ env->CP0_Config5 |= (1 << CP0C5_FRE);
+ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+ env->hflags |= MIPS_HFLAG_FRE;
+ }
+ } else {
+ env->CP0_Config5 &= ~(1 << CP0C5_FRE);
+ env->hflags &= ~MIPS_HFLAG_FRE;
+ }
+
+ return 0;
+}
+#define do_prctl_set_fp_mode do_prctl_set_fp_mode
+
+#endif /* MIPS_TARGET_PRCTL_H */
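
The new target_prctl.h carries the MIPS FR/FRE floating-point mode switching that the deleted TARGET_PR_* constants in target_syscall.h used to back. From the guest's point of view this is the standard PR_GET_FP_MODE / PR_SET_FP_MODE prctl interface; a hypothetical guest-side test, with the constants defined locally in case the libc headers lack them:

    /* Hypothetical MIPS guest program exercising the FP-mode prctls handled
     * by do_prctl_get_fp_mode() / do_prctl_set_fp_mode() above. */
    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_FP_MODE
    #define PR_SET_FP_MODE  45
    #define PR_GET_FP_MODE  46
    #define PR_FP_MODE_FR   (1 << 0)
    #define PR_FP_MODE_FRE  (1 << 1)
    #endif

    int main(void)
    {
        int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

        if (mode < 0) {
            perror("PR_GET_FP_MODE");
            return 1;
        }
        printf("FR=%d FRE=%d\n",
               !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

        /* Request FR=1; fails with EOPNOTSUPP if the FPU cannot provide it. */
        if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0) {
            perror("PR_SET_FP_MODE");
            return 1;
        }
        return 0;
    }
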
diff --git a/linux-user/mips/target_proc.h b/linux-user/mips/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/mips/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/mips/target_resource.h b/linux-user/mips/target_resource.h
new file mode 100644
index 0000000000..6d131b041d
--- /dev/null
+++ b/linux-user/mips/target_resource.h
@@ -0,0 +1,24 @@
+#ifndef MIPS_TARGET_RESOURCE_H
+#define MIPS_TARGET_RESOURCE_H
+
+#include "../generic/target_resource.h"
+
+#undef TARGET_RLIM_INFINITY
+#define TARGET_RLIM_INFINITY 0x7fffffffUL
+
+#undef TARGET_RLIMIT_NOFILE
+#define TARGET_RLIMIT_NOFILE 5
+
+#undef TARGET_RLIMIT_AS
+#define TARGET_RLIMIT_AS 6
+
+#undef TARGET_RLIMIT_RSS
+#define TARGET_RLIMIT_RSS 7
+
+#undef TARGET_RLIMIT_NPROC
+#define TARGET_RLIMIT_NPROC 8
+
+#undef TARGET_RLIMIT_MEMLOCK
+#define TARGET_RLIMIT_MEMLOCK 9
+
+#endif
diff --git a/linux-user/mips/target_signal.h b/linux-user/mips/target_signal.h
index d521765f6b..fa542c1f4e 100644
--- a/linux-user/mips/target_signal.h
+++ b/linux-user/mips/target_signal.h
@@ -67,12 +67,12 @@ typedef struct target_sigaltstack {
#define TARGET_SA_RESTORER 0x04000000 /* Only for O32 */
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
#if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
#define TARGET_ARCH_HAS_SETUP_FRAME
#endif
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
diff --git a/linux-user/mips/target_syscall.h b/linux-user/mips/target_syscall.h
index f59057493a..08ead67810 100644
--- a/linux-user/mips/target_syscall.h
+++ b/linux-user/mips/target_syscall.h
@@ -24,7 +24,6 @@ struct target_pt_regs {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
@@ -36,10 +35,4 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env)
return 0x40000;
}
-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
#endif /* MIPS_TARGET_SYSCALL_H */
diff --git a/linux-user/mips64/target_mman.h b/linux-user/mips64/target_mman.h
new file mode 100644
index 0000000000..7bdc47d902
--- /dev/null
+++ b/linux-user/mips64/target_mman.h
@@ -0,0 +1 @@
+#include "../mips/target_mman.h"
diff --git a/linux-user/mips64/target_prctl.h b/linux-user/mips64/target_prctl.h
new file mode 100644
index 0000000000..18da9ae619
--- /dev/null
+++ b/linux-user/mips64/target_prctl.h
@@ -0,0 +1 @@
+#include "../mips/target_prctl.h"
diff --git a/linux-user/mips64/target_proc.h b/linux-user/mips64/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/mips64/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/mips64/target_resource.h b/linux-user/mips64/target_resource.h
new file mode 100644
index 0000000000..fe29002a12
--- /dev/null
+++ b/linux-user/mips64/target_resource.h
@@ -0,0 +1 @@
+#include "../mips/target_resource.h"
diff --git a/linux-user/mips64/target_signal.h b/linux-user/mips64/target_signal.h
index d857c55e4c..b05098f7f6 100644
--- a/linux-user/mips64/target_signal.h
+++ b/linux-user/mips64/target_signal.h
@@ -65,7 +65,6 @@ typedef struct target_sigaltstack {
#define TARGET_SA_RESETHAND 0x80000000
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
@@ -76,4 +75,6 @@ typedef struct target_sigaltstack {
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
#define TARGET_ARCH_HAS_SETUP_FRAME
#endif
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* MIPS64_TARGET_SIGNAL_H */
diff --git a/linux-user/mips64/target_syscall.h b/linux-user/mips64/target_syscall.h
index cd1e1b4969..358dc2d64c 100644
--- a/linux-user/mips64/target_syscall.h
+++ b/linux-user/mips64/target_syscall.h
@@ -21,7 +21,6 @@ struct target_pt_regs {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
@@ -33,10 +32,4 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env)
return 0x40000;
}
-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
#endif /* MIPS64_TARGET_SYSCALL_H */
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index c125031b90..be3b9a68eb 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -17,11 +17,18 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
+#include "target_mman.h"
+#include "qemu/interval-tree.h"
+
+#ifdef TARGET_ARM
+#include "target/arm/cpu-features.h"
+#endif
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
@@ -35,6 +42,7 @@ void mmap_lock(void)
void mmap_unlock(void)
{
+ assert(mmap_lock_count > 0);
if (--mmap_lock_count == 0) {
pthread_mutex_unlock(&mmap_mutex);
}
@@ -55,10 +63,49 @@ void mmap_fork_start(void)
void mmap_fork_end(int child)
{
- if (child)
+ if (child) {
pthread_mutex_init(&mmap_mutex, NULL);
- else
+ } else {
pthread_mutex_unlock(&mmap_mutex);
+ }
+}
+
+/* Protected by mmap_lock. */
+static IntervalTreeRoot shm_regions;
+
+static void shm_region_add(abi_ptr start, abi_ptr last)
+{
+ IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);
+
+ i->start = start;
+ i->last = last;
+ interval_tree_insert(i, &shm_regions);
+}
+
+static abi_ptr shm_region_find(abi_ptr start)
+{
+ IntervalTreeNode *i;
+
+ for (i = interval_tree_iter_first(&shm_regions, start, start); i;
+ i = interval_tree_iter_next(i, start, start)) {
+ if (i->start == start) {
+ return i->last;
+ }
+ }
+ return 0;
+}
+
+static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
+{
+ IntervalTreeNode *i, *n;
+
+ for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
+ n = interval_tree_iter_next(i, start, last);
+ if (i->start >= start && i->last <= last) {
+ interval_tree_remove(i, &shm_regions);
+ g_free(i);
+ }
+ }
}
/*
@@ -67,24 +114,11 @@ void mmap_fork_end(int child)
* Return 0 if the target prot bitmask is invalid, otherwise
* the internal qemu page_flags (which will include PAGE_VALID).
*/
-static int validate_prot_to_pageflags(int *host_prot, int prot)
+static int validate_prot_to_pageflags(int prot)
{
int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
int page_flags = (prot & PAGE_BITS) | PAGE_VALID;
- /*
- * For the host, we need not pass anything except read/write/exec.
- * While PROT_SEM is allowed by all hosts, it is also ignored, so
- * don't bother transforming guest bit to host bit. Any other
- * target-specific prot bits will not be understood by the host
- * and will need to be encoded into page_flags for qemu emulation.
- *
- * Pages that are executable by the guest will never be executed
- * by the host, but the host will need to be able to read them.
- */
- *host_prot = (prot & (PROT_READ | PROT_WRITE))
- | (prot & PROT_EXEC ? PROT_READ : 0);
-
#ifdef TARGET_AARCH64
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
@@ -105,206 +139,256 @@ static int validate_prot_to_pageflags(int *host_prot, int prot)
page_flags |= PAGE_MTE;
}
}
+#elif defined(TARGET_HPPA)
+ valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif
return prot & ~valid ? 0 : page_flags;
}
+/*
+ * For the host, we need not pass anything except read/write/exec.
+ * While PROT_SEM is allowed by all hosts, it is also ignored, so
+ * don't bother transforming guest bit to host bit. Any other
+ * target-specific prot bits will not be understood by the host
+ * and will need to be encoded into page_flags for qemu emulation.
+ *
+ * Pages that are executable by the guest will never be executed
+ * by the host, but the host will need to be able to read them.
+ */
+static int target_to_host_prot(int prot)
+{
+ return (prot & (PROT_READ | PROT_WRITE)) |
+ (prot & PROT_EXEC ? PROT_READ : 0);
+}
+
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
- abi_ulong end, host_start, host_end, addr;
- int prot1, ret, page_flags, host_prot;
+ int host_page_size = qemu_real_host_page_size();
+ abi_ulong starts[3];
+ abi_ulong lens[3];
+ int prots[3];
+ abi_ulong host_start, host_last, last;
+ int prot1, ret, page_flags, nranges;
trace_target_mprotect(start, len, target_prot);
if ((start & ~TARGET_PAGE_MASK) != 0) {
return -TARGET_EINVAL;
}
- page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
+ page_flags = validate_prot_to_pageflags(target_prot);
if (!page_flags) {
return -TARGET_EINVAL;
}
+ if (len == 0) {
+ return 0;
+ }
len = TARGET_PAGE_ALIGN(len);
- end = start + len;
if (!guest_range_valid_untagged(start, len)) {
return -TARGET_ENOMEM;
}
- if (len == 0) {
- return 0;
- }
+
+ last = start + len - 1;
+ host_start = start & -host_page_size;
+ host_last = ROUND_UP(last, host_page_size) - 1;
+ nranges = 0;
mmap_lock();
- host_start = start & qemu_host_page_mask;
- host_end = HOST_PAGE_ALIGN(end);
- if (start > host_start) {
- /* handle host page containing start */
- prot1 = host_prot;
- for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
- }
- if (host_end == host_start + qemu_host_page_size) {
- for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
- }
- end = host_end;
+
+ if (host_last - host_start < host_page_size) {
+ /* Single host page contains all guest pages: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a);
}
- ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
- prot1 & PAGE_BITS);
- if (ret != 0) {
- goto error;
+ for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a + 1);
}
- host_start += qemu_host_page_size;
- }
- if (end < host_end) {
- prot1 = host_prot;
- for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
- prot1 |= page_get_flags(addr);
+ starts[nranges] = host_start;
+ lens[nranges] = host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ } else {
+ if (host_start < start) {
+ /* Host page contains more than one guest page: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a);
+ }
+ /* If the resulting sum differs, create a new range. */
+ if (prot1 != target_prot) {
+ starts[nranges] = host_start;
+ lens[nranges] = host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ host_start += host_page_size;
+ }
}
- ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
- qemu_host_page_size, prot1 & PAGE_BITS);
- if (ret != 0) {
- goto error;
+
+ if (last < host_last) {
+ /* Host page contains more than one guest page: sum the prot. */
+ prot1 = target_prot;
+ for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
+ prot1 |= page_get_flags(a + 1);
+ }
+ /* If the resulting sum differs, create a new range. */
+ if (prot1 != target_prot) {
+ host_last -= host_page_size;
+ starts[nranges] = host_last + 1;
+ lens[nranges] = host_page_size;
+ prots[nranges] = prot1;
+ nranges++;
+ }
+ }
+
+ /* Create a range for the middle, if any remains. */
+ if (host_start < host_last) {
+ starts[nranges] = host_start;
+ lens[nranges] = host_last - host_start + 1;
+ prots[nranges] = target_prot;
+ nranges++;
}
- host_end -= qemu_host_page_size;
}
- /* handle the pages in the middle */
- if (host_start < host_end) {
- ret = mprotect(g2h_untagged(host_start),
- host_end - host_start, host_prot);
+ for (int i = 0; i < nranges; ++i) {
+ ret = mprotect(g2h_untagged(starts[i]), lens[i],
+ target_to_host_prot(prots[i]));
if (ret != 0) {
goto error;
}
}
- page_set_flags(start, start + len, page_flags);
- mmap_unlock();
- return 0;
-error:
+
+ page_set_flags(start, last, page_flags);
+ ret = 0;
+
+ error:
mmap_unlock();
return ret;
}
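(Editorial illustration, not part of the patch; page sizes are assumed.) The range splitting above is easiest to follow with numbers: with 64 KiB host pages and 4 KiB target pages, an mprotect of the guest range [0x11000, 0x38fff] produces a head host page and a tail host page whose protections are the union with the neighbouring guest pages, plus an aligned middle that takes the new protection directly. A standalone sketch of just the arithmetic:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    const uint64_t host_page = 0x10000;          /* assumed 64 KiB host page  */
    uint64_t start = 0x11000, len = 0x28000;     /* 4 KiB-aligned guest range */
    uint64_t last = start + len - 1;
    uint64_t host_start = start & ~(host_page - 1);
    uint64_t host_last = ((last + host_page - 1) & ~(host_page - 1)) - 1;

    /* Head and tail host pages are only partly covered: their protection
     * must be the union of the new prot and that of the guest pages
     * sharing them (page_get_flags() in the real code). */
    printf("head  : [0x%" PRIx64 ", 0x%" PRIx64 "]\n",
           host_start, host_start + host_page - 1);
    printf("tail  : [0x%" PRIx64 ", 0x%" PRIx64 "]\n",
           host_last - host_page + 1, host_last);
    /* The fully covered middle, if any, gets the new prot as-is. */
    if (host_start + host_page <= host_last - host_page) {
        printf("middle: [0x%" PRIx64 ", 0x%" PRIx64 "]\n",
               host_start + host_page, host_last - host_page);
    }
    return 0;
}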
-/* map an incomplete host page */
-static int mmap_frag(abi_ulong real_start,
- abi_ulong start, abi_ulong end,
- int prot, int flags, int fd, abi_ulong offset)
+/*
+ * Perform munmap on behalf of the target, with host parameters.
+ * If reserved_va, we must replace the memory reservation.
+ */
+static int do_munmap(void *addr, size_t len)
{
- abi_ulong real_end, addr;
+ if (reserved_va) {
+ void *ptr = mmap(addr, len, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS
+ | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+ return ptr == addr ? 0 : -1;
+ }
+ return munmap(addr, len);
+}
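(Editorial aside, not part of the patch.) With reserved_va the whole guest address space was reserved up front, so "unmapping" must not hand the pages back to the host, or an unrelated host allocation could later land inside the guest's reservation. Re-mapping the range as inaccessible anonymous memory keeps the hole reserved. A standalone sketch of the same trick with plain POSIX calls (sizes are arbitrary):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 1 << 20;                         /* a 1 MiB "reservation" */
    void *res = mmap(NULL, len, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (res == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* "Unmap" the first 64 KiB without returning it to the host:
     * overwrite it in place with a fresh PROT_NONE mapping. */
    void *p = mmap(res, 64 * 1024, PROT_NONE,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                   -1, 0);
    if (p != res) {
        perror("mmap MAP_FIXED");
        return 1;
    }
    printf("reservation kept at %p\n", res);
    return 0;
}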
+
+/*
+ * Map an incomplete host page.
+ *
+ * Here be dragons. This case will not work if there is an existing
+ * overlapping host page, which is file mapped, and for which the mapping
+ * is beyond the end of the file. In that case, we will see SIGBUS when
+ * trying to write a portion of this page.
+ *
+ * FIXME: Work around this with a temporary signal handler and longjmp.
+ */
+static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
+ int prot, int flags, int fd, off_t offset)
+{
+ int host_page_size = qemu_real_host_page_size();
+ abi_ulong real_last;
void *host_start;
- int prot1, prot_new;
+ int prot_old, prot_new;
+ int host_prot_old, host_prot_new;
- real_end = real_start + qemu_host_page_size;
+ if (!(flags & MAP_ANONYMOUS)
+ && (flags & MAP_TYPE) == MAP_SHARED
+ && (prot & PROT_WRITE)) {
+ /*
+ * msync() won't work with the partial page, so we return an
+ * error if write is possible while it is a shared mapping.
+ */
+ errno = EINVAL;
+ return false;
+ }
+
+ real_last = real_start + host_page_size - 1;
host_start = g2h_untagged(real_start);
- /* get the protection of the target pages outside the mapping */
- prot1 = 0;
- for(addr = real_start; addr < real_end; addr++) {
- if (addr < start || addr >= end)
- prot1 |= page_get_flags(addr);
+ /* Get the protection of the target pages outside the mapping. */
+ prot_old = 0;
+ for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot_old |= page_get_flags(a);
+ }
+ for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
+ prot_old |= page_get_flags(a);
}
- if (prot1 == 0) {
- /* no page was there, so we allocate one */
- void *p = mmap(host_start, qemu_host_page_size, prot,
+ if (prot_old == 0) {
+ /*
+ * Since !(prot_old & PAGE_VALID), there were no guest pages
+ * outside of the fragment we need to map. Allocate a new host
+ * page to cover, discarding whatever else may have been present.
+ */
+ void *p = mmap(host_start, host_page_size,
+ target_to_host_prot(prot),
flags | MAP_ANONYMOUS, -1, 0);
- if (p == MAP_FAILED)
- return -1;
- prot1 = prot;
+ if (p != host_start) {
+ if (p != MAP_FAILED) {
+ do_munmap(p, host_page_size);
+ errno = EEXIST;
+ }
+ return false;
+ }
+ prot_old = prot;
}
- prot1 &= PAGE_BITS;
+ prot_new = prot | prot_old;
- prot_new = prot | prot1;
- if (!(flags & MAP_ANONYMOUS)) {
- /* msync() won't work here, so we return an error if write is
- possible while it is a shared mapping */
- if ((flags & MAP_TYPE) == MAP_SHARED &&
- (prot & PROT_WRITE))
- return -1;
+ host_prot_old = target_to_host_prot(prot_old);
+ host_prot_new = target_to_host_prot(prot_new);
- /* adjust protection to be able to read */
- if (!(prot1 & PROT_WRITE))
- mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
-
- /* read the corresponding file data */
- if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
- return -1;
+ /* Adjust protection to be able to write. */
+ if (!(host_prot_old & PROT_WRITE)) {
+ host_prot_old |= PROT_WRITE;
+ mprotect(host_start, host_page_size, host_prot_old);
+ }
- /* put final protection */
- if (prot_new != (prot1 | PROT_WRITE))
- mprotect(host_start, qemu_host_page_size, prot_new);
+ /* Read or zero the new guest pages. */
+ if (flags & MAP_ANONYMOUS) {
+ memset(g2h_untagged(start), 0, last - start + 1);
} else {
- if (prot_new != prot1) {
- mprotect(host_start, qemu_host_page_size, prot_new);
- }
- if (prot_new & PROT_WRITE) {
- memset(g2h_untagged(start), 0, end - start);
+ if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
+ return false;
}
}
- return 0;
-}
-#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
-#ifdef TARGET_AARCH64
-# define TASK_UNMAPPED_BASE 0x5500000000
-#else
-# define TASK_UNMAPPED_BASE (1ul << 38)
-#endif
-#else
-# define TASK_UNMAPPED_BASE 0x40000000
-#endif
-abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
+ /* Put the final protection. */
+ if (host_prot_new != host_prot_old) {
+ mprotect(host_start, host_page_size, host_prot_new);
+ }
+ return true;
+}
-unsigned long last_brk;
+abi_ulong task_unmapped_base;
+abi_ulong elf_et_dyn_base;
+abi_ulong mmap_next_start;
-/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
- of guest address space. */
+/*
+ * Subroutine of mmap_find_vma, used when we have pre-allocated
+ * a chunk of guest address space.
+ */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong align)
{
- abi_ulong addr, end_addr, incr = qemu_host_page_size;
- int prot;
- bool looped = false;
-
- if (size > reserved_va) {
- return (abi_ulong)-1;
- }
-
- /* Note that start and size have already been aligned by mmap_find_vma. */
+ target_ulong ret;
- end_addr = start + size;
- if (start > reserved_va - size) {
- /* Start at the top of the address space. */
- end_addr = ((reserved_va - size) & -align) + size;
- looped = true;
+ ret = page_find_range_empty(start, reserved_va, size, align);
+ if (ret == -1 && start > mmap_min_addr) {
+ /* Restart at the beginning of the address space. */
+ ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
}
- /* Search downward from END_ADDR, checking to see if a page is in use. */
- addr = end_addr;
- while (1) {
- addr -= incr;
- if (addr > end_addr) {
- if (looped) {
- /* Failure. The entire address space has been searched. */
- return (abi_ulong)-1;
- }
- /* Re-start at the top of the address space. */
- addr = end_addr = ((reserved_va - size) & -align) + size;
- looped = true;
- } else {
- prot = page_get_flags(addr);
- if (prot) {
- /* Page in use. Restart below this page. */
- addr = end_addr = ((addr - size) & -align) + size;
- } else if (addr && addr + size == end_addr) {
- /* Success! All pages between ADDR and END_ADDR are free. */
- if (start == mmap_next_start) {
- mmap_next_start = addr;
- }
- return addr;
- }
- }
- }
+ return ret;
}
/*
@@ -315,21 +399,21 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
*/
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
+ int host_page_size = qemu_real_host_page_size();
void *ptr, *prev;
abi_ulong addr;
int wrapped, repeat;
- align = MAX(align, qemu_host_page_size);
+ align = MAX(align, host_page_size);
/* If 'start' == 0, then a default start address is used. */
if (start == 0) {
start = mmap_next_start;
} else {
- start &= qemu_host_page_mask;
+ start &= -host_page_size;
}
start = ROUND_UP(start, align);
-
- size = HOST_PAGE_ALIGN(size);
+ size = ROUND_UP(size, host_page_size);
if (reserved_va) {
return mmap_find_vma_reserved(start, size, align);
@@ -348,15 +432,17 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
* - shmat() with SHM_REMAP flag
*/
ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
- MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
/* ENOMEM, if host address space has no memory */
if (ptr == MAP_FAILED) {
return (abi_ulong)-1;
}
- /* Count the number of sequential returns of the same address.
- This is used to modify the search algorithm below. */
+ /*
+ * Count the number of sequential returns of the same address.
+ * This is used to modify the search algorithm below.
+ */
repeat = (ptr == prev ? repeat + 1 : 0);
if (h2g_valid(ptr + size - 1)) {
@@ -364,7 +450,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
if ((addr & (align - 1)) == 0) {
/* Success. */
- if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
+ if (start == mmap_next_start && addr >= task_unmapped_base) {
mmap_next_start = addr + size;
}
return addr;
@@ -373,14 +459,18 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
/* The address is not properly aligned for the target. */
switch (repeat) {
case 0:
- /* Assume the result that the kernel gave us is the
- first with enough free space, so start again at the
- next higher target page. */
+ /*
+ * Assume the result that the kernel gave us is the
+ * first with enough free space, so start again at the
+ * next higher target page.
+ */
addr = ROUND_UP(addr, align);
break;
case 1:
- /* Sometimes the kernel decides to perform the allocation
- at the top end of memory instead. */
+ /*
+ * Sometimes the kernel decides to perform the allocation
+ * at the top end of memory instead.
+ */
addr &= -align;
break;
case 2:
@@ -393,8 +483,10 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
break;
}
} else {
- /* Since the result the kernel gave didn't fit, start
- again at low memory. If any repetition, fail. */
+ /*
+ * Since the result the kernel gave didn't fit, start
+ * again at low memory. If any repetition, fail.
+ */
addr = (repeat ? -1 : 0);
}
@@ -409,8 +501,10 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
return (abi_ulong)-1;
}
wrapped = 1;
- /* Don't actually use 0 when wrapping, instead indicate
- that we'd truly like an allocation in low memory. */
+ /*
+ * Don't actually use 0 when wrapping, instead indicate
+ * that we'd truly like an allocation in low memory.
+ */
addr = (mmap_min_addr > TARGET_PAGE_SIZE
? TARGET_PAGE_ALIGN(mmap_min_addr)
: TARGET_PAGE_SIZE);
@@ -420,38 +514,442 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
}
}
+/*
+ * Record a successful mmap within the user-exec interval tree.
+ */
+static abi_long mmap_end(abi_ulong start, abi_ulong last,
+ abi_ulong passthrough_start,
+ abi_ulong passthrough_last,
+ int flags, int page_flags)
+{
+ if (flags & MAP_ANONYMOUS) {
+ page_flags |= PAGE_ANON;
+ }
+ page_flags |= PAGE_RESET;
+ if (passthrough_start > passthrough_last) {
+ page_set_flags(start, last, page_flags);
+ } else {
+ if (start < passthrough_start) {
+ page_set_flags(start, passthrough_start - 1, page_flags);
+ }
+ page_set_flags(passthrough_start, passthrough_last,
+ page_flags | PAGE_PASSTHROUGH);
+ if (passthrough_last < last) {
+ page_set_flags(passthrough_last + 1, last, page_flags);
+ }
+ }
+ shm_region_rm_complete(start, last);
+ trace_target_mmap_complete(start);
+ if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
+ FILE *f = qemu_log_trylock();
+ if (f) {
+ fprintf(f, "page layout changed following mmap\n");
+ page_dump(f);
+ qemu_log_unlock(f);
+ }
+ }
+ return start;
+}
+
+/*
+ * Special case host page size == target page size,
+ * where there are no edge conditions.
+ */
+static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
+ int host_prot, int flags, int page_flags,
+ int fd, off_t offset)
+{
+ void *p, *want_p = g2h_untagged(start);
+ abi_ulong last;
+
+ p = mmap(want_p, len, host_prot, flags, fd, offset);
+ if (p == MAP_FAILED) {
+ return -1;
+ }
+ /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
+ if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
+ do_munmap(p, len);
+ errno = EEXIST;
+ return -1;
+ }
+
+ start = h2g(p);
+ last = start + len - 1;
+ return mmap_end(start, last, start, last, flags, page_flags);
+}
+
+/*
+ * Special case host page size < target page size.
+ *
+ * The two special cases are increased guest alignment, and mapping
+ * past the end of a file.
+ *
+ * When mapping files into a memory area larger than the file,
+ * accesses to pages beyond the file size will cause a SIGBUS.
+ *
+ * For example, if mmapping a file of 100 bytes on a host with 4K
+ * pages emulating a target with 8K pages, the target expects to
+ * be able to access the first 8K. But the host will trap us on
+ * any access beyond 4K.
+ *
+ * When emulating a target with a larger page size than the host's,
+ * we may need to truncate file maps at EOF and add extra anonymous
+ * pages up to the target's page boundary.
+ *
+ * This workaround only works for files that do not change.
+ * If the file is later extended (e.g. ftruncate), the SIGBUS
+ * vanishes and the proper behaviour is that changes within the
+ * anon page should be reflected in the file.
+ *
+ * However, this case is rather common with executable images,
+ * so the workaround is important for even trivial tests, whereas
+ * the mmap of a file being extended is less common.
+ */
+static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
+ int mmap_flags, int page_flags, int fd,
+ off_t offset, int host_page_size)
+{
+ void *p, *want_p = g2h_untagged(start);
+ off_t fileend_adj = 0;
+ int flags = mmap_flags;
+ abi_ulong last, pass_last;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ struct stat sb;
+
+ if (fstat(fd, &sb) == -1) {
+ return -1;
+ }
+ if (offset >= sb.st_size) {
+ /*
+ * The entire map is beyond the end of the file.
+ * Transform it to an anonymous mapping.
+ */
+ flags |= MAP_ANONYMOUS;
+ fd = -1;
+ offset = 0;
+ } else if (offset + len > sb.st_size) {
+ /*
+ * A portion of the map is beyond the end of the file.
+ * Truncate the file portion of the allocation.
+ */
+ fileend_adj = offset + len - sb.st_size;
+ }
+ }
+
+ if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
+ if (fileend_adj) {
+ p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
+ } else {
+ p = mmap(want_p, len, host_prot, flags, fd, offset);
+ }
+ if (p != want_p) {
+ if (p != MAP_FAILED) {
+ /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
+ do_munmap(p, len);
+ errno = EEXIST;
+ }
+ return -1;
+ }
+
+ if (fileend_adj) {
+ void *t = mmap(p, len - fileend_adj, host_prot,
+ (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
+ fd, offset);
+
+ if (t == MAP_FAILED) {
+ int save_errno = errno;
+
+ /*
+ * We failed a map over the top of the successful anonymous
+ * mapping above. The only failure mode is running out of VMAs,
+ * and there's nothing that we can do to detect that earlier.
+ * If we have replaced an existing mapping with MAP_FIXED,
+ * then we cannot properly recover. It's a coin toss whether
+ * it would be better to exit or continue here.
+ */
+ if (!(flags & MAP_FIXED_NOREPLACE) &&
+ !page_check_range_empty(start, start + len - 1)) {
+ qemu_log("QEMU target_mmap late failure: %s",
+ strerror(save_errno));
+ }
+
+ do_munmap(want_p, len);
+ errno = save_errno;
+ return -1;
+ }
+ }
+ } else {
+ size_t host_len, part_len;
+
+ /*
+ * Take care to align the host memory. Perform a larger anonymous
+ * allocation and extract the aligned portion. Remap the file on
+ * top of that.
+ */
+ host_len = len + TARGET_PAGE_SIZE - host_page_size;
+ p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
+ if (p == MAP_FAILED) {
+ return -1;
+ }
+
+ part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
+ if (part_len) {
+ part_len = TARGET_PAGE_SIZE - part_len;
+ do_munmap(p, part_len);
+ p += part_len;
+ host_len -= part_len;
+ }
+ if (len < host_len) {
+ do_munmap(p + len, host_len - len);
+ }
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ void *t = mmap(p, len - fileend_adj, host_prot,
+ flags | MAP_FIXED, fd, offset);
+
+ if (t == MAP_FAILED) {
+ int save_errno = errno;
+ do_munmap(p, len);
+ errno = save_errno;
+ return -1;
+ }
+ }
+
+ start = h2g(p);
+ }
+
+ last = start + len - 1;
+ if (fileend_adj) {
+ pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
+ } else {
+ pass_last = last;
+ }
+ return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
+}
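(Editorial worked example, not part of the patch; sizes are assumed.) Take the case from the comment above: a 100-byte file, a 4 KiB-page host, an 8 KiB-page target, offset 0. The guest is entitled to touch all 8 KiB of its page, but the host would raise SIGBUS past the first 4 KiB of a file mapping, so only the file-backed head is kept and the rest is anonymous. The fileend_adj arithmetic, in isolation:

#include <stdio.h>

int main(void)
{
    long host_page = 4096, target_page = 8192;    /* assumed page sizes */
    long st_size = 100, offset = 0, len = target_page;

    /* Bytes of the requested mapping lying beyond end-of-file. */
    long fileend_adj = offset + len > st_size ? offset + len - st_size : 0;

    /* The file-backed part is rounded up to whole host pages; the tail is
     * mapped anonymously so the guest never sees SIGBUS inside its page. */
    long file_part = (len - fileend_adj + host_page - 1) / host_page * host_page;
    printf("file-backed: %ld bytes, anonymous tail: %ld bytes\n",
           file_part, len - file_part);
    return 0;
}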
+
+/*
+ * Special case host page size > target page size.
+ *
+ * The two special cases are address and file offsets that are valid
+ * for the guest that cannot be directly represented by the host.
+ */
+static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
+ int target_prot, int host_prot,
+ int flags, int page_flags, int fd,
+ off_t offset, int host_page_size)
+{
+ void *p, *want_p = g2h_untagged(start);
+ off_t host_offset = offset & -host_page_size;
+ abi_ulong last, real_start, real_last;
+ bool misaligned_offset = false;
+ size_t host_len;
+
+ if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
+ /*
+ * Adjust the offset to something representable on the host.
+ */
+ host_len = len + offset - host_offset;
+ p = mmap(want_p, host_len, host_prot, flags, fd, host_offset);
+ if (p == MAP_FAILED) {
+ return -1;
+ }
+
+ /* Update start to the file position at offset. */
+ p += offset - host_offset;
+
+ start = h2g(p);
+ last = start + len - 1;
+ return mmap_end(start, last, start, last, flags, page_flags);
+ }
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ misaligned_offset = (start ^ offset) & (host_page_size - 1);
+
+ /*
+ * The fallback for misalignment is a private mapping + read.
+ * This carries none of the semantics required of MAP_SHARED.
+ */
+ if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) {
+ errno = EINVAL;
+ return -1;
+ }
+ }
+
+ last = start + len - 1;
+ real_start = start & -host_page_size;
+ real_last = ROUND_UP(last, host_page_size) - 1;
+
+ /*
+ * Handle the start and end of the mapping.
+ */
+ if (real_start < start) {
+ abi_ulong real_page_last = real_start + host_page_size - 1;
+ if (last <= real_page_last) {
+ /* Entire allocation a subset of one host page. */
+ if (!mmap_frag(real_start, start, last, target_prot,
+ flags, fd, offset)) {
+ return -1;
+ }
+ return mmap_end(start, last, -1, 0, flags, page_flags);
+ }
+
+ if (!mmap_frag(real_start, start, real_page_last, target_prot,
+ flags, fd, offset)) {
+ return -1;
+ }
+ real_start = real_page_last + 1;
+ }
+
+ if (last < real_last) {
+ abi_ulong real_page_start = real_last - host_page_size + 1;
+ if (!mmap_frag(real_page_start, real_page_start, last,
+ target_prot, flags, fd,
+ offset + real_page_start - start)) {
+ return -1;
+ }
+ real_last = real_page_start - 1;
+ }
+
+ if (real_start > real_last) {
+ return mmap_end(start, last, -1, 0, flags, page_flags);
+ }
+
+ /*
+ * Handle the middle of the mapping.
+ */
+
+ host_len = real_last - real_start + 1;
+ want_p += real_start - start;
+
+ if (flags & MAP_ANONYMOUS) {
+ p = mmap(want_p, host_len, host_prot, flags, -1, 0);
+ } else if (!misaligned_offset) {
+ p = mmap(want_p, host_len, host_prot, flags, fd,
+ offset + real_start - start);
+ } else {
+ p = mmap(want_p, host_len, host_prot | PROT_WRITE,
+ flags | MAP_ANONYMOUS, -1, 0);
+ }
+ if (p != want_p) {
+ if (p != MAP_FAILED) {
+ do_munmap(p, host_len);
+ errno = EEXIST;
+ }
+ return -1;
+ }
+
+ if (misaligned_offset) {
+ /* TODO: The read could be short. */
+ if (pread(fd, p, host_len, offset + real_start - start) != host_len) {
+ do_munmap(p, host_len);
+ return -1;
+ }
+ if (!(host_prot & PROT_WRITE)) {
+ mprotect(p, host_len, host_prot);
+ }
+ }
+
+ return mmap_end(start, last, -1, 0, flags, page_flags);
+}
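(Editorial sketch, not part of the patch; page sizes and addresses are assumed.) The misalignment test above checks whether the guest address and the file offset agree modulo the host page size; only then can the host mmap place the file bytes where the guest expects them, otherwise the private-mapping-plus-pread fallback is used. With 64 KiB host pages:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    const uint64_t host_page = 0x10000;   /* assumed 64 KiB host page     */
    uint64_t start = 0x411000;            /* guest address, 4 KiB aligned */
    uint64_t offset = 0x2000;             /* file offset, 4 KiB aligned   */

    /* Same test as mmap_h_gt_g(): the low host-page bits must match. */
    int misaligned = ((start ^ offset) & (host_page - 1)) != 0;

    printf("start %% 64K = 0x%" PRIx64 ", offset %% 64K = 0x%" PRIx64 " -> %s\n",
           start & (host_page - 1), offset & (host_page - 1),
           misaligned ? "fallback: MAP_PRIVATE + pread()" : "direct host mmap");
    return 0;
}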
+
+static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
+ int target_prot, int flags, int page_flags,
+ int fd, off_t offset)
+{
+ int host_page_size = qemu_real_host_page_size();
+ int host_prot;
+
+ /*
+ * For reserved_va, we are in full control of the allocation.
+ * Find a suitable hole and convert to MAP_FIXED.
+ */
+ if (reserved_va) {
+ if (flags & MAP_FIXED_NOREPLACE) {
+ /* Validate that the chosen range is empty. */
+ if (!page_check_range_empty(start, start + len - 1)) {
+ errno = EEXIST;
+ return -1;
+ }
+ flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
+ } else if (!(flags & MAP_FIXED)) {
+ abi_ulong real_start = start & -host_page_size;
+ off_t host_offset = offset & -host_page_size;
+ size_t real_len = len + offset - host_offset;
+ abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);
+
+ start = mmap_find_vma(real_start, real_len, align);
+ if (start == (abi_ulong)-1) {
+ errno = ENOMEM;
+ return -1;
+ }
+ start += offset - host_offset;
+ flags |= MAP_FIXED;
+ }
+ }
+
+ host_prot = target_to_host_prot(target_prot);
+
+ if (host_page_size == TARGET_PAGE_SIZE) {
+ return mmap_h_eq_g(start, len, host_prot, flags,
+ page_flags, fd, offset);
+ } else if (host_page_size < TARGET_PAGE_SIZE) {
+ return mmap_h_lt_g(start, len, host_prot, flags,
+ page_flags, fd, offset, host_page_size);
+ } else {
+ return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
+ page_flags, fd, offset, host_page_size);
+ }
+}
+
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
- int flags, int fd, abi_ulong offset)
+ int flags, int fd, off_t offset)
{
- abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
- int page_flags, host_prot;
+ abi_long ret;
+ int page_flags;
- mmap_lock();
trace_target_mmap(start, len, target_prot, flags, fd, offset);
if (!len) {
errno = EINVAL;
- goto fail;
+ return -1;
}
- page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
+ page_flags = validate_prot_to_pageflags(target_prot);
if (!page_flags) {
errno = EINVAL;
- goto fail;
+ return -1;
}
/* Also check for overflows... */
len = TARGET_PAGE_ALIGN(len);
- if (!len) {
+ if (!len || len != (size_t)len) {
errno = ENOMEM;
- goto fail;
+ return -1;
}
if (offset & ~TARGET_PAGE_MASK) {
errno = EINVAL;
- goto fail;
+ return -1;
}
+ if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
+ if (start & ~TARGET_PAGE_MASK) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (!guest_range_valid_untagged(start, len)) {
+ errno = ENOMEM;
+ return -1;
+ }
+ }
+
+ mmap_lock();
+
+ ret = target_mmap__locked(start, len, target_prot, flags,
+ page_flags, fd, offset);
+
+ mmap_unlock();
/*
* If we're mapping shared memory, ensure we generate code for parallel
@@ -459,7 +957,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
* supported by the host -- anything that requires EXCP_ATOMIC will not
* be atomic with respect to an external process.
*/
- if (flags & MAP_SHARED) {
+ if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
CPUState *cpu = thread_cpu;
if (!(cpu->tcg_cflags & CF_PARALLEL)) {
cpu->tcg_cflags |= CF_PARALLEL;
@@ -467,278 +965,91 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
}
}
- real_start = start & qemu_host_page_mask;
- host_offset = offset & qemu_host_page_mask;
-
- /* If the user is asking for the kernel to find a location, do that
- before we truncate the length for mapping files below. */
- if (!(flags & MAP_FIXED)) {
- host_len = len + offset - host_offset;
- host_len = HOST_PAGE_ALIGN(host_len);
- start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
- if (start == (abi_ulong)-1) {
- errno = ENOMEM;
- goto fail;
- }
- }
-
- /* When mapping files into a memory area larger than the file, accesses
- to pages beyond the file size will cause a SIGBUS.
-
- For example, if mmaping a file of 100 bytes on a host with 4K pages
- emulating a target with 8K pages, the target expects to be able to
- access the first 8K. But the host will trap us on any access beyond
- 4K.
-
- When emulating a target with a larger page-size than the hosts, we
- may need to truncate file maps at EOF and add extra anonymous pages
- up to the targets page boundary. */
-
- if ((qemu_real_host_page_size < qemu_host_page_size) &&
- !(flags & MAP_ANONYMOUS)) {
- struct stat sb;
-
- if (fstat (fd, &sb) == -1)
- goto fail;
-
- /* Are we trying to create a map beyond EOF?. */
- if (offset + len > sb.st_size) {
- /* If so, truncate the file map at eof aligned with
- the hosts real pagesize. Additional anonymous maps
- will be created beyond EOF. */
- len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
- }
- }
+ return ret;
+}
- if (!(flags & MAP_FIXED)) {
- unsigned long host_start;
- void *p;
+static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
+{
+ int host_page_size = qemu_real_host_page_size();
+ abi_ulong real_start;
+ abi_ulong real_last;
+ abi_ulong real_len;
+ abi_ulong last;
+ abi_ulong a;
+ void *host_start;
+ int prot;
- host_len = len + offset - host_offset;
- host_len = HOST_PAGE_ALIGN(host_len);
+ last = start + len - 1;
+ real_start = start & -host_page_size;
+ real_last = ROUND_UP(last, host_page_size) - 1;
- /* Note: we prefer to control the mapping address. It is
- especially important if qemu_host_page_size >
- qemu_real_host_page_size */
- p = mmap(g2h_untagged(start), host_len, host_prot,
- flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
- if (p == MAP_FAILED) {
- goto fail;
+ /*
+ * If guest pages remain on the first or last host pages,
+ * adjust the deallocation to retain those guest pages.
+ * The single page special case is required for the last page,
+ * lest real_start overflow to zero.
+ */
+ if (real_last - real_start < host_page_size) {
+ prot = 0;
+ for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a);
}
- /* update start so that it points to the file position at 'offset' */
- host_start = (unsigned long)p;
- if (!(flags & MAP_ANONYMOUS)) {
- p = mmap(g2h_untagged(start), len, host_prot,
- flags | MAP_FIXED, fd, host_offset);
- if (p == MAP_FAILED) {
- munmap(g2h_untagged(start), host_len);
- goto fail;
- }
- host_start += offset - host_offset;
+ for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a + 1);
+ }
+ if (prot != 0) {
+ return 0;
}
- start = h2g(host_start);
} else {
- if (start & ~TARGET_PAGE_MASK) {
- errno = EINVAL;
- goto fail;
+ for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a);
+ }
+ if (prot != 0) {
+ real_start += host_page_size;
}
- end = start + len;
- real_end = HOST_PAGE_ALIGN(end);
-
- /*
- * Test if requested memory area fits target address space
- * It can fail only on 64-bit host with 32-bit target.
- * On any other target/host host mmap() handles this error correctly.
- */
- if (end < start || !guest_range_valid_untagged(start, len)) {
- errno = ENOMEM;
- goto fail;
- }
-
- /* worst case: we cannot map the file because the offset is not
- aligned, so we read it */
- if (!(flags & MAP_ANONYMOUS) &&
- (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
- /* msync() won't work here, so we return an error if write is
- possible while it is a shared mapping */
- if ((flags & MAP_TYPE) == MAP_SHARED &&
- (host_prot & PROT_WRITE)) {
- errno = EINVAL;
- goto fail;
- }
- retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0);
- if (retaddr == -1)
- goto fail;
- if (pread(fd, g2h_untagged(start), len, offset) == -1)
- goto fail;
- if (!(host_prot & PROT_WRITE)) {
- ret = target_mprotect(start, len, target_prot);
- assert(ret == 0);
- }
- goto the_end;
- }
-
- /* handle the start of the mapping */
- if (start > real_start) {
- if (real_end == real_start + qemu_host_page_size) {
- /* one single host page */
- ret = mmap_frag(real_start, start, end,
- host_prot, flags, fd, offset);
- if (ret == -1)
- goto fail;
- goto the_end1;
- }
- ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
- host_prot, flags, fd, offset);
- if (ret == -1)
- goto fail;
- real_start += qemu_host_page_size;
- }
- /* handle the end of the mapping */
- if (end < real_end) {
- ret = mmap_frag(real_end - qemu_host_page_size,
- real_end - qemu_host_page_size, end,
- host_prot, flags, fd,
- offset + real_end - qemu_host_page_size - start);
- if (ret == -1)
- goto fail;
- real_end -= qemu_host_page_size;
- }
-
- /* map the middle (easier) */
- if (real_start < real_end) {
- void *p;
- unsigned long offset1;
- if (flags & MAP_ANONYMOUS)
- offset1 = 0;
- else
- offset1 = offset + real_start - start;
- p = mmap(g2h_untagged(real_start), real_end - real_start,
- host_prot, flags, fd, offset1);
- if (p == MAP_FAILED)
- goto fail;
- }
- }
- the_end1:
- if (flags & MAP_ANONYMOUS) {
- page_flags |= PAGE_ANON;
- }
- page_flags |= PAGE_RESET;
- page_set_flags(start, start + len, page_flags);
- the_end:
- trace_target_mmap_complete(start);
- if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
- log_page_dump(__func__);
- }
- tb_invalidate_phys_range(start, start + len);
- mmap_unlock();
- return start;
-fail:
- mmap_unlock();
- return -1;
-}
-
-static void mmap_reserve(abi_ulong start, abi_ulong size)
-{
- abi_ulong real_start;
- abi_ulong real_end;
- abi_ulong addr;
- abi_ulong end;
- int prot;
- real_start = start & qemu_host_page_mask;
- real_end = HOST_PAGE_ALIGN(start + size);
- end = start + size;
- if (start > real_start) {
- /* handle host page containing start */
- prot = 0;
- for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
+ for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(a + 1);
}
- if (real_end == real_start + qemu_host_page_size) {
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- end = real_end;
+ if (prot != 0) {
+ real_last -= host_page_size;
}
- if (prot != 0)
- real_start += qemu_host_page_size;
- }
- if (end < real_end) {
- prot = 0;
- for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
+
+ if (real_last < real_start) {
+ return 0;
}
- if (prot != 0)
- real_end -= qemu_host_page_size;
- }
- if (real_start != real_end) {
- mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
- MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
- -1, 0);
}
+
+ real_len = real_last - real_start + 1;
+ host_start = g2h_untagged(real_start);
+
+ return do_munmap(host_start, real_len);
}
int target_munmap(abi_ulong start, abi_ulong len)
{
- abi_ulong end, real_start, real_end, addr;
- int prot, ret;
+ int ret;
trace_target_munmap(start, len);
- if (start & ~TARGET_PAGE_MASK)
- return -TARGET_EINVAL;
+ if (start & ~TARGET_PAGE_MASK) {
+ errno = EINVAL;
+ return -1;
+ }
len = TARGET_PAGE_ALIGN(len);
if (len == 0 || !guest_range_valid_untagged(start, len)) {
- return -TARGET_EINVAL;
+ errno = EINVAL;
+ return -1;
}
mmap_lock();
- end = start + len;
- real_start = start & qemu_host_page_mask;
- real_end = HOST_PAGE_ALIGN(end);
-
- if (start > real_start) {
- /* handle host page containing start */
- prot = 0;
- for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- if (real_end == real_start + qemu_host_page_size) {
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- end = real_end;
- }
- if (prot != 0)
- real_start += qemu_host_page_size;
- }
- if (end < real_end) {
- prot = 0;
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr);
- }
- if (prot != 0)
- real_end -= qemu_host_page_size;
- }
-
- ret = 0;
- /* unmap what we can */
- if (real_start < real_end) {
- if (reserved_va) {
- mmap_reserve(real_start, real_end - real_start);
- } else {
- ret = munmap(g2h_untagged(real_start), real_end - real_start);
- }
- }
-
- if (ret == 0) {
- page_set_flags(start, start + len, 0);
- tb_invalidate_phys_range(start, start + len);
+ ret = mmap_reserve_or_unmap(start, len);
+ if (likely(ret == 0)) {
+ page_set_flags(start, start + len - 1, 0);
+ shm_region_rm_complete(start, start + len - 1);
}
mmap_unlock();
+
return ret;
}
@@ -765,9 +1076,11 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
flags, g2h_untagged(new_addr));
if (reserved_va && host_addr != MAP_FAILED) {
- /* If new and old addresses overlap then the above mremap will
- already have failed with EINVAL. */
- mmap_reserve(old_addr, old_size);
+ /*
+ * If new and old addresses overlap then the above mremap will
+ * already have failed with EINVAL.
+ */
+ mmap_reserve_or_unmap(old_addr, old_size);
}
} else if (flags & MREMAP_MAYMOVE) {
abi_ulong mmap_start;
@@ -782,20 +1095,20 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
flags | MREMAP_FIXED,
g2h_untagged(mmap_start));
if (reserved_va) {
- mmap_reserve(old_addr, old_size);
+ mmap_reserve_or_unmap(old_addr, old_size);
}
}
} else {
- int prot = 0;
+ int page_flags = 0;
if (reserved_va && old_size < new_size) {
abi_ulong addr;
for (addr = old_addr + old_size;
addr < old_addr + new_size;
addr++) {
- prot |= page_get_flags(addr);
+ page_flags |= page_get_flags(addr);
}
}
- if (prot == 0) {
+ if (page_flags == 0) {
host_addr = mremap(g2h_untagged(old_addr),
old_size, new_size, flags);
@@ -808,7 +1121,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
errno = ENOMEM;
host_addr = MAP_FAILED;
} else if (reserved_va && old_size > new_size) {
- mmap_reserve(old_addr + old_size, old_size - new_size);
+ mmap_reserve_or_unmap(old_addr + old_size,
+ old_size - new_size);
}
}
} else {
@@ -822,11 +1136,305 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
} else {
new_addr = h2g(host_addr);
prot = page_get_flags(old_addr);
- page_set_flags(old_addr, old_addr + old_size, 0);
- page_set_flags(new_addr, new_addr + new_size,
+ page_set_flags(old_addr, old_addr + old_size - 1, 0);
+ shm_region_rm_complete(old_addr, old_addr + old_size - 1);
+ page_set_flags(new_addr, new_addr + new_size - 1,
prot | PAGE_VALID | PAGE_RESET);
+ shm_region_rm_complete(new_addr, new_addr + new_size - 1);
}
- tb_invalidate_phys_range(new_addr, new_addr + new_size);
mmap_unlock();
return new_addr;
}
+
+abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
+{
+ abi_ulong len;
+ int ret = 0;
+
+ if (start & ~TARGET_PAGE_MASK) {
+ return -TARGET_EINVAL;
+ }
+ if (len_in == 0) {
+ return 0;
+ }
+ len = TARGET_PAGE_ALIGN(len_in);
+ if (len == 0 || !guest_range_valid_untagged(start, len)) {
+ return -TARGET_EINVAL;
+ }
+
+ /* Translate for some architectures which have different MADV_xxx values */
+ switch (advice) {
+ case TARGET_MADV_DONTNEED: /* alpha */
+ advice = MADV_DONTNEED;
+ break;
+ case TARGET_MADV_WIPEONFORK: /* parisc */
+ advice = MADV_WIPEONFORK;
+ break;
+ case TARGET_MADV_KEEPONFORK: /* parisc */
+ advice = MADV_KEEPONFORK;
+ break;
+ /* we do not care about the other MADV_xxx values yet */
+ }
+
+ /*
+ * Most advice values are hints, so ignoring and returning success is ok.
+ *
+ * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
+ * MADV_KEEPONFORK are not hints and need to be emulated.
+ *
+ * A straight passthrough for those may not be safe because qemu sometimes
+ * turns private file-backed mappings into anonymous mappings.
+ * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
+ * same semantics for the host as for the guest.
+ *
+ * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
+ * return failure if not.
+ *
+ * MADV_DONTNEED is passed through as well, if possible.
+ * If passthrough isn't possible, we nevertheless (wrongly!) return
+ * success, which is broken, but some userspace programs fail to work
+ * otherwise. Completely implementing such emulation is quite
+ * complicated, though.
+ */
+ mmap_lock();
+ switch (advice) {
+ case MADV_WIPEONFORK:
+ case MADV_KEEPONFORK:
+ ret = -EINVAL;
+ /* fall through */
+ case MADV_DONTNEED:
+ if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
+ ret = get_errno(madvise(g2h_untagged(start), len, advice));
+ if ((advice == MADV_DONTNEED) && (ret == 0)) {
+ page_reset_target_data(start, start + len - 1);
+ }
+ }
+ }
+ mmap_unlock();
+
+ return ret;
+}
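(Editorial reference, not part of the patch.) The guest-visible semantics that MADV_DONTNEED has to preserve are ordinary Linux behaviour and can be demonstrated natively: after the call, private anonymous pages read back as zeros. This is the guarantee that silently returning success cannot actually provide when passthrough is impossible, which is what the comment above flags as broken.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    memset(p, 0xab, len);
    if (madvise(p, len, MADV_DONTNEED) != 0) {
        perror("madvise");
        return 1;
    }
    /* Private anonymous memory reads back as zero after MADV_DONTNEED. */
    printf("first byte after MADV_DONTNEED: %d\n", p[0]);
    return 0;
}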
+
+#ifndef TARGET_FORCE_SHMLBA
+/*
+ * For most architectures, SHMLBA is the same as the page size;
+ * some architectures have larger values, in which case they should
+ * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
+ * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
+ * and defining its own value for SHMLBA.
+ *
+ * The kernel also permits SHMLBA to be set by the architecture to a
+ * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
+ * this means that addresses are rounded to the large size if
+ * SHM_RND is set but addresses not aligned to that size are not rejected
+ * as long as they are at least page-aligned. Since the only architecture
+ * which uses this is ia64, this code doesn't provide for that oddity.
+ */
+static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
+{
+ return TARGET_PAGE_SIZE;
+}
+#endif
+
+#if defined(__arm__) || defined(__mips__) || defined(__sparc__)
+#define HOST_FORCE_SHMLBA 1
+#else
+#define HOST_FORCE_SHMLBA 0
+#endif
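(Editorial sketch, not part of the patch; SHMLBA values are assumed.) The attach address has to satisfy both the target's SHMLBA and the host's, so the code below works with their maximum; with SHM_RND the address is rounded down to that combined boundary, mirroring what the kernel would do for a native process:

#include <stdio.h>

int main(void)
{
    unsigned long t_shmlba = 4096;    /* assumed target SHMLBA (page size)   */
    unsigned long h_shmlba = 16384;   /* assumed host SHMLBA (e.g. on SPARC) */
    unsigned long m_shmlba = t_shmlba > h_shmlba ? t_shmlba : h_shmlba;

    unsigned long shmaddr = 0x7f1234; /* guest-supplied attach address */
    /* With SHM_RND, round down to the combined boundary. */
    unsigned long rounded = shmaddr & ~(m_shmlba - 1);

    printf("combined SHMLBA %#lx: %#lx rounds to %#lx\n",
           m_shmlba, shmaddr, rounded);
    return 0;
}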
+
+abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
+ abi_ulong shmaddr, int shmflg)
+{
+ CPUState *cpu = env_cpu(cpu_env);
+ struct shmid_ds shm_info;
+ int ret;
+ int h_pagesize;
+ int t_shmlba, h_shmlba, m_shmlba;
+ size_t t_len, h_len, m_len;
+
+ /* shmat pointers are always untagged */
+
+ /*
+ * Because we can't use host shmat() unless the address is sufficiently
+ * aligned for the host, we'll need to check both.
+ * TODO: Could be fixed with softmmu.
+ */
+ t_shmlba = target_shmlba(cpu_env);
+ h_pagesize = qemu_real_host_page_size();
+ h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
+ m_shmlba = MAX(t_shmlba, h_shmlba);
+
+ if (shmaddr) {
+ if (shmaddr & (m_shmlba - 1)) {
+ if (shmflg & SHM_RND) {
+ /*
+ * The guest is allowing the kernel to round the address.
+ * Assume that the guest is ok with us rounding to the
+ * host's required alignment too. In any case, if we don't,
+ * we'll get an error from the kernel.
+ */
+ shmaddr &= ~(m_shmlba - 1);
+ if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
+ return -TARGET_EINVAL;
+ }
+ } else {
+ int require = TARGET_PAGE_SIZE;
+#ifdef TARGET_FORCE_SHMLBA
+ require = t_shmlba;
+#endif
+ /*
+ * Include the host's required alignment, as otherwise we
+ * cannot use host shmat() at all.
+ */
+ require = MAX(require, h_shmlba);
+ if (shmaddr & (require - 1)) {
+ return -TARGET_EINVAL;
+ }
+ }
+ }
+ } else {
+ if (shmflg & SHM_REMAP) {
+ return -TARGET_EINVAL;
+ }
+ }
+ /* All rounding has now been done manually. */
+ shmflg &= ~SHM_RND;
+
+ /* Find out the length of the shared memory segment. */
+ ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
+ if (is_error(ret)) {
+ /* can't get length, bail out */
+ return ret;
+ }
+ t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
+ h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
+ m_len = MAX(t_len, h_len);
+
+ if (!guest_range_valid_untagged(shmaddr, m_len)) {
+ return -TARGET_EINVAL;
+ }
+
+ WITH_MMAP_LOCK_GUARD() {
+ bool mapped = false;
+ void *want, *test;
+ abi_ulong last;
+
+ if (!shmaddr) {
+ shmaddr = mmap_find_vma(0, m_len, m_shmlba);
+ if (shmaddr == -1) {
+ return -TARGET_ENOMEM;
+ }
+ mapped = !reserved_va;
+ } else if (shmflg & SHM_REMAP) {
+ /*
+ * If host page size > target page size, the host shmat may map
+ * more memory than the guest expects. Reject a mapping that
+ * would replace memory in the unexpected gap.
+ * TODO: Could be fixed with softmmu.
+ */
+ if (t_len < h_len &&
+ !page_check_range_empty(shmaddr + t_len,
+ shmaddr + h_len - 1)) {
+ return -TARGET_EINVAL;
+ }
+ } else {
+ if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
+ return -TARGET_EINVAL;
+ }
+ }
+
+ /* All placement is now complete. */
+ want = (void *)g2h_untagged(shmaddr);
+
+ /*
+ * Map anonymous pages across the entire range, then remap with
+ * the shared memory. This is required for a number of corner
+ * cases for which host and guest page sizes differ.
+ */
+ if (h_len != t_len) {
+ int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
+ int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
+ | (reserved_va || mapped || (shmflg & SHM_REMAP)
+ ? MAP_FIXED : MAP_FIXED_NOREPLACE);
+
+ test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
+ if (unlikely(test != want)) {
+ /* shmat returns EINVAL not EEXIST like mmap. */
+ ret = (test == MAP_FAILED && errno != EEXIST
+ ? get_errno(-1) : -TARGET_EINVAL);
+ if (mapped) {
+ do_munmap(want, m_len);
+ }
+ return ret;
+ }
+ mapped = true;
+ }
+
+ if (reserved_va || mapped) {
+ shmflg |= SHM_REMAP;
+ }
+ test = shmat(shmid, want, shmflg);
+ if (test == MAP_FAILED) {
+ ret = get_errno(-1);
+ if (mapped) {
+ do_munmap(want, m_len);
+ }
+ return ret;
+ }
+ assert(test == want);
+
+ last = shmaddr + m_len - 1;
+ page_set_flags(shmaddr, last,
+ PAGE_VALID | PAGE_RESET | PAGE_READ |
+ (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
+ (shmflg & SHM_EXEC ? PAGE_EXEC : 0));
+
+ shm_region_rm_complete(shmaddr, last);
+ shm_region_add(shmaddr, last);
+ }
+
+ /*
+ * We're mapping shared memory, so ensure we generate code for parallel
+ * execution and flush old translations. This will work up to the level
+ * supported by the host -- anything that requires EXCP_ATOMIC will not
+ * be atomic with respect to an external process.
+ */
+ if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+ cpu->tcg_cflags |= CF_PARALLEL;
+ tb_flush(cpu);
+ }
+
+ if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
+ FILE *f = qemu_log_trylock();
+ if (f) {
+ fprintf(f, "page layout changed following shmat\n");
+ page_dump(f);
+ qemu_log_unlock(f);
+ }
+ }
+ return shmaddr;
+}
+
+abi_long target_shmdt(abi_ulong shmaddr)
+{
+ abi_long rv;
+
+ /* shmdt pointers are always untagged */
+
+ WITH_MMAP_LOCK_GUARD() {
+ abi_ulong last = shm_region_find(shmaddr);
+ if (last == 0) {
+ return -TARGET_EINVAL;
+ }
+
+ rv = get_errno(shmdt(g2h_untagged(shmaddr)));
+ if (rv == 0) {
+ abi_ulong size = last - shmaddr + 1;
+
+ page_set_flags(shmaddr, last, 0);
+ shm_region_rm_complete(shmaddr, last);
+ mmap_reserve_or_unmap(shmaddr, size);
+ }
+ }
+ return rv;
+}
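(Editorial reference, not part of the patch.) The two entry points above sit behind the ordinary SysV shared-memory flow in the guest; the same sequence is standard POSIX and runs unchanged on the host:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
    /* Create, attach, use, detach and remove a private segment. */
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    if (id == -1) {
        perror("shmget");
        return 1;
    }
    char *p = shmat(id, NULL, 0);     /* handled by target_shmat in qemu */
    if (p == (void *)-1) {
        perror("shmat");
        return 1;
    }
    strcpy(p, "hello");
    printf("%s\n", p);
    shmdt(p);                         /* handled by target_shmdt in qemu */
    shmctl(id, IPC_RMID, NULL);
    return 0;
}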
diff --git a/linux-user/nios2/cpu_loop.c b/linux-user/nios2/cpu_loop.c
deleted file mode 100644
index 34290fb3b5..0000000000
--- a/linux-user/nios2/cpu_loop.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * qemu user cpu loop
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu.h"
-#include "user-internals.h"
-#include "cpu_loop-common.h"
-#include "signal-common.h"
-
-void cpu_loop(CPUNios2State *env)
-{
- CPUState *cs = env_cpu(env);
- Nios2CPU *cpu = NIOS2_CPU(cs);
- target_siginfo_t info;
- int trapnr, ret;
-
- for (;;) {
- cpu_exec_start(cs);
- trapnr = cpu_exec(cs);
- cpu_exec_end(cs);
-
- switch (trapnr) {
- case EXCP_INTERRUPT:
- /* just indicate that signals should be handled asap */
- break;
- case EXCP_TRAP:
- if (env->regs[R_AT] == 0) {
- abi_long ret;
- qemu_log_mask(CPU_LOG_INT, "\nSyscall\n");
-
- ret = do_syscall(env, env->regs[2],
- env->regs[4], env->regs[5], env->regs[6],
- env->regs[7], env->regs[8], env->regs[9],
- 0, 0);
-
- if (env->regs[2] == 0) { /* FIXME: syscall 0 workaround */
- ret = 0;
- }
-
- env->regs[2] = abs(ret);
- /* Return value is 0..4096 */
- env->regs[7] = (ret > 0xfffffffffffff000ULL);
- env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
- env->regs[CR_STATUS] &= ~0x3;
- env->regs[R_EA] = env->regs[R_PC] + 4;
- env->regs[R_PC] += 4;
- break;
- } else {
- qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
-
- env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
- env->regs[CR_STATUS] &= ~0x3;
- env->regs[R_EA] = env->regs[R_PC] + 4;
- env->regs[R_PC] = cpu->exception_addr;
-
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- }
- case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- case 0xaa:
- switch (env->regs[R_PC]) {
- /*case 0x1000:*/ /* TODO:__kuser_helper_version */
- case 0x1004: /* __kuser_cmpxchg */
- start_exclusive();
- if (env->regs[4] & 0x3) {
- goto kuser_fail;
- }
- ret = get_user_u32(env->regs[2], env->regs[4]);
- if (ret) {
- end_exclusive();
- goto kuser_fail;
- }
- env->regs[2] -= env->regs[5];
- if (env->regs[2] == 0) {
- put_user_u32(env->regs[6], env->regs[4]);
- }
- end_exclusive();
- env->regs[R_PC] = env->regs[R_RA];
- break;
- /*case 0x1040:*/ /* TODO:__kuser_sigtramp */
- default:
- ;
-kuser_fail:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- /* TODO: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->regs[R_PC];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
- break;
- default:
- EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
- trapnr);
- abort();
- }
-
- process_pending_signals(env);
- }
-}
-
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
-{
- env->regs[0] = 0;
- env->regs[1] = regs->r1;
- env->regs[2] = regs->r2;
- env->regs[3] = regs->r3;
- env->regs[4] = regs->r4;
- env->regs[5] = regs->r5;
- env->regs[6] = regs->r6;
- env->regs[7] = regs->r7;
- env->regs[8] = regs->r8;
- env->regs[9] = regs->r9;
- env->regs[10] = regs->r10;
- env->regs[11] = regs->r11;
- env->regs[12] = regs->r12;
- env->regs[13] = regs->r13;
- env->regs[14] = regs->r14;
- env->regs[15] = regs->r15;
- /* TODO: unsigned long orig_r2; */
- env->regs[R_RA] = regs->ra;
- env->regs[R_FP] = regs->fp;
- env->regs[R_SP] = regs->sp;
- env->regs[R_GP] = regs->gp;
- env->regs[CR_ESTATUS] = regs->estatus;
- env->regs[R_EA] = regs->ea;
- /* TODO: unsigned long orig_r7; */
-
- /* Emulate eret when starting thread. */
- env->regs[R_PC] = regs->ea;
-}
diff --git a/linux-user/nios2/signal.c b/linux-user/nios2/signal.c
deleted file mode 100644
index a77e8a40f4..0000000000
--- a/linux-user/nios2/signal.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Emulation of Linux signals
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "qemu.h"
-#include "user-internals.h"
-#include "signal-common.h"
-#include "linux-user/trace.h"
-
-#define MCONTEXT_VERSION 2
-
-struct target_sigcontext {
- int version;
- unsigned long gregs[32];
-};
-
-struct target_ucontext {
- abi_ulong tuc_flags;
- abi_ulong tuc_link;
- target_stack_t tuc_stack;
- struct target_sigcontext tuc_mcontext;
- target_sigset_t tuc_sigmask; /* mask last for extensibility */
-};
-
-struct target_rt_sigframe {
- struct target_siginfo info;
- struct target_ucontext uc;
-};
-
-static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
-{
- unsigned long *gregs = uc->tuc_mcontext.gregs;
-
- __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
- __put_user(env->regs[1], &gregs[0]);
- __put_user(env->regs[2], &gregs[1]);
- __put_user(env->regs[3], &gregs[2]);
- __put_user(env->regs[4], &gregs[3]);
- __put_user(env->regs[5], &gregs[4]);
- __put_user(env->regs[6], &gregs[5]);
- __put_user(env->regs[7], &gregs[6]);
- __put_user(env->regs[8], &gregs[7]);
- __put_user(env->regs[9], &gregs[8]);
- __put_user(env->regs[10], &gregs[9]);
- __put_user(env->regs[11], &gregs[10]);
- __put_user(env->regs[12], &gregs[11]);
- __put_user(env->regs[13], &gregs[12]);
- __put_user(env->regs[14], &gregs[13]);
- __put_user(env->regs[15], &gregs[14]);
- __put_user(env->regs[16], &gregs[15]);
- __put_user(env->regs[17], &gregs[16]);
- __put_user(env->regs[18], &gregs[17]);
- __put_user(env->regs[19], &gregs[18]);
- __put_user(env->regs[20], &gregs[19]);
- __put_user(env->regs[21], &gregs[20]);
- __put_user(env->regs[22], &gregs[21]);
- __put_user(env->regs[23], &gregs[22]);
- __put_user(env->regs[R_RA], &gregs[23]);
- __put_user(env->regs[R_FP], &gregs[24]);
- __put_user(env->regs[R_GP], &gregs[25]);
- __put_user(env->regs[R_EA], &gregs[27]);
- __put_user(env->regs[R_SP], &gregs[28]);
-
- return 0;
-}
-
-static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
- int *pr2)
-{
- int temp;
- unsigned long *gregs = uc->tuc_mcontext.gregs;
-
- /* Always make any pending restarted system calls return -EINTR */
- /* current->restart_block.fn = do_no_restart_syscall; */
-
- __get_user(temp, &uc->tuc_mcontext.version);
- if (temp != MCONTEXT_VERSION) {
- return 1;
- }
-
- /* restore passed registers */
- __get_user(env->regs[1], &gregs[0]);
- __get_user(env->regs[2], &gregs[1]);
- __get_user(env->regs[3], &gregs[2]);
- __get_user(env->regs[4], &gregs[3]);
- __get_user(env->regs[5], &gregs[4]);
- __get_user(env->regs[6], &gregs[5]);
- __get_user(env->regs[7], &gregs[6]);
- __get_user(env->regs[8], &gregs[7]);
- __get_user(env->regs[9], &gregs[8]);
- __get_user(env->regs[10], &gregs[9]);
- __get_user(env->regs[11], &gregs[10]);
- __get_user(env->regs[12], &gregs[11]);
- __get_user(env->regs[13], &gregs[12]);
- __get_user(env->regs[14], &gregs[13]);
- __get_user(env->regs[15], &gregs[14]);
- __get_user(env->regs[16], &gregs[15]);
- __get_user(env->regs[17], &gregs[16]);
- __get_user(env->regs[18], &gregs[17]);
- __get_user(env->regs[19], &gregs[18]);
- __get_user(env->regs[20], &gregs[19]);
- __get_user(env->regs[21], &gregs[20]);
- __get_user(env->regs[22], &gregs[21]);
- __get_user(env->regs[23], &gregs[22]);
- /* gregs[23] is handled below */
- /* Verify, should this be settable */
- __get_user(env->regs[R_FP], &gregs[24]);
- /* Verify, should this be settable */
- __get_user(env->regs[R_GP], &gregs[25]);
- /* Not really necessary no user settable bits */
- __get_user(temp, &gregs[26]);
- __get_user(env->regs[R_EA], &gregs[27]);
-
- __get_user(env->regs[R_RA], &gregs[23]);
- __get_user(env->regs[R_SP], &gregs[28]);
-
- target_restore_altstack(&uc->tuc_stack, env);
-
- *pr2 = env->regs[2];
- return 0;
-}
-
-static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
- size_t frame_size)
-{
- unsigned long usp;
-
- /* This is the X/Open sanctioned signal stack switching. */
- usp = target_sigsp(get_sp_from_cpustate(env), ka);
-
- /* Verify, is it 32 or 64 bit aligned */
- return (void *)((usp - frame_size) & -8UL);
-}
-
-void setup_rt_frame(int sig, struct target_sigaction *ka,
- target_siginfo_t *info,
- target_sigset_t *set,
- CPUNios2State *env)
-{
- struct target_rt_sigframe *frame;
- int i, err = 0;
-
- frame = get_sigframe(ka, env, sizeof(*frame));
-
- if (ka->sa_flags & SA_SIGINFO) {
- tswap_siginfo(&frame->info, info);
- }
-
- /* Create the ucontext. */
- __put_user(0, &frame->uc.tuc_flags);
- __put_user(0, &frame->uc.tuc_link);
- target_save_altstack(&frame->uc.tuc_stack, env);
- err |= rt_setup_ucontext(&frame->uc, env);
- for (i = 0; i < TARGET_NSIG_WORDS; i++) {
- __put_user((abi_ulong)set->sig[i],
- (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
- }
-
- if (err) {
- goto give_sigsegv;
- }
-
- /* Set up to return from userspace; jump to fixed address sigreturn
- trampoline on kuser page. */
- env->regs[R_RA] = (unsigned long) (0x1044);
-
- /* Set up registers for signal handler */
- env->regs[R_SP] = (unsigned long) frame;
- env->regs[4] = (unsigned long) sig;
- env->regs[5] = (unsigned long) &frame->info;
- env->regs[6] = (unsigned long) &frame->uc;
- env->regs[R_EA] = (unsigned long) ka->_sa_handler;
- return;
-
-give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sigsegv(sig);
- return;
-}
-
-long do_sigreturn(CPUNios2State *env)
-{
- trace_user_do_sigreturn(env, 0);
- qemu_log_mask(LOG_UNIMP, "do_sigreturn: not implemented\n");
- return -TARGET_ENOSYS;
-}
-
-long do_rt_sigreturn(CPUNios2State *env)
-{
- /* Verify, can we follow the stack back */
- abi_ulong frame_addr = env->regs[R_SP];
- struct target_rt_sigframe *frame;
- sigset_t set;
- int rval;
-
- if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
- goto badframe;
- }
-
- target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
-
- if (rt_restore_ucontext(env, &frame->uc, &rval)) {
- goto badframe;
- }
-
- unlock_user_struct(frame, frame_addr, 0);
- return rval;
-
-badframe:
- unlock_user_struct(frame, frame_addr, 0);
- force_sig(TARGET_SIGSEGV);
- return 0;
-}
diff --git a/linux-user/nios2/sockbits.h b/linux-user/nios2/sockbits.h
deleted file mode 100644
index 0e4c8f012d..0000000000
--- a/linux-user/nios2/sockbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../generic/sockbits.h"
diff --git a/linux-user/nios2/target_cpu.h b/linux-user/nios2/target_cpu.h
deleted file mode 100644
index 2d2008f002..0000000000
--- a/linux-user/nios2/target_cpu.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Nios2 specific CPU ABI and functions for linux-user
- *
- * Copyright (c) 2016 Marek Vasut <marex@denx.de>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef NIOS2_TARGET_CPU_H
-#define NIOS2_TARGET_CPU_H
-
-static inline void cpu_clone_regs_child(CPUNios2State *env, target_ulong newsp,
- unsigned flags)
-{
- if (newsp) {
- env->regs[R_SP] = newsp;
- }
- env->regs[R_RET0] = 0;
-}
-
-static inline void cpu_clone_regs_parent(CPUNios2State *env, unsigned flags)
-{
-}
-
-static inline void cpu_set_tls(CPUNios2State *env, target_ulong newtls)
-{
- /*
- * Linux kernel 3.10 does not pay any attention to CLONE_SETTLS
- * in copy_thread(), so QEMU need not do so either.
- */
-}
-
-static inline abi_ulong get_sp_from_cpustate(CPUNios2State *state)
-{
- return state->regs[R_SP];
-}
-#endif
diff --git a/linux-user/nios2/target_elf.h b/linux-user/nios2/target_elf.h
deleted file mode 100644
index 801e20afaf..0000000000
--- a/linux-user/nios2/target_elf.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or (at your option) any
- * later version. See the COPYING file in the top-level directory.
- */
-
-#ifndef NIOS2_TARGET_ELF_H
-#define NIOS2_TARGET_ELF_H
-static inline const char *cpu_get_model(uint32_t eflags)
-{
- return "any";
-}
-#endif
diff --git a/linux-user/nios2/target_errno_defs.h b/linux-user/nios2/target_errno_defs.h
deleted file mode 100644
index 28120013e2..0000000000
--- a/linux-user/nios2/target_errno_defs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef NIOS2_TARGET_ERRNO_DEFS_H
-#define NIOS2_TARGET_ERRNO_DEFS_H
-
-/* Target uses generic errno */
-#include "../generic/target_errno_defs.h"
-
-#endif
diff --git a/linux-user/nios2/target_fcntl.h b/linux-user/nios2/target_fcntl.h
deleted file mode 100644
index 714583215d..0000000000
--- a/linux-user/nios2/target_fcntl.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or (at your option) any
- * later version. See the COPYING file in the top-level directory.
- */
-
-#ifndef NIOS2_TARGET_FCNTL_H
-#define NIOS2_TARGET_FCNTL_H
-#include "../generic/fcntl.h"
-#endif
diff --git a/linux-user/nios2/target_signal.h b/linux-user/nios2/target_signal.h
deleted file mode 100644
index aebf749f12..0000000000
--- a/linux-user/nios2/target_signal.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef NIOS2_TARGET_SIGNAL_H
-#define NIOS2_TARGET_SIGNAL_H
-
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/* sigaltstack controls */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
-#include "../generic/signal.h"
-
-#endif /* NIOS2_TARGET_SIGNAL_H */
diff --git a/linux-user/nios2/target_syscall.h b/linux-user/nios2/target_syscall.h
deleted file mode 100644
index 78006c24d4..0000000000
--- a/linux-user/nios2/target_syscall.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef NIOS2_TARGET_SYSCALL_H
-#define NIOS2_TARGET_SYSCALL_H
-
-#define UNAME_MACHINE "nios2"
-#define UNAME_MINIMUM_RELEASE "3.19.0"
-
-struct target_pt_regs {
- unsigned long r8; /* r8-r15 Caller-saved GP registers */
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long r13;
- unsigned long r14;
- unsigned long r15;
- unsigned long r1; /* Assembler temporary */
- unsigned long r2; /* Retval LS 32bits */
- unsigned long r3; /* Retval MS 32bits */
- unsigned long r4; /* r4-r7 Register arguments */
- unsigned long r5;
- unsigned long r6;
- unsigned long r7;
- unsigned long orig_r2; /* Copy of r2 ?? */
- unsigned long ra; /* Return address */
- unsigned long fp; /* Frame pointer */
- unsigned long sp; /* Stack pointer */
- unsigned long gp; /* Global pointer */
- unsigned long estatus;
- unsigned long ea; /* Exception return address (pc) */
- unsigned long orig_r7;
-};
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_MCL_CURRENT 1
-#define TARGET_MCL_FUTURE 2
-#define TARGET_MCL_ONFAULT 4
-
-#endif /* NIOS2_TARGET_SYSCALL_H */
diff --git a/linux-user/nios2/termbits.h b/linux-user/nios2/termbits.h
deleted file mode 100644
index b1d4f4fedb..0000000000
--- a/linux-user/nios2/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../generic/termbits.h"
diff --git a/linux-user/openrisc/cpu_loop.c b/linux-user/openrisc/cpu_loop.c
index f6360db47c..a7aa586c8f 100644
--- a/linux-user/openrisc/cpu_loop.c
+++ b/linux-user/openrisc/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -29,7 +28,6 @@ void cpu_loop(CPUOpenRISCState *env)
CPUState *cs = env_cpu(env);
int trapnr;
abi_long ret;
- target_siginfo_t info;
for (;;) {
cpu_exec_start(cs);
@@ -48,54 +46,36 @@ void cpu_loop(CPUOpenRISCState *env)
cpu_get_gpr(env, 6),
cpu_get_gpr(env, 7),
cpu_get_gpr(env, 8), 0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->pc -= 4;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
cpu_set_gpr(env, 11, ret);
}
break;
- case EXCP_DPF:
- case EXCP_IPF:
- case EXCP_RANGE:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case EXCP_ALIGN:
- info.si_signo = TARGET_SIGBUS;
- info.si_errno = 0;
- info.si_code = TARGET_BUS_ADRALN;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, env->eear);
break;
case EXCP_ILLEGAL:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLOPC;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- case EXCP_FPE:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = 0;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc);
break;
case EXCP_INTERRUPT:
/* We processed the pending cpu work above. */
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
break;
+ case EXCP_RANGE:
+            /* Requires SR.OVE to be set, which linux-user never does. */
+ cpu_abort(cs, "Unexpected RANGE exception");
+ case EXCP_FPE:
+            /*
+             * Requires FPSCR.FPEE to be set. Writes to FPSCR from usermode
+             * are not yet enabled in the kernel ABI, so linux-user does not
+             * enable them either.
+             */
+ cpu_abort(cs, "Unexpected FPE exception");
default:
g_assert_not_reached();
}
diff --git a/linux-user/openrisc/signal.c b/linux-user/openrisc/signal.c
index ca2532bf50..cb74a9fe5e 100644
--- a/linux-user/openrisc/signal.c
+++ b/linux-user/openrisc/signal.c
@@ -38,7 +38,6 @@ typedef struct target_ucontext {
typedef struct target_rt_sigframe {
struct target_siginfo info;
target_ucontext uc;
- uint32_t retcode[4]; /* trampoline code */
} target_rt_sigframe;
static void restore_sigcontext(CPUOpenRISCState *env, target_sigcontext *sc)
@@ -104,7 +103,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
}
if (ka->sa_flags & SA_SIGINFO) {
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
}
__put_user(0, &frame->uc.tuc_flags);
@@ -116,14 +115,8 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
}
- /* This is l.ori r11,r0,__NR_sigreturn; l.sys 1; l.nop; l.nop */
- __put_user(0xa9600000 | TARGET_NR_rt_sigreturn, frame->retcode + 0);
- __put_user(0x20000001, frame->retcode + 1);
- __put_user(0x15000000, frame->retcode + 2);
- __put_user(0x15000000, frame->retcode + 3);
-
/* Set up registers for signal handler */
- cpu_set_gpr(env, 9, frame_addr + offsetof(target_rt_sigframe, retcode));
+ cpu_set_gpr(env, 9, default_rt_sigreturn);
cpu_set_gpr(env, 3, sig);
cpu_set_gpr(env, 4, frame_addr + offsetof(target_rt_sigframe, info));
cpu_set_gpr(env, 5, frame_addr + offsetof(target_rt_sigframe, uc));
@@ -169,3 +162,16 @@ long do_rt_sigreturn(CPUOpenRISCState *env)
force_sig(TARGET_SIGSEGV);
return 0;
}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
+ assert(tramp != NULL);
+
+ /* This is l.ori r11,r0,__NR_sigreturn; l.sys 1 */
+ __put_user(0xa9600000 | TARGET_NR_rt_sigreturn, tramp + 0);
+ __put_user(0x20000001, tramp + 1);
+
+ default_rt_sigreturn = sigtramp_page;
+ unlock_user(tramp, sigtramp_page, 8);
+}
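
For reference, a minimal standalone sketch of how the two trampoline words written above are composed, assuming the asm-generic __NR_rt_sigreturn value (139) that OpenRISC inherits; the low 16 bits of the l.ori instruction hold the syscall number:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed: asm-generic syscall number used by OpenRISC. */
    #define NR_RT_SIGRETURN 139

    int main(void)
    {
        /* l.ori r11,r0,imm16: opcode, rD=r11, rA=r0 in the high bits, imm in the low 16 */
        uint32_t ori  = 0xa9600000 | (NR_RT_SIGRETURN & 0xffff);
        uint32_t lsys = 0x20000001;               /* l.sys 1 */

        printf("tramp[0] = %#010x (l.ori r11,r0,%d)\n", ori, NR_RT_SIGRETURN);
        printf("tramp[1] = %#010x (l.sys 1)\n", lsys);
        return 0;
    }
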
diff --git a/linux-user/openrisc/target_mman.h b/linux-user/openrisc/target_mman.h
new file mode 100644
index 0000000000..243c1d5f26
--- /dev/null
+++ b/linux-user/openrisc/target_mman.h
@@ -0,0 +1,11 @@
+/*
+ * arch/openrisc/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+ * TASK_SIZE (0x80000000UL)
+ */
+#define TASK_UNMAPPED_BASE 0x30000000
+
+/* arch/openrisc/include/asm/elf.h */
+#define ELF_ET_DYN_BASE 0x08000000
+
+#include "../generic/target_mman.h"
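
A quick sanity check of the constant above against the kernel formula quoted in the comment (a standalone sketch, not part of the build):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long task_size = 0x80000000ul;      /* TASK_SIZE */
        unsigned long base = task_size / 8 * 3;      /* TASK_UNMAPPED_BASE */

        assert(base == 0x30000000ul);
        printf("openrisc TASK_UNMAPPED_BASE = %#lx\n", base);
        return 0;
    }
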
diff --git a/linux-user/openrisc/target_prctl.h b/linux-user/openrisc/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/openrisc/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/openrisc/target_proc.h b/linux-user/openrisc/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/openrisc/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/openrisc/target_resource.h b/linux-user/openrisc/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/openrisc/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/openrisc/target_signal.h b/linux-user/openrisc/target_signal.h
index 8283eaf544..5b9d40974a 100644
--- a/linux-user/openrisc/target_signal.h
+++ b/linux-user/openrisc/target_signal.h
@@ -1,29 +1,8 @@
#ifndef OPENRISC_TARGET_SIGNAL_H
#define OPENRISC_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_long ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/* sigaltstack controls */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_SA_NOCLDSTOP 0x00000001
-#define TARGET_SA_NOCLDWAIT 0x00000002
-#define TARGET_SA_SIGINFO 0x00000004
-#define TARGET_SA_ONSTACK 0x08000000
-#define TARGET_SA_RESTART 0x10000000
-#define TARGET_SA_NODEFER 0x40000000
-#define TARGET_SA_RESETHAND 0x80000000
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* OPENRISC_TARGET_SIGNAL_H */
diff --git a/linux-user/openrisc/target_structs.h b/linux-user/openrisc/target_structs.h
index e98e2bc799..3a06f373c3 100644
--- a/linux-user/openrisc/target_structs.h
+++ b/linux-user/openrisc/target_structs.h
@@ -1,58 +1 @@
-/*
- * OpenRISC specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef OPENRISC_TARGET_STRUCTS_H
-#define OPENRISC_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/openrisc/target_syscall.h b/linux-user/openrisc/target_syscall.h
index ef0d89a551..7fe5b73d3b 100644
--- a/linux-user/openrisc/target_syscall.h
+++ b/linux-user/openrisc/target_syscall.h
@@ -15,7 +15,6 @@ struct target_pt_regs {
#define UNAME_MACHINE "openrisc"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/ppc/Makefile.vdso b/linux-user/ppc/Makefile.vdso
new file mode 100644
index 0000000000..3ca3c6b83e
--- /dev/null
+++ b/linux-user/ppc/Makefile.vdso
@@ -0,0 +1,20 @@
+include $(BUILD_DIR)/tests/tcg/ppc64-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/ppc
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso-32.so $(SUBDIR)/vdso-64.so $(SUBDIR)/vdso-64le.so
+
+LDFLAGS32 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-32.ld \
+ -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1
+LDFLAGS64 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-64.ld \
+ -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1
+
+$(SUBDIR)/vdso-32.so: vdso.S vdso-32.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS32) -m32 $<
+
+$(SUBDIR)/vdso-64.so: vdso.S vdso-64.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS64) -mbig-endian $<
+
+$(SUBDIR)/vdso-64le.so: vdso.S vdso-64.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS64) -mlittle-endian $<
diff --git a/linux-user/ppc/cpu_loop.c b/linux-user/ppc/cpu_loop.c
index 840b23736b..02204ad8be 100644
--- a/linux-user/ppc/cpu_loop.c
+++ b/linux-user/ppc/cpu_loop.c
@@ -18,8 +18,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
+#include "qemu/timer.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
@@ -54,14 +54,6 @@ uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
return cpu_ppc_get_tb(env);
}
-uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
-__attribute__ (( alias ("cpu_ppc_load_tbu") ));
-
-uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
-{
- return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
-}
-
/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
@@ -76,8 +68,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
void cpu_loop(CPUPPCState *env)
{
CPUState *cs = env_cpu(env);
- target_siginfo_t info;
- int trapnr;
+ int trapnr, si_signo, si_code;
target_ulong ret;
for(;;) {
@@ -102,97 +93,37 @@ void cpu_loop(CPUPPCState *env)
"Aborting\n");
break;
case POWERPC_EXCP_DSI: /* Data storage exception */
- /* XXX: check this. Seems bugged */
- switch (env->error_code & 0xFF000000) {
- case 0x40000000:
- case 0x42000000:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_MAPERR;
- break;
- case 0x04000000:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLADR;
- break;
- case 0x08000000:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_ACCERR;
- break;
- default:
- /* Let's send a regular segfault... */
- EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
- env->error_code);
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_MAPERR;
- break;
- }
- info._sifields._sigfault._addr = env->spr[SPR_DAR];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case POWERPC_EXCP_ISI: /* Instruction storage exception */
- /* XXX: check this */
- switch (env->error_code & 0xFF000000) {
- case 0x40000000:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_MAPERR;
- break;
- case 0x10000000:
- case 0x08000000:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_ACCERR;
- break;
- default:
- /* Let's send a regular segfault... */
- EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
- env->error_code);
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_MAPERR;
- break;
- }
- info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ /* FIXME: handle maperr in ppc_cpu_record_sigsegv. */
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
+ env->spr[SPR_DAR]);
break;
case POWERPC_EXCP_EXTERNAL: /* External input */
cpu_abort(cs, "External interrupt while in user mode. "
"Aborting\n");
break;
- case POWERPC_EXCP_ALIGN: /* Alignment exception */
- /* XXX: check this */
- info.si_signo = TARGET_SIGBUS;
- info.si_errno = 0;
- info.si_code = TARGET_BUS_ADRALN;
- info._sifields._sigfault._addr = env->nip;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case POWERPC_EXCP_PROGRAM: /* Program exception */
case POWERPC_EXCP_HV_EMU: /* HV emulation */
/* XXX: check this */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
+ si_signo = TARGET_SIGFPE;
switch (env->error_code & 0xF) {
case POWERPC_EXCP_FP_OX:
- info.si_code = TARGET_FPE_FLTOVF;
+ si_code = TARGET_FPE_FLTOVF;
break;
case POWERPC_EXCP_FP_UX:
- info.si_code = TARGET_FPE_FLTUND;
+ si_code = TARGET_FPE_FLTUND;
break;
case POWERPC_EXCP_FP_ZX:
case POWERPC_EXCP_FP_VXZDZ:
- info.si_code = TARGET_FPE_FLTDIV;
+ si_code = TARGET_FPE_FLTDIV;
break;
case POWERPC_EXCP_FP_XX:
- info.si_code = TARGET_FPE_FLTRES;
+ si_code = TARGET_FPE_FLTRES;
break;
case POWERPC_EXCP_FP_VXSOFT:
- info.si_code = TARGET_FPE_FLTINV;
+ si_code = TARGET_FPE_FLTINV;
break;
case POWERPC_EXCP_FP_VXSNAN:
case POWERPC_EXCP_FP_VXISI:
@@ -201,56 +132,56 @@ void cpu_loop(CPUPPCState *env)
case POWERPC_EXCP_FP_VXVC:
case POWERPC_EXCP_FP_VXSQRT:
case POWERPC_EXCP_FP_VXCVI:
- info.si_code = TARGET_FPE_FLTSUB;
+ si_code = TARGET_FPE_FLTSUB;
break;
default:
EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
env->error_code);
+ si_code = 0;
break;
}
break;
case POWERPC_EXCP_INVAL:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
+ si_signo = TARGET_SIGILL;
switch (env->error_code & 0xF) {
case POWERPC_EXCP_INVAL_INVAL:
- info.si_code = TARGET_ILL_ILLOPC;
+ si_code = TARGET_ILL_ILLOPC;
break;
case POWERPC_EXCP_INVAL_LSWX:
- info.si_code = TARGET_ILL_ILLOPN;
+ si_code = TARGET_ILL_ILLOPN;
break;
case POWERPC_EXCP_INVAL_SPR:
- info.si_code = TARGET_ILL_PRVREG;
+ si_code = TARGET_ILL_PRVREG;
break;
case POWERPC_EXCP_INVAL_FP:
- info.si_code = TARGET_ILL_COPROC;
+ si_code = TARGET_ILL_COPROC;
break;
default:
EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
env->error_code & 0xF);
- info.si_code = TARGET_ILL_ILLADR;
+ si_code = TARGET_ILL_ILLADR;
break;
}
break;
case POWERPC_EXCP_PRIV:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
+ si_signo = TARGET_SIGILL;
switch (env->error_code & 0xF) {
case POWERPC_EXCP_PRIV_OPC:
- info.si_code = TARGET_ILL_PRVOPC;
+ si_code = TARGET_ILL_PRVOPC;
break;
case POWERPC_EXCP_PRIV_REG:
- info.si_code = TARGET_ILL_PRVREG;
+ si_code = TARGET_ILL_PRVREG;
break;
default:
EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
env->error_code & 0xF);
- info.si_code = TARGET_ILL_PRVOPC;
+ si_code = TARGET_ILL_PRVOPC;
break;
}
break;
case POWERPC_EXCP_TRAP:
- cpu_abort(cs, "Tried to call a TRAP\n");
+ si_signo = TARGET_SIGTRAP;
+ si_code = TARGET_TRAP_BRKPT;
break;
default:
/* Should not happen ! */
@@ -258,28 +189,19 @@ void cpu_loop(CPUPPCState *env)
env->error_code);
break;
}
- info._sifields._sigfault._addr = env->nip;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(si_signo, si_code, env->nip);
break;
case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_COPROC;
- info._sifields._sigfault._addr = env->nip;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
+ case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_COPROC, env->nip);
break;
case POWERPC_EXCP_SYSCALL: /* System call exception */
case POWERPC_EXCP_SYSCALL_VECTORED:
cpu_abort(cs, "Syscall exception while in user mode. "
"Aborting\n");
break;
- case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_COPROC;
- info._sifields._sigfault._addr = env->nip;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case POWERPC_EXCP_DECR: /* Decrementer exception */
cpu_abort(cs, "Decrementer interrupt while in user mode. "
"Aborting\n");
@@ -300,13 +222,6 @@ void cpu_loop(CPUPPCState *env)
cpu_abort(cs, "Instruction TLB exception while in user mode. "
"Aborting\n");
break;
- case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_COPROC;
- info._sifields._sigfault._addr = env->nip;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
break;
@@ -363,25 +278,10 @@ void cpu_loop(CPUPPCState *env)
cpu_abort(cs, "Hypervisor instruction segment exception "
"while in user mode. Aborting\n");
break;
- case POWERPC_EXCP_VPU: /* Vector unavailable exception */
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_COPROC;
- info._sifields._sigfault._addr = env->nip;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
cpu_abort(cs, "Programmable interval timer interrupt "
"while in user mode. Aborting\n");
break;
- case POWERPC_EXCP_IO: /* IO error exception */
- cpu_abort(cs, "IO error exception while in user mode. "
- "Aborting\n");
- break;
- case POWERPC_EXCP_RUNM: /* Run mode exception */
- cpu_abort(cs, "Run mode exception while in user mode. "
- "Aborting\n");
- break;
case POWERPC_EXCP_EMUL: /* Emulation trap exception */
cpu_abort(cs, "Emulation trap exception not handled\n");
break;
@@ -436,11 +336,11 @@ void cpu_loop(CPUPPCState *env)
ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
env->gpr[5], env->gpr[6], env->gpr[7],
env->gpr[8], 0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->nip -= 4;
break;
}
- if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
+ if (ret == (target_ulong)(-QEMU_ESIGRETURN)) {
/* Returning from a successful sigreturn syscall.
Avoid corrupting register state. */
break;
@@ -452,10 +352,7 @@ void cpu_loop(CPUPPCState *env)
env->gpr[3] = ret;
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->nip);
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
diff --git a/linux-user/ppc/meson.build b/linux-user/ppc/meson.build
index 19fead7bc8..80cacae396 100644
--- a/linux-user/ppc/meson.build
+++ b/linux-user/ppc/meson.build
@@ -3,3 +3,15 @@ syscall_nr_generators += {
arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
output: '@BASENAME@_nr.h')
}
+
+vdso_32_inc = gen_vdso.process('vdso-32.so', extra_args: [
+ '-s', '__kernel_sigtramp32',
+ '-r', '__kernel_sigtramp_rt32'
+ ])
+linux_user_ss.add(when: 'TARGET_PPC', if_true: vdso_32_inc)
+
+vdso_64_inc = gen_vdso.process('vdso-64.so',
+ extra_args: ['-r', '__kernel_sigtramp_rt64'])
+vdso_64le_inc = gen_vdso.process('vdso-64le.so',
+ extra_args: ['-r', '__kernel_sigtramp_rt64'])
+linux_user_ss.add(when: 'TARGET_PPC64', if_true: [vdso_64_inc, vdso_64le_inc])
diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c
index e4d0dfa3bf..a1d8c0bccc 100644
--- a/linux-user/ppc/signal.c
+++ b/linux-user/ppc/signal.c
@@ -21,14 +21,8 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
-
-/* Size of dummy stack frame allocated when calling signal handler.
- See arch/powerpc/include/asm/ptrace.h. */
-#if defined(TARGET_PPC64)
-#define SIGNAL_FRAMESIZE 128
-#else
-#define SIGNAL_FRAMESIZE 64
-#endif
+#include "user/tswap-target.h"
+#include "vdso-asmoffset.h"
/* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
on 64-bit PPC, sigcontext and mcontext are one and the same. */
@@ -73,6 +67,16 @@ struct target_mcontext {
#endif
};
+QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_fregs)
+ != offsetof_mcontext_fregs);
+#if defined(TARGET_PPC64)
+QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, v_regs)
+ != offsetof_mcontext_vregs_ptr);
+#else
+QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_vregs)
+ != offsetof_mcontext_vregs);
+#endif
+
/* See arch/powerpc/include/asm/sigcontext.h. */
struct target_sigcontext {
target_ulong _unused[4];
@@ -161,6 +165,7 @@ struct target_ucontext {
#endif
};
+#if !defined(TARGET_PPC64)
/* See arch/powerpc/kernel/signal_32.c. */
struct target_sigframe {
struct target_sigcontext sctx;
@@ -168,6 +173,10 @@ struct target_sigframe {
int32_t abigap[56];
};
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigframe, mctx)
+ != offsetof_sigframe_mcontext);
+#endif
+
#if defined(TARGET_PPC64)
#define TARGET_TRAMP_SIZE 6
@@ -184,6 +193,10 @@ struct target_rt_sigframe {
char abigap[288];
} __attribute__((aligned(16)));
+QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe,
+ uc.tuc_sigcontext.mcontext)
+ != offsetof_rt_sigframe_mcontext);
+
#else
struct target_rt_sigframe {
@@ -192,6 +205,9 @@ struct target_rt_sigframe {
int32_t abigap[56];
};
+QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.tuc_mcontext)
+ != offsetof_rt_sigframe_mcontext);
+
#endif
#if defined(TARGET_PPC64)
@@ -203,9 +219,6 @@ struct target_func_ptr {
#endif
-/* We use the mc_pad field for the signal return trampoline. */
-#define tramp mc_pad
-
/* See arch/powerpc/kernel/signal.c. */
static target_ulong get_sigframe(struct target_sigaction *ka,
CPUPPCState *env,
@@ -218,8 +231,7 @@ static target_ulong get_sigframe(struct target_sigaction *ka,
return (oldsp - frame_size) & ~0xFUL;
}
-#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
- (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
+#if TARGET_BIG_ENDIAN == HOST_BIG_ENDIAN
#define PPC_VEC_HI 0
#define PPC_VEC_LO 1
#else
@@ -232,7 +244,7 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
target_ulong msr = env->msr;
int i;
- target_ulong ccr = 0;
+ uint32_t ccr = 0;
/* In general, the kernel attempts to be intelligent about what it
needs to save for Altivec/FP/SPE registers. We don't care that
@@ -245,11 +257,9 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
__put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
__put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
__put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
- __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
+ __put_user(cpu_read_xer(env), &frame->mc_gregs[TARGET_PT_XER]);
- for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
- ccr |= env->crf[i] << (32 - ((i + 1) * 4));
- }
+ ccr = ppc_get_cr(env);
__put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
/* Save Altivec registers if necessary. */
@@ -309,10 +319,8 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
static void encode_trampoline(int sigret, uint32_t *tramp)
{
/* Set up the sigreturn trampoline: li r0,sigret; sc. */
- if (sigret) {
- __put_user(0x38000000 | sigret, &tramp[0]);
- __put_user(0x44000002, &tramp[1]);
- }
+ __put_user(0x38000000 | sigret, &tramp[0]);
+ __put_user(0x44000002, &tramp[1]);
}
static void restore_user_regs(CPUPPCState *env,
@@ -320,6 +328,7 @@ static void restore_user_regs(CPUPPCState *env,
{
target_ulong save_r2 = 0;
target_ulong msr;
+ target_ulong xer;
target_ulong ccr;
int i;
@@ -335,13 +344,12 @@ static void restore_user_regs(CPUPPCState *env,
__get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
__get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
__get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
- __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
- __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
- for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
- env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
- }
+ __get_user(xer, &frame->mc_gregs[TARGET_PT_XER]);
+ cpu_write_xer(env, xer);
+ __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
+ ppc_set_cr(env, ccr);
if (!sig) {
env->gpr[2] = save_r2;
}
@@ -438,12 +446,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
/* Save user regs. */
save_user_regs(env, &frame->mctx);
- /* Construct the trampoline code on the stack. */
- encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
-
- /* The kernel checks for the presence of a VDSO here. We don't
- emulate a vdso, so use a sigreturn system call. */
- env->lr = (target_ulong) h2g(frame->mctx.tramp);
+ env->lr = default_sigreturn;
/* Turn off all fp exceptions. */
env->fpscr = 0;
@@ -479,22 +482,19 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUPPCState *env)
{
struct target_rt_sigframe *rt_sf;
- uint32_t *trampptr = 0;
struct target_mcontext *mctx = 0;
target_ulong rt_sf_addr, newsp = 0;
int i, err = 0;
#if defined(TARGET_PPC64)
struct target_sigcontext *sc = 0;
-#if !defined(TARGET_ABI32)
- struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
-#endif
+ struct image_info *image = get_task_state(thread_cpu)->info;
#endif
rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
goto sigsegv;
- tswap_siginfo(&rt_sf->info, info);
+ rt_sf->info = *info;
__put_user(0, &rt_sf->uc.tuc_flags);
__put_user(0, &rt_sf->uc.tuc_link);
@@ -503,28 +503,23 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
__put_user(h2g (&rt_sf->uc.tuc_mcontext),
&rt_sf->uc.tuc_regs);
#endif
- for(i = 0; i < TARGET_NSIG_WORDS; i++) {
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
}
#if defined(TARGET_PPC64)
mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
- trampptr = &rt_sf->trampoline[0];
sc = &rt_sf->uc.tuc_sigcontext;
__put_user(h2g(mctx), &sc->regs);
__put_user(sig, &sc->signal);
#else
mctx = &rt_sf->uc.tuc_mcontext;
- trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif
save_user_regs(env, mctx);
- encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
- /* The kernel checks for the presence of a VDSO here. We don't
- emulate a vdso, so use a sigreturn system call. */
- env->lr = (target_ulong) h2g(trampptr);
+ env->lr = default_rt_sigreturn;
/* Turn off all fp exceptions. */
env->fpscr = 0;
@@ -543,7 +538,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
env->gpr[6] = (target_ulong) h2g(rt_sf);
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
+#if defined(TARGET_PPC64)
if (get_ppc64_abi(image) < 2) {
/* ELFv1 PPC64 function pointers are pointers to OPD entries. */
struct target_func_ptr *handler =
@@ -558,7 +553,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
env->nip = (target_ulong) ka->_sa_handler;
#endif
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
/* Signal handlers are entered in big-endian mode. */
ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));
#else
@@ -575,7 +570,7 @@ sigsegv:
}
-#if !defined(TARGET_PPC64) || defined(TARGET_ABI32)
+#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
struct target_sigcontext *sc = NULL;
@@ -588,12 +583,9 @@ long do_sigreturn(CPUPPCState *env)
if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
goto sigsegv;
-#if defined(TARGET_PPC64)
- set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
-#else
__get_user(set.sig[0], &sc->oldmask);
__get_user(set.sig[1], &sc->_unused[3]);
-#endif
+
target_to_host_sigset_internal(&blocked, &set);
set_sigmask(&blocked);
@@ -604,13 +596,13 @@ long do_sigreturn(CPUPPCState *env)
unlock_user_struct(sr, sr_addr, 1);
unlock_user_struct(sc, sc_addr, 1);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
sigsegv:
unlock_user_struct(sr, sr_addr, 1);
unlock_user_struct(sc, sc_addr, 1);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */
@@ -659,12 +651,12 @@ long do_rt_sigreturn(CPUPPCState *env)
target_restore_altstack(&rt_sf->uc.tuc_stack, env);
unlock_user_struct(rt_sf, rt_sf_addr, 1);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
sigsegv:
unlock_user_struct(rt_sf, rt_sf_addr, 1);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
/* This syscall implements {get,set,swap}context for userland. */
@@ -682,7 +674,7 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
}
if (uold_ctx) {
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
return -TARGET_EFAULT;
@@ -717,8 +709,24 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
/* We cannot return to a partially updated context. */
force_sig(TARGET_SIGSEGV);
}
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
return 0;
}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
+ assert(tramp != NULL);
+
+#ifdef TARGET_ARCH_HAS_SETUP_FRAME
+ default_sigreturn = sigtramp_page;
+ encode_trampoline(TARGET_NR_sigreturn, tramp + 0);
+#endif
+
+ default_rt_sigreturn = sigtramp_page + 8;
+ encode_trampoline(TARGET_NR_rt_sigreturn, tramp + 2);
+
+ unlock_user(tramp, sigtramp_page, 2 * 8);
+}
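
As a cross-check of the layout written by setup_sigtramp() above, here is a hedged standalone sketch of the 16-byte trampoline page: the non-rt li r0,N; sc pair at offset 0 and the rt pair at offset 8. The powerpc syscall numbers 119 and 172 are assumed for illustration, not taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed powerpc syscall numbers, for illustration only. */
    #define NR_SIGRETURN    119
    #define NR_RT_SIGRETURN 172

    static void encode(uint32_t nr, uint32_t *tramp)
    {
        tramp[0] = 0x38000000 | nr;   /* li r0,nr */
        tramp[1] = 0x44000002;        /* sc       */
    }

    int main(void)
    {
        uint32_t page[4];             /* 2 * 8 bytes, as locked in setup_sigtramp() */

        encode(NR_SIGRETURN,    page + 0);   /* default_sigreturn    = page + 0 */
        encode(NR_RT_SIGRETURN, page + 2);   /* default_rt_sigreturn = page + 8 */

        for (int i = 0; i < 4; i++) {
            printf("page[%d] = %#010x\n", i, page[i]);
        }
        return 0;
    }
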
diff --git a/linux-user/ppc/target_mman.h b/linux-user/ppc/target_mman.h
new file mode 100644
index 0000000000..646d1ccae7
--- /dev/null
+++ b/linux-user/ppc/target_mman.h
@@ -0,0 +1,29 @@
+#ifndef PPC_TARGET_MMAN_H
+#define PPC_TARGET_MMAN_H
+
+#define TARGET_MAP_NORESERVE 0x40
+#define TARGET_MAP_LOCKED 0x80
+
+/*
+ * arch/powerpc/include/asm/task_size_64.h
+ * TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
+ * TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
+ * TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE))
+ * DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB (with 4k pages)
+ */
+#ifdef TARGET_PPC64
+#define TASK_UNMAPPED_BASE 0x0000100000000000ull
+#else
+#define TASK_UNMAPPED_BASE 0x40000000
+#endif
+
+/* arch/powerpc/include/asm/elf.h */
+#ifdef TARGET_PPC64
+#define ELF_ET_DYN_BASE 0x100000000ull
+#else
+#define ELF_ET_DYN_BASE 0x000400000
+#endif
+
+#include "../generic/target_mman.h"
+
+#endif
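
The 64-bit constant above can be re-derived from the formula quoted in the comment; a minimal arithmetic sketch (64 TiB map window divided by four):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t map_window = 64ull << 40;   /* DEFAULT_MAP_WINDOW_USER64 = 64 TiB */
        uint64_t base = map_window / 4;      /* TASK_UNMAPPED_BASE for ppc64 */

        assert(base == 0x0000100000000000ull);
        printf("ppc64 TASK_UNMAPPED_BASE = %#llx\n", (unsigned long long)base);
        return 0;
    }
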
diff --git a/linux-user/ppc/target_prctl.h b/linux-user/ppc/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/ppc/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/ppc/target_proc.h b/linux-user/ppc/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/ppc/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/ppc/target_resource.h b/linux-user/ppc/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/ppc/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h
index 72fcdd9bfa..5be24e152b 100644
--- a/linux-user/ppc/target_signal.h
+++ b/linux-user/ppc/target_signal.h
@@ -1,27 +1,11 @@
#ifndef PPC_TARGET_SIGNAL_H
#define PPC_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#if !defined(TARGET_PPC64)
#define TARGET_ARCH_HAS_SETUP_FRAME
#endif
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* PPC_TARGET_SIGNAL_H */
diff --git a/linux-user/ppc/target_syscall.h b/linux-user/ppc/target_syscall.h
index b9c4b813d3..77b36d0b46 100644
--- a/linux-user/ppc/target_syscall.h
+++ b/linux-user/ppc/target_syscall.h
@@ -36,7 +36,7 @@ struct target_pt_regs {
abi_ulong link;
abi_ulong xer;
abi_ulong ccr;
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
+#if defined(TARGET_PPC64)
abi_ulong softe;
#else
abi_ulong mq; /* 601 only (not used at present) */
@@ -58,8 +58,8 @@ struct target_revectored_struct {
* flags masks
*/
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
-#ifdef TARGET_WORDS_BIGENDIAN
+#if defined(TARGET_PPC64)
+#if TARGET_BIG_ENDIAN
#define UNAME_MACHINE "ppc64"
#else
#define UNAME_MACHINE "ppc64le"
@@ -71,7 +71,6 @@ struct target_revectored_struct {
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
diff --git a/linux-user/ppc/vdso-32.ld b/linux-user/ppc/vdso-32.ld
new file mode 100644
index 0000000000..6962696540
--- /dev/null
+++ b/linux-user/ppc/vdso-32.ld
@@ -0,0 +1,70 @@
+/*
+ * Linker script for linux powerpc 32-bit replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_2.6.15 {
+ global:
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_gettime64;
+ __kernel_clock_getres;
+ __kernel_time;
+ __kernel_sync_dicache;
+ __kernel_sigtramp32;
+ __kernel_sigtramp_rt32;
+ __kernel_getcpu;
+ local: *;
+ };
+}
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ .data : {
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load
+}
diff --git a/linux-user/ppc/vdso-32.so b/linux-user/ppc/vdso-32.so
new file mode 100755
index 0000000000..b19baafb0d
--- /dev/null
+++ b/linux-user/ppc/vdso-32.so
Binary files differ
diff --git a/linux-user/ppc/vdso-64.ld b/linux-user/ppc/vdso-64.ld
new file mode 100644
index 0000000000..a55c65ed54
--- /dev/null
+++ b/linux-user/ppc/vdso-64.ld
@@ -0,0 +1,68 @@
+/*
+ * Linker script for linux powerpc64 replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_2.6.15 {
+ global:
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+ __kernel_sync_dicache;
+ __kernel_sigtramp_rt64;
+ __kernel_getcpu;
+ __kernel_time;
+ local: *;
+ };
+}
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ .data : {
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load
+}
diff --git a/linux-user/ppc/vdso-64.so b/linux-user/ppc/vdso-64.so
new file mode 100755
index 0000000000..913c831b38
--- /dev/null
+++ b/linux-user/ppc/vdso-64.so
Binary files differ
diff --git a/linux-user/ppc/vdso-64le.so b/linux-user/ppc/vdso-64le.so
new file mode 100755
index 0000000000..258a03b807
--- /dev/null
+++ b/linux-user/ppc/vdso-64le.so
Binary files differ
diff --git a/linux-user/ppc/vdso-asmoffset.h b/linux-user/ppc/vdso-asmoffset.h
new file mode 100644
index 0000000000..6844c8c81c
--- /dev/null
+++ b/linux-user/ppc/vdso-asmoffset.h
@@ -0,0 +1,20 @@
+/*
+ * Size of dummy stack frame allocated when calling signal handler.
+ * See arch/powerpc/include/asm/ptrace.h.
+ */
+#ifdef TARGET_ABI32
+# define SIGNAL_FRAMESIZE 64
+#else
+# define SIGNAL_FRAMESIZE 128
+#endif
+
+#ifdef TARGET_ABI32
+# define offsetof_sigframe_mcontext 0x20
+# define offsetof_rt_sigframe_mcontext 0x140
+# define offsetof_mcontext_fregs 0xc0
+# define offsetof_mcontext_vregs 0x1d0
+#else
+# define offsetof_rt_sigframe_mcontext 0xe8
+# define offsetof_mcontext_fregs 0x180
+# define offsetof_mcontext_vregs_ptr 0x288
+#endif
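
These offsets are not free-standing: the QEMU_BUILD_BUG_ON() checks added to ppc/signal.c earlier in this patch pin each value to the corresponding structure member at compile time. A generic sketch of that pattern, using a made-up frame layout purely for illustration:

    #include <stddef.h>

    /* Hypothetical frame layout standing in for target_rt_sigframe. */
    struct frame {
        char pad[0x140];
        long mcontext;
    };

    #define offsetof_frame_mcontext 0x140

    /* Fails the build if the hand-written offset drifts from the struct. */
    _Static_assert(offsetof(struct frame, mcontext) == offsetof_frame_mcontext,
                   "asm offset out of sync with struct layout");

    int main(void) { return 0; }
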
diff --git a/linux-user/ppc/vdso.S b/linux-user/ppc/vdso.S
new file mode 100644
index 0000000000..2e79ea9808
--- /dev/null
+++ b/linux-user/ppc/vdso.S
@@ -0,0 +1,239 @@
+/*
+ * PowerPC linux replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+#include <asm/errno.h>
+
+#ifndef _ARCH_PPC64
+# define TARGET_ABI32
+#endif
+#include "vdso-asmoffset.h"
+
+
+ .text
+
+.macro endf name
+ .globl \name
+ .size \name, .-\name
+ /* For PPC64, functions have special linkage; we export pointers. */
+#ifndef _ARCH_PPC64
+ .type \name, @function
+#endif
+.endm
+
+.macro raw_syscall nr
+ addi 0, 0, \nr
+ sc
+.endm
+
+.macro vdso_syscall name, nr
+\name:
+ raw_syscall \nr
+ blr
+endf \name
+.endm
+
+ .cfi_startproc
+
+vdso_syscall __kernel_gettimeofday, __NR_gettimeofday
+vdso_syscall __kernel_clock_gettime, __NR_clock_gettime
+vdso_syscall __kernel_clock_getres, __NR_clock_getres
+vdso_syscall __kernel_getcpu, __NR_getcpu
+vdso_syscall __kernel_time, __NR_time
+
+#ifdef __NR_clock_gettime64
+vdso_syscall __kernel_clock_gettime64, __NR_clock_gettime64
+#endif
+
+__kernel_sync_dicache:
+ /* qemu does not need to flush caches */
+ blr
+endf __kernel_sync_dicache
+
+ .cfi_endproc
+
+/*
+ * TODO: __kernel_get_tbfreq
+ * This is probably a constant for QEMU.
+ */
+
+/*
+ * Start the unwind info at least one instruction before the signal
+ * trampoline, because the unwinder will assume we are returning
+ * after a call site.
+ */
+
+ .cfi_startproc simple
+ .cfi_signal_frame
+
+#ifdef _ARCH_PPC64
+# define __kernel_sigtramp_rt __kernel_sigtramp_rt64
+# define sizeof_reg 8
+#else
+# define __kernel_sigtramp_rt __kernel_sigtramp_rt32
+# define sizeof_reg 4
+#endif
+#define sizeof_freg 8
+#define sizeof_vreg 16
+
+ .cfi_def_cfa 1, SIGNAL_FRAMESIZE + offsetof_rt_sigframe_mcontext
+
+ /* Return address */
+ .cfi_return_column 67
+ .cfi_offset 67, 32 * sizeof_reg /* nip */
+
+ /* Integer registers */
+ .cfi_offset 0, 0 * sizeof_reg
+ .cfi_offset 1, 1 * sizeof_reg
+ .cfi_offset 2, 2 * sizeof_reg
+ .cfi_offset 3, 3 * sizeof_reg
+ .cfi_offset 4, 4 * sizeof_reg
+ .cfi_offset 5, 5 * sizeof_reg
+ .cfi_offset 6, 6 * sizeof_reg
+ .cfi_offset 7, 7 * sizeof_reg
+ .cfi_offset 8, 8 * sizeof_reg
+ .cfi_offset 9, 9 * sizeof_reg
+ .cfi_offset 10, 10 * sizeof_reg
+ .cfi_offset 11, 11 * sizeof_reg
+ .cfi_offset 12, 12 * sizeof_reg
+ .cfi_offset 13, 13 * sizeof_reg
+ .cfi_offset 14, 14 * sizeof_reg
+ .cfi_offset 15, 15 * sizeof_reg
+ .cfi_offset 16, 16 * sizeof_reg
+ .cfi_offset 17, 17 * sizeof_reg
+ .cfi_offset 18, 18 * sizeof_reg
+ .cfi_offset 19, 19 * sizeof_reg
+ .cfi_offset 20, 20 * sizeof_reg
+ .cfi_offset 21, 21 * sizeof_reg
+ .cfi_offset 22, 22 * sizeof_reg
+ .cfi_offset 23, 23 * sizeof_reg
+ .cfi_offset 24, 24 * sizeof_reg
+ .cfi_offset 25, 25 * sizeof_reg
+ .cfi_offset 26, 26 * sizeof_reg
+ .cfi_offset 27, 27 * sizeof_reg
+ .cfi_offset 28, 28 * sizeof_reg
+ .cfi_offset 29, 29 * sizeof_reg
+ .cfi_offset 30, 30 * sizeof_reg
+ .cfi_offset 31, 31 * sizeof_reg
+ .cfi_offset 65, 36 * sizeof_reg /* lr */
+ .cfi_offset 70, 38 * sizeof_reg /* ccr */
+
+ /* Floating point registers */
+ .cfi_offset 32, offsetof_mcontext_fregs
+ .cfi_offset 33, offsetof_mcontext_fregs + 1 * sizeof_freg
+ .cfi_offset 34, offsetof_mcontext_fregs + 2 * sizeof_freg
+ .cfi_offset 35, offsetof_mcontext_fregs + 3 * sizeof_freg
+ .cfi_offset 36, offsetof_mcontext_fregs + 4 * sizeof_freg
+ .cfi_offset 37, offsetof_mcontext_fregs + 5 * sizeof_freg
+ .cfi_offset 38, offsetof_mcontext_fregs + 6 * sizeof_freg
+ .cfi_offset 39, offsetof_mcontext_fregs + 7 * sizeof_freg
+ .cfi_offset 40, offsetof_mcontext_fregs + 8 * sizeof_freg
+ .cfi_offset 41, offsetof_mcontext_fregs + 9 * sizeof_freg
+ .cfi_offset 42, offsetof_mcontext_fregs + 10 * sizeof_freg
+ .cfi_offset 43, offsetof_mcontext_fregs + 11 * sizeof_freg
+ .cfi_offset 44, offsetof_mcontext_fregs + 12 * sizeof_freg
+ .cfi_offset 45, offsetof_mcontext_fregs + 13 * sizeof_freg
+ .cfi_offset 46, offsetof_mcontext_fregs + 14 * sizeof_freg
+ .cfi_offset 47, offsetof_mcontext_fregs + 15 * sizeof_freg
+ .cfi_offset 48, offsetof_mcontext_fregs + 16 * sizeof_freg
+ .cfi_offset 49, offsetof_mcontext_fregs + 17 * sizeof_freg
+ .cfi_offset 50, offsetof_mcontext_fregs + 18 * sizeof_freg
+ .cfi_offset 51, offsetof_mcontext_fregs + 19 * sizeof_freg
+ .cfi_offset 52, offsetof_mcontext_fregs + 20 * sizeof_freg
+ .cfi_offset 53, offsetof_mcontext_fregs + 21 * sizeof_freg
+ .cfi_offset 54, offsetof_mcontext_fregs + 22 * sizeof_freg
+ .cfi_offset 55, offsetof_mcontext_fregs + 23 * sizeof_freg
+ .cfi_offset 56, offsetof_mcontext_fregs + 24 * sizeof_freg
+ .cfi_offset 57, offsetof_mcontext_fregs + 25 * sizeof_freg
+ .cfi_offset 58, offsetof_mcontext_fregs + 26 * sizeof_freg
+ .cfi_offset 59, offsetof_mcontext_fregs + 27 * sizeof_freg
+ .cfi_offset 60, offsetof_mcontext_fregs + 28 * sizeof_freg
+ .cfi_offset 61, offsetof_mcontext_fregs + 29 * sizeof_freg
+ .cfi_offset 62, offsetof_mcontext_fregs + 30 * sizeof_freg
+ .cfi_offset 63, offsetof_mcontext_fregs + 31 * sizeof_freg
+
+ /*
+ * Unlike the kernel, unconditionally represent the Altivec/VSX regs.
+ * The space within the stack frame is always available, and most of
+ * our supported processors have them enabled. The only complication
+ * for PPC64 is the misalignment, so that we have to use indirection.
+ */
+.macro save_vreg_ofs reg, ofs
+#ifdef _ARCH_PPC64
+ /*
+ * vreg = *(cfa + offsetof(v_regs)) + ofs
+ *
+ * The CFA is input to the expression on the stack, so:
+ * DW_CFA_expression reg, length (7),
+ * DW_OP_plus_uconst (0x23), vreg_ptr, DW_OP_deref (0x06),
+ * DW_OP_plus_uconst (0x23), ofs
+ */
+ .cfi_escape 0x10, 77 + \reg, 7, 0x23, (offsetof_mcontext_vregs_ptr & 0x7f) + 0x80, offsetof_mcontext_vregs_ptr >> 7, 0x06, 0x23, (\ofs & 0x7f) | 0x80, \ofs >> 7
+#else
+ .cfi_offset 77 + \reg, offsetof_mcontext_vregs + \ofs
+#endif
+.endm
+
+.macro save_vreg reg
+ save_vreg_ofs \reg, (\reg * sizeof_vreg)
+.endm
+
+ save_vreg 0
+ save_vreg 1
+ save_vreg 2
+ save_vreg 3
+ save_vreg 4
+ save_vreg 5
+ save_vreg 6
+ save_vreg 7
+ save_vreg 8
+ save_vreg 9
+ save_vreg 10
+ save_vreg 11
+ save_vreg 12
+ save_vreg 13
+ save_vreg 14
+ save_vreg 15
+ save_vreg 16
+ save_vreg 17
+ save_vreg 18
+ save_vreg 19
+ save_vreg 20
+ save_vreg 21
+ save_vreg 22
+ save_vreg 23
+ save_vreg 24
+ save_vreg 25
+ save_vreg 26
+ save_vreg 27
+ save_vreg 28
+ save_vreg 29
+ save_vreg 30
+ save_vreg 31
+ save_vreg 32
+ save_vreg_ofs 33, (32 * sizeof_vreg + 12)
+
+ nop
+
+__kernel_sigtramp_rt:
+ raw_syscall __NR_rt_sigreturn
+endf __kernel_sigtramp_rt
+
+#ifndef _ARCH_PPC64
+ /*
+ * The non-rt sigreturn has the same layout at a different offset.
+ * Move the CFA and leave all the other descriptions the same.
+ */
+ .cfi_def_cfa 1, SIGNAL_FRAMESIZE + offsetof_sigframe_mcontext
+ nop
+__kernel_sigtramp32:
+ raw_syscall __NR_sigreturn
+endf __kernel_sigtramp32
+#endif
+
+ .cfi_endproc
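
The two-byte operand pairs in the .cfi_escape line above are hand-rolled ULEB128 encodings of the DWARF expression operands; they work because every offset involved fits in 14 bits. A small sketch of the general encoding, for comparison:

    #include <stdint.h>
    #include <stdio.h>

    /* Encode val as ULEB128 into buf; returns the number of bytes written. */
    static int uleb128(uint32_t val, uint8_t *buf)
    {
        int n = 0;
        do {
            uint8_t byte = val & 0x7f;
            val >>= 7;
            buf[n++] = byte | (val ? 0x80 : 0);
        } while (val);
        return n;
    }

    int main(void)
    {
        uint8_t buf[5];
        uint32_t vregs_ptr = 0x288;   /* offsetof_mcontext_vregs_ptr */
        int n = uleb128(vregs_ptr, buf);

        /* Matches (0x288 & 0x7f) + 0x80, 0x288 >> 7 in the .cfi_escape above. */
        printf("%#x ->", vregs_ptr);
        for (int i = 0; i < n; i++) {
            printf(" %#04x", (unsigned)buf[i]);
        }
        printf("\n");
        return 0;
    }
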
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 5c713fa8ab..4777856b52 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -4,8 +4,6 @@
#include "cpu.h"
#include "exec/cpu_ldst.h"
-#undef DEBUG_REMAP
-
#include "exec/user/abitypes.h"
#include "syscall_defs.h"
@@ -29,25 +27,28 @@ struct image_info {
abi_ulong end_code;
abi_ulong start_data;
abi_ulong end_data;
- abi_ulong start_brk;
abi_ulong brk;
- abi_ulong reserve_brk;
- abi_ulong start_mmap;
abi_ulong start_stack;
abi_ulong stack_limit;
+ abi_ulong vdso;
abi_ulong entry;
abi_ulong code_offset;
abi_ulong data_offset;
abi_ulong saved_auxv;
abi_ulong auxv_len;
- abi_ulong arg_start;
- abi_ulong arg_end;
- abi_ulong arg_strings;
- abi_ulong env_strings;
+ abi_ulong argc;
+ abi_ulong argv;
+ abi_ulong envc;
+ abi_ulong envp;
abi_ulong file_string;
uint32_t elf_flags;
int personality;
abi_ulong alignment;
+ bool exec_stack;
+
+ /* Generic semihosting knows about these pointers. */
+ abi_ulong arg_strings; /* strings for argv */
+ abi_ulong env_strings; /* strings for envp; ends arg_strings */
/* The fields below are used in FDPIC mode. */
abi_ulong loadmap_addr;
@@ -89,17 +90,11 @@ struct vm86_saved_state {
#include "nwfpe/fpa11.h"
#endif
-#define MAX_SIGQUEUE_SIZE 1024
-
struct emulated_sigtable {
int pending; /* true if signal is pending */
target_siginfo_t info;
};
-/*
- * NOTE: we force a big alignment so that the stack stored after is
- * aligned too
- */
typedef struct TaskState {
pid_t ts_tid; /* tid (or pid) of this task */
#ifdef TARGET_ARM
@@ -160,12 +155,24 @@ typedef struct TaskState {
/* This thread's sigaltstack, if it has one */
struct target_sigaltstack sigaltstack_used;
-} __attribute__((aligned(16))) TaskState;
+
+ /* Start time of task after system boot in clock ticks */
+ uint64_t start_boottime;
+} TaskState;
+
+static inline TaskState *get_task_state(CPUState *cs)
+{
+ return cs->opaque;
+}
abi_long do_brk(abi_ulong new_brk);
+int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
+ int flags, mode_t mode, bool safe);
+ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz);
/* user access */
+#define VERIFY_NONE 0
#define VERIFY_READ PAGE_READ
#define VERIFY_WRITE (PAGE_READ | PAGE_WRITE)
@@ -176,7 +183,7 @@ static inline bool access_ok_untagged(int type, abi_ulong addr, abi_ulong size)
: !guest_range_valid_untagged(addr, size)) {
return false;
}
- return page_check_range((target_ulong)addr, size, type) == 0;
+ return page_check_range((target_ulong)addr, size, type);
}
static inline bool access_ok(CPUState *cpu, int type,
@@ -239,7 +246,7 @@ static inline bool access_ok(CPUState *cpu, int type,
} while (0)
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
# define __put_user(x, hptr) __put_user_e(x, hptr, be)
# define __get_user(x, hptr) __get_user_e(x, hptr, be)
#else
@@ -323,7 +330,7 @@ void *lock_user(int type, abi_ulong guest_addr, ssize_t len, bool copy);
/* Unlock an area of guest memory. The first LEN bytes must be
flushed back to guest memory. host_ptr = NULL is explicitly
allowed and does nothing. */
-#ifndef DEBUG_REMAP
+#ifndef CONFIG_DEBUG_REMAP
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
ssize_t len)
{
diff --git a/linux-user/riscv/Makefile.vdso b/linux-user/riscv/Makefile.vdso
new file mode 100644
index 0000000000..2c257dbfda
--- /dev/null
+++ b/linux-user/riscv/Makefile.vdso
@@ -0,0 +1,15 @@
+include $(BUILD_DIR)/tests/tcg/riscv64-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/riscv
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso-32.so $(SUBDIR)/vdso-64.so
+
+LDFLAGS = -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \
+ -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld
+
+$(SUBDIR)/vdso-32.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS) -mabi=ilp32d -march=rv32g $<
+
+$(SUBDIR)/vdso-64.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS) -mabi=lp64d -march=rv64g $<
diff --git a/linux-user/riscv/cpu_loop.c b/linux-user/riscv/cpu_loop.c
index 9859a366e4..52c49c2e42 100644
--- a/linux-user/riscv/cpu_loop.c
+++ b/linux-user/riscv/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu.h"
#include "user-internals.h"
@@ -30,8 +29,7 @@
void cpu_loop(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);
- int trapnr, signum, sigcode;
- target_ulong sigaddr;
+ int trapnr;
target_ulong ret;
for (;;) {
@@ -40,10 +38,6 @@ void cpu_loop(CPURISCVState *env)
cpu_exec_end(cs);
process_queued_cpu_work(cs);
- signum = 0;
- sigcode = 0;
- sigaddr = 0;
-
switch (trapnr) {
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
@@ -69,9 +63,9 @@ void cpu_loop(CPURISCVState *env)
env->gpr[xA5],
0, 0);
}
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->pc -= 4;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->gpr[xA0] = ret;
}
if (cs->singlestep_enabled) {
@@ -79,46 +73,23 @@ void cpu_loop(CPURISCVState *env)
}
break;
case RISCV_EXCP_ILLEGAL_INST:
- signum = TARGET_SIGILL;
- sigcode = TARGET_ILL_ILLOPC;
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc);
break;
case RISCV_EXCP_BREAKPOINT:
- signum = TARGET_SIGTRAP;
- sigcode = TARGET_TRAP_BRKPT;
- sigaddr = env->pc;
- break;
- case RISCV_EXCP_INST_PAGE_FAULT:
- case RISCV_EXCP_LOAD_PAGE_FAULT:
- case RISCV_EXCP_STORE_PAGE_FAULT:
- signum = TARGET_SIGSEGV;
- sigcode = TARGET_SEGV_MAPERR;
- sigaddr = env->badaddr;
+ case EXCP_DEBUG:
+ gdbstep:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
case RISCV_EXCP_SEMIHOST:
- env->gpr[xA0] = do_common_semihosting(cs);
+ do_common_semihosting(cs);
env->pc += 4;
break;
- case EXCP_DEBUG:
- gdbstep:
- signum = TARGET_SIGTRAP;
- sigcode = TARGET_TRAP_BRKPT;
- break;
default:
EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
trapnr);
exit(EXIT_FAILURE);
}
- if (signum) {
- target_siginfo_t info = {
- .si_signo = signum,
- .si_errno = 0,
- .si_code = sigcode,
- ._sifields._sigfault._addr = sigaddr
- };
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
-
process_pending_signals(env);
}
}
@@ -126,14 +97,14 @@ void cpu_loop(CPURISCVState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
struct image_info *info = ts->info;
env->pc = regs->sepc;
env->gpr[xSP] = regs->sp;
env->elf_flags = info->elf_flags;
- if ((env->misa & RVE) && !(env->elf_flags & EF_RISCV_RVE)) {
+ if ((env->misa_ext & RVE) && !(env->elf_flags & EF_RISCV_RVE)) {
error_report("Incompatible ELF: RVE cpu requires RVE ABI binary");
exit(EXIT_FAILURE);
}
diff --git a/linux-user/riscv/meson.build b/linux-user/riscv/meson.build
new file mode 100644
index 0000000000..beb989a7ca
--- /dev/null
+++ b/linux-user/riscv/meson.build
@@ -0,0 +1,7 @@
+vdso_32_inc = gen_vdso.process('vdso-32.so',
+ extra_args: ['-r', '__vdso_rt_sigreturn'])
+vdso_64_inc = gen_vdso.process('vdso-64.so',
+ extra_args: ['-r', '__vdso_rt_sigreturn'])
+
+linux_user_ss.add(when: 'TARGET_RISCV32', if_true: vdso_32_inc)
+linux_user_ss.add(when: 'TARGET_RISCV64', if_true: vdso_64_inc)
diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c
index f7f33bc90a..358fa1d82d 100644
--- a/linux-user/riscv/signal.c
+++ b/linux-user/riscv/signal.c
@@ -21,6 +21,7 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
+#include "vdso-asmoffset.h"
/* Signal handler invocation must be transparent for the code being
interrupted. Complete CPU (hart) state is saved on entry and restored
@@ -37,9 +38,11 @@ struct target_sigcontext {
uint32_t fcsr;
}; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
+QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, fpr) != offsetof_freg0);
+
struct target_ucontext {
- unsigned long uc_flags;
- struct target_ucontext *uc_link;
+ abi_ulong uc_flags;
+ abi_ptr uc_link;
target_stack_t uc_stack;
target_sigset_t uc_sigmask;
uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
@@ -47,11 +50,15 @@ struct target_ucontext {
};
struct target_rt_sigframe {
- uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
struct target_siginfo info;
struct target_ucontext uc;
};
+QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe)
+ != sizeof_rt_sigframe);
+QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.uc_mcontext)
+ != offsetof_uc_mcontext);
+
static abi_ulong get_sigframe(struct target_sigaction *ka,
CPURISCVState *regs, size_t framesize)
{
@@ -65,9 +72,7 @@ static abi_ulong get_sigframe(struct target_sigaction *ka,
/* This is the X/Open sanctioned signal stack switching. */
sp = target_sigsp(sp, ka) - framesize;
-
- /* XXX: kernel aligns with 0xf ? */
- sp &= ~3UL; /* align sp on 4-byte boundary */
+ sp &= ~0xf;
return sp;
}
@@ -105,12 +110,6 @@ static void setup_ucontext(struct target_ucontext *uc,
setup_sigcontext(&uc->uc_mcontext, env);
}
-static inline void install_sigtramp(uint32_t *tramp)
-{
- __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
- __put_user(0x00000073, tramp + 1); /* ecall */
-}
-
void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPURISCVState *env)
@@ -126,15 +125,14 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
}
setup_ucontext(&frame->uc, env, set);
- tswap_siginfo(&frame->info, info);
- install_sigtramp(frame->tramp);
+ frame->info = *info;
env->pc = ka->_sa_handler;
env->gpr[xSP] = frame_addr;
env->gpr[xA0] = sig;
env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
- env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
+ env->gpr[xRA] = default_rt_sigreturn;
return;
@@ -196,10 +194,22 @@ long do_rt_sigreturn(CPURISCVState *env)
target_restore_altstack(&frame->uc.uc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
return 0;
}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
+ assert(tramp != NULL);
+
+ __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
+ __put_user(0x00000073, tramp + 1); /* ecall */
+
+ default_rt_sigreturn = sigtramp_page;
+ unlock_user(tramp, sigtramp_page, 8);
+}
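
Two things change in this file: the sigreturn trampoline moves off the stack frame onto a dedicated sigtramp page written once by setup_sigtramp(), and QEMU_BUILD_BUG_ON() pins the C frame layout to the offsets hard-coded in vdso-asmoffset.h. A stand-alone illustration of that compile-time layout check, using C11 _Static_assert and made-up types/offsets:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical frame and the offset its assembly counterpart assumes. */
    struct frame {
        int64_t regs[32];
        double fregs[32];
    };
    #define offsetof_fregs 0x100    /* value a (hypothetical) .S file expects */

    /* Compilation fails as soon as the struct and the assembly disagree. */
    _Static_assert(offsetof(struct frame, fregs) == offsetof_fregs,
                   "frame layout drifted from the assembly offsets");
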
diff --git a/linux-user/riscv/syscall32_nr.h b/linux-user/riscv/syscall32_nr.h
index 1327d7dffa..412e58e5b2 100644
--- a/linux-user/riscv/syscall32_nr.h
+++ b/linux-user/riscv/syscall32_nr.h
@@ -228,6 +228,7 @@
#define TARGET_NR_accept4 242
#define TARGET_NR_arch_specific_syscall 244
#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15)
+#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14)
#define TARGET_NR_prlimit64 261
#define TARGET_NR_fanotify_init 262
#define TARGET_NR_fanotify_mark 263
diff --git a/linux-user/riscv/syscall64_nr.h b/linux-user/riscv/syscall64_nr.h
index 6659751933..29e1eb2075 100644
--- a/linux-user/riscv/syscall64_nr.h
+++ b/linux-user/riscv/syscall64_nr.h
@@ -251,6 +251,7 @@
#define TARGET_NR_recvmmsg 243
#define TARGET_NR_arch_specific_syscall 244
#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15)
+#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14)
#define TARGET_NR_wait4 260
#define TARGET_NR_prlimit64 261
#define TARGET_NR_fanotify_init 262
diff --git a/linux-user/riscv/target_elf.h b/linux-user/riscv/target_elf.h
index 9dd65652ee..dedd5956f3 100644
--- a/linux-user/riscv/target_elf.h
+++ b/linux-user/riscv/target_elf.h
@@ -9,7 +9,6 @@
#define RISCV_TARGET_ELF_H
static inline const char *cpu_get_model(uint32_t eflags)
{
- /* TYPE_RISCV_CPU_ANY */
- return "any";
+ return "max";
}
#endif
diff --git a/linux-user/riscv/target_mman.h b/linux-user/riscv/target_mman.h
new file mode 100644
index 0000000000..3049bcc67d
--- /dev/null
+++ b/linux-user/riscv/target_mman.h
@@ -0,0 +1,11 @@
+/*
+ * arch/riscv/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+ */
+#define TASK_UNMAPPED_BASE \
+ TARGET_PAGE_ALIGN((1ull << (TARGET_VIRT_ADDR_SPACE_BITS - 1)) / 3)
+
+/* arch/riscv/include/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2)
+
+#include "../generic/target_mman.h"
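
To make the two macros concrete, a worked check assuming (for illustration only) a 48-bit guest virtual address space and 4 KiB pages; with those inputs TASK_UNMAPPED_BASE lands at 0x2aaaaaaab000 and ELF_ET_DYN_BASE at twice that:

    /* Illustration only: assumed address-space width and page size. */
    #define VA_BITS        48
    #define PAGE_SIZE_     0x1000ull
    #define PAGE_ALIGN_(x) (((x) + PAGE_SIZE_ - 1) & ~(PAGE_SIZE_ - 1))

    #define UNMAPPED_BASE  PAGE_ALIGN_((1ull << (VA_BITS - 1)) / 3)

    _Static_assert(UNMAPPED_BASE     == 0x2aaaaaaab000ull, "TASK_UNMAPPED_BASE");
    _Static_assert(UNMAPPED_BASE * 2 == 0x555555556000ull, "ELF_ET_DYN_BASE");
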
diff --git a/linux-user/riscv/target_prctl.h b/linux-user/riscv/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/riscv/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/riscv/target_proc.h b/linux-user/riscv/target_proc.h
new file mode 100644
index 0000000000..c77c003d65
--- /dev/null
+++ b/linux-user/riscv/target_proc.h
@@ -0,0 +1,37 @@
+/*
+ * RISC-V specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef RISCV_TARGET_PROC_H
+#define RISCV_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int i;
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ RISCVCPU *cpu = env_archcpu(cpu_env);
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
+ char *isa_string = riscv_isa_string(cpu);
+ const char *mmu;
+
+ if (cfg->mmu) {
+ mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
+ } else {
+ mmu = "none";
+ }
+
+ for (i = 0; i < num_cpus; i++) {
+ dprintf(fd, "processor\t: %d\n", i);
+ dprintf(fd, "hart\t\t: %d\n", i);
+ dprintf(fd, "isa\t\t: %s\n", isa_string);
+ dprintf(fd, "mmu\t\t: %s\n", mmu);
+ dprintf(fd, "uarch\t\t: qemu\n\n");
+ }
+
+ g_free(isa_string);
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* RISCV_TARGET_PROC_H */
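
For reference, the loop above emits one block per online host CPU; illustrative output for a single-hart 64-bit guest (the isa string is a made-up example and depends on the configured CPU model):

    processor       : 0
    hart            : 0
    isa             : rv64imafdc
    mmu             : sv48
    uarch           : qemu
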
diff --git a/linux-user/riscv/target_resource.h b/linux-user/riscv/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/riscv/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/riscv/target_signal.h b/linux-user/riscv/target_signal.h
index f113ba9a55..6c0470f0bc 100644
--- a/linux-user/riscv/target_signal.h
+++ b/linux-user/riscv/target_signal.h
@@ -1,18 +1,8 @@
#ifndef RISCV_TARGET_SIGNAL_H
#define RISCV_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* RISCV_TARGET_SIGNAL_H */
diff --git a/linux-user/riscv/target_structs.h b/linux-user/riscv/target_structs.h
index ea3e5ed17e..3a06f373c3 100644
--- a/linux-user/riscv/target_structs.h
+++ b/linux-user/riscv/target_structs.h
@@ -1,46 +1 @@
-/*
- * RISC-V specific structures for linux-user
- *
- * This is a copy of ../aarch64/target_structs.h atm.
- *
- */
-#ifndef RISCV_TARGET_STRUCTS_H
-#define RISCV_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/riscv/target_syscall.h b/linux-user/riscv/target_syscall.h
index dc597c8972..7601f10c28 100644
--- a/linux-user/riscv/target_syscall.h
+++ b/linux-user/riscv/target_syscall.h
@@ -45,12 +45,12 @@ struct target_pt_regs {
#ifdef TARGET_RISCV32
#define UNAME_MACHINE "riscv32"
+#define UNAME_MINIMUM_RELEASE "5.4.0"
#else
#define UNAME_MACHINE "riscv64"
-#endif
#define UNAME_MINIMUM_RELEASE "4.15.0"
+#endif
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/riscv/vdso-32.so b/linux-user/riscv/vdso-32.so
new file mode 100755
index 0000000000..c2ce2a4757
--- /dev/null
+++ b/linux-user/riscv/vdso-32.so
Binary files differ
diff --git a/linux-user/riscv/vdso-64.so b/linux-user/riscv/vdso-64.so
new file mode 100755
index 0000000000..ae49f5b043
--- /dev/null
+++ b/linux-user/riscv/vdso-64.so
Binary files differ
diff --git a/linux-user/riscv/vdso-asmoffset.h b/linux-user/riscv/vdso-asmoffset.h
new file mode 100644
index 0000000000..123902ef61
--- /dev/null
+++ b/linux-user/riscv/vdso-asmoffset.h
@@ -0,0 +1,9 @@
+#ifdef TARGET_ABI32
+# define sizeof_rt_sigframe 0x2b0
+# define offsetof_uc_mcontext 0x120
+# define offsetof_freg0 0x80
+#else
+# define sizeof_rt_sigframe 0x340
+# define offsetof_uc_mcontext 0x130
+# define offsetof_freg0 0x100
+#endif
diff --git a/linux-user/riscv/vdso.S b/linux-user/riscv/vdso.S
new file mode 100644
index 0000000000..c37275233a
--- /dev/null
+++ b/linux-user/riscv/vdso.S
@@ -0,0 +1,187 @@
+/*
+ * RISC-V linux replacement vdso.
+ *
+ * Copyright 2021 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+#include <asm/errno.h>
+
+#if __riscv_xlen == 32
+# define TARGET_ABI32
+#endif
+#include "vdso-asmoffset.h"
+
+ .text
+
+.macro endf name
+ .globl \name
+ .type \name, @function
+ .size \name, . - \name
+.endm
+
+.macro raw_syscall nr
+ li a7, \nr
+ ecall
+.endm
+
+.macro vdso_syscall name, nr
+\name:
+ raw_syscall \nr
+ ret
+endf \name
+.endm
+
+__vdso_gettimeofday:
+ .cfi_startproc
+#ifdef __NR_gettimeofday
+ raw_syscall __NR_gettimeofday
+ ret
+#else
+ /* No gettimeofday, fall back to clock_gettime64. */
+ beq a1, zero, 1f
+ sw zero, 0(a1) /* tz->tz_minuteswest = 0 */
+ sw zero, 4(a1) /* tz->tz_dsttime = 0 */
+1: addi sp, sp, -32
+ .cfi_adjust_cfa_offset 32
+ sw a0, 16(sp) /* save tv */
+ mv a0, sp
+ raw_syscall __NR_clock_gettime64
+ lw t0, 0(sp) /* timespec.tv_sec.low */
+ lw t1, 4(sp) /* timespec.tv_sec.high */
+ lw t2, 8(sp) /* timespec.tv_nsec.low */
+ lw a1, 16(sp) /* restore tv */
+ addi sp, sp, 32
+ .cfi_adjust_cfa_offset -32
+ bne a0, zero, 9f /* syscall error? */
+ li a0, -EOVERFLOW
+ bne t1, zero, 9f /* y2038? */
+ li a0, 0
+ li t3, 1000
+ divu t2, t2, t3 /* nsec -> usec */
+ sw t0, 0(a1) /* tv->tv_sec */
+ sw t2, 4(a1) /* tv->tv_usec */
+9: ret
+#endif
+ .cfi_endproc
+endf __vdso_gettimeofday
+
+ .cfi_startproc
+
+#ifdef __NR_clock_gettime
+vdso_syscall __vdso_clock_gettime, __NR_clock_gettime
+#else
+vdso_syscall __vdso_clock_gettime, __NR_clock_gettime64
+#endif
+
+#ifdef __NR_clock_getres
+vdso_syscall __vdso_clock_getres, __NR_clock_getres
+#else
+vdso_syscall __vdso_clock_getres, __NR_clock_getres_time64
+#endif
+
+vdso_syscall __vdso_getcpu, __NR_getcpu
+
+__vdso_flush_icache:
+ /* qemu does not need to flush the icache */
+ li a0, 0
+ ret
+endf __vdso_flush_icache
+
+ .cfi_endproc
+
+/*
+ * Start the unwind info at least one instruction before the signal
+ * trampoline, because the unwinder will assume we are returning
+ * after a call site.
+ */
+
+ .cfi_startproc simple
+ .cfi_signal_frame
+
+#define sizeof_reg (__riscv_xlen / 8)
+#define sizeof_freg 8
+#define B_GR 0
+#define B_FR offsetof_freg0
+
+ .cfi_def_cfa 2, offsetof_uc_mcontext
+
+ /* Return address */
+ .cfi_return_column 64
+ .cfi_offset 64, B_GR + 0 /* pc */
+
+ /* Integer registers */
+ .cfi_offset 1, B_GR + 1 * sizeof_reg /* r1 (ra) */
+ .cfi_offset 2, B_GR + 2 * sizeof_reg /* r2 (sp) */
+ .cfi_offset 3, B_GR + 3 * sizeof_reg
+ .cfi_offset 4, B_GR + 4 * sizeof_reg
+ .cfi_offset 5, B_GR + 5 * sizeof_reg
+ .cfi_offset 6, B_GR + 6 * sizeof_reg
+ .cfi_offset 7, B_GR + 7 * sizeof_reg
+ .cfi_offset 8, B_GR + 8 * sizeof_reg
+ .cfi_offset 9, B_GR + 9 * sizeof_reg
+ .cfi_offset 10, B_GR + 10 * sizeof_reg
+ .cfi_offset 11, B_GR + 11 * sizeof_reg
+ .cfi_offset 12, B_GR + 12 * sizeof_reg
+ .cfi_offset 13, B_GR + 13 * sizeof_reg
+ .cfi_offset 14, B_GR + 14 * sizeof_reg
+ .cfi_offset 15, B_GR + 15 * sizeof_reg
+ .cfi_offset 16, B_GR + 16 * sizeof_reg
+ .cfi_offset 17, B_GR + 17 * sizeof_reg
+ .cfi_offset 18, B_GR + 18 * sizeof_reg
+ .cfi_offset 19, B_GR + 19 * sizeof_reg
+ .cfi_offset 20, B_GR + 20 * sizeof_reg
+ .cfi_offset 21, B_GR + 21 * sizeof_reg
+ .cfi_offset 22, B_GR + 22 * sizeof_reg
+ .cfi_offset 23, B_GR + 23 * sizeof_reg
+ .cfi_offset 24, B_GR + 24 * sizeof_reg
+ .cfi_offset 25, B_GR + 25 * sizeof_reg
+ .cfi_offset 26, B_GR + 26 * sizeof_reg
+ .cfi_offset 27, B_GR + 27 * sizeof_reg
+ .cfi_offset 28, B_GR + 28 * sizeof_reg
+ .cfi_offset 29, B_GR + 29 * sizeof_reg
+ .cfi_offset 30, B_GR + 30 * sizeof_reg
+ .cfi_offset 31, B_GR + 31 * sizeof_reg /* r31 */
+
+ .cfi_offset 32, B_FR + 0 /* f0 */
+ .cfi_offset 33, B_FR + 1 * sizeof_freg /* f1 */
+ .cfi_offset 34, B_FR + 2 * sizeof_freg
+ .cfi_offset 35, B_FR + 3 * sizeof_freg
+ .cfi_offset 36, B_FR + 4 * sizeof_freg
+ .cfi_offset 37, B_FR + 5 * sizeof_freg
+ .cfi_offset 38, B_FR + 6 * sizeof_freg
+ .cfi_offset 39, B_FR + 7 * sizeof_freg
+ .cfi_offset 40, B_FR + 8 * sizeof_freg
+ .cfi_offset 41, B_FR + 9 * sizeof_freg
+ .cfi_offset 42, B_FR + 10 * sizeof_freg
+ .cfi_offset 43, B_FR + 11 * sizeof_freg
+ .cfi_offset 44, B_FR + 12 * sizeof_freg
+ .cfi_offset 45, B_FR + 13 * sizeof_freg
+ .cfi_offset 46, B_FR + 14 * sizeof_freg
+ .cfi_offset 47, B_FR + 15 * sizeof_freg
+ .cfi_offset 48, B_FR + 16 * sizeof_freg
+ .cfi_offset 49, B_FR + 17 * sizeof_freg
+ .cfi_offset 50, B_FR + 18 * sizeof_freg
+ .cfi_offset 51, B_FR + 19 * sizeof_freg
+ .cfi_offset 52, B_FR + 20 * sizeof_freg
+ .cfi_offset 53, B_FR + 21 * sizeof_freg
+ .cfi_offset 54, B_FR + 22 * sizeof_freg
+ .cfi_offset 55, B_FR + 23 * sizeof_freg
+ .cfi_offset 56, B_FR + 24 * sizeof_freg
+ .cfi_offset 57, B_FR + 25 * sizeof_freg
+ .cfi_offset 58, B_FR + 26 * sizeof_freg
+ .cfi_offset 59, B_FR + 27 * sizeof_freg
+ .cfi_offset 60, B_FR + 28 * sizeof_freg
+ .cfi_offset 61, B_FR + 29 * sizeof_freg
+ .cfi_offset 62, B_FR + 30 * sizeof_freg
+ .cfi_offset 63, B_FR + 31 * sizeof_freg /* f31 */
+
+ nop
+
+__vdso_rt_sigreturn:
+ raw_syscall __NR_rt_sigreturn
+endf __vdso_rt_sigreturn
+
+ .cfi_endproc
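
The rv32 fallback path in __vdso_gettimeofday above is easier to follow in C. A hedged model of what it computes (not the vDSO itself): call clock_gettime64, fail with -EOVERFLOW when the 64-bit seconds no longer fit in 32 bits, otherwise convert nanoseconds to microseconds:

    #include <errno.h>
    #include <stdint.h>

    struct timeval32  { int32_t tv_sec; int32_t tv_usec; };
    struct timespec64 { int64_t tv_sec; int64_t tv_nsec; };

    /* "err" is the raw clock_gettime64 result already obtained by the caller. */
    static int gettimeofday32_model(struct timeval32 *tv,
                                    const struct timespec64 *ts, int err)
    {
        if (err) {
            return err;                   /* propagate the syscall error */
        }
        if (ts->tv_sec >> 32) {
            return -EOVERFLOW;            /* seconds overflow 32 bits (y2038) */
        }
        tv->tv_sec  = (int32_t)ts->tv_sec;
        tv->tv_usec = (int32_t)(ts->tv_nsec / 1000);   /* nsec -> usec */
        return 0;
    }
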
diff --git a/linux-user/riscv/vdso.ld b/linux-user/riscv/vdso.ld
new file mode 100644
index 0000000000..aabe2b0ab3
--- /dev/null
+++ b/linux-user/riscv/vdso.ld
@@ -0,0 +1,74 @@
+/*
+ * Linker script for linux riscv replacement vdso.
+ *
+ * Copyright 2021 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_4.15 {
+ global:
+ __vdso_rt_sigreturn;
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
+ __vdso_clock_getres;
+ __vdso_getcpu;
+ __vdso_flush_icache;
+
+ local: *;
+ };
+}
+
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS;
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ /*
+ * We can't prelink to any address without knowing something about
+ * the virtual memory space of the host, since that leaks over into
+ * the available memory space of the guest.
+ */
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ .data : {
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load =0xd503201f
+}
diff --git a/linux-user/s390x/Makefile.vdso b/linux-user/s390x/Makefile.vdso
new file mode 100644
index 0000000000..e82bf9e29f
--- /dev/null
+++ b/linux-user/s390x/Makefile.vdso
@@ -0,0 +1,11 @@
+include $(BUILD_DIR)/tests/tcg/s390x-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/s390x
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso.so
+
+$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ -nostdlib -shared -Wl,-h,linux-vdso64.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both \
+ -Wl,-T,$(SUBDIR)/vdso.ld $<
diff --git a/linux-user/s390x/cpu_loop.c b/linux-user/s390x/cpu_loop.c
index 69b69981f6..8b7ac2879e 100644
--- a/linux-user/s390x/cpu_loop.c
+++ b/linux-user/s390x/cpu_loop.c
@@ -18,14 +18,11 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
-/* s390x masks the fault address it reports in si_addr for SIGSEGV and SIGBUS */
-#define S390X_FAIL_ADDR_MASK -4096LL
static int get_pgm_data_si_code(int dxc_code)
{
@@ -60,7 +57,6 @@ void cpu_loop(CPUS390XState *env)
{
CPUState *cs = env_cpu(env);
int trapnr, n, sig;
- target_siginfo_t info;
target_ulong addr;
abi_long ret;
@@ -85,11 +81,20 @@ void cpu_loop(CPUS390XState *env)
ret = do_syscall(env, n, env->regs[2], env->regs[3],
env->regs[4], env->regs[5],
env->regs[6], env->regs[7], 0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->psw.addr -= env->int_svc_ilen;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->regs[2] = ret;
}
+
+ if (unlikely(cs->singlestep_enabled)) {
+ /*
+ * cpu_tb_exec() did not raise EXCP_DEBUG, because it has seen
+ * that EXCP_SVC was already pending.
+ */
+ cs->exception_index = EXCP_DEBUG;
+ }
+
break;
case EXCP_DEBUG:
@@ -111,12 +116,13 @@ void cpu_loop(CPUS390XState *env)
n = TARGET_ILL_ILLOPC;
goto do_signal_pc;
case PGM_PROTECTION:
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR,
+ env->__excp_addr);
+ break;
case PGM_ADDRESSING:
- sig = TARGET_SIGSEGV;
- /* XXX: check env->error_code */
- n = TARGET_SEGV_MAPERR;
- addr = env->__excp_addr & S390X_FAIL_ADDR_MASK;
- goto do_signal;
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
+ env->__excp_addr);
+ break;
case PGM_EXECUTE:
case PGM_SPECIFICATION:
case PGM_SPECIAL_OP:
@@ -159,11 +165,7 @@ void cpu_loop(CPUS390XState *env)
*/
env->psw.addr += env->int_pgm_ilen;
do_signal:
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = n;
- info._sifields._sigfault._addr = addr;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(sig, n, addr);
break;
case EXCP_ATOMIC:
diff --git a/linux-user/s390x/meson.build b/linux-user/s390x/meson.build
index 0781ccea1d..a7a25ed9ce 100644
--- a/linux-user/s390x/meson.build
+++ b/linux-user/s390x/meson.build
@@ -3,3 +3,9 @@ syscall_nr_generators += {
arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
output: '@BASENAME@_nr.h')
}
+
+vdso_inc = gen_vdso.process('vdso.so', extra_args: [
+ '-s', '__kernel_sigreturn',
+ '-r', '__kernel_rt_sigreturn'
+ ])
+linux_user_ss.add(when: 'TARGET_S390X', if_true: vdso_inc)
diff --git a/linux-user/s390x/signal.c b/linux-user/s390x/signal.c
index 80f34086d7..df49c24708 100644
--- a/linux-user/s390x/signal.c
+++ b/linux-user/s390x/signal.c
@@ -21,13 +21,12 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
+#include "vdso-asmoffset.h"
#define __NUM_GPRS 16
#define __NUM_FPRS 16
#define __NUM_ACRS 16
-#define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
-
#define _SIGCONTEXT_NSIG 64
#define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
@@ -63,12 +62,11 @@ typedef struct {
} target_sigcontext;
typedef struct {
- uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
+ uint8_t callee_used_stack[STACK_FRAME_OVERHEAD];
target_sigcontext sc;
target_sigregs sregs;
int signo;
target_sigregs_ext sregs_ext;
- uint16_t retcode;
} sigframe;
#define TARGET_UC_VXRS 2
@@ -84,8 +82,12 @@ struct target_ucontext {
};
typedef struct {
- uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
- uint16_t retcode;
+ uint8_t callee_used_stack[STACK_FRAME_OVERHEAD];
+ /*
+ * This field is no longer initialized by the kernel, but it's still a part
+ * of the ABI.
+ */
+ uint16_t svc_insn;
struct target_siginfo info;
struct target_ucontext uc;
} rt_sigframe;
@@ -143,6 +145,7 @@ static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
* We have to store the fp registers to current->thread.fp_regs
* to merge them with the emulated registers.
*/
+ __put_user(env->fpc, &sregs->fpregs.fpc);
for (i = 0; i < 16; i++) {
__put_user(*get_freg(env, i), &sregs->fpregs.fprs[i]);
}
@@ -209,9 +212,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
if (ka->sa_flags & TARGET_SA_RESTORER) {
restorer = ka->sa_restorer;
} else {
- restorer = frame_addr + offsetof(sigframe, retcode);
- __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
- &frame->retcode);
+ restorer = default_sigreturn;
}
/* Set up registers for signal handler */
@@ -262,13 +263,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
if (ka->sa_flags & TARGET_SA_RESTORER) {
restorer = ka->sa_restorer;
} else {
- restorer = frame_addr + offsetof(typeof(*frame), retcode);
- __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
- &frame->retcode);
+ restorer = default_rt_sigreturn;
}
/* Create siginfo on the signal stack. */
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
/* Create ucontext on the signal stack. */
uc_flags = 0;
@@ -332,6 +331,7 @@ static void restore_sigregs(CPUS390XState *env, target_sigregs *sc)
for (i = 0; i < 16; i++) {
__get_user(env->aregs[i], &sc->regs.acrs[i]);
}
+ __get_user(env->fpc, &sc->fpregs.fpc);
for (i = 0; i < 16; i++) {
__get_user(*get_freg(env, i), &sc->fpregs.fprs[i]);
}
@@ -365,7 +365,7 @@ long do_sigreturn(CPUS390XState *env)
trace_user_do_sigreturn(env, frame_addr);
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
/* Make sure that we're initializing all of target_set. */
@@ -379,7 +379,7 @@ long do_sigreturn(CPUS390XState *env)
restore_sigregs_ext(env, &frame->sregs_ext);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUS390XState *env)
@@ -391,7 +391,7 @@ long do_rt_sigreturn(CPUS390XState *env)
trace_user_do_rt_sigreturn(env, frame_addr);
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
@@ -403,5 +403,19 @@ long do_rt_sigreturn(CPUS390XState *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 + 2, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, &tramp[0]);
+
+ default_rt_sigreturn = sigtramp_page + 2;
+ __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, &tramp[1]);
+
+ unlock_user(tramp, sigtramp_page, 2 + 2);
}
diff --git a/linux-user/s390x/target_mman.h b/linux-user/s390x/target_mman.h
new file mode 100644
index 0000000000..c82435e381
--- /dev/null
+++ b/linux-user/s390x/target_mman.h
@@ -0,0 +1,21 @@
+/*
+ * arch/s390/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE (... : (_REGION2_SIZE >> 1))
+ *
+ * arch/s390/include/asm/pgtable.h:
+ * _REGION2_SIZE (1UL << _REGION2_SHIFT)
+ * _REGION2_SHIFT 42
+ */
+#define TASK_UNMAPPED_BASE (1ull << 41)
+
+/*
+ * arch/s390/include/asm/elf.h:
+ * ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)
+ *
+ * arch/s390/include/asm/processor.h:
+ * STACK_TOP VDSO_LIMIT - VDSO_SIZE - PAGE_SIZE
+ * VDSO_LIMIT _REGION2_SIZE
+ */
+#define ELF_ET_DYN_BASE (((1ull << 42) / 3 * 2) & ~0xffffffffull)
+
+#include "../generic/target_mman.h"
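
The two constants above can be sanity-checked mechanically; a tiny stand-alone check of the arithmetic, which follows directly from the kernel definitions quoted in the comments:

    /* 2^41 is half of _REGION2_SIZE; ELF_ET_DYN_BASE = (2^42 / 3 * 2) & ~0xffffffff */
    _Static_assert((1ull << 41) == 0x20000000000ull, "TASK_UNMAPPED_BASE");
    _Static_assert((((1ull << 42) / 3 * 2) & ~0xffffffffull) == 0x2aa00000000ull,
                   "ELF_ET_DYN_BASE");
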
diff --git a/linux-user/s390x/target_prctl.h b/linux-user/s390x/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/s390x/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/s390x/target_proc.h b/linux-user/s390x/target_proc.h
new file mode 100644
index 0000000000..a4a4821ea5
--- /dev/null
+++ b/linux-user/s390x/target_proc.h
@@ -0,0 +1,109 @@
+/*
+ * S390X specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef S390X_TARGET_PROC_H
+#define S390X_TARGET_PROC_H
+
+/*
+ * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
+ * show in /proc/cpuinfo.
+ *
+ * Skip the following in order to match the missing support in op_ecag():
+ * - show_cacheinfo().
+ * - show_cpu_topology().
+ * - show_cpu_mhz().
+ *
+ * Use fixed values for certain fields:
+ * - bogomips per cpu - from a qemu-system-s390x run.
+ * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
+ *
+ * Keep the code structure close to arch/s390/kernel/processor.c.
+ */
+
+static void show_facilities(int fd)
+{
+ size_t sizeof_stfl_bytes = 2048;
+ g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
+ unsigned int bit;
+
+ dprintf(fd, "facilities :");
+ s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
+ for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
+ if (test_be_bit(bit, stfl_bytes)) {
+ dprintf(fd, " %d", bit);
+ }
+ }
+ dprintf(fd, "\n");
+}
+
+static int cpu_ident(unsigned long n)
+{
+ return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
+ n);
+}
+
+static void show_cpu_summary(CPUArchState *cpu_env, int fd)
+{
+ S390CPUModel *model = env_archcpu(cpu_env)->model;
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ uint32_t elf_hwcap = get_elf_hwcap();
+ const char *hwcap_str;
+ int i;
+
+ dprintf(fd, "vendor_id : IBM/S390\n"
+ "# processors : %i\n"
+ "bogomips per cpu: 13370.00\n",
+ num_cpus);
+ dprintf(fd, "max thread id : 0\n");
+ dprintf(fd, "features\t: ");
+ for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
+ if (!(elf_hwcap & (1 << i))) {
+ continue;
+ }
+ hwcap_str = elf_hwcap_str(i);
+ if (hwcap_str) {
+ dprintf(fd, "%s ", hwcap_str);
+ }
+ }
+ dprintf(fd, "\n");
+ show_facilities(fd);
+ for (i = 0; i < num_cpus; i++) {
+ dprintf(fd, "processor %d: "
+ "version = %02X, "
+ "identification = %06X, "
+ "machine = %04X\n",
+ i, model->cpu_ver, cpu_ident(i), model->def->type);
+ }
+}
+
+static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
+{
+ S390CPUModel *model = env_archcpu(cpu_env)->model;
+
+ dprintf(fd, "version : %02X\n", model->cpu_ver);
+ dprintf(fd, "identification : %06X\n", cpu_ident(n));
+ dprintf(fd, "machine : %04X\n", model->def->type);
+}
+
+static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
+{
+ dprintf(fd, "\ncpu number : %ld\n", n);
+ show_cpu_ids(cpu_env, fd, n);
+}
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ int i;
+
+ show_cpu_summary(cpu_env, fd);
+ for (i = 0; i < num_cpus; i++) {
+ show_cpuinfo(cpu_env, fd, i);
+ }
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* S390X_TARGET_PROC_H */
diff --git a/linux-user/s390x/target_resource.h b/linux-user/s390x/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/s390x/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h
index bbfc464d44..41e0e34a55 100644
--- a/linux-user/s390x/target_signal.h
+++ b/linux-user/s390x/target_signal.h
@@ -1,22 +1,9 @@
#ifndef S390X_TARGET_SIGNAL_H
#define S390X_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* S390X_TARGET_SIGNAL_H */
diff --git a/linux-user/s390x/target_syscall.h b/linux-user/s390x/target_syscall.h
index 94f84178db..4018988a25 100644
--- a/linux-user/s390x/target_syscall.h
+++ b/linux-user/s390x/target_syscall.h
@@ -27,7 +27,6 @@ struct target_pt_regs {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS2
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/s390x/vdso-asmoffset.h b/linux-user/s390x/vdso-asmoffset.h
new file mode 100644
index 0000000000..27a062d6c1
--- /dev/null
+++ b/linux-user/s390x/vdso-asmoffset.h
@@ -0,0 +1,2 @@
+/* Minimum stack frame size */
+#define STACK_FRAME_OVERHEAD 160
diff --git a/linux-user/s390x/vdso.S b/linux-user/s390x/vdso.S
new file mode 100644
index 0000000000..3332492477
--- /dev/null
+++ b/linux-user/s390x/vdso.S
@@ -0,0 +1,61 @@
+/*
+ * s390x linux replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+#include "vdso-asmoffset.h"
+
+.macro endf name
+ .globl \name
+ .type \name, @function
+ .size \name, . - \name
+.endm
+
+.macro raw_syscall n
+ .ifne \n < 0x100
+ svc \n
+ .else
+ lghi %r1, \n
+ svc 0
+ .endif
+.endm
+
+.macro vdso_syscall name, nr
+\name:
+ .cfi_startproc
+ aghi %r15, -(STACK_FRAME_OVERHEAD + 16)
+ .cfi_adjust_cfa_offset STACK_FRAME_OVERHEAD + 16
+ stg %r14, STACK_FRAME_OVERHEAD(%r15)
+ .cfi_rel_offset %r14, STACK_FRAME_OVERHEAD
+ raw_syscall \nr
+ lg %r14, STACK_FRAME_OVERHEAD(%r15)
+ aghi %r15, STACK_FRAME_OVERHEAD + 16
+ .cfi_restore %r14
+ .cfi_adjust_cfa_offset -(STACK_FRAME_OVERHEAD + 16)
+ br %r14
+ .cfi_endproc
+endf \name
+.endm
+
+vdso_syscall __kernel_gettimeofday, __NR_gettimeofday
+vdso_syscall __kernel_clock_gettime, __NR_clock_gettime
+vdso_syscall __kernel_clock_getres, __NR_clock_getres
+vdso_syscall __kernel_getcpu, __NR_getcpu
+
+/*
+ * TODO unwind info, though we're ok without it.
+ * The kernel supplies bogus empty unwind info, and it is likely ignored
+ * by all users. Without it we get the fallback signal frame handling.
+ */
+
+__kernel_sigreturn:
+ raw_syscall __NR_sigreturn
+endf __kernel_sigreturn
+
+__kernel_rt_sigreturn:
+ raw_syscall __NR_rt_sigreturn
+endf __kernel_rt_sigreturn
diff --git a/linux-user/s390x/vdso.ld b/linux-user/s390x/vdso.ld
new file mode 100644
index 0000000000..d3f1d1b164
--- /dev/null
+++ b/linux-user/s390x/vdso.ld
@@ -0,0 +1,72 @@
+/*
+ * Linker script for linux s390x replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_2.6.29 {
+ global:
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+ __kernel_getcpu;
+ __kernel_rt_sigreturn;
+ __kernel_sigreturn;
+ /*
+ * QEMU handles syscall restart internally, so we don't
+ * need the __kernel_restart_syscall entry point.
+ */
+ local: *;
+ };
+}
+
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDRS and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ .data : {
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load
+}
diff --git a/linux-user/s390x/vdso.so b/linux-user/s390x/vdso.so
new file mode 100755
index 0000000000..64130f6f33
--- /dev/null
+++ b/linux-user/s390x/vdso.so
Binary files differ
diff --git a/linux-user/safe-syscall.S b/linux-user/safe-syscall.S
deleted file mode 100644
index 42ea7c40ba..0000000000
--- a/linux-user/safe-syscall.S
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * safe-syscall.S : include the host-specific assembly fragment
- * to handle signals occurring at the same time as system calls.
- *
- * Written by Peter Maydell <peter.maydell@linaro.org>
- *
- * Copyright (C) 2016 Linaro Limited
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#include "hostdep.h"
-#include "target_errno_defs.h"
-
-/* We have the correct host directory on our include path
- * so that this will pull in the right fragment for the architecture.
- */
-#ifdef HAVE_SAFE_SYSCALL
-#include "safe-syscall.inc.S"
-#endif
-
-/* We must specifically say that we're happy for the stack to not be
- * executable, otherwise the toolchain will default to assuming our
- * assembly needs an executable stack and the whole QEMU binary will
- * needlessly end up with one. This should be the last thing in this file.
- */
-#if defined(__linux__) && defined(__ELF__)
-.section .note.GNU-stack, "", %progbits
-#endif
diff --git a/linux-user/safe-syscall.h b/linux-user/safe-syscall.h
deleted file mode 100644
index 6bc0390262..0000000000
--- a/linux-user/safe-syscall.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * safe-syscall.h: prototypes for linux-user signal-race-safe syscalls
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef LINUX_USER_SAFE_SYSCALL_H
-#define LINUX_USER_SAFE_SYSCALL_H
-
-/**
- * safe_syscall:
- * @int number: number of system call to make
- * ...: arguments to the system call
- *
- * Call a system call if guest signal not pending.
- * This has the same API as the libc syscall() function, except that it
- * may return -1 with errno == TARGET_ERESTARTSYS if a signal was pending.
- *
- * Returns: the system call result, or -1 with an error code in errno
- * (Errnos are host errnos; we rely on TARGET_ERESTARTSYS not clashing
- * with any of the host errno values.)
- */
-
-/*
- * A guide to using safe_syscall() to handle interactions between guest
- * syscalls and guest signals:
- *
- * Guest syscalls come in two flavours:
- *
- * (1) Non-interruptible syscalls
- *
- * These are guest syscalls that never get interrupted by signals and
- * so never return EINTR. They can be implemented straightforwardly in
- * QEMU: just make sure that if the implementation code has to make any
- * blocking calls that those calls are retried if they return EINTR.
- * It's also OK to implement these with safe_syscall, though it will be
- * a little less efficient if a signal is delivered at the 'wrong' moment.
- *
- * Some non-interruptible syscalls need to be handled using block_signals()
- * to block signals for the duration of the syscall. This mainly applies
- * to code which needs to modify the data structures used by the
- * host_signal_handler() function and the functions it calls, including
- * all syscalls which change the thread's signal mask.
- *
- * (2) Interruptible syscalls
- *
- * These are guest syscalls that can be interrupted by signals and
- * for which we need to either return EINTR or arrange for the guest
- * syscall to be restarted. This category includes both syscalls which
- * always restart (and in the kernel return -ERESTARTNOINTR), ones
- * which only restart if there is no handler (kernel returns -ERESTARTNOHAND
- * or -ERESTART_RESTARTBLOCK), and the most common kind which restart
- * if the handler was registered with SA_RESTART (kernel returns
- * -ERESTARTSYS). System calls which are only interruptible in some
- * situations (like 'open') also need to be handled this way.
- *
- * Here it is important that the host syscall is made
- * via this safe_syscall() function, and *not* via the host libc.
- * If the host libc is used then the implementation will appear to work
- * most of the time, but there will be a race condition where a
- * signal could arrive just before we make the host syscall inside libc,
- * and then the guest syscall will not correctly be interrupted.
- * Instead the implementation of the guest syscall can use the safe_syscall
- * function but otherwise just return the result or errno in the usual
- * way; the main loop code will take care of restarting the syscall
- * if appropriate.
- *
- * (If the implementation needs to make multiple host syscalls this is
- * OK; any which might really block must be via safe_syscall(); for those
- * which are only technically blocking (ie which we know in practice won't
- * stay in the host kernel indefinitely) it's OK to use libc if necessary.
- * You must be able to cope with backing out correctly if some safe_syscall
- * you make in the implementation returns either -TARGET_ERESTARTSYS or
- * EINTR though.)
- *
- * block_signals() cannot be used for interruptible syscalls.
- *
- *
- * How and why the safe_syscall implementation works:
- *
- * The basic setup is that we make the host syscall via a known
- * section of host native assembly. If a signal occurs, our signal
- * handler checks the interrupted host PC against the address of that
- * known section. If the PC is before or at the address of the syscall
- * instruction then we change the PC to point at a "return
- * -TARGET_ERESTARTSYS" code path instead, and then exit the signal handler
- * (causing the safe_syscall() call to immediately return that value).
- * Then in the main.c loop if we see this magic return value we adjust
- * the guest PC to wind it back to before the system call, and invoke
- * the guest signal handler as usual.
- *
- * This winding-back will happen in two cases:
- * (1) signal came in just before we took the host syscall (a race);
- * in this case we'll take the guest signal and have another go
- * at the syscall afterwards, and this is indistinguishable for the
- * guest from the timing having been different such that the guest
- * signal really did win the race
- * (2) signal came in while the host syscall was blocking, and the
- * host kernel decided the syscall should be restarted;
- * in this case we want to restart the guest syscall also, and so
- * rewinding is the right thing. (Note that "restart" semantics mean
- * "first call the signal handler, then reattempt the syscall".)
- * The other situation to consider is when a signal came in while the
- * host syscall was blocking, and the host kernel decided that the syscall
- * should not be restarted; in this case QEMU's host signal handler will
- * be invoked with the PC pointing just after the syscall instruction,
- * with registers indicating an EINTR return; the special code in the
- * handler will not kick in, and we will return EINTR to the guest as
- * we should.
- *
- * Notice that we can leave the host kernel to make the decision for
- * us about whether to do a restart of the syscall or not; we do not
- * need to check SA_RESTART flags in QEMU or distinguish the various
- * kinds of restartability.
- */
-#ifdef HAVE_SAFE_SYSCALL
-/* The core part of this function is implemented in assembly */
-extern long safe_syscall_base(int *pending, long number, ...);
-
-#define safe_syscall(...) \
- ({ \
- long ret_; \
- int *psp_ = &((TaskState *)thread_cpu->opaque)->signal_pending; \
- ret_ = safe_syscall_base(psp_, __VA_ARGS__); \
- if (is_error(ret_)) { \
- errno = -ret_; \
- ret_ = -1; \
- } \
- ret_; \
- })
-
-#else
-
-/*
- * Fallback for architectures which don't yet provide a safe-syscall assembly
- * fragment; note that this is racy!
- * This should go away when all host architectures have been updated.
- */
-#define safe_syscall syscall
-
-#endif
-
-#endif
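
The mechanism described in the comment block being removed above boils down to a PC range check in the host signal handler. A conceptual sketch with hypothetical label names (not QEMU's actual code):

    #include <stdint.h>

    /* Hypothetical labels exported by the safe-syscall assembly fragment. */
    extern char safe_syscall_region_start[];
    extern char safe_syscall_insn[];            /* the syscall instruction itself */
    extern char safe_syscall_return_erestart[]; /* "return -TARGET_ERESTARTSYS" stub */

    static void rewind_if_in_safe_syscall(uintptr_t *host_pc)
    {
        /* Signal arrived before (or at) the syscall instruction: pretend the
         * syscall never started, so the main loop can restart it. */
        if (*host_pc >= (uintptr_t)safe_syscall_region_start &&
            *host_pc <= (uintptr_t)safe_syscall_insn) {
            *host_pc = (uintptr_t)safe_syscall_return_erestart;
        }
    }
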
diff --git a/linux-user/semihost.c b/linux-user/semihost.c
index 17f074ac56..cee62a365c 100644
--- a/linux-user/semihost.c
+++ b/linux-user/semihost.c
@@ -16,39 +16,6 @@
#include "user-internals.h"
#include <termios.h>
-int qemu_semihosting_console_outs(CPUArchState *env, target_ulong addr)
-{
- int len = target_strlen(addr);
- void *s;
- if (len < 0){
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: passed inaccessible address " TARGET_FMT_lx,
- __func__, addr);
- return 0;
- }
- s = lock_user(VERIFY_READ, addr, (long)(len + 1), 1);
- g_assert(s); /* target_strlen has already verified this will work */
- len = write(STDERR_FILENO, s, len);
- unlock_user(s, addr, 0);
- return len;
-}
-
-void qemu_semihosting_console_outc(CPUArchState *env, target_ulong addr)
-{
- char c;
-
- if (get_user_u8(c, addr)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: passed inaccessible address " TARGET_FMT_lx,
- __func__, addr);
- } else {
- if (write(STDERR_FILENO, &c, 1) != 1) {
- qemu_log_mask(LOG_UNIMP, "%s: unexpected write to stdout failure",
- __func__);
- }
- }
-}
-
/*
* For linux-user we can safely block. However as we want to return as
* soon as a character is read we need to tweak the termio to disable
@@ -56,21 +23,28 @@ void qemu_semihosting_console_outc(CPUArchState *env, target_ulong addr)
* program is expecting more normal behaviour. This is slow but
* nothing using semihosting console reading is expecting to be fast.
*/
-target_ulong qemu_semihosting_console_inc(CPUArchState *env)
+int qemu_semihosting_console_read(CPUState *cs, void *buf, int len)
{
- uint8_t c;
+ int ret;
struct termios old_tio, new_tio;
/* Disable line-buffering and echo */
tcgetattr(STDIN_FILENO, &old_tio);
new_tio = old_tio;
new_tio.c_lflag &= (~ICANON & ~ECHO);
+ new_tio.c_cc[VMIN] = 1;
+ new_tio.c_cc[VTIME] = 0;
tcsetattr(STDIN_FILENO, TCSANOW, &new_tio);
- c = getchar();
+ ret = fread(buf, 1, len, stdin);
/* restore config */
tcsetattr(STDIN_FILENO, TCSANOW, &old_tio);
- return (target_ulong) c;
+ return ret;
+}
+
+int qemu_semihosting_console_write(void *buf, int len)
+{
+ return fwrite(buf, 1, len, stderr);
}
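
The new qemu_semihosting_console_read() above relies on a temporary termios tweak so a blocking read returns after a single byte. A stand-alone illustration of the same pattern, using plain read() instead of fread():

    #include <termios.h>
    #include <unistd.h>

    static ssize_t read_one_raw(void *buf, size_t len)
    {
        struct termios old_tio, new_tio;
        ssize_t ret;

        /* Disable line buffering and echo; return as soon as 1 byte arrives. */
        tcgetattr(STDIN_FILENO, &old_tio);
        new_tio = old_tio;
        new_tio.c_lflag &= ~(ICANON | ECHO);
        new_tio.c_cc[VMIN] = 1;
        new_tio.c_cc[VTIME] = 0;
        tcsetattr(STDIN_FILENO, TCSANOW, &new_tio);

        ret = read(STDIN_FILENO, buf, len);

        /* Restore the original terminal configuration. */
        tcsetattr(STDIN_FILENO, TCSANOW, &old_tio);
        return ret;
    }
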
diff --git a/linux-user/sh4/cpu_loop.c b/linux-user/sh4/cpu_loop.c
index 65b8972e3c..c805f9db11 100644
--- a/linux-user/sh4/cpu_loop.c
+++ b/linux-user/sh4/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -28,7 +27,6 @@ void cpu_loop(CPUSH4State *env)
{
CPUState *cs = env_cpu(env);
int trapnr, ret;
- target_siginfo_t info;
while (1) {
bool arch_interrupt = true;
@@ -50,9 +48,9 @@ void cpu_loop(CPUSH4State *env)
env->gregs[0],
env->gregs[1],
0, 0);
- if (ret == -TARGET_ERESTARTSYS) {
+ if (ret == -QEMU_ERESTARTSYS) {
env->pc -= 2;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
+ } else if (ret != -QEMU_ESIGRETURN) {
env->gregs[0] = ret;
}
break;
@@ -60,18 +58,7 @@ void cpu_loop(CPUSH4State *env)
/* just indicate that signals should be handled asap */
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
- case 0xa0:
- case 0xc0:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->tea;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
index d70d744bef..9ecc026fae 100644
--- a/linux-user/sh4/signal.c
+++ b/linux-user/sh4/signal.c
@@ -52,7 +52,6 @@ struct target_sigframe
{
struct target_sigcontext sc;
target_ulong extramask[TARGET_NSIG_WORDS-1];
- uint16_t retcode[3];
};
@@ -68,7 +67,6 @@ struct target_rt_sigframe
{
struct target_siginfo info;
struct target_ucontext uc;
- uint16_t retcode[3];
};
@@ -106,6 +104,14 @@ static void unwind_gusa(CPUSH4State *regs)
/* Reset the SP to the saved version in R1. */
regs->gregs[15] = regs->gregs[1];
+ } else if (regs->gregs[15] >= -128u && regs->pc == regs->gregs[0]) {
+ /* If we are on the last instruction of a gUSA region, we must reset
+ the SP, otherwise we would be pushing the signal context to
+ invalid memory. */
+ regs->gregs[15] = regs->gregs[1];
+ } else if (regs->flags & TB_FLAG_DELAY_SLOT) {
+ /* If we are in a delay slot, push the previous instruction. */
+ regs->pc -= 2;
}
}
@@ -163,7 +169,7 @@ static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
__get_user(regs->fpul, &sc->sc_fpul);
regs->tra = -1; /* disable syscall checks */
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
+ regs->flags = 0;
}
void setup_frame(int sig, struct target_sigaction *ka,
@@ -190,15 +196,9 @@ void setup_frame(int sig, struct target_sigaction *ka,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa_flags & TARGET_SA_RESTORER) {
- regs->pr = (unsigned long) ka->sa_restorer;
+ regs->pr = ka->sa_restorer;
} else {
- /* Generate return code (system call to sigreturn) */
- abi_ulong retcode_addr = frame_addr +
- offsetof(struct target_sigframe, retcode);
- __put_user(MOVW(2), &frame->retcode[0]);
- __put_user(TRAP_NOARG, &frame->retcode[1]);
- __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
- regs->pr = (unsigned long) retcode_addr;
+ regs->pr = default_sigreturn;
}
/* Set up registers for signal handler */
@@ -207,7 +207,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
regs->gregs[5] = 0;
regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
regs->pc = (unsigned long) ka->_sa_handler;
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
+ regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
unlock_user_struct(frame, frame_addr, 1);
return;
@@ -233,7 +233,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
goto give_sigsegv;
}
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
/* Create the ucontext. */
__put_user(0, &frame->uc.tuc_flags);
@@ -248,15 +248,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa_flags & TARGET_SA_RESTORER) {
- regs->pr = (unsigned long) ka->sa_restorer;
+ regs->pr = ka->sa_restorer;
} else {
- /* Generate return code (system call to sigreturn) */
- abi_ulong retcode_addr = frame_addr +
- offsetof(struct target_rt_sigframe, retcode);
- __put_user(MOVW(2), &frame->retcode[0]);
- __put_user(TRAP_NOARG, &frame->retcode[1]);
- __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
- regs->pr = (unsigned long) retcode_addr;
+ regs->pr = default_rt_sigreturn;
}
/* Set up registers for signal handler */
@@ -265,7 +259,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
regs->pc = (unsigned long) ka->_sa_handler;
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
+ regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
unlock_user_struct(frame, frame_addr, 1);
return;
@@ -300,12 +294,12 @@ long do_sigreturn(CPUSH4State *regs)
restore_sigcontext(regs, &frame->sc);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUSH4State *regs)
@@ -327,10 +321,28 @@ long do_rt_sigreturn(CPUSH4State *regs)
target_restore_altstack(&frame->uc.tuc_stack, regs);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 6, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ __put_user(MOVW(2), &tramp[0]);
+ __put_user(TRAP_NOARG, &tramp[1]);
+ __put_user(TARGET_NR_sigreturn, &tramp[2]);
+
+ default_rt_sigreturn = sigtramp_page + 6;
+ __put_user(MOVW(2), &tramp[3]);
+ __put_user(TRAP_NOARG, &tramp[4]);
+ __put_user(TARGET_NR_rt_sigreturn, &tramp[5]);
+
+ unlock_user(tramp, sigtramp_page, 2 * 6);
}
diff --git a/linux-user/sh4/target_flat.h b/linux-user/sh4/target_flat.h
new file mode 100644
index 0000000000..bc83224cea
--- /dev/null
+++ b/linux-user/sh4/target_flat.h
@@ -0,0 +1 @@
+#include "../generic/target_flat.h"
diff --git a/linux-user/sh4/target_mman.h b/linux-user/sh4/target_mman.h
new file mode 100644
index 0000000000..dd9016081e
--- /dev/null
+++ b/linux-user/sh4/target_mman.h
@@ -0,0 +1,8 @@
+/* arch/sh/include/asm/processor_32.h */
+#define TASK_UNMAPPED_BASE \
+ TARGET_PAGE_ALIGN((1u << TARGET_VIRT_ADDR_SPACE_BITS) / 3)
+
+/* arch/sh/include/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2)
+
+#include "../generic/target_mman.h"
diff --git a/linux-user/sh4/target_prctl.h b/linux-user/sh4/target_prctl.h
new file mode 100644
index 0000000000..5629ddbf39
--- /dev/null
+++ b/linux-user/sh4/target_prctl.h
@@ -0,0 +1 @@
+#include "../generic/target_prctl_unalign.h"
diff --git a/linux-user/sh4/target_proc.h b/linux-user/sh4/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/sh4/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/sh4/target_resource.h b/linux-user/sh4/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/sh4/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h
index d7309b7136..eee6a1a7cd 100644
--- a/linux-user/sh4/target_signal.h
+++ b/linux-user/sh4/target_signal.h
@@ -1,25 +1,9 @@
#ifndef SH4_TARGET_SIGNAL_H
#define SH4_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif /* SH4_TARGET_SIGNAL_H */
diff --git a/linux-user/sh4/target_structs.h b/linux-user/sh4/target_structs.h
index 00ac39478b..3a06f373c3 100644
--- a/linux-user/sh4/target_structs.h
+++ b/linux-user/sh4/target_structs.h
@@ -1,58 +1 @@
-/*
- * SH4 specific structures for linux-user
- *
- * Copyright (c) 2013 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef SH4_TARGET_STRUCTS_H
-#define SH4_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
+#include "../generic/target_structs.h"
diff --git a/linux-user/sh4/target_syscall.h b/linux-user/sh4/target_syscall.h
index c1437adafe..148398855d 100644
--- a/linux-user/sh4/target_syscall.h
+++ b/linux-user/sh4/target_syscall.h
@@ -15,7 +15,6 @@ struct target_pt_regs {
#define UNAME_MACHINE "sh4"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/sh4/termbits.h b/linux-user/sh4/termbits.h
index f91b5c51cf..28e79f2c9a 100644
--- a/linux-user/sh4/termbits.h
+++ b/linux-user/sh4/termbits.h
@@ -39,86 +39,86 @@ struct target_termios {
#define TARGET_VEOL2 16
/* c_iflag bits */
-#define TARGET_IGNBRK 0000001
-#define TARGET_BRKINT 0000002
-#define TARGET_IGNPAR 0000004
-#define TARGET_PARMRK 0000010
-#define TARGET_INPCK 0000020
-#define TARGET_ISTRIP 0000040
-#define TARGET_INLCR 0000100
-#define TARGET_IGNCR 0000200
-#define TARGET_ICRNL 0000400
-#define TARGET_IUCLC 0001000
-#define TARGET_IXON 0002000
-#define TARGET_IXANY 0004000
-#define TARGET_IXOFF 0010000
-#define TARGET_IMAXBEL 0020000
-#define TARGET_IUTF8 0040000
+#define TARGET_IGNBRK 0000001
+#define TARGET_BRKINT 0000002
+#define TARGET_IGNPAR 0000004
+#define TARGET_PARMRK 0000010
+#define TARGET_INPCK 0000020
+#define TARGET_ISTRIP 0000040
+#define TARGET_INLCR 0000100
+#define TARGET_IGNCR 0000200
+#define TARGET_ICRNL 0000400
+#define TARGET_IUCLC 0001000
+#define TARGET_IXON 0002000
+#define TARGET_IXANY 0004000
+#define TARGET_IXOFF 0010000
+#define TARGET_IMAXBEL 0020000
+#define TARGET_IUTF8 0040000
/* c_oflag bits */
-#define TARGET_OPOST 0000001
-#define TARGET_OLCUC 0000002
-#define TARGET_ONLCR 0000004
-#define TARGET_OCRNL 0000010
-#define TARGET_ONOCR 0000020
-#define TARGET_ONLRET 0000040
-#define TARGET_OFILL 0000100
-#define TARGET_OFDEL 0000200
-#define TARGET_NLDLY 0000400
-#define TARGET_NL0 0000000
-#define TARGET_NL1 0000400
-#define TARGET_CRDLY 0003000
-#define TARGET_CR0 0000000
-#define TARGET_CR1 0001000
-#define TARGET_CR2 0002000
-#define TARGET_CR3 0003000
-#define TARGET_TABDLY 0014000
-#define TARGET_TAB0 0000000
-#define TARGET_TAB1 0004000
-#define TARGET_TAB2 0010000
-#define TARGET_TAB3 0014000
-#define TARGET_XTABS 0014000
-#define TARGET_BSDLY 0020000
-#define TARGET_BS0 0000000
-#define TARGET_BS1 0020000
-#define TARGET_VTDLY 0040000
-#define TARGET_VT0 0000000
-#define TARGET_VT1 0040000
-#define TARGET_FFDLY 0100000
-#define TARGET_FF0 0000000
-#define TARGET_FF1 0100000
+#define TARGET_OPOST 0000001
+#define TARGET_OLCUC 0000002
+#define TARGET_ONLCR 0000004
+#define TARGET_OCRNL 0000010
+#define TARGET_ONOCR 0000020
+#define TARGET_ONLRET 0000040
+#define TARGET_OFILL 0000100
+#define TARGET_OFDEL 0000200
+#define TARGET_NLDLY 0000400
+#define TARGET_NL0 0000000
+#define TARGET_NL1 0000400
+#define TARGET_CRDLY 0003000
+#define TARGET_CR0 0000000
+#define TARGET_CR1 0001000
+#define TARGET_CR2 0002000
+#define TARGET_CR3 0003000
+#define TARGET_TABDLY 0014000
+#define TARGET_TAB0 0000000
+#define TARGET_TAB1 0004000
+#define TARGET_TAB2 0010000
+#define TARGET_TAB3 0014000
+#define TARGET_XTABS 0014000
+#define TARGET_BSDLY 0020000
+#define TARGET_BS0 0000000
+#define TARGET_BS1 0020000
+#define TARGET_VTDLY 0040000
+#define TARGET_VT0 0000000
+#define TARGET_VT1 0040000
+#define TARGET_FFDLY 0100000
+#define TARGET_FF0 0000000
+#define TARGET_FF1 0100000
/* c_cflag bit meaning */
-#define TARGET_CBAUD 0010017
-#define TARGET_B0 0000000 /* hang up */
-#define TARGET_B50 0000001
-#define TARGET_B75 0000002
-#define TARGET_B110 0000003
-#define TARGET_B134 0000004
-#define TARGET_B150 0000005
-#define TARGET_B200 0000006
-#define TARGET_B300 0000007
-#define TARGET_B600 0000010
-#define TARGET_B1200 0000011
-#define TARGET_B1800 0000012
-#define TARGET_B2400 0000013
-#define TARGET_B4800 0000014
-#define TARGET_B9600 0000015
-#define TARGET_B19200 0000016
-#define TARGET_B38400 0000017
+#define TARGET_CBAUD 0010017
+#define TARGET_B0 0000000 /* hang up */
+#define TARGET_B50 0000001
+#define TARGET_B75 0000002
+#define TARGET_B110 0000003
+#define TARGET_B134 0000004
+#define TARGET_B150 0000005
+#define TARGET_B200 0000006
+#define TARGET_B300 0000007
+#define TARGET_B600 0000010
+#define TARGET_B1200 0000011
+#define TARGET_B1800 0000012
+#define TARGET_B2400 0000013
+#define TARGET_B4800 0000014
+#define TARGET_B9600 0000015
+#define TARGET_B19200 0000016
+#define TARGET_B38400 0000017
#define TARGET_EXTA B19200
#define TARGET_EXTB B38400
-#define TARGET_CSIZE 0000060
-#define TARGET_CS5 0000000
-#define TARGET_CS6 0000020
-#define TARGET_CS7 0000040
-#define TARGET_CS8 0000060
-#define TARGET_CSTOPB 0000100
-#define TARGET_CREAD 0000200
-#define TARGET_PARENB 0000400
-#define TARGET_PARODD 0001000
-#define TARGET_HUPCL 0002000
-#define TARGET_CLOCAL 0004000
+#define TARGET_CSIZE 0000060
+#define TARGET_CS5 0000000
+#define TARGET_CS6 0000020
+#define TARGET_CS7 0000040
+#define TARGET_CS8 0000060
+#define TARGET_CSTOPB 0000100
+#define TARGET_CREAD 0000200
+#define TARGET_PARENB 0000400
+#define TARGET_PARODD 0001000
+#define TARGET_HUPCL 0002000
+#define TARGET_CLOCAL 0004000
#define TARGET_CBAUDEX 0010000
#define TARGET_B57600 0010001
#define TARGET_B115200 0010002
@@ -135,44 +135,44 @@ struct target_termios {
#define TARGET_B3000000 0010015
#define TARGET_B3500000 0010016
#define TARGET_B4000000 0010017
-#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */
-#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
-#define TARGET_CRTSCTS 020000000000 /* flow control */
+#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */
+#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
+#define TARGET_CRTSCTS 020000000000 /* flow control */
/* c_lflag bits */
-#define TARGET_ISIG 0000001
-#define TARGET_ICANON 0000002
-#define TARGET_XCASE 0000004
-#define TARGET_ECHO 0000010
-#define TARGET_ECHOE 0000020
-#define TARGET_ECHOK 0000040
-#define TARGET_ECHONL 0000100
-#define TARGET_NOFLSH 0000200
-#define TARGET_TOSTOP 0000400
-#define TARGET_ECHOCTL 0001000
-#define TARGET_ECHOPRT 0002000
-#define TARGET_ECHOKE 0004000
-#define TARGET_FLUSHO 0010000
-#define TARGET_PENDIN 0040000
-#define TARGET_IEXTEN 0100000
+#define TARGET_ISIG 0000001
+#define TARGET_ICANON 0000002
+#define TARGET_XCASE 0000004
+#define TARGET_ECHO 0000010
+#define TARGET_ECHOE 0000020
+#define TARGET_ECHOK 0000040
+#define TARGET_ECHONL 0000100
+#define TARGET_NOFLSH 0000200
+#define TARGET_TOSTOP 0000400
+#define TARGET_ECHOCTL 0001000
+#define TARGET_ECHOPRT 0002000
+#define TARGET_ECHOKE 0004000
+#define TARGET_FLUSHO 0010000
+#define TARGET_PENDIN 0040000
+#define TARGET_IEXTEN 0100000
#define TARGET_EXTPROC 0200000
/* tcflow() and TCXONC use these */
-#define TARGET_TCOOFF 0
-#define TARGET_TCOON 1
-#define TARGET_TCIOFF 2
-#define TARGET_TCION 3
+#define TARGET_TCOOFF 0
+#define TARGET_TCOON 1
+#define TARGET_TCIOFF 2
+#define TARGET_TCION 3
/* tcflush() and TCFLSH use these */
-#define TARGET_TCIFLUSH 0
-#define TARGET_TCOFLUSH 1
-#define TARGET_TCIOFLUSH 2
+#define TARGET_TCIFLUSH 0
+#define TARGET_TCOFLUSH 1
+#define TARGET_TCIOFLUSH 2
/* tcsetattr uses these */
-#define TARGET_TCSANOW 0
-#define TARGET_TCSADRAIN 1
-#define TARGET_TARGET_TCSAFLUSH 2
+#define TARGET_TCSANOW 0
+#define TARGET_TCSADRAIN 1
+#define TARGET_TARGET_TCSAFLUSH 2
/* ioctl */
#define TARGET_FIOCLEX TARGET_IO('f', 1)
@@ -273,7 +273,7 @@ ebugging only */
#define TARGET_TIOCSERGETLSR TARGET_IOR('T', 89, unsigned int) /* 0x5459 */ /* Get line status register */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+# define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#define TARGET_TIOCSERGETMULTI TARGET_IOR('T', 90, int) /* 0x545A */ /* Get multiport config */
#define TARGET_TIOCSERSETMULTI TARGET_IOW('T', 91, int) /* 0x545B */ /* Set multiport config */
diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h
index 79511becb4..f4cbe6185e 100644
--- a/linux-user/signal-common.h
+++ b/linux-user/signal-common.h
@@ -20,6 +20,14 @@
#ifndef SIGNAL_COMMON_H
#define SIGNAL_COMMON_H
+#include "special-errno.h"
+
+/* Fallback addresses into sigtramp page. */
+extern abi_ulong default_sigreturn;
+extern abi_ulong default_rt_sigreturn;
+
+void setup_sigtramp(abi_ulong tramp_page);
+
int on_sig_stack(unsigned long sp);
int sas_ss_flags(unsigned long sp);
abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka);
@@ -35,8 +43,6 @@ void host_to_target_sigset_internal(target_sigset_t *d,
const sigset_t *s);
void target_to_host_sigset_internal(sigset_t *d,
const target_sigset_t *s);
-void tswap_siginfo(target_siginfo_t *tinfo,
- const target_siginfo_t *info);
void set_sigmask(const sigset_t *set);
void force_sig(int sig);
void force_sigsegv(int oldsig);
@@ -51,8 +57,8 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void);
-int queue_signal(CPUArchState *env, int sig, int si_type,
- target_siginfo_t *info);
+void queue_signal(CPUArchState *env, int sig, int si_type,
+ target_siginfo_t *info);
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
int target_to_host_signal(int sig);
@@ -70,7 +76,7 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
* Block all signals, and arrange that the signal mask is returned to
* its correct value for the guest before we resume execution of guest code.
* If this function returns non-zero, then the caller should immediately
- * return -TARGET_ERESTARTSYS to the main loop, which will take the pending
+ * return -QEMU_ERESTARTSYS to the main loop, which will take the pending
* signal and restart execution of the syscall.
* If block_signals() returns zero, then the caller can continue with
* emulation of the system call knowing that no signals can be taken
@@ -84,4 +90,76 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
*/
int block_signals(void); /* Returns non zero if signal pending */
+/**
+ * process_sigsuspend_mask: read and apply syscall-local signal mask
+ *
+ * Read the guest signal mask from @sigset, length @sigsize.
+ * Convert that to a host signal mask and save it to sigsuspend_mask.
+ *
+ * Return value: negative target errno, or zero;
+ * store &sigsuspend_mask into *pset on success.
+ */
+int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
+ target_ulong sigsize);
+
+/**
+ * finish_sigsuspend_mask: finish a sigsuspend-like syscall
+ *
+ * Set in_sigsuspend if we need to use the modified sigset
+ * during process_pending_signals.
+ */
+static inline void finish_sigsuspend_mask(int ret)
+{
+ if (ret != -QEMU_ERESTARTSYS) {
+ TaskState *ts = get_task_state(thread_cpu);
+ ts->in_sigsuspend = 1;
+ }
+}
+
+#if defined(SIGSTKFLT) && defined(TARGET_SIGSTKFLT)
+#define MAKE_SIG_ENTRY_SIGSTKFLT MAKE_SIG_ENTRY(SIGSTKFLT)
+#else
+#define MAKE_SIG_ENTRY_SIGSTKFLT
+#endif
+
+#if defined(SIGIOT) && defined(TARGET_SIGIOT)
+#define MAKE_SIG_ENTRY_SIGIOT MAKE_SIG_ENTRY(SIGIOT)
+#else
+#define MAKE_SIG_ENTRY_SIGIOT
+#endif
+
+#define MAKE_SIGNAL_LIST \
+ MAKE_SIG_ENTRY(SIGHUP) \
+ MAKE_SIG_ENTRY(SIGINT) \
+ MAKE_SIG_ENTRY(SIGQUIT) \
+ MAKE_SIG_ENTRY(SIGILL) \
+ MAKE_SIG_ENTRY(SIGTRAP) \
+ MAKE_SIG_ENTRY(SIGABRT) \
+ MAKE_SIG_ENTRY(SIGBUS) \
+ MAKE_SIG_ENTRY(SIGFPE) \
+ MAKE_SIG_ENTRY(SIGKILL) \
+ MAKE_SIG_ENTRY(SIGUSR1) \
+ MAKE_SIG_ENTRY(SIGSEGV) \
+ MAKE_SIG_ENTRY(SIGUSR2) \
+ MAKE_SIG_ENTRY(SIGPIPE) \
+ MAKE_SIG_ENTRY(SIGALRM) \
+ MAKE_SIG_ENTRY(SIGTERM) \
+ MAKE_SIG_ENTRY(SIGCHLD) \
+ MAKE_SIG_ENTRY(SIGCONT) \
+ MAKE_SIG_ENTRY(SIGSTOP) \
+ MAKE_SIG_ENTRY(SIGTSTP) \
+ MAKE_SIG_ENTRY(SIGTTIN) \
+ MAKE_SIG_ENTRY(SIGTTOU) \
+ MAKE_SIG_ENTRY(SIGURG) \
+ MAKE_SIG_ENTRY(SIGXCPU) \
+ MAKE_SIG_ENTRY(SIGXFSZ) \
+ MAKE_SIG_ENTRY(SIGVTALRM) \
+ MAKE_SIG_ENTRY(SIGPROF) \
+ MAKE_SIG_ENTRY(SIGWINCH) \
+ MAKE_SIG_ENTRY(SIGIO) \
+ MAKE_SIG_ENTRY(SIGPWR) \
+ MAKE_SIG_ENTRY(SIGSYS) \
+ MAKE_SIG_ENTRY_SIGSTKFLT \
+ MAKE_SIG_ENTRY_SIGIOT
+
#endif
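
MAKE_SIGNAL_LIST above is an X-macro: signal.c redefines MAKE_SIG_ENTRY at each point of use, so one list can expand into several different tables, and the conditional MAKE_SIG_ENTRY_SIGSTKFLT/MAKE_SIG_ENTRY_SIGIOT entries simply expand to nothing when either the host or the target lacks that signal. A minimal standalone sketch of the pattern, with illustrative names rather than the QEMU ones:

    #include <signal.h>
    #include <stdio.h>

    #define DEMO_SIGNAL_LIST \
        DEMO_ENTRY(SIGHUP)   \
        DEMO_ENTRY(SIGINT)   \
        DEMO_ENTRY(SIGTERM)

    /* Expansion 1: an array of signal numbers. */
    static const int demo_signals[] = {
    #define DEMO_ENTRY(sig) sig,
        DEMO_SIGNAL_LIST
    #undef DEMO_ENTRY
    };

    /* Expansion 2: the matching array of names, from the same list. */
    static const char *const demo_names[] = {
    #define DEMO_ENTRY(sig) #sig,
        DEMO_SIGNAL_LIST
    #undef DEMO_ENTRY
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(demo_signals) / sizeof(demo_signals[0]); i++) {
            printf("%s = %d\n", demo_names[i], demo_signals[i]);
        }
        return 0;
    }
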
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 2038216455..05dc4afb52 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -18,7 +18,8 @@
*/
#include "qemu/osdep.h"
#include "qemu/bitops.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/user.h"
+#include "hw/core/tcg-cpu-ops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>
@@ -29,17 +30,25 @@
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
+#include "host-signal.h"
+#include "user/safe-syscall.h"
+#include "tcg/tcg.h"
+
+/* target_siginfo_t must fit in gdbstub's siginfo save area. */
+QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);
static struct target_sigaction sigact_table[TARGET_NSIG];
static void host_signal_handler(int host_signum, siginfo_t *info,
void *puc);
+/* Fallback addresses into sigtramp page. */
+abi_ulong default_sigreturn;
+abi_ulong default_rt_sigreturn;
/*
- * System includes define _NSIG as SIGRTMAX + 1,
- * but qemu (like the kernel) defines TARGET_NSIG as TARGET_SIGRTMAX
- * and the first signal is SIGHUP defined as 1
+ * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
+ * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
* Signal number 0 is reserved for use as kill(pid, 0), to test whether
* a process exists without sending it a signal.
*/
@@ -47,41 +56,9 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
- [SIGHUP] = TARGET_SIGHUP,
- [SIGINT] = TARGET_SIGINT,
- [SIGQUIT] = TARGET_SIGQUIT,
- [SIGILL] = TARGET_SIGILL,
- [SIGTRAP] = TARGET_SIGTRAP,
- [SIGABRT] = TARGET_SIGABRT,
-/* [SIGIOT] = TARGET_SIGIOT,*/
- [SIGBUS] = TARGET_SIGBUS,
- [SIGFPE] = TARGET_SIGFPE,
- [SIGKILL] = TARGET_SIGKILL,
- [SIGUSR1] = TARGET_SIGUSR1,
- [SIGSEGV] = TARGET_SIGSEGV,
- [SIGUSR2] = TARGET_SIGUSR2,
- [SIGPIPE] = TARGET_SIGPIPE,
- [SIGALRM] = TARGET_SIGALRM,
- [SIGTERM] = TARGET_SIGTERM,
-#ifdef SIGSTKFLT
- [SIGSTKFLT] = TARGET_SIGSTKFLT,
-#endif
- [SIGCHLD] = TARGET_SIGCHLD,
- [SIGCONT] = TARGET_SIGCONT,
- [SIGSTOP] = TARGET_SIGSTOP,
- [SIGTSTP] = TARGET_SIGTSTP,
- [SIGTTIN] = TARGET_SIGTTIN,
- [SIGTTOU] = TARGET_SIGTTOU,
- [SIGURG] = TARGET_SIGURG,
- [SIGXCPU] = TARGET_SIGXCPU,
- [SIGXFSZ] = TARGET_SIGXFSZ,
- [SIGVTALRM] = TARGET_SIGVTALRM,
- [SIGPROF] = TARGET_SIGPROF,
- [SIGWINCH] = TARGET_SIGWINCH,
- [SIGIO] = TARGET_SIGIO,
- [SIGPWR] = TARGET_SIGPWR,
- [SIGSYS] = TARGET_SIGSYS,
- /* next signals stay the same */
+#define MAKE_SIG_ENTRY(sig) [sig] = TARGET_##sig,
+ MAKE_SIGNAL_LIST
+#undef MAKE_SIG_ENTRY
};
static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
@@ -89,18 +66,24 @@ static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
- if (sig < 1 || sig >= _NSIG) {
+ if (sig < 1) {
return sig;
}
+ if (sig >= _NSIG) {
+ return TARGET_NSIG + 1;
+ }
return host_to_target_signal_table[sig];
}
/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
- if (sig < 1 || sig > TARGET_NSIG) {
+ if (sig < 1) {
return sig;
}
+ if (sig > TARGET_NSIG) {
+ return _NSIG;
+ }
return target_to_host_signal_table[sig];
}
@@ -192,7 +175,7 @@ void target_to_host_old_sigset(sigset_t *sigset,
int block_signals(void)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
sigset_t set;
/* It's OK to block everything including SIGSEGV, because we won't
@@ -207,14 +190,14 @@ int block_signals(void)
/* Wrapper for sigprocmask function
* Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
- * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
+ * are host signal set, not guest ones. Returns -QEMU_ERESTARTSYS if
* a signal was already pending and the syscall must be restarted, or
* 0 on success.
* If set is NULL, this is guaranteed not to fail.
*/
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
if (oldset) {
*oldset = ts->signal_mask;
@@ -224,7 +207,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
int i;
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
switch (how) {
@@ -252,23 +235,21 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
return 0;
}
-#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
* caller is assumed to have called block_signals() already.
*/
void set_sigmask(const sigset_t *set)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
ts->signal_mask = *set;
}
-#endif
/* sigaltstack management */
int on_sig_stack(unsigned long sp)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
return (sp - ts->sigaltstack_used.ss_sp
< ts->sigaltstack_used.ss_size);
@@ -276,7 +257,7 @@ int on_sig_stack(unsigned long sp)
int sas_ss_flags(unsigned long sp)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
: on_sig_stack(sp) ? SS_ONSTACK : 0);
@@ -287,7 +268,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
/*
* This is the X/Open sanctioned signal stack switching.
*/
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
@@ -297,7 +278,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
__put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
__put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
@@ -306,7 +287,7 @@ void target_save_altstack(target_stack_t *uss, CPUArchState *env)
abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
+ TaskState *ts = get_task_state(thread_cpu);
size_t minstacksize = TARGET_MINSIGSTKSZ;
target_stack_t ss;
@@ -400,7 +381,12 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
case TARGET_SIGCHLD:
tinfo->_sifields._sigchld._pid = info->si_pid;
tinfo->_sifields._sigchld._uid = info->si_uid;
- tinfo->_sifields._sigchld._status = info->si_status;
+ if (si_code == CLD_EXITED) {
+     tinfo->_sifields._sigchld._status = info->si_status;
+ } else {
+     tinfo->_sifields._sigchld._status
+         = host_to_target_signal(info->si_status & 0x7f)
+         | (info->si_status & ~0x7f);
+ }
tinfo->_sifields._sigchld._utime = info->si_utime;
tinfo->_sifields._sigchld._stime = info->si_stime;
si_type = QEMU_SI_CHLD;
@@ -426,8 +412,8 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
-void tswap_siginfo(target_siginfo_t *tinfo,
- const target_siginfo_t *info)
+static void tswap_siginfo(target_siginfo_t *tinfo,
+ const target_siginfo_t *info)
{
int si_type = extract32(info->si_code, 16, 16);
int si_code = sextract32(info->si_code, 0, 16);
@@ -509,26 +495,6 @@ void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
-static int fatal_signal (int sig)
-{
- switch (sig) {
- case TARGET_SIGCHLD:
- case TARGET_SIGURG:
- case TARGET_SIGWINCH:
- /* Ignored by default. */
- return 0;
- case TARGET_SIGCONT:
- case TARGET_SIGSTOP:
- case TARGET_SIGTSTP:
- case TARGET_SIGTTIN:
- case TARGET_SIGTTOU:
- /* Job control signals. */
- return 0;
- default:
- return 1;
- }
-}
-
/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
@@ -548,57 +514,68 @@ static int core_dump_signal(int sig)
static void signal_table_init(void)
{
- int host_sig, target_sig, count;
+ int hsig, tsig, count;
/*
* Signals are supported starting from TARGET_SIGRTMIN and going up
- * until we run out of host realtime signals.
- * glibc at least uses only the lower 2 rt signals and probably
- * nobody's using the upper ones.
- * it's why SIGRTMIN (34) is generally greater than __SIGRTMIN (32)
- * To fix this properly we need to do manual signal delivery multiplexed
- * over a single host signal.
+ * until we run out of host realtime signals. Glibc uses the lower 2
+ * RT signals and (hopefully) nobody uses the upper ones.
+ * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
+ * To fix this properly we would need to do manual signal delivery
+ * multiplexed over a single host signal.
* Attempts to configure "missing" signals via sigaction will be
* silently ignored.
+ *
+ * Remap the target SIGABRT, so that we can distinguish host abort
+ * from guest abort. When the guest registers a signal handler or
+ * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest
+ * arrives at dump_core_and_abort(), we will map back to host SIGABRT
+ * so that the parent (native or emulated) sees the correct signal.
+ * Finally, also map host to guest SIGABRT so that the emulated
+ * parent sees the correct mapping from wait status.
*/
- for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
- target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
- if (target_sig <= TARGET_NSIG) {
- host_to_target_signal_table[host_sig] = target_sig;
- }
- }
- /* generate signal conversion tables */
- for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
- target_to_host_signal_table[target_sig] = _NSIG; /* poison */
+ hsig = SIGRTMIN;
+ host_to_target_signal_table[SIGABRT] = 0;
+ host_to_target_signal_table[hsig++] = TARGET_SIGABRT;
+
+ for (tsig = TARGET_SIGRTMIN;
+ hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
+ hsig++, tsig++) {
+ host_to_target_signal_table[hsig] = tsig;
}
- for (host_sig = 1; host_sig < _NSIG; host_sig++) {
- if (host_to_target_signal_table[host_sig] == 0) {
- host_to_target_signal_table[host_sig] = host_sig;
- }
- target_sig = host_to_target_signal_table[host_sig];
- if (target_sig <= TARGET_NSIG) {
- target_to_host_signal_table[target_sig] = host_sig;
+
+ /* Invert the mapping that has already been assigned. */
+ for (hsig = 1; hsig < _NSIG; hsig++) {
+ tsig = host_to_target_signal_table[hsig];
+ if (tsig) {
+ assert(target_to_host_signal_table[tsig] == 0);
+ target_to_host_signal_table[tsig] = hsig;
}
}
- if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
- for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
- if (target_to_host_signal_table[target_sig] == _NSIG) {
- count++;
- }
+ host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;
+
+ /* Map everything else out-of-bounds. */
+ for (hsig = 1; hsig < _NSIG; hsig++) {
+ if (host_to_target_signal_table[hsig] == 0) {
+ host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
}
- trace_signal_table_init(count);
}
+ for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
+ if (target_to_host_signal_table[tsig] == 0) {
+ target_to_host_signal_table[tsig] = _NSIG;
+ count++;
+ }
+ }
+
+ trace_signal_table_init(count);
}
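
The construction above fills the host-to-target direction first, inverts whatever was assigned, and then poisons every remaining slot with an out-of-range value (TARGET_NSIG + 1 on the host side, _NSIG on the target side) so the lookup helpers can reject unmapped signals cheaply. A compressed standalone sketch of the same idea, with made-up table sizes and a single remapped entry standing in for the SIGABRT special case:

    #include <assert.h>
    #include <stdio.h>

    enum { HOST_NSIG = 8, GUEST_NSIG = 6 };
    static int h2g[HOST_NSIG], g2h[GUEST_NSIG + 1];

    int main(void)
    {
        /* Direct mappings, plus one "remapped" signal like SIGABRT above. */
        for (int h = 1; h <= 4; h++) {
            h2g[h] = h;
        }
        h2g[6] = 5;                             /* host 6 carries guest signal 5 */

        for (int h = 1; h < HOST_NSIG; h++) {   /* invert what was assigned */
            if (h2g[h]) {
                assert(g2h[h2g[h]] == 0);
                g2h[h2g[h]] = h;
            }
        }
        for (int h = 1; h < HOST_NSIG; h++) {   /* poison unmapped host slots */
            if (h2g[h] == 0) {
                h2g[h] = GUEST_NSIG + 1;
            }
        }
        for (int g = 1; g <= GUEST_NSIG; g++) { /* poison unmapped guest slots */
            if (g2h[g] == 0) {
                g2h[g] = HOST_NSIG;
            }
        }

        printf("host 6 -> guest %d, guest 6 -> host %d\n", h2g[6], g2h[6]);
        return 0;
    }
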
void signal_init(void)
{
- TaskState *ts = (TaskState *)thread_cpu->opaque;
- struct sigaction act;
- struct sigaction oact;
- int i;
- int host_sig;
+ TaskState *ts = get_task_state(thread_cpu);
+ struct sigaction act, oact;
/* initialize signal conversion tables */
signal_table_init();
@@ -609,27 +586,36 @@ void signal_init(void)
sigfillset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
act.sa_sigaction = host_signal_handler;
- for(i = 1; i <= TARGET_NSIG; i++) {
-#ifdef CONFIG_GPROF
- if (i == TARGET_SIGPROF) {
+
+ /*
+ * A parent process may configure ignored signals, but all other
+ * signals are default. For any target signals that have no host
+ * mapping, set to ignore. For all core_dump_signal, install our
+ * host signal handler so that we may invoke dump_core_and_abort.
+ * This includes SIGSEGV and SIGBUS, which are also need our signal
+ * handler for paging and exceptions.
+ */
+ for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
+ int hsig = target_to_host_signal(tsig);
+ abi_ptr thand = TARGET_SIG_IGN;
+
+ if (hsig >= _NSIG) {
continue;
}
-#endif
- host_sig = target_to_host_signal(i);
- sigaction(host_sig, NULL, &oact);
- if (oact.sa_sigaction == (void *)SIG_IGN) {
- sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
- } else if (oact.sa_sigaction == (void *)SIG_DFL) {
- sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
+
+ /* As we force-remap SIGABRT, we cannot probe and install in one step. */
+ if (tsig == TARGET_SIGABRT) {
+ sigaction(SIGABRT, NULL, &oact);
+ sigaction(hsig, &act, NULL);
+ } else {
+ struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
+ sigaction(hsig, iact, &oact);
+ }
+
+ if (oact.sa_sigaction != (void *)SIG_IGN) {
+ thand = TARGET_SIG_DFL;
}
- /* If there's already a handler installed then something has
- gone horribly wrong, so don't even try to handle that case. */
- /* Install some handlers for our own use. We need at least
- SIGSEGV and SIGBUS, to detect exceptions. We can not just
- trap all signals because it affects syscall interrupt
- behavior. But do trap all default-fatal signals. */
- if (fatal_signal (i))
- sigaction(host_sig, &act, NULL);
+ sigact_table[tsig - 1]._sa_handler = thand;
}
}
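
signal_init() leans on sigaction()'s probe behaviour: passing a NULL new action reads the current disposition without changing it, which is how a SIG_IGN inherited from the parent process is detected, and why the force-remapped SIGABRT needs the probe and the install as two separate calls. A small standalone illustration of the probe:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        struct sigaction oact;

        signal(SIGUSR1, SIG_IGN);        /* pretend the parent ignored it */

        /* act == NULL: read the current disposition without changing it. */
        sigaction(SIGUSR1, NULL, &oact);
        printf("SIGUSR1 is %s\n",
               oact.sa_handler == SIG_IGN ? "ignored" : "not ignored");
        return 0;
    }
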
@@ -640,7 +626,6 @@ void signal_init(void)
void force_sig(int sig)
{
CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
target_siginfo_t info = {};
info.si_signo = sig;
@@ -648,7 +633,7 @@ void force_sig(int sig)
info.si_code = TARGET_SI_KERNEL;
info._sifields._kill._pid = 0;
info._sifields._kill._uid = 0;
- queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
+ queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
}
/*
@@ -658,14 +643,13 @@ void force_sig(int sig)
void force_sig_fault(int sig, int code, abi_ulong addr)
{
CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
target_siginfo_t info = {};
info.si_signo = sig;
info.si_errno = 0;
info.si_code = code;
info._sifields._sigfault._addr = addr;
- queue_signal(env, sig, QEMU_SI_FAULT, &info);
+ queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
}
/* Force a SIGSEGV if we couldn't write to memory trying to set
@@ -683,20 +667,80 @@ void force_sigsegv(int oldsig)
}
force_sig(TARGET_SIGSEGV);
}
-
#endif
+void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type, bool maperr, uintptr_t ra)
+{
+ const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+
+ if (tcg_ops->record_sigsegv) {
+ tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
+ }
+
+ force_sig_fault(TARGET_SIGSEGV,
+ maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
+ addr);
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit_restore(cpu, ra);
+}
+
+void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type, uintptr_t ra)
+{
+ const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+
+ if (tcg_ops->record_sigbus) {
+ tcg_ops->record_sigbus(cpu, addr, access_type, ra);
+ }
+
+ force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit_restore(cpu, ra);
+}
+
/* abort execution with signal */
-static void QEMU_NORETURN dump_core_and_abort(int target_sig)
+static G_NORETURN
+void die_with_signal(int host_sig)
{
- CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
- TaskState *ts = (TaskState *)cpu->opaque;
+ struct sigaction act = {
+ .sa_handler = SIG_DFL,
+ };
+
+ /*
+ * The proper exit code for dying from an uncaught signal is -<signal>.
+ * The kernel doesn't allow exit() or _exit() to pass a negative value.
+ * To get the proper exit code we need to actually die from an uncaught
+ * signal. Here the default signal handler is installed; we send
+ * the signal and wait for it to arrive.
+ */
+ sigfillset(&act.sa_mask);
+ sigaction(host_sig, &act, NULL);
+
+ kill(getpid(), host_sig);
+
+ /* Make sure the signal isn't masked (reusing the mask inside of act). */
+ sigdelset(&act.sa_mask, host_sig);
+ sigsuspend(&act.sa_mask);
+
+ /* unreachable */
+ _exit(EXIT_FAILURE);
+}
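
The effect die_with_signal() is after can be observed from the parent side with a plain fork/wait pair: once the disposition is back at SIG_DFL and the signal is delivered, the child's wait status reports termination by that signal, which a shell would present as exit status 128 + signal, matching the preexit_cleanup(env, 128 + target_sig) call below. A standalone sketch using SIGTERM:

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {
            /* Child: restore the default handler, then signal ourselves. */
            signal(SIGTERM, SIG_DFL);
            kill(getpid(), SIGTERM);
            /* Only reached if the signal were ignored or blocked. */
            _exit(EXIT_FAILURE);
        }

        int status;
        waitpid(pid, &status, 0);
        if (WIFSIGNALED(status)) {
            printf("child terminated by signal %d (shell status %d)\n",
                   WTERMSIG(status), 128 + WTERMSIG(status));
        }
        return 0;
    }
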
+
+static G_NORETURN
+void dump_core_and_abort(CPUArchState *env, int target_sig)
+{
+ CPUState *cpu = env_cpu(env);
+ TaskState *ts = get_task_state(cpu);
int host_sig, core_dumped = 0;
- struct sigaction act;
- host_sig = target_to_host_signal(target_sig);
- trace_user_force_sig(env, target_sig, host_sig);
+ /* On exit, undo the remapping of SIGABRT. */
+ if (target_sig == TARGET_SIGABRT) {
+ host_sig = SIGABRT;
+ } else {
+ host_sig = target_to_host_signal(target_sig);
+ }
+ trace_user_dump_core_and_abort(env, target_sig, host_sig);
gdb_signalled(env, target_sig);
/* dump core if supported by target binary format */
@@ -716,37 +760,17 @@ static void QEMU_NORETURN dump_core_and_abort(int target_sig)
target_sig, strsignal(host_sig), "core dumped" );
}
- /* The proper exit code for dying from an uncaught signal is
- * -<signal>. The kernel doesn't allow exit() or _exit() to pass
- * a negative value. To get the proper exit code we need to
- * actually die from an uncaught signal. Here the default signal
- * handler is installed, we send ourself a signal and we wait for
- * it to arrive. */
- sigfillset(&act.sa_mask);
- act.sa_handler = SIG_DFL;
- act.sa_flags = 0;
- sigaction(host_sig, &act, NULL);
-
- /* For some reason raise(host_sig) doesn't send the signal when
- * statically linked on x86-64. */
- kill(getpid(), host_sig);
-
- /* Make sure the signal isn't masked (just reuse the mask inside
- of act) */
- sigdelset(&act.sa_mask, host_sig);
- sigsuspend(&act.sa_mask);
-
- /* unreachable */
- abort();
+ preexit_cleanup(env, 128 + target_sig);
+ die_with_signal(host_sig);
}
/* queue a signal so that it will be sent to the virtual CPU as soon
as possible */
-int queue_signal(CPUArchState *env, int sig, int si_type,
- target_siginfo_t *info)
+void queue_signal(CPUArchState *env, int sig, int si_type,
+ target_siginfo_t *info)
{
CPUState *cpu = env_cpu(env);
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
trace_user_queue_signal(env, sig);
@@ -756,67 +780,254 @@ int queue_signal(CPUArchState *env, int sig, int si_type,
ts->sync_signal.pending = sig;
/* signal that a new signal is pending */
qatomic_set(&ts->signal_pending, 1);
- return 1; /* indicates that the signal was queued */
}
-#ifndef HAVE_SAFE_SYSCALL
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
- /* Default version: never rewind */
+ host_sigcontext *uc = (host_sigcontext *)puc;
+ uintptr_t pcreg = host_signal_pc(uc);
+
+ if (pcreg > (uintptr_t)safe_syscall_start
+ && pcreg < (uintptr_t)safe_syscall_end) {
+ host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
+ }
}
-#endif
-static void host_signal_handler(int host_signum, siginfo_t *info,
- void *puc)
+static G_NORETURN
+void die_from_signal(siginfo_t *info)
{
- CPUArchState *env = thread_cpu->env_ptr;
- CPUState *cpu = env_cpu(env);
- TaskState *ts = cpu->opaque;
+ char sigbuf[4], codebuf[12];
+ const char *sig, *code = NULL;
+
+ switch (info->si_signo) {
+ case SIGSEGV:
+ sig = "SEGV";
+ switch (info->si_code) {
+ case SEGV_MAPERR:
+ code = "MAPERR";
+ break;
+ case SEGV_ACCERR:
+ code = "ACCERR";
+ break;
+ }
+ break;
+ case SIGBUS:
+ sig = "BUS";
+ switch (info->si_code) {
+ case BUS_ADRALN:
+ code = "ADRALN";
+ break;
+ case BUS_ADRERR:
+ code = "ADRERR";
+ break;
+ }
+ break;
+ case SIGILL:
+ sig = "ILL";
+ switch (info->si_code) {
+ case ILL_ILLOPC:
+ code = "ILLOPC";
+ break;
+ case ILL_ILLOPN:
+ code = "ILLOPN";
+ break;
+ case ILL_ILLADR:
+ code = "ILLADR";
+ break;
+ case ILL_PRVOPC:
+ code = "PRVOPC";
+ break;
+ case ILL_PRVREG:
+ code = "PRVREG";
+ break;
+ case ILL_COPROC:
+ code = "COPROC";
+ break;
+ }
+ break;
+ case SIGFPE:
+ sig = "FPE";
+ switch (info->si_code) {
+ case FPE_INTDIV:
+ code = "INTDIV";
+ break;
+ case FPE_INTOVF:
+ code = "INTOVF";
+ break;
+ }
+ break;
+ case SIGTRAP:
+ sig = "TRAP";
+ break;
+ default:
+ snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
+ sig = sigbuf;
+ break;
+ }
+ if (code == NULL) {
+ snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
+ code = codebuf;
+ }
- int sig;
+ error_report("QEMU internal SIG%s {code=%s, addr=%p}",
+ sig, code, info->si_addr);
+ die_with_signal(info->si_signo);
+}
+
+static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
+ host_sigcontext *uc)
+{
+ uintptr_t host_addr = (uintptr_t)info->si_addr;
+ /*
+ * Convert forcefully to guest address space: addresses outside
+ * reserved_va are still valid to report via SEGV_MAPERR.
+ */
+ bool is_valid = h2g_valid(host_addr);
+ abi_ptr guest_addr = h2g_nocheck(host_addr);
+ uintptr_t pc = host_signal_pc(uc);
+ bool is_write = host_signal_write(info, uc);
+ MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
+ bool maperr;
+
+ /* If this was a write to a TB protected page, restart. */
+ if (is_write
+ && is_valid
+ && info->si_code == SEGV_ACCERR
+ && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
+ pc, guest_addr)) {
+ return;
+ }
+
+ /*
+ * If the access was not on behalf of the guest, within the executable
+ * mapping of the generated code buffer, then it is a host bug.
+ */
+ if (access_type != MMU_INST_FETCH
+ && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
+ die_from_signal(info);
+ }
+
+ maperr = true;
+ if (is_valid && info->si_code == SEGV_ACCERR) {
+ /*
+ * With reserved_va, the whole address space is PROT_NONE,
+ * which means that we may get ACCERR when we want MAPERR.
+ */
+ if (page_get_flags(guest_addr) & PAGE_VALID) {
+ maperr = false;
+ } else {
+ info->si_code = SEGV_MAPERR;
+ }
+ }
+
+ sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
+ cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
+}
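
The ACCERR-to-MAPERR correction above exists because, with reserved_va, the whole unused guest address space is mapped PROT_NONE on the host: touching such a page is reported as SEGV_ACCERR even though, from the guest's point of view, the address was never mapped at all. A standalone demo of the host-side behaviour being corrected for:

    #define _GNU_SOURCE
    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static sigjmp_buf jb;
    static volatile int last_code;

    static void handler(int sig, siginfo_t *info, void *uc)
    {
        (void)sig;
        (void)uc;
        last_code = info->si_code;
        siglongjmp(jb, 1);
    }

    int main(void)
    {
        struct sigaction act;
        char *p;

        memset(&act, 0, sizeof(act));
        act.sa_sigaction = handler;
        act.sa_flags = SA_SIGINFO;
        sigemptyset(&act.sa_mask);
        sigaction(SIGSEGV, &act, NULL);

        p = mmap(NULL, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return 1;
        }
        if (sigsetjmp(jb, 1) == 0) {
            *p = 1;   /* the page exists but is inaccessible */
        }
        printf("PROT_NONE fault: si_code=%d (SEGV_ACCERR=%d, SEGV_MAPERR=%d)\n",
               last_code, SEGV_ACCERR, SEGV_MAPERR);
        return 0;
    }
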
+
+static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
+ host_sigcontext *uc)
+{
+ uintptr_t pc = host_signal_pc(uc);
+ bool is_write = host_signal_write(info, uc);
+ MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
+
+ /*
+ * If the access was not on behalf of the guest, within the executable
+ * mapping of the generated code buffer, then it is a host bug.
+ */
+ if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
+ die_from_signal(info);
+ }
+
+ if (info->si_code == BUS_ADRALN) {
+ uintptr_t host_addr = (uintptr_t)info->si_addr;
+ abi_ptr guest_addr = h2g_nocheck(host_addr);
+
+ sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
+ cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
+ }
+ return pc;
+}
+
+static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
+{
+ CPUState *cpu = thread_cpu;
+ CPUArchState *env = cpu_env(cpu);
+ TaskState *ts = get_task_state(cpu);
target_siginfo_t tinfo;
- ucontext_t *uc = puc;
+ host_sigcontext *uc = puc;
struct emulated_sigtable *k;
+ int guest_sig;
+ uintptr_t pc = 0;
+ bool sync_sig = false;
+ void *sigmask;
- /* the CPU emulator uses some host signals to detect exceptions,
- we forward to it some signals */
- if ((host_signum == SIGSEGV || host_signum == SIGBUS)
- && info->si_code > 0) {
- if (cpu_signal_handler(host_signum, info, puc))
+ /*
+ * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
+ * handling wrt signal blocking and unwinding. Non-spoofed SIGILL,
+ * SIGFPE, SIGTRAP are always host bugs.
+ */
+ if (info->si_code > 0) {
+ switch (host_sig) {
+ case SIGSEGV:
+ /* Only returns on handle_sigsegv_accerr_write success. */
+ host_sigsegv_handler(cpu, info, uc);
return;
+ case SIGBUS:
+ pc = host_sigbus_handler(cpu, info, uc);
+ sync_sig = true;
+ break;
+ case SIGILL:
+ case SIGFPE:
+ case SIGTRAP:
+ die_from_signal(info);
+ }
}
/* get target signal number */
- sig = host_to_target_signal(host_signum);
- if (sig < 1 || sig > TARGET_NSIG)
+ guest_sig = host_to_target_signal(host_sig);
+ if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
return;
- trace_user_host_signal(env, host_signum, sig);
-
- rewind_if_in_safe_syscall(puc);
+ }
+ trace_user_host_signal(env, host_sig, guest_sig);
host_to_target_siginfo_noswap(&tinfo, info);
- k = &ts->sigtab[sig - 1];
+ k = &ts->sigtab[guest_sig - 1];
k->info = tinfo;
- k->pending = sig;
+ k->pending = guest_sig;
ts->signal_pending = 1;
- /* Block host signals until target signal handler entered. We
+ /*
+ * For synchronous signals, unwind the cpu state to the faulting
+ * insn and then exit back to the main loop so that the signal
+ * is delivered immediately.
+ */
+ if (sync_sig) {
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit_restore(cpu, pc);
+ }
+
+ rewind_if_in_safe_syscall(puc);
+
+ /*
+ * Block host signals until target signal handler entered. We
* can't block SIGSEGV or SIGBUS while we're executing guest
* code in case the guest code provokes one in the window between
* now and it getting out to the main loop. Signals will be
* unblocked again in process_pending_signals().
*
- * WARNING: we cannot use sigfillset() here because the uc_sigmask
+ * WARNING: we cannot use sigfillset() here because the sigmask
* field is a kernel sigset_t, which is much smaller than the
* libc sigset_t which sigfillset() operates on. Using sigfillset()
* would write 0xff bytes off the end of the structure and trash
* data on the struct.
- * We can't use sizeof(uc->uc_sigmask) either, because the libc
- * headers define the struct field with the wrong (too large) type.
*/
- memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
- sigdelset(&uc->uc_sigmask, SIGSEGV);
- sigdelset(&uc->uc_sigmask, SIGBUS);
+ sigmask = host_signal_mask(uc);
+ memset(sigmask, 0xff, SIGSET_T_SIZE);
+ sigdelset(sigmask, SIGSEGV);
+ sigdelset(sigmask, SIGBUS);
/* interrupt the virtual CPU as soon as possible */
cpu_exit(thread_cpu);
@@ -869,7 +1080,6 @@ int do_sigaction(int sig, const struct target_sigaction *act,
struct target_sigaction *oact, abi_ulong ka_restorer)
{
struct target_sigaction *k;
- struct sigaction act1;
int host_sig;
int ret = 0;
@@ -884,7 +1094,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
}
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
k = &sigact_table[sig - 1];
@@ -898,7 +1108,6 @@ int do_sigaction(int sig, const struct target_sigaction *act,
oact->sa_mask = k->sa_mask;
}
if (act) {
- /* FIXME: This is not threadsafe. */
__get_user(k->_sa_handler, &act->_sa_handler);
__get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
@@ -930,22 +1139,27 @@ int do_sigaction(int sig, const struct target_sigaction *act,
return 0;
}
if (host_sig != SIGSEGV && host_sig != SIGBUS) {
+ struct sigaction act1;
+
sigfillset(&act1.sa_mask);
act1.sa_flags = SA_SIGINFO;
- if (k->sa_flags & TARGET_SA_RESTART)
- act1.sa_flags |= SA_RESTART;
- /* NOTE: it is important to update the host kernel signal
- ignore state to avoid getting unexpected interrupted
- syscalls */
if (k->_sa_handler == TARGET_SIG_IGN) {
+ /*
+ * It is important to update the host kernel signal ignore
+ * state to avoid getting unexpected interrupted syscalls.
+ */
act1.sa_sigaction = (void *)SIG_IGN;
} else if (k->_sa_handler == TARGET_SIG_DFL) {
- if (fatal_signal (sig))
+ if (core_dump_signal(sig)) {
act1.sa_sigaction = host_signal_handler;
- else
+ } else {
act1.sa_sigaction = (void *)SIG_DFL;
+ }
} else {
act1.sa_sigaction = host_signal_handler;
+ if (k->sa_flags & TARGET_SA_RESTART) {
+ act1.sa_flags |= SA_RESTART;
+ }
}
ret = sigaction(host_sig, &act1, NULL);
}
@@ -959,15 +1173,27 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
CPUState *cpu = env_cpu(cpu_env);
abi_ulong handler;
sigset_t set;
+ target_siginfo_t unswapped;
target_sigset_t target_old_set;
struct target_sigaction *sa;
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
trace_user_handle_signal(cpu_env, sig);
/* dequeue signal */
k->pending = 0;
- sig = gdb_handlesig(cpu, sig);
+ /*
+ * tswap_siginfo() writes the siginfo values out byteswapped for the target.
+ * It also strips the si_type from si_code, making it correct for
+ * the target. We must hold on to the original unswapped copy for
+ * strace below, because si_type is still required there.
+ */
+ if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
+ unswapped = k->info;
+ }
+ tswap_siginfo(&k->info, &k->info);
+
+ sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
if (!sig) {
sa = NULL;
handler = TARGET_SIG_IGN;
@@ -977,7 +1203,7 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
}
if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
- print_taken_signal(sig, &k->info);
+ print_taken_signal(sig, &unswapped);
}
if (handler == TARGET_SIG_DFL) {
@@ -988,12 +1214,12 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
sig != TARGET_SIGURG &&
sig != TARGET_SIGWINCH &&
sig != TARGET_SIGCONT) {
- dump_core_and_abort(sig);
+ dump_core_and_abort(cpu_env, sig);
}
} else if (handler == TARGET_SIG_IGN) {
/* ignore sig */
} else if (handler == TARGET_SIG_ERR) {
- dump_core_and_abort(sig);
+ dump_core_and_abort(cpu_env, sig);
} else {
/* compute the blocked signals during the handler execution */
sigset_t *blocked_set;
@@ -1043,12 +1269,11 @@ void process_pending_signals(CPUArchState *cpu_env)
{
CPUState *cpu = env_cpu(cpu_env);
int sig;
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
sigset_t set;
sigset_t *blocked_set;
while (qatomic_read(&ts->signal_pending)) {
- /* FIXME: This is not threadsafe. */
sigfillset(&set);
sigprocmask(SIG_SETMASK, &set, 0);
@@ -1100,3 +1325,26 @@ void process_pending_signals(CPUArchState *cpu_env)
}
ts->in_sigsuspend = 0;
}
+
+int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
+ target_ulong sigsize)
+{
+ TaskState *ts = get_task_state(thread_cpu);
+ sigset_t *host_set = &ts->sigsuspend_mask;
+ target_sigset_t *target_sigset;
+
+ if (sigsize != sizeof(*target_sigset)) {
+ /* Like the kernel, we enforce correct size sigsets */
+ return -TARGET_EINVAL;
+ }
+
+ target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
+ if (!target_sigset) {
+ return -TARGET_EFAULT;
+ }
+ target_to_host_sigset(host_set, target_sigset);
+ unlock_user(target_sigset, sigset, 0);
+
+ *pset = host_set;
+ return 0;
+}
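
process_sigsuspend_mask() and finish_sigsuspend_mask() together emulate the kernel's sigsuspend contract: the caller-supplied mask is in force only for the duration of the call, and the previous mask comes back once a handler has run. The plain POSIX behaviour being emulated looks like this:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void handler(int sig)
    {
        (void)sig;   /* just interrupt sigsuspend */
    }

    int main(void)
    {
        sigset_t block, wait_mask;

        /* Block SIGALRM normally ... */
        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        sigprocmask(SIG_BLOCK, &block, NULL);
        signal(SIGALRM, handler);
        alarm(1);

        /* ... then atomically unblock it only while waiting. */
        sigemptyset(&wait_mask);
        sigsuspend(&wait_mask);

        printf("woken by SIGALRM; original mask restored\n");
        return 0;
    }
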
diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c
index ad29b4eb6a..50424a54df 100644
--- a/linux-user/sparc/cpu_loop.c
+++ b/linux-user/sparc/cpu_loop.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
@@ -150,12 +149,72 @@ static void flush_windows(CPUSPARCState *env)
#endif
}
+static void next_instruction(CPUSPARCState *env)
+{
+ env->pc = env->npc;
+ env->npc = env->npc + 4;
+}
+
+static uint32_t do_getcc(CPUSPARCState *env)
+{
+#ifdef TARGET_SPARC64
+ return cpu_get_ccr(env) & 0xf;
+#else
+ return extract32(cpu_get_psr(env), 20, 4);
+#endif
+}
+
+static void do_setcc(CPUSPARCState *env, uint32_t icc)
+{
+#ifdef TARGET_SPARC64
+ cpu_put_ccr(env, (cpu_get_ccr(env) & 0xf0) | (icc & 0xf));
+#else
+ cpu_put_psr(env, deposit32(cpu_get_psr(env), 20, 4, icc));
+#endif
+}
+
+static uint32_t do_getpsr(CPUSPARCState *env)
+{
+#ifdef TARGET_SPARC64
+ const uint64_t TSTATE_CWP = 0x1f;
+ const uint64_t TSTATE_ICC = 0xfull << 32;
+ const uint64_t TSTATE_XCC = 0xfull << 36;
+ const uint32_t PSR_S = 0x00000080u;
+ const uint32_t PSR_V8PLUS = 0xff000000u;
+ uint64_t tstate = sparc64_tstate(env);
+
+ /* See <asm/psrcompat.h>, tstate_to_psr. */
+ return ((tstate & TSTATE_CWP) |
+ PSR_S |
+ ((tstate & TSTATE_ICC) >> 12) |
+ ((tstate & TSTATE_XCC) >> 20) |
+ PSR_V8PLUS);
+#else
+ return (cpu_get_psr(env) & (PSR_ICC | PSR_CWP)) | PSR_S;
+#endif
+}
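
do_getcc() and do_setcc() rely on QEMU's extract32/deposit32 bitfield helpers; on 32-bit SPARC the icc field occupies bits 20..23 of the PSR, so reading it is extract32(psr, 20, 4) and writing it back is deposit32(psr, 20, 4, icc). A standalone sketch with plain-C stand-ins for the two helpers (PSR_S = 0x80, as in do_getpsr() above):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0u >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        uint32_t psr = 0x00A00080u;            /* icc = 0xA, PSR_S set */

        assert(extract32(psr, 20, 4) == 0xA);  /* do_getcc() equivalent */
        psr = deposit32(psr, 20, 4, 0x5);      /* do_setcc() equivalent */
        assert(extract32(psr, 20, 4) == 0x5);
        printf("psr = %#010x\n", psr);         /* 0x00500080 */
        return 0;
    }
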
+
+/* Avoid ifdefs below for the abi32 and abi64 paths. */
+#ifdef TARGET_ABI32
+#define TARGET_TT_SYSCALL (TT_TRAP + 0x10) /* t_linux */
+#else
+#define TARGET_TT_SYSCALL (TT_TRAP + 0x6d) /* tl0_linux64 */
+#endif
+
+/* Avoid ifdefs below for the v9 and pre-v9 hw traps. */
+#ifdef TARGET_SPARC64
+#define TARGET_TT_SPILL TT_SPILL
+#define TARGET_TT_FILL TT_FILL
+#else
+#define TARGET_TT_SPILL TT_WIN_OVF
+#define TARGET_TT_FILL TT_WIN_UNF
+#endif
+
void cpu_loop (CPUSPARCState *env)
{
CPUState *cs = env_cpu(env);
int trapnr;
abi_long ret;
- target_siginfo_t info;
while (1) {
cpu_exec_start(cs);
@@ -163,127 +222,133 @@ void cpu_loop (CPUSPARCState *env)
cpu_exec_end(cs);
process_queued_cpu_work(cs);
- /* Compute PSR before exposing state. */
- if (env->cc_op != CC_OP_FLAGS) {
- cpu_get_psr(env);
- }
-
switch (trapnr) {
-#ifndef TARGET_SPARC64
- case 0x88:
- case 0x90:
-#else
- case 0x110:
- case 0x16d:
-#endif
+ case TARGET_TT_SYSCALL:
ret = do_syscall (env, env->gregs[1],
env->regwptr[0], env->regwptr[1],
env->regwptr[2], env->regwptr[3],
env->regwptr[4], env->regwptr[5],
0, 0);
- if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
+ if (ret == -QEMU_ERESTARTSYS || ret == -QEMU_ESIGRETURN) {
break;
}
if ((abi_ulong)ret >= (abi_ulong)(-515)) {
-#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
- env->xcc |= PSR_CARRY;
-#else
- env->psr |= PSR_CARRY;
-#endif
+ set_syscall_C(env, 1);
ret = -ret;
} else {
-#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
- env->xcc &= ~PSR_CARRY;
-#else
- env->psr &= ~PSR_CARRY;
-#endif
+ set_syscall_C(env, 0);
}
env->regwptr[0] = ret;
/* next instruction */
env->pc = env->npc;
env->npc = env->npc + 4;
break;
- case 0x83: /* flush windows */
-#ifdef TARGET_ABI32
- case 0x103:
-#endif
- flush_windows(env);
- /* next instruction */
- env->pc = env->npc;
- env->npc = env->npc + 4;
- break;
-#ifndef TARGET_SPARC64
- case TT_WIN_OVF: /* window overflow */
- save_window(env);
+
+ case TT_TRAP + 0x01: /* breakpoint */
+ case EXCP_DEBUG:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
- case TT_WIN_UNF: /* window underflow */
- restore_window(env);
+
+ case TT_TRAP + 0x02: /* div0 */
+ case TT_DIV_ZERO:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->pc);
break;
- case TT_TFAULT:
- case TT_DFAULT:
- {
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->mmuregs[4];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
+
+ case TT_TRAP + 0x03: /* flush windows */
+ flush_windows(env);
+ next_instruction(env);
break;
-#else
- case TT_SPILL: /* window overflow */
- save_window(env);
+
+ case TT_TRAP + 0x20: /* getcc */
+ env->gregs[1] = do_getcc(env);
+ next_instruction(env);
break;
- case TT_FILL: /* window underflow */
- restore_window(env);
+ case TT_TRAP + 0x21: /* setcc */
+ do_setcc(env, env->gregs[1]);
+ next_instruction(env);
break;
- case TT_TFAULT:
- case TT_DFAULT:
- {
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- if (trapnr == TT_DFAULT)
- info._sifields._sigfault._addr = env->dmmu.mmuregs[4];
- else
- info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
+ case TT_TRAP + 0x22: /* getpsr */
+ env->gregs[1] = do_getpsr(env);
+ next_instruction(env);
break;
-#ifndef TARGET_ABI32
- case 0x16e:
+
+#ifdef TARGET_SPARC64
+ case TT_TRAP + 0x6e:
flush_windows(env);
sparc64_get_context(env);
break;
- case 0x16f:
+ case TT_TRAP + 0x6f:
flush_windows(env);
sparc64_set_context(env);
break;
#endif
-#endif
+
+ case TARGET_TT_SPILL: /* window overflow */
+ save_window(env);
+ break;
+ case TARGET_TT_FILL: /* window underflow */
+ restore_window(env);
+ break;
+
+ case TT_FP_EXCP:
+ {
+ int code = TARGET_FPE_FLTUNK;
+ target_ulong fsr = cpu_get_fsr(env);
+
+ if ((fsr & FSR_FTT_MASK) == FSR_FTT_IEEE_EXCP) {
+ if (fsr & FSR_NVC) {
+ code = TARGET_FPE_FLTINV;
+ } else if (fsr & FSR_OFC) {
+ code = TARGET_FPE_FLTOVF;
+ } else if (fsr & FSR_UFC) {
+ code = TARGET_FPE_FLTUND;
+ } else if (fsr & FSR_DZC) {
+ code = TARGET_FPE_FLTDIV;
+ } else if (fsr & FSR_NXC) {
+ code = TARGET_FPE_FLTRES;
+ }
+ }
+ force_sig_fault(TARGET_SIGFPE, code, env->pc);
+ }
+ break;
+
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
case TT_ILL_INSN:
- {
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLOPC;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc);
break;
- case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ case TT_PRIV_INSN:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->pc);
+ break;
+ case TT_TOVF:
+ force_sig_fault(TARGET_SIGEMT, TARGET_EMT_TAGOVF, env->pc);
break;
+#ifdef TARGET_SPARC64
+ case TT_PRIV_ACT:
+ /* Note do_privact defers to do_privop. */
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->pc);
+ break;
+#else
+ case TT_NCP_INSN:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_COPROC, env->pc);
+ break;
+ case TT_UNIMP_FLUSH:
+ next_instruction(env);
+ break;
+#endif
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
break;
default:
+ /*
+ * Most software trap numbers vector to BAD_TRAP.
+ * Handle anything not explicitly matched above.
+ */
+ if (trapnr >= TT_TRAP && trapnr <= TT_TRAP + 0x7f) {
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP, env->pc);
+ break;
+ }
fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, 0);
exit(EXIT_FAILURE);
diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c
index 3bc023d281..f164b74032 100644
--- a/linux-user/sparc/signal.c
+++ b/linux-user/sparc/signal.c
@@ -164,7 +164,7 @@ static void restore_pt_regs(struct target_pt_regs *regs, CPUSPARCState *env)
*/
uint32_t psr;
__get_user(psr, &regs->psr);
- env->psr = (psr & PSR_ICC) | (env->psr & ~PSR_ICC);
+ cpu_put_psr_icc(env, psr);
#endif
/* Note that pc and npc are handled in the caller. */
@@ -199,20 +199,21 @@ static void save_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env)
for (i = 0; i < 32; ++i) {
__put_user(env->fpr[i].ll, &fpu->si_double_regs[i]);
}
- __put_user(env->fsr, &fpu->si_fsr);
+ __put_user(cpu_get_fsr(env), &fpu->si_fsr);
__put_user(env->gsr, &fpu->si_gsr);
__put_user(env->fprs, &fpu->si_fprs);
#else
for (i = 0; i < 16; ++i) {
__put_user(env->fpr[i].ll, &fpu->si_double_regs[i]);
}
- __put_user(env->fsr, &fpu->si_fsr);
+ __put_user(cpu_get_fsr(env), &fpu->si_fsr);
__put_user(0, &fpu->si_fpqdepth);
#endif
}
static void restore_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env)
{
+ target_ulong fsr;
int i;
#ifdef TARGET_SPARC64
@@ -230,18 +231,25 @@ static void restore_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env)
__get_user(env->fpr[i].ll, &fpu->si_double_regs[i]);
}
}
- __get_user(env->fsr, &fpu->si_fsr);
__get_user(env->gsr, &fpu->si_gsr);
env->fprs |= fprs;
#else
for (i = 0; i < 16; ++i) {
__get_user(env->fpr[i].ll, &fpu->si_double_regs[i]);
}
- __get_user(env->fsr, &fpu->si_fsr);
#endif
+
+ __get_user(fsr, &fpu->si_fsr);
+ cpu_put_fsr(env, fsr);
}
#ifdef TARGET_ARCH_HAS_SETUP_FRAME
+static void install_sigtramp(uint32_t *tramp, int syscall)
+{
+ __put_user(0x82102000u + syscall, &tramp[0]); /* mov syscall, %g1 */
+ __put_user(0x91d02010u, &tramp[1]); /* t 0x10 */
+}
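
The two trampoline words are the same encodings the removed setup_frame()/setup_rt_frame() code open-coded: 0x82102000 is `or %g0, 0, %g1` (that is, `mov 0, %g1`) with an empty 13-bit immediate, so adding the syscall number fills that immediate, and 0x91d02010 is the `t 0x10` Linux syscall trap. A small standalone check against the constants that appeared in the removed lines (0x821020d8 and 0x82102065):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* op=2, rd=%g1, op3=or, rs1=%g0, i=1, simm13=0; add the syscall number
     * into the immediate field (it must fit in 13 bits). */
    static uint32_t sparc_mov_imm_g1(uint32_t imm13)
    {
        return 0x82102000u + (imm13 & 0x1fffu);
    }

    int main(void)
    {
        /* 0xd8 and 0x65 are the sigreturn/rt_sigreturn numbers that the
         * removed code hard-coded as 0x821020d8 and 0x82102065. */
        assert(sparc_mov_imm_g1(0xd8) == 0x821020d8u);
        assert(sparc_mov_imm_g1(0x65) == 0x82102065u);
        printf("trampoline word 0: %#x, word 1: %#x\n",
               sparc_mov_imm_g1(0xd8), 0x91d02010u);
        return 0;
    }
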
+
void setup_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUSPARCState *env)
{
@@ -291,13 +299,9 @@ void setup_frame(int sig, struct target_sigaction *ka,
if (ka->ka_restorer) {
env->regwptr[WREG_O7] = ka->ka_restorer;
} else {
- env->regwptr[WREG_O7] = sf_addr +
- offsetof(struct target_signal_frame, insns) - 2 * 4;
-
- /* mov __NR_sigreturn, %g1 */
- __put_user(0x821020d8u, &sf->insns[0]);
- /* t 0x10 */
- __put_user(0x91d02010u, &sf->insns[1]);
+ /* Not used, but retain for ABI compatibility. */
+ install_sigtramp(sf->insns, TARGET_NR_sigreturn);
+ env->regwptr[WREG_O7] = default_sigreturn;
}
unlock_user(sf, sf_addr, sf_size);
}
@@ -329,7 +333,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
__put_user(0, &sf->rwin_save); /* TODO: save_rwin_state */
- tswap_siginfo(&sf->info, info);
+ sf->info = *info;
tswap_sigset(&sf->mask, set);
target_save_altstack(&sf->stack, env);
@@ -358,13 +362,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
if (ka->ka_restorer) {
env->regwptr[WREG_O7] = ka->ka_restorer;
} else {
- env->regwptr[WREG_O7] =
- sf_addr + offsetof(struct target_rt_signal_frame, insns) - 2 * 4;
-
- /* mov __NR_rt_sigreturn, %g1 */
- __put_user(0x82102065u, &sf->insns[0]);
- /* t 0x10 */
- __put_user(0x91d02010u, &sf->insns[1]);
+ /* Not used, but retain for ABI compatibility. */
+ install_sigtramp(sf->insns, TARGET_NR_rt_sigreturn);
+ env->regwptr[WREG_O7] = default_rt_sigreturn;
}
#else
env->regwptr[WREG_O7] = ka->ka_restorer;
@@ -433,12 +433,12 @@ long do_sigreturn(CPUSPARCState *env)
set_sigmask(&host_set);
unlock_user_struct(sf, sf_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
segv_and_exit:
unlock_user_struct(sf, sf_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
#else
return -TARGET_ENOSYS;
#endif
@@ -497,15 +497,31 @@ long do_rt_sigreturn(CPUSPARCState *env)
env->npc = tnpc;
unlock_user_struct(sf, sf_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
segv_and_exit:
unlock_user_struct(sf, sf_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
}
-#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
+#ifdef TARGET_ABI32
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ install_sigtramp(tramp, TARGET_NR_sigreturn);
+
+ default_rt_sigreturn = sigtramp_page + 8;
+ install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn);
+
+ unlock_user(tramp, sigtramp_page, 2 * 8);
+}
+#endif
+
+#ifdef TARGET_SPARC64
#define SPARC_MC_TSTATE 0
#define SPARC_MC_PC 1
#define SPARC_MC_NPC 2
@@ -577,7 +593,7 @@ void sparc64_set_context(CPUSPARCState *env)
struct target_ucontext *ucp;
target_mc_gregset_t *grp;
target_mc_fpu_t *fpup;
- abi_ulong pc, npc, tstate;
+ target_ulong pc, npc, tstate;
unsigned int i;
unsigned char fenab;
@@ -648,6 +664,7 @@ void sparc64_set_context(CPUSPARCState *env)
__get_user(fenab, &(fpup->mcfpu_enab));
if (fenab) {
abi_ulong fprs;
+ abi_ulong fsr;
/*
* We use the FPRS from the guest only in deciding whether
@@ -676,7 +693,8 @@ void sparc64_set_context(CPUSPARCState *env)
__get_user(env->fpr[i].ll, &(fpup->mcfpu_fregs.dregs[i]));
}
}
- __get_user(env->fsr, &(fpup->mcfpu_fsr));
+ __get_user(fsr, &(fpup->mcfpu_fsr));
+ cpu_put_fsr(env, fsr);
__get_user(env->gsr, &(fpup->mcfpu_gsr));
}
unlock_user_struct(ucp, ucp_addr, 0);
@@ -775,4 +793,4 @@ do_sigsegv:
unlock_user_struct(ucp, ucp_addr, 1);
force_sig(TARGET_SIGSEGV);
}
-#endif
+#endif /* TARGET_SPARC64 */
diff --git a/linux-user/sparc/target_cpu.h b/linux-user/sparc/target_cpu.h
index 1f4bed50f4..5f62c5eb75 100644
--- a/linux-user/sparc/target_cpu.h
+++ b/linux-user/sparc/target_cpu.h
@@ -26,6 +26,17 @@
# define TARGET_STACK_BIAS 0
#endif
+static void set_syscall_C(CPUSPARCState *env, bool val)
+{
+#ifndef TARGET_SPARC64
+ env->icc_C = val;
+#elif defined(TARGET_ABI32)
+ env->icc_C = (uint64_t)val << 32;
+#else
+ env->xcc_C = val;
+#endif
+}
+
static inline void cpu_clone_regs_child(CPUSPARCState *env, target_ulong newsp,
unsigned flags)
{
@@ -58,11 +69,7 @@ static inline void cpu_clone_regs_child(CPUSPARCState *env, target_ulong newsp,
* do the pc advance twice.
*/
env->regwptr[WREG_O0] = 0;
-#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
- env->xcc &= ~PSR_CARRY;
-#else
- env->psr &= ~PSR_CARRY;
-#endif
+ set_syscall_C(env, 0);
env->pc = env->npc;
env->npc = env->npc + 4;
}
diff --git a/linux-user/sparc/target_mman.h b/linux-user/sparc/target_mman.h
new file mode 100644
index 0000000000..696ca73fe4
--- /dev/null
+++ b/linux-user/sparc/target_mman.h
@@ -0,0 +1,35 @@
+#ifndef SPARC_TARGET_MMAN_H
+#define SPARC_TARGET_MMAN_H
+
+#define TARGET_MAP_NORESERVE 0x40
+#define TARGET_MAP_LOCKED 0x100
+#define TARGET_MAP_GROWSDOWN 0x0200
+
+/*
+ * arch/sparc/include/asm/page_64.h:
+ * TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
+ * _AC(0x0000000070000000,UL) : \
+ * VA_EXCLUDE_END)
+ * But VA_EXCLUDE_END is > 0xffff800000000000UL which doesn't work
+ * in userland emulation.
+ */
+#ifdef TARGET_ABI32
+#define TASK_UNMAPPED_BASE 0x70000000
+#else
+#define TASK_UNMAPPED_BASE (1ull << (TARGET_VIRT_ADDR_SPACE_BITS - 2))
+#endif
+
+/*
+ * arch/sparc/include/asm/elf_64.h
+ * Except that COMPAT_ELF_ET_DYN_BASE exactly matches TASK_UNMAPPED_BASE,
+ * so move it up a bit.
+ */
+#ifdef TARGET_ABI32
+#define ELF_ET_DYN_BASE 0x78000000
+#else
+#define ELF_ET_DYN_BASE 0x0000010000000000ull
+#endif
+
+#include "../generic/target_mman.h"
+
+#endif
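As the comment notes, the kernel's 64-bit TASK_UNMAPPED_BASE cannot be used under user-mode emulation, so the header derives it from the emulated address-space width instead. A quick standalone check of that arithmetic, assuming a 44-bit virtual address space purely as an example value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example only: the real width comes from the target configuration. */
    const unsigned virt_addr_space_bits = 44;

    uint64_t base64 = UINT64_C(1) << (virt_addr_space_bits - 2);
    uint64_t base32 = 0x70000000;

    printf("64-bit TASK_UNMAPPED_BASE = 0x%" PRIx64 "\n", base64);  /* 0x40000000000 */
    printf("32-bit TASK_UNMAPPED_BASE = 0x%" PRIx64 "\n", base32);
    printf("32-bit ELF_ET_DYN_BASE    = 0x%x (above the mmap base)\n", 0x78000000u);
    return 0;
}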
diff --git a/linux-user/sparc/target_prctl.h b/linux-user/sparc/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/sparc/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/sparc/target_proc.h b/linux-user/sparc/target_proc.h
new file mode 100644
index 0000000000..3bb3134a47
--- /dev/null
+++ b/linux-user/sparc/target_proc.h
@@ -0,0 +1,16 @@
+/*
+ * Sparc specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef SPARC_TARGET_PROC_H
+#define SPARC_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ dprintf(fd, "type\t\t: sun4u\n");
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* SPARC_TARGET_PROC_H */
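open_cpuinfo() writes the synthetic /proc/cpuinfo contents with dprintf(3), formatted output straight to a file descriptor with no FILE* wrapper. A minimal usage example of the same call, writing to the standard output descriptor:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Same idiom as above: printf-style formatting onto a raw fd. */
    dprintf(STDOUT_FILENO, "type\t\t: %s\n", "sun4u");
    return 0;
}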
diff --git a/linux-user/sparc/target_resource.h b/linux-user/sparc/target_resource.h
new file mode 100644
index 0000000000..d9a2fb814a
--- /dev/null
+++ b/linux-user/sparc/target_resource.h
@@ -0,0 +1,17 @@
+#ifndef SPARC_TARGET_RESOURCE_H
+#define SPARC_TARGET_RESOURCE_H
+
+#include "../generic/target_resource.h"
+
+#if TARGET_ABI_BITS == 32
+#undef TARGET_RLIM_INFINITY
+#define TARGET_RLIM_INFINITY 0x7fffffffUL
+#endif
+
+#undef TARGET_RLIMIT_NOFILE
+#define TARGET_RLIMIT_NOFILE 6
+
+#undef TARGET_RLIMIT_NPROC
+#define TARGET_RLIMIT_NPROC 7
+
+#endif
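With TARGET_RLIM_INFINITY capped at 0x7fffffff on sparc32, host limit values have to be brought into the target's range when rlimit data crosses the syscall boundary. A hedged sketch of that value conversion; the helper name and the clamping rule are illustrative only, not QEMU's actual implementation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HOST_RLIM_INFINITY   UINT64_MAX      /* typical on 64-bit Linux hosts */
#define TARGET_RLIM_INFINITY 0x7fffffffULL   /* sparc32 value from above */

/* Illustrative only: map a host limit into the 32-bit target's range. */
static uint64_t demo_host_to_target_rlim(uint64_t host)
{
    if (host == HOST_RLIM_INFINITY || host > TARGET_RLIM_INFINITY) {
        return TARGET_RLIM_INFINITY;
    }
    return host;
}

int main(void)
{
    printf("unlimited -> 0x%" PRIx64 "\n", demo_host_to_target_rlim(HOST_RLIM_INFINITY));
    printf("1048576   -> %" PRIu64 "\n", demo_host_to_target_rlim(1048576));
    return 0;
}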
diff --git a/linux-user/sparc/target_signal.h b/linux-user/sparc/target_signal.h
index 34f9a12519..f223eb4af6 100644
--- a/linux-user/sparc/target_signal.h
+++ b/linux-user/sparc/target_signal.h
@@ -8,7 +8,7 @@
#define TARGET_SIGTRAP 5
#define TARGET_SIGABRT 6
#define TARGET_SIGIOT 6
-#define TARGET_SIGSTKFLT 7 /* actually EMT */
+#define TARGET_SIGEMT 7
#define TARGET_SIGFPE 8
#define TARGET_SIGKILL 9
#define TARGET_SIGBUS 10
@@ -65,10 +65,13 @@ typedef struct target_sigaltstack {
#define TARGET_ARCH_HAS_KA_RESTORER 1
#define TARGET_MINSIGSTKSZ 4096
-#define TARGET_SIGSTKSZ 16384
#ifdef TARGET_ABI32
#define TARGET_ARCH_HAS_SETUP_FRAME
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+#else
+/* For sparc64, use of KA_RESTORER is mandatory. */
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
#endif
/* bit-flags */
diff --git a/linux-user/sparc/target_syscall.h b/linux-user/sparc/target_syscall.h
index 087b39d39c..e421165357 100644
--- a/linux-user/sparc/target_syscall.h
+++ b/linux-user/sparc/target_syscall.h
@@ -34,7 +34,6 @@ struct target_pt_regs {
* and copy_thread().
*/
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 4096
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
@@ -51,11 +50,7 @@ static inline abi_ulong target_shmlba(CPUSPARCState *env)
#ifdef TARGET_SPARC64
return MAX(TARGET_PAGE_SIZE, 16 * 1024);
#else
- if (!(env->def.features & CPU_FEATURE_FLUSH)) {
- return 64 * 1024;
- } else {
- return 256 * 1024;
- }
+ return 256 * 1024;
#endif
}
diff --git a/linux-user/strace.c b/linux-user/strace.c
index 2cdbf030ba..b4d1098170 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -17,15 +17,17 @@
#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
+#include "signal-common.h"
+#include "target_mman.h"
struct syscallname {
int nr;
const char *name;
const char *format;
- void (*call)(void *, const struct syscallname *,
+ void (*call)(CPUArchState *, const struct syscallname *,
abi_long, abi_long, abi_long,
abi_long, abi_long, abi_long);
- void (*result)(void *, const struct syscallname *, abi_long,
+ void (*result)(CPUArchState *, const struct syscallname *, abi_long,
abi_long, abi_long, abi_long,
abi_long, abi_long, abi_long);
};
@@ -44,15 +46,21 @@ struct syscallname {
*/
struct flags {
abi_long f_value; /* flag */
+ abi_long f_mask; /* mask */
const char *f_string; /* stringified flag */
};
+/* No 'struct flags' element should have a zero mask. */
+#define FLAG_BASIC(V, M, N) { V, M | QEMU_BUILD_BUG_ON_ZERO(!(M)), N }
+
/* common flags for all architectures */
-#define FLAG_GENERIC(name) { name, #name }
+#define FLAG_GENERIC_MASK(V, M) FLAG_BASIC(V, M, #V)
+#define FLAG_GENERIC(V) FLAG_BASIC(V, V, #V)
/* target specific flags (syscall_defs.h has TARGET_<flag>) */
-#define FLAG_TARGET(name) { TARGET_ ## name, #name }
+#define FLAG_TARGET_MASK(V, M) FLAG_BASIC(TARGET_##V, TARGET_##M, #V)
+#define FLAG_TARGET(V) FLAG_BASIC(TARGET_##V, TARGET_##V, #V)
/* end of flags array */
-#define FLAG_END { 0, NULL }
+#define FLAG_END { 0, 0, NULL }
/* Structure used to translate enumerated values into strings */
struct enums {
@@ -79,8 +87,10 @@ UNUSED static void print_syscall_epilogue(const struct syscallname *);
UNUSED static void print_string(abi_long, int);
UNUSED static void print_buf(abi_long addr, abi_long len, int last);
UNUSED static void print_raw_param(const char *, abi_long, int);
+UNUSED static void print_raw_param64(const char *, long long, int last);
UNUSED static void print_timeval(abi_ulong, int);
UNUSED static void print_timespec(abi_ulong, int);
+UNUSED static void print_timespec64(abi_ulong, int);
UNUSED static void print_timezone(abi_ulong, int);
UNUSED static void print_itimerval(abi_ulong, int);
UNUSED static void print_number(abi_long, int);
@@ -141,30 +151,21 @@ if( cmd == val ) { \
qemu_log("%d", cmd);
}
+static const char * const target_signal_name[] = {
+#define MAKE_SIG_ENTRY(sig) [TARGET_##sig] = #sig,
+ MAKE_SIGNAL_LIST
+#undef MAKE_SIG_ENTRY
+};
+
static void
print_signal(abi_ulong arg, int last)
{
const char *signal_name = NULL;
- switch(arg) {
- case TARGET_SIGHUP: signal_name = "SIGHUP"; break;
- case TARGET_SIGINT: signal_name = "SIGINT"; break;
- case TARGET_SIGQUIT: signal_name = "SIGQUIT"; break;
- case TARGET_SIGILL: signal_name = "SIGILL"; break;
- case TARGET_SIGABRT: signal_name = "SIGABRT"; break;
- case TARGET_SIGFPE: signal_name = "SIGFPE"; break;
- case TARGET_SIGKILL: signal_name = "SIGKILL"; break;
- case TARGET_SIGSEGV: signal_name = "SIGSEGV"; break;
- case TARGET_SIGPIPE: signal_name = "SIGPIPE"; break;
- case TARGET_SIGALRM: signal_name = "SIGALRM"; break;
- case TARGET_SIGTERM: signal_name = "SIGTERM"; break;
- case TARGET_SIGUSR1: signal_name = "SIGUSR1"; break;
- case TARGET_SIGUSR2: signal_name = "SIGUSR2"; break;
- case TARGET_SIGCHLD: signal_name = "SIGCHLD"; break;
- case TARGET_SIGCONT: signal_name = "SIGCONT"; break;
- case TARGET_SIGSTOP: signal_name = "SIGSTOP"; break;
- case TARGET_SIGTTIN: signal_name = "SIGTTIN"; break;
- case TARGET_SIGTTOU: signal_name = "SIGTTOU"; break;
+
+ if (arg < ARRAY_SIZE(target_signal_name)) {
+ signal_name = target_signal_name[arg];
}
+
if (signal_name == NULL) {
print_raw_param("%ld", arg, last);
return;
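The lookup table generated from MAKE_SIGNAL_LIST replaces the old switch: unknown or out-of-range signal numbers simply fall through to the raw-number path, because unlisted slots stay NULL. A self-contained version of the same sparse-table pattern, using a handful of common Linux signal numbers as example entries:

#include <stdio.h>

/* Sparse table built with designated initializers: slots that are not
 * listed stay NULL, just like the generated target_signal_name[]. */
static const char *const demo_signal_name[] = {
    [1] = "SIGHUP",
    [2] = "SIGINT",
    [3] = "SIGQUIT",
    [9] = "SIGKILL",
};

#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void demo_print_signal(unsigned long sig)
{
    const char *name = NULL;

    if (sig < DEMO_ARRAY_SIZE(demo_signal_name)) {
        name = demo_signal_name[sig];
    }
    if (name) {
        printf("%s\n", name);
    } else {
        printf("%lu\n", sig);      /* unknown or out of range: raw number */
    }
}

int main(void)
{
    demo_print_signal(2);    /* SIGINT */
    demo_print_signal(4);    /* in range but not listed -> "4" */
    demo_print_signal(64);   /* out of range -> "64" */
    return 0;
}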
@@ -366,7 +367,6 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last)
switch (sa_family) {
case AF_UNIX: {
struct target_sockaddr_un *un = (struct target_sockaddr_un *)sa;
- int i;
qemu_log("{sun_family=AF_UNIX,sun_path=\"");
for (i = 0; i < addrlen -
offsetof(struct target_sockaddr_un, sun_path) &&
@@ -512,21 +512,69 @@ print_socket_protocol(int domain, int type, int protocol)
case NETLINK_ROUTE:
qemu_log("NETLINK_ROUTE");
break;
+ case NETLINK_UNUSED:
+ qemu_log("NETLINK_UNUSED");
+ break;
+ case NETLINK_USERSOCK:
+ qemu_log("NETLINK_USERSOCK");
+ break;
+ case NETLINK_FIREWALL:
+ qemu_log("NETLINK_FIREWALL");
+ break;
+ case NETLINK_SOCK_DIAG:
+ qemu_log("NETLINK_SOCK_DIAG");
+ break;
+ case NETLINK_NFLOG:
+ qemu_log("NETLINK_NFLOG");
+ break;
+ case NETLINK_XFRM:
+ qemu_log("NETLINK_XFRM");
+ break;
+ case NETLINK_SELINUX:
+ qemu_log("NETLINK_SELINUX");
+ break;
+ case NETLINK_ISCSI:
+ qemu_log("NETLINK_ISCSI");
+ break;
case NETLINK_AUDIT:
qemu_log("NETLINK_AUDIT");
break;
+ case NETLINK_FIB_LOOKUP:
+ qemu_log("NETLINK_FIB_LOOKUP");
+ break;
+ case NETLINK_CONNECTOR:
+ qemu_log("NETLINK_CONNECTOR");
+ break;
case NETLINK_NETFILTER:
qemu_log("NETLINK_NETFILTER");
break;
+ case NETLINK_IP6_FW:
+ qemu_log("NETLINK_IP6_FW");
+ break;
+ case NETLINK_DNRTMSG:
+ qemu_log("NETLINK_DNRTMSG");
+ break;
case NETLINK_KOBJECT_UEVENT:
qemu_log("NETLINK_KOBJECT_UEVENT");
break;
+ case NETLINK_GENERIC:
+ qemu_log("NETLINK_GENERIC");
+ break;
+ case NETLINK_SCSITRANSPORT:
+ qemu_log("NETLINK_SCSITRANSPORT");
+ break;
+ case NETLINK_ECRYPTFS:
+ qemu_log("NETLINK_ECRYPTFS");
+ break;
case NETLINK_RDMA:
qemu_log("NETLINK_RDMA");
break;
case NETLINK_CRYPTO:
qemu_log("NETLINK_CRYPTO");
break;
+ case NETLINK_SMC:
+ qemu_log("NETLINK_SMC");
+ break;
default:
qemu_log("%d", protocol);
break;
@@ -593,7 +641,7 @@ print_fdset(int n, abi_ulong target_fds_addr)
/* select */
#ifdef TARGET_NR__newselect
static void
-print_newselect(void *cpu_env, const struct syscallname *name,
+print_newselect(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6)
{
@@ -609,9 +657,8 @@ print_newselect(void *cpu_env, const struct syscallname *name,
}
#endif
-#ifdef TARGET_NR_semctl
static void
-print_semctl(void *cpu_env, const struct syscallname *name,
+print_semctl(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6)
{
@@ -620,52 +667,41 @@ print_semctl(void *cpu_env, const struct syscallname *name,
print_ipc_cmd(arg3);
qemu_log(",0x" TARGET_ABI_FMT_lx ")", arg4);
}
-#endif
static void
-print_execve(void *cpu_env, const struct syscallname *name,
- abi_long arg1, abi_long arg2, abi_long arg3,
- abi_long arg4, abi_long arg5, abi_long arg6)
+print_shmat(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
{
- abi_ulong arg_ptr_addr;
- char *s;
-
- if (!(s = lock_user_string(arg1)))
- return;
- qemu_log("%s(\"%s\",{", name->name, s);
- unlock_user(s, arg1, 0);
-
- for (arg_ptr_addr = arg2; ; arg_ptr_addr += sizeof(abi_ulong)) {
- abi_ulong *arg_ptr, arg_addr;
-
- arg_ptr = lock_user(VERIFY_READ, arg_ptr_addr, sizeof(abi_ulong), 1);
- if (!arg_ptr)
- return;
- arg_addr = tswapal(*arg_ptr);
- unlock_user(arg_ptr, arg_ptr_addr, 0);
- if (!arg_addr)
- break;
- if ((s = lock_user_string(arg_addr))) {
- qemu_log("\"%s\",", s);
- unlock_user(s, arg_addr, 0);
- }
- }
+ static const struct flags shmat_flags[] = {
+ FLAG_GENERIC(SHM_RND),
+ FLAG_GENERIC(SHM_REMAP),
+ FLAG_GENERIC(SHM_RDONLY),
+ FLAG_GENERIC(SHM_EXEC),
+ FLAG_END
+ };
- qemu_log("NULL})");
+ print_syscall_prologue(name);
+ print_raw_param(TARGET_ABI_FMT_ld, arg0, 0);
+ print_pointer(arg1, 0);
+ print_flags(shmat_flags, arg2, 1);
+ print_syscall_epilogue(name);
}
#ifdef TARGET_NR_ipc
static void
-print_ipc(void *cpu_env, const struct syscallname *name,
+print_ipc(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6)
{
switch(arg1) {
case IPCOP_semctl:
- qemu_log("semctl(" TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ",",
- arg1, arg2);
- print_ipc_cmd(arg3);
- qemu_log(",0x" TARGET_ABI_FMT_lx ")", arg4);
+ print_semctl(cpu_env, &(const struct syscallname){ .name = "semctl" },
+ arg2, arg3, arg4, arg5, 0, 0);
+ break;
+ case IPCOP_shmat:
+ print_shmat(cpu_env, &(const struct syscallname){ .name = "shmat" },
+ arg2, arg5, arg3, 0, 0, 0);
break;
default:
qemu_log(("%s("
@@ -689,7 +725,7 @@ print_syscall_err(abi_long ret)
const char *errstr;
qemu_log(" = ");
- if (ret < 0) {
+ if (is_error(ret)) {
errstr = target_strerror(-ret);
if (errstr) {
qemu_log("-1 errno=%d (%s)", (int)-ret, errstr);
@@ -700,7 +736,7 @@ print_syscall_err(abi_long ret)
}
static void
-print_syscall_ret_addr(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_addr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -721,7 +757,7 @@ print_syscall_ret_raw(struct syscallname *name, abi_long ret)
#ifdef TARGET_NR__newselect
static void
-print_syscall_ret_newselect(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_newselect(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -751,7 +787,7 @@ print_syscall_ret_newselect(void *cpu_env, const struct syscallname *name,
#define TARGET_TIME_ERROR 5 /* clock not synchronized */
#ifdef TARGET_NR_adjtimex
static void
-print_syscall_ret_adjtimex(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_adjtimex(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -786,7 +822,7 @@ print_syscall_ret_adjtimex(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_clock_gettime) || defined(TARGET_NR_clock_getres)
static void
-print_syscall_ret_clock_gettime(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_clock_gettime(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -803,9 +839,27 @@ print_syscall_ret_clock_gettime(void *cpu_env, const struct syscallname *name,
#define print_syscall_ret_clock_getres print_syscall_ret_clock_gettime
#endif
+#if defined(TARGET_NR_clock_gettime64)
+static void
+print_syscall_ret_clock_gettime64(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long ret, abi_long arg0, abi_long arg1,
+ abi_long arg2, abi_long arg3, abi_long arg4,
+ abi_long arg5)
+{
+ if (!print_syscall_err(ret)) {
+ qemu_log(TARGET_ABI_FMT_ld, ret);
+ qemu_log(" (");
+ print_timespec64(arg1, 1);
+ qemu_log(")");
+ }
+
+ qemu_log("\n");
+}
+#endif
+
#ifdef TARGET_NR_gettimeofday
static void
-print_syscall_ret_gettimeofday(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_gettimeofday(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -824,7 +878,7 @@ print_syscall_ret_gettimeofday(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_getitimer
static void
-print_syscall_ret_getitimer(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_getitimer(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -843,7 +897,7 @@ print_syscall_ret_getitimer(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_getitimer
static void
-print_syscall_ret_setitimer(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_setitimer(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -862,7 +916,7 @@ print_syscall_ret_setitimer(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_listxattr) || defined(TARGET_NR_llistxattr) \
|| defined(TARGET_NR_flistxattr)
static void
-print_syscall_ret_listxattr(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_listxattr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -894,7 +948,7 @@ print_syscall_ret_listxattr(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_ioctl
static void
-print_syscall_ret_ioctl(void *cpu_env, const struct syscallname *name,
+print_syscall_ret_ioctl(CPUArchState *cpu_env, const struct syscallname *name,
abi_long ret, abi_long arg0, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5)
@@ -933,15 +987,15 @@ print_syscall_ret_ioctl(void *cpu_env, const struct syscallname *name,
}
#endif
-UNUSED static struct flags access_flags[] = {
- FLAG_GENERIC(F_OK),
+UNUSED static const struct flags access_flags[] = {
+ FLAG_GENERIC_MASK(F_OK, R_OK | W_OK | X_OK),
FLAG_GENERIC(R_OK),
FLAG_GENERIC(W_OK),
FLAG_GENERIC(X_OK),
FLAG_END,
};
-UNUSED static struct flags at_file_flags[] = {
+UNUSED static const struct flags at_file_flags[] = {
#ifdef AT_EACCESS
FLAG_GENERIC(AT_EACCESS),
#endif
@@ -951,14 +1005,14 @@ UNUSED static struct flags at_file_flags[] = {
FLAG_END,
};
-UNUSED static struct flags unlinkat_flags[] = {
+UNUSED static const struct flags unlinkat_flags[] = {
#ifdef AT_REMOVEDIR
FLAG_GENERIC(AT_REMOVEDIR),
#endif
FLAG_END,
};
-UNUSED static struct flags mode_flags[] = {
+UNUSED static const struct flags mode_flags[] = {
FLAG_GENERIC(S_IFSOCK),
FLAG_GENERIC(S_IFLNK),
FLAG_GENERIC(S_IFREG),
@@ -969,19 +1023,21 @@ UNUSED static struct flags mode_flags[] = {
FLAG_END,
};
-UNUSED static struct flags open_access_flags[] = {
- FLAG_TARGET(O_RDONLY),
- FLAG_TARGET(O_WRONLY),
- FLAG_TARGET(O_RDWR),
+UNUSED static const struct flags open_access_flags[] = {
+ FLAG_TARGET_MASK(O_RDONLY, O_ACCMODE),
+ FLAG_TARGET_MASK(O_WRONLY, O_ACCMODE),
+ FLAG_TARGET_MASK(O_RDWR, O_ACCMODE),
FLAG_END,
};
-UNUSED static struct flags open_flags[] = {
+UNUSED static const struct flags open_flags[] = {
FLAG_TARGET(O_APPEND),
FLAG_TARGET(O_CREAT),
FLAG_TARGET(O_DIRECTORY),
FLAG_TARGET(O_EXCL),
+#if TARGET_O_LARGEFILE != 0
FLAG_TARGET(O_LARGEFILE),
+#endif
FLAG_TARGET(O_NOCTTY),
FLAG_TARGET(O_NOFOLLOW),
FLAG_TARGET(O_NONBLOCK), /* also O_NDELAY */
@@ -1007,7 +1063,7 @@ UNUSED static struct flags open_flags[] = {
FLAG_END,
};
-UNUSED static struct flags mount_flags[] = {
+UNUSED static const struct flags mount_flags[] = {
#ifdef MS_BIND
FLAG_GENERIC(MS_BIND),
#endif
@@ -1032,7 +1088,7 @@ UNUSED static struct flags mount_flags[] = {
FLAG_END,
};
-UNUSED static struct flags umount2_flags[] = {
+UNUSED static const struct flags umount2_flags[] = {
#ifdef MNT_FORCE
FLAG_GENERIC(MNT_FORCE),
#endif
@@ -1045,8 +1101,8 @@ UNUSED static struct flags umount2_flags[] = {
FLAG_END,
};
-UNUSED static struct flags mmap_prot_flags[] = {
- FLAG_GENERIC(PROT_NONE),
+UNUSED static const struct flags mmap_prot_flags[] = {
+ FLAG_GENERIC_MASK(PROT_NONE, PROT_READ | PROT_WRITE | PROT_EXEC),
FLAG_GENERIC(PROT_EXEC),
FLAG_GENERIC(PROT_READ),
FLAG_GENERIC(PROT_WRITE),
@@ -1056,35 +1112,39 @@ UNUSED static struct flags mmap_prot_flags[] = {
FLAG_END,
};
-UNUSED static struct flags mmap_flags[] = {
- FLAG_TARGET(MAP_SHARED),
- FLAG_TARGET(MAP_PRIVATE),
+UNUSED static const struct flags mmap_flags[] = {
+ FLAG_TARGET_MASK(MAP_SHARED, MAP_TYPE),
+ FLAG_TARGET_MASK(MAP_PRIVATE, MAP_TYPE),
+ FLAG_TARGET_MASK(MAP_SHARED_VALIDATE, MAP_TYPE),
FLAG_TARGET(MAP_ANONYMOUS),
FLAG_TARGET(MAP_DENYWRITE),
+ FLAG_TARGET(MAP_EXECUTABLE),
FLAG_TARGET(MAP_FIXED),
+ FLAG_TARGET(MAP_FIXED_NOREPLACE),
FLAG_TARGET(MAP_GROWSDOWN),
- FLAG_TARGET(MAP_EXECUTABLE),
-#ifdef MAP_LOCKED
+ FLAG_TARGET(MAP_HUGETLB),
FLAG_TARGET(MAP_LOCKED),
-#endif
-#ifdef MAP_NONBLOCK
FLAG_TARGET(MAP_NONBLOCK),
-#endif
FLAG_TARGET(MAP_NORESERVE),
-#ifdef MAP_POPULATE
FLAG_TARGET(MAP_POPULATE),
-#endif
-#ifdef TARGET_MAP_UNINITIALIZED
+ FLAG_TARGET(MAP_STACK),
+ FLAG_TARGET(MAP_SYNC),
+#if TARGET_MAP_UNINITIALIZED != 0
FLAG_TARGET(MAP_UNINITIALIZED),
#endif
FLAG_END,
};
-UNUSED static struct flags clone_flags[] = {
+#ifndef CLONE_PIDFD
+# define CLONE_PIDFD 0x00001000
+#endif
+
+UNUSED static const struct flags clone_flags[] = {
FLAG_GENERIC(CLONE_VM),
FLAG_GENERIC(CLONE_FS),
FLAG_GENERIC(CLONE_FILES),
FLAG_GENERIC(CLONE_SIGHAND),
+ FLAG_GENERIC(CLONE_PIDFD),
FLAG_GENERIC(CLONE_PTRACE),
FLAG_GENERIC(CLONE_VFORK),
FLAG_GENERIC(CLONE_PARENT),
@@ -1124,7 +1184,17 @@ UNUSED static struct flags clone_flags[] = {
FLAG_END,
};
-UNUSED static struct flags msg_flags[] = {
+UNUSED static const struct flags execveat_flags[] = {
+#ifdef AT_EMPTY_PATH
+ FLAG_GENERIC(AT_EMPTY_PATH),
+#endif
+#ifdef AT_SYMLINK_NOFOLLOW
+ FLAG_GENERIC(AT_SYMLINK_NOFOLLOW),
+#endif
+ FLAG_END,
+};
+
+UNUSED static const struct flags msg_flags[] = {
/* send */
FLAG_GENERIC(MSG_CONFIRM),
FLAG_GENERIC(MSG_DONTROUTE),
@@ -1144,7 +1214,7 @@ UNUSED static struct flags msg_flags[] = {
FLAG_END,
};
-UNUSED static struct flags statx_flags[] = {
+UNUSED static const struct flags statx_flags[] = {
#ifdef AT_EMPTY_PATH
FLAG_GENERIC(AT_EMPTY_PATH),
#endif
@@ -1155,18 +1225,18 @@ UNUSED static struct flags statx_flags[] = {
FLAG_GENERIC(AT_SYMLINK_NOFOLLOW),
#endif
#ifdef AT_STATX_SYNC_AS_STAT
- FLAG_GENERIC(AT_STATX_SYNC_AS_STAT),
+ FLAG_GENERIC_MASK(AT_STATX_SYNC_AS_STAT, AT_STATX_SYNC_TYPE),
#endif
#ifdef AT_STATX_FORCE_SYNC
- FLAG_GENERIC(AT_STATX_FORCE_SYNC),
+ FLAG_GENERIC_MASK(AT_STATX_FORCE_SYNC, AT_STATX_SYNC_TYPE),
#endif
#ifdef AT_STATX_DONT_SYNC
- FLAG_GENERIC(AT_STATX_DONT_SYNC),
+ FLAG_GENERIC_MASK(AT_STATX_DONT_SYNC, AT_STATX_SYNC_TYPE),
#endif
FLAG_END,
};
-UNUSED static struct flags statx_mask[] = {
+UNUSED static const struct flags statx_mask[] = {
/* This must come first, because it includes everything. */
#ifdef STATX_ALL
FLAG_GENERIC(STATX_ALL),
@@ -1214,7 +1284,7 @@ UNUSED static struct flags statx_mask[] = {
FLAG_END,
};
-UNUSED static struct flags falloc_flags[] = {
+UNUSED static const struct flags falloc_flags[] = {
FLAG_GENERIC(FALLOC_FL_KEEP_SIZE),
FLAG_GENERIC(FALLOC_FL_PUNCH_HOLE),
#ifdef FALLOC_FL_NO_HIDE_STALE
@@ -1234,7 +1304,7 @@ UNUSED static struct flags falloc_flags[] = {
#endif
};
-UNUSED static struct flags termios_iflags[] = {
+UNUSED static const struct flags termios_iflags[] = {
FLAG_TARGET(IGNBRK),
FLAG_TARGET(BRKINT),
FLAG_TARGET(IGNPAR),
@@ -1253,7 +1323,7 @@ UNUSED static struct flags termios_iflags[] = {
FLAG_END,
};
-UNUSED static struct flags termios_oflags[] = {
+UNUSED static const struct flags termios_oflags[] = {
FLAG_TARGET(OPOST),
FLAG_TARGET(OLCUC),
FLAG_TARGET(ONLCR),
@@ -1337,7 +1407,7 @@ UNUSED static struct enums termios_cflags_CSIZE[] = {
ENUM_END,
};
-UNUSED static struct flags termios_cflags[] = {
+UNUSED static const struct flags termios_cflags[] = {
FLAG_TARGET(CSTOPB),
FLAG_TARGET(CREAD),
FLAG_TARGET(PARENB),
@@ -1348,7 +1418,7 @@ UNUSED static struct flags termios_cflags[] = {
FLAG_END,
};
-UNUSED static struct flags termios_lflags[] = {
+UNUSED static const struct flags termios_lflags[] = {
FLAG_TARGET(ISIG),
FLAG_TARGET(ICANON),
FLAG_TARGET(XCASE),
@@ -1368,7 +1438,8 @@ UNUSED static struct flags termios_lflags[] = {
FLAG_END,
};
-UNUSED static struct flags mlockall_flags[] = {
+#ifdef TARGET_NR_mlockall
+static const struct flags mlockall_flags[] = {
FLAG_TARGET(MCL_CURRENT),
FLAG_TARGET(MCL_FUTURE),
#ifdef MCL_ONFAULT
@@ -1376,6 +1447,7 @@ UNUSED static struct flags mlockall_flags[] = {
#endif
FLAG_END,
};
+#endif
/* IDs of the various system clocks */
#define TARGET_CLOCK_REALTIME 0
@@ -1433,14 +1505,10 @@ print_flags(const struct flags *f, abi_long flags, int last)
const char *sep = "";
int n;
- if ((flags == 0) && (f->f_value == 0)) {
- qemu_log("%s%s", f->f_string, get_comma(last));
- return;
- }
for (n = 0; f->f_string != NULL; f++) {
- if ((f->f_value != 0) && ((flags & f->f_value) == f->f_value)) {
+ if ((flags & f->f_mask) == f->f_value) {
qemu_log("%s%s", sep, f->f_string);
- flags &= ~f->f_value;
+ flags &= ~f->f_mask;
sep = "|";
n++;
}
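The new f_mask field changes the matching rule to (flags & f_mask) == f_value, which is what lets zero-valued or multi-bit-field flags such as O_RDONLY inside O_ACCMODE, PROT_NONE, or MAP_SHARED inside MAP_TYPE print correctly. A small standalone demonstration of that rule; the flag table uses invented names with O_ACCMODE-style example values:

#include <stdio.h>

struct demo_flag {
    unsigned value;
    unsigned mask;
    const char *name;
};

/* Three values sharing one 2-bit field, plus an ordinary single-bit flag. */
static const struct demo_flag demo_flags[] = {
    { 0, 3, "RDONLY" },
    { 1, 3, "WRONLY" },
    { 2, 3, "RDWR" },
    { 8, 8, "APPEND" },
    { 0, 0, NULL },
};

static void demo_print_flags(unsigned flags)
{
    const char *sep = "";

    for (const struct demo_flag *f = demo_flags; f->name != NULL; f++) {
        if ((flags & f->mask) == f->value) {
            printf("%s%s", sep, f->name);
            flags &= ~f->mask;
            sep = "|";
        }
    }
    if (flags) {
        printf("%s%#x", sep, flags);   /* leftover unknown bits */
    }
    printf("\n");
}

int main(void)
{
    demo_print_flags(0);        /* RDONLY */
    demo_print_flags(1 | 8);    /* WRONLY|APPEND */
    return 0;
}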
@@ -1494,6 +1562,11 @@ print_file_mode(abi_long mode, int last)
const char *sep = "";
const struct flags *m;
+ if (mode == 0) {
+ qemu_log("000%s", get_comma(last));
+ return;
+ }
+
for (m = &mode_flags[0]; m->f_string != NULL; m++) {
if ((m->f_value & mode) == m->f_value) {
qemu_log("%s%s", m->f_string, sep);
@@ -1595,6 +1668,19 @@ print_raw_param(const char *fmt, abi_long param, int last)
qemu_log(format, param);
}
+/*
+ * Same as print_raw_param() but prints out raw 64-bit parameter.
+ */
+static void
+print_raw_param64(const char *fmt, long long param, int last)
+{
+ char format[64];
+
+ (void)snprintf(format, sizeof(format), "%s%s", fmt, get_comma(last));
+ qemu_log(format, param);
+}
+
+
static void
print_pointer(abi_long p, int last)
{
@@ -1661,6 +1747,25 @@ print_timespec(abi_ulong ts_addr, int last)
}
static void
+print_timespec64(abi_ulong ts_addr, int last)
+{
+ if (ts_addr) {
+ struct target__kernel_timespec *ts;
+
+ ts = lock_user(VERIFY_READ, ts_addr, sizeof(*ts), 1);
+ if (!ts) {
+ print_pointer(ts_addr, last);
+ return;
+ }
+ print_raw_param64("{tv_sec=%" PRId64, tswap64(ts->tv_sec), 0);
+ print_raw_param64("tv_nsec=%" PRId64 "}", tswap64(ts->tv_nsec), last);
+ unlock_user(ts, ts_addr, 0);
+ } else {
+ qemu_log("NULL%s", get_comma(last));
+ }
+}
+
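print_timespec64() decodes the 64-bit struct __kernel_timespec used by the time64 syscalls, so 32-bit guests get full-width seconds in the trace. A standalone sketch of just the formatting step; the struct below is a stand-in with both fields 64 bits wide, and guest byte-swapping is left out:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the guest's 64-bit timespec: tv_sec does not wrap in 2038. */
struct demo_kernel_timespec {
    int64_t tv_sec;
    int64_t tv_nsec;
};

static void demo_print_timespec64(const struct demo_kernel_timespec *ts)
{
    printf("{tv_sec=%" PRId64 ",tv_nsec=%" PRId64 "}\n", ts->tv_sec, ts->tv_nsec);
}

int main(void)
{
    struct demo_kernel_timespec ts = { INT64_C(4102444800), 500000000 }; /* year 2100 */
    demo_print_timespec64(&ts);
    return 0;
}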
+static void
print_timezone(abi_ulong tz_addr, int last)
{
if (tz_addr) {
@@ -1760,7 +1865,7 @@ print_termios(void *arg)
#ifdef TARGET_NR_accept
static void
-print_accept(void *cpu_env, const struct syscallname *name,
+print_accept(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1774,7 +1879,7 @@ print_accept(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_access
static void
-print_access(void *cpu_env, const struct syscallname *name,
+print_access(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1787,7 +1892,7 @@ print_access(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_acct
static void
-print_acct(void *cpu_env, const struct syscallname *name,
+print_acct(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1799,7 +1904,7 @@ print_acct(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_brk
static void
-print_brk(void *cpu_env, const struct syscallname *name,
+print_brk(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1811,7 +1916,7 @@ print_brk(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_chdir
static void
-print_chdir(void *cpu_env, const struct syscallname *name,
+print_chdir(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1823,7 +1928,7 @@ print_chdir(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_chroot
static void
-print_chroot(void *cpu_env, const struct syscallname *name,
+print_chroot(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1835,7 +1940,7 @@ print_chroot(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_chmod
static void
-print_chmod(void *cpu_env, const struct syscallname *name,
+print_chmod(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1848,7 +1953,7 @@ print_chmod(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_chown) || defined(TARGET_NR_lchown)
static void
-print_chown(void *cpu_env, const struct syscallname *name,
+print_chown(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1863,7 +1968,7 @@ print_chown(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_clock_adjtime
static void
-print_clock_adjtime(void *cpu_env, const struct syscallname *name,
+print_clock_adjtime(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1887,7 +1992,7 @@ static void do_print_clone(unsigned int flags, abi_ulong newsp,
}
static void
-print_clone(void *cpu_env, const struct syscallname *name,
+print_clone(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6)
{
@@ -1907,7 +2012,7 @@ print_clone(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_creat
static void
-print_creat(void *cpu_env, const struct syscallname *name,
+print_creat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1920,7 +2025,7 @@ print_creat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_execv
static void
-print_execv(void *cpu_env, const struct syscallname *name,
+print_execv(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1931,9 +2036,61 @@ print_execv(void *cpu_env, const struct syscallname *name,
}
#endif
-#ifdef TARGET_NR_faccessat
static void
-print_faccessat(void *cpu_env, const struct syscallname *name,
+print_execve_argv(abi_long argv, int last)
+{
+ abi_ulong arg_ptr_addr;
+ char *s;
+
+ qemu_log("{");
+ for (arg_ptr_addr = argv; ; arg_ptr_addr += sizeof(abi_ulong)) {
+ abi_ulong *arg_ptr, arg_addr;
+
+ arg_ptr = lock_user(VERIFY_READ, arg_ptr_addr, sizeof(abi_ulong), 1);
+ if (!arg_ptr) {
+ return;
+ }
+ arg_addr = tswapal(*arg_ptr);
+ unlock_user(arg_ptr, arg_ptr_addr, 0);
+ if (!arg_addr) {
+ break;
+ }
+ s = lock_user_string(arg_addr);
+ if (s) {
+ qemu_log("\"%s\",", s);
+ unlock_user(s, arg_addr, 0);
+ }
+ }
+ qemu_log("NULL}%s", get_comma(last));
+}
+
+static void
+print_execve(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg1, abi_long arg2, abi_long arg3,
+ abi_long arg4, abi_long arg5, abi_long arg6)
+{
+ print_syscall_prologue(name);
+ print_string(arg1, 0);
+ print_execve_argv(arg2, 1);
+ print_syscall_epilogue(name);
+}
+
+static void
+print_execveat(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg1, abi_long arg2, abi_long arg3,
+ abi_long arg4, abi_long arg5, abi_long arg6)
+{
+ print_syscall_prologue(name);
+ print_at_dirfd(arg1, 0);
+ print_string(arg2, 0);
+ print_execve_argv(arg3, 0);
+ print_flags(execveat_flags, arg5, 1);
+ print_syscall_epilogue(name);
+}
+
+#if defined(TARGET_NR_faccessat) || defined(TARGET_NR_faccessat2)
+static void
+print_faccessat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1948,7 +2105,7 @@ print_faccessat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_fallocate
static void
-print_fallocate(void *cpu_env, const struct syscallname *name,
+print_fallocate(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1968,7 +2125,7 @@ print_fallocate(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_fchmodat
static void
-print_fchmodat(void *cpu_env, const struct syscallname *name,
+print_fchmodat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1983,7 +2140,7 @@ print_fchmodat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_fchownat
static void
-print_fchownat(void *cpu_env, const struct syscallname *name,
+print_fchownat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -1999,7 +2156,7 @@ print_fchownat(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_fcntl) || defined(TARGET_NR_fcntl64)
static void
-print_fcntl(void *cpu_env, const struct syscallname *name,
+print_fcntl(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2135,7 +2292,7 @@ print_fcntl(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_fgetxattr
static void
-print_fgetxattr(void *cpu_env, const struct syscallname *name,
+print_fgetxattr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2150,7 +2307,7 @@ print_fgetxattr(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_flistxattr
static void
-print_flistxattr(void *cpu_env, const struct syscallname *name,
+print_flistxattr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2164,7 +2321,7 @@ print_flistxattr(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_getxattr) || defined(TARGET_NR_lgetxattr)
static void
-print_getxattr(void *cpu_env, const struct syscallname *name,
+print_getxattr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2180,7 +2337,7 @@ print_getxattr(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_listxattr) || defined(TARGET_NR_llistxattr)
static void
-print_listxattr(void *cpu_env, const struct syscallname *name,
+print_listxattr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2195,7 +2352,7 @@ print_listxattr(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_fremovexattr)
static void
-print_fremovexattr(void *cpu_env, const struct syscallname *name,
+print_fremovexattr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2208,7 +2365,7 @@ print_fremovexattr(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_removexattr) || defined(TARGET_NR_lremovexattr)
static void
-print_removexattr(void *cpu_env, const struct syscallname *name,
+print_removexattr(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2222,7 +2379,7 @@ print_removexattr(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_futimesat
static void
-print_futimesat(void *cpu_env, const struct syscallname *name,
+print_futimesat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2237,7 +2394,7 @@ print_futimesat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_gettimeofday
static void
-print_gettimeofday(void *cpu_env, const struct syscallname *name,
+print_gettimeofday(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2250,7 +2407,7 @@ print_gettimeofday(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_settimeofday
static void
-print_settimeofday(void *cpu_env, const struct syscallname *name,
+print_settimeofday(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2263,7 +2420,7 @@ print_settimeofday(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_clock_gettime) || defined(TARGET_NR_clock_getres)
static void
-print_clock_gettime(void *cpu_env, const struct syscallname *name,
+print_clock_gettime(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2275,9 +2432,22 @@ print_clock_gettime(void *cpu_env, const struct syscallname *name,
#define print_clock_getres print_clock_gettime
#endif
+#if defined(TARGET_NR_clock_gettime64)
+static void
+print_clock_gettime64(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ print_syscall_prologue(name);
+ print_enums(clockids, arg0, 0);
+ print_pointer(arg1, 1);
+ print_syscall_epilogue(name);
+}
+#endif
+
#ifdef TARGET_NR_clock_settime
static void
-print_clock_settime(void *cpu_env, const struct syscallname *name,
+print_clock_settime(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2290,7 +2460,7 @@ print_clock_settime(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_getitimer
static void
-print_getitimer(void *cpu_env, const struct syscallname *name,
+print_getitimer(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2303,7 +2473,7 @@ print_getitimer(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_setitimer
static void
-print_setitimer(void *cpu_env, const struct syscallname *name,
+print_setitimer(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2317,7 +2487,7 @@ print_setitimer(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_link
static void
-print_link(void *cpu_env, const struct syscallname *name,
+print_link(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2330,7 +2500,7 @@ print_link(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_linkat
static void
-print_linkat(void *cpu_env, const struct syscallname *name,
+print_linkat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2346,7 +2516,7 @@ print_linkat(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR__llseek) || defined(TARGET_NR_llseek)
static void
-print__llseek(void *cpu_env, const struct syscallname *name,
+print__llseek(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2369,7 +2539,7 @@ print__llseek(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_lseek
static void
-print_lseek(void *cpu_env, const struct syscallname *name,
+print_lseek(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2400,7 +2570,7 @@ print_lseek(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_truncate
static void
-print_truncate(void *cpu_env, const struct syscallname *name,
+print_truncate(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2413,7 +2583,7 @@ print_truncate(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_truncate64
static void
-print_truncate64(void *cpu_env, const struct syscallname *name,
+print_truncate64(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2430,7 +2600,7 @@ print_truncate64(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_ftruncate64
static void
-print_ftruncate64(void *cpu_env, const struct syscallname *name,
+print_ftruncate64(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2447,7 +2617,7 @@ print_ftruncate64(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_mlockall
static void
-print_mlockall(void *cpu_env, const struct syscallname *name,
+print_mlockall(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2459,7 +2629,7 @@ print_mlockall(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_socket)
static void
-print_socket(void *cpu_env, const struct syscallname *name,
+print_socket(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2920,7 +3090,7 @@ static struct {
};
static void
-print_socketcall(void *cpu_env, const struct syscallname *name,
+print_socketcall(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2941,7 +3111,7 @@ print_socketcall(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_bind)
static void
-print_bind(void *cpu_env, const struct syscallname *name,
+print_bind(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2955,7 +3125,7 @@ print_bind(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) || \
defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64)
static void
-print_stat(void *cpu_env, const struct syscallname *name,
+print_stat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2969,9 +3139,49 @@ print_stat(void *cpu_env, const struct syscallname *name,
#define print_lstat64 print_stat
#endif
+#if defined(TARGET_NR_madvise)
+static struct enums madvise_advice[] = {
+ ENUM_TARGET(MADV_NORMAL),
+ ENUM_TARGET(MADV_RANDOM),
+ ENUM_TARGET(MADV_SEQUENTIAL),
+ ENUM_TARGET(MADV_WILLNEED),
+ ENUM_TARGET(MADV_DONTNEED),
+ ENUM_TARGET(MADV_FREE),
+ ENUM_TARGET(MADV_REMOVE),
+ ENUM_TARGET(MADV_DONTFORK),
+ ENUM_TARGET(MADV_DOFORK),
+ ENUM_TARGET(MADV_MERGEABLE),
+ ENUM_TARGET(MADV_UNMERGEABLE),
+ ENUM_TARGET(MADV_HUGEPAGE),
+ ENUM_TARGET(MADV_NOHUGEPAGE),
+ ENUM_TARGET(MADV_DONTDUMP),
+ ENUM_TARGET(MADV_DODUMP),
+ ENUM_TARGET(MADV_WIPEONFORK),
+ ENUM_TARGET(MADV_KEEPONFORK),
+ ENUM_TARGET(MADV_COLD),
+ ENUM_TARGET(MADV_PAGEOUT),
+ ENUM_TARGET(MADV_POPULATE_READ),
+ ENUM_TARGET(MADV_POPULATE_WRITE),
+ ENUM_TARGET(MADV_DONTNEED_LOCKED),
+ ENUM_END,
+};
+
+static void
+print_madvise(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ print_syscall_prologue(name);
+ print_pointer(arg0, 0);
+ print_raw_param("%d", arg1, 0);
+ print_enums(madvise_advice, arg2, 1);
+ print_syscall_epilogue(name);
+}
+#endif
+
#if defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
static void
-print_fstat(void *cpu_env, const struct syscallname *name,
+print_fstat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2985,7 +3195,7 @@ print_fstat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_mkdir
static void
-print_mkdir(void *cpu_env, const struct syscallname *name,
+print_mkdir(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -2998,7 +3208,7 @@ print_mkdir(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_mkdirat
static void
-print_mkdirat(void *cpu_env, const struct syscallname *name,
+print_mkdirat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3012,7 +3222,7 @@ print_mkdirat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_rmdir
static void
-print_rmdir(void *cpu_env, const struct syscallname *name,
+print_rmdir(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3024,7 +3234,7 @@ print_rmdir(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_rt_sigaction
static void
-print_rt_sigaction(void *cpu_env, const struct syscallname *name,
+print_rt_sigaction(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3038,7 +3248,7 @@ print_rt_sigaction(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_rt_sigprocmask
static void
-print_rt_sigprocmask(void *cpu_env, const struct syscallname *name,
+print_rt_sigprocmask(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3051,14 +3261,15 @@ print_rt_sigprocmask(void *cpu_env, const struct syscallname *name,
}
qemu_log("%s,", how);
print_pointer(arg1, 0);
- print_pointer(arg2, 1);
+ print_pointer(arg2, 0);
+ print_raw_param("%u", arg3, 1);
print_syscall_epilogue(name);
}
#endif
#ifdef TARGET_NR_rt_sigqueueinfo
static void
-print_rt_sigqueueinfo(void *cpu_env, const struct syscallname *name,
+print_rt_sigqueueinfo(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3083,7 +3294,7 @@ print_rt_sigqueueinfo(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_rt_tgsigqueueinfo
static void
-print_rt_tgsigqueueinfo(void *cpu_env, const struct syscallname *name,
+print_rt_tgsigqueueinfo(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3167,7 +3378,7 @@ print_syslog_action(abi_ulong arg, int last)
}
static void
-print_syslog(void *cpu_env, const struct syscallname *name,
+print_syslog(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3181,7 +3392,7 @@ print_syslog(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_mknod
static void
-print_mknod(void *cpu_env, const struct syscallname *name,
+print_mknod(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3200,7 +3411,7 @@ print_mknod(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_mknodat
static void
-print_mknodat(void *cpu_env, const struct syscallname *name,
+print_mknodat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3220,7 +3431,7 @@ print_mknodat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_mq_open
static void
-print_mq_open(void *cpu_env, const struct syscallname *name,
+print_mq_open(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3239,7 +3450,7 @@ print_mq_open(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_open
static void
-print_open(void *cpu_env, const struct syscallname *name,
+print_open(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3256,7 +3467,7 @@ print_open(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_openat
static void
-print_openat(void *cpu_env, const struct syscallname *name,
+print_openat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3272,9 +3483,37 @@ print_openat(void *cpu_env, const struct syscallname *name,
}
#endif
+#ifdef TARGET_NR_pidfd_send_signal
+static void
+print_pidfd_send_signal(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ void *p;
+ target_siginfo_t uinfo;
+
+ print_syscall_prologue(name);
+ print_raw_param("%d", arg0, 0);
+ print_signal(arg1, 0);
+
+ p = lock_user(VERIFY_READ, arg2, sizeof(target_siginfo_t), 1);
+ if (p) {
+ get_target_siginfo(&uinfo, p);
+ print_siginfo(&uinfo);
+
+ unlock_user(p, arg2, 0);
+ } else {
+ print_pointer(arg2, 0);
+ }
+
+ print_raw_param("%u", arg3, 1);
+ print_syscall_epilogue(name);
+}
+#endif
+
#ifdef TARGET_NR_mq_unlink
static void
-print_mq_unlink(void *cpu_env, const struct syscallname *name,
+print_mq_unlink(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3286,7 +3525,7 @@ print_mq_unlink(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)
static void
-print_fstatat64(void *cpu_env, const struct syscallname *name,
+print_fstatat64(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3302,7 +3541,7 @@ print_fstatat64(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_readlink
static void
-print_readlink(void *cpu_env, const struct syscallname *name,
+print_readlink(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3316,7 +3555,7 @@ print_readlink(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_readlinkat
static void
-print_readlinkat(void *cpu_env, const struct syscallname *name,
+print_readlinkat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3331,7 +3570,7 @@ print_readlinkat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_rename
static void
-print_rename(void *cpu_env, const struct syscallname *name,
+print_rename(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3344,7 +3583,7 @@ print_rename(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_renameat
static void
-print_renameat(void *cpu_env, const struct syscallname *name,
+print_renameat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3359,7 +3598,7 @@ print_renameat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_statfs
static void
-print_statfs(void *cpu_env, const struct syscallname *name,
+print_statfs(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3372,7 +3611,7 @@ print_statfs(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_statfs64
static void
-print_statfs64(void *cpu_env, const struct syscallname *name,
+print_statfs64(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3385,7 +3624,7 @@ print_statfs64(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_symlink
static void
-print_symlink(void *cpu_env, const struct syscallname *name,
+print_symlink(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3398,7 +3637,7 @@ print_symlink(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_symlinkat
static void
-print_symlinkat(void *cpu_env, const struct syscallname *name,
+print_symlinkat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3412,7 +3651,7 @@ print_symlinkat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_mount
static void
-print_mount(void *cpu_env, const struct syscallname *name,
+print_mount(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3428,7 +3667,7 @@ print_mount(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_umount
static void
-print_umount(void *cpu_env, const struct syscallname *name,
+print_umount(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3440,7 +3679,7 @@ print_umount(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_umount2
static void
-print_umount2(void *cpu_env, const struct syscallname *name,
+print_umount2(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3453,7 +3692,7 @@ print_umount2(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_unlink
static void
-print_unlink(void *cpu_env, const struct syscallname *name,
+print_unlink(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3465,7 +3704,7 @@ print_unlink(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_unlinkat
static void
-print_unlinkat(void *cpu_env, const struct syscallname *name,
+print_unlinkat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3479,7 +3718,7 @@ print_unlinkat(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_unshare
static void
-print_unshare(void *cpu_env, const struct syscallname *name,
+print_unshare(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3489,9 +3728,24 @@ print_unshare(void *cpu_env, const struct syscallname *name,
}
#endif
+#ifdef TARGET_NR_clock_nanosleep
+static void
+print_clock_nanosleep(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ print_syscall_prologue(name);
+ print_enums(clockids, arg0, 0);
+ print_raw_param("%d", arg1, 0);
+ print_timespec(arg2, 0);
+ print_timespec(arg3, 1);
+ print_syscall_epilogue(name);
+}
+#endif
+
#ifdef TARGET_NR_utime
static void
-print_utime(void *cpu_env, const struct syscallname *name,
+print_utime(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3504,7 +3758,7 @@ print_utime(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_utimes
static void
-print_utimes(void *cpu_env, const struct syscallname *name,
+print_utimes(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3517,7 +3771,7 @@ print_utimes(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_utimensat
static void
-print_utimensat(void *cpu_env, const struct syscallname *name,
+print_utimensat(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3532,10 +3786,24 @@ print_utimensat(void *cpu_env, const struct syscallname *name,
#if defined(TARGET_NR_mmap) || defined(TARGET_NR_mmap2)
static void
-print_mmap(void *cpu_env, const struct syscallname *name,
+print_mmap_both(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
- abi_long arg3, abi_long arg4, abi_long arg5)
-{
+ abi_long arg3, abi_long arg4, abi_long arg5,
+ bool is_old_mmap)
+{
+ if (is_old_mmap) {
+ abi_ulong *v;
+ abi_ulong argp = arg0;
+ if (!(v = lock_user(VERIFY_READ, argp, 6 * sizeof(abi_ulong), 1)))
+ return;
+ arg0 = tswapal(v[0]);
+ arg1 = tswapal(v[1]);
+ arg2 = tswapal(v[2]);
+ arg3 = tswapal(v[3]);
+ arg4 = tswapal(v[4]);
+ arg5 = tswapal(v[5]);
+ unlock_user(v, argp, 0);
+ }
print_syscall_prologue(name);
print_pointer(arg0, 0);
print_raw_param("%d", arg1, 0);
@@ -3545,12 +3813,39 @@ print_mmap(void *cpu_env, const struct syscallname *name,
print_raw_param("%#x", arg5, 1);
print_syscall_epilogue(name);
}
-#define print_mmap2 print_mmap
+#endif
+
+#if defined(TARGET_NR_mmap)
+static void
+print_mmap(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ return print_mmap_both(cpu_env, name, arg0, arg1, arg2, arg3,
+ arg4, arg5,
+#if defined(TARGET_NR_mmap2)
+ true
+#else
+ false
+#endif
+ );
+}
+#endif
+
+#if defined(TARGET_NR_mmap2)
+static void
+print_mmap2(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ return print_mmap_both(cpu_env, name, arg0, arg1, arg2, arg3,
+ arg4, arg5, false);
+}
#endif
#ifdef TARGET_NR_mprotect
static void
-print_mprotect(void *cpu_env, const struct syscallname *name,
+print_mprotect(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3564,7 +3859,7 @@ print_mprotect(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_munmap
static void
-print_munmap(void *cpu_env, const struct syscallname *name,
+print_munmap(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3576,65 +3871,158 @@ print_munmap(void *cpu_env, const struct syscallname *name,
#endif
#ifdef TARGET_NR_futex
-static void print_futex_op(abi_long tflag, int last)
-{
-#define print_op(val) \
-if( cmd == val ) { \
- qemu_log(#val); \
- return; \
-}
-
- int cmd = (int)tflag;
-#ifdef FUTEX_PRIVATE_FLAG
- if (cmd & FUTEX_PRIVATE_FLAG) {
- qemu_log("FUTEX_PRIVATE_FLAG|");
- cmd &= ~FUTEX_PRIVATE_FLAG;
- }
-#endif
-#ifdef FUTEX_CLOCK_REALTIME
- if (cmd & FUTEX_CLOCK_REALTIME) {
- qemu_log("FUTEX_CLOCK_REALTIME|");
- cmd &= ~FUTEX_CLOCK_REALTIME;
+static void print_futex_op(int cmd, int last)
+{
+ static const char * const futex_names[] = {
+#define NAME(X) [X] = #X
+ NAME(FUTEX_WAIT),
+ NAME(FUTEX_WAKE),
+ NAME(FUTEX_FD),
+ NAME(FUTEX_REQUEUE),
+ NAME(FUTEX_CMP_REQUEUE),
+ NAME(FUTEX_WAKE_OP),
+ NAME(FUTEX_LOCK_PI),
+ NAME(FUTEX_UNLOCK_PI),
+ NAME(FUTEX_TRYLOCK_PI),
+ NAME(FUTEX_WAIT_BITSET),
+ NAME(FUTEX_WAKE_BITSET),
+ NAME(FUTEX_WAIT_REQUEUE_PI),
+ NAME(FUTEX_CMP_REQUEUE_PI),
+ NAME(FUTEX_LOCK_PI2),
+#undef NAME
+ };
+
+ unsigned base_cmd = cmd & FUTEX_CMD_MASK;
+
+ if (base_cmd < ARRAY_SIZE(futex_names)) {
+ qemu_log("%s%s%s",
+ (cmd & FUTEX_PRIVATE_FLAG ? "FUTEX_PRIVATE_FLAG|" : ""),
+ (cmd & FUTEX_CLOCK_REALTIME ? "FUTEX_CLOCK_REALTIME|" : ""),
+ futex_names[base_cmd]);
+ } else {
+ qemu_log("0x%x", cmd);
}
-#endif
- print_op(FUTEX_WAIT)
- print_op(FUTEX_WAKE)
- print_op(FUTEX_FD)
- print_op(FUTEX_REQUEUE)
- print_op(FUTEX_CMP_REQUEUE)
- print_op(FUTEX_WAKE_OP)
- print_op(FUTEX_LOCK_PI)
- print_op(FUTEX_UNLOCK_PI)
- print_op(FUTEX_TRYLOCK_PI)
-#ifdef FUTEX_WAIT_BITSET
- print_op(FUTEX_WAIT_BITSET)
-#endif
-#ifdef FUTEX_WAKE_BITSET
- print_op(FUTEX_WAKE_BITSET)
-#endif
- /* unknown values */
- qemu_log("%d", cmd);
}
static void
-print_futex(void *cpu_env, const struct syscallname *name,
+print_futex(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
+ abi_long op = arg1 & FUTEX_CMD_MASK;
print_syscall_prologue(name);
print_pointer(arg0, 0);
print_futex_op(arg1, 0);
print_raw_param(",%d", arg2, 0);
- print_pointer(arg3, 0); /* struct timespec */
+ switch (op) {
+ case FUTEX_WAIT:
+ case FUTEX_WAIT_BITSET:
+ case FUTEX_LOCK_PI:
+ case FUTEX_LOCK_PI2:
+ case FUTEX_WAIT_REQUEUE_PI:
+ print_timespec(arg3, 0);
+ break;
+ default:
+ print_pointer(arg3, 0);
+ break;
+ }
print_pointer(arg4, 0);
print_raw_param("%d", arg4, 1);
print_syscall_epilogue(name);
}
#endif
+#ifdef TARGET_NR_prlimit64
+static const char *target_ressource_string(abi_ulong r)
+{
+ #define RET_RES_ENTRY(res) case TARGET_##res: return #res;
+ switch (r) {
+ RET_RES_ENTRY(RLIMIT_AS);
+ RET_RES_ENTRY(RLIMIT_CORE);
+ RET_RES_ENTRY(RLIMIT_CPU);
+ RET_RES_ENTRY(RLIMIT_DATA);
+ RET_RES_ENTRY(RLIMIT_FSIZE);
+ RET_RES_ENTRY(RLIMIT_LOCKS);
+ RET_RES_ENTRY(RLIMIT_MEMLOCK);
+ RET_RES_ENTRY(RLIMIT_MSGQUEUE);
+ RET_RES_ENTRY(RLIMIT_NICE);
+ RET_RES_ENTRY(RLIMIT_NOFILE);
+ RET_RES_ENTRY(RLIMIT_NPROC);
+ RET_RES_ENTRY(RLIMIT_RSS);
+ RET_RES_ENTRY(RLIMIT_RTPRIO);
+#ifdef RLIMIT_RTTIME
+ RET_RES_ENTRY(RLIMIT_RTTIME);
+#endif
+ RET_RES_ENTRY(RLIMIT_SIGPENDING);
+ RET_RES_ENTRY(RLIMIT_STACK);
+ default:
+ return NULL;
+ }
+ #undef RET_RES_ENTRY
+}
+
+static void
+print_rlimit64(abi_ulong rlim_addr, int last)
+{
+ if (rlim_addr) {
+ struct target_rlimit64 *rl;
+
+ rl = lock_user(VERIFY_READ, rlim_addr, sizeof(*rl), 1);
+ if (!rl) {
+ print_pointer(rlim_addr, last);
+ return;
+ }
+ print_raw_param64("{rlim_cur=%" PRId64, tswap64(rl->rlim_cur), 0);
+ print_raw_param64("rlim_max=%" PRId64 "}", tswap64(rl->rlim_max),
+ last);
+ unlock_user(rl, rlim_addr, 0);
+ } else {
+ qemu_log("NULL%s", get_comma(last));
+ }
+}
+
+static void
+print_prlimit64(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ const char *rlim_name;
+
+ print_syscall_prologue(name);
+ print_raw_param("%d", arg0, 0);
+ rlim_name = target_ressource_string(arg1);
+ if (rlim_name) {
+ qemu_log("%s,", rlim_name);
+ } else {
+ print_raw_param("%d", arg1, 0);
+ }
+ print_rlimit64(arg2, 0);
+ print_pointer(arg3, 1);
+ print_syscall_epilogue(name);
+}
+
+static void
+print_syscall_ret_prlimit64(CPUArchState *cpu_env,
+ const struct syscallname *name,
+ abi_long ret, abi_long arg0, abi_long arg1,
+ abi_long arg2, abi_long arg3, abi_long arg4,
+ abi_long arg5)
+{
+ if (!print_syscall_err(ret)) {
+ qemu_log(TARGET_ABI_FMT_ld, ret);
+ if (arg3) {
+ qemu_log(" (");
+ print_rlimit64(arg3, 1);
+ qemu_log(")");
+ }
+ }
+ qemu_log("\n");
+}
+#endif
+
#ifdef TARGET_NR_kill
static void
-print_kill(void *cpu_env, const struct syscallname *name,
+print_kill(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3647,7 +4035,7 @@ print_kill(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_tkill
static void
-print_tkill(void *cpu_env, const struct syscallname *name,
+print_tkill(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3660,7 +4048,7 @@ print_tkill(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_tgkill
static void
-print_tgkill(void *cpu_env, const struct syscallname *name,
+print_tgkill(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3672,9 +4060,28 @@ print_tgkill(void *cpu_env, const struct syscallname *name,
}
#endif
+#if defined(TARGET_NR_pread64) || defined(TARGET_NR_pwrite64)
+static void
+print_pread64(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ if (regpairs_aligned(cpu_env, TARGET_NR_pread64)) {
+ arg3 = arg4;
+ arg4 = arg5;
+ }
+ print_syscall_prologue(name);
+ print_raw_param("%d", arg0, 0);
+ print_pointer(arg1, 0);
+ print_raw_param("%d", arg2, 0);
+ print_raw_param("%" PRIu64, target_offset64(arg3, arg4), 1);
+ print_syscall_epilogue(name);
+}
+#endif
+
#ifdef TARGET_NR_statx
static void
-print_statx(void *cpu_env, const struct syscallname *name,
+print_statx(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3690,7 +4097,7 @@ print_statx(void *cpu_env, const struct syscallname *name,
#ifdef TARGET_NR_ioctl
static void
-print_ioctl(void *cpu_env, const struct syscallname *name,
+print_ioctl(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg0, abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
@@ -3775,55 +4182,74 @@ static int nsyscalls = ARRAY_SIZE(scnames);
* The public interface to this module.
*/
void
-print_syscall(void *cpu_env, int num,
+print_syscall(CPUArchState *cpu_env, int num,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6)
{
int i;
- const char *format="%s(" TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ")";
+ FILE *f;
+ const char *format = "%s(" TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ","
+ TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ","
+ TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ")";
- qemu_log("%d ", getpid());
+ f = qemu_log_trylock();
+ if (!f) {
+ return;
+ }
+ fprintf(f, "%d ", getpid());
- for(i=0;i<nsyscalls;i++)
- if( scnames[i].nr == num ) {
- if( scnames[i].call != NULL ) {
- scnames[i].call(
- cpu_env, &scnames[i], arg1, arg2, arg3, arg4, arg5, arg6);
+ for (i = 0; i < nsyscalls; i++) {
+ if (scnames[i].nr == num) {
+ if (scnames[i].call != NULL) {
+ scnames[i].call(cpu_env, &scnames[i], arg1, arg2, arg3,
+ arg4, arg5, arg6);
} else {
/* XXX: this format system is broken because it uses
host types and host pointers for strings */
- if( scnames[i].format != NULL )
+ if (scnames[i].format != NULL) {
format = scnames[i].format;
- qemu_log(format,
- scnames[i].name, arg1, arg2, arg3, arg4, arg5, arg6);
+ }
+ fprintf(f, format, scnames[i].name, arg1, arg2,
+ arg3, arg4, arg5, arg6);
}
+ qemu_log_unlock(f);
return;
}
- qemu_log("Unknown syscall %d\n", num);
+ }
+ fprintf(f, "Unknown syscall %d\n", num);
+ qemu_log_unlock(f);
}
void
-print_syscall_ret(void *cpu_env, int num, abi_long ret,
+print_syscall_ret(CPUArchState *cpu_env, int num, abi_long ret,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6)
{
int i;
+ FILE *f;
- for(i=0;i<nsyscalls;i++)
- if( scnames[i].nr == num ) {
- if( scnames[i].result != NULL ) {
+ f = qemu_log_trylock();
+ if (!f) {
+ return;
+ }
+
+ for (i = 0; i < nsyscalls; i++) {
+ if (scnames[i].nr == num) {
+ if (scnames[i].result != NULL) {
scnames[i].result(cpu_env, &scnames[i], ret,
arg1, arg2, arg3,
arg4, arg5, arg6);
} else {
if (!print_syscall_err(ret)) {
- qemu_log(TARGET_ABI_FMT_ld, ret);
+ fprintf(f, TARGET_ABI_FMT_ld, ret);
}
- qemu_log("\n");
+ fprintf(f, "\n");
}
break;
}
+ }
+ qemu_log_unlock(f);
}
void print_taken_signal(int target_signum, const target_siginfo_t *tinfo)
@@ -3831,9 +4257,17 @@ void print_taken_signal(int target_signum, const target_siginfo_t *tinfo)
/* Print the strace output for a signal being taken:
* --- SIGSEGV {si_signo=SIGSEGV, si_code=SI_KERNEL, si_addr=0} ---
*/
- qemu_log("--- ");
+ FILE *f;
+
+ f = qemu_log_trylock();
+ if (!f) {
+ return;
+ }
+
+ fprintf(f, "--- ");
print_signal(target_signum, 1);
- qemu_log(" ");
+ fprintf(f, " ");
print_siginfo(tinfo);
- qemu_log(" ---\n");
+ fprintf(f, " ---\n");
+ qemu_log_unlock(f);
}
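
The strace.c hunks above replace the per-fragment qemu_log() calls with a take-lock/format/release sequence, so that one syscall's trace line cannot be interleaved with output from other threads. A minimal sketch of that pattern, assuming QEMU's qemu/log.h interface (qemu_log_trylock() returning a FILE * or NULL, qemu_log_unlock() releasing it); the helper name strace_print_line is purely illustrative:

#include "qemu/osdep.h"
#include "qemu/log.h"

/* Illustrative sketch, not part of the patch above. */
static void strace_print_line(int pid, const char *text)
{
    FILE *f = qemu_log_trylock();       /* NULL if the log is unavailable */

    if (!f) {
        return;                         /* drop the line rather than block */
    }
    fprintf(f, "%d %s\n", pid, text);   /* whole line written under the lock */
    qemu_log_unlock(f);                 /* release the log file */
}
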
diff --git a/linux-user/strace.h b/linux-user/strace.h
index 1e232d07fc..d5e7f26bcb 100644
--- a/linux-user/strace.h
+++ b/linux-user/strace.h
@@ -18,10 +18,10 @@
#ifndef LINUX_USER_STRACE_H
#define LINUX_USER_STRACE_H
-void print_syscall(void *cpu_env, int num,
+void print_syscall(CPUArchState *cpu_env, int num,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6);
-void print_syscall_ret(void *cpu_env, int num, abi_long ret,
+void print_syscall_ret(CPUArchState *cpu_env, int num, abi_long ret,
abi_long arg1, abi_long arg2, abi_long arg3,
abi_long arg4, abi_long arg5, abi_long arg6);
/**
diff --git a/linux-user/strace.list b/linux-user/strace.list
index 278596acd1..dfd4237d14 100644
--- a/linux-user/strace.list
+++ b/linux-user/strace.list
@@ -86,12 +86,16 @@
{ TARGET_NR_clock_getres, "clock_getres" , NULL, print_clock_getres,
print_syscall_ret_clock_getres },
#endif
+#ifdef TARGET_NR_clock_getres_time64
+{ TARGET_NR_clock_getres_time64, "clock_getres_time64" , NULL, NULL, NULL },
+#endif
#ifdef TARGET_NR_clock_gettime
{ TARGET_NR_clock_gettime, "clock_gettime" , NULL, print_clock_gettime,
print_syscall_ret_clock_gettime },
#endif
#ifdef TARGET_NR_clock_nanosleep
-{ TARGET_NR_clock_nanosleep, "clock_nanosleep" , NULL, NULL, NULL },
+{ TARGET_NR_clock_nanosleep, "clock_nanosleep" , NULL, print_clock_nanosleep,
+ NULL },
#endif
#ifdef TARGET_NR_clock_settime
{ TARGET_NR_clock_settime, "clock_settime" , NULL, print_clock_settime, NULL },
@@ -102,6 +106,9 @@
#ifdef TARGET_NR_close
{ TARGET_NR_close, "close" , "%s(%d)", NULL, NULL },
#endif
+#ifdef TARGET_NR_close_range
+{ TARGET_NR_close_range, "close_range" , "%s(%u,%u,%u)", NULL, NULL },
+#endif
#ifdef TARGET_NR_connect
{ TARGET_NR_connect, "connect" , "%s(%d,%#x,%d)", NULL, NULL },
#endif
@@ -160,7 +167,7 @@
{ TARGET_NR_execve, "execve" , NULL, print_execve, NULL },
#endif
#ifdef TARGET_NR_execveat
-{ TARGET_NR_execveat, "execveat" , NULL, NULL, NULL },
+{ TARGET_NR_execveat, "execveat" , NULL, print_execveat, NULL },
#endif
#ifdef TARGET_NR_exec_with_loader
{ TARGET_NR_exec_with_loader, "exec_with_loader" , NULL, NULL, NULL },
@@ -177,6 +184,9 @@
#ifdef TARGET_NR_faccessat
{ TARGET_NR_faccessat, "faccessat" , NULL, print_faccessat, NULL },
#endif
+#ifdef TARGET_NR_faccessat2
+{ TARGET_NR_faccessat2, "faccessat2" , NULL, print_faccessat, NULL },
+#endif
#ifdef TARGET_NR_fadvise64
{ TARGET_NR_fadvise64, "fadvise64" , NULL, NULL, NULL },
#endif
@@ -268,6 +278,9 @@
#ifdef TARGET_NR_futex
{ TARGET_NR_futex, "futex" , NULL, print_futex, NULL },
#endif
+#ifdef TARGET_NR_futex_time64
+{ TARGET_NR_futex_time64, "futex_time64" , NULL, NULL, NULL },
+#endif
#ifdef TARGET_NR_futimesat
{ TARGET_NR_futimesat, "futimesat" , NULL, print_futimesat, NULL },
#endif
@@ -278,10 +291,10 @@
{ TARGET_NR_getcwd, "getcwd" , "%s(%p,%d)", NULL, NULL },
#endif
#ifdef TARGET_NR_getdents
-{ TARGET_NR_getdents, "getdents" , NULL, NULL, NULL },
+{ TARGET_NR_getdents, "getdents" , "%s(%d,%p,%u)", NULL, NULL },
#endif
#ifdef TARGET_NR_getdents64
-{ TARGET_NR_getdents64, "getdents64" , NULL, NULL, NULL },
+{ TARGET_NR_getdents64, "getdents64" , "%s(%d,%p,%u)", NULL, NULL },
#endif
#ifdef TARGET_NR_getdomainname
{ TARGET_NR_getdomainname, "getdomainname" , NULL, NULL, NULL },
@@ -308,10 +321,10 @@
{ TARGET_NR_getgid32, "getgid32" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_getgroups
-{ TARGET_NR_getgroups, "getgroups" , NULL, NULL, NULL },
+{ TARGET_NR_getgroups, "getgroups" , "%s(%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_getgroups32
-{ TARGET_NR_getgroups32, "getgroups32" , NULL, NULL, NULL },
+{ TARGET_NR_getgroups32, "getgroups32" , "%s(%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_gethostname
{ TARGET_NR_gethostname, "gethostname" , NULL, NULL, NULL },
@@ -330,7 +343,7 @@
{ TARGET_NR_getpagesize, "getpagesize" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_getpeername
-{ TARGET_NR_getpeername, "getpeername" , NULL, NULL, NULL },
+{ TARGET_NR_getpeername, "getpeername" , "%s(%d,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_getpgid
{ TARGET_NR_getpgid, "getpgid" , "%s(%u)", NULL, NULL },
@@ -351,22 +364,22 @@
{ TARGET_NR_getpriority, "getpriority", "%s(%#x,%#x)", NULL, NULL },
#endif
#ifdef TARGET_NR_getrandom
-{ TARGET_NR_getrandom, "getrandom", NULL, NULL, NULL },
+{ TARGET_NR_getrandom, "getrandom", "%s(%p,%u,%u)", NULL, NULL },
#endif
#ifdef TARGET_NR_getresgid
-{ TARGET_NR_getresgid, "getresgid" , NULL, NULL, NULL },
+{ TARGET_NR_getresgid, "getresgid" , "%s(%p,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_getresgid32
{ TARGET_NR_getresgid32, "getresgid32" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_getresuid
-{ TARGET_NR_getresuid, "getresuid" , NULL, NULL, NULL },
+{ TARGET_NR_getresuid, "getresuid" , "%s(%p,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_getresuid32
{ TARGET_NR_getresuid32, "getresuid32" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_getrlimit
-{ TARGET_NR_getrlimit, "getrlimit" , NULL, NULL, NULL },
+{ TARGET_NR_getrlimit, "getrlimit" , "%s(%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_get_robust_list
{ TARGET_NR_get_robust_list, "get_robust_list" , NULL, NULL, NULL },
@@ -378,14 +391,19 @@
{ TARGET_NR_getsid, "getsid" , "%s(%d)", NULL, NULL },
#endif
#ifdef TARGET_NR_getsockname
-{ TARGET_NR_getsockname, "getsockname" , NULL, NULL, NULL },
+{ TARGET_NR_getsockname, "getsockname" , "%s(%d,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_getsockopt
-{ TARGET_NR_getsockopt, "getsockopt" , NULL, NULL, NULL },
+{ TARGET_NR_getsockopt, "getsockopt" , "%s(%d,%d,%d,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_get_thread_area
+#if defined(TARGET_I386) && defined(TARGET_ABI32)
{ TARGET_NR_get_thread_area, "get_thread_area", "%s(0x"TARGET_ABI_FMT_lx")",
NULL, NULL },
+#elif defined(TARGET_M68K)
+{ TARGET_NR_get_thread_area, "get_thread_area" , "%s()",
+ NULL, print_syscall_ret_addr },
+#endif
#endif
#ifdef TARGET_NR_gettid
{ TARGET_NR_gettid, "gettid" , "%s()", NULL, NULL },
@@ -536,7 +554,7 @@
{ TARGET_NR_lstat64, "lstat64" , NULL, print_lstat64, NULL },
#endif
#ifdef TARGET_NR_madvise
-{ TARGET_NR_madvise, "madvise" , NULL, NULL, NULL },
+{ TARGET_NR_madvise, "madvise" , NULL, print_madvise, NULL },
#endif
#ifdef TARGET_NR_madvise1
{ TARGET_NR_madvise1, "madvise1" , NULL, NULL, NULL },
@@ -638,7 +656,7 @@
{ TARGET_NR_msgsnd, "msgsnd" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_msync
-{ TARGET_NR_msync, "msync" , NULL, NULL, NULL },
+{ TARGET_NR_msync, "msync" , "%s(%p,%u,%d)", NULL, NULL },
#endif
#ifdef TARGET_NR_multiplexer
{ TARGET_NR_multiplexer, "multiplexer" , NULL, NULL, NULL },
@@ -1031,7 +1049,8 @@
{ TARGET_NR_perfctr, "perfctr" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_personality
-{ TARGET_NR_personality, "personality" , NULL, NULL, NULL },
+{ TARGET_NR_personality, "personality" , "%s(0x"TARGET_ABI_FMT_lx")", NULL,
+ print_syscall_ret_addr },
#endif
#ifdef TARGET_NR_pipe
{ TARGET_NR_pipe, "pipe" , NULL, NULL, NULL },
@@ -1040,22 +1059,23 @@
{ TARGET_NR_pivot_root, "pivot_root" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_poll
-{ TARGET_NR_poll, "poll" , NULL, NULL, NULL },
+{ TARGET_NR_poll, "poll" , "%s(%p,%u,%d)", NULL, NULL },
#endif
#ifdef TARGET_NR_ppoll
-{ TARGET_NR_ppoll, "ppoll" , NULL, NULL, NULL },
+{ TARGET_NR_ppoll, "ppoll" , "%s(%p,%u,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_prctl
{ TARGET_NR_prctl, "prctl" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_pread64
-{ TARGET_NR_pread64, "pread64" , NULL, NULL, NULL },
+{ TARGET_NR_pread64, "pread64" , NULL, print_pread64, NULL },
#endif
#ifdef TARGET_NR_preadv
{ TARGET_NR_preadv, "preadv" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_prlimit64
-{ TARGET_NR_prlimit64, "prlimit64" , NULL, NULL, NULL },
+{ TARGET_NR_prlimit64, "prlimit64" , NULL, print_prlimit64,
+ print_syscall_ret_prlimit64 },
#endif
#ifdef TARGET_NR_process_vm_readv
{ TARGET_NR_process_vm_readv, "process_vm_readv" , NULL, NULL, NULL },
@@ -1079,7 +1099,7 @@
{ TARGET_NR_putpmsg, "putpmsg" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_pwrite64
-{ TARGET_NR_pwrite64, "pwrite64" , NULL, NULL, NULL },
+{ TARGET_NR_pwrite64, "pwrite64" , NULL, print_pread64, NULL },
#endif
#ifdef TARGET_NR_pwritev
{ TARGET_NR_pwritev, "pwritev" , NULL, NULL, NULL },
@@ -1112,7 +1132,7 @@
{ TARGET_NR_reboot, "reboot" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_recv
-{ TARGET_NR_recv, "recv" , NULL, NULL, NULL },
+{ TARGET_NR_recv, "recv" , "%s(%d,%p,%u,%d)", NULL, NULL },
#endif
#ifdef TARGET_NR_recvfrom
{ TARGET_NR_recvfrom, "recvfrom" , NULL, NULL, NULL },
@@ -1172,7 +1192,7 @@
{ TARGET_NR_rt_sigqueueinfo, "rt_sigqueueinfo" , NULL, print_rt_sigqueueinfo, NULL },
#endif
#ifdef TARGET_NR_rt_sigreturn
-{ TARGET_NR_rt_sigreturn, "rt_sigreturn" , NULL, NULL, NULL },
+{ TARGET_NR_rt_sigreturn, "rt_sigreturn" , "%s(%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_rt_sigsuspend
{ TARGET_NR_rt_sigsuspend, "rt_sigsuspend" , NULL, NULL, NULL },
@@ -1184,16 +1204,19 @@
{ TARGET_NR_rt_tgsigqueueinfo, "rt_tgsigqueueinfo" , NULL, print_rt_tgsigqueueinfo, NULL },
#endif
#ifdef TARGET_NR_sched_getaffinity
-{ TARGET_NR_sched_getaffinity, "sched_getaffinity" , NULL, NULL, NULL },
+{ TARGET_NR_sched_getaffinity, "sched_getaffinity" , "%s(%d,%u,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_sched_get_affinity
{ TARGET_NR_sched_get_affinity, "sched_get_affinity" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_sched_getattr
-{ TARGET_NR_sched_getattr, "sched_getattr" , NULL, NULL, NULL },
+{ TARGET_NR_sched_getattr, "sched_getattr" , "%s(%d,%p,%u,%u)", NULL, NULL },
+#endif
+#ifdef TARGET_NR_sched_setattr
+{ TARGET_NR_sched_setattr, "sched_setattr" , "%s(%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_sched_getparam
-{ TARGET_NR_sched_getparam, "sched_getparam" , NULL, NULL, NULL },
+{ TARGET_NR_sched_getparam, "sched_getparam" , "%s(%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_sched_get_priority_max
{ TARGET_NR_sched_get_priority_max, "sched_get_priority_max" , NULL, NULL, NULL },
@@ -1208,7 +1231,7 @@
{ TARGET_NR_sched_rr_get_interval, "sched_rr_get_interval" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_sched_setaffinity
-{ TARGET_NR_sched_setaffinity, "sched_setaffinity" , NULL, NULL, NULL },
+{ TARGET_NR_sched_setaffinity, "sched_setaffinity" , "%s(%d,%u,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_sched_setatt
{ TARGET_NR_sched_setatt, "sched_setatt" , NULL, NULL, NULL },
@@ -1286,10 +1309,10 @@
{ TARGET_NR_setgid32, "setgid32" , "%s(%u)", NULL, NULL },
#endif
#ifdef TARGET_NR_setgroups
-{ TARGET_NR_setgroups, "setgroups" , NULL, NULL, NULL },
+{ TARGET_NR_setgroups, "setgroups" , "%s(%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_setgroups32
-{ TARGET_NR_setgroups32, "setgroups32" , NULL, NULL, NULL },
+{ TARGET_NR_setgroups32, "setgroups32" , "%s(%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_sethae
{ TARGET_NR_sethae, "sethae" , NULL, NULL, NULL },
@@ -1341,23 +1364,23 @@
{ TARGET_NR_setreuid32, "setreuid32" , "%s(%u,%u)", NULL, NULL },
#endif
#ifdef TARGET_NR_setrlimit
-{ TARGET_NR_setrlimit, "setrlimit" , NULL, NULL, NULL },
+{ TARGET_NR_setrlimit, "setrlimit" , "%s(%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_set_robust_list
-{ TARGET_NR_set_robust_list, "set_robust_list" , NULL, NULL, NULL },
+{ TARGET_NR_set_robust_list, "set_robust_list" , "%s(%p,%u)", NULL, NULL },
#endif
#ifdef TARGET_NR_setsid
{ TARGET_NR_setsid, "setsid" , "%s()", NULL, NULL },
#endif
#ifdef TARGET_NR_setsockopt
-{ TARGET_NR_setsockopt, "setsockopt" , NULL, NULL, NULL },
+{ TARGET_NR_setsockopt, "setsockopt" , "%s(%d,%d,%d,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_set_thread_area
{ TARGET_NR_set_thread_area, "set_thread_area", "%s(0x"TARGET_ABI_FMT_lx")",
NULL, NULL },
#endif
#ifdef TARGET_NR_set_tid_address
-{ TARGET_NR_set_tid_address, "set_tid_address" , NULL, NULL, NULL },
+{ TARGET_NR_set_tid_address, "set_tid_address" , "%s(%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_settimeofday
{ TARGET_NR_settimeofday, "settimeofday" , NULL, print_settimeofday, NULL },
@@ -1375,7 +1398,7 @@
{ TARGET_NR_sgetmask, "sgetmask" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_shmat
-{ TARGET_NR_shmat, "shmat" , NULL, NULL, print_syscall_ret_addr },
+{ TARGET_NR_shmat, "shmat" , NULL, print_shmat, print_syscall_ret_addr },
#endif
#ifdef TARGET_NR_shmctl
{ TARGET_NR_shmctl, "shmctl" , NULL, NULL, NULL },
@@ -1486,7 +1509,7 @@
{ TARGET_NR_sysfs, "sysfs" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_sysinfo
-{ TARGET_NR_sysinfo, "sysinfo" , NULL, NULL, NULL },
+{ TARGET_NR_sysinfo, "sysinfo" , "%s(%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_sys_kexec_load
{ TARGET_NR_sys_kexec_load, "sys_kexec_load" , NULL, NULL, NULL },
@@ -1522,7 +1545,10 @@
{ TARGET_NR_timer_gettime, "timer_gettime" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_timer_settime
-{ TARGET_NR_timer_settime, "timer_settime" , NULL, NULL, NULL },
+{ TARGET_NR_timer_settime, "timer_settime" , "%s(%d,%d,%p,%p)", NULL, NULL },
+#endif
+#ifdef TARGET_NR_timer_settime64
+{ TARGET_NR_timer_settime64, "timer_settime64" , "%s(%d,%d,%p,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_timerfd
{ TARGET_NR_timerfd, "timerfd" , NULL, NULL, NULL },
@@ -1633,7 +1659,7 @@
{ TARGET_NR_vserver, "vserver" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_wait4
-{ TARGET_NR_wait4, "wait4" , NULL, NULL, NULL },
+{ TARGET_NR_wait4, "wait4" , "%s(%d,%p,%d,%p)", NULL, NULL },
#endif
#ifdef TARGET_NR_waitid
{ TARGET_NR_waitid, "waitid" , "%s(%#x,%d,%p,%#x)", NULL, NULL },
@@ -1657,7 +1683,16 @@
{ TARGET_NR_sync_file_range2, "sync_file_range2", NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_pipe2
-{ TARGET_NR_pipe2, "pipe2", NULL, NULL, NULL },
+{ TARGET_NR_pipe2, "pipe2", "%s(%p,%d)", NULL, NULL },
+#endif
+#ifdef TARGET_NR_pidfd_open
+{ TARGET_NR_pidfd_open, "pidfd_open", "%s(%d,%u)", NULL, NULL },
+#endif
+#ifdef TARGET_NR_pidfd_send_signal
+{ TARGET_NR_pidfd_send_signal, "pidfd_send_signal", NULL, print_pidfd_send_signal, NULL },
+#endif
+#ifdef TARGET_NR_pidfd_getfd
+{ TARGET_NR_pidfd_getfd, "pidfd_getfd", "%s(%d,%d,%u)", NULL, NULL },
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
{ TARGET_NR_atomic_cmpxchg_32, "atomic_cmpxchg_32", NULL, NULL, NULL },
@@ -1671,3 +1706,7 @@
#ifdef TARGET_NR_copy_file_range
{ TARGET_NR_copy_file_range, "copy_file_range", "%s(%d,%p,%d,%p,"TARGET_ABI_FMT_lu",%u)", NULL, NULL },
#endif
+#ifdef TARGET_NR_clock_gettime64
+{ TARGET_NR_clock_gettime64, "clock_gettime64" , NULL, print_clock_gettime64,
+ print_syscall_ret_clock_gettime64 },
+#endif
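
The strace.list rows above feed a table whose fields strace.c reads back as scnames[i].nr, .name, .format, .call and .result. Below is a sketch of the corresponding entry layout and of an argument printer matching the CPUArchState-typed prototypes from strace.h; the struct definition shown here is an assumption reconstructed for illustration (the real one lives in strace.c), while print_syscall_prologue(), print_raw_param(), print_pointer() and print_syscall_epilogue() are the helpers used throughout that file:

/* Assumed layout, reconstructed from the scnames[] accesses above. */
struct syscallname {
    int nr;                 /* TARGET_NR_xxx */
    const char *name;       /* e.g. "pidfd_open" */
    const char *format;     /* printf-style fallback, or NULL */
    void (*call)(CPUArchState *, const struct syscallname *,
                 abi_long, abi_long, abi_long,
                 abi_long, abi_long, abi_long);
    void (*result)(CPUArchState *, const struct syscallname *, abi_long,
                   abi_long, abi_long, abi_long,
                   abi_long, abi_long, abi_long);
};

/* A custom argument printer has the same shape as those referenced above. */
static void
print_example(CPUArchState *cpu_env, const struct syscallname *name,
              abi_long arg0, abi_long arg1, abi_long arg2,
              abi_long arg3, abi_long arg4, abi_long arg5)
{
    print_syscall_prologue(name);
    print_raw_param("%d", arg0, 0);
    print_pointer(arg1, 1);
    print_syscall_epilogue(name);
}
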
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 544f5b662f..41659b63f5 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -22,6 +22,9 @@
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
+#include "qemu/plugin.h"
+#include "tcg/startup.h"
+#include "target_mman.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
@@ -132,13 +135,14 @@
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
-#include "safe-syscall.h"
+#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
+#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
-#include "tcg/tcg.h"
+#include "cpu_loop-common.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
@@ -166,9 +170,13 @@
#define CLONE_IGNORED_FLAGS \
(CLONE_DETACHED | CLONE_IO)
+#ifndef CLONE_PIDFD
+# define CLONE_PIDFD 0x00001000
+#endif
+
/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS \
- (CLONE_SETTLS | CLONE_PARENT_SETTID | \
+ (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
/* Flags for thread creation which we can implement within QEMU itself */
@@ -197,8 +205,10 @@
//#define DEBUG_ERESTARTSYS
//#include <linux/msdos_fs.h>
-#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
-#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
+#define VFAT_IOCTL_READDIR_BOTH \
+ _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
+#define VFAT_IOCTL_READDIR_SHORT \
+ _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
#undef _syscall0
#undef _syscall1
@@ -269,9 +279,6 @@ static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
-#define __NR_sys_inotify_init __NR_inotify_init
-#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
-#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
@@ -302,16 +309,16 @@ _syscall0(int, sys_gettid)
#endif
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
-_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
+_syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
#endif
#if (defined(TARGET_NR_getdents) && \
!defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
(defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
-_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
+_syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
-_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
- loff_t *, res, uint, wh);
+_syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
+ loff_t *, res, unsigned int, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
@@ -320,8 +327,12 @@ _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
-#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
-_syscall1(int,set_tid_address,int *,tidptr)
+#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
+#define __NR_sys_close_range __NR_close_range
+_syscall3(int,sys_close_range,int,first,int,last,int,flags)
+#ifndef CLOSE_RANGE_CLOEXEC
+#define CLOSE_RANGE_CLOEXEC (1U << 2)
+#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
@@ -331,12 +342,52 @@ _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
+#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
+_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
+#endif
+#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
+_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
+ unsigned int, flags);
+#endif
+#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
+_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
+#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
+/* sched_attr is not defined in glibc */
+struct sched_attr {
+ uint32_t size;
+ uint32_t sched_policy;
+ uint64_t sched_flags;
+ int32_t sched_nice;
+ uint32_t sched_priority;
+ uint64_t sched_runtime;
+ uint64_t sched_deadline;
+ uint64_t sched_period;
+ uint32_t sched_util_min;
+ uint32_t sched_util_max;
+};
+#define __NR_sys_sched_getattr __NR_sched_getattr
+_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, size, unsigned int, flags);
+#define __NR_sys_sched_setattr __NR_sched_setattr
+_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, flags);
+#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
+_syscall1(int, sys_sched_getscheduler, pid_t, pid);
+#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
+_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
+ const struct sched_param *, param);
+#define __NR_sys_sched_getparam __NR_sched_getparam
+_syscall2(int, sys_sched_getparam, pid_t, pid,
+ struct sched_param *, param);
+#define __NR_sys_sched_setparam __NR_sched_setparam
+_syscall2(int, sys_sched_setparam, pid_t, pid,
+ const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
@@ -404,7 +455,6 @@ static const bitmask_transtbl fcntl_flags_tbl[] = {
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
{ TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
- { 0, 0, 0, 0 }
};
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
@@ -444,33 +494,6 @@ static int sys_renameat2(int oldfd, const char *old,
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
-
-#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
-static int sys_inotify_init(void)
-{
- return (inotify_init());
-}
-#endif
-#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
-static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
-{
- return (inotify_add_watch(fd, pathname, mask));
-}
-#endif
-#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
-static int sys_inotify_rm_watch(int fd, int32_t wd)
-{
- return (inotify_rm_watch(fd, wd));
-}
-#endif
-#ifdef CONFIG_INOTIFY1
-#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
-static int sys_inotify_init1(int flags)
-{
- return (inotify_init1(flags));
-}
-#endif
-#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
@@ -497,20 +520,25 @@ _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
-static timer_t g_posix_timers[32] = { 0, } ;
+#define GUEST_TIMER_MAX 32
+static timer_t g_posix_timers[GUEST_TIMER_MAX];
+static int g_posix_timer_allocated[GUEST_TIMER_MAX];
static inline int next_free_host_timer(void)
{
- int k ;
- /* FIXME: Does finding the next free slot require a lock? */
- for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
- if (g_posix_timers[k] == 0) {
- g_posix_timers[k] = (timer_t) 1;
+ int k;
+ for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
+ if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
return k;
}
}
return -1;
}
+
+static inline void free_host_timer_slot(int id)
+{
+ qatomic_store_release(g_posix_timer_allocated + id, 0);
+}
#endif
static inline int host_to_target_errno(int host_errno)
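
The timer bookkeeping above replaces the old in-band "slot is non-zero" marker with an explicit allocation array: qatomic_xchg() claims a slot without a lock and qatomic_store_release() hands it back. A usage sketch under those assumptions; the function name create_guest_timer, its parameters and the -TARGET_EAGAIN error choice are illustrative, not the actual timer_create path in syscall.c:

/* Illustrative sketch of claiming and releasing a host timer slot. */
static abi_long create_guest_timer(clockid_t host_clock, struct sigevent *sev)
{
    int timerid = next_free_host_timer();
    abi_long ret;

    if (timerid < 0) {
        return -TARGET_EAGAIN;              /* all GUEST_TIMER_MAX slots busy */
    }
    ret = get_errno(timer_create(host_clock, sev, &g_posix_timers[timerid]));
    if (is_error(ret)) {
        free_host_timer_slot(timerid);      /* give the slot back on failure */
        return ret;
    }
    return timerid;        /* the slot index stands in for the guest timer id */
}
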
@@ -535,7 +563,7 @@ static inline int target_to_host_errno(int target_errno)
}
}
-static inline abi_long get_errno(abi_long ret)
+abi_long get_errno(abi_long ret)
{
if (ret == -1)
return -host_to_target_errno(errno);
@@ -545,16 +573,34 @@ static inline abi_long get_errno(abi_long ret)
const char *target_strerror(int err)
{
- if (err == TARGET_ERESTARTSYS) {
+ if (err == QEMU_ERESTARTSYS) {
return "To be restarted";
}
- if (err == TARGET_QEMU_ESIGRETURN) {
+ if (err == QEMU_ESIGRETURN) {
return "Successful exit from sigreturn";
}
return strerror(target_to_host_errno(err));
}
+static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
+{
+ int i;
+ uint8_t b;
+ if (usize <= ksize) {
+ return 1;
+ }
+ for (i = ksize; i < usize; i++) {
+ if (get_user_u8(b, addr + i)) {
+ return -TARGET_EFAULT;
+ }
+ if (b != 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
@@ -613,6 +659,8 @@ safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
+safe_syscall5(int, execveat, int, dirfd, const char *, filename,
+ char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
@@ -752,88 +800,53 @@ static inline int host_to_target_sock_type(int host_type)
return target_type;
}
-static abi_ulong target_brk;
-static abi_ulong target_original_brk;
-static abi_ulong brk_page;
+static abi_ulong target_brk, initial_target_brk;
void target_set_brk(abi_ulong new_brk)
{
- target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
- brk_page = HOST_PAGE_ALIGN(target_brk);
+ target_brk = TARGET_PAGE_ALIGN(new_brk);
+ initial_target_brk = target_brk;
}
-//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
-#define DEBUGF_BRK(message, args...)
-
/* do_brk() must return target values and target errnos. */
-abi_long do_brk(abi_ulong new_brk)
+abi_long do_brk(abi_ulong brk_val)
{
abi_long mapped_addr;
- abi_ulong new_alloc_size;
+ abi_ulong new_brk;
+ abi_ulong old_brk;
/* brk pointers are always untagged */
- DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
-
- if (!new_brk) {
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
+ /* do not allow to shrink below initial brk value */
+ if (brk_val < initial_target_brk) {
return target_brk;
}
- if (new_brk < target_original_brk) {
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
- target_brk);
+
+ new_brk = TARGET_PAGE_ALIGN(brk_val);
+ old_brk = TARGET_PAGE_ALIGN(target_brk);
+
+ /* new and old target_brk might be on the same page */
+ if (new_brk == old_brk) {
+ target_brk = brk_val;
return target_brk;
}
- /* If the new brk is less than the highest page reserved to the
- * target heap allocation, set it and we're almost done... */
- if (new_brk <= brk_page) {
- /* Heap contents are initialized to zero, as for anonymous
- * mapped pages. */
- if (new_brk > target_brk) {
- memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
- }
- target_brk = new_brk;
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
- return target_brk;
- }
+ /* Release heap if necessary */
+ if (new_brk < old_brk) {
+ target_munmap(new_brk, old_brk - new_brk);
- /* We need to allocate more memory after the brk... Note that
- * we don't use MAP_FIXED because that will map over the top of
- * any existing mapping (like the one with the host libc or qemu
- * itself); instead we treat "mapped but at wrong address" as
- * a failure and unmap again.
- */
- new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
- mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
- PROT_READ|PROT_WRITE,
- MAP_ANON|MAP_PRIVATE, 0, 0));
-
- if (mapped_addr == brk_page) {
- /* Heap contents are initialized to zero, as for anonymous
- * mapped pages. Technically the new pages are already
- * initialized to zero since they *are* anonymous mapped
- * pages, however we have to take care with the contents that
- * come from the remaining part of the previous page: it may
- * contains garbage data due to a previous heap usage (grown
- * then shrunken). */
- memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
-
- target_brk = new_brk;
- brk_page = HOST_PAGE_ALIGN(target_brk);
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
- target_brk);
+ target_brk = brk_val;
return target_brk;
- } else if (mapped_addr != -1) {
- /* Mapped but at wrong address, meaning there wasn't actually
- * enough space for this brk.
- */
- target_munmap(mapped_addr, new_alloc_size);
- mapped_addr = -1;
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
}
- else {
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
+
+ mapped_addr = target_mmap(old_brk, new_brk - old_brk,
+ PROT_READ | PROT_WRITE,
+ MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
+ -1, 0);
+
+ if (mapped_addr == old_brk) {
+ target_brk = brk_val;
+ return target_brk;
}
#if defined(TARGET_ALPHA)
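
A short worked example of the rewritten do_brk() above, assuming TARGET_PAGE_SIZE is 0x1000 and the loader set an initial, page-aligned brk of 0x400000; the addresses are illustrative only:

/*
 * Illustrative trace, not part of the patch:
 *
 *   do_brk(0x402800)  new_brk 0x403000, old_brk 0x400000
 *                     -> mmap 0x3000 bytes at 0x400000 with
 *                        MAP_FIXED_NOREPLACE|MAP_ANON|MAP_PRIVATE,
 *                        then target_brk = 0x402800
 *   do_brk(0x402c00)  new_brk == old_brk == 0x403000
 *                     -> same page, just record target_brk = 0x402c00
 *   do_brk(0x400800)  new_brk 0x401000 < old_brk 0x403000
 *                     -> munmap [0x401000, 0x403000), target_brk = 0x400800
 *   do_brk(0x3ff000)  below initial_target_brk
 *                     -> refused, target_brk stays 0x400800
 */
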
@@ -1032,6 +1045,10 @@ static inline int target_to_host_resource(int code)
return RLIMIT_RSS;
case TARGET_RLIMIT_RTPRIO:
return RLIMIT_RTPRIO;
+#ifdef RLIMIT_RTTIME
+ case TARGET_RLIMIT_RTTIME:
+ return RLIMIT_RTTIME;
+#endif
case TARGET_RLIMIT_SIGPENDING:
return RLIMIT_SIGPENDING;
case TARGET_RLIMIT_STACK:
@@ -1368,14 +1385,12 @@ static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
* The 6th arg is actually two args smashed together,
* so we cannot use the C library.
*/
- sigset_t set;
struct {
sigset_t *set;
size_t size;
} sig, *sig_ptr;
abi_ulong arg_sigset, arg_sigsize, *arg7;
- target_sigset_t *target_sigset;
n = arg1;
rfd_addr = arg2;
@@ -1416,10 +1431,8 @@ static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
}
/* Extract the two packed args for the sigset */
+ sig_ptr = NULL;
if (arg6) {
- sig_ptr = &sig;
- sig.size = SIGSET_T_SIZE;
-
arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
if (!arg7) {
return -TARGET_EFAULT;
@@ -1429,28 +1442,22 @@ static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
unlock_user(arg7, arg6, 0);
if (arg_sigset) {
- sig.set = &set;
- if (arg_sigsize != sizeof(*target_sigset)) {
- /* Like the kernel, we enforce correct size sigsets */
- return -TARGET_EINVAL;
- }
- target_sigset = lock_user(VERIFY_READ, arg_sigset,
- sizeof(*target_sigset), 1);
- if (!target_sigset) {
- return -TARGET_EFAULT;
+ ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
+ if (ret != 0) {
+ return ret;
}
- target_to_host_sigset(&set, target_sigset);
- unlock_user(target_sigset, arg_sigset, 0);
- } else {
- sig.set = NULL;
+ sig_ptr = &sig;
+ sig.size = SIGSET_T_SIZE;
}
- } else {
- sig_ptr = NULL;
}
ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
ts_ptr, sig_ptr));
+ if (sig_ptr) {
+ finish_sigsuspend_mask(ret);
+ }
+
if (!is_error(ret)) {
if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
return -TARGET_EFAULT;
@@ -1506,8 +1513,7 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
}
if (ppoll) {
struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
- target_sigset_t *target_set;
- sigset_t _set, *set = &_set;
+ sigset_t *set = NULL;
if (arg3) {
if (time64) {
@@ -1526,25 +1532,19 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
}
if (arg4) {
- if (arg5 != sizeof(target_sigset_t)) {
- unlock_user(target_pfd, arg1, 0);
- return -TARGET_EINVAL;
- }
-
- target_set = lock_user(VERIFY_READ, arg4,
- sizeof(target_sigset_t), 1);
- if (!target_set) {
+ ret = process_sigsuspend_mask(&set, arg4, arg5);
+ if (ret != 0) {
unlock_user(target_pfd, arg1, 0);
- return -TARGET_EFAULT;
+ return ret;
}
- target_to_host_sigset(set, target_set);
- } else {
- set = NULL;
}
ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
set, SIGSET_T_SIZE));
+ if (set) {
+ finish_sigsuspend_mask(ret);
+ }
if (!is_error(ret) && arg3) {
if (time64) {
if (host_to_target_timespec64(arg3, timeout_ts)) {
@@ -1556,9 +1556,6 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
}
}
}
- if (arg4) {
- unlock_user(target_set, arg4, 0);
- }
} else {
struct timespec ts, *pts;
@@ -1584,21 +1581,12 @@ static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
}
#endif
-static abi_long do_pipe2(int host_pipe[], int flags)
-{
-#ifdef CONFIG_PIPE2
- return pipe2(host_pipe, flags);
-#else
- return -ENOSYS;
-#endif
-}
-
-static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
+static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
int flags, int is_pipe2)
{
int host_pipe[2];
abi_long ret;
- ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
+ ret = pipe2(host_pipe, flags);
if (is_error(ret))
return get_errno(ret);
@@ -1607,44 +1595,26 @@ static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
pipe syscall, but didn't replicate this into the pipe2 syscall. */
if (!is_pipe2) {
#if defined(TARGET_ALPHA)
- ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
+ cpu_env->ir[IR_A4] = host_pipe[1];
return host_pipe[0];
#elif defined(TARGET_MIPS)
- ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
+ cpu_env->active_tc.gpr[3] = host_pipe[1];
return host_pipe[0];
#elif defined(TARGET_SH4)
- ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
+ cpu_env->gregs[1] = host_pipe[1];
return host_pipe[0];
#elif defined(TARGET_SPARC)
- ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
+ cpu_env->regwptr[1] = host_pipe[1];
return host_pipe[0];
#endif
}
if (put_user_s32(host_pipe[0], pipedes)
- || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
+ || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
return -TARGET_EFAULT;
return get_errno(ret);
}
-static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
- abi_ulong target_addr,
- socklen_t len)
-{
- struct target_ip_mreqn *target_smreqn;
-
- target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
- if (!target_smreqn)
- return -TARGET_EFAULT;
- mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
- mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
- if (len == sizeof(struct target_ip_mreqn))
- mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
- unlock_user(target_smreqn, target_addr, 0);
-
- return 0;
-}
-
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
abi_ulong target_addr,
socklen_t len)
@@ -1696,6 +1666,11 @@ static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
lladdr = (struct target_sockaddr_ll *)addr;
lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
+ } else if (sa_family == AF_INET6) {
+ struct sockaddr_in6 *in6addr;
+
+ in6addr = (struct sockaddr_in6 *)addr;
+ in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
}
unlock_user(target_saddr, target_addr, 0);
@@ -1812,6 +1787,14 @@ static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
__get_user(cred->pid, &target_cred->pid);
__get_user(cred->uid, &target_cred->uid);
__get_user(cred->gid, &target_cred->gid);
+ } else if (cmsg->cmsg_level == SOL_ALG) {
+ uint32_t *dst = (uint32_t *)data;
+
+ memcpy(dst, target_data, len);
+ /* fix endianness of first 32-bit word */
+ if (len >= sizeof(uint32_t)) {
+ *dst = tswap32(*dst);
+ }
} else {
qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
cmsg->cmsg_level, cmsg->cmsg_type);
@@ -2066,8 +2049,6 @@ static abi_long do_setsockopt(int sockfd, int level, int optname,
{
abi_long ret;
int val;
- struct ip_mreqn *ip_mreq;
- struct ip_mreq_source *ip_mreq_source;
switch(level) {
case SOL_TCP:
@@ -2110,19 +2091,40 @@ static abi_long do_setsockopt(int sockfd, int level, int optname,
break;
case IP_ADD_MEMBERSHIP:
case IP_DROP_MEMBERSHIP:
+ {
+ struct ip_mreqn ip_mreq;
+ struct target_ip_mreqn *target_smreqn;
+
+ QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
+ sizeof(struct target_ip_mreq));
+
if (optlen < sizeof (struct target_ip_mreq) ||
- optlen > sizeof (struct target_ip_mreqn))
+ optlen > sizeof (struct target_ip_mreqn)) {
return -TARGET_EINVAL;
+ }
- ip_mreq = (struct ip_mreqn *) alloca(optlen);
- target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
- ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
- break;
+ target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
+ if (!target_smreqn) {
+ return -TARGET_EFAULT;
+ }
+ ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
+ ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
+ if (optlen == sizeof(struct target_ip_mreqn)) {
+ ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
+ optlen = sizeof(struct ip_mreqn);
+ }
+ unlock_user(target_smreqn, optval_addr, 0);
+ ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
+ break;
+ }
case IP_BLOCK_SOURCE:
case IP_UNBLOCK_SOURCE:
case IP_ADD_SOURCE_MEMBERSHIP:
case IP_DROP_SOURCE_MEMBERSHIP:
+ {
+ struct ip_mreq_source *ip_mreq_source;
+
if (optlen != sizeof (struct target_ip_mreq_source))
return -TARGET_EINVAL;
@@ -2133,7 +2135,7 @@ static abi_long do_setsockopt(int sockfd, int level, int optname,
ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
unlock_user (ip_mreq_source, optval_addr, 0);
break;
-
+ }
default:
goto unimplemented;
}
@@ -2276,18 +2278,13 @@ static abi_long do_setsockopt(int sockfd, int level, int optname,
switch (optname) {
case ALG_SET_KEY:
{
- char *alg_key = g_malloc(optlen);
-
+ char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
if (!alg_key) {
- return -TARGET_ENOMEM;
- }
- if (copy_from_user(alg_key, optval_addr, optlen)) {
- g_free(alg_key);
return -TARGET_EFAULT;
}
ret = get_errno(setsockopt(sockfd, level, optname,
alg_key, optlen));
- g_free(alg_key);
+ unlock_user(alg_key, optval_addr, optlen);
break;
}
case ALG_SET_AEAD_AUTHSIZE:
@@ -2304,12 +2301,10 @@ static abi_long do_setsockopt(int sockfd, int level, int optname,
case TARGET_SOL_SOCKET:
switch (optname) {
case TARGET_SO_RCVTIMEO:
+ case TARGET_SO_SNDTIMEO:
{
struct timeval tv;
- optname = SO_RCVTIMEO;
-
-set_timeout:
if (optlen != sizeof(struct target_timeval)) {
return -TARGET_EINVAL;
}
@@ -2318,13 +2313,12 @@ set_timeout:
return -TARGET_EFAULT;
}
- ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
+ ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
+ optname == TARGET_SO_RCVTIMEO ?
+ SO_RCVTIMEO : SO_SNDTIMEO,
&tv, sizeof(tv)));
return ret;
}
- case TARGET_SO_SNDTIMEO:
- optname = SO_SNDTIMEO;
- goto set_timeout;
case TARGET_SO_ATTACH_FILTER:
{
struct target_sock_fprog *tfprog;
@@ -2741,8 +2735,13 @@ get_timeout:
ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
if (ret < 0)
return ret;
- if (optname == SO_TYPE) {
+ switch (optname) {
+ case SO_TYPE:
val = host_to_target_sock_type(val);
+ break;
+ case SO_ERROR:
+ val = host_to_target_errno(val);
+ break;
}
if (len > lv)
len = lv;
@@ -2914,7 +2913,7 @@ get_timeout:
unlock_user(results, optval_addr, 0);
return ret;
}
- /* swap host endianess to target endianess. */
+ /* swap host endianness to target endianness. */
for (i = 0; i < (len / sizeof(uint32_t)); i++) {
results[i] = tswap32(results[i]);
}
@@ -3263,7 +3262,10 @@ static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
target_vec, count, send);
if (vec == NULL) {
ret = -host_to_target_errno(errno);
- goto out2;
+ /* allow sending packet without any iov, e.g. with MSG_MORE flag */
+ if (!send || ret) {
+ goto out2;
+ }
}
msg.msg_iovlen = count;
msg.msg_iov = vec;
@@ -3294,7 +3296,8 @@ static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
if (fd_trans_host_to_target_data(fd)) {
ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
MIN(msg.msg_iov->iov_len, len));
- } else {
+ }
+ if (!is_error(ret)) {
ret = host_to_target_cmsg(msgp, &msg);
}
if (!is_error(ret)) {
@@ -3314,7 +3317,9 @@ static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
}
out:
- unlock_iovec(vec, target_vec, count, !send);
+ if (vec) {
+ unlock_iovec(vec, target_vec, count, !send);
+ }
out2:
return ret;
}
@@ -3392,7 +3397,17 @@ static abi_long do_accept4(int fd, abi_ulong target_addr,
abi_long ret;
int host_flags;
- host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
+ if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
+ return -TARGET_EINVAL;
+ }
+
+ host_flags = 0;
+ if (flags & TARGET_SOCK_NONBLOCK) {
+ host_flags |= SOCK_NONBLOCK;
+ }
+ if (flags & TARGET_SOCK_CLOEXEC) {
+ host_flags |= SOCK_CLOEXEC;
+ }
if (target_addr == 0) {
return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
@@ -3703,14 +3718,6 @@ static abi_long do_socketcall(int num, abi_ulong vptr)
}
#endif
-#define N_SHM_REGIONS 32
-
-static struct shm_region {
- abi_ulong start;
- abi_ulong size;
- bool in_use;
-} shm_regions[N_SHM_REGIONS];
-
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
@@ -4460,134 +4467,6 @@ static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
return ret;
}
-#ifndef TARGET_FORCE_SHMLBA
-/* For most architectures, SHMLBA is the same as the page size;
- * some architectures have larger values, in which case they should
- * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
- * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
- * and defining its own value for SHMLBA.
- *
- * The kernel also permits SHMLBA to be set by the architecture to a
- * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
- * this means that addresses are rounded to the large size if
- * SHM_RND is set but addresses not aligned to that size are not rejected
- * as long as they are at least page-aligned. Since the only architecture
- * which uses this is ia64 this code doesn't provide for that oddity.
- */
-static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
-{
- return TARGET_PAGE_SIZE;
-}
-#endif
-
-static inline abi_ulong do_shmat(CPUArchState *cpu_env,
- int shmid, abi_ulong shmaddr, int shmflg)
-{
- CPUState *cpu = env_cpu(cpu_env);
- abi_long raddr;
- void *host_raddr;
- struct shmid_ds shm_info;
- int i,ret;
- abi_ulong shmlba;
-
- /* shmat pointers are always untagged */
-
- /* find out the length of the shared memory segment */
- ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
- if (is_error(ret)) {
- /* can't get length, bail out */
- return ret;
- }
-
- shmlba = target_shmlba(cpu_env);
-
- if (shmaddr & (shmlba - 1)) {
- if (shmflg & SHM_RND) {
- shmaddr &= ~(shmlba - 1);
- } else {
- return -TARGET_EINVAL;
- }
- }
- if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
- return -TARGET_EINVAL;
- }
-
- mmap_lock();
-
- /*
- * We're mapping shared memory, so ensure we generate code for parallel
- * execution and flush old translations. This will work up to the level
- * supported by the host -- anything that requires EXCP_ATOMIC will not
- * be atomic with respect to an external process.
- */
- if (!(cpu->tcg_cflags & CF_PARALLEL)) {
- cpu->tcg_cflags |= CF_PARALLEL;
- tb_flush(cpu);
- }
-
- if (shmaddr)
- host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
- else {
- abi_ulong mmap_start;
-
- /* In order to use the host shmat, we need to honor host SHMLBA. */
- mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
-
- if (mmap_start == -1) {
- errno = ENOMEM;
- host_raddr = (void *)-1;
- } else
- host_raddr = shmat(shmid, g2h_untagged(mmap_start),
- shmflg | SHM_REMAP);
- }
-
- if (host_raddr == (void *)-1) {
- mmap_unlock();
- return get_errno((long)host_raddr);
- }
- raddr=h2g((unsigned long)host_raddr);
-
- page_set_flags(raddr, raddr + shm_info.shm_segsz,
- PAGE_VALID | PAGE_RESET | PAGE_READ |
- (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
-
- for (i = 0; i < N_SHM_REGIONS; i++) {
- if (!shm_regions[i].in_use) {
- shm_regions[i].in_use = true;
- shm_regions[i].start = raddr;
- shm_regions[i].size = shm_info.shm_segsz;
- break;
- }
- }
-
- mmap_unlock();
- return raddr;
-
-}
-
-static inline abi_long do_shmdt(abi_ulong shmaddr)
-{
- int i;
- abi_long rv;
-
- /* shmdt pointers are always untagged */
-
- mmap_lock();
-
- for (i = 0; i < N_SHM_REGIONS; ++i) {
- if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
- shm_regions[i].in_use = false;
- page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
- break;
- }
- }
- rv = get_errno(shmdt(g2h_untagged(shmaddr)));
-
- mmap_unlock();
-
- return rv;
-}
-
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
@@ -4674,7 +4553,7 @@ static abi_long do_ipc(CPUArchState *cpu_env,
default:
{
abi_ulong raddr;
- raddr = do_shmat(cpu_env, first, ptr, second);
+ raddr = target_shmat(cpu_env, first, ptr, second);
if (is_error(raddr))
return get_errno(raddr);
if (put_user_ual(raddr, third))
@@ -4687,7 +4566,7 @@ static abi_long do_ipc(CPUArchState *cpu_env,
}
break;
case IPCOP_shmdt:
- ret = do_shmdt(ptr);
+ ret = target_shmdt(ptr);
break;
case IPCOP_shmget:
@@ -4864,7 +4743,7 @@ static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
* We can't fit all the extents into the fixed size buffer.
* Allocate one that is large enough and use it instead.
*/
- host_ifconf = malloc(outbufsz);
+ host_ifconf = g_try_malloc(outbufsz);
if (!host_ifconf) {
return -TARGET_ENOMEM;
}
@@ -4912,7 +4791,7 @@ static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
}
if (free_buf) {
- free(host_ifconf);
+ g_free(host_ifconf);
}
return ret;
@@ -5053,7 +4932,7 @@ do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
target_size = thunk_type_size(arg_type, THUNK_TARGET);
/* construct host copy of urb and metadata */
- lurb = g_try_malloc0(sizeof(struct live_urb));
+ lurb = g_try_new0(struct live_urb, 1);
if (!lurb) {
return -TARGET_ENOMEM;
}
@@ -5164,8 +5043,8 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
{
void *gspec = argptr;
void *cur_data = host_data;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
- int spec_size = thunk_type_size(arg_type, 0);
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
+ int spec_size = thunk_type_size(dm_arg_type, 0);
int i;
for (i = 0; i < host_dm->target_count; i++) {
@@ -5173,7 +5052,7 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
uint32_t next;
int slen;
- thunk_convert(spec, gspec, arg_type, THUNK_HOST);
+ thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
slen = strlen((char*)gspec + spec_size) + 1;
next = spec->next;
spec->next = sizeof(*spec) + slen;
@@ -5213,7 +5092,7 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
uint32_t remaining_data = guest_data_size;
void *cur_data = argptr;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
int nl_size = 12; /* can't use thunk_size due to alignment */
while (1) {
@@ -5225,7 +5104,7 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
host_dm->flags |= DM_BUFFER_FULL_FLAG;
break;
}
- thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
+ thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
strcpy(cur_data + nl_size, nl->name);
cur_data += nl->next;
remaining_data -= nl->next;
@@ -5241,8 +5120,8 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
{
struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
void *cur_data = argptr;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
- int spec_size = thunk_type_size(arg_type, 0);
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
+ int spec_size = thunk_type_size(dm_arg_type, 0);
int i;
for (i = 0; i < host_dm->target_count; i++) {
@@ -5253,7 +5132,7 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
host_dm->flags |= DM_BUFFER_FULL_FLAG;
break;
}
- thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
+ thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
strcpy(cur_data + spec_size, (char*)&spec[1]);
cur_data = argptr + spec->next;
spec = (void*)host_dm + host_dm->data_start + next;
@@ -5281,8 +5160,8 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
uint32_t remaining_data = guest_data_size;
void *cur_data = argptr;
- const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
- int vers_size = thunk_type_size(arg_type, 0);
+ const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
+ int vers_size = thunk_type_size(dm_arg_type, 0);
while (1) {
uint32_t next = vers->next;
@@ -5293,7 +5172,7 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
host_dm->flags |= DM_BUFFER_FULL_FLAG;
break;
}
- thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
+ thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
strcpy(cur_data + vers_size, vers->name);
cur_data += vers->next;
remaining_data -= vers->next;
@@ -5411,7 +5290,7 @@ static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
for (i = 0; i < se->nb_fields; i++) {
if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
assert(*field_types == TYPE_PTRVOID);
- target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
+ target_rt_dev_ptr = argptr + src_offsets[i];
host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
if (*target_rt_dev_ptr != 0) {
*host_rt_dev_ptr = (unsigned long)lock_user_string(
@@ -5699,7 +5578,7 @@ static abi_long do_ioctl(int fd, int cmd, abi_long arg)
if (ie->target_cmd == 0) {
qemu_log_mask(
LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
- return -TARGET_ENOSYS;
+ return -TARGET_ENOTTY;
}
if (ie->target_cmd == cmd)
break;
@@ -5711,7 +5590,7 @@ static abi_long do_ioctl(int fd, int cmd, abi_long arg)
} else if (!ie->host_cmd) {
/* Some architectures define BSD ioctls in their headers
that are not implemented in Linux. */
- return -TARGET_ENOSYS;
+ return -TARGET_ENOTTY;
}
switch(arg_type[0]) {
@@ -5769,7 +5648,7 @@ static abi_long do_ioctl(int fd, int cmd, abi_long arg)
qemu_log_mask(LOG_UNIMP,
"Unsupported ioctl type: cmd=0x%04lx type=%d\n",
(long)cmd, arg_type[0]);
- ret = -TARGET_ENOSYS;
+ ret = -TARGET_ENOTTY;
break;
}
return ret;
@@ -5791,7 +5670,6 @@ static const bitmask_transtbl iflag_tbl[] = {
{ TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
{ TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
{ TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
- { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
@@ -5819,7 +5697,6 @@ static const bitmask_transtbl oflag_tbl[] = {
{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
- { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
@@ -5854,7 +5731,6 @@ static const bitmask_transtbl cflag_tbl[] = {
{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
- { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
@@ -5874,7 +5750,6 @@ static const bitmask_transtbl lflag_tbl[] = {
{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
{ TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
- { 0, 0, 0, 0 }
};
static void target_to_host_termios (void *dst, const void *src)
@@ -5954,9 +5829,15 @@ static const StructEntry struct_termios_def = {
.print = print_termios,
};
+/* If the host does not provide these bits, they may be safely discarded. */
+#ifndef MAP_SYNC
+#define MAP_SYNC 0
+#endif
+#ifndef MAP_UNINITIALIZED
+#define MAP_UNINITIALIZED 0
+#endif
+
static const bitmask_transtbl mmap_flags_tbl[] = {
- { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
- { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
MAP_ANONYMOUS, MAP_ANONYMOUS },
@@ -5974,10 +5855,84 @@ static const bitmask_transtbl mmap_flags_tbl[] = {
Recognize it for the target insofar as we do not want to pass
it through to the host. */
{ TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
- { 0, 0, 0, 0 }
+ { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
+ { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
+ { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
+ MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
+ { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
+ MAP_UNINITIALIZED, MAP_UNINITIALIZED },
};
/*
+ * Arrange for legacy / undefined architecture specific flags to be
+ * ignored by mmap handling code.
+ */
+#ifndef TARGET_MAP_32BIT
+#define TARGET_MAP_32BIT 0
+#endif
+#ifndef TARGET_MAP_HUGE_2MB
+#define TARGET_MAP_HUGE_2MB 0
+#endif
+#ifndef TARGET_MAP_HUGE_1GB
+#define TARGET_MAP_HUGE_1GB 0
+#endif
+
+static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
+ int target_flags, int fd, off_t offset)
+{
+ /*
+ * The historical set of flags that all mmap types implicitly support.
+ */
+ enum {
+ TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
+ | TARGET_MAP_PRIVATE
+ | TARGET_MAP_FIXED
+ | TARGET_MAP_ANONYMOUS
+ | TARGET_MAP_DENYWRITE
+ | TARGET_MAP_EXECUTABLE
+ | TARGET_MAP_UNINITIALIZED
+ | TARGET_MAP_GROWSDOWN
+ | TARGET_MAP_LOCKED
+ | TARGET_MAP_NORESERVE
+ | TARGET_MAP_POPULATE
+ | TARGET_MAP_NONBLOCK
+ | TARGET_MAP_STACK
+ | TARGET_MAP_HUGETLB
+ | TARGET_MAP_32BIT
+ | TARGET_MAP_HUGE_2MB
+ | TARGET_MAP_HUGE_1GB
+ };
+ int host_flags;
+
+ switch (target_flags & TARGET_MAP_TYPE) {
+ case TARGET_MAP_PRIVATE:
+ host_flags = MAP_PRIVATE;
+ break;
+ case TARGET_MAP_SHARED:
+ host_flags = MAP_SHARED;
+ break;
+ case TARGET_MAP_SHARED_VALIDATE:
+ /*
+ * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
+ * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
+ */
+ if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ host_flags = MAP_SHARED_VALIDATE;
+ if (target_flags & TARGET_MAP_SYNC) {
+ host_flags |= MAP_SYNC;
+ }
+ break;
+ default:
+ return -TARGET_EINVAL;
+ }
+ host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
+
+ return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
+}
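
A minimal standalone sketch (not part of this patch; the DEMO_* target flag values are invented) of the table-driven translation that target_to_host_bitmask() performs over mmap_flags_tbl above: each entry pairs a target mask/bits test with the host bits to set when it matches.

#include <stdio.h>
#include <sys/mman.h>

struct xlat { unsigned tmask, tbits, hmask, hbits; };

/* Hypothetical target flag values, chosen only for this demo. */
#define DEMO_MAP_FIXED     0x10
#define DEMO_MAP_ANONYMOUS 0x20

static const struct xlat demo_tbl[] = {
    { DEMO_MAP_FIXED,     DEMO_MAP_FIXED,     MAP_FIXED,     MAP_FIXED     },
    { DEMO_MAP_ANONYMOUS, DEMO_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
};

static unsigned demo_to_host(unsigned tflags)
{
    unsigned hflags = 0;
    for (size_t i = 0; i < sizeof(demo_tbl) / sizeof(demo_tbl[0]); i++) {
        /* When the masked target bits match the entry, set its host bits. */
        if ((tflags & demo_tbl[i].tmask) == demo_tbl[i].tbits) {
            hflags |= demo_tbl[i].hbits;
        }
    }
    return hflags;
}

int main(void)
{
    /* Expect MAP_FIXED | MAP_ANONYMOUS in the host encoding. */
    printf("host flags: %#x\n",
           demo_to_host(DEMO_MAP_FIXED | DEMO_MAP_ANONYMOUS));
    return 0;
}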
+
+/*
* NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
* TARGET_I386 is defined if TARGET_X86_64 is defined
*/
@@ -6291,9 +6246,253 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
return ret;
}
#endif /* defined(TARGET_ABI32 */
-
#endif /* defined(TARGET_I386) */
+/*
+ * These constants are generic. Supply any that are missing from the host.
+ */
+#ifndef PR_SET_NAME
+# define PR_SET_NAME 15
+# define PR_GET_NAME 16
+#endif
+#ifndef PR_SET_FP_MODE
+# define PR_SET_FP_MODE 45
+# define PR_GET_FP_MODE 46
+# define PR_FP_MODE_FR (1 << 0)
+# define PR_FP_MODE_FRE (1 << 1)
+#endif
+#ifndef PR_SVE_SET_VL
+# define PR_SVE_SET_VL 50
+# define PR_SVE_GET_VL 51
+# define PR_SVE_VL_LEN_MASK 0xffff
+# define PR_SVE_VL_INHERIT (1 << 17)
+#endif
+#ifndef PR_PAC_RESET_KEYS
+# define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1 << 0)
+# define PR_PAC_APIBKEY (1 << 1)
+# define PR_PAC_APDAKEY (1 << 2)
+# define PR_PAC_APDBKEY (1 << 3)
+# define PR_PAC_APGAKEY (1 << 4)
+#endif
+#ifndef PR_SET_TAGGED_ADDR_CTRL
+# define PR_SET_TAGGED_ADDR_CTRL 55
+# define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+#endif
+#ifndef PR_MTE_TCF_SHIFT
+# define PR_MTE_TCF_SHIFT 1
+# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+#endif
+#ifndef PR_SET_IO_FLUSHER
+# define PR_SET_IO_FLUSHER 57
+# define PR_GET_IO_FLUSHER 58
+#endif
+#ifndef PR_SET_SYSCALL_USER_DISPATCH
+# define PR_SET_SYSCALL_USER_DISPATCH 59
+#endif
+#ifndef PR_SME_SET_VL
+# define PR_SME_SET_VL 63
+# define PR_SME_GET_VL 64
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17)
+#endif
+
+#include "target_prctl.h"
+
+static abi_long do_prctl_inval0(CPUArchState *env)
+{
+ return -TARGET_EINVAL;
+}
+
+static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
+{
+ return -TARGET_EINVAL;
+}
+
+#ifndef do_prctl_get_fp_mode
+#define do_prctl_get_fp_mode do_prctl_inval0
+#endif
+#ifndef do_prctl_set_fp_mode
+#define do_prctl_set_fp_mode do_prctl_inval1
+#endif
+#ifndef do_prctl_sve_get_vl
+#define do_prctl_sve_get_vl do_prctl_inval0
+#endif
+#ifndef do_prctl_sve_set_vl
+#define do_prctl_sve_set_vl do_prctl_inval1
+#endif
+#ifndef do_prctl_reset_keys
+#define do_prctl_reset_keys do_prctl_inval1
+#endif
+#ifndef do_prctl_set_tagged_addr_ctrl
+#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
+#endif
+#ifndef do_prctl_get_tagged_addr_ctrl
+#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
+#endif
+#ifndef do_prctl_get_unalign
+#define do_prctl_get_unalign do_prctl_inval1
+#endif
+#ifndef do_prctl_set_unalign
+#define do_prctl_set_unalign do_prctl_inval1
+#endif
+#ifndef do_prctl_sme_get_vl
+#define do_prctl_sme_get_vl do_prctl_inval0
+#endif
+#ifndef do_prctl_sme_set_vl
+#define do_prctl_sme_set_vl do_prctl_inval1
+#endif
+
+static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ abi_long ret;
+
+ switch (option) {
+ case PR_GET_PDEATHSIG:
+ {
+ int deathsig;
+ ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
+ arg3, arg4, arg5));
+ if (!is_error(ret) &&
+ put_user_s32(host_to_target_signal(deathsig), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return ret;
+ }
+ case PR_SET_PDEATHSIG:
+ return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
+ arg3, arg4, arg5));
+ case PR_GET_NAME:
+ {
+ void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
+ if (!name) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
+ arg3, arg4, arg5));
+ unlock_user(name, arg2, 16);
+ return ret;
+ }
+ case PR_SET_NAME:
+ {
+ void *name = lock_user(VERIFY_READ, arg2, 16, 1);
+ if (!name) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
+ arg3, arg4, arg5));
+ unlock_user(name, arg2, 0);
+ return ret;
+ }
+ case PR_GET_FP_MODE:
+ return do_prctl_get_fp_mode(env);
+ case PR_SET_FP_MODE:
+ return do_prctl_set_fp_mode(env, arg2);
+ case PR_SVE_GET_VL:
+ return do_prctl_sve_get_vl(env);
+ case PR_SVE_SET_VL:
+ return do_prctl_sve_set_vl(env, arg2);
+ case PR_SME_GET_VL:
+ return do_prctl_sme_get_vl(env);
+ case PR_SME_SET_VL:
+ return do_prctl_sme_set_vl(env, arg2);
+ case PR_PAC_RESET_KEYS:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_reset_keys(env, arg2);
+ case PR_SET_TAGGED_ADDR_CTRL:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_set_tagged_addr_ctrl(env, arg2);
+ case PR_GET_TAGGED_ADDR_CTRL:
+ if (arg2 || arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_get_tagged_addr_ctrl(env);
+
+ case PR_GET_UNALIGN:
+ return do_prctl_get_unalign(env, arg2);
+ case PR_SET_UNALIGN:
+ return do_prctl_set_unalign(env, arg2);
+
+ case PR_CAP_AMBIENT:
+ case PR_CAPBSET_READ:
+ case PR_CAPBSET_DROP:
+ case PR_GET_DUMPABLE:
+ case PR_SET_DUMPABLE:
+ case PR_GET_KEEPCAPS:
+ case PR_SET_KEEPCAPS:
+ case PR_GET_SECUREBITS:
+ case PR_SET_SECUREBITS:
+ case PR_GET_TIMING:
+ case PR_SET_TIMING:
+ case PR_GET_TIMERSLACK:
+ case PR_SET_TIMERSLACK:
+ case PR_MCE_KILL:
+ case PR_MCE_KILL_GET:
+ case PR_GET_NO_NEW_PRIVS:
+ case PR_SET_NO_NEW_PRIVS:
+ case PR_GET_IO_FLUSHER:
+ case PR_SET_IO_FLUSHER:
+ case PR_SET_CHILD_SUBREAPER:
+ case PR_GET_SPECULATION_CTRL:
+ case PR_SET_SPECULATION_CTRL:
+ /* Some prctl options have no pointer arguments and can be passed straight through to the host. */
+ return get_errno(prctl(option, arg2, arg3, arg4, arg5));
+
+ case PR_GET_CHILD_SUBREAPER:
+ {
+ int val;
+ ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
+ arg3, arg4, arg5));
+ if (!is_error(ret) && put_user_s32(val, arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return ret;
+ }
+
+ case PR_GET_TID_ADDRESS:
+ {
+ TaskState *ts = env_cpu(env)->opaque;
+ return put_user_ual(ts->child_tidptr, arg2);
+ }
+
+ case PR_GET_FPEXC:
+ case PR_SET_FPEXC:
+ /* Was used for SPE on PowerPC. */
+ return -TARGET_EINVAL;
+
+ case PR_GET_ENDIAN:
+ case PR_SET_ENDIAN:
+ case PR_GET_FPEMU:
+ case PR_SET_FPEMU:
+ case PR_SET_MM:
+ case PR_GET_SECCOMP:
+ case PR_SET_SECCOMP:
+ case PR_SET_SYSCALL_USER_DISPATCH:
+ case PR_GET_THP_DISABLE:
+ case PR_SET_THP_DISABLE:
+ case PR_GET_TSC:
+ case PR_SET_TSC:
+ /* Disable to prevent the target disabling stuff we need. */
+ return -TARGET_EINVAL;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
+ option);
+ return -TARGET_EINVAL;
+ }
+}
+
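A minimal sketch of the override pattern used for the do_prctl_* hooks above: each hook defaults to a stub returning -EINVAL unless the per-target target_prctl.h has already defined it. All names below are invented for illustration; this is not QEMU code.

#include <errno.h>
#include <stdio.h>

static int generic_inval0(void *env)
{
    return -EINVAL;                 /* hook not provided by this target */
}

/* A target header would provide: #define arch_get_mode my_real_get_mode */
#ifndef arch_get_mode
#define arch_get_mode generic_inval0
#endif

static int dispatch(void *env, int option)
{
    switch (option) {
    case 1:
        return arch_get_mode(env);  /* resolves to the stub by default */
    default:
        return -EINVAL;
    }
}

int main(void)
{
    printf("%d\n", dispatch(NULL, 1));   /* prints a negative errno */
    return 0;
}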
#define NEW_STACK_SIZE 0x40000
@@ -6321,7 +6520,7 @@ static void *clone_func(void *arg)
env = info->env;
cpu = env_cpu(env);
thread_cpu = cpu;
- ts = (TaskState *)cpu->opaque;
+ ts = get_task_state(cpu);
info->tid = sys_gettid();
task_settid(ts);
if (info->child_tidptr)
@@ -6363,7 +6562,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
flags &= ~(CLONE_VFORK | CLONE_VM);
if (flags & CLONE_VM) {
- TaskState *parent_ts = (TaskState *)cpu->opaque;
+ TaskState *parent_ts = get_task_state(cpu);
new_thread_info info;
pthread_attr_t attr;
@@ -6455,8 +6654,19 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
return -TARGET_EINVAL;
}
+#if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
+ if (flags & CLONE_PIDFD) {
+ return -TARGET_EINVAL;
+ }
+#endif
+
+ /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID. */
+ if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
+ return -TARGET_EINVAL;
+ }
+
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
fork_start();
@@ -6464,7 +6674,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
if (ret == 0) {
/* Child Process. */
cpu_clone_regs_child(env, newsp, flags);
- fork_end(1);
+ fork_end(ret);
/* There is a race condition here. The parent process could
theoretically read the TID in the child process before the child
tid is set. This would require using either ptrace
@@ -6475,15 +6685,30 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
put_user_u32(sys_gettid(), child_tidptr);
if (flags & CLONE_PARENT_SETTID)
put_user_u32(sys_gettid(), parent_tidptr);
- ts = (TaskState *)cpu->opaque;
+ ts = get_task_state(cpu);
if (flags & CLONE_SETTLS)
cpu_set_tls (env, newtls);
if (flags & CLONE_CHILD_CLEARTID)
ts->child_tidptr = child_tidptr;
} else {
cpu_clone_regs_parent(env, flags);
- fork_end(0);
+ if (flags & CLONE_PIDFD) {
+ int pid_fd = 0;
+#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
+ int pid_child = ret;
+ pid_fd = pidfd_open(pid_child, 0);
+ if (pid_fd >= 0) {
+ fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
+ | FD_CLOEXEC);
+ } else {
+ pid_fd = 0;
+ }
+#endif
+ put_user_u32(pid_fd, parent_tidptr);
+ }
+ fork_end(ret);
}
+ g_assert(!cpu_in_exclusive_context(cpu));
}
return ret;
}
@@ -6669,6 +6894,14 @@ typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
+struct target_oabi_flock64 {
+ abi_short l_type;
+ abi_short l_whence;
+ abi_llong l_start;
+ abi_llong l_len;
+ abi_int l_pid;
+} QEMU_PACKED;
+
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
abi_ulong target_flock_addr)
{
@@ -6818,6 +7051,10 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
ret = get_errno(safe_fcntl(fd, host_cmd, arg));
if (ret >= 0) {
ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
+ /* Tell 32-bit guests that the fd uses O_LARGEFILE on 64-bit hosts. */
+ if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
+ ret |= TARGET_O_LARGEFILE;
+ }
}
break;
@@ -7023,7 +7260,7 @@ void syscall_init(void)
}
#ifdef TARGET_NR_truncate64
-static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
+static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
abi_long arg2,
abi_long arg3,
abi_long arg4)
@@ -7037,7 +7274,7 @@ static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
#endif
#ifdef TARGET_NR_ftruncate64
-static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
+static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
abi_long arg2,
abi_long arg3,
abi_long arg4)
@@ -7329,15 +7566,23 @@ static inline int target_to_host_mlockall_arg(int arg)
}
#endif
+static inline int target_to_host_msync_arg(abi_long arg)
+{
+ return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
+ ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
+ ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
+ (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
+}
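
A standalone sketch of the translation above: known TARGET_MS_* bits are remapped to the host MS_* values, and any unrecognized bits are passed through unchanged so the host kernel can reject them. The DEMO_MS_* values are invented stand-ins for the target definitions.

#include <stdio.h>
#include <sys/mman.h>

#define DEMO_MS_ASYNC       1
#define DEMO_MS_INVALIDATE  2
#define DEMO_MS_SYNC        4

static int demo_msync_arg(int arg)
{
    return ((arg & DEMO_MS_ASYNC) ? MS_ASYNC : 0) |
           ((arg & DEMO_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
           ((arg & DEMO_MS_SYNC) ? MS_SYNC : 0) |
           /* Unknown bits survive, so the host kernel can reject them. */
           (arg & ~(DEMO_MS_ASYNC | DEMO_MS_INVALIDATE | DEMO_MS_SYNC));
}

int main(void)
{
    printf("%#x\n", demo_msync_arg(DEMO_MS_SYNC | 0x100));
    return 0;
}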
+
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
defined(TARGET_NR_newfstatat))
-static inline abi_long host_to_target_stat64(void *cpu_env,
+static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
abi_ulong target_addr,
struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
- if (((CPUARMState *)cpu_env)->eabi) {
+ if (cpu_env->eabi) {
struct target_eabi_stat64 *target_st;
if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
@@ -7502,15 +7747,16 @@ static int do_safe_futex(int *uaddr, int op, int val,
futexes locally would make futexes shared between multiple processes
tricky. However they're probably useless because guest atomic
operations won't work either. */
-#if defined(TARGET_NR_futex)
-static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
- target_ulong timeout, target_ulong uaddr2, int val3)
+#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
+static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
+ int op, int val, target_ulong timeout,
+ target_ulong uaddr2, int val3)
{
- struct timespec ts, *pts;
+ struct timespec ts, *pts = NULL;
+ void *haddr2 = NULL;
int base_op;
- /* ??? We assume FUTEX_* constants are the same on both host
- and target. */
+ /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
base_op = op & FUTEX_CMD_MASK;
#else
@@ -7519,85 +7765,53 @@ static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
switch (base_op) {
case FUTEX_WAIT:
case FUTEX_WAIT_BITSET:
- if (timeout) {
- pts = &ts;
- target_to_host_timespec(pts, timeout);
- } else {
- pts = NULL;
- }
- return do_safe_futex(g2h(cpu, uaddr),
- op, tswap32(val), pts, NULL, val3);
+ val = tswap32(val);
+ break;
+ case FUTEX_WAIT_REQUEUE_PI:
+ val = tswap32(val);
+ haddr2 = g2h(cpu, uaddr2);
+ break;
+ case FUTEX_LOCK_PI:
+ case FUTEX_LOCK_PI2:
+ break;
case FUTEX_WAKE:
- return do_safe_futex(g2h(cpu, uaddr),
- op, val, NULL, NULL, 0);
+ case FUTEX_WAKE_BITSET:
+ case FUTEX_TRYLOCK_PI:
+ case FUTEX_UNLOCK_PI:
+ timeout = 0;
+ break;
case FUTEX_FD:
- return do_safe_futex(g2h(cpu, uaddr),
- op, val, NULL, NULL, 0);
- case FUTEX_REQUEUE:
+ val = target_to_host_signal(val);
+ timeout = 0;
+ break;
case FUTEX_CMP_REQUEUE:
+ case FUTEX_CMP_REQUEUE_PI:
+ val3 = tswap32(val3);
+ /* fall through */
+ case FUTEX_REQUEUE:
case FUTEX_WAKE_OP:
- /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
- TIMEOUT parameter is interpreted as a uint32_t by the kernel.
- But the prototype takes a `struct timespec *'; insert casts
- to satisfy the compiler. We do not need to tswap TIMEOUT
- since it's not compared to guest memory. */
- pts = (struct timespec *)(uintptr_t) timeout;
- return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
- (base_op == FUTEX_CMP_REQUEUE
- ? tswap32(val3) : val3));
+ /*
+ * For these, the 4th argument is not TIMEOUT, but VAL2.
+ * But the prototype of do_safe_futex takes a pointer, so
+ * insert casts to satisfy the compiler. We do not need
+ * to tswap VAL2 since it's not compared to guest memory.
+ */
+ pts = (struct timespec *)(uintptr_t)timeout;
+ timeout = 0;
+ haddr2 = g2h(cpu, uaddr2);
+ break;
default:
return -TARGET_ENOSYS;
}
-}
-#endif
-
-#if defined(TARGET_NR_futex_time64)
-static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
- int val, target_ulong timeout,
- target_ulong uaddr2, int val3)
-{
- struct timespec ts, *pts;
- int base_op;
-
- /* ??? We assume FUTEX_* constants are the same on both host
- and target. */
-#ifdef FUTEX_CMD_MASK
- base_op = op & FUTEX_CMD_MASK;
-#else
- base_op = op;
-#endif
- switch (base_op) {
- case FUTEX_WAIT:
- case FUTEX_WAIT_BITSET:
- if (timeout) {
- pts = &ts;
- if (target_to_host_timespec64(pts, timeout)) {
- return -TARGET_EFAULT;
- }
- } else {
- pts = NULL;
+ if (timeout) {
+ pts = &ts;
+ if (time64
+ ? target_to_host_timespec64(pts, timeout)
+ : target_to_host_timespec(pts, timeout)) {
+ return -TARGET_EFAULT;
}
- return do_safe_futex(g2h(cpu, uaddr), op,
- tswap32(val), pts, NULL, val3);
- case FUTEX_WAKE:
- return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
- case FUTEX_FD:
- return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
- case FUTEX_REQUEUE:
- case FUTEX_CMP_REQUEUE:
- case FUTEX_WAKE_OP:
- /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
- TIMEOUT parameter is interpreted as a uint32_t by the kernel.
- But the prototype takes a `struct timespec *'; insert casts
- to satisfy the compiler. We do not need to tswap TIMEOUT
- since it's not compared to guest memory. */
- pts = (struct timespec *)(uintptr_t) timeout;
- return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
- (base_op == FUTEX_CMP_REQUEUE
- ? tswap32(val3) : val3));
- default:
- return -TARGET_ENOSYS;
}
+ return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
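
A sketch of the idea behind merging the futex and futex_time64 paths above: a single helper converts either a 32-bit or a 64-bit guest timespec, selected by a bool, instead of duplicating the whole switch. The guest structures here are simplified stand-ins, not the real target layouts, and guest-memory access checks are omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct guest_ts32 { int32_t sec, nsec; };
struct guest_ts64 { int64_t sec, nsec; };

static int fetch_timeout(struct timespec *ts, const void *guest, bool time64)
{
    if (time64) {
        const struct guest_ts64 *g = guest;
        ts->tv_sec = g->sec;
        ts->tv_nsec = g->nsec;
    } else {
        const struct guest_ts32 *g = guest;
        ts->tv_sec = g->sec;
        ts->tv_nsec = g->nsec;
    }
    return 0;   /* a real conversion would also validate guest access */
}

int main(void)
{
    struct guest_ts32 g = { 1, 500 };
    struct timespec ts;

    fetch_timeout(&ts, &g, false);
    printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    return 0;
}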
@@ -7734,10 +7948,10 @@ int host_to_target_waitstatus(int status)
return status;
}
-static int open_self_cmdline(void *cpu_env, int fd)
+static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
- CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
- struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
+ CPUState *cpu = env_cpu(cpu_env);
+ struct linux_binprm *bprm = get_task_state(cpu)->bprm;
int i;
for (i = 0; i < bprm->argc; i++) {
@@ -7751,72 +7965,193 @@ static int open_self_cmdline(void *cpu_env, int fd)
return 0;
}
-static int open_self_maps(void *cpu_env, int fd)
-{
- CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
- TaskState *ts = cpu->opaque;
- GSList *map_info = read_self_maps();
- GSList *s;
- int count;
-
- for (s = map_info; s; s = g_slist_next(s)) {
- MapInfo *e = (MapInfo *) s->data;
+struct open_self_maps_data {
+ TaskState *ts;
+ IntervalTreeRoot *host_maps;
+ int fd;
+ bool smaps;
+};
- if (h2g_valid(e->start)) {
- unsigned long min = e->start;
- unsigned long max = e->end;
- int flags = page_get_flags(h2g(min));
- const char *path;
+/*
+ * Subroutine to output one line of /proc/self/maps,
+ * or one region of /proc/self/smaps.
+ */
- max = h2g_valid(max - 1) ?
- max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
+#ifdef TARGET_HPPA
+# define test_stack(S, E, L) (E == L)
+#else
+# define test_stack(S, E, L) (S == L)
+#endif
- if (page_check_range(h2g(min), max - min, flags) == -1) {
- continue;
- }
+static void open_self_maps_4(const struct open_self_maps_data *d,
+ const MapInfo *mi, abi_ptr start,
+ abi_ptr end, unsigned flags)
+{
+ const struct image_info *info = d->ts->info;
+ const char *path = mi->path;
+ uint64_t offset;
+ int fd = d->fd;
+ int count;
- if (h2g(min) == ts->info->stack_limit) {
- path = "[stack]";
- } else {
- path = e->path;
- }
-
- count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
- " %c%c%c%c %08" PRIx64 " %s %"PRId64,
- h2g(min), h2g(max - 1) + 1,
- (flags & PAGE_READ) ? 'r' : '-',
- (flags & PAGE_WRITE_ORG) ? 'w' : '-',
- (flags & PAGE_EXEC) ? 'x' : '-',
- e->is_priv ? 'p' : '-',
- (uint64_t) e->offset, e->dev, e->inode);
- if (path) {
- dprintf(fd, "%*s%s\n", 73 - count, "", path);
- } else {
- dprintf(fd, "\n");
- }
- }
+ if (test_stack(start, end, info->stack_limit)) {
+ path = "[stack]";
+ } else if (start == info->brk) {
+ path = "[heap]";
+ } else if (start == info->vdso) {
+ path = "[vdso]";
+#ifdef TARGET_X86_64
+ } else if (start == TARGET_VSYSCALL_PAGE) {
+ path = "[vsyscall]";
+#endif
+ }
+
+ /* Except for the null device (MAP_ANON), adjust the offset for this fragment. */
+ offset = mi->offset;
+ if (mi->dev) {
+ uintptr_t hstart = (uintptr_t)g2h_untagged(start);
+ offset += hstart - mi->itree.start;
+ }
+
+ count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
+ " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
+ start, end,
+ (flags & PAGE_READ) ? 'r' : '-',
+ (flags & PAGE_WRITE_ORG) ? 'w' : '-',
+ (flags & PAGE_EXEC) ? 'x' : '-',
+ mi->is_priv ? 'p' : 's',
+ offset, major(mi->dev), minor(mi->dev),
+ (uint64_t)mi->inode);
+ if (path) {
+ dprintf(fd, "%*s%s\n", 73 - count, "", path);
+ } else {
+ dprintf(fd, "\n");
+ }
+
+ if (d->smaps) {
+ unsigned long size = end - start;
+ unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
+ unsigned long size_kb = size >> 10;
+
+ dprintf(fd, "Size: %lu kB\n"
+ "KernelPageSize: %lu kB\n"
+ "MMUPageSize: %lu kB\n"
+ "Rss: 0 kB\n"
+ "Pss: 0 kB\n"
+ "Pss_Dirty: 0 kB\n"
+ "Shared_Clean: 0 kB\n"
+ "Shared_Dirty: 0 kB\n"
+ "Private_Clean: 0 kB\n"
+ "Private_Dirty: 0 kB\n"
+ "Referenced: 0 kB\n"
+ "Anonymous: %lu kB\n"
+ "LazyFree: 0 kB\n"
+ "AnonHugePages: 0 kB\n"
+ "ShmemPmdMapped: 0 kB\n"
+ "FilePmdMapped: 0 kB\n"
+ "Shared_Hugetlb: 0 kB\n"
+ "Private_Hugetlb: 0 kB\n"
+ "Swap: 0 kB\n"
+ "SwapPss: 0 kB\n"
+ "Locked: 0 kB\n"
+ "THPeligible: 0\n"
+ "VmFlags:%s%s%s%s%s%s%s%s\n",
+ size_kb, page_size_kb, page_size_kb,
+ (flags & PAGE_ANON ? size_kb : 0),
+ (flags & PAGE_READ) ? " rd" : "",
+ (flags & PAGE_WRITE_ORG) ? " wr" : "",
+ (flags & PAGE_EXEC) ? " ex" : "",
+ mi->is_priv ? "" : " sh",
+ (flags & PAGE_READ) ? " mr" : "",
+ (flags & PAGE_WRITE_ORG) ? " mw" : "",
+ (flags & PAGE_EXEC) ? " me" : "",
+ mi->is_priv ? "" : " ms");
}
+}
+
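A standalone illustration of the /proc/<pid>/maps line format that open_self_maps_4() emits with dprintf(), including the padding trick that right-aligns the path column; all field values below are made up.

#define _GNU_SOURCE
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    uint64_t start = 0x400000, end = 0x401000, offset = 0;
    int r = 1, w = 0, x = 1, priv = 1;

    int count = dprintf(STDOUT_FILENO,
                        "%08" PRIx64 "-%08" PRIx64 " %c%c%c%c %08" PRIx64
                        " %02x:%02x %" PRId64,
                        start, end,
                        r ? 'r' : '-', w ? 'w' : '-',
                        x ? 'x' : '-', priv ? 'p' : 's',
                        offset, 8, 1, (int64_t)0);
    /* Pad so the path column starts at a fixed offset, as the code above does. */
    dprintf(STDOUT_FILENO, "%*s%s\n", 73 - count, "", "/usr/bin/true");
    return 0;
}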
+/*
+ * Callback for walk_memory_regions, when read_self_maps() fails.
+ * Proceed without the benefit of host /proc/self/maps cross-check.
+ */
+static int open_self_maps_3(void *opaque, target_ulong guest_start,
+ target_ulong guest_end, unsigned long flags)
+{
+ static const MapInfo mi = { .is_priv = true };
+
+ open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
+ return 0;
+}
- free_self_maps(map_info);
+/*
+ * Callback for walk_memory_regions, when read_self_maps() succeeds.
+ */
+static int open_self_maps_2(void *opaque, target_ulong guest_start,
+ target_ulong guest_end, unsigned long flags)
+{
+ const struct open_self_maps_data *d = opaque;
+ uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
+ uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
-#ifdef TARGET_VSYSCALL_PAGE
+#ifdef TARGET_X86_64
/*
- * We only support execution from the vsyscall page.
- * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
+ * Because of the extremely high position of the page within the guest
+ * virtual address space, this is not backed by host memory at all.
+ * Therefore the loop below would fail. This is the only instance
+ * of not having host backing memory.
*/
- count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
- " --xp 00000000 00:00 0",
- TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
- dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
+ if (guest_start == TARGET_VSYSCALL_PAGE) {
+ return open_self_maps_3(opaque, guest_start, guest_end, flags);
+ }
#endif
+ while (1) {
+ IntervalTreeNode *n =
+ interval_tree_iter_first(d->host_maps, host_start, host_start);
+ MapInfo *mi = container_of(n, MapInfo, itree);
+ uintptr_t this_hlast = MIN(host_last, n->last);
+ target_ulong this_gend = h2g(this_hlast) + 1;
+
+ open_self_maps_4(d, mi, guest_start, this_gend, flags);
+
+ if (this_hlast == host_last) {
+ return 0;
+ }
+ host_start = this_hlast + 1;
+ guest_start = h2g(host_start);
+ }
+}
+
+static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
+{
+ struct open_self_maps_data d = {
+ .ts = env_cpu(env)->opaque,
+ .host_maps = read_self_maps(),
+ .fd = fd,
+ .smaps = smaps
+ };
+
+ if (d.host_maps) {
+ walk_memory_regions(&d, open_self_maps_2);
+ free_self_maps(d.host_maps);
+ } else {
+ walk_memory_regions(&d, open_self_maps_3);
+ }
return 0;
}
-static int open_self_stat(void *cpu_env, int fd)
+static int open_self_maps(CPUArchState *cpu_env, int fd)
+{
+ return open_self_maps_1(cpu_env, fd, false);
+}
+
+static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
- CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
- TaskState *ts = cpu->opaque;
+ return open_self_maps_1(cpu_env, fd, true);
+}
+
+static int open_self_stat(CPUArchState *cpu_env, int fd)
+{
+ CPUState *cpu = env_cpu(cpu_env);
+ TaskState *ts = get_task_state(cpu);
g_autoptr(GString) buf = g_string_new(NULL);
int i;
@@ -7829,9 +8164,15 @@ static int open_self_stat(void *cpu_env, int fd)
gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
bin = bin ? bin + 1 : ts->bprm->argv[0];
g_string_printf(buf, "(%.15s) ", bin);
+ } else if (i == 2) {
+ /* task state */
+ g_string_assign(buf, "R "); /* we are running right now */
} else if (i == 3) {
/* ppid */
g_string_printf(buf, FMT_pid " ", getppid());
+ } else if (i == 21) {
+ /* starttime */
+ g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
} else if (i == 27) {
/* stack bottom */
g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
@@ -7848,10 +8189,10 @@ static int open_self_stat(void *cpu_env, int fd)
return 0;
}
-static int open_self_auxv(void *cpu_env, int fd)
+static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
- CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
- TaskState *ts = cpu->opaque;
+ CPUState *cpu = env_cpu(cpu_env);
+ TaskState *ts = get_task_state(cpu);
abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len;
char *ptr;
@@ -7902,16 +8243,46 @@ static int is_proc_myself(const char *filename, const char *entry)
return 0;
}
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
- defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
+static void excp_dump_file(FILE *logfile, CPUArchState *env,
+ const char *fmt, int code)
+{
+ if (logfile) {
+ CPUState *cs = env_cpu(env);
+
+ fprintf(logfile, fmt, code);
+ fprintf(logfile, "Failing executable: %s\n", exec_path);
+ cpu_dump_state(cs, logfile, 0);
+ open_self_maps(env, fileno(logfile));
+ }
+}
+
+void target_exception_dump(CPUArchState *env, const char *fmt, int code)
+{
+ /* dump to console */
+ excp_dump_file(stderr, env, fmt, code);
+
+ /* dump to log file */
+ if (qemu_log_separate()) {
+ FILE *logfile = qemu_log_trylock();
+
+ excp_dump_file(logfile, env, fmt, code);
+ qemu_log_unlock(logfile);
+ }
+}
+
+#include "target_proc.h"
+
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
+ defined(HAVE_ARCH_PROC_CPUINFO) || \
+ defined(HAVE_ARCH_PROC_HARDWARE)
static int is_proc(const char *filename, const char *entry)
{
return strcmp(filename, entry) == 0;
}
#endif
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
-static int open_net_route(void *cpu_env, int fd)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
+static int open_net_route(CPUArchState *cpu_env, int fd)
{
FILE *fp;
char *line = NULL;
@@ -7955,62 +8326,49 @@ static int open_net_route(void *cpu_env, int fd)
}
#endif
-#if defined(TARGET_SPARC)
-static int open_cpuinfo(void *cpu_env, int fd)
-{
- dprintf(fd, "type\t\t: sun4u\n");
- return 0;
-}
-#endif
-
-#if defined(TARGET_HPPA)
-static int open_cpuinfo(void *cpu_env, int fd)
-{
- dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
- dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
- dprintf(fd, "capabilities\t: os32\n");
- dprintf(fd, "model\t\t: 9000/778/B160L\n");
- dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
- return 0;
-}
-#endif
-
-#if defined(TARGET_M68K)
-static int open_hardware(void *cpu_env, int fd)
-{
- dprintf(fd, "Model:\t\tqemu-m68k\n");
- return 0;
-}
-#endif
-
-static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
+int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
+ int flags, mode_t mode, bool safe)
{
+ g_autofree char *proc_name = NULL;
+ const char *pathname;
struct fake_open {
const char *filename;
- int (*fill)(void *cpu_env, int fd);
+ int (*fill)(CPUArchState *cpu_env, int fd);
int (*cmp)(const char *s1, const char *s2);
};
const struct fake_open *fake_open;
static const struct fake_open fakes[] = {
{ "maps", open_self_maps, is_proc_myself },
+ { "smaps", open_self_smaps, is_proc_myself },
{ "stat", open_self_stat, is_proc_myself },
{ "auxv", open_self_auxv, is_proc_myself },
{ "cmdline", open_self_cmdline, is_proc_myself },
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
{ "/proc/net/route", open_net_route, is_proc },
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
+#if defined(HAVE_ARCH_PROC_CPUINFO)
{ "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
-#if defined(TARGET_M68K)
+#if defined(HAVE_ARCH_PROC_HARDWARE)
{ "/proc/hardware", open_hardware, is_proc },
#endif
{ NULL, NULL, NULL }
};
+ /* If this is a file from the /proc/ filesystem, expand it to its full name. */
+ proc_name = realpath(fname, NULL);
+ if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
+ pathname = proc_name;
+ } else {
+ pathname = fname;
+ }
+
if (is_proc_myself(pathname, "exe")) {
- int execfd = qemu_getauxval(AT_EXECFD);
- return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
+ if (safe) {
+ return safe_openat(dirfd, exec_path, flags, mode);
+ } else {
+ return openat(dirfd, exec_path, flags, mode);
+ }
}
for (fake_open = fakes; fake_open->filename; fake_open++) {
@@ -8024,16 +8382,22 @@ static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags,
char filename[PATH_MAX];
int fd, r;
- /* create temporary file to map stat to */
- tmpdir = getenv("TMPDIR");
- if (!tmpdir)
- tmpdir = "/tmp";
- snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
- fd = mkstemp(filename);
+ fd = memfd_create("qemu-open", 0);
if (fd < 0) {
- return fd;
+ if (errno != ENOSYS) {
+ return fd;
+ }
+ /* create temporary file to map stat to */
+ tmpdir = getenv("TMPDIR");
+ if (!tmpdir)
+ tmpdir = "/tmp";
+ snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
+ fd = mkstemp(filename);
+ if (fd < 0) {
+ return fd;
+ }
+ unlink(filename);
}
- unlink(filename);
if ((r = fake_open->fill(cpu_env, fd))) {
int e = errno;
@@ -8046,7 +8410,157 @@ static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags,
return fd;
}
- return safe_openat(dirfd, path(pathname), flags, mode);
+ if (safe) {
+ return safe_openat(dirfd, path(pathname), flags, mode);
+ } else {
+ return openat(dirfd, path(pathname), flags, mode);
+ }
+}
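
A sketch of the memfd_create-with-mkstemp-fallback pattern that the fake /proc files use above (not QEMU code; names are invented and error handling is trimmed).

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static int make_scratch_fd(void)
{
    int fd = memfd_create("scratch", 0);

    if (fd < 0 && errno == ENOSYS) {
        /* Old kernel without memfd_create: use an unlinked temp file. */
        const char *tmpdir = getenv("TMPDIR");
        char name[4096];

        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(name, sizeof(name), "%s/scratch.XXXXXX", tmpdir);
        fd = mkstemp(name);
        if (fd >= 0) {
            unlink(name);
        }
    }
    return fd;
}

int main(void)
{
    int fd = make_scratch_fd();

    if (fd >= 0) {
        dprintf(fd, "hello\n");
        close(fd);
    }
    return 0;
}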
+
+ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
+{
+ ssize_t ret;
+
+ if (!pathname || !buf) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (!bufsiz) {
+ /* Short circuit this for the magic exe check. */
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (is_proc_myself((const char *)pathname, "exe")) {
+ /*
+ * Don't worry about sign mismatch as earlier mapping
+ * logic would have thrown a bad address error.
+ */
+ ret = MIN(strlen(exec_path), bufsiz);
+ /* We cannot NUL terminate the string. */
+ memcpy(buf, exec_path, ret);
+ } else {
+ ret = readlink(path(pathname), buf, bufsiz);
+ }
+
+ return ret;
+}
+
+static int do_execv(CPUArchState *cpu_env, int dirfd,
+ abi_long pathname, abi_long guest_argp,
+ abi_long guest_envp, int flags, bool is_execveat)
+{
+ int ret;
+ char **argp, **envp;
+ int argc, envc;
+ abi_ulong gp;
+ abi_ulong addr;
+ char **q;
+ void *p;
+
+ argc = 0;
+
+ for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
+ if (get_user_ual(addr, gp)) {
+ return -TARGET_EFAULT;
+ }
+ if (!addr) {
+ break;
+ }
+ argc++;
+ }
+ envc = 0;
+ for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
+ if (get_user_ual(addr, gp)) {
+ return -TARGET_EFAULT;
+ }
+ if (!addr) {
+ break;
+ }
+ envc++;
+ }
+
+ argp = g_new0(char *, argc + 1);
+ envp = g_new0(char *, envc + 1);
+
+ for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp)) {
+ goto execve_efault;
+ }
+ if (!addr) {
+ break;
+ }
+ *q = lock_user_string(addr);
+ if (!*q) {
+ goto execve_efault;
+ }
+ }
+ *q = NULL;
+
+ for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp)) {
+ goto execve_efault;
+ }
+ if (!addr) {
+ break;
+ }
+ *q = lock_user_string(addr);
+ if (!*q) {
+ goto execve_efault;
+ }
+ }
+ *q = NULL;
+
+ /*
+ * Although execve() is not an interruptible syscall it is
+ * a special case where we must use the safe_syscall wrapper:
+ * if we allow a signal to happen before we make the host
+ * syscall then we will 'lose' it, because at the point of
+ * execve the process leaves QEMU's control. So we use the
+ * safe syscall wrapper to ensure that we either take the
+ * signal as a guest signal, or else it does not happen
+ * before the execve completes and makes it the other
+ * program's problem.
+ */
+ p = lock_user_string(pathname);
+ if (!p) {
+ goto execve_efault;
+ }
+
+ const char *exe = p;
+ if (is_proc_myself(p, "exe")) {
+ exe = exec_path;
+ }
+ ret = is_execveat
+ ? safe_execveat(dirfd, exe, argp, envp, flags)
+ : safe_execve(exe, argp, envp);
+ ret = get_errno(ret);
+
+ unlock_user(p, pathname, 0);
+
+ goto execve_end;
+
+execve_efault:
+ ret = -TARGET_EFAULT;
+
+execve_end:
+ for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp) || !addr) {
+ break;
+ }
+ unlock_user(*q, addr, 0);
+ }
+ for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp) || !addr) {
+ break;
+ }
+ unlock_user(*q, addr, 0);
+ }
+
+ g_free(argp);
+ g_free(envp);
+ return ret;
}
#define TIMER_MAGIC 0x0caf0000
@@ -8137,16 +8651,430 @@ static int host_to_target_cpu_mask(const unsigned long *host_mask,
return 0;
}
+#ifdef TARGET_NR_getdents
+static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
+{
+ g_autofree void *hdirp = NULL;
+ void *tdirp;
+ int hlen, hoff, toff;
+ int hreclen, treclen;
+ off64_t prev_diroff = 0;
+
+ hdirp = g_try_malloc(count);
+ if (!hdirp) {
+ return -TARGET_ENOMEM;
+ }
+
+#ifdef EMULATE_GETDENTS_WITH_GETDENTS
+ hlen = sys_getdents(dirfd, hdirp, count);
+#else
+ hlen = sys_getdents64(dirfd, hdirp, count);
+#endif
+
+ hlen = get_errno(hlen);
+ if (is_error(hlen)) {
+ return hlen;
+ }
+
+ tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
+ if (!tdirp) {
+ return -TARGET_EFAULT;
+ }
+
+ for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
+#ifdef EMULATE_GETDENTS_WITH_GETDENTS
+ struct linux_dirent *hde = hdirp + hoff;
+#else
+ struct linux_dirent64 *hde = hdirp + hoff;
+#endif
+ struct target_dirent *tde = tdirp + toff;
+ int namelen;
+ uint8_t type;
+
+ namelen = strlen(hde->d_name);
+ hreclen = hde->d_reclen;
+ treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
+ treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
+
+ if (toff + treclen > count) {
+ /*
+ * If the host struct is smaller than the target struct, or
+ * requires less alignment and thus packs into less space,
+ * then the host can return more entries than we can pass
+ * on to the guest.
+ */
+ if (toff == 0) {
+ toff = -TARGET_EINVAL; /* result buffer is too small */
+ break;
+ }
+ /*
+ * Return what we have, resetting the file pointer to the
+ * location of the first record not returned.
+ */
+ lseek64(dirfd, prev_diroff, SEEK_SET);
+ break;
+ }
+
+ prev_diroff = hde->d_off;
+ tde->d_ino = tswapal(hde->d_ino);
+ tde->d_off = tswapal(hde->d_off);
+ tde->d_reclen = tswap16(treclen);
+ memcpy(tde->d_name, hde->d_name, namelen + 1);
+
+ /*
+ * The getdents type is in what was formerly a padding byte at the
+ * end of the structure.
+ */
+#ifdef EMULATE_GETDENTS_WITH_GETDENTS
+ type = *((uint8_t *)hde + hreclen - 1);
+#else
+ type = hde->d_type;
+#endif
+ *((uint8_t *)tde + treclen - 1) = type;
+ }
+
+ unlock_user(tdirp, arg2, toff);
+ return toff;
+}
+#endif /* TARGET_NR_getdents */
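
A worked sketch of the record-size arithmetic used when repacking dirents above: the target record length is the header plus the name, the terminating NUL, and (for the old getdents layout) the trailing d_type byte, rounded up to the target struct alignment. The struct below is illustrative, not any particular target's layout.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define ALIGN_UP(n, a)  (((n) + (a) - 1) & ~((a) - 1))

struct demo_dirent {
    unsigned long d_ino;
    unsigned long d_off;
    unsigned short d_reclen;
    char d_name[];
};

int main(void)
{
    const char *name = "example.txt";
    size_t namelen = strlen(name);
    /* +2: the terminating NUL plus the trailing d_type byte. */
    size_t reclen = offsetof(struct demo_dirent, d_name) + namelen + 2;

    reclen = ALIGN_UP(reclen, __alignof__(struct demo_dirent));
    printf("record length: %zu bytes\n", reclen);
    return 0;
}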
+
+#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
+static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
+{
+ g_autofree void *hdirp = NULL;
+ void *tdirp;
+ int hlen, hoff, toff;
+ int hreclen, treclen;
+ off64_t prev_diroff = 0;
+
+ hdirp = g_try_malloc(count);
+ if (!hdirp) {
+ return -TARGET_ENOMEM;
+ }
+
+ hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
+ if (is_error(hlen)) {
+ return hlen;
+ }
+
+ tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
+ if (!tdirp) {
+ return -TARGET_EFAULT;
+ }
+
+ for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
+ struct linux_dirent64 *hde = hdirp + hoff;
+ struct target_dirent64 *tde = tdirp + toff;
+ int namelen;
+
+ namelen = strlen(hde->d_name) + 1;
+ hreclen = hde->d_reclen;
+ treclen = offsetof(struct target_dirent64, d_name) + namelen;
+ treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
+
+ if (toff + treclen > count) {
+ /*
+ * If the host struct is smaller than the target struct, or
+ * requires less alignment and thus packs into less space,
+ * then the host can return more entries than we can pass
+ * on to the guest.
+ */
+ if (toff == 0) {
+ toff = -TARGET_EINVAL; /* result buffer is too small */
+ break;
+ }
+ /*
+ * Return what we have, resetting the file pointer to the
+ * location of the first record not returned.
+ */
+ lseek64(dirfd, prev_diroff, SEEK_SET);
+ break;
+ }
+
+ prev_diroff = hde->d_off;
+ tde->d_ino = tswap64(hde->d_ino);
+ tde->d_off = tswap64(hde->d_off);
+ tde->d_reclen = tswap16(treclen);
+ tde->d_type = hde->d_type;
+ memcpy(tde->d_name, hde->d_name, namelen);
+ }
+
+ unlock_user(tdirp, arg2, toff);
+ return toff;
+}
+#endif /* TARGET_NR_getdents64 */
+
+#if defined(TARGET_NR_riscv_hwprobe)
+
+#define RISCV_HWPROBE_KEY_MVENDORID 0
+#define RISCV_HWPROBE_KEY_MARCHID 1
+#define RISCV_HWPROBE_KEY_MIMPID 2
+
+#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
+#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
+
+#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
+#define RISCV_HWPROBE_IMA_FD (1 << 0)
+#define RISCV_HWPROBE_IMA_C (1 << 1)
+#define RISCV_HWPROBE_IMA_V (1 << 2)
+#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
+#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
+#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
+#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
+#define RISCV_HWPROBE_EXT_ZBC (1 << 7)
+#define RISCV_HWPROBE_EXT_ZBKB (1 << 8)
+#define RISCV_HWPROBE_EXT_ZBKC (1 << 9)
+#define RISCV_HWPROBE_EXT_ZBKX (1 << 10)
+#define RISCV_HWPROBE_EXT_ZKND (1 << 11)
+#define RISCV_HWPROBE_EXT_ZKNE (1 << 12)
+#define RISCV_HWPROBE_EXT_ZKNH (1 << 13)
+#define RISCV_HWPROBE_EXT_ZKSED (1 << 14)
+#define RISCV_HWPROBE_EXT_ZKSH (1 << 15)
+#define RISCV_HWPROBE_EXT_ZKT (1 << 16)
+#define RISCV_HWPROBE_EXT_ZVBB (1 << 17)
+#define RISCV_HWPROBE_EXT_ZVBC (1 << 18)
+#define RISCV_HWPROBE_EXT_ZVKB (1 << 19)
+#define RISCV_HWPROBE_EXT_ZVKG (1 << 20)
+#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21)
+#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22)
+#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23)
+#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24)
+#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25)
+#define RISCV_HWPROBE_EXT_ZVKT (1 << 26)
+#define RISCV_HWPROBE_EXT_ZFH (1 << 27)
+#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
+#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
+#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
+#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
+#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
+#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
+#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)
+
+#define RISCV_HWPROBE_KEY_CPUPERF_0 5
+#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
+#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
+#define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
+#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
+#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
+#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
+
+#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
+
+struct riscv_hwprobe {
+ abi_llong key;
+ abi_ullong value;
+};
+
+static void risc_hwprobe_fill_pairs(CPURISCVState *env,
+ struct riscv_hwprobe *pair,
+ size_t pair_count)
+{
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
+
+ for (; pair_count > 0; pair_count--, pair++) {
+ abi_llong key;
+ abi_ullong value;
+ __put_user(0, &pair->value);
+ __get_user(key, &pair->key);
+ switch (key) {
+ case RISCV_HWPROBE_KEY_MVENDORID:
+ __put_user(cfg->mvendorid, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_MARCHID:
+ __put_user(cfg->marchid, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_MIMPID:
+ __put_user(cfg->mimpid, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+ value = riscv_has_ext(env, RVI) &&
+ riscv_has_ext(env, RVM) &&
+ riscv_has_ext(env, RVA) ?
+ RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
+ __put_user(value, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_IMA_EXT_0:
+ value = riscv_has_ext(env, RVF) &&
+ riscv_has_ext(env, RVD) ?
+ RISCV_HWPROBE_IMA_FD : 0;
+ value |= riscv_has_ext(env, RVC) ?
+ RISCV_HWPROBE_IMA_C : 0;
+ value |= riscv_has_ext(env, RVV) ?
+ RISCV_HWPROBE_IMA_V : 0;
+ value |= cfg->ext_zba ?
+ RISCV_HWPROBE_EXT_ZBA : 0;
+ value |= cfg->ext_zbb ?
+ RISCV_HWPROBE_EXT_ZBB : 0;
+ value |= cfg->ext_zbs ?
+ RISCV_HWPROBE_EXT_ZBS : 0;
+ value |= cfg->ext_zicboz ?
+ RISCV_HWPROBE_EXT_ZICBOZ : 0;
+ value |= cfg->ext_zbc ?
+ RISCV_HWPROBE_EXT_ZBC : 0;
+ value |= cfg->ext_zbkb ?
+ RISCV_HWPROBE_EXT_ZBKB : 0;
+ value |= cfg->ext_zbkc ?
+ RISCV_HWPROBE_EXT_ZBKC : 0;
+ value |= cfg->ext_zbkx ?
+ RISCV_HWPROBE_EXT_ZBKX : 0;
+ value |= cfg->ext_zknd ?
+ RISCV_HWPROBE_EXT_ZKND : 0;
+ value |= cfg->ext_zkne ?
+ RISCV_HWPROBE_EXT_ZKNE : 0;
+ value |= cfg->ext_zknh ?
+ RISCV_HWPROBE_EXT_ZKNH : 0;
+ value |= cfg->ext_zksed ?
+ RISCV_HWPROBE_EXT_ZKSED : 0;
+ value |= cfg->ext_zksh ?
+ RISCV_HWPROBE_EXT_ZKSH : 0;
+ value |= cfg->ext_zkt ?
+ RISCV_HWPROBE_EXT_ZKT : 0;
+ value |= cfg->ext_zvbb ?
+ RISCV_HWPROBE_EXT_ZVBB : 0;
+ value |= cfg->ext_zvbc ?
+ RISCV_HWPROBE_EXT_ZVBC : 0;
+ value |= cfg->ext_zvkb ?
+ RISCV_HWPROBE_EXT_ZVKB : 0;
+ value |= cfg->ext_zvkg ?
+ RISCV_HWPROBE_EXT_ZVKG : 0;
+ value |= cfg->ext_zvkned ?
+ RISCV_HWPROBE_EXT_ZVKNED : 0;
+ value |= cfg->ext_zvknha ?
+ RISCV_HWPROBE_EXT_ZVKNHA : 0;
+ value |= cfg->ext_zvknhb ?
+ RISCV_HWPROBE_EXT_ZVKNHB : 0;
+ value |= cfg->ext_zvksed ?
+ RISCV_HWPROBE_EXT_ZVKSED : 0;
+ value |= cfg->ext_zvksh ?
+ RISCV_HWPROBE_EXT_ZVKSH : 0;
+ value |= cfg->ext_zvkt ?
+ RISCV_HWPROBE_EXT_ZVKT : 0;
+ value |= cfg->ext_zfh ?
+ RISCV_HWPROBE_EXT_ZFH : 0;
+ value |= cfg->ext_zfhmin ?
+ RISCV_HWPROBE_EXT_ZFHMIN : 0;
+ value |= cfg->ext_zihintntl ?
+ RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
+ value |= cfg->ext_zvfh ?
+ RISCV_HWPROBE_EXT_ZVFH : 0;
+ value |= cfg->ext_zvfhmin ?
+ RISCV_HWPROBE_EXT_ZVFHMIN : 0;
+ value |= cfg->ext_zfa ?
+ RISCV_HWPROBE_EXT_ZFA : 0;
+ value |= cfg->ext_ztso ?
+ RISCV_HWPROBE_EXT_ZTSO : 0;
+ value |= cfg->ext_zacas ?
+ RISCV_HWPROBE_EXT_ZACAS : 0;
+ value |= cfg->ext_zicond ?
+ RISCV_HWPROBE_EXT_ZICOND : 0;
+ __put_user(value, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_CPUPERF_0:
+ __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
+ break;
+ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
+ value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
+ __put_user(value, &pair->value);
+ break;
+ default:
+ __put_user(-1, &pair->key);
+ break;
+ }
+ }
+}
+
+static int cpu_set_valid(abi_long arg3, abi_long arg4)
+{
+ int ret, i, tmp;
+ size_t host_mask_size, target_mask_size;
+ unsigned long *host_mask;
+
+ /*
+ * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
+ * arg3 contains the cpu count.
+ */
+ tmp = (8 * sizeof(abi_ulong));
+ target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
+ host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
+ ~(sizeof(*host_mask) - 1);
+
+ host_mask = alloca(host_mask_size);
+
+ ret = target_to_host_cpu_mask(host_mask, host_mask_size,
+ arg4, target_mask_size);
+ if (ret != 0) {
+ return ret;
+ }
+
+ for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
+ if (host_mask[i] != 0) {
+ return 0;
+ }
+ }
+ return -TARGET_EINVAL;
+}
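
A worked example of the mask sizing arithmetic in cpu_set_valid() above, assuming a 32-bit guest (4-byte abi_ulong) on a 64-bit host; the numbers are illustrative only.

#include <stdio.h>

int main(void)
{
    unsigned cpu_count = 70;                     /* arg3: CPUs in the set */
    unsigned abi_ulong_size = 4;                 /* assume a 32-bit guest */
    unsigned host_long_size = sizeof(long);      /* typically 8 */

    unsigned bits = 8 * abi_ulong_size;
    unsigned target_mask_size =
        ((cpu_count + bits - 1) / bits) * abi_ulong_size;        /* 12 */
    unsigned host_mask_size =
        (target_mask_size + host_long_size - 1) & ~(host_long_size - 1);

    printf("target mask %u bytes, host mask %u bytes\n",
           target_mask_size, host_mask_size);                    /* 12, 16 */
    return 0;
}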
+
+static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
+ abi_long arg2, abi_long arg3,
+ abi_long arg4, abi_long arg5)
+{
+ int ret;
+ struct riscv_hwprobe *host_pairs;
+
+ /* flags must be 0 */
+ if (arg5 != 0) {
+ return -TARGET_EINVAL;
+ }
+
+ /* check cpu_set */
+ if (arg3 != 0) {
+ ret = cpu_set_valid(arg3, arg4);
+ if (ret != 0) {
+ return ret;
+ }
+ } else if (arg4 != 0) {
+ return -TARGET_EINVAL;
+ }
+
+ /* no pairs */
+ if (arg2 == 0) {
+ return 0;
+ }
+
+ host_pairs = lock_user(VERIFY_WRITE, arg1,
+ sizeof(*host_pairs) * (size_t)arg2, 0);
+ if (host_pairs == NULL) {
+ return -TARGET_EFAULT;
+ }
+ risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
+ unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
+ return 0;
+}
+#endif /* TARGET_NR_riscv_hwprobe */
+
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
+#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
+#define __NR_sys_open_tree __NR_open_tree
+_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
+ unsigned int, __flags)
+#endif
+
+#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
+#define __NR_sys_move_mount __NR_move_mount
+_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
+ int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
+#endif
+
/* This is an internal helper for do_syscall so that it is easier
* to have a single return point, so that actions, such as logging
* of syscall results, can be performed.
* All errnos that do_syscall() returns must be -TARGET_<errcode>.
*/
-static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
+static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8)
@@ -8173,15 +9101,21 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
Do thread termination if we have more than one thread. */
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
pthread_mutex_lock(&clone_lock);
if (CPU_NEXT(first_cpu)) {
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
+
+ if (ts->child_tidptr) {
+ put_user_u32(0, ts->child_tidptr);
+ do_sys_futex(g2h(cpu, ts->child_tidptr),
+ FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
+ }
- object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
+ object_unparent(OBJECT(cpu));
object_unref(OBJECT(cpu));
/*
* At this point the CPU should be unrealized and removed
@@ -8191,11 +9125,6 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
pthread_mutex_unlock(&clone_lock);
- if (ts->child_tidptr) {
- put_user_u32(0, ts->child_tidptr);
- do_sys_futex(g2h(cpu, ts->child_tidptr),
- FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
- }
thread_cpu = NULL;
g_free(ts);
rcu_unregister_thread();
@@ -8244,9 +9173,9 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_open:
if (!(p = lock_user_string(arg1)))
return -TARGET_EFAULT;
- ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
+ ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
target_to_host_bitmask(arg2, fcntl_flags_tbl),
- arg3));
+ arg3, true));
fd_trans_unregister(ret);
unlock_user(p, arg1, 0);
return ret;
@@ -8254,9 +9183,9 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_openat:
if (!(p = lock_user_string(arg2)))
return -TARGET_EFAULT;
- ret = get_errno(do_openat(cpu_env, arg1, p,
+ ret = get_errno(do_guest_openat(cpu_env, arg1, p,
target_to_host_bitmask(arg3, fcntl_flags_tbl),
- arg4));
+ arg4, true));
fd_trans_unregister(ret);
unlock_user(p, arg2, 0);
return ret;
@@ -8271,9 +9200,50 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
fd_trans_unregister(ret);
return ret;
#endif
+#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
+ case TARGET_NR_pidfd_open:
+ return get_errno(pidfd_open(arg1, arg2));
+#endif
+#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
+ case TARGET_NR_pidfd_send_signal:
+ {
+ siginfo_t uinfo, *puinfo;
+
+ if (arg3) {
+ p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
+ if (!p) {
+ return -TARGET_EFAULT;
+ }
+ target_to_host_siginfo(&uinfo, p);
+ unlock_user(p, arg3, 0);
+ puinfo = &uinfo;
+ } else {
+ puinfo = NULL;
+ }
+ ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
+ puinfo, arg4));
+ }
+ return ret;
+#endif
+#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
+ case TARGET_NR_pidfd_getfd:
+ return get_errno(pidfd_getfd(arg1, arg2, arg3));
+#endif
case TARGET_NR_close:
fd_trans_unregister(arg1);
return get_errno(close(arg1));
+#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
+ case TARGET_NR_close_range:
+ ret = get_errno(sys_close_range(arg1, arg2, arg3));
+ if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
+ abi_long fd, maxfd;
+ maxfd = MIN(arg2, target_fd_max);
+ for (fd = arg1; fd < maxfd; fd++) {
+ fd_trans_unregister(fd);
+ }
+ }
+ return ret;
+#endif
case TARGET_NR_brk:
return do_brk(arg1);
@@ -8295,14 +9265,24 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_waitid
case TARGET_NR_waitid:
{
+ struct rusage ru;
siginfo_t info;
- info.si_pid = 0;
- ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
- if (!is_error(ret) && arg3 && info.si_pid != 0) {
- if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
+
+ ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
+ arg4, (arg5 ? &ru : NULL)));
+ if (!is_error(ret)) {
+ if (arg3) {
+ p = lock_user(VERIFY_WRITE, arg3,
+ sizeof(target_siginfo_t), 0);
+ if (!p) {
+ return -TARGET_EFAULT;
+ }
+ host_to_target_siginfo(p, &info);
+ unlock_user(p, arg3, sizeof(target_siginfo_t));
+ }
+ if (arg5 && host_to_target_rusage(arg5, &ru)) {
return -TARGET_EFAULT;
- host_to_target_siginfo(p, &info);
- unlock_user(p, arg3, sizeof(target_siginfo_t));
+ }
}
}
return ret;
@@ -8364,100 +9344,10 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
unlock_user(p, arg2, 0);
return ret;
#endif
+ case TARGET_NR_execveat:
+ return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
case TARGET_NR_execve:
- {
- char **argp, **envp;
- int argc, envc;
- abi_ulong gp;
- abi_ulong guest_argp;
- abi_ulong guest_envp;
- abi_ulong addr;
- char **q;
-
- argc = 0;
- guest_argp = arg2;
- for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
- if (get_user_ual(addr, gp))
- return -TARGET_EFAULT;
- if (!addr)
- break;
- argc++;
- }
- envc = 0;
- guest_envp = arg3;
- for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
- if (get_user_ual(addr, gp))
- return -TARGET_EFAULT;
- if (!addr)
- break;
- envc++;
- }
-
- argp = g_new0(char *, argc + 1);
- envp = g_new0(char *, envc + 1);
-
- for (gp = guest_argp, q = argp; gp;
- gp += sizeof(abi_ulong), q++) {
- if (get_user_ual(addr, gp))
- goto execve_efault;
- if (!addr)
- break;
- if (!(*q = lock_user_string(addr)))
- goto execve_efault;
- }
- *q = NULL;
-
- for (gp = guest_envp, q = envp; gp;
- gp += sizeof(abi_ulong), q++) {
- if (get_user_ual(addr, gp))
- goto execve_efault;
- if (!addr)
- break;
- if (!(*q = lock_user_string(addr)))
- goto execve_efault;
- }
- *q = NULL;
-
- if (!(p = lock_user_string(arg1)))
- goto execve_efault;
- /* Although execve() is not an interruptible syscall it is
- * a special case where we must use the safe_syscall wrapper:
- * if we allow a signal to happen before we make the host
- * syscall then we will 'lose' it, because at the point of
- * execve the process leaves QEMU's control. So we use the
- * safe syscall wrapper to ensure that we either take the
- * signal as a guest signal, or else it does not happen
- * before the execve completes and makes it the other
- * program's problem.
- */
- ret = get_errno(safe_execve(p, argp, envp));
- unlock_user(p, arg1, 0);
-
- goto execve_end;
-
- execve_efault:
- ret = -TARGET_EFAULT;
-
- execve_end:
- for (gp = guest_argp, q = argp; *q;
- gp += sizeof(abi_ulong), q++) {
- if (get_user_ual(addr, gp)
- || !addr)
- break;
- unlock_user(*q, addr, 0);
- }
- for (gp = guest_envp, q = envp; *q;
- gp += sizeof(abi_ulong), q++) {
- if (get_user_ual(addr, gp)
- || !addr)
- break;
- unlock_user(*q, addr, 0);
- }
-
- g_free(argp);
- g_free(envp);
- }
- return ret;
+ return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
case TARGET_NR_chdir:
if (!(p = lock_user_string(arg1)))
return -TARGET_EFAULT;
@@ -8507,7 +9397,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
/* Alpha specific */
case TARGET_NR_getxpid:
- ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
+ cpu_env->ir[IR_A4] = getppid();
return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
@@ -8582,6 +9472,60 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
unlock_user(p, arg1, 0);
return ret;
#endif
+#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
+ case TARGET_NR_move_mount:
+ {
+ void *p2, *p4;
+
+ if (!arg2 || !arg4) {
+ return -TARGET_EFAULT;
+ }
+
+ p2 = lock_user_string(arg2);
+ if (!p2) {
+ return -TARGET_EFAULT;
+ }
+
+ p4 = lock_user_string(arg4);
+ if (!p4) {
+ unlock_user(p2, arg2, 0);
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
+
+ unlock_user(p2, arg2, 0);
+ unlock_user(p4, arg4, 0);
+
+ return ret;
+ }
+#endif
+#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
+ case TARGET_NR_open_tree:
+ {
+ void *p2;
+ int host_flags;
+
+ if (!arg2) {
+ return -TARGET_EFAULT;
+ }
+
+ p2 = lock_user_string(arg2);
+ if (!p2) {
+ return -TARGET_EFAULT;
+ }
+
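+ /* Only O_CLOEXEC needs translating; the remaining open_tree flags use the same values as the host. */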
+ host_flags = arg3 & ~TARGET_O_CLOEXEC;
+ if (arg3 & TARGET_O_CLOEXEC) {
+ host_flags |= O_CLOEXEC;
+ }
+
+ ret = get_errno(sys_open_tree(arg1, p2, host_flags));
+
+ unlock_user(p2, arg2, 0);
+
+ return ret;
+ }
+#endif
#ifdef TARGET_NR_stime /* not on alpha */
case TARGET_NR_stime:
{
@@ -8600,7 +9544,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_pause /* not on alpha */
case TARGET_NR_pause:
if (!block_signals()) {
- sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
+ sigsuspend(&get_task_state(cpu)->signal_mask);
}
return -TARGET_EINTR;
#endif
@@ -8685,6 +9629,15 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
unlock_user(p, arg2, 0);
return ret;
#endif
+#if defined(TARGET_NR_faccessat2)
+ case TARGET_NR_faccessat2:
+ if (!(p = lock_user_string(arg2))) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(faccessat(arg1, p, arg3, arg4));
+ unlock_user(p, arg2, 0);
+ return ret;
+#endif
#ifdef TARGET_NR_nice /* not on alpha */
case TARGET_NR_nice:
return get_errno(nice(arg1));
@@ -9030,13 +9983,20 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
if (!is_error(ret)) {
host_to_target_old_sigset(&mask, &oldset);
ret = mask;
- ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
+ cpu_env->ir[IR_V0] = 0; /* force no error */
}
#else
sigset_t set, oldset, *set_ptr;
int how;
if (arg2) {
+ p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
+ if (!p) {
+ return -TARGET_EFAULT;
+ }
+ target_to_host_old_sigset(&set, p);
+ unlock_user(p, arg2, 0);
+ set_ptr = &set;
switch (arg1) {
case TARGET_SIG_BLOCK:
how = SIG_BLOCK;
@@ -9050,11 +10010,6 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
default:
return -TARGET_EINVAL;
}
- if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
- return -TARGET_EFAULT;
- target_to_host_old_sigset(&set, p);
- unlock_user(p, arg2, 0);
- set_ptr = &set;
} else {
how = 0;
set_ptr = NULL;
@@ -9080,6 +10035,13 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
}
if (arg2) {
+ p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
+ if (!p) {
+ return -TARGET_EFAULT;
+ }
+ target_to_host_sigset(&set, p);
+ unlock_user(p, arg2, 0);
+ set_ptr = &set;
switch(how) {
case TARGET_SIG_BLOCK:
how = SIG_BLOCK;
@@ -9093,11 +10055,6 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
default:
return -TARGET_EINVAL;
}
- if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
- return -TARGET_EFAULT;
- target_to_host_sigset(&set, p);
- unlock_user(p, arg2, 0);
- set_ptr = &set;
} else {
how = 0;
set_ptr = NULL;
@@ -9150,40 +10107,35 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_sigsuspend
case TARGET_NR_sigsuspend:
{
- TaskState *ts = cpu->opaque;
+ sigset_t *set;
+
#if defined(TARGET_ALPHA)
- abi_ulong mask = arg1;
- target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
+ TaskState *ts = get_task_state(cpu);
+ /* target_to_host_old_sigset will bswap back */
+ abi_ulong mask = tswapal(arg1);
+ set = &ts->sigsuspend_mask;
+ target_to_host_old_sigset(set, &mask);
#else
- if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
- return -TARGET_EFAULT;
- target_to_host_old_sigset(&ts->sigsuspend_mask, p);
- unlock_user(p, arg1, 0);
-#endif
- ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
- SIGSET_T_SIZE));
- if (ret != -TARGET_ERESTARTSYS) {
- ts->in_sigsuspend = 1;
+ ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
+ if (ret != 0) {
+ return ret;
}
+#endif
+ ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
+ finish_sigsuspend_mask(ret);
}
return ret;
#endif
case TARGET_NR_rt_sigsuspend:
{
- TaskState *ts = cpu->opaque;
+ sigset_t *set;
- if (arg2 != sizeof(target_sigset_t)) {
- return -TARGET_EINVAL;
- }
- if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
- return -TARGET_EFAULT;
- target_to_host_sigset(&ts->sigsuspend_mask, p);
- unlock_user(p, arg1, 0);
- ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
- SIGSET_T_SIZE));
- if (ret != -TARGET_ERESTARTSYS) {
- ts->in_sigsuspend = 1;
+ ret = process_sigsuspend_mask(&set, arg1, arg2);
+ if (ret != 0) {
+ return ret;
}
+ ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
+ finish_sigsuspend_mask(ret);
}
return ret;
#ifdef TARGET_NR_rt_sigtimedwait
@@ -9278,7 +10230,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
}
target_to_host_siginfo(&uinfo, p);
unlock_user(p, arg3, 0);
- ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
+ ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
}
return ret;
case TARGET_NR_rt_tgsigqueueinfo:
@@ -9291,19 +10243,19 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
}
target_to_host_siginfo(&uinfo, p);
unlock_user(p, arg4, 0);
- ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
+ ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
}
return ret;
#ifdef TARGET_NR_sigreturn
case TARGET_NR_sigreturn:
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
return do_sigreturn(cpu_env);
#endif
case TARGET_NR_rt_sigreturn:
if (block_signals()) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
return do_rt_sigreturn(cpu_env);
case TARGET_NR_sethostname:
@@ -9466,27 +10418,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
void *p2;
p = lock_user_string(arg1);
p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
- if (!p || !p2) {
- ret = -TARGET_EFAULT;
- } else if (!arg3) {
- /* Short circuit this for the magic exe check. */
- ret = -TARGET_EINVAL;
- } else if (is_proc_myself((const char *)p, "exe")) {
- char real[PATH_MAX], *temp;
- temp = realpath(exec_path, real);
- /* Return value is # of bytes that we wrote to the buffer. */
- if (temp == NULL) {
- ret = get_errno(-1);
- } else {
- /* Don't worry about sign mismatch as earlier mapping
- * logic would have thrown a bad address error. */
- ret = MIN(strlen(real), arg3);
- /* We cannot NUL terminate the string. */
- memcpy(p2, real, ret);
- }
- } else {
- ret = get_errno(readlink(path(p), p2, arg3));
- }
+ ret = get_errno(do_guest_readlink(p, p2, arg3));
unlock_user(p2, arg2, ret);
unlock_user(p, arg1, 0);
}
@@ -9500,11 +10432,17 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
if (!p || !p2) {
ret = -TARGET_EFAULT;
+ } else if (!arg4) {
+ /* Short circuit this for the magic exe check. */
+ ret = -TARGET_EINVAL;
} else if (is_proc_myself((const char *)p, "exe")) {
- char real[PATH_MAX], *temp;
- temp = realpath(exec_path, real);
- ret = temp == NULL ? get_errno(-1) : strlen(real) ;
- snprintf((char *)p2, arg4, "%s", real);
+ /*
+ * Don't worry about sign mismatch as earlier mapping
+ * logic would have thrown a bad address error.
+ */
+ ret = MIN(strlen(exec_path), arg4);
+ /* We cannot NUL terminate the string. */
+ memcpy(p2, exec_path, ret);
} else {
ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
}
@@ -9552,28 +10490,20 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
v5 = tswapal(v[4]);
v6 = tswapal(v[5]);
unlock_user(v, arg1, 0);
- ret = get_errno(target_mmap(v1, v2, v3,
- target_to_host_bitmask(v4, mmap_flags_tbl),
- v5, v6));
+ return do_mmap(v1, v2, v3, v4, v5, v6);
}
#else
/* mmap pointers are always untagged */
- ret = get_errno(target_mmap(arg1, arg2, arg3,
- target_to_host_bitmask(arg4, mmap_flags_tbl),
- arg5,
- arg6));
+ return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
- return ret;
#endif
#ifdef TARGET_NR_mmap2
case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
- ret = target_mmap(arg1, arg2, arg3,
- target_to_host_bitmask(arg4, mmap_flags_tbl),
- arg5, arg6 << MMAP_SHIFT);
- return get_errno(ret);
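+ /* arg6 is a page offset; widen it unsigned before shifting to avoid sign extension. */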
+ return do_mmap(arg1, arg2, arg3, arg4, arg5,
+ (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
#endif
case TARGET_NR_munmap:
arg1 = cpu_untagged_addr(cpu, arg1);
@@ -9581,7 +10511,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_mprotect:
arg1 = cpu_untagged_addr(cpu, arg1);
{
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
/* Special hack to detect libc making the stack executable. */
if ((arg3 & PROT_GROWSDOWN)
&& arg1 >= ts->info->stack_limit
@@ -9601,7 +10531,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
/* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
case TARGET_NR_msync:
- return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
+ return get_errno(msync(g2h(cpu, arg1), arg2,
+ target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
case TARGET_NR_mlock:
@@ -9651,7 +10582,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
}
#ifdef TARGET_ALPHA
/* Return value is the unbiased priority. Signal no error. */
- ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
+ cpu_env->ir[IR_V0] = 0;
#else
/* Return value is a biased priority to avoid negative numbers. */
ret = 20 - ret;
@@ -10083,11 +11014,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_shmat
case TARGET_NR_shmat:
- return do_shmat(cpu_env, arg1, arg2, arg3);
+ return target_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
case TARGET_NR_shmdt:
- return do_shmdt(arg1);
+ return target_shmdt(arg1);
#endif
case TARGET_NR_fsync:
return get_errno(fsync(arg1));
@@ -10170,16 +11101,14 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
case TARGET_NR_clock_adjtime:
{
- struct timex htx, *phtx = &htx;
+ struct timex htx;
- if (target_to_host_timex(phtx, arg2) != 0) {
+ if (target_to_host_timex(&htx, arg2) != 0) {
return -TARGET_EFAULT;
}
- ret = get_errno(clock_adjtime(arg1, phtx));
- if (!is_error(ret) && phtx) {
- if (host_to_target_timex(arg2, phtx) != 0) {
- return -TARGET_EFAULT;
- }
+ ret = get_errno(clock_adjtime(arg1, &htx));
+ if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
+ return -TARGET_EFAULT;
}
}
return ret;
@@ -10227,162 +11156,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_getdents
case TARGET_NR_getdents:
-#ifdef EMULATE_GETDENTS_WITH_GETDENTS
-#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
- {
- struct target_dirent *target_dirp;
- struct linux_dirent *dirp;
- abi_long count = arg3;
-
- dirp = g_try_malloc(count);
- if (!dirp) {
- return -TARGET_ENOMEM;
- }
-
- ret = get_errno(sys_getdents(arg1, dirp, count));
- if (!is_error(ret)) {
- struct linux_dirent *de;
- struct target_dirent *tde;
- int len = ret;
- int reclen, treclen;
- int count1, tnamelen;
-
- count1 = 0;
- de = dirp;
- if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
- return -TARGET_EFAULT;
- tde = target_dirp;
- while (len > 0) {
- reclen = de->d_reclen;
- tnamelen = reclen - offsetof(struct linux_dirent, d_name);
- assert(tnamelen >= 0);
- treclen = tnamelen + offsetof(struct target_dirent, d_name);
- assert(count1 + treclen <= count);
- tde->d_reclen = tswap16(treclen);
- tde->d_ino = tswapal(de->d_ino);
- tde->d_off = tswapal(de->d_off);
- memcpy(tde->d_name, de->d_name, tnamelen);
- de = (struct linux_dirent *)((char *)de + reclen);
- len -= reclen;
- tde = (struct target_dirent *)((char *)tde + treclen);
- count1 += treclen;
- }
- ret = count1;
- unlock_user(target_dirp, arg2, ret);
- }
- g_free(dirp);
- }
-#else
- {
- struct linux_dirent *dirp;
- abi_long count = arg3;
-
- if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
- return -TARGET_EFAULT;
- ret = get_errno(sys_getdents(arg1, dirp, count));
- if (!is_error(ret)) {
- struct linux_dirent *de;
- int len = ret;
- int reclen;
- de = dirp;
- while (len > 0) {
- reclen = de->d_reclen;
- if (reclen > len)
- break;
- de->d_reclen = tswap16(reclen);
- tswapls(&de->d_ino);
- tswapls(&de->d_off);
- de = (struct linux_dirent *)((char *)de + reclen);
- len -= reclen;
- }
- }
- unlock_user(dirp, arg2, ret);
- }
-#endif
-#else
- /* Implement getdents in terms of getdents64 */
- {
- struct linux_dirent64 *dirp;
- abi_long count = arg3;
-
- dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
- if (!dirp) {
- return -TARGET_EFAULT;
- }
- ret = get_errno(sys_getdents64(arg1, dirp, count));
- if (!is_error(ret)) {
- /* Convert the dirent64 structs to target dirent. We do this
- * in-place, since we can guarantee that a target_dirent is no
- * larger than a dirent64; however this means we have to be
- * careful to read everything before writing in the new format.
- */
- struct linux_dirent64 *de;
- struct target_dirent *tde;
- int len = ret;
- int tlen = 0;
-
- de = dirp;
- tde = (struct target_dirent *)dirp;
- while (len > 0) {
- int namelen, treclen;
- int reclen = de->d_reclen;
- uint64_t ino = de->d_ino;
- int64_t off = de->d_off;
- uint8_t type = de->d_type;
-
- namelen = strlen(de->d_name);
- treclen = offsetof(struct target_dirent, d_name)
- + namelen + 2;
- treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
-
- memmove(tde->d_name, de->d_name, namelen + 1);
- tde->d_ino = tswapal(ino);
- tde->d_off = tswapal(off);
- tde->d_reclen = tswap16(treclen);
- /* The target_dirent type is in what was formerly a padding
- * byte at the end of the structure:
- */
- *(((char *)tde) + treclen - 1) = type;
-
- de = (struct linux_dirent64 *)((char *)de + reclen);
- tde = (struct target_dirent *)((char *)tde + treclen);
- len -= reclen;
- tlen += treclen;
- }
- ret = tlen;
- }
- unlock_user(dirp, arg2, ret);
- }
-#endif
- return ret;
+ return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
case TARGET_NR_getdents64:
- {
- struct linux_dirent64 *dirp;
- abi_long count = arg3;
- if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
- return -TARGET_EFAULT;
- ret = get_errno(sys_getdents64(arg1, dirp, count));
- if (!is_error(ret)) {
- struct linux_dirent64 *de;
- int len = ret;
- int reclen;
- de = dirp;
- while (len > 0) {
- reclen = de->d_reclen;
- if (reclen > len)
- break;
- de->d_reclen = tswap16(reclen);
- tswap64s((uint64_t *)&de->d_ino);
- tswap64s((uint64_t *)&de->d_off);
- de = (struct linux_dirent64 *)((char *)de + reclen);
- len -= reclen;
- }
- }
- unlock_user(dirp, arg2, ret);
- }
- return ret;
+ return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
case TARGET_NR__newselect:
@@ -10528,14 +11306,14 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
}
case TARGET_NR_getcpu:
{
- unsigned cpu, node;
- ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
+ unsigned cpuid, node;
+ ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
arg2 ? &node : NULL,
NULL));
if (is_error(ret)) {
return ret;
}
- if (arg1 && put_user_u32(cpu, arg1)) {
+ if (arg1 && put_user_u32(cpuid, arg1)) {
return -TARGET_EFAULT;
}
if (arg2 && put_user_u32(node, arg2)) {
@@ -10545,30 +11323,32 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
case TARGET_NR_sched_setparam:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg2 == 0) {
return -TARGET_EINVAL;
}
- if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
+ if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
return -TARGET_EFAULT;
+ }
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg2, 0);
- return get_errno(sched_setparam(arg1, &schp));
+ return get_errno(sys_sched_setparam(arg1, &schp));
}
case TARGET_NR_sched_getparam:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg2 == 0) {
return -TARGET_EINVAL;
}
- ret = get_errno(sched_getparam(arg1, &schp));
+ ret = get_errno(sys_sched_getparam(arg1, &schp));
if (!is_error(ret)) {
- if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
+ if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
return -TARGET_EFAULT;
+ }
target_schp->sched_priority = tswap32(schp.sched_priority);
unlock_user_struct(target_schp, arg2, 1);
}
@@ -10576,19 +11356,106 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
case TARGET_NR_sched_setscheduler:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg3 == 0) {
return -TARGET_EINVAL;
}
- if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
+ if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
return -TARGET_EFAULT;
+ }
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg3, 0);
- return get_errno(sched_setscheduler(arg1, arg2, &schp));
+ return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
}
case TARGET_NR_sched_getscheduler:
- return get_errno(sched_getscheduler(arg1));
+ return get_errno(sys_sched_getscheduler(arg1));
+ case TARGET_NR_sched_getattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
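+ /* Never ask the kernel for more than we know how to convert back. */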
+ if (arg3 > sizeof(scha)) {
+ arg3 = sizeof(scha);
+ }
+ ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
+ if (!is_error(ret)) {
+ target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ target_scha->size = tswap32(scha.size);
+ target_scha->sched_policy = tswap32(scha.sched_policy);
+ target_scha->sched_flags = tswap64(scha.sched_flags);
+ target_scha->sched_nice = tswap32(scha.sched_nice);
+ target_scha->sched_priority = tswap32(scha.sched_priority);
+ target_scha->sched_runtime = tswap64(scha.sched_runtime);
+ target_scha->sched_deadline = tswap64(scha.sched_deadline);
+ target_scha->sched_period = tswap64(scha.sched_period);
+ if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
+ target_scha->sched_util_min = tswap32(scha.sched_util_min);
+ target_scha->sched_util_max = tswap32(scha.sched_util_max);
+ }
+ unlock_user(target_scha, arg2, arg3);
+ }
+ return ret;
+ }
+ case TARGET_NR_sched_setattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ uint32_t size;
+ int zeroed;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
+ if (get_user_u32(size, arg2)) {
+ return -TARGET_EFAULT;
+ }
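+ /* A size of zero means the original sched_attr layout without the utilization clamp fields. */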
+ if (!size) {
+ size = offsetof(struct target_sched_attr, sched_util_min);
+ }
+ if (size < offsetof(struct target_sched_attr, sched_util_min)) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+
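+ /* Any bytes beyond the layout we know about must be zero. */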
+ zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
+ if (zeroed < 0) {
+ return zeroed;
+ } else if (zeroed == 0) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+ if (size > sizeof(struct target_sched_attr)) {
+ size = sizeof(struct target_sched_attr);
+ }
+
+ target_scha = lock_user(VERIFY_READ, arg2, size, 1);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ scha.size = size;
+ scha.sched_policy = tswap32(target_scha->sched_policy);
+ scha.sched_flags = tswap64(target_scha->sched_flags);
+ scha.sched_nice = tswap32(target_scha->sched_nice);
+ scha.sched_priority = tswap32(target_scha->sched_priority);
+ scha.sched_runtime = tswap64(target_scha->sched_runtime);
+ scha.sched_deadline = tswap64(target_scha->sched_deadline);
+ scha.sched_period = tswap64(target_scha->sched_period);
+ if (size > offsetof(struct target_sched_attr, sched_util_min)) {
+ scha.sched_util_min = tswap32(target_scha->sched_util_min);
+ scha.sched_util_max = tswap32(target_scha->sched_util_max);
+ }
+ unlock_user(target_scha, arg2, 0);
+ return get_errno(sys_sched_setattr(arg1, &scha, arg3));
+ }
case TARGET_NR_sched_yield:
return get_errno(sched_yield());
case TARGET_NR_sched_get_priority_max:
@@ -10630,290 +11497,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
#endif
case TARGET_NR_prctl:
- switch (arg1) {
- case PR_GET_PDEATHSIG:
- {
- int deathsig;
- ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
- if (!is_error(ret) && arg2
- && put_user_s32(deathsig, arg2)) {
- return -TARGET_EFAULT;
- }
- return ret;
- }
-#ifdef PR_GET_NAME
- case PR_GET_NAME:
- {
- void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
- if (!name) {
- return -TARGET_EFAULT;
- }
- ret = get_errno(prctl(arg1, (unsigned long)name,
- arg3, arg4, arg5));
- unlock_user(name, arg2, 16);
- return ret;
- }
- case PR_SET_NAME:
- {
- void *name = lock_user(VERIFY_READ, arg2, 16, 1);
- if (!name) {
- return -TARGET_EFAULT;
- }
- ret = get_errno(prctl(arg1, (unsigned long)name,
- arg3, arg4, arg5));
- unlock_user(name, arg2, 0);
- return ret;
- }
-#endif
-#ifdef TARGET_MIPS
- case TARGET_PR_GET_FP_MODE:
- {
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
- ret = 0;
- if (env->CP0_Status & (1 << CP0St_FR)) {
- ret |= TARGET_PR_FP_MODE_FR;
- }
- if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
- ret |= TARGET_PR_FP_MODE_FRE;
- }
- return ret;
- }
- case TARGET_PR_SET_FP_MODE:
- {
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
- bool old_fr = env->CP0_Status & (1 << CP0St_FR);
- bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
- bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
- bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
-
- const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
- TARGET_PR_FP_MODE_FRE;
-
- /* If nothing to change, return right away, successfully. */
- if (old_fr == new_fr && old_fre == new_fre) {
- return 0;
- }
- /* Check the value is valid */
- if (arg2 & ~known_bits) {
- return -TARGET_EOPNOTSUPP;
- }
- /* Setting FRE without FR is not supported. */
- if (new_fre && !new_fr) {
- return -TARGET_EOPNOTSUPP;
- }
- if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
- /* FR1 is not supported */
- return -TARGET_EOPNOTSUPP;
- }
- if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
- && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
- /* cannot set FR=0 */
- return -TARGET_EOPNOTSUPP;
- }
- if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
- /* Cannot set FRE=1 */
- return -TARGET_EOPNOTSUPP;
- }
-
- int i;
- fpr_t *fpr = env->active_fpu.fpr;
- for (i = 0; i < 32 ; i += 2) {
- if (!old_fr && new_fr) {
- fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
- } else if (old_fr && !new_fr) {
- fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
- }
- }
-
- if (new_fr) {
- env->CP0_Status |= (1 << CP0St_FR);
- env->hflags |= MIPS_HFLAG_F64;
- } else {
- env->CP0_Status &= ~(1 << CP0St_FR);
- env->hflags &= ~MIPS_HFLAG_F64;
- }
- if (new_fre) {
- env->CP0_Config5 |= (1 << CP0C5_FRE);
- if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
- env->hflags |= MIPS_HFLAG_FRE;
- }
- } else {
- env->CP0_Config5 &= ~(1 << CP0C5_FRE);
- env->hflags &= ~MIPS_HFLAG_FRE;
- }
-
- return 0;
- }
-#endif /* MIPS */
-#ifdef TARGET_AARCH64
- case TARGET_PR_SVE_SET_VL:
- /*
- * We cannot support either PR_SVE_SET_VL_ONEXEC or
- * PR_SVE_VL_INHERIT. Note the kernel definition
- * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
- * even though the current architectural maximum is VQ=16.
- */
- ret = -TARGET_EINVAL;
- if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
- && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
- uint32_t vq, old_vq;
-
- old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
- vq = MAX(arg2 / 16, 1);
- vq = MIN(vq, cpu->sve_max_vq);
-
- if (vq < old_vq) {
- aarch64_sve_narrow_vq(env, vq);
- }
- env->vfp.zcr_el[1] = vq - 1;
- arm_rebuild_hflags(env);
- ret = vq * 16;
- }
- return ret;
- case TARGET_PR_SVE_GET_VL:
- ret = -TARGET_EINVAL;
- {
- ARMCPU *cpu = env_archcpu(cpu_env);
- if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
- }
- }
- return ret;
- case TARGET_PR_PAC_RESET_KEYS:
- {
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- if (cpu_isar_feature(aa64_pauth, cpu)) {
- int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
- TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
- TARGET_PR_PAC_APGAKEY);
- int ret = 0;
- Error *err = NULL;
-
- if (arg2 == 0) {
- arg2 = all;
- } else if (arg2 & ~all) {
- return -TARGET_EINVAL;
- }
- if (arg2 & TARGET_PR_PAC_APIAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apia,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APIBKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apib,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APDAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apda,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APDBKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apdb,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APGAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apga,
- sizeof(ARMPACKey), &err);
- }
- if (ret != 0) {
- /*
- * Some unknown failure in the crypto. The best
- * we can do is log it and fail the syscall.
- * The real syscall cannot fail this way.
- */
- qemu_log_mask(LOG_UNIMP,
- "PR_PAC_RESET_KEYS: Crypto failure: %s",
- error_get_pretty(err));
- error_free(err);
- return -TARGET_EIO;
- }
- return 0;
- }
- }
- return -TARGET_EINVAL;
- case TARGET_PR_SET_TAGGED_ADDR_CTRL:
- {
- abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (cpu_isar_feature(aa64_mte, cpu)) {
- valid_mask |= TARGET_PR_MTE_TCF_MASK;
- valid_mask |= TARGET_PR_MTE_TAG_MASK;
- }
-
- if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
-
- if (cpu_isar_feature(aa64_mte, cpu)) {
- switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
- case TARGET_PR_MTE_TCF_NONE:
- case TARGET_PR_MTE_TCF_SYNC:
- case TARGET_PR_MTE_TCF_ASYNC:
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
- * Note that the syscall values are consistent with hw.
- */
- env->cp15.sctlr_el[1] =
- deposit64(env->cp15.sctlr_el[1], 38, 2,
- arg2 >> TARGET_PR_MTE_TCF_SHIFT);
-
- /*
- * Write PR_MTE_TAG to GCR_EL1[Exclude].
- * Note that the syscall uses an include mask,
- * and hardware uses an exclude mask -- invert.
- */
- env->cp15.gcr_el1 =
- deposit64(env->cp15.gcr_el1, 0, 16,
- ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
- arm_rebuild_hflags(env);
- }
- return 0;
- }
- case TARGET_PR_GET_TAGGED_ADDR_CTRL:
- {
- abi_long ret = 0;
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (arg2 || arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- if (env->tagged_addr_enable) {
- ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
- }
- if (cpu_isar_feature(aa64_mte, cpu)) {
- /* See above. */
- ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
- << TARGET_PR_MTE_TCF_SHIFT);
- ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
- ~env->cp15.gcr_el1);
- }
- return ret;
- }
-#endif /* AARCH64 */
- case PR_GET_SECCOMP:
- case PR_SET_SECCOMP:
- /* Disable seccomp to prevent the target disabling syscalls we
- * need. */
- return -TARGET_EINVAL;
- default:
- /* Most prctl options have no pointer arguments */
- return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
- }
+ return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
break;
#ifdef TARGET_NR_arch_prctl
case TARGET_NR_arch_prctl:
@@ -11250,42 +11834,61 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_setregid:
return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
case TARGET_NR_getgroups:
- {
+ { /* the same code as for TARGET_NR_getgroups32 */
int gidsetsize = arg1;
target_id *target_grouplist;
- gid_t *grouplist;
+ g_autofree gid_t *grouplist = NULL;
int i;
- grouplist = alloca(gidsetsize * sizeof(gid_t));
+ if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
+ return -TARGET_EINVAL;
+ }
+ if (gidsetsize > 0) {
+ grouplist = g_try_new(gid_t, gidsetsize);
+ if (!grouplist) {
+ return -TARGET_ENOMEM;
+ }
+ }
ret = get_errno(getgroups(gidsetsize, grouplist));
- if (gidsetsize == 0)
- return ret;
- if (!is_error(ret)) {
- target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
- if (!target_grouplist)
+ if (!is_error(ret) && gidsetsize > 0) {
+ target_grouplist = lock_user(VERIFY_WRITE, arg2,
+ gidsetsize * sizeof(target_id), 0);
+ if (!target_grouplist) {
return -TARGET_EFAULT;
- for(i = 0;i < ret; i++)
+ }
+ for (i = 0; i < ret; i++) {
target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
- unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
+ }
+ unlock_user(target_grouplist, arg2,
+ gidsetsize * sizeof(target_id));
}
+ return ret;
}
- return ret;
case TARGET_NR_setgroups:
- {
+ { /* the same code as for TARGET_NR_setgroups32 */
int gidsetsize = arg1;
target_id *target_grouplist;
- gid_t *grouplist = NULL;
+ g_autofree gid_t *grouplist = NULL;
int i;
- if (gidsetsize) {
- grouplist = alloca(gidsetsize * sizeof(gid_t));
- target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
+
+ if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
+ return -TARGET_EINVAL;
+ }
+ if (gidsetsize > 0) {
+ grouplist = g_try_new(gid_t, gidsetsize);
+ if (!grouplist) {
+ return -TARGET_ENOMEM;
+ }
+ target_grouplist = lock_user(VERIFY_READ, arg2,
+ gidsetsize * sizeof(target_id), 1);
if (!target_grouplist) {
return -TARGET_EFAULT;
}
for (i = 0; i < gidsetsize; i++) {
grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
}
- unlock_user(target_grouplist, arg2, 0);
+ unlock_user(target_grouplist, arg2,
+ gidsetsize * sizeof(target_id));
}
return get_errno(setgroups(gidsetsize, grouplist));
}
@@ -11376,7 +11979,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
{
uid_t euid;
euid=geteuid();
- ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
+ cpu_env->ir[IR_A4]=euid;
}
return get_errno(getuid());
#endif
@@ -11386,7 +11989,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
{
uid_t egid;
egid=getegid();
- ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
+ cpu_env->ir[IR_A4]=egid;
}
return get_errno(getgid());
#endif
@@ -11398,7 +12001,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_GSI_IEEE_FP_CONTROL:
{
uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
- uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
+ uint64_t swcr = cpu_env->swcr;
swcr &= ~SWCR_STATUS_MASK;
swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
@@ -11440,8 +12043,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
* could be queried. Therefore, we store the status
* bits only in FPCR.
*/
- ((CPUAlphaState *)cpu_env)->swcr
- = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
+ cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
fpcr = cpu_alpha_load_fpcr(cpu_env);
fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
@@ -11465,7 +12067,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
fex = alpha_ieee_fpcr_to_swcr(fpcr);
fex = exc & ~fex;
fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
- fex &= ((CPUArchState *)cpu_env)->swcr;
+ fex &= (cpu_env)->swcr;
/* Update the hardware fpcr. */
fpcr |= alpha_ieee_swcr_to_fpcr(exc);
@@ -11497,9 +12099,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = si_code;
- info._sifields._sigfault._addr
- = ((CPUArchState *)cpu_env)->pc;
- queue_signal((CPUArchState *)cpu_env, info.si_signo,
+ info._sifields._sigfault._addr = cpu_env->pc;
+ queue_signal(cpu_env, info.si_signo,
QEMU_SI_FAULT, &info);
}
ret = 0;
@@ -11569,44 +12170,62 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_getgroups32
case TARGET_NR_getgroups32:
- {
+ { /* the same code as for TARGET_NR_getgroups */
int gidsetsize = arg1;
uint32_t *target_grouplist;
- gid_t *grouplist;
+ g_autofree gid_t *grouplist = NULL;
int i;
- grouplist = alloca(gidsetsize * sizeof(gid_t));
+ if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
+ return -TARGET_EINVAL;
+ }
+ if (gidsetsize > 0) {
+ grouplist = g_try_new(gid_t, gidsetsize);
+ if (!grouplist) {
+ return -TARGET_ENOMEM;
+ }
+ }
ret = get_errno(getgroups(gidsetsize, grouplist));
- if (gidsetsize == 0)
- return ret;
- if (!is_error(ret)) {
- target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
+ if (!is_error(ret) && gidsetsize > 0) {
+ target_grouplist = lock_user(VERIFY_WRITE, arg2,
+ gidsetsize * 4, 0);
if (!target_grouplist) {
return -TARGET_EFAULT;
}
- for(i = 0;i < ret; i++)
+ for (i = 0; i < ret; i++) {
target_grouplist[i] = tswap32(grouplist[i]);
+ }
unlock_user(target_grouplist, arg2, gidsetsize * 4);
}
+ return ret;
}
- return ret;
#endif
#ifdef TARGET_NR_setgroups32
case TARGET_NR_setgroups32:
- {
+ { /* the same code as for TARGET_NR_setgroups */
int gidsetsize = arg1;
uint32_t *target_grouplist;
- gid_t *grouplist;
+ g_autofree gid_t *grouplist = NULL;
int i;
- grouplist = alloca(gidsetsize * sizeof(gid_t));
- target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
- if (!target_grouplist) {
- return -TARGET_EFAULT;
+ if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
+ return -TARGET_EINVAL;
+ }
+ if (gidsetsize > 0) {
+ grouplist = g_try_new(gid_t, gidsetsize);
+ if (!grouplist) {
+ return -TARGET_ENOMEM;
+ }
+ target_grouplist = lock_user(VERIFY_READ, arg2,
+ gidsetsize * 4, 1);
+ if (!target_grouplist) {
+ return -TARGET_EFAULT;
+ }
+ for (i = 0; i < gidsetsize; i++) {
+ grouplist[i] = tswap32(target_grouplist[i]);
+ }
+ unlock_user(target_grouplist, arg2, 0);
}
- for(i = 0;i < gidsetsize; i++)
- grouplist[i] = tswap32(target_grouplist[i]);
- unlock_user(target_grouplist, arg2, 0);
return get_errno(setgroups(gidsetsize, grouplist));
}
#endif
@@ -11677,7 +12296,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_mincore
case TARGET_NR_mincore:
{
- void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
+ void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
if (!a) {
return -TARGET_ENOMEM;
}
@@ -11705,7 +12324,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return -host_to_target_errno(ret);
#endif
-#if TARGET_ABI_BITS == 32
+#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#ifdef TARGET_NR_fadvise64_64
case TARGET_NR_fadvise64_64:
@@ -11770,11 +12389,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_madvise
case TARGET_NR_madvise:
- /* A straight passthrough may not be safe because qemu sometimes
- turns private file-backed mappings into anonymous mappings.
- This will break MADV_DONTNEED.
- This is a hint, so ignoring and returning success is ok. */
- return 0;
+ return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
case TARGET_NR_fcntl64:
@@ -11785,7 +12400,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
to_flock64_fn *copyto = copy_to_user_flock64;
#ifdef TARGET_ARM
- if (!((CPUARMState *)cpu_env)->eabi) {
+ if (!cpu_env->eabi) {
copyfrom = copy_from_user_oabi_flock64;
copyto = copy_to_user_oabi_flock64;
}
@@ -11836,7 +12451,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
case TARGET_NR_readahead:
-#if TARGET_ABI_BITS == 32
+#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
if (regpairs_aligned(cpu_env, num)) {
arg2 = arg3;
arg3 = arg4;
@@ -11853,7 +12468,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_listxattr:
case TARGET_NR_llistxattr:
{
- void *p, *b = 0;
+ void *b = 0;
if (arg2) {
b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
if (!b) {
@@ -11890,7 +12505,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_setxattr:
case TARGET_NR_lsetxattr:
{
- void *p, *n, *v = 0;
+ void *n, *v = 0;
if (arg3) {
v = lock_user(VERIFY_READ, arg3, arg4, 1);
if (!v) {
@@ -11935,7 +12550,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_getxattr:
case TARGET_NR_lgetxattr:
{
- void *p, *n, *v = 0;
+ void *n, *v = 0;
if (arg3) {
v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
if (!v) {
@@ -11980,7 +12595,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_removexattr:
case TARGET_NR_lremovexattr:
{
- void *p, *n;
+ void *n;
p = lock_user_string(arg1);
n = lock_user_string(arg2);
if (p && n) {
@@ -12013,13 +12628,13 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#ifdef TARGET_NR_set_thread_area
case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
- ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
+ cpu_env->active_tc.CP0_UserLocal = arg1;
return 0;
#elif defined(TARGET_CRIS)
if (arg1 & 0xff)
ret = -TARGET_EINVAL;
else {
- ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
+ cpu_env->pregs[PR_PID] = arg1;
ret = 0;
}
return ret;
@@ -12027,7 +12642,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
{
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
ts->tp_value = arg1;
return 0;
}
@@ -12041,7 +12656,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
{
- TaskState *ts = cpu->opaque;
+ TaskState *ts = get_task_state(cpu);
return ts->tp_value;
}
#else
@@ -12163,9 +12778,14 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
}
#endif
-#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
+#if defined(TARGET_NR_set_tid_address)
case TARGET_NR_set_tid_address:
- return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
+ {
+ TaskState *ts = get_task_state(cpu);
+ ts->child_tidptr = arg1;
+ /* Don't call the host set_tid_address(); just record the pointer and return our tid. */
+ return get_errno(sys_gettid());
+ }
#endif
case TARGET_NR_tkill:
@@ -12252,41 +12872,41 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_futex
case TARGET_NR_futex:
- return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
+ return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
case TARGET_NR_futex_time64:
- return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
+ return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
-#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
+#ifdef CONFIG_INOTIFY
+#if defined(TARGET_NR_inotify_init)
case TARGET_NR_inotify_init:
- ret = get_errno(sys_inotify_init());
+ ret = get_errno(inotify_init());
if (ret >= 0) {
fd_trans_register(ret, &target_inotify_trans);
}
return ret;
#endif
-#ifdef CONFIG_INOTIFY1
-#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
+#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
case TARGET_NR_inotify_init1:
- ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
+ ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
fcntl_flags_tbl)));
if (ret >= 0) {
fd_trans_register(ret, &target_inotify_trans);
}
return ret;
#endif
-#endif
-#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
+#if defined(TARGET_NR_inotify_add_watch)
case TARGET_NR_inotify_add_watch:
p = lock_user_string(arg2);
- ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
+ ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
unlock_user(p, arg2, 0);
return ret;
#endif
-#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
+#if defined(TARGET_NR_inotify_rm_watch)
case TARGET_NR_inotify_rm_watch:
- return get_errno(sys_inotify_rm_watch(arg1, arg2));
+ return get_errno(inotify_rm_watch(arg1, arg2));
+#endif
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
@@ -12523,7 +13143,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
case TARGET_NR_fallocate:
-#if TARGET_ABI_BITS == 32
+#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
target_offset64(arg5, arg6)));
#else
@@ -12534,7 +13154,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
case TARGET_NR_sync_file_range:
-#if TARGET_ABI_BITS == 32
+#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
target_offset64(arg5, arg6), arg7));
@@ -12556,7 +13176,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_arm_sync_file_range:
#endif
/* This is like sync_file_range but the arguments are reordered */
-#if TARGET_ABI_BITS == 32
+#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
target_offset64(arg5, arg6), arg2));
#else
@@ -12647,29 +13267,21 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_epoll_pwait)
case TARGET_NR_epoll_pwait:
{
- target_sigset_t *target_set;
- sigset_t _set, *set = &_set;
+ sigset_t *set = NULL;
if (arg5) {
- if (arg6 != sizeof(target_sigset_t)) {
- ret = -TARGET_EINVAL;
- break;
- }
-
- target_set = lock_user(VERIFY_READ, arg5,
- sizeof(target_sigset_t), 1);
- if (!target_set) {
- ret = -TARGET_EFAULT;
+ ret = process_sigsuspend_mask(&set, arg5, arg6);
+ if (ret != 0) {
break;
}
- target_to_host_sigset(set, target_set);
- unlock_user(target_set, arg5, 0);
- } else {
- set = NULL;
}
ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
set, SIGSET_T_SIZE));
+
+ if (set) {
+ finish_sigsuspend_mask(ret);
+ }
break;
}
#endif
@@ -12712,8 +13324,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
return -TARGET_EFAULT;
}
- rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
- rnew.rlim_max = tswap64(target_rnew->rlim_max);
+ __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
+ __get_user(rnew.rlim_max, &target_rnew->rlim_max);
unlock_user_struct(target_rnew, arg3, 0);
rnewp = &rnew;
}
@@ -12723,8 +13335,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
return -TARGET_EFAULT;
}
- target_rold->rlim_cur = tswap64(rold.rlim_cur);
- target_rold->rlim_max = tswap64(rold.rlim_max);
+ __put_user(rold.rlim_cur, &target_rold->rlim_cur);
+ __put_user(rold.rlim_max, &target_rold->rlim_max);
unlock_user_struct(target_rold, arg4, 1);
}
return ret;
@@ -12754,8 +13366,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
info.si_errno = 0;
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = arg6;
- queue_signal((CPUArchState *)cpu_env, info.si_signo,
- QEMU_SI_FAULT, &info);
+ queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
ret = 0xdeadbeef;
}
@@ -12790,15 +13401,18 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
phost_sevp = &host_sevp;
ret = target_to_host_sigevent(phost_sevp, arg2);
if (ret != 0) {
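+ /* Don't leak the host timer slot reserved for this guest timer. */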
+ free_host_timer_slot(timer_index);
return ret;
}
}
ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
if (ret) {
- phtimer = NULL;
+ free_host_timer_slot(timer_index);
} else {
if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
+ timer_delete(*phtimer);
+ free_host_timer_slot(timer_index);
return -TARGET_EFAULT;
}
}
@@ -12934,7 +13548,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
} else {
timer_t htimer = g_posix_timers[timerid];
ret = get_errno(timer_delete(htimer));
- g_posix_timers[timerid] = 0;
+ free_host_timer_slot(timerid);
}
return ret;
}
@@ -12942,8 +13556,12 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
case TARGET_NR_timerfd_create:
- return get_errno(timerfd_create(arg1,
- target_to_host_bitmask(arg2, fcntl_flags_tbl)));
+ ret = get_errno(timerfd_create(arg1,
+ target_to_host_bitmask(arg2, fcntl_flags_tbl)));
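+ /* Register a translator so the expiration counter reads are byte-swapped for the guest. */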
+ if (ret >= 0) {
+ fd_trans_register(ret, &target_timerfd_trans);
+ }
+ return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
@@ -13117,6 +13735,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
#endif
+#if defined(TARGET_NR_riscv_hwprobe)
+ case TARGET_NR_riscv_hwprobe:
+ return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
+#endif
+
default:
qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
return -TARGET_ENOSYS;
@@ -13124,7 +13747,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
}
-abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
+abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8)
@@ -13141,7 +13764,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
static bool flag;
flag = !flag;
if (flag) {
- return -TARGET_ERESTARTSYS;
+ return -QEMU_ERESTARTSYS;
}
}
#endif
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index a5ce487dcc..a00b617cae 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -33,18 +33,18 @@
#define TARGET_SYS_SENDMMSG 20 /* sendmmsg() */
#define IPCOP_CALL(VERSION, OP) ((VERSION) << 16 | (OP))
-#define IPCOP_semop 1
-#define IPCOP_semget 2
-#define IPCOP_semctl 3
-#define IPCOP_semtimedop 4
-#define IPCOP_msgsnd 11
-#define IPCOP_msgrcv 12
-#define IPCOP_msgget 13
-#define IPCOP_msgctl 14
-#define IPCOP_shmat 21
-#define IPCOP_shmdt 22
-#define IPCOP_shmget 23
-#define IPCOP_shmctl 24
+#define IPCOP_semop 1
+#define IPCOP_semget 2
+#define IPCOP_semctl 3
+#define IPCOP_semtimedop 4
+#define IPCOP_msgsnd 11
+#define IPCOP_msgrcv 12
+#define IPCOP_msgget 13
+#define IPCOP_msgctl 14
+#define IPCOP_shmat 21
+#define IPCOP_shmdt 22
+#define IPCOP_shmget 23
+#define IPCOP_shmctl 24
#define TARGET_SEMOPM 500
@@ -56,42 +56,42 @@
* this explicit here. Please be sure to use the decoding macros
* below from now on.
*/
-#define TARGET_IOC_NRBITS 8
-#define TARGET_IOC_TYPEBITS 8
+#define TARGET_IOC_NRBITS 8
+#define TARGET_IOC_TYPEBITS 8
-#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
- || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
- || defined(TARGET_SPARC) \
+#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
+ || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
+ || (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \
|| defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
- /* 16 bit uid wrappers emulation */
+/* 16 bit uid wrappers emulation */
#define USE_UID16
#define target_id uint16_t
#else
-#define target_id uint32_t
+#define target_id abi_uint
#endif
-#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
- || defined(TARGET_M68K) || defined(TARGET_CRIS) \
- || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
- || defined(TARGET_NIOS2) || defined(TARGET_RISCV) \
- || defined(TARGET_XTENSA)
+#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
+ || defined(TARGET_M68K) || defined(TARGET_CRIS) \
+ || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
+ || defined(TARGET_RISCV) \
+ || defined(TARGET_XTENSA) || defined(TARGET_LOONGARCH64)
-#define TARGET_IOC_SIZEBITS 14
-#define TARGET_IOC_DIRBITS 2
+#define TARGET_IOC_SIZEBITS 14
+#define TARGET_IOC_DIRBITS 2
-#define TARGET_IOC_NONE 0U
+#define TARGET_IOC_NONE 0U
#define TARGET_IOC_WRITE 1U
-#define TARGET_IOC_READ 2U
+#define TARGET_IOC_READ 2U
-#elif defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
- defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) || \
- defined(TARGET_MIPS)
+#elif defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
+ defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) || \
+ defined(TARGET_MIPS)
-#define TARGET_IOC_SIZEBITS 13
-#define TARGET_IOC_DIRBITS 3
+#define TARGET_IOC_SIZEBITS 13
+#define TARGET_IOC_DIRBITS 3
-#define TARGET_IOC_NONE 1U
-#define TARGET_IOC_READ 2U
+#define TARGET_IOC_NONE 1U
+#define TARGET_IOC_READ 2U
#define TARGET_IOC_WRITE 4U
#elif defined(TARGET_HPPA)
@@ -115,32 +115,32 @@
#error unsupported CPU
#endif
-#define TARGET_IOC_NRMASK ((1 << TARGET_IOC_NRBITS)-1)
-#define TARGET_IOC_TYPEMASK ((1 << TARGET_IOC_TYPEBITS)-1)
-#define TARGET_IOC_SIZEMASK ((1 << TARGET_IOC_SIZEBITS)-1)
-#define TARGET_IOC_DIRMASK ((1 << TARGET_IOC_DIRBITS)-1)
+#define TARGET_IOC_NRMASK ((1 << TARGET_IOC_NRBITS)-1)
+#define TARGET_IOC_TYPEMASK ((1 << TARGET_IOC_TYPEBITS)-1)
+#define TARGET_IOC_SIZEMASK ((1 << TARGET_IOC_SIZEBITS)-1)
+#define TARGET_IOC_DIRMASK ((1 << TARGET_IOC_DIRBITS)-1)
-#define TARGET_IOC_NRSHIFT 0
-#define TARGET_IOC_TYPESHIFT (TARGET_IOC_NRSHIFT+TARGET_IOC_NRBITS)
-#define TARGET_IOC_SIZESHIFT (TARGET_IOC_TYPESHIFT+TARGET_IOC_TYPEBITS)
-#define TARGET_IOC_DIRSHIFT (TARGET_IOC_SIZESHIFT+TARGET_IOC_SIZEBITS)
+#define TARGET_IOC_NRSHIFT 0
+#define TARGET_IOC_TYPESHIFT (TARGET_IOC_NRSHIFT+TARGET_IOC_NRBITS)
+#define TARGET_IOC_SIZESHIFT (TARGET_IOC_TYPESHIFT+TARGET_IOC_TYPEBITS)
+#define TARGET_IOC_DIRSHIFT (TARGET_IOC_SIZESHIFT+TARGET_IOC_SIZEBITS)
-#define TARGET_IOC(dir,type,nr,size) \
- (((dir) << TARGET_IOC_DIRSHIFT) | \
- ((type) << TARGET_IOC_TYPESHIFT) | \
- ((nr) << TARGET_IOC_NRSHIFT) | \
- ((size) << TARGET_IOC_SIZESHIFT))
+#define TARGET_IOC(dir,type,nr,size) \
+ (((dir) << TARGET_IOC_DIRSHIFT) | \
+ ((type) << TARGET_IOC_TYPESHIFT) | \
+ ((nr) << TARGET_IOC_NRSHIFT) | \
+ ((size) << TARGET_IOC_SIZESHIFT))
/* used to create numbers */
-#define TARGET_IO(type,nr) TARGET_IOC(TARGET_IOC_NONE,(type),(nr),0)
-#define TARGET_IOR(type,nr,size) TARGET_IOC(TARGET_IOC_READ,(type),(nr),sizeof(size))
-#define TARGET_IOW(type,nr,size) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),sizeof(size))
-#define TARGET_IOWR(type,nr,size) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),sizeof(size))
+#define TARGET_IO(type,nr) TARGET_IOC(TARGET_IOC_NONE,(type),(nr),0)
+#define TARGET_IOR(type,nr,size) TARGET_IOC(TARGET_IOC_READ,(type),(nr),sizeof(size))
+#define TARGET_IOW(type,nr,size) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),sizeof(size))
+#define TARGET_IOWR(type,nr,size) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),sizeof(size))
/* the size is automatically computed for these defines */
-#define TARGET_IORU(type,nr) TARGET_IOC(TARGET_IOC_READ,(type),(nr),TARGET_IOC_SIZEMASK)
-#define TARGET_IOWU(type,nr) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
-#define TARGET_IOWRU(type,nr) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
+#define TARGET_IORU(type,nr) TARGET_IOC(TARGET_IOC_READ,(type),(nr),TARGET_IOC_SIZEMASK)
+#define TARGET_IOWU(type,nr) TARGET_IOC(TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
+#define TARGET_IOWRU(type,nr) TARGET_IOC(TARGET_IOC_READ|TARGET_IOC_WRITE,(type),(nr),TARGET_IOC_SIZEMASK)
struct target_sockaddr {
abi_ushort sa_family;
@@ -174,12 +174,12 @@ struct target_in_addr {
};
struct target_sockaddr_in {
- abi_ushort sin_family;
- abi_short sin_port; /* big endian */
- struct target_in_addr sin_addr;
- uint8_t __pad[sizeof(struct target_sockaddr) -
- sizeof(abi_ushort) - sizeof(abi_short) -
- sizeof(struct target_in_addr)];
+ abi_ushort sin_family;
+ abi_short sin_port; /* big endian */
+ struct target_in_addr sin_addr;
+ uint8_t __pad[sizeof(struct target_sockaddr) -
+ sizeof(abi_ushort) - sizeof(abi_short) -
+ sizeof(struct target_in_addr)];
};
struct target_sockaddr_in6 {
@@ -215,9 +215,9 @@ struct target_ip_mreqn {
struct target_ip_mreq_source {
/* big endian */
- uint32_t imr_multiaddr;
- uint32_t imr_interface;
- uint32_t imr_sourceaddr;
+ abi_uint imr_multiaddr;
+ abi_uint imr_interface;
+ abi_uint imr_sourceaddr;
};
struct target_linger {
@@ -360,26 +360,26 @@ struct target_iovec {
};
struct target_msghdr {
- abi_long msg_name; /* Socket name */
- int msg_namelen; /* Length of name */
- abi_long msg_iov; /* Data blocks */
- abi_long msg_iovlen; /* Number of blocks */
- abi_long msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
- abi_long msg_controllen; /* Length of cmsg list */
- unsigned int msg_flags;
+ abi_long msg_name; /* Socket name */
+ abi_int msg_namelen; /* Length of name */
+ abi_long msg_iov; /* Data blocks */
+ abi_long msg_iovlen; /* Number of blocks */
+ abi_long msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
+ abi_long msg_controllen; /* Length of cmsg list */
+ abi_uint msg_flags;
};
struct target_cmsghdr {
abi_long cmsg_len;
- int cmsg_level;
- int cmsg_type;
+ abi_int cmsg_level;
+ abi_int cmsg_type;
};
#define TARGET_CMSG_DATA(cmsg) ((unsigned char *) ((struct target_cmsghdr *) (cmsg) + 1))
-#define TARGET_CMSG_NXTHDR(mhdr, cmsg, cmsg_start) \
- __target_cmsg_nxthdr(mhdr, cmsg, cmsg_start)
-#define TARGET_CMSG_ALIGN(len) (((len) + sizeof (abi_long) - 1) \
- & (size_t) ~(sizeof (abi_long) - 1))
+#define TARGET_CMSG_NXTHDR(mhdr, cmsg, cmsg_start) \
+ __target_cmsg_nxthdr(mhdr, cmsg, cmsg_start)
+#define TARGET_CMSG_ALIGN(len) (((len) + sizeof (abi_long) - 1) \
+ & (size_t) ~(sizeof (abi_long) - 1))
#define TARGET_CMSG_SPACE(len) (sizeof(struct target_cmsghdr) + \
TARGET_CMSG_ALIGN(len))
#define TARGET_CMSG_LEN(len) (sizeof(struct target_cmsghdr) + (len))
@@ -389,73 +389,73 @@ __target_cmsg_nxthdr(struct target_msghdr *__mhdr,
struct target_cmsghdr *__cmsg,
struct target_cmsghdr *__cmsg_start)
{
- struct target_cmsghdr *__ptr;
-
- __ptr = (struct target_cmsghdr *)((unsigned char *) __cmsg
- + TARGET_CMSG_ALIGN (tswapal(__cmsg->cmsg_len)));
- if ((unsigned long)((char *)(__ptr+1) - (char *)__cmsg_start)
- > tswapal(__mhdr->msg_controllen)) {
- /* No more entries. */
- return (struct target_cmsghdr *)0;
- }
- return __ptr;
+ struct target_cmsghdr *__ptr;
+
+ __ptr = (struct target_cmsghdr *)((unsigned char *) __cmsg
+ + TARGET_CMSG_ALIGN (tswapal(__cmsg->cmsg_len)));
+ if ((unsigned long)((char *)(__ptr+1) - (char *)__cmsg_start)
+ > tswapal(__mhdr->msg_controllen)) {
+ /* No more entries. */
+ return (struct target_cmsghdr *)0;
+ }
+ return __ptr;
}
struct target_mmsghdr {
struct target_msghdr msg_hdr; /* Message header */
- unsigned int msg_len; /* Number of bytes transmitted */
+ abi_uint msg_len; /* Number of bytes transmitted */
};
struct target_rusage {
- struct target_timeval ru_utime; /* user time used */
- struct target_timeval ru_stime; /* system time used */
- abi_long ru_maxrss; /* maximum resident set size */
- abi_long ru_ixrss; /* integral shared memory size */
- abi_long ru_idrss; /* integral unshared data size */
- abi_long ru_isrss; /* integral unshared stack size */
- abi_long ru_minflt; /* page reclaims */
- abi_long ru_majflt; /* page faults */
- abi_long ru_nswap; /* swaps */
- abi_long ru_inblock; /* block input operations */
- abi_long ru_oublock; /* block output operations */
- abi_long ru_msgsnd; /* messages sent */
- abi_long ru_msgrcv; /* messages received */
- abi_long ru_nsignals; /* signals received */
- abi_long ru_nvcsw; /* voluntary context switches */
- abi_long ru_nivcsw; /* involuntary " */
+ struct target_timeval ru_utime; /* user time used */
+ struct target_timeval ru_stime; /* system time used */
+ abi_long ru_maxrss; /* maximum resident set size */
+ abi_long ru_ixrss; /* integral shared memory size */
+ abi_long ru_idrss; /* integral unshared data size */
+ abi_long ru_isrss; /* integral unshared stack size */
+ abi_long ru_minflt; /* page reclaims */
+ abi_long ru_majflt; /* page faults */
+ abi_long ru_nswap; /* swaps */
+ abi_long ru_inblock; /* block input operations */
+ abi_long ru_oublock; /* block output operations */
+ abi_long ru_msgsnd; /* messages sent */
+ abi_long ru_msgrcv; /* messages received */
+ abi_long ru_nsignals; /* signals received */
+ abi_long ru_nvcsw; /* voluntary context switches */
+ abi_long ru_nivcsw; /* involuntary " */
};
typedef struct {
- int val[2];
+ abi_int val[2];
} kernel_fsid_t;
struct target_dirent {
- abi_long d_ino;
- abi_long d_off;
- unsigned short d_reclen;
- char d_name[];
+ abi_long d_ino;
+ abi_long d_off;
+ abi_ushort d_reclen;
+ char d_name[];
};
struct target_dirent64 {
- uint64_t d_ino;
- int64_t d_off;
- unsigned short d_reclen;
- unsigned char d_type;
- char d_name[256];
+ abi_ullong d_ino;
+ abi_llong d_off;
+ abi_ushort d_reclen;
+ unsigned char d_type;
+ char d_name[];
};
/* mostly generic signal stuff */
-#define TARGET_SIG_DFL ((abi_long)0) /* default signal handling */
-#define TARGET_SIG_IGN ((abi_long)1) /* ignore signal */
-#define TARGET_SIG_ERR ((abi_long)-1) /* error return from signal */
+#define TARGET_SIG_DFL ((abi_long)0) /* default signal handling */
+#define TARGET_SIG_IGN ((abi_long)1) /* ignore signal */
+#define TARGET_SIG_ERR ((abi_long)-1) /* error return from signal */
#ifdef TARGET_MIPS
-#define TARGET_NSIG 128
+#define TARGET_NSIG 128
#else
-#define TARGET_NSIG 64
+#define TARGET_NSIG 64
#endif
-#define TARGET_NSIG_BPW TARGET_ABI_BITS
+#define TARGET_NSIG_BPW TARGET_ABI_BITS
#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW)
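Worked out: with TARGET_ABI_BITS == 64 this gives 64 / 64 = 1 sigset word for most targets and 128 / 64 = 2 for MIPS; on a 32-bit ABI the same targets get 2 and 4 words respectively.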
typedef struct {
@@ -501,78 +501,54 @@ int do_sigaction(int sig, const struct target_sigaction *act,
#endif
#if defined(TARGET_ALPHA)
-typedef int32_t target_old_sa_flags;
+typedef abi_int target_old_sa_flags;
#else
typedef abi_ulong target_old_sa_flags;
#endif
#if defined(TARGET_MIPS)
struct target_sigaction {
- uint32_t sa_flags;
+ abi_uint sa_flags;
#if defined(TARGET_ABI_MIPSN32)
- uint32_t _sa_handler;
+ abi_uint _sa_handler;
#else
- abi_ulong _sa_handler;
+ abi_ulong _sa_handler;
#endif
- target_sigset_t sa_mask;
+ target_sigset_t sa_mask;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
- /* ??? This is always present, but ignored unless O32. */
- abi_ulong sa_restorer;
+ /* ??? This is always present, but ignored unless O32. */
+ abi_ulong sa_restorer;
#endif
};
#else
struct target_old_sigaction {
- abi_ulong _sa_handler;
- abi_ulong sa_mask;
- target_old_sa_flags sa_flags;
+ abi_ulong _sa_handler;
+ abi_ulong sa_mask;
+ target_old_sa_flags sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
- abi_ulong sa_restorer;
+ abi_ulong sa_restorer;
#endif
};
struct target_sigaction {
- abi_ulong _sa_handler;
- abi_ulong sa_flags;
+ abi_ulong _sa_handler;
+ abi_ulong sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
- abi_ulong sa_restorer;
+ abi_ulong sa_restorer;
#endif
- target_sigset_t sa_mask;
+ target_sigset_t sa_mask;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
- abi_ulong ka_restorer;
+ abi_ulong ka_restorer;
#endif
};
#endif
typedef union target_sigval {
- int sival_int;
- abi_ulong sival_ptr;
+ abi_int sival_int;
+ abi_ulong sival_ptr;
} target_sigval_t;
-#if 0
-#if defined (TARGET_SPARC)
-typedef struct {
- struct {
- abi_ulong psr;
- abi_ulong pc;
- abi_ulong npc;
- abi_ulong y;
- abi_ulong u_regs[16]; /* globals and ins */
- } si_regs;
- int si_mask;
-} __siginfo_t;
-typedef struct {
- unsigned long si_float_regs [32];
- unsigned long si_fsr;
- unsigned long si_fpqdepth;
- struct {
- unsigned long *insn_addr;
- unsigned long insn;
- } si_fpqueue [16];
-} __siginfo_fpu_t;
-#endif
-#endif
-
-#define TARGET_SI_MAX_SIZE 128
+#define TARGET_SI_MAX_SIZE 128
#if TARGET_ABI_BITS == 32
#define TARGET_SI_PREAMBLE_SIZE (3 * sizeof(int))
@@ -599,82 +575,82 @@ typedef struct {
typedef struct target_siginfo {
#ifdef TARGET_MIPS
- int si_signo;
- int si_code;
- int si_errno;
+ abi_int si_signo;
+ abi_int si_code;
+ abi_int si_errno;
#else
- int si_signo;
- int si_errno;
- int si_code;
+ abi_int si_signo;
+ abi_int si_errno;
+ abi_int si_code;
#endif
- union {
- int _pad[TARGET_SI_PAD_SIZE];
-
- /* kill() */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- unsigned int _timer1;
- unsigned int _timer2;
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- target_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- target_clock_t _utime;
- target_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- abi_ulong _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
+ union {
+ abi_int _pad[TARGET_SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ abi_uint _timer1;
+ abi_uint _timer2;
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ target_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
+ abi_int _status; /* exit code */
+ target_clock_t _utime;
+ target_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ abi_ulong _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ abi_int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ abi_int _fd;
+ } _sigpoll;
+ } _sifields;
} target_siginfo_t;
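target_siginfo_t is filled from the host siginfo_t when a signal is delivered to the guest. A minimal sketch of the common-field copy for the non-MIPS ordering, eliding the host-to-guest signal-number remapping and the many per-si_code union cases that linux-user/signal.c actually handles:

#include <signal.h>
#include <stdint.h>

/* Illustrative only: copy the always-present fields and, for a fault,
 * the faulting address.  Byte order is the guest's, hence the swaps. */
static void sketch_fill_target_siginfo(target_siginfo_t *tinfo,
                                       const siginfo_t *info)
{
    tinfo->si_signo = tswap32(info->si_signo);   /* real code first remaps to
                                                    the guest signal number */
    tinfo->si_errno = tswap32(info->si_errno);
    tinfo->si_code  = tswap32(info->si_code);

    if (info->si_signo == SIGSEGV || info->si_signo == SIGBUS) {
        tinfo->_sifields._sigfault._addr =
            tswapal((abi_ulong)(uintptr_t)info->si_addr);
    }
}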
/*
* si_code values
* Digital reserves positive values for kernel-generated signals.
*/
-#define TARGET_SI_USER 0 /* sent by kill, sigsend, raise */
-#define TARGET_SI_KERNEL 0x80 /* sent by the kernel from somewhere */
-#define TARGET_SI_QUEUE -1 /* sent by sigqueue */
+#define TARGET_SI_USER 0 /* sent by kill, sigsend, raise */
+#define TARGET_SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define TARGET_SI_QUEUE -1 /* sent by sigqueue */
#define TARGET_SI_TIMER -2 /* sent by timer expiration */
-#define TARGET_SI_MESGQ -3 /* sent by real time mesq state change */
-#define TARGET_SI_ASYNCIO -4 /* sent by AIO completion */
-#define TARGET_SI_SIGIO -5 /* sent by queued SIGIO */
+#define TARGET_SI_MESGQ -3 /* sent by real time mesq state change */
+#define TARGET_SI_ASYNCIO -4 /* sent by AIO completion */
+#define TARGET_SI_SIGIO -5 /* sent by queued SIGIO */
/*
* SIGILL si_codes
*/
-#define TARGET_ILL_ILLOPC (1) /* illegal opcode */
-#define TARGET_ILL_ILLOPN (2) /* illegal operand */
-#define TARGET_ILL_ILLADR (3) /* illegal addressing mode */
-#define TARGET_ILL_ILLTRP (4) /* illegal trap */
-#define TARGET_ILL_PRVOPC (5) /* privileged opcode */
-#define TARGET_ILL_PRVREG (6) /* privileged register */
-#define TARGET_ILL_COPROC (7) /* coprocessor error */
-#define TARGET_ILL_BADSTK (8) /* internal stack error */
+#define TARGET_ILL_ILLOPC (1) /* illegal opcode */
+#define TARGET_ILL_ILLOPN (2) /* illegal operand */
+#define TARGET_ILL_ILLADR (3) /* illegal addressing mode */
+#define TARGET_ILL_ILLTRP (4) /* illegal trap */
+#define TARGET_ILL_PRVOPC (5) /* privileged opcode */
+#define TARGET_ILL_PRVREG (6) /* privileged register */
+#define TARGET_ILL_COPROC (7) /* coprocessor error */
+#define TARGET_ILL_BADSTK (8) /* internal stack error */
/*
* SIGFPE si_codes
@@ -688,7 +664,7 @@ typedef struct target_siginfo {
#define TARGET_FPE_FLTINV (7) /* floating point invalid operation */
#define TARGET_FPE_FLTSUB (8) /* subscript out of range */
#define TARGET_FPE_FLTUNK (14) /* undiagnosed fp exception */
-#define TARGET_NSIGFPE 15
+#define TARGET_FPE_CONDTRAP (15) /* trap on condition */
/*
* SIGSEGV si_codes
@@ -700,9 +676,9 @@ typedef struct target_siginfo {
/*
* SIGBUS si_codes
*/
-#define TARGET_BUS_ADRALN (1) /* invalid address alignment */
-#define TARGET_BUS_ADRERR (2) /* non-existent physical address */
-#define TARGET_BUS_OBJERR (3) /* object specific hardware error */
+#define TARGET_BUS_ADRALN (1) /* invalid address alignment */
+#define TARGET_BUS_ADRERR (2) /* non-existent physical address */
+#define TARGET_BUS_OBJERR (3) /* object specific hardware error */
/* hardware memory error consumed on a machine check: action required */
#define TARGET_BUS_MCEERR_AR (4)
/* hardware memory error detected in process but not consumed: action optional*/
@@ -711,91 +687,47 @@ typedef struct target_siginfo {
/*
* SIGTRAP si_codes
*/
-#define TARGET_TRAP_BRKPT (1) /* process breakpoint */
-#define TARGET_TRAP_TRACE (2) /* process trace trap */
+#define TARGET_TRAP_BRKPT (1) /* process breakpoint */
+#define TARGET_TRAP_TRACE (2) /* process trace trap */
#define TARGET_TRAP_BRANCH (3) /* process taken branch trap */
#define TARGET_TRAP_HWBKPT (4) /* hardware breakpoint/watchpoint */
+#define TARGET_TRAP_UNK (5) /* undiagnosed trap */
-struct target_rlimit {
- abi_ulong rlim_cur;
- abi_ulong rlim_max;
-};
-
-#if defined(TARGET_ALPHA)
-#define TARGET_RLIM_INFINITY 0x7fffffffffffffffull
-#elif defined(TARGET_MIPS) || (defined(TARGET_SPARC) && TARGET_ABI_BITS == 32)
-#define TARGET_RLIM_INFINITY 0x7fffffffUL
-#else
-#define TARGET_RLIM_INFINITY ((abi_ulong)-1)
-#endif
+/*
+ * SIGEMT si_codes
+ */
+#define TARGET_EMT_TAGOVF 1 /* tag overflow */
-#if defined(TARGET_MIPS)
-#define TARGET_RLIMIT_CPU 0
-#define TARGET_RLIMIT_FSIZE 1
-#define TARGET_RLIMIT_DATA 2
-#define TARGET_RLIMIT_STACK 3
-#define TARGET_RLIMIT_CORE 4
-#define TARGET_RLIMIT_RSS 7
-#define TARGET_RLIMIT_NPROC 8
-#define TARGET_RLIMIT_NOFILE 5
-#define TARGET_RLIMIT_MEMLOCK 9
-#define TARGET_RLIMIT_AS 6
-#define TARGET_RLIMIT_LOCKS 10
-#define TARGET_RLIMIT_SIGPENDING 11
-#define TARGET_RLIMIT_MSGQUEUE 12
-#define TARGET_RLIMIT_NICE 13
-#define TARGET_RLIMIT_RTPRIO 14
-#else
-#define TARGET_RLIMIT_CPU 0
-#define TARGET_RLIMIT_FSIZE 1
-#define TARGET_RLIMIT_DATA 2
-#define TARGET_RLIMIT_STACK 3
-#define TARGET_RLIMIT_CORE 4
-#define TARGET_RLIMIT_RSS 5
-#if defined(TARGET_SPARC)
-#define TARGET_RLIMIT_NOFILE 6
-#define TARGET_RLIMIT_NPROC 7
-#else
-#define TARGET_RLIMIT_NPROC 6
-#define TARGET_RLIMIT_NOFILE 7
-#endif
-#define TARGET_RLIMIT_MEMLOCK 8
-#define TARGET_RLIMIT_AS 9
-#define TARGET_RLIMIT_LOCKS 10
-#define TARGET_RLIMIT_SIGPENDING 11
-#define TARGET_RLIMIT_MSGQUEUE 12
-#define TARGET_RLIMIT_NICE 13
-#define TARGET_RLIMIT_RTPRIO 14
-#endif
+#include "target_resource.h"
struct target_pollfd {
- int fd; /* file descriptor */
- short events; /* requested events */
- short revents; /* returned events */
+ abi_int fd; /* file descriptor */
+ abi_short events; /* requested events */
+ abi_short revents; /* returned events */
};
/* virtual terminal ioctls */
-#define TARGET_KIOCSOUND 0x4B2F /* start sound generation (0 for off) */
-#define TARGET_KDMKTONE 0x4B30 /* generate tone */
+#define TARGET_KIOCSOUND 0x4B2F /* start sound generation (0 for off) */
+#define TARGET_KDMKTONE 0x4B30 /* generate tone */
#define TARGET_KDGKBTYPE 0x4b33
#define TARGET_KDSETMODE 0x4b3a
#define TARGET_KDGKBMODE 0x4b44
#define TARGET_KDSKBMODE 0x4b45
-#define TARGET_KDGKBENT 0x4B46 /* gets one entry in translation table */
-#define TARGET_KDGKBSENT 0x4B48 /* gets one function key string entry */
-#define TARGET_KDGKBLED 0x4B64 /* get led flags (not lights) */
-#define TARGET_KDSKBLED 0x4B65 /* set led flags (not lights) */
-#define TARGET_KDGETLED 0x4B31 /* return current led state */
-#define TARGET_KDSETLED 0x4B32 /* set led state [lights, not flags] */
+#define TARGET_KDGKBENT 0x4B46 /* gets one entry in translation table */
+#define TARGET_KDGKBSENT 0x4B48 /* gets one function key string entry */
+#define TARGET_KDGKBLED 0x4B64 /* get led flags (not lights) */
+#define TARGET_KDSKBLED 0x4B65 /* set led flags (not lights) */
+#define TARGET_KDGETLED 0x4B31 /* return current led state */
+#define TARGET_KDSETLED 0x4B32 /* set led state [lights, not flags] */
#define TARGET_KDSIGACCEPT 0x4B4E
struct target_rtc_pll_info {
- int pll_ctrl;
- int pll_value;
- int pll_max;
- int pll_min;
- int pll_posmult;
- int pll_negmult;
+ abi_int pll_ctrl;
+ abi_int pll_value;
+ abi_int pll_max;
+ abi_int pll_min;
+ abi_int pll_posmult;
+ abi_int pll_negmult;
abi_long pll_clock;
};
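The TARGET_IO/TARGET_IOR/TARGET_IOW request numbers below are packed the same way as the kernel's _IO/_IOR/_IOW macros: command number, magic 'type' character, argument size and direction in one 32-bit word. A sketch of the common asm-generic layout; several targets (Alpha, MIPS, PowerPC, SPARC) use different direction/size widths, which is exactly why per-target copies of these constants are kept:

/* Illustrative only: the usual asm-generic packing (8 nr bits, 8 type
 * bits, 14 size bits, 2 direction bits; READ == 2, WRITE == 1). */
#define EX_IOC(dir, type, nr, size) \
    (((unsigned)(dir) << 30) | ((unsigned)(size) << 16) | \
     ((unsigned)(type) << 8) | (unsigned)(nr))

/* e.g. on such a target, TARGET_RTC_VL_READ == TARGET_IOR('p', 0x13, abi_int)
 * expands to the same value as EX_IOC(2, 'p', 0x13, sizeof(abi_int)). */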
@@ -818,18 +750,18 @@ struct target_rtc_pll_info {
#define TARGET_RTC_EPOCH_SET TARGET_IOW('p', 0x0e, abi_ulong)
#define TARGET_RTC_WKALM_RD TARGET_IOR('p', 0x10, struct rtc_wkalrm)
#define TARGET_RTC_WKALM_SET TARGET_IOW('p', 0x0f, struct rtc_wkalrm)
-#define TARGET_RTC_PLL_GET TARGET_IOR('p', 0x11, \
+#define TARGET_RTC_PLL_GET TARGET_IOR('p', 0x11, \
struct target_rtc_pll_info)
-#define TARGET_RTC_PLL_SET TARGET_IOW('p', 0x12, \
+#define TARGET_RTC_PLL_SET TARGET_IOW('p', 0x12, \
struct target_rtc_pll_info)
-#define TARGET_RTC_VL_READ TARGET_IOR('p', 0x13, int)
+#define TARGET_RTC_VL_READ TARGET_IOR('p', 0x13, abi_int)
#define TARGET_RTC_VL_CLR TARGET_IO('p', 0x14)
-#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SH4) || \
- defined(TARGET_XTENSA)
-#define TARGET_FIOGETOWN TARGET_IOR('f', 123, int)
-#define TARGET_FIOSETOWN TARGET_IOW('f', 124, int)
-#define TARGET_SIOCATMARK TARGET_IOR('s', 7, int)
+#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SH4) || \
+ defined(TARGET_XTENSA)
+#define TARGET_FIOGETOWN TARGET_IOR('f', 123, abi_int)
+#define TARGET_FIOSETOWN TARGET_IOW('f', 124, abi_int)
+#define TARGET_SIOCATMARK TARGET_IOR('s', 7, abi_int)
#define TARGET_SIOCSPGRP TARGET_IOW('s', 8, pid_t)
#define TARGET_SIOCGPGRP TARGET_IOR('s', 9, pid_t)
#else
@@ -919,40 +851,40 @@ struct target_rtc_pll_info {
/* From <linux/if_tun.h> */
-#define TARGET_TUNSETDEBUG TARGET_IOW('T', 201, int)
-#define TARGET_TUNSETIFF TARGET_IOW('T', 202, int)
-#define TARGET_TUNSETPERSIST TARGET_IOW('T', 203, int)
-#define TARGET_TUNSETOWNER TARGET_IOW('T', 204, int)
-#define TARGET_TUNSETLINK TARGET_IOW('T', 205, int)
-#define TARGET_TUNSETGROUP TARGET_IOW('T', 206, int)
-#define TARGET_TUNGETFEATURES TARGET_IOR('T', 207, unsigned int)
-#define TARGET_TUNSETOFFLOAD TARGET_IOW('T', 208, unsigned int)
-#define TARGET_TUNSETTXFILTER TARGET_IOW('T', 209, unsigned int)
-#define TARGET_TUNGETIFF TARGET_IOR('T', 210, unsigned int)
-#define TARGET_TUNGETSNDBUF TARGET_IOR('T', 211, int)
-#define TARGET_TUNSETSNDBUF TARGET_IOW('T', 212, int)
+#define TARGET_TUNSETDEBUG TARGET_IOW('T', 201, abi_int)
+#define TARGET_TUNSETIFF TARGET_IOW('T', 202, abi_int)
+#define TARGET_TUNSETPERSIST TARGET_IOW('T', 203, abi_int)
+#define TARGET_TUNSETOWNER TARGET_IOW('T', 204, abi_int)
+#define TARGET_TUNSETLINK TARGET_IOW('T', 205, abi_int)
+#define TARGET_TUNSETGROUP TARGET_IOW('T', 206, abi_int)
+#define TARGET_TUNGETFEATURES TARGET_IOR('T', 207, abi_uint)
+#define TARGET_TUNSETOFFLOAD TARGET_IOW('T', 208, abi_uint)
+#define TARGET_TUNSETTXFILTER TARGET_IOW('T', 209, abi_uint)
+#define TARGET_TUNGETIFF TARGET_IOR('T', 210, abi_uint)
+#define TARGET_TUNGETSNDBUF TARGET_IOR('T', 211, abi_int)
+#define TARGET_TUNSETSNDBUF TARGET_IOW('T', 212, abi_int)
/*
* TUNATTACHFILTER and TUNDETACHFILTER are not supported. Linux kernel keeps a
* user pointer in TUNATTACHFILTER, which we are not able to correctly handle.
*/
-#define TARGET_TUNGETVNETHDRSZ TARGET_IOR('T', 215, int)
-#define TARGET_TUNSETVNETHDRSZ TARGET_IOW('T', 216, int)
-#define TARGET_TUNSETQUEUE TARGET_IOW('T', 217, int)
-#define TARGET_TUNSETIFINDEX TARGET_IOW('T', 218, unsigned int)
+#define TARGET_TUNGETVNETHDRSZ TARGET_IOR('T', 215, abi_int)
+#define TARGET_TUNSETVNETHDRSZ TARGET_IOW('T', 216, abi_int)
+#define TARGET_TUNSETQUEUE TARGET_IOW('T', 217, abi_int)
+#define TARGET_TUNSETIFINDEX TARGET_IOW('T', 218, abi_uint)
/* TUNGETFILTER is not supported: see TUNATTACHFILTER. */
-#define TARGET_TUNSETVNETLE TARGET_IOW('T', 220, int)
-#define TARGET_TUNGETVNETLE TARGET_IOR('T', 221, int)
-#define TARGET_TUNSETVNETBE TARGET_IOW('T', 222, int)
-#define TARGET_TUNGETVNETBE TARGET_IOR('T', 223, int)
-#define TARGET_TUNSETSTEERINGEBPF TARGET_IOR('T', 224, int)
-#define TARGET_TUNSETFILTEREBPF TARGET_IOR('T', 225, int)
-#define TARGET_TUNSETCARRIER TARGET_IOW('T', 226, int)
+#define TARGET_TUNSETVNETLE TARGET_IOW('T', 220, abi_int)
+#define TARGET_TUNGETVNETLE TARGET_IOR('T', 221, abi_int)
+#define TARGET_TUNSETVNETBE TARGET_IOW('T', 222, abi_int)
+#define TARGET_TUNGETVNETBE TARGET_IOR('T', 223, abi_int)
+#define TARGET_TUNSETSTEERINGEBPF TARGET_IOR('T', 224, abi_int)
+#define TARGET_TUNSETFILTEREBPF TARGET_IOR('T', 225, abi_int)
+#define TARGET_TUNSETCARRIER TARGET_IOW('T', 226, abi_int)
#define TARGET_TUNGETDEVNETNS TARGET_IO('T', 227)
/* From <linux/random.h> */
-#define TARGET_RNDGETENTCNT TARGET_IOR('R', 0x00, int)
-#define TARGET_RNDADDTOENTCNT TARGET_IOW('R', 0x01, int)
+#define TARGET_RNDGETENTCNT TARGET_IOR('R', 0x00, abi_int)
+#define TARGET_RNDADDTOENTCNT TARGET_IOW('R', 0x01, abi_int)
#define TARGET_RNDZAPENTCNT TARGET_IO('R', 0x04)
#define TARGET_RNDCLEARPOOL TARGET_IO('R', 0x06)
#define TARGET_RNDRESEEDCRNG TARGET_IO('R', 0x07)
@@ -976,8 +908,8 @@ struct target_rtc_pll_info {
#define TARGET_BLKBSZGET TARGET_IOR(0x12, 112, abi_ulong)
#define TARGET_BLKBSZSET TARGET_IOW(0x12, 113, abi_ulong)
#define TARGET_BLKGETSIZE64 TARGET_IOR(0x12,114,abi_ulong)
- /* return device size in bytes
- (u64 *arg) */
+/* return device size in bytes
+ (u64 *arg) */
#define TARGET_BLKDISCARD TARGET_IO(0x12, 119)
#define TARGET_BLKIOMIN TARGET_IO(0x12, 120)
@@ -1008,9 +940,13 @@ struct target_rtc_pll_info {
#define TARGET_FIBMAP TARGET_IO(0x00,1) /* bmap access */
#define TARGET_FIGETBSZ TARGET_IO(0x00,2) /* get the block size used for bmap */
-#define TARGET_FICLONE TARGET_IOW(0x94, 9, int)
+#define TARGET_FICLONE TARGET_IOW(0x94, 9, abi_int)
#define TARGET_FICLONERANGE TARGET_IOW(0x94, 13, struct file_clone_range)
+#define TARGET_FIFREEZE TARGET_IOWR('X', 119, abi_int)
+#define TARGET_FITHAW TARGET_IOWR('X', 120, abi_int)
+#define TARGET_FITRIM TARGET_IOWR('X', 121, struct fstrim_range)
+
/*
* Note that the ioctl numbers for FS_IOC_<GET|SET><FLAGS|VERSION>
* claim type "long" but the actual type used by the kernel is "int".
@@ -1020,10 +956,10 @@ struct target_rtc_pll_info {
#define TARGET_FS_IOC_GETVERSION TARGET_IOR('v', 1, abi_long)
#define TARGET_FS_IOC_SETVERSION TARGET_IOW('v', 2, abi_long)
#define TARGET_FS_IOC_FIEMAP TARGET_IOWR('f',11,struct fiemap)
-#define TARGET_FS_IOC32_GETFLAGS TARGET_IOR('f', 1, int)
-#define TARGET_FS_IOC32_SETFLAGS TARGET_IOW('f', 2, int)
-#define TARGET_FS_IOC32_GETVERSION TARGET_IOR('v', 1, int)
-#define TARGET_FS_IOC32_SETVERSION TARGET_IOW('v', 2, int)
+#define TARGET_FS_IOC32_GETFLAGS TARGET_IOR('f', 1, abi_int)
+#define TARGET_FS_IOC32_SETFLAGS TARGET_IOW('f', 2, abi_int)
+#define TARGET_FS_IOC32_GETVERSION TARGET_IOR('v', 1, abi_int)
+#define TARGET_FS_IOC32_SETVERSION TARGET_IOW('v', 2, abi_int)
/* btrfs ioctls */
#ifdef HAVE_BTRFS_H
@@ -1035,11 +971,11 @@ struct target_rtc_pll_info {
#define TARGET_BTRFS_IOC_SUBVOL_CREATE TARGET_IOWU(BTRFS_IOCTL_MAGIC, 14)
#define TARGET_BTRFS_IOC_SNAP_DESTROY TARGET_IOWU(BTRFS_IOCTL_MAGIC, 15)
#define TARGET_BTRFS_IOC_INO_LOOKUP TARGET_IOWRU(BTRFS_IOCTL_MAGIC, 18)
-#define TARGET_BTRFS_IOC_DEFAULT_SUBVOL TARGET_IOW(BTRFS_IOCTL_MAGIC, 19,\
+#define TARGET_BTRFS_IOC_DEFAULT_SUBVOL TARGET_IOW(BTRFS_IOCTL_MAGIC, 19, \
abi_ullong)
-#define TARGET_BTRFS_IOC_SUBVOL_GETFLAGS TARGET_IOR(BTRFS_IOCTL_MAGIC, 25,\
+#define TARGET_BTRFS_IOC_SUBVOL_GETFLAGS TARGET_IOR(BTRFS_IOCTL_MAGIC, 25, \
abi_ullong)
-#define TARGET_BTRFS_IOC_SUBVOL_SETFLAGS TARGET_IOW(BTRFS_IOCTL_MAGIC, 26,\
+#define TARGET_BTRFS_IOC_SUBVOL_SETFLAGS TARGET_IOW(BTRFS_IOCTL_MAGIC, 26, \
abi_ullong)
#define TARGET_BTRFS_IOC_SCRUB TARGET_IOWRU(BTRFS_IOCTL_MAGIC, 27)
#define TARGET_BTRFS_IOC_SCRUB_CANCEL TARGET_IO(BTRFS_IOCTL_MAGIC, 28)
@@ -1093,56 +1029,56 @@ struct target_rtc_pll_info {
#define TARGET_USBDEVFS_GET_SPEED TARGET_IO('U', 31)
/* cdrom commands */
-#define TARGET_CDROMPAUSE 0x5301 /* Pause Audio Operation */
-#define TARGET_CDROMRESUME 0x5302 /* Resume paused Audio Operation */
-#define TARGET_CDROMPLAYMSF 0x5303 /* Play Audio MSF (struct cdrom_msf) */
-#define TARGET_CDROMPLAYTRKIND 0x5304 /* Play Audio Track/index
- (struct cdrom_ti) */
-#define TARGET_CDROMREADTOCHDR 0x5305 /* Read TOC header
- (struct cdrom_tochdr) */
-#define TARGET_CDROMREADTOCENTRY 0x5306 /* Read TOC entry
- (struct cdrom_tocentry) */
-#define TARGET_CDROMSTOP 0x5307 /* Stop the cdrom drive */
-#define TARGET_CDROMSTART 0x5308 /* Start the cdrom drive */
-#define TARGET_CDROMEJECT 0x5309 /* Ejects the cdrom media */
-#define TARGET_CDROMVOLCTRL 0x530a /* Control output volume
- (struct cdrom_volctrl) */
-#define TARGET_CDROMSUBCHNL 0x530b /* Read subchannel data
- (struct cdrom_subchnl) */
-#define TARGET_CDROMREADMODE2 0x530c /* Read TARGET_CDROM mode 2 data (2336 Bytes)
- (struct cdrom_read) */
-#define TARGET_CDROMREADMODE1 0x530d /* Read TARGET_CDROM mode 1 data (2048 Bytes)
- (struct cdrom_read) */
-#define TARGET_CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */
-#define TARGET_CDROMEJECT_SW 0x530f /* enable(1)/disable(0) auto-ejecting */
-#define TARGET_CDROMMULTISESSION 0x5310 /* Obtain the start-of-last-session
- address of multi session disks
- (struct cdrom_multisession) */
-#define TARGET_CDROM_GET_MCN 0x5311 /* Obtain the "Universal Product Code"
- if available (struct cdrom_mcn) */
-#define TARGET_CDROM_GET_UPC TARGET_CDROM_GET_MCN /* This one is deprecated,
- but here anyway for compatibility */
-#define TARGET_CDROMRESET 0x5312 /* hard-reset the drive */
-#define TARGET_CDROMVOLREAD 0x5313 /* Get the drive's volume setting
- (struct cdrom_volctrl) */
-#define TARGET_CDROMREADRAW 0x5314 /* read data in raw mode (2352 Bytes)
- (struct cdrom_read) */
+#define TARGET_CDROMPAUSE 0x5301 /* Pause Audio Operation */
+#define TARGET_CDROMRESUME 0x5302 /* Resume paused Audio Operation */
+#define TARGET_CDROMPLAYMSF 0x5303 /* Play Audio MSF (struct cdrom_msf) */
+#define TARGET_CDROMPLAYTRKIND 0x5304 /* Play Audio Track/index
+ (struct cdrom_ti) */
+#define TARGET_CDROMREADTOCHDR 0x5305 /* Read TOC header
+ (struct cdrom_tochdr) */
+#define TARGET_CDROMREADTOCENTRY 0x5306 /* Read TOC entry
+ (struct cdrom_tocentry) */
+#define TARGET_CDROMSTOP 0x5307 /* Stop the cdrom drive */
+#define TARGET_CDROMSTART 0x5308 /* Start the cdrom drive */
+#define TARGET_CDROMEJECT 0x5309 /* Ejects the cdrom media */
+#define TARGET_CDROMVOLCTRL 0x530a /* Control output volume
+ (struct cdrom_volctrl) */
+#define TARGET_CDROMSUBCHNL 0x530b /* Read subchannel data
+ (struct cdrom_subchnl) */
+#define TARGET_CDROMREADMODE2 0x530c /* Read TARGET_CDROM mode 2 data (2336 Bytes)
+ (struct cdrom_read) */
+#define TARGET_CDROMREADMODE1 0x530d /* Read TARGET_CDROM mode 1 data (2048 Bytes)
+ (struct cdrom_read) */
+#define TARGET_CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */
+#define TARGET_CDROMEJECT_SW 0x530f /* enable(1)/disable(0) auto-ejecting */
+#define TARGET_CDROMMULTISESSION 0x5310 /* Obtain the start-of-last-session
+ address of multi session disks
+ (struct cdrom_multisession) */
+#define TARGET_CDROM_GET_MCN 0x5311 /* Obtain the "Universal Product Code"
+ if available (struct cdrom_mcn) */
+#define TARGET_CDROM_GET_UPC TARGET_CDROM_GET_MCN /* This one is deprecated,
+ but here anyway for compatibility */
+#define TARGET_CDROMRESET 0x5312 /* hard-reset the drive */
+#define TARGET_CDROMVOLREAD 0x5313 /* Get the drive's volume setting
+ (struct cdrom_volctrl) */
+#define TARGET_CDROMREADRAW 0x5314 /* read data in raw mode (2352 Bytes)
+ (struct cdrom_read) */
/*
* These ioctls are used only used in aztcd.c and optcd.c
*/
-#define TARGET_CDROMREADCOOKED 0x5315 /* read data in cooked mode */
-#define TARGET_CDROMSEEK 0x5316 /* seek msf address */
+#define TARGET_CDROMREADCOOKED 0x5315 /* read data in cooked mode */
+#define TARGET_CDROMSEEK 0x5316 /* seek msf address */
/*
* This ioctl is only used by the scsi-cd driver.
- It is for playing audio in logical block addressing mode.
- */
-#define TARGET_CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */
+ It is for playing audio in logical block addressing mode.
+*/
+#define TARGET_CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */
/*
* These ioctls are only used in optcd.c
*/
-#define TARGET_CDROMREADALL 0x5318 /* read all 2646 bytes */
+#define TARGET_CDROMREADALL 0x5318 /* read all 2646 bytes */
/*
* These ioctls are (now) only in ide-cd.c for controlling
@@ -1159,35 +1095,35 @@ struct target_rtc_pll_info {
* They _will_ be adopted by all CD-ROM drivers, when all the CD-ROM
* drivers are eventually ported to the uniform CD-ROM driver interface.
*/
-#define TARGET_CDROMCLOSETRAY 0x5319 /* pendant of CDROMEJECT */
-#define TARGET_CDROM_SET_OPTIONS 0x5320 /* Set behavior options */
-#define TARGET_CDROM_CLEAR_OPTIONS 0x5321 /* Clear behavior options */
-#define TARGET_CDROM_SELECT_SPEED 0x5322 /* Set the CD-ROM speed */
-#define TARGET_CDROM_SELECT_DISC 0x5323 /* Select disc (for juke-boxes) */
-#define TARGET_CDROM_MEDIA_CHANGED 0x5325 /* Check is media changed */
-#define TARGET_CDROM_DRIVE_STATUS 0x5326 /* Get tray position, etc. */
-#define TARGET_CDROM_DISC_STATUS 0x5327 /* Get disc type, etc. */
+#define TARGET_CDROMCLOSETRAY 0x5319 /* pendant of CDROMEJECT */
+#define TARGET_CDROM_SET_OPTIONS 0x5320 /* Set behavior options */
+#define TARGET_CDROM_CLEAR_OPTIONS 0x5321 /* Clear behavior options */
+#define TARGET_CDROM_SELECT_SPEED 0x5322 /* Set the CD-ROM speed */
+#define TARGET_CDROM_SELECT_DISC 0x5323 /* Select disc (for juke-boxes) */
+#define TARGET_CDROM_MEDIA_CHANGED 0x5325 /* Check is media changed */
+#define TARGET_CDROM_DRIVE_STATUS 0x5326 /* Get tray position, etc. */
+#define TARGET_CDROM_DISC_STATUS 0x5327 /* Get disc type, etc. */
#define TARGET_CDROM_CHANGER_NSLOTS 0x5328 /* Get number of slots */
-#define TARGET_CDROM_LOCKDOOR 0x5329 /* lock or unlock door */
-#define TARGET_CDROM_DEBUG 0x5330 /* Turn debug messages on/off */
-#define TARGET_CDROM_GET_CAPABILITY 0x5331 /* get capabilities */
+#define TARGET_CDROM_LOCKDOOR 0x5329 /* lock or unlock door */
+#define TARGET_CDROM_DEBUG 0x5330 /* Turn debug messages on/off */
+#define TARGET_CDROM_GET_CAPABILITY 0x5331 /* get capabilities */
/* Note that scsi/scsi_ioctl.h also uses 0x5382 - 0x5386.
* Future CDROM ioctls should be kept below 0x537F
*/
/* This ioctl is only used by sbpcd at the moment */
-#define TARGET_CDROMAUDIOBUFSIZ 0x5382 /* set the audio buffer size */
- /* conflict with SCSI_IOCTL_GET_IDLUN */
+#define TARGET_CDROMAUDIOBUFSIZ 0x5382 /* set the audio buffer size */
+/* conflict with SCSI_IOCTL_GET_IDLUN */
/* DVD-ROM Specific ioctls */
-#define TARGET_DVD_READ_STRUCT 0x5390 /* Read structure */
-#define TARGET_DVD_WRITE_STRUCT 0x5391 /* Write structure */
-#define TARGET_DVD_AUTH 0x5392 /* Authentication */
+#define TARGET_DVD_READ_STRUCT 0x5390 /* Read structure */
+#define TARGET_DVD_WRITE_STRUCT 0x5391 /* Write structure */
+#define TARGET_DVD_AUTH 0x5392 /* Authentication */
-#define TARGET_CDROM_SEND_PACKET 0x5393 /* send a packet to the drive */
-#define TARGET_CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */
-#define TARGET_CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */
+#define TARGET_CDROM_SEND_PACKET 0x5393 /* send a packet to the drive */
+#define TARGET_CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */
+#define TARGET_CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */
/* HD commands */
@@ -1219,6 +1155,10 @@ struct target_rtc_pll_info {
#define TARGET_LOOP_SET_STATUS64 0x4C04
#define TARGET_LOOP_GET_STATUS64 0x4C05
#define TARGET_LOOP_CHANGE_FD 0x4C06
+#define TARGET_LOOP_SET_CAPACITY 0x4C07
+#define TARGET_LOOP_SET_DIRECT_IO 0x4C08
+#define TARGET_LOOP_SET_BLOCK_SIZE 0x4C09
+#define TARGET_LOOP_CONFIGURE 0x4C0A
#define TARGET_LOOP_CTL_ADD 0x4C80
#define TARGET_LOOP_CTL_REMOVE 0x4C81
@@ -1274,144 +1214,50 @@ struct target_rtc_pll_info {
#define TARGET_NCC 8
struct target_termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[TARGET_NCC]; /* control characters */
+ abi_ushort c_iflag; /* input mode flags */
+ abi_ushort c_oflag; /* output mode flags */
+ abi_ushort c_cflag; /* control mode flags */
+ abi_ushort c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCC]; /* control characters */
};
struct target_winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
+ abi_ushort ws_row;
+ abi_ushort ws_col;
+ abi_ushort ws_xpixel;
+ abi_ushort ws_ypixel;
};
#include "termbits.h"
-#if defined(TARGET_MIPS)
-#define TARGET_PROT_SEM 0x10
-#else
-#define TARGET_PROT_SEM 0x08
-#endif
-
-#ifdef TARGET_AARCH64
-#define TARGET_PROT_BTI 0x10
-#define TARGET_PROT_MTE 0x20
-#endif
-
-/* Common */
-#define TARGET_MAP_SHARED 0x01 /* Share changes */
-#define TARGET_MAP_PRIVATE 0x02 /* Changes are private */
-#if defined(TARGET_HPPA)
-#define TARGET_MAP_TYPE 0x03 /* Mask for type of mapping */
-#else
-#define TARGET_MAP_TYPE 0x0f /* Mask for type of mapping */
-#endif
-
-/* Target specific */
-#if defined(TARGET_MIPS)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x0800 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x1000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x2000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x4000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x8000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0400 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#elif defined(TARGET_PPC)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x20 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x0080 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0040 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x20000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#elif defined(TARGET_ALPHA)
-#define TARGET_MAP_ANONYMOUS 0x10 /* don't use a file */
-#define TARGET_MAP_FIXED 0x100 /* Interpret addr exactly */
-#define TARGET_MAP_GROWSDOWN 0x01000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x02000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x04000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x08000 /* lock the mapping */
-#define TARGET_MAP_NORESERVE 0x10000 /* no check for reservations */
-#define TARGET_MAP_POPULATE 0x20000 /* pop (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x40000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x80000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x100000 /* create a huge page mapping */
-#elif defined(TARGET_HPPA)
-#define TARGET_MAP_ANONYMOUS 0x10 /* don't use a file */
-#define TARGET_MAP_FIXED 0x04 /* Interpret addr exactly */
-#define TARGET_MAP_GROWSDOWN 0x08000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x00800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x01000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x02000 /* lock the mapping */
-#define TARGET_MAP_NORESERVE 0x04000 /* no check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* pop (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#elif defined(TARGET_XTENSA)
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x0800 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x1000 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x2000 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x4000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x8000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x0400 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x40000
-#define TARGET_MAP_HUGETLB 0x80000 /* create a huge page mapping */
-#else
-#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
-#define TARGET_MAP_ANONYMOUS 0x20 /* don't use a file */
-#define TARGET_MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define TARGET_MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define TARGET_MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define TARGET_MAP_LOCKED 0x2000 /* pages are locked */
-#define TARGET_MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define TARGET_MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define TARGET_MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define TARGET_MAP_STACK 0x20000 /* ignored */
-#define TARGET_MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#define TARGET_MAP_UNINITIALIZED 0x4000000 /* for anonymous mmap, memory could be uninitialized */
-#endif
+#include "target_mman.h"
-#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
- || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
+#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
+ || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
|| defined(TARGET_CRIS)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- unsigned short st_dev;
- unsigned short __pad1;
- abi_ulong st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned short __pad2;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ushort st_dev;
+ abi_ushort __pad1;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_ushort st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ushort st_rdev;
+ abi_ushort __pad2;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
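These per-target stat layouts are what the emulated stat()/fstat() family copies back into guest memory. A minimal sketch of that copy-out for the layout above, assuming a host struct stat has already been filled in and eliding many fields and all nanosecond handling; the field-by-field __put_user is what applies the abi_* widths and guest byte order:

/* Illustrative only; assumes the usual QEMU linux-user helpers
 * (lock_user_struct, __put_user) plus <sys/stat.h> and <string.h>. */
static abi_long sketch_copy_stat(abi_ulong target_addr, const struct stat *st)
{
    struct target_stat *target_st;

    if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_st, 0, sizeof(*target_st));
    __put_user(st->st_dev,   &target_st->st_dev);
    __put_user(st->st_ino,   &target_st->st_ino);
    __put_user(st->st_mode,  &target_st->st_mode);
    __put_user(st->st_nlink, &target_st->st_nlink);
    __put_user(st->st_uid,   &target_st->st_uid);
    __put_user(st->st_gid,   &target_st->st_gid);
    __put_user(st->st_size,  &target_st->st_size);
    __put_user(st->st_atime, &target_st->target_st_atime);
    __put_user(st->st_mtime, &target_st->target_st_mtime);
    __put_user(st->st_ctime, &target_st->target_st_ctime);
    unlock_user_struct(target_st, target_addr, 1);
    return 0;
}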
/* This matches struct stat64 in glibc2.1, hence the absolutely
@@ -1419,239 +1265,239 @@ struct target_stat {
*/
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned short st_dev;
- unsigned char __pad0[10];
+ abi_ushort st_dev;
+ unsigned char __pad0[10];
-#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- abi_ulong __st_ino;
+#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
+ abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned short st_rdev;
- unsigned char __pad3[10];
+ abi_ushort st_rdev;
+ unsigned char __pad3[10];
- long long st_size;
- abi_ulong st_blksize;
+ abi_llong st_size;
+ abi_ulong st_blksize;
- abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
- abi_ulong __pad4; /* future possible st_blocks high bits */
+ abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ulong __pad4; /* future possible st_blocks high bits */
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
} QEMU_PACKED;
#ifdef TARGET_ARM
#define TARGET_HAS_STRUCT_STAT64
struct target_eabi_stat64 {
- unsigned long long st_dev;
- unsigned int __pad1;
- abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_ullong st_dev;
+ abi_uint __pad1;
+ abi_ulong __st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned long long st_rdev;
- unsigned int __pad2[2];
+ abi_ullong st_rdev;
+ abi_uint __pad2[2];
- long long st_size;
- abi_ulong st_blksize;
- unsigned int __pad3;
- unsigned long long st_blocks;
+ abi_llong st_size;
+ abi_ulong st_blksize;
+ abi_uint __pad3;
+ abi_ullong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
} QEMU_PACKED;
#endif
#elif defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
struct target_stat {
- unsigned int st_dev;
- abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_rdev;
- abi_long st_size;
- abi_long target_st_atime;
- abi_long target_st_mtime;
- abi_long target_st_ctime;
- abi_long st_blksize;
- abi_long st_blocks;
- abi_ulong __unused4[2];
+ abi_uint st_dev;
+ abi_ulong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint st_rdev;
+ abi_long st_size;
+ abi_long target_st_atime;
+ abi_long target_st_mtime;
+ abi_long target_st_ctime;
+ abi_long st_blksize;
+ abi_long st_blocks;
+ abi_ulong __unused4[2];
};
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned char __pad0[6];
- unsigned short st_dev;
+ unsigned char __pad0[6];
+ abi_ushort st_dev;
- uint64_t st_ino;
- uint64_t st_nlink;
+ abi_ullong st_ino;
+ abi_ullong st_nlink;
- unsigned int st_mode;
+ abi_uint st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_uid;
+ abi_uint st_gid;
- unsigned char __pad2[6];
- unsigned short st_rdev;
+ unsigned char __pad2[6];
+ abi_ushort st_rdev;
- int64_t st_size;
- int64_t st_blksize;
+ abi_llong st_size;
+ abi_llong st_blksize;
- unsigned char __pad4[4];
- unsigned int st_blocks;
+ unsigned char __pad4[4];
+ abi_uint st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4[3];
+ abi_ulong __unused4[3];
};
#elif defined(TARGET_SPARC)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- unsigned short st_dev;
- abi_ulong st_ino;
- unsigned short st_mode;
- short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- abi_long st_size;
- abi_long target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_long target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_long target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_long st_blksize;
- abi_long st_blocks;
- abi_ulong __unused1[2];
+ abi_ushort st_dev;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_short st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ushort st_rdev;
+ abi_long st_size;
+ abi_long target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_long target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_long target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_long st_blksize;
+ abi_long st_blocks;
+ abi_ulong __unused1[2];
};
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned char __pad0[6];
- unsigned short st_dev;
+ unsigned char __pad0[6];
+ abi_ushort st_dev;
- uint64_t st_ino;
+ abi_ullong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_uid;
+ abi_uint st_gid;
- unsigned char __pad2[6];
- unsigned short st_rdev;
+ unsigned char __pad2[6];
+ abi_ushort st_rdev;
- unsigned char __pad3[8];
+ unsigned char __pad3[8];
- int64_t st_size;
- unsigned int st_blksize;
+ abi_llong st_size;
+ abi_uint st_blksize;
- unsigned char __pad4[8];
- unsigned int st_blocks;
+ unsigned char __pad4[8];
+ abi_uint st_blocks;
- unsigned int target_st_atime;
- unsigned int target_st_atime_nsec;
+ abi_uint target_st_atime;
+ abi_uint target_st_atime_nsec;
- unsigned int target_st_mtime;
- unsigned int target_st_mtime_nsec;
+ abi_uint target_st_mtime;
+ abi_uint target_st_mtime_nsec;
- unsigned int target_st_ctime;
- unsigned int target_st_ctime_nsec;
+ abi_uint target_st_ctime;
+ abi_uint target_st_ctime_nsec;
- unsigned int __unused1;
- unsigned int __unused2;
+ abi_uint __unused1;
+ abi_uint __unused2;
};
#elif defined(TARGET_PPC)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
- abi_ulong st_nlink;
- unsigned int st_mode;
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+#if defined(TARGET_PPC64)
+ abi_ulong st_nlink;
+ abi_uint st_mode;
#else
- unsigned int st_mode;
- unsigned short st_nlink;
+ abi_uint st_mode;
+ abi_ushort st_nlink;
#endif
- unsigned int st_uid;
- unsigned int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
- abi_ulong __unused6;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
+#if defined(TARGET_PPC64)
+ abi_ulong __unused6;
#endif
};
-#if !defined(TARGET_PPC64) || defined(TARGET_ABI32)
+#if !defined(TARGET_PPC64)
#define TARGET_HAS_STRUCT_STAT64
struct QEMU_PACKED target_stat64 {
- unsigned long long st_dev;
- unsigned long long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned long long st_rdev;
- unsigned long long __pad0;
- long long st_size;
- int st_blksize;
- unsigned int __pad1;
- long long st_blocks; /* Number 512-byte blocks allocated. */
- int target_st_atime;
- unsigned int target_st_atime_nsec;
- int target_st_mtime;
- unsigned int target_st_mtime_nsec;
- int target_st_ctime;
- unsigned int target_st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
+ abi_ullong st_dev;
+ abi_ullong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ullong st_rdev;
+ abi_ullong __pad0;
+ abi_llong st_size;
+ abi_int st_blksize;
+ abi_uint __pad1;
+ abi_llong st_blocks; /* Number 512-byte blocks allocated. */
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ abi_uint __unused4;
+ abi_uint __unused5;
};
#endif
@@ -1659,78 +1505,78 @@ struct QEMU_PACKED target_stat64 {
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
- unsigned int st_mode;
- unsigned short st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_uint st_mode;
+ abi_ushort st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
/* FIXME: Microblaze no-mmu user-space has a difference stat64 layout... */
#define TARGET_HAS_STRUCT_STAT64
struct QEMU_PACKED target_stat64 {
- uint64_t st_dev;
+ abi_ullong st_dev;
#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- uint32_t pad0;
- uint32_t __st_ino;
-
- uint32_t st_mode;
- uint32_t st_nlink;
- uint32_t st_uid;
- uint32_t st_gid;
- uint64_t st_rdev;
- uint64_t __pad1;
-
- int64_t st_size;
- int32_t st_blksize;
- uint32_t __pad2;
- int64_t st_blocks; /* Number 512-byte blocks allocated. */
-
- int target_st_atime;
- unsigned int target_st_atime_nsec;
- int target_st_mtime;
- unsigned int target_st_mtime_nsec;
- int target_st_ctime;
- unsigned int target_st_ctime_nsec;
- uint64_t st_ino;
+ abi_uint pad0;
+ abi_uint __st_ino;
+
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ullong st_rdev;
+ abi_ullong __pad1;
+
+ abi_llong st_size;
+ abi_int st_blksize;
+ abi_uint __pad2;
+ abi_llong st_blocks;
+
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ abi_ullong st_ino;
};
#elif defined(TARGET_M68K)
struct target_stat {
- unsigned short st_dev;
- unsigned short __pad1;
- abi_ulong st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned short __pad2;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong __unused1;
- abi_ulong target_st_mtime;
- abi_ulong __unused2;
- abi_ulong target_st_ctime;
- abi_ulong __unused3;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ushort st_dev;
+ abi_ushort __pad1;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_ushort st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ushort st_rdev;
+ abi_ushort __pad2;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong __unused1;
+ abi_ulong target_st_mtime;
+ abi_ulong __unused2;
+ abi_ulong target_st_ctime;
+ abi_ulong __unused3;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
@@ -1738,37 +1584,37 @@ struct target_stat {
*/
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- unsigned long long st_dev;
- unsigned char __pad1[2];
+ abi_ullong st_dev;
+ unsigned char __pad1[2];
-#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- abi_ulong __st_ino;
+#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
+ abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned long long st_rdev;
- unsigned char __pad3[2];
+ abi_ullong st_rdev;
+ unsigned char __pad3[2];
- long long st_size;
- abi_ulong st_blksize;
+ abi_llong st_size;
+ abi_ulong st_blksize;
- abi_ulong __pad4; /* future possible st_blocks high bits */
- abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ulong __pad4; /* future possible st_blocks high bits */
+ abi_ulong st_blocks; /* Number 512-byte blocks allocated. */
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
} QEMU_PACKED;
#elif defined(TARGET_ABI_MIPSN64)
@@ -1776,94 +1622,94 @@ struct target_stat64 {
#define TARGET_STAT_HAVE_NSEC
/* The memory layout is the same as of struct stat64 of the 32-bit kernel. */
struct target_stat {
- unsigned int st_dev;
- unsigned int st_pad0[3]; /* Reserved for st_dev expansion */
+ abi_uint st_dev;
+ abi_uint st_pad0[3]; /* Reserved for st_dev expansion */
- abi_ulong st_ino;
+ abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- int st_uid;
- int st_gid;
+ abi_int st_uid;
+ abi_int st_gid;
- unsigned int st_rdev;
- unsigned int st_pad1[3]; /* Reserved for st_rdev expansion */
+ abi_uint st_rdev;
+ abi_uint st_pad1[3]; /* Reserved for st_rdev expansion */
- abi_ulong st_size;
+ abi_ulong st_size;
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- unsigned int target_st_atime;
- unsigned int target_st_atime_nsec;
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ abi_uint target_st_atime;
+ abi_uint target_st_atime_nsec;
- unsigned int target_st_mtime;
- unsigned int target_st_mtime_nsec;
+ abi_uint target_st_mtime;
+ abi_uint target_st_mtime_nsec;
- unsigned int target_st_ctime;
- unsigned int target_st_ctime_nsec;
+ abi_uint target_st_ctime;
+ abi_uint target_st_ctime_nsec;
- unsigned int st_blksize;
- unsigned int st_pad2;
+ abi_uint st_blksize;
+ abi_uint st_pad2;
- abi_ulong st_blocks;
+ abi_ulong st_blocks;
};
#elif defined(TARGET_ABI_MIPSN32)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
- uint64_t st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- int st_uid;
- int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
- int64_t st_size;
- abi_long target_st_atime;
- abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
- abi_long target_st_mtime;
- abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
- abi_long target_st_ctime;
- abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
- abi_ulong st_blksize;
- abi_ulong st_pad2;
- int64_t st_blocks;
+ abi_ulong st_dev;
+ abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
+ abi_ullong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_int st_uid;
+ abi_int st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
+ abi_llong st_size;
+ abi_long target_st_atime;
+ abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
+ abi_long target_st_mtime;
+ abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
+ abi_long target_st_ctime;
+ abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
+ abi_ulong st_blksize;
+ abi_ulong st_pad2;
+ abi_llong st_blocks;
};
#elif defined(TARGET_ABI_MIPSO32)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- unsigned st_dev;
- abi_long st_pad1[3]; /* Reserved for network id */
- abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- int st_uid;
- int st_gid;
- unsigned st_rdev;
- abi_long st_pad2[2];
- abi_long st_size;
- abi_long st_pad3;
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- abi_long target_st_atime;
- abi_long target_st_atime_nsec;
- abi_long target_st_mtime;
- abi_long target_st_mtime_nsec;
- abi_long target_st_ctime;
- abi_long target_st_ctime_nsec;
- abi_long st_blksize;
- abi_long st_blocks;
- abi_long st_pad4[14];
+ abi_uint st_dev;
+ abi_long st_pad1[3]; /* Reserved for network id */
+ abi_ulong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_int st_uid;
+ abi_int st_gid;
+ abi_uint st_rdev;
+ abi_long st_pad2[2];
+ abi_long st_size;
+ abi_long st_pad3;
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ abi_long target_st_atime;
+ abi_long target_st_atime_nsec;
+ abi_long target_st_mtime;
+ abi_long target_st_mtime_nsec;
+ abi_long target_st_ctime;
+ abi_long target_st_ctime_nsec;
+ abi_long st_blksize;
+ abi_long st_blocks;
+ abi_long st_pad4[14];
};
/*
@@ -1874,107 +1720,107 @@ struct target_stat {
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- abi_ulong st_dev;
- abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
+ abi_ulong st_dev;
+ abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
- uint64_t st_ino;
+ abi_ullong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- int st_uid;
- int st_gid;
+ abi_int st_uid;
+ abi_int st_gid;
- abi_ulong st_rdev;
- abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
+ abi_ulong st_rdev;
+ abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
- int64_t st_size;
+ abi_llong st_size;
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- abi_long target_st_atime;
- abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ abi_long target_st_atime;
+ abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
- abi_long target_st_mtime;
- abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
+ abi_long target_st_mtime;
+ abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
- abi_long target_st_ctime;
- abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
+ abi_long target_st_ctime;
+ abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
- abi_ulong st_blksize;
- abi_ulong st_pad2;
+ abi_ulong st_blksize;
+ abi_ulong st_pad2;
- int64_t st_blocks;
+ abi_llong st_blocks;
};
#elif defined(TARGET_ALPHA)
struct target_stat {
- unsigned int st_dev;
- unsigned int st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_rdev;
- abi_long st_size;
- abi_ulong target_st_atime;
- abi_ulong target_st_mtime;
- abi_ulong target_st_ctime;
- unsigned int st_blksize;
- unsigned int st_blocks;
- unsigned int st_flags;
- unsigned int st_gen;
+ abi_uint st_dev;
+ abi_uint st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint st_rdev;
+ abi_long st_size;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_ctime;
+ abi_uint st_blksize;
+ abi_uint st_blocks;
+ abi_uint st_flags;
+ abi_uint st_gen;
};
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- abi_ulong st_dev;
- abi_ulong st_ino;
- abi_ulong st_rdev;
- abi_long st_size;
- abi_ulong st_blocks;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_blksize;
- unsigned int st_nlink;
- unsigned int __pad0;
-
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_long __unused[3];
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ulong st_rdev;
+ abi_long st_size;
+ abi_ulong st_blocks;
+
+ abi_uint st_mode;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint st_blksize;
+ abi_uint st_nlink;
+ abi_uint __pad0;
+
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_long __unused[3];
};
#elif defined(TARGET_SH4)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- abi_ulong st_rdev;
- abi_ulong st_size;
- abi_ulong st_blksize;
- abi_ulong st_blocks;
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
- abi_ulong __unused4;
- abi_ulong __unused5;
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ushort st_mode;
+ abi_ushort st_nlink;
+ abi_ushort st_uid;
+ abi_ushort st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_size;
+ abi_ulong st_blksize;
+ abi_ulong st_blocks;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong __unused4;
+ abi_ulong __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
@@ -1982,72 +1828,72 @@ struct target_stat {
*/
#define TARGET_HAS_STRUCT_STAT64
struct QEMU_PACKED target_stat64 {
- unsigned long long st_dev;
- unsigned char __pad0[4];
+ abi_ullong st_dev;
+ unsigned char __pad0[4];
-#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
- abi_ulong __st_ino;
+#define TARGET_STAT64_HAS_BROKEN_ST_INO 1
+ abi_ulong __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
+ abi_uint st_mode;
+ abi_uint st_nlink;
- abi_ulong st_uid;
- abi_ulong st_gid;
+ abi_ulong st_uid;
+ abi_ulong st_gid;
- unsigned long long st_rdev;
- unsigned char __pad3[4];
+ abi_ullong st_rdev;
+ unsigned char __pad3[4];
- long long st_size;
- abi_ulong st_blksize;
+ abi_llong st_size;
+ abi_ulong st_blksize;
- unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ullong st_blocks; /* Number 512-byte blocks allocated. */
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
- unsigned long long st_ino;
+ abi_ullong st_ino;
};
#elif defined(TARGET_I386) && !defined(TARGET_ABI32)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
- abi_ulong st_dev;
- abi_ulong st_ino;
- abi_ulong st_nlink;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad0;
- abi_ulong st_rdev;
- abi_long st_size;
- abi_long st_blksize;
- abi_long st_blocks; /* Number 512-byte blocks allocated. */
-
- abi_ulong target_st_atime;
- abi_ulong target_st_atime_nsec;
- abi_ulong target_st_mtime;
- abi_ulong target_st_mtime_nsec;
- abi_ulong target_st_ctime;
- abi_ulong target_st_ctime_nsec;
-
- abi_long __unused[3];
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ulong st_nlink;
+
+ abi_uint st_mode;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint __pad0;
+ abi_ulong st_rdev;
+ abi_long st_size;
+ abi_long st_blksize;
+ abi_long st_blocks; /* Number 512-byte blocks allocated. */
+
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+
+ abi_long __unused[3];
};
#elif defined(TARGET_S390X)
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
abi_ulong st_nlink;
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad1;
+ abi_uint st_mode;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint __pad1;
abi_ulong st_rdev;
abi_ulong st_size;
abi_ulong target_st_atime;
@@ -2065,15 +1911,15 @@ struct target_stat {
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
abi_ulong st_rdev;
abi_ulong _pad1;
abi_long st_size;
- int st_blksize;
- int __pad2;
+ abi_int st_blksize;
+ abi_int __pad2;
abi_long st_blocks;
abi_long target_st_atime;
abi_ulong target_st_atime_nsec;
@@ -2081,17 +1927,17 @@ struct target_stat {
abi_ulong target_st_mtime_nsec;
abi_long target_st_ctime;
abi_ulong target_st_ctime_nsec;
- unsigned int __unused[2];
+ abi_uint __unused[2];
};
#elif defined(TARGET_XTENSA)
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
abi_ulong st_rdev;
abi_long st_size;
abi_ulong st_blksize;
@@ -2108,17 +1954,17 @@ struct target_stat {
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- uint64_t st_dev; /* Device */
- uint64_t st_ino; /* File serial number */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- uint64_t st_rdev; /* Device number, if device. */
- int64_t st_size; /* Size of file, in bytes. */
+ abi_ullong st_dev; /* Device */
+ abi_ullong st_ino; /* File serial number */
+ abi_uint st_mode; /* File mode. */
+ abi_uint st_nlink; /* Link count. */
+ abi_uint st_uid; /* User ID of the file's owner. */
+ abi_uint st_gid; /* Group ID of the file's group. */
+ abi_ullong st_rdev; /* Device number, if device. */
+ abi_llong st_size; /* Size of file, in bytes. */
abi_ulong st_blksize; /* Optimal block size for I/O. */
abi_ulong __unused2;
- uint64_t st_blocks; /* Number 512-byte blocks allocated. */
+ abi_ullong st_blocks; /* Number 512-byte blocks allocated. */
abi_ulong target_st_atime; /* Time of last access. */
abi_ulong target_st_atime_nsec;
abi_ulong target_st_mtime; /* Time of last modification. */
@@ -2129,7 +1975,8 @@ struct target_stat64 {
abi_ulong __unused5;
};
-#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV)
+#elif defined(TARGET_OPENRISC) \
+ || defined(TARGET_RISCV) || defined(TARGET_HEXAGON)
/* These are the asm-generic versions of the stat and stat64 structures */
@@ -2137,15 +1984,15 @@ struct target_stat64 {
struct target_stat {
abi_ulong st_dev;
abi_ulong st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
abi_ulong st_rdev;
abi_ulong __pad1;
abi_long st_size;
- int st_blksize;
- int __pad2;
+ abi_int st_blksize;
+ abi_int __pad2;
abi_long st_blocks;
abi_long target_st_atime;
abi_ulong target_st_atime_nsec;
@@ -2153,33 +2000,33 @@ struct target_stat {
abi_ulong target_st_mtime_nsec;
abi_long target_st_ctime;
abi_ulong target_st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
+ abi_uint __unused4;
+ abi_uint __unused5;
};
#if !defined(TARGET_RISCV64)
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- uint64_t st_dev;
- uint64_t st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- uint64_t st_rdev;
- uint64_t __pad1;
- int64_t st_size;
- int st_blksize;
- int __pad2;
- int64_t st_blocks;
- int target_st_atime;
- unsigned int target_st_atime_nsec;
- int target_st_mtime;
- unsigned int target_st_mtime_nsec;
- int target_st_ctime;
- unsigned int target_st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
+ abi_ullong st_dev;
+ abi_ullong st_ino;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_ullong st_rdev;
+ abi_ullong __pad1;
+ abi_llong st_size;
+ abi_int st_blksize;
+ abi_int __pad2;
+ abi_llong st_blocks;
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ abi_uint __unused4;
+ abi_uint __unused5;
};
#endif
@@ -2219,204 +2066,184 @@ struct target_stat {
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
- uint64_t st_dev;
+ abi_ullong st_dev;
abi_uint _pad1;
abi_uint _res1;
abi_uint st_mode;
abi_uint st_nlink;
abi_uint st_uid;
abi_uint st_gid;
- uint64_t st_rdev;
+ abi_ullong st_rdev;
abi_uint _pad2;
- int64_t st_size;
+ abi_llong st_size;
abi_int st_blksize;
- int64_t st_blocks;
+ abi_llong st_blocks;
abi_int target_st_atime;
abi_uint target_st_atime_nsec;
abi_int target_st_mtime;
abi_uint target_st_mtime_nsec;
abi_int target_st_ctime;
abi_uint target_st_ctime_nsec;
- uint64_t st_ino;
+ abi_ullong st_ino;
};
-#elif defined(TARGET_HEXAGON)
+#elif defined(TARGET_LOONGARCH64)
-struct target_stat {
- unsigned long long st_dev;
- unsigned long long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned long long st_rdev;
- target_ulong __pad1;
- long long st_size;
- target_long st_blksize;
- int __pad2;
- long long st_blocks;
-
- target_long target_st_atime;
- target_long target_st_atime_nsec;
- target_long target_st_mtime;
- target_long target_st_mtime_nsec;
- target_long target_st_ctime;
- target_long target_st_ctime_nsec;
- int __unused[2];
-};
+/* LoongArch has no newfstatat/fstat syscalls. */
#else
#error unsupported CPU
#endif
typedef struct {
- int val[2];
+ abi_int val[2];
} target_fsid_t;
#ifdef TARGET_MIPS
#ifdef TARGET_ABI_MIPSN32
struct target_statfs {
- int32_t f_type;
- int32_t f_bsize;
- int32_t f_frsize; /* Fragment size - unsupported */
- int32_t f_blocks;
- int32_t f_bfree;
- int32_t f_files;
- int32_t f_ffree;
- int32_t f_bavail;
-
- /* Linux specials */
- target_fsid_t f_fsid;
- int32_t f_namelen;
- int32_t f_flags;
- int32_t f_spare[5];
+ abi_int f_type;
+ abi_int f_bsize;
+ abi_int f_frsize; /* Fragment size - unsupported */
+ abi_int f_blocks;
+ abi_int f_bfree;
+ abi_int f_files;
+ abi_int f_ffree;
+ abi_int f_bavail;
+
+ /* Linux specials */
+ target_fsid_t f_fsid;
+ abi_int f_namelen;
+ abi_int f_flags;
+ abi_int f_spare[5];
};
#else
struct target_statfs {
- abi_long f_type;
- abi_long f_bsize;
- abi_long f_frsize; /* Fragment size - unsupported */
- abi_long f_blocks;
- abi_long f_bfree;
- abi_long f_files;
- abi_long f_ffree;
- abi_long f_bavail;
-
- /* Linux specials */
- target_fsid_t f_fsid;
- abi_long f_namelen;
- abi_long f_flags;
- abi_long f_spare[5];
+ abi_long f_type;
+ abi_long f_bsize;
+ abi_long f_frsize; /* Fragment size - unsupported */
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_files;
+ abi_long f_ffree;
+ abi_long f_bavail;
+
+ /* Linux specials */
+ target_fsid_t f_fsid;
+ abi_long f_namelen;
+ abi_long f_flags;
+ abi_long f_spare[5];
};
#endif
struct target_statfs64 {
- uint32_t f_type;
- uint32_t f_bsize;
- uint32_t f_frsize; /* Fragment size - unsupported */
- uint32_t __pad;
- uint64_t f_blocks;
- uint64_t f_bfree;
- uint64_t f_files;
- uint64_t f_ffree;
- uint64_t f_bavail;
- target_fsid_t f_fsid;
- uint32_t f_namelen;
- uint32_t f_flags;
- uint32_t f_spare[5];
-};
-#elif (defined(TARGET_PPC64) || defined(TARGET_X86_64) || \
- defined(TARGET_SPARC64) || defined(TARGET_AARCH64) || \
- defined(TARGET_RISCV)) && !defined(TARGET_ABI32)
+ abi_uint f_type;
+ abi_uint f_bsize;
+ abi_uint f_frsize; /* Fragment size - unsupported */
+ abi_uint __pad;
+ abi_ullong f_blocks;
+ abi_ullong f_bfree;
+ abi_ullong f_files;
+ abi_ullong f_ffree;
+ abi_ullong f_bavail;
+ target_fsid_t f_fsid;
+ abi_uint f_namelen;
+ abi_uint f_flags;
+ abi_uint f_spare[5];
+};
+#elif (defined(TARGET_PPC64) || defined(TARGET_X86_64) || \
+ defined(TARGET_SPARC64) || defined(TARGET_AARCH64) || \
+ defined(TARGET_RISCV) || defined(TARGET_LOONGARCH64)) && \
+ !defined(TARGET_ABI32)
struct target_statfs {
- abi_long f_type;
- abi_long f_bsize;
- abi_long f_blocks;
- abi_long f_bfree;
- abi_long f_bavail;
- abi_long f_files;
- abi_long f_ffree;
- target_fsid_t f_fsid;
- abi_long f_namelen;
- abi_long f_frsize;
- abi_long f_flags;
- abi_long f_spare[4];
+ abi_long f_type;
+ abi_long f_bsize;
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_bavail;
+ abi_long f_files;
+ abi_long f_ffree;
+ target_fsid_t f_fsid;
+ abi_long f_namelen;
+ abi_long f_frsize;
+ abi_long f_flags;
+ abi_long f_spare[4];
};
struct target_statfs64 {
- abi_long f_type;
- abi_long f_bsize;
- abi_long f_blocks;
- abi_long f_bfree;
- abi_long f_bavail;
- abi_long f_files;
- abi_long f_ffree;
- target_fsid_t f_fsid;
- abi_long f_namelen;
- abi_long f_frsize;
- abi_long f_flags;
- abi_long f_spare[4];
+ abi_long f_type;
+ abi_long f_bsize;
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_bavail;
+ abi_long f_files;
+ abi_long f_ffree;
+ target_fsid_t f_fsid;
+ abi_long f_namelen;
+ abi_long f_frsize;
+ abi_long f_flags;
+ abi_long f_spare[4];
};
#elif defined(TARGET_S390X)
struct target_statfs {
- int32_t f_type;
- int32_t f_bsize;
+ abi_int f_type;
+ abi_int f_bsize;
abi_long f_blocks;
abi_long f_bfree;
abi_long f_bavail;
abi_long f_files;
abi_long f_ffree;
kernel_fsid_t f_fsid;
- int32_t f_namelen;
- int32_t f_frsize;
- int32_t f_flags;
- int32_t f_spare[4];
+ abi_int f_namelen;
+ abi_int f_frsize;
+ abi_int f_flags;
+ abi_int f_spare[4];
};
struct target_statfs64 {
- int32_t f_type;
- int32_t f_bsize;
+ abi_int f_type;
+ abi_int f_bsize;
abi_long f_blocks;
abi_long f_bfree;
abi_long f_bavail;
abi_long f_files;
abi_long f_ffree;
kernel_fsid_t f_fsid;
- int32_t f_namelen;
- int32_t f_frsize;
- int32_t f_flags;
- int32_t f_spare[4];
+ abi_int f_namelen;
+ abi_int f_frsize;
+ abi_int f_flags;
+ abi_int f_spare[4];
};
#else
struct target_statfs {
- uint32_t f_type;
- uint32_t f_bsize;
- uint32_t f_blocks;
- uint32_t f_bfree;
- uint32_t f_bavail;
- uint32_t f_files;
- uint32_t f_ffree;
- target_fsid_t f_fsid;
- uint32_t f_namelen;
- uint32_t f_frsize;
- uint32_t f_flags;
- uint32_t f_spare[4];
+ abi_uint f_type;
+ abi_uint f_bsize;
+ abi_uint f_blocks;
+ abi_uint f_bfree;
+ abi_uint f_bavail;
+ abi_uint f_files;
+ abi_uint f_ffree;
+ target_fsid_t f_fsid;
+ abi_uint f_namelen;
+ abi_uint f_frsize;
+ abi_uint f_flags;
+ abi_uint f_spare[4];
};
struct target_statfs64 {
- uint32_t f_type;
- uint32_t f_bsize;
- uint64_t f_blocks;
- uint64_t f_bfree;
- uint64_t f_bavail;
- uint64_t f_files;
- uint64_t f_ffree;
- target_fsid_t f_fsid;
- uint32_t f_namelen;
- uint32_t f_frsize;
- uint32_t f_flags;
- uint32_t f_spare[4];
+ abi_uint f_type;
+ abi_uint f_bsize;
+ abi_ullong f_blocks;
+ abi_ullong f_bfree;
+ abi_ullong f_bavail;
+ abi_ullong f_files;
+ abi_ullong f_ffree;
+ target_fsid_t f_fsid;
+ abi_uint f_namelen;
+ abi_uint f_frsize;
+ abi_uint f_flags;
+ abi_uint f_spare[4];
};
#endif
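
The conversions above from host C types (int, unsigned int, uint64_t) to QEMU's abi_* typedefs matter because the abi_* types carry the guest ABI's size and alignment, so a target_stat or target_statfs compiled into QEMU lays out exactly as the guest kernel expects even when host and guest alignment rules differ. The classic case is a 64-bit field, which is 8-byte aligned on an x86_64 host but only 4-byte aligned in the i386 guest ABI. The stand-alone sketch below is illustrative only (the real typedefs live in QEMU's abitypes.h, not in this patch); it just shows the layout difference that motivates the change.

    /* Illustrative sketch, not part of the patch: approximate what
     * abi_ullong does for a 32-bit guest whose ABI aligns 64-bit
     * scalars to 4 bytes (e.g. i386), versus plain uint64_t, which
     * takes the host's 8-byte alignment on an LP64 host. */
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef uint64_t guest_ullong __attribute__((aligned(4)));

    struct with_host_types {        /* as if "uint64_t st_ino;" */
        uint32_t st_dev;
        uint64_t st_ino;
    };

    struct with_abi_types {         /* as if "abi_ullong st_ino;" */
        uint32_t st_dev;
        guest_ullong st_ino;
    };

    int main(void)
    {
        /* On an LP64 host this prints 8 and 4; only the second layout
         * matches what the 32-bit guest expects. */
        printf("host-typed offset %zu, abi-typed offset %zu\n",
               offsetof(struct with_host_types, st_ino),
               offsetof(struct with_abi_types, st_ino));
        return 0;
    }
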
@@ -2434,7 +2261,7 @@ struct target_statfs64 {
/* soundcard defines */
/* XXX: convert them all to arch independent entries */
-#define TARGET_SNDCTL_COPR_HALT TARGET_IOWR('C', 7, int);
+#define TARGET_SNDCTL_COPR_HALT TARGET_IOWR('C', 7, abi_int);
#define TARGET_SNDCTL_COPR_LOAD 0xcfb04301
#define TARGET_SNDCTL_COPR_RCODE 0xc0144303
#define TARGET_SNDCTL_COPR_RCVMSG 0x8fa44309
@@ -2446,20 +2273,20 @@ struct target_statfs64 {
#define TARGET_SNDCTL_COPR_WDATA 0x40144304
#define TARGET_SNDCTL_DSP_RESET TARGET_IO('P', 0)
#define TARGET_SNDCTL_DSP_SYNC TARGET_IO('P', 1)
-#define TARGET_SNDCTL_DSP_SPEED TARGET_IOWR('P', 2, int)
-#define TARGET_SNDCTL_DSP_STEREO TARGET_IOWR('P', 3, int)
-#define TARGET_SNDCTL_DSP_GETBLKSIZE TARGET_IOWR('P', 4, int)
-#define TARGET_SNDCTL_DSP_SETFMT TARGET_IOWR('P', 5, int)
-#define TARGET_SNDCTL_DSP_CHANNELS TARGET_IOWR('P', 6, int)
-#define TARGET_SOUND_PCM_WRITE_FILTER TARGET_IOWR('P', 7, int)
+#define TARGET_SNDCTL_DSP_SPEED TARGET_IOWR('P', 2, abi_int)
+#define TARGET_SNDCTL_DSP_STEREO TARGET_IOWR('P', 3, abi_int)
+#define TARGET_SNDCTL_DSP_GETBLKSIZE TARGET_IOWR('P', 4, abi_int)
+#define TARGET_SNDCTL_DSP_SETFMT TARGET_IOWR('P', 5, abi_int)
+#define TARGET_SNDCTL_DSP_CHANNELS TARGET_IOWR('P', 6, abi_int)
+#define TARGET_SOUND_PCM_WRITE_FILTER TARGET_IOWR('P', 7, abi_int)
#define TARGET_SNDCTL_DSP_POST TARGET_IO('P', 8)
-#define TARGET_SNDCTL_DSP_SUBDIVIDE TARGET_IOWR('P', 9, int)
-#define TARGET_SNDCTL_DSP_SETFRAGMENT TARGET_IOWR('P',10, int)
-#define TARGET_SNDCTL_DSP_GETFMTS TARGET_IOR('P', 11, int)
+#define TARGET_SNDCTL_DSP_SUBDIVIDE TARGET_IOWR('P', 9, abi_int)
+#define TARGET_SNDCTL_DSP_SETFRAGMENT TARGET_IOWR('P',10, abi_int)
+#define TARGET_SNDCTL_DSP_GETFMTS TARGET_IOR('P', 11, abi_int)
#define TARGET_SNDCTL_DSP_GETOSPACE TARGET_IORU('P',12)
#define TARGET_SNDCTL_DSP_GETISPACE TARGET_IORU('P',13)
-#define TARGET_SNDCTL_DSP_GETCAPS TARGET_IOR('P', 15, int)
-#define TARGET_SNDCTL_DSP_GETTRIGGER TARGET_IOR('P',16, int)
+#define TARGET_SNDCTL_DSP_GETCAPS TARGET_IOR('P', 15, abi_int)
+#define TARGET_SNDCTL_DSP_GETTRIGGER TARGET_IOR('P',16, abi_int)
#define TARGET_SNDCTL_DSP_GETIPTR TARGET_IORU('P',17)
#define TARGET_SNDCTL_DSP_GETOPTR TARGET_IORU('P',18)
#define TARGET_SNDCTL_DSP_MAPINBUF TARGET_IORU('P', 19)
@@ -2507,89 +2334,89 @@ struct target_statfs64 {
#define TARGET_SOUND_PCM_READ_FILTER 0x80045007
#define TARGET_SOUND_MIXER_INFO TARGET_IOR ('M', 101, mixer_info)
#define TARGET_SOUND_MIXER_ACCESS 0xc0804d66
-#define TARGET_SOUND_MIXER_PRIVATE1 TARGET_IOWR('M', 111, int)
-#define TARGET_SOUND_MIXER_PRIVATE2 TARGET_IOWR('M', 112, int)
-#define TARGET_SOUND_MIXER_PRIVATE3 TARGET_IOWR('M', 113, int)
-#define TARGET_SOUND_MIXER_PRIVATE4 TARGET_IOWR('M', 114, int)
-#define TARGET_SOUND_MIXER_PRIVATE5 TARGET_IOWR('M', 115, int)
-
-#define TARGET_MIXER_READ(dev) TARGET_IOR('M', dev, int)
-
-#define TARGET_SOUND_MIXER_READ_VOLUME TARGET_MIXER_READ(SOUND_MIXER_VOLUME)
-#define TARGET_SOUND_MIXER_READ_BASS TARGET_MIXER_READ(SOUND_MIXER_BASS)
-#define TARGET_SOUND_MIXER_READ_TREBLE TARGET_MIXER_READ(SOUND_MIXER_TREBLE)
-#define TARGET_SOUND_MIXER_READ_SYNTH TARGET_MIXER_READ(SOUND_MIXER_SYNTH)
-#define TARGET_SOUND_MIXER_READ_PCM TARGET_MIXER_READ(SOUND_MIXER_PCM)
-#define TARGET_SOUND_MIXER_READ_SPEAKER TARGET_MIXER_READ(SOUND_MIXER_SPEAKER)
-#define TARGET_SOUND_MIXER_READ_LINE TARGET_MIXER_READ(SOUND_MIXER_LINE)
-#define TARGET_SOUND_MIXER_READ_MIC TARGET_MIXER_READ(SOUND_MIXER_MIC)
-#define TARGET_SOUND_MIXER_READ_CD TARGET_MIXER_READ(SOUND_MIXER_CD)
-#define TARGET_SOUND_MIXER_READ_IMIX TARGET_MIXER_READ(SOUND_MIXER_IMIX)
-#define TARGET_SOUND_MIXER_READ_ALTPCM TARGET_MIXER_READ(SOUND_MIXER_ALTPCM)
-#define TARGET_SOUND_MIXER_READ_RECLEV TARGET_MIXER_READ(SOUND_MIXER_RECLEV)
-#define TARGET_SOUND_MIXER_READ_IGAIN TARGET_MIXER_READ(SOUND_MIXER_IGAIN)
-#define TARGET_SOUND_MIXER_READ_OGAIN TARGET_MIXER_READ(SOUND_MIXER_OGAIN)
-#define TARGET_SOUND_MIXER_READ_LINE1 TARGET_MIXER_READ(SOUND_MIXER_LINE1)
-#define TARGET_SOUND_MIXER_READ_LINE2 TARGET_MIXER_READ(SOUND_MIXER_LINE2)
-#define TARGET_SOUND_MIXER_READ_LINE3 TARGET_MIXER_READ(SOUND_MIXER_LINE3)
+#define TARGET_SOUND_MIXER_PRIVATE1 TARGET_IOWR('M', 111, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE2 TARGET_IOWR('M', 112, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE3 TARGET_IOWR('M', 113, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE4 TARGET_IOWR('M', 114, abi_int)
+#define TARGET_SOUND_MIXER_PRIVATE5 TARGET_IOWR('M', 115, abi_int)
+
+#define TARGET_MIXER_READ(dev) TARGET_IOR('M', dev, abi_int)
+
+#define TARGET_SOUND_MIXER_READ_VOLUME TARGET_MIXER_READ(SOUND_MIXER_VOLUME)
+#define TARGET_SOUND_MIXER_READ_BASS TARGET_MIXER_READ(SOUND_MIXER_BASS)
+#define TARGET_SOUND_MIXER_READ_TREBLE TARGET_MIXER_READ(SOUND_MIXER_TREBLE)
+#define TARGET_SOUND_MIXER_READ_SYNTH TARGET_MIXER_READ(SOUND_MIXER_SYNTH)
+#define TARGET_SOUND_MIXER_READ_PCM TARGET_MIXER_READ(SOUND_MIXER_PCM)
+#define TARGET_SOUND_MIXER_READ_SPEAKER TARGET_MIXER_READ(SOUND_MIXER_SPEAKER)
+#define TARGET_SOUND_MIXER_READ_LINE TARGET_MIXER_READ(SOUND_MIXER_LINE)
+#define TARGET_SOUND_MIXER_READ_MIC TARGET_MIXER_READ(SOUND_MIXER_MIC)
+#define TARGET_SOUND_MIXER_READ_CD TARGET_MIXER_READ(SOUND_MIXER_CD)
+#define TARGET_SOUND_MIXER_READ_IMIX TARGET_MIXER_READ(SOUND_MIXER_IMIX)
+#define TARGET_SOUND_MIXER_READ_ALTPCM TARGET_MIXER_READ(SOUND_MIXER_ALTPCM)
+#define TARGET_SOUND_MIXER_READ_RECLEV TARGET_MIXER_READ(SOUND_MIXER_RECLEV)
+#define TARGET_SOUND_MIXER_READ_IGAIN TARGET_MIXER_READ(SOUND_MIXER_IGAIN)
+#define TARGET_SOUND_MIXER_READ_OGAIN TARGET_MIXER_READ(SOUND_MIXER_OGAIN)
+#define TARGET_SOUND_MIXER_READ_LINE1 TARGET_MIXER_READ(SOUND_MIXER_LINE1)
+#define TARGET_SOUND_MIXER_READ_LINE2 TARGET_MIXER_READ(SOUND_MIXER_LINE2)
+#define TARGET_SOUND_MIXER_READ_LINE3 TARGET_MIXER_READ(SOUND_MIXER_LINE3)
/* Obsolete macros */
-#define TARGET_SOUND_MIXER_READ_MUTE TARGET_MIXER_READ(SOUND_MIXER_MUTE)
-#define TARGET_SOUND_MIXER_READ_ENHANCE TARGET_MIXER_READ(SOUND_MIXER_ENHANCE)
-#define TARGET_SOUND_MIXER_READ_LOUD TARGET_MIXER_READ(SOUND_MIXER_LOUD)
-
-#define TARGET_SOUND_MIXER_READ_RECSRC TARGET_MIXER_READ(SOUND_MIXER_RECSRC)
-#define TARGET_SOUND_MIXER_READ_DEVMASK TARGET_MIXER_READ(SOUND_MIXER_DEVMASK)
-#define TARGET_SOUND_MIXER_READ_RECMASK TARGET_MIXER_READ(SOUND_MIXER_RECMASK)
-#define TARGET_SOUND_MIXER_READ_STEREODEVS TARGET_MIXER_READ(SOUND_MIXER_STEREODEVS)
-#define TARGET_SOUND_MIXER_READ_CAPS TARGET_MIXER_READ(SOUND_MIXER_CAPS)
-
-#define TARGET_MIXER_WRITE(dev) TARGET_IOWR('M', dev, int)
-
-#define TARGET_SOUND_MIXER_WRITE_VOLUME TARGET_MIXER_WRITE(SOUND_MIXER_VOLUME)
-#define TARGET_SOUND_MIXER_WRITE_BASS TARGET_MIXER_WRITE(SOUND_MIXER_BASS)
-#define TARGET_SOUND_MIXER_WRITE_TREBLE TARGET_MIXER_WRITE(SOUND_MIXER_TREBLE)
-#define TARGET_SOUND_MIXER_WRITE_SYNTH TARGET_MIXER_WRITE(SOUND_MIXER_SYNTH)
-#define TARGET_SOUND_MIXER_WRITE_PCM TARGET_MIXER_WRITE(SOUND_MIXER_PCM)
-#define TARGET_SOUND_MIXER_WRITE_SPEAKER TARGET_MIXER_WRITE(SOUND_MIXER_SPEAKER)
-#define TARGET_SOUND_MIXER_WRITE_LINE TARGET_MIXER_WRITE(SOUND_MIXER_LINE)
-#define TARGET_SOUND_MIXER_WRITE_MIC TARGET_MIXER_WRITE(SOUND_MIXER_MIC)
-#define TARGET_SOUND_MIXER_WRITE_CD TARGET_MIXER_WRITE(SOUND_MIXER_CD)
-#define TARGET_SOUND_MIXER_WRITE_IMIX TARGET_MIXER_WRITE(SOUND_MIXER_IMIX)
-#define TARGET_SOUND_MIXER_WRITE_ALTPCM TARGET_MIXER_WRITE(SOUND_MIXER_ALTPCM)
-#define TARGET_SOUND_MIXER_WRITE_RECLEV TARGET_MIXER_WRITE(SOUND_MIXER_RECLEV)
-#define TARGET_SOUND_MIXER_WRITE_IGAIN TARGET_MIXER_WRITE(SOUND_MIXER_IGAIN)
-#define TARGET_SOUND_MIXER_WRITE_OGAIN TARGET_MIXER_WRITE(SOUND_MIXER_OGAIN)
-#define TARGET_SOUND_MIXER_WRITE_LINE1 TARGET_MIXER_WRITE(SOUND_MIXER_LINE1)
-#define TARGET_SOUND_MIXER_WRITE_LINE2 TARGET_MIXER_WRITE(SOUND_MIXER_LINE2)
-#define TARGET_SOUND_MIXER_WRITE_LINE3 TARGET_MIXER_WRITE(SOUND_MIXER_LINE3)
+#define TARGET_SOUND_MIXER_READ_MUTE TARGET_MIXER_READ(SOUND_MIXER_MUTE)
+#define TARGET_SOUND_MIXER_READ_ENHANCE TARGET_MIXER_READ(SOUND_MIXER_ENHANCE)
+#define TARGET_SOUND_MIXER_READ_LOUD TARGET_MIXER_READ(SOUND_MIXER_LOUD)
+
+#define TARGET_SOUND_MIXER_READ_RECSRC TARGET_MIXER_READ(SOUND_MIXER_RECSRC)
+#define TARGET_SOUND_MIXER_READ_DEVMASK TARGET_MIXER_READ(SOUND_MIXER_DEVMASK)
+#define TARGET_SOUND_MIXER_READ_RECMASK TARGET_MIXER_READ(SOUND_MIXER_RECMASK)
+#define TARGET_SOUND_MIXER_READ_STEREODEVS TARGET_MIXER_READ(SOUND_MIXER_STEREODEVS)
+#define TARGET_SOUND_MIXER_READ_CAPS TARGET_MIXER_READ(SOUND_MIXER_CAPS)
+
+#define TARGET_MIXER_WRITE(dev) TARGET_IOWR('M', dev, abi_int)
+
+#define TARGET_SOUND_MIXER_WRITE_VOLUME TARGET_MIXER_WRITE(SOUND_MIXER_VOLUME)
+#define TARGET_SOUND_MIXER_WRITE_BASS TARGET_MIXER_WRITE(SOUND_MIXER_BASS)
+#define TARGET_SOUND_MIXER_WRITE_TREBLE TARGET_MIXER_WRITE(SOUND_MIXER_TREBLE)
+#define TARGET_SOUND_MIXER_WRITE_SYNTH TARGET_MIXER_WRITE(SOUND_MIXER_SYNTH)
+#define TARGET_SOUND_MIXER_WRITE_PCM TARGET_MIXER_WRITE(SOUND_MIXER_PCM)
+#define TARGET_SOUND_MIXER_WRITE_SPEAKER TARGET_MIXER_WRITE(SOUND_MIXER_SPEAKER)
+#define TARGET_SOUND_MIXER_WRITE_LINE TARGET_MIXER_WRITE(SOUND_MIXER_LINE)
+#define TARGET_SOUND_MIXER_WRITE_MIC TARGET_MIXER_WRITE(SOUND_MIXER_MIC)
+#define TARGET_SOUND_MIXER_WRITE_CD TARGET_MIXER_WRITE(SOUND_MIXER_CD)
+#define TARGET_SOUND_MIXER_WRITE_IMIX TARGET_MIXER_WRITE(SOUND_MIXER_IMIX)
+#define TARGET_SOUND_MIXER_WRITE_ALTPCM TARGET_MIXER_WRITE(SOUND_MIXER_ALTPCM)
+#define TARGET_SOUND_MIXER_WRITE_RECLEV TARGET_MIXER_WRITE(SOUND_MIXER_RECLEV)
+#define TARGET_SOUND_MIXER_WRITE_IGAIN TARGET_MIXER_WRITE(SOUND_MIXER_IGAIN)
+#define TARGET_SOUND_MIXER_WRITE_OGAIN TARGET_MIXER_WRITE(SOUND_MIXER_OGAIN)
+#define TARGET_SOUND_MIXER_WRITE_LINE1 TARGET_MIXER_WRITE(SOUND_MIXER_LINE1)
+#define TARGET_SOUND_MIXER_WRITE_LINE2 TARGET_MIXER_WRITE(SOUND_MIXER_LINE2)
+#define TARGET_SOUND_MIXER_WRITE_LINE3 TARGET_MIXER_WRITE(SOUND_MIXER_LINE3)
/* Obsolete macros */
-#define TARGET_SOUND_MIXER_WRITE_MUTE TARGET_MIXER_WRITE(SOUND_MIXER_MUTE)
-#define TARGET_SOUND_MIXER_WRITE_ENHANCE TARGET_MIXER_WRITE(SOUND_MIXER_ENHANCE)
-#define TARGET_SOUND_MIXER_WRITE_LOUD TARGET_MIXER_WRITE(SOUND_MIXER_LOUD)
+#define TARGET_SOUND_MIXER_WRITE_MUTE TARGET_MIXER_WRITE(SOUND_MIXER_MUTE)
+#define TARGET_SOUND_MIXER_WRITE_ENHANCE TARGET_MIXER_WRITE(SOUND_MIXER_ENHANCE)
+#define TARGET_SOUND_MIXER_WRITE_LOUD TARGET_MIXER_WRITE(SOUND_MIXER_LOUD)
-#define TARGET_SOUND_MIXER_WRITE_RECSRC TARGET_MIXER_WRITE(SOUND_MIXER_RECSRC)
+#define TARGET_SOUND_MIXER_WRITE_RECSRC TARGET_MIXER_WRITE(SOUND_MIXER_RECSRC)
struct target_snd_timer_id {
- int dev_class;
- int dev_sclass;
- int card;
- int device;
- int subdevice;
+ abi_int dev_class;
+ abi_int dev_sclass;
+ abi_int card;
+ abi_int device;
+ abi_int subdevice;
};
struct target_snd_timer_ginfo {
struct target_snd_timer_id tid;
- unsigned int flags;
- int card;
+ abi_uint flags;
+ abi_int card;
unsigned char id[64];
unsigned char name[80];
abi_ulong reserved0;
abi_ulong resolution;
abi_ulong resolution_min;
abi_ulong resolution_max;
- unsigned int clients;
+ abi_uint clients;
unsigned char reserved[32];
};
@@ -2614,8 +2441,8 @@ struct target_snd_timer_select {
};
struct target_snd_timer_info {
- unsigned int flags;
- int card;
+ abi_uint flags;
+ abi_int card;
unsigned char id[64];
unsigned char name[80];
abi_ulong reserved0;
@@ -2625,31 +2452,31 @@ struct target_snd_timer_info {
struct target_snd_timer_status {
struct target_timespec tstamp;
- unsigned int resolution;
- unsigned int lost;
- unsigned int overrun;
- unsigned int queue;
+ abi_uint resolution;
+ abi_uint lost;
+ abi_uint overrun;
+ abi_uint queue;
unsigned char reserved[64];
};
/* alsa timer ioctls */
-#define TARGET_SNDRV_TIMER_IOCTL_PVERSION TARGET_IOR('T', 0x00, int)
-#define TARGET_SNDRV_TIMER_IOCTL_NEXT_DEVICE TARGET_IOWR('T', 0x01, \
- struct snd_timer_id)
-#define TARGET_SNDRV_TIMER_IOCTL_GINFO TARGET_IOWR('T', 0x03, \
- struct target_snd_timer_ginfo)
-#define TARGET_SNDRV_TIMER_IOCTL_GPARAMS TARGET_IOW('T', 0x04, \
- struct target_snd_timer_gparams)
-#define TARGET_SNDRV_TIMER_IOCTL_GSTATUS TARGET_IOWR('T', 0x05, \
- struct target_snd_timer_gstatus)
-#define TARGET_SNDRV_TIMER_IOCTL_SELECT TARGET_IOW('T', 0x10, \
- struct target_snd_timer_select)
-#define TARGET_SNDRV_TIMER_IOCTL_INFO TARGET_IOR('T', 0x11, \
- struct target_snd_timer_info)
-#define TARGET_SNDRV_TIMER_IOCTL_PARAMS TARGET_IOW('T', 0x12, \
- struct snd_timer_params)
-#define TARGET_SNDRV_TIMER_IOCTL_STATUS TARGET_IOR('T', 0x14, \
- struct target_snd_timer_status)
+#define TARGET_SNDRV_TIMER_IOCTL_PVERSION TARGET_IOR('T', 0x00, abi_int)
+#define TARGET_SNDRV_TIMER_IOCTL_NEXT_DEVICE TARGET_IOWR('T', 0x01, \
+ struct snd_timer_id)
+#define TARGET_SNDRV_TIMER_IOCTL_GINFO TARGET_IOWR('T', 0x03, \
+ struct target_snd_timer_ginfo)
+#define TARGET_SNDRV_TIMER_IOCTL_GPARAMS TARGET_IOW('T', 0x04, \
+ struct target_snd_timer_gparams)
+#define TARGET_SNDRV_TIMER_IOCTL_GSTATUS TARGET_IOWR('T', 0x05, \
+ struct target_snd_timer_gstatus)
+#define TARGET_SNDRV_TIMER_IOCTL_SELECT TARGET_IOW('T', 0x10, \
+ struct target_snd_timer_select)
+#define TARGET_SNDRV_TIMER_IOCTL_INFO TARGET_IOR('T', 0x11, \
+ struct target_snd_timer_info)
+#define TARGET_SNDRV_TIMER_IOCTL_PARAMS TARGET_IOW('T', 0x12, \
+ struct snd_timer_params)
+#define TARGET_SNDRV_TIMER_IOCTL_STATUS TARGET_IOR('T', 0x14, \
+ struct target_snd_timer_status)
#define TARGET_SNDRV_TIMER_IOCTL_START TARGET_IO('T', 0xa0)
#define TARGET_SNDRV_TIMER_IOCTL_STOP TARGET_IO('T', 0xa1)
#define TARGET_SNDRV_TIMER_IOCTL_CONTINUE TARGET_IO('T', 0xa2)
@@ -2702,11 +2529,11 @@ struct target_sysinfo {
abi_ulong bufferram; /* Memory used by buffers */
abi_ulong totalswap; /* Total swap space size */
abi_ulong freeswap; /* swap space still available */
- unsigned short procs; /* Number of current processes */
- unsigned short pad; /* explicit padding for m68k */
+ abi_ushort procs; /* Number of current processes */
+ abi_ushort pad; /* explicit padding for m68k */
abi_ulong totalhigh; /* Total high memory size */
abi_ulong freehigh; /* Available high memory size */
- unsigned int mem_unit; /* Memory unit size in bytes */
+ abi_uint mem_unit; /* Memory unit size in bytes */
char _f[20-2*sizeof(abi_long)-sizeof(int)]; /* Padding: libc5 uses this.. */
};
@@ -2714,7 +2541,7 @@ struct linux_dirent {
long d_ino;
unsigned long d_off;
unsigned short d_reclen;
- char d_name[256]; /* We must not include limits.h! */
+ char d_name[];
};
struct linux_dirent64 {
@@ -2722,7 +2549,7 @@ struct linux_dirent64 {
int64_t d_off;
unsigned short d_reclen;
unsigned char d_type;
- char d_name[256];
+ char d_name[];
};
struct target_mq_attr {
@@ -2733,9 +2560,9 @@ struct target_mq_attr {
};
struct target_drm_version {
- int version_major;
- int version_minor;
- int version_patchlevel;
+ abi_int version_major;
+ abi_int version_minor;
+ abi_int version_patchlevel;
abi_ulong name_len;
abi_ulong name;
abi_ulong date_len;
@@ -2745,7 +2572,7 @@ struct target_drm_version {
};
struct target_drm_i915_getparam {
- int param;
+ abi_int param;
abi_ulong value;
};
@@ -2764,6 +2591,9 @@ struct target_drm_i915_getparam {
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_LOCK_PI2 13
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -2791,32 +2621,28 @@ struct target_epoll_event {
#define TARGET_EP_MAX_EVENTS (INT_MAX / sizeof(struct target_epoll_event))
#endif
-struct target_rlimit64 {
- uint64_t rlim_cur;
- uint64_t rlim_max;
-};
struct target_ucred {
- uint32_t pid;
- uint32_t uid;
- uint32_t gid;
+ abi_uint pid;
+ abi_uint uid;
+ abi_uint gid;
};
-typedef int32_t target_timer_t;
+typedef abi_int target_timer_t;
#define TARGET_SIGEV_MAX_SIZE 64
/* This is architecture-specific but most architectures use the default */
#ifdef TARGET_MIPS
-#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(int32_t) * 2 + sizeof(abi_long))
+#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(abi_int) * 2 + sizeof(abi_long))
#else
-#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(int32_t) * 2 \
+#define TARGET_SIGEV_PREAMBLE_SIZE (sizeof(abi_int) * 2 \
+ sizeof(target_sigval_t))
#endif
-#define TARGET_SIGEV_PAD_SIZE ((TARGET_SIGEV_MAX_SIZE \
- - TARGET_SIGEV_PREAMBLE_SIZE) \
- / sizeof(int32_t))
+#define TARGET_SIGEV_PAD_SIZE ((TARGET_SIGEV_MAX_SIZE \
+ - TARGET_SIGEV_PREAMBLE_SIZE) \
+ / sizeof(abi_int))
struct target_sigevent {
target_sigval_t sigev_value;
@@ -2838,14 +2664,14 @@ struct target_sigevent {
};
struct target_user_cap_header {
- uint32_t version;
- int pid;
+ abi_uint version;
+ abi_int pid;
};
struct target_user_cap_data {
- uint32_t effective;
- uint32_t permitted;
- uint32_t inheritable;
+ abi_uint effective;
+ abi_uint permitted;
+ abi_uint inheritable;
};
/* from kernel's include/linux/syslog.h */
@@ -2874,40 +2700,58 @@ struct target_user_cap_data {
#define TARGET_SYSLOG_ACTION_SIZE_BUFFER 10
struct target_statx_timestamp {
- int64_t tv_sec;
- uint32_t tv_nsec;
- int32_t __reserved;
+ abi_llong tv_sec;
+ abi_uint tv_nsec;
+ abi_int __reserved;
};
struct target_statx {
- /* 0x00 */
- uint32_t stx_mask; /* What results were written [uncond] */
- uint32_t stx_blksize; /* Preferred general I/O size [uncond] */
- uint64_t stx_attributes; /* Flags conveying information about the file */
- /* 0x10 */
- uint32_t stx_nlink; /* Number of hard links */
- uint32_t stx_uid; /* User ID of owner */
- uint32_t stx_gid; /* Group ID of owner */
- uint16_t stx_mode; /* File mode */
- uint16_t __spare0[1];
- /* 0x20 */
- uint64_t stx_ino; /* Inode number */
- uint64_t stx_size; /* File size */
- uint64_t stx_blocks; /* Number of 512-byte blocks allocated */
- uint64_t stx_attributes_mask; /* Mask to show what is supported */
- /* 0x40 */
- struct target_statx_timestamp stx_atime; /* Last access time */
- struct target_statx_timestamp stx_btime; /* File creation time */
- struct target_statx_timestamp stx_ctime; /* Last attribute change time */
- struct target_statx_timestamp stx_mtime; /* Last data modification time */
- /* 0x80 */
- uint32_t stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
- uint32_t stx_rdev_minor;
- uint32_t stx_dev_major; /* ID of device containing file [uncond] */
- uint32_t stx_dev_minor;
- /* 0x90 */
- uint64_t __spare2[14]; /* Spare space for future expansion */
- /* 0x100 */
+ /* 0x00 */
+ abi_uint stx_mask; /* What results were written [uncond] */
+ abi_uint stx_blksize; /* Preferred general I/O size [uncond] */
+ abi_ullong stx_attributes; /* Flags conveying information about the file */
+ /* 0x10 */
+ abi_uint stx_nlink; /* Number of hard links */
+ abi_uint stx_uid; /* User ID of owner */
+ abi_uint stx_gid; /* Group ID of owner */
+ uint16_t stx_mode; /* File mode */
+ uint16_t __spare0[1];
+ /* 0x20 */
+ abi_ullong stx_ino; /* Inode number */
+ abi_ullong stx_size; /* File size */
+ abi_ullong stx_blocks; /* Number of 512-byte blocks allocated */
+ abi_ullong stx_attributes_mask; /* Mask to show what is supported */
+ /* 0x40 */
+ struct target_statx_timestamp stx_atime; /* Last access time */
+ struct target_statx_timestamp stx_btime; /* File creation time */
+ struct target_statx_timestamp stx_ctime; /* Last attribute change time */
+ struct target_statx_timestamp stx_mtime; /* Last data modification time */
+ /* 0x80 */
+ abi_uint stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
+ abi_uint stx_rdev_minor;
+ abi_uint stx_dev_major; /* ID of device containing file [uncond] */
+ abi_uint stx_dev_minor;
+ /* 0x90 */
+ abi_ullong __spare2[14]; /* Spare space for future expansion */
+ /* 0x100 */
+};
+
+/* from kernel's include/linux/sched/types.h */
+struct target_sched_attr {
+ abi_uint size;
+ abi_uint sched_policy;
+ abi_ullong sched_flags;
+ abi_int sched_nice;
+ abi_uint sched_priority;
+ abi_ullong sched_runtime;
+ abi_ullong sched_deadline;
+ abi_ullong sched_period;
+ abi_uint sched_util_min;
+ abi_uint sched_util_max;
+};
+
+struct target_sched_param {
+ abi_int sched_priority;
};
#endif
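
The hunks converting TARGET_IOWR('P', 2, int) and friends to abi_int follow the same logic: an ioctl request number embeds the size of its argument type, and that size has to be the guest's, which is exactly what sizeof(abi_int) yields. A stand-alone sketch of the common asm-generic encoding (a few architectures use different shifts, so treat the constants as approximate) shows where the size lands in the number:

    /* Illustrative sketch, not part of the patch: the usual asm-generic
     * ioctl layout is dir:2 | size:14 | type:8 | nr:8. */
    #include <stdint.h>
    #include <stdio.h>

    #define IOC_WRITE 1u
    #define IOC_READ  2u
    #define IOC(dir, type, nr, size) \
        (((uint32_t)(dir)  << 30) | ((uint32_t)(size) << 16) | \
         ((uint32_t)(type) << 8)  | (uint32_t)(nr))

    int main(void)
    {
        /* Roughly what TARGET_IOWR('P', 2, abi_int) expands to for a
         * guest with 32-bit int; compare SNDCTL_DSP_SPEED 0xc0045002. */
        printf("0x%08x\n",
               IOC(IOC_READ | IOC_WRITE, 'P', 2, (uint32_t)sizeof(int32_t)));
        return 0;
    }
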
diff --git a/linux-user/syscall_types.h b/linux-user/syscall_types.h
index ba2c1518eb..6dd7a80ce5 100644
--- a/linux-user/syscall_types.h
+++ b/linux-user/syscall_types.h
@@ -201,6 +201,12 @@ STRUCT(loop_info64,
MK_ARRAY(TYPE_CHAR, 32), /* lo_encrypt_key */
MK_ARRAY(TYPE_ULONGLONG, 2)) /* lo_init */
+STRUCT(loop_config,
+ TYPE_INT, /* fd */
+ TYPE_INT, /* block_size */
+ MK_STRUCT(STRUCT_loop_info64), /* info */
+ MK_ARRAY(TYPE_ULONGLONG, 8)) /* __reserved */
+
/* mag tape ioctls */
STRUCT(mtop, TYPE_SHORT, TYPE_INT)
STRUCT(mtget, TYPE_LONG, TYPE_LONG, TYPE_LONG, TYPE_LONG, TYPE_LONG,
@@ -335,6 +341,11 @@ STRUCT(file_clone_range,
TYPE_ULONGLONG, /* src_length */
TYPE_ULONGLONG) /* dest_offset */
+STRUCT(fstrim_range,
+ TYPE_ULONGLONG, /* start */
+ TYPE_ULONGLONG, /* len */
+ TYPE_ULONGLONG) /* minlen */
+
STRUCT(fiemap_extent,
TYPE_ULONGLONG, /* fe_logical */
TYPE_ULONGLONG, /* fe_physical */
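
The new STRUCT(loop_config, ...) and STRUCT(fstrim_range, ...) entries do not declare C structs; they register flattened type descriptors that the generic thunk code (added in the next file) walks to compute per-field offsets for host and target and to convert each field in turn. For fstrim_range the descriptor amounts to three 64-bit fields, and on a cross-endian guest the generic conversion reduces to the hand-written equivalent sketched here (the struct and helper below are illustrative, not QEMU code):

    #include <stdint.h>

    struct fstrim_range_layout {        /* three TYPE_ULONGLONG entries */
        uint64_t start;
        uint64_t len;
        uint64_t minlen;
    };

    /* What thunk_convert() boils down to for this descriptor when host
     * and target endianness differ: a per-field byte swap. */
    void convert_fstrim_range(struct fstrim_range_layout *dst,
                              const struct fstrim_range_layout *src)
    {
        dst->start  = __builtin_bswap64(src->start);
        dst->len    = __builtin_bswap64(src->len);
        dst->minlen = __builtin_bswap64(src->minlen);
    }
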
diff --git a/linux-user/thunk.c b/linux-user/thunk.c
new file mode 100644
index 0000000000..071aad4b5f
--- /dev/null
+++ b/linux-user/thunk.c
@@ -0,0 +1,481 @@
+/*
+ * Generic thunking code to convert data between host and target CPU
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+
+#include "qemu.h"
+#include "exec/user/thunk.h"
+
+//#define DEBUG
+
+static unsigned int max_struct_entries;
+StructEntry *struct_entries;
+
+static const argtype *thunk_type_next_ptr(const argtype *type_ptr);
+
+static inline const argtype *thunk_type_next(const argtype *type_ptr)
+{
+ int type;
+
+ type = *type_ptr++;
+ switch(type) {
+ case TYPE_CHAR:
+ case TYPE_SHORT:
+ case TYPE_INT:
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_OLDDEVT:
+ return type_ptr;
+ case TYPE_PTR:
+ return thunk_type_next_ptr(type_ptr);
+ case TYPE_ARRAY:
+ return thunk_type_next_ptr(type_ptr + 1);
+ case TYPE_STRUCT:
+ return type_ptr + 1;
+ default:
+ return NULL;
+ }
+}
+
+static const argtype *thunk_type_next_ptr(const argtype *type_ptr)
+{
+ return thunk_type_next(type_ptr);
+}
+
+void thunk_register_struct(int id, const char *name, const argtype *types)
+{
+ const argtype *type_ptr;
+ StructEntry *se;
+ int nb_fields, offset, max_align, align, size, i, j;
+
+ assert(id < max_struct_entries);
+
+ /* first we count the number of fields */
+ type_ptr = types;
+ nb_fields = 0;
+ while (*type_ptr != TYPE_NULL) {
+ type_ptr = thunk_type_next(type_ptr);
+ nb_fields++;
+ }
+ assert(nb_fields > 0);
+ se = struct_entries + id;
+ se->field_types = types;
+ se->nb_fields = nb_fields;
+ se->name = name;
+#ifdef DEBUG
+ printf("struct %s: id=%d nb_fields=%d\n",
+ se->name, id, se->nb_fields);
+#endif
+ /* now we can alloc the data */
+
+ for (i = 0; i < ARRAY_SIZE(se->field_offsets); i++) {
+ offset = 0;
+ max_align = 1;
+ se->field_offsets[i] = g_new(int, nb_fields);
+ type_ptr = se->field_types;
+ for(j = 0;j < nb_fields; j++) {
+ size = thunk_type_size(type_ptr, i);
+ align = thunk_type_align(type_ptr, i);
+ offset = (offset + align - 1) & ~(align - 1);
+ se->field_offsets[i][j] = offset;
+ offset += size;
+ if (align > max_align)
+ max_align = align;
+ type_ptr = thunk_type_next(type_ptr);
+ }
+ offset = (offset + max_align - 1) & ~(max_align - 1);
+ se->size[i] = offset;
+ se->align[i] = max_align;
+#ifdef DEBUG
+ printf("%s: size=%d align=%d\n",
+ i == THUNK_HOST ? "host" : "target", offset, max_align);
+#endif
+ }
+}
+
+void thunk_register_struct_direct(int id, const char *name,
+ const StructEntry *se1)
+{
+ StructEntry *se;
+
+ assert(id < max_struct_entries);
+ se = struct_entries + id;
+ *se = *se1;
+ se->name = name;
+}
+
+
+/* now we can define the main conversion functions */
+const argtype *thunk_convert(void *dst, const void *src,
+ const argtype *type_ptr, int to_host)
+{
+ int type;
+
+ type = *type_ptr++;
+ switch(type) {
+ case TYPE_CHAR:
+ *(uint8_t *)dst = *(uint8_t *)src;
+ break;
+ case TYPE_SHORT:
+ *(uint16_t *)dst = tswap16(*(uint16_t *)src);
+ break;
+ case TYPE_INT:
+ *(uint32_t *)dst = tswap32(*(uint32_t *)src);
+ break;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ *(uint64_t *)dst = tswap64(*(uint64_t *)src);
+ break;
+#if HOST_LONG_BITS == 32 && TARGET_ABI_BITS == 32
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ *(uint32_t *)dst = tswap32(*(uint32_t *)src);
+ break;
+#elif HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 32
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ if (to_host) {
+ if (type == TYPE_LONG) {
+ /* sign extension */
+ *(uint64_t *)dst = (int32_t)tswap32(*(uint32_t *)src);
+ } else {
+ *(uint64_t *)dst = tswap32(*(uint32_t *)src);
+ }
+ } else {
+ *(uint32_t *)dst = tswap32(*(uint64_t *)src & 0xffffffff);
+ }
+ break;
+#elif HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ *(uint64_t *)dst = tswap64(*(uint64_t *)src);
+ break;
+#elif HOST_LONG_BITS == 32 && TARGET_ABI_BITS == 64
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ if (to_host) {
+ *(uint32_t *)dst = tswap64(*(uint64_t *)src);
+ } else {
+ if (type == TYPE_LONG) {
+ /* sign extension */
+ *(uint64_t *)dst = tswap64(*(int32_t *)src);
+ } else {
+ *(uint64_t *)dst = tswap64(*(uint32_t *)src);
+ }
+ }
+ break;
+#else
+#warning unsupported conversion
+#endif
+ case TYPE_OLDDEVT:
+ {
+ uint64_t val = 0;
+ switch (thunk_type_size(type_ptr - 1, !to_host)) {
+ case 2:
+ val = *(uint16_t *)src;
+ break;
+ case 4:
+ val = *(uint32_t *)src;
+ break;
+ case 8:
+ val = *(uint64_t *)src;
+ break;
+ }
+ switch (thunk_type_size(type_ptr - 1, to_host)) {
+ case 2:
+ *(uint16_t *)dst = tswap16(val);
+ break;
+ case 4:
+ *(uint32_t *)dst = tswap32(val);
+ break;
+ case 8:
+ *(uint64_t *)dst = tswap64(val);
+ break;
+ }
+ break;
+ }
+ case TYPE_ARRAY:
+ {
+ int array_length, i, dst_size, src_size;
+ const uint8_t *s;
+ uint8_t *d;
+
+ array_length = *type_ptr++;
+ dst_size = thunk_type_size(type_ptr, to_host);
+ src_size = thunk_type_size(type_ptr, 1 - to_host);
+ d = dst;
+ s = src;
+ for(i = 0;i < array_length; i++) {
+ thunk_convert(d, s, type_ptr, to_host);
+ d += dst_size;
+ s += src_size;
+ }
+ type_ptr = thunk_type_next(type_ptr);
+ }
+ break;
+ case TYPE_STRUCT:
+ {
+ int i;
+ const StructEntry *se;
+ const uint8_t *s;
+ uint8_t *d;
+ const argtype *field_types;
+ const int *dst_offsets, *src_offsets;
+
+ assert(*type_ptr < max_struct_entries);
+ se = struct_entries + *type_ptr++;
+ if (se->convert[0] != NULL) {
+ /* specific conversion is needed */
+ (*se->convert[to_host])(dst, src);
+ } else {
+ /* standard struct conversion */
+ field_types = se->field_types;
+ dst_offsets = se->field_offsets[to_host];
+ src_offsets = se->field_offsets[1 - to_host];
+ d = dst;
+ s = src;
+ for(i = 0;i < se->nb_fields; i++) {
+ field_types = thunk_convert(d + dst_offsets[i],
+ s + src_offsets[i],
+ field_types, to_host);
+ }
+ }
+ }
+ break;
+ default:
+ fprintf(stderr, "Invalid type 0x%x\n", type);
+ break;
+ }
+ return type_ptr;
+}
+
+const argtype *thunk_print(void *arg, const argtype *type_ptr)
+{
+ int type;
+
+ type = *type_ptr++;
+
+ switch (type) {
+ case TYPE_CHAR:
+ qemu_log("%c", *(uint8_t *)arg);
+ break;
+ case TYPE_SHORT:
+ qemu_log("%" PRId16, tswap16(*(uint16_t *)arg));
+ break;
+ case TYPE_INT:
+ qemu_log("%" PRId32, tswap32(*(uint32_t *)arg));
+ break;
+ case TYPE_LONGLONG:
+ qemu_log("%" PRId64, tswap64(*(uint64_t *)arg));
+ break;
+ case TYPE_ULONGLONG:
+ qemu_log("%" PRIu64, tswap64(*(uint64_t *)arg));
+ break;
+#if HOST_LONG_BITS == 32 && TARGET_ABI_BITS == 32
+ case TYPE_PTRVOID:
+ qemu_log("0x%" PRIx32, tswap32(*(uint32_t *)arg));
+ break;
+ case TYPE_LONG:
+ qemu_log("%" PRId32, tswap32(*(uint32_t *)arg));
+ break;
+ case TYPE_ULONG:
+ qemu_log("%" PRIu32, tswap32(*(uint32_t *)arg));
+ break;
+#elif HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 32
+ case TYPE_PTRVOID:
+ qemu_log("0x%" PRIx32, tswap32(*(uint64_t *)arg & 0xffffffff));
+ break;
+ case TYPE_LONG:
+ qemu_log("%" PRId32, tswap32(*(uint64_t *)arg & 0xffffffff));
+ break;
+ case TYPE_ULONG:
+ qemu_log("%" PRIu32, tswap32(*(uint64_t *)arg & 0xffffffff));
+ break;
+#elif HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
+ case TYPE_PTRVOID:
+ qemu_log("0x%" PRIx64, tswap64(*(uint64_t *)arg));
+ break;
+ case TYPE_LONG:
+ qemu_log("%" PRId64, tswap64(*(uint64_t *)arg));
+ break;
+ case TYPE_ULONG:
+ qemu_log("%" PRIu64, tswap64(*(uint64_t *)arg));
+ break;
+#else
+ case TYPE_PTRVOID:
+ qemu_log("0x%" PRIx64, tswap64(*(uint64_t *)arg));
+ break;
+ case TYPE_LONG:
+ qemu_log("%" PRId64, tswap64(*(uint64_t *)arg));
+ break;
+ case TYPE_ULONG:
+ qemu_log("%" PRIu64, tswap64(*(uint64_t *)arg));
+ break;
+#endif
+ case TYPE_OLDDEVT:
+ {
+ uint64_t val = 0;
+ switch (thunk_type_size(type_ptr - 1, 1)) {
+ case 2:
+ val = *(uint16_t *)arg;
+ break;
+ case 4:
+ val = *(uint32_t *)arg;
+ break;
+ case 8:
+ val = *(uint64_t *)arg;
+ break;
+ }
+ switch (thunk_type_size(type_ptr - 1, 0)) {
+ case 2:
+ qemu_log("%" PRIu16, tswap16(val));
+ break;
+ case 4:
+ qemu_log("%" PRIu32, tswap32(val));
+ break;
+ case 8:
+ qemu_log("%" PRIu64, tswap64(val));
+ break;
+ }
+ }
+ break;
+ case TYPE_ARRAY:
+ {
+ int i, array_length, arg_size;
+ uint8_t *a;
+ int is_string = 0;
+
+ array_length = *type_ptr++;
+ arg_size = thunk_type_size(type_ptr, 0);
+ a = arg;
+
+ if (*type_ptr == TYPE_CHAR) {
+ qemu_log("\"");
+ is_string = 1;
+ } else {
+ qemu_log("[");
+ }
+
+ for (i = 0; i < array_length; i++) {
+ if (i > 0 && !is_string) {
+ qemu_log(",");
+ }
+ thunk_print(a, type_ptr);
+ a += arg_size;
+ }
+
+ if (is_string) {
+ qemu_log("\"");
+ } else {
+ qemu_log("]");
+ }
+
+ type_ptr = thunk_type_next(type_ptr);
+ }
+ break;
+ case TYPE_STRUCT:
+ {
+ int i;
+ const StructEntry *se;
+ uint8_t *a;
+ const argtype *field_types;
+ const int *arg_offsets;
+
+ se = struct_entries + *type_ptr++;
+
+ if (se->print != NULL) {
+ se->print(arg);
+ } else {
+ a = arg;
+
+ field_types = se->field_types;
+ arg_offsets = se->field_offsets[0];
+
+ qemu_log("{");
+ for (i = 0; i < se->nb_fields; i++) {
+ if (i > 0) {
+ qemu_log(",");
+ }
+ field_types = thunk_print(a + arg_offsets[i], field_types);
+ }
+ qemu_log("}");
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return type_ptr;
+}
+
+/* from em86 */
+
+/* Utility function: Table-driven functions to translate bitmasks
+ * between host and target formats
+ */
+unsigned int target_to_host_bitmask_len(unsigned int target_mask,
+ const bitmask_transtbl *tbl,
+ size_t len)
+{
+ unsigned int host_mask = 0;
+
+ for (size_t i = 0; i < len; ++i) {
+ if ((target_mask & tbl[i].target_mask) == tbl[i].target_bits) {
+ host_mask |= tbl[i].host_bits;
+ }
+ }
+ return host_mask;
+}
+
+unsigned int host_to_target_bitmask_len(unsigned int host_mask,
+ const bitmask_transtbl *tbl,
+ size_t len)
+{
+ unsigned int target_mask = 0;
+
+ for (size_t i = 0; i < len; ++i) {
+ if ((host_mask & tbl[i].host_mask) == tbl[i].host_bits) {
+ target_mask |= tbl[i].target_bits;
+ }
+ }
+ return target_mask;
+}
+
+int thunk_type_size_array(const argtype *type_ptr, int is_host)
+{
+ return thunk_type_size(type_ptr, is_host);
+}
+
+int thunk_type_align_array(const argtype *type_ptr, int is_host)
+{
+ return thunk_type_align(type_ptr, is_host);
+}
+
+void thunk_init(unsigned int max_structs)
+{
+ max_struct_entries = max_structs;
+ struct_entries = g_new0(StructEntry, max_structs);
+}
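
The bitmask helpers at the end of thunk.c are table driven: each table row pairs a (mask, bits) pattern on the target side with the corresponding (mask, bits) pattern on the host side, and a row contributes its bits whenever the masked input matches. The stand-alone sketch below mirrors that loop with a made-up two-flag table (QEMU's real tables use its bitmask_transtbl type and live in the syscall code; the names here are fictitious):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct {                    /* modelled on bitmask_transtbl */
        unsigned int target_mask, target_bits;
        unsigned int host_mask, host_bits;
    } xlat_entry;

    #define GUEST_FLAG_A 0x01           /* fictitious guest encodings */
    #define GUEST_FLAG_B 0x40
    #define HOST_FLAG_A  0x10           /* fictitious host encodings */
    #define HOST_FLAG_B  0x02

    static const xlat_entry flag_tbl[] = {
        { GUEST_FLAG_A, GUEST_FLAG_A, HOST_FLAG_A, HOST_FLAG_A },
        { GUEST_FLAG_B, GUEST_FLAG_B, HOST_FLAG_B, HOST_FLAG_B },
    };

    static unsigned int to_host(unsigned int guest_mask)
    {
        unsigned int host_mask = 0;
        for (size_t i = 0; i < sizeof(flag_tbl) / sizeof(flag_tbl[0]); i++) {
            if ((guest_mask & flag_tbl[i].target_mask) == flag_tbl[i].target_bits) {
                host_mask |= flag_tbl[i].host_bits;
            }
        }
        return host_mask;
    }

    int main(void)
    {
        printf("0x%x\n", to_host(GUEST_FLAG_A | GUEST_FLAG_B));  /* 0x12 */
        return 0;
    }
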
diff --git a/linux-user/trace-events b/linux-user/trace-events
index e7d2f54e94..f33717f248 100644
--- a/linux-user/trace-events
+++ b/linux-user/trace-events
@@ -9,7 +9,7 @@ user_setup_frame(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64
user_setup_rt_frame(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64
user_do_rt_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64
user_do_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64
-user_force_sig(void *env, int target_sig, int host_sig) "env=%p signal %d (host %d)"
+user_dump_core_and_abort(void *env, int target_sig, int host_sig) "env=%p signal %d (host %d)"
user_handle_signal(void *env, int target_sig) "env=%p signal %d"
user_host_signal(void *env, int host_sig, int target_sig) "env=%p signal %d (target %d)"
user_queue_signal(void *env, int target_sig) "env=%p signal %d"
diff --git a/linux-user/uaccess.c b/linux-user/uaccess.c
index 425cbf677f..27e841e651 100644
--- a/linux-user/uaccess.c
+++ b/linux-user/uaccess.c
@@ -14,7 +14,7 @@ void *lock_user(int type, abi_ulong guest_addr, ssize_t len, bool copy)
return NULL;
}
host_addr = g2h_untagged(guest_addr);
-#ifdef DEBUG_REMAP
+#ifdef CONFIG_DEBUG_REMAP
if (copy) {
host_addr = g_memdup(host_addr, len);
} else {
@@ -24,7 +24,7 @@ void *lock_user(int type, abi_ulong guest_addr, ssize_t len, bool copy)
return host_addr;
}
-#ifdef DEBUG_REMAP
+#ifdef CONFIG_DEBUG_REMAP
void unlock_user(void *host_ptr, abi_ulong guest_addr, ssize_t len)
{
void *host_ptr_conv;
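
The DEBUG_REMAP to CONFIG_DEBUG_REMAP change above only renames the debugging knob; when enabled, lock_user() hands out a bounce buffer instead of the direct g2h() pointer, which is what catches missing unlock_user() calls. The usual pairing in a syscall handler looks roughly like the fragment below (a sketch written from memory, not copied from the tree; it presumes QEMU's linux-user headers, and do_guest_write is a made-up helper):

    /* Sketch only: relies on lock_user/unlock_user/get_errno and the
     * TARGET_EFAULT/VERIFY_READ definitions from linux-user headers. */
    static abi_long do_guest_write(int fd, abi_ulong buf, abi_ulong count)
    {
        void *p = lock_user(VERIFY_READ, buf, count, true);  /* copy in */
        if (!p) {
            return -TARGET_EFAULT;
        }
        abi_long ret = get_errno(write(fd, p, count));
        unlock_user(p, buf, 0);     /* 0: nothing to copy back to guest */
        return ret;
    }
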
diff --git a/linux-user/uname.c b/linux-user/uname.c
index 1d82608c10..32f71f2492 100644
--- a/linux-user/uname.c
+++ b/linux-user/uname.c
@@ -21,7 +21,6 @@
#include "qemu.h"
#include "user-internals.h"
-//#include "qemu-common.h"
#include "uname.h"
/* return highest utsname machine name for emulated instruction set
@@ -29,7 +28,7 @@
* NB: the default emulated CPU ("any") might not match any existing CPU, e.g.
* on ARM it has all features turned on, so there is no perfect arch string to
* return here */
-const char *cpu_to_uname_machine(void *cpu_env)
+const char *cpu_to_uname_machine(CPUArchState *cpu_env)
{
#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
@@ -41,7 +40,7 @@ const char *cpu_to_uname_machine(void *cpu_env)
/* in theory, endianness is configurable on some ARM CPUs, but this isn't
* used in user mode emulation */
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
#define utsname_suffix "b"
#else
#define utsname_suffix "l"
@@ -55,7 +54,7 @@ const char *cpu_to_uname_machine(void *cpu_env)
return "armv5te" utsname_suffix;
#elif defined(TARGET_I386) && !defined(TARGET_X86_64)
/* see arch/x86/kernel/cpu/bugs.c: check_bugs(), 386, 486, 586, 686 */
- CPUState *cpu = env_cpu((CPUX86State *)cpu_env);
+ CPUState *cpu = env_cpu(cpu_env);
int family = object_property_get_int(OBJECT(cpu), "family", NULL);
if (family == 4) {
return "i486";
diff --git a/linux-user/uname.h b/linux-user/uname.h
index 4503094211..4ae563f46c 100644
--- a/linux-user/uname.h
+++ b/linux-user/uname.h
@@ -4,7 +4,7 @@
#include <sys/utsname.h>
#include <linux/utsname.h>
-const char *cpu_to_uname_machine(void *cpu_env);
+const char *cpu_to_uname_machine(CPUArchState *cpu_env);
int sys_uname(struct new_utsname *buf);
#endif /* UNAME_H */
diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h
index 661612a088..ce11d9e21c 100644
--- a/linux-user/user-internals.h
+++ b/linux-user/user-internals.h
@@ -18,9 +18,9 @@
#ifndef LINUX_USER_USER_INTERNALS_H
#define LINUX_USER_USER_INTERNALS_H
-#include "hostdep.h"
#include "exec/user/thunk.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "qemu/log.h"
extern char *exec_path;
@@ -60,34 +60,35 @@ int info_is_fdpic(struct image_info *info);
void target_set_brk(abi_ulong new_brk);
void syscall_init(void);
-abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
+abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8);
extern __thread CPUState *thread_cpu;
-void cpu_loop(CPUArchState *env);
+G_NORETURN void cpu_loop(CPUArchState *env);
+abi_long get_errno(abi_long ret);
const char *target_strerror(int err);
int get_osversion(void);
void init_qemu_uname_release(void);
void fork_start(void);
-void fork_end(int child);
+void fork_end(pid_t pid);
/**
* probe_guest_base:
* @image_name: the executable being loaded
- * @loaddr: the lowest fixed address in the executable
- * @hiaddr: the highest fixed address in the executable
+ * @loaddr: the lowest fixed address within the executable
+ * @hiaddr: the highest fixed address within the executable
*
* Creates the initial guest address space in the host memory space.
*
- * If @loaddr == 0, then no address in the executable is fixed,
- * i.e. it is fully relocatable. In that case @hiaddr is the size
- * of the executable.
+ * If @loaddr == 0, then no address in the executable is fixed, i.e.
+ * it is fully relocatable. In that case @hiaddr is the size of the
+ * executable minus one.
*
* This function will not return if a valid value for guest_base
* cannot be chosen. On return, the executable loader can expect
*
- * target_mmap(loaddr, hiaddr - loaddr, ...)
+ * target_mmap(loaddr, hiaddr - loaddr + 1, ...)
*
* to succeed.
*/
@@ -113,16 +114,16 @@ static inline int is_error(abi_long ret)
return (abi_ulong)ret >= (abi_ulong)(-4096);
}
-#if TARGET_ABI_BITS == 32
+#if (TARGET_ABI_BITS == 32) && !defined(TARGET_ABI_MIPSN32)
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
return ((uint64_t)word0 << 32) | word1;
#else
return ((uint64_t)word1 << 32) | word0;
#endif
}
-#else /* TARGET_ABI_BITS == 32 */
+#else /* TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
return word0;
@@ -133,22 +134,22 @@ void print_termios(void *arg);
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
-static inline int regpairs_aligned(void *cpu_env, int num)
+static inline int regpairs_aligned(CPUArchState *cpu_env, int num)
{
- return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
+ return cpu_env->eabi;
}
-#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
-static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
+#elif defined(TARGET_MIPS) && defined(TARGET_ABI_MIPSO32)
+static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/*
* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
* of registers which translates to the same as ARM/MIPS, because we start with
* r3 as arg1
*/
-static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
+static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
-static inline int regpairs_aligned(void *cpu_env, int num)
+static inline int regpairs_aligned(CPUArchState *cpu_env, int num)
{
switch (num) {
case TARGET_NR_pread64:
@@ -160,11 +161,11 @@ static inline int regpairs_aligned(void *cpu_env, int num)
}
}
#elif defined(TARGET_XTENSA)
-static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
+static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; }
#elif defined(TARGET_HEXAGON)
-static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
+static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 1; }
#else
-static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
+static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 0; }
#endif
/**
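
The regpairs_aligned() family above, together with target_offset64(), is what lets 32-bit guests pass 64-bit syscall arguments in two registers, with some ABIs (ARM EABI, MIPS o32) skipping a register so the pair starts at an even index. The self-contained snippet below illustrates only the reassembly step; the helper mirrors target_offset64 but is not QEMU code:

    #include <stdint.h>
    #include <stdio.h>

    /* Recombine the two 32-bit words of a 64-bit argument; word order
     * depends on guest endianness, as in target_offset64(). */
    static uint64_t offset64(uint32_t word0, uint32_t word1, int big_endian)
    {
        return big_endian ? ((uint64_t)word0 << 32) | word1
                          : ((uint64_t)word1 << 32) | word0;
    }

    int main(void)
    {
        /* A 6 GiB pread64() offset split into low/high words by a
         * little-endian 32-bit guest. */
        uint32_t lo = 0x80000000u, hi = 0x00000001u;
        printf("0x%llx\n", (unsigned long long)offset64(lo, hi, 0));
        return 0;                           /* prints 0x180000000 */
    }
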
diff --git a/linux-user/user-mmap.h b/linux-user/user-mmap.h
index d1dec99c02..b94bcdcf83 100644
--- a/linux-user/user-mmap.h
+++ b/linux-user/user-mmap.h
@@ -18,17 +18,48 @@
#ifndef LINUX_USER_USER_MMAP_H
#define LINUX_USER_USER_MMAP_H
+/*
+ * Guest parameters for the ADDR_COMPAT_LAYOUT personality
+ * (at present this is the only layout supported by QEMU).
+ *
+ * TASK_UNMAPPED_BASE: For mmap without hint (addr == 0), the search
+ * for unused virtual memory begins at TASK_UNMAPPED_BASE.
+ *
+ * ELF_ET_DYN_BASE: When the executable is ET_DYN (i.e. PIE), and requires
+ * an interpreter (i.e. not -static-pie), use ELF_ET_DYN_BASE instead of
+ * TASK_UNMAPPED_BASE for selecting the address of the executable.
+ * This provides some distance between the executable and the interpreter,
+ * which allows the initial brk to be placed immediately after the
+ * executable and also have room to grow.
+ *
+ * task_unmapped_base, elf_et_dyn_base: When the guest address space is
+ * limited via -R, the values of TASK_UNMAPPED_BASE and ELF_ET_DYN_BASE
+ * must be adjusted to fit.
+ */
+extern abi_ulong task_unmapped_base;
+extern abi_ulong elf_et_dyn_base;
+
+/*
+ * mmap_next_start: The base address for the next mmap without hint,
+ * increased after each successful map, starting at task_unmapped_base.
+ * This is an optimization within QEMU and not part of ADDR_COMPAT_LAYOUT.
+ */
+extern abi_ulong mmap_next_start;
+
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
- int flags, int fd, abi_ulong offset);
+ int flags, int fd, off_t offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
abi_ulong new_size, unsigned long flags,
abi_ulong new_addr);
-extern unsigned long last_brk;
-extern abi_ulong mmap_next_start;
+abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice);
abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong);
void mmap_fork_start(void);
void mmap_fork_end(int child);
+abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
+ abi_ulong shmaddr, int shmflg);
+abi_long target_shmdt(abi_ulong shmaddr);
+
#endif /* LINUX_USER_USER_MMAP_H */
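
A rough, self-contained illustration (not from this patch) of the rolling-cursor behaviour documented above; the real search lives in mmap_find_vma() in linux-user/mmap.c and additionally handles wrap-around and collisions with existing mappings:

    #include <stdint.h>

    /* Sketch only: abi_ulong approximated by uint64_t, example base value. */
    static uint64_t sk_task_unmapped_base = 0x2aaaaaab000ull;
    static uint64_t sk_mmap_next_start;

    static uint64_t sk_alloc_unhinted(uint64_t len)
    {
        if (sk_mmap_next_start == 0) {
            sk_mmap_next_start = sk_task_unmapped_base;  /* seeded at startup */
        }
        uint64_t addr = sk_mmap_next_start;  /* the free-space search starts here */
        sk_mmap_next_start = addr + len;     /* cursor advances after a successful map */
        return addr;
    }
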
diff --git a/linux-user/vm86.c b/linux-user/vm86.c
index c2facf3fc2..9f512a2242 100644
--- a/linux-user/vm86.c
+++ b/linux-user/vm86.c
@@ -74,7 +74,7 @@ static inline unsigned int vm_getl(CPUX86State *env,
void save_v86_state(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
struct target_vm86plus_struct * target_v86;
if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
@@ -134,7 +134,7 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
static inline int set_IF(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
ts->v86flags |= VIF_MASK;
if (ts->v86flags & VIP_MASK) {
@@ -147,7 +147,7 @@ static inline int set_IF(CPUX86State *env)
static inline void clear_IF(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
ts->v86flags &= ~VIF_MASK;
}
@@ -165,7 +165,7 @@ static inline void clear_AC(CPUX86State *env)
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
set_flags(ts->v86flags, eflags, ts->v86mask);
set_flags(env->eflags, eflags, SAFE_MASK);
@@ -179,7 +179,7 @@ static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
set_flags(env->eflags, flags, SAFE_MASK);
@@ -193,7 +193,7 @@ static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
static inline unsigned int get_vflags(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
unsigned int flags;
flags = env->eflags & RETURN_MASK;
@@ -210,7 +210,7 @@ static inline unsigned int get_vflags(CPUX86State *env)
static void do_int(CPUX86State *env, int intno)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
uint32_t int_addr, segoffs, ssp;
unsigned int sp;
@@ -269,7 +269,7 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
void handle_vm86_fault(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
uint32_t csp, ssp;
unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
int data32, pref_done;
@@ -394,7 +394,7 @@ void handle_vm86_fault(CPUX86State *env)
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
CPUState *cs = env_cpu(env);
- TaskState *ts = cs->opaque;
+ TaskState *ts = get_task_state(cs);
struct target_vm86plus_struct * target_v86;
int ret;
diff --git a/linux-user/x86_64/Makefile.vdso b/linux-user/x86_64/Makefile.vdso
new file mode 100644
index 0000000000..26552b66db
--- /dev/null
+++ b/linux-user/x86_64/Makefile.vdso
@@ -0,0 +1,11 @@
+include $(BUILD_DIR)/tests/tcg/x86_64-linux-user/config-target.mak
+
+SUBDIR = $(SRC_PATH)/linux-user/x86_64
+VPATH += $(SUBDIR)
+
+all: $(SUBDIR)/vdso.so
+
+$(SUBDIR)/vdso.so: vdso.S vdso.ld
+ $(CC) -o $@ -nostdlib -shared -Wl,-h,linux-vdso.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both \
+ -Wl,-T,$(SUBDIR)/vdso.ld $<
diff --git a/linux-user/x86_64/meson.build b/linux-user/x86_64/meson.build
index 203af9a60c..8c60da7a60 100644
--- a/linux-user/x86_64/meson.build
+++ b/linux-user/x86_64/meson.build
@@ -3,3 +3,7 @@ syscall_nr_generators += {
arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
output: '@BASENAME@_nr.h')
}
+
+vdso_inc = gen_vdso.process('vdso.so')
+
+linux_user_ss.add(when: 'TARGET_X86_64', if_true: vdso_inc)
diff --git a/linux-user/x86_64/target_elf.h b/linux-user/x86_64/target_elf.h
index 7b76a90de8..3f628f8d66 100644
--- a/linux-user/x86_64/target_elf.h
+++ b/linux-user/x86_64/target_elf.h
@@ -9,6 +9,6 @@
#define X86_64_TARGET_ELF_H
static inline const char *cpu_get_model(uint32_t eflags)
{
- return "qemu64";
+ return "max";
}
#endif
diff --git a/linux-user/x86_64/target_mman.h b/linux-user/x86_64/target_mman.h
new file mode 100644
index 0000000000..48fbf20b42
--- /dev/null
+++ b/linux-user/x86_64/target_mman.h
@@ -0,0 +1,16 @@
+/*
+ * arch/x86/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
+ * __TASK_UNMAPPED_BASE(S) PAGE_ALIGN(S / 3)
+ *
+ * arch/x86/include/asm/page_64_types.h:
+ * TASK_SIZE_LOW DEFAULT_MAP_WINDOW
+ * DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
+ */
+#define TASK_UNMAPPED_BASE \
+ TARGET_PAGE_ALIGN((1ull << TARGET_VIRT_ADDR_SPACE_BITS) / 3)
+
+/* arch/x86/include/asm/elf.h */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2)
+
+#include "../generic/target_mman.h"
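
Worked example (not part of the patch), assuming TARGET_VIRT_ADDR_SPACE_BITS is 47 and a 4 KiB target page size: TASK_UNMAPPED_BASE = align_up(2^47 / 3, 4096) = 0x2aaaaaab000 and ELF_ET_DYN_BASE = 2 * 0x2aaaaaab000 = 0x555555556000. The standalone snippet below just reproduces that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t page = 4096;
        uint64_t base = (((1ull << 47) / 3) + page - 1) & ~(page - 1);

        printf("TASK_UNMAPPED_BASE = 0x%llx\n", (unsigned long long)base);
        printf("ELF_ET_DYN_BASE    = 0x%llx\n", (unsigned long long)(base * 2));
        return 0;
    }
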
diff --git a/linux-user/x86_64/target_prctl.h b/linux-user/x86_64/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/x86_64/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/x86_64/target_proc.h b/linux-user/x86_64/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/x86_64/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/x86_64/target_resource.h b/linux-user/x86_64/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/x86_64/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h
index 4ea74f20dd..9d9717406f 100644
--- a/linux-user/x86_64/target_signal.h
+++ b/linux-user/x86_64/target_signal.h
@@ -1,24 +1,9 @@
#ifndef X86_64_TARGET_SIGNAL_H
#define X86_64_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
+/* For x86_64, use of SA_RESTORER is mandatory. */
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
+
#endif /* X86_64_TARGET_SIGNAL_H */
diff --git a/linux-user/x86_64/target_structs.h b/linux-user/x86_64/target_structs.h
index ce367b253b..f1181383c4 100644
--- a/linux-user/x86_64/target_structs.h
+++ b/linux-user/x86_64/target_structs.h
@@ -19,41 +19,7 @@
#ifndef X86_64_TARGET_STRUCTS_H
#define X86_64_TARGET_STRUCTS_H
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_ushort mode; /* Read/write permission. */
- abi_ushort __pad1;
- abi_ushort __seq; /* Sequence number. */
- abi_ushort __pad2;
- abi_ulong __unused1;
- abi_ulong __unused2;
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused1;
-#endif
- abi_ulong shm_dtime; /* time of last shmdt() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused2;
-#endif
- abi_ulong shm_ctime; /* time of last change by shmctl() */
-#if TARGET_ABI_BITS == 32
- abi_ulong __unused3;
-#endif
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ulong shm_nattch; /* number of current attaches */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
+#include "../generic/target_structs.h"
/* The x86 definition differs from the generic one in that the
* two padding fields exist whether the ABI is 32 bits or 64 bits.
diff --git a/linux-user/x86_64/target_syscall.h b/linux-user/x86_64/target_syscall.h
index 3ecccb72be..fb558345d3 100644
--- a/linux-user/x86_64/target_syscall.h
+++ b/linux-user/x86_64/target_syscall.h
@@ -100,7 +100,6 @@ struct target_msqid64_ds {
#define TARGET_ARCH_SET_FS 0x1002
#define TARGET_ARCH_GET_FS 0x1003
#define TARGET_ARCH_GET_GS 0x1004
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/x86_64/vdso.S b/linux-user/x86_64/vdso.S
new file mode 100644
index 0000000000..47d16c00ab
--- /dev/null
+++ b/linux-user/x86_64/vdso.S
@@ -0,0 +1,78 @@
+/*
+ * x86-64 linux replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <asm/unistd.h>
+
+.macro endf name
+ .globl \name
+ .type \name, @function
+ .size \name, . - \name
+.endm
+
+.macro weakalias name
+\name = __vdso_\name
+ .weak \name
+.endm
+
+.macro vdso_syscall name, nr
+__vdso_\name:
+ mov $\nr, %eax
+ syscall
+ ret
+endf __vdso_\name
+weakalias \name
+.endm
+
+ .cfi_startproc
+
+vdso_syscall clock_gettime, __NR_clock_gettime
+vdso_syscall clock_getres, __NR_clock_getres
+vdso_syscall gettimeofday, __NR_gettimeofday
+vdso_syscall time, __NR_time
+
+__vdso_getcpu:
+ /*
+ * There is no syscall number allocated for this on x86-64.
+ * We can handle this several ways:
+ *
+ * (1) Invent a syscall number for use within qemu.
+ * It should be easy enough to pick a number that
+ * is well out of the way of the kernel numbers.
+ *
+ * (2) Force the emulated cpu to support the rdtscp insn,
+ * and initialize the TSC_AUX value to the appropriate value.
+ *
+ * (3) Pretend that we're always running on cpu 0.
+ *
+ * This last is the one that's implemented here, with the
+ * tiny bit of extra code to support rdtscp in place.
+ */
+ xor %ecx, %ecx /* rdtscp w/ tsc_aux = 0 */
+
+ /* if (cpu != NULL) *cpu = (ecx & 0xfff); */
+ test %rdi, %rdi
+ jz 1f
+ mov %ecx, %eax
+ and $0xfff, %eax
+ mov %eax, (%rdi)
+
+ /* if (node != NULL) *node = (ecx >> 12); */
+1: test %rsi, %rsi
+ jz 2f
+ shr $12, %ecx
+ mov %ecx, (%rsi)
+
+2: xor %eax, %eax
+ ret
+endf __vdso_getcpu
+
+weakalias getcpu
+
+ .cfi_endproc
+
+/* TODO: Add elf note for LINUX_VERSION_CODE */
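
For readers less familiar with the assembly, a C rendering (sketch, not part of the patch; it drops the unused third tcache argument of the real getcpu) of the __vdso_getcpu stub above, which always reports cpu 0 / node 0, consistent with a zero TSC_AUX:

    #include <stdint.h>

    static int vdso_getcpu_sketch(unsigned int *cpu, unsigned int *node)
    {
        uint32_t tsc_aux = 0;          /* what rdtscp would return in %ecx */

        if (cpu) {
            *cpu = tsc_aux & 0xfff;    /* low 12 bits encode the cpu number */
        }
        if (node) {
            *node = tsc_aux >> 12;     /* remaining bits encode the node */
        }
        return 0;
    }
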
diff --git a/linux-user/x86_64/vdso.ld b/linux-user/x86_64/vdso.ld
new file mode 100644
index 0000000000..ca6001cc3c
--- /dev/null
+++ b/linux-user/x86_64/vdso.ld
@@ -0,0 +1,73 @@
+/*
+ * Linker script for linux x86-64 replacement vdso.
+ *
+ * Copyright 2023 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+VERSION {
+ LINUX_2.6 {
+ global:
+ clock_gettime;
+ __vdso_clock_gettime;
+ gettimeofday;
+ __vdso_gettimeofday;
+ getcpu;
+ __vdso_getcpu;
+ time;
+ __vdso_time;
+ clock_getres;
+ __vdso_clock_getres;
+
+ local: *;
+ };
+}
+
+
+PHDRS {
+ phdr PT_PHDR FLAGS(4) PHDRS;
+ load PT_LOAD FLAGS(7) FILEHDR PHDRS; /* FLAGS=RWX */
+ dynamic PT_DYNAMIC FLAGS(4);
+ eh_frame_hdr PT_GNU_EH_FRAME;
+ note PT_NOTE FLAGS(4);
+}
+
+SECTIONS {
+ . = SIZEOF_HEADERS;
+
+ /*
+ * The following, including the FILEHDR and PHDRS, are modified
+ * when we relocate the binary. We want them to be initially
+ * writable for the relocation; we'll force them read-only after.
+ */
+ .note : { *(.note*) } :load :note
+ .dynamic : { *(.dynamic) } :load :dynamic
+ .dynsym : { *(.dynsym) } :load
+ .data : {
+ /*
+ * There ought not be any real read-write data.
+ * But since we manipulated the segment layout,
+ * we have to put these sections somewhere.
+ */
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+ }
+
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :load :eh_frame_hdr
+ .eh_frame : { *(.eh_frame) } :load
+
+ .text : { *(.text*) } :load =0x90909090
+}
diff --git a/linux-user/x86_64/vdso.so b/linux-user/x86_64/vdso.so
new file mode 100755
index 0000000000..c873d6ea58
--- /dev/null
+++ b/linux-user/x86_64/vdso.so
Binary files differ
diff --git a/linux-user/xtensa/cpu_loop.c b/linux-user/xtensa/cpu_loop.c
index 622afbcd34..d51ce05392 100644
--- a/linux-user/xtensa/cpu_loop.c
+++ b/linux-user/xtensa/cpu_loop.c
@@ -126,7 +126,6 @@ static void xtensa_underflow12(CPUXtensaState *env)
void cpu_loop(CPUXtensaState *env)
{
CPUState *cs = env_cpu(env);
- target_siginfo_t info;
abi_ulong ret;
int trapnr;
@@ -163,14 +162,12 @@ void cpu_loop(CPUXtensaState *env)
case EXC_USER:
switch (env->sregs[EXCCAUSE]) {
case ILLEGAL_INSTRUCTION_CAUSE:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC,
+ env->sregs[EPC1]);
+ break;
case PRIVILEGED_CAUSE:
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code =
- env->sregs[EXCCAUSE] == ILLEGAL_INSTRUCTION_CAUSE ?
- TARGET_ILL_ILLOPC : TARGET_ILL_PRVOPC;
- info._sifields._sigfault._addr = env->sregs[EPC1];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC,
+ env->sregs[EPC1]);
break;
case SYSCALL_CAUSE:
@@ -184,11 +181,11 @@ void cpu_loop(CPUXtensaState *env)
env->regs[2] = ret;
break;
- case -TARGET_ERESTARTSYS:
+ case -QEMU_ERESTARTSYS:
env->pc -= 3;
break;
- case -TARGET_QEMU_ESIGRETURN:
+ case -QEMU_ESIGRETURN:
break;
}
break;
@@ -219,20 +216,8 @@ void cpu_loop(CPUXtensaState *env)
break;
case INTEGER_DIVIDE_BY_ZERO_CAUSE:
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_INTDIV;
- info._sifields._sigfault._addr = env->sregs[EPC1];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- break;
-
- case LOAD_PROHIBITED_CAUSE:
- case STORE_PROHIBITED_CAUSE:
- info.si_signo = TARGET_SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SEGV_ACCERR;
- info._sifields._sigfault._addr = env->sregs[EXCVADDR];
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV,
+ env->sregs[EPC1]);
break;
default:
@@ -241,10 +226,8 @@ void cpu_loop(CPUXtensaState *env)
}
break;
case EXCP_DEBUG:
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT,
+ env->sregs[EPC1]);
break;
case EXC_DEBUG:
default:
diff --git a/linux-user/xtensa/signal.c b/linux-user/xtensa/signal.c
index 7a3bfb92ca..6514b8dd57 100644
--- a/linux-user/xtensa/signal.c
+++ b/linux-user/xtensa/signal.c
@@ -128,12 +128,38 @@ static int setup_sigcontext(struct target_rt_sigframe *frame,
return 1;
}
+static void install_sigtramp(uint8_t *tramp)
+{
+#if TARGET_BIG_ENDIAN
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ __put_user(0x22, &tramp[0]);
+ __put_user(0x0a, &tramp[1]);
+ __put_user(TARGET_NR_rt_sigreturn, &tramp[2]);
+ /* Generate instruction: SYSCALL */
+ __put_user(0x00, &tramp[3]);
+ __put_user(0x05, &tramp[4]);
+ __put_user(0x00, &tramp[5]);
+#else
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ __put_user(0x22, &tramp[0]);
+ __put_user(0xa0, &tramp[1]);
+ __put_user(TARGET_NR_rt_sigreturn, &tramp[2]);
+ /* Generate instruction: SYSCALL */
+ __put_user(0x00, &tramp[3]);
+ __put_user(0x50, &tramp[4]);
+ __put_user(0x00, &tramp[5]);
+#endif
+}
+
void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPUXtensaState *env)
{
abi_ulong frame_addr;
struct target_rt_sigframe *frame;
+ int is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info);
+ abi_ulong handler = 0;
+ abi_ulong handler_fdpic_GOT = 0;
uint32_t ra;
bool abi_call0;
unsigned base;
@@ -142,12 +168,23 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
frame_addr = get_sigframe(ka, env, sizeof(*frame));
trace_user_setup_rt_frame(env, frame_addr);
+ if (is_fdpic) {
+ abi_ulong funcdesc_ptr = ka->_sa_handler;
+
+ if (get_user_ual(handler, funcdesc_ptr)
+ || get_user_ual(handler_fdpic_GOT, funcdesc_ptr + 4)) {
+ goto give_sigsegv;
+ }
+ } else {
+ handler = ka->_sa_handler;
+ }
+
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
goto give_sigsegv;
}
if (ka->sa_flags & SA_SIGINFO) {
- tswap_siginfo(&frame->info, info);
+ frame->info = *info;
}
__put_user(0, &frame->uc.tuc_flags);
@@ -162,31 +199,21 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
}
if (ka->sa_flags & TARGET_SA_RESTORER) {
- ra = ka->sa_restorer;
+ if (is_fdpic) {
+ if (get_user_ual(ra, ka->sa_restorer)) {
+ unlock_user_struct(frame, frame_addr, 0);
+ goto give_sigsegv;
+ }
+ } else {
+ ra = ka->sa_restorer;
+ }
} else {
- ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
-#ifdef TARGET_WORDS_BIGENDIAN
- /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
- __put_user(0x22, &frame->retcode[0]);
- __put_user(0x0a, &frame->retcode[1]);
- __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
- /* Generate instruction: SYSCALL */
- __put_user(0x00, &frame->retcode[3]);
- __put_user(0x05, &frame->retcode[4]);
- __put_user(0x00, &frame->retcode[5]);
-#else
- /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
- __put_user(0x22, &frame->retcode[0]);
- __put_user(0xa0, &frame->retcode[1]);
- __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
- /* Generate instruction: SYSCALL */
- __put_user(0x00, &frame->retcode[3]);
- __put_user(0x50, &frame->retcode[4]);
- __put_user(0x00, &frame->retcode[5]);
-#endif
+ /* Not used, but retain for ABI compatibility. */
+ install_sigtramp(frame->retcode);
+ ra = default_rt_sigreturn;
}
memset(env->regs, 0, sizeof(env->regs));
- env->pc = ka->_sa_handler;
+ env->pc = handler;
env->regs[1] = frame_addr;
env->sregs[WINDOW_BASE] = 0;
env->sregs[WINDOW_START] = 1;
@@ -206,6 +233,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
env->regs[base + 3] = frame_addr + offsetof(struct target_rt_sigframe,
info);
env->regs[base + 4] = frame_addr + offsetof(struct target_rt_sigframe, uc);
+ if (is_fdpic) {
+ env->regs[base + 11] = handler_fdpic_GOT;
+ }
unlock_user_struct(frame, frame_addr, 1);
return;
@@ -257,10 +287,20 @@ long do_rt_sigreturn(CPUXtensaState *env)
target_restore_altstack(&frame->uc.tuc_stack, env);
unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
+ return -QEMU_ESIGRETURN;
+}
+
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint8_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0);
+ assert(tramp != NULL);
+
+ default_rt_sigreturn = sigtramp_page;
+ install_sigtramp(tramp);
+ unlock_user(tramp, sigtramp_page, 6);
}
diff --git a/linux-user/xtensa/target_mman.h b/linux-user/xtensa/target_mman.h
new file mode 100644
index 0000000000..8fa6337a97
--- /dev/null
+++ b/linux-user/xtensa/target_mman.h
@@ -0,0 +1,29 @@
+#ifndef XTENSA_TARGET_MMAN_H
+#define XTENSA_TARGET_MMAN_H
+
+#define TARGET_PROT_SEM 0x10
+
+#define TARGET_MAP_NORESERVE 0x0400
+#define TARGET_MAP_ANONYMOUS 0x0800
+#define TARGET_MAP_GROWSDOWN 0x1000
+#define TARGET_MAP_DENYWRITE 0x2000
+#define TARGET_MAP_EXECUTABLE 0x4000
+#define TARGET_MAP_LOCKED 0x8000
+#define TARGET_MAP_POPULATE 0x10000
+#define TARGET_MAP_NONBLOCK 0x20000
+#define TARGET_MAP_STACK 0x40000
+#define TARGET_MAP_HUGETLB 0x80000
+
+/*
+ * arch/xtensa/include/asm/processor.h:
+ * TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+ */
+#define TASK_UNMAPPED_BASE (1u << (TARGET_VIRT_ADDR_SPACE_BITS - 1))
+
+/* arch/xtensa/include/asm/elf.h */
+#define ELF_ET_DYN_BASE \
+ TARGET_PAGE_ALIGN((1u << TARGET_VIRT_ADDR_SPACE_BITS) / 3)
+
+#include "../generic/target_mman.h"
+
+#endif
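
The same arithmetic as in the x86_64 example earlier, with xtensa parameters (assuming TARGET_VIRT_ADDR_SPACE_BITS is 32 and 4 KiB target pages, both assumptions):

    /*
     * TASK_UNMAPPED_BASE = 1 << 31                          = 0x80000000
     * ELF_ET_DYN_BASE    = align_up((1ull << 32) / 3, 4096) = 0x55556000
     */
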
diff --git a/linux-user/xtensa/target_prctl.h b/linux-user/xtensa/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/xtensa/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/xtensa/target_proc.h b/linux-user/xtensa/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/xtensa/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/xtensa/target_resource.h b/linux-user/xtensa/target_resource.h
new file mode 100644
index 0000000000..227259594c
--- /dev/null
+++ b/linux-user/xtensa/target_resource.h
@@ -0,0 +1 @@
+#include "../generic/target_resource.h"
diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h
index c60bf656f6..e4b1bea5cb 100644
--- a/linux-user/xtensa/target_signal.h
+++ b/linux-user/xtensa/target_signal.h
@@ -1,23 +1,8 @@
#ifndef XTENSA_TARGET_SIGNAL_H
#define XTENSA_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
+
#endif
diff --git a/linux-user/xtensa/target_structs.h b/linux-user/xtensa/target_structs.h
index 9cde6844b8..cb1b3411cf 100644
--- a/linux-user/xtensa/target_structs.h
+++ b/linux-user/xtensa/target_structs.h
@@ -15,7 +15,7 @@ struct target_ipc_perm {
struct target_semid64_ds {
struct target_ipc_perm sem_perm;
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
abi_ulong __unused1;
abi_ulong sem_otime;
abi_ulong __unused2;