author     Richard Henderson <richard.henderson@linaro.org>  2021-02-12 10:48:43 -0800
committer  Peter Maydell <peter.maydell@linaro.org>          2021-02-16 11:04:53 +0000
commit     3e8f1628e864201692aa28996f8f64f9761555af (patch)
tree       9ab5b2b6687bac3398e021b610fa728c2081dc28 /linux-user
parent     141a56d844e0b57d46026c2913179c5ac05e6010 (diff)
exec: Use cpu_untagged_addr in g2h; split out g2h_untagged
Use g2h_untagged in contexts that have no cpu, e.g. the binary
loaders that operate before the primary cpu is created. As a
corollary, target_mmap and friends must use untagged addresses,
since they are used by the loaders.

Use g2h_untagged on values returned from target_mmap, as the
kernel never applies a tag itself.

Use g2h_untagged on all pc values. The only current user of
tags, aarch64, removes tags from code addresses upon branch,
so "pc" is always untagged.

Use g2h with the cpu context on hand wherever possible. Use
g2h_untagged in lock_user, which will be updated soon.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210212184902.1251044-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
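To make the split concrete, the following is a minimal sketch of what the two helpers could look like after this change. It is an illustration reconstructed from the commit description (g2h strips the per-CPU tag via cpu_untagged_addr before translating, g2h_untagged adds guest_base directly), not a verbatim copy of the QEMU headers; guest_base, abi_ptr and cpu_untagged_addr are existing QEMU identifiers assumed here.

/* Illustrative sketch only -- simplified reconstruction of the
 * helpers described by this commit, not the exact header contents. */

/* Direct guest->host translation, no tag handling.  Used by the
 * binary loaders, target_mmap and friends, and for pc values. */
static inline void *g2h_untagged(abi_ptr x)
{
    return (void *)((uintptr_t)(x) + guest_base);
}

/* Tag-aware translation: strip any address tag for this CPU first,
 * then translate.  Used wherever a CPUState is on hand. */
static inline void *g2h(CPUState *cs, abi_ptr x)
{
    return g2h_untagged(cpu_untagged_addr(cs, x));
}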
Diffstat (limited to 'linux-user')
-rw-r--r--  linux-user/elfload.c        12
-rw-r--r--  linux-user/flatload.c        2
-rw-r--r--  linux-user/hppa/cpu_loop.c  31
-rw-r--r--  linux-user/i386/cpu_loop.c   4
-rw-r--r--  linux-user/mmap.c           45
-rw-r--r--  linux-user/ppc/signal.c      4
-rw-r--r--  linux-user/qemu.h            6
-rw-r--r--  linux-user/syscall.c        72
8 files changed, 92 insertions, 84 deletions
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 7ec9249c25..902be3ff11 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -389,7 +389,7 @@ enum {
static bool init_guest_commpage(void)
{
- void *want = g2h(ARM_COMMPAGE & -qemu_host_page_size);
+ void *want = g2h_untagged(ARM_COMMPAGE & -qemu_host_page_size);
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
@@ -402,7 +402,7 @@ static bool init_guest_commpage(void)
}
/* Set kernel helper versions; rest of page is 0. */
- __put_user(5, (uint32_t *)g2h(0xffff0ffcu));
+ __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
perror("Protecting guest commpage");
@@ -1872,8 +1872,8 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
here is still actually needed. For now, continue with it,
but merge it with the "normal" mmap that would allocate the bss. */
- host_start = (uintptr_t) g2h(elf_bss);
- host_end = (uintptr_t) g2h(last_bss);
+ host_start = (uintptr_t) g2h_untagged(elf_bss);
+ host_end = (uintptr_t) g2h_untagged(last_bss);
host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
if (host_map_start < host_end) {
@@ -2171,7 +2171,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
}
/* Reserve the address space for the binary, or reserved_va. */
- test = g2h(guest_loaddr);
+ test = g2h_untagged(guest_loaddr);
addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
if (test != addr) {
pgb_fail_in_use(image_name);
@@ -2393,7 +2393,7 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
/* Reserve the memory on the host. */
assert(guest_base != 0);
- test = g2h(0);
+ test = g2h_untagged(0);
addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
if (addr == MAP_FAILED || addr != test) {
error_report("Unable to reserve 0x%lx bytes of virtual address "
diff --git a/linux-user/flatload.c b/linux-user/flatload.c
index 14d2999d15..3e5594cf89 100644
--- a/linux-user/flatload.c
+++ b/linux-user/flatload.c
@@ -668,7 +668,7 @@ static int load_flat_file(struct linux_binprm * bprm,
}
/* zero the BSS. */
- memset(g2h(datapos + data_len), 0, bss_len);
+ memset(g2h_untagged(datapos + data_len), 0, bss_len);
return 0;
}
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
index d7e1ec7722..944511bbe4 100644
--- a/linux-user/hppa/cpu_loop.c
+++ b/linux-user/hppa/cpu_loop.c
@@ -23,6 +23,7 @@
static abi_ulong hppa_lws(CPUHPPAState *env)
{
+ CPUState *cs = env_cpu(env);
uint32_t which = env->gr[20];
abi_ulong addr = env->gr[26];
abi_ulong old = env->gr[25];
@@ -39,7 +40,7 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
}
old = tswap32(old);
new = tswap32(new);
- ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new);
+ ret = qatomic_cmpxchg((uint32_t *)g2h(cs, addr), old, new);
ret = tswap32(ret);
break;
@@ -58,38 +59,38 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
can be host-endian as well. */
switch (size) {
case 0:
- old = *(uint8_t *)g2h(old);
- new = *(uint8_t *)g2h(new);
- ret = qatomic_cmpxchg((uint8_t *)g2h(addr), old, new);
+ old = *(uint8_t *)g2h(cs, old);
+ new = *(uint8_t *)g2h(cs, new);
+ ret = qatomic_cmpxchg((uint8_t *)g2h(cs, addr), old, new);
ret = ret != old;
break;
case 1:
- old = *(uint16_t *)g2h(old);
- new = *(uint16_t *)g2h(new);
- ret = qatomic_cmpxchg((uint16_t *)g2h(addr), old, new);
+ old = *(uint16_t *)g2h(cs, old);
+ new = *(uint16_t *)g2h(cs, new);
+ ret = qatomic_cmpxchg((uint16_t *)g2h(cs, addr), old, new);
ret = ret != old;
break;
case 2:
- old = *(uint32_t *)g2h(old);
- new = *(uint32_t *)g2h(new);
- ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new);
+ old = *(uint32_t *)g2h(cs, old);
+ new = *(uint32_t *)g2h(cs, new);
+ ret = qatomic_cmpxchg((uint32_t *)g2h(cs, addr), old, new);
ret = ret != old;
break;
case 3:
{
uint64_t o64, n64, r64;
- o64 = *(uint64_t *)g2h(old);
- n64 = *(uint64_t *)g2h(new);
+ o64 = *(uint64_t *)g2h(cs, old);
+ n64 = *(uint64_t *)g2h(cs, new);
#ifdef CONFIG_ATOMIC64
- r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(addr),
+ r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(cs, addr),
o64, n64);
ret = r64 != o64;
#else
start_exclusive();
- r64 = *(uint64_t *)g2h(addr);
+ r64 = *(uint64_t *)g2h(cs, addr);
ret = 1;
if (r64 == o64) {
- *(uint64_t *)g2h(addr) = n64;
+ *(uint64_t *)g2h(cs, addr) = n64;
ret = 0;
}
end_exclusive();
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
index 70cde417e6..19c8a18cd3 100644
--- a/linux-user/i386/cpu_loop.c
+++ b/linux-user/i386/cpu_loop.c
@@ -379,7 +379,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
- idt_table = g2h(env->idt.base);
+ idt_table = g2h_untagged(env->idt.base);
set_idt(0, 0);
set_idt(1, 0);
set_idt(2, 0);
@@ -409,7 +409,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
- gdt_table = g2h(env->gdt.base);
+ gdt_table = g2h_untagged(env->gdt.base);
#ifdef TARGET_ABI32
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index c52b60482e..6decfec68a 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -141,7 +141,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
}
end = host_end;
}
- ret = mprotect(g2h(host_start), qemu_host_page_size,
+ ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
prot1 & PAGE_BITS);
if (ret != 0) {
goto error;
@@ -153,7 +153,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
- ret = mprotect(g2h(host_end - qemu_host_page_size),
+ ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
qemu_host_page_size, prot1 & PAGE_BITS);
if (ret != 0) {
goto error;
@@ -163,7 +163,8 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
/* handle the pages in the middle */
if (host_start < host_end) {
- ret = mprotect(g2h(host_start), host_end - host_start, host_prot);
+ ret = mprotect(g2h_untagged(host_start),
+ host_end - host_start, host_prot);
if (ret != 0) {
goto error;
}
@@ -186,7 +187,7 @@ static int mmap_frag(abi_ulong real_start,
int prot1, prot_new;
real_end = real_start + qemu_host_page_size;
- host_start = g2h(real_start);
+ host_start = g2h_untagged(real_start);
/* get the protection of the target pages outside the mapping */
prot1 = 0;
@@ -218,7 +219,7 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
/* read the corresponding file data */
- if (pread(fd, g2h(start), end - start, offset) == -1)
+ if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
return -1;
/* put final protection */
@@ -229,7 +230,7 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot_new);
}
if (prot_new & PROT_WRITE) {
- memset(g2h(start), 0, end - start);
+ memset(g2h_untagged(start), 0, end - start);
}
}
return 0;
@@ -338,7 +339,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
* - mremap() with MREMAP_FIXED flag
* - shmat() with SHM_REMAP flag
*/
- ptr = mmap(g2h(addr), size, PROT_NONE,
+ ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
/* ENOMEM, if host address space has no memory */
@@ -497,7 +498,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
/* Note: we prefer to control the mapping address. It is
especially important if qemu_host_page_size >
qemu_real_host_page_size */
- p = mmap(g2h(start), host_len, host_prot,
+ p = mmap(g2h_untagged(start), host_len, host_prot,
flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
goto fail;
@@ -505,10 +506,10 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
/* update start so that it points to the file position at 'offset' */
host_start = (unsigned long)p;
if (!(flags & MAP_ANONYMOUS)) {
- p = mmap(g2h(start), len, host_prot,
+ p = mmap(g2h_untagged(start), len, host_prot,
flags | MAP_FIXED, fd, host_offset);
if (p == MAP_FAILED) {
- munmap(g2h(start), host_len);
+ munmap(g2h_untagged(start), host_len);
goto fail;
}
host_start += offset - host_offset;
@@ -548,7 +549,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
-1, 0);
if (retaddr == -1)
goto fail;
- if (pread(fd, g2h(start), len, offset) == -1)
+ if (pread(fd, g2h_untagged(start), len, offset) == -1)
goto fail;
if (!(host_prot & PROT_WRITE)) {
ret = target_mprotect(start, len, target_prot);
@@ -592,7 +593,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
offset1 = 0;
else
offset1 = offset + real_start - start;
- p = mmap(g2h(real_start), real_end - real_start,
+ p = mmap(g2h_untagged(real_start), real_end - real_start,
host_prot, flags, fd, offset1);
if (p == MAP_FAILED)
goto fail;
@@ -652,7 +653,7 @@ static void mmap_reserve(abi_ulong start, abi_ulong size)
real_end -= qemu_host_page_size;
}
if (real_start != real_end) {
- mmap(g2h(real_start), real_end - real_start, PROT_NONE,
+ mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
-1, 0);
}
@@ -707,7 +708,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
if (reserved_va) {
mmap_reserve(real_start, real_end - real_start);
} else {
- ret = munmap(g2h(real_start), real_end - real_start);
+ ret = munmap(g2h_untagged(real_start), real_end - real_start);
}
}
@@ -738,8 +739,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
mmap_lock();
if (flags & MREMAP_FIXED) {
- host_addr = mremap(g2h(old_addr), old_size, new_size,
- flags, g2h(new_addr));
+ host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
+ flags, g2h_untagged(new_addr));
if (reserved_va && host_addr != MAP_FAILED) {
/* If new and old addresses overlap then the above mremap will
@@ -755,8 +756,9 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
errno = ENOMEM;
host_addr = MAP_FAILED;
} else {
- host_addr = mremap(g2h(old_addr), old_size, new_size,
- flags | MREMAP_FIXED, g2h(mmap_start));
+ host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
+ flags | MREMAP_FIXED,
+ g2h_untagged(mmap_start));
if (reserved_va) {
mmap_reserve(old_addr, old_size);
}
@@ -772,14 +774,15 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
}
}
if (prot == 0) {
- host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
+ host_addr = mremap(g2h_untagged(old_addr),
+ old_size, new_size, flags);
if (host_addr != MAP_FAILED) {
/* Check if address fits target address space */
if (!guest_range_valid(h2g(host_addr), new_size)) {
/* Revert mremap() changes */
- host_addr = mremap(g2h(old_addr), new_size, old_size,
- flags);
+ host_addr = mremap(g2h_untagged(old_addr),
+ new_size, old_size, flags);
errno = ENOMEM;
host_addr = MAP_FAILED;
} else if (reserved_va && old_size > new_size) {
diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c
index 20a02c197c..b78613f7c8 100644
--- a/linux-user/ppc/signal.c
+++ b/linux-user/ppc/signal.c
@@ -365,7 +365,7 @@ static void restore_user_regs(CPUPPCState *env,
uint64_t v_addr;
/* 64-bit needs to recover the pointer to the vectors from the frame */
__get_user(v_addr, &frame->v_regs);
- v_regs = g2h(v_addr);
+ v_regs = g2h(env_cpu(env), v_addr);
#else
v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
@@ -552,7 +552,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
if (get_ppc64_abi(image) < 2) {
/* ELFv1 PPC64 function pointers are pointers to OPD entries. */
struct target_func_ptr *handler =
- (struct target_func_ptr *)g2h(ka->_sa_handler);
+ (struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
env->nip = tswapl(handler->entry);
env->gpr[2] = tswapl(handler->toc);
} else {
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 9251337daf..9fbc5edc4b 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -652,7 +652,7 @@ static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy
return addr;
}
#else
- return g2h(guest_addr);
+ return g2h_untagged(guest_addr);
#endif
}
@@ -666,10 +666,10 @@ static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
#ifdef DEBUG_REMAP
if (!host_ptr)
return;
- if (host_ptr == g2h(guest_addr))
+ if (host_ptr == g2h_untagged(guest_addr))
return;
if (len > 0)
- memcpy(g2h(guest_addr), host_ptr, len);
+ memcpy(g2h_untagged(guest_addr), host_ptr, len);
g_free(host_ptr);
#endif
}
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 0c2d660bc4..e7fd99f1ac 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -912,7 +912,7 @@ abi_long do_brk(abi_ulong new_brk)
/* Heap contents are initialized to zero, as for anonymous
* mapped pages. */
if (new_brk > target_brk) {
- memset(g2h(target_brk), 0, new_brk - target_brk);
+ memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
}
target_brk = new_brk;
DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
@@ -938,7 +938,7 @@ abi_long do_brk(abi_ulong new_brk)
* come from the remaining part of the previous page: it may
* contains garbage data due to a previous heap usage (grown
* then shrunken). */
- memset(g2h(target_brk), 0, brk_page - target_brk);
+ memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
target_brk = new_brk;
brk_page = HOST_PAGE_ALIGN(target_brk);
@@ -4622,7 +4622,7 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
mmap_lock();
if (shmaddr)
- host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
+ host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
else {
abi_ulong mmap_start;
@@ -4633,7 +4633,8 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
errno = ENOMEM;
host_raddr = (void *)-1;
} else
- host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
+ host_raddr = shmat(shmid, g2h_untagged(mmap_start),
+ shmflg | SHM_REMAP);
}
if (host_raddr == (void *)-1) {
@@ -4674,7 +4675,7 @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
break;
}
}
- rv = get_errno(shmdt(g2h(shmaddr)));
+ rv = get_errno(shmdt(g2h_untagged(shmaddr)));
mmap_unlock();
@@ -6145,10 +6146,10 @@ static abi_long write_ldt(CPUX86State *env,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (env->ldt.base == -1)
return -TARGET_ENOMEM;
- memset(g2h(env->ldt.base), 0,
+ memset(g2h_untagged(env->ldt.base), 0,
TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
env->ldt.limit = 0xffff;
- ldt_table = g2h(env->ldt.base);
+ ldt_table = g2h_untagged(env->ldt.base);
}
/* NOTE: same code as Linux kernel */
@@ -6216,7 +6217,7 @@ static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
- uint64_t *gdt_table = g2h(env->gdt.base);
+ uint64_t *gdt_table = g2h_untagged(env->gdt.base);
struct target_modify_ldt_ldt_s ldt_info;
struct target_modify_ldt_ldt_s *target_ldt_info;
int seg_32bit, contents, read_exec_only, limit_in_pages;
@@ -6302,7 +6303,7 @@ install:
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
struct target_modify_ldt_ldt_s *target_ldt_info;
- uint64_t *gdt_table = g2h(env->gdt.base);
+ uint64_t *gdt_table = g2h_untagged(env->gdt.base);
uint32_t base_addr, limit, flags;
int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
int seg_not_present, useable, lm;
@@ -7597,8 +7598,8 @@ static int do_safe_futex(int *uaddr, int op, int val,
tricky. However they're probably useless because guest atomic
operations won't work either. */
#if defined(TARGET_NR_futex)
-static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
- target_ulong uaddr2, int val3)
+static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
+ target_ulong timeout, target_ulong uaddr2, int val3)
{
struct timespec ts, *pts;
int base_op;
@@ -7619,11 +7620,14 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
} else {
pts = NULL;
}
- return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
+ return do_safe_futex(g2h(cpu, uaddr),
+ op, tswap32(val), pts, NULL, val3);
case FUTEX_WAKE:
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
+ return do_safe_futex(g2h(cpu, uaddr),
+ op, val, NULL, NULL, 0);
case FUTEX_FD:
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
+ return do_safe_futex(g2h(cpu, uaddr),
+ op, val, NULL, NULL, 0);
case FUTEX_REQUEUE:
case FUTEX_CMP_REQUEUE:
case FUTEX_WAKE_OP:
@@ -7633,10 +7637,9 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
to satisfy the compiler. We do not need to tswap TIMEOUT
since it's not compared to guest memory. */
pts = (struct timespec *)(uintptr_t) timeout;
- return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
+ return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
(base_op == FUTEX_CMP_REQUEUE
- ? tswap32(val3)
- : val3));
+ ? tswap32(val3) : val3));
default:
return -TARGET_ENOSYS;
}
@@ -7644,7 +7647,8 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
#endif
#if defined(TARGET_NR_futex_time64)
-static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
+static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
+ int val, target_ulong timeout,
target_ulong uaddr2, int val3)
{
struct timespec ts, *pts;
@@ -7668,11 +7672,12 @@ static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong tim
} else {
pts = NULL;
}
- return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
+ return do_safe_futex(g2h(cpu, uaddr), op,
+ tswap32(val), pts, NULL, val3);
case FUTEX_WAKE:
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
+ return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
case FUTEX_FD:
- return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
+ return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
case FUTEX_REQUEUE:
case FUTEX_CMP_REQUEUE:
case FUTEX_WAKE_OP:
@@ -7682,10 +7687,9 @@ static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong tim
to satisfy the compiler. We do not need to tswap TIMEOUT
since it's not compared to guest memory. */
pts = (struct timespec *)(uintptr_t) timeout;
- return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
+ return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
(base_op == FUTEX_CMP_REQUEUE
- ? tswap32(val3)
- : val3));
+ ? tswap32(val3) : val3));
default:
return -TARGET_ENOSYS;
}
@@ -7860,7 +7864,7 @@ static int open_self_maps(void *cpu_env, int fd)
const char *path;
max = h2g_valid(max - 1) ?
- max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
+ max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
if (page_check_range(h2g(min), max - min, flags) == -1) {
continue;
@@ -8277,8 +8281,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr);
- do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
- NULL, NULL, 0);
+ do_sys_futex(g2h(cpu, ts->child_tidptr),
+ FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}
thread_cpu = NULL;
g_free(ts);
@@ -8643,7 +8647,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
if (!arg5) {
ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
} else {
- ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
+ ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
}
ret = get_errno(ret);
@@ -9738,15 +9742,15 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
/* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
case TARGET_NR_msync:
- return get_errno(msync(g2h(arg1), arg2, arg3));
+ return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
case TARGET_NR_mlock:
- return get_errno(mlock(g2h(arg1), arg2));
+ return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
case TARGET_NR_munlock:
- return get_errno(munlock(g2h(arg1), arg2));
+ return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
case TARGET_NR_mlockall:
@@ -12237,7 +12241,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
case TARGET_NR_set_tid_address:
- return get_errno(set_tid_address((int *)g2h(arg1)));
+ return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif
case TARGET_NR_tkill:
@@ -12324,11 +12328,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_futex
case TARGET_NR_futex:
- return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
+ return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
case TARGET_NR_futex_time64:
- return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
+ return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
case TARGET_NR_inotify_init: