Diffstat (limited to 'include/exec')
-rw-r--r--  include/exec/abi_ptr.h  33
-rw-r--r--  include/exec/address-spaces.h  2
-rw-r--r--  include/exec/breakpoint.h  30
-rw-r--r--  include/exec/confidential-guest-support.h  34
-rw-r--r--  include/exec/cpu-all.h  333
-rw-r--r--  include/exec/cpu-common.h  176
-rw-r--r--  include/exec/cpu-defs.h  170
-rw-r--r--  include/exec/cpu_ldst.h  366
-rw-r--r--  include/exec/cputlb.h  6
-rw-r--r--  include/exec/exec-all.h  504
-rw-r--r--  include/exec/gdbstub.h  252
-rw-r--r--  include/exec/gen-icount.h  81
-rw-r--r--  include/exec/helper-gen-common.h  18
-rw-r--r--  include/exec/helper-gen.h  99
-rw-r--r--  include/exec/helper-gen.h.inc  102
-rw-r--r--  include/exec/helper-head.h.inc (renamed from include/exec/helper-head.h)  56
-rw-r--r--  include/exec/helper-info.c.inc  96
-rw-r--r--  include/exec/helper-proto-common.h  20
-rw-r--r--  include/exec/helper-proto.h  58
-rw-r--r--  include/exec/helper-proto.h.inc  68
-rw-r--r--  include/exec/helper-tcg.h  76
-rw-r--r--  include/exec/hwaddr.h  2
-rw-r--r--  include/exec/ioport.h  4
-rw-r--r--  include/exec/log.h  52
-rw-r--r--  include/exec/memattrs.h  30
-rw-r--r--  include/exec/memop.h  87
-rw-r--r--  include/exec/memopidx.h  55
-rw-r--r--  include/exec/memory-internal.h  4
-rw-r--r--  include/exec/memory.h  337
-rw-r--r--  include/exec/memory_ldst.h.inc  42
-rw-r--r--  include/exec/mmu-access-type.h  18
-rw-r--r--  include/exec/page-vary.h  22
-rw-r--r--  include/exec/plugin-gen.h  30
-rw-r--r--  include/exec/poison.h  14
-rw-r--r--  include/exec/ram_addr.h  74
-rw-r--r--  include/exec/ramblock.h  24
-rw-r--r--  include/exec/ramlist.h  2
-rw-r--r--  include/exec/replay-core.h  80
-rw-r--r--  include/exec/softmmu-semi.h  101
-rw-r--r--  include/exec/target_long.h  44
-rw-r--r--  include/exec/target_page.h  2
-rw-r--r--  include/exec/tb-flush.h  28
-rw-r--r--  include/exec/tlb-common.h  56
-rw-r--r--  include/exec/translate-all.h  7
-rw-r--r--  include/exec/translation-block.h  154
-rw-r--r--  include/exec/translator.h  148
-rw-r--r--  include/exec/tswap.h  82
-rw-r--r--  include/exec/user/abitypes.h  20
-rw-r--r--  include/exec/user/thunk.h  18
-rw-r--r--  include/exec/vaddr.h  18
50 files changed, 2385 insertions, 1750 deletions
diff --git a/include/exec/abi_ptr.h b/include/exec/abi_ptr.h
new file mode 100644
index 0000000000..2aedcceb0c
--- /dev/null
+++ b/include/exec/abi_ptr.h
@@ -0,0 +1,33 @@
+/*
+ * QEMU abi_ptr type definitions
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#ifndef EXEC_ABI_PTR_H
+#define EXEC_ABI_PTR_H
+
+#include "cpu-param.h"
+
+#if defined(CONFIG_USER_ONLY)
+/*
+ * sparc32plus has 64bit long but 32bit space address
+ * this can make bad result with g2h() and h2g()
+ */
+#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
+typedef uint32_t abi_ptr;
+#define TARGET_ABI_FMT_ptr "%x"
+#else
+typedef uint64_t abi_ptr;
+#define TARGET_ABI_FMT_ptr "%"PRIx64
+#endif
+
+#else /* !CONFIG_USER_ONLY */
+
+#include "exec/target_long.h"
+
+typedef target_ulong abi_ptr;
+#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
+
+#endif /* !CONFIG_USER_ONLY */
+
+#endif
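As a quick illustration of how the new header is meant to be consumed (this is a hedged sketch, not part of the patch; the logging helper and its message are assumptions), the same code formats an abi_ptr correctly for both 32-bit and 64-bit guest address spaces via TARGET_ABI_FMT_ptr:

    #include "exec/abi_ptr.h"
    #include "qemu/log.h"

    static void log_guest_fault(abi_ptr addr)
    {
        /* TARGET_ABI_FMT_ptr expands to "%x" or "%"PRIx64 to match abi_ptr. */
        qemu_log("guest fault at 0x" TARGET_ABI_FMT_ptr "\n", addr);
    }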
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
index db8bfa9a92..0d0aa61d68 100644
--- a/include/exec/address-spaces.h
+++ b/include/exec/address-spaces.h
@@ -19,8 +19,6 @@
* you're one of them.
*/
-#include "exec/memory.h"
-
#ifndef CONFIG_USER_ONLY
/* Get the root memory region. This interface should only be used temporarily
diff --git a/include/exec/breakpoint.h b/include/exec/breakpoint.h
new file mode 100644
index 0000000000..95f0482e6d
--- /dev/null
+++ b/include/exec/breakpoint.h
@@ -0,0 +1,30 @@
+/*
+ * QEMU breakpoint & watchpoint definitions
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef EXEC_BREAKPOINT_H
+#define EXEC_BREAKPOINT_H
+
+#include "qemu/queue.h"
+#include "exec/vaddr.h"
+#include "exec/memattrs.h"
+
+typedef struct CPUBreakpoint {
+ vaddr pc;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUBreakpoint) entry;
+} CPUBreakpoint;
+
+typedef struct CPUWatchpoint {
+ vaddr vaddr;
+ vaddr len;
+ vaddr hitaddr;
+ MemTxAttrs hitattrs;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUWatchpoint) entry;
+} CPUWatchpoint;
+
+#endif
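For context, these records are normally kept on per-CPU QTAILQ lists and walked with the queue macros. A minimal sketch, assuming the usual cpu->breakpoints list and the BP_GDB flag from hw/core/cpu.h:

    #include "hw/core/cpu.h"
    #include "exec/breakpoint.h"

    static bool gdb_breakpoint_hit(CPUState *cpu, vaddr pc)
    {
        CPUBreakpoint *bp;

        /* cpu->breakpoints is the per-CPU QTAILQ of CPUBreakpoint entries. */
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & BP_GDB)) {
                return true;
            }
        }
        return false;
    }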
diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h
index ba2dd4b5df..e5b188cffb 100644
--- a/include/exec/confidential-guest-support.h
+++ b/include/exec/confidential-guest-support.h
@@ -23,7 +23,10 @@
#include "qom/object.h"
#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support"
-OBJECT_DECLARE_SIMPLE_TYPE(ConfidentialGuestSupport, CONFIDENTIAL_GUEST_SUPPORT)
+OBJECT_DECLARE_TYPE(ConfidentialGuestSupport,
+ ConfidentialGuestSupportClass,
+ CONFIDENTIAL_GUEST_SUPPORT)
+
struct ConfidentialGuestSupport {
Object parent;
@@ -55,8 +58,37 @@ struct ConfidentialGuestSupport {
typedef struct ConfidentialGuestSupportClass {
ObjectClass parent;
+
+ int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
+ int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp);
} ConfidentialGuestSupportClass;
+static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_init) {
+ return klass->kvm_init(cgs, errp);
+ }
+
+ return 0;
+}
+
+static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_reset) {
+ return klass->kvm_reset(cgs, errp);
+ }
+
+ return 0;
+}
+
#endif /* !CONFIG_USER_ONLY */
#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */
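A hedged sketch of how an accelerator or machine setup path might use the new hook; the surrounding function is hypothetical, only confidential_guest_kvm_init() comes from this header:

    static int machine_init_cgs(MachineState *ms, Error **errp)
    {
        ConfidentialGuestSupport *cgs = ms->cgs;

        /* No confidential-guest object configured: nothing to do. */
        if (!cgs) {
            return 0;
        }
        /* Dispatches to the per-subclass kvm_init (SEV, TDX, ...), if any. */
        return confidential_guest_kvm_init(cgs, errp);
    }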
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 32cfb634c6..e75ec13cd0 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -21,106 +21,25 @@
#include "exec/cpu-common.h"
#include "exec/memory.h"
-#include "qemu/thread.h"
+#include "exec/tswap.h"
#include "hw/core/cpu.h"
-#include "qemu/rcu.h"
-
-#define EXCP_INTERRUPT 0x10000 /* async interruption */
-#define EXCP_HLT 0x10001 /* hlt instruction reached */
-#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
-#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
-#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
/* some important defines:
*
- * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * HOST_BIG_ENDIAN : whether the host cpu is big endian and
* otherwise little endian.
*
- * TARGET_WORDS_BIGENDIAN : same for target cpu
+ * TARGET_BIG_ENDIAN : same for the target cpu
*/
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif
-#ifdef BSWAP_NEEDED
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return bswap16(s);
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return bswap32(s);
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return bswap64(s);
-}
-
-static inline void tswap16s(uint16_t *s)
-{
- *s = bswap16(*s);
-}
-
-static inline void tswap32s(uint32_t *s)
-{
- *s = bswap32(*s);
-}
-
-static inline void tswap64s(uint64_t *s)
-{
- *s = bswap64(*s);
-}
-
-#else
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return s;
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return s;
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return s;
-}
-
-static inline void tswap16s(uint16_t *s)
-{
-}
-
-static inline void tswap32s(uint32_t *s)
-{
-}
-
-static inline void tswap64s(uint64_t *s)
-{
-}
-
-#endif
-
-#if TARGET_LONG_SIZE == 4
-#define tswapl(s) tswap32(s)
-#define tswapls(s) tswap32s((uint32_t *)(s))
-#define bswaptls(s) bswap32s(s)
-#else
-#define tswapl(s) tswap64(s)
-#define tswapls(s) tswap64s((uint64_t *)(s))
-#define bswaptls(s) bswap64s(s)
-#endif
-
/* Target-endianness CPU memory access functions. These fit into the
* {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
*/
-#if defined(TARGET_WORDS_BIGENDIAN)
+#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
@@ -147,11 +66,14 @@ static inline void tswap64s(uint64_t *s)
#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"
-/* On some host systems the guest address space is reserved on the host.
- * This allows the guest address space to be offset to a convenient location.
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default. The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
*/
-extern uintptr_t guest_base;
-extern bool have_guest_base;
extern unsigned long reserved_va;
/*
@@ -171,7 +93,7 @@ extern unsigned long reserved_va;
#define GUEST_ADDR_MAX_ \
((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
#else
@@ -234,42 +156,15 @@ extern const TargetPageBits target_page;
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
-/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
- * when intptr_t is 32-bit and we are aligning a long long.
- */
-extern uintptr_t qemu_host_page_size;
-extern intptr_t qemu_host_page_mask;
-
-#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
-#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
-
-/* same as PROT_xxx */
-#define PAGE_READ 0x0001
-#define PAGE_WRITE 0x0002
-#define PAGE_EXEC 0x0004
-#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
-#define PAGE_VALID 0x0008
-/*
- * Original state of the write flag (used when tracking self-modifying code)
- */
-#define PAGE_WRITE_ORG 0x0010
-/*
- * Invalidate the TLB entry immediately, helpful for s390x
- * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
- */
-#define PAGE_WRITE_INV 0x0020
-/* For use with page_set_flags: page is being replaced; target_data cleared. */
-#define PAGE_RESET 0x0040
-/* For linux-user, indicates that the page is MAP_ANON. */
-#define PAGE_ANON 0x0080
-
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED 0x0100
#endif
-/* Target-specific bits that will be used via page_get_flags(). */
-#define PAGE_TARGET_1 0x0200
-#define PAGE_TARGET_2 0x0400
+/*
+ * For linux-user, indicates that the page is mapped with the same semantics
+ * in both guest and host.
+ */
+#define PAGE_PASSTHROUGH 0x0800
#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);
@@ -279,32 +174,61 @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
int walk_memory_regions(void *, walk_memory_regions_fn);
int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
-int page_check_range(target_ulong start, target_ulong len, int flags);
+void page_set_flags(target_ulong start, target_ulong last, int flags);
+void page_reset_target_data(target_ulong start, target_ulong last);
/**
- * page_alloc_target_data(address, size)
- * @address: guest virtual address
- * @size: size of data to allocate
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
*
- * Allocate @size bytes of out-of-band data to associate with the
- * guest page at @address. If the page is not mapped, NULL will
- * be returned. If there is existing data associated with @address,
- * no new memory will be allocated.
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped. Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ * Context: holding mmap lock
*
- * The memory will be freed when the guest page is deallocated,
- * e.g. with the munmap system call.
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held so that the caller can ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(target_ulong start, target_ulong last);
+
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x. Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
*/
-void *page_alloc_target_data(target_ulong address, size_t size);
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+ target_ulong len, target_ulong align);
/**
* page_get_target_data(address)
* @address: guest virtual address
*
- * Return any out-of-bound memory assocated with the guest page
- * at @address, as per page_alloc_target_data.
+ * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * with the guest page at @address, allocating it if necessary. The
+ * caller should already have verified that the address is valid.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
*/
-void *page_get_target_data(target_ulong address);
+void *page_get_target_data(target_ulong address)
+ __attribute__((returns_nonnull));
#endif
CPUArchState *cpu_copy(CPUArchState *env);
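To make the revised user-only contract concrete, here is a small sketch (the helper and its use of per-page data are assumptions, not part of the patch): validate a guest range first, then fetch its out-of-band data, which the new API guarantees to be non-NULL for a valid page:

    /* Sketch only: validate a guest range, then fetch its per-page data. */
    static void *get_page_data_checked(target_ulong addr, int size)
    {
        if (!page_check_range(addr, size, PAGE_READ | PAGE_WRITE)) {
            return NULL;          /* some page unmapped or missing flags */
        }
        /* Allocated on demand; never NULL for a valid mapped page. */
        return page_get_target_data(addr);
    }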
@@ -369,9 +293,13 @@ CPUArchState *cpu_copy(CPUArchState *env);
* be signaled by probe_access_flags().
*/
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
-#define TLB_MMIO 0
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT 0
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return MMU_USER_IDX;
+}
#else
/*
@@ -382,6 +310,9 @@ CPUArchState *cpu_copy(CPUArchState *env);
*
* Use TARGET_PAGE_BITS_MIN so that these bits are constant
* when TARGET_PAGE_BITS_VARY is in effect.
+ *
+ * The count, if not the placement, of these bits is known
+ * to tcg/tcg-op-ldst.c, check_max_alignment().
*/
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
@@ -390,19 +321,34 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
-/* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap. */
-#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes ignored. */
-#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+ | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED (1 << 2)
+
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
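The split works roughly as sketched below: the fast path only tests the comparator bits, and when TLB_FORCE_SLOW is set the slow path consults the per-access-type side flags. This is a simplified sketch, assuming CPUTLBEntryFull carries a slow_flags[] array indexed by MMUAccessType as introduced alongside these definitions:

    /* Sketch: how a slow-path helper might recover the full flag set. */
    static int tlb_full_flags(const CPUTLBEntryFull *full, uint64_t cmp_flags,
                              MMUAccessType access_type)
    {
        int flags = cmp_flags & TLB_FLAGS_MASK;

        if (cmp_flags & TLB_FORCE_SLOW) {
            /* e.g. TLB_BSWAP, TLB_WATCHPOINT, TLB_CHECK_ALIGNED */
            flags |= full->slow_flags[access_type];
        }
        return flags;
    }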
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
@@ -411,7 +357,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
* @addr: virtual address to test (must be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
-static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}
@@ -422,99 +368,16 @@ static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
* @addr: virtual address to test (need not be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
-static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
-#ifdef CONFIG_TCG
-/* accel/tcg/cpu-exec.c */
-void dump_drift_info(void);
-/* accel/tcg/translate-all.c */
-void dump_exec_info(void);
-void dump_opcount_info(void);
-#endif /* CONFIG_TCG */
-
#endif /* !CONFIG_USER_ONLY */
-#ifdef CONFIG_TCG
-/* accel/tcg/cpu-exec.c */
-int cpu_exec(CPUState *cpu);
-void tcg_exec_realizefn(CPUState *cpu, Error **errp);
-void tcg_exec_unrealizefn(CPUState *cpu);
-#endif /* CONFIG_TCG */
-
-/* Returns: 0 on success, -1 on error */
-int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- void *ptr, target_ulong len, bool is_write);
-
-/**
- * cpu_set_cpustate_pointers(cpu)
- * @cpu: The cpu object
- *
- * Set the generic pointers in CPUState into the outer object.
- */
-static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
-{
- cpu->parent_obj.env_ptr = &cpu->env;
- cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
-}
-
-/**
- * env_archcpu(env)
- * @env: The architecture environment
- *
- * Return the ArchCPU associated with the environment.
- */
-static inline ArchCPU *env_archcpu(CPUArchState *env)
-{
- return container_of(env, ArchCPU, env);
-}
-
-/**
- * env_cpu(env)
- * @env: The architecture environment
- *
- * Return the CPUState associated with the environment.
- */
-static inline CPUState *env_cpu(CPUArchState *env)
-{
- return &env_archcpu(env)->parent_obj;
-}
-
-/**
- * env_neg(env)
- * @env: The architecture environment
- *
- * Return the CPUNegativeOffsetState associated with the environment.
- */
-static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
-{
- ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
- return &arch_cpu->neg;
-}
-
-/**
- * cpu_neg(cpu)
- * @cpu: The generic CPUState
- *
- * Return the CPUNegativeOffsetState associated with the cpu.
- */
-static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
-{
- ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
- return &arch_cpu->neg;
-}
-
-/**
- * env_tlb(env)
- * @env: The architecture environment
- *
- * Return the CPUTLB state associated with the environment.
- */
-static inline CPUTLB *env_tlb(CPUArchState *env)
-{
- return &env_neg(env)->tlb;
-}
+/* Validate correct placement of CPUArchState. */
+#include "cpu.h"
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
#endif /* CPU_ALL_H */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 039d422bf4..6d5318895a 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -3,16 +3,31 @@
/* CPU interfaces that are target independent. */
+#include "exec/vaddr.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
+#include "hw/core/cpu.h"
+#include "tcg/debug-assert.h"
+
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
+
+void cpu_exec_init_all(void);
+void cpu_exec_step_atomic(CPUState *cpu);
+
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())
/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
+extern QemuMutex qemu_cpu_list_lock;
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
-
-void tcg_flush_softmmu_tlb(CPUState *cs);
+unsigned int cpu_list_generation_id_get(void);
void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);
@@ -25,7 +40,7 @@ enum device_endian {
DEVICE_LITTLE_ENDIAN,
};
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
@@ -47,7 +62,23 @@ typedef uintptr_t ram_addr_t;
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
+
+/*
+ * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
+ *
+ * @ptr: The host pointer to translate.
+ * @round_offset: Whether to round the result offset down to a target page
+ * @offset: Will be set to the offset within the returned RAMBlock.
+ *
+ * Returns: RAMBlock (or NULL if not found)
+ *
+ * By the time this function returns, the returned pointer is not protected
+ * by RCU anymore. If the caller is not within an RCU critical section and
+ * does not hold the BQL, it must have other means of protecting the
+ * pointer, such as a reference to the memory region that owns the RAMBlock.
+ */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
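A hedged usage sketch for the newly documented translation helper; the caller shown is hypothetical, and RCU_READ_LOCK_GUARD() from qemu/rcu.h is one way to satisfy the protection requirement while the result is used:

    static bool host_ptr_to_ram_offset(void *host, ram_addr_t *offset)
    {
        RAMBlock *rb;

        RCU_READ_LOCK_GUARD();
        rb = qemu_ram_block_from_host(host, false, offset);
        /* rb is only stable under RCU; we only report whether it exists. */
        return rb != NULL;
    }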
@@ -65,10 +96,34 @@ void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);
+bool qemu_ram_is_named_file(RAMBlock *rb);
+int qemu_ram_get_fd(RAMBlock *rb);
size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
+/**
+ * cpu_address_space_init:
+ * @cpu: CPU to add this address space to
+ * @asidx: integer index of this address space
+ * @prefix: prefix to be used as name of address space
+ * @mr: the root memory region of address space
+ *
+ * Add the specified address space to the CPU's cpu_ases list.
+ * The address space added with @asidx 0 is the one used for the
+ * convenience pointer cpu->as.
+ * The target-specific code which registers ASes is responsible
+ * for defining what semantics address space 0, 1, 2, etc have.
+ *
+ * Before the first call to this function, the caller must set
+ * cpu->num_ases to the total number of address spaces it needs
+ * to support.
+ *
+ * Note that with KVM only one address space is supported.
+ */
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr);
+
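For reference, a typical registration sequence in a target's realize path might look like the hedged sketch below; the secure/normal split is just an example and the memory regions are assumed to exist:

    /* Sketch: register two address spaces before the CPU starts running. */
    static void my_cpu_register_ases(CPUState *cpu, MemoryRegion *secure_mr)
    {
        /* Must be set before the first cpu_address_space_init() call. */
        cpu->num_ases = 2;
        /* Index 0 becomes the convenience pointer cpu->as. */
        cpu_address_space_init(cpu, 0, "cpu-memory", get_system_memory());
        cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
    }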
void cpu_physical_memory_rw(hwaddr addr, void *buf,
hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
@@ -104,10 +159,123 @@ typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
+int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
+ size_t length);
#endif
+/* Returns: 0 on success, -1 on error */
+int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ void *ptr, size_t len, bool is_write);
+
/* vl.c */
-extern int singlestep;
+void list_cpus(void);
+
+#ifdef CONFIG_TCG
+/**
+ * cpu_unwind_state_data:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @data: output data
+ *
+ * Attempt to load the the unwind state for a host pc occurring in
+ * translated code. If @host_pc is not in translated code, the
+ * function returns false; otherwise @data is loaded.
+ * This is the same unwind info as given to restore_state_to_opc.
+ */
+bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
+
+/**
+ * cpu_restore_state:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @return: true if state was restored, false otherwise
+ *
+ * Attempt to restore the state for a fault occurring in translated
+ * code. If @host_pc is not in translated code no state is
+ * restored and the function returns false.
+ */
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
+
+G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+#endif /* CONFIG_TCG */
+G_NORETURN void cpu_loop_exit(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
+
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+/*
+ * Original state of the write flag (used when tracking self-modifying code)
+ */
+#define PAGE_WRITE_ORG 0x0010
+/*
+ * Invalidate the TLB entry immediately, helpful for s390x
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
+ */
+#define PAGE_WRITE_INV 0x0020
+/* For use with page_set_flags: page is being replaced; target_data cleared. */
+#define PAGE_RESET 0x0040
+/* For linux-user, indicates that the page is MAP_ANON. */
+#define PAGE_ANON 0x0080
+
+/* Target-specific bits that will be used via page_get_flags(). */
+#define PAGE_TARGET_1 0x0200
+#define PAGE_TARGET_2 0x0400
+
+/*
+ * For linux-user, indicates that the page is mapped with the same semantics
+ * in both guest and host.
+ */
+#define PAGE_PASSTHROUGH 0x0800
+
+/* accel/tcg/cpu-exec.c */
+int cpu_exec(CPUState *cpu);
+
+/**
+ * env_archcpu(env)
+ * @env: The architecture environment
+ *
+ * Return the ArchCPU associated with the environment.
+ */
+static inline ArchCPU *env_archcpu(CPUArchState *env)
+{
+ return (void *)env - sizeof(CPUState);
+}
+
+/**
+ * env_cpu(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline CPUState *env_cpu(CPUArchState *env)
+{
+ return (void *)env - sizeof(CPUState);
+}
+
+#ifndef CONFIG_USER_ONLY
+/**
+ * cpu_mmu_index:
+ * @cs: The CPU state
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ *
+ * The user-only version of this function is inline in cpu-all.h,
+ * where it always returns MMU_USER_IDX.
+ */
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ int ret = cs->cc->mmu_index(cs, ifetch);
+ tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
+ return ret;
+}
+#endif /* !CONFIG_USER_ONLY */
#endif /* CPU_COMMON_H */
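The env_archcpu()/env_cpu() accessors above rely on the layout checked in cpu-all.h (parent_obj at offset 0, env immediately after CPUState), which is what makes the pointer arithmetic legal. A hedged sketch of how target helper code typically moves between the two views and picks an mmu index:

    /* Sketch: convert between env, CPUState and ArchCPU, then pick an index. */
    static int helper_current_mmu_idx(CPUArchState *env)
    {
        CPUState *cs = env_cpu(env);      /* env sits right after CPUState  */
        ArchCPU *cpu = env_archcpu(env);  /* same address, typed as ArchCPU */

        (void)cpu;
        return cpu_mmu_index(cs, false);  /* data access regime */
    }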
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index ba3cd32a1e..0dbef3010c 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -19,7 +19,7 @@
#ifndef CPU_DEFS_H
#define CPU_DEFS_H
-#ifndef NEED_CPU_H
+#ifndef COMPILING_PER_TARGET
#error cpu.h included from common code
#endif
@@ -36,9 +36,6 @@
#ifndef TARGET_LONG_BITS
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
-#ifndef NB_MMU_MODES
-# error NB_MMU_MODES must be defined in cpu-param.h
-#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
@@ -55,36 +52,9 @@
# endif
#endif
-#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
-
-/* target_ulong is the type of a virtual address */
-#if TARGET_LONG_SIZE == 4
-typedef int32_t target_long;
-typedef uint32_t target_ulong;
-#define TARGET_FMT_lx "%08x"
-#define TARGET_FMT_ld "%d"
-#define TARGET_FMT_lu "%u"
-#elif TARGET_LONG_SIZE == 8
-typedef int64_t target_long;
-typedef uint64_t target_ulong;
-#define TARGET_FMT_lx "%016" PRIx64
-#define TARGET_FMT_ld "%" PRId64
-#define TARGET_FMT_lu "%" PRIu64
-#else
-#error TARGET_LONG_SIZE undefined
-#endif
-
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
-
-/* use a fully associative victim tlb of 8 entries */
-#define CPU_VTLB_SIZE 8
-
-#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
-#define CPU_TLB_ENTRY_BITS 4
-#else
-#define CPU_TLB_ENTRY_BITS 5
-#endif
+#include "exec/target_long.h"
+#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8
@@ -108,138 +78,6 @@ typedef uint64_t target_ulong;
# endif
# endif
-typedef struct CPUTLBEntry {
- /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
- bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
- go directly to ram.
- bit 3 : indicates that the entry is invalid
- bit 2..0 : zero
- */
- union {
- struct {
- target_ulong addr_read;
- target_ulong addr_write;
- target_ulong addr_code;
- /* Addend to virtual address to get host address. IO accesses
- use the corresponding iotlb value. */
- uintptr_t addend;
- };
- /* padding to get a power of two size */
- uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
- };
-} CPUTLBEntry;
-
-QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
-
-/* The IOTLB is not accessed directly inline by generated TCG code,
- * so the CPUIOTLBEntry layout is not as critical as that of the
- * CPUTLBEntry. (This is also why we don't want to combine the two
- * structs into one.)
- */
-typedef struct CPUIOTLBEntry {
- /*
- * @addr contains:
- * - in the lower TARGET_PAGE_BITS, a physical section number
- * - with the lower TARGET_PAGE_BITS masked off, an offset which
- * must be added to the virtual address to obtain:
- * + the ram_addr_t of the target RAM (if the physical section
- * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
- * + the offset within the target MemoryRegion (otherwise)
- */
- hwaddr addr;
- MemTxAttrs attrs;
-} CPUIOTLBEntry;
-
-/*
- * Data elements that are per MMU mode, minus the bits accessed by
- * the TCG fast path.
- */
-typedef struct CPUTLBDesc {
- /*
- * Describe a region covering all of the large pages allocated
- * into the tlb. When any page within this region is flushed,
- * we must flush the entire tlb. The region is matched if
- * (addr & large_page_mask) == large_page_addr.
- */
- target_ulong large_page_addr;
- target_ulong large_page_mask;
- /* host time (in ns) at the beginning of the time window */
- int64_t window_begin_ns;
- /* maximum number of entries observed in the window */
- size_t window_max_entries;
- size_t n_used_entries;
- /* The next index to use in the tlb victim table. */
- size_t vindex;
- /* The tlb victim table, in two parts. */
- CPUTLBEntry vtable[CPU_VTLB_SIZE];
- CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
- /* The iotlb. */
- CPUIOTLBEntry *iotlb;
-} CPUTLBDesc;
-
-/*
- * Data elements that are per MMU mode, accessed by the fast path.
- * The structure is aligned to aid loading the pair with one insn.
- */
-typedef struct CPUTLBDescFast {
- /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
- uintptr_t mask;
- /* The array of tlb entries itself. */
- CPUTLBEntry *table;
-} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
-
-/*
- * Data elements that are shared between all MMU modes.
- */
-typedef struct CPUTLBCommon {
- /* Serialize updates to f.table and d.vtable, and others as noted. */
- QemuSpin lock;
- /*
- * Within dirty, for each bit N, modifications have been made to
- * mmu_idx N since the last time that mmu_idx was flushed.
- * Protected by tlb_c.lock.
- */
- uint16_t dirty;
- /*
- * Statistics. These are not lock protected, but are read and
- * written atomically. This allows the monitor to print a snapshot
- * of the stats without interfering with the cpu.
- */
- size_t full_flush_count;
- size_t part_flush_count;
- size_t elide_flush_count;
-} CPUTLBCommon;
-
-/*
- * The entire softmmu tlb, for all MMU modes.
- * The meaning of each of the MMU modes is defined in the target code.
- * Since this is placed within CPUNegativeOffsetState, the smallest
- * negative offsets are at the end of the struct.
- */
-
-typedef struct CPUTLB {
- CPUTLBCommon c;
- CPUTLBDesc d[NB_MMU_MODES];
- CPUTLBDescFast f[NB_MMU_MODES];
-} CPUTLB;
-
-/* This will be used by TCG backends to compute offsets. */
-#define TLB_MASK_TABLE_OFS(IDX) \
- ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
-
-#else
-
-typedef struct CPUTLB { } CPUTLB;
-
-#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
-
-/*
- * This structure must be placed in ArchCPU immediately
- * before CPUArchState, as a field named "neg".
- */
-typedef struct CPUNegativeOffsetState {
- CPUTLB tlb;
- IcountDecr icount_decr;
-} CPUNegativeOffsetState;
+#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
#endif
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index ce6ce82618..11ba3778ba 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -1,5 +1,5 @@
/*
- * Software MMU support
+ * Software MMU support (per-target)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -28,10 +28,12 @@
* load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
* cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
* cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
*
* store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
* cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
* cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
*
* sign is:
* (empty): for 32 and 64 bit sizes
@@ -53,22 +55,26 @@
* The "mmuidx" suffix carries an extra mmu_idx argument that specifies
* the index to use; the "data" and "code" suffixes take the index from
* cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements. The alignment will be enforced.
*/
#ifndef CPU_LDST_H
#define CPU_LDST_H
-#if defined(CONFIG_USER_ONLY)
-/* sparc32plus has 64bit long but 32bit space address
- * this can make bad result with g2h() and h2g()
- */
-#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
-typedef uint32_t abi_ptr;
-#define TARGET_ABI_FMT_ptr "%x"
-#else
-typedef uint64_t abi_ptr;
-#define TARGET_ABI_FMT_ptr "%"PRIx64
+#ifndef CONFIG_TCG
+#error Can only include this header with TCG
#endif
+#include "exec/memopidx.h"
+#include "exec/abi_ptr.h"
+#include "exec/mmu-access-type.h"
+#include "qemu/int128.h"
+
+#if defined(CONFIG_USER_ONLY)
+
+#include "user/guest-base.h"
+
#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
@@ -111,19 +117,15 @@ static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
assert(h2g_valid(x)); \
h2g_nocheck(x); \
})
-#else
-typedef target_ulong abi_ptr;
-#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
-#endif
+
+#endif /* CONFIG_USER_ONLY */
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
-
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
-
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
@@ -131,37 +133,31 @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
-
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
-
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t ra);
-
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
@@ -169,216 +165,137 @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t ra);
-#if defined(CONFIG_USER_ONLY)
-
-extern __thread uintptr_t helper_retaddr;
-
-static inline void set_helper_retaddr(uintptr_t ra)
-{
- helper_retaddr = ra;
- /*
- * Ensure that this write is visible to the SIGSEGV handler that
- * may be invoked due to a subsequent invalid memory operation.
- */
- signal_barrier();
-}
-
-static inline void clear_helper_retaddr(void)
-{
- /*
- * Ensure that previous memory operations have succeeded before
- * removing the data visible to the signal handler.
- */
- signal_barrier();
- helper_retaddr = 0;
-}
-
-/*
- * Provide the same *_mmuidx_ra interface as for softmmu.
- * The mmu_idx argument is ignored.
- */
-
-static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldub_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsb_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_lduw_be_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsw_be_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldl_be_data_ra(env, addr, ra);
-}
-
-static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldq_be_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_lduw_le_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsw_le_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldl_le_data_ra(env, addr, ra);
-}
-
-static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldq_le_data_ra(env, addr, ra);
-}
-
-static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx, uintptr_t ra)
-{
- cpu_stb_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stw_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stl_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stq_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stw_le_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stl_le_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stq_le_data_ra(env, addr, val, ra);
-}
-
-#else
-
-/* Needed for TCG_OVERSIZED_GUEST */
-#include "tcg/tcg.h"
-
-static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
-{
-#if TCG_OVERSIZED_GUEST
- return entry->addr_write;
-#else
- return qatomic_read(&entry->addr_write);
-#endif
-}
-
-/* Find the TLB index corresponding to the mmu_idx + address pair. */
-static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
-{
- uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
-
- return (addr >> TARGET_PAGE_BITS) & size_mask;
-}
-
-/* Find the TLB entry corresponding to the mmu_idx + address pair. */
-static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
-{
- return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
-}
-
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
-
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
-#endif /* defined(CONFIG_USER_ONLY) */
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra);
+
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
+ (CPUArchState *env, abi_ptr addr, TYPE val, \
+ MemOpIdx oi, uintptr_t retaddr);
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
-#ifdef TARGET_WORDS_BIGENDIAN
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#if TARGET_BIG_ENDIAN
# define cpu_lduw_data cpu_lduw_be_data
# define cpu_ldsw_data cpu_ldsw_be_data
# define cpu_ldl_data cpu_ldl_be_data
@@ -424,6 +341,15 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
#endif
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
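Tying together the naming scheme described at the top of this header, here is a hedged example of a target helper using both the _ra and _mmu variants; the helper itself and its MemOp choice are assumptions, not part of the patch:

    #include "qemu/bswap.h"
    #include "exec/cpu_ldst.h"

    /* Sketch: read a guest word and write it back byte-swapped. */
    static void swap_word_at(CPUArchState *env, abi_ptr addr, uintptr_t ra)
    {
        /* Load 4 bytes in target-endian order, restartable at 'ra'. */
        uint32_t val = cpu_ldl_data_ra(env, addr, ra);

        /* Store with an explicit MemOpIdx: 4-byte, aligned, little-endian. */
        MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN,
                                     cpu_mmu_index(env_cpu(env), false));
        cpu_stl_mmu(env, addr, bswap32(val), oi, ra);
    }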
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index 19b16e58f8..ef18642a32 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -22,10 +22,14 @@
#include "exec/cpu-common.h"
+#ifdef CONFIG_TCG
+
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
-void tlb_flush_counts(size_t *full, size_t *part, size_t *elide);
#endif
+
+#endif /* CONFIG_TCG */
+
#endif
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 9d5987ba04..4c5e470581 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -21,50 +21,13 @@
#define EXEC_ALL_H
#include "cpu.h"
-#ifdef CONFIG_TCG
-#include "exec/cpu_ldst.h"
-#endif
-#include "sysemu/cpu-timers.h"
-
-/* allow to see translation results - the slowdown should be negligible, so we leave it */
-#define DEBUG_DISAS
-
-/* Page tracking code uses ram addresses in system mode, and virtual
- addresses in userspace mode. Define tb_page_addr_t to be an appropriate
- type. */
#if defined(CONFIG_USER_ONLY)
-typedef abi_ulong tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
-#else
-typedef ram_addr_t tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#include "exec/abi_ptr.h"
+#include "exec/cpu_ldst.h"
#endif
-
-#include "qemu/log.h"
-
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
-void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
- target_ulong *data);
-
-/**
- * cpu_restore_state:
- * @cpu: the vCPU state is to be restore to
- * @searched_pc: the host PC the fault occurred at
- * @will_exit: true if the TB executed will be interrupted after some
- cpu adjustments. Required for maintaining the correct
- icount valus
- * @return: true if state was restored, false otherwise
- *
- * Attempt to restore the state for a fault occurring in translated
- * code. If the searched_pc is not in translated code no state is
- * restored and the function returns false.
- */
-bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
-
-void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
-void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
-void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
-void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+#include "exec/mmu-access-type.h"
+#include "exec/translation-block.h"
+#include "qemu/clang-tsa.h"
/**
* cpu_loop_exit_requested:
@@ -80,34 +43,9 @@ void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
*/
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
- return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+ return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}
-#if !defined(CONFIG_USER_ONLY)
-void cpu_reloading_memory_map(void);
-/**
- * cpu_address_space_init:
- * @cpu: CPU to add this address space to
- * @asidx: integer index of this address space
- * @prefix: prefix to be used as name of address space
- * @mr: the root memory region of address space
- *
- * Add the specified address space to the CPU's cpu_ases list.
- * The address space added with @asidx 0 is the one used for the
- * convenience pointer cpu->as.
- * The target-specific code which registers ASes is responsible
- * for defining what semantics address space 0, 1, 2, etc have.
- *
- * Before the first call to this function, the caller must set
- * cpu->num_ases to the total number of address spaces it needs
- * to support.
- *
- * Note that with KVM only one address space is supported.
- */
-void cpu_address_space_init(CPUState *cpu, int asidx,
- const char *prefix, MemoryRegion *mr);
-#endif
-
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
@@ -128,7 +66,7 @@ void tlb_destroy(CPUState *cpu);
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
* tlb_flush_page_all_cpus:
* @cpu: src CPU of the flush
@@ -137,7 +75,7 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
* tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush
@@ -149,7 +87,7 @@ void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
* the source vCPUs safe work is complete. This will depend on when
* the guests translation ends the TB.
*/
-void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
@@ -184,7 +122,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus:
@@ -195,7 +133,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
* Flush one page from the TLB of all CPUs, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
@@ -209,7 +147,7 @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
* complete once the source vCPUs safe work is complete. This will
* depend on when the guests translation ends the TB.
*/
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_by_mmuidx:
@@ -252,14 +190,14 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
*
* Similar to tlb_flush_page_mask, but with a bitmap of indexes.
*/
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits);
/* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
- (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
+ (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
/**
* tlb_flush_range_by_mmuidx
@@ -272,24 +210,46 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
* For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
* comparing only the low @bits worth of each virtual page.
*/
-void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
- target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
unsigned bits);
/* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
- target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
- target_ulong len,
+ vaddr addr,
+ vaddr len,
uint16_t idxmap,
unsigned bits);
/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @addr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
+ CPUTLBEntryFull *full);
+
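As an illustration of the convention described above, a target's tlb_fill hook could populate a CPUTLBEntryFull and install it roughly as follows. This is a hedged sketch: the function name is hypothetical, and it assumes a page table walk has already produced @phys and @prot.

    static void example_fill_one(CPUState *cs, vaddr addr, int mmu_idx,
                                 hwaddr phys, int prot)
    {
        CPUTLBEntryFull full = {
            .phys_addr    = phys & TARGET_PAGE_MASK,
            .attrs        = MEMTXATTRS_UNSPECIFIED,
            .prot         = prot,              /* PAGE_READ/WRITE/EXEC bits */
            .lg_page_size = TARGET_PAGE_BITS,  /* one TARGET_PAGE_SIZE page */
        };

        /* xlat_section is filled in by the core; everything else is ours. */
        tlb_set_page_full(cs, mmu_idx, addr & TARGET_PAGE_MASK, &full);
    }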
+/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
- * @vaddr: virtual address of page to add entry for
+ * @addr: virtual address of page to add entry for
* @paddr: physical address of the page
* @attrs: memory transaction attributes
* @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
@@ -297,7 +257,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* @size: size of the page in bytes
*
* Add an entry to this CPU's TLB (a mapping from virtual address
- * @vaddr to physical address @paddr) with the specified memory
+ * @addr to physical address @paddr) with the specified memory
* transaction attributes. This is generally called by the target CPU
* specific code after it has been called through the tlb_fill()
* entry point and performed a successful page table walk to find
@@ -308,18 +268,18 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
* used by tlb_flush_page.
*/
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs,
- int prot, int mmu_idx, target_ulong size);
+ int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
*
* This function is equivalent to calling tlb_set_page_with_attrs()
* with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
* as a convenience for CPUs which don't use memory transaction attributes.
*/
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, target_ulong size);
+ int mmu_idx, vaddr size);
#else
static inline void tlb_init(CPUState *cpu)
{
@@ -327,14 +287,13 @@ static inline void tlb_init(CPUState *cpu)
static inline void tlb_destroy(CPUState *cpu)
{
}
-static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
- target_ulong addr)
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
@@ -347,7 +306,7 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
- target_ulong addr, uint16_t idxmap)
+ vaddr addr, uint16_t idxmap)
{
}
@@ -355,12 +314,12 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
@@ -373,37 +332,37 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap,
unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap,
unsigned bits)
{
}
static inline void
-tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits)
{
}
-static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
- target_ulong len, uint16_t idxmap,
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
- target_ulong len,
+ vaddr addr,
+ vaddr len,
uint16_t idxmap,
unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
- target_long len,
+ vaddr addr,
+ vaddr len,
uint16_t idxmap,
unsigned bits)
{
@@ -426,16 +385,16 @@ static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* Finally, return the host address for a page that is backed by RAM,
* or NULL if the page requires I/O.
*/
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
-static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
-static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
@@ -445,6 +404,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
* probe_access_flags:
* @env: CPUArchState
* @addr: guest virtual address to look up
+ * @size: size of the access
* @access_type: read, write or execute permission
* @mmu_idx: MMU index to use for lookup
* @nonfault: suppress the fault
@@ -459,129 +419,103 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
* For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
*/
-int probe_access_flags(CPUArchState *env, target_ulong addr,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr);
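The nonfault path is the interesting one: a helper can probe first and only commit side effects once the access is known to hit RAM. A hedged sketch (the helper name is hypothetical, and the slow paths are merely stubbed out):

    static bool example_try_fast_store(CPUArchState *env, vaddr addr, int size,
                                       int mmu_idx, uintptr_t retaddr)
    {
        void *host;
        int flags = probe_access_flags(env, addr, size, MMU_DATA_STORE,
                                       mmu_idx, true /* nonfault */,
                                       &host, retaddr);

        if (flags & TLB_INVALID_MASK) {
            /* Page not mapped; with nonfault=true no exception was raised. */
            return false;
        }
        if (flags & TLB_MMIO) {
            /* Not plain RAM; the caller must take a slow path. */
            return false;
        }
        memset(host, 0, size);      /* direct host access is permitted */
        return true;
    }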
-#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
-
-/* Estimated block size for TB allocation. */
-/* ??? The following is based on a 2015 survey of x86_64 host output.
- Better would seem to be some sort of dynamically sized TB array,
- adapting to the block sizes actually being produced. */
-#if defined(CONFIG_SOFTMMU)
-#define CODE_GEN_AVG_BLOCK_SIZE 400
-#else
-#define CODE_GEN_AVG_BLOCK_SIZE 150
-#endif
-
-/*
- * Translation Cache-related fields of a TB.
- * This struct exists just for convenience; we keep track of TB's in a binary
- * search tree, and the only fields needed to compare TB's in the tree are
- * @ptr and @size.
- * Note: the address of search data can be obtained by adding @size to @ptr.
- */
-struct tb_tc {
- const void *ptr; /* pointer to the translated code */
- size_t size;
-};
-
-struct TranslationBlock {
- target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
- target_ulong cs_base; /* CS base for this block */
- uint32_t flags; /* flags defining in which context the code was generated */
- uint32_t cflags; /* compile flags */
-
-/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
-#define CF_COUNT_MASK 0x000001ff
-#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
-#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
-#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
-#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
-#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
-#define CF_USE_ICOUNT 0x00020000
-#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
-#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
-#define CF_CLUSTER_SHIFT 24
-
- /* Per-vCPU dynamic tracing state used to generate this TB */
- uint32_t trace_vcpu_dstate;
-
- /*
- * Above fields used for comparing
- */
-
- /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
- uint16_t size;
- uint16_t icount;
+#ifndef CONFIG_USER_ONLY
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ */
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost,
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
- struct tb_tc tc;
+/**
+ * probe_access_full_mmu() - Like probe_access_full, except it cannot fault
+ * and does not trigger instrumentation.
+ *
+ * @env: CPUArchState
+ * @addr: virtual address to probe
+ * @size: size of the probe
+ * @access_type: read, write or execute permission
+ * @mmu_idx: softmmu index
+ * @phost: ptr to return value host address or NULL
+ * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ *
+ * Returns: TLB flags as per probe_access_flags()
+ */
+int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ void **phost, CPUTLBEntryFull **pfull);
- /* first and second physical page containing code. The lower bit
- of the pointer tells the index in page_next[].
- The list is protected by the TB's page('s) lock(s) */
- uintptr_t page_next[2];
- tb_page_addr_t page_addr[2];
+#endif
- /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
- QemuSpin jmp_lock;
+static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ return tb->itree.start;
+#else
+ return tb->page_addr[0];
+#endif
+}
- /* The following data are used to directly call another TB from
- * the code of this one. This can be done either by emitting direct or
- * indirect native jump instructions. These jumps are reset so that the TB
- * just continues its execution. The TB can be linked to another one by
- * setting one of the jump targets (or patching the jump instruction). Only
- * two of such jumps are supported.
- */
- uint16_t jmp_reset_offset[2]; /* offset of original jump target */
-#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
- uintptr_t jmp_target_arg[2]; /* target address or offset */
+static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
+ return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
+#else
+ return tb->page_addr[1];
+#endif
+}
+static inline void tb_set_page_addr0(TranslationBlock *tb,
+ tb_page_addr_t addr)
+{
+#ifdef CONFIG_USER_ONLY
+ tb->itree.start = addr;
/*
- * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
- * Each TB can have two outgoing jumps, and therefore can participate
- * in two lists. The list entries are kept in jmp_list_next[2]. The least
- * significant bit (LSB) of the pointers in these lists is used to encode
- * which of the two list entries is to be used in the pointed TB.
- *
- * List traversals are protected by jmp_lock. The destination TB of each
- * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
- * can be acquired from any origin TB.
- *
- * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
- * being invalidated, so that no further outgoing jumps from it can be set.
- *
- * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
- * to a destination TB that has CF_INVALID set.
+ * To begin, we record an interval of one byte. When the translation
+ * loop encounters a second page, the interval will be extended to
+ * include the first byte of the second page, which is sufficient to
+ * allow tb_page_addr1() above to work properly. The final corrected
+ * interval will be set by tb_page_add() from tb->size before the
+ * node is added to the interval tree.
*/
- uintptr_t jmp_list_head;
- uintptr_t jmp_list_next[2];
- uintptr_t jmp_dest[2];
-};
+ tb->itree.last = addr;
+#else
+ tb->page_addr[0] = addr;
+#endif
+}
-/* Hide the qatomic_read to make code a little easier on the eyes */
-static inline uint32_t tb_cflags(const TranslationBlock *tb)
+static inline void tb_set_page_addr1(TranslationBlock *tb,
+ tb_page_addr_t addr)
{
- return qatomic_read(&tb->cflags);
+#ifdef CONFIG_USER_ONLY
+ /* Extend the interval to the first byte of the second page. See above. */
+ tb->itree.last = addr;
+#else
+ tb->page_addr[1] = addr;
+#endif
}
/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);
/* TranslationBlock invalidate API */
-#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(target_ulong addr);
-void tb_invalidate_phys_range(target_ulong start, target_ulong end);
-#else
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
-#endif
-void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags,
- uint32_t cflags);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
@@ -602,14 +536,6 @@ extern __thread uintptr_t tci_tb_ptr;
smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void)
-{
-}
-#endif
-
#if !defined(CONFIG_USER_ONLY)
/**
@@ -625,92 +551,112 @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
#endif
-#if defined(CONFIG_USER_ONLY)
-void mmap_lock(void);
-void mmap_unlock(void);
-bool have_mmap_lock(void);
-
/**
- * get_page_addr_code() - user-mode version
+ * get_page_addr_code_hostp()
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
- * Returns @addr.
+ * See get_page_addr_code() (full-system version) for documentation on the
+ * return value.
+ *
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
+ * to the host address where @addr's content is kept.
+ *
+ * Note: this function can trigger an exception.
*/
-static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
- target_ulong addr)
-{
- return addr;
-}
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
+ void **hostp);
/**
- * get_page_addr_code_hostp() - user-mode version
+ * get_page_addr_code()
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
- * Returns @addr.
+ * If we cannot translate and execute from the entire RAM page, or if
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
+ * ram_addr_t corresponding to the guest code at @addr.
*
- * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
- * is kept.
+ * Note: this function can trigger an exception.
*/
-static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
- target_ulong addr,
- void **hostp)
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
+ vaddr addr)
+{
+ return get_page_addr_code_hostp(env, addr, NULL);
+}
+
+#if defined(CONFIG_USER_ONLY)
+void TSA_NO_TSA mmap_lock(void);
+void TSA_NO_TSA mmap_unlock(void);
+bool have_mmap_lock(void);
+
+static inline void mmap_unlock_guard(void *unused)
{
- if (hostp) {
- *hostp = g2h_untagged(addr);
- }
- return addr;
+ mmap_unlock();
}
+#define WITH_MMAP_LOCK_GUARD() \
+ for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
+ = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
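The guard is used as a statement prefix; the lock is taken on entry to the block and the cleanup attribute releases it on every exit path, for example:

    static void example_update_pages(void)
    {
        WITH_MMAP_LOCK_GUARD() {
            /* mmap_lock() is held throughout this block; walk or update
             * the user-mode page state here. */
        }
        /* The lock has been released here, even after an early return
         * from inside the block. */
    }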
+
/**
- * cpu_signal_handler
- * @signum: host signal number
- * @pinfo: host siginfo_t
- * @puc: host ucontext_t
+ * adjust_signal_pc:
+ * @pc: raw pc from the host signal ucontext_t.
+ * @is_write: host memory operation was write, or read-modify-write.
*
- * To be called from the SIGBUS and SIGSEGV signal handler to inform the
- * virtual cpu of exceptions. Returns true if the signal was handled by
- * the virtual CPU.
+ * Alter @pc as required for unwinding. Return the type of the
+ * guest memory access -- host reads may be for guest execution.
*/
-int cpu_signal_handler(int signum, void *pinfo, void *puc);
-
-#else
-static inline void mmap_lock(void) {}
-static inline void mmap_unlock(void) {}
+MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
/**
- * get_page_addr_code() - full-system version
- * @env: CPUArchState
- * @addr: guest virtual address of guest code
- *
- * If we cannot translate and execute from the entire RAM page, or if
- * the region is not backed by RAM, returns -1. Otherwise, returns the
- * ram_addr_t corresponding to the guest code at @addr.
+ * handle_sigsegv_accerr_write:
+ * @cpu: the cpu context
+ * @old_set: the sigset_t from the signal ucontext_t
+ * @host_pc: the host pc, adjusted for the signal
+ * @host_addr: the host address of the fault
*
- * Note: this function can trigger an exception.
+ * Return true if the write fault has been handled, and should be re-tried.
*/
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
+bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
+ uintptr_t host_pc, abi_ptr guest_addr);
/**
- * get_page_addr_code_hostp() - full-system version
- * @env: CPUArchState
- * @addr: guest virtual address of guest code
- *
- * See get_page_addr_code() (full-system version) for documentation on the
- * return value.
- *
- * Sets *@hostp (when @hostp is non-NULL) as follows.
- * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
- * to the host address where @addr's content is kept.
- *
- * Note: this function can trigger an exception.
+ * cpu_loop_exit_sigsegv:
+ * @cpu: the cpu context
+ * @addr: the guest address of the fault
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGSEGV, and jump to the main cpu loop.
*/
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
- void **hostp);
+G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+
+/**
+ * cpu_loop_exit_sigbus:
+ * @cpu: the cpu context
+ * @addr: the guest address of the alignment fault
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGBUS, and jump to the main cpu loop.
+ */
+G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ uintptr_t ra);
+
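Taken together, the declarations above outline the shape of the user-only SIGSEGV path. A hedged sketch (the names are hypothetical; decoding the host siginfo/ucontext into @guest_addr, @host_pc and @is_write is host specific and elided):

    static void example_handle_segv(CPUState *cpu, sigset_t *old_set,
                                    uintptr_t host_pc, abi_ptr guest_addr,
                                    bool is_write, bool maperr)
    {
        uintptr_t pc = host_pc;
        MMUAccessType access = adjust_signal_pc(&pc, is_write);

        if (access == MMU_DATA_STORE && !maperr &&
            handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
            return;     /* write fault handled; the access will be retried */
        }

        /* Does not return: records state and raises SIGSEGV for the guest. */
        cpu_loop_exit_sigsegv(cpu, guest_addr, access, maperr, pc);
    }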
+#else
+static inline void mmap_lock(void) {}
+static inline void mmap_unlock(void) {}
+#define WITH_MMAP_LOCK_GUARD()
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
+void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
index a024a0350d..eb14b91139 100644
--- a/include/exec/gdbstub.h
+++ b/include/exec/gdbstub.h
@@ -10,182 +10,138 @@
#define GDB_WATCHPOINT_READ 3
#define GDB_WATCHPOINT_ACCESS 4
-#ifdef NEED_CPU_H
-#include "cpu.h"
+typedef struct GDBFeature {
+ const char *xmlname;
+ const char *xml;
+ const char *name;
+ const char * const *regs;
+ int num_regs;
+} GDBFeature;
+
+typedef struct GDBFeatureBuilder {
+ GDBFeature *feature;
+ GPtrArray *xml;
+ GPtrArray *regs;
+ int base_reg;
+} GDBFeatureBuilder;
-typedef void (*gdb_syscall_complete_cb)(CPUState *cpu,
- target_ulong ret, target_ulong err);
+
+/* Get or set a register. Returns the size of the register. */
+typedef int (*gdb_get_reg_cb)(CPUState *cpu, GByteArray *buf, int reg);
+typedef int (*gdb_set_reg_cb)(CPUState *cpu, uint8_t *buf, int reg);
/**
- * gdb_do_syscall:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * ...: list of arguments to interpolate into @fmt
- *
- * Send a GDB syscall request. This function will return immediately;
- * the callback function will be called later when the remote system
- * call has completed.
- *
- * @fmt should be in the 'call-id,parameter,parameter...' format documented
- * for the F request packet in the GDB remote protocol. A limited set of
- * printf-style format specifiers is supported:
- * %x - target_ulong argument printed in hex
- * %lx - 64-bit argument printed in hex
- * %s - string pointer (target_ulong) and length (int) pair
+ * gdb_init_cpu(): Initialize the CPU for gdbstub.
+ * @cpu: The CPU to be initialized.
*/
-void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+void gdb_init_cpu(CPUState *cpu);
+
/**
- * gdb_do_syscallv:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * @va: arguments to interpolate into @fmt
- *
- * As gdb_do_syscall, but taking a va_list rather than a variable
- * argument list.
+ * gdb_register_coprocessor() - register a supplemental set of registers
+ * @cpu: the CPU associated with the registers
+ * @get_reg: get function (gdb reading)
+ * @set_reg: set function (gdb modifying)
+ * @feature: the GDBFeature describing the register set
+ * @g_pos: non-zero to append to the "general" register set at @g_pos
*/
-void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va);
-int use_gdb_syscalls(void);
-void gdb_set_stop_cpu(CPUState *cpu);
+void gdb_register_coprocessor(CPUState *cpu,
+ gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
+ const GDBFeature *feature, int g_pos);
/**
- * gdb_exit: exit gdb session, reporting inferior status
- * @code: exit code reported
+ * gdbserver_start: start the gdb server
+ * @port_or_device: connection spec for gdb
*
- * This closes the session and sends a final packet to GDB reporting
- * the exit status of the program. It also cleans up any connections
- * detritus before returning.
+ * For CONFIG_USER this is either a TCP port or a path to a FIFO. For
+ * system emulation you can use a full chardev spec for your gdbserver
+ * port.
*/
-void gdb_exit(int code);
+int gdbserver_start(const char *port_or_device);
-#ifdef CONFIG_USER_ONLY
/**
- * gdb_handlesig: yield control to gdb
- * @cpu: CPU
- * @sig: if non-zero, the signal number which caused us to stop
- *
- * This function yields control to gdb, when a user-mode-only target
- * needs to stop execution. If @sig is non-zero, then we will send a
- * stop packet to tell gdb that we have stopped because of this signal.
- *
- * This function will block (handling protocol requests from gdb)
- * until gdb tells us to continue target execution. When it does
- * return, the return value is a signal to deliver to the target,
- * or 0 if no signal should be delivered, ie the signal that caused
- * us to stop should be ignored.
+ * gdb_feature_builder_init() - Initialize GDBFeatureBuilder.
+ * @builder: The builder to be initialized.
+ * @feature: The feature to be filled.
+ * @name: The name of the feature.
+ * @xmlname: The name of the XML.
+ * @base_reg: The base number of the register ID.
*/
-int gdb_handlesig(CPUState *, int);
-void gdb_signalled(CPUArchState *, int);
-void gdbserver_fork(CPUState *);
-#endif
-/* Get or set a register. Returns the size of the register. */
-typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg);
-typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
-void gdb_register_coprocessor(CPUState *cpu,
- gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
- int num_regs, const char *xml, int g_pos);
+void gdb_feature_builder_init(GDBFeatureBuilder *builder, GDBFeature *feature,
+ const char *name, const char *xmlname,
+ int base_reg);
-/*
- * The GDB remote protocol transfers values in target byte order. As
- * the gdbstub may be batching up several register values we always
- * append to the array.
+/**
+ * gdb_feature_builder_append_tag() - Append a tag.
+ * @builder: The builder.
+ * @format: The format of the tag.
+ * @...: The values to be formatted.
*/
+void G_GNUC_PRINTF(2, 3)
+gdb_feature_builder_append_tag(const GDBFeatureBuilder *builder,
+ const char *format, ...);
-static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
-{
- g_byte_array_append(buf, &val, 1);
- return 1;
-}
-
-static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
-{
- uint16_t to_word = tswap16(val);
- g_byte_array_append(buf, (uint8_t *) &to_word, 2);
- return 2;
-}
-
-static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
-{
- uint32_t to_long = tswap32(val);
- g_byte_array_append(buf, (uint8_t *) &to_long, 4);
- return 4;
-}
-
-static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
-{
- uint64_t to_quad = tswap64(val);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- return 8;
-}
-
-static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
- uint64_t val_lo)
-{
- uint64_t to_quad;
-#ifdef TARGET_WORDS_BIGENDIAN
- to_quad = tswap64(val_hi);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- to_quad = tswap64(val_lo);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
-#else
- to_quad = tswap64(val_lo);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- to_quad = tswap64(val_hi);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
-#endif
- return 16;
-}
-
-static inline int gdb_get_zeroes(GByteArray *array, size_t len)
-{
- guint oldlen = array->len;
- g_byte_array_set_size(array, oldlen + len);
- memset(array->data + oldlen, 0, len);
+/**
+ * gdb_feature_builder_append_reg() - Append a register.
+ * @builder: The builder.
+ * @name: The register's name; it must be unique within a CPU.
+ * @bitsize: The register's size, in bits.
+ * @regnum: The offset of the register's number in the feature.
+ * @type: The type of the register.
+ * @group: The register group to which this register belongs; it can be NULL.
+ */
+void gdb_feature_builder_append_reg(const GDBFeatureBuilder *builder,
+ const char *name,
+ int bitsize,
+ int regnum,
+ const char *type,
+ const char *group);
- return len;
-}
+/**
+ * gdb_feature_builder_end() - End building GDBFeature.
+ * @builder: The builder.
+ */
+void gdb_feature_builder_end(const GDBFeatureBuilder *builder);
/**
- * gdb_get_reg_ptr: get pointer to start of last element
- * @len: length of element
+ * gdb_find_static_feature() - Find a static feature.
+ * @xmlname: The name of the XML.
*
- * This is a helper function to extract the pointer to the last
- * element for additional processing. Some front-ends do additional
- * dynamic swapping of the elements based on CPU state.
+ * Return: The static feature.
*/
-static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len)
-{
- return buf->data + buf->len - len;
-}
-
-#if TARGET_LONG_BITS == 64
-#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
-#define ldtul_p(addr) ldq_p(addr)
-#else
-#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
-#define ldtul_p(addr) ldl_p(addr)
-#endif
-
-#endif
+const GDBFeature *gdb_find_static_feature(const char *xmlname);
/**
- * gdbserver_start: start the gdb server
- * @port_or_device: connection spec for gdb
+ * gdb_read_register() - Read a register associated with a CPU.
+ * @cpu: The CPU associated with the register.
+ * @buf: The buffer that the read register will be appended to.
+ * @reg: The register's number returned by gdb_find_feature_register().
*
- * For CONFIG_USER this is either a tcp port or a path to a fifo. For
- * system emulation you can use a full chardev spec for your gdbserver
- * port.
+ * Return: The number of read bytes.
*/
-int gdbserver_start(const char *port_or_device);
+int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+
+/**
+ * typedef GDBRegDesc - a register description from gdbstub
+ */
+typedef struct {
+ int gdb_reg;
+ const char *name;
+ const char *feature_name;
+} GDBRegDesc;
/**
- * gdb_has_xml:
- * This is an ugly hack to cope with both new and old gdb.
- * If gdb sends qXfer:features:read then assume we're talking to a newish
- * gdb that understands target descriptions.
+ * gdb_get_register_list() - Return list of all registers for CPU
+ * @cpu: The CPU being searched
+ *
+ * Returns a GArray of GDBRegDesc, caller frees array but not the
+ * const strings.
*/
-extern bool gdb_has_xml;
+GArray *gdb_get_register_list(CPUState *cpu);
+
+void gdb_set_stop_cpu(CPUState *cpu);
-/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
-extern const char *const xml_builtin[][2];
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.py */
+extern const GDBFeature gdb_static_features[];
#endif
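To make the builder flow concrete, a CPU front end could describe a small bank of extra registers along these lines. This is a hedged sketch: the feature name, XML name and register names are made up.

    static GDBFeature example_feature;

    static void example_build_feature(void)
    {
        GDBFeatureBuilder builder;

        gdb_feature_builder_init(&builder, &example_feature,
                                 "org.example.extra", "example-extra.xml",
                                 0 /* base_reg */);
        gdb_feature_builder_append_reg(&builder, "extra0", 64, 0,
                                       "uint64", NULL);
        gdb_feature_builder_append_reg(&builder, "extra1", 64, 1,
                                       "uint64", NULL);
        gdb_feature_builder_end(&builder);
    }

The resulting GDBFeature can then be passed to gdb_register_coprocessor() together with the get/set register callbacks.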
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
deleted file mode 100644
index 610cba58fe..0000000000
--- a/include/exec/gen-icount.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef GEN_ICOUNT_H
-#define GEN_ICOUNT_H
-
-#include "exec/exec-all.h"
-#include "qemu/timer.h"
-
-/* Helpers for instruction counting code generation. */
-
-static TCGOp *icount_start_insn;
-
-static inline void gen_io_start(void)
-{
- TCGv_i32 tmp = tcg_const_i32(1);
- tcg_gen_st_i32(tmp, cpu_env,
- offsetof(ArchCPU, parent_obj.can_do_io) -
- offsetof(ArchCPU, env));
- tcg_temp_free_i32(tmp);
-}
-
-static inline void gen_tb_start(const TranslationBlock *tb)
-{
- TCGv_i32 count;
-
- tcg_ctx->exitreq_label = gen_new_label();
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- count = tcg_temp_local_new_i32();
- } else {
- count = tcg_temp_new_i32();
- }
-
- tcg_gen_ld_i32(count, cpu_env,
- offsetof(ArchCPU, neg.icount_decr.u32) -
- offsetof(ArchCPU, env));
-
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- /*
- * We emit a sub with a dummy immediate argument. Keep the insn index
- * of the sub so that we later (when we know the actual insn count)
- * can update the argument with the actual insn count.
- */
- tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
- icount_start_insn = tcg_last_op();
- }
-
- tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
-
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- tcg_gen_st16_i32(count, cpu_env,
- offsetof(ArchCPU, neg.icount_decr.u16.low) -
- offsetof(ArchCPU, env));
- /*
- * cpu->can_do_io is cleared automatically here at the beginning of
- * each translation block. The cost is minimal and only paid for
- * -icount, plus it would be very easy to forget doing it in the
- * translator. Doing it here means we don't need a gen_io_end() to
- * go with gen_io_start().
- */
- tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
- offsetof(ArchCPU, parent_obj.can_do_io) -
- offsetof(ArchCPU, env));
- }
-
- tcg_temp_free_i32(count);
-}
-
-static inline void gen_tb_end(const TranslationBlock *tb, int num_insns)
-{
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- /*
- * Update the num_insn immediate parameter now that we know
- * the actual insn count.
- */
- tcg_set_insn_param(icount_start_insn, 2,
- tcgv_i32_arg(tcg_constant_i32(num_insns)));
- }
-
- gen_set_label(tcg_ctx->exitreq_label);
- tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
-}
-
-#endif
diff --git a/include/exec/helper-gen-common.h b/include/exec/helper-gen-common.h
new file mode 100644
index 0000000000..5d6d78a625
--- /dev/null
+++ b/include/exec/helper-gen-common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ */
+
+#ifndef HELPER_GEN_COMMON_H
+#define HELPER_GEN_COMMON_H
+
+#define HELPER_H "accel/tcg/tcg-runtime.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
+
+#define HELPER_H "accel/tcg/plugin-helpers.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
+
+#endif /* HELPER_GEN_COMMON_H */
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
index 1c2e7a8ed3..f7ec155699 100644
--- a/include/exec/helper-gen.h
+++ b/include/exec/helper-gen.h
@@ -1,97 +1,16 @@
-/* Helper file for declaring TCG helper functions.
- This one expands generation functions for tcg opcodes. */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ */
#ifndef HELPER_GEN_H
#define HELPER_GEN_H
-#include "exec/helper-head.h"
+#include "exec/helper-gen-common.h"
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
-{ \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 0, NULL); \
-}
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1)) \
-{ \
- TCGTemp *args[1] = { dh_arg(t1, 1) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 1, args); \
-}
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
-{ \
- TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 2, args); \
-}
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
-{ \
- TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 3, args); \
-}
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
- dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
-{ \
- TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
- dh_arg(t3, 3), dh_arg(t4, 4) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 4, args); \
-}
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
-{ \
- TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 5, args); \
-}
-
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
-{ \
- TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 6, args); \
-}
-
-#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
- dh_arg_decl(t7, 7)) \
-{ \
- TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
- dh_arg(t7, 7) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 7, args); \
-}
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "trace/generated-helpers-wrappers.h"
-#include "accel/tcg/tcg-runtime.h"
-#include "accel/tcg/plugin-helpers.h"
-
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-#undef DEF_HELPER_FLAGS_7
-#undef GEN_HELPER
+#define HELPER_H "helper.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
#endif /* HELPER_GEN_H */
diff --git a/include/exec/helper-gen.h.inc b/include/exec/helper-gen.h.inc
new file mode 100644
index 0000000000..d9fd3ed72a
--- /dev/null
+++ b/include/exec/helper-gen.h.inc
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ * Define HELPER_H for the header file to be expanded,
+ * and static inline to change from global file scope.
+ */
+
+#include "tcg/tcg.h"
+#include "tcg/helper-info.h"
+#include "exec/helper-head.h.inc"
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+ tcg_gen_call0(&glue(helper_info_, name), dh_retvar(ret)); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1)) \
+{ \
+ tcg_gen_call1(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1)); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
+{ \
+ tcg_gen_call2(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2)); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+ tcg_gen_call3(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3)); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
+ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+ tcg_gen_call4(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), \
+ dh_arg(t3, 3), dh_arg(t4, 4)); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+ tcg_gen_call5(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5)); \
+}
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
+{ \
+ tcg_gen_call6(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6)); \
+}
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
+ dh_arg_decl(t7, 7)) \
+{ \
+ tcg_gen_call7(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
+ dh_arg(t7, 7)); \
+}
+
+#include HELPER_H
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
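For a concrete picture of what the macros above generate, consider a hypothetical entry in a target's helper.h and its approximate expansion through this file:

    /* In the target's helper.h (hypothetical helper): */
    DEF_HELPER_FLAGS_2(example_add, TCG_CALL_NO_RWG_SE, i32, i32, i32)

    /* ...expands, roughly, to: */
    extern TCGHelperInfo helper_info_example_add;
    static inline void gen_helper_example_add(TCGv_i32 retval,
                                              TCGv_i32 arg1, TCGv_i32 arg2)
    {
        tcg_gen_call2(&helper_info_example_add, tcgv_i32_temp(retval),
                      tcgv_i32_temp(arg1), tcgv_i32_temp(arg2));
    }

A translator then simply calls gen_helper_example_add(dest, a, b) while emitting a TB.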
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h.inc
index b974eb394a..5ef467a79d 100644
--- a/include/exec/helper-head.h
+++ b/include/exec/helper-head.h.inc
@@ -1,23 +1,13 @@
-/* Helper file for declaring TCG helper functions.
- Used by other helper files.
-
- Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
- functions. Names should be specified without the helper_ prefix, and
- the return and argument types specified. 3 basic types are understood
- (i32, i64 and ptr). Additional aliases are provided for convenience and
- to match the types used by the C helper implementation.
-
- The target helper.h should be included in all files that use/define
- helper functions. THis will ensure that function prototypes are
- consistent. In addition it should be included an extra two times for
- helper.c, defining:
- GEN_HELPER 1 to produce op generation functions (gen_helper_*)
- GEN_HELPER 2 to do runtime registration helper functions.
+/*
+ * Helper file for declaring TCG helper functions.
+ * Used by other helper files.
*/
#ifndef EXEC_HELPER_HEAD_H
#define EXEC_HELPER_HEAD_H
+#include "fpu/softfloat-types.h"
+
#define HELPER(name) glue(helper_, name)
/* Some types that make sense in C, but not for TCG. */
@@ -26,11 +16,13 @@
#define dh_alias_int i32
#define dh_alias_i64 i64
#define dh_alias_s64 i64
+#define dh_alias_i128 i128
#define dh_alias_f16 i32
#define dh_alias_f32 i32
#define dh_alias_f64 i64
#define dh_alias_ptr ptr
#define dh_alias_cptr ptr
+#define dh_alias_env ptr
#define dh_alias_void void
#define dh_alias_noreturn noreturn
#define dh_alias(t) glue(dh_alias_, t)
@@ -40,27 +32,29 @@
#define dh_ctype_int int
#define dh_ctype_i64 uint64_t
#define dh_ctype_s64 int64_t
+#define dh_ctype_i128 Int128
#define dh_ctype_f16 uint32_t
#define dh_ctype_f32 float32
#define dh_ctype_f64 float64
#define dh_ctype_ptr void *
#define dh_ctype_cptr const void *
+#define dh_ctype_env CPUArchState *
#define dh_ctype_void void
-#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype_noreturn G_NORETURN void
#define dh_ctype(t) dh_ctype_##t
-#ifdef NEED_CPU_H
+#ifdef COMPILING_PER_TARGET
# ifdef TARGET_LONG_BITS
# if TARGET_LONG_BITS == 32
# define dh_alias_tl i32
+# define dh_typecode_tl dh_typecode_i32
# else
# define dh_alias_tl i64
+# define dh_typecode_tl dh_typecode_i64
# endif
# endif
-# define dh_alias_env ptr
# define dh_ctype_tl target_ulong
-# define dh_ctype_env CPUArchState *
-#endif
+#endif /* COMPILING_PER_TARGET */
/* We can't use glue() here because it falls foul of C preprocessor
recursive expansion rules. */
@@ -68,6 +62,7 @@
#define dh_retvar_decl0_noreturn void
#define dh_retvar_decl0_i32 TCGv_i32 retval
#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_i128 TCGv_i128 retval
#define dh_retvar_decl0_ptr TCGv_ptr retval
#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
@@ -75,6 +70,7 @@
#define dh_retvar_decl_noreturn
#define dh_retvar_decl_i32 TCGv_i32 retval,
#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_i128 TCGv_i128 retval,
#define dh_retvar_decl_ptr TCGv_ptr retval,
#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
@@ -82,6 +78,7 @@
#define dh_retvar_noreturn NULL
#define dh_retvar_i32 tcgv_i32_temp(retval)
#define dh_retvar_i64 tcgv_i64_temp(retval)
+#define dh_retvar_i128 tcgv_i128_temp(retval)
#define dh_retvar_ptr tcgv_ptr_temp(retval)
#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
@@ -92,18 +89,19 @@
#define dh_typecode_i64 4
#define dh_typecode_s64 5
#define dh_typecode_ptr 6
-#define dh_typecode(t) glue(dh_typecode_, dh_alias(t))
+#define dh_typecode_i128 7
+#define dh_typecode_int dh_typecode_s32
+#define dh_typecode_f16 dh_typecode_i32
+#define dh_typecode_f32 dh_typecode_i32
+#define dh_typecode_f64 dh_typecode_i64
+#define dh_typecode_cptr dh_typecode_ptr
+#define dh_typecode_env dh_typecode_ptr
+#define dh_typecode(t) dh_typecode_##t
#define dh_callflag_i32 0
-#define dh_callflag_s32 0
-#define dh_callflag_int 0
#define dh_callflag_i64 0
-#define dh_callflag_s64 0
-#define dh_callflag_f16 0
-#define dh_callflag_f32 0
-#define dh_callflag_f64 0
+#define dh_callflag_i128 0
#define dh_callflag_ptr 0
-#define dh_callflag_cptr dh_callflag_ptr
#define dh_callflag_void 0
#define dh_callflag_noreturn TCG_CALL_NO_RETURN
#define dh_callflag(t) glue(dh_callflag_, dh_alias(t))
@@ -132,6 +130,6 @@
#define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \
DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7)
-/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+/* MAX_CALL_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
#endif /* EXEC_HELPER_HEAD_H */
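The dh_alias_* and dh_ctype_* tables above give each argument type two views: the TCG value type seen by the generated emitters and the C type seen by the prototypes. For a hypothetical f32 helper:

    DEF_HELPER_FLAGS_2(example_mulf, TCG_CALL_NO_RWG, f32, env, f32)
    /*
     * dh_ctype(f32) is float32 and dh_ctype(env) is CPUArchState *, so the
     * prototype side becomes:
     *     float32 helper_example_mulf(CPUArchState *, float32);
     *
     * dh_alias(f32) is i32 and dh_alias(env) is ptr, so the generator side
     * takes TCG values:
     *     void gen_helper_example_mulf(TCGv_i32 ret, TCGv_ptr env, TCGv_i32 a);
     */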
diff --git a/include/exec/helper-info.c.inc b/include/exec/helper-info.c.inc
new file mode 100644
index 0000000000..c551736d49
--- /dev/null
+++ b/include/exec/helper-info.c.inc
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands info structures for tcg helpers.
+ * Define HELPER_H for the header file to be expanded.
+ */
+
+#include "tcg/tcg.h"
+#include "tcg/helper-info.h"
+#include "exec/helper-head.h.inc"
+
+/*
+ * Need one more level of indirection before stringification
+ * to get all the macros expanded first.
+ */
+#define str(s) #s
+
+#define DEF_HELPER_FLAGS_0(NAME, FLAGS, RET) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) \
+ };
+
+#define DEF_HELPER_FLAGS_1(NAME, FLAGS, RET, T1) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ };
+
+#define DEF_HELPER_FLAGS_2(NAME, FLAGS, RET, T1, T2) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) \
+ };
+
+#define DEF_HELPER_FLAGS_3(NAME, FLAGS, RET, T1, T2, T3) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ };
+
+#define DEF_HELPER_FLAGS_4(NAME, FLAGS, RET, T1, T2, T3, T4) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) \
+ };
+
+#define DEF_HELPER_FLAGS_5(NAME, FLAGS, RET, T1, T2, T3, T4, T5) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ };
+
+#define DEF_HELPER_FLAGS_6(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ | dh_typemask(T6, 6) \
+ };
+
+#define DEF_HELPER_FLAGS_7(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6, T7) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ | dh_typemask(T6, 6) | dh_typemask(T7, 7) \
+ };
+
+#include HELPER_H
+
+#undef str
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
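The intended pattern is that exactly one object file per target expands this fragment, defining the helper_info_* structures that the inline gen_helper_* emitters reference. A sketch of that single expansion site (the file name is illustrative):

    /* e.g. in the target's translate.c */
    #include "qemu/osdep.h"
    #include "exec/helper-proto.h"

    #define HELPER_H "helper.h"
    #include "exec/helper-info.c.inc"
    #undef  HELPER_H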
diff --git a/include/exec/helper-proto-common.h b/include/exec/helper-proto-common.h
new file mode 100644
index 0000000000..8b67170a22
--- /dev/null
+++ b/include/exec/helper-proto-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ */
+
+#ifndef HELPER_PROTO_COMMON_H
+#define HELPER_PROTO_COMMON_H
+
+#include "qemu/atomic128.h" /* for HAVE_CMPXCHG128 */
+
+#define HELPER_H "accel/tcg/tcg-runtime.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
+
+#define HELPER_H "accel/tcg/plugin-helpers.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
+
+#endif /* HELPER_PROTO_COMMON_H */
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
index ba100793a7..6935cb4f16 100644
--- a/include/exec/helper-proto.h
+++ b/include/exec/helper-proto.h
@@ -1,56 +1,16 @@
-/* Helper file for declaring TCG helper functions.
- This one expands prototypes for the helper functions. */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ */
#ifndef HELPER_PROTO_H
#define HELPER_PROTO_H
-#include "exec/helper-head.h"
+#include "exec/helper-proto-common.h"
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-dh_ctype(ret) HELPER(name) (void);
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1));
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4));
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5));
-
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
-
-#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
- dh_ctype(t7));
-
-#define IN_HELPER_PROTO
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "accel/tcg/tcg-runtime.h"
-#include "accel/tcg/plugin-helpers.h"
-
-#undef IN_HELPER_PROTO
-
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-#undef DEF_HELPER_FLAGS_7
+#define HELPER_H "helper.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
#endif /* HELPER_PROTO_H */
diff --git a/include/exec/helper-proto.h.inc b/include/exec/helper-proto.h.inc
new file mode 100644
index 0000000000..f8e57e43ce
--- /dev/null
+++ b/include/exec/helper-proto.h.inc
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ * Define HELPER_H for the header file to be expanded.
+ */
+
+#include "exec/helper-head.h.inc"
+
+/*
+ * Work around an issue with --enable-lto, in which GCC's ipa-split pass
+ * decides to split out the noreturn code paths that raise an exception,
+ * taking the __builtin_return_address() along into the new function,
+ * where it no longer computes a value that returns to TCG generated code.
+ * Despite the name, the noinline attribute affects splitter, so this
+ * prevents the optimization in question. Given that helpers should not
+ * otherwise be called directly, this should not have any other visible effect.
+ *
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1454
+ */
+#define DEF_HELPER_ATTR __attribute__((noinline))
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), \
+ dh_ctype(t3)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), \
+ dh_ctype(t6)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
+ dh_ctype(t7)) DEF_HELPER_ATTR;
+
+#define IN_HELPER_PROTO
+
+#include HELPER_H
+
+#undef IN_HELPER_PROTO
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
+#undef DEF_HELPER_ATTR
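Continuing the hypothetical helper used earlier, the expansion produced by this file is just the out-of-line prototype, tagged with the workaround attribute:

    /* DEF_HELPER_FLAGS_2(example_add, TCG_CALL_NO_RWG_SE, i32, i32, i32)
     * expands, roughly, to: */
    uint32_t helper_example_add(uint32_t, uint32_t) __attribute__((noinline));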
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
deleted file mode 100644
index 16cd318b83..0000000000
--- a/include/exec/helper-tcg.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Helper file for declaring TCG helper functions.
- This one defines data structures private to tcg.c. */
-
-#ifndef HELPER_TCG_H
-#define HELPER_TCG_H
-
-#include "exec/helper-head.h"
-
-/* Need one more level of indirection before stringification
- to get all the macros expanded first. */
-#define str(s) #s
-
-#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .typemask = dh_typemask(ret, 0) },
-
-#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) },
-
-#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
- | dh_typemask(t2, 2) },
-
-#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) },
-
-#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) },
-
-#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
- | dh_typemask(t5, 5) },
-
-#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
- | dh_typemask(t5, 5) | dh_typemask(t6, 6) },
-
-#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \
- { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
- | dh_typemask(t5, 5) | dh_typemask(t6, 6) | dh_typemask(t7, 7) },
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "accel/tcg/tcg-runtime.h"
-#include "accel/tcg/plugin-helpers.h"
-
-#undef str
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-#undef DEF_HELPER_FLAGS_7
-
-#endif /* HELPER_TCG_H */
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
index 8f16d179a8..50fbb2d96c 100644
--- a/include/exec/hwaddr.h
+++ b/include/exec/hwaddr.h
@@ -10,7 +10,7 @@
typedef uint64_t hwaddr;
#define HWADDR_MAX UINT64_MAX
-#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_FMT_plx "%016" PRIx64
#define HWADDR_PRId PRId64
#define HWADDR_PRIi PRIi64
#define HWADDR_PRIo PRIo64
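
The TARGET_FMT_plx format macro is renamed to HWADDR_FMT_plx, since hwaddr is a fixed 64-bit type independent of the target. Callers print an hwaddr the same way as before, for example (illustrative only):

    qemu_log("unassigned access at 0x" HWADDR_FMT_plx "\n", addr);
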
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
index e34f668998..4397f12f93 100644
--- a/include/exec/ioport.h
+++ b/include/exec/ioport.h
@@ -35,7 +35,6 @@ typedef struct MemoryRegionPortio {
unsigned size;
uint32_t (*read)(void *opaque, uint32_t address);
void (*write)(void *opaque, uint32_t address, uint32_t data);
- uint32_t base; /* private field */
} MemoryRegionPortio;
#define PORTIO_END_OF_LIST() { }
@@ -55,6 +54,7 @@ typedef struct PortioList {
const struct MemoryRegionPortio *ports;
Object *owner;
struct MemoryRegion *address_space;
+ uint32_t addr;
unsigned nr;
struct MemoryRegion **regions;
void *opaque;
@@ -71,5 +71,7 @@ void portio_list_add(PortioList *piolist,
struct MemoryRegion *address_space,
uint32_t addr);
void portio_list_del(PortioList *piolist);
+void portio_list_set_enabled(PortioList *piolist, bool enabled);
+void portio_list_set_address(PortioList *piolist, uint32_t addr);
#endif /* IOPORT_H */
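
The two new entry points let a device move or disable an already-registered port list without tearing it down. A minimal sketch, assuming a hypothetical device state s whose s->piolist was previously set up with portio_list_add():

    static void mydev_update_ports(MyDeviceState *s, uint32_t new_base, bool enable)
    {
        portio_list_set_enabled(&s->piolist, false);    /* quiesce the old mapping */
        portio_list_set_address(&s->piolist, new_base); /* move to the new I/O base */
        portio_list_set_enabled(&s->piolist, enable);   /* re-enable if requested */
    }

MyDeviceState and mydev_update_ports are placeholders; only the PortioList calls come from this header.
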
diff --git a/include/exec/log.h b/include/exec/log.h
index 3c7fa65ead..4a7375a45f 100644
--- a/include/exec/log.h
+++ b/include/exec/log.h
@@ -15,15 +15,10 @@
*/
static inline void log_cpu_state(CPUState *cpu, int flags)
{
- QemuLogFile *logfile;
-
- if (qemu_log_enabled()) {
- rcu_read_lock();
- logfile = qatomic_rcu_read(&qemu_logfile);
- if (logfile) {
- cpu_dump_state(cpu, logfile->fd, flags);
- }
- rcu_read_unlock();
+ FILE *f = qemu_log_trylock();
+ if (f) {
+ cpu_dump_state(cpu, f, flags);
+ qemu_log_unlock(f);
}
}
@@ -42,43 +37,4 @@ static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
}
}
-#ifdef NEED_CPU_H
-/* disas() and target_disas() to qemu_logfile: */
-static inline void log_target_disas(CPUState *cpu, target_ulong start,
- target_ulong len)
-{
- QemuLogFile *logfile;
- rcu_read_lock();
- logfile = qatomic_rcu_read(&qemu_logfile);
- if (logfile) {
- target_disas(logfile->fd, cpu, start, len);
- }
- rcu_read_unlock();
-}
-
-static inline void log_disas(const void *code, unsigned long size)
-{
- QemuLogFile *logfile;
- rcu_read_lock();
- logfile = qatomic_rcu_read(&qemu_logfile);
- if (logfile) {
- disas(logfile->fd, code, size);
- }
- rcu_read_unlock();
-}
-
-#if defined(CONFIG_USER_ONLY)
-/* page_dump() output to the log file: */
-static inline void log_page_dump(const char *operation)
-{
- FILE *logfile = qemu_log_lock();
- if (logfile) {
- qemu_log("page layout changed following %s\n", operation);
- page_dump(logfile);
- }
- qemu_log_unlock(logfile);
-}
-#endif
-#endif
-
#endif
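
log_cpu_state() now follows the qemu_log_trylock()/qemu_log_unlock() pattern instead of reaching into QemuLogFile directly. Callers that want to emit several lines atomically can use the same pattern, mirroring the inline function above:

    FILE *f = qemu_log_trylock();
    if (f) {
        fprintf(f, "multi-line diagnostic, kept together in the log:\n");
        cpu_dump_state(cpu, f, flags);
        qemu_log_unlock(f);
    }
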
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index 95f2d20d55..14cdd8d582 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -29,26 +29,29 @@ typedef struct MemTxAttrs {
* "didn't specify" if necessary.
*/
unsigned int unspecified:1;
- /* ARM/AMBA: TrustZone Secure access
+ /*
+ * ARM/AMBA: TrustZone Secure access
* x86: System Management Mode access
*/
unsigned int secure:1;
+ /*
+ * ARM: ArmSecuritySpace. This partially overlaps secure, but it is
+ * easier to have both fields to assist code that does not understand
+ * ARMv9 RME, or no specific knowledge of ARM at all (e.g. pflash).
+ */
+ unsigned int space:2;
/* Memory access is usermode (unprivileged) */
unsigned int user:1;
- /* Requester ID (for MSI for example) */
- unsigned int requester_id:16;
- /* Invert endianness for this page */
- unsigned int byte_swap:1;
/*
- * The following are target-specific page-table bits. These are not
- * related to actual memory transactions at all. However, this structure
- * is part of the tlb_fill interface, cached in the cputlb structure,
- * and has unused bits. These fields will be read by target-specific
- * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN.
+ * Bus interconnect and peripherals can access anything (memories,
+ * devices) by default. By setting the 'memory' bit, bus transaction
+ * are restricted to "normal" memories (per the AMBA documentation)
+ * versus devices. Access to devices will be logged and rejected
+ * (see MEMTX_ACCESS_ERROR).
*/
- unsigned int target_tlb_bit0 : 1;
- unsigned int target_tlb_bit1 : 1;
- unsigned int target_tlb_bit2 : 1;
+ unsigned int memory:1;
+ /* Requester ID (for MSI for example) */
+ unsigned int requester_id:16;
} MemTxAttrs;
/* Bus masters which don't specify any attributes will get this,
@@ -66,6 +69,7 @@ typedef struct MemTxAttrs {
#define MEMTX_OK 0
#define MEMTX_ERROR (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
+#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */
typedef uint32_t MemTxResult;
#endif
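
A transaction attribute struct is typically built with a designated initializer and the result checked against the MEMTX_* bits. A minimal sketch (address_space_read() and address_space_memory are the usual softmmu APIs, not defined in this header):

    MemTxAttrs attrs = { .secure = 1, .memory = 1 };
    MemTxResult r = address_space_read(&address_space_memory, addr,
                                       attrs, buf, len);
    if (r & MEMTX_ACCESS_ERROR) {
        /* rejected: the 'memory' attribute restricts the access to normal RAM */
    }
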
diff --git a/include/exec/memop.h b/include/exec/memop.h
index 529d07b02d..06417ff361 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -19,20 +19,24 @@ typedef enum MemOp {
MO_16 = 1,
MO_32 = 2,
MO_64 = 3,
- MO_SIZE = 3, /* Mask for the above. */
+ MO_128 = 4,
+ MO_256 = 5,
+ MO_512 = 6,
+ MO_1024 = 7,
+ MO_SIZE = 0x07, /* Mask for the above. */
- MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
+ MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */
- MO_BSWAP = 8, /* Host reverse endian. */
-#ifdef HOST_WORDS_BIGENDIAN
+ MO_BSWAP = 0x10, /* Host reverse endian. */
+#if HOST_BIG_ENDIAN
MO_LE = MO_BSWAP,
MO_BE = 0,
#else
MO_LE = 0,
MO_BE = MO_BSWAP,
#endif
-#ifdef NEED_CPU_H
-#ifdef TARGET_WORDS_BIGENDIAN
+#ifdef COMPILING_PER_TARGET
+#if TARGET_BIG_ENDIAN
MO_TE = MO_BE,
#else
MO_TE = MO_LE,
@@ -43,8 +47,6 @@ typedef enum MemOp {
* MO_UNALN accesses are never checked for alignment.
* MO_ALIGN accesses will result in a call to the CPU's
* do_unaligned_access hook if the guest address is not aligned.
- * The default depends on whether the target CPU defines
- * TARGET_ALIGNED_ONLY.
*
* Some architectures (e.g. ARMv8) need the address which is aligned
* to a size more than the size of the memory access.
@@ -59,51 +61,88 @@ typedef enum MemOp {
* - an alignment to a specified size, which may be more or less than
* the access size (MO_ALIGN_x where 'x' is a size in bytes);
*/
- MO_ASHIFT = 4,
- MO_AMASK = 7 << MO_ASHIFT,
-#ifdef NEED_CPU_H
-#ifdef TARGET_ALIGNED_ONLY
- MO_ALIGN = 0,
- MO_UNALN = MO_AMASK,
-#else
- MO_ALIGN = MO_AMASK,
- MO_UNALN = 0,
-#endif
-#endif
+ MO_ASHIFT = 5,
+ MO_AMASK = 0x7 << MO_ASHIFT,
+ MO_UNALN = 0,
MO_ALIGN_2 = 1 << MO_ASHIFT,
MO_ALIGN_4 = 2 << MO_ASHIFT,
MO_ALIGN_8 = 3 << MO_ASHIFT,
MO_ALIGN_16 = 4 << MO_ASHIFT,
MO_ALIGN_32 = 5 << MO_ASHIFT,
MO_ALIGN_64 = 6 << MO_ASHIFT,
+ MO_ALIGN = MO_AMASK,
+
+ /*
+ * MO_ATOM_* describes the atomicity requirements of the operation:
+ * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
+ * is aligned; if unaligned there is no atomicity.
+ * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to
+ * be a pair of half-sized operations which are packed together
+ * for convenience, with single-copy atomicity on each half if
+ * the half is aligned.
+ * This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP.
+ * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it
+ * is unaligned, so long as it does not cross a 16-byte boundary;
+ * if it crosses a 16-byte boundary there is no atomicity.
+ * This is the atomicity e.g. of Arm FEAT_LSE2 LDR.
+ * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic,
+ * if it happens to be within a 16-byte boundary, otherwise it
+ * devolves to a pair of half-sized MO_ATOM_WITHIN16 operations.
+ * Depending on alignment, one or both will be single-copy atomic.
+ * This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
+ * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts
+ * by the alignment. E.g. if the address is 0 mod 4, then each
+ * 4-byte subobject is single-copy atomic.
+ * This is the atomicity e.g. of IBM Power.
+ * MO_ATOM_NONE: the operation has no atomicity requirements.
+ *
+ * Note the default (i.e. 0) value is single-copy atomic to the
+ * size of the operation, if aligned. This retains the behaviour
+ * from before this field was introduced.
+ */
+ MO_ATOM_SHIFT = 8,
+ MO_ATOM_IFALIGN = 0 << MO_ATOM_SHIFT,
+ MO_ATOM_IFALIGN_PAIR = 1 << MO_ATOM_SHIFT,
+ MO_ATOM_WITHIN16 = 2 << MO_ATOM_SHIFT,
+ MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT,
+ MO_ATOM_SUBALIGN = 4 << MO_ATOM_SHIFT,
+ MO_ATOM_NONE = 5 << MO_ATOM_SHIFT,
+ MO_ATOM_MASK = 7 << MO_ATOM_SHIFT,
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
+ MO_UQ = MO_64,
+ MO_UO = MO_128,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
- MO_Q = MO_64,
+ MO_SQ = MO_SIGN | MO_64,
+ MO_SO = MO_SIGN | MO_128,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
+ MO_LEUQ = MO_LE | MO_UQ,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
- MO_LEQ = MO_LE | MO_Q,
+ MO_LESQ = MO_LE | MO_SQ,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
+ MO_BEUQ = MO_BE | MO_UQ,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
- MO_BEQ = MO_BE | MO_Q,
+ MO_BESQ = MO_BE | MO_SQ,
-#ifdef NEED_CPU_H
+#ifdef COMPILING_PER_TARGET
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
+ MO_TEUQ = MO_TE | MO_UQ,
+ MO_TEUO = MO_TE | MO_UO,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
- MO_TEQ = MO_TE | MO_Q,
+ MO_TESQ = MO_TE | MO_SQ,
#endif
MO_SSIZE = MO_SIZE | MO_SIGN,
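
A MemOp value is built by OR-ing one choice from each field; for example, a 4-byte little-endian load that must be aligned and single-copy atomic within a 16-byte window (illustrative composition only):

    MemOp op = MO_LEUL | MO_ALIGN | MO_ATOM_WITHIN16;

    unsigned size = 1u << (op & MO_SIZE);     /* 4 bytes (MO_32) */
    bool sign     = (op & MO_SIGN) != 0;      /* false: zero-extending load */
    MemOp atom    = op & MO_ATOM_MASK;        /* MO_ATOM_WITHIN16 */
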
diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h
new file mode 100644
index 0000000000..eb7f1591a3
--- /dev/null
+++ b/include/exec/memopidx.h
@@ -0,0 +1,55 @@
+/*
+ * Combine the MemOp and mmu_idx parameters into a single value.
+ *
+ * Authors:
+ * Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_MEMOPIDX_H
+#define EXEC_MEMOPIDX_H
+
+#include "exec/memop.h"
+
+typedef uint32_t MemOpIdx;
+
+/**
+ * make_memop_idx
+ * @op: memory operation
+ * @idx: mmu index
+ *
+ * Encode these values into a single parameter.
+ */
+static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
+{
+#ifdef CONFIG_DEBUG_TCG
+ assert(idx <= 15);
+#endif
+ return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline MemOp get_memop(MemOpIdx oi)
+{
+ return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(MemOpIdx oi)
+{
+ return oi & 15;
+}
+
+#endif
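
A typical round trip through the combined value, e.g. when a translator packs the operation for a slow-path helper and the helper unpacks it again (mmu_idx stands for whatever index the target uses; the encoding comes straight from the inline functions above):

    MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_ALIGN, mmu_idx);
    /* ... oi is passed to a slow-path helper ... */
    MemOp    mop = get_memop(oi);   /* MO_LEUQ | MO_ALIGN again */
    unsigned idx = get_mmuidx(oi);  /* mmu_idx again, 0..15 */
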
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index 9fcc2af25c..100c1237ac 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -38,10 +38,6 @@ void flatview_unref(FlatView *view);
extern const MemoryRegionOps unassigned_mem_ops;
-bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs);
-
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
void address_space_dispatch_compact(AddressSpaceDispatch *d);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index c3d417d317..dadb5cd65a 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -24,6 +24,7 @@
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
+#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
@@ -42,7 +43,7 @@ typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
-#define TYPE_RAM_DISCARD_MANAGER "qemu:ram-discard-manager"
+#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
@@ -61,13 +62,25 @@ static inline void fuzz_dma_read_cb(size_t addr,
}
#endif
-extern bool global_dirty_log;
+/* Possible bits for global_dirty_log_{start|stop} */
+
+/* Dirty tracking enabled because migration is running */
+#define GLOBAL_DIRTY_MIGRATION (1U << 0)
+
+/* Dirty tracking enabled because measuring dirty rate */
+#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
+
+/* Dirty tracking enabled because dirty limit */
+#define GLOBAL_DIRTY_LIMIT (1U << 2)
+
+#define GLOBAL_DIRTY_MASK (0x7)
+
+extern unsigned int global_dirty_tracking;
typedef struct MemoryRegionOps MemoryRegionOps;
struct ReservedRegion {
- hwaddr low;
- hwaddr high;
+ Range range;
unsigned type;
};
@@ -82,6 +95,7 @@ struct ReservedRegion {
* relative to the region's address space
* @readonly: writes to this section are ignored
* @nonvolatile: this section is non-volatile
+ * @unmergeable: this section should not get merged with adjacent sections
*/
struct MemoryRegionSection {
Int128 size;
@@ -91,6 +105,7 @@ struct MemoryRegionSection {
hwaddr offset_within_address_space;
bool readonly;
bool nonvolatile;
+ bool unmergeable;
};
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
@@ -116,6 +131,32 @@ struct IOMMUTLBEntry {
/*
* Bitmap for different IOMMUNotifier capabilities. Each notifier can
* register with one or multiple IOMMU Notifier capability bit(s).
+ *
+ * Normally there are two use cases for the notifiers:
+ *
+ * (1) When the device needs accurate synchronizations of the vIOMMU page
+ * tables, it needs to register with both MAP|UNMAP notifies (which
+ * is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
+ *
+ * Regarding accurate synchronization, it's when the notified
+ * device maintains a shadow page table and must be notified on each
+ * guest MAP (page table entry creation) and UNMAP (invalidation)
+ * events (e.g. VFIO). Both notifications must be accurate so that
+ * the shadow page table is fully in sync with the guest view.
+ *
+ * (2) When the device doesn't need accurate synchronizations of the
+ * vIOMMU page tables, it needs to register only with UNMAP or
+ * DEVIOTLB_UNMAP notifies.
+ *
+ * It's when the device maintains a cache of IOMMU translations
+ * (IOTLB) and is able to fill that cache by requesting translations
+ * from the vIOMMU through a protocol similar to ATS (Address
+ * Translation Service).
+ *
+ * Note that in this mode the vIOMMU will not maintain a shadowed
+ * page table for the address space, and the UNMAP messages can cover
+ * more than the pages that used to get mapped. The IOMMU notifiee
+ * should be able to take care of over-sized invalidations.
*/
typedef enum {
IOMMU_NOTIFIER_NONE = 0,
@@ -190,6 +231,21 @@ typedef struct IOMMUTLBEvent {
*/
#define RAM_NORESERVE (1 << 7)
+/* RAM that isn't accessible through normal means. */
+#define RAM_PROTECTED (1 << 8)
+
+/* RAM is an mmap-ed named file */
+#define RAM_NAMED_FILE (1 << 9)
+
+/* RAM is mmap-ed read-only */
+#define RAM_READONLY (1 << 10)
+
+/* RAM FD is opened read-only */
+#define RAM_READONLY_FD (1 << 11)
+
+/* RAM can be private that has kvm guest memfd backend */
+#define RAM_GUEST_MEMFD (1 << 12)
+
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
IOMMUNotifierFlag flags,
hwaddr start, hwaddr end,
@@ -474,6 +530,26 @@ struct IOMMUMemoryRegionClass {
int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
uint64_t page_size_mask,
Error **errp);
+ /**
+ * @iommu_set_iova_ranges:
+ *
+ * Propagate information about the usable IOVA ranges for a given IOMMU
+ * memory region. Used for example to propagate host physical device
+ * reserved memory region constraints to the virtual IOMMU.
+ *
+ * Optional method: if this method is not provided, then the default IOVA
+ * aperture is used.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @iova_ranges: list of ordered IOVA ranges (at least one range)
+ *
+ * Returns 0 on success, or a negative error. In case of failure, the error
+ * object must be created.
+ */
+ int (*iommu_set_iova_ranges)(IOMMUMemoryRegion *iommu,
+ GList *iova_ranges,
+ Error **errp);
};
typedef struct RamDiscardListener RamDiscardListener;
@@ -537,6 +613,7 @@ static inline void ram_discard_listener_init(RamDiscardListener *rdl,
}
typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
+typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
/*
* RamDiscardManagerClass:
@@ -544,11 +621,12 @@ typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
* A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
* regions are currently populated to be used/accessed by the VM, notifying
* after parts were discarded (freeing up memory) and before parts will be
- * populated (consuming memory), to be used/acessed by the VM.
+ * populated (consuming memory), to be used/accessed by the VM.
*
* A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
- * #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is
- * mapped.
+ * #MemoryRegion isn't mapped into an address space yet (either directly
+ * or via an alias); it cannot change while the #MemoryRegion is
+ * mapped into an address space.
*
* The #RamDiscardManager is intended to be used by technologies that are
* incompatible with discarding of RAM (e.g., VFIO, which may pin all
@@ -568,7 +646,7 @@ typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
* Listeners are called in multiples of the minimum granularity (unless it
* would exceed the registered range) and changes are aligned to the minimum
* granularity within the #MemoryRegion. Listeners have to prepare for memory
- * becomming discarded in a different granularity than it was populated and the
+ * becoming discarded in a different granularity than it was populated and the
* other way around.
*/
struct RamDiscardManagerClass {
@@ -626,6 +704,21 @@ struct RamDiscardManagerClass {
ReplayRamPopulate replay_fn, void *opaque);
/**
+ * @replay_discarded:
+ *
+ * Call the #ReplayRamDiscard callback for all discarded parts within the
+ * #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscard callback
+ * @opaque: pointer to forward to the callback
+ */
+ void (*replay_discarded)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn, void *opaque);
+
+ /**
* @register_listener:
*
* Register a #RamDiscardListener for the given #MemoryRegionSection and
@@ -669,6 +762,11 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
ReplayRamPopulate replay_fn,
void *opaque);
+void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn,
+ void *opaque);
+
void ram_discard_manager_register_listener(RamDiscardManager *rdm,
RamDiscardListener *rdl,
MemoryRegionSection *section);
@@ -676,6 +774,10 @@ void ram_discard_manager_register_listener(RamDiscardManager *rdm,
void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
RamDiscardListener *rdl);
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+ ram_addr_t *ram_addr, bool *read_only,
+ bool *mr_has_discard_manager);
+
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
@@ -696,14 +798,18 @@ struct MemoryRegion {
bool nonvolatile;
bool rom_device;
bool flush_coalesced_mmio;
+ bool unmergeable;
uint8_t dirty_log_mask;
bool is_iommu;
RAMBlock *ram_block;
Object *owner;
+ /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
+ DeviceState *dev;
const MemoryRegionOps *ops;
void *opaque;
MemoryRegion *container;
+ int mapped_via_alias; /* Mapped via an alias, container might be NULL */
Int128 size;
hwaddr addr;
void (*destructor)(MemoryRegion *mr);
@@ -723,6 +829,9 @@ struct MemoryRegion {
unsigned ioeventfd_nb;
MemoryRegionIoeventfd *ioeventfds;
RamDiscardManager *rdm; /* Only for RAM */
+
+ /* For devices designed to perform re-entrant IO into their own IO MRs */
+ bool disable_reentrancy_guard;
};
struct IOMMUMemoryRegion {
@@ -735,6 +844,10 @@ struct IOMMUMemoryRegion {
#define IOMMU_NOTIFIER_FOREACH(n, mr) \
QLIST_FOREACH((n), &(mr)->iommu_notify, node)
+#define MEMORY_LISTENER_PRIORITY_MIN 0
+#define MEMORY_LISTENER_PRIORITY_ACCEL 10
+#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10
+
/**
* struct MemoryListener: callbacks structure for updates to the physical memory map
*
@@ -861,8 +974,11 @@ struct MemoryListener {
* its @log_sync must be NULL. Vice versa.
*
* @listener: The #MemoryListener.
+ * @last_stage: The last stage to synchronize the log during migration.
+ * The caller should guarantee that the synchronization with @last_stage
+ * set to true is triggered only once, after all VCPUs have been stopped.
*/
- void (*log_sync_global)(MemoryListener *listener);
+ void (*log_sync_global)(MemoryListener *listener, bool last_stage);
/**
* @log_clear:
@@ -885,8 +1001,11 @@ struct MemoryListener {
* active at that time.
*
* @listener: The #MemoryListener.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
- void (*log_global_start)(MemoryListener *listener);
+ bool (*log_global_start)(MemoryListener *listener, Error **errp);
/**
* @log_global_stop:
@@ -979,6 +1098,14 @@ struct MemoryListener {
*/
unsigned priority;
+ /**
+ * @name:
+ *
+ * Name of the listener. It can be used in contexts where we'd like to
+ * distinguish one memory listener from the rest.
+ */
+ const char *name;
+
/* private: */
AddressSpace *address_space;
QTAILQ_ENTRY(MemoryListener) link;
@@ -998,6 +1125,7 @@ struct AddressSpace {
struct FlatView *current_map;
int ioeventfd_nb;
+ int ioeventfd_notifiers;
struct MemoryRegionIoeventfd *ioeventfds;
QTAILQ_HEAD(, MemoryListener) listeners;
QTAILQ_ENTRY(AddressSpace) address_spaces_link;
@@ -1166,8 +1294,10 @@ void memory_region_init_io(MemoryRegion *mr,
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_nomigrate(MemoryRegion *mr,
+bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
@@ -1183,13 +1313,16 @@ void memory_region_init_ram_nomigrate(MemoryRegion *mr,
* @name: Region name, becomes part of RAMBlock name used in migration stream
* must be unique within any device
* @size: size of the region.
- * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
+ * RAM_GUEST_MEMFD.
* @errp: pointer to Error*, to store an error if it happens.
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
+bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
@@ -1197,7 +1330,7 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
Error **errp);
/**
- * memory_region_init_resizeable_ram: Initialize memory region with resizeable
+ * memory_region_init_resizeable_ram: Initialize memory region with resizable
* RAM. Accesses into the region will
* modify memory directly. Only an initial
* portion of this RAM is actually used.
@@ -1216,8 +1349,10 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_resizeable_ram(MemoryRegion *mr,
+bool memory_region_init_resizeable_ram(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
@@ -1240,22 +1375,25 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
* @align: alignment of the region base address; if 0, the default alignment
* (getpagesize()) will be used.
* @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
- * RAM_NORESERVE,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @path: the path in which to allocate the RAM.
- * @readonly: true to open @path for reading, false for read/write.
+ * @offset: offset within the file referenced by path
* @errp: pointer to Error*, to store an error if it happens.
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_from_file(MemoryRegion *mr,
+bool memory_region_init_ram_from_file(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
uint64_t align,
uint32_t ram_flags,
const char *path,
- bool readonly,
+ ram_addr_t offset,
Error **errp);
/**
@@ -1267,15 +1405,18 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
* @name: the name of the region.
* @size: size of the region.
* @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
- * RAM_NORESERVE.
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @fd: the fd to mmap.
* @offset: offset within the file referenced by fd
* @errp: pointer to Error*, to store an error if it happens.
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_from_fd(MemoryRegion *mr,
+bool memory_region_init_ram_from_fd(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
@@ -1370,8 +1511,10 @@ void memory_region_init_alias(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_nomigrate(MemoryRegion *mr,
+bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
@@ -1393,8 +1536,10 @@ void memory_region_init_rom_nomigrate(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
+bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
Object *owner,
const MemoryRegionOps *ops,
void *opaque,
@@ -1452,8 +1597,10 @@ void memory_region_init_iommu(void *_iommu_mr,
* give the RAM block a unique name for migration purposes.
* We should lift this restriction and allow arbitrary Objects.
* If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram(MemoryRegion *mr,
+bool memory_region_init_ram(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
@@ -1479,8 +1626,10 @@ void memory_region_init_ram(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom(MemoryRegion *mr,
+bool memory_region_init_rom(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
@@ -1510,8 +1659,10 @@ void memory_region_init_rom(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_device(MemoryRegion *mr,
+bool memory_region_init_rom_device(MemoryRegion *mr,
Object *owner,
const MemoryRegionOps *ops,
void *opaque,
@@ -1569,6 +1720,26 @@ static inline bool memory_region_is_romd(MemoryRegion *mr)
}
/**
+ * memory_region_is_protected: check whether a memory region is protected
+ *
+ * Returns %true if a memory region is protected RAM and cannot be accessed
+ * via standard mechanisms, e.g. DMA.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_protected(MemoryRegion *mr);
+
+/**
+ * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
+ * associated
+ *
+ * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_has_guest_memfd(MemoryRegion *mr);
+
+/**
* memory_region_get_iommu: check whether a memory region is an iommu
*
* Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
@@ -1646,6 +1817,16 @@ void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
IOMMUTLBEvent *event);
/**
+ * memory_region_unmap_iommu_notifier_range: notify a unmap for an IOMMU
+ * translation that covers the
+ * range of a notifier
+ *
+ * @notifier: the notifier to be notified
+ */
+void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
+
+
+/**
* memory_region_register_iommu_notifier: register a notifier for changes to
* IOMMU translation entries.
*
@@ -1733,6 +1914,18 @@ int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
Error **errp);
/**
+ * memory_region_iommu_set_iova_ranges - Set the usable IOVA ranges
+ * for a given IOMMU MR region
+ *
+ * @iommu: IOMMU memory region
+ * @iova_ranges: list of ordered IOVA ranges (at least one range)
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu,
+ GList *iova_ranges,
+ Error **errp);
+
+/**
* memory_region_name: get a memory region's name
*
* Returns the string that was used to initialize the memory region.
@@ -1806,7 +1999,7 @@ int memory_region_get_fd(MemoryRegion *mr);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1823,7 +2016,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1914,7 +2107,7 @@ void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
* querying the same page multiple times, which is especially useful for
* display updates where the scanlines often are not page aligned.
*
- * The dirty bitmap region which gets copyed into the snapshot (and
+ * The dirty bitmap region which gets copied into the snapshot (and
* cleared afterwards) can be larger than requested. The boundaries
* are rounded up/down so complete bitmap longs (covering 64 pages on
* 64bit hosts) can be copied over into the bitmap snapshot. Which
@@ -2230,6 +2423,25 @@ void memory_region_set_size(MemoryRegion *mr, uint64_t size);
void memory_region_set_alias_offset(MemoryRegion *mr,
hwaddr offset);
+/*
+ * memory_region_set_unmergeable: Set a memory region unmergeable
+ *
+ * Mark a memory region unmergeable, resulting in the memory region (or
+ * everything contained in a memory region container) not getting merged when
+ * simplifying the address space and notifying memory listeners. Consequently,
+ * memory listeners will never get notified about ranges that are larger than
+ * the original memory regions.
+ *
+ * This is primarily useful when multiple aliases to a RAM memory region are
+ * mapped into a memory region container, and updates (e.g., enable/disable or
+ * map/unmap) of individual memory region aliases are not supposed to affect
+ * other memory regions in the same container.
+ *
+ * @mr: the #MemoryRegion to be updated
+ * @unmergeable: whether to mark the #MemoryRegion unmergeable
+ */
+void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
+
/**
* memory_region_present: checks if an address relative to a @container
* translates into #MemoryRegion within @container
@@ -2244,7 +2456,8 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr);
/**
* memory_region_is_mapped: returns true if #MemoryRegion is mapped
- * into any address space.
+ * into another memory region, which does not necessarily imply that it is
+ * mapped into an address space.
*
* @mr: a #MemoryRegion which should be checked if it's mapped
*/
@@ -2320,8 +2533,10 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
*
* Synchronizes the dirty page log for all address spaces.
+ *
+ * @last_stage: whether this is the last stage of live migration
*/
-void memory_global_dirty_log_sync(void);
+void memory_global_dirty_log_sync(bool last_stage);
/**
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
@@ -2367,16 +2582,27 @@ void memory_listener_unregister(MemoryListener *listener);
/**
* memory_global_dirty_log_start: begin dirty logging for all regions
+ *
+ * @flags: purpose of starting dirty log, migration or dirty rate
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_global_dirty_log_start(void);
+bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
/**
* memory_global_dirty_log_stop: end dirty logging for all regions
+ *
+ * @flags: purpose of stopping dirty log, migration or dirty rate
*/
-void memory_global_dirty_log_stop(void);
+void memory_global_dirty_log_stop(unsigned int flags);
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
+
/**
* memory_region_dispatch_read: perform a read directly to the specified
* MemoryRegion.
@@ -2540,9 +2766,6 @@ struct MemoryRegionCache {
bool is_write;
};
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
-
-
/* address_space_ld*_cached: load from a cached #MemoryRegion
* address_space_st*_cached: store into a cached #MemoryRegion
*
@@ -2632,6 +2855,21 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
bool is_write);
/**
+ * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache to operate on.
+ *
+ * Initializes #MemoryRegionCache structure without memory region attached.
+ * A cache initialized this way can only be safely destroyed; it cannot be used.
+ */
+static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
+{
+ cache->mrs.mr = NULL;
+ /* There is no real need to initialize fv, but it makes Coverity happy. */
+ cache->fv = NULL;
+}
+
+/**
* address_space_cache_invalidate: complete a write to a #MemoryRegionCache
*
* @cache: The #MemoryRegionCache to operate on.
@@ -2752,6 +2990,9 @@ MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
hwaddr addr, const void *buf,
hwaddr len);
+int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
+bool prepare_mmio_access(MemoryRegion *mr);
+
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
if (is_write) {
@@ -2850,14 +3091,30 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
}
}
-#ifdef NEED_CPU_H
+/**
+ * address_space_set: Fill address space with a constant byte.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
+ uint8_t c, hwaddr len, MemTxAttrs attrs);
+
+#ifdef COMPILING_PER_TARGET
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
{
QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/* Swap if non-host endianness or native (target) endianness */
return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
@@ -2868,7 +3125,7 @@ static inline MemOp devend_memop(enum device_endian end)
return (end == non_host_endianness) ? MO_BSWAP : 0;
#endif
}
-#endif
+#endif /* COMPILING_PER_TARGET */
/*
* Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
@@ -2909,7 +3166,7 @@ int ram_block_discard_require(bool state);
/*
* See ram_block_discard_require(): only inhibit technologies that disable
- * uncoordinated discarding of pages in RAM blocks, allowing co-existance with
+ * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
* technologies that only inhibit uncoordinated discards (via the
* RamDiscardManager).
*/
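
Several memory_region_init_*() functions now report failure through a bool return instead of only filling *errp, so device realize code can bail out directly. A hedged sketch (MyDevState, MYDEV and the "mydev.ram" name are placeholders):

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        MyDevState *s = MYDEV(dev);

        if (!memory_region_init_ram_nomigrate(&s->ram, OBJECT(dev), "mydev.ram",
                                              s->ram_size, errp)) {
            return;   /* errp was already set by the init function */
        }
        /* ... map s->ram and continue realize ... */
    }
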
diff --git a/include/exec/memory_ldst.h.inc b/include/exec/memory_ldst.h.inc
index 7c3a641f7e..92ad74e956 100644
--- a/include/exec/memory_ldst.h.inc
+++ b/include/exec/memory_ldst.h.inc
@@ -20,48 +20,48 @@
*/
#ifdef TARGET_ENDIANNESS
-extern uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw, SUFFIX)(ARG1_DECL,
+void glue(address_space_stw, SUFFIX)(ARG1_DECL,
hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#else
-extern uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
+uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint16_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stb, SUFFIX)(ARG1_DECL,
+void glue(address_space_stb, SUFFIX)(ARG1_DECL,
hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
+void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
+void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#endif
diff --git a/include/exec/mmu-access-type.h b/include/exec/mmu-access-type.h
new file mode 100644
index 0000000000..28bbb05b94
--- /dev/null
+++ b/include/exec/mmu-access-type.h
@@ -0,0 +1,18 @@
+/*
+ * QEMU MMU Access type definitions
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef EXEC_MMU_ACCESS_TYPE_H
+#define EXEC_MMU_ACCESS_TYPE_H
+
+typedef enum MMUAccessType {
+ MMU_DATA_LOAD = 0,
+ MMU_DATA_STORE = 1,
+ MMU_INST_FETCH = 2
+#define MMU_ACCESS_COUNT 3
+} MMUAccessType;
+
+#endif
diff --git a/include/exec/page-vary.h b/include/exec/page-vary.h
index c22a7a742e..54ddde308a 100644
--- a/include/exec/page-vary.h
+++ b/include/exec/page-vary.h
@@ -27,8 +27,26 @@ typedef struct {
} TargetPageBits;
#ifdef IN_PAGE_VARY
-extern bool set_preferred_target_page_bits_common(int bits);
-extern void finalize_target_page_bits_common(int min);
+bool set_preferred_target_page_bits_common(int bits);
+void finalize_target_page_bits_common(int min);
#endif
+/**
+ * set_preferred_target_page_bits:
+ * @bits: number of bits needed to represent an address within the page
+ *
+ * Set the preferred target page size (the actual target page
+ * size may be smaller than any given CPU's preference).
+ * Returns true on success, false on failure (which can only happen
+ * if this is called after the system has already finalized its
+ * choice of page size and the requested page size is smaller than that).
+ */
+bool set_preferred_target_page_bits(int bits);
+
+/**
+ * finalize_target_page_bits:
+ * Commit the final value set by set_preferred_target_page_bits.
+ */
+void finalize_target_page_bits(void);
+
#endif /* EXEC_PAGE_VARY_H */
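
The intended call sequence is that each CPU states its preference while being created and the machine commits the result once, before anything depends on the final page size. A minimal sketch:

    /* during CPU creation: ask for at least 16KiB pages (14 bits) */
    if (!set_preferred_target_page_bits(14)) {
        /* too late: the page size was already finalized at a smaller value */
    }

    /* once all CPUs have registered their preference */
    finalize_target_page_bits();
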
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
index b1b72b5d90..c4552b5061 100644
--- a/include/exec/plugin-gen.h
+++ b/include/exec/plugin-gen.h
@@ -12,36 +12,25 @@
#ifndef QEMU_PLUGIN_GEN_H
#define QEMU_PLUGIN_GEN_H
-#include "qemu/plugin.h"
#include "tcg/tcg.h"
struct DisasContextBase;
#ifdef CONFIG_PLUGIN
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress);
-void plugin_gen_tb_end(CPUState *cpu);
+bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db,
+ bool supress);
+void plugin_gen_tb_end(CPUState *cpu, size_t num_insns);
void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db);
void plugin_gen_insn_end(void);
void plugin_gen_disable_mem_helpers(void);
-void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info);
-
-static inline void plugin_insn_append(const void *from, size_t size)
-{
- struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
-
- if (insn == NULL) {
- return;
- }
-
- insn->data = g_byte_array_append(insn->data, from, size);
-}
+void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info);
#else /* !CONFIG_PLUGIN */
-static inline
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress)
+static inline bool
+plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup)
{
return false;
}
@@ -53,16 +42,13 @@ void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db)
static inline void plugin_gen_insn_end(void)
{ }
-static inline void plugin_gen_tb_end(CPUState *cpu)
+static inline void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{ }
static inline void plugin_gen_disable_mem_helpers(void)
{ }
-static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
-{ }
-
-static inline void plugin_insn_append(const void *from, size_t size)
+static inline void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{ }
#endif /* CONFIG_PLUGIN */
diff --git a/include/exec/poison.h b/include/exec/poison.h
index 7ad4ad18e8..792a83f493 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -14,6 +14,7 @@
#pragma GCC poison TARGET_CRIS
#pragma GCC poison TARGET_HEXAGON
#pragma GCC poison TARGET_HPPA
+#pragma GCC poison TARGET_LOONGARCH64
#pragma GCC poison TARGET_M68K
#pragma GCC poison TARGET_MICROBLAZE
#pragma GCC poison TARGET_MIPS
@@ -21,7 +22,6 @@
#pragma GCC poison TARGET_ABI_MIPSO32
#pragma GCC poison TARGET_MIPS64
#pragma GCC poison TARGET_ABI_MIPSN64
-#pragma GCC poison TARGET_NIOS2
#pragma GCC poison TARGET_OPENRISC
#pragma GCC poison TARGET_PPC
#pragma GCC poison TARGET_PPC64
@@ -34,11 +34,10 @@
#pragma GCC poison TARGET_TRICORE
#pragma GCC poison TARGET_XTENSA
-#pragma GCC poison TARGET_ALIGNED_ONLY
#pragma GCC poison TARGET_HAS_BFLT
#pragma GCC poison TARGET_NAME
#pragma GCC poison TARGET_SUPPORTS_MTTCG
-#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison TARGET_BIG_ENDIAN
#pragma GCC poison BSWAP_NEEDED
#pragma GCC poison TARGET_LONG_BITS
@@ -51,8 +50,6 @@
#pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN
-#pragma GCC poison CPUArchState
-
#pragma GCC poison CPU_INTERRUPT_HARD
#pragma GCC poison CPU_INTERRUPT_EXITTB
#pragma GCC poison CPU_INTERRUPT_HALT
@@ -67,17 +64,14 @@
#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
#pragma GCC poison CONFIG_ALPHA_DIS
-#pragma GCC poison CONFIG_ARM_A64_DIS
-#pragma GCC poison CONFIG_ARM_DIS
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
#pragma GCC poison CONFIG_HEXAGON_DIS
+#pragma GCC poison CONFIG_LOONGARCH_DIS
#pragma GCC poison CONFIG_M68K_DIS
#pragma GCC poison CONFIG_MICROBLAZE_DIS
#pragma GCC poison CONFIG_MIPS_DIS
-#pragma GCC poison CONFIG_NANOMIPS_DIS
-#pragma GCC poison CONFIG_NIOS2_DIS
#pragma GCC poison CONFIG_PPC_DIS
#pragma GCC poison CONFIG_RISCV_DIS
#pragma GCC poison CONFIG_S390_DIS
@@ -85,11 +79,9 @@
#pragma GCC poison CONFIG_SPARC_DIS
#pragma GCC poison CONFIG_XTENSA_DIS
-#pragma GCC poison CONFIG_HAX
#pragma GCC poison CONFIG_HVF
#pragma GCC poison CONFIG_LINUX_USER
#pragma GCC poison CONFIG_KVM
-#pragma GCC poison CONFIG_SOFTMMU
#pragma GCC poison CONFIG_WHPX
#pragma GCC poison CONFIG_XEN
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 551876bed0..891c44cf2d 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -25,6 +25,10 @@
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"
+#include "exec/exec-all.h"
+#include "qemu/rcu.h"
+
+extern uint64_t total_dirty_pages;
/**
* clear_bmap_size: calculate clear bitmap size
@@ -40,7 +44,8 @@ static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
}
/**
- * clear_bmap_set: set clear bitmap for the page range
+ * clear_bmap_set: set clear bitmap for the page range. Must be called
+ * with bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @start: the start page number
@@ -53,12 +58,12 @@ static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
{
uint8_t shift = rb->clear_bmap_shift;
- bitmap_set_atomic(rb->clear_bmap, start >> shift,
- clear_bmap_size(npages, shift));
+ bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
}
/**
- * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set.
+ * Must be called with bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @page: the page number to check
@@ -69,7 +74,7 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
uint8_t shift = rb->clear_bmap_shift;
- return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+ return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
}
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
@@ -105,9 +110,10 @@ long qemu_maxrampagesize(void);
* @size: the size in bytes of the ram block
* @mr: the memory region where the ram block is
* @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
- * RAM_NORESERVE.
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @mem_path or @fd: specify the backing file or device
- * @readonly: true to open @path for reading, false for read/write.
+ * @offset: Offset into target file
* @errp: pointer to Error*, to store an error if it happens
*
* Return:
@@ -116,10 +122,10 @@ long qemu_maxrampagesize(void);
*/
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
uint32_t ram_flags, const char *mem_path,
- bool readonly, Error **errp);
+ off_t offset, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
uint32_t ram_flags, int fd, off_t offset,
- bool readonly, Error **errp);
+ Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp);
@@ -145,8 +151,6 @@ static inline void qemu_ram_block_writeback(RAMBlock *block)
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
-
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
@@ -332,16 +336,25 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
}
#if !defined(_WIN32)
-static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
- ram_addr_t start,
- ram_addr_t pages)
+
+/*
+ * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
+ * the number of dirty pages in @bitmap passed as argument. On the other hand,
+ * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
+ * weren't set in the global migration bitmap.
+ */
+static inline
+uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+ ram_addr_t start,
+ ram_addr_t pages)
{
unsigned long i, j;
- unsigned long page_number, c;
+ unsigned long page_number, c, nbits;
hwaddr addr;
ram_addr_t ram_addr;
+ uint64_t num_dirty = 0;
unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
- unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
+ unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
@@ -367,14 +380,21 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
+ nbits = ctpopl(temp);
qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
- if (global_dirty_log) {
+ if (global_dirty_tracking) {
qatomic_or(
&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
temp);
+ if (unlikely(
+ global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
}
+ num_dirty += nbits;
+
if (tcg_enabled()) {
qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
temp);
@@ -392,7 +412,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
- if (!global_dirty_log) {
+ if (!global_dirty_tracking) {
clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
}
@@ -403,6 +423,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
for (i = 0; i < len; i++) {
if (bitmap[i] != 0) {
c = leul_to_cpu(bitmap[i]);
+ nbits = ctpopl(c);
+ if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
+ num_dirty += nbits;
do {
j = ctzl(c);
c &= ~(1ul << j);
@@ -415,9 +440,19 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
}
}
}
+
+ return num_dirty;
}
#endif /* not _WIN32 */
+static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
+ ram_addr_t length)
+{
+ if (tcg_enabled()) {
+ tlb_reset_dirty_range_all(start, length);
+ }
+
+}
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
@@ -479,6 +514,9 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
idx++;
}
}
+ if (num_dirty) {
+ cpu_physical_memory_dirty_bits_cleared(start, length);
+ }
if (rb->clear_bmap) {
/*
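
Callers of cpu_physical_memory_set_dirty_lebitmap() can now account for the pages they just marked dirty, e.g. when consuming a chunk of a KVM dirty log (sketch only; the surrounding bookkeeping is up to the caller):

    uint64_t marked = cpu_physical_memory_set_dirty_lebitmap(le_bitmap,
                                                             start, pages);
    /* 'marked' counts every bit set in le_bitmap, which the caller can add
     * to its own dirty-page statistics */
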
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
index 664701b759..0babd105c0 100644
--- a/include/exec/ramblock.h
+++ b/include/exec/ramblock.h
@@ -21,6 +21,8 @@
#ifndef CONFIG_USER_ONLY
#include "cpu-common.h"
+#include "qemu/rcu.h"
+#include "exec/ramlist.h"
struct RAMBlock {
struct rcu_head rcu;
@@ -32,16 +34,31 @@ struct RAMBlock {
ram_addr_t max_length;
void (*resized)(const char*, uint64_t length, void *host);
uint32_t flags;
- /* Protected by iothread lock. */
+ /* Protected by the BQL. */
char idstr[256];
/* RCU-enabled, writes protected by the ramlist lock */
QLIST_ENTRY(RAMBlock) next;
QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
int fd;
+ uint64_t fd_offset;
+ int guest_memfd;
size_t page_size;
/* dirty bitmap used during migration */
unsigned long *bmap;
- /* bitmap of already received pages in postcopy */
+
+ /*
+ * Below fields are only used by mapped-ram migration
+ */
+ /* bitmap of pages present in the migration file */
+ unsigned long *file_bmap;
+ /*
+ * offset in the file at which pages belonging to this ramblock are
+ * saved; used only during migration to a file.
+ */
+ off_t bitmap_offset;
+ uint64_t pages_offset;
+
+ /* Bitmap of already received pages. Only used on destination side. */
unsigned long *receivedmap;
/*
@@ -51,6 +68,9 @@ struct RAMBlock {
* and split clearing of dirty bitmap on the remote node (e.g.,
* KVM). The bitmap will be set only when doing global sync.
*
+ * It is only used during src side of ram migration, and it is
+ * protected by the global ram_state.bitmap_mutex.
+ *
* NOTE: this bitmap is different comparing to the other bitmaps
* in that one bit can represent multiple guest pages (which is
* decided by the `clear_bmap_shift' variable below). On
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
index ece6497ee2..2ad2a81acc 100644
--- a/include/exec/ramlist.h
+++ b/include/exec/ramlist.h
@@ -80,6 +80,6 @@ void ram_block_notify_add(void *host, size_t size, size_t max_size);
void ram_block_notify_remove(void *host, size_t size, size_t max_size);
void ram_block_notify_resize(void *host, size_t old_size, size_t new_size);
-void ram_block_dump(Monitor *mon);
+GString *ram_block_format(void);
#endif /* RAMLIST_H */
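
With ram_block_dump(Monitor *) replaced by ram_block_format(), formatting is decoupled from the monitor and the caller owns the returned string. A minimal sketch of how an HMP handler might use it:

    static void show_ram_blocks(Monitor *mon)
    {
        GString *buf = ram_block_format();
        monitor_printf(mon, "%s", buf->str);   /* or fputs(buf->str, stdout) outside a monitor */
        g_string_free(buf, true);              /* caller frees the buffer */
    }
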
diff --git a/include/exec/replay-core.h b/include/exec/replay-core.h
new file mode 100644
index 0000000000..244c77acce
--- /dev/null
+++ b/include/exec/replay-core.h
@@ -0,0 +1,80 @@
+/*
+ * QEMU replay core API
+ *
+ * Copyright (c) 2010-2015 Institute for System Programming
+ * of the Russian Academy of Sciences.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_REPLAY_H
+#define EXEC_REPLAY_H
+
+#include "qapi/qapi-types-replay.h"
+
+extern ReplayMode replay_mode;
+
+/* Replay process control functions */
+
+/* Enables recording or saving of the event log with the specified parameters */
+void replay_configure(struct QemuOpts *opts);
+/* Initializes timers used for snapshotting and enables event recording */
+void replay_start(void);
+/* Closes replay log file and frees other resources. */
+void replay_finish(void);
+/* Adds replay blocker with the specified error description */
+void replay_add_blocker(const char *feature);
+/* Returns name of the replay log file */
+const char *replay_get_filename(void);
+
+/*
+ * Start making one step in the backward direction.
+ * Used by gdbstub for backwards debugging.
+ * Returns true on success.
+ */
+bool replay_reverse_step(void);
+/*
+ * Start searching for the last breakpoint/watchpoint.
+ * Used by gdbstub for backwards debugging.
+ * Returns true if the process successfully started.
+ */
+bool replay_reverse_continue(void);
+/*
+ * Returns true if the replay module is processing a
+ * reverse_continue or reverse_step request.
+ */
+bool replay_running_debug(void);
+/* Called in reverse debugging mode to collect breakpoint information */
+void replay_breakpoint(void);
+/* Called when gdb is attached to gdbstub */
+void replay_gdb_attached(void);
+
+/* Interrupts and exceptions */
+
+/* Called by exception handler to write or read exception processing events */
+bool replay_exception(void);
+/*
+ * Used to determine whether an exception is pending.
+ * Does not proceed to the next event in the log.
+ */
+bool replay_has_exception(void);
+/*
+ * Called by interrupt handlers to write or read interrupt processing events.
+ * Returns true if the interrupt should be processed.
+ */
+bool replay_interrupt(void);
+/*
+ * Tries to read an interrupt event from the file.
+ * Returns true when an interrupt request is pending.
+ */
+bool replay_has_interrupt(void);
+
+/* Processing data from random generators */
+
+/* Saves the values from the random number generator */
+void replay_save_random(int ret, void *buf, size_t len);
+/* Loads the saved values for the random number generator */
+int replay_read_random(void *buf, size_t len);
+
+#endif
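
To illustrate the record/replay contract of the random-number hooks (a hedged sketch, not code from this patch): a backend records whatever entropy it produced and replays the same bytes later. hw_rng_read() is a made-up entropy source; the REPLAY_MODE_* values come from the QAPI ReplayMode enum:

    static void get_deterministic_bytes(void *buf, size_t len)
    {
        if (replay_mode == REPLAY_MODE_PLAY) {
            replay_read_random(buf, len);          /* reuse the recorded bytes */
            return;
        }
        int ret = hw_rng_read(buf, len);           /* hypothetical entropy source */
        if (replay_mode == REPLAY_MODE_RECORD) {
            replay_save_random(ret, buf, len);     /* log result and payload */
        }
    }
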
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
deleted file mode 100644
index fbcae88f4b..0000000000
--- a/include/exec/softmmu-semi.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Helper routines to provide target memory access for semihosting
- * syscalls in system emulation mode.
- *
- * Copyright (c) 2007 CodeSourcery.
- *
- * This code is licensed under the GPL
- */
-
-#ifndef SOFTMMU_SEMI_H
-#define SOFTMMU_SEMI_H
-
-#include "cpu.h"
-
-static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
-{
- uint64_t val;
-
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0);
- return tswap64(val);
-}
-
-static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
-{
- uint32_t val;
-
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0);
- return tswap32(val);
-}
-
-static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
-{
- uint8_t val;
-
- cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0);
- return val;
-}
-
-#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; })
-#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
-#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
-#define get_user_ual(arg, p) get_user_u32(arg, p)
-
-static inline void softmmu_tput64(CPUArchState *env,
- target_ulong addr, uint64_t val)
-{
- val = tswap64(val);
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1);
-}
-
-static inline void softmmu_tput32(CPUArchState *env,
- target_ulong addr, uint32_t val)
-{
- val = tswap32(val);
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1);
-}
-#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; })
-#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
-#define put_user_ual(arg, p) put_user_u32(arg, p)
-
-static void *softmmu_lock_user(CPUArchState *env,
- target_ulong addr, target_ulong len, int copy)
-{
- uint8_t *p;
- /* TODO: Make this something that isn't fixed size. */
- p = malloc(len);
- if (p && copy) {
- cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0);
- }
- return p;
-}
-#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
-static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
-{
- char *p;
- char *s;
- uint8_t c;
- /* TODO: Make this something that isn't fixed size. */
- s = p = malloc(1024);
- if (!s) {
- return NULL;
- }
- do {
- cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0);
- addr++;
- *(p++) = c;
- } while (c);
- return s;
-}
-#define lock_user_string(p) softmmu_lock_user_string(env, p)
-static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
- target_ulong len)
-{
- if (len) {
- cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1);
- }
- free(p);
-}
-#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
-
-#endif
diff --git a/include/exec/target_long.h b/include/exec/target_long.h
new file mode 100644
index 0000000000..3cd8e26a23
--- /dev/null
+++ b/include/exec/target_long.h
@@ -0,0 +1,44 @@
+/*
+ * Target Long Definitions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _TARGET_LONG_H_
+#define _TARGET_LONG_H_
+
+/*
+ * Usually this should only be included via cpu-defs.h. However, for
+ * certain cases where we want to build only two versions of a binary
+ * object, it can be included directly. In that case the build system
+ * must ensure TARGET_LONG_BITS is defined.
+ */
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS not defined
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#define MO_TL MO_32
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#define MO_TL MO_64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#endif /* _TARGET_LONG_H_ */
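
A small usage sketch (assumptions: the build system has defined TARGET_LONG_BITS and <stdio.h>/<inttypes.h> are included); the format macros keep register dumps correct for both 32-bit and 64-bit targets:

    static void dump_reg(const char *name, target_ulong val)
    {
        printf("%s = " TARGET_FMT_lx "\n", name, val);   /* width follows TARGET_LONG_BITS */
    }
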
diff --git a/include/exec/target_page.h b/include/exec/target_page.h
index 96726c36a4..98ffbb5c23 100644
--- a/include/exec/target_page.h
+++ b/include/exec/target_page.h
@@ -15,7 +15,9 @@
#define EXEC_TARGET_PAGE_H
size_t qemu_target_page_size(void);
+int qemu_target_page_mask(void);
int qemu_target_page_bits(void);
int qemu_target_page_bits_min(void);
+size_t qemu_target_pages_to_MiB(size_t pages);
#endif
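
For context, the new accessors are typically used for size/page conversions in target-independent code; a hedged sketch:

    static uint64_t size_to_target_pages(uint64_t bytes)
    {
        size_t psize = qemu_target_page_size();
        return (bytes + psize - 1) / psize;      /* round up to whole target pages */
    }

    /* e.g. for reporting: qemu_target_pages_to_MiB(size_to_target_pages(ram_size)) */
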
diff --git a/include/exec/tb-flush.h b/include/exec/tb-flush.h
new file mode 100644
index 0000000000..142c240d94
--- /dev/null
+++ b/include/exec/tb-flush.h
@@ -0,0 +1,28 @@
+/*
+ * tb-flush prototype for use by the rest of the system.
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef _TB_FLUSH_H_
+#define _TB_FLUSH_H_
+
+/**
+ * tb_flush() - flush all translation blocks
+ * @cs: CPUState (must be valid, but treated as anonymous pointer)
+ *
+ * Used to flush all the translation blocks in the system. Sometimes
+ * it is simpler to flush everything than work out which individual
+ * translations are now invalid and ensure they are not called
+ * anymore.
+ *
+ * tb_flush() takes care of running the flush in an exclusive context
+ * if it is not already running in one. This means no guest code will
+ * run until this completes.
+ */
+void tb_flush(CPUState *cs);
+
+void tcg_flush_jmp_cache(CPUState *cs);
+
+#endif /* _TB_FLUSH_H_ */
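
Usage is intentionally blunt: when guest code has been modified in a way that cannot be attributed to individual translations, the caller passes any valid CPUState and drops everything (hedged sketch; the surrounding hook is hypothetical):

    static void guest_code_patched(CPUState *cs)
    {
        tb_flush(cs);   /* drops every cached TB; runs in an exclusive context internally */
    }
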
diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
new file mode 100644
index 0000000000..dc5a5faa0b
--- /dev/null
+++ b/include/exec/tlb-common.h
@@ -0,0 +1,56 @@
+/*
+ * Common definitions for the softmmu tlb
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef EXEC_TLB_COMMON_H
+#define EXEC_TLB_COMMON_H 1
+
+#define CPU_TLB_ENTRY_BITS 5
+
+/* Minimalized TLB entry for use by TCG fast path. */
+typedef union CPUTLBEntry {
+ struct {
+ uint64_t addr_read;
+ uint64_t addr_write;
+ uint64_t addr_code;
+ /*
+ * Addend to virtual address to get host address. IO accesses
+ * use the corresponding iotlb value.
+ */
+ uintptr_t addend;
+ };
+ /*
+ * Padding to get a power of two size, as well as index
+ * access to addr_{read,write,code}.
+ */
+ uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
+} CPUTLBEntry;
+
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
+
+/*
+ * Data elements that are per MMU mode, accessed by the fast path.
+ * The structure is aligned to aid loading the pair with one insn.
+ */
+typedef struct CPUTLBDescFast {
+ /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
+ uintptr_t mask;
+ /* The array of tlb entries itself. */
+ CPUTLBEntry *table;
+} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
+
+#endif /* EXEC_TLB_COMMON_H */
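
To show why mask stores (n_entries - 1) << CPU_TLB_ENTRY_BITS: the fast path can turn a virtual address into a byte offset into the table with one shift and one AND. This is an illustrative sketch, not the actual cputlb code; target_page_bits stands in for TARGET_PAGE_BITS:

    static inline CPUTLBEntry *tlb_entry_sketch(const CPUTLBDescFast *fast,
                                                uint64_t addr, int target_page_bits)
    {
        /* page index, pre-scaled by the entry size, then bounded by the table size */
        uintptr_t ofs = (addr >> (target_page_bits - CPU_TLB_ENTRY_BITS)) & fast->mask;
        return (CPUTLBEntry *)((uintptr_t)fast->table + ofs);
    }
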
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
index 9f646389af..85c9460c7c 100644
--- a/include/exec/translate-all.h
+++ b/include/exec/translate-all.h
@@ -23,13 +23,6 @@
/* translate-all.c */
-struct page_collection *page_collection_lock(tb_page_addr_t start,
- tb_page_addr_t end);
-void page_collection_unlock(struct page_collection *set);
-void tb_invalidate_phys_page_fast(struct page_collection *pages,
- tb_page_addr_t start, int len,
- uintptr_t retaddr);
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
#ifdef CONFIG_USER_ONLY
diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h
new file mode 100644
index 0000000000..48211c890a
--- /dev/null
+++ b/include/exec/translation-block.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Definition of TranslationBlock.
+ * Copyright (c) 2003 Fabrice Bellard
+ */
+
+#ifndef EXEC_TRANSLATION_BLOCK_H
+#define EXEC_TRANSLATION_BLOCK_H
+
+#include "qemu/thread.h"
+#include "exec/cpu-common.h"
+#ifdef CONFIG_USER_ONLY
+#include "qemu/interval-tree.h"
+#endif
+
+/*
+ * Page tracking code uses ram addresses in system mode, and virtual
+ * addresses in userspace mode. Define tb_page_addr_t to be an
+ * appropriate type.
+ */
+#if defined(CONFIG_USER_ONLY)
+typedef vaddr tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
+#else
+typedef ram_addr_t tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#endif
+
+/*
+ * Translation Cache-related fields of a TB.
+ * This struct exists just for convenience; we keep track of TBs in a binary
+ * search tree, and the only fields needed to compare TBs in the tree are
+ * @ptr and @size.
+ * Note: the address of search data can be obtained by adding @size to @ptr.
+ */
+struct tb_tc {
+ const void *ptr; /* pointer to the translated code */
+ size_t size;
+};
+
+struct TranslationBlock {
+ /*
+ * Guest PC corresponding to this block. This must be the true
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
+ * privilege, must store those bits elsewhere.
+ *
+ * If CF_PCREL, the opcodes for the TranslationBlock are written
+ * such that the TB is associated only with the physical page and
+ * may be run in any virtual address context. In this case, PC
+ * must always be taken from ENV in a target-specific manner.
+ * Unwind information is taken as offsets from the page, to be
+ * deposited into the "current" PC.
+ */
+ vaddr pc;
+
+ /*
+ * Target-specific data associated with the TranslationBlock, e.g.:
+ * x86: the original user, the Code Segment virtual base,
+ * arm: an extension of tb->flags,
+ * s390x: instruction data for EXECUTE,
+ * sparc: the next pc of the instruction queue (for delay slots).
+ */
+ uint64_t cs_base;
+
+ uint32_t flags; /* flags defining in which context the code was generated */
+ uint32_t cflags; /* compile flags */
+
+/* Note that TCG_MAX_INSNS is 512; we validate that this matches elsewhere. */
+#define CF_COUNT_MASK 0x000001ff
+#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
+#define CF_MEMI_ONLY 0x00001000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT 0x00002000
+#define CF_INVALID 0x00004000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL 0x00008000 /* Generate code for a parallel context */
+#define CF_NOIRQ 0x00010000 /* Generate an uninterruptible TB */
+#define CF_PCREL 0x00020000 /* Opcodes in TB are PC-relative */
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+#define CF_CLUSTER_SHIFT 24
+
+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
+ struct tb_tc tc;
+
+ /*
+ * Track tb_page_addr_t intervals that intersect this TB.
+ * For user-only, the virtual addresses are always contiguous,
+ * and we use a unified interval tree. For system, we use a
+ * linked list headed in each PageDesc. Within the list, the lsb
+ * of the previous pointer tells the index of page_next[], and the
+ * list is protected by the PageDesc lock(s).
+ */
+#ifdef CONFIG_USER_ONLY
+ IntervalTreeNode itree;
+#else
+ uintptr_t page_next[2];
+ tb_page_addr_t page_addr[2];
+#endif
+
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+ QemuSpin jmp_lock;
+
+ /* The following data are used to directly call another TB from
+ * the code of this one. This can be done either by emitting direct or
+ * indirect native jump instructions. These jumps are reset so that the TB
+ * just continues its execution. The TB can be linked to another one by
+ * setting one of the jump targets (or patching the jump instruction). Only
+ * two of such jumps are supported.
+ */
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
+ uint16_t jmp_reset_offset[2]; /* offset of original jump target */
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
+ uintptr_t jmp_target_addr[2]; /* target address */
+
+ /*
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+ * Each TB can have two outgoing jumps, and therefore can participate
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
+ * significant bit (LSB) of the pointers in these lists is used to encode
+ * which of the two list entries is to be used in the pointed TB.
+ *
+ * List traversals are protected by jmp_lock. The destination TB of each
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+ * can be acquired from any origin TB.
+ *
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+ * being invalidated, so that no further outgoing jumps from it can be set.
+ *
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+ * to a destination TB that has CF_INVALID set.
+ */
+ uintptr_t jmp_list_head;
+ uintptr_t jmp_list_next[2];
+ uintptr_t jmp_dest[2];
+};
+
+/* The alignment given to TranslationBlock during allocation. */
+#define CODE_GEN_ALIGN 16
+
+/* Hide the qatomic_read to make code a little easier on the eyes */
+static inline uint32_t tb_cflags(const TranslationBlock *tb)
+{
+ return qatomic_read(&tb->cflags);
+}
+
+#endif /* EXEC_TRANSLATION_BLOCK_H */
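
Two small, hedged examples of how the cflags bits above are meant to be consumed (the helper names are made up):

    static bool tb_in_parallel_context(const TranslationBlock *tb)
    {
        /* decide between atomic and non-atomic code-generation paths */
        return (tb_cflags(tb) & CF_PARALLEL) != 0;
    }

    static unsigned tb_cluster_id(const TranslationBlock *tb)
    {
        return (tb_cflags(tb) & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
    }
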
diff --git a/include/exec/translator.h b/include/exec/translator.h
index 9bc46eda59..6cd937ac5c 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -18,14 +18,25 @@
* member in your target-specific DisasContext.
*/
-
#include "qemu/bswap.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
-#include "exec/plugin-gen.h"
-#include "exec/translate-all.h"
-#include "tcg/tcg.h"
+#include "exec/cpu-common.h"
+#include "exec/cpu-defs.h"
+#include "exec/abi_ptr.h"
+#include "cpu.h"
+/**
+ * gen_intermediate_code
+ * @cpu: cpu context
+ * @tb: translation block
+ * @max_insns: max number of instructions to translate
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ *
+ * This function must be provided by the target, which should create
+ * the target-specific DisasContext, and then invoke translator_loop.
+ */
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
+ vaddr pc, void *host_pc);
/**
* DisasJumpType:
@@ -64,28 +75,24 @@ typedef enum DisasJumpType {
* @num_insns: Number of translated instructions (including current).
* @max_insns: Maximum number of instructions to be translated in this TB.
* @singlestep_enabled: "Hardware" single stepping enabled.
+ * @saved_can_do_io: Known value of cpu->neg.can_do_io, or -1 for unknown.
+ * @plugin_enabled: TCG plugin enabled in this TB.
+ * @insn_start: The last op emitted by the insn_start hook,
+ * which is expected to be INDEX_op_insn_start.
*
* Architecture-agnostic disassembly context.
*/
typedef struct DisasContextBase {
- const TranslationBlock *tb;
- target_ulong pc_first;
- target_ulong pc_next;
+ TranslationBlock *tb;
+ vaddr pc_first;
+ vaddr pc_next;
DisasJumpType is_jmp;
int num_insns;
int max_insns;
bool singlestep_enabled;
-#ifdef CONFIG_USER_ONLY
- /*
- * Guest address of the last byte of the last protected page.
- *
- * Pages containing the translated instructions are made non-writable in
- * order to achieve consistency in case another thread is modifying the
- * code while translate_insn() fetches the instruction bytes piecemeal.
- * Such writer threads are blocked on mmap_lock() in page_unprotect().
- */
- target_ulong page_protect_end;
-#endif
+ bool plugin_enabled;
+ struct TCGOp *insn_start;
+ void *host_addr[2];
} DisasContextBase;
/**
@@ -118,16 +125,18 @@ typedef struct TranslatorOps {
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
- void (*disas_log)(const DisasContextBase *db, CPUState *cpu);
+ void (*disas_log)(const DisasContextBase *db, CPUState *cpu, FILE *f);
} TranslatorOps;
/**
* translator_loop:
- * @ops: Target-specific operations.
- * @db: Disassembly context.
* @cpu: Target vCPU.
* @tb: Translation block.
* @max_insns: Maximum number of insns to translate.
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ * @ops: Target-specific operations.
+ * @db: Disassembly context.
*
* Generic translator loop.
*
@@ -141,10 +150,9 @@ typedef struct TranslatorOps {
* - When single-stepping is enabled (system-wide or on the current vCPU).
* - When too many instructions have been translated.
*/
-void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
- CPUState *cpu, TranslationBlock *tb, int max_insns);
-
-void translator_loop_temp_check(DisasContextBase *db);
+void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
+ vaddr pc, void *host_pc, const TranslatorOps *ops,
+ DisasContextBase *db);
/**
* translator_use_goto_tb
@@ -154,7 +162,17 @@ void translator_loop_temp_check(DisasContextBase *db);
* Return true if goto_tb is allowed between the current TB
* and the destination PC.
*/
-bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest);
+
+/**
+ * translator_io_start
+ * @db: Disassembly context
+ *
+ * If icount is enabled, set cpu->can_do_io, adjust db->is_jmp to
+ * DISAS_TOO_MANY if it is still DISAS_NEXT, and return true.
+ * Otherwise return false.
+ */
+bool translator_io_start(DisasContextBase *db);
/*
* Translator Load Functions
@@ -167,24 +185,64 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
* the relevant information at translation time.
*/
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
- abi_ptr pc, bool do_swap); \
- static inline type fullname(CPUArchState *env, \
- DisasContextBase *dcbase, abi_ptr pc) \
- { \
- return fullname ## _swap(env, dcbase, pc, false); \
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+
+static inline uint16_t
+translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint16_t ret = translator_lduw(env, db, pc);
+ if (do_swap) {
+ ret = bswap16(ret);
}
+ return ret;
+}
+
+static inline uint32_t
+translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint32_t ret = translator_ldl(env, db, pc);
+ if (do_swap) {
+ ret = bswap32(ret);
+ }
+ return ret;
+}
+
+static inline uint64_t
+translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint64_t ret = translator_ldq(env, db, pc);
+ if (do_swap) {
+ ret = bswap64(ret);
+ }
+ return ret;
+}
-#define FOR_EACH_TRANSLATOR_LD(F) \
- F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
- F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
- F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
- F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
- F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
-
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
+/**
+ * translator_fake_ldb - fake instruction load
+ * @insn8: byte of instruction
+ * @pc: program counter of instruction
+ *
+ * This is a special case helper used where the instruction we are
+ * about to translate comes from somewhere else (e.g. being
+ * re-synthesised for s390x "ex"). It ensures we update other areas of
+ * the translator with details of the executed instruction.
+ */
+void translator_fake_ldb(uint8_t insn8, abi_ptr pc);
-#undef GEN_TRANSLATOR_LD
+/*
+ * Return whether addr is on the same page as where disassembly started.
+ * Translators can use this to enforce the rule that only single-insn
+ * translation blocks are allowed to cross page boundaries.
+ */
+static inline bool is_same_page(const DisasContextBase *db, vaddr addr)
+{
+ return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
+}
-#endif /* EXEC__TRANSLATOR_H */
+#endif /* EXEC__TRANSLATOR_H */
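
A hedged sketch of a target's translate_insn hook built on the new non-macro loaders; the 2-byte instruction size, my_decode(), and the availability of cpu_env() are assumptions for illustration only:

    static void example_translate_insn(DisasContextBase *db, CPUState *cpu)
    {
        CPUArchState *env = cpu_env(cpu);                  /* assumed helper */
        uint16_t insn = translator_lduw(env, db, db->pc_next);

        db->pc_next += 2;
        my_decode(db, insn);                               /* hypothetical decoder */

        /* only single-insn TBs are allowed to cross a page boundary */
        if (db->is_jmp == DISAS_NEXT && !is_same_page(db, db->pc_next)) {
            db->is_jmp = DISAS_TOO_MANY;
        }
    }
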
diff --git a/include/exec/tswap.h b/include/exec/tswap.h
new file mode 100644
index 0000000000..b7a4191347
--- /dev/null
+++ b/include/exec/tswap.h
@@ -0,0 +1,82 @@
+/*
+ * Macros for swapping a value if the endianness is different
+ * between the target and the host.
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef TSWAP_H
+#define TSWAP_H
+
+#include "qemu/bswap.h"
+
+/**
+ * target_words_bigendian:
+ * Returns true if the (default) endianness of the target is big endian,
+ * false otherwise. Note that in target-specific code, you can use
+ * TARGET_BIG_ENDIAN directly instead. On the other hand, common
+ * code should normally never need to know about the endianness of the
+ * target, so please do *not* use this function unless you know very well
+ * what you are doing!
+ */
+bool target_words_bigendian(void);
+
+/*
+ * If we're in target-specific code, we can hard-code the swapping
+ * condition, otherwise we have to do (slower) run-time checks.
+ */
+#ifdef COMPILING_PER_TARGET
+#define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN)
+#else
+#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN)
+#endif /* COMPILING_PER_TARGET */
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap16(s);
+ } else {
+ return s;
+ }
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap32(s);
+ } else {
+ return s;
+ }
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap64(s);
+ } else {
+ return s;
+ }
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap16(*s);
+ }
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap32(*s);
+ }
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap64(*s);
+ }
+}
+
+#endif /* TSWAP_H */
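
A short sketch of the intended use: values copied out of guest memory are in target byte order, and tswapN() folds away to a no-op when host and target endianness already match (assumes <string.h>):

    static uint32_t guest_to_host_u32(const void *guest_buf)
    {
        uint32_t v;
        memcpy(&v, guest_buf, sizeof(v));   /* raw, target-endian bytes */
        return tswap32(v);                  /* now in host byte order */
    }
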
diff --git a/include/exec/user/abitypes.h b/include/exec/user/abitypes.h
index 743b8bb9ea..3ec1969368 100644
--- a/include/exec/user/abitypes.h
+++ b/include/exec/user/abitypes.h
@@ -1,7 +1,13 @@
#ifndef EXEC_USER_ABITYPES_H
#define EXEC_USER_ABITYPES_H
-#include "cpu.h"
+#ifndef CONFIG_USER_ONLY
+#error Cannot include this header from system emulation
+#endif
+
+#include "exec/cpu-defs.h"
+#include "exec/tswap.h"
+#include "user/tswap-target.h"
#ifdef TARGET_ABI32
#define TARGET_ABI_BITS 32
@@ -15,7 +21,17 @@
#define ABI_LLONG_ALIGNMENT 2
#endif
-#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) || defined(TARGET_SH4)
+#ifdef TARGET_CRIS
+#define ABI_SHORT_ALIGNMENT 1
+#define ABI_INT_ALIGNMENT 1
+#define ABI_LONG_ALIGNMENT 1
+#define ABI_LLONG_ALIGNMENT 1
+#endif
+
+#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) \
+ || defined(TARGET_SH4) \
+ || defined(TARGET_OPENRISC) \
+ || defined(TARGET_MICROBLAZE)
#define ABI_LLONG_ALIGNMENT 4
#endif
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
index 300a840d58..2ebfecf58e 100644
--- a/include/exec/user/thunk.h
+++ b/include/exec/user/thunk.h
@@ -111,8 +111,7 @@ static inline int thunk_type_size(const argtype *type_ptr, int is_host)
if (is_host) {
#if defined(HOST_X86_64)
return 8;
-#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
- defined(HOST_PARISC) || defined(HOST_SPARC64)
+#elif defined(HOST_MIPS) || defined(HOST_SPARC64)
return 4;
#elif defined(HOST_PPC)
return sizeof(void *);
@@ -193,10 +192,17 @@ static inline int thunk_type_align(const argtype *type_ptr, int is_host)
}
}
-unsigned int target_to_host_bitmask(unsigned int target_mask,
- const bitmask_transtbl * trans_tbl);
-unsigned int host_to_target_bitmask(unsigned int host_mask,
- const bitmask_transtbl * trans_tbl);
+unsigned int target_to_host_bitmask_len(unsigned int target_mask,
+ const bitmask_transtbl *trans_tbl,
+ size_t trans_len);
+unsigned int host_to_target_bitmask_len(unsigned int host_mask,
+ const bitmask_transtbl *trans_tbl,
+ size_t trans_len);
+
+#define target_to_host_bitmask(M, T) \
+ target_to_host_bitmask_len(M, T, ARRAY_SIZE(T))
+#define host_to_target_bitmask(M, T) \
+ host_to_target_bitmask_len(M, T, ARRAY_SIZE(T))
void thunk_init(unsigned int max_structs);
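
With the _len variants and the ARRAY_SIZE-based wrappers, translation tables no longer need a zero terminator. A hedged sketch (the target-side flag values are invented; real tables live in linux-user and use the TARGET_O_* constants, and <fcntl.h> provides the host flags):

    static const bitmask_transtbl demo_open_flags[] = {
        /* { target_mask, target_bits, host_mask, host_bits } */
        { 0x0008, 0x0008, O_APPEND,   O_APPEND   },
        { 0x0800, 0x0800, O_NONBLOCK, O_NONBLOCK },
    };

    static int demo_target_to_host_open_flags(int target_flags)
    {
        return target_to_host_bitmask(target_flags, demo_open_flags);
    }
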
diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h
new file mode 100644
index 0000000000..b9844afc77
--- /dev/null
+++ b/include/exec/vaddr.h
@@ -0,0 +1,18 @@
+/* Define vaddr. */
+
+#ifndef VADDR_H
+#define VADDR_H
+
+/**
+ * vaddr:
+ * Type wide enough to contain any #target_ulong virtual address.
+ */
+typedef uint64_t vaddr;
+#define VADDR_PRId PRId64
+#define VADDR_PRIu PRIu64
+#define VADDR_PRIo PRIo64
+#define VADDR_PRIx PRIx64
+#define VADDR_PRIX PRIX64
+#define VADDR_MAX UINT64_MAX
+
+#endif
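
Finally, a one-line sketch showing the intent of the format macros: vaddr is always 64 bits wide regardless of the target, so the VADDR_PRI* macros are the safe way to print it (assumes <stdio.h> and <inttypes.h>):

    static void report_guest_fault(vaddr addr)
    {
        fprintf(stderr, "guest fault at 0x%" VADDR_PRIx "\n", addr);
    }
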