author     Peter Maydell <peter.maydell@linaro.org>    2018-06-21 17:54:26 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2018-06-21 17:54:26 +0100
commit     33836a731562e3d07b3a83f26e81c6b1482d216c (patch)
tree       a089ff809414c0ccfb142cd2936749bca2bf38a4
parent     46012db666990ff2eed1d3dc199ab8006439a93b (diff)
parent     9f754620651d3432114f4bb89c7f12cbea814b3e (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180615' into staging
TCG patch queue:

Workaround macos assembler lossage.
Eliminate tb_lock.
Fix TB code generation overflow.

# gpg: Signature made Fri 15 Jun 2018 20:40:56 BST
# gpg: using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20180615:
  tcg: Reduce max TB opcode count
  tcg: remove tb_lock
  translate-all: remove tb_lock mention from cpu_restore_state_from_tb
  cputlb: remove tb_lock from tlb_flush functions
  translate-all: protect TB jumps with a per-destination-TB lock
  translate-all: discard TB when tb_link_page returns an existing matching TB
  translate-all: introduce assert_no_pages_locked
  translate-all: add page_locked assertions
  translate-all: use per-page locking in !user-mode
  translate-all: move tb_invalidate_phys_page_range up in the file
  translate-all: work page-by-page in tb_invalidate_phys_range_1
  translate-all: remove hole in PageDesc
  translate-all: make l1_map lockless
  translate-all: iterate over TBs in a page with PAGE_FOR_EACH_TB
  tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx
  tcg: track TBs with per-region BST's
  qht: return existing entry when qht_insert fails
  qht: require a default comparison function
  tcg/i386: Use byte form of xgetbv instruction

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  accel/tcg/cpu-exec.c  96
-rw-r--r--  accel/tcg/cputlb.c  8
-rw-r--r--  accel/tcg/translate-all.c  1041
-rw-r--r--  accel/tcg/translate-all.h  6
-rw-r--r--  docs/devel/multi-thread-tcg.txt  24
-rw-r--r--  exec.c  26
-rw-r--r--  include/exec/cpu-common.h  2
-rw-r--r--  include/exec/exec-all.h  51
-rw-r--r--  include/exec/memory-internal.h  6
-rw-r--r--  include/exec/tb-context.h  4
-rw-r--r--  include/qemu/qht.h  32
-rw-r--r--  linux-user/main.c  3
-rw-r--r--  tcg/aarch64/tcg-target.inc.c  2
-rw-r--r--  tcg/arm/tcg-target.inc.c  2
-rw-r--r--  tcg/i386/tcg-target.inc.c  7
-rw-r--r--  tcg/mips/tcg-target.inc.c  2
-rw-r--r--  tcg/ppc/tcg-target.inc.c  4
-rw-r--r--  tcg/s390/tcg-target.inc.c  2
-rw-r--r--  tcg/sparc/tcg-target.inc.c  4
-rw-r--r--  tcg/tcg.c  218
-rw-r--r--  tcg/tcg.h  19
-rw-r--r--  tcg/tci/tcg-target.inc.c  2
-rw-r--r--  tests/qht-bench.c  18
-rw-r--r--  tests/test-qht.c  23
-rw-r--r--  util/qht.c  41
25 files changed, 1155 insertions, 488 deletions
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 4ef95d8dd3..c738b7f7d6 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -212,20 +212,20 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
We only end up here when an existing TB is too long. */
cflags |= MIN(max_cycles, CF_COUNT_MASK);
- tb_lock();
+ mmap_lock();
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
orig_tb->flags, cflags);
tb->orig_tb = orig_tb;
- tb_unlock();
+ mmap_unlock();
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
cpu_tb_exec(cpu, tb);
- tb_lock();
+ mmap_lock();
tb_phys_invalidate(tb, -1);
- tb_remove(tb);
- tb_unlock();
+ mmap_unlock();
+ tcg_tb_remove(tb);
}
#endif
@@ -244,12 +244,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
if (tb == NULL) {
mmap_lock();
- tb_lock();
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
- if (likely(tb == NULL)) {
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
- }
- tb_unlock();
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
}
@@ -264,15 +259,14 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_tb_exec(cpu, tb);
cc->cpu_exec_exit(cpu);
} else {
- /* We may have exited due to another problem here, so we need
- * to reset any tb_locks we may have taken but didn't release.
+ /*
* The mmap_lock is dropped by tb_gen_code if it runs out of
* memory.
*/
#ifndef CONFIG_SOFTMMU
tcg_debug_assert(!have_mmap_lock());
#endif
- tb_lock_reset();
+ assert_no_pages_locked();
}
if (in_exclusive_region) {
@@ -295,7 +289,7 @@ struct tb_desc {
uint32_t trace_vcpu_dstate;
};
-static bool tb_cmp(const void *p, const void *d)
+static bool tb_lookup_cmp(const void *p, const void *d)
{
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
@@ -340,7 +334,7 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
phys_pc = get_page_addr_code(desc.env, pc);
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
- return qht_lookup(&tb_ctx.htable, tb_cmp, &desc, h);
+ return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
@@ -354,28 +348,43 @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
}
}
-/* Called with tb_lock held. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_next)
{
+ uintptr_t old;
+
assert(n < ARRAY_SIZE(tb->jmp_list_next));
- if (tb->jmp_list_next[n]) {
- /* Another thread has already done this while we were
- * outside of the lock; nothing to do in this case */
- return;
+ qemu_spin_lock(&tb_next->jmp_lock);
+
+ /* make sure the destination TB is valid */
+ if (tb_next->cflags & CF_INVALID) {
+ goto out_unlock_next;
}
+ /* Atomically claim the jump destination slot only if it was NULL */
+ old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
+ if (old) {
+ goto out_unlock_next;
+ }
+
+ /* patch the native jump address */
+ tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);
+
+ /* add in TB jmp list */
+ tb->jmp_list_next[n] = tb_next->jmp_list_head;
+ tb_next->jmp_list_head = (uintptr_t)tb | n;
+
+ qemu_spin_unlock(&tb_next->jmp_lock);
+
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"Linking TBs %p [" TARGET_FMT_lx
"] index %d -> %p [" TARGET_FMT_lx "]\n",
tb->tc.ptr, tb->pc, n,
tb_next->tc.ptr, tb_next->pc);
+ return;
- /* patch the native jump address */
- tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);
-
- /* add in TB jmp circular list */
- tb->jmp_list_next[n] = tb_next->jmp_list_first;
- tb_next->jmp_list_first = (uintptr_t)tb | n;
+ out_unlock_next:
+ qemu_spin_unlock(&tb_next->jmp_lock);
+ return;
}
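
The new tb_add_jump() above claims the outgoing jump slot with a compare-and-swap against NULL, under the destination TB's jmp_lock and only after checking CF_INVALID. Below is a minimal standalone sketch of just that claim step, using C11 atomics and hypothetical types; it omits the jmp_lock and the jump-list insertion that the real function also performs.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct block {
        atomic_uintptr_t jmp_dest[2];   /* outgoing links, claimed at most once */
        atomic_bool invalid;            /* stands in for CF_INVALID */
    };

    /* Returns true if this thread won the right to patch slot n. */
    static bool claim_jump(struct block *from, int n, struct block *to)
    {
        uintptr_t expected = 0;

        if (atomic_load(&to->invalid)) {
            return false;               /* destination already invalidated */
        }
        /* only the first caller sees the slot as 0 and may patch it */
        return atomic_compare_exchange_strong(&from->jmp_dest[n],
                                              &expected, (uintptr_t)to);
    }

    int main(void)
    {
        struct block a = {0}, b = {0};

        assert(claim_jump(&a, 0, &b));      /* first claim succeeds */
        assert(!claim_jump(&a, 0, &b));     /* later claims are refused */
        return 0;
    }
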
static inline TranslationBlock *tb_find(CPUState *cpu,
@@ -385,27 +394,11 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
- bool acquired_tb_lock = false;
tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
if (tb == NULL) {
- /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
- * taken outside tb_lock. As system emulation is currently
- * single threaded the locks are NOPs.
- */
mmap_lock();
- tb_lock();
- acquired_tb_lock = true;
-
- /* There's a chance that our desired tb has been translated while
- * taking the locks so we check again inside the lock.
- */
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
- if (likely(tb == NULL)) {
- /* if no translated code available, then translate it now */
- tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
- }
-
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
@@ -421,16 +414,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
#endif
/* See if we can patch the calling TB. */
if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- if (!acquired_tb_lock) {
- tb_lock();
- acquired_tb_lock = true;
- }
- if (!(tb->cflags & CF_INVALID)) {
- tb_add_jump(last_tb, tb_exit, tb);
- }
- }
- if (acquired_tb_lock) {
- tb_unlock();
+ tb_add_jump(last_tb, tb_exit, tb);
}
return tb;
}
@@ -706,7 +690,9 @@ int cpu_exec(CPUState *cpu)
g_assert(cpu == current_cpu);
g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
- tb_lock_reset();
+#ifndef CONFIG_SOFTMMU
+ tcg_debug_assert(!have_mmap_lock());
+#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 0a721bb9c4..719cca2268 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -125,8 +125,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
tlb_debug("(count: %zu)\n", tlb_flush_count());
- tb_lock();
-
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
cpu_tb_jmp_cache_clear(cpu);
@@ -135,8 +133,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
env->tlb_flush_addr = -1;
env->tlb_flush_mask = 0;
- tb_unlock();
-
atomic_mb_set(&cpu->pending_tlb_flush, 0);
}
@@ -180,8 +176,6 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
assert_cpu_is_self(cpu);
- tb_lock();
-
tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -197,8 +191,6 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
cpu_tb_jmp_cache_clear(cpu);
tlb_debug("done\n");
-
- tb_unlock();
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index d48b56ca38..f0c3fd4d03 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -88,13 +88,13 @@
#endif
/* Access to the various translations structures need to be serialised via locks
- * for consistency. This is automatic for SoftMMU based system
- * emulation due to its single threaded nature. In user-mode emulation
- * access to the memory related structures are protected with the
- * mmap_lock.
+ * for consistency.
+ * In user-mode emulation access to the memory related structures are protected
+ * with mmap_lock.
+ * In !user-mode we use per-page locks.
*/
#ifdef CONFIG_SOFTMMU
-#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
+#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
@@ -103,17 +103,76 @@
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
- TranslationBlock *first_tb;
+ uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
/* in order to optimize self modifying code, we count the number
of lookups we do to a given page to use a bitmap */
- unsigned int code_write_count;
unsigned long *code_bitmap;
+ unsigned int code_write_count;
#else
unsigned long flags;
#endif
+#ifndef CONFIG_USER_ONLY
+ QemuSpin lock;
+#endif
} PageDesc;
+/**
+ * struct page_entry - page descriptor entry
+ * @pd: pointer to the &struct PageDesc of the page this entry represents
+ * @index: page index of the page
+ * @locked: whether the page is locked
+ *
+ * This struct helps us keep track of the locked state of a page, without
+ * bloating &struct PageDesc.
+ *
+ * A page lock protects accesses to all fields of &struct PageDesc.
+ *
+ * See also: &struct page_collection.
+ */
+struct page_entry {
+ PageDesc *pd;
+ tb_page_addr_t index;
+ bool locked;
+};
+
+/**
+ * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
+ * @tree: Binary search tree (BST) of the pages, with key == page index
+ * @max: Pointer to the page in @tree with the highest page index
+ *
+ * To avoid deadlock we lock pages in ascending order of page index.
+ * When operating on a set of pages, we need to keep track of them so that
+ * we can lock them in order and also unlock them later. For this we collect
+ * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
+ * @tree implementation we use does not provide an O(1) operation to obtain the
+ * highest-ranked element, we use @max to keep track of the inserted page
+ * with the highest index. This is valuable because if a page is not in
+ * the tree and its index is higher than @max's, then we can lock it
+ * without breaking the locking order rule.
+ *
+ * Note on naming: 'struct page_set' would be shorter, but we already have a few
+ * page_set_*() helpers, so page_collection is used instead to avoid confusion.
+ *
+ * See also: page_collection_lock().
+ */
+struct page_collection {
+ GTree *tree;
+ struct page_entry *max;
+};
+
+/* list iterators for lists of tagged pointers in TranslationBlock */
+#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
+ for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
+ tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
+ tb = (TranslationBlock *)((uintptr_t)tb & ~1))
+
+#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
+ TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
+
+#define TB_FOR_EACH_JMP(head_tb, tb, n) \
+ TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
+
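
PAGE_FOR_EACH_TB and TB_FOR_EACH_JMP both rely on the tagged-pointer trick in TB_FOR_EACH_TAGGED: the low bit of each stored pointer selects which of the element's two "next" slots continues the list, so one object can sit on two lists at once. Here is a self-contained sketch with a hypothetical node type rather than QEMU's TranslationBlock.

    #include <stdint.h>
    #include <stdio.h>

    struct node {
        int id;
        uintptr_t next[2];   /* low bit of each link selects the next slot */
    };

    #define FOR_EACH_TAGGED(head, n, k)                                     \
        for (k = (head) & 1, n = (struct node *)((head) & ~(uintptr_t)1);   \
             n;                                                             \
             n = (struct node *)n->next[k], k = (uintptr_t)n & 1,           \
             n = (struct node *)((uintptr_t)n & ~(uintptr_t)1))

    int main(void)
    {
        struct node a = { .id = 1 }, b = { .id = 2 };
        struct node *n;
        unsigned k;

        /* a is linked through its slot 0, b through its slot 1 */
        uintptr_t head = (uintptr_t)&a;
        a.next[0] = (uintptr_t)&b | 1;
        b.next[1] = 0;

        FOR_EACH_TAGGED(head, n, k) {
            printf("node %d (reached via slot %u)\n", n->id, k);
        }
        return 0;
    }
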
/* In system mode we want L1_MAP to be based on ram offsets,
while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
@@ -157,9 +216,6 @@ __thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;
-/* translation block context */
-static __thread int have_tb_lock;
-
static void page_table_config_init(void)
{
uint32_t v_l1_bits;
@@ -180,33 +236,6 @@ static void page_table_config_init(void)
assert(v_l2_levels >= 0);
}
-#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
-#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
-
-void tb_lock(void)
-{
- assert_tb_unlocked();
- qemu_mutex_lock(&tb_ctx.tb_lock);
- have_tb_lock++;
-}
-
-void tb_unlock(void)
-{
- assert_tb_locked();
- have_tb_lock--;
- qemu_mutex_unlock(&tb_ctx.tb_lock);
-}
-
-void tb_lock_reset(void)
-{
- if (have_tb_lock) {
- qemu_mutex_unlock(&tb_ctx.tb_lock);
- have_tb_lock = 0;
- }
-}
-
-static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
-
void cpu_gen_init(void)
{
tcg_context_init(&tcg_init_ctx);
@@ -298,7 +327,6 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
}
/* The cpu state corresponding to 'searched_pc' is restored.
- * Called with tb_lock held.
* When reset_icount is true, current TB will be interrupted and
* icount should be recalculated.
*/
@@ -335,7 +363,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
return -1;
found:
- if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
+ if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
assert(use_icount);
/* Reset the cycle counter to the start of the block
and shift it to the number of actually executed instructions */
@@ -364,8 +392,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
* - fault during translation (instruction fetch)
* - fault from helper (not using GETPC() macro)
*
- * Either way we need return early to avoid blowing up on a
- * recursive tb_lock() as we can't resolve it here.
+ * Either way we need to return early as we can't resolve it here.
*
* We are using unsigned arithmetic so if host_pc <
* tcg_init_ctx.code_gen_buffer check_offset will wrap to way
@@ -374,18 +401,16 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
- tb_lock();
- tb = tb_find_pc(host_pc);
+ tb = tcg_tb_lookup(host_pc);
if (tb) {
cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
- if (tb->cflags & CF_NOCACHE) {
+ if (tb_cflags(tb) & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
tb_phys_invalidate(tb, -1);
- tb_remove(tb);
+ tcg_tb_remove(tb);
}
r = true;
}
- tb_unlock();
}
return r;
@@ -462,20 +487,12 @@ static void page_init(void)
#endif
}
-/* If alloc=1:
- * Called with tb_lock held for system emulation.
- * Called with mmap_lock held for user-mode emulation.
- */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
PageDesc *pd;
void **lp;
int i;
- if (alloc) {
- assert_memory_lock();
- }
-
/* Level 1. Always allocated. */
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
@@ -484,11 +501,17 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
void **p = atomic_rcu_read(lp);
if (p == NULL) {
+ void *existing;
+
if (!alloc) {
return NULL;
}
p = g_new0(void *, V_L2_SIZE);
- atomic_rcu_set(lp, p);
+ existing = atomic_cmpxchg(lp, NULL, p);
+ if (unlikely(existing)) {
+ g_free(p);
+ p = existing;
+ }
}
lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
@@ -496,11 +519,26 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
pd = atomic_rcu_read(lp);
if (pd == NULL) {
+ void *existing;
+
if (!alloc) {
return NULL;
}
pd = g_new0(PageDesc, V_L2_SIZE);
- atomic_rcu_set(lp, pd);
+#ifndef CONFIG_USER_ONLY
+ {
+ int i;
+
+ for (i = 0; i < V_L2_SIZE; i++) {
+ qemu_spin_init(&pd[i].lock);
+ }
+ }
+#endif
+ existing = atomic_cmpxchg(lp, NULL, pd);
+ if (unlikely(existing)) {
+ g_free(pd);
+ pd = existing;
+ }
}
return pd + (index & (V_L2_SIZE - 1));
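
With tb_lock gone, page_find_alloc() populates l1_map levels locklessly: allocate a fresh level, try to cmpxchg it into the parent slot, and free it again if another thread won the race. A sketch of that pattern with C11 atomics and plain calloc (hypothetical names, allocation-failure handling omitted):

    #include <stdatomic.h>
    #include <stdlib.h>

    #define LEVEL_SIZE 512

    typedef struct { atomic_uintptr_t slot; } parent_t;

    /* Return the child array of @p, allocating it exactly once even when
     * several threads race here. */
    static void **get_child(parent_t *p)
    {
        void **child = (void **)atomic_load(&p->slot);

        if (child == NULL) {
            void **fresh = calloc(LEVEL_SIZE, sizeof(void *));
            uintptr_t expected = 0;

            if (atomic_compare_exchange_strong(&p->slot, &expected,
                                               (uintptr_t)fresh)) {
                child = fresh;              /* we installed it */
            } else {
                free(fresh);                /* somebody else won; use theirs */
                child = (void **)expected;  /* CAS stored the current value here */
            }
        }
        return child;
    }

    int main(void)
    {
        parent_t p = { 0 };
        return get_child(&p) == get_child(&p) ? 0 : 1;  /* same array twice */
    }
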
@@ -511,6 +549,331 @@ static inline PageDesc *page_find(tb_page_addr_t index)
return page_find_alloc(index, 0);
}
+static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
+ PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
+
+/* In user-mode page locks aren't used; mmap_lock is enough */
+#ifdef CONFIG_USER_ONLY
+
+#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
+
+static inline void page_lock(PageDesc *pd)
+{ }
+
+static inline void page_unlock(PageDesc *pd)
+{ }
+
+static inline void page_lock_tb(const TranslationBlock *tb)
+{ }
+
+static inline void page_unlock_tb(const TranslationBlock *tb)
+{ }
+
+struct page_collection *
+page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
+{
+ return NULL;
+}
+
+void page_collection_unlock(struct page_collection *set)
+{ }
+#else /* !CONFIG_USER_ONLY */
+
+#ifdef CONFIG_DEBUG_TCG
+
+static __thread GHashTable *ht_pages_locked_debug;
+
+static void ht_pages_locked_debug_init(void)
+{
+ if (ht_pages_locked_debug) {
+ return;
+ }
+ ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
+}
+
+static bool page_is_locked(const PageDesc *pd)
+{
+ PageDesc *found;
+
+ ht_pages_locked_debug_init();
+ found = g_hash_table_lookup(ht_pages_locked_debug, pd);
+ return !!found;
+}
+
+static void page_lock__debug(PageDesc *pd)
+{
+ ht_pages_locked_debug_init();
+ g_assert(!page_is_locked(pd));
+ g_hash_table_insert(ht_pages_locked_debug, pd, pd);
+}
+
+static void page_unlock__debug(const PageDesc *pd)
+{
+ bool removed;
+
+ ht_pages_locked_debug_init();
+ g_assert(page_is_locked(pd));
+ removed = g_hash_table_remove(ht_pages_locked_debug, pd);
+ g_assert(removed);
+}
+
+static void
+do_assert_page_locked(const PageDesc *pd, const char *file, int line)
+{
+ if (unlikely(!page_is_locked(pd))) {
+ error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
+ pd, file, line);
+ abort();
+ }
+}
+
+#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
+
+void assert_no_pages_locked(void)
+{
+ ht_pages_locked_debug_init();
+ g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
+}
+
+#else /* !CONFIG_DEBUG_TCG */
+
+#define assert_page_locked(pd)
+
+static inline void page_lock__debug(const PageDesc *pd)
+{
+}
+
+static inline void page_unlock__debug(const PageDesc *pd)
+{
+}
+
+#endif /* CONFIG_DEBUG_TCG */
+
+static inline void page_lock(PageDesc *pd)
+{
+ page_lock__debug(pd);
+ qemu_spin_lock(&pd->lock);
+}
+
+static inline void page_unlock(PageDesc *pd)
+{
+ qemu_spin_unlock(&pd->lock);
+ page_unlock__debug(pd);
+}
+
+/* lock the page(s) of a TB in the correct acquisition order */
+static inline void page_lock_tb(const TranslationBlock *tb)
+{
+ page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
+}
+
+static inline void page_unlock_tb(const TranslationBlock *tb)
+{
+ page_unlock(page_find(tb->page_addr[0] >> TARGET_PAGE_BITS));
+ if (unlikely(tb->page_addr[1] != -1)) {
+ page_unlock(page_find(tb->page_addr[1] >> TARGET_PAGE_BITS));
+ }
+}
+
+static inline struct page_entry *
+page_entry_new(PageDesc *pd, tb_page_addr_t index)
+{
+ struct page_entry *pe = g_malloc(sizeof(*pe));
+
+ pe->index = index;
+ pe->pd = pd;
+ pe->locked = false;
+ return pe;
+}
+
+static void page_entry_destroy(gpointer p)
+{
+ struct page_entry *pe = p;
+
+ g_assert(pe->locked);
+ page_unlock(pe->pd);
+ g_free(pe);
+}
+
+/* returns false on success */
+static bool page_entry_trylock(struct page_entry *pe)
+{
+ bool busy;
+
+ busy = qemu_spin_trylock(&pe->pd->lock);
+ if (!busy) {
+ g_assert(!pe->locked);
+ pe->locked = true;
+ page_lock__debug(pe->pd);
+ }
+ return busy;
+}
+
+static void do_page_entry_lock(struct page_entry *pe)
+{
+ page_lock(pe->pd);
+ g_assert(!pe->locked);
+ pe->locked = true;
+}
+
+static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
+{
+ struct page_entry *pe = value;
+
+ do_page_entry_lock(pe);
+ return FALSE;
+}
+
+static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
+{
+ struct page_entry *pe = value;
+
+ if (pe->locked) {
+ pe->locked = false;
+ page_unlock(pe->pd);
+ }
+ return FALSE;
+}
+
+/*
+ * Trylock a page, and if successful, add the page to a collection.
+ * Returns true ("busy") if the page could not be locked; false otherwise.
+ */
+static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
+{
+ tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
+ struct page_entry *pe;
+ PageDesc *pd;
+
+ pe = g_tree_lookup(set->tree, &index);
+ if (pe) {
+ return false;
+ }
+
+ pd = page_find(index);
+ if (pd == NULL) {
+ return false;
+ }
+
+ pe = page_entry_new(pd, index);
+ g_tree_insert(set->tree, &pe->index, pe);
+
+ /*
+ * If this is either (1) the first insertion or (2) a page whose index
+ * is higher than any other so far, just lock the page and move on.
+ */
+ if (set->max == NULL || pe->index > set->max->index) {
+ set->max = pe;
+ do_page_entry_lock(pe);
+ return false;
+ }
+ /*
+ * Try to acquire out-of-order lock; if busy, return busy so that we acquire
+ * locks in order.
+ */
+ return page_entry_trylock(pe);
+}
+
+static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
+{
+ tb_page_addr_t a = *(const tb_page_addr_t *)ap;
+ tb_page_addr_t b = *(const tb_page_addr_t *)bp;
+
+ if (a == b) {
+ return 0;
+ } else if (a < b) {
+ return -1;
+ }
+ return 1;
+}
+
+/*
+ * Lock a range of pages ([@start,@end[) as well as the pages of all
+ * intersecting TBs.
+ * Locking order: acquire locks in ascending order of page index.
+ */
+struct page_collection *
+page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
+{
+ struct page_collection *set = g_malloc(sizeof(*set));
+ tb_page_addr_t index;
+ PageDesc *pd;
+
+ start >>= TARGET_PAGE_BITS;
+ end >>= TARGET_PAGE_BITS;
+ g_assert(start <= end);
+
+ set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
+ page_entry_destroy);
+ set->max = NULL;
+ assert_no_pages_locked();
+
+ retry:
+ g_tree_foreach(set->tree, page_entry_lock, NULL);
+
+ for (index = start; index <= end; index++) {
+ TranslationBlock *tb;
+ int n;
+
+ pd = page_find(index);
+ if (pd == NULL) {
+ continue;
+ }
+ if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
+ g_tree_foreach(set->tree, page_entry_unlock, NULL);
+ goto retry;
+ }
+ assert_page_locked(pd);
+ PAGE_FOR_EACH_TB(pd, tb, n) {
+ if (page_trylock_add(set, tb->page_addr[0]) ||
+ (tb->page_addr[1] != -1 &&
+ page_trylock_add(set, tb->page_addr[1]))) {
+ /* drop all locks, and reacquire in order */
+ g_tree_foreach(set->tree, page_entry_unlock, NULL);
+ goto retry;
+ }
+ }
+ }
+ return set;
+}
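
page_collection_lock() above enforces the ordering rule described for struct page_collection: pages are locked in ascending index order, anything at or below the current maximum is only trylocked, and a failed trylock drops every lock and restarts. A reduced sketch of the same discipline, with C11 atomic_flag spinlocks and a small bitmap standing in for QEMU's PageDesc locks and GTree:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NPAGES 16
    static atomic_flag page_lock[NPAGES];           /* cleared == unlocked */

    struct page_set {
        bool held[NPAGES];
        int max;                                    /* highest locked index, or -1 */
    };

    static void set_unlock_all(struct page_set *s)
    {
        for (int i = 0; i < NPAGES; i++) {
            if (s->held[i]) {
                atomic_flag_clear(&page_lock[i]);
                s->held[i] = false;
            }
        }
        s->max = -1;
    }

    /* Returns false on success, true ("busy") if the caller must restart. */
    static bool set_trylock_add(struct page_set *s, int idx)
    {
        if (s->held[idx]) {
            return false;                           /* already locked by us */
        }
        if (idx > s->max) {
            /* in order: spinning here cannot deadlock with another thread */
            while (atomic_flag_test_and_set(&page_lock[idx])) {
                /* spin */
            }
            s->max = idx;
        } else if (atomic_flag_test_and_set(&page_lock[idx])) {
            return true;                            /* out of order and busy */
        }
        s->held[idx] = true;
        return false;
    }

    int main(void)
    {
        struct page_set s = { .max = -1 };

        for (int i = 0; i < NPAGES; i++) {
            atomic_flag_clear(&page_lock[i]);
        }
        /* a caller that sees "busy" unlocks everything and retries */
        while (set_trylock_add(&s, 3) || set_trylock_add(&s, 1)) {
            set_unlock_all(&s);
        }
        set_unlock_all(&s);
        return 0;
    }
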
+
+void page_collection_unlock(struct page_collection *set)
+{
+ /* entries are unlocked and freed via page_entry_destroy */
+ g_tree_destroy(set->tree);
+ g_free(set);
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
+ PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
+{
+ PageDesc *p1, *p2;
+
+ assert_memory_lock();
+ g_assert(phys1 != -1 && phys1 != phys2);
+ p1 = page_find_alloc(phys1 >> TARGET_PAGE_BITS, alloc);
+ if (ret_p1) {
+ *ret_p1 = p1;
+ }
+ if (likely(phys2 == -1)) {
+ page_lock(p1);
+ return;
+ }
+ p2 = page_find_alloc(phys2 >> TARGET_PAGE_BITS, alloc);
+ if (ret_p2) {
+ *ret_p2 = p2;
+ }
+ if (phys1 < phys2) {
+ page_lock(p1);
+ page_lock(p2);
+ } else {
+ page_lock(p2);
+ page_lock(p1);
+ }
+}
+
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
user mode. It will change when a dedicated libc will be used. */
@@ -728,48 +1091,6 @@ static inline void *alloc_code_gen_buffer(void)
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
-/* compare a pointer @ptr and a tb_tc @s */
-static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
-{
- if (ptr >= s->ptr + s->size) {
- return 1;
- } else if (ptr < s->ptr) {
- return -1;
- }
- return 0;
-}
-
-static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
-{
- const struct tb_tc *a = ap;
- const struct tb_tc *b = bp;
-
- /*
- * When both sizes are set, we know this isn't a lookup.
- * This is the most likely case: every TB must be inserted; lookups
- * are a lot less frequent.
- */
- if (likely(a->size && b->size)) {
- if (a->ptr > b->ptr) {
- return 1;
- } else if (a->ptr < b->ptr) {
- return -1;
- }
- /* a->ptr == b->ptr should happen only on deletions */
- g_assert(a->size == b->size);
- return 0;
- }
- /*
- * All lookups have either .size field set to 0.
- * From the glib sources we see that @ap is always the lookup key. However
- * the docs provide no guarantee, so we just mark this case as likely.
- */
- if (likely(a->size == 0)) {
- return ptr_cmp_tb_tc(a->ptr, b);
- }
- return ptr_cmp_tb_tc(b->ptr, a);
-}
-
static inline void code_gen_alloc(size_t tb_size)
{
tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
@@ -778,15 +1099,27 @@ static inline void code_gen_alloc(size_t tb_size)
fprintf(stderr, "Could not allocate dynamic translator buffer\n");
exit(1);
}
- tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
- qemu_mutex_init(&tb_ctx.tb_lock);
+}
+
+static bool tb_cmp(const void *ap, const void *bp)
+{
+ const TranslationBlock *a = ap;
+ const TranslationBlock *b = bp;
+
+ return a->pc == b->pc &&
+ a->cs_base == b->cs_base &&
+ a->flags == b->flags &&
+ (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
+ a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
+ a->page_addr[0] == b->page_addr[0] &&
+ a->page_addr[1] == b->page_addr[1];
}
static void tb_htable_init(void)
{
unsigned int mode = QHT_MODE_AUTO_RESIZE;
- qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
+ qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
@@ -809,14 +1142,12 @@ void tcg_exec_init(unsigned long tb_size)
/*
* Allocate a new translation block. Flush the translation buffer if
* too many translation blocks or too much generated code.
- *
- * Called with tb_lock held.
*/
static TranslationBlock *tb_alloc(target_ulong pc)
{
TranslationBlock *tb;
- assert_tb_locked();
+ assert_memory_lock();
tb = tcg_tb_alloc(tcg_ctx);
if (unlikely(tb == NULL)) {
@@ -825,16 +1156,10 @@ static TranslationBlock *tb_alloc(target_ulong pc)
return tb;
}
-/* Called with tb_lock held. */
-void tb_remove(TranslationBlock *tb)
-{
- assert_tb_locked();
-
- g_tree_remove(tb_ctx.tb_tree, &tb->tc);
-}
-
+/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
+ assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
g_free(p->code_bitmap);
p->code_bitmap = NULL;
@@ -854,8 +1179,10 @@ static void page_flush_tb_1(int level, void **lp)
PageDesc *pd = *lp;
for (i = 0; i < V_L2_SIZE; ++i) {
- pd[i].first_tb = NULL;
+ page_lock(&pd[i]);
+ pd[i].first_tb = (uintptr_t)NULL;
invalidate_page_bitmap(pd + i);
+ page_unlock(&pd[i]);
}
} else {
void **pp = *lp;
@@ -887,8 +1214,7 @@ static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
- tb_lock();
-
+ mmap_lock();
/* If it has already been done on request of another CPU,
* just retry.
*/
@@ -897,10 +1223,10 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
}
if (DEBUG_TB_FLUSH_GATE) {
- size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
+ size_t nb_tbs = tcg_nb_tbs();
size_t host_size = 0;
- g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
+ tcg_tb_foreach(tb_host_size_iter, &host_size);
printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
}
@@ -909,10 +1235,6 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
cpu_tb_jmp_cache_clear(cpu);
}
- /* Increment the refcount first so that destroy acts as a reset */
- g_tree_ref(tb_ctx.tb_tree);
- g_tree_destroy(tb_ctx.tb_tree);
-
qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
page_flush_tb();
@@ -922,7 +1244,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
done:
- tb_unlock();
+ mmap_unlock();
}
void tb_flush(CPUState *cpu)
@@ -956,7 +1278,7 @@ do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
/* verify that all the pages have correct rights for code
*
- * Called with tb_lock held.
+ * Called with mmap_lock held.
*/
static void tb_invalidate_check(target_ulong address)
{
@@ -986,51 +1308,75 @@ static void tb_page_check(void)
#endif /* CONFIG_USER_ONLY */
-static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
+/*
+ * user-mode: call with mmap_lock held
+ * !user-mode: call with @pd->lock held
+ */
+static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
TranslationBlock *tb1;
+ uintptr_t *pprev;
unsigned int n1;
- for (;;) {
- tb1 = *ptb;
- n1 = (uintptr_t)tb1 & 3;
- tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
+ assert_page_locked(pd);
+ pprev = &pd->first_tb;
+ PAGE_FOR_EACH_TB(pd, tb1, n1) {
if (tb1 == tb) {
- *ptb = tb1->page_next[n1];
- break;
+ *pprev = tb1->page_next[n1];
+ return;
}
- ptb = &tb1->page_next[n1];
+ pprev = &tb1->page_next[n1];
}
+ g_assert_not_reached();
}
-/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
-static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
+/* remove @orig from its @n_orig-th jump list */
+static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
- TranslationBlock *tb1;
- uintptr_t *ptb, ntb;
- unsigned int n1;
+ uintptr_t ptr, ptr_locked;
+ TranslationBlock *dest;
+ TranslationBlock *tb;
+ uintptr_t *pprev;
+ int n;
- ptb = &tb->jmp_list_next[n];
- if (*ptb) {
- /* find tb(n) in circular list */
- for (;;) {
- ntb = *ptb;
- n1 = ntb & 3;
- tb1 = (TranslationBlock *)(ntb & ~3);
- if (n1 == n && tb1 == tb) {
- break;
- }
- if (n1 == 2) {
- ptb = &tb1->jmp_list_first;
- } else {
- ptb = &tb1->jmp_list_next[n1];
- }
- }
- /* now we can suppress tb(n) from the list */
- *ptb = tb->jmp_list_next[n];
+ /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
+ ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
+ dest = (TranslationBlock *)(ptr & ~1);
+ if (dest == NULL) {
+ return;
+ }
- tb->jmp_list_next[n] = (uintptr_t)NULL;
+ qemu_spin_lock(&dest->jmp_lock);
+ /*
+ * While acquiring the lock, the jump might have been removed if the
+ * destination TB was invalidated; check again.
+ */
+ ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
+ if (ptr_locked != ptr) {
+ qemu_spin_unlock(&dest->jmp_lock);
+ /*
+ * The only possibility is that the jump was unlinked via
+ * tb_jump_unlink(dest). Seeing here another destination would be a bug,
+ * because we set the LSB above.
+ */
+ g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
+ return;
+ }
+ /*
+ * We first acquired the lock, and since the destination pointer matches,
+ * we know for sure that @orig is in the jmp list.
+ */
+ pprev = &dest->jmp_list_head;
+ TB_FOR_EACH_JMP(dest, tb, n) {
+ if (tb == orig && n == n_orig) {
+ *pprev = tb->jmp_list_next[n];
+ /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
+ qemu_spin_unlock(&dest->jmp_lock);
+ return;
+ }
+ pprev = &tb->jmp_list_next[n];
}
+ g_assert_not_reached();
}
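
The atomic_or_fetch() at the top of tb_remove_from_jmp_list() is what makes the cmpxchg in tb_add_jump() safe: once the low bit of jmp_dest is set, a compare-and-swap that expects 0 can never claim the slot again. A tiny sketch of that poisoning step, using atomic_fetch_or (which, unlike atomic_or_fetch, returns the old value) and hypothetical names:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static atomic_uintptr_t jmp_dest;   /* 0 = unclaimed, low bit set = poisoned */

    static bool try_claim(uintptr_t dest)
    {
        uintptr_t expected = 0;
        return atomic_compare_exchange_strong(&jmp_dest, &expected, dest);
    }

    int main(void)
    {
        uintptr_t old = atomic_fetch_or(&jmp_dest, 1);  /* poison the link */

        assert(old == 0);                /* nothing was linked yet */
        assert(!try_claim(0x1000));      /* all later claims are refused */
        return 0;
    }
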
/* reset the jump entry 'n' of a TB so that it is not chained to
@@ -1042,59 +1388,60 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
}
/* remove any jumps to the TB */
-static inline void tb_jmp_unlink(TranslationBlock *tb)
+static inline void tb_jmp_unlink(TranslationBlock *dest)
{
- TranslationBlock *tb1;
- uintptr_t *ptb, ntb;
- unsigned int n1;
+ TranslationBlock *tb;
+ int n;
- ptb = &tb->jmp_list_first;
- for (;;) {
- ntb = *ptb;
- n1 = ntb & 3;
- tb1 = (TranslationBlock *)(ntb & ~3);
- if (n1 == 2) {
- break;
- }
- tb_reset_jump(tb1, n1);
- *ptb = tb1->jmp_list_next[n1];
- tb1->jmp_list_next[n1] = (uintptr_t)NULL;
+ qemu_spin_lock(&dest->jmp_lock);
+
+ TB_FOR_EACH_JMP(dest, tb, n) {
+ tb_reset_jump(tb, n);
+ atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
+ /* No need to clear the list entry; setting the dest ptr is enough */
}
+ dest->jmp_list_head = (uintptr_t)NULL;
+
+ qemu_spin_unlock(&dest->jmp_lock);
}
-/* invalidate one TB
- *
- * Called with tb_lock held.
+/*
+ * In user-mode, call with mmap_lock held.
+ * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
+ * locks held.
*/
-void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
+static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
CPUState *cpu;
PageDesc *p;
uint32_t h;
tb_page_addr_t phys_pc;
- assert_tb_locked();
+ assert_memory_lock();
+ /* make sure no further incoming jumps will be chained to this TB */
+ qemu_spin_lock(&tb->jmp_lock);
atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
+ qemu_spin_unlock(&tb->jmp_lock);
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
}
/* remove the TB from the page list */
- if (tb->page_addr[0] != page_addr) {
+ if (rm_from_page_list) {
p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
- tb_page_remove(&p->first_tb, tb);
- invalidate_page_bitmap(p);
- }
- if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
- p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
- tb_page_remove(&p->first_tb, tb);
+ tb_page_remove(p, tb);
invalidate_page_bitmap(p);
+ if (tb->page_addr[1] != -1) {
+ p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+ tb_page_remove(p, tb);
+ invalidate_page_bitmap(p);
+ }
}
/* remove the TB from the hash list */
@@ -1112,21 +1459,41 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
/* suppress any remaining jumps to this TB */
tb_jmp_unlink(tb);
- tb_ctx.tb_phys_invalidate_count++;
+ atomic_set(&tcg_ctx->tb_phys_invalidate_count,
+ tcg_ctx->tb_phys_invalidate_count + 1);
+}
+
+static void tb_phys_invalidate__locked(TranslationBlock *tb)
+{
+ do_tb_phys_invalidate(tb, true);
+}
+
+/* invalidate one TB
+ *
+ * Called with mmap_lock held in user-mode.
+ */
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
+{
+ if (page_addr == -1) {
+ page_lock_tb(tb);
+ do_tb_phys_invalidate(tb, true);
+ page_unlock_tb(tb);
+ } else {
+ do_tb_phys_invalidate(tb, false);
+ }
}
#ifdef CONFIG_SOFTMMU
+/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
int n, tb_start, tb_end;
TranslationBlock *tb;
+ assert_page_locked(p);
p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
- tb = p->first_tb;
- while (tb != NULL) {
- n = (uintptr_t)tb & 3;
- tb = (TranslationBlock *)((uintptr_t)tb & ~3);
+ PAGE_FOR_EACH_TB(p, tb, n) {
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
@@ -1141,7 +1508,6 @@ static void build_page_bitmap(PageDesc *p)
tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
}
bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
- tb = tb->page_next[n];
}
}
#endif
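
build_page_bitmap() exists so that tb_invalidate_phys_page_fast() can cheaply tell whether a write actually touches translated code on the page. A standalone sketch of the idea, with a hypothetical 4 KiB page and hand-rolled bit helpers instead of QEMU's bitmap API:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long code_bitmap[PAGE_SIZE / BITS_PER_LONG];

    static void bitmap_set_range(unsigned start, unsigned len)
    {
        for (unsigned i = start; i < start + len; i++) {
            code_bitmap[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
        }
    }

    static bool range_hits_code(unsigned start, unsigned len)
    {
        for (unsigned i = start; i < start + len; i++) {
            if (code_bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))) {
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        /* pretend one TB occupies bytes [0x100, 0x180) of this page */
        bitmap_set_range(0x100, 0x80);

        printf("write at 0x40 hits code: %d\n", range_hits_code(0x40, 8));   /* 0 */
        printf("write at 0x104 hits code: %d\n", range_hits_code(0x104, 8)); /* 1 */
        return 0;
    }
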
@@ -1149,24 +1515,23 @@ static void build_page_bitmap(PageDesc *p)
/* add the tb in the target page and protect it if necessary
*
* Called with mmap_lock held for user-mode emulation.
+ * Called with @p->lock held in !user-mode.
*/
-static inline void tb_alloc_page(TranslationBlock *tb,
- unsigned int n, tb_page_addr_t page_addr)
+static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
+ unsigned int n, tb_page_addr_t page_addr)
{
- PageDesc *p;
#ifndef CONFIG_USER_ONLY
bool page_already_protected;
#endif
- assert_memory_lock();
+ assert_page_locked(p);
tb->page_addr[n] = page_addr;
- p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
- page_already_protected = p->first_tb != NULL;
+ page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
- p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
+ p->first_tb = (uintptr_t)tb | n;
invalidate_page_bitmap(p);
#if defined(CONFIG_USER_ONLY)
@@ -1209,18 +1574,35 @@ static inline void tb_alloc_page(TranslationBlock *tb,
* (-1) to indicate that only one page contains the TB.
*
* Called with mmap_lock held for user-mode emulation.
+ *
+ * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
+ * Note that in !user-mode, another thread might have already added a TB
+ * for the same block of guest code that @tb corresponds to. In that case,
+ * the caller should discard the original @tb, and use instead the returned TB.
*/
-static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
- tb_page_addr_t phys_page2)
+static TranslationBlock *
+tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
+ tb_page_addr_t phys_page2)
{
+ PageDesc *p;
+ PageDesc *p2 = NULL;
+ void *existing_tb = NULL;
uint32_t h;
assert_memory_lock();
- /* add in the page list */
- tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
- if (phys_page2 != -1) {
- tb_alloc_page(tb, 1, phys_page2);
+ /*
+ * Add the TB to the page list, acquiring the pages' locks first.
+ * We keep the locks held until after inserting the TB in the hash table,
+ * so that if the insertion fails we know for sure that the TBs are still
+ * in the page descriptors.
+ * Note that inserting into the hash table first isn't an option, since
+ * we can only insert TBs that are fully initialized.
+ */
+ page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
+ tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
+ if (p2) {
+ tb_page_add(p2, tb, 1, phys_page2);
} else {
tb->page_addr[1] = -1;
}
@@ -1228,13 +1610,30 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
/* add in the hash table */
h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
tb->trace_vcpu_dstate);
- qht_insert(&tb_ctx.htable, tb, h);
+ qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
+
+ /* remove TB from the page(s) if we couldn't insert it */
+ if (unlikely(existing_tb)) {
+ tb_page_remove(p, tb);
+ invalidate_page_bitmap(p);
+ if (p2) {
+ tb_page_remove(p2, tb);
+ invalidate_page_bitmap(p2);
+ }
+ tb = existing_tb;
+ }
+
+ if (p2) {
+ page_unlock(p2);
+ }
+ page_unlock(p);
#ifdef CONFIG_USER_ONLY
if (DEBUG_TB_CHECK_GATE) {
tb_page_check();
}
#endif
+ return tb;
}
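
tb_link_page() now returns either @tb or an already-existing equivalent TB, and tb_gen_code() discards its freshly translated block when it loses that race. A reduced sketch of the publish-or-adopt pattern, using a mutex-protected array in place of QHT (hypothetical types; hash collisions between different pcs are ignored for brevity):

    #include <pthread.h>
    #include <stdlib.h>

    struct block { unsigned long pc; /* ...generated code would follow... */ };

    #define TABLE_SIZE 64
    static struct block *table[TABLE_SIZE];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Insert @b keyed by pc; return @b if inserted, else the existing block. */
    static struct block *insert_or_existing(struct block *b)
    {
        unsigned idx = b->pc % TABLE_SIZE;
        struct block *ret;

        pthread_mutex_lock(&table_lock);
        if (table[idx] && table[idx]->pc == b->pc) {
            ret = table[idx];           /* somebody published this pc first */
        } else {
            table[idx] = b;
            ret = b;
        }
        pthread_mutex_unlock(&table_lock);
        return ret;
    }

    /* Caller pattern mirroring tb_gen_code(): translate, try to publish,
     * and throw the duplicate away if we lost the race. */
    static struct block *publish(unsigned long pc)
    {
        struct block *b = malloc(sizeof(*b));
        struct block *existing;

        b->pc = pc;
        existing = insert_or_existing(b);
        if (existing != b) {
            free(b);                    /* QEMU instead rewinds code_gen_ptr */
        }
        return existing;
    }

    int main(void)
    {
        return publish(0x1234) == publish(0x1234) ? 0 : 1;
    }
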
/* Called with mmap_lock held for user mode emulation. */
@@ -1243,7 +1642,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
- TranslationBlock *tb;
+ TranslationBlock *tb, *existing_tb;
tb_page_addr_t phys_pc, phys_page2;
target_ulong virt_page2;
tcg_insn_unit *gen_code_buf;
@@ -1367,10 +1766,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
CODE_GEN_ALIGN));
/* init jump list */
- assert(((uintptr_t)tb & 3) == 0);
- tb->jmp_list_first = (uintptr_t)tb | 2;
+ qemu_spin_init(&tb->jmp_lock);
+ tb->jmp_list_head = (uintptr_t)NULL;
tb->jmp_list_next[0] = (uintptr_t)NULL;
tb->jmp_list_next[1] = (uintptr_t)NULL;
+ tb->jmp_dest[0] = (uintptr_t)NULL;
+ tb->jmp_dest[1] = (uintptr_t)NULL;
/* init original jump addresses which have been set during tcg_gen_code() */
if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
@@ -1386,66 +1787,36 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
- /* As long as consistency of the TB stuff is provided by tb_lock in user
- * mode and is implicit in single-threaded softmmu emulation, no explicit
- * memory barrier is required before tb_link_page() makes the TB visible
- * through the physical hash table and physical page list.
+ /*
+ * No explicit memory barrier is required -- tb_link_page() makes the
+ * TB visible in a consistent state.
*/
- tb_link_page(tb, phys_pc, phys_page2);
- g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
- return tb;
-}
+ existing_tb = tb_link_page(tb, phys_pc, phys_page2);
+ /* if the TB already exists, discard what we just translated */
+ if (unlikely(existing_tb != tb)) {
+ uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
-/*
- * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
- * 'is_cpu_write_access' should be true if called from a real cpu write
- * access: the virtual CPU will exit the current TB if code is modified inside
- * this TB.
- *
- * Called with mmap_lock held for user-mode emulation, grabs tb_lock
- * Called with tb_lock held for system-mode emulation
- */
-static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
-{
- while (start < end) {
- tb_invalidate_phys_page_range(start, end, 0);
- start &= TARGET_PAGE_MASK;
- start += TARGET_PAGE_SIZE;
+ orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
+ atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
+ return existing_tb;
}
+ tcg_tb_insert(tb);
+ return tb;
}
-#ifdef CONFIG_SOFTMMU
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
-{
- assert_tb_locked();
- tb_invalidate_phys_range_1(start, end);
-}
-#else
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
-{
- assert_memory_lock();
- tb_lock();
- tb_invalidate_phys_range_1(start, end);
- tb_unlock();
-}
-#endif
/*
- * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end must refer to the *same* physical page.
- * 'is_cpu_write_access' should be true if called from a real cpu write
- * access: the virtual CPU will exit the current TB if code is modified inside
- * this TB.
- *
- * Called with tb_lock/mmap_lock held for user-mode emulation
- * Called with tb_lock held for system-mode emulation
+ * @p must be non-NULL.
+ * user-mode: call with mmap_lock held.
+ * !user-mode: call with all @pages locked.
*/
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
- int is_cpu_write_access)
+static void
+tb_invalidate_phys_page_range__locked(struct page_collection *pages,
+ PageDesc *p, tb_page_addr_t start,
+ tb_page_addr_t end,
+ int is_cpu_write_access)
{
- TranslationBlock *tb, *tb_next;
+ TranslationBlock *tb;
tb_page_addr_t tb_start, tb_end;
- PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
CPUState *cpu = current_cpu;
@@ -1458,13 +1829,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
- assert_memory_lock();
- assert_tb_locked();
+ assert_page_locked(p);
- p = page_find(start >> TARGET_PAGE_BITS);
- if (!p) {
- return;
- }
#if defined(TARGET_HAS_PRECISE_SMC)
if (cpu != NULL) {
env = cpu->env_ptr;
@@ -1474,11 +1840,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all
the code */
- tb = p->first_tb;
- while (tb != NULL) {
- n = (uintptr_t)tb & 3;
- tb = (TranslationBlock *)((uintptr_t)tb & ~3);
- tb_next = tb->page_next[n];
+ PAGE_FOR_EACH_TB(p, tb, n) {
+ assert_page_locked(p);
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
@@ -1496,11 +1859,11 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
current_tb = NULL;
if (cpu->mem_io_pc) {
/* now we have a real cpu fault */
- current_tb = tb_find_pc(cpu->mem_io_pc);
+ current_tb = tcg_tb_lookup(cpu->mem_io_pc);
}
}
if (current_tb == tb &&
- (current_tb->cflags & CF_COUNT_MASK) != 1) {
+ (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
@@ -1514,9 +1877,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
&current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
- tb_phys_invalidate(tb, -1);
+ tb_phys_invalidate__locked(tb);
}
- tb = tb_next;
}
#if !defined(CONFIG_USER_ONLY)
/* if no code remaining, no need to continue to use slow writes */
@@ -1527,19 +1889,82 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
#endif
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
+ page_collection_unlock(pages);
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
+ mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
#endif
}
+/*
+ * Invalidate all TBs which intersect with the target physical address range
+ * [start;end[. NOTE: start and end must refer to the *same* physical page.
+ * 'is_cpu_write_access' should be true if called from a real cpu write
+ * access: the virtual CPU will exit the current TB if code is modified inside
+ * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation
+ */
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
+ int is_cpu_write_access)
+{
+ struct page_collection *pages;
+ PageDesc *p;
+
+ assert_memory_lock();
+
+ p = page_find(start >> TARGET_PAGE_BITS);
+ if (p == NULL) {
+ return;
+ }
+ pages = page_collection_lock(start, end);
+ tb_invalidate_phys_page_range__locked(pages, p, start, end,
+ is_cpu_write_access);
+ page_collection_unlock(pages);
+}
+
+/*
+ * Invalidate all TBs which intersect with the target physical address range
+ * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * 'is_cpu_write_access' should be true if called from a real cpu write
+ * access: the virtual CPU will exit the current TB if code is modified inside
+ * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+ struct page_collection *pages;
+ tb_page_addr_t next;
+
+ assert_memory_lock();
+
+ pages = page_collection_lock(start, end);
+ for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ start < end;
+ start = next, next += TARGET_PAGE_SIZE) {
+ PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
+ tb_page_addr_t bound = MIN(next, end);
+
+ if (pd == NULL) {
+ continue;
+ }
+ tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
+ }
+ page_collection_unlock(pages);
+}
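
The loop in the new tb_invalidate_phys_range() walks the range one target page at a time, clamping each chunk to its page boundary with MIN(next, end). A small arithmetic sketch of that walk, assuming hypothetical 4 KiB pages:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS 12
    #define PAGE_SIZE (UINT64_C(1) << PAGE_BITS)
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void walk_range(uint64_t start, uint64_t end)
    {
        uint64_t next;

        for (next = (start & PAGE_MASK) + PAGE_SIZE;
             start < end;
             start = next, next += PAGE_SIZE) {
            uint64_t bound = MIN(next, end);

            printf("page 0x%" PRIx64 ": invalidate [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
                   start >> PAGE_BITS, start, bound);
        }
    }

    int main(void)
    {
        walk_range(0x1f80, 0x3010);   /* spans three pages: 0x1, 0x2 and 0x3 */
        return 0;
    }
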
+
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
* Called via softmmu_template.h when code areas are written to with
* iothread mutex not held.
+ *
+ * Call with all @pages in the range [@start, @start + len[ locked.
*/
-void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
+void tb_invalidate_phys_page_fast(struct page_collection *pages,
+ tb_page_addr_t start, int len)
{
PageDesc *p;
@@ -1558,11 +1983,10 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
if (!p) {
return;
}
+
+ assert_page_locked(p);
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
- /* build code bitmap. FIXME: writes should be protected by
- * tb_lock, reads by tb_lock or RCU.
- */
build_page_bitmap(p);
}
if (p->code_bitmap) {
@@ -1576,7 +2000,7 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
}
} else {
do_invalidate:
- tb_invalidate_phys_page_range(start, start + len, 1);
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
}
}
#else
@@ -1609,22 +2033,19 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
return false;
}
- tb_lock();
- tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
- if (tb && pc != 0) {
- current_tb = tb_find_pc(pc);
+ if (p->first_tb && pc != 0) {
+ current_tb = tcg_tb_lookup(pc);
}
if (cpu != NULL) {
env = cpu->env_ptr;
}
#endif
- while (tb != NULL) {
- n = (uintptr_t)tb & 3;
- tb = (TranslationBlock *)((uintptr_t)tb & ~3);
+ assert_page_locked(p);
+ PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
- (current_tb->cflags & CF_COUNT_MASK) != 1) {
+ (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
@@ -1638,36 +2059,20 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, addr);
- tb = tb->page_next[n];
}
- p->first_tb = NULL;
+ p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
- /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
- * back into the cpu_exec loop. */
return true;
}
#endif
- tb_unlock();
return false;
}
#endif
-/*
- * Find the TB 'tb' such that
- * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
- * Return NULL if not found.
- */
-static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
-{
- struct tb_tc s = { .ptr = (void *)tc_ptr };
-
- return g_tree_lookup(tb_ctx.tb_tree, &s);
-}
-
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
@@ -1683,19 +2088,19 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
- tb_lock();
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
- tb_unlock();
rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
-/* Called with tb_lock held. */
+/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu)
{
TranslationBlock *tb;
- tb = tb_find_pc(cpu->mem_io_pc);
+ assert_memory_lock();
+
+ tb = tcg_tb_lookup(cpu->mem_io_pc);
if (tb) {
/* We can use retranslation to find the PC. */
cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
@@ -1728,8 +2133,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
TranslationBlock *tb;
uint32_t n;
- tb_lock();
- tb = tb_find_pc(retaddr);
+ tb = tcg_tb_lookup(retaddr);
if (!tb) {
cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
(void *)retaddr);
@@ -1762,13 +2166,13 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
/* Generate a new TB executing the I/O insn. */
cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
- if (tb->cflags & CF_NOCACHE) {
+ if (tb_cflags(tb) & CF_NOCACHE) {
if (tb->orig_tb) {
/* Invalidate original TB if this TB was generated in
* cpu_exec_nocache() */
tb_phys_invalidate(tb->orig_tb, -1);
}
- tb_remove(tb);
+ tcg_tb_remove(tb);
}
/* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
@@ -1776,9 +2180,6 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* repeating the fault, which is horribly inefficient.
* Better would be to execute just this insn uncached, or generate a
* second new TB.
- *
- * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
- * tb_lock gets reset.
*/
cpu_loop_exit_noexc(cpu);
}
@@ -1839,6 +2240,7 @@ static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
}
struct tb_tree_stats {
+ size_t nb_tbs;
size_t host_size;
size_t target_size;
size_t max_target_size;
@@ -1852,6 +2254,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
const TranslationBlock *tb = value;
struct tb_tree_stats *tst = data;
+ tst->nb_tbs++;
tst->host_size += tb->tc.size;
tst->target_size += tb->size;
if (tb->size > tst->max_target_size) {
@@ -1875,10 +2278,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
struct qht_stats hst;
size_t nb_tbs;
- tb_lock();
-
- nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
- g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
+ tcg_tb_foreach(tb_tree_stats_iter, &tst);
+ nb_tbs = tst.nb_tbs;
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
/*
@@ -1910,11 +2311,9 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
cpu_fprintf(f, "\nStatistics:\n");
cpu_fprintf(f, "TB flush count %u\n",
atomic_read(&tb_ctx.tb_flush_count));
- cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
+ cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
tcg_dump_info(f, cpu_fprintf);
-
- tb_unlock();
}
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
@@ -2182,7 +2581,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
* set the page to PAGE_WRITE and did the TB invalidate for us.
*/
#ifdef TARGET_HAS_PRECISE_SMC
- TranslationBlock *current_tb = tb_find_pc(pc);
+ TranslationBlock *current_tb = tcg_tb_lookup(pc);
if (current_tb) {
current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
}
diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
index ba8e4d63c4..e6cb963d7e 100644
--- a/accel/tcg/translate-all.h
+++ b/accel/tcg/translate-all.h
@@ -23,7 +23,11 @@
/* translate-all.c */
-void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len);
+struct page_collection *page_collection_lock(tb_page_addr_t start,
+ tb_page_addr_t end);
+void page_collection_unlock(struct page_collection *set);
+void tb_invalidate_phys_page_fast(struct page_collection *pages,
+ tb_page_addr_t start, int len);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
diff --git a/docs/devel/multi-thread-tcg.txt b/docs/devel/multi-thread-tcg.txt
index a99b4564c6..06530be1e9 100644
--- a/docs/devel/multi-thread-tcg.txt
+++ b/docs/devel/multi-thread-tcg.txt
@@ -61,6 +61,7 @@ have their block-to-block jumps patched.
Global TCG State
----------------
+### User-mode emulation
We need to protect the entire code generation cycle including any post
generation patching of the translated code. This also implies a shared
translation buffer which contains code running on all cores. Any
@@ -75,9 +76,11 @@ patching.
(Current solution)
-Mainly as part of the linux-user work all code generation is
-serialised with a tb_lock(). For the SoftMMU tb_lock() also takes the
-place of mmap_lock() in linux-user.
+Code generation is serialised with mmap_lock().
+
+### !User-mode emulation
+Each vCPU has its own TCG context and associated TCG region, thereby
+requiring no locking.
Translation Blocks
------------------
@@ -131,15 +134,20 @@ DESIGN REQUIREMENT: Safely handle invalidation of TBs
The direct jump themselves are updated atomically by the TCG
tb_set_jmp_target() code. Modification to the linked lists that allow
-searching for linked pages are done under the protect of the
-tb_lock().
+searching for linked pages are done under the protection of tb->jmp_lock,
+where tb is the destination block of a jump. Each origin block keeps a
+pointer to its destinations so that the appropriate lock can be acquired before
+iterating over a jump list.
-The global page table is protected by the tb_lock() in system-mode and
-mmap_lock() in linux-user mode.
+The global page table is a lockless radix tree; cmpxchg is used
+to atomically insert new elements.
The lookup caches are updated atomically and the lookup hash uses QHT
which is designed for concurrent safe lookup.
+Parallel code generation is supported. QHT is used at insertion time
+as the synchronization point across threads, thereby ensuring that we only
+keep track of a single TranslationBlock for each guest code block.
Memory maps and TLBs
--------------------
@@ -190,7 +198,7 @@ work as "safe work" and exiting the cpu run loop. This ensure by the
time execution restarts all flush operations have completed.
TLB flag updates are all done atomically and are also protected by the
-tb_lock() which is used by the functions that update the TLB in bulk.
+corresponding page lock.
(Known limitation)
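
To illustrate the per-destination-TB locking convention described above: any thread that wants to walk or edit a TB's incoming-jump list, or flip its CF_INVALID flag, takes that TB's jmp_lock first. The helper below is a hedged sketch; only jmp_lock, jmp_list_head, tb_cflags() and CF_INVALID come from this series, the function itself is made up:

    /* Sketch: invalidate 'dest' so no new jumps can be chained to it. */
    static void sketch_mark_invalid(TranslationBlock *dest)
    {
        qemu_spin_lock(&dest->jmp_lock);
        /* setters of CF_INVALID must hold the destination's jmp_lock */
        atomic_set(&dest->cflags, tb_cflags(dest) | CF_INVALID);
        /* ... walk dest->jmp_list_head here to unlink incoming jumps ... */
        qemu_spin_unlock(&dest->jmp_lock);
    }
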
diff --git a/exec.c b/exec.c
index ebadc0e302..28f9bdcbf9 100644
--- a/exec.c
+++ b/exec.c
@@ -1031,9 +1031,7 @@ const char *parse_cpu_model(const char *cpu_model)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
mmap_lock();
- tb_lock();
tb_invalidate_phys_page_range(pc, pc + 1, 0);
- tb_unlock();
mmap_unlock();
}
#else
@@ -2644,21 +2642,21 @@ void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
ndi->ram_addr = ram_addr;
ndi->mem_vaddr = mem_vaddr;
ndi->size = size;
- ndi->locked = false;
+ ndi->pages = NULL;
assert(tcg_enabled());
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- ndi->locked = true;
- tb_lock();
- tb_invalidate_phys_page_fast(ram_addr, size);
+ ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
+ tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
}
}
/* Called within RCU critical section. */
void memory_notdirty_write_complete(NotDirtyInfo *ndi)
{
- if (ndi->locked) {
- tb_unlock();
+ if (ndi->pages) {
+ page_collection_unlock(ndi->pages);
+ ndi->pages = NULL;
}
/* Set both VGA and migration bits for simplicity and to remove
@@ -2745,18 +2743,16 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
}
cpu->watchpoint_hit = wp;
- /* Both tb_lock and iothread_mutex will be reset when
- * cpu_loop_exit or cpu_loop_exit_noexc longjmp
- * back into the cpu_exec main loop.
- */
- tb_lock();
+ mmap_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
+ mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
+ mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
}
@@ -3147,9 +3143,9 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
- tb_lock();
+ mmap_lock();
tb_invalidate_phys_range(addr, addr + length);
- tb_unlock();
+ mmap_unlock();
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 0b58e262f3..18b40d6145 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -23,7 +23,7 @@ typedef struct CPUListState {
FILE *file;
} CPUListState;
-/* The CPU list lock nests outside tb_lock/tb_unlock. */
+/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 8bbea787a9..25a6f28ab8 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -345,7 +345,7 @@ struct TranslationBlock {
#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
#define CF_NOCACHE 0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT 0x00020000
-#define CF_INVALID 0x00040000 /* TB is stale. Setters need tb_lock */
+#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK \
@@ -359,10 +359,14 @@ struct TranslationBlock {
/* original tb when cflags has CF_NOCACHE */
struct TranslationBlock *orig_tb;
/* first and second physical page containing code. The lower bit
- of the pointer tells the index in page_next[] */
- struct TranslationBlock *page_next[2];
+ of the pointer tells the index in page_next[].
+ The list is protected by the TB's page('s) lock(s) */
+ uintptr_t page_next[2];
tb_page_addr_t page_addr[2];
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+ QemuSpin jmp_lock;
+
/* The following data are used to directly call another TB from
* the code of this one. This can be done either by emitting direct or
* indirect native jump instructions. These jumps are reset so that the TB
@@ -374,20 +378,26 @@ struct TranslationBlock {
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
uintptr_t jmp_target_arg[2]; /* target address or offset */
- /* Each TB has an associated circular list of TBs jumping to this one.
- * jmp_list_first points to the first TB jumping to this one.
- * jmp_list_next is used to point to the next TB in a list.
- * Since each TB can have two jumps, it can participate in two lists.
- * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
- * TranslationBlock structure, but the two least significant bits of
- * them are used to encode which data field of the pointed TB should
- * be used to traverse the list further from that TB:
- * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
- * In other words, 0/1 tells which jump is used in the pointed TB,
- * and 2 means that this is a pointer back to the target TB of this list.
+ /*
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+ * Each TB can have two outgoing jumps, and therefore can participate
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
+ * significant bit (LSB) of the pointers in these lists is used to encode
+ * which of the two list entries is to be used in the pointed TB.
+ *
+ * List traversals are protected by jmp_lock. The destination TB of each
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+ * can be acquired from any origin TB.
+ *
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+ * being invalidated, so that no further outgoing jumps from it can be set.
+ *
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+ * to a destination TB that has CF_INVALID set.
*/
+ uintptr_t jmp_list_head;
uintptr_t jmp_list_next[2];
- uintptr_t jmp_list_first;
+ uintptr_t jmp_dest[2];
};
extern bool parallel_cpus;
@@ -405,7 +415,6 @@ static inline uint32_t curr_cflags(void)
| (use_icount ? CF_USE_ICOUNT : 0);
}
-void tb_remove(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
@@ -431,9 +440,13 @@ extern uintptr_t tci_tb_ptr;
smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
-void tb_lock(void);
-void tb_unlock(void);
-void tb_lock_reset(void);
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void)
+{
+}
+#endif
#if !defined(CONFIG_USER_ONLY)
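
The comment block above describes LSB-tagged pointers in jmp_list_next[] and jmp_dest[]. A hedged illustration of how such a tagged value decodes; the helper names are made up, the real code has its own accessors:

    /* Illustration only: decode an LSB-tagged jump-list pointer. */
    static inline TranslationBlock *jmp_tagged_tb(uintptr_t tagged)
    {
        return (TranslationBlock *)(tagged & ~(uintptr_t)1);
    }

    static inline unsigned jmp_tagged_idx(uintptr_t tagged)
    {
        return tagged & 1;   /* which jmp_list_next[] slot to follow next */
    }
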
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index 56c25c0ef7..bb08fa4d2f 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -49,6 +49,8 @@ void mtree_print_dispatch(fprintf_function mon, void *f,
struct AddressSpaceDispatch *d,
MemoryRegion *root);
+struct page_collection;
+
/* Opaque struct for passing info from memory_notdirty_write_prepare()
* to memory_notdirty_write_complete(). Callers should treat all fields
* as private, with the exception of @active.
@@ -60,10 +62,10 @@ void mtree_print_dispatch(fprintf_function mon, void *f,
*/
typedef struct {
CPUState *cpu;
+ struct page_collection *pages;
ram_addr_t ram_addr;
vaddr mem_vaddr;
unsigned size;
- bool locked;
bool active;
} NotDirtyInfo;
@@ -91,7 +93,7 @@ typedef struct {
*
* This must only be called if we are using TCG; it will assert otherwise.
*
- * We may take a lock in the prepare call, so callers must ensure that
+ * We may take locks in the prepare call, so callers must ensure that
* they don't exit (via longjump or otherwise) without calling complete.
*
* This call must only be made inside an RCU critical section.
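
A hedged usage sketch of the prepare/complete pair with the new page_collection plumbing. The argument order of memory_notdirty_write_prepare() is an assumption here and should be checked against the header:

    /* Per the contract above, called within an RCU critical section. */
    static void notdirty_store_sketch(CPUState *cpu, vaddr mem_vaddr,
                                      ram_addr_t ram_addr, unsigned size)
    {
        NotDirtyInfo ndi;

        /* assumed argument order: (ndi, cpu, mem_vaddr, ram_addr, size) */
        memory_notdirty_write_prepare(&ndi, cpu, mem_vaddr, ram_addr, size);
        /* ... perform the store into guest RAM ... */
        memory_notdirty_write_complete(&ndi);  /* unlocks ndi.pages if taken */
    }
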
diff --git a/include/exec/tb-context.h b/include/exec/tb-context.h
index 1d41202485..feb585e0a7 100644
--- a/include/exec/tb-context.h
+++ b/include/exec/tb-context.h
@@ -31,14 +31,10 @@ typedef struct TBContext TBContext;
struct TBContext {
- GTree *tb_tree;
struct qht htable;
- /* any access to the tbs or the page table must use this lock */
- QemuMutex tb_lock;
/* statistics */
unsigned tb_flush_count;
- int tb_phys_invalidate_count;
};
extern TBContext tb_ctx;
diff --git a/include/qemu/qht.h b/include/qemu/qht.h
index 531aa95325..1fb9116fa0 100644
--- a/include/qemu/qht.h
+++ b/include/qemu/qht.h
@@ -11,8 +11,11 @@
#include "qemu/thread.h"
#include "qemu/qdist.h"
+typedef bool (*qht_cmp_func_t)(const void *a, const void *b);
+
struct qht {
struct qht_map *map;
+ qht_cmp_func_t cmp;
QemuMutex lock; /* serializes setters of ht->map */
unsigned int mode;
};
@@ -47,10 +50,12 @@ typedef void (*qht_iter_func_t)(struct qht *ht, void *p, uint32_t h, void *up);
/**
* qht_init - Initialize a QHT
* @ht: QHT to be initialized
+ * @cmp: default comparison function. Cannot be NULL.
* @n_elems: number of entries the hash table should be optimized for.
* @mode: bitmask with OR'ed QHT_MODE_*
*/
-void qht_init(struct qht *ht, size_t n_elems, unsigned int mode);
+void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
+ unsigned int mode);
/**
* qht_destroy - destroy a previously initialized QHT
@@ -65,6 +70,7 @@ void qht_destroy(struct qht *ht);
* @ht: QHT to insert to
* @p: pointer to be inserted
* @hash: hash corresponding to @p
+ * @existing: address where the pointer to an existing entry can be copied to
*
* Attempting to insert a NULL @p is a bug.
* Inserting the same pointer @p with different @hash values is a bug.
@@ -73,16 +79,18 @@ void qht_destroy(struct qht *ht);
* inserted into the hash table.
*
* Returns true on success.
- * Returns false if the @p-@hash pair already exists in the hash table.
+ * Returns false if there is an existing entry in the table that is equivalent
+ * (i.e. ht->cmp matches and the hash is the same) to @p-@hash. If @existing
+ * is !NULL, a pointer to this existing entry is copied to it.
*/
-bool qht_insert(struct qht *ht, void *p, uint32_t hash);
+bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing);
/**
- * qht_lookup - Look up a pointer in a QHT
+ * qht_lookup_custom - Look up a pointer using a custom comparison function.
* @ht: QHT to be looked up
- * @func: function to compare existing pointers against @userp
* @userp: pointer to pass to @func
* @hash: hash of the pointer to be looked up
+ * @func: function to compare existing pointers against @userp
*
* Needs to be called under an RCU read-critical section.
*
@@ -94,8 +102,18 @@ bool qht_insert(struct qht *ht, void *p, uint32_t hash);
* Returns the corresponding pointer when a match is found.
* Returns NULL otherwise.
*/
-void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
- uint32_t hash);
+void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
+ qht_lookup_func_t func);
+
+/**
+ * qht_lookup - Look up a pointer in a QHT
+ * @ht: QHT to be looked up
+ * @userp: pointer to pass to the comparison function
+ * @hash: hash of the pointer to be looked up
+ *
+ * Calls qht_lookup_custom() using @ht's default comparison function.
+ */
+void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash);
/**
* qht_remove - remove a pointer from the hash table
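
Putting the updated QHT interface together: qht_init() now takes the default comparison function, qht_lookup() uses it implicitly, and qht_insert() reports a pre-existing equivalent entry through @existing. A hedged sketch; the key type, hash choice and identifiers are illustrative only:

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"
    #include "qemu/qht.h"

    static bool int_eq(const void *a, const void *b)
    {
        return *(const int *)a == *(const int *)b;
    }

    static void qht_example(void)
    {
        struct qht ht;
        static int key = 42;
        uint32_t hash = 42;              /* trivial hash, illustration only */
        void *existing;

        qht_init(&ht, int_eq, 0, QHT_MODE_AUTO_RESIZE);
        if (!qht_insert(&ht, &key, hash, &existing)) {
            /* an equivalent entry already exists; it is now in 'existing' */
        }
        rcu_read_lock();
        g_assert(qht_lookup(&ht, &key, hash) == &key);
        rcu_read_unlock();
        qht_destroy(&ht);
    }
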
diff --git a/linux-user/main.c b/linux-user/main.c
index 78d6d3e7eb..84e9ec9335 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -120,7 +120,6 @@ void fork_start(void)
{
start_exclusive();
mmap_fork_start();
- qemu_mutex_lock(&tb_ctx.tb_lock);
cpu_list_lock();
}
@@ -136,14 +135,12 @@ void fork_end(int child)
QTAILQ_REMOVE(&cpus, cpu, node);
}
}
- qemu_mutex_init(&tb_ctx.tb_lock);
qemu_init_cpu_list();
gdbserver_fork(thread_cpu);
/* qemu_init_cpu_list() takes care of reinitializing the
* exclusive state, so we don't need to end_exclusive() here.
*/
} else {
- qemu_mutex_unlock(&tb_ctx.tb_lock);
cpu_list_unlock();
end_exclusive();
}
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index be3192078d..4562d36d1b 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -1733,7 +1733,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP);
}
tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
break;
case INDEX_op_goto_ptr:
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 56a32a470f..e1fbf465cb 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -1822,7 +1822,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_movi32(s, COND_AL, base, ptr - dil);
}
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, args[0]);
}
break;
case INDEX_op_goto_ptr:
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 5357909fff..e87b0d445e 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -2245,7 +2245,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
(intptr_t)(s->tb_jmp_target_addr + a0));
}
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
@@ -3501,7 +3501,10 @@ static void tcg_target_init(TCGContext *s)
sure of not hitting invalid opcode. */
if (c & bit_OSXSAVE) {
unsigned xcrl, xcrh;
- asm ("xgetbv" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
+ /* The xgetbv instruction is not supported by older versions of
+ * the assembler, so we encode the instruction manually.
+ */
+ asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
if ((xcrl & 6) == 6) {
have_avx1 = (c & bit_AVX) != 0;
have_avx2 = (b7 & bit_AVX2) != 0;
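
For reference, the 6 in the check above selects XCR0 bits 1 and 2, i.e. the SSE and AVX state components; both must be enabled by the OS before AVX instructions may be used. The same test in symbolic form (the macro names are illustrative, not from the patch):

    #define XCR0_SSE  (1u << 1)   /* XMM state enabled */
    #define XCR0_AVX  (1u << 2)   /* YMM state enabled */

    /* equivalent to the (xcrl & 6) == 6 test above */
    bool avx_usable = (xcrl & (XCR0_SSE | XCR0_AVX)) == (XCR0_SSE | XCR0_AVX);
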
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index ca5f1d4894..cff525373b 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -1744,7 +1744,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
}
tcg_out_nop(s);
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 86f7de5f7e..c2f729ee8f 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -2025,10 +2025,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
tcg_out32(s, BCCTR | BO_ALWAYS);
- s->tb_jmp_reset_offset[args[0]] = c = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, args[0]);
if (USE_REG_TB) {
/* For the unlinked case, need to reset TCG_REG_TB. */
- c = -c;
+ c = -tcg_current_code_size(s);
assert(c == (int16_t)c);
tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
}
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 9af6dcef05..17c435ade5 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -1783,7 +1783,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
/* and go there */
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
}
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
/* For the unlinked path of goto_tb, we need to reset
TCG_REG_TB to the beginning of this TB. */
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index bc673bd8c6..04bdc3df5e 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -1388,12 +1388,12 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
tcg_out_nop(s);
}
- s->tb_jmp_reset_offset[a0] = c = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
/* For the unlinked path of goto_tb, we need to reset
TCG_REG_TB to the beginning of this TB. */
if (USE_REG_TB) {
- c = -c;
+ c = -tcg_current_code_size(s);
if (check_fit_i32(c, 13)) {
tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
} else {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 6eeebe0624..f27b22bd3c 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -135,6 +135,12 @@ static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
+struct tcg_region_tree {
+ QemuMutex lock;
+ GTree *tree;
+ /* padding to avoid false sharing is computed at run-time */
+};
+
/*
* We divide code_gen_buffer into equally-sized "regions" that TCG threads
* dynamically allocate from as demand dictates. Given appropriate region
@@ -158,6 +164,13 @@ struct tcg_region_state {
};
static struct tcg_region_state region;
+/*
+ * This is an array of struct tcg_region_tree's, with padding.
+ * We use void * to simplify the computation of region_trees[i]; each
+ * struct is found every tree_size bytes.
+ */
+static void *region_trees;
+static size_t tree_size;
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
@@ -293,8 +306,190 @@ TCGLabel *gen_new_label(void)
return l;
}
+static void set_jmp_reset_offset(TCGContext *s, int which)
+{
+ size_t off = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[which] = off;
+ /* Make sure that we didn't overflow the stored offset. */
+ assert(s->tb_jmp_reset_offset[which] == off);
+}
+
#include "tcg-target.inc.c"
+/* compare a pointer @ptr and a tb_tc @s */
+static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
+{
+ if (ptr >= s->ptr + s->size) {
+ return 1;
+ } else if (ptr < s->ptr) {
+ return -1;
+ }
+ return 0;
+}
+
+static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
+{
+ const struct tb_tc *a = ap;
+ const struct tb_tc *b = bp;
+
+ /*
+ * When both sizes are set, we know this isn't a lookup.
+ * This is the most likely case: every TB must be inserted; lookups
+ * are a lot less frequent.
+ */
+ if (likely(a->size && b->size)) {
+ if (a->ptr > b->ptr) {
+ return 1;
+ } else if (a->ptr < b->ptr) {
+ return -1;
+ }
+ /* a->ptr == b->ptr should happen only on deletions */
+ g_assert(a->size == b->size);
+ return 0;
+ }
+ /*
+ * In a lookup, the key has its .size field set to 0; it can be either argument.
+ * From the glib sources we see that @ap is always the lookup key. However
+ * the docs provide no guarantee, so we just mark this case as likely.
+ */
+ if (likely(a->size == 0)) {
+ return ptr_cmp_tb_tc(a->ptr, b);
+ }
+ return ptr_cmp_tb_tc(b->ptr, a);
+}
+
+static void tcg_region_trees_init(void)
+{
+ size_t i;
+
+ tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
+ region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
+ for (i = 0; i < region.n; i++) {
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
+
+ qemu_mutex_init(&rt->lock);
+ rt->tree = g_tree_new(tb_tc_cmp);
+ }
+}
+
+static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
+{
+ size_t region_idx;
+
+ if (p < region.start_aligned) {
+ region_idx = 0;
+ } else {
+ ptrdiff_t offset = p - region.start_aligned;
+
+ if (offset > region.stride * (region.n - 1)) {
+ region_idx = region.n - 1;
+ } else {
+ region_idx = offset / region.stride;
+ }
+ }
+ return region_trees + region_idx * tree_size;
+}
+
+void tcg_tb_insert(TranslationBlock *tb)
+{
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
+
+ qemu_mutex_lock(&rt->lock);
+ g_tree_insert(rt->tree, &tb->tc, tb);
+ qemu_mutex_unlock(&rt->lock);
+}
+
+void tcg_tb_remove(TranslationBlock *tb)
+{
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
+
+ qemu_mutex_lock(&rt->lock);
+ g_tree_remove(rt->tree, &tb->tc);
+ qemu_mutex_unlock(&rt->lock);
+}
+
+/*
+ * Find the TB 'tb' such that
+ * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
+ * Return NULL if not found.
+ */
+TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
+{
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
+ TranslationBlock *tb;
+ struct tb_tc s = { .ptr = (void *)tc_ptr };
+
+ qemu_mutex_lock(&rt->lock);
+ tb = g_tree_lookup(rt->tree, &s);
+ qemu_mutex_unlock(&rt->lock);
+ return tb;
+}
+
+static void tcg_region_tree_lock_all(void)
+{
+ size_t i;
+
+ for (i = 0; i < region.n; i++) {
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
+
+ qemu_mutex_lock(&rt->lock);
+ }
+}
+
+static void tcg_region_tree_unlock_all(void)
+{
+ size_t i;
+
+ for (i = 0; i < region.n; i++) {
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
+
+ qemu_mutex_unlock(&rt->lock);
+ }
+}
+
+void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
+{
+ size_t i;
+
+ tcg_region_tree_lock_all();
+ for (i = 0; i < region.n; i++) {
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
+
+ g_tree_foreach(rt->tree, func, user_data);
+ }
+ tcg_region_tree_unlock_all();
+}
+
+size_t tcg_nb_tbs(void)
+{
+ size_t nb_tbs = 0;
+ size_t i;
+
+ tcg_region_tree_lock_all();
+ for (i = 0; i < region.n; i++) {
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
+
+ nb_tbs += g_tree_nnodes(rt->tree);
+ }
+ tcg_region_tree_unlock_all();
+ return nb_tbs;
+}
+
+static void tcg_region_tree_reset_all(void)
+{
+ size_t i;
+
+ tcg_region_tree_lock_all();
+ for (i = 0; i < region.n; i++) {
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
+
+ /* Increment the refcount first so that destroy acts as a reset */
+ g_tree_ref(rt->tree);
+ g_tree_destroy(rt->tree);
+ }
+ tcg_region_tree_unlock_all();
+}
+
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
void *start, *end;
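
With TBs now tracked in per-region trees, whole-buffer iteration goes through tcg_tb_foreach(), which holds every region-tree lock for the duration of the walk. A hedged usage sketch; the callback and counter are made up, the GTraverseFunc contract is glib's:

    /* Sketch: count all TBs, equivalent in effect to tcg_nb_tbs(). */
    static gboolean count_tb(gpointer key, gpointer value, gpointer data)
    {
        size_t *count = data;

        (*count)++;
        return FALSE;        /* FALSE means: keep traversing */
    }

    size_t count_all_tbs(void)
    {
        size_t count = 0;

        tcg_tb_foreach(count_tb, &count);
        return count;
    }
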
@@ -380,6 +575,8 @@ void tcg_region_reset_all(void)
g_assert(!err);
}
qemu_mutex_unlock(&region.lock);
+
+ tcg_region_tree_reset_all();
}
#ifdef CONFIG_USER_ONLY
@@ -496,6 +693,8 @@ void tcg_region_init(void)
g_assert(!rc);
}
+ tcg_region_trees_init();
+
/* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
{
@@ -600,6 +799,20 @@ size_t tcg_code_capacity(void)
return capacity;
}
+size_t tcg_tb_phys_invalidate_count(void)
+{
+ unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+ unsigned int i;
+ size_t total = 0;
+
+ for (i = 0; i < n_ctxs; i++) {
+ const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+
+ total += atomic_read(&s->tb_phys_invalidate_count);
+ }
+ return total;
+}
+
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
@@ -3327,7 +3540,10 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
break;
case INDEX_op_insn_start:
if (num_insns >= 0) {
- s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
+ size_t off = tcg_current_code_size(s);
+ s->gen_insn_end_off[num_insns] = off;
+ /* Assert that we do not overflow our stored offset. */
+ assert(s->gen_insn_end_off[num_insns] == off);
}
num_insns++;
for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 509f4d65d2..f9f12378e9 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -695,6 +695,8 @@ struct TCGContext {
/* Threshold to flush the translated code buffer. */
void *code_gen_highwater;
+ size_t tb_phys_invalidate_count;
+
/* Track which vCPU triggers events */
CPUState *cpu; /* *_trans */
@@ -848,14 +850,16 @@ static inline bool tcg_op_buf_full(void)
/* This is not a hard limit, it merely stops translation when
* we have produced "enough" opcodes. We want to limit TB size
* such that a RISC host can reasonably use a 16-bit signed
- * branch within the TB.
+ * branch within the TB. We also need to be mindful of the
+ * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
+ * and TCGContext.gen_insn_end_off[].
*/
- return tcg_ctx->nb_ops >= 8000;
+ return tcg_ctx->nb_ops >= 4000;
}
/* pool based memory allocation */
-/* user-mode: tb_lock must be held for tcg_malloc_internal. */
+/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);
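
The lower opcode budget exists because both jmp_reset_offset[] and gen_insn_end_off[] are 16-bit fields; the new asserts in tcg.c catch silent truncation. The idiom, reduced to a sketch with a hypothetical destination slot:

    /* Sketch of the store-then-verify pattern used for 16-bit offsets. */
    static void store_checked_offset(TCGContext *s, uint16_t *slot)
    {
        size_t off = tcg_current_code_size(s);

        *slot = off;
        assert(*slot == off);   /* fails if the TB outgrew a 16-bit offset */
    }
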
@@ -866,7 +870,14 @@ void tcg_region_reset_all(void);
size_t tcg_code_size(void);
size_t tcg_code_capacity(void);
-/* user-mode: Called with tb_lock held. */
+void tcg_tb_insert(TranslationBlock *tb);
+void tcg_tb_remove(TranslationBlock *tb);
+size_t tcg_tb_phys_invalidate_count(void);
+TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
+void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
+size_t tcg_nb_tbs(void);
+
+/* user-mode: Called with mmap_lock held. */
static inline void *tcg_malloc(int size)
{
TCGContext *s = tcg_ctx;
diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c
index cc949bea85..62ed097254 100644
--- a/tcg/tci/tcg-target.inc.c
+++ b/tcg/tci/tcg-target.inc.c
@@ -574,7 +574,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
/* Indirect jump method. */
TODO();
}
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, args[0]);
break;
case INDEX_op_br:
tci_out_label(s, arg_label(args[0]));
diff --git a/tests/qht-bench.c b/tests/qht-bench.c
index 4cabdfd62a..f492b3a20a 100644
--- a/tests/qht-bench.c
+++ b/tests/qht-bench.c
@@ -93,10 +93,10 @@ static void usage_complete(int argc, char *argv[])
exit(-1);
}
-static bool is_equal(const void *obj, const void *userp)
+static bool is_equal(const void *ap, const void *bp)
{
- const long *a = obj;
- const long *b = userp;
+ const long *a = ap;
+ const long *b = bp;
return *a == *b;
}
@@ -150,7 +150,7 @@ static void do_rw(struct thread_info *info)
p = &keys[info->r & (lookup_range - 1)];
hash = h(*p);
- read = qht_lookup(&ht, is_equal, p, hash);
+ read = qht_lookup(&ht, p, hash);
if (read) {
stats->rd++;
} else {
@@ -162,8 +162,8 @@ static void do_rw(struct thread_info *info)
if (info->write_op) {
bool written = false;
- if (qht_lookup(&ht, is_equal, p, hash) == NULL) {
- written = qht_insert(&ht, p, hash);
+ if (qht_lookup(&ht, p, hash) == NULL) {
+ written = qht_insert(&ht, p, hash, NULL);
}
if (written) {
stats->in++;
@@ -173,7 +173,7 @@ static void do_rw(struct thread_info *info)
} else {
bool removed = false;
- if (qht_lookup(&ht, is_equal, p, hash)) {
+ if (qht_lookup(&ht, p, hash)) {
removed = qht_remove(&ht, p, hash);
}
if (removed) {
@@ -308,7 +308,7 @@ static void htable_init(void)
}
/* initialize the hash table */
- qht_init(&ht, qht_n_elems, qht_mode);
+ qht_init(&ht, is_equal, qht_n_elems, qht_mode);
assert(init_size <= init_range);
pr_params();
@@ -322,7 +322,7 @@ static void htable_init(void)
r = xorshift64star(r);
p = &keys[r & (init_range - 1)];
hash = h(*p);
- if (qht_insert(&ht, p, hash)) {
+ if (qht_insert(&ht, p, hash, NULL)) {
break;
}
retries++;
diff --git a/tests/test-qht.c b/tests/test-qht.c
index 9b7423abb6..dda6a067be 100644
--- a/tests/test-qht.c
+++ b/tests/test-qht.c
@@ -13,10 +13,10 @@
static struct qht ht;
static int32_t arr[N * 2];
-static bool is_equal(const void *obj, const void *userp)
+static bool is_equal(const void *ap, const void *bp)
{
- const int32_t *a = obj;
- const int32_t *b = userp;
+ const int32_t *a = ap;
+ const int32_t *b = bp;
return *a == *b;
}
@@ -27,11 +27,17 @@ static void insert(int a, int b)
for (i = a; i < b; i++) {
uint32_t hash;
+ void *existing;
+ bool inserted;
arr[i] = i;
hash = i;
- qht_insert(&ht, &arr[i], hash);
+ inserted = qht_insert(&ht, &arr[i], hash, NULL);
+ g_assert_true(inserted);
+ inserted = qht_insert(&ht, &arr[i], hash, &existing);
+ g_assert_false(inserted);
+ g_assert_true(existing == &arr[i]);
}
}
@@ -60,7 +66,12 @@ static void check(int a, int b, bool expected)
val = i;
hash = i;
- p = qht_lookup(&ht, is_equal, &val, hash);
+ /* test both lookup variants; results should be the same */
+ if (i % 2) {
+ p = qht_lookup(&ht, &val, hash);
+ } else {
+ p = qht_lookup_custom(&ht, &val, hash, is_equal);
+ }
g_assert_true(!!p == expected);
}
rcu_read_unlock();
@@ -102,7 +113,7 @@ static void qht_do_test(unsigned int mode, size_t init_entries)
/* under KVM we might fetch stats from an uninitialized qht */
check_n(0);
- qht_init(&ht, 0, mode);
+ qht_init(&ht, is_equal, 0, mode);
check_n(0);
insert(0, N);
diff --git a/util/qht.c b/util/qht.c
index 55b0738cd1..c138777a9c 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -351,11 +351,14 @@ static struct qht_map *qht_map_create(size_t n_buckets)
return map;
}
-void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
+void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
+ unsigned int mode)
{
struct qht_map *map;
size_t n_buckets = qht_elems_to_buckets(n_elems);
+ g_assert(cmp);
+ ht->cmp = cmp;
ht->mode = mode;
qemu_mutex_init(&ht->lock);
map = qht_map_create(n_buckets);
@@ -479,8 +482,8 @@ void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
return ret;
}
-void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
- uint32_t hash)
+void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
+ qht_lookup_func_t func)
{
struct qht_bucket *b;
struct qht_map *map;
@@ -502,10 +505,15 @@ void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
return qht_lookup__slowpath(b, func, userp, hash);
}
+void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash)
+{
+ return qht_lookup_custom(ht, userp, hash, ht->cmp);
+}
+
/* call with head->lock held */
-static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
- struct qht_bucket *head, void *p, uint32_t hash,
- bool *needs_resize)
+static void *qht_insert__locked(struct qht *ht, struct qht_map *map,
+ struct qht_bucket *head, void *p, uint32_t hash,
+ bool *needs_resize)
{
struct qht_bucket *b = head;
struct qht_bucket *prev = NULL;
@@ -515,8 +523,9 @@ static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
do {
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
if (b->pointers[i]) {
- if (unlikely(b->pointers[i] == p)) {
- return false;
+ if (unlikely(b->hashes[i] == hash &&
+ ht->cmp(b->pointers[i], p))) {
+ return b->pointers[i];
}
} else {
goto found;
@@ -545,7 +554,7 @@ static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
atomic_set(&b->hashes[i], hash);
atomic_set(&b->pointers[i], p);
seqlock_write_end(&head->sequence);
- return true;
+ return NULL;
}
static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
@@ -569,25 +578,31 @@ static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
qemu_mutex_unlock(&ht->lock);
}
-bool qht_insert(struct qht *ht, void *p, uint32_t hash)
+bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
struct qht_bucket *b;
struct qht_map *map;
bool needs_resize = false;
- bool ret;
+ void *prev;
/* NULL pointers are not supported */
qht_debug_assert(p);
b = qht_bucket_lock__no_stale(ht, hash, &map);
- ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
+ prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
qht_bucket_debug__locked(b);
qemu_spin_unlock(&b->lock);
if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
qht_grow_maybe(ht);
}
- return ret;
+ if (likely(prev == NULL)) {
+ return true;
+ }
+ if (existing) {
+ *existing = prev;
+ }
+ return false;
}
static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
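
The point of returning the existing entry is to let racing inserters adopt the winner's object instead of retrying. A hedged sketch of that pattern; the wrapper name is illustrative and not part of the patch:

    /* Sketch: insert p, or return the equivalent entry that beat us to it. */
    static void *qht_insert_or_get(struct qht *ht, void *p, uint32_t hash)
    {
        void *existing;

        if (qht_insert(ht, p, hash, &existing)) {
            return p;           /* we inserted; p is now in the table */
        }
        return existing;        /* lost the race; caller can discard p */
    }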