cputlb: Add functions for flushing TLB for a single MMU index

Guest CPU TLB maintenance operations may be sufficiently
specialized to only need to flush TLB entries corresponding
to a particular MMU index. Implement cputlb functions for
this, to avoid the inefficiency of flushing TLB entries
which we don't need to flush.
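
As a minimal usage sketch (not part of this patch: the helper and the
MMU index names MMU_IDX_KERNEL / MMU_IDX_USER are purely illustrative),
a target could flush only the indexes a guest operation affects,
terminating the variadic index list with -1:

    static void flush_kernel_mappings(CPUState *cs, target_ulong addr)
    {
        /* Flush every TLB entry for one hypothetical MMU index;
         * the trailing -1 terminates the index list.
         */
        tlb_flush_by_mmuidx(cs, MMU_IDX_KERNEL, -1);

        /* Flush only the entries covering 'addr' for another index. */
        tlb_flush_page_by_mmuidx(cs, addr, MMU_IDX_USER, -1);
    }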

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Message-id: 1439548879-1972-2-git-send-email-peter.maydell@linaro.org
diff --git a/cputlb.c b/cputlb.c
index a506086..4bc6c24 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -69,6 +69,47 @@
     tlb_flush_count++;
 }
 
+static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+{
+    CPUArchState *env = cpu->env_ptr;
+
+#if defined(DEBUG_TLB)
+    printf("tlb_flush_by_mmuidx:");
+#endif
+    /* must reset current TB so that interrupts cannot modify the
+       links while we are modifying them */
+    cpu->current_tb = NULL;
+
+    for (;;) {
+        int mmu_idx = va_arg(argp, int);
+
+        if (mmu_idx < 0) {
+            break;
+        }
+
+#if defined(DEBUG_TLB)
+        printf(" %d", mmu_idx);
+#endif
+
+        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
+        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+    }
+
+#if defined(DEBUG_TLB)
+    printf("\n");
+#endif
+
+    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+}
+
+void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+    va_list argp;
+    va_start(argp, cpu);
+    v_tlb_flush_by_mmuidx(cpu, argp);
+    va_end(argp);
+}
+
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
     if (addr == (tlb_entry->addr_read &
@@ -121,6 +162,62 @@
     tb_flush_jmp_cache(cpu, addr);
 }
 
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
+{
+    CPUArchState *env = cpu->env_ptr;
+    int i, k;
+    va_list argp;
+
+    va_start(argp, addr);
+
+#if defined(DEBUG_TLB)
+    printf("tlb_flush_page_by_mmu_idx: " TARGET_FMT_lx, addr);
+#endif
+    /* Check if we need to flush due to large pages.  */
+    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+#if defined(DEBUG_TLB)
+        printf(" forced full flush ("
+               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+               env->tlb_flush_addr, env->tlb_flush_mask);
+#endif
+        v_tlb_flush_by_mmuidx(cpu, argp);
+        va_end(argp);
+        return;
+    }
+    /* must reset current TB so that interrupts cannot modify the
+       links while we are modifying them */
+    cpu->current_tb = NULL;
+
+    addr &= TARGET_PAGE_MASK;
+    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+
+    for (;;) {
+        int mmu_idx = va_arg(argp, int);
+
+        if (mmu_idx < 0) {
+            break;
+        }
+
+#if defined(DEBUG_TLB)
+        printf(" %d", mmu_idx);
+#endif
+
+        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+
+        /* check whether there are vtlb entries that need to be flushed */
+        for (k = 0; k < CPU_VTLB_SIZE; k++) {
+            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
+        }
+    }
+    va_end(argp);
+
+#if defined(DEBUG_TLB)
+    printf("\n");
+#endif
+
+    tb_flush_jmp_cache(cpu, addr);
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)