Diffstat (limited to 'target/hppa/mem_helper.c')
-rw-r--r-- | target/hppa/mem_helper.c | 674
1 file changed, 538 insertions, 136 deletions
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index afc5b56c3e..84785b5a5c 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -18,102 +18,237 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "exec/helper-proto.h"
 #include "hw/core/cpu.h"
 #include "trace.h"
 
-#ifdef CONFIG_USER_ONLY
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool probe, uintptr_t retaddr)
+hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
 {
-    HPPACPU *cpu = HPPA_CPU(cs);
+    /*
+     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
+     * an algorithm in which a 62-bit absolute address is transformed to
+     * a 64-bit physical address.  This must then be combined with that
+     * pictured in Figure H-11 "Physical Address Space Mapping", in which
+     * the full physical address is truncated to the N-bit physical address
+     * supported by the implementation.
+     *
+     * Since the supported physical address space is below 54 bits, the
+     * H-8 algorithm is moot and all that is left is to truncate.
+     */
+    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
+    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
+}
 
-    /* ??? Test between data page fault and data memory protection trap,
-       which would affect si_code.  */
-    cs->exception_index = EXCP_DMP;
-    cpu->env.cr[CR_IOR] = address;
-    cpu_loop_exit_restore(cs, retaddr);
+hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
+{
+    /*
+     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
+     * combined with Figure H-11, as above.
+     */
+    if (likely(extract32(addr, 28, 4) != 0xf)) {
+        /* Memory address space */
+        addr = (uint32_t)addr;
+    } else if (extract32(addr, 24, 4) != 0) {
+        /* I/O address space */
+        addr = (int32_t)addr;
+    } else {
+        /*
+         * PDC address space:
+         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
+         * where to map into the 64-bit PDC address space.
+         * We map with an offset which equals the 32-bit address, which
+         * is what can be seen on physical machines too.
+         */
+        addr = (uint32_t)addr;
+        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
+    }
+    return addr;
 }
 
-#else
-static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
+
+static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
 {
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
-        hppa_tlb_entry *ent = &env->tlb[i];
-        if (ent->va_b <= addr && addr <= ent->va_e) {
-            trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
-                                      ent->va_b, ent->va_e, ent->pa);
-            return ent;
-        }
+    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);
+
+    if (i) {
+        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
+        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
+                                  ent->itree.start, ent->itree.last, ent->pa);
+        return ent;
     }
     trace_hppa_tlb_find_entry_not_found(env, addr);
     return NULL;
 }
 
-static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
+static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
+                               bool force_flush_btlb)
 {
     CPUState *cs = env_cpu(env);
-    unsigned i, n = 1 << (2 * ent->page_size);
-    uint64_t addr = ent->va_b;
+    bool is_btlb;
+
+    if (!ent->entry_valid) {
+        return;
+    }
+
+    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
+                             ent->itree.last, ent->pa);
 
-    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
+    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
+                              ent->itree.last - ent->itree.start + 1,
+                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
 
-    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
-        /* Do not flush MMU_PHYS_IDX.  */
-        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
+    /* Never clear BTLBs, unless forced to do so. */
+    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
+    if (is_btlb && !force_flush_btlb) {
+        return;
     }
 
+    interval_tree_remove(&ent->itree, &env->tlb_root);
     memset(ent, 0, sizeof(*ent));
-    ent->va_b = -1;
+
+    if (!is_btlb) {
+        ent->unused_next = env->tlb_unused;
+        env->tlb_unused = ent;
+    }
+}
+
+static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
+{
+    IntervalTreeNode *i, *n;
+
+    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
+    for (; i ; i = n) {
+        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
+
+        /*
+         * Find the next entry now: In the normal case the current entry
+         * will be removed, but in the BTLB case it will remain.
+         */
+        n = interval_tree_iter_next(i, va_b, va_e);
+        hppa_flush_tlb_ent(env, ent, false);
+    }
 }
 
-static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
+static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
 {
-    hppa_tlb_entry *ent;
-    uint32_t i = env->tlb_last;
+    HPPATLBEntry *ent = env->tlb_unused;
+
+    if (ent == NULL) {
+        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
+        uint32_t i = env->tlb_last;
 
-    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
-    ent = &env->tlb[i];
+        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
+            i = btlb_entries;
+        }
+        env->tlb_last = i + 1;
+
+        ent = &env->tlb[i];
+        hppa_flush_tlb_ent(env, ent, false);
+    }
 
-    hppa_flush_tlb_ent(env, ent);
+    env->tlb_unused = ent->unused_next;
     return ent;
 }
 
+#define ACCESS_ID_MASK 0xffff
+
+/* Return the set of protections allowed by a PID match. */
+static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
+{
+    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
+        return (prot_id & 1
+                ? PAGE_EXEC | PAGE_READ
+                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
+    }
+    return 0;
+}
+
+static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
+{
+    int r, i;
+
+    for (i = CR_PID1; i <= CR_PID4; ++i) {
+        r = match_prot_id_1(access_id, env->cr[i]);
+        if (r) {
+            return r;
+        }
+    }
+    return 0;
+}
+
+static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
+{
+    int r, i;
+
+    for (i = CR_PID1; i <= CR_PID4; ++i) {
+        r = match_prot_id_1(access_id, env->cr[i]);
+        if (r) {
+            return r;
+        }
+        r = match_prot_id_1(access_id, env->cr[i] >> 32);
+        if (r) {
+            return r;
+        }
+    }
+    return 0;
+}
+
 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
-                              int type, hwaddr *pphys, int *pprot)
+                              int type, hwaddr *pphys, int *pprot,
+                              HPPATLBEntry **tlb_entry)
 {
     hwaddr phys;
-    int prot, r_prot, w_prot, x_prot;
-    hppa_tlb_entry *ent;
+    int prot, r_prot, w_prot, x_prot, priv;
+    HPPATLBEntry *ent;
     int ret = -1;
 
-    /* Virtual translation disabled.  Direct map virtual to physical.  */
-    if (mmu_idx == MMU_PHYS_IDX) {
-        phys = addr;
+    if (tlb_entry) {
+        *tlb_entry = NULL;
+    }
+
+    /* Virtual translation disabled.  Map absolute to physical.  */
+    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
+        switch (mmu_idx) {
+        case MMU_ABS_W_IDX:
+            phys = hppa_abs_to_phys_pa2_w1(addr);
+            break;
+        case MMU_ABS_IDX:
+            if (hppa_is_pa20(env)) {
+                phys = hppa_abs_to_phys_pa2_w0(addr);
+            } else {
+                phys = (uint32_t)addr;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
         goto egress;
     }
 
     /* Find a valid tlb entry that matches the virtual address.  */
     ent = hppa_find_tlb(env, addr);
-    if (ent == NULL || !ent->entry_valid) {
+    if (ent == NULL) {
         phys = 0;
         prot = 0;
         ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
         goto egress;
     }
 
+    if (tlb_entry) {
+        *tlb_entry = ent;
+    }
+
     /* We now know the physical address.  */
-    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);
+    phys = ent->pa + (addr - ent->itree.start);
 
     /* Map TLB access_rights field to QEMU protection.  */
-    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
-    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
-    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
+    priv = MMU_IDX_TO_PRIV(mmu_idx);
+    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
+    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
+    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
     switch (ent->ar_type) {
     case 0: /* read-only: data page */
         prot = r_prot;
@@ -132,29 +267,30 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
         break;
     }
 
-    /* access_id == 0 means public page and no check is performed */
-    if ((env->psw & PSW_P) && ent->access_id) {
-        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
-        int match = ent->access_id * 2 + 1;
-
-        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
-            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
-            prot &= PAGE_READ | PAGE_EXEC;
-            if (type == PAGE_WRITE) {
-                ret = EXCP_DMPI;
-                goto egress;
-            }
-        }
-    }
-
-    /* No guest access type indicates a non-architectural access from
-       within QEMU.  Bypass checks for access, D, B and T bits.  */
+    /*
+     * No guest access type indicates a non-architectural access from
+     * within QEMU.  Bypass checks for access, D, B, P and T bits.
+     */
     if (type == 0) {
        goto egress;
    }
 
+    /* access_id == 0 means public page and no check is performed */
+    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
+        int access_prot = (hppa_is_pa20(env)
+                           ? match_prot_id64(env, ent->access_id)
+                           : match_prot_id32(env, ent->access_id));
+        if (unlikely(!(type & access_prot))) {
+            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
+            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
+            goto egress;
+        }
+        /* Otherwise exclude permissions not allowed (i.e WD). */
+        prot &= access_prot;
+    }
+
     if (unlikely(!(prot & type))) {
-        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
+        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
         ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
         goto egress;
     }
@@ -196,17 +332,16 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 {
     HPPACPU *cpu = HPPA_CPU(cs);
     hwaddr phys;
-    int prot, excp;
+    int prot, excp, mmu_idx;
 
     /* If the (data) mmu is disabled, bypass translation.  */
     /* ??? We really ought to know if the code mmu is disabled too,
        in order to get the correct debugging dumps.  */
-    if (!(cpu->env.psw & PSW_D)) {
-        return addr;
-    }
+    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
+               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
 
-    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
-                                     &phys, &prot);
+    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
+                                     &phys, &prot, NULL);
 
     /* Since we're translating for debugging, the only error that is a
        hard error is no translation at all.  Otherwise, while a real cpu
@@ -214,12 +349,81 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     return excp == EXCP_DTLB_MISS ? -1 : phys;
 }
 
+void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
+{
+    if (env->psw & PSW_Q) {
+        /*
+         * For pa1.x, the offset and space never overlap, and so we
+         * simply extract the high and low part of the virtual address.
+         *
+         * For pa2.0, the formation of these are described in section
+         * "Interruption Parameter Registers", page 2-15.
+         */
+        env->cr[CR_IOR] = (uint32_t)addr;
+        env->cr[CR_ISR] = addr >> 32;
+
+        if (hppa_is_pa20(env)) {
+            if (mmu_disabled) {
+                /*
+                 * If data translation was disabled, the ISR contains
+                 * the upper portion of the abs address, zero-extended.
+                 */
+                env->cr[CR_ISR] &= 0x3fffffff;
+            } else {
+                /*
+                 * If data translation was enabled, the upper two bits
+                 * of the IOR (the b field) are equal to the two space
+                 * bits from the base register used to form the gva.
+                 */
+                uint64_t b;
+
+                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
+                b >>= (env->psw & PSW_W ? 62 : 30);
+                env->cr[CR_IOR] |= b << 62;
+            }
+        }
+    }
+}
+
+G_NORETURN static void
+raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
+                         vaddr addr, bool mmu_disabled)
+{
+    CPUState *cs = env_cpu(env);
+
+    cs->exception_index = excp;
+    cpu_restore_state(cs, retaddr);
+    hppa_set_ior_and_isr(env, addr, mmu_disabled);
+
+    cpu_loop_exit(cs);
+}
+
+void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+                                    vaddr addr, unsigned size,
+                                    MMUAccessType access_type,
+                                    int mmu_idx, MemTxAttrs attrs,
+                                    MemTxResult response, uintptr_t retaddr)
+{
+    CPUHPPAState *env = cpu_env(cs);
+
+    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
+                  " while accessing I/O at %#08" HWADDR_PRIx "\n",
+                  env->iasq_f, env->iaoq_f, physaddr);
+
+    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
+    if (0) {
+        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
+                                 MMU_IDX_MMU_DISABLED(mmu_idx));
+    }
+}
+
 bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
 {
     HPPACPU *cpu = HPPA_CPU(cs);
     CPUHPPAState *env = &cpu->env;
+    HPPATLBEntry *ent;
     int prot, excp, a_prot;
     hwaddr phys;
 
@@ -236,71 +440,58 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
     }
 
     excp = hppa_get_physical_address(env, addr, mmu_idx,
-                                     a_prot, &phys, &prot);
+                                     a_prot, &phys, &prot, &ent);
     if (unlikely(excp >= 0)) {
         if (probe) {
             return false;
         }
         trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
+
         /* Failure.  Raise the indicated exception.  */
-        cs->exception_index = excp;
-        if (cpu->env.psw & PSW_Q) {
-            /* ??? Needs tweaking for hppa64.  */
-            cpu->env.cr[CR_IOR] = addr;
-            cpu->env.cr[CR_ISR] = addr >> 32;
-        }
-        cpu_loop_exit_restore(cs, retaddr);
+        raise_exception_with_ior(env, excp, retaddr, addr,
+                                 MMU_IDX_MMU_DISABLED(mmu_idx));
     }
 
     trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                 phys & TARGET_PAGE_MASK, size, type, mmu_idx);
-    /* Success!  Store the translation into the QEMU TLB.  */
+
+    /*
+     * Success!  Store the translation into the QEMU TLB.
+     * Note that we always install a single-page entry, because that
+     * is what works best with softmmu -- anything else will trigger
+     * the large page protection mask.  We do not require this,
+     * because we record the large page here in the hppa tlb.
+     */
     tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                  prot, mmu_idx, TARGET_PAGE_SIZE);
     return true;
 }
 
 /* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
-void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
 {
-    hppa_tlb_entry *empty = NULL;
-    int i;
-
-    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
-    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
-        hppa_tlb_entry *ent = &env->tlb[i];
-        if (ent->va_b <= addr && addr <= ent->va_e) {
-            if (ent->entry_valid) {
-                hppa_flush_tlb_ent(env, ent);
-            }
-            if (!empty) {
-                empty = ent;
-            }
-        }
-    }
+    HPPATLBEntry *ent;
 
-    /* If we didn't see an empty entry, evict one.  */
-    if (empty == NULL) {
-        empty = hppa_alloc_tlb_ent(env);
+    /* Zap any old entries covering ADDR. */
+    addr &= TARGET_PAGE_MASK;
+    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);
+
+    ent = env->tlb_partial;
+    if (ent == NULL) {
+        ent = hppa_alloc_tlb_ent(env);
+        env->tlb_partial = ent;
     }
 
-    /* Note that empty->entry_valid == 0 already.  */
-    empty->va_b = addr & TARGET_PAGE_MASK;
-    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
-    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
-    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
+    /* Note that ent->entry_valid == 0 already.  */
+    ent->itree.start = addr;
+    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
+    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
 }
 
-/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
-void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                  target_ulong reg)
 {
-    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
-
-    if (unlikely(ent == NULL)) {
-        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
-        return;
-    }
-
     ent->access_id = extract32(reg, 1, 18);
     ent->u = extract32(reg, 19, 1);
     ent->ar_pl2 = extract32(reg, 20, 2);
@@ -310,36 +501,159 @@ void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
     ent->d = extract32(reg, 28, 1);
     ent->t = extract32(reg, 29, 1);
     ent->entry_valid = 1;
+
+    interval_tree_insert(&ent->itree, &env->tlb_root);
     trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                          ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
 }
 
-/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
-   synchronous across all processors.  */
-static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
+/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
+void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
 {
-    CPUHPPAState *env = cpu->env_ptr;
-    target_ulong addr = (target_ulong) data.target_ptr;
-    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+    HPPATLBEntry *ent = env->tlb_partial;
 
-    if (ent && ent->entry_valid) {
-        hppa_flush_tlb_ent(env, ent);
+    if (ent) {
+        env->tlb_partial = NULL;
+        if (ent->itree.start <= addr && addr <= ent->itree.last) {
+            set_access_bits_pa11(env, ent, reg);
+            return;
+        }
     }
+    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
+}
+
+static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
+                       target_ulong r2, vaddr va_b)
+{
+    HPPATLBEntry *ent;
+    vaddr va_e;
+    uint64_t va_size;
+    int mask_shift;
+
+    mask_shift = 2 * (r1 & 0xf);
+    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
+    va_b &= -va_size;
+    va_e = va_b + va_size - 1;
+
+    hppa_flush_tlb_range(env, va_b, va_e);
+    ent = hppa_alloc_tlb_ent(env);
+
+    ent->itree.start = va_b;
+    ent->itree.last = va_e;
+
+    /* Extract all 52 bits present in the page table entry. */
+    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
+    /* Align per the page size. */
+    ent->pa &= TARGET_PAGE_MASK << mask_shift;
+    /* Ignore the bits beyond physical address space.  */
+    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);
+
+    ent->t = extract64(r2, 61, 1);
+    ent->d = extract64(r2, 60, 1);
+    ent->b = extract64(r2, 59, 1);
+    ent->ar_type = extract64(r2, 56, 3);
+    ent->ar_pl1 = extract64(r2, 54, 2);
+    ent->ar_pl2 = extract64(r2, 52, 2);
+    ent->u = extract64(r2, 51, 1);
+    /* o = bit 50 */
+    /* p = bit 49 */
+    ent->access_id = extract64(r2, 1, 31);
+    ent->entry_valid = 1;
+
+    interval_tree_insert(&ent->itree, &env->tlb_root);
+    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
+    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
+                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
+                         ent->b, ent->d, ent->t);
 }
 
+void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
+{
+    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
+    itlbt_pa20(env, r1, r2, va_b);
+}
+
+void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
+{
+    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
+    itlbt_pa20(env, r1, r2, va_b);
+}
+
+/* Purge (Insn/Data) TLB. */
+static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
+{
+    vaddr start = data.target_ptr;
+    vaddr end;
+
+    /*
+     * PA2.0 allows a range of pages encoded into GR[b], which we have
+     * copied into the bottom bits of the otherwise page-aligned address.
+     * PA1.x will always provide zero here, for a single page flush.
+     */
+    end = start & 0xf;
+    start &= TARGET_PAGE_MASK;
+    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
+    end = start + end - 1;
+
+    hppa_flush_tlb_range(cpu_env(cpu), start, end);
+}
+
+/* This is local to the current cpu. */
+void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
+{
+    trace_hppa_tlb_ptlb_local(env);
+    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
+}
+
+/* This is synchronous across all processors. */
 void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
 {
     CPUState *src = env_cpu(env);
     CPUState *cpu;
+    bool wait = false;
+
     trace_hppa_tlb_ptlb(env);
     run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
 
     CPU_FOREACH(cpu) {
         if (cpu != src) {
             async_run_on_cpu(cpu, ptlb_work, data);
+            wait = true;
+        }
+    }
+    if (wait) {
+        async_safe_run_on_cpu(src, ptlb_work, data);
+    } else {
+        ptlb_work(src, data);
+    }
+}
+
+void hppa_ptlbe(CPUHPPAState *env)
+{
+    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
+    uint32_t i;
+
+    /* Zap the (non-btlb) tlb entries themselves. */
+    memset(&env->tlb[btlb_entries], 0,
+           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
+    env->tlb_last = btlb_entries;
+    env->tlb_partial = NULL;
+
+    /* Put them all onto the unused list. */
+    env->tlb_unused = &env->tlb[btlb_entries];
+    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
+        env->tlb[i].unused_next = &env->tlb[i + 1];
+    }
+
+    /* Re-initialize the interval tree with only the btlb entries. */
+    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+    for (i = 0; i < btlb_entries; ++i) {
+        if (env->tlb[i].entry_valid) {
+            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
         }
     }
-    async_safe_run_on_cpu(src, ptlb_work, data);
+
+    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
 }
 
 /* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
@@ -347,15 +661,13 @@ void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
 void HELPER(ptlbe)(CPUHPPAState *env)
 {
     trace_hppa_tlb_ptlbe(env);
-    memset(env->tlb, 0, sizeof(env->tlb));
-    tlb_flush_by_mmuidx(env_cpu(env), 0xf);
+    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
+    hppa_ptlbe(env);
 }
 
 void cpu_hppa_change_prot_id(CPUHPPAState *env)
 {
-    if (env->psw & PSW_P) {
-        tlb_flush_by_mmuidx(env_cpu(env), 0xf);
-    }
+    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
 }
 
 void HELPER(change_prot_id)(CPUHPPAState *env)
@@ -363,24 +675,19 @@ void HELPER(change_prot_id)(CPUHPPAState *env)
     cpu_hppa_change_prot_id(env);
 }
 
-target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
+target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
 {
     hwaddr phys;
     int prot, excp;
 
     excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
-                                     &phys, &prot);
+                                     &phys, &prot, NULL);
     if (excp >= 0) {
-        if (env->psw & PSW_Q) {
-            /* ??? Needs tweaking for hppa64.  */
-            env->cr[CR_IOR] = addr;
-            env->cr[CR_ISR] = addr >> 32;
-        }
         if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
-        hppa_dynamic_excp(env, excp, GETPC());
+        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
@@ -389,7 +696,102 @@ target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
 /* Return the ar_type of the TLB at VADDR, or -1.  */
 int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
 {
-    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
+    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
     return ent ? ent->ar_type : -1;
 }
-#endif /* CONFIG_USER_ONLY */
+
+/*
+ * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
+ * allow operating systems to modify the Block TLB (BTLB) entries.
+ * For implementation details see page 1-13 in
+ * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
+ */
+void HELPER(diag_btlb)(CPUHPPAState *env)
+{
+    unsigned int phys_page, len, slot;
+    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
+    uintptr_t ra = GETPC();
+    HPPATLBEntry *btlb;
+    uint64_t virt_page;
+    uint32_t *vaddr;
+    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
+
+    /* BTLBs are not supported on 64-bit CPUs */
+    if (btlb_entries == 0) {
+        env->gr[28] = -1; /* nonexistent procedure */
+        return;
+    }
+
+    env->gr[28] = 0; /* PDC_OK */
+
+    switch (env->gr[25]) {
+    case 0:
+        /* return BTLB parameters */
+        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
+        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
+                             MMU_DATA_STORE, mmu_idx, ra);
+        if (vaddr == NULL) {
+            env->gr[28] = -10; /* invalid argument */
+        } else {
+            vaddr[0] = cpu_to_be32(1);
+            vaddr[1] = cpu_to_be32(16 * 1024);
+            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
+            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
+        }
+        break;
+    case 1:
+        /* insert BTLB entry */
+        virt_page = env->gr[24];        /* upper 32 bits */
+        virt_page <<= 32;
+        virt_page |= env->gr[23];       /* lower 32 bits */
+        phys_page = env->gr[22];
+        len = env->gr[21];
+        slot = env->gr[19];
+        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
+                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
+                    "into slot %d\n",
+                    (long long) virt_page << TARGET_PAGE_BITS,
+                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
+                    (long long) virt_page, phys_page, len, slot);
+        if (slot < btlb_entries) {
+            btlb = &env->tlb[slot];
+
+            /* Force flush of possibly existing BTLB entry.  */
+            hppa_flush_tlb_ent(env, btlb, true);
+
+            /* Create new BTLB entry */
+            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
+            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
+            btlb->pa = phys_page << TARGET_PAGE_BITS;
+            set_access_bits_pa11(env, btlb, env->gr[20]);
+            btlb->t = 0;
+            btlb->d = 1;
+        } else {
+            env->gr[28] = -10; /* invalid argument */
+        }
+        break;
+    case 2:
+        /* Purge BTLB entry */
+        slot = env->gr[22];
+        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
+                      slot);
+        if (slot < btlb_entries) {
+            btlb = &env->tlb[slot];
+            hppa_flush_tlb_ent(env, btlb, true);
+        } else {
+            env->gr[28] = -10; /* invalid argument */
+        }
+        break;
+    case 3:
+        /* Purge all BTLB entries */
+        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
+        for (slot = 0; slot < btlb_entries; slot++) {
+            btlb = &env->tlb[slot];
+            hppa_flush_tlb_ent(env, btlb, true);
+        }
+        break;
+    default:
+        env->gr[28] = -2; /* nonexistent option */
+        break;
+    }
+}
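The three-way split in hppa_abs_to_phys_pa2_w0() above is easy to misread, so here is a minimal standalone sketch of the same PSW W=0 mapping. It is not QEMU code: abs_to_phys_w0 is a local name, the sample addresses are invented, and PHYS_BITS is an assumed 40-bit physical address space standing in for TARGET_PHYS_ADDR_SPACE_BITS.

/* Sketch of the Figure H-10 style split: 0x00000000-0xefffffff is
 * memory, 0xf1000000-0xffffffff is I/O, and 0xf0000000-0xf0ffffff
 * (I/O nibble zero) is PDC space.  Compiles with any C99 compiler. */
#include <stdint.h>
#include <stdio.h>

#define PHYS_BITS 40    /* assumption; QEMU uses TARGET_PHYS_ADDR_SPACE_BITS */

static uint64_t abs_to_phys_w0(uint64_t addr)
{
    uint32_t lo = (uint32_t)addr;

    if ((lo >> 28) != 0xf) {
        return lo;                              /* memory: zero-extend */
    } else if (((lo >> 24) & 0xf) != 0) {
        return (uint64_t)(int64_t)(int32_t)lo;  /* I/O: sign-extend */
    } else {
        return lo | (-1ull << (PHYS_BITS - 4)); /* PDC: offset near the top */
    }
}

int main(void)
{
    printf("mem %#llx\n", (unsigned long long)abs_to_phys_w0(0x12345678));
    printf("io  %#llx\n", (unsigned long long)abs_to_phys_w0(0xf1000000));
    printf("pdc %#llx\n", (unsigned long long)abs_to_phys_w0(0xf0001000));
    return 0;
}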
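The switch from the old fixed-size array scan to an interval tree is the core of the new TLB bookkeeping: one HPPATLBEntry now covers the whole range [itree.start, itree.last], so a PA2.0 large page is a single entry and the physical address falls out as an offset from the range start. A toy model of that lookup, with a linear scan standing in for QEMU's interval tree and all names local to the demo:

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t start, last, pa;   /* inclusive virtual range -> phys base */
} Entry;

static Entry tlb[] = {
    { 0x00010000, 0x0001ffff, 0x40000000 },  /* one 64 KiB large page */
    { 0x00400000, 0x00400fff, 0x50000000 },  /* one 4 KiB page */
};

static int lookup(uint64_t va, uint64_t *pa)
{
    for (unsigned i = 0; i < sizeof(tlb) / sizeof(tlb[0]); i++) {
        if (va >= tlb[i].start && va <= tlb[i].last) {
            /* Same arithmetic as "ent->pa + (addr - ent->itree.start)". */
            *pa = tlb[i].pa + (va - tlb[i].start);
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    uint64_t pa;

    if (lookup(0x00013456, &pa)) {
        printf("pa = %#llx\n", (unsigned long long)pa);  /* 0x40003456 */
    }
    return 0;
}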
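match_prot_id_1() above encodes the PA-RISC protection-ID convention: a PID register carries the ID in bits 16..1 and a write-disable flag in bit 0, and a match yields a permission set rather than a simple boolean. A self-contained rendering with locally defined PAGE_* values (1/2/4 is an assumption matching common QEMU flag values):

#include <stdint.h>
#include <stdio.h>

#define PAGE_READ  1
#define PAGE_WRITE 2
#define PAGE_EXEC  4
#define ACCESS_ID_MASK 0xffff

static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    /* IDs equal in bits 16..1?  Bit 0 set means write-disable (WD). */
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return prot_id & 1 ? PAGE_EXEC | PAGE_READ
                           : PAGE_EXEC | PAGE_READ | PAGE_WRITE;
    }
    return 0;
}

int main(void)
{
    uint32_t page_access_id = 0x1234;

    printf("%d\n", match_prot_id_1(page_access_id, 0x1234 << 1));       /* 7 */
    printf("%d\n", match_prot_id_1(page_access_id, (0x1234 << 1) | 1)); /* 5 */
    printf("%d\n", match_prot_id_1(page_access_id, 0x4321 << 1));       /* 0 */
    return 0;
}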
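The comment in ptlb_work() is terse: the PA2.0 range-purge variant smuggles a page-count exponent into the low four bits of the otherwise page-aligned address, purging 4^n pages. A worked example under an assumed 4 KiB page size (all names local to the demo):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull            /* assumed page size */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t addr = 0x10002003;      /* aligned base | exponent n = 3 */
    uint64_t start = addr & PAGE_MASK;
    uint64_t len = PAGE_SIZE << (2 * (addr & 0xf));   /* 4^3 = 64 pages */

    printf("purge [%#llx, %#llx]\n",
           (unsigned long long)start,
           (unsigned long long)(start + len - 1));    /* ends 0x10041fff */
    return 0;
}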
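Finally, the pa2.0 IOR/ISR formation in hppa_set_ior_and_isr() splits the faulting address into a space half and an offset half and, with translation enabled, injects the "b" field from the space bits of the base register. A minimal numeric sketch; the address and register value are invented, and the PSW W=0 case would shift by 30 instead of 62:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t gva  = 0x123456789abcdef0ull;  /* invented faulting address */
    uint64_t breg = 0xc000000000000000ull;  /* invented base register */
    uint64_t isr  = gva >> 32;              /* space half */
    uint64_t ior  = (uint32_t)gva;          /* offset half */

    /* PSW W=1: top two bits of IOR come from bits 63..62 of GR[b]. */
    ior |= (breg >> 62) << 62;
    printf("ISR=%#llx IOR=%#llx\n",
           (unsigned long long)isr, (unsigned long long)ior);
    return 0;
}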