author    Max Filippov <jcmvbkbc@gmail.com>  2011-09-06 03:55:53 +0400
committer Blue Swirl <blauwirbel@gmail.com>  2011-09-10 16:57:40 +0000
commit    b67ea0cd74417b42482499c29feb90914fbf8097 (patch)
tree      ccc2e3c2d6ef7bfd9ac7d201e69687ed51478e75  /target-xtensa/op_helper.c
parent    ccfcaba6fd9f69a9322af1911302e71127bee1e0 (diff)
target-xtensa: implement memory protection options
- TLB opcode group;
- region protection option (ISA, 4.6.3);
- region translation option (ISA, 4.6.4);
- MMU option (ISA, 4.6.5).

Cache control attribute bits are not used by this implementation.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
Diffstat (limited to 'target-xtensa/op_helper.c')
-rw-r--r--  target-xtensa/op_helper.c  301
1 file changed, 295 insertions, 6 deletions
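
Before the patch itself, a quick illustration of the simplest case it handles. Under the region protection option (ISA 4.6.3, no MMU), a virtual address selects one of eight fixed 512MB regions by its top three bits; the non-MMU branch of split_tlb_entry_spec() in the diff below computes exactly this. The following standalone sketch shows that decoding, assuming REGION_PAGE_MASK is 0xe0000000 (consistent with how the patch masks the VPN); it is illustrative only and not part of the commit.

#include <stdint.h>
#include <stdio.h>

/* Assumption for illustration: 512MB regions, base given by the top 3 bits. */
#define REGION_PAGE_MASK 0xe0000000

int main(void)
{
    uint32_t vaddr = 0xd0001234;              /* example guest virtual address */
    uint32_t region = (vaddr >> 29) & 0x7;    /* entry index: one of 8 regions */
    uint32_t base = vaddr & REGION_PAGE_MASK; /* region base, the "VPN" */

    printf("vaddr %08x -> region %u, base %08x\n", vaddr, region, base);
    /* prints: vaddr d0001234 -> region 6, base c0000000 */
    return 0;
}
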
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index 0047fa3c22..d02706db62 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -70,13 +70,32 @@ static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
}
}
-void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
+void tlb_fill(target_ulong vaddr, int is_write, int mmu_idx, void *retaddr)
{
- tlb_set_page(cpu_single_env,
- addr & ~(TARGET_PAGE_SIZE - 1),
- addr & ~(TARGET_PAGE_SIZE - 1),
- PAGE_READ | PAGE_WRITE | PAGE_EXEC,
- mmu_idx, TARGET_PAGE_SIZE);
+ CPUState *saved_env = env;
+
+ env = cpu_single_env;
+ {
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, vaddr, is_write, mmu_idx,
+ &paddr, &page_size, &access);
+
+ qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n", __func__,
+ vaddr, is_write, mmu_idx, paddr, ret);
+
+ if (ret == 0) {
+ tlb_set_page(env,
+ vaddr & TARGET_PAGE_MASK,
+ paddr & TARGET_PAGE_MASK,
+ access, mmu_idx, page_size);
+ } else {
+ do_restore_state(retaddr);
+ HELPER(exception_cause_vaddr)(env->pc, ret, vaddr);
+ }
+ }
+ env = saved_env;
}
void HELPER(exception)(uint32_t excp)
@@ -384,3 +403,273 @@ void HELPER(check_interrupts)(CPUState *env)
{
check_interrupts(env);
}
+
+void HELPER(wsr_rasid)(uint32_t v)
+{
+ v = (v & 0xffffff00) | 0x1;
+ if (v != env->sregs[RASID]) {
+ env->sregs[RASID] = v;
+ tlb_flush(env, 1);
+ }
+}
+
+static uint32_t get_page_size(const CPUState *env, bool dtlb, uint32_t way)
+{
+ uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
+
+ switch (way) {
+ case 4:
+ return (tlbcfg >> 16) & 0x3;
+
+ case 5:
+ return (tlbcfg >> 20) & 0x1;
+
+ case 6:
+ return (tlbcfg >> 24) & 0x1;
+
+ default:
+ return 0;
+ }
+}
+
+/*!
+ * Get bit mask for the virtual address bits translated by the TLB way
+ */
+uint32_t xtensa_tlb_get_addr_mask(const CPUState *env, bool dtlb, uint32_t way)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ switch (way) {
+ case 4:
+ return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
+
+ case 5:
+ if (varway56) {
+ return 0xf8000000 << get_page_size(env, dtlb, way);
+ } else {
+ return 0xf8000000;
+ }
+
+ case 6:
+ if (varway56) {
+ return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
+ } else {
+ return 0xf0000000;
+ }
+
+ default:
+ return 0xfffff000;
+ }
+ } else {
+ return REGION_PAGE_MASK;
+ }
+}
+
+/*!
+ * Get bit mask for the 'VPN without index' field.
+ * See ISA, 4.6.5.6, data format for RxTLB0
+ */
+static uint32_t get_vpn_mask(const CPUState *env, bool dtlb, uint32_t way)
+{
+ if (way < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ return is32 ? 0xffff8000 : 0xffffc000;
+ } else if (way == 4) {
+ return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
+ } else if (way <= 6) {
+ uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (varway56) {
+ return mask << (way == 5 ? 2 : 3);
+ } else {
+ return mask << 1;
+ }
+ } else {
+ return 0xfffff000;
+ }
+}
+
+/*!
+ * Split virtual address into VPN (with index) and entry index
+ * for the given TLB way
+ */
+void split_tlb_entry_spec_way(const CPUState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t wi, uint32_t *ei)
+{
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (!dtlb) {
+ wi &= 7;
+ }
+
+ if (wi < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
+ } else {
+ switch (wi) {
+ case 4:
+ {
+ uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
+ *ei = (v >> eibase) & 0x3;
+ }
+ break;
+
+ case 5:
+ if (varway56) {
+ uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x3;
+ } else {
+ *ei = (v >> 27) & 0x1;
+ }
+ break;
+
+ case 6:
+ if (varway56) {
+ uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x7;
+ } else {
+ *ei = (v >> 28) & 0x1;
+ }
+ break;
+
+ default:
+ *ei = 0;
+ break;
+ }
+ }
+ *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+}
+
+/*!
+ * Split TLB address into TLB way, entry index and VPN (with index).
+ * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
+ */
+static void split_tlb_entry_spec(uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t *wi, uint32_t *ei)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ *wi = v & (dtlb ? 0xf : 0x7);
+ split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
+ } else {
+ *vpn = v & REGION_PAGE_MASK;
+ *wi = 0;
+ *ei = (v >> 29) & 0x7;
+ }
+}
+
+static xtensa_tlb_entry *get_tlb_entry(uint32_t v, bool dtlb, uint32_t *pwi)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+
+ split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei);
+ if (pwi) {
+ *pwi = wi;
+ }
+ return xtensa_tlb_get_entry(env, dtlb, wi, ei);
+}
+
+uint32_t HELPER(rtlb0)(uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
+ return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
+ } else {
+ return v & REGION_PAGE_MASK;
+ }
+}
+
+uint32_t HELPER(rtlb1)(uint32_t v, uint32_t dtlb)
+{
+ const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, NULL);
+ return entry->paddr | entry->attr;
+}
+
+void HELPER(itlb)(uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
+ if (entry->variable && entry->asid) {
+ tlb_flush_page(env, entry->vaddr);
+ entry->asid = 0;
+ }
+ }
+}
+
+uint32_t HELPER(ptlb)(uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
+
+ switch (res) {
+ case 0:
+ if (ring >= xtensa_get_ring(env)) {
+ return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
+ }
+ break;
+
+ case INST_TLB_MULTI_HIT_CAUSE:
+ case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
+ HELPER(exception_cause_vaddr)(env->pc, res, v);
+ break;
+ }
+ return 0;
+ } else {
+ return (v & REGION_PAGE_MASK) | 0x1;
+ }
+}
+
+void xtensa_tlb_set_entry(CPUState *env, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
+{
+ xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ if (entry->variable) {
+ if (entry->asid) {
+ tlb_flush_page(env, entry->vaddr);
+ }
+ entry->vaddr = vpn;
+ entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+ entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
+ entry->attr = pte & 0xf;
+ } else {
+ qemu_log("%s %d, %d, %d trying to set immutable entry\n",
+ __func__, dtlb, wi, ei);
+ }
+ } else {
+ tlb_flush_page(env, entry->vaddr);
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_REGION_TRANSLATION)) {
+ entry->paddr = pte & REGION_PAGE_MASK;
+ }
+ entry->attr = pte & 0xf;
+ }
+}
+
+void HELPER(wtlb)(uint32_t p, uint32_t v, uint32_t dtlb)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+ split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei);
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
+}