author     David Gibson <david@gibson.dropbear.id.au>  2013-03-12 00:31:43 +0000
committer  Alexander Graf <agraf@suse.de>  2013-03-22 15:28:52 +0100
commit     6d11d998bb866c92b0f81eb3cea2f7a3e617feb8 (patch)
tree       3d87d6d0615602838b977ab3d48a2fba4a127ab5 /target-ppc
parent     b344074642e58fc83635c38105f38b85fc086666 (diff)
mmu-hash*: Clean up real address calculation
More recent 64-bit hash MMUs support multiple page sizes, and PTEs for large pages only include the offset of the whole large page. But the qemu tlb only handles pages of the base size (4k), so we need to break the large pages up into 4k pieces for the qemu tlb. To do that we have a somewhat awkward piece of code that folds the address bits between 4k and the large page size from the virtual address into the real address from the PTE.

This patch simplifies this by redefining the raddr output of ppc_hash64_translate() to be the full real address of the faulting address, rather than just the (4k) page offset. Computing that turns out to be simpler, and is fine for the caller, since it already masks with TARGET_PAGE_MASK before inserting into the qemu tlb.

The multiple page size complication doesn't exist for 32-bit hash MMUs, but we make an analogous cleanup there for consistency.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexander Graf <agraf@suse.de>
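The calculation the new helpers perform is a plain bit-merge: bits above the page size come from the PTE's real page number, bits below it from the effective address. A minimal standalone sketch of that arithmetic (not part of the patch); the 16M page size and all values below are assumptions chosen only for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int page_bits = 24;                  /* assumed 16M large page */
    uint64_t mask = (1ULL << page_bits) - 1;   /* offset bits within the page */
    uint64_t rpn = 0x0000000012000000ULL;      /* hypothetical real address from the PTE */
    uint64_t eaddr = 0x0000000040345678ULL;    /* hypothetical faulting effective address */

    /* High bits come from the PTE, low bits from the effective address. */
    uint64_t raddr = (rpn & ~mask) | (eaddr & mask);

    printf("raddr = 0x%016" PRIx64 "\n", raddr);   /* prints 0x0000000012345678 */
    return 0;
}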
Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/mmu-hash32.c  14
-rw-r--r--  target-ppc/mmu-hash64.c  26
2 files changed, 27 insertions(+), 13 deletions(-)
diff --git a/target-ppc/mmu-hash32.c b/target-ppc/mmu-hash32.c
index 314b7d1634..e5ee29bfc7 100644
--- a/target-ppc/mmu-hash32.c
+++ b/target-ppc/mmu-hash32.c
@@ -343,6 +343,15 @@ static hwaddr ppc_hash32_htab_lookup(CPUPPCState *env,
     return pte_offset;
 }
 
+static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
+                                   target_ulong eaddr)
+{
+    hwaddr rpn = pte.pte1;
+    hwaddr mask = ~TARGET_PAGE_MASK;
+
+    return (rpn & ~mask) | (eaddr & mask);
+}
+
 static int ppc_hash32_translate(CPUPPCState *env, struct mmu_ctx_hash32 *ctx,
                                 target_ulong eaddr, int rwx)
 {
@@ -421,8 +430,9 @@ static int ppc_hash32_translate(CPUPPCState *env, struct mmu_ctx_hash32 *ctx,
         ppc_hash32_store_hpte1(env, pte_offset, new_pte1);
     }
 
-    /* Keep the matching PTE informations */
-    ctx->raddr = pte.pte1;
+    /* 9. Determine the real address from the PTE */
+
+    ctx->raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);
 
     return 0;
 }
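For the 32-bit case, the only difference from the old code is which low-order bits end up in raddr; since the caller masks with TARGET_PAGE_MASK before filling the qemu tlb, the visible result is unchanged. A small self-contained check of that reasoning (not part of the patch); the 4k mask and both values are assumptions for illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t page_mask = ~0xfffULL;       /* assumed 4k TARGET_PAGE_MASK */
    uint64_t pte1 = 0x00123047ULL;              /* hypothetical pte1: RPN plus flag bits */
    uint64_t eaddr = 0x30002345ULL;             /* hypothetical effective address */

    uint64_t old_raddr = pte1;                                        /* before the patch */
    uint64_t new_raddr = (pte1 & page_mask) | (eaddr & ~page_mask);   /* after the patch */

    /* The caller masks with TARGET_PAGE_MASK before inserting into the
     * qemu tlb, so both forms select the same 4k page. */
    assert((old_raddr & page_mask) == (new_raddr & page_mask));
    return 0;
}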
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index c76abb7d22..5e168d5e8d 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -362,6 +362,18 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
     return pte_offset;
 }
 
+static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
+                                   target_ulong eaddr)
+{
+    hwaddr rpn = pte.pte1;
+    /* FIXME: Add support for SLLP extended page sizes */
+    int target_page_bits = (slb->vsid & SLB_VSID_L)
+        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
+    hwaddr mask = (1ULL << target_page_bits) - 1;
+
+    return (rpn & ~mask) | (eaddr & mask);
+}
+
 static int ppc_hash64_translate(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
                                 target_ulong eaddr, int rwx)
 {
@@ -369,7 +381,6 @@ static int ppc_hash64_translate(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
     uint64_t new_pte1;
-    int target_page_bits;
     const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
 
     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
@@ -429,17 +440,10 @@ static int ppc_hash64_translate(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
         ppc_hash64_store_hpte1(env, pte_offset, new_pte1);
     }
 
-    /* Keep the matching PTE informations */
-    ctx->raddr = pte.pte1;
+    /* 7. Determine the real address from the PTE */
+
+    ctx->raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
 
-    /* We have a TLB that saves 4K pages, so let's
-     * split a huge page to 4k chunks */
-    target_page_bits = (slb->vsid & SLB_VSID_L)
-        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
-    if (target_page_bits != TARGET_PAGE_BITS) {
-        ctx->raddr |= (eaddr & ((1 << target_page_bits) - 1))
-            & TARGET_PAGE_MASK;
-    }
-
     return 0;
 }
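For the 64-bit case, the new helper followed by the caller's TARGET_PAGE_MASK should reproduce what the removed split-to-4k code computed, provided (as the old code already relied on) that pte1's real address is aligned to the large page. A hypothetical sanity check of that equivalence for an assumed 16M page; all masks and values below are assumptions, not code from the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t mask_16m = (1ULL << 24) - 1;   /* offset within an assumed 16M page */
    const uint64_t page_mask = ~0xfffULL;         /* assumed 4k TARGET_PAGE_MASK */
    uint64_t pte1 = 0x0000000243000000ULL;        /* hypothetical 16M-aligned real address */
    uint64_t eaddr = 0x0000000010abc123ULL;       /* hypothetical faulting effective address */

    /* Removed code: keep pte1, then fold in the 4k-aligned offset within 16M. */
    uint64_t old_raddr = pte1 | ((eaddr & mask_16m) & page_mask);

    /* New code: full real address of the faulting address. */
    uint64_t new_raddr = (pte1 & ~mask_16m) | (eaddr & mask_16m);

    /* The caller masks with TARGET_PAGE_MASK before filling the qemu tlb. */
    assert((old_raddr & page_mask) == (new_raddr & page_mask));
    return 0;
}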