author     Richard Henderson <rth@twiddle.net>    2010-04-09 10:49:10 -0700
committer  Aurelien Jarno <aurelien@aurel32.net>  2010-04-26 20:06:17 +0200
commit     739734cb5cf90c3e692d41b51fa095240b49ddd7
tree       d56d92d06ff3cc39483a915c459fd6d4bbae3a32   /tcg/hppa/tcg-target.c
parent     f061b40e91b96d32e535cc81362904c685483506
tcg-hppa: Schedule the address masking after the TLB load.
Issue the TLB load as early as possible and perform the address masking while the load is completing.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
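In practical terms, the only change is where tcg_out_andi() is emitted within tcg_out_tlb_read(). A rough before/after sketch of the emission order, reconstructed from the hunks below (intervening instructions elided; r0, r1 and TCG_REG_R20 are the registers this function already uses):

    /* Old order: the mask was computed before the TLB entry load, so the
       comparison that follows the load could stall waiting for TCG_REG_R20. */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
    /* ... */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    /* compare r0 against TCG_REG_R20 */

    /* New order: the load is issued first and the andi is emitted afterwards,
       so the host can perform the masking while the TLB tag load completes. */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
    /* ... */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    /* compare r0 against TCG_REG_R20 */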
Diffstat (limited to 'tcg/hppa/tcg-target.c')
-rw-r--r--   tcg/hppa/tcg-target.c   7
1 file changed, 6 insertions(+), 1 deletion(-)
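As a worked example of the comparison value described by the new comment in the second hunk below (illustrative numbers only, assuming a hypothetical 4 KiB target page, i.e. TARGET_PAGE_BITS = 12 and TARGET_PAGE_MASK = 0xfffff000 for a 32-bit guest, with a 4-byte access so s_bits = 2):

    /* Hypothetical constants for illustration; the real values depend on the guest. */
    TARGET_PAGE_MASK | ((1 << s_bits) - 1)   /* 0xfffff000 | 0x3 = 0xfffff003 */

    /* addrlo & 0xfffff003 keeps the page number plus the two low address bits.
       The tag stored in the TLB is page-aligned, so any access whose low bits are
       nonzero (i.e. misaligned for its size) cannot match and is forced onto the
       slow path, as the added comment explains. */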
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
index 2f3b7708a1..6941e22a59 100644
--- a/tcg/hppa/tcg-target.c
+++ b/tcg/hppa/tcg-target.c
@@ -904,7 +904,6 @@ static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
add that follows. */
tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
- tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
@@ -927,6 +926,12 @@ static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
}
+ /* Compute the value that ought to appear in the TLB for a hit, namely, the page
+ of the address. We include the low N bits of the address to catch unaligned
+ accesses and force them onto the slow path. Do this computation after having
+ issued the load from the TLB slot to give the load time to complete. */
+ tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+
/* If not equal, jump to lab_miss. */
if (TARGET_LONG_BITS == 64) {
tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,