author     Richard Henderson <rth@twiddle.net>    2016-07-14 12:43:06 -0700
committer  Richard Henderson <rth@twiddle.net>    2016-09-16 08:12:06 -0700
commit     85aa80813dd9f5c1f581c743e45678a3bee220f8 (patch)
tree       2a140ecb81d60cf1a593a160c0d09f88ae5a3c7d /tcg/i386
parent     ebc231d7daf1f41b23d8b6a6d1234800b86e5fe2 (diff)
tcg: Support arbitrary size + alignment
Previously we allowed fully unaligned operations, but not operations
that are aligned but with less alignment than the operation size.

In addition, arm32, ia64, mips, and sparc had been omitted from the
previous overalignment patch, which would have led to that alignment
being enforced.

Signed-off-by: Richard Henderson <rth@twiddle.net>
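As a rough illustration of the new semantics (not part of the commit; the concrete sizes below are assumptions chosen for the example), a 4-byte load that only needs 2-byte alignment can now take the fast path, and its masks work out as follows:

/* Illustration only -- not from the patch.  A 4-byte access (s_bits = 2)
 * required to be only 2-byte aligned (a_bits = 1), a combination the
 * fast path previously rejected.
 */
#include <stdio.h>

int main(void)
{
    unsigned a_bits = 1;                    /* required alignment: 1 << 1 = 2 */
    unsigned s_bits = 2;                    /* access size:        1 << 2 = 4 */
    unsigned a_mask = (1u << a_bits) - 1;   /* 0x1 */
    unsigned s_mask = (1u << s_bits) - 1;   /* 0x3 */

    /* The emitted fast path adds s_mask - a_mask to the address and
     * compares it under TARGET_PAGE_MASK | a_mask, so an aligned access
     * that stays within one page still matches its TLB entry.
     */
    printf("a_mask=%#x s_mask=%#x lea_offset=%#x\n",
           a_mask, s_mask, s_mask - a_mask);
    return 0;
}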
Diffstat (limited to 'tcg/i386')
-rw-r--r--  tcg/i386/tcg-target.inc.c  19
1 files changed, 10 insertions, 9 deletions
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 6f8cdca756..e30a122f19 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1202,7 +1202,10 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     TCGType ttype = TCG_TYPE_I32;
     TCGType tlbtype = TCG_TYPE_I32;
     int trexw = 0, hrexw = 0, tlbrexw = 0;
-    int a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+    unsigned s_bits = opc & MO_SIZE;
+    unsigned a_mask = (1 << a_bits) - 1;
+    unsigned s_mask = (1 << s_bits) - 1;
     target_ulong tlb_mask;
 
     if (TCG_TARGET_REG_BITS == 64) {
@@ -1220,17 +1223,15 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     }
 
     tcg_out_mov(s, tlbtype, r0, addrlo);
-    if (a_bits >= 0) {
-        /* A byte access or an alignment check required */
+    /* If the required alignment is at least as large as the access, simply
+       copy the address and mask.  For lesser alignments, check that we don't
+       cross pages for the complete access.  */
+    if (a_bits >= s_bits) {
         tcg_out_mov(s, ttype, r1, addrlo);
-        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     } else {
-        /* For unaligned access check that we don't cross pages using
-           the page address of the last byte.  */
-        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo,
-                             (1 << (opc & MO_SIZE)) - 1);
-        tlb_mask = TARGET_PAGE_MASK;
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
     }
+    tlb_mask = TARGET_PAGE_MASK | a_mask;
 
     tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
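For reference, here is a minimal C model of the comparison the patched fast path performs, assuming a 4 KiB target page; tlb_fast_path_hits, tlb_page and PAGE_BITS are hypothetical names used only for this sketch, not QEMU interfaces:

/* Sketch of the check generated above; not QEMU code.  Assumes a 4 KiB
 * page and a TLB entry holding the page-aligned address (tlb_page).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12
#define PAGE_MASK (~(uint64_t)((1u << PAGE_BITS) - 1))

static bool tlb_fast_path_hits(uint64_t addr, uint64_t tlb_page,
                               unsigned s_bits, unsigned a_bits)
{
    uint64_t a_mask = (1u << a_bits) - 1;
    uint64_t s_mask = (1u << s_bits) - 1;
    uint64_t x;

    if (a_bits >= s_bits) {
        /* Alignment covers the whole access: it cannot cross a page,
           so only the page number and alignment bits need checking.  */
        x = addr;
    } else {
        /* Lesser alignment: bias the address so that an access whose
           last byte spills into the next page changes the compared
           page number, while an in-page access does not.  */
        x = addr + (s_mask - a_mask);
    }
    /* One AND + compare covers both the page and the low alignment
       bits, mirroring tlb_mask = TARGET_PAGE_MASK | a_mask above.  */
    return (x & (PAGE_MASK | a_mask)) == tlb_page;
}

int main(void)
{
    /* 4-byte load with 2-byte alignment required (a_bits < s_bits). */
    printf("%d\n", tlb_fast_path_hits(0x1ffa, 0x1000, 2, 1)); /* 1: in page    */
    printf("%d\n", tlb_fast_path_hits(0x1ffe, 0x1000, 2, 1)); /* 0: crosses    */
    printf("%d\n", tlb_fast_path_hits(0x1ffd, 0x1000, 2, 1)); /* 0: misaligned */
    return 0;
}

The bias trick lets a single masked compare reject both misaligned and page-crossing accesses, so the fast path needs no extra branch for the a_bits < s_bits case.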