author     Richard Henderson <rth@twiddle.net>    2016-07-14 12:43:06 -0700
committer  Richard Henderson <rth@twiddle.net>    2016-09-16 08:12:06 -0700
commit     85aa80813dd9f5c1f581c743e45678a3bee220f8 (patch)
tree       2a140ecb81d60cf1a593a160c0d09f88ae5a3c7d /tcg/ppc/tcg-target.inc.c
parent     ebc231d7daf1f41b23d8b6a6d1234800b86e5fe2 (diff)
tcg: Support arbitrary size + alignment
Previously we allowed fully unaligned operations, but not operations
that are aligned but with less alignment than the operation size.

In addition, arm32, ia64, mips, and sparc had been omitted from the
previous overalignment patch, which would have led to that alignment
being enforced.

Signed-off-by: Richard Henderson <rth@twiddle.net>
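As an illustration of the mechanism the patch relies on, here is a minimal
C sketch of the value the patched fast path feeds to the TLB compare. This
is not QEMU code and the helper name is mine; TARGET_PAGE_BITS is assumed
to be 12, and a_bits/s_bits are the required alignment and the access size,
both as log2:

    #include <stdint.h>

    /* Illustrative helper, not QEMU's. */
    uint64_t tlb_compare_value(uint64_t addr, unsigned a_bits, unsigned s_bits)
    {
        const unsigned page_bits = 12;        /* assumed TARGET_PAGE_BITS */
        unsigned a_mask = (1u << a_bits) - 1;
        unsigned s_mask = (1u << s_bits) - 1;

        if (a_bits < s_bits) {
            /* Bump the address so an access that crosses a page boundary
             * overflows into the next page number before masking. */
            addr += s_mask - a_mask;
        }
        /* Keep the page number plus the low a_bits; those low bits must
         * be zero for the compare against the TLB tag to succeed. */
        return addr & ~(((uint64_t)1 << page_bits) - ((uint64_t)1 << a_bits));
    }

With a_bits = 0 and s_bits = 2 (a fully unaligned 4-byte load), an access at
0xffd is bumped to 0x1000, so the masked value names the next page, the TLB
compare misses, and the straddling access falls through to the slow path.
With a_bits = 2 and s_bits = 2 there is no bump, and an access at 0x1002
keeps its low bits 0b10 through the mask, again forcing a miss and hence the
alignment-fault path.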
Diffstat (limited to 'tcg/ppc/tcg-target.inc.c')
-rw-r--r--  tcg/ppc/tcg-target.inc.c | 58
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index eaf1bd9bfd..d79969096f 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1404,8 +1404,8 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
TCGReg base = TCG_AREG0;
- TCGMemOp s_bits = opc & MO_SIZE;
- int a_bits = get_alignment_bits(opc);
+ unsigned s_bits = opc & MO_SIZE;
+ unsigned a_bits = get_alignment_bits(opc);
/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 64) {
@@ -1458,39 +1458,43 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off);
/* Clear the non-page, non-alignment bits from the address */
- if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
- /* We don't support unaligned accesses on 32-bits, preserve
- * the bottom bits and thus trigger a comparison failure on
- * unaligned accesses
+ if (TCG_TARGET_REG_BITS == 32) {
+ /* We don't support unaligned accesses on 32-bits.
+ * Preserve the bottom bits and thus trigger a comparison
+ * failure on unaligned accesses.
*/
- if (a_bits < 0) {
+ if (a_bits < s_bits) {
a_bits = s_bits;
}
tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
(32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
- } else if (a_bits) {
- /* More than byte access, we need to handle alignment */
- if (a_bits > 0) {
- /* Alignment required by the front-end, same as 32-bits */
- tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
+ } else {
+ TCGReg t = addrlo;
+
+ /* If the access is unaligned, we need to make sure we fail if we
+ * cross a page boundary. The trick is to add the access size-1
+ * to the address before masking the low bits. That will make the
+ * address overflow to the next page if we cross a page boundary,
+ * which will then force a mismatch of the TLB compare.
+ */
+ if (a_bits < s_bits) {
+ unsigned a_mask = (1 << a_bits) - 1;
+ unsigned s_mask = (1 << s_bits) - 1;
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
+ t = TCG_REG_R0;
+ }
+
+ /* Mask the address for the requested alignment. */
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
+ (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+ } else if (a_bits == 0) {
+ tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
+ } else {
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
- } else {
- /* We support unaligned accesses, we need to make sure we fail
- * if we cross a page boundary. The trick is to add the
- * access_size-1 to the address before masking the low bits.
- * That will make the address overflow to the next page if we
- * cross a page boundary which will then force a mismatch of
- * the TLB compare since the next page cannot possibly be in
- * the same TLB index.
- */
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, (1 << s_bits) - 1));
- tcg_out_rld(s, RLDICR, TCG_REG_R0, TCG_REG_R0,
- 0, 63 - TARGET_PAGE_BITS);
}
- } else {
- /* Byte access, just chop off the bits below the page index */
- tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo, 0, 63 - TARGET_PAGE_BITS);
}
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
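The two back-to-back RLDICLs emitted in the 64-bit a_bits > 0 path above are
a rotate-and-mask idiom: rotate the page offset up to the top of the
register, clear the offset bits that lie above the alignment bits, rotate
back. Here is a C model of that sequence; a sketch under my assumptions
(helper names are mine, TARGET_PAGE_BITS is again taken to be 12, and IBM
bit numbering counts bit 0 at the MSB):

    #include <stdint.h>

    /* Rotate left, guarding n == 0 (x >> 64 would be undefined). */
    static uint64_t rotl64(uint64_t x, unsigned n)
    {
        return n ? (x << n) | (x >> (64 - n)) : x;
    }

    /* rldicl ra,rs,sh,mb: rotate rs left by sh, then clear the mb most
     * significant bits. */
    static uint64_t rldicl(uint64_t rs, unsigned sh, unsigned mb)
    {
        return rotl64(rs, sh) & (~(uint64_t)0 >> mb);
    }

    /* Net effect of the pair: addr & ~((1 << page_bits) - (1 << a_bits)),
     * i.e. clear the offset bits above the alignment bits while keeping
     * the page number and the low a_bits.  Illustrative, not QEMU's. */
    uint64_t clear_nonpage_nonalign(uint64_t addr, unsigned a_bits)
    {
        const unsigned page_bits = 12;    /* assumed TARGET_PAGE_BITS */
        uint64_t t = rldicl(addr, 64 - page_bits, page_bits - a_bits);
        return rldicl(t, page_bits, 0);
    }

For example, clear_nonpage_nonalign(0x1234fff, 2) yields 0x1234003: the page
number and the two low (misalignment) bits survive, so an address that is not
4-byte aligned can never match a TLB tag whose low bits are zero.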