author     Sergey Sorokin <afarallax@yandex.ru>    2016-06-23 21:16:46 +0300
committer  Richard Henderson <rth@twiddle.net>     2016-07-05 20:50:13 -0700
commit     1f00b27f17518a1bcb4cedca49eaec96a4d560bd (patch)
tree       90abe3fc60c60ce9ff0aa5c46a87c5589ff55229 /softmmu_template.h
parent     59d7c14eeff8d2ad7f61aed86ce5a176113bc153 (diff)
tcg: Improve the alignment check infrastructure
Some architectures (e.g. ARMv8) need addresses to be aligned to a size
larger than the size of the memory access itself. QEMU's current low-cost
alignment check implementation is enough to support such a check, but it
must be possible to specify the alignment size.

Signed-off-by: Sergey Sorokin <afarallax@yandex.ru>
Message-Id: <1466705806-679898-1-git-send-email-afarallax@yandex.ru>
Signed-off-by: Richard Henderson <rth@twiddle.net>
[rth: Assert in tcg_canonicalize_memop. Leave get_alignment_bits
 available for, though unused by, user-mode. Retain logging difference
 based on ALIGNED_ONLY.]
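The core of the change is visible in the hunks below: instead of re-checking
(addr & (DATA_SIZE - 1)) under MO_ALIGN at several points, each helper derives
a_bits from the memop once and traps misaligned addresses up front. The
following standalone sketch shows the mask check in isolation; the names
demo_is_misaligned and the example values are illustrative only, while the
real helpers use get_alignment_bits(get_memop(oi)) and cpu_unaligned_access().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: a_bits is the log2 of the required alignment; 0 means no
 * alignment requirement.  An access is misaligned when any of the low
 * a_bits of the address are set.  Note a_bits may exceed the log2 of the
 * access size (e.g. a 16-byte alignment requirement on an 8-byte access). */
static bool demo_is_misaligned(uint64_t addr, int a_bits)
{
    return a_bits > 0 && (addr & ((UINT64_C(1) << a_bits) - 1)) != 0;
}

int main(void)
{
    printf("%d\n", demo_is_misaligned(0x1000, 4));  /* 0: 16-byte aligned */
    printf("%d\n", demo_is_misaligned(0x1008, 4));  /* 1: only 8-byte aligned */
    return 0;
}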
Diffstat (limited to 'softmmu_template.h')
-rw-r--r--  softmmu_template.h  |  88
1 file changed, 24 insertions(+), 64 deletions(-)
diff --git a/softmmu_template.h b/softmmu_template.h
index 208f808f3e..4d378ca630 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -171,20 +171,21 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(ADDR_READ)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
@@ -215,10 +216,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
@@ -232,13 +229,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
return res;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
@@ -255,20 +245,21 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(ADDR_READ)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
@@ -299,10 +290,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
@@ -316,13 +303,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
return res;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
return res;
@@ -376,19 +356,20 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(addr_write)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
@@ -416,10 +397,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
@@ -434,13 +411,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
return;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
@@ -456,19 +426,20 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(addr_write)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
@@ -496,10 +467,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
@@ -514,13 +481,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
return;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
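For reference, the a_bits value consumed by the helpers above comes from
get_alignment_bits() applied to the memop. A simplified decoder along these
lines is a sketch under an assumed three-way encoding (the align_req field
below is hypothetical, not the real TCGMemOp bit layout); it shows how "no
requirement", "natural alignment", and an explicit, possibly larger alignment
all map onto a single log2 count.

/* Sketch only: align_req is a hypothetical field, not the real MO_AMASK
 * encoding.  0 = unaligned access allowed, 1 = natural alignment (same as
 * the access size), n > 1 = require a (1 << (n - 1))-byte boundary. */
static int demo_alignment_bits(int size_log2, int align_req)
{
    if (align_req == 0) {
        return 0;                 /* no check performed by the helper */
    } else if (align_req == 1) {
        return size_log2;         /* natural alignment */
    } else {
        return align_req - 1;     /* explicit alignment, may exceed size_log2 */
    }
}

With such an encoding, an 8-byte access (size_log2 = 3) that must sit on a
16-byte boundary would pass align_req = 5 and get back a_bits = 4, which the
helpers above then use in the (addr & ((1 << a_bits) - 1)) check.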