/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
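
/* Note: gating on constant DEBUG_TLB*_GATE values (rather than #ifdef'ing
 * the tlb_debug bodies away) keeps the calls visible to the compiler, so
 * the format strings stay type-checked even when debugging is disabled;
 * the dead branches are then eliminated by the compiler.
 */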

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
void tlb_flush(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}
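
/* Illustrative call (the mode names here are hypothetical, target-defined
 * indexes): the variadic list must be terminated with a negative index,
 * which ends the scan in v_tlb_flush_by_mmuidx() above.
 *
 *     tlb_flush_by_mmuidx(cpu, MMU_KERNEL_IDX, MMU_USER_IDX, -1);
 */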

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}
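
/* As with tlb_flush_by_mmuidx(), the index list ends with a negative
 * value, e.g. (again with a hypothetical mode name):
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, MMU_USER_IDX, -1);
 */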

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
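        /* Unsigned arithmetic folds "start <= addr < start + length"
         * into a single comparison.
         */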
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
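
/* Worked example (illustrative numbers): with an existing 2MB region
 * tracked as tlb_flush_addr=0x40000000, tlb_flush_mask=0xffe00000,
 * adding another 2MB page at 0x40400000 widens the mask until both
 * addresses agree under it: 0xffe00000 -> 0xffc00000 -> 0xff800000.
 * The tracked region becomes 0x40000000/0xff800000, covering both pages.
 */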

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
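        /* Not in the TLB: force a fill via a code load; the value
         * loaded is discarded.
         */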
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
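
/* TGT_LE/TGT_BE convert a host-order value to the target's little- or
 * big-endian representation; BSWAP is expected to be supplied per access
 * size by the templates included below.
 */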

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
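
/* Each inclusion above instantiates the softmmu load/store helpers for
 * one access size (the template consumes and #undefs DATA_SIZE); the
 * 1-byte inclusion, for instance, produces helpers along the lines of
 * helper_ret_ldub_mmu.
 */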

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
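
/* These variants take no explicit return address; ATOMIC_MMU_LOOKUP
 * recovers it with GETPC(), so TCG-generated code can call them directly.
 */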

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
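/* Code reads are issued by the translator itself rather than from
 * TCG-generated code, so there is no guest return address to unwind to;
 * GETPC() is therefore forced to 0 for the _cmmu helpers.
 */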
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"