/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

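/* Flush the TLBs of the MMU modes named in a caller-supplied va_list of
 * mmu_idx values, terminated by a negative index.  Only the named modes
 * (and their victim TLBs) are cleared; the TB jump cache is reset
 * afterwards so that no stale translation can be chained to.
 */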
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

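/* Invalidate a single TLB entry, but only if it actually maps @addr: the
 * page-aligned address is compared against the entry's read, write and
 * code addresses before the entry is wiped.
 */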
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

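/* As with tlb_flush_by_mmuidx(), the trailing arguments are a list of
 * mmu_idx values terminated by a negative value.  An illustrative call
 * (the indexes 0 and 1 here are made up for the example) would be
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, 1, -1);
 *
 * which drops the page from MMU modes 0 and 1 only.
 */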
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

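/* Note that @start and @length describe a range of *host* addresses: the
 * entry's addend converts the guest virtual page address into the
 * corresponding host address before the range check below.
 */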
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

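/* An illustrative sketch of the usual caller: a target's tlb_fill()
 * handler walks the guest page tables, derives a physical address and
 * protection bits, and then installs the mapping.  The names below are
 * generic placeholders, not taken from any particular target:
 *
 *     void mytarget_tlb_fill(CPUState *cs, target_ulong vaddr, ...)
 *     {
 *         ... resolve vaddr to paddr, prot, attrs, mmu_idx ...
 *         tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */
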
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

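/* Perform an MMIO load on behalf of a softmmu helper.  The access is routed
 * through memory_region_dispatch_read() with the entry's MemTxAttrs;
 * mem_io_vaddr and mem_io_pc are recorded first so that a fault raised
 * inside the device access can be attributed to the right guest
 * instruction, and the TB is recompiled via cpu_io_recompile() if it was
 * not translated as being able to perform I/O at this point.
 */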
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)
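
/* For example, VICTIM_TLB_HIT(addr_write, addr) expands to
 *     victim_tlb_hit(env, mmu_idx, index,
 *                    offsetof(CPUTLBEntry, addr_write),
 *                    addr & TARGET_PAGE_MASK)
 * and relies on mmu_idx and index being in scope at the call site.
 */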

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
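
/* Each inclusion of softmmu_template.h above instantiates the softmmu
 * load/store helpers for one access size; for DATA_SIZE 1, for example,
 * that is helper_ret_ldub_mmu() and helper_ret_stb_mmu(), with the wider
 * sizes also getting little- and big-endian variants (see the template
 * for the exact set of names).
 */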

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#define DATA_SIZE 8
#include "atomic_template.h"

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#define DATA_SIZE 8
#include "atomic_template.h"

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS
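
/* With SOFTMMU_CODE_ACCESS defined and MMUSUFFIX now _cmmu, the inclusions
 * below generate the instruction-fetch variants of the load helpers (the
 * _cmmu functions backing cpu_ldub_code() and friends); GETPC() is stubbed
 * out above because there is no TCG call site to unwind for code fetches.
 */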

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"