/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
void tlb_flush(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

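/* Flush the TLB and victim TLB for each MMU mode given in 'argp' as a
 * negative-terminated list of mmu_idx values, then clear the jump cache.
 */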
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

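/* Invalidate a single TLB entry if its read, write or code address
 * matches 'addr'.
 */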
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

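/* Flush one page from the TLBs of the MMU modes given as a
 * negative-terminated list of mmu_idx values, falling back to a full
 * flush of those modes if the page lies in a tracked large-page region.
 */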
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

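/* Return true if the entry allows direct writes to RAM, i.e. none of the
 * TLB_INVALID, TLB_MMIO or TLB_NOTDIRTY flags is set on addr_write.
 */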
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

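/* If the entry targets host RAM within [start, start + length), mark it
 * TLB_NOTDIRTY so the next write is forced back onto the slow path.
 */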
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

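/* Translate a host pointer back to a ram_addr_t, aborting if the pointer
 * does not point into guest RAM.
 */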
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

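/* Re-enable dirty tracking for every TLB and victim TLB entry that maps
 * RAM in the host address range [start1, start1 + length).
 */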
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

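/* Clear TLB_NOTDIRTY from an entry mapping 'vaddr', restoring fast-path
 * writes once the page has been marked dirty.
 */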
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

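/* Print a user-facing diagnostic, and a guest_errors log entry, when the
 * guest tries to execute code outside RAM or ROM.
 */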
static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

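/* Perform an I/O read through the memory region referenced by the iotlb
 * entry, triggering cpu_io_recompile() if I/O is not currently allowed
 * at this point in the translation block.
 */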
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

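/* Write counterpart of io_readx(): dispatch the store to the memory
 * region referenced by the iotlb entry.
 */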
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

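/* TGT_BE/TGT_LE give a value in guest big- or little-endian order; the
 * value is byte-swapped only when that order differs from the target's
 * native endianness.
 */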
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

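/* Instantiate the softmmu load/store helpers (suffix "_mmu") once per
 * access size.
 */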
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers is directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"