/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do { \
        if (DEBUG_TLB_GATE) { \
            g_assert(!cpu->created || qemu_cpu_is_self(cpu)); \
        } \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

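/* Flush the entire TLB of a vCPU.  If called from a thread other than
 * the vCPU's own thread, the flush is deferred to that vCPU via
 * async_run_on_cpu(); pending_tlb_flush coalesces repeated requests so
 * that at most one full flush is queued at a time.
 */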
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    tlb_debug("done\n");

    tb_unlock();
}

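/* Flush only the TLB entries belonging to the MMU modes named in
 * @idxmap (a bitmask of MMU index numbers).  Cross-thread calls are
 * deferred with async_run_on_cpu(); indexes already recorded in
 * pending_tlb_flush are not scheduled a second time.
 */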
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

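/* Invalidate a single TLB entry, but only if it currently maps @addr
 * for read, write or code access.
 */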
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

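/* Flush, in every MMU mode, the TLB entries that map the virtual page
 * containing @addr, deferring to the vCPU thread when necessary.
 */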
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;
    int i;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

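/* As above, but first check whether the page lies inside a tracked
 * large-page region; if it does, fall back to flushing the requested
 * MMU indexes completely.
 */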
static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

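/* Flush the entries for one virtual page from the MMU modes named in
 * @idxmap.  The page address and the index bitmap are packed into a
 * single target_ulong so that they fit into run_on_cpu_data (see the
 * QEMU_BUILD_BUG_ON above).  A target might use it roughly like this,
 * where the MMU index name is purely illustrative:
 *
 *     tlb_flush_page_by_mmuidx(cs, vaddr, 1 << MMU_USER_IDX);
 */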
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

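/* Queue a flush of the page containing @addr on every vCPU. */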
void tlb_flush_page_all(target_ulong addr)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

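/* Walk every main and victim TLB entry of @cpu and re-mark as
 * TLB_NOTDIRTY those whose backing storage falls inside
 * [start1, start1 + length), so the next write again takes the
 * dirty-tracking slow path.
 */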
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

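/* Clear TLB_NOTDIRTY from a single entry if it maps @vaddr for write. */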
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

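/* Convert a host pointer back into a ram_addr_t, aborting rather than
 * returning an error if the pointer does not belong to guest RAM.
 */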
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

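/* Perform the memory-mapped I/O read for a load that hit an MMIO TLB
 * entry.  The iothread lock (BQL) is taken around the dispatch when
 * the target memory region still requires global locking.
 */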
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

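/* Store-side counterpart of io_readx. */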
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

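/* Generate the out-of-line load/store helpers used by the TCG softmmu
 * slow path.  softmmu_template.h is included once per access size
 * (1, 2, 4 and 8 bytes); it consumes DATA_SIZE and the MMUSUFFIX
 * defined here to name the helpers it emits.
 */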
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

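/* The code-fetch variants below reuse softmmu_template.h with the
 * _cmmu suffix and SOFTMMU_CODE_ACCESS defined; GETPC is redefined to
 * 0 because these helpers are not called from TCG-generated code, so
 * there is no translated-code return address to unwind.
 */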
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"