blob: 427379740a6b9c7dade11198650939bc02150f41 [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026#include <stdlib.h>
27#include <stdio.h>
28#include <stdarg.h>
29#include <string.h>
30#include <errno.h>
31#include <unistd.h>
32#include <inttypes.h>
33
bellard6180a182003-09-30 21:04:53 +000034#include "cpu.h"
35#include "exec-all.h"
aurel32ca10f862008-04-11 21:35:42 +000036#include "qemu-common.h"
bellardb67d9a52008-05-23 09:57:34 +000037#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000038#include "hw/hw.h"
aliguori74576192008-10-06 14:02:03 +000039#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000040#include "kvm.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000041#include "qemu-timer.h"
pbrook53a59602006-03-25 19:31:22 +000042#if defined(CONFIG_USER_ONLY)
43#include <qemu.h>
Riku Voipiofd052bf2010-01-25 14:30:49 +020044#include <signal.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010045#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46#include <sys/param.h>
47#if __FreeBSD_version >= 700104
48#define HAVE_KINFO_GETVMMAP
49#define sigqueue sigqueue_freebsd /* avoid redefinition */
50#include <sys/time.h>
51#include <sys/proc.h>
52#include <machine/profile.h>
53#define _KERNEL
54#include <sys/user.h>
55#undef _KERNEL
56#undef sigqueue
57#include <libutil.h>
58#endif
59#endif
pbrook53a59602006-03-25 19:31:22 +000060#endif
bellard54936002003-05-13 00:25:15 +000061
bellardfd6ce8f2003-05-14 19:00:11 +000062//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000063//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000064//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000065//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000066
67/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000068//#define DEBUG_TB_CHECK
69//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000070
ths1196be32007-03-17 15:17:58 +000071//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000072//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000073
pbrook99773bd2006-04-16 15:14:59 +000074#if !defined(CONFIG_USER_ONLY)
75/* TB consistency checks only implemented for usermode emulation. */
76#undef DEBUG_TB_CHECK
77#endif
78
bellard9fa3e852004-01-04 18:06:42 +000079#define SMC_BITMAP_USE_THRESHOLD 10
80
blueswir1bdaf78e2008-10-04 07:24:27 +000081static TranslationBlock *tbs;
bellard26a5f132008-05-28 12:30:31 +000082int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000083TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000084static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000085/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050086spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000087
blueswir1141ac462008-07-26 15:05:57 +000088#if defined(__arm__) || defined(__sparc_v9__)
89/* The prologue must be reachable with a direct jump. ARM and Sparc64
90 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000091 section close to code segment. */
92#define code_gen_section \
93 __attribute__((__section__(".gen_code"))) \
94 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020095#elif defined(_WIN32)
96/* Maximum alignment for Win32 is 16. */
97#define code_gen_section \
98 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000099#else
100#define code_gen_section \
101 __attribute__((aligned (32)))
102#endif
103
104uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000105static uint8_t *code_gen_buffer;
106static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000107/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000108static unsigned long code_gen_buffer_max_size;
bellardfd6ce8f2003-05-14 19:00:11 +0000109uint8_t *code_gen_ptr;
110
pbrooke2eef172008-06-08 01:09:01 +0000111#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000112int phys_ram_fd;
bellard1ccde1c2004-02-06 19:46:14 +0000113uint8_t *phys_ram_dirty;
aliguori74576192008-10-06 14:02:03 +0000114static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000115
/* One contiguous chunk of guest RAM, kept in the singly linked
   'ram_blocks' list.  */
typedef struct RAMBlock {
    /* host virtual address backing this block */
    uint8_t *host;
    /* guest ram_addr_t at which this block starts */
    ram_addr_t offset;
    /* size of the block in bytes */
    ram_addr_t length;
    /* next block in the ram_blocks list, NULL terminated */
    struct RAMBlock *next;
} RAMBlock;
122
123static RAMBlock *ram_blocks;
124/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100125 then we can no longer assume contiguous ram offsets, and external uses
pbrook94a6b542009-04-11 17:15:54 +0000126 of this variable will break. */
Anthony Liguoric227f092009-10-01 16:12:16 -0500127ram_addr_t last_ram_offset;
pbrooke2eef172008-06-08 01:09:01 +0000128#endif
bellard9fa3e852004-01-04 18:06:42 +0000129
bellard6a00d602005-11-21 23:25:50 +0000130CPUState *first_cpu;
131/* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000133CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000134/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000135 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000136 2 = Adaptive rate instruction counting. */
137int use_icount = 0;
138/* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000141
/* Per-target-page bookkeeping for the translator.  One PageDesc exists
   for every guest page that has (or had) translated code on it.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* lazily built bitmap of code bytes on the page; NULL until the
       write count crosses SMC_BITMAP_USE_THRESHOLD */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* protection/validity flags (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
153
Paul Brook41c1b1c2010-03-12 16:54:58 +0000154/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800155 while in user mode we want it to be based on virtual addresses. */
156#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000157#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
158# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
159#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800160# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000161#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000162#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800163# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000164#endif
bellard54936002003-05-13 00:25:15 +0000165
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800166/* Size of the L2 (and L3, etc) page tables. */
167#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000168#define L2_SIZE (1 << L2_BITS)
169
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800170/* The bits remaining after N lower levels of page tables. */
171#define P_L1_BITS_REM \
172 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
173#define V_L1_BITS_REM \
174 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
175
176/* Size of the L1 page table. Avoid silly small sizes. */
177#if P_L1_BITS_REM < 4
178#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
179#else
180#define P_L1_BITS P_L1_BITS_REM
181#endif
182
183#if V_L1_BITS_REM < 4
184#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
185#else
186#define V_L1_BITS V_L1_BITS_REM
187#endif
188
189#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
190#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
191
192#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
193#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
194
bellard83fb7ad2004-07-05 21:25:26 +0000195unsigned long qemu_real_host_page_size;
196unsigned long qemu_host_page_bits;
197unsigned long qemu_host_page_size;
198unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000199
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800200/* This is a multi-level map on the virtual address space.
201 The bottom level has pointers to PageDesc. */
202static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000203
pbrooke2eef172008-06-08 01:09:01 +0000204#if !defined(CONFIG_USER_ONLY)
/* Per-physical-page descriptor (system emulation only).  */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* offset of this page within its memory region (for MMIO callbacks) */
    ram_addr_t region_offset;
} PhysPageDesc;
210
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800211/* This is a multi-level map on the physical address space.
212 The bottom level has pointers to PhysPageDesc. */
213static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000214
pbrooke2eef172008-06-08 01:09:01 +0000215static void io_mem_init(void);
216
bellard33417e72003-08-10 21:47:01 +0000217/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000218CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000220void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000221static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000222static int io_mem_watch;
223#endif
bellard33417e72003-08-10 21:47:01 +0000224
bellard34865132003-10-05 14:28:56 +0000225/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200226#ifdef WIN32
227static const char *logfilename = "qemu.log";
228#else
blueswir1d9b630f2008-10-05 09:57:08 +0000229static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200230#endif
bellard34865132003-10-05 14:28:56 +0000231FILE *logfile;
232int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000233static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000234
bellarde3db7222005-01-26 22:00:47 +0000235/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000236#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000237static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000238#endif
bellarde3db7222005-01-26 22:00:47 +0000239static int tb_flush_count;
240static int tb_phys_invalidate_count;
241
bellard7cb69ca2008-05-10 10:55:51 +0000242#ifdef _WIN32
243static void map_exec(void *addr, long size)
244{
245 DWORD old_protect;
246 VirtualProtect(addr, size,
247 PAGE_EXECUTE_READWRITE, &old_protect);
248
249}
250#else
251static void map_exec(void *addr, long size)
252{
bellard43694152008-05-29 09:35:57 +0000253 unsigned long start, end, page_size;
bellard7cb69ca2008-05-10 10:55:51 +0000254
bellard43694152008-05-29 09:35:57 +0000255 page_size = getpagesize();
bellard7cb69ca2008-05-10 10:55:51 +0000256 start = (unsigned long)addr;
bellard43694152008-05-29 09:35:57 +0000257 start &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000258
259 end = (unsigned long)addr + size;
bellard43694152008-05-29 09:35:57 +0000260 end += page_size - 1;
261 end &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000262
263 mprotect((void *)start, end - start,
264 PROT_READ | PROT_WRITE | PROT_EXEC);
265}
266#endif
267
/* Discover the host page size, derive the qemu_host_page_* globals from
   it, and (for BSD user-mode emulation) mark every page already mapped
   by the host as PAGE_RESERVED so the guest cannot reuse it.  */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been pre-set on the command line;
       otherwise default it to the real host page size, clamped up to
       at least one target page.  */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.x: ask the kernel for the process map directly. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* mapping extends past the guest address space:
                           reserve everything up to the top */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file exposed by the
           BSD linuxulator.  */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
356
Paul Brook41c1b1c2010-03-12 16:54:58 +0000357static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
bellard54936002003-05-13 00:25:15 +0000358{
Paul Brook41c1b1c2010-03-12 16:54:58 +0000359 PageDesc *pd;
360 void **lp;
361 int i;
362
pbrook17e23772008-06-09 13:47:45 +0000363#if defined(CONFIG_USER_ONLY)
Paul Brook2e9a5712010-05-05 16:32:59 +0100364 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800365# define ALLOC(P, SIZE) \
366 do { \
367 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
368 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800369 } while (0)
pbrook17e23772008-06-09 13:47:45 +0000370#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800371# define ALLOC(P, SIZE) \
372 do { P = qemu_mallocz(SIZE); } while (0)
pbrook17e23772008-06-09 13:47:45 +0000373#endif
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800374
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800375 /* Level 1. Always allocated. */
376 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
377
378 /* Level 2..N-1. */
379 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
380 void **p = *lp;
381
382 if (p == NULL) {
383 if (!alloc) {
384 return NULL;
385 }
386 ALLOC(p, sizeof(void *) * L2_SIZE);
387 *lp = p;
388 }
389
390 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000391 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800392
393 pd = *lp;
394 if (pd == NULL) {
395 if (!alloc) {
396 return NULL;
397 }
398 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
399 *lp = pd;
400 }
401
402#undef ALLOC
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800403
404 return pd + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000405}
406
/* Look up the PageDesc for page number 'index' without allocating;
   returns NULL if the page has never been seen.  */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
411
Paul Brook6d9a1302010-02-28 23:55:53 +0000412#if !defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -0500413static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
bellard92e873b2004-05-21 14:52:29 +0000414{
pbrooke3f4e2a2006-04-08 20:02:06 +0000415 PhysPageDesc *pd;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800416 void **lp;
417 int i;
bellard92e873b2004-05-21 14:52:29 +0000418
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800419 /* Level 1. Always allocated. */
420 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000421
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800422 /* Level 2..N-1. */
423 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
424 void **p = *lp;
425 if (p == NULL) {
426 if (!alloc) {
427 return NULL;
428 }
429 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
430 }
431 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000432 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800433
pbrooke3f4e2a2006-04-08 20:02:06 +0000434 pd = *lp;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800435 if (pd == NULL) {
pbrooke3f4e2a2006-04-08 20:02:06 +0000436 int i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800437
438 if (!alloc) {
bellard108c49b2005-07-24 12:55:09 +0000439 return NULL;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800440 }
441
442 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
443
pbrook67c4d232009-02-23 13:16:07 +0000444 for (i = 0; i < L2_SIZE; i++) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800445 pd[i].phys_offset = IO_MEM_UNASSIGNED;
446 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
pbrook67c4d232009-02-23 13:16:07 +0000447 }
bellard92e873b2004-05-21 14:52:29 +0000448 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800449
450 return pd + (index & (L2_SIZE - 1));
bellard92e873b2004-05-21 14:52:29 +0000451}
452
/* Look up the PhysPageDesc for physical page number 'index' without
   allocating; returns NULL if it does not exist yet.  */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
457
Anthony Liguoric227f092009-10-01 16:12:16 -0500458static void tlb_protect_code(ram_addr_t ram_addr);
459static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000460 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000461#define mmap_lock() do { } while(0)
462#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000463#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000464
bellard43694152008-05-29 09:35:57 +0000465#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
466
467#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100468/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000469 user mode. It will change when a dedicated libc will be used */
470#define USE_STATIC_CODE_GEN_BUFFER
471#endif
472
473#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200474static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
475 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000476#endif
477
/* Allocate (or adopt) the buffer that holds generated host code and
   size the TB bookkeeping arrays from it.  'tb_size' of 0 selects a
   default.  Placement constraints per host arch keep the buffer within
   direct-branch range of the prologue.  */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode: use the statically allocated buffer, just make it
       executable.  */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 2GB so rel32 calls reach it. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic host: heap-allocate and flip the protection bits. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave headroom so a TB being generated cannot overrun the buffer
       before the flush threshold check fires.  */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
571
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
590
pbrook9656f322008-07-01 20:01:19 +0000591#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
592
/* VMState post-load hook for the common CPU state: scrub obsolete
   interrupt bits and invalidate the TLB, since the mappings saved on
   the source machine are not valid here.  Always returns 0 (success). */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200604
/* Migration description for the target-independent part of CPUState:
   only 'halted' and 'interrupt_request' are transferred; everything
   else is target-specific and saved elsewhere.  */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
pbrook9656f322008-07-01 20:01:19 +0000617#endif
618
Glauber Costa950f1472009-06-09 12:15:18 -0400619CPUState *qemu_get_cpu(int cpu)
620{
621 CPUState *env = first_cpu;
622
623 while (env) {
624 if (env->cpu_index == cpu)
625 break;
626 env = env->next_cpu;
627 }
628
629 return env;
630}
631
/* Register a freshly created CPU: append it to the global first_cpu
   list, assign it the next free cpu_index, and hook it into savevm/
   migration.  In user mode the list is protected by cpu_list_lock.  */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* Walk to the tail of the list, counting CPUs to derive the new
       cpu_index.  */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Register both the common and the target-specific CPU state with
       the snapshot/migration machinery.  */
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
661
/* Discard the self-modifying-code bitmap of a page (if any) and reset
   its write counter, so the SMC heuristic starts from scratch.  */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
670
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

/* Recursive helper: walk one subtree of the l1_map.  'level' 0 means
   *lp points at an array of PageDesc leaves; otherwise it points at an
   array of child-table pointers.  Empty subtrees are skipped.  */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}
693
/* Clear the TB lists and SMC bitmaps of every PageDesc by walking all
   top-level entries of the l1_map.  Called from tb_flush().  */
static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
701
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Generated code should never have outrun the buffer; if it did,
       memory is already corrupt, so abort.  */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Drop every CPU's direct-lookup cache of translated blocks.  */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* Reuse the buffer from the start for the next generation of TBs. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
730
731#ifdef DEBUG_TB_CHECK
732
j_mayerbc98a7e2007-04-04 07:55:12 +0000733static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000734{
735 TranslationBlock *tb;
736 int i;
737 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000738 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
739 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000740 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
741 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000742 printf("ERROR invalidate: address=" TARGET_FMT_lx
743 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000744 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000745 }
746 }
747 }
748}
749
/* verify that all the pages have correct rights for code */
/* Debug check: every page spanned by a live TB must have been made
   non-writable (self-modifying-code protection), so PAGE_WRITE set on
   either end of a TB indicates a protection bug. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* check both the first and the last page of the TB */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
767
768#endif
769
770/* invalidate one TB */
771static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
772 int next_offset)
773{
774 TranslationBlock *tb1;
775 for(;;) {
776 tb1 = *ptb;
777 if (tb1 == tb) {
778 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
779 break;
780 }
781 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
782 }
783}
784
bellard9fa3e852004-01-04 18:06:42 +0000785static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
786{
787 TranslationBlock *tb1;
788 unsigned int n1;
789
790 for(;;) {
791 tb1 = *ptb;
792 n1 = (long)tb1 & 3;
793 tb1 = (TranslationBlock *)((long)tb1 & ~3);
794 if (tb1 == tb) {
795 *ptb = tb1->page_next[n1];
796 break;
797 }
798 ptb = &tb1->page_next[n1];
799 }
800}
801
bellardd4e81642003-05-25 16:46:15 +0000802static inline void tb_jmp_remove(TranslationBlock *tb, int n)
803{
804 TranslationBlock *tb1, **ptb;
805 unsigned int n1;
806
807 ptb = &tb->jmp_next[n];
808 tb1 = *ptb;
809 if (tb1) {
810 /* find tb(n) in circular list */
811 for(;;) {
812 tb1 = *ptb;
813 n1 = (long)tb1 & 3;
814 tb1 = (TranslationBlock *)((long)tb1 & ~3);
815 if (n1 == n && tb1 == tb)
816 break;
817 if (n1 == 2) {
818 ptb = &tb1->jmp_first;
819 } else {
820 ptb = &tb1->jmp_next[n1];
821 }
822 }
823 /* now we can suppress tb(n) from the list */
824 *ptb = tb->jmp_next[n];
825
826 tb->jmp_next[n] = NULL;
827 }
828}
829
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
/* Repoints the patched jump back at the TB's own epilogue stub
   (tc_ptr + tb_next_offset[n]), i.e. "return to the main loop". */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
836
Paul Brook41c1b1c2010-03-12 16:54:58 +0000837void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +0000838{
bellard6a00d602005-11-21 23:25:50 +0000839 CPUState *env;
bellardfd6ce8f2003-05-14 19:00:11 +0000840 PageDesc *p;
bellard8a40a182005-11-20 10:35:40 +0000841 unsigned int h, n1;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000842 tb_page_addr_t phys_pc;
bellard8a40a182005-11-20 10:35:40 +0000843 TranslationBlock *tb1, *tb2;
ths3b46e622007-09-17 08:09:54 +0000844
bellard9fa3e852004-01-04 18:06:42 +0000845 /* remove the TB from the hash list */
846 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
847 h = tb_phys_hash_func(phys_pc);
ths5fafdf22007-09-16 21:08:06 +0000848 tb_remove(&tb_phys_hash[h], tb,
bellard9fa3e852004-01-04 18:06:42 +0000849 offsetof(TranslationBlock, phys_hash_next));
bellardfd6ce8f2003-05-14 19:00:11 +0000850
bellard9fa3e852004-01-04 18:06:42 +0000851 /* remove the TB from the page list */
852 if (tb->page_addr[0] != page_addr) {
853 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
854 tb_page_remove(&p->first_tb, tb);
855 invalidate_page_bitmap(p);
856 }
857 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
858 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
859 tb_page_remove(&p->first_tb, tb);
860 invalidate_page_bitmap(p);
861 }
862
bellard8a40a182005-11-20 10:35:40 +0000863 tb_invalidated_flag = 1;
864
865 /* remove the TB from the hash list */
866 h = tb_jmp_cache_hash_func(tb->pc);
bellard6a00d602005-11-21 23:25:50 +0000867 for(env = first_cpu; env != NULL; env = env->next_cpu) {
868 if (env->tb_jmp_cache[h] == tb)
869 env->tb_jmp_cache[h] = NULL;
870 }
bellard8a40a182005-11-20 10:35:40 +0000871
872 /* suppress this TB from the two jump lists */
873 tb_jmp_remove(tb, 0);
874 tb_jmp_remove(tb, 1);
875
876 /* suppress any remaining jumps to this TB */
877 tb1 = tb->jmp_first;
878 for(;;) {
879 n1 = (long)tb1 & 3;
880 if (n1 == 2)
881 break;
882 tb1 = (TranslationBlock *)((long)tb1 & ~3);
883 tb2 = tb1->jmp_next[n1];
884 tb_reset_jump(tb1, n1);
885 tb1->jmp_next[n1] = NULL;
886 tb1 = tb2;
887 }
888 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
889
bellarde3db7222005-01-26 22:00:47 +0000890 tb_phys_invalidate_count++;
bellard9fa3e852004-01-04 18:06:42 +0000891}
892
/* Set bits [start, start + len) in the bitmap 'tab' (bit i of the
   bitmap lives at tab[i >> 3], bit position i & 7). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* range confined to a single byte (possibly empty) */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *p |= mask;
        }
    } else {
        /* partial head byte */
        *p++ |= mask;
        start = (start + 8) & ~7;
        /* run of full bytes */
        while (start < (end & ~7)) {
            *p++ = 0xff;
            start += 8;
        }
        /* partial tail byte, if any */
        if (start < end) {
            *p |= ~(0xff << (end & 7));
        }
    }
}
919
/* Build the SMC code bitmap of page 'p': one bit per byte of the page,
   set where some TB's translated guest code lives.  Writes that miss
   every set bit can then skip TB invalidation (see
   tb_invalidate_phys_page_fast). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the page, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;       /* low bits tag which page slot this is */
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: covered from page start */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
947
pbrook2e70f6e2008-06-29 01:03:05 +0000948TranslationBlock *tb_gen_code(CPUState *env,
949 target_ulong pc, target_ulong cs_base,
950 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +0000951{
952 TranslationBlock *tb;
953 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000954 tb_page_addr_t phys_pc, phys_page2;
955 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +0000956 int code_gen_size;
957
Paul Brook41c1b1c2010-03-12 16:54:58 +0000958 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +0000959 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +0000960 if (!tb) {
961 /* flush must be done */
962 tb_flush(env);
963 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +0000964 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +0000965 /* Don't forget to invalidate previous TB info. */
966 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +0000967 }
968 tc_ptr = code_gen_ptr;
969 tb->tc_ptr = tc_ptr;
970 tb->cs_base = cs_base;
971 tb->flags = flags;
972 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +0000973 cpu_gen_code(env, tb, &code_gen_size);
bellardd720b932004-04-25 17:57:43 +0000974 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +0000975
bellardd720b932004-04-25 17:57:43 +0000976 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +0000977 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +0000978 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +0000979 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +0000980 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +0000981 }
Paul Brook41c1b1c2010-03-12 16:54:58 +0000982 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +0000983 return tb;
bellardd720b932004-04-25 17:57:43 +0000984}
ths3b46e622007-09-17 08:09:54 +0000985
bellard9fa3e852004-01-04 18:06:42 +0000986/* invalidate all TBs which intersect with the target physical page
987 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +0000988 the same physical page. 'is_cpu_write_access' should be true if called
989 from a real cpu write access: the virtual CPU will exit the current
990 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +0000991void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +0000992 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +0000993{
aliguori6b917542008-11-18 19:46:41 +0000994 TranslationBlock *tb, *tb_next, *saved_tb;
bellardd720b932004-04-25 17:57:43 +0000995 CPUState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000996 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +0000997 PageDesc *p;
998 int n;
999#ifdef TARGET_HAS_PRECISE_SMC
1000 int current_tb_not_found = is_cpu_write_access;
1001 TranslationBlock *current_tb = NULL;
1002 int current_tb_modified = 0;
1003 target_ulong current_pc = 0;
1004 target_ulong current_cs_base = 0;
1005 int current_flags = 0;
1006#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001007
1008 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001009 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001010 return;
ths5fafdf22007-09-16 21:08:06 +00001011 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001012 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1013 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001014 /* build code bitmap */
1015 build_page_bitmap(p);
1016 }
1017
1018 /* we remove all the TBs in the range [start, end[ */
1019 /* XXX: see if in some cases it could be faster to invalidate all the code */
1020 tb = p->first_tb;
1021 while (tb != NULL) {
1022 n = (long)tb & 3;
1023 tb = (TranslationBlock *)((long)tb & ~3);
1024 tb_next = tb->page_next[n];
1025 /* NOTE: this is subtle as a TB may span two physical pages */
1026 if (n == 0) {
1027 /* NOTE: tb_end may be after the end of the page, but
1028 it is not a problem */
1029 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1030 tb_end = tb_start + tb->size;
1031 } else {
1032 tb_start = tb->page_addr[1];
1033 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1034 }
1035 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001036#ifdef TARGET_HAS_PRECISE_SMC
1037 if (current_tb_not_found) {
1038 current_tb_not_found = 0;
1039 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001040 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001041 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001042 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001043 }
1044 }
1045 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001046 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001047 /* If we are modifying the current TB, we must stop
1048 its execution. We could be more precise by checking
1049 that the modification is after the current PC, but it
1050 would require a specialized function to partially
1051 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001052
bellardd720b932004-04-25 17:57:43 +00001053 current_tb_modified = 1;
ths5fafdf22007-09-16 21:08:06 +00001054 cpu_restore_state(current_tb, env,
pbrook2e70f6e2008-06-29 01:03:05 +00001055 env->mem_io_pc, NULL);
aliguori6b917542008-11-18 19:46:41 +00001056 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1057 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001058 }
1059#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001060 /* we need to do that to handle the case where a signal
1061 occurs while doing tb_phys_invalidate() */
1062 saved_tb = NULL;
1063 if (env) {
1064 saved_tb = env->current_tb;
1065 env->current_tb = NULL;
1066 }
bellard9fa3e852004-01-04 18:06:42 +00001067 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001068 if (env) {
1069 env->current_tb = saved_tb;
1070 if (env->interrupt_request && env->current_tb)
1071 cpu_interrupt(env, env->interrupt_request);
1072 }
bellard9fa3e852004-01-04 18:06:42 +00001073 }
1074 tb = tb_next;
1075 }
1076#if !defined(CONFIG_USER_ONLY)
1077 /* if no code remaining, no need to continue to use slow writes */
1078 if (!p->first_tb) {
1079 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001080 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001081 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001082 }
1083 }
1084#endif
1085#ifdef TARGET_HAS_PRECISE_SMC
1086 if (current_tb_modified) {
1087 /* we generate a block containing just the instruction
1088 modifying the memory. It will ensure that it cannot modify
1089 itself */
bellardea1c1802004-06-14 18:56:36 +00001090 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001091 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001092 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001093 }
1094#endif
1095}
1096
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for small writes: consult the page's code bitmap (when
   present) and only fall back to the full range invalidation if the
   written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        /* shift so the written bits start at bit 0, then mask 'len' bits */
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1123
bellard9fa3e852004-01-04 18:06:42 +00001124#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001125static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001126 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001127{
aliguori6b917542008-11-18 19:46:41 +00001128 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001129 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001130 int n;
bellardd720b932004-04-25 17:57:43 +00001131#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001132 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001133 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001134 int current_tb_modified = 0;
1135 target_ulong current_pc = 0;
1136 target_ulong current_cs_base = 0;
1137 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001138#endif
bellard9fa3e852004-01-04 18:06:42 +00001139
1140 addr &= TARGET_PAGE_MASK;
1141 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001142 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001143 return;
1144 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001145#ifdef TARGET_HAS_PRECISE_SMC
1146 if (tb && pc != 0) {
1147 current_tb = tb_find_pc(pc);
1148 }
1149#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001150 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001151 n = (long)tb & 3;
1152 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001153#ifdef TARGET_HAS_PRECISE_SMC
1154 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001155 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001156 /* If we are modifying the current TB, we must stop
1157 its execution. We could be more precise by checking
1158 that the modification is after the current PC, but it
1159 would require a specialized function to partially
1160 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001161
bellardd720b932004-04-25 17:57:43 +00001162 current_tb_modified = 1;
1163 cpu_restore_state(current_tb, env, pc, puc);
aliguori6b917542008-11-18 19:46:41 +00001164 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1165 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001166 }
1167#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001168 tb_phys_invalidate(tb, addr);
1169 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001170 }
1171 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001172#ifdef TARGET_HAS_PRECISE_SMC
1173 if (current_tb_modified) {
1174 /* we generate a block containing just the instruction
1175 modifying the memory. It will ensure that it cannot modify
1176 itself */
bellardea1c1802004-06-14 18:56:36 +00001177 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001178 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001179 cpu_resume_from_signal(env, puc);
1180 }
1181#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001182}
bellard9fa3e852004-01-04 18:06:42 +00001183#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001184
/* add the tb in the target page and protect it if necessary */
/* Links TB 'tb' (as page slot 'n', 0 or 1) onto the list of the guest
   page at 'page_addr', then write-protects the page so self-modifying
   code is detected: via mprotect in user mode, via the softmmu TLB
   otherwise. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page's list, tagging the pointer with the slot index */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: gather the combined
           protection and clear PAGE_WRITE on each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1238
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    /* NULL means "out of room": either the static TB array or the code
       generation buffer is exhausted; the caller must tb_flush and retry */
    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
1253
pbrook2e70f6e2008-06-29 01:03:05 +00001254void tb_free(TranslationBlock *tb)
1255{
thsbf20dc02008-06-30 17:22:19 +00001256 /* In practice this is mostly used for single use temporary TB
pbrook2e70f6e2008-06-29 01:03:05 +00001257 Ignore the hard cases and just back up if this TB happens to
1258 be the last one generated. */
1259 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1260 code_gen_ptr = tb->tc_ptr;
1261 nb_tbs--;
1262 }
1263}
1264
bellard9fa3e852004-01-04 18:06:42 +00001265/* add a new TB and link it to the physical page tables. phys_page2 is
1266 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001267void tb_link_page(TranslationBlock *tb,
1268 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001269{
bellard9fa3e852004-01-04 18:06:42 +00001270 unsigned int h;
1271 TranslationBlock **ptb;
1272
pbrookc8a706f2008-06-02 16:16:42 +00001273 /* Grab the mmap lock to stop another thread invalidating this TB
1274 before we are done. */
1275 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001276 /* add in the physical hash table */
1277 h = tb_phys_hash_func(phys_pc);
1278 ptb = &tb_phys_hash[h];
1279 tb->phys_hash_next = *ptb;
1280 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001281
1282 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001283 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1284 if (phys_page2 != -1)
1285 tb_alloc_page(tb, 1, phys_page2);
1286 else
1287 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001288
bellardd4e81642003-05-25 16:46:15 +00001289 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1290 tb->jmp_next[0] = NULL;
1291 tb->jmp_next[1] = NULL;
1292
1293 /* init original jump addresses */
1294 if (tb->tb_next_offset[0] != 0xffff)
1295 tb_reset_jump(tb, 0);
1296 if (tb->tb_next_offset[1] != 0xffff)
1297 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001298
1299#ifdef DEBUG_TB_CHECK
1300 tb_page_check();
1301#endif
pbrookc8a706f2008-06-02 16:16:42 +00001302 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001303}
1304
bellarda513fe12003-05-27 23:29:48 +00001305/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1306 tb[1].tc_ptr. Return NULL if not found */
1307TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1308{
1309 int m_min, m_max, m;
1310 unsigned long v;
1311 TranslationBlock *tb;
1312
1313 if (nb_tbs <= 0)
1314 return NULL;
1315 if (tc_ptr < (unsigned long)code_gen_buffer ||
1316 tc_ptr >= (unsigned long)code_gen_ptr)
1317 return NULL;
1318 /* binary search (cf Knuth) */
1319 m_min = 0;
1320 m_max = nb_tbs - 1;
1321 while (m_min <= m_max) {
1322 m = (m_min + m_max) >> 1;
1323 tb = &tbs[m];
1324 v = (unsigned long)tb->tc_ptr;
1325 if (v == tc_ptr)
1326 return tb;
1327 else if (tc_ptr < v) {
1328 m_max = m - 1;
1329 } else {
1330 m_min = m + 1;
1331 }
ths5fafdf22007-09-16 21:08:06 +00001332 }
bellarda513fe12003-05-27 23:29:48 +00001333 return &tbs[m_max];
1334}
bellard75012672003-06-21 13:11:07 +00001335
bellardea041c02003-06-25 16:16:50 +00001336static void tb_reset_jump_recursive(TranslationBlock *tb);
1337
/* Undo the chained jump 'n' of 'tb': remove tb from the target TB's
   circular incoming-jump list, repoint the generated jump at tb's own
   epilogue, then recursively unchain the target TB as well. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)    /* tag 2 marks the list head (the target TB) */
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1376
1377static void tb_reset_jump_recursive(TranslationBlock *tb)
1378{
1379 tb_reset_jump_recursive2(tb, 0);
1380 tb_reset_jump_recursive2(tb, 1);
1381}
1382
bellard1fddef42005-04-17 19:16:13 +00001383#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001384#if defined(CONFIG_USER_ONLY)
1385static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1386{
1387 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1388}
1389#else
bellardd720b932004-04-25 17:57:43 +00001390static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1391{
Anthony Liguoric227f092009-10-01 16:12:16 -05001392 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001393 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001394 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001395 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001396
pbrookc2f07f82006-04-08 17:14:56 +00001397 addr = cpu_get_phys_page_debug(env, pc);
1398 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1399 if (!p) {
1400 pd = IO_MEM_UNASSIGNED;
1401 } else {
1402 pd = p->phys_offset;
1403 }
1404 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001405 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001406}
bellardc27004e2005-01-03 23:35:10 +00001407#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001408#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001409
Paul Brookc527ee82010-03-01 03:31:14 +00001410#if defined(CONFIG_USER_ONLY)
1411void cpu_watchpoint_remove_all(CPUState *env, int mask)
1412
1413{
1414}
1415
1416int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1417 int flags, CPUWatchpoint **watchpoint)
1418{
1419 return -ENOSYS;
1420}
1421#else
pbrook6658ffb2007-03-16 23:58:11 +00001422/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001423int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1424 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001425{
aliguorib4051332008-11-18 20:14:20 +00001426 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001427 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001428
aliguorib4051332008-11-18 20:14:20 +00001429 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1430 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1431 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1432 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1433 return -EINVAL;
1434 }
aliguoria1d1bb32008-11-18 20:07:32 +00001435 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001436
aliguoria1d1bb32008-11-18 20:07:32 +00001437 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001438 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001439 wp->flags = flags;
1440
aliguori2dc9f412008-11-18 20:56:59 +00001441 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001442 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001443 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001444 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001445 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001446
pbrook6658ffb2007-03-16 23:58:11 +00001447 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001448
1449 if (watchpoint)
1450 *watchpoint = wp;
1451 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001452}
1453
aliguoria1d1bb32008-11-18 20:07:32 +00001454/* Remove a specific watchpoint. */
1455int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1456 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001457{
aliguorib4051332008-11-18 20:14:20 +00001458 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001459 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001460
Blue Swirl72cf2d42009-09-12 07:36:22 +00001461 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001462 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001463 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001464 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001465 return 0;
1466 }
1467 }
aliguoria1d1bb32008-11-18 20:07:32 +00001468 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001469}
1470
aliguoria1d1bb32008-11-18 20:07:32 +00001471/* Remove a specific watchpoint by reference. */
1472void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1473{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001474 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001475
aliguoria1d1bb32008-11-18 20:07:32 +00001476 tlb_flush_page(env, watchpoint->vaddr);
1477
1478 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001479}
1480
aliguoria1d1bb32008-11-18 20:07:32 +00001481/* Remove all matching watchpoints. */
1482void cpu_watchpoint_remove_all(CPUState *env, int mask)
1483{
aliguoric0ce9982008-11-25 22:13:57 +00001484 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001485
Blue Swirl72cf2d42009-09-12 07:36:22 +00001486 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001487 if (wp->flags & mask)
1488 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001489 }
aliguoria1d1bb32008-11-18 20:07:32 +00001490}
Paul Brookc527ee82010-03-01 03:31:14 +00001491#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001492
/* Add a breakpoint.  On success returns 0 and, if BREAKPOINT is
   non-NULL, stores the new entry there; returns -ENOSYS when the
   target has no in-circuit-emulation (breakpoint) support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* Force retranslation of the TB containing pc so the breakpoint
       takes effect. */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1520
1521/* Remove a specific breakpoint. */
1522int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1523{
1524#if defined(TARGET_HAS_ICE)
1525 CPUBreakpoint *bp;
1526
Blue Swirl72cf2d42009-09-12 07:36:22 +00001527 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001528 if (bp->pc == pc && bp->flags == flags) {
1529 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001530 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001531 }
bellard4c3a88a2003-07-26 12:06:08 +00001532 }
aliguoria1d1bb32008-11-18 20:07:32 +00001533 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001534#else
aliguoria1d1bb32008-11-18 20:07:32 +00001535 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001536#endif
1537}
1538
/* Remove a specific breakpoint by reference.  Unlinks the entry,
   invalidates the translated block containing its pc, and frees it. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1550
1551/* Remove all matching breakpoints. */
1552void cpu_breakpoint_remove_all(CPUState *env, int mask)
1553{
1554#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001555 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001556
Blue Swirl72cf2d42009-09-12 07:36:22 +00001557 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001558 if (bp->flags & mask)
1559 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001560 }
bellard4c3a88a2003-07-26 12:06:08 +00001561#endif
1562}
1563
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* KVM applies the change through its own debug machinery. */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1581
/* enable or disable low levels log */
/* Sets the global log mask; opens the log file on the first nonzero
   mask and closes it when the mask drops back to zero.  Exits the
   process if the log file cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* First open truncates; any reopen after that appends
           (log_append is set below once a file has been opened). */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1609
/* Change the log file name and reopen the log (if active) under the
   new name by re-running cpu_set_log with the current level.
   NOTE(review): the previous logfilename string is not freed here and
   the strdup() result is unchecked -- presumably the initial value may
   be a static default that must not be freed; confirm before changing. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001619
/* Detach the CPU from the translated block it is currently executing
   (if any) so it returns to the main loop at the next block boundary. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* Serialize concurrent unlink attempts against each other. */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1639
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in MASK on the target CPU and make sure it
   notices them promptly (by kicking the vcpu thread or unlinking the
   current translated block). */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* Force the instruction counter to expire so the CPU loop
           re-checks interrupt_request immediately. */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* New interrupts outside an I/O instruction would make icount
           non-deterministic; treat that as a fatal error. */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1671
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1676
/* Request that the CPU leave its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    /* Break any chained TBs so the request is noticed promptly. */
    cpu_unlink_tb(env);
}
1682
/* Table mapping log-mask names (as parsed by cpu_str_to_log_mask) to
   CPU_LOG_* bits, with a help string for each; terminated by a
   zero-mask entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1714
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001715#ifndef CONFIG_USER_ONLY
/* List of registered CPUPhysMemoryClients, notified of mapping changes
   and dirty-log events by the cpu_notify_* helpers below. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1718
/* Notify every registered memory client that the physical range
   [start_addr, start_addr + size) now maps to phys_offset. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1728
1729static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1730 target_phys_addr_t end)
1731{
1732 CPUPhysMemoryClient *client;
1733 QLIST_FOREACH(client, &memory_client_list, list) {
1734 int r = client->sync_dirty_bitmap(client, start, end);
1735 if (r < 0)
1736 return r;
1737 }
1738 return 0;
1739}
1740
1741static int cpu_notify_migration_log(int enable)
1742{
1743 CPUPhysMemoryClient *client;
1744 QLIST_FOREACH(client, &memory_client_list, list) {
1745 int r = client->migration_log(client, enable);
1746 if (r < 0)
1747 return r;
1748 }
1749 return 0;
1750}
1751
/* Recursively walk one subtree of the physical page table, invoking
   client->set_memory for every mapped page.  LEVEL counts the remaining
   intermediate levels; at level 0 *lp points at an array of
   PhysPageDesc, otherwise at an array of child pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                /* NOTE(review): this passes pd[i].region_offset as the
                   start address, while cpu_notify_set_memory passes a
                   guest-physical start_addr -- looks inconsistent for
                   non-RAM regions; confirm against set_memory callers. */
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1775
1776static void phys_page_for_each(CPUPhysMemoryClient *client)
1777{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001778 int i;
1779 for (i = 0; i < P_L1_SIZE; ++i) {
1780 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1781 l1_phys_map + 1);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001782 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001783}
1784
/* Register a new memory client and replay the whole existing physical
   page mapping to it, so it starts with a complete view. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1790
/* Unregister a memory client; it receives no further notifications. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1795#endif
1796
/* Return nonzero iff the first N bytes of S1 equal the whole
   NUL-terminated string S2 (i.e. S2 has length N and matches). */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) == (size_t)n && strncmp(s1, s2, n) == 0)
        return 1;
    return 0;
}
ths3b46e622007-09-17 08:09:54 +00001803
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* p1 marks the end of the current comma-separated token. */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" selects every known log item. */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* Unknown token: signal error to the caller. */
            return 0;
        }
    found:
        /* After the "all" branch, item points at the zero-mask
           terminator entry, so this OR is a harmless no-op there. */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
bellardea041c02003-06-25 16:16:50 +00001836
/* Report a fatal emulation error: print the formatted message and a
   CPU state dump to stderr (and to the log, if enabled), then abort().
   Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* A va_list may only be consumed once; keep a copy for the log. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT handler so abort() terminates us
           even if the guest program installed its own handler. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1876
thsc5be9f02007-02-28 20:20:53 +00001877CPUState *cpu_copy(CPUState *env)
1878{
ths01ba9812007-12-09 02:22:57 +00001879 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001880 CPUState *next_cpu = new_env->next_cpu;
1881 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001882#if defined(TARGET_HAS_ICE)
1883 CPUBreakpoint *bp;
1884 CPUWatchpoint *wp;
1885#endif
1886
thsc5be9f02007-02-28 20:20:53 +00001887 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001888
1889 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001890 new_env->next_cpu = next_cpu;
1891 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001892
1893 /* Clone all break/watchpoints.
1894 Note: Once we support ptrace with hw-debug register access, make sure
1895 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001896 QTAILQ_INIT(&env->breakpoints);
1897 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001898#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001899 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001900 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1901 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001902 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001903 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1904 wp->flags, NULL);
1905 }
1906#endif
1907
thsc5be9f02007-02-28 20:20:53 +00001908 return new_env;
1909}
1910
bellard01243112004-01-04 15:48:17 +00001911#if !defined(CONFIG_USER_ONLY)
1912
/* Invalidate tb_jmp_cache entries that could reference a TB overlapping
   the flushed page.  A TB may span two pages, so entries hashed from
   the preceding page are cleared as well. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1927
/* Canonical invalid TLB entry: the all-ones addresses never compare
   equal to a page-aligned lookup address, so assigning this entry to a
   slot invalidates it. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};
1934
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* Invalidate every slot in every MMU mode. */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* Reset the large-page tracking state (see tlb_add_large_page). */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
1961
/* Invalidate one TLB entry if any of its read/write/code addresses
   matches the page-aligned addr (the comparison keeps the invalid
   marker bit, so already-invalid entries never match). */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1973
/* Flush all TLB entries for one virtual page, falling back to a full
   flush when the page lies inside a tracked large-page region. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* Clear the slot for this page in every MMU mode. */
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2003
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG forces writes to this page through the
       slow path, where self-modifying code is detected. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2012
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Setting CODE_DIRTY_FLAG lets writes take the fast path again. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2020
/* If a RAM-backed writable TLB entry targets host memory inside
   [start, start+length), redirect its writes through the notdirty
   slow path so the page is re-marked dirty on the next store. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* Unsigned subtraction doubles as a >= start check. */
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
2032
/* Note: start and end must be within the same ram block. */
/* Clear the given dirty flags for [start, end) and re-arm dirty
   tracking in every CPU's TLB for that range. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    /* Walk every TLB slot in every MMU mode of every CPU. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2068
aliguori74576192008-10-06 14:02:03 +00002069int cpu_physical_memory_set_dirty_tracking(int enable)
2070{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002071 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002072 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002073 ret = cpu_notify_migration_log(!!enable);
2074 return ret;
aliguori74576192008-10-06 14:02:03 +00002075}
2076
/* Return nonzero while migration dirty tracking is active. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2081
Anthony Liguoric227f092009-10-01 16:12:16 -05002082int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2083 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002084{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002085 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002086
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002087 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002088 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002089}
2090
/* Re-arm dirty tracking for one TLB entry: if it maps RAM whose page is
   currently clean, force writes through the notdirty slow path. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* Reconstruct the host pointer and map it back to a ram_addr. */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2105
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    /* Re-check every slot in every MMU mode. */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
2116
/* Clear the notdirty marker from one TLB entry for vaddr, restoring
   fast-path writes to that page. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
2122
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* Restore fast-path writes in every MMU mode's slot for this page. */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
2135
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    /* tlb_flush_addr == -1 means no large page is tracked yet. */
    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until one region covers both the old range and
       the new page. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2158
2159/* Add a new TLB entry. At most one entry for a given virtual address
2160 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2161 supplied size is only used by tlb_flush_page. */
2162void tlb_set_page(CPUState *env, target_ulong vaddr,
2163 target_phys_addr_t paddr, int prot,
2164 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002165{
bellard92e873b2004-05-21 14:52:29 +00002166 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002167 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002168 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002169 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002170 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002171 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002172 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002173 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002174 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002175
Paul Brookd4c430a2010-03-17 02:14:28 +00002176 assert(size >= TARGET_PAGE_SIZE);
2177 if (size != TARGET_PAGE_SIZE) {
2178 tlb_add_large_page(env, vaddr, size);
2179 }
bellard92e873b2004-05-21 14:52:29 +00002180 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002181 if (!p) {
2182 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002183 } else {
2184 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002185 }
2186#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00002187 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2188 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00002189#endif
2190
pbrook0f459d12008-06-09 00:20:13 +00002191 address = vaddr;
2192 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2193 /* IO memory case (romd handled later) */
2194 address |= TLB_MMIO;
2195 }
pbrook5579c7f2009-04-11 14:47:08 +00002196 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002197 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2198 /* Normal RAM. */
2199 iotlb = pd & TARGET_PAGE_MASK;
2200 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2201 iotlb |= IO_MEM_NOTDIRTY;
2202 else
2203 iotlb |= IO_MEM_ROM;
2204 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002205 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002206 It would be nice to pass an offset from the base address
2207 of that region. This would avoid having to special case RAM,
2208 and avoid full address decoding in every device.
2209 We can't use the high bits of pd for this because
2210 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002211 iotlb = (pd & ~TARGET_PAGE_MASK);
2212 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002213 iotlb += p->region_offset;
2214 } else {
2215 iotlb += paddr;
2216 }
pbrook0f459d12008-06-09 00:20:13 +00002217 }
pbrook6658ffb2007-03-16 23:58:11 +00002218
pbrook0f459d12008-06-09 00:20:13 +00002219 code_address = address;
2220 /* Make accesses to pages with watchpoints go via the
2221 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002222 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002223 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002224 iotlb = io_mem_watch + paddr;
2225 /* TODO: The memory case can be optimized by not trapping
2226 reads of pages with a write breakpoint. */
2227 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002228 }
pbrook0f459d12008-06-09 00:20:13 +00002229 }
balrogd79acba2007-06-26 20:01:13 +00002230
pbrook0f459d12008-06-09 00:20:13 +00002231 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2232 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2233 te = &env->tlb_table[mmu_idx][index];
2234 te->addend = addend - vaddr;
2235 if (prot & PAGE_READ) {
2236 te->addr_read = address;
2237 } else {
2238 te->addr_read = -1;
2239 }
edgar_igl5c751e92008-05-06 08:44:21 +00002240
pbrook0f459d12008-06-09 00:20:13 +00002241 if (prot & PAGE_EXEC) {
2242 te->addr_code = code_address;
2243 } else {
2244 te->addr_code = -1;
2245 }
2246 if (prot & PAGE_WRITE) {
2247 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2248 (pd & IO_MEM_ROMD)) {
2249 /* Write access calls the I/O callback. */
2250 te->addr_write = address | TLB_MMIO;
2251 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2252 !cpu_physical_memory_is_dirty(pd)) {
2253 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002254 } else {
pbrook0f459d12008-06-09 00:20:13 +00002255 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002256 }
pbrook0f459d12008-06-09 00:20:13 +00002257 } else {
2258 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002259 }
bellard9fa3e852004-01-04 18:06:42 +00002260}
2261
bellard01243112004-01-04 15:48:17 +00002262#else
2263
bellardee8b7022004-02-03 23:35:10 +00002264void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002265{
2266}
2267
bellard2e126692004-04-25 21:28:44 +00002268void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002269{
2270}
2271
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002272/*
2273 * Walks guest process memory "regions" one by one
2274 * and calls callback function 'fn' for each region.
2275 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002276
2277struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002278{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002279 walk_memory_regions_fn fn;
2280 void *priv;
2281 unsigned long start;
2282 int prot;
2283};
bellard9fa3e852004-01-04 18:06:42 +00002284
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002285static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002286 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002287{
2288 if (data->start != -1ul) {
2289 int rc = data->fn(data->priv, data->start, end, data->prot);
2290 if (rc != 0) {
2291 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002292 }
bellard33417e72003-08-10 21:47:01 +00002293 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002294
2295 data->start = (new_prot ? end : -1ul);
2296 data->prot = new_prot;
2297
2298 return 0;
2299}
2300
2301static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002302 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002303{
Paul Brookb480d9b2010-03-12 23:23:29 +00002304 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002305 int i, rc;
2306
2307 if (*lp == NULL) {
2308 return walk_memory_regions_end(data, base, 0);
2309 }
2310
2311 if (level == 0) {
2312 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002313 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002314 int prot = pd[i].flags;
2315
2316 pa = base | (i << TARGET_PAGE_BITS);
2317 if (prot != data->prot) {
2318 rc = walk_memory_regions_end(data, pa, prot);
2319 if (rc != 0) {
2320 return rc;
2321 }
2322 }
2323 }
2324 } else {
2325 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002326 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002327 pa = base | ((abi_ulong)i <<
2328 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002329 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2330 if (rc != 0) {
2331 return rc;
2332 }
2333 }
2334 }
2335
2336 return 0;
2337}
2338
2339int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2340{
2341 struct walk_memory_regions_data data;
2342 unsigned long i;
2343
2344 data.fn = fn;
2345 data.priv = priv;
2346 data.start = -1ul;
2347 data.prot = 0;
2348
2349 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002350 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002351 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2352 if (rc != 0) {
2353 return rc;
2354 }
2355 }
2356
2357 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002358}
2359
Paul Brookb480d9b2010-03-12 23:23:29 +00002360static int dump_region(void *priv, abi_ulong start,
2361 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002362{
2363 FILE *f = (FILE *)priv;
2364
Paul Brookb480d9b2010-03-12 23:23:29 +00002365 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2366 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002367 start, end, end - start,
2368 ((prot & PAGE_READ) ? 'r' : '-'),
2369 ((prot & PAGE_WRITE) ? 'w' : '-'),
2370 ((prot & PAGE_EXEC) ? 'x' : '-'));
2371
2372 return (0);
2373}
2374
2375/* dump memory mappings */
2376void page_dump(FILE *f)
2377{
2378 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2379 "start", "end", "size", "prot");
2380 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002381}
2382
pbrook53a59602006-03-25 19:31:22 +00002383int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002384{
bellard9fa3e852004-01-04 18:06:42 +00002385 PageDesc *p;
2386
2387 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002388 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002389 return 0;
2390 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002391}
2392
Richard Henderson376a7902010-03-10 15:57:04 -08002393/* Modify the flags of a page and invalidate the code if necessary.
2394 The flag PAGE_WRITE_ORG is positioned automatically depending
2395 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002396void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002397{
Richard Henderson376a7902010-03-10 15:57:04 -08002398 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002399
Richard Henderson376a7902010-03-10 15:57:04 -08002400 /* This function should never be called with addresses outside the
2401 guest address space. If this assert fires, it probably indicates
2402 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002403#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2404 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002405#endif
2406 assert(start < end);
2407
bellard9fa3e852004-01-04 18:06:42 +00002408 start = start & TARGET_PAGE_MASK;
2409 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002410
2411 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002412 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002413 }
2414
2415 for (addr = start, len = end - start;
2416 len != 0;
2417 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2418 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2419
2420 /* If the write protection bit is set, then we invalidate
2421 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002422 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002423 (flags & PAGE_WRITE) &&
2424 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002425 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002426 }
2427 p->flags = flags;
2428 }
bellard9fa3e852004-01-04 18:06:42 +00002429}
2430
ths3d97b402007-11-02 19:02:07 +00002431int page_check_range(target_ulong start, target_ulong len, int flags)
2432{
2433 PageDesc *p;
2434 target_ulong end;
2435 target_ulong addr;
2436
Richard Henderson376a7902010-03-10 15:57:04 -08002437 /* This function should never be called with addresses outside the
2438 guest address space. If this assert fires, it probably indicates
2439 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002440#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2441 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002442#endif
2443
Richard Henderson3e0650a2010-03-29 10:54:42 -07002444 if (len == 0) {
2445 return 0;
2446 }
Richard Henderson376a7902010-03-10 15:57:04 -08002447 if (start + len - 1 < start) {
2448 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002449 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002450 }
balrog55f280c2008-10-28 10:24:11 +00002451
ths3d97b402007-11-02 19:02:07 +00002452 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2453 start = start & TARGET_PAGE_MASK;
2454
Richard Henderson376a7902010-03-10 15:57:04 -08002455 for (addr = start, len = end - start;
2456 len != 0;
2457 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002458 p = page_find(addr >> TARGET_PAGE_BITS);
2459 if( !p )
2460 return -1;
2461 if( !(p->flags & PAGE_VALID) )
2462 return -1;
2463
bellarddae32702007-11-14 10:51:00 +00002464 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002465 return -1;
bellarddae32702007-11-14 10:51:00 +00002466 if (flags & PAGE_WRITE) {
2467 if (!(p->flags & PAGE_WRITE_ORG))
2468 return -1;
2469 /* unprotect the page if it was put read-only because it
2470 contains translated code */
2471 if (!(p->flags & PAGE_WRITE)) {
2472 if (!page_unprotect(addr, 0, NULL))
2473 return -1;
2474 }
2475 return 0;
2476 }
ths3d97b402007-11-02 19:02:07 +00002477 }
2478 return 0;
2479}
2480
bellard9fa3e852004-01-04 18:06:42 +00002481/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002482 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002483int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002484{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002485 unsigned int prot;
2486 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002487 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002488
pbrookc8a706f2008-06-02 16:16:42 +00002489 /* Technically this isn't safe inside a signal handler. However we
2490 know this only ever happens in a synchronous SEGV handler, so in
2491 practice it seems to be ok. */
2492 mmap_lock();
2493
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002494 p = page_find(address >> TARGET_PAGE_BITS);
2495 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002496 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002497 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002498 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002499
bellard9fa3e852004-01-04 18:06:42 +00002500 /* if the page was really writable, then we change its
2501 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002502 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2503 host_start = address & qemu_host_page_mask;
2504 host_end = host_start + qemu_host_page_size;
2505
2506 prot = 0;
2507 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2508 p = page_find(addr >> TARGET_PAGE_BITS);
2509 p->flags |= PAGE_WRITE;
2510 prot |= p->flags;
2511
bellard9fa3e852004-01-04 18:06:42 +00002512 /* and since the content will be modified, we must invalidate
2513 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002514 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002515#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002516 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002517#endif
bellard9fa3e852004-01-04 18:06:42 +00002518 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002519 mprotect((void *)g2h(host_start), qemu_host_page_size,
2520 prot & PAGE_BITS);
2521
2522 mmap_unlock();
2523 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002524 }
pbrookc8a706f2008-06-02 16:16:42 +00002525 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002526 return 0;
2527}
2528
bellard6a00d602005-11-21 23:25:50 +00002529static inline void tlb_set_dirty(CPUState *env,
2530 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002531{
2532}
bellard9fa3e852004-01-04 18:06:42 +00002533#endif /* defined(CONFIG_USER_ONLY) */
2534
pbrooke2eef172008-06-08 01:09:01 +00002535#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002536
Paul Brookc04b2b72010-03-01 03:31:14 +00002537#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2538typedef struct subpage_t {
2539 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002540 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2541 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002542} subpage_t;
2543
Anthony Liguoric227f092009-10-01 16:12:16 -05002544static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2545 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002546static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2547 ram_addr_t orig_memory,
2548 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002549#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2550 need_subpage) \
2551 do { \
2552 if (addr > start_addr) \
2553 start_addr2 = 0; \
2554 else { \
2555 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2556 if (start_addr2 > 0) \
2557 need_subpage = 1; \
2558 } \
2559 \
blueswir149e9fba2007-05-30 17:25:06 +00002560 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002561 end_addr2 = TARGET_PAGE_SIZE - 1; \
2562 else { \
2563 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2564 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2565 need_subpage = 1; \
2566 } \
2567 } while (0)
2568
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002569/* register physical memory.
2570 For RAM, 'size' must be a multiple of the target page size.
2571 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002572 io memory page. The address used when calling the IO function is
2573 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002574 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002575 before calculating this offset. This should not be a problem unless
2576 the low bits of start_addr and region_offset differ. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002577void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2578 ram_addr_t size,
2579 ram_addr_t phys_offset,
2580 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002581{
Anthony Liguoric227f092009-10-01 16:12:16 -05002582 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002583 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002584 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002585 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002586 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002587
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002588 cpu_notify_set_memory(start_addr, size, phys_offset);
2589
pbrook67c4d232009-02-23 13:16:07 +00002590 if (phys_offset == IO_MEM_UNASSIGNED) {
2591 region_offset = start_addr;
2592 }
pbrook8da3ff12008-12-01 18:59:50 +00002593 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002594 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002595 end_addr = start_addr + (target_phys_addr_t)size;
blueswir149e9fba2007-05-30 17:25:06 +00002596 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002597 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2598 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002599 ram_addr_t orig_memory = p->phys_offset;
2600 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002601 int need_subpage = 0;
2602
2603 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2604 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002605 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002606 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2607 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002608 &p->phys_offset, orig_memory,
2609 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002610 } else {
2611 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2612 >> IO_MEM_SHIFT];
2613 }
pbrook8da3ff12008-12-01 18:59:50 +00002614 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2615 region_offset);
2616 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002617 } else {
2618 p->phys_offset = phys_offset;
2619 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2620 (phys_offset & IO_MEM_ROMD))
2621 phys_offset += TARGET_PAGE_SIZE;
2622 }
2623 } else {
2624 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2625 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002626 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002627 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002628 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002629 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002630 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002631 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002632 int need_subpage = 0;
2633
2634 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2635 end_addr2, need_subpage);
2636
Richard Hendersonf6405242010-04-22 16:47:31 -07002637 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002638 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002639 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002640 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002641 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002642 phys_offset, region_offset);
2643 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002644 }
2645 }
2646 }
pbrook8da3ff12008-12-01 18:59:50 +00002647 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002648 }
ths3b46e622007-09-17 08:09:54 +00002649
bellard9d420372006-06-25 22:25:22 +00002650 /* since each CPU stores ram addresses in its TLB cache, we must
2651 reset the modified entries */
2652 /* XXX: slow ! */
2653 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2654 tlb_flush(env, 1);
2655 }
bellard33417e72003-08-10 21:47:01 +00002656}
2657
bellardba863452006-09-24 18:41:10 +00002658/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002659ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002660{
2661 PhysPageDesc *p;
2662
2663 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2664 if (!p)
2665 return IO_MEM_UNASSIGNED;
2666 return p->phys_offset;
2667}
2668
Anthony Liguoric227f092009-10-01 16:12:16 -05002669void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002670{
2671 if (kvm_enabled())
2672 kvm_coalesce_mmio_region(addr, size);
2673}
2674
Anthony Liguoric227f092009-10-01 16:12:16 -05002675void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002676{
2677 if (kvm_enabled())
2678 kvm_uncoalesce_mmio_region(addr, size);
2679}
2680
/* Drain any pending coalesced-MMIO writes buffered by KVM. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2686
Marcelo Tosattic9027602010-03-01 20:25:08 -03002687#if defined(__linux__) && !defined(TARGET_S390X)
2688
2689#include <sys/vfs.h>
2690
2691#define HUGETLBFS_MAGIC 0x958458f6
2692
2693static long gethugepagesize(const char *path)
2694{
2695 struct statfs fs;
2696 int ret;
2697
2698 do {
2699 ret = statfs(path, &fs);
2700 } while (ret != 0 && errno == EINTR);
2701
2702 if (ret != 0) {
Michael Tokarev6adc0542010-03-27 16:35:37 +03002703 perror(path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002704 return 0;
2705 }
2706
2707 if (fs.f_type != HUGETLBFS_MAGIC)
2708 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2709
2710 return fs.f_bsize;
2711}
2712
2713static void *file_ram_alloc(ram_addr_t memory, const char *path)
2714{
2715 char *filename;
2716 void *area;
2717 int fd;
2718#ifdef MAP_POPULATE
2719 int flags;
2720#endif
2721 unsigned long hpagesize;
2722
2723 hpagesize = gethugepagesize(path);
2724 if (!hpagesize) {
2725 return NULL;
2726 }
2727
2728 if (memory < hpagesize) {
2729 return NULL;
2730 }
2731
2732 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2733 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2734 return NULL;
2735 }
2736
2737 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2738 return NULL;
2739 }
2740
2741 fd = mkstemp(filename);
2742 if (fd < 0) {
Michael Tokarev6adc0542010-03-27 16:35:37 +03002743 perror("unable to create backing store for hugepages");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002744 free(filename);
2745 return NULL;
2746 }
2747 unlink(filename);
2748 free(filename);
2749
2750 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2751
2752 /*
2753 * ftruncate is not supported by hugetlbfs in older
2754 * hosts, so don't bother bailing out on errors.
2755 * If anything goes wrong with it under other filesystems,
2756 * mmap will fail.
2757 */
2758 if (ftruncate(fd, memory))
2759 perror("ftruncate");
2760
2761#ifdef MAP_POPULATE
2762 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2763 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2764 * to sidestep this quirk.
2765 */
2766 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2767 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2768#else
2769 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2770#endif
2771 if (area == MAP_FAILED) {
2772 perror("file_ram_alloc: can't mmap RAM pages");
2773 close(fd);
2774 return (NULL);
2775 }
2776 return area;
2777}
2778#endif
2779
Anthony Liguoric227f092009-10-01 16:12:16 -05002780ram_addr_t qemu_ram_alloc(ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002781{
2782 RAMBlock *new_block;
2783
pbrook94a6b542009-04-11 17:15:54 +00002784 size = TARGET_PAGE_ALIGN(size);
2785 new_block = qemu_malloc(sizeof(*new_block));
2786
Marcelo Tosattic9027602010-03-01 20:25:08 -03002787 if (mem_path) {
2788#if defined (__linux__) && !defined(TARGET_S390X)
2789 new_block->host = file_ram_alloc(size, mem_path);
Marcelo Tosatti618a5682010-05-03 18:12:23 -03002790 if (!new_block->host) {
2791 new_block->host = qemu_vmalloc(size);
2792#ifdef MADV_MERGEABLE
2793 madvise(new_block->host, size, MADV_MERGEABLE);
2794#endif
2795 }
Alexander Graf6b024942009-12-05 12:44:25 +01002796#else
Marcelo Tosattic9027602010-03-01 20:25:08 -03002797 fprintf(stderr, "-mem-path option unsupported\n");
2798 exit(1);
2799#endif
2800 } else {
2801#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2802 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2803 new_block->host = mmap((void*)0x1000000, size,
2804 PROT_EXEC|PROT_READ|PROT_WRITE,
2805 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2806#else
2807 new_block->host = qemu_vmalloc(size);
Alexander Graf6b024942009-12-05 12:44:25 +01002808#endif
Izik Eidusccb167e2009-10-08 16:39:39 +02002809#ifdef MADV_MERGEABLE
Marcelo Tosattic9027602010-03-01 20:25:08 -03002810 madvise(new_block->host, size, MADV_MERGEABLE);
Izik Eidusccb167e2009-10-08 16:39:39 +02002811#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03002812 }
pbrook94a6b542009-04-11 17:15:54 +00002813 new_block->offset = last_ram_offset;
2814 new_block->length = size;
2815
2816 new_block->next = ram_blocks;
2817 ram_blocks = new_block;
2818
2819 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2820 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2821 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2822 0xff, size >> TARGET_PAGE_BITS);
2823
2824 last_ram_offset += size;
2825
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002826 if (kvm_enabled())
2827 kvm_setup_guest_memory(new_block->host, size);
2828
pbrook94a6b542009-04-11 17:15:54 +00002829 return new_block->offset;
2830}
bellarde9a1ab12007-02-08 23:08:38 +00002831
Anthony Liguoric227f092009-10-01 16:12:16 -05002832void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002833{
pbrook94a6b542009-04-11 17:15:54 +00002834 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002835}
2836
pbrookdc828ca2009-04-09 22:21:07 +00002837/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002838 With the exception of the softmmu code in this file, this should
2839 only be used for local memory (e.g. video ram) that the device owns,
2840 and knows it isn't going to access beyond the end of the block.
2841
2842 It should not be used for general purpose DMA.
2843 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2844 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002845void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002846{
pbrook94a6b542009-04-11 17:15:54 +00002847 RAMBlock *prev;
2848 RAMBlock **prevp;
2849 RAMBlock *block;
2850
pbrook94a6b542009-04-11 17:15:54 +00002851 prev = NULL;
2852 prevp = &ram_blocks;
2853 block = ram_blocks;
2854 while (block && (block->offset > addr
2855 || block->offset + block->length <= addr)) {
2856 if (prev)
2857 prevp = &prev->next;
2858 prev = block;
2859 block = block->next;
2860 }
2861 if (!block) {
2862 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2863 abort();
2864 }
2865 /* Move this entry to to start of the list. */
2866 if (prev) {
2867 prev->next = block->next;
2868 block->next = *prevp;
2869 *prevp = block;
2870 }
2871 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002872}
2873
pbrook5579c7f2009-04-11 14:47:08 +00002874/* Some of the softmmu routines need to translate from a host pointer
2875 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002876ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002877{
pbrook94a6b542009-04-11 17:15:54 +00002878 RAMBlock *block;
2879 uint8_t *host = ptr;
2880
pbrook94a6b542009-04-11 17:15:54 +00002881 block = ram_blocks;
2882 while (block && (block->host > host
2883 || block->host + block->length <= host)) {
pbrook94a6b542009-04-11 17:15:54 +00002884 block = block->next;
2885 }
2886 if (!block) {
2887 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2888 abort();
2889 }
2890 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002891}
2892
Anthony Liguoric227f092009-10-01 16:12:16 -05002893static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002894{
pbrook67d3b952006-12-18 05:03:52 +00002895#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002896 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002897#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002898#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002899 do_unassigned_access(addr, 0, 0, 0, 1);
2900#endif
2901 return 0;
2902}
2903
Anthony Liguoric227f092009-10-01 16:12:16 -05002904static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002905{
2906#ifdef DEBUG_UNASSIGNED
2907 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2908#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002909#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002910 do_unassigned_access(addr, 0, 0, 0, 2);
2911#endif
2912 return 0;
2913}
2914
Anthony Liguoric227f092009-10-01 16:12:16 -05002915static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002916{
2917#ifdef DEBUG_UNASSIGNED
2918 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2919#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002920#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002921 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002922#endif
bellard33417e72003-08-10 21:47:01 +00002923 return 0;
2924}
2925
/* Byte write to an unassigned guest physical address: the value is
 * discarded; optionally logged, and on SPARC/MicroBlaze the
 * target-specific unassigned-access hook fires (second argument 1
 * distinguishes writes from the read handlers above). */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2935
/* 16-bit write to an unassigned guest physical address; value is
 * discarded.  See unassigned_mem_writeb(). */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2945
/* 32-bit write to an unassigned guest physical address; value is
 * discarded.  See unassigned_mem_writeb(). */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
2955
/* Read dispatch table for unassigned memory:
   index 0 = 8-bit, 1 = 16-bit, 2 = 32-bit access. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
2961
/* Write dispatch table for unassigned memory:
   index 0 = 8-bit, 1 = 16-bit, 2 = 32-bit access. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2967
/* Byte store into RAM routed through IO_MEM_NOTDIRTY: performs the
 * write while keeping the dirty bitmap and any translated code that
 * covers the page coherent. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* page may still hold translated code: invalidate the TBs for
           this byte, then re-read the possibly updated flags */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    /* set every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2987
/* 16-bit variant of notdirty_mem_writeb(): store plus dirty-bitmap /
 * translated-code bookkeeping. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* invalidate any TBs covering these two bytes first */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    /* set every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3007
/* 32-bit variant of notdirty_mem_writeb(): store plus dirty-bitmap /
 * translated-code bookkeeping. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* invalidate any TBs covering these four bytes first */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    /* set every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3027
/* Placeholder read table for I/O slots that are only ever written
   (io_mem_init() uses it for IO_MEM_ROM and IO_MEM_NOTDIRTY). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3033
/* Write dispatch table for IO_MEM_NOTDIRTY pages:
   index 0 = 8-bit, 1 = 16-bit, 2 = 32-bit access. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3039
/* Generate a debug exception if a watchpoint has been hit.
 * 'offset' is the in-page offset of the access, 'len_mask' masks off
 * the low bits covered by the access size, and 'flags' selects
 * BP_MEM_READ / BP_MEM_WRITE matching. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* reconstruct the full guest virtual address of the access from
       the faulting page recorded in mem_io_vaddr */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* two-sided mask comparison so either the access or the
           watchpoint may be the wider range */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* roll CPU state back to the faulting instruction and
                   drop the current TB so it can be regenerated */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* retranslate a single-instruction TB so the debug
                       interrupt fires right after the access */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3084
pbrook6658ffb2007-03-16 23:58:11 +00003085/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3086 so these check for a hit then pass through to the normal out-of-line
3087 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003088static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003089{
aliguorib4051332008-11-18 20:14:20 +00003090 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003091 return ldub_phys(addr);
3092}
3093
Anthony Liguoric227f092009-10-01 16:12:16 -05003094static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003095{
aliguorib4051332008-11-18 20:14:20 +00003096 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003097 return lduw_phys(addr);
3098}
3099
Anthony Liguoric227f092009-10-01 16:12:16 -05003100static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003101{
aliguorib4051332008-11-18 20:14:20 +00003102 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003103 return ldl_phys(addr);
3104}
3105
Anthony Liguoric227f092009-10-01 16:12:16 -05003106static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003107 uint32_t val)
3108{
aliguorib4051332008-11-18 20:14:20 +00003109 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003110 stb_phys(addr, val);
3111}
3112
Anthony Liguoric227f092009-10-01 16:12:16 -05003113static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003114 uint32_t val)
3115{
aliguorib4051332008-11-18 20:14:20 +00003116 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003117 stw_phys(addr, val);
3118}
3119
Anthony Liguoric227f092009-10-01 16:12:16 -05003120static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003121 uint32_t val)
3122{
aliguorib4051332008-11-18 20:14:20 +00003123 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003124 stl_phys(addr, val);
3125}
3126
/* Read dispatch table for pages containing watchpoints:
   index 0 = 8-bit, 1 = 16-bit, 2 = 32-bit access. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3132
/* Write dispatch table for pages containing watchpoints:
   index 0 = 8-bit, 1 = 16-bit, 2 = 32-bit access. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003138
/* Generic subpage read: look up the per-chunk I/O slot and region
 * offset for 'addr' inside 'mmio', then dispatch through the global
 * io_mem_read table.  'len' is the size index (0=8, 1=16, 2=32 bit). */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3153
/* Generic subpage write: mirror of subpage_readlen() for stores.
 * 'len' is the size index (0=8, 1=16, 2=32 bit). */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3167
Anthony Liguoric227f092009-10-01 16:12:16 -05003168static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003169{
blueswir1db7b5422007-05-26 17:36:03 +00003170 return subpage_readlen(opaque, addr, 0);
3171}
3172
Anthony Liguoric227f092009-10-01 16:12:16 -05003173static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003174 uint32_t value)
3175{
blueswir1db7b5422007-05-26 17:36:03 +00003176 subpage_writelen(opaque, addr, value, 0);
3177}
3178
Anthony Liguoric227f092009-10-01 16:12:16 -05003179static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003180{
blueswir1db7b5422007-05-26 17:36:03 +00003181 return subpage_readlen(opaque, addr, 1);
3182}
3183
Anthony Liguoric227f092009-10-01 16:12:16 -05003184static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003185 uint32_t value)
3186{
blueswir1db7b5422007-05-26 17:36:03 +00003187 subpage_writelen(opaque, addr, value, 1);
3188}
3189
Anthony Liguoric227f092009-10-01 16:12:16 -05003190static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003191{
blueswir1db7b5422007-05-26 17:36:03 +00003192 return subpage_readlen(opaque, addr, 2);
3193}
3194
Richard Hendersonf6405242010-04-22 16:47:31 -07003195static void subpage_writel (void *opaque, target_phys_addr_t addr,
3196 uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003197{
blueswir1db7b5422007-05-26 17:36:03 +00003198 subpage_writelen(opaque, addr, value, 2);
3199}
3200
/* Read dispatch table registered for subpage containers:
   index 0 = 8-bit, 1 = 16-bit, 2 = 32-bit access. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3206
/* Write dispatch table registered for subpage containers:
   index 0 = 8-bit, 1 = 16-bit, 2 = 32-bit access. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3212
/* Route the byte range [start, end] (inclusive, offsets within the
 * subpage) to the I/O handlers identified by 'memory', recording
 * 'region_offset' for each covered chunk.  Returns 0 on success or -1
 * if a bound falls outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* reduce 'memory' to a bare io_mem table index */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3234
/* Allocate and register a subpage container for the target page at
 * 'base'.  The whole page is initially routed to 'orig_memory' with
 * 'region_offset'; *phys receives the new I/O index tagged with
 * IO_MEM_SUBPAGE.  Returns the container (owned by the caller/page
 * tables). */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* default: the entire page forwards to the original memory */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3255
aliguori88715652009-02-11 15:20:58 +00003256static int get_free_io_mem_idx(void)
3257{
3258 int i;
3259
3260 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3261 if (!io_mem_used[i]) {
3262 io_mem_used[i] = 1;
3263 return i;
3264 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003265 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003266 return -1;
3267}
3268
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocate a fresh slot */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* caller passed an encoded index; reduce and bounds-check it */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* NULL handlers fall back to the unassigned-memory ones */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}
bellard61382a52003-10-27 21:22:23 +00003305
/* Public registration entry point: always allocates a new I/O slot
 * (io_index == 0 selects allocation in the _fixed variant). */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
3312
aliguori88715652009-02-11 15:20:58 +00003313void cpu_unregister_io_memory(int io_table_address)
3314{
3315 int i;
3316 int io_index = io_table_address >> IO_MEM_SHIFT;
3317
3318 for (i=0;i < 3; i++) {
3319 io_mem_read[io_index][i] = unassigned_mem_read[i];
3320 io_mem_write[io_index][i] = unassigned_mem_write[i];
3321 }
3322 io_mem_opaque[io_index] = NULL;
3323 io_mem_used[io_index] = 0;
3324}
3325
/* Install the built-in I/O slots (ROM, unassigned, not-dirty) and the
 * watchpoint slot used by the TLB. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* reserve the first 5 table entries -- presumably the fixed
       IO_MEM_* slots; TODO confirm against the IO_MEM_* definitions */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3339
pbrooke2eef172008-06-08 01:09:01 +00003340#endif /* !defined(CONFIG_USER_ONLY) */
3341
bellard13eb76e2004-01-24 15:23:36 +00003342/* physical memory access (slow version, mainly for debug) */
3343#if defined(CONFIG_USER_ONLY)
/* User-mode variant: copy 'len' bytes between 'buf' and the guest
 * address space at 'addr', one page at a time, honouring the per-page
 * protection flags.  Returns 0 on success, -1 on any invalid or
 * protected page. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003382
bellard13eb76e2004-01-24 15:23:36 +00003383#else
/* Copy 'len' bytes between 'buf' and guest physical memory at 'addr'.
 * RAM pages go through memcpy (with dirty-bitmap / translated-code
 * bookkeeping on writes); MMIO pages go through the registered I/O
 * handlers in 32/16/8-bit chunks chosen by alignment and remaining
 * length. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case: dispatch through the io_mem tables */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00003480
/* used for ROM loading : can write in RAM and ROM */
/* Like the write half of cpu_physical_memory_rw(), but bypasses the
 * I/O dispatch entirely: only RAM/ROM(-device) pages are written;
 * anything else is silently skipped.  No dirty-bitmap update is done
 * here. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3519
/* Single temporary buffer used by cpu_physical_memory_map() when the
 * requested region is not directly addressable RAM; 'buffer' is
 * non-NULL while it is in use (there is only one, so concurrent
 * mappings of non-RAM regions cannot all be satisfied). */
typedef struct {
    void *buffer;                /* allocated bounce storage, or NULL */
    target_phys_addr_t addr;     /* guest address the buffer shadows */
    target_phys_addr_t len;      /* number of bytes covered */
} BounceBuffer;

static BounceBuffer bounce;
3527
/* Registered callback for clients that want to be told when retrying a
 * failed cpu_physical_memory_map() is likely to succeed (see
 * cpu_register_map_client() / cpu_notify_map_clients()). */
typedef struct MapClient {
    void *opaque;                    /* passed back to the callback */
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* All currently registered map clients. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003536
3537void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3538{
3539 MapClient *client = qemu_malloc(sizeof(*client));
3540
3541 client->opaque = opaque;
3542 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003543 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003544 return client;
3545}
3546
3547void cpu_unregister_map_client(void *_client)
3548{
3549 MapClient *client = (MapClient *)_client;
3550
Blue Swirl72cf2d42009-09-12 07:36:22 +00003551 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003552 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003553}
3554
3555static void cpu_notify_map_clients(void)
3556{
3557 MapClient *client;
3558
Blue Swirl72cf2d42009-09-12 07:36:22 +00003559 while (!QLIST_EMPTY(&map_client_list)) {
3560 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003561 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003562 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003563 }
3564}
3565
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host address returned to the caller */
    uint8_t *ptr;                  /* host address of the current chunk */
    target_phys_addr_t page;
    unsigned long pd;              /* phys_offset of the current page */
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Clamp this chunk to the end of the current target page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: fall back to the single global bounce buffer.
               It can only back the FIRST chunk of a mapping (done == 0)
               and only if no other mapping is already using it. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Read mapping: pre-fill the buffer with guest data. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* RAM: translate directly to the host pointer. */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses stopped being contiguous; end the mapping
               here and return the shorter length via *plen. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3627
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages
           need code invalidation and dirty-bit updates. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffered mapping: flush written data back to the guest,
       release the buffer, and wake any clients waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;   /* marks the bounce buffer as free again */
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00003663
bellard8df1cd02005-01-28 22:37:22 +00003664/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003665uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003666{
3667 int io_index;
3668 uint8_t *ptr;
3669 uint32_t val;
3670 unsigned long pd;
3671 PhysPageDesc *p;
3672
3673 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3674 if (!p) {
3675 pd = IO_MEM_UNASSIGNED;
3676 } else {
3677 pd = p->phys_offset;
3678 }
ths3b46e622007-09-17 08:09:54 +00003679
ths5fafdf22007-09-16 21:08:06 +00003680 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003681 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003682 /* I/O case */
3683 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003684 if (p)
3685 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003686 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3687 } else {
3688 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003689 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003690 (addr & ~TARGET_PAGE_MASK);
3691 val = ldl_p(ptr);
3692 }
3693 return val;
3694}
3695
/* Load a 64-bit value from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: there is no 64-bit handler slot, so issue two 32-bit
           reads and combine them in target byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3733
bellardaab33092005-10-30 20:48:42 +00003734/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003735uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003736{
3737 uint8_t val;
3738 cpu_physical_memory_read(addr, &val, 1);
3739 return val;
3740}
3741
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003742/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003743uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003744{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003745 int io_index;
3746 uint8_t *ptr;
3747 uint64_t val;
3748 unsigned long pd;
3749 PhysPageDesc *p;
3750
3751 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3752 if (!p) {
3753 pd = IO_MEM_UNASSIGNED;
3754 } else {
3755 pd = p->phys_offset;
3756 }
3757
3758 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3759 !(pd & IO_MEM_ROMD)) {
3760 /* I/O case */
3761 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3762 if (p)
3763 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3764 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3765 } else {
3766 /* RAM case */
3767 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3768 (addr & ~TARGET_PAGE_MASK);
3769 val = lduw_p(ptr);
3770 }
3771 return val;
bellardaab33092005-10-30 20:48:42 +00003772}
3773
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Not RAM: forward to the device's 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the dirty log must still observe the write
           (and stale translated code must be invalidated) even though
           normal dirty tracking is deliberately skipped. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
3812
/* 64-bit variant of stl_phys_notdirty(): store without dirty-bit update
   or code invalidation.  addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: no 64-bit handler slot, so issue two 32-bit writes
           in target byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: write via the host pointer, no dirty tracking. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3844
bellard8df1cd02005-01-28 22:37:22 +00003845/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003846void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003847{
3848 int io_index;
3849 uint8_t *ptr;
3850 unsigned long pd;
3851 PhysPageDesc *p;
3852
3853 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3854 if (!p) {
3855 pd = IO_MEM_UNASSIGNED;
3856 } else {
3857 pd = p->phys_offset;
3858 }
ths3b46e622007-09-17 08:09:54 +00003859
bellard3a7d9292005-08-21 09:26:42 +00003860 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003861 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003862 if (p)
3863 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003864 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3865 } else {
3866 unsigned long addr1;
3867 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3868 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003869 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003870 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003871 if (!cpu_physical_memory_is_dirty(addr1)) {
3872 /* invalidate code */
3873 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3874 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003875 cpu_physical_memory_set_dirty_flags(addr1,
3876 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003877 }
bellard8df1cd02005-01-28 22:37:22 +00003878 }
3879}
3880
bellardaab33092005-10-30 20:48:42 +00003881/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003882void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003883{
3884 uint8_t v = val;
3885 cpu_physical_memory_write(addr, &v, 1);
3886}
3887
/* Store a 16-bit value to guest physical memory, with normal dirty
   tracking and code invalidation.
   warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Not RAM: dispatch to the device's 16-bit write handler
           (index 1 in the io_mem_write size table). */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                                                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3923
3924/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003925void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003926{
3927 val = tswap64(val);
3928 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3929}
3930
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        /* Translate one virtual page at a time. */
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            /* Use the ROM-capable write path so a debugger can patch
               read-only memory (e.g. plant breakpoints in ROM). */
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
Paul Brooka68fe892010-03-01 00:08:59 +00003959#endif
bellard13eb76e2004-01-24 15:23:36 +00003960
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Locate the translation block containing the host return address. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore guest CPU state to the faulting instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the regenerated TB to end on the I/O insn. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4019
Paul Brookb3755a92010-03-12 16:54:58 +00004020#if !defined(CONFIG_USER_ONLY)
4021
/* Print translation-buffer statistics (TB counts, code sizes, jump
   chaining, flush counters) to f via cpu_fprintf, followed by the TCG
   backend's own statistics. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Accumulate per-TB statistics over every live translation block. */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A TB spanning two guest pages has a valid second page_addr. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* tb_next_offset != 0xffff means the jump slot is chained. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    /* Delegate backend (host code generation) stats to TCG itself. */
    tcg_dump_info(f, cpu_fprintf);
}
4074
bellard61382a52003-10-27 21:22:23 +00004075#define MMUSUFFIX _cmmu
4076#define GETPC() NULL
4077#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004078#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004079
4080#define SHIFT 0
4081#include "softmmu_template.h"
4082
4083#define SHIFT 1
4084#include "softmmu_template.h"
4085
4086#define SHIFT 2
4087#include "softmmu_template.h"
4088
4089#define SHIFT 3
4090#include "softmmu_template.h"
4091
4092#undef env
4093
4094#endif