/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section \
    __attribute__((aligned (16)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

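/* Walk the multi-level l1_map for the given guest page index and return
   its PageDesc.  If 'alloc' is set, missing intermediate tables and the
   final PageDesc array are allocated on the way down; otherwise NULL is
   returned as soon as a level is missing. */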
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

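/* Grow the phys_map_nodes array so that at least 'nodes' more
   PhysPageEntry nodes can be handed out without reallocation. */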
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


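/* Populate one level of the physical page trie, mapping '*nb' pages
   starting at page index '*index' to the section number 'leaf'.  Ranges
   that cover a whole sub-tree at this level become leaf entries; partial
   coverage recurses one level down. */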
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

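/* True unless the region is one of the fixed internal regions
   (RAM, ROM, notdirty, watch) or a ROM device. */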
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

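/* Allocate (or, with USE_STATIC_CODE_GEN_BUFFER, adopt) the buffer that
   will hold the generated host code, make it executable, and size the
   TranslationBlock array accordingly. */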
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

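/* Return the CPU state with the given index, or NULL if no such CPU exists. */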
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

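/* Register a newly created CPU state: append it to the global CPU list,
   assign it the next cpu_index, and hook up vmstate/savevm handling in
   system mode. */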
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

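/* Unlink 'tb' from the per-page TB list 'ptb'; the low two bits of each
   list pointer encode which of the TB's two pages the link belongs to. */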
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

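/* Remove jump slot 'n' of 'tb' from the circular list of TBs that chain
   to the same destination. */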
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

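/* Remove 'tb' from every structure that can reach it: the physical hash
   table, the per-page TB lists, each CPU's tb_jmp_cache, and the chained
   jump lists, so that it can no longer be executed. */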
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

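/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'. */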
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

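/* Build the bitmap of bytes within this page that are covered by
   translated code; it is later used to skip invalidation for writes
   that cannot touch any TB. */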
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

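/* Translate the code at pc/cs_base/flags into a new TranslationBlock,
   flushing the whole TB cache first if the TB array or code buffer is
   full, and link the new TB into the page tables. */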
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

bellard9fa3e852004-01-04 18:06:42 +00001216#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001217static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001218 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001219{
aliguori6b917542008-11-18 19:46:41 +00001220 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001221 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001222 int n;
bellardd720b932004-04-25 17:57:43 +00001223#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001224 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001225 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001226 int current_tb_modified = 0;
1227 target_ulong current_pc = 0;
1228 target_ulong current_cs_base = 0;
1229 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001230#endif
bellard9fa3e852004-01-04 18:06:42 +00001231
1232 addr &= TARGET_PAGE_MASK;
1233 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001234 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001235 return;
1236 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001237#ifdef TARGET_HAS_PRECISE_SMC
1238 if (tb && pc != 0) {
1239 current_tb = tb_find_pc(pc);
1240 }
1241#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001242 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001243 n = (uintptr_t)tb & 3;
1244 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001245#ifdef TARGET_HAS_PRECISE_SMC
1246 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001247 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001248 /* If we are modifying the current TB, we must stop
1249 its execution. We could be more precise by checking
1250 that the modification is after the current PC, but it
1251 would require a specialized function to partially
1252 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001253
bellardd720b932004-04-25 17:57:43 +00001254 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001255 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001256 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1257 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001258 }
1259#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001260 tb_phys_invalidate(tb, addr);
1261 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001262 }
1263 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001264#ifdef TARGET_HAS_PRECISE_SMC
1265 if (current_tb_modified) {
1266 /* we generate a block containing just the instruction
1267 modifying the memory. It will ensure that it cannot modify
1268 itself */
bellardea1c1802004-06-14 18:56:36 +00001269 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001270 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001271 cpu_resume_from_signal(env, puc);
1272 }
1273#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001274}
bellard9fa3e852004-01-04 18:06:42 +00001275#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001276
1277/* add the tb to the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001278static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001279 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001280{
1281 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001282#ifndef CONFIG_USER_ONLY
1283 bool page_already_protected;
1284#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001285
bellard9fa3e852004-01-04 18:06:42 +00001286 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001287 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001288 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001289#ifndef CONFIG_USER_ONLY
1290 page_already_protected = p->first_tb != NULL;
1291#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001292 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001293 invalidate_page_bitmap(p);
1294
bellard107db442004-06-22 18:48:46 +00001295#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001296
bellard9fa3e852004-01-04 18:06:42 +00001297#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001298 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001299 target_ulong addr;
1300 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001301 int prot;
1302
bellardfd6ce8f2003-05-14 19:00:11 +00001303    /* force the host page to be non-writable (writes will have a
1304 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001305 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001306 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001307 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1308 addr += TARGET_PAGE_SIZE) {
1309
1310 p2 = page_find (addr >> TARGET_PAGE_BITS);
1311 if (!p2)
1312 continue;
1313 prot |= p2->flags;
1314 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001315 }
ths5fafdf22007-09-16 21:08:06 +00001316 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001317 (prot & PAGE_BITS) & ~PAGE_WRITE);
1318#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001319 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001320 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001321#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001322 }
bellard9fa3e852004-01-04 18:06:42 +00001323#else
1324 /* if some code is already present, then the pages are already
1325 protected. So we handle the case where only the first TB is
1326 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001327 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001328 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001329 }
1330#endif
bellardd720b932004-04-25 17:57:43 +00001331
1332#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001333}
1334
bellard9fa3e852004-01-04 18:06:42 +00001335/* add a new TB and link it to the physical page tables. phys_page2 is
1336 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001337void tb_link_page(TranslationBlock *tb,
1338 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001339{
bellard9fa3e852004-01-04 18:06:42 +00001340 unsigned int h;
1341 TranslationBlock **ptb;
1342
pbrookc8a706f2008-06-02 16:16:42 +00001343 /* Grab the mmap lock to stop another thread invalidating this TB
1344 before we are done. */
1345 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001346 /* add in the physical hash table */
1347 h = tb_phys_hash_func(phys_pc);
1348 ptb = &tb_phys_hash[h];
1349 tb->phys_hash_next = *ptb;
1350 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001351
1352 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001353 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1354 if (phys_page2 != -1)
1355 tb_alloc_page(tb, 1, phys_page2);
1356 else
1357 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001358
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001359 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001360 tb->jmp_next[0] = NULL;
1361 tb->jmp_next[1] = NULL;
1362
1363 /* init original jump addresses */
1364 if (tb->tb_next_offset[0] != 0xffff)
1365 tb_reset_jump(tb, 0);
1366 if (tb->tb_next_offset[1] != 0xffff)
1367 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001368
1369#ifdef DEBUG_TB_CHECK
1370 tb_page_check();
1371#endif
pbrookc8a706f2008-06-02 16:16:42 +00001372 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001373}
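
/* Illustrative sketch (not from the original file): how a caller such as
   tb_gen_code() derives the second page argument for tb_link_page() when
   the translated guest code crosses a page boundary.  The helper name is
   an assumption for the example. */
#if 0
static void example_link_tb(CPUArchState *env, TranslationBlock *tb,
                            target_ulong pc, tb_page_addr_t phys_pc)
{
    target_ulong virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    tb_page_addr_t phys_page2 = -1;

    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the last byte of the TB lies on a different guest page */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
}
#endif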
1374
bellarda513fe12003-05-27 23:29:48 +00001375/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1376 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001377TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001378{
1379 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001380 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001381 TranslationBlock *tb;
1382
1383 if (nb_tbs <= 0)
1384 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001385 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1386 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001387 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001388 }
bellarda513fe12003-05-27 23:29:48 +00001389 /* binary search (cf Knuth) */
1390 m_min = 0;
1391 m_max = nb_tbs - 1;
1392 while (m_min <= m_max) {
1393 m = (m_min + m_max) >> 1;
1394 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001395 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001396 if (v == tc_ptr)
1397 return tb;
1398 else if (tc_ptr < v) {
1399 m_max = m - 1;
1400 } else {
1401 m_min = m + 1;
1402 }
ths5fafdf22007-09-16 21:08:06 +00001403 }
bellarda513fe12003-05-27 23:29:48 +00001404 return &tbs[m_max];
1405}
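
/* Illustrative sketch (not from the original file): the typical fault-path
   use of tb_find_pc() - map the host PC at which a signal was taken back
   to the containing TB, then resynchronize the guest CPU state from it.
   The function name is an assumption for the example. */
#if 0
static int example_recover_from_host_pc(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (!tb) {
        return 0;  /* the fault did not happen in generated code */
    }
    cpu_restore_state(tb, env, host_pc);
    return 1;
}
#endif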
bellard75012672003-06-21 13:11:07 +00001406
bellardea041c02003-06-25 16:16:50 +00001407static void tb_reset_jump_recursive(TranslationBlock *tb);
1408
1409static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1410{
1411 TranslationBlock *tb1, *tb_next, **ptb;
1412 unsigned int n1;
1413
1414 tb1 = tb->jmp_next[n];
1415 if (tb1 != NULL) {
1416 /* find head of list */
1417 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001418 n1 = (uintptr_t)tb1 & 3;
1419 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001420 if (n1 == 2)
1421 break;
1422 tb1 = tb1->jmp_next[n1];
1423 }
1424        /* we are now sure that tb jumps to tb1 */
1425 tb_next = tb1;
1426
1427 /* remove tb from the jmp_first list */
1428 ptb = &tb_next->jmp_first;
1429 for(;;) {
1430 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001431 n1 = (uintptr_t)tb1 & 3;
1432 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001433 if (n1 == n && tb1 == tb)
1434 break;
1435 ptb = &tb1->jmp_next[n1];
1436 }
1437 *ptb = tb->jmp_next[n];
1438 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001439
bellardea041c02003-06-25 16:16:50 +00001440 /* suppress the jump to next tb in generated code */
1441 tb_reset_jump(tb, n);
1442
bellard01243112004-01-04 15:48:17 +00001443        /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001444 tb_reset_jump_recursive(tb_next);
1445 }
1446}
1447
1448static void tb_reset_jump_recursive(TranslationBlock *tb)
1449{
1450 tb_reset_jump_recursive2(tb, 0);
1451 tb_reset_jump_recursive2(tb, 1);
1452}
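
/* Illustrative sketch (not from the original file): the jmp_first/jmp_next
   lists walked above tag each TranslationBlock pointer with a slot index
   in its two low bits (0 or 1 for a jump slot, 2 for the list head).  A
   minimal decode of one entry; the helper name is local to the example. */
#if 0
static TranslationBlock *example_decode_jmp_entry(TranslationBlock *tagged,
                                                  unsigned int *slot)
{
    *slot = (uintptr_t)tagged & 3;                       /* 0, 1 or 2 */
    return (TranslationBlock *)((uintptr_t)tagged & ~3); /* real pointer */
}
#endif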
1453
bellard1fddef42005-04-17 19:16:13 +00001454#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001455#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001456static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001457{
1458 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1459}
1460#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001461void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001462{
Anthony Liguoric227f092009-10-01 16:12:16 -05001463 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001464 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001465
Avi Kivity06ef3522012-02-13 16:11:22 +02001466 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001467 if (!(memory_region_is_ram(section->mr)
1468 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001469 return;
1470 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001471 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001472 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001473 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001474}
Max Filippov1e7855a2012-04-10 02:48:17 +04001475
1476static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1477{
1478 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
1479}
bellardc27004e2005-01-03 23:35:10 +00001480#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001481#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001482
Paul Brookc527ee82010-03-01 03:31:14 +00001483#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001484void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001485
1486{
1487}
1488
Andreas Färber9349b4f2012-03-14 01:38:32 +01001489int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001490 int flags, CPUWatchpoint **watchpoint)
1491{
1492 return -ENOSYS;
1493}
1494#else
pbrook6658ffb2007-03-16 23:58:11 +00001495/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001496int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001497 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001498{
aliguorib4051332008-11-18 20:14:20 +00001499 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001500 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001501
aliguorib4051332008-11-18 20:14:20 +00001502 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001503 if ((len & (len - 1)) || (addr & ~len_mask) ||
1504 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001505 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1506 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1507 return -EINVAL;
1508 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001509 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001510
aliguoria1d1bb32008-11-18 20:07:32 +00001511 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001512 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001513 wp->flags = flags;
1514
aliguori2dc9f412008-11-18 20:56:59 +00001515 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001516 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001517 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001518 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001519 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001520
pbrook6658ffb2007-03-16 23:58:11 +00001521 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001522
1523 if (watchpoint)
1524 *watchpoint = wp;
1525 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001526}
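
/* Illustrative sketch (not from the original file): how a debug front end
   might request a 4-byte write watchpoint through the function above.
   The helper name is an assumption; len must be a power of two and addr
   must be aligned to it, otherwise -EINVAL comes back. */
#if 0
static int example_set_write_watchpoint(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
}
#endif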
1527
aliguoria1d1bb32008-11-18 20:07:32 +00001528/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001529int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001530 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001531{
aliguorib4051332008-11-18 20:14:20 +00001532 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001533 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001534
Blue Swirl72cf2d42009-09-12 07:36:22 +00001535 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001536 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001537 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001538 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001539 return 0;
1540 }
1541 }
aliguoria1d1bb32008-11-18 20:07:32 +00001542 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001543}
1544
aliguoria1d1bb32008-11-18 20:07:32 +00001545/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001546void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001547{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001548 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001549
aliguoria1d1bb32008-11-18 20:07:32 +00001550 tlb_flush_page(env, watchpoint->vaddr);
1551
Anthony Liguori7267c092011-08-20 22:09:37 -05001552 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001553}
1554
aliguoria1d1bb32008-11-18 20:07:32 +00001555/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001556void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001557{
aliguoric0ce9982008-11-25 22:13:57 +00001558 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001559
Blue Swirl72cf2d42009-09-12 07:36:22 +00001560 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001561 if (wp->flags & mask)
1562 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001563 }
aliguoria1d1bb32008-11-18 20:07:32 +00001564}
Paul Brookc527ee82010-03-01 03:31:14 +00001565#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001566
1567/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001568int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001569 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001570{
bellard1fddef42005-04-17 19:16:13 +00001571#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001572 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001573
Anthony Liguori7267c092011-08-20 22:09:37 -05001574 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001575
1576 bp->pc = pc;
1577 bp->flags = flags;
1578
aliguori2dc9f412008-11-18 20:56:59 +00001579 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001580 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001581 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001582 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001583 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001584
1585 breakpoint_invalidate(env, pc);
1586
1587 if (breakpoint)
1588 *breakpoint = bp;
1589 return 0;
1590#else
1591 return -ENOSYS;
1592#endif
1593}
1594
1595/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001596int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001597{
1598#if defined(TARGET_HAS_ICE)
1599 CPUBreakpoint *bp;
1600
Blue Swirl72cf2d42009-09-12 07:36:22 +00001601 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001602 if (bp->pc == pc && bp->flags == flags) {
1603 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001604 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001605 }
bellard4c3a88a2003-07-26 12:06:08 +00001606 }
aliguoria1d1bb32008-11-18 20:07:32 +00001607 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001608#else
aliguoria1d1bb32008-11-18 20:07:32 +00001609 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001610#endif
1611}
1612
aliguoria1d1bb32008-11-18 20:07:32 +00001613/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001614void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001615{
bellard1fddef42005-04-17 19:16:13 +00001616#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001617 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001618
aliguoria1d1bb32008-11-18 20:07:32 +00001619 breakpoint_invalidate(env, breakpoint->pc);
1620
Anthony Liguori7267c092011-08-20 22:09:37 -05001621 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001622#endif
1623}
1624
1625/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001626void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001627{
1628#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001629 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001630
Blue Swirl72cf2d42009-09-12 07:36:22 +00001631 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001632 if (bp->flags & mask)
1633 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001634 }
bellard4c3a88a2003-07-26 12:06:08 +00001635#endif
1636}
1637
bellardc33a3462003-07-29 20:50:33 +00001638/* enable or disable single step mode. EXCP_DEBUG is returned by the
1639 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001640void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001641{
bellard1fddef42005-04-17 19:16:13 +00001642#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001643 if (env->singlestep_enabled != enabled) {
1644 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001645 if (kvm_enabled())
1646 kvm_update_guest_debug(env, 0);
1647 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001648 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001649 /* XXX: only flush what is necessary */
1650 tb_flush(env);
1651 }
bellardc33a3462003-07-29 20:50:33 +00001652 }
1653#endif
1654}
1655
bellard34865132003-10-05 14:28:56 +00001656/* enable or disable low-level logging */
1657void cpu_set_log(int log_flags)
1658{
1659 loglevel = log_flags;
1660 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001661 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001662 if (!logfile) {
1663 perror(logfilename);
1664 _exit(1);
1665 }
bellard9fa3e852004-01-04 18:06:42 +00001666#if !defined(CONFIG_SOFTMMU)
1667 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1668 {
blueswir1b55266b2008-09-20 08:07:15 +00001669 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001670 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1671 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001672#elif defined(_WIN32)
1673 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1674 setvbuf(logfile, NULL, _IONBF, 0);
1675#else
bellard34865132003-10-05 14:28:56 +00001676 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001677#endif
pbrooke735b912007-06-30 13:53:24 +00001678 log_append = 1;
1679 }
1680 if (!loglevel && logfile) {
1681 fclose(logfile);
1682 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001683 }
1684}
1685
1686void cpu_set_log_filename(const char *filename)
1687{
1688 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001689 if (logfile) {
1690 fclose(logfile);
1691 logfile = NULL;
1692 }
1693 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001694}
bellardc33a3462003-07-29 20:50:33 +00001695
Andreas Färber9349b4f2012-03-14 01:38:32 +01001696static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001697{
pbrookd5975362008-06-07 20:50:51 +00001698 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1699 problem and hope the cpu will stop of its own accord. For userspace
1700 emulation this often isn't actually as bad as it sounds. Often
1701 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001702 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001703 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001704
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001705 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001706 tb = env->current_tb;
1707 /* if the cpu is currently executing code, we must unlink it and
1708       all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001709 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001710 env->current_tb = NULL;
1711 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001712 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001713 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001714}
1715
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001716#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001717/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001718static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001719{
1720 int old_mask;
1721
1722 old_mask = env->interrupt_request;
1723 env->interrupt_request |= mask;
1724
aliguori8edac962009-04-24 18:03:45 +00001725 /*
1726 * If called from iothread context, wake the target cpu in
1727     * case it's halted.
1728 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001729 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001730 qemu_cpu_kick(env);
1731 return;
1732 }
aliguori8edac962009-04-24 18:03:45 +00001733
pbrook2e70f6e2008-06-29 01:03:05 +00001734 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001735 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001736 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001737 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001738 cpu_abort(env, "Raised interrupt while not in I/O function");
1739 }
pbrook2e70f6e2008-06-29 01:03:05 +00001740 } else {
aurel323098dba2009-03-07 21:28:24 +00001741 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001742 }
1743}
1744
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001745CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1746
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001747#else /* CONFIG_USER_ONLY */
1748
Andreas Färber9349b4f2012-03-14 01:38:32 +01001749void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001750{
1751 env->interrupt_request |= mask;
1752 cpu_unlink_tb(env);
1753}
1754#endif /* CONFIG_USER_ONLY */
1755
Andreas Färber9349b4f2012-03-14 01:38:32 +01001756void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001757{
1758 env->interrupt_request &= ~mask;
1759}
1760
Andreas Färber9349b4f2012-03-14 01:38:32 +01001761void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001762{
1763 env->exit_request = 1;
1764 cpu_unlink_tb(env);
1765}
1766
blueswir1c7cd6a32008-10-02 18:27:46 +00001767const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001768 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001769 "show generated host assembly code for each compiled TB" },
1770 { CPU_LOG_TB_IN_ASM, "in_asm",
1771 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001772 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001773 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001774 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001775 "show micro ops "
1776#ifdef TARGET_I386
1777 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001778#endif
blueswir1e01a1152008-03-14 17:37:11 +00001779 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001780 { CPU_LOG_INT, "int",
1781 "show interrupts/exceptions in short format" },
1782 { CPU_LOG_EXEC, "exec",
1783 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001784 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001785 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001786#ifdef TARGET_I386
1787 { CPU_LOG_PCALL, "pcall",
1788 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001789 { CPU_LOG_RESET, "cpu_reset",
1790 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001791#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001792#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001793 { CPU_LOG_IOPORT, "ioport",
1794 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001795#endif
bellardf193c792004-03-21 17:06:25 +00001796 { 0, NULL, NULL },
1797};
1798
1799static int cmp1(const char *s1, int n, const char *s2)
1800{
1801 if (strlen(s2) != n)
1802 return 0;
1803 return memcmp(s1, s2, n) == 0;
1804}
ths3b46e622007-09-17 08:09:54 +00001805
bellardf193c792004-03-21 17:06:25 +00001806/* takes a comma-separated list of log masks. Returns 0 on error. */
1807int cpu_str_to_log_mask(const char *str)
1808{
blueswir1c7cd6a32008-10-02 18:27:46 +00001809 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001810 int mask;
1811 const char *p, *p1;
1812
1813 p = str;
1814 mask = 0;
1815 for(;;) {
1816 p1 = strchr(p, ',');
1817 if (!p1)
1818 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001819 if(cmp1(p,p1-p,"all")) {
1820 for(item = cpu_log_items; item->mask != 0; item++) {
1821 mask |= item->mask;
1822 }
1823 } else {
1824 for(item = cpu_log_items; item->mask != 0; item++) {
1825 if (cmp1(p, p1 - p, item->name))
1826 goto found;
1827 }
1828 return 0;
bellardf193c792004-03-21 17:06:25 +00001829 }
bellardf193c792004-03-21 17:06:25 +00001830 found:
1831 mask |= item->mask;
1832 if (*p1 != ',')
1833 break;
1834 p = p1 + 1;
1835 }
1836 return mask;
1837}
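
/* Illustrative sketch (not from the original file): how a -d style option
   is typically wired up - parse the comma separated item names into a mask
   and enable logging with it.  The helper name is an assumption. */
#if 0
static void example_enable_logging(const char *items)
{
    int mask = cpu_str_to_log_mask(items);   /* e.g. "in_asm,op,int" */

    if (mask) {
        cpu_set_log(mask);
    } else {
        fprintf(stderr, "unknown log item in '%s'\n", items);
    }
}
#endif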
bellardea041c02003-06-25 16:16:50 +00001838
Andreas Färber9349b4f2012-03-14 01:38:32 +01001839void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001840{
1841 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001842 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001843
1844 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001845 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001846 fprintf(stderr, "qemu: fatal: ");
1847 vfprintf(stderr, fmt, ap);
1848 fprintf(stderr, "\n");
1849#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001850 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1851#else
1852 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001853#endif
aliguori93fcfe32009-01-15 22:34:14 +00001854 if (qemu_log_enabled()) {
1855 qemu_log("qemu: fatal: ");
1856 qemu_log_vprintf(fmt, ap2);
1857 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001858#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001859 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001860#else
aliguori93fcfe32009-01-15 22:34:14 +00001861 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001862#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001863 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001864 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001865 }
pbrook493ae1f2007-11-23 16:53:59 +00001866 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001867 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001868#if defined(CONFIG_USER_ONLY)
1869 {
1870 struct sigaction act;
1871 sigfillset(&act.sa_mask);
1872 act.sa_handler = SIG_DFL;
1873 sigaction(SIGABRT, &act, NULL);
1874 }
1875#endif
bellard75012672003-06-21 13:11:07 +00001876 abort();
1877}
1878
Andreas Färber9349b4f2012-03-14 01:38:32 +01001879CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001880{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001881 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1882 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001883 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001884#if defined(TARGET_HAS_ICE)
1885 CPUBreakpoint *bp;
1886 CPUWatchpoint *wp;
1887#endif
1888
Andreas Färber9349b4f2012-03-14 01:38:32 +01001889 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001890
1891 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001892 new_env->next_cpu = next_cpu;
1893 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001894
1895 /* Clone all break/watchpoints.
1896 Note: Once we support ptrace with hw-debug register access, make sure
1897 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001898 QTAILQ_INIT(&env->breakpoints);
1899 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001900#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001901 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001902 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1903 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001904 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001905 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1906 wp->flags, NULL);
1907 }
1908#endif
1909
thsc5be9f02007-02-28 20:20:53 +00001910 return new_env;
1911}
1912
bellard01243112004-01-04 15:48:17 +00001913#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001914void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001915{
1916 unsigned int i;
1917
1918 /* Discard jump cache entries for any tb which might potentially
1919 overlap the flushed page. */
1920 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1921 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001922 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001923
1924 i = tb_jmp_cache_hash_page(addr);
1925 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001926 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001927}
1928
pbrook5579c7f2009-04-11 14:47:08 +00001929/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001930void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001931 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001932{
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001933 uintptr_t length, start1;
bellard1ccde1c2004-02-06 19:46:14 +00001934
1935 start &= TARGET_PAGE_MASK;
1936 end = TARGET_PAGE_ALIGN(end);
1937
1938 length = end - start;
1939 if (length == 0)
1940 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001941 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001942
bellard1ccde1c2004-02-06 19:46:14 +00001943 /* we modify the TLB cache so that the dirty bit will be set again
1944 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001945 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001946 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001947 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001948 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001949 != (end - 1) - start) {
1950 abort();
1951 }
Blue Swirle5548612012-04-21 13:08:33 +00001952 cpu_tlb_reset_dirty_all(start1, length);
bellard1ccde1c2004-02-06 19:46:14 +00001953}
1954
aliguori74576192008-10-06 14:02:03 +00001955int cpu_physical_memory_set_dirty_tracking(int enable)
1956{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001957 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001958 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001959 return ret;
aliguori74576192008-10-06 14:02:03 +00001960}
1961
Blue Swirle5548612012-04-21 13:08:33 +00001962target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1963 MemoryRegionSection *section,
1964 target_ulong vaddr,
1965 target_phys_addr_t paddr,
1966 int prot,
1967 target_ulong *address)
1968{
1969 target_phys_addr_t iotlb;
1970 CPUWatchpoint *wp;
1971
Blue Swirlcc5bea62012-04-14 14:56:48 +00001972 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001973 /* Normal RAM. */
1974 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001975 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001976 if (!section->readonly) {
1977 iotlb |= phys_section_notdirty;
1978 } else {
1979 iotlb |= phys_section_rom;
1980 }
1981 } else {
1982 /* IO handlers are currently passed a physical address.
1983 It would be nice to pass an offset from the base address
1984 of that region. This would avoid having to special case RAM,
1985 and avoid full address decoding in every device.
1986 We can't use the high bits of pd for this because
1987 IO_MEM_ROMD uses these as a ram address. */
1988 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001989 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001990 }
1991
1992 /* Make accesses to pages with watchpoints go via the
1993 watchpoint trap routines. */
1994 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1995 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1996 /* Avoid trapping reads of pages with a write breakpoint. */
1997 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1998 iotlb = phys_section_watch + paddr;
1999 *address |= TLB_MMIO;
2000 break;
2001 }
2002 }
2003 }
2004
2005 return iotlb;
2006}
2007
bellard01243112004-01-04 15:48:17 +00002008#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002009/*
2010 * Walks guest process memory "regions" one by one
2011 * and calls callback function 'fn' for each region.
2012 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002013
2014struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002015{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002016 walk_memory_regions_fn fn;
2017 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02002018 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002019 int prot;
2020};
bellard9fa3e852004-01-04 18:06:42 +00002021
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002022static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002023 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002024{
2025 if (data->start != -1ul) {
2026 int rc = data->fn(data->priv, data->start, end, data->prot);
2027 if (rc != 0) {
2028 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002029 }
bellard33417e72003-08-10 21:47:01 +00002030 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002031
2032 data->start = (new_prot ? end : -1ul);
2033 data->prot = new_prot;
2034
2035 return 0;
2036}
2037
2038static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002039 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002040{
Paul Brookb480d9b2010-03-12 23:23:29 +00002041 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002042 int i, rc;
2043
2044 if (*lp == NULL) {
2045 return walk_memory_regions_end(data, base, 0);
2046 }
2047
2048 if (level == 0) {
2049 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002050 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002051 int prot = pd[i].flags;
2052
2053 pa = base | (i << TARGET_PAGE_BITS);
2054 if (prot != data->prot) {
2055 rc = walk_memory_regions_end(data, pa, prot);
2056 if (rc != 0) {
2057 return rc;
2058 }
2059 }
2060 }
2061 } else {
2062 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002063 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002064 pa = base | ((abi_ulong)i <<
2065 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002066 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2067 if (rc != 0) {
2068 return rc;
2069 }
2070 }
2071 }
2072
2073 return 0;
2074}
2075
2076int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2077{
2078 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02002079 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002080
2081 data.fn = fn;
2082 data.priv = priv;
2083 data.start = -1ul;
2084 data.prot = 0;
2085
2086 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002087 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002088 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2089 if (rc != 0) {
2090 return rc;
2091 }
2092 }
2093
2094 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002095}
2096
Paul Brookb480d9b2010-03-12 23:23:29 +00002097static int dump_region(void *priv, abi_ulong start,
2098 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002099{
2100 FILE *f = (FILE *)priv;
2101
Paul Brookb480d9b2010-03-12 23:23:29 +00002102 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2103 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002104 start, end, end - start,
2105 ((prot & PAGE_READ) ? 'r' : '-'),
2106 ((prot & PAGE_WRITE) ? 'w' : '-'),
2107 ((prot & PAGE_EXEC) ? 'x' : '-'));
2108
2109 return (0);
2110}
2111
2112/* dump memory mappings */
2113void page_dump(FILE *f)
2114{
2115 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2116 "start", "end", "size", "prot");
2117 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002118}
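
/* Illustrative sketch (not from the original file): page_dump() above is
   one walk_memory_regions() client; another callback could, for example,
   total how much of the guest address space is mapped executable.  The
   names are local to the example. */
#if 0
static int example_count_exec(void *priv, abi_ulong start,
                              abi_ulong end, unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;                 /* a non-zero return would abort the walk */
}

static abi_ulong example_exec_bytes(void)
{
    abi_ulong total = 0;

    walk_memory_regions(&total, example_count_exec);
    return total;
}
#endif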
2119
pbrook53a59602006-03-25 19:31:22 +00002120int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002121{
bellard9fa3e852004-01-04 18:06:42 +00002122 PageDesc *p;
2123
2124 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002125 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002126 return 0;
2127 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002128}
2129
Richard Henderson376a7902010-03-10 15:57:04 -08002130/* Modify the flags of a page and invalidate the code if necessary.
2131 The flag PAGE_WRITE_ORG is positioned automatically depending
2132 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002133void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002134{
Richard Henderson376a7902010-03-10 15:57:04 -08002135 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002136
Richard Henderson376a7902010-03-10 15:57:04 -08002137 /* This function should never be called with addresses outside the
2138 guest address space. If this assert fires, it probably indicates
2139 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002140#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2141 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002142#endif
2143 assert(start < end);
2144
bellard9fa3e852004-01-04 18:06:42 +00002145 start = start & TARGET_PAGE_MASK;
2146 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002147
2148 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002149 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002150 }
2151
2152 for (addr = start, len = end - start;
2153 len != 0;
2154 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2155 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2156
2157 /* If the write protection bit is set, then we invalidate
2158 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002159 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002160 (flags & PAGE_WRITE) &&
2161 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002162 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002163 }
2164 p->flags = flags;
2165 }
bellard9fa3e852004-01-04 18:06:42 +00002166}
2167
ths3d97b402007-11-02 19:02:07 +00002168int page_check_range(target_ulong start, target_ulong len, int flags)
2169{
2170 PageDesc *p;
2171 target_ulong end;
2172 target_ulong addr;
2173
Richard Henderson376a7902010-03-10 15:57:04 -08002174 /* This function should never be called with addresses outside the
2175 guest address space. If this assert fires, it probably indicates
2176 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002177#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2178 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002179#endif
2180
Richard Henderson3e0650a2010-03-29 10:54:42 -07002181 if (len == 0) {
2182 return 0;
2183 }
Richard Henderson376a7902010-03-10 15:57:04 -08002184 if (start + len - 1 < start) {
2185 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002186 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002187 }
balrog55f280c2008-10-28 10:24:11 +00002188
ths3d97b402007-11-02 19:02:07 +00002189    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2190 start = start & TARGET_PAGE_MASK;
2191
Richard Henderson376a7902010-03-10 15:57:04 -08002192 for (addr = start, len = end - start;
2193 len != 0;
2194 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002195 p = page_find(addr >> TARGET_PAGE_BITS);
2196 if( !p )
2197 return -1;
2198 if( !(p->flags & PAGE_VALID) )
2199 return -1;
2200
bellarddae32702007-11-14 10:51:00 +00002201 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002202 return -1;
bellarddae32702007-11-14 10:51:00 +00002203 if (flags & PAGE_WRITE) {
2204 if (!(p->flags & PAGE_WRITE_ORG))
2205 return -1;
2206 /* unprotect the page if it was put read-only because it
2207 contains translated code */
2208 if (!(p->flags & PAGE_WRITE)) {
2209 if (!page_unprotect(addr, 0, NULL))
2210 return -1;
2211 }
2212 return 0;
2213 }
ths3d97b402007-11-02 19:02:07 +00002214 }
2215 return 0;
2216}
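
/* Illustrative sketch (not from the original file): a syscall emulation
   path might validate a guest buffer with page_check_range() before
   writing into it, in the spirit of an access_ok() check.  The helper
   name is an assumption for the example. */
#if 0
static int example_guest_buffer_writable(target_ulong guest_addr,
                                         target_ulong size)
{
    /* 0 from page_check_range() means every page in the range is mapped
       and writable (unprotecting pages made read-only for translated
       code along the way) */
    return page_check_range(guest_addr, size, PAGE_WRITE) == 0;
}
#endif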
2217
bellard9fa3e852004-01-04 18:06:42 +00002218/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002219 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002220int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002221{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002222 unsigned int prot;
2223 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002224 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002225
pbrookc8a706f2008-06-02 16:16:42 +00002226 /* Technically this isn't safe inside a signal handler. However we
2227 know this only ever happens in a synchronous SEGV handler, so in
2228 practice it seems to be ok. */
2229 mmap_lock();
2230
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002231 p = page_find(address >> TARGET_PAGE_BITS);
2232 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002233 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002234 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002235 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002236
bellard9fa3e852004-01-04 18:06:42 +00002237 /* if the page was really writable, then we change its
2238 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002239 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2240 host_start = address & qemu_host_page_mask;
2241 host_end = host_start + qemu_host_page_size;
2242
2243 prot = 0;
2244 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2245 p = page_find(addr >> TARGET_PAGE_BITS);
2246 p->flags |= PAGE_WRITE;
2247 prot |= p->flags;
2248
bellard9fa3e852004-01-04 18:06:42 +00002249 /* and since the content will be modified, we must invalidate
2250 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002251 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002252#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002253 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002254#endif
bellard9fa3e852004-01-04 18:06:42 +00002255 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002256 mprotect((void *)g2h(host_start), qemu_host_page_size,
2257 prot & PAGE_BITS);
2258
2259 mmap_unlock();
2260 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002261 }
pbrookc8a706f2008-06-02 16:16:42 +00002262 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002263 return 0;
2264}
bellard9fa3e852004-01-04 18:06:42 +00002265#endif /* defined(CONFIG_USER_ONLY) */
2266
pbrooke2eef172008-06-08 01:09:01 +00002267#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002268
Paul Brookc04b2b72010-03-01 03:31:14 +00002269#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2270typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002271 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002272 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002273 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002274} subpage_t;
2275
Anthony Liguoric227f092009-10-01 16:12:16 -05002276static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002277 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002278static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002279static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002280{
Avi Kivity5312bd82012-02-12 18:32:55 +02002281 MemoryRegionSection *section = &phys_sections[section_index];
2282 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002283
2284 if (mr->subpage) {
2285 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2286 memory_region_destroy(&subpage->iomem);
2287 g_free(subpage);
2288 }
2289}
2290
Avi Kivity4346ae32012-02-10 17:00:01 +02002291static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002292{
2293 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002294 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002295
Avi Kivityc19e8802012-02-13 20:25:31 +02002296 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002297 return;
2298 }
2299
Avi Kivityc19e8802012-02-13 20:25:31 +02002300 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002301 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002302 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002303 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002304 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002305 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002306 }
Avi Kivity54688b12012-02-09 17:34:32 +02002307 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002308 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002309 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002310}
2311
2312static void destroy_all_mappings(void)
2313{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002314 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002315 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002316}
2317
Avi Kivity5312bd82012-02-12 18:32:55 +02002318static uint16_t phys_section_add(MemoryRegionSection *section)
2319{
2320 if (phys_sections_nb == phys_sections_nb_alloc) {
2321 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2322 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2323 phys_sections_nb_alloc);
2324 }
2325 phys_sections[phys_sections_nb] = *section;
2326 return phys_sections_nb++;
2327}
2328
2329static void phys_sections_clear(void)
2330{
2331 phys_sections_nb = 0;
2332}
2333
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002334/* register physical memory.
2335 For RAM, 'size' must be a multiple of the target page size.
2336 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002337 io memory page. The address used when calling the IO function is
2338 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002339 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002340 before calculating this offset. This should not be a problem unless
2341 the low bits of start_addr and region_offset differ. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002342static void register_subpage(MemoryRegionSection *section)
2343{
2344 subpage_t *subpage;
2345 target_phys_addr_t base = section->offset_within_address_space
2346 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002347 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002348 MemoryRegionSection subsection = {
2349 .offset_within_address_space = base,
2350 .size = TARGET_PAGE_SIZE,
2351 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002352 target_phys_addr_t start, end;
2353
Avi Kivityf3705d52012-03-08 16:16:34 +02002354 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002355
Avi Kivityf3705d52012-03-08 16:16:34 +02002356 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002357 subpage = subpage_init(base);
2358 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002359 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2360 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002361 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002362 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002363 }
2364 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2365 end = start + section->size;
2366 subpage_register(subpage, start, end, phys_section_add(section));
2367}
2368
2369
2370static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002371{
Avi Kivitydd811242012-01-02 12:17:03 +02002372 target_phys_addr_t start_addr = section->offset_within_address_space;
2373 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002374 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002375 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002376
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002377 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002378
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002379 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002380 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2381 section_index);
bellard33417e72003-08-10 21:47:01 +00002382}
2383
Avi Kivity0f0cb162012-02-13 17:14:32 +02002384void cpu_register_physical_memory_log(MemoryRegionSection *section,
2385 bool readonly)
2386{
2387 MemoryRegionSection now = *section, remain = *section;
2388
2389 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2390 || (now.size < TARGET_PAGE_SIZE)) {
2391 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2392 - now.offset_within_address_space,
2393 now.size);
2394 register_subpage(&now);
2395 remain.size -= now.size;
2396 remain.offset_within_address_space += now.size;
2397 remain.offset_within_region += now.size;
2398 }
2399 now = remain;
2400 now.size &= TARGET_PAGE_MASK;
2401 if (now.size) {
2402 register_multipage(&now);
2403 remain.size -= now.size;
2404 remain.offset_within_address_space += now.size;
2405 remain.offset_within_region += now.size;
2406 }
2407 now = remain;
2408 if (now.size) {
2409 register_subpage(&now);
2410 }
2411}
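
/* Worked example (not from the original file, assuming 4 KiB target pages):
   a section with offset_within_address_space 0x1800 and size 0x3000 is
   split by the function above into
       [0x1800, 0x2000)  unaligned head -> register_subpage()
       [0x2000, 0x4000)  aligned middle -> register_multipage()
       [0x4000, 0x4800)  unaligned tail -> register_subpage()
   so only the head and tail go through the subpage machinery. */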
2412
2413
Anthony Liguoric227f092009-10-01 16:12:16 -05002414void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002415{
2416 if (kvm_enabled())
2417 kvm_coalesce_mmio_region(addr, size);
2418}
2419
Anthony Liguoric227f092009-10-01 16:12:16 -05002420void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002421{
2422 if (kvm_enabled())
2423 kvm_uncoalesce_mmio_region(addr, size);
2424}
2425
Sheng Yang62a27442010-01-26 19:21:16 +08002426void qemu_flush_coalesced_mmio_buffer(void)
2427{
2428 if (kvm_enabled())
2429 kvm_flush_coalesced_mmio_buffer();
2430}
2431
Marcelo Tosattic9027602010-03-01 20:25:08 -03002432#if defined(__linux__) && !defined(TARGET_S390X)
2433
2434#include <sys/vfs.h>
2435
2436#define HUGETLBFS_MAGIC 0x958458f6
2437
2438static long gethugepagesize(const char *path)
2439{
2440 struct statfs fs;
2441 int ret;
2442
2443 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002444 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002445 } while (ret != 0 && errno == EINTR);
2446
2447 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002448 perror(path);
2449 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002450 }
2451
2452 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002453 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002454
2455 return fs.f_bsize;
2456}
2457
Alex Williamson04b16652010-07-02 11:13:17 -06002458static void *file_ram_alloc(RAMBlock *block,
2459 ram_addr_t memory,
2460 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002461{
2462 char *filename;
2463 void *area;
2464 int fd;
2465#ifdef MAP_POPULATE
2466 int flags;
2467#endif
2468 unsigned long hpagesize;
2469
2470 hpagesize = gethugepagesize(path);
2471 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002472 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002473 }
2474
2475 if (memory < hpagesize) {
2476 return NULL;
2477 }
2478
2479 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2480 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2481 return NULL;
2482 }
2483
2484 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002485 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002486 }
2487
2488 fd = mkstemp(filename);
2489 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002490 perror("unable to create backing store for hugepages");
2491 free(filename);
2492 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002493 }
2494 unlink(filename);
2495 free(filename);
2496
2497 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2498
2499 /*
 2500 * ftruncate is not supported by hugetlbfs on older
 2501 * hosts, so don't bother bailing out on errors.
2502 * If anything goes wrong with it under other filesystems,
2503 * mmap will fail.
2504 */
2505 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002506 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002507
2508#ifdef MAP_POPULATE
 2509 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
 2510 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2511 * to sidestep this quirk.
2512 */
2513 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2514 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2515#else
2516 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2517#endif
2518 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002519 perror("file_ram_alloc: can't mmap RAM pages");
2520 close(fd);
 2521 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002522 }
Alex Williamson04b16652010-07-02 11:13:17 -06002523 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002524 return area;
2525}
2526#endif
2527
Alex Williamsond17b5282010-06-25 11:08:38 -06002528static ram_addr_t find_ram_offset(ram_addr_t size)
2529{
Alex Williamson04b16652010-07-02 11:13:17 -06002530 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002531 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002532
2533 if (QLIST_EMPTY(&ram_list.blocks))
2534 return 0;
2535
2536 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002537 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002538
2539 end = block->offset + block->length;
2540
2541 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2542 if (next_block->offset >= end) {
2543 next = MIN(next, next_block->offset);
2544 }
2545 }
2546 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002547 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002548 mingap = next - end;
2549 }
2550 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002551
2552 if (offset == RAM_ADDR_MAX) {
2553 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2554 (uint64_t)size);
2555 abort();
2556 }
2557
Alex Williamson04b16652010-07-02 11:13:17 -06002558 return offset;
2559}
2560
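/* Worked example (illustrative, not part of the original file): with a
 * hypothetical layout of two blocks, [0x00000000, 0x10000000) and
 * [0x20000000, 0x28000000), a request for 0x08000000 bytes sees two
 * candidate gaps: 0x10000000 bytes after the first block and an
 * effectively unbounded one after the second.  Both fit, so the smaller
 * gap wins and the new block is placed at offset 0x10000000; picking the
 * smallest sufficient gap keeps the ram_addr_t space from fragmenting. */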
2561static ram_addr_t last_ram_offset(void)
2562{
Alex Williamsond17b5282010-06-25 11:08:38 -06002563 RAMBlock *block;
2564 ram_addr_t last = 0;
2565
2566 QLIST_FOREACH(block, &ram_list.blocks, next)
2567 last = MAX(last, block->offset + block->length);
2568
2569 return last;
2570}
2571
Avi Kivityc5705a72011-12-20 15:59:12 +02002572void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002573{
2574 RAMBlock *new_block, *block;
2575
Avi Kivityc5705a72011-12-20 15:59:12 +02002576 new_block = NULL;
2577 QLIST_FOREACH(block, &ram_list.blocks, next) {
2578 if (block->offset == addr) {
2579 new_block = block;
2580 break;
2581 }
2582 }
2583 assert(new_block);
2584 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002585
2586 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2587 char *id = dev->parent_bus->info->get_dev_path(dev);
2588 if (id) {
2589 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002590 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002591 }
2592 }
2593 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2594
2595 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002596 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002597 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2598 new_block->idstr);
2599 abort();
2600 }
2601 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002602}
2603
2604ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2605 MemoryRegion *mr)
2606{
2607 RAMBlock *new_block;
2608
2609 size = TARGET_PAGE_ALIGN(size);
2610 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002611
Avi Kivity7c637362011-12-21 13:09:49 +02002612 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002613 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002614 if (host) {
2615 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002616 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002617 } else {
2618 if (mem_path) {
2619#if defined (__linux__) && !defined(TARGET_S390X)
2620 new_block->host = file_ram_alloc(new_block, size, mem_path);
2621 if (!new_block->host) {
2622 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002623 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002624 }
2625#else
2626 fprintf(stderr, "-mem-path option unsupported\n");
2627 exit(1);
2628#endif
2629 } else {
2630#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002631 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2632 a system-defined value, which is at least 256GB. Larger systems
 2633 have larger values. We put the guest between the end of the data
 2634 segment (system break) and this value. We use 32GB as a base to
 2635 have enough room for the system break to grow. */
2636 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002637 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002638 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002639 if (new_block->host == MAP_FAILED) {
2640 fprintf(stderr, "Allocating RAM failed\n");
2641 abort();
2642 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002643#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002644 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002645 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002646 } else {
2647 new_block->host = qemu_vmalloc(size);
2648 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002649#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002650 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002651 }
2652 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002653 new_block->length = size;
2654
2655 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2656
Anthony Liguori7267c092011-08-20 22:09:37 -05002657 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002658 last_ram_offset() >> TARGET_PAGE_BITS);
2659 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2660 0xff, size >> TARGET_PAGE_BITS);
2661
2662 if (kvm_enabled())
2663 kvm_setup_guest_memory(new_block->host, size);
2664
2665 return new_block->offset;
2666}
2667
Avi Kivityc5705a72011-12-20 15:59:12 +02002668ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002669{
Avi Kivityc5705a72011-12-20 15:59:12 +02002670 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002671}
bellarde9a1ab12007-02-08 23:08:38 +00002672
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002673void qemu_ram_free_from_ptr(ram_addr_t addr)
2674{
2675 RAMBlock *block;
2676
2677 QLIST_FOREACH(block, &ram_list.blocks, next) {
2678 if (addr == block->offset) {
2679 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002680 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002681 return;
2682 }
2683 }
2684}
2685
Anthony Liguoric227f092009-10-01 16:12:16 -05002686void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002687{
Alex Williamson04b16652010-07-02 11:13:17 -06002688 RAMBlock *block;
2689
2690 QLIST_FOREACH(block, &ram_list.blocks, next) {
2691 if (addr == block->offset) {
2692 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002693 if (block->flags & RAM_PREALLOC_MASK) {
2694 ;
2695 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002696#if defined (__linux__) && !defined(TARGET_S390X)
2697 if (block->fd) {
2698 munmap(block->host, block->length);
2699 close(block->fd);
2700 } else {
2701 qemu_vfree(block->host);
2702 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002703#else
2704 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002705#endif
2706 } else {
2707#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2708 munmap(block->host, block->length);
2709#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002710 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002711 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002712 } else {
2713 qemu_vfree(block->host);
2714 }
Alex Williamson04b16652010-07-02 11:13:17 -06002715#endif
2716 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002717 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002718 return;
2719 }
2720 }
2721
bellarde9a1ab12007-02-08 23:08:38 +00002722}
2723
Huang Yingcd19cfa2011-03-02 08:56:19 +01002724#ifndef _WIN32
2725void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2726{
2727 RAMBlock *block;
2728 ram_addr_t offset;
2729 int flags;
2730 void *area, *vaddr;
2731
2732 QLIST_FOREACH(block, &ram_list.blocks, next) {
2733 offset = addr - block->offset;
2734 if (offset < block->length) {
2735 vaddr = block->host + offset;
2736 if (block->flags & RAM_PREALLOC_MASK) {
2737 ;
2738 } else {
2739 flags = MAP_FIXED;
2740 munmap(vaddr, length);
2741 if (mem_path) {
2742#if defined(__linux__) && !defined(TARGET_S390X)
2743 if (block->fd) {
2744#ifdef MAP_POPULATE
2745 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2746 MAP_PRIVATE;
2747#else
2748 flags |= MAP_PRIVATE;
2749#endif
2750 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2751 flags, block->fd, offset);
2752 } else {
2753 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2754 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2755 flags, -1, 0);
2756 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002757#else
2758 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002759#endif
2760 } else {
2761#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2762 flags |= MAP_SHARED | MAP_ANONYMOUS;
2763 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2764 flags, -1, 0);
2765#else
2766 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2767 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2768 flags, -1, 0);
2769#endif
2770 }
2771 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002772 fprintf(stderr, "Could not remap addr: "
2773 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002774 length, addr);
2775 exit(1);
2776 }
2777 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2778 }
2779 return;
2780 }
2781 }
2782}
2783#endif /* !_WIN32 */
2784
pbrookdc828ca2009-04-09 22:21:07 +00002785/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002786 With the exception of the softmmu code in this file, this should
2787 only be used for local memory (e.g. video ram) that the device owns,
2788 and knows it isn't going to access beyond the end of the block.
2789
2790 It should not be used for general purpose DMA.
2791 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2792 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002793void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002794{
pbrook94a6b542009-04-11 17:15:54 +00002795 RAMBlock *block;
2796
Alex Williamsonf471a172010-06-11 11:11:42 -06002797 QLIST_FOREACH(block, &ram_list.blocks, next) {
2798 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002799 /* Move this entry to the start of the list. */
2800 if (block != QLIST_FIRST(&ram_list.blocks)) {
2801 QLIST_REMOVE(block, next);
2802 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2803 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002804 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002805 /* We need to check whether the requested address is in the RAM
 2806 * block, because we don't want to map the entire guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002807 * in that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002808 */
2809 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002810 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002811 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002812 block->host =
2813 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002814 }
2815 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002816 return block->host + (addr - block->offset);
2817 }
pbrook94a6b542009-04-11 17:15:54 +00002818 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002819
2820 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2821 abort();
2822
2823 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002824}
2825
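/* Illustrative sketch (not part of the original file, kept compiled out):
 * how a device model might use the two access paths described above.  The
 * "vram_offset" argument and all sizes/addresses are hypothetical.
 */
#if 0
static void example_ram_access(ram_addr_t vram_offset)
{
    uint8_t *vram;
    uint8_t buf[16] = { 0 };

    /* Device-local memory (e.g. video RAM) that the device itself
     * allocated: a direct host pointer is acceptable as long as we stay
     * inside that block. */
    vram = qemu_get_ram_ptr(vram_offset);
    memset(vram, 0, TARGET_PAGE_SIZE);
    qemu_put_ram_ptr(vram);

    /* General purpose DMA to an arbitrary guest physical address must use
     * the slow path instead, so that I/O regions, dirty tracking and code
     * invalidation are honoured. */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);
}
#endif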
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002826/* Return a host pointer to ram allocated with qemu_ram_alloc.
2827 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2828 */
2829void *qemu_safe_ram_ptr(ram_addr_t addr)
2830{
2831 RAMBlock *block;
2832
2833 QLIST_FOREACH(block, &ram_list.blocks, next) {
2834 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002835 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002836 /* We need to check whether the requested address is in the RAM
 2837 * block, because we don't want to map the entire guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002838 * in that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002839 */
2840 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002841 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002842 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002843 block->host =
2844 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002845 }
2846 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002847 return block->host + (addr - block->offset);
2848 }
2849 }
2850
2851 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2852 abort();
2853
2854 return NULL;
2855}
2856
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002857/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2858 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002859void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002860{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002861 if (*size == 0) {
2862 return NULL;
2863 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002864 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002865 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002866 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002867 RAMBlock *block;
2868
2869 QLIST_FOREACH(block, &ram_list.blocks, next) {
2870 if (addr - block->offset < block->length) {
2871 if (addr - block->offset + *size > block->length)
2872 *size = block->length - addr + block->offset;
2873 return block->host + (addr - block->offset);
2874 }
2875 }
2876
2877 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2878 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002879 }
2880}
2881
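/* Illustrative sketch (not part of the original file, kept compiled out):
 * qemu_ram_ptr_length() lets a caller state how much it intends to touch
 * and find out how much of that is actually available.  The offset and
 * length used here are hypothetical.
 */
#if 0
static void example_bounded_ram_access(ram_addr_t offset)
{
    ram_addr_t len = 2 * TARGET_PAGE_SIZE;
    uint8_t *p = qemu_ram_ptr_length(offset, &len);

    /* len may have been clamped to the end of the containing RAM block. */
    if (p != NULL) {
        memset(p, 0xff, len);
    }
}
#endif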
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002882void qemu_put_ram_ptr(void *addr)
2883{
2884 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002885}
2886
Marcelo Tosattie8902612010-10-11 15:31:19 -03002887int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002888{
pbrook94a6b542009-04-11 17:15:54 +00002889 RAMBlock *block;
2890 uint8_t *host = ptr;
2891
Jan Kiszka868bb332011-06-21 22:59:09 +02002892 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002893 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002894 return 0;
2895 }
2896
Alex Williamsonf471a172010-06-11 11:11:42 -06002897 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002898 /* This case happens when the block is not mapped. */
2899 if (block->host == NULL) {
2900 continue;
2901 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002902 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002903 *ram_addr = block->offset + (host - block->host);
2904 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002905 }
pbrook94a6b542009-04-11 17:15:54 +00002906 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002907
Marcelo Tosattie8902612010-10-11 15:31:19 -03002908 return -1;
2909}
Alex Williamsonf471a172010-06-11 11:11:42 -06002910
Marcelo Tosattie8902612010-10-11 15:31:19 -03002911/* Some of the softmmu routines need to translate from a host pointer
2912 (typically a TLB entry) back to a ram offset. */
2913ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2914{
2915 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002916
Marcelo Tosattie8902612010-10-11 15:31:19 -03002917 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2918 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2919 abort();
2920 }
2921 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002922}
2923
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002924static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2925 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002926{
pbrook67d3b952006-12-18 05:03:52 +00002927#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002928 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002929#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002930#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002931 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002932#endif
2933 return 0;
2934}
2935
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002936static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2937 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002938{
2939#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002940 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002941#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002942#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002943 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002944#endif
2945}
2946
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002947static const MemoryRegionOps unassigned_mem_ops = {
2948 .read = unassigned_mem_read,
2949 .write = unassigned_mem_write,
2950 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002951};
2952
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002953static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2954 unsigned size)
2955{
2956 abort();
2957}
2958
2959static void error_mem_write(void *opaque, target_phys_addr_t addr,
2960 uint64_t value, unsigned size)
2961{
2962 abort();
2963}
2964
2965static const MemoryRegionOps error_mem_ops = {
2966 .read = error_mem_read,
2967 .write = error_mem_write,
2968 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002969};
2970
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002971static const MemoryRegionOps rom_mem_ops = {
2972 .read = error_mem_read,
2973 .write = unassigned_mem_write,
2974 .endianness = DEVICE_NATIVE_ENDIAN,
2975};
2976
2977static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2978 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002979{
bellard3a7d9292005-08-21 09:26:42 +00002980 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002981 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002982 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2983#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002984 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002985 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002986#endif
2987 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002988 switch (size) {
2989 case 1:
2990 stb_p(qemu_get_ram_ptr(ram_addr), val);
2991 break;
2992 case 2:
2993 stw_p(qemu_get_ram_ptr(ram_addr), val);
2994 break;
2995 case 4:
2996 stl_p(qemu_get_ram_ptr(ram_addr), val);
2997 break;
2998 default:
2999 abort();
3000 }
bellardf23db162005-08-21 19:12:28 +00003001 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003002 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003003 /* we remove the notdirty callback only if the code has been
3004 flushed */
3005 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003006 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003007}
3008
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003009static const MemoryRegionOps notdirty_mem_ops = {
3010 .read = error_mem_read,
3011 .write = notdirty_mem_write,
3012 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003013};
3014
pbrook0f459d12008-06-09 00:20:13 +00003015/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003016static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003017{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003018 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003019 target_ulong pc, cs_base;
3020 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003021 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003022 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003023 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003024
aliguori06d55cc2008-11-18 20:24:06 +00003025 if (env->watchpoint_hit) {
3026 /* We re-entered the check after replacing the TB. Now raise
 3027 * the debug interrupt so that it will trigger after the
3028 * current instruction. */
3029 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3030 return;
3031 }
pbrook2e70f6e2008-06-29 01:03:05 +00003032 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003033 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003034 if ((vaddr == (wp->vaddr & len_mask) ||
3035 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003036 wp->flags |= BP_WATCHPOINT_HIT;
3037 if (!env->watchpoint_hit) {
3038 env->watchpoint_hit = wp;
3039 tb = tb_find_pc(env->mem_io_pc);
3040 if (!tb) {
3041 cpu_abort(env, "check_watchpoint: could not find TB for "
3042 "pc=%p", (void *)env->mem_io_pc);
3043 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003044 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003045 tb_phys_invalidate(tb, -1);
3046 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3047 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003048 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003049 } else {
3050 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3051 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003052 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003053 }
aliguori06d55cc2008-11-18 20:24:06 +00003054 }
aliguori6e140f22008-11-18 20:37:55 +00003055 } else {
3056 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003057 }
3058 }
3059}
3060
pbrook6658ffb2007-03-16 23:58:11 +00003061/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3062 so these check for a hit then pass through to the normal out-of-line
3063 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003064static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3065 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003066{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003067 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3068 switch (size) {
3069 case 1: return ldub_phys(addr);
3070 case 2: return lduw_phys(addr);
3071 case 4: return ldl_phys(addr);
3072 default: abort();
3073 }
pbrook6658ffb2007-03-16 23:58:11 +00003074}
3075
Avi Kivity1ec9b902012-01-02 12:47:48 +02003076static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3077 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003078{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003079 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3080 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003081 case 1:
3082 stb_phys(addr, val);
3083 break;
3084 case 2:
3085 stw_phys(addr, val);
3086 break;
3087 case 4:
3088 stl_phys(addr, val);
3089 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003090 default: abort();
3091 }
pbrook6658ffb2007-03-16 23:58:11 +00003092}
3093
Avi Kivity1ec9b902012-01-02 12:47:48 +02003094static const MemoryRegionOps watch_mem_ops = {
3095 .read = watch_mem_read,
3096 .write = watch_mem_write,
3097 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003098};
pbrook6658ffb2007-03-16 23:58:11 +00003099
Avi Kivity70c68e42012-01-02 12:32:48 +02003100static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3101 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003102{
Avi Kivity70c68e42012-01-02 12:32:48 +02003103 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003104 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003105 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003106#if defined(DEBUG_SUBPAGE)
3107 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3108 mmio, len, addr, idx);
3109#endif
blueswir1db7b5422007-05-26 17:36:03 +00003110
Avi Kivity5312bd82012-02-12 18:32:55 +02003111 section = &phys_sections[mmio->sub_section[idx]];
3112 addr += mmio->base;
3113 addr -= section->offset_within_address_space;
3114 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003115 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003116}
3117
Avi Kivity70c68e42012-01-02 12:32:48 +02003118static void subpage_write(void *opaque, target_phys_addr_t addr,
3119 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003120{
Avi Kivity70c68e42012-01-02 12:32:48 +02003121 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003122 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003123 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003124#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003125 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3126 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003127 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003128#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003129
Avi Kivity5312bd82012-02-12 18:32:55 +02003130 section = &phys_sections[mmio->sub_section[idx]];
3131 addr += mmio->base;
3132 addr -= section->offset_within_address_space;
3133 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003134 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003135}
3136
Avi Kivity70c68e42012-01-02 12:32:48 +02003137static const MemoryRegionOps subpage_ops = {
3138 .read = subpage_read,
3139 .write = subpage_write,
3140 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003141};
3142
Avi Kivityde712f92012-01-02 12:41:07 +02003143static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3144 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003145{
3146 ram_addr_t raddr = addr;
3147 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003148 switch (size) {
3149 case 1: return ldub_p(ptr);
3150 case 2: return lduw_p(ptr);
3151 case 4: return ldl_p(ptr);
3152 default: abort();
3153 }
Andreas Färber56384e82011-11-30 16:26:21 +01003154}
3155
Avi Kivityde712f92012-01-02 12:41:07 +02003156static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3157 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003158{
3159 ram_addr_t raddr = addr;
3160 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003161 switch (size) {
3162 case 1: return stb_p(ptr, value);
3163 case 2: return stw_p(ptr, value);
3164 case 4: return stl_p(ptr, value);
3165 default: abort();
3166 }
Andreas Färber56384e82011-11-30 16:26:21 +01003167}
3168
Avi Kivityde712f92012-01-02 12:41:07 +02003169static const MemoryRegionOps subpage_ram_ops = {
3170 .read = subpage_ram_read,
3171 .write = subpage_ram_write,
3172 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003173};
3174
Anthony Liguoric227f092009-10-01 16:12:16 -05003175static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003176 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003177{
3178 int idx, eidx;
3179
3180 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3181 return -1;
3182 idx = SUBPAGE_IDX(start);
3183 eidx = SUBPAGE_IDX(end);
3184#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003185 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003186 mmio, start, end, idx, eidx, section);
3187#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003188 if (memory_region_is_ram(phys_sections[section].mr)) {
3189 MemoryRegionSection new_section = phys_sections[section];
3190 new_section.mr = &io_mem_subpage_ram;
3191 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003192 }
blueswir1db7b5422007-05-26 17:36:03 +00003193 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003194 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003195 }
3196
3197 return 0;
3198}
3199
Avi Kivity0f0cb162012-02-13 17:14:32 +02003200static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003201{
Anthony Liguoric227f092009-10-01 16:12:16 -05003202 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003203
Anthony Liguori7267c092011-08-20 22:09:37 -05003204 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003205
3206 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003207 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3208 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003209 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003210#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003211 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3212 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003213#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003214 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003215
3216 return mmio;
3217}
3218
Avi Kivity5312bd82012-02-12 18:32:55 +02003219static uint16_t dummy_section(MemoryRegion *mr)
3220{
3221 MemoryRegionSection section = {
3222 .mr = mr,
3223 .offset_within_address_space = 0,
3224 .offset_within_region = 0,
3225 .size = UINT64_MAX,
3226 };
3227
3228 return phys_section_add(&section);
3229}
3230
Avi Kivity37ec01d2012-03-08 18:08:35 +02003231MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003232{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003233 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003234}
3235
Avi Kivitye9179ce2009-06-14 11:38:52 +03003236static void io_mem_init(void)
3237{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003238 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003239 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3240 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3241 "unassigned", UINT64_MAX);
3242 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3243 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003244 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3245 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003246 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3247 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003248}
3249
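/* Illustrative sketch (not part of the original file, kept compiled out):
 * the built-in regions registered above follow the same pattern a device
 * model would use, i.e. a MemoryRegionOps table passed to
 * memory_region_init_io().  The device, its register layout and its name
 * are made up for the example.
 */
#if 0
static uint64_t example_dev_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    /* Toy device: every register reads back as its own offset. */
    return addr;
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint64_t val, unsigned size)
{
    /* Writes are simply ignored in this sketch. */
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemoryRegion example_dev_region;

static void example_dev_init(void)
{
    memory_region_init_io(&example_dev_region, &example_dev_ops, NULL,
                          "example-dev", 0x1000);
}
#endif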
Avi Kivity50c1e142012-02-08 21:36:02 +02003250static void core_begin(MemoryListener *listener)
3251{
Avi Kivity54688b12012-02-09 17:34:32 +02003252 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003253 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003254 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003255 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003256 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3257 phys_section_rom = dummy_section(&io_mem_rom);
3258 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003259}
3260
3261static void core_commit(MemoryListener *listener)
3262{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003263 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003264
3265 /* since each CPU stores ram addresses in its TLB cache, we must
3266 reset the modified entries */
3267 /* XXX: slow ! */
3268 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3269 tlb_flush(env, 1);
3270 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003271}
3272
Avi Kivity93632742012-02-08 16:54:16 +02003273static void core_region_add(MemoryListener *listener,
3274 MemoryRegionSection *section)
3275{
Avi Kivity4855d412012-02-08 21:16:05 +02003276 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003277}
3278
3279static void core_region_del(MemoryListener *listener,
3280 MemoryRegionSection *section)
3281{
Avi Kivity93632742012-02-08 16:54:16 +02003282}
3283
Avi Kivity50c1e142012-02-08 21:36:02 +02003284static void core_region_nop(MemoryListener *listener,
3285 MemoryRegionSection *section)
3286{
Avi Kivity54688b12012-02-09 17:34:32 +02003287 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003288}
3289
Avi Kivity93632742012-02-08 16:54:16 +02003290static void core_log_start(MemoryListener *listener,
3291 MemoryRegionSection *section)
3292{
3293}
3294
3295static void core_log_stop(MemoryListener *listener,
3296 MemoryRegionSection *section)
3297{
3298}
3299
3300static void core_log_sync(MemoryListener *listener,
3301 MemoryRegionSection *section)
3302{
3303}
3304
3305static void core_log_global_start(MemoryListener *listener)
3306{
3307 cpu_physical_memory_set_dirty_tracking(1);
3308}
3309
3310static void core_log_global_stop(MemoryListener *listener)
3311{
3312 cpu_physical_memory_set_dirty_tracking(0);
3313}
3314
3315static void core_eventfd_add(MemoryListener *listener,
3316 MemoryRegionSection *section,
3317 bool match_data, uint64_t data, int fd)
3318{
3319}
3320
3321static void core_eventfd_del(MemoryListener *listener,
3322 MemoryRegionSection *section,
3323 bool match_data, uint64_t data, int fd)
3324{
3325}
3326
Avi Kivity50c1e142012-02-08 21:36:02 +02003327static void io_begin(MemoryListener *listener)
3328{
3329}
3330
3331static void io_commit(MemoryListener *listener)
3332{
3333}
3334
Avi Kivity4855d412012-02-08 21:16:05 +02003335static void io_region_add(MemoryListener *listener,
3336 MemoryRegionSection *section)
3337{
Avi Kivitya2d33522012-03-05 17:40:12 +02003338 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3339
3340 mrio->mr = section->mr;
3341 mrio->offset = section->offset_within_region;
3342 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003343 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003344 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003345}
3346
3347static void io_region_del(MemoryListener *listener,
3348 MemoryRegionSection *section)
3349{
3350 isa_unassign_ioport(section->offset_within_address_space, section->size);
3351}
3352
Avi Kivity50c1e142012-02-08 21:36:02 +02003353static void io_region_nop(MemoryListener *listener,
3354 MemoryRegionSection *section)
3355{
3356}
3357
Avi Kivity4855d412012-02-08 21:16:05 +02003358static void io_log_start(MemoryListener *listener,
3359 MemoryRegionSection *section)
3360{
3361}
3362
3363static void io_log_stop(MemoryListener *listener,
3364 MemoryRegionSection *section)
3365{
3366}
3367
3368static void io_log_sync(MemoryListener *listener,
3369 MemoryRegionSection *section)
3370{
3371}
3372
3373static void io_log_global_start(MemoryListener *listener)
3374{
3375}
3376
3377static void io_log_global_stop(MemoryListener *listener)
3378{
3379}
3380
3381static void io_eventfd_add(MemoryListener *listener,
3382 MemoryRegionSection *section,
3383 bool match_data, uint64_t data, int fd)
3384{
3385}
3386
3387static void io_eventfd_del(MemoryListener *listener,
3388 MemoryRegionSection *section,
3389 bool match_data, uint64_t data, int fd)
3390{
3391}
3392
Avi Kivity93632742012-02-08 16:54:16 +02003393static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003394 .begin = core_begin,
3395 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003396 .region_add = core_region_add,
3397 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003398 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003399 .log_start = core_log_start,
3400 .log_stop = core_log_stop,
3401 .log_sync = core_log_sync,
3402 .log_global_start = core_log_global_start,
3403 .log_global_stop = core_log_global_stop,
3404 .eventfd_add = core_eventfd_add,
3405 .eventfd_del = core_eventfd_del,
3406 .priority = 0,
3407};
3408
Avi Kivity4855d412012-02-08 21:16:05 +02003409static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003410 .begin = io_begin,
3411 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003412 .region_add = io_region_add,
3413 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003414 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003415 .log_start = io_log_start,
3416 .log_stop = io_log_stop,
3417 .log_sync = io_log_sync,
3418 .log_global_start = io_log_global_start,
3419 .log_global_stop = io_log_global_stop,
3420 .eventfd_add = io_eventfd_add,
3421 .eventfd_del = io_eventfd_del,
3422 .priority = 0,
3423};
3424
Avi Kivity62152b82011-07-26 14:26:14 +03003425static void memory_map_init(void)
3426{
Anthony Liguori7267c092011-08-20 22:09:37 -05003427 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003428 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003429 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003430
Anthony Liguori7267c092011-08-20 22:09:37 -05003431 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003432 memory_region_init(system_io, "io", 65536);
3433 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003434
Avi Kivity4855d412012-02-08 21:16:05 +02003435 memory_listener_register(&core_memory_listener, system_memory);
3436 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003437}
3438
3439MemoryRegion *get_system_memory(void)
3440{
3441 return system_memory;
3442}
3443
Avi Kivity309cb472011-08-08 16:09:03 +03003444MemoryRegion *get_system_io(void)
3445{
3446 return system_io;
3447}
3448
pbrooke2eef172008-06-08 01:09:01 +00003449#endif /* !defined(CONFIG_USER_ONLY) */
3450
bellard13eb76e2004-01-24 15:23:36 +00003451/* physical memory access (slow version, mainly for debug) */
3452#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003453int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003454 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003455{
3456 int l, flags;
3457 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003458 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003459
3460 while (len > 0) {
3461 page = addr & TARGET_PAGE_MASK;
3462 l = (page + TARGET_PAGE_SIZE) - addr;
3463 if (l > len)
3464 l = len;
3465 flags = page_get_flags(page);
3466 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003467 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003468 if (is_write) {
3469 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003470 return -1;
bellard579a97f2007-11-11 14:26:47 +00003471 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003472 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003473 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003474 memcpy(p, buf, l);
3475 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003476 } else {
3477 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003478 return -1;
bellard579a97f2007-11-11 14:26:47 +00003479 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003480 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003481 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003482 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003483 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003484 }
3485 len -= l;
3486 buf += l;
3487 addr += l;
3488 }
Paul Brooka68fe892010-03-01 00:08:59 +00003489 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003490}
bellard8df1cd02005-01-28 22:37:22 +00003491
bellard13eb76e2004-01-24 15:23:36 +00003492#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003493void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003494 int len, int is_write)
3495{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003496 int l;
bellard13eb76e2004-01-24 15:23:36 +00003497 uint8_t *ptr;
3498 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003499 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003500 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003501
bellard13eb76e2004-01-24 15:23:36 +00003502 while (len > 0) {
3503 page = addr & TARGET_PAGE_MASK;
3504 l = (page + TARGET_PAGE_SIZE) - addr;
3505 if (l > len)
3506 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003507 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003508
bellard13eb76e2004-01-24 15:23:36 +00003509 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003510 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003511 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003512 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003513 /* XXX: could force cpu_single_env to NULL to avoid
3514 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003515 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003516 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003517 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003518 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003519 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003520 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003521 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003522 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003523 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003524 l = 2;
3525 } else {
bellard1c213d12005-09-03 10:49:04 +00003526 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003527 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003528 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003529 l = 1;
3530 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003531 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003532 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003533 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003534 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003535 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003536 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003537 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003538 if (!cpu_physical_memory_is_dirty(addr1)) {
3539 /* invalidate code */
3540 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3541 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003542 cpu_physical_memory_set_dirty_flags(
3543 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003544 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003545 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003546 }
3547 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003548 if (!(memory_region_is_ram(section->mr) ||
3549 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003550 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003551 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003552 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003553 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003554 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003555 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003556 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003557 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003558 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003559 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003560 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003561 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003562 l = 2;
3563 } else {
bellard1c213d12005-09-03 10:49:04 +00003564 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003565 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003566 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003567 l = 1;
3568 }
3569 } else {
3570 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003571 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003572 + memory_region_section_addr(section,
3573 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003574 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003575 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003576 }
3577 }
3578 len -= l;
3579 buf += l;
3580 addr += l;
3581 }
3582}
bellard8df1cd02005-01-28 22:37:22 +00003583
bellardd0ecd2a2006-04-23 17:14:48 +00003584/* used for ROM loading: can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003585void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003586 const uint8_t *buf, int len)
3587{
3588 int l;
3589 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003590 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003591 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003592
bellardd0ecd2a2006-04-23 17:14:48 +00003593 while (len > 0) {
3594 page = addr & TARGET_PAGE_MASK;
3595 l = (page + TARGET_PAGE_SIZE) - addr;
3596 if (l > len)
3597 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003598 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003599
Blue Swirlcc5bea62012-04-14 14:56:48 +00003600 if (!(memory_region_is_ram(section->mr) ||
3601 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003602 /* do nothing */
3603 } else {
3604 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003605 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003606 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003607 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003608 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003609 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003610 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003611 }
3612 len -= l;
3613 buf += l;
3614 addr += l;
3615 }
3616}
3617
aliguori6d16c2f2009-01-22 16:59:11 +00003618typedef struct {
3619 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003620 target_phys_addr_t addr;
3621 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003622} BounceBuffer;
3623
3624static BounceBuffer bounce;
3625
aliguoriba223c22009-01-22 16:59:16 +00003626typedef struct MapClient {
3627 void *opaque;
3628 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003629 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003630} MapClient;
3631
Blue Swirl72cf2d42009-09-12 07:36:22 +00003632static QLIST_HEAD(map_client_list, MapClient) map_client_list
3633 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003634
3635void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3636{
Anthony Liguori7267c092011-08-20 22:09:37 -05003637 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003638
3639 client->opaque = opaque;
3640 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003641 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003642 return client;
3643}
3644
3645void cpu_unregister_map_client(void *_client)
3646{
3647 MapClient *client = (MapClient *)_client;
3648
Blue Swirl72cf2d42009-09-12 07:36:22 +00003649 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003650 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003651}
3652
3653static void cpu_notify_map_clients(void)
3654{
3655 MapClient *client;
3656
Blue Swirl72cf2d42009-09-12 07:36:22 +00003657 while (!QLIST_EMPTY(&map_client_list)) {
3658 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003659 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003660 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003661 }
3662}
3663
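/* Illustrative sketch (not part of the original file, kept compiled out):
 * a caller whose mapping attempt failed (for instance because the single
 * bounce buffer above is busy) can ask to be called back when a retry is
 * worthwhile.  The request pointer and retry function are hypothetical.
 */
#if 0
static void example_dma_retry(void *opaque)
{
    /* opaque points back at the stalled request; restart it here.  The
     * client is unregistered automatically by cpu_notify_map_clients(). */
}

static void example_dma_defer(void *request)
{
    void *client = cpu_register_map_client(request, example_dma_retry);
    /* Keep "client" if the request may be cancelled before the callback
     * fires, so cpu_unregister_map_client() can be called on it. */
    (void)client;
}
#endif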
aliguori6d16c2f2009-01-22 16:59:11 +00003664/* Map a physical memory region into a host virtual address.
3665 * May map a subset of the requested range, given by and returned in *plen.
3666 * May return NULL if resources needed to perform the mapping are exhausted.
3667 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003668 * Use cpu_register_map_client() to know when retrying the map operation is
3669 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003670 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003671void *cpu_physical_memory_map(target_phys_addr_t addr,
3672 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003673 int is_write)
3674{
Anthony Liguoric227f092009-10-01 16:12:16 -05003675 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003676 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003677 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003678 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003679 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003680 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003681 ram_addr_t rlen;
3682 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003683
3684 while (len > 0) {
3685 page = addr & TARGET_PAGE_MASK;
3686 l = (page + TARGET_PAGE_SIZE) - addr;
3687 if (l > len)
3688 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003689 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003690
Avi Kivityf3705d52012-03-08 16:16:34 +02003691 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003692 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003693 break;
3694 }
3695 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3696 bounce.addr = addr;
3697 bounce.len = l;
3698 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003699 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003700 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003701
3702 *plen = l;
3703 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003704 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003705 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003706 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003707 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003708 }
aliguori6d16c2f2009-01-22 16:59:11 +00003709
3710 len -= l;
3711 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003712 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003713 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003714 rlen = todo;
3715 ret = qemu_ram_ptr_length(raddr, &rlen);
3716 *plen = rlen;
3717 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003718}
3719
3720/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3721 * Will also mark the memory as dirty if is_write == 1. access_len gives
3722 * the amount of memory that was actually read or written by the caller.
3723 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003724void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3725 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003726{
3727 if (buffer != bounce.buffer) {
3728 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003729 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003730 while (access_len) {
3731 unsigned l;
3732 l = TARGET_PAGE_SIZE;
3733 if (l > access_len)
3734 l = access_len;
3735 if (!cpu_physical_memory_is_dirty(addr1)) {
3736 /* invalidate code */
3737 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3738 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003739 cpu_physical_memory_set_dirty_flags(
3740 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003741 }
3742 addr1 += l;
3743 access_len -= l;
3744 }
3745 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003746 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003747 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003748 }
aliguori6d16c2f2009-01-22 16:59:11 +00003749 return;
3750 }
3751 if (is_write) {
3752 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3753 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003754 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003755 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003756 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003757}
bellardd0ecd2a2006-04-23 17:14:48 +00003758
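/* Illustrative sketch (not part of the original file, kept compiled out):
 * the usual map/modify/unmap pattern for zero-copy access to guest memory.
 * The guest address and transfer length are hypothetical; the mapping may
 * come back shorter than requested, or be backed by the bounce buffer.
 */
#if 0
static void example_dma_fill(target_phys_addr_t guest_addr)
{
    target_phys_addr_t len = TARGET_PAGE_SIZE;
    void *host = cpu_physical_memory_map(guest_addr, &len, 1);

    if (host == NULL) {
        /* Resources exhausted; retry later, see cpu_register_map_client(). */
        return;
    }
    memset(host, 0, len);
    /* Passing the real access length lets unmap mark only the touched
     * pages dirty. */
    cpu_physical_memory_unmap(host, len, 1, len);
}
#endif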
bellard8df1cd02005-01-28 22:37:22 +00003759/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003760static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3761 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003762{
bellard8df1cd02005-01-28 22:37:22 +00003763 uint8_t *ptr;
3764 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003765 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003766
Avi Kivity06ef3522012-02-13 16:11:22 +02003767 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003768
Blue Swirlcc5bea62012-04-14 14:56:48 +00003769 if (!(memory_region_is_ram(section->mr) ||
3770 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003771 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003772 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003773 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003774#if defined(TARGET_WORDS_BIGENDIAN)
3775 if (endian == DEVICE_LITTLE_ENDIAN) {
3776 val = bswap32(val);
3777 }
3778#else
3779 if (endian == DEVICE_BIG_ENDIAN) {
3780 val = bswap32(val);
3781 }
3782#endif
bellard8df1cd02005-01-28 22:37:22 +00003783 } else {
3784 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003785 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003786 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003787 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003788 switch (endian) {
3789 case DEVICE_LITTLE_ENDIAN:
3790 val = ldl_le_p(ptr);
3791 break;
3792 case DEVICE_BIG_ENDIAN:
3793 val = ldl_be_p(ptr);
3794 break;
3795 default:
3796 val = ldl_p(ptr);
3797 break;
3798 }
bellard8df1cd02005-01-28 22:37:22 +00003799 }
3800 return val;
3801}
3802
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003803uint32_t ldl_phys(target_phys_addr_t addr)
3804{
3805 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3806}
3807
3808uint32_t ldl_le_phys(target_phys_addr_t addr)
3809{
3810 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3811}
3812
3813uint32_t ldl_be_phys(target_phys_addr_t addr)
3814{
3815 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3816}
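
/* Illustrative sketch, not upstream code: ldl_phys() follows the target's
 * native byte order, while the _le/_be variants fix the byte order
 * regardless of the target.  A device whose guest-visible structures are
 * specified as little-endian should therefore use the fixed-endian
 * accessor; the descriptor layout assumed below is hypothetical. */
uint32_t example_read_desc_len(target_phys_addr_t desc);
uint32_t example_read_desc_len(target_phys_addr_t desc)
{
    /* assumed layout: 32-bit little-endian "len" field at offset 8 */
    return ldl_le_phys(desc + 8);
}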
3817
bellard84b7b8e2005-11-28 21:19:04 +00003818/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003819static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3820 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003821{
bellard84b7b8e2005-11-28 21:19:04 +00003822 uint8_t *ptr;
3823 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003824 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003825
Avi Kivity06ef3522012-02-13 16:11:22 +02003826 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003827
Blue Swirlcc5bea62012-04-14 14:56:48 +00003828 if (!(memory_region_is_ram(section->mr) ||
3829 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003830 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003831 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003832
3833 /* XXX This is broken when the device endianness differs from the CPU
3834 endianness. Fix this and actually check the "endian" argument. */
bellard84b7b8e2005-11-28 21:19:04 +00003835#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003836 val = io_mem_read(section->mr, addr, 4) << 32;
3837 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003838#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003839 val = io_mem_read(section->mr, addr, 4);
3840 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003841#endif
3842 } else {
3843 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003844 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003845 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003846 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003847 switch (endian) {
3848 case DEVICE_LITTLE_ENDIAN:
3849 val = ldq_le_p(ptr);
3850 break;
3851 case DEVICE_BIG_ENDIAN:
3852 val = ldq_be_p(ptr);
3853 break;
3854 default:
3855 val = ldq_p(ptr);
3856 break;
3857 }
bellard84b7b8e2005-11-28 21:19:04 +00003858 }
3859 return val;
3860}
3861
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003862uint64_t ldq_phys(target_phys_addr_t addr)
3863{
3864 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3865}
3866
3867uint64_t ldq_le_phys(target_phys_addr_t addr)
3868{
3869 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3870}
3871
3872uint64_t ldq_be_phys(target_phys_addr_t addr)
3873{
3874 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3875}
3876
bellardaab33092005-10-30 20:48:42 +00003877/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003878uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003879{
3880 uint8_t val;
3881 cpu_physical_memory_read(addr, &val, 1);
3882 return val;
3883}
3884
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003885/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003886static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3887 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003888{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003889 uint8_t *ptr;
3890 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003891 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003892
Avi Kivity06ef3522012-02-13 16:11:22 +02003893 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003894
Blue Swirlcc5bea62012-04-14 14:56:48 +00003895 if (!(memory_region_is_ram(section->mr) ||
3896 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003897 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003898 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003899 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003900#if defined(TARGET_WORDS_BIGENDIAN)
3901 if (endian == DEVICE_LITTLE_ENDIAN) {
3902 val = bswap16(val);
3903 }
3904#else
3905 if (endian == DEVICE_BIG_ENDIAN) {
3906 val = bswap16(val);
3907 }
3908#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003909 } else {
3910 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003911 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003912 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003913 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003914 switch (endian) {
3915 case DEVICE_LITTLE_ENDIAN:
3916 val = lduw_le_p(ptr);
3917 break;
3918 case DEVICE_BIG_ENDIAN:
3919 val = lduw_be_p(ptr);
3920 break;
3921 default:
3922 val = lduw_p(ptr);
3923 break;
3924 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003925 }
3926 return val;
bellardaab33092005-10-30 20:48:42 +00003927}
3928
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003929uint32_t lduw_phys(target_phys_addr_t addr)
3930{
3931 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3932}
3933
3934uint32_t lduw_le_phys(target_phys_addr_t addr)
3935{
3936 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3937}
3938
3939uint32_t lduw_be_phys(target_phys_addr_t addr)
3940{
3941 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3942}
3943
bellard8df1cd02005-01-28 22:37:22 +00003944/* warning: addr must be aligned. The RAM page is not marked as dirty
3945 and the code inside is not invalidated. It is useful if the dirty
3946 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003947void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003948{
bellard8df1cd02005-01-28 22:37:22 +00003949 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003950 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003951
Avi Kivity06ef3522012-02-13 16:11:22 +02003952 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003953
Avi Kivityf3705d52012-03-08 16:16:34 +02003954 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003955 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003956 if (memory_region_is_ram(section->mr)) {
3957 section = &phys_sections[phys_section_rom];
3958 }
3959 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003960 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003961 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003962 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003963 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003964 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003965 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003966
3967 if (unlikely(in_migration)) {
3968 if (!cpu_physical_memory_is_dirty(addr1)) {
3969 /* invalidate code */
3970 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3971 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003972 cpu_physical_memory_set_dirty_flags(
3973 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003974 }
3975 }
bellard8df1cd02005-01-28 22:37:22 +00003976 }
3977}
3978
Anthony Liguoric227f092009-10-01 16:12:16 -05003979void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003980{
j_mayerbc98a7e2007-04-04 07:55:12 +00003981 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003982 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003983
Avi Kivity06ef3522012-02-13 16:11:22 +02003984 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003985
Avi Kivityf3705d52012-03-08 16:16:34 +02003986 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003987 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003988 if (memory_region_is_ram(section->mr)) {
3989 section = &phys_sections[phys_section_rom];
3990 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003991#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003992 io_mem_write(section->mr, addr, val >> 32, 4);
3993 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003994#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003995 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3996 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003997#endif
3998 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003999 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004000 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004001 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00004002 stq_p(ptr, val);
4003 }
4004}
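
/* Sketch only (the function name and the PTE layout are hypothetical): the
 * kind of caller the two "notdirty" stores above are meant for.  A software
 * page-table walker that sets accessed/dirty bits in a guest PTE uses them
 * so that the update neither marks the RAM page dirty (e.g. for migration)
 * nor invalidates translated code covering that page. */
void example_pte_set_accessed_dirty(target_phys_addr_t pte_addr);
void example_pte_set_accessed_dirty(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20 | 0x40;    /* hypothetical accessed and dirty bits */
    stl_phys_notdirty(pte_addr, pte);
}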
4005
bellard8df1cd02005-01-28 22:37:22 +00004006/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004007static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4008 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004009{
bellard8df1cd02005-01-28 22:37:22 +00004010 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004011 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004012
Avi Kivity06ef3522012-02-13 16:11:22 +02004013 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004014
Avi Kivityf3705d52012-03-08 16:16:34 +02004015 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004016 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004017 if (memory_region_is_ram(section->mr)) {
4018 section = &phys_sections[phys_section_rom];
4019 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004020#if defined(TARGET_WORDS_BIGENDIAN)
4021 if (endian == DEVICE_LITTLE_ENDIAN) {
4022 val = bswap32(val);
4023 }
4024#else
4025 if (endian == DEVICE_BIG_ENDIAN) {
4026 val = bswap32(val);
4027 }
4028#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004029 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004030 } else {
4031 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004032 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004033 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00004034 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004035 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004036 switch (endian) {
4037 case DEVICE_LITTLE_ENDIAN:
4038 stl_le_p(ptr, val);
4039 break;
4040 case DEVICE_BIG_ENDIAN:
4041 stl_be_p(ptr, val);
4042 break;
4043 default:
4044 stl_p(ptr, val);
4045 break;
4046 }
bellard3a7d9292005-08-21 09:26:42 +00004047 if (!cpu_physical_memory_is_dirty(addr1)) {
4048 /* invalidate code */
4049 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4050 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004051 cpu_physical_memory_set_dirty_flags(addr1,
4052 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004053 }
bellard8df1cd02005-01-28 22:37:22 +00004054 }
4055}
4056
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004057void stl_phys(target_phys_addr_t addr, uint32_t val)
4058{
4059 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4060}
4061
4062void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4063{
4064 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4065}
4066
4067void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4068{
4069 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4070}
4071
bellardaab33092005-10-30 20:48:42 +00004072/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004073void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004074{
4075 uint8_t v = val;
4076 cpu_physical_memory_write(addr, &v, 1);
4077}
4078
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004079/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004080static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4081 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004082{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004083 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004084 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004085
Avi Kivity06ef3522012-02-13 16:11:22 +02004086 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004087
Avi Kivityf3705d52012-03-08 16:16:34 +02004088 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004089 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004090 if (memory_region_is_ram(section->mr)) {
4091 section = &phys_sections[phys_section_rom];
4092 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004093#if defined(TARGET_WORDS_BIGENDIAN)
4094 if (endian == DEVICE_LITTLE_ENDIAN) {
4095 val = bswap16(val);
4096 }
4097#else
4098 if (endian == DEVICE_BIG_ENDIAN) {
4099 val = bswap16(val);
4100 }
4101#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004102 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004103 } else {
4104 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004105 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004106 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004107 /* RAM case */
4108 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004109 switch (endian) {
4110 case DEVICE_LITTLE_ENDIAN:
4111 stw_le_p(ptr, val);
4112 break;
4113 case DEVICE_BIG_ENDIAN:
4114 stw_be_p(ptr, val);
4115 break;
4116 default:
4117 stw_p(ptr, val);
4118 break;
4119 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004120 if (!cpu_physical_memory_is_dirty(addr1)) {
4121 /* invalidate code */
4122 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4123 /* set dirty bit */
4124 cpu_physical_memory_set_dirty_flags(addr1,
4125 (0xff & ~CODE_DIRTY_FLAG));
4126 }
4127 }
bellardaab33092005-10-30 20:48:42 +00004128}
4129
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004130void stw_phys(target_phys_addr_t addr, uint32_t val)
4131{
4132 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4133}
4134
4135void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4136{
4137 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4138}
4139
4140void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4141{
4142 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4143}
4144
bellardaab33092005-10-30 20:48:42 +00004145/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004146void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004147{
4148 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004149 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004150}
4151
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004152void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4153{
4154 val = cpu_to_le64(val);
4155 cpu_physical_memory_write(addr, &val, 8);
4156}
4157
4158void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4159{
4160 val = cpu_to_be64(val);
4161 cpu_physical_memory_write(addr, &val, 8);
4162}
4163
aliguori5e2972f2009-03-28 17:51:36 +00004164/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004165int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004166 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004167{
4168 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004169 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004170 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004171
4172 while (len > 0) {
4173 page = addr & TARGET_PAGE_MASK;
4174 phys_addr = cpu_get_phys_page_debug(env, page);
4175 /* if no physical page mapped, return an error */
4176 if (phys_addr == -1)
4177 return -1;
4178 l = (page + TARGET_PAGE_SIZE) - addr;
4179 if (l > len)
4180 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004181 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004182 if (is_write)
4183 cpu_physical_memory_write_rom(phys_addr, buf, l);
4184 else
aliguori5e2972f2009-03-28 17:51:36 +00004185 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004186 len -= l;
4187 buf += l;
4188 addr += l;
4189 }
4190 return 0;
4191}
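
/* Sketch of a debugger-style caller, not part of QEMU; the name is
 * illustrative.  It reads a 32-bit value through the guest's virtual
 * address space using the current page tables, and fails instead of
 * faulting when the page is unmapped. */
int example_debug_read_u32(CPUArchState *env, target_ulong vaddr,
                           uint32_t *value);
int example_debug_read_u32(CPUArchState *env, target_ulong vaddr,
                           uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* no physical page mapped at vaddr */
    }
    *value = ldl_p(buf);        /* value in target byte order */
    return 0;
}
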
Paul Brooka68fe892010-03-01 00:08:59 +00004192#endif
bellard13eb76e2004-01-24 15:23:36 +00004193
pbrook2e70f6e2008-06-29 01:03:05 +00004194/* In deterministic (icount) execution mode, an instruction that performs
4195 device I/O must be the last instruction in its TB */
Blue Swirl20503962012-04-09 14:20:20 +00004196void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004197{
4198 TranslationBlock *tb;
4199 uint32_t n, cflags;
4200 target_ulong pc, cs_base;
4201 uint64_t flags;
4202
Blue Swirl20503962012-04-09 14:20:20 +00004203 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004204 if (!tb) {
4205 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004206 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004207 }
4208 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004209 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004210 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004211 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004212 n = n - env->icount_decr.u16.low;
4213 /* Generate a new TB ending on the I/O insn. */
4214 n++;
4215 /* On MIPS and SH, delay slot instructions can only be restarted if
4216 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004217 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004218 branch. */
4219#if defined(TARGET_MIPS)
4220 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4221 env->active_tc.PC -= 4;
4222 env->icount_decr.u16.low++;
4223 env->hflags &= ~MIPS_HFLAG_BMASK;
4224 }
4225#elif defined(TARGET_SH4)
4226 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4227 && n > 1) {
4228 env->pc -= 2;
4229 env->icount_decr.u16.low++;
4230 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4231 }
4232#endif
4233 /* This should never happen. */
4234 if (n > CF_COUNT_MASK)
4235 cpu_abort(env, "TB too big during recompile");
4236
4237 cflags = n | CF_LAST_IO;
4238 pc = tb->pc;
4239 cs_base = tb->cs_base;
4240 flags = tb->flags;
4241 tb_phys_invalidate(tb, -1);
4242 /* FIXME: In theory this could raise an exception. In practice
4243 we have already translated the block once so it's probably ok. */
4244 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004245 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004246 the first in the TB) then we end up generating a whole new TB and
4247 repeating the fault, which is horribly inefficient.
4248 Better would be to execute just this insn uncached, or generate a
4249 second new TB. */
4250 cpu_resume_from_signal(env, NULL);
4251}
4252
Paul Brookb3755a92010-03-12 16:54:58 +00004253#if !defined(CONFIG_USER_ONLY)
4254
Stefan Weil055403b2010-10-22 23:03:32 +02004255void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004256{
4257 int i, target_code_size, max_target_code_size;
4258 int direct_jmp_count, direct_jmp2_count, cross_page;
4259 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004260
bellarde3db7222005-01-26 22:00:47 +00004261 target_code_size = 0;
4262 max_target_code_size = 0;
4263 cross_page = 0;
4264 direct_jmp_count = 0;
4265 direct_jmp2_count = 0;
4266 for(i = 0; i < nb_tbs; i++) {
4267 tb = &tbs[i];
4268 target_code_size += tb->size;
4269 if (tb->size > max_target_code_size)
4270 max_target_code_size = tb->size;
4271 if (tb->page_addr[1] != -1)
4272 cross_page++;
4273 if (tb->tb_next_offset[0] != 0xffff) {
4274 direct_jmp_count++;
4275 if (tb->tb_next_offset[1] != 0xffff) {
4276 direct_jmp2_count++;
4277 }
4278 }
4279 }
4280 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004281 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004282 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004283 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4284 cpu_fprintf(f, "TB count %d/%d\n",
4285 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004286 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004287 nb_tbs ? target_code_size / nb_tbs : 0,
4288 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004289 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004290 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4291 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004292 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4293 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004294 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4295 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004296 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004297 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4298 direct_jmp2_count,
4299 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004300 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004301 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4302 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4303 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004304 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004305}
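
/* Sketch, not upstream code: dump_exec_info() takes a FILE pointer plus a
 * printf-like callback, so a plain fprintf works for ad-hoc debugging
 * (the monitor passes its own wrapper instead). */
void example_dump_tb_stats(void);
void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}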
4306
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004307/*
4308 * A helper function for the _utterly broken_ virtio device model to find out if
4309 * it's running on a big endian machine. Don't do this at home kids!
4310 */
4311bool virtio_is_big_endian(void);
4312bool virtio_is_big_endian(void)
4313{
4314#if defined(TARGET_WORDS_BIGENDIAN)
4315 return true;
4316#else
4317 return false;
4318#endif
4319}
4320
bellard61382a52003-10-27 21:22:23 +00004321#endif