/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
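/* Together these form a radix tree over the physical address space: each
   interior node is an array of L2_SIZE PhysPageEntry values, named by a
   15-bit index into phys_map_nodes, so at most 32K nodes (the NIL value is
   reserved) of one packed 16-bit entry each can exist.  Leaf entries index
   phys_sections instead of phys_map_nodes. */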

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

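/* Walk the multi-level map rooted at l1_map: V_L1_BITS of the page index
   select the top-level slot, each intermediate level consumes L2_BITS more,
   and the bottom level holds arrays of PageDesc.  With alloc == 0 this is a
   pure lookup that returns NULL as soon as a level is missing. */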
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

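/* Recursive descent over the phys_map radix tree, pointing the pages in
   [*index, *index + *nb) at section 'leaf'.  When the remaining range covers
   an entire step-aligned block at the current level, the entry becomes a
   leaf right here rather than being expanded down to the bottom level, so
   large uniform regions stay cheap to represent. */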
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

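/* Allocate (or adopt the static) translation cache and map it
   read/write/execute.  On several hosts the buffer is deliberately capped
   in size and pinned at a low or fixed address so that generated code can
   reach the prologue and other TBs with direct branches. */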
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

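/* The per-page TB lists below tag the low two bits of each link with the
   page slot (0 or 1) it belongs to, since a TB may span two pages; masking
   with ~3 recovers the real TranslationBlock pointer. */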
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

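/* jmp_first/jmp_next form a circular singly-linked list of the TBs that
   jump into this one, tagged the same way: a low tag of 0 or 1 names the
   jump slot inside that TB, and a tag of 2 marks the list head. */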
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

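/* Set 'len' bits starting at bit 'start' in the bitmap 'tab', LSB first
   within each byte.  For example, set_bits(tab, 3, 7) ORs 0xf8 into tab[0]
   and 0x03 into tab[1], covering bits 3..9. */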
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
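
/* Translate one guest block: allocate a TB (flushing the whole cache if
   either the TB array or the code buffer is full), emit host code at
   code_gen_ptr, then link the TB under its physical page(s) so that writes
   to that code can find and invalidate it. */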
Andreas Färber9349b4f2012-03-14 01:38:32 +01001039TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +00001040 target_ulong pc, target_ulong cs_base,
1041 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001042{
1043 TranslationBlock *tb;
1044 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001045 tb_page_addr_t phys_pc, phys_page2;
1046 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001047 int code_gen_size;
1048
Paul Brook41c1b1c2010-03-12 16:54:58 +00001049 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001050 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001051 if (!tb) {
1052 /* flush must be done */
1053 tb_flush(env);
1054 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001055 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001056 /* Don't forget to invalidate previous TB info. */
1057 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001058 }
1059 tc_ptr = code_gen_ptr;
1060 tb->tc_ptr = tc_ptr;
1061 tb->cs_base = cs_base;
1062 tb->flags = flags;
1063 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001064 cpu_gen_code(env, tb, &code_gen_size);
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001065 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1066 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001067
bellardd720b932004-04-25 17:57:43 +00001068 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001069 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001070 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001071 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001072 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001073 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001074 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001075 return tb;
bellardd720b932004-04-25 17:57:43 +00001076}
ths3b46e622007-09-17 08:09:54 +00001077
bellard9fa3e852004-01-04 18:06:42 +00001078/* invalidate all TBs which intersect with the target physical page
1079 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001080 the same physical page. 'is_cpu_write_access' should be true if called
1081 from a real cpu write access: the virtual CPU will exit the current
1082 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001083void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001084 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001085{
aliguori6b917542008-11-18 19:46:41 +00001086 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001087 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001088 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001089 PageDesc *p;
1090 int n;
1091#ifdef TARGET_HAS_PRECISE_SMC
1092 int current_tb_not_found = is_cpu_write_access;
1093 TranslationBlock *current_tb = NULL;
1094 int current_tb_modified = 0;
1095 target_ulong current_pc = 0;
1096 target_ulong current_cs_base = 0;
1097 int current_flags = 0;
1098#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001099
1100 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001101 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001102 return;
ths5fafdf22007-09-16 21:08:06 +00001103 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001104 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1105 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001106 /* build code bitmap */
1107 build_page_bitmap(p);
1108 }
1109
1110 /* we remove all the TBs in the range [start, end[ */
1111 /* XXX: see if in some cases it could be faster to invalidate all the code */
1112 tb = p->first_tb;
1113 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001114 n = (uintptr_t)tb & 3;
1115 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001116 tb_next = tb->page_next[n];
1117 /* NOTE: this is subtle as a TB may span two physical pages */
1118 if (n == 0) {
1119 /* NOTE: tb_end may be after the end of the page, but
1120 it is not a problem */
1121 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1122 tb_end = tb_start + tb->size;
1123 } else {
1124 tb_start = tb->page_addr[1];
1125 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1126 }
1127 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001128#ifdef TARGET_HAS_PRECISE_SMC
1129 if (current_tb_not_found) {
1130 current_tb_not_found = 0;
1131 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001132 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001133 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001134 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001135 }
1136 }
1137 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001138 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001139 /* If we are modifying the current TB, we must stop
1140 its execution. We could be more precise by checking
1141 that the modification is after the current PC, but it
1142 would require a specialized function to partially
1143 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001144
bellardd720b932004-04-25 17:57:43 +00001145 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001146 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001147 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1148 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001149 }
1150#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001151 /* we need to do that to handle the case where a signal
1152 occurs while doing tb_phys_invalidate() */
1153 saved_tb = NULL;
1154 if (env) {
1155 saved_tb = env->current_tb;
1156 env->current_tb = NULL;
1157 }
bellard9fa3e852004-01-04 18:06:42 +00001158 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001159 if (env) {
1160 env->current_tb = saved_tb;
1161 if (env->interrupt_request && env->current_tb)
1162 cpu_interrupt(env, env->interrupt_request);
1163 }
bellard9fa3e852004-01-04 18:06:42 +00001164 }
1165 tb = tb_next;
1166 }
1167#if !defined(CONFIG_USER_ONLY)
1168 /* if no code remaining, no need to continue to use slow writes */
1169 if (!p->first_tb) {
1170 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001171 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001172 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001173 }
1174 }
1175#endif
1176#ifdef TARGET_HAS_PRECISE_SMC
1177 if (current_tb_modified) {
1178 /* we generate a block containing just the instruction
1179 modifying the memory. It will ensure that it cannot modify
1180 itself */
bellardea1c1802004-06-14 18:56:36 +00001181 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001182 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001183 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001184 }
1185#endif
1186}
1187
1188/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001189static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001190{
1191 PageDesc *p;
1192 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001193#if 0
bellarda4193c82004-06-03 14:01:43 +00001194 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001195 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1196 cpu_single_env->mem_io_vaddr, len,
1197 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001198 cpu_single_env->eip +
1199 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001200 }
1201#endif
bellard9fa3e852004-01-04 18:06:42 +00001202 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001203 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001204 return;
1205 if (p->code_bitmap) {
1206 offset = start & ~TARGET_PAGE_MASK;
1207 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1208 if (b & ((1 << len) - 1))
1209 goto do_invalidate;
1210 } else {
1211 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001212 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001213 }
1214}
1215
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

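/* Illustrative note (not from the original source): the CONFIG_USER_ONLY
 * path above protects whole host pages, and one host page may span several
 * target pages (e.g. a hypothetical 64 KiB host page covering sixteen
 * 4 KiB target pages). mprotect() works at host-page granularity, so the
 * protection applied is the OR of every contained target page's flags,
 * with PAGE_WRITE stripped. */
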
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

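/* Illustrative sketch (not part of the build): the lists above tag the low
 * two bits of TranslationBlock pointers with metadata, relying on TBs
 * being at least 4-byte aligned. A hypothetical decode of a tagged
 * pointer: */
#if 0
    uintptr_t tagged = (uintptr_t)p->first_tb;
    int n = tagged & 3;   /* 0/1: which page slot; 2: jmp list sentinel */
    TranslationBlock *tb = (TranslationBlock *)(tagged & ~3);
#endif
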
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

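/* Illustrative note (not from the original source): jmp_first chains every
 * TB that jumps into a given TB, again using the low 2 bits of each
 * pointer as the slot index. The list is circular: walking jmp_next[n1]
 * links eventually reaches the owning TB itself, tagged with 2 as a
 * sentinel, which is how the "find head of list" loop above terminates. */
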
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

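/* Illustrative usage sketch (not part of the build): a debug front end
 * could watch an aligned 4-byte guest variable for writes like this;
 * 'env' and 'guest_addr' are hypothetical. */
#if 0
    CPUWatchpoint *wp;
    if (cpu_watchpoint_insert(env, guest_addr, 4,
                              BP_GDB | BP_MEM_WRITE, &wp) < 0) {
        /* length not a power of two, address unaligned, or len too big */
    }
#endif
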
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

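/* Illustrative note (not from the original source): breakpoint_invalidate()
 * is called so any TB already translated for 'pc' is thrown away; the next
 * execution retranslates the code and can stop at the breakpoint. Without
 * it, a cached TB would run straight through the new breakpoint. */
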
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

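/* Illustrative usage sketch (not part of the build): parsing a
 * hypothetical "-d exec,cpu" command line option. */
#if 0
    int mask = cpu_str_to_log_mask("exec,cpu");
    if (!mask) {
        /* unknown log item name */
    } else {
        cpu_set_log(mask);    /* CPU_LOG_EXEC | CPU_LOG_TB_CPU */
    }
#endif
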
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* Note that this function is suitable for use as the CPUClass copy
 * callback if your CPUArchState has no members which are unsuitable
 * for simple shallow copy via memcpy().
 */
CPUState *cpu_default_copy(CPUState *oldcpu)
{
    CPUArchState *env = CPU_GET_ENV(oldcpu);
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return ENV_GET_CPU(new_env);
}

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

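/* Illustrative note (not from the original source): two flushes are needed
 * because a TB may start on the page preceding 'addr' and spill into the
 * flushed page; clearing the jump-cache slots for both that previous page
 * and the page of 'addr' removes every entry that could reference such a
 * TB. */
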
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}

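/* Illustrative note (not from the original source): the returned value is
 * overloaded. For RAM it is essentially a ram_addr_t with the special
 * notdirty/rom section index OR-ed into the low, page-offset bits; for
 * everything else it is an index into phys_sections plus the offset within
 * the region, which the TLB slow path uses to locate the I/O callbacks. */
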
#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}

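/* Illustrative usage sketch (not part of the build): validating a guest
 * buffer before emulating a syscall that writes to it; 'guest_buf' and
 * 'count' are hypothetical. */
#if 0
    if (page_check_range(guest_buf, count, PAGE_WRITE) < 0) {
        return -TARGET_EFAULT;
    }
#endif
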
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}

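/* Illustrative walk-through (not from the original source), assuming 4 KiB
 * target pages: registering a hypothetical section at 0x1800 of size
 * 0x2a00 is split into a subpage head [0x1800, 0x2000), a full-page
 * multipage run [0x2000, 0x4000), and a subpage tail [0x4000, 0x4200). */
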
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

Alex Williamson04b16652010-07-02 11:13:17 -06002463static void *file_ram_alloc(RAMBlock *block,
2464 ram_addr_t memory,
2465 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002466{
2467 char *filename;
2468 void *area;
2469 int fd;
2470#ifdef MAP_POPULATE
2471 int flags;
2472#endif
2473 unsigned long hpagesize;
2474
2475 hpagesize = gethugepagesize(path);
2476 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002477 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002478 }
2479
2480 if (memory < hpagesize) {
2481 return NULL;
2482 }
2483
2484 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2485 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2486 return NULL;
2487 }
2488
2489 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002490 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002491 }
2492
2493 fd = mkstemp(filename);
2494 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002495 perror("unable to create backing store for hugepages");
2496 free(filename);
2497 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002498 }
2499 unlink(filename);
2500 free(filename);
2501
2502 memory = (memory+hpagesize-1) & ~(hpagesize-1);
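    /* Example of the rounding above (assuming 2 MiB huge pages): a
     * 5 MiB request becomes 6 MiB, since
     * (0x500000 + 0x1fffff) & ~0x1fffff == 0x600000. */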
2503
2504 /*
2505 * ftruncate is not supported by hugetlbfs in older
2506 * hosts, so don't bother bailing out on errors.
2507 * If anything goes wrong with it under other filesystems,
2508 * mmap will fail.
2509 */
2510 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002511 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002512
2513#ifdef MAP_POPULATE
2514 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2515 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2516 * to sidestep this quirk.
2517 */
2518 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2519 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2520#else
2521 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2522#endif
2523 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002524 perror("file_ram_alloc: can't mmap RAM pages");
2525 close(fd);
2526 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002527 }
Alex Williamson04b16652010-07-02 11:13:17 -06002528 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002529 return area;
2530}
2531#endif
2532
Alex Williamsond17b5282010-06-25 11:08:38 -06002533static ram_addr_t find_ram_offset(ram_addr_t size)
2534{
Alex Williamson04b16652010-07-02 11:13:17 -06002535 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002536 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002537
2538 if (QLIST_EMPTY(&ram_list.blocks))
2539 return 0;
2540
2541 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002542 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002543
2544 end = block->offset + block->length;
2545
2546 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2547 if (next_block->offset >= end) {
2548 next = MIN(next, next_block->offset);
2549 }
2550 }
2551 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002552 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002553 mingap = next - end;
2554 }
2555 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002556
2557 if (offset == RAM_ADDR_MAX) {
2558 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2559 (uint64_t)size);
2560 abort();
2561 }
2562
Alex Williamson04b16652010-07-02 11:13:17 -06002563 return offset;
2564}
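/*
 * Illustrative sketch (assumed layout): with blocks at [0, 0x1000) and
 * [0x3000, 0x4000), a request for 0x1000 bytes returns offset 0x1000.
 * The 0x2000-byte hole between the blocks fits the request and is the
 * smallest such gap, so the best-fit search above (tracked in "mingap")
 * picks it over the unbounded space after the last block.
 */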
2565
2566static ram_addr_t last_ram_offset(void)
2567{
Alex Williamsond17b5282010-06-25 11:08:38 -06002568 RAMBlock *block;
2569 ram_addr_t last = 0;
2570
2571 QLIST_FOREACH(block, &ram_list.blocks, next)
2572 last = MAX(last, block->offset + block->length);
2573
2574 return last;
2575}
2576
Avi Kivityc5705a72011-12-20 15:59:12 +02002577void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002578{
2579 RAMBlock *new_block, *block;
2580
Avi Kivityc5705a72011-12-20 15:59:12 +02002581 new_block = NULL;
2582 QLIST_FOREACH(block, &ram_list.blocks, next) {
2583 if (block->offset == addr) {
2584 new_block = block;
2585 break;
2586 }
2587 }
2588 assert(new_block);
2589 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002590
2591 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2592 char *id = dev->parent_bus->info->get_dev_path(dev);
2593 if (id) {
2594 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002595 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002596 }
2597 }
2598 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2599
2600 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002601 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002602 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2603 new_block->idstr);
2604 abort();
2605 }
2606 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002607}
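/*
 * Illustrative example (hypothetical names): for a block owned by a
 * device whose bus path resolves to "0000:00:02.0", calling this with
 * name = "vga.vram" produces the idstr "0000:00:02.0/vga.vram"; with
 * dev == NULL it is just "vga.vram".  Duplicate idstrs abort above, so
 * migration can match blocks between source and destination.
 */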
2608
2609ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2610 MemoryRegion *mr)
2611{
2612 RAMBlock *new_block;
2613
2614 size = TARGET_PAGE_ALIGN(size);
2615 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002616
Avi Kivity7c637362011-12-21 13:09:49 +02002617 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002618 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002619 if (host) {
2620 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002621 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002622 } else {
2623 if (mem_path) {
2624#if defined (__linux__) && !defined(TARGET_S390X)
2625 new_block->host = file_ram_alloc(new_block, size, mem_path);
2626 if (!new_block->host) {
2627 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002628 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002629 }
2630#else
2631 fprintf(stderr, "-mem-path option unsupported\n");
2632 exit(1);
2633#endif
2634 } else {
2635#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002636 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2637 a system-defined value, which is at least 256GB. Larger systems
2638 have larger values. We put the guest between the end of the data
2639 segment (system break) and this value. We use 32GB as a base to
2640 have enough room for the system break to grow. */
2641 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002642 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002643 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002644 if (new_block->host == MAP_FAILED) {
2645 fprintf(stderr, "Allocating RAM failed\n");
2646 abort();
2647 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002648#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002649 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002650 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002651 } else {
2652 new_block->host = qemu_vmalloc(size);
2653 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002654#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002655 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002656 }
2657 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002658 new_block->length = size;
2659
2660 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2661
Anthony Liguori7267c092011-08-20 22:09:37 -05002662 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002663 last_ram_offset() >> TARGET_PAGE_BITS);
2664 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2665 0xff, size >> TARGET_PAGE_BITS);
2666
2667 if (kvm_enabled())
2668 kvm_setup_guest_memory(new_block->host, size);
2669
2670 return new_block->offset;
2671}
2672
Avi Kivityc5705a72011-12-20 15:59:12 +02002673ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002674{
Avi Kivityc5705a72011-12-20 15:59:12 +02002675 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002676}
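/*
 * Usage sketch (illustrative): device models normally reach this
 * allocator through the memory API rather than calling it directly,
 * along the lines of:
 *
 *     memory_region_init_ram(&s->vram, "vga.vram", vram_size);
 *
 * which allocates host memory; qemu_ram_alloc_from_ptr() is the variant
 * for callers that already own the host buffer (host != NULL).
 */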
bellarde9a1ab12007-02-08 23:08:38 +00002677
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002678void qemu_ram_free_from_ptr(ram_addr_t addr)
2679{
2680 RAMBlock *block;
2681
2682 QLIST_FOREACH(block, &ram_list.blocks, next) {
2683 if (addr == block->offset) {
2684 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002685 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002686 return;
2687 }
2688 }
2689}
2690
Anthony Liguoric227f092009-10-01 16:12:16 -05002691void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002692{
Alex Williamson04b16652010-07-02 11:13:17 -06002693 RAMBlock *block;
2694
2695 QLIST_FOREACH(block, &ram_list.blocks, next) {
2696 if (addr == block->offset) {
2697 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002698 if (block->flags & RAM_PREALLOC_MASK) {
2699 ;
2700 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002701#if defined (__linux__) && !defined(TARGET_S390X)
2702 if (block->fd) {
2703 munmap(block->host, block->length);
2704 close(block->fd);
2705 } else {
2706 qemu_vfree(block->host);
2707 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002708#else
2709 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002710#endif
2711 } else {
2712#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2713 munmap(block->host, block->length);
2714#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002715 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002716 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002717 } else {
2718 qemu_vfree(block->host);
2719 }
Alex Williamson04b16652010-07-02 11:13:17 -06002720#endif
2721 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002722 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002723 return;
2724 }
2725 }
2726
bellarde9a1ab12007-02-08 23:08:38 +00002727}
2728
Huang Yingcd19cfa2011-03-02 08:56:19 +01002729#ifndef _WIN32
2730void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2731{
2732 RAMBlock *block;
2733 ram_addr_t offset;
2734 int flags;
2735 void *area, *vaddr;
2736
2737 QLIST_FOREACH(block, &ram_list.blocks, next) {
2738 offset = addr - block->offset;
2739 if (offset < block->length) {
2740 vaddr = block->host + offset;
2741 if (block->flags & RAM_PREALLOC_MASK) {
2742 ;
2743 } else {
2744 flags = MAP_FIXED;
2745 munmap(vaddr, length);
2746 if (mem_path) {
2747#if defined(__linux__) && !defined(TARGET_S390X)
2748 if (block->fd) {
2749#ifdef MAP_POPULATE
2750 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2751 MAP_PRIVATE;
2752#else
2753 flags |= MAP_PRIVATE;
2754#endif
2755 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2756 flags, block->fd, offset);
2757 } else {
2758 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2759 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2760 flags, -1, 0);
2761 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002762#else
2763 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002764#endif
2765 } else {
2766#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2767 flags |= MAP_SHARED | MAP_ANONYMOUS;
2768 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2769 flags, -1, 0);
2770#else
2771 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2772 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2773 flags, -1, 0);
2774#endif
2775 }
2776 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002777 fprintf(stderr, "Could not remap addr: "
2778 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002779 length, addr);
2780 exit(1);
2781 }
2782 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2783 }
2784 return;
2785 }
2786 }
2787}
2788#endif /* !_WIN32 */
2789
pbrookdc828ca2009-04-09 22:21:07 +00002790/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002791 With the exception of the softmmu code in this file, this should
2792 only be used for local memory (e.g. video ram) that the device owns,
2793 and knows it isn't going to access beyond the end of the block.
2794
2795 It should not be used for general purpose DMA.
2796 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2797 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002798void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002799{
pbrook94a6b542009-04-11 17:15:54 +00002800 RAMBlock *block;
2801
Alex Williamsonf471a172010-06-11 11:11:42 -06002802 QLIST_FOREACH(block, &ram_list.blocks, next) {
2803 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002804 /* Move this entry to the start of the list. */
2805 if (block != QLIST_FIRST(&ram_list.blocks)) {
2806 QLIST_REMOVE(block, next);
2807 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2808 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002809 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002810 /* We need to check whether the requested address is in RAM
2811 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002812 * If it is, map only up to the end of the containing page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002813 */
2814 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002815 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002816 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002817 block->host =
2818 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002819 }
2820 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002821 return block->host + (addr - block->offset);
2822 }
pbrook94a6b542009-04-11 17:15:54 +00002823 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002824
2825 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2826 abort();
2827
2828 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002829}
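/*
 * Usage sketch (illustrative): a display device that owns its RAM block
 * may take a host pointer to scan out video memory:
 *
 *     uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);
 *     memcpy(scanline, vram + y * pitch, pitch);
 *
 * Per the comment above, this is not for guest-driven DMA; use
 * cpu_physical_memory_map()/cpu_physical_memory_rw() for that.
 */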
2830
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002831/* Return a host pointer to ram allocated with qemu_ram_alloc.
2832 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2833 */
2834void *qemu_safe_ram_ptr(ram_addr_t addr)
2835{
2836 RAMBlock *block;
2837
2838 QLIST_FOREACH(block, &ram_list.blocks, next) {
2839 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002840 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002841 /* We need to check whether the requested address is in RAM
2842 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002843 * If it is, map only up to the end of the containing page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002844 */
2845 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002846 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002847 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002848 block->host =
2849 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002850 }
2851 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002852 return block->host + (addr - block->offset);
2853 }
2854 }
2855
2856 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2857 abort();
2858
2859 return NULL;
2860}
2861
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002862/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2863 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002864void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002865{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002866 if (*size == 0) {
2867 return NULL;
2868 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002869 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002870 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002871 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002872 RAMBlock *block;
2873
2874 QLIST_FOREACH(block, &ram_list.blocks, next) {
2875 if (addr - block->offset < block->length) {
2876 if (addr - block->offset + *size > block->length)
2877 *size = block->length - addr + block->offset;
2878 return block->host + (addr - block->offset);
2879 }
2880 }
2881
2882 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2883 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002884 }
2885}
2886
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002887void qemu_put_ram_ptr(void *addr)
2888{
2889 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002890}
2891
Marcelo Tosattie8902612010-10-11 15:31:19 -03002892int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002893{
pbrook94a6b542009-04-11 17:15:54 +00002894 RAMBlock *block;
2895 uint8_t *host = ptr;
2896
Jan Kiszka868bb332011-06-21 22:59:09 +02002897 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002898 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002899 return 0;
2900 }
2901
Alex Williamsonf471a172010-06-11 11:11:42 -06002902 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002903 /* This case can happen when the block is not mapped. */
2904 if (block->host == NULL) {
2905 continue;
2906 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002907 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002908 *ram_addr = block->offset + (host - block->host);
2909 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002910 }
pbrook94a6b542009-04-11 17:15:54 +00002911 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002912
Marcelo Tosattie8902612010-10-11 15:31:19 -03002913 return -1;
2914}
Alex Williamsonf471a172010-06-11 11:11:42 -06002915
Marcelo Tosattie8902612010-10-11 15:31:19 -03002916/* Some of the softmmu routines need to translate from a host pointer
2917 (typically a TLB entry) back to a ram offset. */
2918ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2919{
2920 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002921
Marcelo Tosattie8902612010-10-11 15:31:19 -03002922 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2923 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2924 abort();
2925 }
2926 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002927}
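/*
 * Usage sketch (illustrative): the softmmu slow path turns a host
 * pointer taken from a TLB entry back into a ram_addr_t:
 *
 *     ram_addr_t ram_addr = qemu_ram_addr_from_host_nofail(host_ptr);
 *
 * The _nofail variant aborts on pointers outside every RAMBlock;
 * callers that may see foreign pointers use qemu_ram_addr_from_host()
 * and check its return value instead.
 */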
2928
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002929static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2930 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002931{
pbrook67d3b952006-12-18 05:03:52 +00002932#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002933 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002934#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002935#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002936 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002937#endif
2938 return 0;
2939}
2940
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002941static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2942 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002943{
2944#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002945 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002946#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002947#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002948 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002949#endif
2950}
2951
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002952static const MemoryRegionOps unassigned_mem_ops = {
2953 .read = unassigned_mem_read,
2954 .write = unassigned_mem_write,
2955 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002956};
2957
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002958static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2959 unsigned size)
2960{
2961 abort();
2962}
2963
2964static void error_mem_write(void *opaque, target_phys_addr_t addr,
2965 uint64_t value, unsigned size)
2966{
2967 abort();
2968}
2969
2970static const MemoryRegionOps error_mem_ops = {
2971 .read = error_mem_read,
2972 .write = error_mem_write,
2973 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002974};
2975
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002976static const MemoryRegionOps rom_mem_ops = {
2977 .read = error_mem_read,
2978 .write = unassigned_mem_write,
2979 .endianness = DEVICE_NATIVE_ENDIAN,
2980};
2981
2982static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2983 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002984{
bellard3a7d9292005-08-21 09:26:42 +00002985 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002986 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002987 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2988#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002989 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002990 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002991#endif
2992 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002993 switch (size) {
2994 case 1:
2995 stb_p(qemu_get_ram_ptr(ram_addr), val);
2996 break;
2997 case 2:
2998 stw_p(qemu_get_ram_ptr(ram_addr), val);
2999 break;
3000 case 4:
3001 stl_p(qemu_get_ram_ptr(ram_addr), val);
3002 break;
3003 default:
3004 abort();
3005 }
bellardf23db162005-08-21 19:12:28 +00003006 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003007 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003008 /* we remove the notdirty callback only if the code has been
3009 flushed */
3010 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003011 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003012}
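/*
 * Illustrative flow: a guest store to a page that still contains
 * translated code is routed here instead of straight to RAM:
 * tb_invalidate_phys_page_fast() discards the TBs on the page, the
 * store is applied to the host copy with st{b,w,l}_p(), and once no
 * code remains (dirty_flags == 0xff) tlb_set_dirty() disarms this slow
 * path so later stores go directly to RAM.
 */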
3013
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003014static const MemoryRegionOps notdirty_mem_ops = {
3015 .read = error_mem_read,
3016 .write = notdirty_mem_write,
3017 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003018};
3019
pbrook0f459d12008-06-09 00:20:13 +00003020/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003021static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003022{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003023 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003024 target_ulong pc, cs_base;
3025 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003026 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003027 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003028 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003029
aliguori06d55cc2008-11-18 20:24:06 +00003030 if (env->watchpoint_hit) {
3031 /* We re-entered the check after replacing the TB. Now raise
3032 * the debug interrupt so that it will trigger after the
3033 * current instruction. */
3034 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3035 return;
3036 }
pbrook2e70f6e2008-06-29 01:03:05 +00003037 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003038 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003039 if ((vaddr == (wp->vaddr & len_mask) ||
3040 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003041 wp->flags |= BP_WATCHPOINT_HIT;
3042 if (!env->watchpoint_hit) {
3043 env->watchpoint_hit = wp;
3044 tb = tb_find_pc(env->mem_io_pc);
3045 if (!tb) {
3046 cpu_abort(env, "check_watchpoint: could not find TB for "
3047 "pc=%p", (void *)env->mem_io_pc);
3048 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003049 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003050 tb_phys_invalidate(tb, -1);
3051 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3052 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003053 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003054 } else {
3055 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3056 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003057 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003058 }
aliguori06d55cc2008-11-18 20:24:06 +00003059 }
aliguori6e140f22008-11-18 20:37:55 +00003060 } else {
3061 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003062 }
3063 }
3064}
3065
pbrook6658ffb2007-03-16 23:58:11 +00003066/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3067 so these check for a hit then pass through to the normal out-of-line
3068 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003069static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3070 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003071{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003072 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3073 switch (size) {
3074 case 1: return ldub_phys(addr);
3075 case 2: return lduw_phys(addr);
3076 case 4: return ldl_phys(addr);
3077 default: abort();
3078 }
pbrook6658ffb2007-03-16 23:58:11 +00003079}
3080
Avi Kivity1ec9b902012-01-02 12:47:48 +02003081static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3082 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003083{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003084 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3085 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003086 case 1:
3087 stb_phys(addr, val);
3088 break;
3089 case 2:
3090 stw_phys(addr, val);
3091 break;
3092 case 4:
3093 stl_phys(addr, val);
3094 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003095 default: abort();
3096 }
pbrook6658ffb2007-03-16 23:58:11 +00003097}
3098
Avi Kivity1ec9b902012-01-02 12:47:48 +02003099static const MemoryRegionOps watch_mem_ops = {
3100 .read = watch_mem_read,
3101 .write = watch_mem_write,
3102 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003103};
pbrook6658ffb2007-03-16 23:58:11 +00003104
Avi Kivity70c68e42012-01-02 12:32:48 +02003105static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3106 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003107{
Avi Kivity70c68e42012-01-02 12:32:48 +02003108 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003109 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003110 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003111#if defined(DEBUG_SUBPAGE)
3112 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3113 mmio, len, addr, idx);
3114#endif
blueswir1db7b5422007-05-26 17:36:03 +00003115
Avi Kivity5312bd82012-02-12 18:32:55 +02003116 section = &phys_sections[mmio->sub_section[idx]];
3117 addr += mmio->base;
3118 addr -= section->offset_within_address_space;
3119 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003120 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003121}
3122
Avi Kivity70c68e42012-01-02 12:32:48 +02003123static void subpage_write(void *opaque, target_phys_addr_t addr,
3124 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003125{
Avi Kivity70c68e42012-01-02 12:32:48 +02003126 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003127 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003128 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003129#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003130 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3131 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003132 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003133#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003134
Avi Kivity5312bd82012-02-12 18:32:55 +02003135 section = &phys_sections[mmio->sub_section[idx]];
3136 addr += mmio->base;
3137 addr -= section->offset_within_address_space;
3138 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003139 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003140}
3141
Avi Kivity70c68e42012-01-02 12:32:48 +02003142static const MemoryRegionOps subpage_ops = {
3143 .read = subpage_read,
3144 .write = subpage_write,
3145 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003146};
3147
Avi Kivityde712f92012-01-02 12:41:07 +02003148static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3149 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003150{
3151 ram_addr_t raddr = addr;
3152 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003153 switch (size) {
3154 case 1: return ldub_p(ptr);
3155 case 2: return lduw_p(ptr);
3156 case 4: return ldl_p(ptr);
3157 default: abort();
3158 }
Andreas Färber56384e82011-11-30 16:26:21 +01003159}
3160
Avi Kivityde712f92012-01-02 12:41:07 +02003161static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3162 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003163{
3164 ram_addr_t raddr = addr;
3165 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003166 switch (size) {
3167 case 1: stb_p(ptr, value); break;
3168 case 2: stw_p(ptr, value); break;
3169 case 4: stl_p(ptr, value); break;
3170 default: abort();
3171 }
Andreas Färber56384e82011-11-30 16:26:21 +01003172}
3173
Avi Kivityde712f92012-01-02 12:41:07 +02003174static const MemoryRegionOps subpage_ram_ops = {
3175 .read = subpage_ram_read,
3176 .write = subpage_ram_write,
3177 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003178};
3179
Anthony Liguoric227f092009-10-01 16:12:16 -05003180static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003181 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003182{
3183 int idx, eidx;
3184
3185 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3186 return -1;
3187 idx = SUBPAGE_IDX(start);
3188 eidx = SUBPAGE_IDX(end);
3189#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003190 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003191 __func__, mmio, start, end, idx, eidx, section);
3192#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003193 if (memory_region_is_ram(phys_sections[section].mr)) {
3194 MemoryRegionSection new_section = phys_sections[section];
3195 new_section.mr = &io_mem_subpage_ram;
3196 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003197 }
blueswir1db7b5422007-05-26 17:36:03 +00003198 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003199 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003200 }
3201
3202 return 0;
3203}
3204
Avi Kivity0f0cb162012-02-13 17:14:32 +02003205static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003206{
Anthony Liguoric227f092009-10-01 16:12:16 -05003207 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003208
Anthony Liguori7267c092011-08-20 22:09:37 -05003209 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003210
3211 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003212 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3213 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003214 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003215#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003216 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3217 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003218#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003219 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003220
3221 return mmio;
3222}
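/*
 * Illustrative example (hypothetical layout): if a 2 KiB device region
 * starts at offset 0x800 of a 4 KiB page, the page gets one subpage_t
 * whose sub_section[] entries map
 *
 *     [0x000, 0x7ff] -> phys_section_unassigned
 *     [0x800, 0xfff] -> the device's section
 *
 * and subpage_read()/subpage_write() forward each access to whichever
 * section owns the accessed offset.
 */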
3223
Avi Kivity5312bd82012-02-12 18:32:55 +02003224static uint16_t dummy_section(MemoryRegion *mr)
3225{
3226 MemoryRegionSection section = {
3227 .mr = mr,
3228 .offset_within_address_space = 0,
3229 .offset_within_region = 0,
3230 .size = UINT64_MAX,
3231 };
3232
3233 return phys_section_add(&section);
3234}
3235
Avi Kivity37ec01d2012-03-08 18:08:35 +02003236MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003237{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003238 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003239}
3240
Avi Kivitye9179ce2009-06-14 11:38:52 +03003241static void io_mem_init(void)
3242{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003243 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003244 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3245 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3246 "unassigned", UINT64_MAX);
3247 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3248 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003249 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3250 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003251 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3252 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003253}
3254
Avi Kivity50c1e142012-02-08 21:36:02 +02003255static void core_begin(MemoryListener *listener)
3256{
Avi Kivity54688b12012-02-09 17:34:32 +02003257 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003258 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003259 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003260 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003261 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3262 phys_section_rom = dummy_section(&io_mem_rom);
3263 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003264}
3265
3266static void core_commit(MemoryListener *listener)
3267{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003268 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003269
3270 /* since each CPU stores ram addresses in its TLB cache, we must
3271 reset the modified entries */
3272 /* XXX: slow ! */
3273 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3274 tlb_flush(env, 1);
3275 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003276}
3277
Avi Kivity93632742012-02-08 16:54:16 +02003278static void core_region_add(MemoryListener *listener,
3279 MemoryRegionSection *section)
3280{
Avi Kivity4855d412012-02-08 21:16:05 +02003281 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003282}
3283
3284static void core_region_del(MemoryListener *listener,
3285 MemoryRegionSection *section)
3286{
Avi Kivity93632742012-02-08 16:54:16 +02003287}
3288
Avi Kivity50c1e142012-02-08 21:36:02 +02003289static void core_region_nop(MemoryListener *listener,
3290 MemoryRegionSection *section)
3291{
Avi Kivity54688b12012-02-09 17:34:32 +02003292 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003293}
3294
Avi Kivity93632742012-02-08 16:54:16 +02003295static void core_log_start(MemoryListener *listener,
3296 MemoryRegionSection *section)
3297{
3298}
3299
3300static void core_log_stop(MemoryListener *listener,
3301 MemoryRegionSection *section)
3302{
3303}
3304
3305static void core_log_sync(MemoryListener *listener,
3306 MemoryRegionSection *section)
3307{
3308}
3309
3310static void core_log_global_start(MemoryListener *listener)
3311{
3312 cpu_physical_memory_set_dirty_tracking(1);
3313}
3314
3315static void core_log_global_stop(MemoryListener *listener)
3316{
3317 cpu_physical_memory_set_dirty_tracking(0);
3318}
3319
3320static void core_eventfd_add(MemoryListener *listener,
3321 MemoryRegionSection *section,
3322 bool match_data, uint64_t data, int fd)
3323{
3324}
3325
3326static void core_eventfd_del(MemoryListener *listener,
3327 MemoryRegionSection *section,
3328 bool match_data, uint64_t data, int fd)
3329{
3330}
3331
Avi Kivity50c1e142012-02-08 21:36:02 +02003332static void io_begin(MemoryListener *listener)
3333{
3334}
3335
3336static void io_commit(MemoryListener *listener)
3337{
3338}
3339
Avi Kivity4855d412012-02-08 21:16:05 +02003340static void io_region_add(MemoryListener *listener,
3341 MemoryRegionSection *section)
3342{
Avi Kivitya2d33522012-03-05 17:40:12 +02003343 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3344
3345 mrio->mr = section->mr;
3346 mrio->offset = section->offset_within_region;
3347 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003348 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003349 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003350}
3351
3352static void io_region_del(MemoryListener *listener,
3353 MemoryRegionSection *section)
3354{
3355 isa_unassign_ioport(section->offset_within_address_space, section->size);
3356}
3357
Avi Kivity50c1e142012-02-08 21:36:02 +02003358static void io_region_nop(MemoryListener *listener,
3359 MemoryRegionSection *section)
3360{
3361}
3362
Avi Kivity4855d412012-02-08 21:16:05 +02003363static void io_log_start(MemoryListener *listener,
3364 MemoryRegionSection *section)
3365{
3366}
3367
3368static void io_log_stop(MemoryListener *listener,
3369 MemoryRegionSection *section)
3370{
3371}
3372
3373static void io_log_sync(MemoryListener *listener,
3374 MemoryRegionSection *section)
3375{
3376}
3377
3378static void io_log_global_start(MemoryListener *listener)
3379{
3380}
3381
3382static void io_log_global_stop(MemoryListener *listener)
3383{
3384}
3385
3386static void io_eventfd_add(MemoryListener *listener,
3387 MemoryRegionSection *section,
3388 bool match_data, uint64_t data, int fd)
3389{
3390}
3391
3392static void io_eventfd_del(MemoryListener *listener,
3393 MemoryRegionSection *section,
3394 bool match_data, uint64_t data, int fd)
3395{
3396}
3397
Avi Kivity93632742012-02-08 16:54:16 +02003398static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003399 .begin = core_begin,
3400 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003401 .region_add = core_region_add,
3402 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003403 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003404 .log_start = core_log_start,
3405 .log_stop = core_log_stop,
3406 .log_sync = core_log_sync,
3407 .log_global_start = core_log_global_start,
3408 .log_global_stop = core_log_global_stop,
3409 .eventfd_add = core_eventfd_add,
3410 .eventfd_del = core_eventfd_del,
3411 .priority = 0,
3412};
3413
Avi Kivity4855d412012-02-08 21:16:05 +02003414static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003415 .begin = io_begin,
3416 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003417 .region_add = io_region_add,
3418 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003419 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003420 .log_start = io_log_start,
3421 .log_stop = io_log_stop,
3422 .log_sync = io_log_sync,
3423 .log_global_start = io_log_global_start,
3424 .log_global_stop = io_log_global_stop,
3425 .eventfd_add = io_eventfd_add,
3426 .eventfd_del = io_eventfd_del,
3427 .priority = 0,
3428};
3429
Avi Kivity62152b82011-07-26 14:26:14 +03003430static void memory_map_init(void)
3431{
Anthony Liguori7267c092011-08-20 22:09:37 -05003432 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003433 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003434 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003435
Anthony Liguori7267c092011-08-20 22:09:37 -05003436 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003437 memory_region_init(system_io, "io", 65536);
3438 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003439
Avi Kivity4855d412012-02-08 21:16:05 +02003440 memory_listener_register(&core_memory_listener, system_memory);
3441 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003442}
3443
3444MemoryRegion *get_system_memory(void)
3445{
3446 return system_memory;
3447}
3448
Avi Kivity309cb472011-08-08 16:09:03 +03003449MemoryRegion *get_system_io(void)
3450{
3451 return system_io;
3452}
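/*
 * Usage sketch (illustrative): board code hangs RAM and devices off the
 * root region returned above, e.g.:
 *
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 *
 * Every such topology change is replayed to the core and I/O listeners
 * registered in memory_map_init() as region_add/region_del callbacks.
 */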
3453
pbrooke2eef172008-06-08 01:09:01 +00003454#endif /* !defined(CONFIG_USER_ONLY) */
3455
bellard13eb76e2004-01-24 15:23:36 +00003456/* physical memory access (slow version, mainly for debug) */
3457#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003458int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003459 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003460{
3461 int l, flags;
3462 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003463 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003464
3465 while (len > 0) {
3466 page = addr & TARGET_PAGE_MASK;
3467 l = (page + TARGET_PAGE_SIZE) - addr;
3468 if (l > len)
3469 l = len;
3470 flags = page_get_flags(page);
3471 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003472 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003473 if (is_write) {
3474 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003475 return -1;
bellard579a97f2007-11-11 14:26:47 +00003476 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003477 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003478 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003479 memcpy(p, buf, l);
3480 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003481 } else {
3482 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003483 return -1;
bellard579a97f2007-11-11 14:26:47 +00003484 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003485 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003486 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003487 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003488 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003489 }
3490 len -= l;
3491 buf += l;
3492 addr += l;
3493 }
Paul Brooka68fe892010-03-01 00:08:59 +00003494 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003495}
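/*
 * Usage sketch (illustrative): the gdb stub reads guest memory through
 * this interface so page protections are honoured:
 *
 *     uint8_t buf[4];
 *     if (cpu_memory_rw_debug(env, pc, buf, sizeof(buf), 0) < 0) {
 *         ... address not mapped ...
 *     }
 */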
bellard8df1cd02005-01-28 22:37:22 +00003496
bellard13eb76e2004-01-24 15:23:36 +00003497#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003498void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003499 int len, int is_write)
3500{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003501 int l;
bellard13eb76e2004-01-24 15:23:36 +00003502 uint8_t *ptr;
3503 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003504 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003505 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003506
bellard13eb76e2004-01-24 15:23:36 +00003507 while (len > 0) {
3508 page = addr & TARGET_PAGE_MASK;
3509 l = (page + TARGET_PAGE_SIZE) - addr;
3510 if (l > len)
3511 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003512 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003513
bellard13eb76e2004-01-24 15:23:36 +00003514 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003515 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003516 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003517 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003518 /* XXX: could force cpu_single_env to NULL to avoid
3519 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003520 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003521 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003522 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003523 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003524 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003525 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003526 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003527 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003528 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003529 l = 2;
3530 } else {
bellard1c213d12005-09-03 10:49:04 +00003531 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003532 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003533 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003534 l = 1;
3535 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003536 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003537 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003538 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003539 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003540 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003541 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003542 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003543 if (!cpu_physical_memory_is_dirty(addr1)) {
3544 /* invalidate code */
3545 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3546 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003547 cpu_physical_memory_set_dirty_flags(
3548 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003549 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003550 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003551 }
3552 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003553 if (!(memory_region_is_ram(section->mr) ||
3554 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003555 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003556 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003557 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003558 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003559 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003560 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003561 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003562 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003563 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003564 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003565 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003566 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003567 l = 2;
3568 } else {
bellard1c213d12005-09-03 10:49:04 +00003569 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003570 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003571 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003572 l = 1;
3573 }
3574 } else {
3575 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003576 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003577 + memory_region_section_addr(section,
3578 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003579 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003580 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003581 }
3582 }
3583 len -= l;
3584 buf += l;
3585 addr += l;
3586 }
3587}
bellard8df1cd02005-01-28 22:37:22 +00003588
bellardd0ecd2a2006-04-23 17:14:48 +00003589/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003590void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003591 const uint8_t *buf, int len)
3592{
3593 int l;
3594 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003595 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003596 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003597
bellardd0ecd2a2006-04-23 17:14:48 +00003598 while (len > 0) {
3599 page = addr & TARGET_PAGE_MASK;
3600 l = (page + TARGET_PAGE_SIZE) - addr;
3601 if (l > len)
3602 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003603 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003604
Blue Swirlcc5bea62012-04-14 14:56:48 +00003605 if (!(memory_region_is_ram(section->mr) ||
3606 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003607 /* do nothing */
3608 } else {
3609 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003610 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003611 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003612 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003613 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003614 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003615 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003616 }
3617 len -= l;
3618 buf += l;
3619 addr += l;
3620 }
3621}
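/*
 * Usage sketch (illustrative): firmware loading uses this to copy an
 * image into memory the guest sees as read-only, e.g.:
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
 *
 * A plain cpu_physical_memory_rw() write to the same range would be
 * silently discarded above, since the section is not writable RAM.
 */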
3622
aliguori6d16c2f2009-01-22 16:59:11 +00003623typedef struct {
3624 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003625 target_phys_addr_t addr;
3626 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003627} BounceBuffer;
3628
3629static BounceBuffer bounce;
3630
aliguoriba223c22009-01-22 16:59:16 +00003631typedef struct MapClient {
3632 void *opaque;
3633 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003634 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003635} MapClient;
3636
Blue Swirl72cf2d42009-09-12 07:36:22 +00003637static QLIST_HEAD(map_client_list, MapClient) map_client_list
3638 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003639
3640void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3641{
Anthony Liguori7267c092011-08-20 22:09:37 -05003642 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003643
3644 client->opaque = opaque;
3645 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003646 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003647 return client;
3648}
3649
3650void cpu_unregister_map_client(void *_client)
3651{
3652 MapClient *client = (MapClient *)_client;
3653
Blue Swirl72cf2d42009-09-12 07:36:22 +00003654 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003655 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003656}
3657
3658static void cpu_notify_map_clients(void)
3659{
3660 MapClient *client;
3661
Blue Swirl72cf2d42009-09-12 07:36:22 +00003662 while (!QLIST_EMPTY(&map_client_list)) {
3663 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003664 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003665 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003666 }
3667}
3668
aliguori6d16c2f2009-01-22 16:59:11 +00003669/* Map a physical memory region into a host virtual address.
3670 * May map a subset of the requested range, given by and returned in *plen.
3671 * May return NULL if resources needed to perform the mapping are exhausted.
3672 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003673 * Use cpu_register_map_client() to know when retrying the map operation is
3674 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003675 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003676void *cpu_physical_memory_map(target_phys_addr_t addr,
3677 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003678 int is_write)
3679{
Anthony Liguoric227f092009-10-01 16:12:16 -05003680 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003681 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003682 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003683 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003684 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003685 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003686 ram_addr_t rlen;
3687 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003688
3689 while (len > 0) {
3690 page = addr & TARGET_PAGE_MASK;
3691 l = (page + TARGET_PAGE_SIZE) - addr;
3692 if (l > len)
3693 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003694 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003695
Avi Kivityf3705d52012-03-08 16:16:34 +02003696 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003697 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003698 break;
3699 }
3700 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3701 bounce.addr = addr;
3702 bounce.len = l;
3703 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003704 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003705 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003706
3707 *plen = l;
3708 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003709 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003710 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003711 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003712 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003713 }
aliguori6d16c2f2009-01-22 16:59:11 +00003714
3715 len -= l;
3716 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003717 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003718 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003719 rlen = todo;
3720 ret = qemu_ram_ptr_length(raddr, &rlen);
3721 *plen = rlen;
3722 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003723}
3724
3725/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3726 * Will also mark the memory as dirty if is_write == 1. access_len gives
3727 * the amount of memory that was actually read or written by the caller.
3728 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003729void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3730 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003731{
3732 if (buffer != bounce.buffer) {
3733 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003734 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003735 while (access_len) {
3736 unsigned l;
3737 l = TARGET_PAGE_SIZE;
3738 if (l > access_len)
3739 l = access_len;
3740 if (!cpu_physical_memory_is_dirty(addr1)) {
3741 /* invalidate code */
3742 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3743 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003744 cpu_physical_memory_set_dirty_flags(
3745 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003746 }
3747 addr1 += l;
3748 access_len -= l;
3749 }
3750 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003751 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003752 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003753 }
aliguori6d16c2f2009-01-22 16:59:11 +00003754 return;
3755 }
3756 if (is_write) {
3757 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3758 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003759 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003760 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003761 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003762}
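/* Illustrative sketch, not part of the original file: the intended pairing
 * of cpu_physical_memory_map()/cpu_physical_memory_unmap() for a DMA-style
 * read.  dev_dma_read and the retry policy are hypothetical; only the
 * map/unmap/register-client calls are real API in this file.
 */
#if 0
static void dev_dma_read(target_phys_addr_t dma_addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(dma_addr, &plen, 0 /* is_write */);

    if (!buf) {
        /* Bounce buffer in use; a real device would queue a callback via
           cpu_register_map_client() and retry when notified. */
        return;
    }
    /* ... consume up to plen bytes (plen may be smaller than len) ... */
    cpu_physical_memory_unmap(buf, plen, 0 /* is_write */, plen);
}
#endif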
bellardd0ecd2a2006-04-23 17:14:48 +00003763
bellard8df1cd02005-01-28 22:37:22 +00003764/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003765static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3766 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003767{
bellard8df1cd02005-01-28 22:37:22 +00003768 uint8_t *ptr;
3769 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003770 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003771
Avi Kivity06ef3522012-02-13 16:11:22 +02003772 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003773
Blue Swirlcc5bea62012-04-14 14:56:48 +00003774 if (!(memory_region_is_ram(section->mr) ||
3775 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003776 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003777 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003778 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003779#if defined(TARGET_WORDS_BIGENDIAN)
3780 if (endian == DEVICE_LITTLE_ENDIAN) {
3781 val = bswap32(val);
3782 }
3783#else
3784 if (endian == DEVICE_BIG_ENDIAN) {
3785 val = bswap32(val);
3786 }
3787#endif
bellard8df1cd02005-01-28 22:37:22 +00003788 } else {
3789 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003790 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003791 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003792 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003793 switch (endian) {
3794 case DEVICE_LITTLE_ENDIAN:
3795 val = ldl_le_p(ptr);
3796 break;
3797 case DEVICE_BIG_ENDIAN:
3798 val = ldl_be_p(ptr);
3799 break;
3800 default:
3801 val = ldl_p(ptr);
3802 break;
3803 }
bellard8df1cd02005-01-28 22:37:22 +00003804 }
3805 return val;
3806}
3807
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003808uint32_t ldl_phys(target_phys_addr_t addr)
3809{
3810 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3811}
3812
3813uint32_t ldl_le_phys(target_phys_addr_t addr)
3814{
3815 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3816}
3817
3818uint32_t ldl_be_phys(target_phys_addr_t addr)
3819{
3820 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3821}
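/* Illustrative sketch, not part of the original file: a device whose
 * registers are specified as little-endian should be read through the
 * fixed-endian accessor so the value is independent of the target's
 * native byte order.  REG_BASE and the register offset are hypothetical.
 */
#if 0
#define REG_BASE 0xfe000000
static uint32_t read_status_reg(void)
{
    /* byte-swapped internally when TARGET_WORDS_BIGENDIAN is set */
    return ldl_le_phys(REG_BASE + 0x4);
}
#endif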
3822
bellard84b7b8e2005-11-28 21:19:04 +00003823/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003824static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3825 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003826{
bellard84b7b8e2005-11-28 21:19:04 +00003827 uint8_t *ptr;
3828 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003829 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003830
Avi Kivity06ef3522012-02-13 16:11:22 +02003831 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003832
Blue Swirlcc5bea62012-04-14 14:56:48 +00003833 if (!(memory_region_is_ram(section->mr) ||
3834 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003835 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003836 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003837
 3838        /* Combine the two 32-bit halves in target order, then byte-swap
 3839           when the requested endianness differs, mirroring ldl above. */
bellard84b7b8e2005-11-28 21:19:04 +00003840#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003841        val = io_mem_read(section->mr, addr, 4) << 32;
 3842        val |= io_mem_read(section->mr, addr + 4, 4);
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
bellard84b7b8e2005-11-28 21:19:04 +00003843#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003844        val = io_mem_read(section->mr, addr, 4);
 3845        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
bellard84b7b8e2005-11-28 21:19:04 +00003846#endif
3847 } else {
3848 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003849 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003850 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003851 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003852 switch (endian) {
3853 case DEVICE_LITTLE_ENDIAN:
3854 val = ldq_le_p(ptr);
3855 break;
3856 case DEVICE_BIG_ENDIAN:
3857 val = ldq_be_p(ptr);
3858 break;
3859 default:
3860 val = ldq_p(ptr);
3861 break;
3862 }
bellard84b7b8e2005-11-28 21:19:04 +00003863 }
3864 return val;
3865}
3866
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003867uint64_t ldq_phys(target_phys_addr_t addr)
3868{
3869 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3870}
3871
3872uint64_t ldq_le_phys(target_phys_addr_t addr)
3873{
3874 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3875}
3876
3877uint64_t ldq_be_phys(target_phys_addr_t addr)
3878{
3879 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3880}
3881
bellardaab33092005-10-30 20:48:42 +00003882/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003883uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003884{
3885 uint8_t val;
3886 cpu_physical_memory_read(addr, &val, 1);
3887 return val;
3888}
3889
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003890/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003891static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3892 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003893{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003894 uint8_t *ptr;
3895 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003896 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003897
Avi Kivity06ef3522012-02-13 16:11:22 +02003898 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003899
Blue Swirlcc5bea62012-04-14 14:56:48 +00003900 if (!(memory_region_is_ram(section->mr) ||
3901 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003902 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003903 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003904 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003905#if defined(TARGET_WORDS_BIGENDIAN)
3906 if (endian == DEVICE_LITTLE_ENDIAN) {
3907 val = bswap16(val);
3908 }
3909#else
3910 if (endian == DEVICE_BIG_ENDIAN) {
3911 val = bswap16(val);
3912 }
3913#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003914 } else {
3915 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003916 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003917 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003918 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003919 switch (endian) {
3920 case DEVICE_LITTLE_ENDIAN:
3921 val = lduw_le_p(ptr);
3922 break;
3923 case DEVICE_BIG_ENDIAN:
3924 val = lduw_be_p(ptr);
3925 break;
3926 default:
3927 val = lduw_p(ptr);
3928 break;
3929 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003930 }
3931 return val;
bellardaab33092005-10-30 20:48:42 +00003932}
3933
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003934uint32_t lduw_phys(target_phys_addr_t addr)
3935{
3936 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3937}
3938
3939uint32_t lduw_le_phys(target_phys_addr_t addr)
3940{
3941 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3942}
3943
3944uint32_t lduw_be_phys(target_phys_addr_t addr)
3945{
3946 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3947}
3948
bellard8df1cd02005-01-28 22:37:22 +00003949/* warning: addr must be aligned. The ram page is not marked as dirty
 3950 and the code inside is not invalidated. This is useful when the dirty
 3951 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003952void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003953{
bellard8df1cd02005-01-28 22:37:22 +00003954 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003955 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003956
Avi Kivity06ef3522012-02-13 16:11:22 +02003957 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003958
Avi Kivityf3705d52012-03-08 16:16:34 +02003959 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003960 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003961 if (memory_region_is_ram(section->mr)) {
3962 section = &phys_sections[phys_section_rom];
3963 }
3964 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003965 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003966 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003967 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003968 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003969 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003970 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003971
3972 if (unlikely(in_migration)) {
3973 if (!cpu_physical_memory_is_dirty(addr1)) {
3974 /* invalidate code */
3975 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3976 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003977 cpu_physical_memory_set_dirty_flags(
3978 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003979 }
3980 }
bellard8df1cd02005-01-28 22:37:22 +00003981 }
3982}
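/* Illustrative sketch, not part of the original file: target MMU emulation
 * that sets an accessed/dirty bit in a guest PTE wants the _notdirty
 * variant, so rewriting the PTE neither invalidates translated code on
 * that page nor sets the page's dirty flags.  PTE_DIRTY_BIT and the helper
 * name are hypothetical.
 */
#if 0
#define PTE_DIRTY_BIT 0x40
static void mmu_set_pte_dirty(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | PTE_DIRTY_BIT);
}
#endif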
3983
Anthony Liguoric227f092009-10-01 16:12:16 -05003984void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003985{
j_mayerbc98a7e2007-04-04 07:55:12 +00003986 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003987 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003988
Avi Kivity06ef3522012-02-13 16:11:22 +02003989 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003990
Avi Kivityf3705d52012-03-08 16:16:34 +02003991 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003992 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003993 if (memory_region_is_ram(section->mr)) {
3994 section = &phys_sections[phys_section_rom];
3995 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003996#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003997 io_mem_write(section->mr, addr, val >> 32, 4);
3998 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003999#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02004000 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4001 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004002#endif
4003 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004004 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004005 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004006 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00004007 stq_p(ptr, val);
4008 }
4009}
4010
bellard8df1cd02005-01-28 22:37:22 +00004011/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004012static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4013 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004014{
bellard8df1cd02005-01-28 22:37:22 +00004015 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004016 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004017
Avi Kivity06ef3522012-02-13 16:11:22 +02004018 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004019
Avi Kivityf3705d52012-03-08 16:16:34 +02004020 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004021 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004022 if (memory_region_is_ram(section->mr)) {
4023 section = &phys_sections[phys_section_rom];
4024 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004025#if defined(TARGET_WORDS_BIGENDIAN)
4026 if (endian == DEVICE_LITTLE_ENDIAN) {
4027 val = bswap32(val);
4028 }
4029#else
4030 if (endian == DEVICE_BIG_ENDIAN) {
4031 val = bswap32(val);
4032 }
4033#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004034 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004035 } else {
4036 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004037 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004038 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00004039 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004040 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004041 switch (endian) {
4042 case DEVICE_LITTLE_ENDIAN:
4043 stl_le_p(ptr, val);
4044 break;
4045 case DEVICE_BIG_ENDIAN:
4046 stl_be_p(ptr, val);
4047 break;
4048 default:
4049 stl_p(ptr, val);
4050 break;
4051 }
bellard3a7d9292005-08-21 09:26:42 +00004052 if (!cpu_physical_memory_is_dirty(addr1)) {
4053 /* invalidate code */
4054 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4055 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004056 cpu_physical_memory_set_dirty_flags(addr1,
4057 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004058 }
bellard8df1cd02005-01-28 22:37:22 +00004059 }
4060}
4061
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004062void stl_phys(target_phys_addr_t addr, uint32_t val)
4063{
4064 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4065}
4066
4067void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4068{
4069 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4070}
4071
4072void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4073{
4074 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4075}
4076
bellardaab33092005-10-30 20:48:42 +00004077/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004078void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004079{
4080 uint8_t v = val;
4081 cpu_physical_memory_write(addr, &v, 1);
4082}
4083
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004084/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004085static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4086 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004087{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004088 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004089 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004090
Avi Kivity06ef3522012-02-13 16:11:22 +02004091 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004092
Avi Kivityf3705d52012-03-08 16:16:34 +02004093 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004094 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004095 if (memory_region_is_ram(section->mr)) {
4096 section = &phys_sections[phys_section_rom];
4097 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004098#if defined(TARGET_WORDS_BIGENDIAN)
4099 if (endian == DEVICE_LITTLE_ENDIAN) {
4100 val = bswap16(val);
4101 }
4102#else
4103 if (endian == DEVICE_BIG_ENDIAN) {
4104 val = bswap16(val);
4105 }
4106#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004107 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004108 } else {
4109 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004110 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004111 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004112 /* RAM case */
4113 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004114 switch (endian) {
4115 case DEVICE_LITTLE_ENDIAN:
4116 stw_le_p(ptr, val);
4117 break;
4118 case DEVICE_BIG_ENDIAN:
4119 stw_be_p(ptr, val);
4120 break;
4121 default:
4122 stw_p(ptr, val);
4123 break;
4124 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004125 if (!cpu_physical_memory_is_dirty(addr1)) {
4126 /* invalidate code */
4127 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4128 /* set dirty bit */
4129 cpu_physical_memory_set_dirty_flags(addr1,
4130 (0xff & ~CODE_DIRTY_FLAG));
4131 }
4132 }
bellardaab33092005-10-30 20:48:42 +00004133}
4134
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004135void stw_phys(target_phys_addr_t addr, uint32_t val)
4136{
4137 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4138}
4139
4140void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4141{
4142 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4143}
4144
4145void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4146{
4147 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4148}
4149
bellardaab33092005-10-30 20:48:42 +00004150/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004151void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004152{
4153 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004154 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004155}
4156
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004157void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4158{
4159 val = cpu_to_le64(val);
4160 cpu_physical_memory_write(addr, &val, 8);
4161}
4162
4163void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4164{
4165 val = cpu_to_be64(val);
4166 cpu_physical_memory_write(addr, &val, 8);
4167}
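/* Illustrative sketch, not part of the original file: publishing a 64-bit
 * guest-physical address in a layout that a device specification fixes as
 * little-endian, regardless of the target's byte order.  The function and
 * parameter names are hypothetical.
 */
#if 0
static void publish_desc_addr(target_phys_addr_t slot, uint64_t desc_gpa)
{
    stq_le_phys(slot, desc_gpa);
}
#endif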
4168
aliguori5e2972f2009-03-28 17:51:36 +00004169/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004170int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004171 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004172{
4173 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004174 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004175 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004176
4177 while (len > 0) {
4178 page = addr & TARGET_PAGE_MASK;
4179 phys_addr = cpu_get_phys_page_debug(env, page);
4180 /* if no physical page mapped, return an error */
4181 if (phys_addr == -1)
4182 return -1;
4183 l = (page + TARGET_PAGE_SIZE) - addr;
4184 if (l > len)
4185 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004186 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004187 if (is_write)
4188 cpu_physical_memory_write_rom(phys_addr, buf, l);
4189 else
aliguori5e2972f2009-03-28 17:51:36 +00004190 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004191 len -= l;
4192 buf += l;
4193 addr += l;
4194 }
4195 return 0;
4196}
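/* Illustrative sketch, not part of the original file: this is the primitive
 * a debugger front end (e.g. the gdb stub) uses to access guest memory by
 * virtual address; because writes go through cpu_physical_memory_write_rom(),
 * software breakpoints can be planted even in ROM.  debugger_peek is a
 * hypothetical wrapper.
 */
#if 0
static int debugger_peek(CPUArchState *env, target_ulong vaddr,
                         uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif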
Paul Brooka68fe892010-03-01 00:08:59 +00004197#endif
bellard13eb76e2004-01-24 15:23:36 +00004198
pbrook2e70f6e2008-06-29 01:03:05 +00004199/* In deterministic execution (icount) mode, an instruction that performs
 4200 device I/O must be the last one in its TB */
Blue Swirl20503962012-04-09 14:20:20 +00004201void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004202{
4203 TranslationBlock *tb;
4204 uint32_t n, cflags;
4205 target_ulong pc, cs_base;
4206 uint64_t flags;
4207
Blue Swirl20503962012-04-09 14:20:20 +00004208 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004209 if (!tb) {
4210 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004211 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004212 }
4213 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004214 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004215 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004216 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004217 n = n - env->icount_decr.u16.low;
4218 /* Generate a new TB ending on the I/O insn. */
4219 n++;
4220 /* On MIPS and SH, delay slot instructions can only be restarted if
4221 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004222 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004223 branch. */
4224#if defined(TARGET_MIPS)
4225 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4226 env->active_tc.PC -= 4;
4227 env->icount_decr.u16.low++;
4228 env->hflags &= ~MIPS_HFLAG_BMASK;
4229 }
4230#elif defined(TARGET_SH4)
4231 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4232 && n > 1) {
4233 env->pc -= 2;
4234 env->icount_decr.u16.low++;
4235 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4236 }
4237#endif
4238 /* This should never happen. */
4239 if (n > CF_COUNT_MASK)
4240 cpu_abort(env, "TB too big during recompile");
4241
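    /* CF_LAST_IO tells tb_gen_code() to treat the n-th (last) instruction
       as a possible I/O access, so the regenerated TB ends exactly at the
       faulting I/O instruction and icount stays deterministic. */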
4242 cflags = n | CF_LAST_IO;
4243 pc = tb->pc;
4244 cs_base = tb->cs_base;
4245 flags = tb->flags;
4246 tb_phys_invalidate(tb, -1);
4247 /* FIXME: In theory this could raise an exception. In practice
4248 we have already translated the block once so it's probably ok. */
4249 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004250 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004251 the first in the TB) then we end up generating a whole new TB and
4252 repeating the fault, which is horribly inefficient.
4253 Better would be to execute just this insn uncached, or generate a
4254 second new TB. */
4255 cpu_resume_from_signal(env, NULL);
4256}
4257
Paul Brookb3755a92010-03-12 16:54:58 +00004258#if !defined(CONFIG_USER_ONLY)
4259
Stefan Weil055403b2010-10-22 23:03:32 +02004260void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004261{
4262 int i, target_code_size, max_target_code_size;
4263 int direct_jmp_count, direct_jmp2_count, cross_page;
4264 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004265
bellarde3db7222005-01-26 22:00:47 +00004266 target_code_size = 0;
4267 max_target_code_size = 0;
4268 cross_page = 0;
4269 direct_jmp_count = 0;
4270 direct_jmp2_count = 0;
4271 for(i = 0; i < nb_tbs; i++) {
4272 tb = &tbs[i];
4273 target_code_size += tb->size;
4274 if (tb->size > max_target_code_size)
4275 max_target_code_size = tb->size;
4276 if (tb->page_addr[1] != -1)
4277 cross_page++;
4278 if (tb->tb_next_offset[0] != 0xffff) {
4279 direct_jmp_count++;
4280 if (tb->tb_next_offset[1] != 0xffff) {
4281 direct_jmp2_count++;
4282 }
4283 }
4284 }
 4285 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004286 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004287 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004288 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4289 cpu_fprintf(f, "TB count %d/%d\n",
4290 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004291 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004292 nb_tbs ? target_code_size / nb_tbs : 0,
4293 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004294 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004295 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4296 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004297 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4298 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004299 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4300 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004301 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004302 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4303 direct_jmp2_count,
4304 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004305 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004306 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4307 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4308 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004309 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004310}
4311
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004312/*
4313 * A helper function for the _utterly broken_ virtio device model to find out if
4314 * it's running on a big endian machine. Don't do this at home kids!
4315 */
4316bool virtio_is_big_endian(void);
4317bool virtio_is_big_endian(void)
4318{
4319#if defined(TARGET_WORDS_BIGENDIAN)
4320 return true;
4321#else
4322 return false;
4323#endif
4324}
4325
bellard61382a52003-10-27 21:22:23 +00004326#endif