/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to reach the next level, in units of L2_SIZE.
     * 0 means this entry is a leaf.
     */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

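/* Grow the node pool so that at least @nodes more nodes fit, doubling the
 * allocation each time so repeated reservations stay cheap.
 */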
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

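/* Take one node from the pool.  Every entry starts out pointing one level
 * down (skip = 1) at nothing yet (PHYS_MAP_NODE_NIL).
 */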
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

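/* Map *nb pages starting at page *index to section @leaf, descending the
 * tree from @level.  A subrange that is aligned to and covers a whole
 * subtree is recorded as a single leaf entry at the current level;
 * anything else recurses one level down.
 */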
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

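/* Entry point for the recursion above: point @nb pages of @d's dispatch
 * map, starting at page @index, at section number @leaf.
 */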
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

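/* Walk the radix tree for @addr, following skip links, and return the
 * matching section.  Unmapped addresses, and addresses outside the range
 * actually covered by the final entry, resolve to the unassigned section.
 */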
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

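/* True if the access can touch host memory directly, bypassing MMIO
 * dispatch: RAM (unless it is a write to read-only RAM), and reads from
 * ROM devices in ROMD mode.
 */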
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

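/* Translate @addr within @as into a target MemoryRegion and an offset into
 * it, iterating through any IOMMUs along the way.  *plen is clamped so the
 * returned range never crosses a translation unit (or, for direct RAM
 * accesses, a target page).
 */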
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

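/* Re-arm dirty tracking in the TCG TLBs for [start, start + length): after
 * the dirty bitmap is cleared, writes must trap again so they can re-mark
 * the pages dirty.  The range must lie within one RAM block (asserted).
 */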
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

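/* Register a section smaller than a target page inside the subpage
 * container covering its page, allocating the container first if the page
 * was previously unassigned.
 */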
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

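/* MemoryListener callback: carve an arbitrary section into an unaligned
 * head, a run of whole target pages, and an unaligned tail, registering
 * each piece as a subpage or multipage mapping as appropriate.
 */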
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

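/* Return the block size of the filesystem backing @path (the huge page
 * size on a hugetlbfs mount), or 0 on error.  Warns if @path is not on
 * hugetlbfs at all.
 */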
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

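/* Back @block with an unlinked temporary file under @path (expected to be
 * a hugetlbfs mount): round the size up to the huge page size, then mmap
 * the file.  If mem_prealloc is set, touch every huge page now, trapping
 * SIGBUS so an overcommitted mount fails at startup rather than killing
 * the guest later.
 */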
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize); i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

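/* Best-fit allocator for ram_addr_t space: scan the gaps between existing
 * RAM blocks and return the start of the smallest gap that can hold @size
 * bytes.
 */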
Alex Williamsond17b5282010-06-25 11:08:38 -06001116static ram_addr_t find_ram_offset(ram_addr_t size)
1117{
Alex Williamson04b16652010-07-02 11:13:17 -06001118 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001119 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001120
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001121 assert(size != 0); /* it would hand out same offset multiple times */
1122
Paolo Bonzinia3161032012-11-14 15:54:48 +01001123 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001124 return 0;
1125
Paolo Bonzinia3161032012-11-14 15:54:48 +01001126 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001127 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001128
1129 end = block->offset + block->length;
1130
Paolo Bonzinia3161032012-11-14 15:54:48 +01001131 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001132 if (next_block->offset >= end) {
1133 next = MIN(next, next_block->offset);
1134 }
1135 }
1136 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001137 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001138 mingap = next - end;
1139 }
1140 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001141
1142 if (offset == RAM_ADDR_MAX) {
1143 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1144 (uint64_t)size);
1145 abort();
1146 }
1147
Alex Williamson04b16652010-07-02 11:13:17 -06001148 return offset;
1149}
1150
Juan Quintela652d7ec2012-07-20 10:37:54 +02001151ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001152{
Alex Williamsond17b5282010-06-25 11:08:38 -06001153 RAMBlock *block;
1154 ram_addr_t last = 0;
1155
Paolo Bonzinia3161032012-11-14 15:54:48 +01001156 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001157 last = MAX(last, block->offset + block->length);
1158
1159 return last;
1160}
1161
Jason Baronddb97f12012-08-02 15:44:16 -04001162static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1163{
1164 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001165
1166 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001167 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1168 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001169 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1170 if (ret) {
1171 perror("qemu_madvise");
1172 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1173 "but dump_guest_core=off specified\n");
1174 }
1175 }
1176}
1177
Avi Kivityc5705a72011-12-20 15:59:12 +02001178void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001179{
1180 RAMBlock *new_block, *block;
1181
Avi Kivityc5705a72011-12-20 15:59:12 +02001182 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001183 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001184 if (block->offset == addr) {
1185 new_block = block;
1186 break;
1187 }
1188 }
1189 assert(new_block);
1190 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001191
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001192 if (dev) {
1193 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001194 if (id) {
1195 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001196 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001197 }
1198 }
1199 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1200
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001201 /* This assumes the iothread lock is taken here too. */
1202 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001203 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001204 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001205 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1206 new_block->idstr);
1207 abort();
1208 }
1209 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001210 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001211}
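/*
 * Illustrative example (the exact path format produced by
 * qdev_get_dev_path() is an assumption here): a VGA device at PCI address
 * 00:02.0 registering a block named "vga.vram" would get an idstr such as
 * "0000:00:02.0/vga.vram", while a block without an owning device keeps
 * the bare name, e.g. "pc.ram".
 */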
1212
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001213static int memory_try_enable_merging(void *addr, size_t len)
1214{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001215 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001216 /* disabled by the user */
1217 return 0;
1218 }
1219
1220 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1221}
1222
Avi Kivityc5705a72011-12-20 15:59:12 +02001223ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1224 MemoryRegion *mr)
1225{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001226 RAMBlock *block, *new_block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001227 ram_addr_t old_ram_size, new_ram_size;
1228
1229 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001230
1231 size = TARGET_PAGE_ALIGN(size);
1232 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001233 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001234
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001235 /* This assumes the iothread lock is taken here too. */
1236 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001237 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001238 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001239 if (host) {
1240 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001241 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001242 } else if (xen_enabled()) {
1243 if (mem_path) {
1244 fprintf(stderr, "-mem-path not supported with Xen\n");
1245 exit(1);
1246 }
1247 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001248 } else {
1249 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001250 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1251 /*
1252 * file_ram_alloc() needs to allocate just like
1253 * phys_mem_alloc, but we haven't bothered to provide
1254 * a hook there.
1255 */
1256 fprintf(stderr,
1257 "-mem-path not supported with this accelerator\n");
1258 exit(1);
1259 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001260 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001261 }
1262 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001263 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001264 if (!new_block->host) {
1265 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1266 new_block->mr->name, strerror(errno));
1267 exit(1);
1268 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001269 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001270 }
1271 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001272 new_block->length = size;
1273
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001274 /* Keep the list sorted from biggest to smallest block. */
1275 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1276 if (block->length < new_block->length) {
1277 break;
1278 }
1279 }
1280 if (block) {
1281 QTAILQ_INSERT_BEFORE(block, new_block, next);
1282 } else {
1283 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1284 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001285 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001286
Umesh Deshpandef798b072011-08-18 11:41:17 -07001287 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001288 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001289
Juan Quintela2152f5c2013-10-08 13:52:02 +02001290 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1291
1292 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001293 int i;
1294 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1295 ram_list.dirty_memory[i] =
1296 bitmap_zero_extend(ram_list.dirty_memory[i],
1297 old_ram_size, new_ram_size);
1298 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001299 }
Juan Quintela75218e72013-10-08 12:31:54 +02001300 cpu_physical_memory_set_dirty_range(new_block->offset, size);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001301
Jason Baronddb97f12012-08-02 15:44:16 -04001302 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001303 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001304 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001305
Cam Macdonell84b89d72010-07-26 18:10:57 -06001306 if (kvm_enabled())
1307 kvm_setup_guest_memory(new_block->host, size);
1308
1309 return new_block->offset;
1310}
1311
Avi Kivityc5705a72011-12-20 15:59:12 +02001312ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001313{
Avi Kivityc5705a72011-12-20 15:59:12 +02001314 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001315}
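/*
 * Illustrative sketch, compiled out (the shared-memory fd and the
 * example_* name are hypothetical): qemu_ram_alloc() lets phys_mem_alloc()
 * provide the host backing, while qemu_ram_alloc_from_ptr() registers an
 * existing host mapping instead, e.g. for an ivshmem-style shared-memory
 * device:
 */
#if 0
static ram_addr_t example_register_shm(MemoryRegion *mr, int shm_fd,
                                       ram_addr_t size)
{
    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                     shm_fd, 0);
    if (ptr == MAP_FAILED) {
        perror("example_register_shm: mmap");
        exit(1);
    }
    /* host pointer supplied, so the RAM_PREALLOC_MASK path above is taken */
    return qemu_ram_alloc_from_ptr(size, ptr, mr);
}
#endif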
bellarde9a1ab12007-02-08 23:08:38 +00001316
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001317void qemu_ram_free_from_ptr(ram_addr_t addr)
1318{
1319 RAMBlock *block;
1320
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001321 /* This assumes the iothread lock is taken here too. */
1322 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001323 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001324 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001325 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001326 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001327 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001328 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001329 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001330 }
1331 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001332 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001333}
1334
Anthony Liguoric227f092009-10-01 16:12:16 -05001335void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001336{
Alex Williamson04b16652010-07-02 11:13:17 -06001337 RAMBlock *block;
1338
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001339 /* This assumes the iothread lock is taken here too. */
1340 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001341 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001342 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001343 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001344 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001345 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001346 if (block->flags & RAM_PREALLOC_MASK) {
1347 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001348 } else if (xen_enabled()) {
1349 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001350#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001351 } else if (block->fd >= 0) {
1352 munmap(block->host, block->length);
1353 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001354#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001355 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001356 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001357 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001358 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001359 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001360 }
1361 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001362 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001363
bellarde9a1ab12007-02-08 23:08:38 +00001364}
1365
Huang Yingcd19cfa2011-03-02 08:56:19 +01001366#ifndef _WIN32
1367void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1368{
1369 RAMBlock *block;
1370 ram_addr_t offset;
1371 int flags;
1372 void *area, *vaddr;
1373
Paolo Bonzinia3161032012-11-14 15:54:48 +01001374 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001375 offset = addr - block->offset;
1376 if (offset < block->length) {
1377 vaddr = block->host + offset;
1378 if (block->flags & RAM_PREALLOC_MASK) {
1379 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001380 } else if (xen_enabled()) {
1381 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001382 } else {
1383 flags = MAP_FIXED;
1384 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001385 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001386#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001387 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1388 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001389#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001390 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001391#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001392 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1393 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001394 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001395 /*
1396 * Remap needs to match alloc. Accelerators that
1397 * set phys_mem_alloc never remap. If they did,
1398 * we'd need a remap hook here.
1399 */
1400 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1401
Huang Yingcd19cfa2011-03-02 08:56:19 +01001402 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1403 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1404 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001405 }
1406 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001407 fprintf(stderr, "Could not remap addr: "
1408 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001409 length, addr);
1410 exit(1);
1411 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001412 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001413 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001414 }
1415 return;
1416 }
1417 }
1418}
1419#endif /* !_WIN32 */
1420
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001421/* Return a host pointer to ram allocated with qemu_ram_alloc.
1422 With the exception of the softmmu code in this file, this should
1423 only be used for local memory (e.g. video ram) that the device owns,
1424 and knows it isn't going to access beyond the end of the block.
1425
1426 It should not be used for general purpose DMA.
1427 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1428 */
1429void *qemu_get_ram_ptr(ram_addr_t addr)
1430{
1431 RAMBlock *block = qemu_get_ram_block(addr);
1432
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001433 if (xen_enabled()) {
1434 /* We need to check whether the requested address is in RAM,
1435 * because we don't want to map all of guest memory into QEMU.
1436 * In that case, just map until the end of the page.
1437 */
1438 if (block->offset == 0) {
1439 return xen_map_cache(addr, 0, 0);
1440 } else if (block->host == NULL) {
1441 block->host =
1442 xen_map_cache(block->offset, block->length, 1);
1443 }
1444 }
1445 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001446}
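/*
 * Illustrative sketch of the intended use described above, compiled out
 * (the device type and fields are hypothetical): a device may take a raw
 * host pointer into RAM it owns, while guest-driven DMA must go through
 * cpu_physical_memory_rw()/cpu_physical_memory_map() instead.
 */
#if 0
typedef struct ExampleVGAState {
    MemoryRegion vram;       /* RAM region backing the framebuffer */
    ram_addr_t vram_offset;  /* offset returned by qemu_ram_alloc() */
} ExampleVGAState;

static void example_vga_clear(ExampleVGAState *s, size_t len)
{
    /* Safe: the device owns the block and stays inside its bounds. */
    uint8_t *p = qemu_get_ram_ptr(s->vram_offset);
    memset(p, 0, len);
}
#endif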
1447
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001448 /* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr
1449 * but takes a size argument. */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001450static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001451{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001452 if (*size == 0) {
1453 return NULL;
1454 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001455 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001456 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001457 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001458 RAMBlock *block;
1459
Paolo Bonzinia3161032012-11-14 15:54:48 +01001460 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001461 if (addr - block->offset < block->length) {
1462 if (addr - block->offset + *size > block->length)
1463 *size = block->length - addr + block->offset;
1464 return block->host + (addr - block->offset);
1465 }
1466 }
1467
1468 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1469 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001470 }
1471}
1472
Paolo Bonzini7443b432013-06-03 12:44:02 +02001473/* Some of the softmmu routines need to translate from a host pointer
1474 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001475MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001476{
pbrook94a6b542009-04-11 17:15:54 +00001477 RAMBlock *block;
1478 uint8_t *host = ptr;
1479
Jan Kiszka868bb332011-06-21 22:59:09 +02001480 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001481 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001482 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001483 }
1484
Paolo Bonzini23887b72013-05-06 14:28:39 +02001485 block = ram_list.mru_block;
1486 if (block && block->host && host - block->host < block->length) {
1487 goto found;
1488 }
1489
Paolo Bonzinia3161032012-11-14 15:54:48 +01001490 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001491 /* This case appears when the block is not mapped. */
1492 if (block->host == NULL) {
1493 continue;
1494 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001495 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001496 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001497 }
pbrook94a6b542009-04-11 17:15:54 +00001498 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001499
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001500 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001501
1502found:
1503 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001504 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001505}
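/*
 * Illustrative round trip, compiled out (host_ptr is assumed to come from
 * e.g. qemu_get_ram_ptr()): a host pointer into guest RAM translates back
 * to its ram_addr_t, which is how the softmmu slow path recovers
 * dirty-tracking offsets from TLB-resident host addresses.
 */
#if 0
ram_addr_t ram_addr;
MemoryRegion *mr = qemu_ram_addr_from_host(host_ptr, &ram_addr);
if (mr == NULL) {
    /* host_ptr does not point into any registered RAMBlock */
}
#endif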
Alex Williamsonf471a172010-06-11 11:11:42 -06001506
Avi Kivitya8170e52012-10-23 12:30:10 +02001507static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001508 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001509{
Juan Quintela52159192013-10-08 12:44:04 +02001510 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001511 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001512 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001513 switch (size) {
1514 case 1:
1515 stb_p(qemu_get_ram_ptr(ram_addr), val);
1516 break;
1517 case 2:
1518 stw_p(qemu_get_ram_ptr(ram_addr), val);
1519 break;
1520 case 4:
1521 stl_p(qemu_get_ram_ptr(ram_addr), val);
1522 break;
1523 default:
1524 abort();
1525 }
Juan Quintela52159192013-10-08 12:44:04 +02001526 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1527 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001528 /* we remove the notdirty callback only if the code has been
1529 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001530 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001531 CPUArchState *env = current_cpu->env_ptr;
1532 tlb_set_dirty(env, env->mem_io_vaddr);
1533 }
bellard1ccde1c2004-02-06 19:46:14 +00001534}
1535
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001536static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1537 unsigned size, bool is_write)
1538{
1539 return is_write;
1540}
1541
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001542static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001543 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001544 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001545 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001546};
1547
pbrook0f459d12008-06-09 00:20:13 +00001548/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001549static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001550{
Andreas Färber4917cf42013-05-27 05:17:50 +02001551 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001552 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001553 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001554 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001555 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001556
aliguori06d55cc2008-11-18 20:24:06 +00001557 if (env->watchpoint_hit) {
1558 /* We re-entered the check after replacing the TB. Now raise
1559 * the debug interrupt so that it will trigger after the
1560 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001561 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001562 return;
1563 }
pbrook2e70f6e2008-06-29 01:03:05 +00001564 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001565 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001566 if ((vaddr == (wp->vaddr & len_mask) ||
1567 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001568 wp->flags |= BP_WATCHPOINT_HIT;
1569 if (!env->watchpoint_hit) {
1570 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001571 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001572 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1573 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001574 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001575 } else {
1576 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1577 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001578 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001579 }
aliguori06d55cc2008-11-18 20:24:06 +00001580 }
aliguori6e140f22008-11-18 20:37:55 +00001581 } else {
1582 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001583 }
1584 }
1585}
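/*
 * Illustrative sketch, compiled out (assumes the cpu_watchpoint_insert()
 * helper with its CPUArchState-based signature of this era): roughly how
 * a debug front end arms the watchpoints that check_watchpoint() tests.
 */
#if 0
CPUWatchpoint *wp;

if (cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
    /* length not supported or address misaligned for that length */
}
#endif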
1586
pbrook6658ffb2007-03-16 23:58:11 +00001587/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1588 so these check for a hit then pass through to the normal out-of-line
1589 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001590static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001591 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001592{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001593 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1594 switch (size) {
1595 case 1: return ldub_phys(addr);
1596 case 2: return lduw_phys(addr);
1597 case 4: return ldl_phys(addr);
1598 default: abort();
1599 }
pbrook6658ffb2007-03-16 23:58:11 +00001600}
1601
Avi Kivitya8170e52012-10-23 12:30:10 +02001602static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001603 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001604{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001605 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1606 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001607 case 1:
1608 stb_phys(addr, val);
1609 break;
1610 case 2:
1611 stw_phys(addr, val);
1612 break;
1613 case 4:
1614 stl_phys(addr, val);
1615 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001616 default: abort();
1617 }
pbrook6658ffb2007-03-16 23:58:11 +00001618}
1619
Avi Kivity1ec9b902012-01-02 12:47:48 +02001620static const MemoryRegionOps watch_mem_ops = {
1621 .read = watch_mem_read,
1622 .write = watch_mem_write,
1623 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001624};
pbrook6658ffb2007-03-16 23:58:11 +00001625
Avi Kivitya8170e52012-10-23 12:30:10 +02001626static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001627 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001628{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001629 subpage_t *subpage = opaque;
1630 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001631
blueswir1db7b5422007-05-26 17:36:03 +00001632#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001633 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001634 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001635#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001636 address_space_read(subpage->as, addr + subpage->base, buf, len);
1637 switch (len) {
1638 case 1:
1639 return ldub_p(buf);
1640 case 2:
1641 return lduw_p(buf);
1642 case 4:
1643 return ldl_p(buf);
1644 default:
1645 abort();
1646 }
blueswir1db7b5422007-05-26 17:36:03 +00001647}
1648
Avi Kivitya8170e52012-10-23 12:30:10 +02001649static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001650 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001651{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001652 subpage_t *subpage = opaque;
1653 uint8_t buf[4];
1654
blueswir1db7b5422007-05-26 17:36:03 +00001655#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001656 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001657 " value %"PRIx64"\n",
1658 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001659#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001660 switch (len) {
1661 case 1:
1662 stb_p(buf, value);
1663 break;
1664 case 2:
1665 stw_p(buf, value);
1666 break;
1667 case 4:
1668 stl_p(buf, value);
1669 break;
1670 default:
1671 abort();
1672 }
1673 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001674}
1675
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001676static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001677 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001678{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001679 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001680#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001681 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001682 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001683#endif
1684
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001685 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001686 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001687}
1688
Avi Kivity70c68e42012-01-02 12:32:48 +02001689static const MemoryRegionOps subpage_ops = {
1690 .read = subpage_read,
1691 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001692 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001693 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001694};
1695
Anthony Liguoric227f092009-10-01 16:12:16 -05001696static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001697 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001698{
1699 int idx, eidx;
1700
1701 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1702 return -1;
1703 idx = SUBPAGE_IDX(start);
1704 eidx = SUBPAGE_IDX(end);
1705#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001706 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1707 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001708#endif
blueswir1db7b5422007-05-26 17:36:03 +00001709 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001710 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001711 }
1712
1713 return 0;
1714}
1715
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001716static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001717{
Anthony Liguoric227f092009-10-01 16:12:16 -05001718 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001719
Anthony Liguori7267c092011-08-20 22:09:37 -05001720 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001721
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001722 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001723 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001724 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001725 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001726 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001727#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001728 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1729 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001730#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001731 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001732
1733 return mmio;
1734}
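/*
 * Worked example: with 4 KiB target pages, a 0x100-byte register bank
 * mapped at offset 0x200 inside a page cannot use the normal
 * one-section-per-page dispatch.  subpage_init() first covers the whole
 * page with PHYS_SECTION_UNASSIGNED, then subpage_register() points
 * sub_section[0x200] through sub_section[0x2ff] at the device's section;
 * accesses are bounced through subpage_ops into the owning address space
 * at base + offset.
 */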
1735
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001736static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001737{
1738 MemoryRegionSection section = {
1739 .mr = mr,
1740 .offset_within_address_space = 0,
1741 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001742 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001743 };
1744
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001745 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001746}
1747
Avi Kivitya8170e52012-10-23 12:30:10 +02001748MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001749{
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001750 return address_space_memory.dispatch->map.sections[
1751 index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001752}
1753
Avi Kivitye9179ce2009-06-14 11:38:52 +03001754static void io_mem_init(void)
1755{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001756 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1757 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001758 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001759 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001760 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001761 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001762 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001763}
1764
Avi Kivityac1970f2012-10-03 16:22:53 +02001765static void mem_begin(MemoryListener *listener)
1766{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001767 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001768 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1769 uint16_t n;
1770
1771 n = dummy_section(&d->map, &io_mem_unassigned);
1772 assert(n == PHYS_SECTION_UNASSIGNED);
1773 n = dummy_section(&d->map, &io_mem_notdirty);
1774 assert(n == PHYS_SECTION_NOTDIRTY);
1775 n = dummy_section(&d->map, &io_mem_rom);
1776 assert(n == PHYS_SECTION_ROM);
1777 n = dummy_section(&d->map, &io_mem_watch);
1778 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001779
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001780 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001781 d->as = as;
1782 as->next_dispatch = d;
1783}
1784
1785static void mem_commit(MemoryListener *listener)
1786{
1787 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001788 AddressSpaceDispatch *cur = as->dispatch;
1789 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001790
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001791 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001792
Paolo Bonzini0475d942013-05-29 12:28:21 +02001793 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001794
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001795 if (cur) {
1796 phys_sections_free(&cur->map);
1797 g_free(cur);
1798 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001799}
1800
Avi Kivity1d711482012-10-02 18:54:45 +02001801static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001802{
Andreas Färber182735e2013-05-29 22:29:20 +02001803 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001804
1805 /* since each CPU stores ram addresses in its TLB cache, we must
1806 reset the modified entries */
1807 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001808 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001809 CPUArchState *env = cpu->env_ptr;
1810
Avi Kivity117712c2012-02-12 21:23:17 +02001811 tlb_flush(env, 1);
1812 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001813}
1814
Avi Kivity93632742012-02-08 16:54:16 +02001815static void core_log_global_start(MemoryListener *listener)
1816{
Juan Quintela981fdf22013-10-10 11:54:09 +02001817 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001818}
1819
1820static void core_log_global_stop(MemoryListener *listener)
1821{
Juan Quintela981fdf22013-10-10 11:54:09 +02001822 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001823}
1824
Avi Kivity93632742012-02-08 16:54:16 +02001825static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001826 .log_global_start = core_log_global_start,
1827 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001828 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001829};
1830
Avi Kivity1d711482012-10-02 18:54:45 +02001831static MemoryListener tcg_memory_listener = {
1832 .commit = tcg_commit,
1833};
1834
Avi Kivityac1970f2012-10-03 16:22:53 +02001835void address_space_init_dispatch(AddressSpace *as)
1836{
Paolo Bonzini00752702013-05-29 12:13:54 +02001837 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001838 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001839 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001840 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001841 .region_add = mem_add,
1842 .region_nop = mem_add,
1843 .priority = 0,
1844 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001845 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001846}
1847
Avi Kivity83f3c252012-10-07 12:59:55 +02001848void address_space_destroy_dispatch(AddressSpace *as)
1849{
1850 AddressSpaceDispatch *d = as->dispatch;
1851
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001852 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001853 g_free(d);
1854 as->dispatch = NULL;
1855}
1856
Avi Kivity62152b82011-07-26 14:26:14 +03001857static void memory_map_init(void)
1858{
Anthony Liguori7267c092011-08-20 22:09:37 -05001859 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001860
Paolo Bonzini57271d62013-11-07 17:14:37 +01001861 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001862 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001863
Anthony Liguori7267c092011-08-20 22:09:37 -05001864 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001865 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1866 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001867 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001868
Avi Kivityf6790af2012-10-02 20:13:51 +02001869 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001870 if (tcg_enabled()) {
1871 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1872 }
Avi Kivity62152b82011-07-26 14:26:14 +03001873}
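/*
 * Illustrative sketch, compiled out (region name, size and base address
 * are hypothetical): boards populate the root regions created above by
 * nesting subregions:
 */
#if 0
MemoryRegion *ram = g_new(MemoryRegion, 1);

memory_region_init_ram(ram, NULL, "example.ram", 128 * 1024 * 1024);
memory_region_add_subregion(get_system_memory(), 0, ram);
#endif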
1874
1875MemoryRegion *get_system_memory(void)
1876{
1877 return system_memory;
1878}
1879
Avi Kivity309cb472011-08-08 16:09:03 +03001880MemoryRegion *get_system_io(void)
1881{
1882 return system_io;
1883}
1884
pbrooke2eef172008-06-08 01:09:01 +00001885#endif /* !defined(CONFIG_USER_ONLY) */
1886
bellard13eb76e2004-01-24 15:23:36 +00001887/* physical memory access (slow version, mainly for debug) */
1888#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001889int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001890 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001891{
1892 int l, flags;
1893 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001894 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001895
1896 while (len > 0) {
1897 page = addr & TARGET_PAGE_MASK;
1898 l = (page + TARGET_PAGE_SIZE) - addr;
1899 if (l > len)
1900 l = len;
1901 flags = page_get_flags(page);
1902 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001903 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001904 if (is_write) {
1905 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001906 return -1;
bellard579a97f2007-11-11 14:26:47 +00001907 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001908 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001909 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001910 memcpy(p, buf, l);
1911 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001912 } else {
1913 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001914 return -1;
bellard579a97f2007-11-11 14:26:47 +00001915 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001916 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001917 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001918 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001919 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001920 }
1921 len -= l;
1922 buf += l;
1923 addr += l;
1924 }
Paul Brooka68fe892010-03-01 00:08:59 +00001925 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001926}
bellard8df1cd02005-01-28 22:37:22 +00001927
bellard13eb76e2004-01-24 15:23:36 +00001928#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001929
Avi Kivitya8170e52012-10-23 12:30:10 +02001930static void invalidate_and_set_dirty(hwaddr addr,
1931 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001932{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001933 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001934 /* invalidate code */
1935 tb_invalidate_phys_page_range(addr, addr + length, 0);
1936 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001937 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1938 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001939 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001940 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001941}
1942
Richard Henderson23326162013-07-08 14:55:59 -07001943static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001944{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001945 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001946
1947 /* Regions are assumed to support 1-4 byte accesses unless
1948 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001949 if (access_size_max == 0) {
1950 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001951 }
Richard Henderson23326162013-07-08 14:55:59 -07001952
1953 /* Bound the maximum access by the alignment of the address. */
1954 if (!mr->ops->impl.unaligned) {
1955 unsigned align_size_max = addr & -addr;
1956 if (align_size_max != 0 && align_size_max < access_size_max) {
1957 access_size_max = align_size_max;
1958 }
1959 }
1960
1961 /* Don't attempt accesses larger than the maximum. */
1962 if (l > access_size_max) {
1963 l = access_size_max;
1964 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001965 if (l & (l - 1)) {
1966 l = 1 << (qemu_fls(l) - 1);
1967 }
Richard Henderson23326162013-07-08 14:55:59 -07001968
1969 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001970}
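/*
 * Worked example: an 8-byte access at addr = 0x1002 to a region with no
 * declared valid.max_access_size and no impl.unaligned support starts
 * with access_size_max = 4; the alignment bound addr & -addr = 2 lowers
 * it to 2, so l = 2 is returned and the remaining bytes are handled by
 * further iterations of the address_space_rw() loop below.
 */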
1971
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001972bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001973 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001974{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001975 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001976 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001977 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001978 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001979 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001980 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001981
bellard13eb76e2004-01-24 15:23:36 +00001982 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001983 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001984 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001985
bellard13eb76e2004-01-24 15:23:36 +00001986 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001987 if (!memory_access_is_direct(mr, is_write)) {
1988 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001989 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001990 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001991 switch (l) {
1992 case 8:
1993 /* 64 bit write access */
1994 val = ldq_p(buf);
1995 error |= io_mem_write(mr, addr1, val, 8);
1996 break;
1997 case 4:
bellard1c213d12005-09-03 10:49:04 +00001998 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001999 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002000 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002001 break;
2002 case 2:
bellard1c213d12005-09-03 10:49:04 +00002003 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002004 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002005 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002006 break;
2007 case 1:
bellard1c213d12005-09-03 10:49:04 +00002008 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002009 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002010 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002011 break;
2012 default:
2013 abort();
bellard13eb76e2004-01-24 15:23:36 +00002014 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002015 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002016 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002017 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002018 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002019 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002020 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002021 }
2022 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002023 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002024 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002025 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002026 switch (l) {
2027 case 8:
2028 /* 64 bit read access */
2029 error |= io_mem_read(mr, addr1, &val, 8);
2030 stq_p(buf, val);
2031 break;
2032 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002033 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002034 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002035 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002036 break;
2037 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002038 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002039 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002040 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002041 break;
2042 case 1:
bellard1c213d12005-09-03 10:49:04 +00002043 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002044 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002045 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002046 break;
2047 default:
2048 abort();
bellard13eb76e2004-01-24 15:23:36 +00002049 }
2050 } else {
2051 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002052 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002053 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002054 }
2055 }
2056 len -= l;
2057 buf += l;
2058 addr += l;
2059 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002060
2061 return error;
bellard13eb76e2004-01-24 15:23:36 +00002062}
bellard8df1cd02005-01-28 22:37:22 +00002063
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002064bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002065 const uint8_t *buf, int len)
2066{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002067 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002068}
2069
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002070bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002071{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002072 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002073}
2074
2075
Avi Kivitya8170e52012-10-23 12:30:10 +02002076void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002077 int len, int is_write)
2078{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002079 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002080}
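/*
 * Illustrative sketch, compiled out (address and buffer are
 * hypothetical): a device model doing guest-physical DMA through the
 * copying path:
 */
#if 0
uint8_t buf[512];

cpu_physical_memory_rw(0x12340000, buf, sizeof(buf), 0);   /* DMA read  */
cpu_physical_memory_rw(0x12340000, buf, sizeof(buf), 1);   /* DMA write */
#endif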
2081
Alexander Graf582b55a2013-12-11 14:17:44 +01002082enum write_rom_type {
2083 WRITE_DATA,
2084 FLUSH_CACHE,
2085};
2086
2087static inline void cpu_physical_memory_write_rom_internal(
2088 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002089{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002090 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002091 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002092 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002093 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002094
bellardd0ecd2a2006-04-23 17:14:48 +00002095 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002096 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002097 mr = address_space_translate(&address_space_memory,
2098 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002099
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002100 if (!(memory_region_is_ram(mr) ||
2101 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002102 /* do nothing */
2103 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002104 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002105 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002106 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002107 switch (type) {
2108 case WRITE_DATA:
2109 memcpy(ptr, buf, l);
2110 invalidate_and_set_dirty(addr1, l);
2111 break;
2112 case FLUSH_CACHE:
2113 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2114 break;
2115 }
bellardd0ecd2a2006-04-23 17:14:48 +00002116 }
2117 len -= l;
2118 buf += l;
2119 addr += l;
2120 }
2121}
2122
Alexander Graf582b55a2013-12-11 14:17:44 +01002123/* used for ROM loading : can write in RAM and ROM */
2124void cpu_physical_memory_write_rom(hwaddr addr,
2125 const uint8_t *buf, int len)
2126{
2127 cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
2128}
2129
2130void cpu_flush_icache_range(hwaddr start, int len)
2131{
2132 /*
2133 * This function should do the same thing as an icache flush that was
2134 * triggered from within the guest. For TCG we are always cache coherent,
2135 * so there is no need to flush anything. For KVM / Xen we need to flush
2136 * the host's instruction cache at least.
2137 */
2138 if (tcg_enabled()) {
2139 return;
2140 }
2141
2142 cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
2143}
2144
aliguori6d16c2f2009-01-22 16:59:11 +00002145typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002146 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002147 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002148 hwaddr addr;
2149 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002150} BounceBuffer;
2151
2152static BounceBuffer bounce;
2153
aliguoriba223c22009-01-22 16:59:16 +00002154typedef struct MapClient {
2155 void *opaque;
2156 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002157 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002158} MapClient;
2159
Blue Swirl72cf2d42009-09-12 07:36:22 +00002160static QLIST_HEAD(map_client_list, MapClient) map_client_list
2161 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002162
2163void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2164{
Anthony Liguori7267c092011-08-20 22:09:37 -05002165 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002166
2167 client->opaque = opaque;
2168 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002169 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002170 return client;
2171}
2172
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002173static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002174{
2175 MapClient *client = (MapClient *)_client;
2176
Blue Swirl72cf2d42009-09-12 07:36:22 +00002177 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002178 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002179}
2180
2181static void cpu_notify_map_clients(void)
2182{
2183 MapClient *client;
2184
Blue Swirl72cf2d42009-09-12 07:36:22 +00002185 while (!QLIST_EMPTY(&map_client_list)) {
2186 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002187 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002188 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002189 }
2190}
2191
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002192bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2193{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002194 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002195 hwaddr l, xlat;
2196
2197 while (len > 0) {
2198 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002199 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2200 if (!memory_access_is_direct(mr, is_write)) {
2201 l = memory_access_size(mr, l, addr);
2202 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002203 return false;
2204 }
2205 }
2206
2207 len -= l;
2208 addr += l;
2209 }
2210 return true;
2211}
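/*
 * Illustrative check-before-access pattern, compiled out.  The check is
 * advisory: the memory map can change between the check and the access.
 */
#if 0
if (address_space_access_valid(&address_space_memory, addr, len, true)) {
    address_space_write(&address_space_memory, addr, buf, len);
} else {
    /* report a bus error / master abort in the device model */
}
#endif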
2212
aliguori6d16c2f2009-01-22 16:59:11 +00002213/* Map a physical memory region into a host virtual address.
2214 * May map a subset of the requested range, given by and returned in *plen.
2215 * May return NULL if resources needed to perform the mapping are exhausted.
2216 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002217 * Use cpu_register_map_client() to know when retrying the map operation is
2218 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002219 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002220void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002221 hwaddr addr,
2222 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002223 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002224{
Avi Kivitya8170e52012-10-23 12:30:10 +02002225 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002226 hwaddr done = 0;
2227 hwaddr l, xlat, base;
2228 MemoryRegion *mr, *this_mr;
2229 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002230
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002231 if (len == 0) {
2232 return NULL;
2233 }
aliguori6d16c2f2009-01-22 16:59:11 +00002234
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002235 l = len;
2236 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2237 if (!memory_access_is_direct(mr, is_write)) {
2238 if (bounce.buffer) {
2239 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002240 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002241 /* Avoid unbounded allocations */
2242 l = MIN(l, TARGET_PAGE_SIZE);
2243 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002244 bounce.addr = addr;
2245 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002246
2247 memory_region_ref(mr);
2248 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002249 if (!is_write) {
2250 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002251 }
aliguori6d16c2f2009-01-22 16:59:11 +00002252
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002253 *plen = l;
2254 return bounce.buffer;
2255 }
2256
2257 base = xlat;
2258 raddr = memory_region_get_ram_addr(mr);
2259
2260 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002261 len -= l;
2262 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002263 done += l;
2264 if (len == 0) {
2265 break;
2266 }
2267
2268 l = len;
2269 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2270 if (this_mr != mr || xlat != base + done) {
2271 break;
2272 }
aliguori6d16c2f2009-01-22 16:59:11 +00002273 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002274
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002275 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002276 *plen = done;
2277 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002278}
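/*
 * Illustrative sketch of the calling pattern, compiled out (the
 * example_* names are hypothetical).  Mapping is zero-copy when the range
 * is direct RAM; when the single bounce buffer is busy, NULL is returned
 * and cpu_register_map_client() schedules a retry once it frees up:
 */
#if 0
static void example_dma_retry(void *opaque);

static void example_dma_start(AddressSpace *as, hwaddr addr, hwaddr len,
                              void *opaque)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, true);

    if (!p) {
        cpu_register_map_client(opaque, example_dma_retry);
        return;
    }
    /* ... fill p[0..plen); plen may be shorter than requested ... */
    address_space_unmap(as, p, plen, true, plen);
}
#endif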
2279
Avi Kivityac1970f2012-10-03 16:22:53 +02002280/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002281 * Will also mark the memory as dirty if is_write == 1. access_len gives
2282 * the amount of memory that was actually read or written by the caller.
2283 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002284void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2285 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002286{
2287 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002288 MemoryRegion *mr;
2289 ram_addr_t addr1;
2290
2291 mr = qemu_ram_addr_from_host(buffer, &addr1);
2292 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002293 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002294 while (access_len) {
2295 unsigned l;
2296 l = TARGET_PAGE_SIZE;
2297 if (l > access_len)
2298 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002299 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002300 addr1 += l;
2301 access_len -= l;
2302 }
2303 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002304 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002305 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002306 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002307 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002308 return;
2309 }
2310 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002311 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002312 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002313 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002314 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002315 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002316 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002317}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write,
                               access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
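
/* Illustrative usage sketch: callers choose the _le/_be variant to match the
 * endianness of the data as it is laid out in guest memory, independent of
 * the host.  The address below is a made-up example.
 *
 *     hwaddr desc = 0x1000;                      // hypothetical address
 *     uint32_t le = ldl_le_phys(desc);           // little-endian field
 *     uint32_t be = ldl_be_phys(desc);           // big-endian field
 *     uint32_t nat = ldl_phys(desc);             // target-native order
 */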

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
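
/* Illustrative usage sketch: the classic caller is a page table walker that
 * sets accessed/dirty bits in a guest PTE without perturbing the dirty
 * tracking of the page holding the PTE.  "pte_addr" and "PTE_ACCESSED" are
 * hypothetical names.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     if (!(pte & PTE_ACCESSED)) {
 *         stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 *     }
 */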

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
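
/* Illustrative usage sketch: in contrast to stl_phys_notdirty() above, these
 * stores mark the page dirty and invalidate any translated code in it, which
 * is what a device model writing guest RAM normally wants.  The address and
 * value are made-up examples.
 *
 *     stl_le_phys(0x2000, 0xdeadbeef);   // store a little-endian word
 */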

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
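
/* Illustrative usage sketch: this is the path used by, for example, the
 * gdbstub, so a debugger-style read walks the guest's virtual address space
 * page by page.  "cpu" and "vaddr" are assumed to be in scope.
 *
 *     uint32_t word = 0;
 *     if (cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)&word,
 *                             sizeof(word), 0) < 0) {
 *         // no physical page mapped at vaddr
 *     }
 */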
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
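
/* Illustrative usage sketch: memory dump code can use this predicate to skip
 * device regions that must not be read as plain RAM.  "start" and "end" are
 * hypothetical bounds.
 *
 *     hwaddr a;
 *     for (a = start; a < end; a += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_is_io(a)) {
 *             continue;    // MMIO: skip
 *         }
 *         // ... dump the page at 'a' ...
 *     }
 */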

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
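
/* Illustrative usage sketch: a RAMBlockIterFunc receives the host pointer,
 * the ram_addr_t offset and the length of every RAM block; callers such as
 * RDMA migration use this to register all guest memory.  "print_block" is a
 * hypothetical callback.
 *
 *     static void print_block(void *host, ram_addr_t offset,
 *                             ram_addr_t length, void *opaque)
 *     {
 *         printf("%p: offset " RAM_ADDR_FMT " length " RAM_ADDR_FMT "\n",
 *                host, offset, length);
 *     }
 *
 *     qemu_ram_foreach_block(print_block, NULL);
 */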
#endif