/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).  0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
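
/* Illustrative reading of the declarations above (not upstream text): a
 * PhysPageEntry packs into 32 bits.  A non-leaf entry with skip == 2 tells
 * the lookup to consume two levels' worth of index bits in a single hop
 * before following ptr into the node array; skip == 0 marks a leaf whose
 * ptr indexes phys_sections instead.
 */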

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
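
/* Worked example (assuming the usual TARGET_PAGE_BITS of 12): a 64-bit
 * address space with 12 page-offset bits and 9 index bits per level gives
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels of 512-entry tables.
 */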

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
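
/* Note: sub_section holds one entry per byte of the page, so a subpage can
 * resolve accesses on a page shared by several MemoryRegionSections down to
 * byte granularity.
 */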

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry: if the entry has a single child, update it
 * so lookups skip the intermediate node and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
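
/* Sketch of the effect on a hypothetical tree (illustrative, not from the
 * original source): if root (skip = 1) points at node A whose only valid
 * entry (skip = 1) points at B, compaction rewrites root to point straight
 * at B with skip = 2, so a lookup consumes both levels' index bits in one
 * hop.
 */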

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
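
/* A note on the final range check above (summary, not upstream text): a
 * compacted entry can be reached for addresses beyond the section it names,
 * so the lookup falls back to PHYS_SECTION_UNASSIGNED unless the section is
 * huge (size.hi != 0) or actually covers addr.
 */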

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
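
/* Usage sketch (hypothetical caller, for illustration only):
 *
 *     hwaddr xlat, plen = 4;
 *     MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                                addr, &xlat, &plen, false);
 *     if (memory_region_is_ram(mr)) {
 *         ... access up to plen bytes at offset xlat within mr ...
 *     }
 *
 * The loop follows IOMMUs until a terminal region is reached, clipping plen
 * to the surviving window at each hop.
 */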

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
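
/* Usage sketch (hypothetical, for illustration): a gdbstub-style client
 * watching four bytes for writes could do
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp)) {
 *         ... report the failure ...
 *     }
 *
 * Per the sanity check above, len must be a power of two no larger than
 * TARGET_PAGE_SIZE and addr must be aligned to it.
 */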

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
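
/* Reading of the encoding above (summary, not upstream text): for RAM the
 * result is a page-aligned ram address with one of the special section
 * numbers (NOTDIRTY/ROM) OR-ed into the low bits; for MMIO it is a section
 * index plus the page offset.  phys_section_add() below asserts that the
 * section count stays under TARGET_PAGE_SIZE precisely so the two encodings
 * cannot collide.
 */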
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
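
/* Example walk through mem_add (hypothetical numbers, 4 KiB pages): a
 * section covering 0x1800..0x47ff is split into a subpage piece at
 * 0x1800-0x1fff, a full-page piece at 0x2000-0x3fff registered via
 * register_multipage(), and a trailing subpage piece at 0x4000-0x47ff.
 */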

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
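
/* On a hugetlbfs mount, statfs() reports the huge page size (2 MiB, 1 GiB,
 * ...) as f_bsize, so no /proc parsing is needed; HUGETLBFS_MAGIC matches
 * the f_type of such mounts.
 */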

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize) - 1; i++) {
            memset(area + (hpagesize * i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif
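
/* Note on the mem_prealloc path above: touching one byte per huge page
 * forces the kernel to back each page immediately; if the hugetlbfs pool is
 * exhausted the touch raises SIGBUS, which the sigsetjmp/siglongjmp pair
 * turns into a clean "failed to preallocate pages" exit.
 */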

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
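
/* Worked example (hypothetical layout): with blocks at [0x0, 0x8000) and
 * [0x10000, 0x14000), a request for 0x6000 bytes sees a 0x8000-byte gap at
 * 0x8000 and unbounded space at 0x14000, and returns 0x8000 as the end of
 * the smallest gap that fits.
 */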
1133
Juan Quintela652d7ec2012-07-20 10:37:54 +02001134ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001135{
Alex Williamsond17b5282010-06-25 11:08:38 -06001136 RAMBlock *block;
1137 ram_addr_t last = 0;
1138
Paolo Bonzinia3161032012-11-14 15:54:48 +01001139 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001140 last = MAX(last, block->offset + block->length);
1141
1142 return last;
1143}
1144
Jason Baronddb97f12012-08-02 15:44:16 -04001145static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1146{
1147 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001148
1149 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001150 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1151 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001152 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1153 if (ret) {
1154 perror("qemu_madvise");
1155 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1156 "but dump_guest_core=off specified\n");
1157 }
1158 }
1159}
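
/*
 * Example (illustrative): guest RAM can be kept out of QEMU core dumps
 * with the machine option checked above, e.g.:
 *
 *     qemu-system-x86_64 -machine pc,dump-guest-core=off ...
 */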
1160
Avi Kivityc5705a72011-12-20 15:59:12 +02001161void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001162{
1163 RAMBlock *new_block, *block;
1164
Avi Kivityc5705a72011-12-20 15:59:12 +02001165 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001166 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001167 if (block->offset == addr) {
1168 new_block = block;
1169 break;
1170 }
1171 }
1172 assert(new_block);
1173 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001174
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001175 if (dev) {
1176 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001177 if (id) {
1178 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001179 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001180 }
1181 }
1182 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1183
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001184 /* This assumes the iothread lock is taken here too. */
1185 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001186 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001187 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001188 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1189 new_block->idstr);
1190 abort();
1191 }
1192 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001193 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001194}
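
/*
 * Illustrative example: the resulting idstr is "<qdev path>/<name>" when
 * the block belongs to a device, e.g. a hypothetical device at
 * /machine/unattached/device[3] with block name "ivshmem.bar2" yields
 * "/machine/unattached/device[3]/ivshmem.bar2"; blocks with no device,
 * such as main memory, keep the bare name, e.g. "pc.ram".  Migration
 * depends on these strings matching between source and destination.
 */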
1195
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001196static int memory_try_enable_merging(void *addr, size_t len)
1197{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001198 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001199 /* disabled by the user */
1200 return 0;
1201 }
1202
1203 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1204}
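
/*
 * Example (illustrative): KSM merging is enabled by default; disabling
 * it per machine makes this function return 0 without the madvise call:
 *
 *     qemu-system-x86_64 -machine pc,mem-merge=off ...
 */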
1205
Avi Kivityc5705a72011-12-20 15:59:12 +02001206ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1207 MemoryRegion *mr)
1208{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001209 RAMBlock *block, *new_block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001210 ram_addr_t old_ram_size, new_ram_size;
1211
1212 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001213
1214 size = TARGET_PAGE_ALIGN(size);
1215 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001216 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001217
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001218 /* This assumes the iothread lock is taken here too. */
1219 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001220 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001221 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001222 if (host) {
1223 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001224 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001225 } else if (xen_enabled()) {
1226 if (mem_path) {
1227 fprintf(stderr, "-mem-path not supported with Xen\n");
1228 exit(1);
1229 }
1230 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001231 } else {
1232 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001233 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1234 /*
1235 * file_ram_alloc() needs to allocate just like
1236 * phys_mem_alloc, but we haven't bothered to provide
1237 * a hook there.
1238 */
1239 fprintf(stderr,
1240 "-mem-path not supported with this accelerator\n");
1241 exit(1);
1242 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001243 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001244 }
1245 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001246 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001247 if (!new_block->host) {
1248 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1249 new_block->mr->name, strerror(errno));
1250 exit(1);
1251 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001252 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001253 }
1254 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001255 new_block->length = size;
1256
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001257 /* Keep the list sorted from biggest to smallest block. */
1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1259 if (block->length < new_block->length) {
1260 break;
1261 }
1262 }
1263 if (block) {
1264 QTAILQ_INSERT_BEFORE(block, new_block, next);
1265 } else {
1266 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1267 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001268 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001269
Umesh Deshpandef798b072011-08-18 11:41:17 -07001270 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001271 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001272
Juan Quintela2152f5c2013-10-08 13:52:02 +02001273 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1274
1275 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001276 int i;
1277 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1278 ram_list.dirty_memory[i] =
1279 bitmap_zero_extend(ram_list.dirty_memory[i],
1280 old_ram_size, new_ram_size);
1281 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001282 }
Juan Quintela75218e72013-10-08 12:31:54 +02001283 cpu_physical_memory_set_dirty_range(new_block->offset, size);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001284
Jason Baronddb97f12012-08-02 15:44:16 -04001285 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001286 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001287 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001288
Cam Macdonell84b89d72010-07-26 18:10:57 -06001289 if (kvm_enabled())
1290 kvm_setup_guest_memory(new_block->host, size);
1291
1292 return new_block->offset;
1293}
1294
Avi Kivityc5705a72011-12-20 15:59:12 +02001295ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001296{
Avi Kivityc5705a72011-12-20 15:59:12 +02001297 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001298}
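
#if 0
/*
 * Hedged usage sketch, not part of the original file.  Callers normally
 * reach qemu_ram_alloc() through the memory API rather than directly:
 * memory_region_init_ram() allocates the block and records the returned
 * ram_addr_t in the region.  The function below is hypothetical glue for
 * illustration only.
 */
static void example_alloc_ram(MemoryRegion *sysmem, uint64_t size)
{
    MemoryRegion *ram = g_malloc0(sizeof(*ram));

    /* ends up in qemu_ram_alloc(size, ram) via the memory core */
    memory_region_init_ram(ram, NULL, "example.ram", size);
    memory_region_add_subregion(sysmem, 0, ram);
}
#endif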
bellarde9a1ab12007-02-08 23:08:38 +00001299
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001300void qemu_ram_free_from_ptr(ram_addr_t addr)
1301{
1302 RAMBlock *block;
1303
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001304 /* This assumes the iothread lock is taken here too. */
1305 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001306 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001307 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001308 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001309 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001310 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001311 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001312 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001313 }
1314 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001315 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001316}
1317
Anthony Liguoric227f092009-10-01 16:12:16 -05001318void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001319{
Alex Williamson04b16652010-07-02 11:13:17 -06001320 RAMBlock *block;
1321
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001322 /* This assumes the iothread lock is taken here too. */
1323 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001324 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001325 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001326 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001327 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001328 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001329 if (block->flags & RAM_PREALLOC_MASK) {
1330 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001331 } else if (xen_enabled()) {
1332 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001333#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001334 } else if (block->fd >= 0) {
1335 munmap(block->host, block->length);
1336 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001337#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001338 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001339 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001340 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001341 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001342 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001343 }
1344 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001345 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001346
bellarde9a1ab12007-02-08 23:08:38 +00001347}
1348
Huang Yingcd19cfa2011-03-02 08:56:19 +01001349#ifndef _WIN32
1350void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1351{
1352 RAMBlock *block;
1353 ram_addr_t offset;
1354 int flags;
1355 void *area, *vaddr;
1356
Paolo Bonzinia3161032012-11-14 15:54:48 +01001357 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001358 offset = addr - block->offset;
1359 if (offset < block->length) {
1360 vaddr = block->host + offset;
1361 if (block->flags & RAM_PREALLOC_MASK) {
1362 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001363 } else if (xen_enabled()) {
1364 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001365 } else {
1366 flags = MAP_FIXED;
1367 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001368 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001369#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001370 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1371 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001372#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001373 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001374#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001375 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1376 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001377 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001378 /*
1379 * Remap needs to match alloc. Accelerators that
1380 * set phys_mem_alloc never remap. If they did,
1381 * we'd need a remap hook here.
1382 */
1383 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1384
Huang Yingcd19cfa2011-03-02 08:56:19 +01001385 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1386 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1387 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001388 }
1389 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001390 fprintf(stderr, "Could not remap addr: "
1391 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001392 length, addr);
1393 exit(1);
1394 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001395 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001396 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001397 }
1398 return;
1399 }
1400 }
1401}
1402#endif /* !_WIN32 */
1403
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001404/* Return a host pointer to ram allocated with qemu_ram_alloc.
1405 With the exception of the softmmu code in this file, this should
1406 only be used for local memory (e.g. video ram) that the device owns
1407 and will not access beyond the end of the block.
1408
1409 It should not be used for general purpose DMA.
1410 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1411 */
1412void *qemu_get_ram_ptr(ram_addr_t addr)
1413{
1414 RAMBlock *block = qemu_get_ram_block(addr);
1415
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001416 if (xen_enabled()) {
1417 /* We need to check if the requested address is in the RAM
1418 * because we don't want to map the entire memory in QEMU.
1419 * In that case just map until the end of the page.
1420 */
1421 if (block->offset == 0) {
1422 return xen_map_cache(addr, 0, 0);
1423 } else if (block->host == NULL) {
1424 block->host =
1425 xen_map_cache(block->offset, block->length, 1);
1426 }
1427 }
1428 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001429}
1430
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001431/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1432 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001433static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001434{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001435 if (*size == 0) {
1436 return NULL;
1437 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001438 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001439 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001440 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001441 RAMBlock *block;
1442
Paolo Bonzinia3161032012-11-14 15:54:48 +01001443 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001444 if (addr - block->offset < block->length) {
1445 if (addr - block->offset + *size > block->length)
1446 *size = block->length - addr + block->offset;
1447 return block->host + (addr - block->offset);
1448 }
1449 }
1450
1451 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1452 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001453 }
1454}
1455
Paolo Bonzini7443b432013-06-03 12:44:02 +02001456/* Some of the softmmu routines need to translate from a host pointer
1457 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001458MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001459{
pbrook94a6b542009-04-11 17:15:54 +00001460 RAMBlock *block;
1461 uint8_t *host = ptr;
1462
Jan Kiszka868bb332011-06-21 22:59:09 +02001463 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001464 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001465 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001466 }
1467
Paolo Bonzini23887b72013-05-06 14:28:39 +02001468 block = ram_list.mru_block;
1469 if (block && block->host && host - block->host < block->length) {
1470 goto found;
1471 }
1472
Paolo Bonzinia3161032012-11-14 15:54:48 +01001473 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001474 /* This case occurs when the block is not mapped. */
1475 if (block->host == NULL) {
1476 continue;
1477 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001478 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001479 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001480 }
pbrook94a6b542009-04-11 17:15:54 +00001481 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001482
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001483 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001484
1485found:
1486 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001487 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001488}
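
#if 0
/*
 * Hedged sketch (not part of the original file): for an address inside a
 * RAM block the two translations above are inverses of each other.  The
 * function is hypothetical and for illustration only.
 */
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);   /* ram_addr_t -> host pointer */
    ram_addr_t back;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &back);

    /* host pointer -> ram_addr_t recovers the original offset */
    assert(mr != NULL && back == addr);
}
#endif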
Alex Williamsonf471a172010-06-11 11:11:42 -06001489
Avi Kivitya8170e52012-10-23 12:30:10 +02001490static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001491 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001492{
Juan Quintela52159192013-10-08 12:44:04 +02001493 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001494 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001495 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001496 switch (size) {
1497 case 1:
1498 stb_p(qemu_get_ram_ptr(ram_addr), val);
1499 break;
1500 case 2:
1501 stw_p(qemu_get_ram_ptr(ram_addr), val);
1502 break;
1503 case 4:
1504 stl_p(qemu_get_ram_ptr(ram_addr), val);
1505 break;
1506 default:
1507 abort();
1508 }
Juan Quintela52159192013-10-08 12:44:04 +02001509 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1510 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001511 /* we remove the notdirty callback only if the code has been
1512 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001513 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001514 CPUArchState *env = current_cpu->env_ptr;
1515 tlb_set_dirty(env, env->mem_io_vaddr);
1516 }
bellard1ccde1c2004-02-06 19:46:14 +00001517}
1518
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001519static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1520 unsigned size, bool is_write)
1521{
1522 return is_write;
1523}
1524
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001525static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001526 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001527 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001528 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001529};
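
/*
 * Illustrative note (not from the original source): pages considered
 * clean by some dirty-tracking client are routed through this region
 * rather than written directly, so each store funnels into
 * notdirty_mem_write() above: invalidate any TBs on the page, perform
 * the store, set the MIGRATION and VGA dirty bits, and restore the fast
 * TLB path once no client needs further notification.
 */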
1530
pbrook0f459d12008-06-09 00:20:13 +00001531/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001532static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001533{
Andreas Färber4917cf42013-05-27 05:17:50 +02001534 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001535 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001536 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001537 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001538 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001539
aliguori06d55cc2008-11-18 20:24:06 +00001540 if (env->watchpoint_hit) {
1541 /* We re-entered the check after replacing the TB. Now raise
1542 * the debug interrupt so that it will trigger after the
1543 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001544 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001545 return;
1546 }
pbrook2e70f6e2008-06-29 01:03:05 +00001547 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001548 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001549 if ((vaddr == (wp->vaddr & len_mask) ||
1550 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001551 wp->flags |= BP_WATCHPOINT_HIT;
1552 if (!env->watchpoint_hit) {
1553 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001554 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001555 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1556 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001557 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001558 } else {
1559 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1560 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001561 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001562 }
aliguori06d55cc2008-11-18 20:24:06 +00001563 }
aliguori6e140f22008-11-18 20:37:55 +00001564 } else {
1565 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001566 }
1567 }
1568}
1569
pbrook6658ffb2007-03-16 23:58:11 +00001570/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1571 so these check for a hit then pass through to the normal out-of-line
1572 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001573static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001574 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001575{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001576 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1577 switch (size) {
1578 case 1: return ldub_phys(addr);
1579 case 2: return lduw_phys(addr);
1580 case 4: return ldl_phys(addr);
1581 default: abort();
1582 }
pbrook6658ffb2007-03-16 23:58:11 +00001583}
1584
Avi Kivitya8170e52012-10-23 12:30:10 +02001585static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001586 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001587{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001588 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1589 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001590 case 1:
1591 stb_phys(addr, val);
1592 break;
1593 case 2:
1594 stw_phys(addr, val);
1595 break;
1596 case 4:
1597 stl_phys(addr, val);
1598 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001599 default: abort();
1600 }
pbrook6658ffb2007-03-16 23:58:11 +00001601}
1602
Avi Kivity1ec9b902012-01-02 12:47:48 +02001603static const MemoryRegionOps watch_mem_ops = {
1604 .read = watch_mem_read,
1605 .write = watch_mem_write,
1606 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001607};
pbrook6658ffb2007-03-16 23:58:11 +00001608
Avi Kivitya8170e52012-10-23 12:30:10 +02001609static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001610 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001611{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001612 subpage_t *subpage = opaque;
1613 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001614
blueswir1db7b5422007-05-26 17:36:03 +00001615#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001616 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001617 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001618#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001619 address_space_read(subpage->as, addr + subpage->base, buf, len);
1620 switch (len) {
1621 case 1:
1622 return ldub_p(buf);
1623 case 2:
1624 return lduw_p(buf);
1625 case 4:
1626 return ldl_p(buf);
1627 default:
1628 abort();
1629 }
blueswir1db7b5422007-05-26 17:36:03 +00001630}
1631
Avi Kivitya8170e52012-10-23 12:30:10 +02001632static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001633 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001634{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001635 subpage_t *subpage = opaque;
1636 uint8_t buf[4];
1637
blueswir1db7b5422007-05-26 17:36:03 +00001638#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001639 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001640 " value %"PRIx64"\n",
1641 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001642#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001643 switch (len) {
1644 case 1:
1645 stb_p(buf, value);
1646 break;
1647 case 2:
1648 stw_p(buf, value);
1649 break;
1650 case 4:
1651 stl_p(buf, value);
1652 break;
1653 default:
1654 abort();
1655 }
1656 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001657}
1658
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001659static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001660 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001661{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001662 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001663#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001664 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001665 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001666#endif
1667
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001668 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001669 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001670}
1671
Avi Kivity70c68e42012-01-02 12:32:48 +02001672static const MemoryRegionOps subpage_ops = {
1673 .read = subpage_read,
1674 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001675 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001676 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001677};
1678
Anthony Liguoric227f092009-10-01 16:12:16 -05001679static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001680 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001681{
1682 int idx, eidx;
1683
1684 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1685 return -1;
1686 idx = SUBPAGE_IDX(start);
1687 eidx = SUBPAGE_IDX(end);
1688#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001689 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1690 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001691#endif
blueswir1db7b5422007-05-26 17:36:03 +00001692 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001693 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001694 }
1695
1696 return 0;
1697}
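
/*
 * Worked example (illustrative): with 4K target pages, exposing a device
 * in only the first 0x100 bytes of a page amounts to:
 *
 *     subpage_register(mmio, 0x000, 0x0ff, device_section);
 *
 * subpage_init() below pre-fills sub_section[] with
 * PHYS_SECTION_UNASSIGNED, so only the device range needs registering;
 * "device_section" stands for an index returned by phys_section_add().
 */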
1698
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001699static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001700{
Anthony Liguoric227f092009-10-01 16:12:16 -05001701 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001702
Anthony Liguori7267c092011-08-20 22:09:37 -05001703 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001704
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001705 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001706 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001707 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001708 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001709 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001710#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001711 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1712 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001713#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001714 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001715
1716 return mmio;
1717}
1718
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001719static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001720{
1721 MemoryRegionSection section = {
1722 .mr = mr,
1723 .offset_within_address_space = 0,
1724 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001725 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001726 };
1727
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001728 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001729}
1730
Avi Kivitya8170e52012-10-23 12:30:10 +02001731MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001732{
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001733 return address_space_memory.dispatch->map.sections[
1734 index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001735}
1736
Avi Kivitye9179ce2009-06-14 11:38:52 +03001737static void io_mem_init(void)
1738{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001739 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1740 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001741 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001742 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001743 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001744 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001745 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001746}
1747
Avi Kivityac1970f2012-10-03 16:22:53 +02001748static void mem_begin(MemoryListener *listener)
1749{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001750 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001751 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1752 uint16_t n;
1753
1754 n = dummy_section(&d->map, &io_mem_unassigned);
1755 assert(n == PHYS_SECTION_UNASSIGNED);
1756 n = dummy_section(&d->map, &io_mem_notdirty);
1757 assert(n == PHYS_SECTION_NOTDIRTY);
1758 n = dummy_section(&d->map, &io_mem_rom);
1759 assert(n == PHYS_SECTION_ROM);
1760 n = dummy_section(&d->map, &io_mem_watch);
1761 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001762
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001763 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001764 d->as = as;
1765 as->next_dispatch = d;
1766}
1767
1768static void mem_commit(MemoryListener *listener)
1769{
1770 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001771 AddressSpaceDispatch *cur = as->dispatch;
1772 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001773
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001774 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001775
Paolo Bonzini0475d942013-05-29 12:28:21 +02001776 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001777
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001778 if (cur) {
1779 phys_sections_free(&cur->map);
1780 g_free(cur);
1781 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001782}
1783
Avi Kivity1d711482012-10-02 18:54:45 +02001784static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001785{
Andreas Färber182735e2013-05-29 22:29:20 +02001786 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001787
1788 /* since each CPU stores ram addresses in its TLB cache, we must
1789 reset the modified entries */
1790 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001791 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001792 CPUArchState *env = cpu->env_ptr;
1793
Avi Kivity117712c2012-02-12 21:23:17 +02001794 tlb_flush(env, 1);
1795 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001796}
1797
Avi Kivity93632742012-02-08 16:54:16 +02001798static void core_log_global_start(MemoryListener *listener)
1799{
Juan Quintela981fdf22013-10-10 11:54:09 +02001800 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001801}
1802
1803static void core_log_global_stop(MemoryListener *listener)
1804{
Juan Quintela981fdf22013-10-10 11:54:09 +02001805 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001806}
1807
Avi Kivity93632742012-02-08 16:54:16 +02001808static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001809 .log_global_start = core_log_global_start,
1810 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001811 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001812};
1813
Avi Kivity1d711482012-10-02 18:54:45 +02001814static MemoryListener tcg_memory_listener = {
1815 .commit = tcg_commit,
1816};
1817
Avi Kivityac1970f2012-10-03 16:22:53 +02001818void address_space_init_dispatch(AddressSpace *as)
1819{
Paolo Bonzini00752702013-05-29 12:13:54 +02001820 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001821 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001822 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001823 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001824 .region_add = mem_add,
1825 .region_nop = mem_add,
1826 .priority = 0,
1827 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001828 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001829}
1830
Avi Kivity83f3c252012-10-07 12:59:55 +02001831void address_space_destroy_dispatch(AddressSpace *as)
1832{
1833 AddressSpaceDispatch *d = as->dispatch;
1834
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001835 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001836 g_free(d);
1837 as->dispatch = NULL;
1838}
1839
Avi Kivity62152b82011-07-26 14:26:14 +03001840static void memory_map_init(void)
1841{
Anthony Liguori7267c092011-08-20 22:09:37 -05001842 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001843
Paolo Bonzini57271d62013-11-07 17:14:37 +01001844 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001845 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001846
Anthony Liguori7267c092011-08-20 22:09:37 -05001847 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001848 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1849 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001850 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001851
Avi Kivityf6790af2012-10-02 20:13:51 +02001852 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001853 if (tcg_enabled()) {
1854 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1855 }
Avi Kivity62152b82011-07-26 14:26:14 +03001856}
1857
1858MemoryRegion *get_system_memory(void)
1859{
1860 return system_memory;
1861}
1862
Avi Kivity309cb472011-08-08 16:09:03 +03001863MemoryRegion *get_system_io(void)
1864{
1865 return system_io;
1866}
1867
pbrooke2eef172008-06-08 01:09:01 +00001868#endif /* !defined(CONFIG_USER_ONLY) */
1869
bellard13eb76e2004-01-24 15:23:36 +00001870/* physical memory access (slow version, mainly for debug) */
1871#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001872int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001873 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001874{
1875 int l, flags;
1876 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001877 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001878
1879 while (len > 0) {
1880 page = addr & TARGET_PAGE_MASK;
1881 l = (page + TARGET_PAGE_SIZE) - addr;
1882 if (l > len)
1883 l = len;
1884 flags = page_get_flags(page);
1885 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001886 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001887 if (is_write) {
1888 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001889 return -1;
bellard579a97f2007-11-11 14:26:47 +00001890 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001891 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001892 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001893 memcpy(p, buf, l);
1894 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001895 } else {
1896 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001897 return -1;
bellard579a97f2007-11-11 14:26:47 +00001898 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001899 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001900 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001901 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001902 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001903 }
1904 len -= l;
1905 buf += l;
1906 addr += l;
1907 }
Paul Brooka68fe892010-03-01 00:08:59 +00001908 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001909}
bellard8df1cd02005-01-28 22:37:22 +00001910
bellard13eb76e2004-01-24 15:23:36 +00001911#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001912
Avi Kivitya8170e52012-10-23 12:30:10 +02001913static void invalidate_and_set_dirty(hwaddr addr,
1914 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001915{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001916 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001917 /* invalidate code */
1918 tb_invalidate_phys_page_range(addr, addr + length, 0);
1919 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001920 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1921 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001922 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001923 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001924}
1925
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001926static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1927{
1928 if (memory_region_is_ram(mr)) {
1929 return !(is_write && mr->readonly);
1930 }
1931 if (memory_region_is_romd(mr)) {
1932 return !is_write;
1933 }
1934
1935 return false;
1936}
1937
Richard Henderson23326162013-07-08 14:55:59 -07001938static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001939{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001940 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001941
1942 /* Regions are assumed to support 1-4 byte accesses unless
1943 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001944 if (access_size_max == 0) {
1945 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001946 }
Richard Henderson23326162013-07-08 14:55:59 -07001947
1948 /* Bound the maximum access by the alignment of the address. */
1949 if (!mr->ops->impl.unaligned) {
1950 unsigned align_size_max = addr & -addr;
1951 if (align_size_max != 0 && align_size_max < access_size_max) {
1952 access_size_max = align_size_max;
1953 }
1954 }
1955
1956 /* Don't attempt accesses larger than the maximum. */
1957 if (l > access_size_max) {
1958 l = access_size_max;
1959 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001960 if (l & (l - 1)) {
1961 l = 1 << (qemu_fls(l) - 1);
1962 }
Richard Henderson23326162013-07-08 14:55:59 -07001963
1964 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001965}
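
/*
 * Worked example (illustrative): for a region declaring
 * valid.max_access_size = 2 and no unaligned support, an 8-byte access
 * at addr 0x1006 is clamped to 2 (the declared maximum; the alignment
 * of 0x1006 also allows at most 2), so address_space_rw() below splits
 * it into four 2-byte I/O operations.
 */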
1966
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001967bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001968 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001969{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001970 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001971 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001972 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001973 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001974 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001975 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001976
bellard13eb76e2004-01-24 15:23:36 +00001977 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001978 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001979 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001980
bellard13eb76e2004-01-24 15:23:36 +00001981 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001982 if (!memory_access_is_direct(mr, is_write)) {
1983 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001984 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001985 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001986 switch (l) {
1987 case 8:
1988 /* 64 bit write access */
1989 val = ldq_p(buf);
1990 error |= io_mem_write(mr, addr1, val, 8);
1991 break;
1992 case 4:
bellard1c213d12005-09-03 10:49:04 +00001993 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001994 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001995 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07001996 break;
1997 case 2:
bellard1c213d12005-09-03 10:49:04 +00001998 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001999 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002000 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002001 break;
2002 case 1:
bellard1c213d12005-09-03 10:49:04 +00002003 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002004 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002005 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002006 break;
2007 default:
2008 abort();
bellard13eb76e2004-01-24 15:23:36 +00002009 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002010 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002011 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002012 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002013 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002014 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002015 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002016 }
2017 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002018 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002019 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002020 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002021 switch (l) {
2022 case 8:
2023 /* 64 bit read access */
2024 error |= io_mem_read(mr, addr1, &val, 8);
2025 stq_p(buf, val);
2026 break;
2027 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002028 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002029 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002030 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002031 break;
2032 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002033 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002034 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002035 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002036 break;
2037 case 1:
bellard1c213d12005-09-03 10:49:04 +00002038 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002039 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002040 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002041 break;
2042 default:
2043 abort();
bellard13eb76e2004-01-24 15:23:36 +00002044 }
2045 } else {
2046 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002047 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002048 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002049 }
2050 }
2051 len -= l;
2052 buf += l;
2053 addr += l;
2054 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002055
2056 return error;
bellard13eb76e2004-01-24 15:23:36 +00002057}
bellard8df1cd02005-01-28 22:37:22 +00002058
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002059bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002060 const uint8_t *buf, int len)
2061{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002062 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002063}
2064
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002065bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002066{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002067 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002068}
2069
2070
Avi Kivitya8170e52012-10-23 12:30:10 +02002071void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002072 int len, int is_write)
2073{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002074 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002075}
2076
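#if 0
/*
 * Hedged usage sketch (not part of the original file): writing a buffer
 * into guest-physical memory through the global address space.  The
 * function name is hypothetical; the return value convention (true on
 * error) matches address_space_rw() above.
 */
static bool example_write_guest(hwaddr gpa, const uint8_t *data, int len)
{
    bool error = address_space_write(&address_space_memory, gpa, data, len);

    if (error) {
        fprintf(stderr, "write to " TARGET_FMT_plx " failed\n", gpa);
    }
    return error;
}
#endif
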
Alexander Graf582b55a2013-12-11 14:17:44 +01002077enum write_rom_type {
2078 WRITE_DATA,
2079 FLUSH_CACHE,
2080};
2081
2082static inline void cpu_physical_memory_write_rom_internal(
2083 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002084{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002085 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002086 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002087 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002088 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002089
bellardd0ecd2a2006-04-23 17:14:48 +00002090 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002091 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002092 mr = address_space_translate(&address_space_memory,
2093 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002094
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002095 if (!(memory_region_is_ram(mr) ||
2096 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002097 /* do nothing */
2098 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002099 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002100 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002101 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002102 switch (type) {
2103 case WRITE_DATA:
2104 memcpy(ptr, buf, l);
2105 invalidate_and_set_dirty(addr1, l);
2106 break;
2107 case FLUSH_CACHE:
2108 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2109 break;
2110 }
bellardd0ecd2a2006-04-23 17:14:48 +00002111 }
2112 len -= l;
2113 buf += l;
2114 addr += l;
2115 }
2116}
2117
Alexander Graf582b55a2013-12-11 14:17:44 +01002118/* used for ROM loading: can write in RAM and ROM */
2119void cpu_physical_memory_write_rom(hwaddr addr,
2120 const uint8_t *buf, int len)
2121{
2122 cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
2123}
2124
2125void cpu_flush_icache_range(hwaddr start, int len)
2126{
2127 /*
2128 * This function should do the same thing as an icache flush that was
2129 * triggered from within the guest. For TCG we are always cache coherent,
2130 * so there is no need to flush anything. For KVM / Xen we need to flush
2131 * the host's instruction cache at least.
2132 */
2133 if (tcg_enabled()) {
2134 return;
2135 }
2136
2137 cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
2138}
2139
aliguori6d16c2f2009-01-22 16:59:11 +00002140typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002141 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002142 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002143 hwaddr addr;
2144 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002145} BounceBuffer;
2146
2147static BounceBuffer bounce;
2148
aliguoriba223c22009-01-22 16:59:16 +00002149typedef struct MapClient {
2150 void *opaque;
2151 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002152 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002153} MapClient;
2154
Blue Swirl72cf2d42009-09-12 07:36:22 +00002155static QLIST_HEAD(map_client_list, MapClient) map_client_list
2156 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002157
2158void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2159{
Anthony Liguori7267c092011-08-20 22:09:37 -05002160 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002161
2162 client->opaque = opaque;
2163 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002164 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002165 return client;
2166}
2167
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002168static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002169{
2170 MapClient *client = (MapClient *)_client;
2171
Blue Swirl72cf2d42009-09-12 07:36:22 +00002172 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002173 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002174}
2175
2176static void cpu_notify_map_clients(void)
2177{
2178 MapClient *client;
2179
Blue Swirl72cf2d42009-09-12 07:36:22 +00002180 while (!QLIST_EMPTY(&map_client_list)) {
2181 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002182 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002183 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002184 }
2185}
2186
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002187bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2188{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002189 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002190 hwaddr l, xlat;
2191
2192 while (len > 0) {
2193 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002194 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2195 if (!memory_access_is_direct(mr, is_write)) {
2196 l = memory_access_size(mr, l, addr);
2197 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002198 return false;
2199 }
2200 }
2201
2202 len -= l;
2203 addr += l;
2204 }
2205 return true;
2206}
2207
aliguori6d16c2f2009-01-22 16:59:11 +00002208/* Map a physical memory region into a host virtual address.
2209 * May map a subset of the requested range, given by and returned in *plen.
2210 * May return NULL if resources needed to perform the mapping are exhausted.
2211 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002212 * Use cpu_register_map_client() to know when retrying the map operation is
2213 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002214 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002215void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002216 hwaddr addr,
2217 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002218 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002219{
Avi Kivitya8170e52012-10-23 12:30:10 +02002220 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002221 hwaddr done = 0;
2222 hwaddr l, xlat, base;
2223 MemoryRegion *mr, *this_mr;
2224 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002225
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002226 if (len == 0) {
2227 return NULL;
2228 }
aliguori6d16c2f2009-01-22 16:59:11 +00002229
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002230 l = len;
2231 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2232 if (!memory_access_is_direct(mr, is_write)) {
2233 if (bounce.buffer) {
2234 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002235 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002236 /* Avoid unbounded allocations */
2237 l = MIN(l, TARGET_PAGE_SIZE);
2238 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002239 bounce.addr = addr;
2240 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002241
2242 memory_region_ref(mr);
2243 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002244 if (!is_write) {
2245 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002246 }
aliguori6d16c2f2009-01-22 16:59:11 +00002247
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002248 *plen = l;
2249 return bounce.buffer;
2250 }
2251
2252 base = xlat;
2253 raddr = memory_region_get_ram_addr(mr);
2254
2255 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002256 len -= l;
2257 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002258 done += l;
2259 if (len == 0) {
2260 break;
2261 }
2262
2263 l = len;
2264 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2265 if (this_mr != mr || xlat != base + done) {
2266 break;
2267 }
aliguori6d16c2f2009-01-22 16:59:11 +00002268 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002269
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002270 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002271 *plen = done;
2272 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002273}
2274
Avi Kivityac1970f2012-10-03 16:22:53 +02002275/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002276 * Will also mark the memory as dirty if is_write == 1. access_len gives
2277 * the amount of memory that was actually read or written by the caller.
2278 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002279void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2280 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002281{
2282 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002283 MemoryRegion *mr;
2284 ram_addr_t addr1;
2285
2286 mr = qemu_ram_addr_from_host(buffer, &addr1);
2287 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002288 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002289 while (access_len) {
2290 unsigned l;
2291 l = TARGET_PAGE_SIZE;
2292 if (l > access_len)
2293 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002294 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002295 addr1 += l;
2296 access_len -= l;
2297 }
2298 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002299 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002300 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002301 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002302 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002303 return;
2304 }
2305 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002306 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002307 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002308 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002309 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002310 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002311 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002312}
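
#if 0
/*
 * Hedged usage sketch (not part of the original file): the canonical
 * map/access/unmap pattern for zero-copy DMA.  The function is
 * hypothetical; a real caller would register a map-client callback via
 * cpu_register_map_client() and retry when NULL is returned because the
 * single bounce buffer is in use.
 */
static void example_dma_read(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *buf = address_space_map(as, addr, &plen, false);

    if (!buf) {
        return; /* resources exhausted, retry later */
    }
    /* ... consume up to plen bytes at buf ... */
    address_space_unmap(as, buf, plen, false, plen);
}
#endif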
bellardd0ecd2a2006-04-23 17:14:48 +00002313
Avi Kivitya8170e52012-10-23 12:30:10 +02002314void *cpu_physical_memory_map(hwaddr addr,
2315 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002316 int is_write)
2317{
2318 return address_space_map(&address_space_memory, addr, plen, is_write);
2319}
2320
Avi Kivitya8170e52012-10-23 12:30:10 +02002321void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2322 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002323{
2324 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2325}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
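
/*
 * Editor's sketch: the explicit-endian variants exist for device models
 * whose registers have a fixed byte order whatever the target is.  A
 * hypothetical big-endian register file (base address and layout invented
 * for illustration):
 */
static inline uint32_t example_read_be_reg(hwaddr regbase, unsigned regno)
{
    /* registers are 4 bytes wide and naturally aligned, as the alignment
       warning above requires */
    return ldl_be_phys(regbase + regno * 4);
}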

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
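
/*
 * Editor's sketch: the 64-bit loads follow the same pattern.  A typical
 * user would be a device model fetching an 8-byte little-endian buffer
 * address out of a descriptor table in guest RAM (the 16-byte descriptor
 * stride is invented for illustration):
 */
static inline uint64_t example_read_desc_addr(hwaddr desc_table, int index)
{
    /* the address field sits at offset 0 of each descriptor and is
       8-byte aligned, as ldq_*_phys requires */
    return ldq_le_phys(desc_table + (hwaddr)index * 16);
}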

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The RAM page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
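
/*
 * Editor's sketch: the classic caller of stl_phys_notdirty() is MMU
 * emulation updating accessed/dirty flags inside a guest page table entry.
 * Going through stl_phys() instead would mark the page dirty and defeat
 * the very dirty tracking the PTE bits implement.  The flag value and PTE
 * layout below are hypothetical, not any real target's.
 */
#define EXAMPLE_PTE_ACCESSED 0x20
static inline void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}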

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
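
/*
 * Editor's sketch: a store-side counterpart to the load example earlier.
 * A device model posting a 32-bit status word into guest memory uses the
 * byte order fixed by its ABI, not the target's (names are illustrative):
 */
static inline void example_post_status(hwaddr status_addr, uint32_t code)
{
    stl_le_phys(status_addr, code);
}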

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
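
/*
 * Editor's sketch: note the asymmetry above.  stq_phys() stores in the
 * target's byte order (via tswap64()), while the _le/_be variants pin the
 * order explicitly; device models normally want the explicit form.
 * Hypothetical example, publishing a ring base address to an LE device:
 */
static inline void example_publish_ring_base(hwaddr reg_addr, uint64_t ring_pa)
{
    stq_le_phys(reg_addr, ring_pa);
}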

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
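
/*
 * Editor's sketch: cpu_memory_rw_debug() is the entry point used by the
 * gdbstub and the monitor, since it walks the CPU's page tables itself and
 * may even write to ROM.  A debugger-style helper that pulls a fixed-size
 * chunk of guest virtual memory as a C string might look like this (helper
 * name and buffer handling are hypothetical):
 */
static inline int example_read_guest_string(CPUState *cpu, target_ulong addr,
                                            char *buf, int buflen)
{
    if (buflen < 1 ||
        cpu_memory_rw_debug(cpu, addr, (uint8_t *)buf, buflen - 1, 0) < 0) {
        return -1;               /* some page in the range was unmapped */
    }
    buf[buflen - 1] = '\0';      /* force termination of the raw bytes */
    return 0;
}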
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
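
/*
 * Editor's sketch: a memory-dump or crash-dump path can use the predicate
 * above to skip device regions, where even a read may have side effects.
 * Illustrative only:
 */
static inline bool example_range_safe_to_dump(hwaddr start, hwaddr len)
{
    hwaddr addr;

    for (addr = start; addr < start + len; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(addr)) {
            return false;
        }
    }
    return true;
}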

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
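
/*
 * Editor's sketch: a RAMBlockIterFunc as consumed by the iterator above.
 * Callers such as RDMA-based migration use this to visit every RAM block;
 * this hypothetical callback merely sums up the guest RAM size:
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_count_ram, &total);
 */
static inline void example_count_ram(void *host, ram_addr_t offset,
                                     ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    (void)host;                  /* host mapping unused in this example */
    (void)offset;
    *total += length;
}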
#endif