/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "qemu-tls.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
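
/* Illustrative note (editor's sketch, not part of the original header):
 * BSWAP_NEEDED is defined only when host and target endianness differ, so on
 * a little-endian host emulating a big-endian target tswap32(0x12345678)
 * would return 0x78563412, while on a matching host/target pair it returns
 * the value unchanged.  A hypothetical caller:
 *
 *     uint32_t guest_val = tswap32(host_val);   -- host order -> target order
 *     tswap32s(&guest_val);                     -- in place; the swap is its own inverse
 */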

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */

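/* Illustrative sketch (editor's addition, not part of the original header),
 * decoding a few names with the convention above:
 *
 *     ldub_raw(p)       -- load unsigned 8-bit integer, host memory access
 *     ldsw_kernel(p)    -- load signed 16-bit integer, kernel-mode soft MMU
 *     stfq_raw(p, v)    -- store 64-bit float, host memory access
 */
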
/* target-endianness CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
    (!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif

#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})
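
/* Illustrative sketch (editor's addition): g2h() adds GUEST_BASE to turn a
 * guest virtual address into a host pointer, and h2g() reverses that while
 * asserting the result fits the guest address space.  A hypothetical round
 * trip, with guest_addr standing in for any valid guest address:
 *
 *     void *host_ptr = g2h(guest_addr);    -- guest -> host pointer
 *     abi_ulong back = h2g(host_ptr);      -- host -> guest, back == guest_addr
 */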

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#ifndef CONFIG_TCG_PASS_AREG0
#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)
#else
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)

#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)

#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)

#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
#endif

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#ifdef CONFIG_TCG_PASS_AREG0
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#endif
#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
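
/* Worked example (editor's note, assuming TARGET_PAGE_BITS is 12, which is
 * common but target-dependent): TARGET_PAGE_SIZE is then 0x1000,
 * TARGET_PAGE_MASK is ~0xfff, and TARGET_PAGE_ALIGN(0x12345) evaluates to
 * (0x12345 + 0xfff) & ~0xfff == 0x13000, i.e. the address rounded up to the
 * next page boundary. */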

/* ??? These should be the larger of uintptr_t and target_ulong.  */
extern uintptr_t qemu_real_host_page_size;
extern uintptr_t qemu_host_page_size;
extern uintptr_t qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

CPUArchState *cpu_copy(CPUArchState *env);
CPUArchState *qemu_get_cpu(int cpu);

#define CPU_DUMP_CODE 0x00010000

void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags);
void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
extern CPUArchState *first_cpu;
DECLARE_TLS(CPUArchState *, cpu_single_env);
#define cpu_single_env tls_var(cpu_single_env)

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000
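
/* Editor's illustration (hypothetical alias, not defined in this header): a
 * target's cpu.h typically maps these generic bits to architecture-specific
 * names, for instance something along the lines of
 *
 *     #define CPU_INTERRUPT_NMI  CPU_INTERRUPT_TGT_EXT_3
 */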

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0400
#define CPU_INTERRUPT_TGT_INT_2   0x0800
#define CPU_INTERRUPT_TGT_INT_3   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)

#ifndef CONFIG_USER_ONLY
typedef void (*CPUInterruptHandler)(CPUArchState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

static inline void cpu_interrupt(CPUArchState *s, int mask)
{
    cpu_interrupt_handler(s, mask);
}
#else /* USER_ONLY */
void cpu_interrupt(CPUArchState *env, int mask);
#endif /* USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask);

void cpu_exit(CPUArchState *s);

bool qemu_cpu_has_work(CPUArchState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
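
/* Illustrative sketch (editor's addition, hypothetical caller): installing a
 * debugger breakpoint and removing it again via the handle returned above:
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *         ... run until the breakpoint is reported ...
 *         cpu_breakpoint_remove_by_ref(env, bp);
 *     }
 */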

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUArchState *env, int enabled);
int cpu_is_stopped(CPUArchState *env);
void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data);

#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK   (1 << 0)

typedef struct RAMBlock {
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    uint32_t flags;
    char idstr[256];
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;
#endif
} RAMBlock;

typedef struct RAMList {
    uint8_t *phys_dirty;
    QLIST_HEAD(, RAMBlock) blocks;
    uint64_t dirty_pages;
} RAMList;
extern RAMList ram_list;

extern const char *mem_path;
extern int mem_prealloc;

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)
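
/* Editor's note (illustrative): since TLB comparison addresses are page
 * aligned, these low bits are free to carry the flags above, and a lookup
 * can take the fast RAM path only when
 * (tlb_addr & (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)) == 0. */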

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#endif /* CPU_ALL_H */