blob: ea90b649d4aa2bac6c198c17289a486afc353fa6 [file] [log] [blame]
bellardd4e81642003-05-25 16:46:15 +00001/*
2 * internal execution defines for qemu
ths5fafdf22007-09-16 21:08:06 +00003 *
bellardd4e81642003-05-25 16:46:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellardd4e81642003-05-25 16:46:15 +000018 */
19
aliguori875cdcf2008-10-23 13:52:00 +000020#ifndef _EXEC_ALL_H_
21#define _EXEC_ALL_H_
blueswir17d99a002009-01-14 19:00:36 +000022
23#include "qemu-common.h"
24
bellardb346ff42003-06-15 20:05:50 +000025/* allow to see translation results - the slowdown should be negligible, so we leave it */
aurel32de9a95f2008-11-11 13:41:01 +000026#define DEBUG_DISAS
bellardb346ff42003-06-15 20:05:50 +000027
Paul Brook41c1b1c2010-03-12 16:54:58 +000028/* Page tracking code uses ram addresses in system mode, and virtual
29 addresses in userspace mode. Define tb_page_addr_t to be an appropriate
30 type. */
31#if defined(CONFIG_USER_ONLY)
Paul Brookb480d9b2010-03-12 23:23:29 +000032typedef abi_ulong tb_page_addr_t;
Paul Brook41c1b1c2010-03-12 16:54:58 +000033#else
34typedef ram_addr_t tb_page_addr_t;
35#endif
36
/* is_jmp field values: how a translation block's last insn left the PC. */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

/* On 32-bit hosts a 64-bit TCG argument occupies two opcode parameters. */
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
bellardb346ff42003-06-15 20:05:50 +000072
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010073#include "qemu/log.h"
bellardb346ff42003-06-15 20:05:50 +000074
Andreas Färber9349b4f2012-03-14 01:38:32 +010075void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
76void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
77void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
Stefan Weile87b7cb2011-04-18 06:39:52 +000078 int pc_pos);
aurel32d2856f12008-04-28 00:32:32 +000079
bellard57fec1f2008-02-01 10:50:11 +000080void cpu_gen_init(void);
Andreas Färber9349b4f2012-03-14 01:38:32 +010081int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
blueswir1d07bde82007-12-11 19:35:45 +000082 int *gen_code_size_ptr);
Blue Swirla8a826a2012-12-04 20:16:07 +000083bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
84
Stefan Weil38c30fb2012-04-07 17:58:33 +020085void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
Blue Swirl20503962012-04-09 14:20:20 +000086void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
Andreas Färber9349b4f2012-03-14 01:38:32 +010087TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +000088 target_ulong pc, target_ulong cs_base, int flags,
89 int cflags);
Andreas Färber9349b4f2012-03-14 01:38:32 +010090void cpu_exec_init(CPUArchState *env);
91void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
Stefan Weil6375e092012-04-06 22:26:15 +020092int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
Paul Brook41c1b1c2010-03-12 16:54:58 +000093void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellard2e126692004-04-25 21:28:44 +000094 int is_cpu_write_access);
Alexander Graf77a8f1a2012-05-10 22:40:10 +000095void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
96 int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
/* User-mode emulation has no softmmu TLB, so the flush hooks are no-ops. */
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif
bellardd4e81642003-05-25 16:46:15 +0000114
bellardd4e81642003-05-25 16:46:15 +0000115#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
116
bellard4390df52004-01-04 18:03:10 +0000117#define CODE_GEN_PHYS_HASH_BITS 15
118#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
119
bellard4390df52004-01-04 18:03:10 +0000120/* estimated block size for TB allocation */
121/* XXX: use a per code average code fragment size and modulate it
122 according to the host CPU */
123#if defined(CONFIG_SOFTMMU)
124#define CODE_GEN_AVG_BLOCK_SIZE 128
125#else
126#define CODE_GEN_AVG_BLOCK_SIZE 64
127#endif
128
Richard Henderson5bbd2ca2012-09-21 10:48:51 -0700129#if defined(__arm__) || defined(_ARCH_PPC) \
130 || defined(__x86_64__) || defined(__i386__) \
Claudio Fontana4a136e02013-06-12 16:20:22 +0100131 || defined(__sparc__) || defined(__aarch64__) \
Richard Henderson5bbd2ca2012-09-21 10:48:51 -0700132 || defined(CONFIG_TCG_INTERPRETER)
Stefan Weil73163292011-10-05 20:03:02 +0200133#define USE_DIRECT_JUMP
bellardd4e81642003-05-25 16:46:15 +0000134#endif
135
/* A TranslationBlock describes one translated block of guest code: the
   guest context it was generated for, the host code produced for it, and
   the linkage used to chain directly to other TBs. */
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
bellardd4e81642003-05-25 16:46:15 +0000170
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700171#include "exec/spinlock.h"
172
typedef struct TBContext TBContext;

/* Shared translation state: the TB pool, the hash table used to look
   TBs up by guest physical address, the lock guarding both, and some
   bookkeeping statistics. */
struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};
189
pbrookb362e5e2006-11-12 20:40:55 +0000190static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
191{
192 target_ulong tmp;
193 tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
edgar_iglb5e19d42008-05-06 08:38:22 +0000194 return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
pbrookb362e5e2006-11-12 20:40:55 +0000195}
196
bellard8a40a182005-11-20 10:35:40 +0000197static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
bellardd4e81642003-05-25 16:46:15 +0000198{
pbrookb362e5e2006-11-12 20:40:55 +0000199 target_ulong tmp;
200 tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
edgar_iglb5e19d42008-05-06 08:38:22 +0000201 return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
202 | (tmp & TB_JMP_ADDR_MASK));
bellardd4e81642003-05-25 16:46:15 +0000203}
204
Paul Brook41c1b1c2010-03-12 16:54:58 +0000205static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
bellard4390df52004-01-04 18:03:10 +0000206{
Aurelien Jarnof96a3832010-12-28 17:46:59 +0100207 return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
bellard4390df52004-01-04 18:03:10 +0000208}
209
pbrook2e70f6e2008-06-29 01:03:05 +0000210void tb_free(TranslationBlock *tb);
Andreas Färber9349b4f2012-03-14 01:38:32 +0100211void tb_flush(CPUArchState *env);
Paul Brook41c1b1c2010-03-12 16:54:58 +0000212void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
bellardd4e81642003-05-25 16:46:15 +0000213
#if defined(USE_DIRECT_JUMP)

/* tb_set_jmp_target1 patches the host jump instruction at jmp_addr so it
   branches to addr; each host architecture needs its own encoding and
   icache-maintenance strategy. */
#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination (rel32 operand of the jmp insn) */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    /* Registers pinned for the cacheflush syscall below. */
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    /* Rewrite the 24-bit signed offset field of the ARM B instruction;
       the +8 accounts for the ARM pipeline's PC-ahead-by-two-insns rule. */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache via the ARM Linux cacheflush syscall */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

/* Patch TB tb's n-th outgoing jump to branch directly to addr. */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target (indirect jump: store the destination, the
   generated code loads it) */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif
283
ths5fafdf22007-09-16 21:08:06 +0000284static inline void tb_add_jump(TranslationBlock *tb, int n,
bellardd4e81642003-05-25 16:46:15 +0000285 TranslationBlock *tb_next)
286{
bellardcf256292003-05-25 19:20:31 +0000287 /* NOTE: this test is only needed for thread safety */
288 if (!tb->jmp_next[n]) {
289 /* patch the native jump address */
Stefan Weil6375e092012-04-06 22:26:15 +0200290 tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
ths3b46e622007-09-17 08:09:54 +0000291
bellardcf256292003-05-25 19:20:31 +0000292 /* add in TB jmp circular list */
293 tb->jmp_next[n] = tb_next->jmp_first;
Stefan Weil6375e092012-04-06 22:26:15 +0200294 tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
bellardcf256292003-05-25 19:20:31 +0000295 }
bellardd4e81642003-05-25 16:46:15 +0000296}
297
/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC()  (GETRA() - GETPC_ADJ)
322
bellarde95c8d52004-09-30 22:22:08 +0000323#if !defined(CONFIG_USER_ONLY)
bellard6e59c1d2003-10-27 21:24:54 +0000324
Stefan Weil575ddeb2013-09-29 20:56:45 +0200325void phys_mem_set_alloc(void *(*alloc)(size_t));
Markus Armbruster91138032013-07-31 15:11:08 +0200326
Avi Kivitya8170e52012-10-23 12:30:10 +0200327struct MemoryRegion *iotlb_to_region(hwaddr index);
Paolo Bonzini791af8c2013-05-24 16:10:39 +0200328bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
329 uint64_t *pvalue, unsigned size);
330bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
Avi Kivity37ec01d2012-03-08 18:08:35 +0200331 uint64_t value, unsigned size);
Paul Brookb3755a92010-03-12 16:54:58 +0000332
Andreas Färber9349b4f2012-03-14 01:38:32 +0100333void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
Blue Swirl20503962012-04-09 14:20:20 +0000334 uintptr_t retaddr);
bellard6e59c1d2003-10-27 21:24:54 +0000335
Richard Hendersone58eb532013-08-27 13:13:44 -0700336uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
337uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
338uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
339uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
blueswir179383c92008-08-30 09:51:20 +0000340
j_mayer6ebbf392007-10-14 07:07:08 +0000341#define ACCESS_TYPE (NB_MMU_MODES + 1)
bellard6e59c1d2003-10-27 21:24:54 +0000342#define MEMSUFFIX _code
bellard6e59c1d2003-10-27 21:24:54 +0000343
344#define DATA_SIZE 1
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100345#include "exec/softmmu_header.h"
bellard6e59c1d2003-10-27 21:24:54 +0000346
347#define DATA_SIZE 2
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100348#include "exec/softmmu_header.h"
bellard6e59c1d2003-10-27 21:24:54 +0000349
350#define DATA_SIZE 4
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100351#include "exec/softmmu_header.h"
bellard6e59c1d2003-10-27 21:24:54 +0000352
bellardc27004e2005-01-03 23:35:10 +0000353#define DATA_SIZE 8
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100354#include "exec/softmmu_header.h"
bellardc27004e2005-01-03 23:35:10 +0000355
bellard6e59c1d2003-10-27 21:24:54 +0000356#undef ACCESS_TYPE
357#undef MEMSUFFIX
bellard6e59c1d2003-10-27 21:24:54 +0000358
359#endif
bellard4390df52004-01-04 18:03:10 +0000360
#if defined(CONFIG_USER_ONLY)
/* In user mode, code pages are addressed by guest virtual address
   directly, so the lookup is the identity. */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif
bellard9df217a2005-02-10 22:05:51 +0000370
Andreas Färber9349b4f2012-03-14 01:38:32 +0100371typedef void (CPUDebugExcpHandler)(CPUArchState *env);
aliguoridde23672008-11-18 20:50:36 +0000372
Igor Mammedov84e3b602012-06-21 18:29:38 +0200373void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
aurel321b530a62009-04-05 20:08:59 +0000374
375/* vl.c */
376extern int singlestep;
377
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300378/* cpu-exec.c */
379extern volatile sig_atomic_t exit_request;
380
Paolo Bonzini946fb272011-09-12 13:57:37 +0200381/* Deterministic execution requires that IO only be performed on the last
382 instruction of a TB so that interrupts take effect immediately. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100383static inline int can_do_io(CPUArchState *env)
Paolo Bonzini946fb272011-09-12 13:57:37 +0200384{
Andreas Färberd77953b2013-01-16 19:29:31 +0100385 CPUState *cpu = ENV_GET_CPU(env);
386
Paolo Bonzini946fb272011-09-12 13:57:37 +0200387 if (!use_icount) {
388 return 1;
389 }
390 /* If not executing code then assume we are ok. */
Andreas Färberd77953b2013-01-16 19:29:31 +0100391 if (cpu->current_tb == NULL) {
Paolo Bonzini946fb272011-09-12 13:57:37 +0200392 return 1;
393 }
394 return env->can_do_io != 0;
395}
396
aliguori875cdcf2008-10-23 13:52:00 +0000397#endif