/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make a safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
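/* With the values above, MAX_OPC_PARAM_ARGS = 5 + 1 = 6, so MAX_OPC_PARAM
   works out to 4 + 2 * 6 = 16 on 32-bit hosts and 4 + 6 = 10 on 64-bit
   hosts. */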
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use an average code fragment size computed per target and
   modulate it according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block
                             (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;  /* flags defining in which context the code was generated */
    uint16_t size;   /* size of target code for this block (1 <=
                        size <= TARGET_PAGE_SIZE) */
    uint16_t cflags; /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
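/* When deterministic icount execution is in use, the CF_COUNT_MASK bits of
   cflags carry the maximum number of guest insns this TB may execute before
   I/O must be handled (see the tb_gen_code callers in cpu-exec.c). */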

    uint8_t *tc_ptr; /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list: the two
       least significant bits of each pointer encode which pointer to
       follow next: 0 = jmp_next[0], 1 = jmp_next[1], 2 = jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

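/* The tb_jmp_cache hash below keeps all entries for one guest page in a
   contiguous block of the cache: the page hash selects the upper index bits
   and the in-page offset the lower ones, so a whole page can be evicted by
   walking a single block (see tb_flush_jmp_cache in cputlb.c). */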
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
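    /* jmp_addr points at the 32-bit operand of a direct jump; on x86 the
       rel32 displacement is relative to the end of that operand, hence
       the jmp_addr + 4 below. */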
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
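    /* The ARM B/BL encoding keeps a signed 24-bit word offset in the low
       bits, taken relative to the current insn + 8 (the pipelined PC value),
       which is why the delta is shifted right by 2 and based at
       jmp_addr + 8. */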

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
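
/* A simplified sketch of how the main loop is expected to chain blocks
   (names follow cpu-exec.c; the low two bits of the value returned by the
   generated code encode which jump slot of the previously executed TB to
   patch):

       next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
       if (next_tb != 0) {
           tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
       }
*/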

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point
   into the middle of the call insn.  Subtracting one would do the job
   except for several compressed-mode architectures (arm, mips) which set
   the low bit to indicate the compressed mode; subtracting two works
   around that.  It is also the case that there are no host ISAs that
   contain a call insn smaller than 4 bytes, so we don't worry about
   special-casing this.  */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC() (GETRA() - GETPC_ADJ)
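
/* For example, on an x86-64 host GETPC() inside a helper reached through a
   5-byte call insn yields the return address minus 2, i.e. an address
   inside that call insn, which cpu_restore_state() can map back to a guest
   pc. */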

#if !defined(CONFIG_USER_ONLY)

void phys_mem_set_alloc(void *(*alloc)(size_t));

struct MemoryRegion *iotlb_to_region(hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

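/* Softmmu code-access load helpers: the "cmmu" variants go through the TLB
   with a code (rather than data) access type, and are what the translator
   uses to fetch guest instructions. */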
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
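
/* Each inclusion of softmmu_header.h below instantiates the inline
   ld*_code() accessor for one access size (ldub_code, lduw_code, ldl_code,
   ldq_code, under the _code suffix defined above). */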

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (cpu->current_tb == NULL) {
        return 1;
    }
    return env->can_do_io != 0;
}

#endif