#include <stdio.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>

#include "misc.h"
#include "asmx64.h"

/* all offsets are measured in multiples of 8 bytes */
#define WORD_SIZE (8)

#define OPCODE_NOP (0x90)
#define OPCODE_PUSH_R64 (0x50)
#define OPCODE_PUSH_I64 (0x68)
#define OPCODE_PUSH_M64 (0xff) /* /6 */
#define OPCODE_POP_R64 (0x58)
#define OPCODE_RET (0xc3)
#define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
#define OPCODE_MOV_I64_TO_R64 (0xb8)
#define OPCODE_MOV_I32_TO_RM32 (0xc7)
#define OPCODE_MOV_R64_TO_RM64 (0x89)
#define OPCODE_MOV_RM64_TO_R64 (0x8b)
#define OPCODE_LEA_MEM_TO_R64 (0x8d) /* /r */
#define OPCODE_XOR_R64_TO_RM64 (0x31) /* /r */
#define OPCODE_ADD_R64_TO_RM64 (0x01)
#define OPCODE_ADD_I32_TO_RM32 (0x81) /* /0 */
#define OPCODE_ADD_I8_TO_RM32 (0x83) /* /0 */
#define OPCODE_SUB_R64_FROM_RM64 (0x29)
#define OPCODE_SUB_I32_FROM_RM64 (0x81) /* /5 */
#define OPCODE_SUB_I8_FROM_RM64 (0x83) /* /5 */
#define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
#define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
#define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
#define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
#define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
#define OPCODE_CMP_R64_WITH_RM64 (0x39)
#define OPCODE_CMP_RM32_WITH_R32 (0x3b)
#define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */
#define OPCODE_JMP_REL8 (0xeb)
#define OPCODE_JMP_REL32 (0xe9)
#define OPCODE_JCC_REL8 (0x70) /* | jcc type */
#define OPCODE_JCC_REL32_A (0x0f)
#define OPCODE_JCC_REL32_B (0x80) /* | jcc type */
#define OPCODE_SETCC_RM8_A (0x0f)
#define OPCODE_SETCC_RM8_B (0x90) /* | jcc type, /0 */
#define OPCODE_CALL_REL32 (0xe8)
#define OPCODE_CALL_RM32 (0xff) /* /2 */
#define OPCODE_LEAVE (0xc9)

#define MODRM_R64(x) ((x) << 3)
#define MODRM_RM_DISP0 (0x00)
#define MODRM_RM_DISP8 (0x40)
#define MODRM_RM_DISP32 (0x80)
#define MODRM_RM_REG (0xc0)
#define MODRM_RM_R64(x) (x)

#define REX_PREFIX (0x40)
#define REX_W (0x08) // width
#define REX_R (0x04) // register
#define REX_X (0x02) // index
#define REX_B (0x01) // base
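
// As a worked example of how the pieces above combine (illustration only,
// assuming the usual x86-64 register numbering from asmx64.h, i.e.
// REG_RAX == 0 and REG_RBX == 3): "mov rbx, rax" is REX.W + 0x89 /r,
// so three bytes:
//   REX_PREFIX | REX_W                                         -> 0x48
//   OPCODE_MOV_R64_TO_RM64                                     -> 0x89
//   MODRM_R64(REG_RAX) | MODRM_RM_REG | MODRM_RM_R64(REG_RBX)  -> 0xc3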

#define IMM32_L0(x) ((x) & 0xff)
#define IMM32_L1(x) (((x) >> 8) & 0xff)
#define IMM32_L2(x) (((x) >> 16) & 0xff)
#define IMM32_L3(x) (((x) >> 24) & 0xff)
#define IMM64_L4(x) (((x) >> 32) & 0xff)
#define IMM64_L5(x) (((x) >> 40) & 0xff)
#define IMM64_L6(x) (((x) >> 48) & 0xff)
#define IMM64_L7(x) (((x) >> 56) & 0xff)

#define UNSIGNED_FIT8(x) (((x) & 0xffffffffffffff00) == 0)
#define UNSIGNED_FIT32(x) (((x) & 0xffffffff00000000) == 0)
// fully parenthesised so it expands safely inside larger expressions
#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))

struct _asm_x64_t {
    int pass;
    uint code_offset;
    uint code_size;
    byte *code_base;
    byte dummy_data[8];

    uint max_num_labels;
    int *label_offsets;
};

// for allocating memory, see src/v8/src/platform-linux.cc
void *alloc_mem(uint req_size, uint *alloc_size, bool is_exec) {
    req_size = (req_size + 0xfff) & (~0xfff); // round up to a whole number of 4k pages
    int prot = PROT_READ | PROT_WRITE | (is_exec ? PROT_EXEC : 0);
    void *ptr = mmap(NULL, req_size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED) {
        assert(0);
    }
    *alloc_size = req_size;
    return ptr;
}

asm_x64_t* asm_x64_new(uint max_num_labels) {
    asm_x64_t* as;

    as = m_new(asm_x64_t, 1);
    as->pass = 0;
    as->code_offset = 0;
    as->code_size = 0;
    as->code_base = NULL;
    as->max_num_labels = max_num_labels;
    as->label_offsets = m_new(int, max_num_labels);

    return as;
}

void asm_x64_free(asm_x64_t* as, bool free_code) {
    if (free_code) {
        // need to munmap the code block (it was allocated with mmap, not m_new)
        //m_free(as->code_base);
    }
    /*
    if (as->label != NULL) {
        int i;
        for (i = 0; i < as->label->len; ++i)
        {
            Label* lab = &g_array_index(as->label, Label, i);
            if (lab->unresolved != NULL)
                g_array_free(lab->unresolved, true);
        }
        g_array_free(as->label, true);
    }
    */
    m_free(as);
}

void asm_x64_start_pass(asm_x64_t *as, int pass) {
    as->pass = pass;
    as->code_offset = 0;
    if (pass == ASM_X64_PASS_2) {
        // reset all labels
        memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
    }
}

void asm_x64_end_pass(asm_x64_t *as) {
    if (as->pass == ASM_X64_PASS_2) {
        // calculate size of code in bytes
        as->code_size = as->code_offset;
        //as->code_base = m_new(byte, as->code_size); // need to allocate executable memory
        uint actual_alloc;
        as->code_base = alloc_mem(as->code_size, &actual_alloc, true);
        printf("code_size: %u\n", as->code_size);
    }

    /*
    // check labels are resolved
    if (as->label != NULL)
    {
        int i;
        for (i = 0; i < as->label->len; ++i)
            if (g_array_index(as->label, Label, i).unresolved != NULL)
                return false;
    }
    */
}

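// A minimal usage sketch (illustrative only; it assumes the ASM_X64_PASS_1..3
// constants and REG_* numbers declared in asmx64.h, and uses one hypothetical
// label, index 0):
//
//     asm_x64_t *as = asm_x64_new(1);
//     for (int pass = ASM_X64_PASS_1; pass <= ASM_X64_PASS_3; pass++) {
//         asm_x64_start_pass(as, pass);
//         asm_x64_entry(as, 2);        // prologue with 2 local slots
//         asm_x64_label_assign(as, 0); // label offsets resolve across passes
//         asm_x64_exit(as);            // epilogue + ret
//         asm_x64_end_pass(as);
//     }
//     void (*fun)(void) = asm_x64_get_code(as);
//     fun();
//
// Earlier passes only measure offsets (the bytes go to dummy_data); executable
// memory is allocated at the end of pass 2, and pass 3 writes the real bytes.
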
// all functions must go through this one to emit bytes
static byte* asm_x64_get_cur_to_write_bytes(asm_x64_t* as, int num_bytes_to_write) {
    //printf("emit %d\n", num_bytes_to_write);
    if (as->pass < ASM_X64_PASS_3) {
        as->code_offset += num_bytes_to_write;
        return as->dummy_data;
    } else {
        assert(as->code_offset + num_bytes_to_write <= as->code_size);
        byte *c = as->code_base + as->code_offset;
        as->code_offset += num_bytes_to_write;
        return c;
    }
}

uint asm_x64_get_code_size(asm_x64_t* as) {
    return as->code_size;
}

void* asm_x64_get_code(asm_x64_t* as) {
    return as->code_base;
}

static void asm_x64_write_byte_1(asm_x64_t* as, byte b1) {
    byte* c = asm_x64_get_cur_to_write_bytes(as, 1);
    c[0] = b1;
}

static void asm_x64_write_byte_2(asm_x64_t* as, byte b1, byte b2) {
    byte* c = asm_x64_get_cur_to_write_bytes(as, 2);
    c[0] = b1;
    c[1] = b2;
}

static void asm_x64_write_byte_3(asm_x64_t* as, byte b1, byte b2, byte b3) {
    byte* c = asm_x64_get_cur_to_write_bytes(as, 3);
    c[0] = b1;
    c[1] = b2;
    c[2] = b3;
}

static void asm_x64_write_word32(asm_x64_t* as, int w32) {
    byte* c = asm_x64_get_cur_to_write_bytes(as, 4);
    c[0] = IMM32_L0(w32);
    c[1] = IMM32_L1(w32);
    c[2] = IMM32_L2(w32);
    c[3] = IMM32_L3(w32);
}

static void asm_x64_write_word64(asm_x64_t* as, int64_t w64) {
    byte* c = asm_x64_get_cur_to_write_bytes(as, 8);
    c[0] = IMM32_L0(w64);
    c[1] = IMM32_L1(w64);
    c[2] = IMM32_L2(w64);
    c[3] = IMM32_L3(w64);
    c[4] = IMM64_L4(w64);
    c[5] = IMM64_L5(w64);
    c[6] = IMM64_L6(w64);
    c[7] = IMM64_L7(w64);
}

/* unused
static void asm_x64_write_word32_to(asm_x64_t* as, int offset, int w32) {
    byte* c;
    assert(offset + 4 <= as->code_size);
    c = as->code_base + offset;
    c[0] = IMM32_L0(w32);
    c[1] = IMM32_L1(w32);
    c[2] = IMM32_L2(w32);
    c[3] = IMM32_L3(w32);
}
*/

static void asm_x64_write_r64_disp(asm_x64_t* as, int r64, int disp_r64, int disp_offset) {
    assert(disp_r64 != REG_RSP);

    if (disp_offset == 0 && disp_r64 != REG_RBP) {
        asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP0 | MODRM_RM_R64(disp_r64));
    } else if (SIGNED_FIT8(disp_offset)) {
        asm_x64_write_byte_2(as, MODRM_R64(r64) | MODRM_RM_DISP8 | MODRM_RM_R64(disp_r64), IMM32_L0(disp_offset));
    } else {
        asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP32 | MODRM_RM_R64(disp_r64));
        asm_x64_write_word32(as, disp_offset);
    }
}

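// For illustration, the three ModRM forms chosen above encode as follows
// (assuming REG_RAX == 0, REG_RBP == 5, REG_RDI == 7, with r64 = REG_RAX):
//   [rdi]       -> mod=00, 1 byte:  0x07
//   [rbp-8]     -> mod=01, 2 bytes: 0x45 0xf8  (rbp always needs a displacement)
//   [rdi+0x200] -> mod=10, 5 bytes: 0x87 0x00 0x02 0x00 0x00
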
void asm_x64_nop(asm_x64_t* as) {
    asm_x64_write_byte_1(as, OPCODE_NOP);
}

void asm_x64_push_r64(asm_x64_t* as, int src_r64) {
    asm_x64_write_byte_1(as, OPCODE_PUSH_R64 | src_r64);
}

void asm_x64_push_i32(asm_x64_t* as, int src_i32) {
    asm_x64_write_byte_1(as, OPCODE_PUSH_I64);
    asm_x64_write_word32(as, src_i32); // will be sign extended to 64 bits
}

void asm_x64_push_disp(asm_x64_t* as, int src_r64, int src_offset) {
    asm_x64_write_byte_1(as, OPCODE_PUSH_M64);
    asm_x64_write_r64_disp(as, 6, src_r64, src_offset);
}

void asm_x64_pop_r64(asm_x64_t* as, int dest_r64) {
    asm_x64_write_byte_1(as, OPCODE_POP_R64 | dest_r64);
}

static void asm_x64_ret(asm_x64_t* as) {
    asm_x64_write_byte_1(as, OPCODE_RET);
}

void asm_x64_mov_r32_to_r32(asm_x64_t* as, int src_r32, int dest_r32) {
    // defaults to 32 bit operation
    asm_x64_write_byte_2(as, OPCODE_MOV_R64_TO_RM64, MODRM_R64(src_r32) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
}

void asm_x64_mov_r64_to_r64(asm_x64_t* as, int src_r64, int dest_r64) {
    // use REX prefix for 64 bit operation
    asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_MOV_R64_TO_RM64, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
}

void asm_x64_mov_r64_to_disp(asm_x64_t* as, int src_r64, int dest_r64, int dest_disp) {
    // use REX prefix for 64 bit operation
    asm_x64_write_byte_2(as, REX_PREFIX | REX_W, OPCODE_MOV_R64_TO_RM64);
    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
}

void asm_x64_mov_disp_to_r64(asm_x64_t* as, int src_r64, int src_disp, int dest_r64) {
    // use REX prefix for 64 bit operation
    asm_x64_write_byte_2(as, REX_PREFIX | REX_W, OPCODE_MOV_RM64_TO_R64);
    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
}

void asm_x64_lea_disp_to_r64(asm_x64_t* as, int src_r64, int src_disp, int dest_r64) {
    // use REX prefix for 64 bit operation
    asm_x64_write_byte_2(as, REX_PREFIX | REX_W, OPCODE_LEA_MEM_TO_R64);
    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
}

void asm_x64_mov_i8_to_r8(asm_x64_t *as, int src_i8, int dest_r64) {
    asm_x64_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r64, src_i8);
}

void asm_x64_mov_i32_to_r64(asm_x64_t* as, int src_i32, int dest_r64) {
    // without REX.W this is a 32 bit mov, and the cpu zero-extends it into the full 64 bit register
    asm_x64_write_byte_1(as, OPCODE_MOV_I64_TO_R64 | dest_r64);
    asm_x64_write_word32(as, src_i32);
}

void asm_x64_mov_i64_to_r64(asm_x64_t* as, int64_t src_i64, int dest_r64) {
    // a full 64 bit immediate needs the REX.W prefix
    asm_x64_write_byte_2(as, REX_PREFIX | REX_W, OPCODE_MOV_I64_TO_R64 | dest_r64);
    asm_x64_write_word64(as, src_i64);
}

void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64) {
    if (UNSIGNED_FIT32(src_i64)) {
        // 5 bytes: 32 bit immediate, zero-extended by the cpu
        asm_x64_mov_i32_to_r64(as, src_i64 & 0xffffffff, dest_r64);
    } else {
        // 10 bytes: full 64 bit immediate
        asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
    }
}

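// For example (illustrative encodings, assuming REG_RAX == 0):
//   mov rax, 1            -> b8 01 00 00 00                 (5 bytes)
//   mov rax, 0x123456789a -> 48 b8 9a 78 56 34 12 00 00 00  (10 bytes)
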
void asm_x64_mov_i32_to_disp(asm_x64_t* as, int src_i32, int dest_r32, int dest_disp) {
    assert(0); // not yet implemented
    asm_x64_write_byte_1(as, OPCODE_MOV_I32_TO_RM32);
    //asm_x64_write_r32_disp(as, 0, dest_r32, dest_disp);
    asm_x64_write_word32(as, src_i32);
}

void asm_x64_xor_r64_to_r64(asm_x64_t *as, int src_r64, int dest_r64) {
    asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_XOR_R64_TO_RM64, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
}

void asm_x64_add_r64_to_r64(asm_x64_t* as, int src_r64, int dest_r64) {
    asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_ADD_R64_TO_RM64, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
}

void asm_x64_add_i32_to_r32(asm_x64_t* as, int src_i32, int dest_r32) {
    assert(dest_r32 != REG_RSP); // adjusting RSP would likely need a 64 bit operation
    if (SIGNED_FIT8(src_i32)) {
        asm_x64_write_byte_2(as, OPCODE_ADD_I8_TO_RM32, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
        asm_x64_write_byte_1(as, src_i32 & 0xff);
    } else {
        asm_x64_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
        asm_x64_write_word32(as, src_i32);
    }
}

void asm_x64_sub_r32_from_r32(asm_x64_t* as, int src_r32, int dest_r32) {
    // defaults to 32 bit operation
    asm_x64_write_byte_2(as, OPCODE_SUB_R64_FROM_RM64, MODRM_R64(src_r32) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
}

void asm_x64_sub_r64_from_r64(asm_x64_t* as, int src_r64, int dest_r64) {
    // use REX prefix for 64 bit operation
    asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_R64_FROM_RM64, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
}

void asm_x64_sub_i32_from_r32(asm_x64_t* as, int src_i32, int dest_r32) {
    if (SIGNED_FIT8(src_i32)) {
        // defaults to 32 bit operation
        asm_x64_write_byte_2(as, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
        asm_x64_write_byte_1(as, src_i32 & 0xff);
    } else {
        // defaults to 32 bit operation
        asm_x64_write_byte_2(as, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
        asm_x64_write_word32(as, src_i32);
    }
}

void asm_x64_sub_i32_from_r64(asm_x64_t* as, int src_i32, int dest_r64) {
    if (SIGNED_FIT8(src_i32)) {
        // use REX prefix for 64 bit operation
        asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
        asm_x64_write_byte_1(as, src_i32 & 0xff);
    } else {
        // use REX prefix for 64 bit operation
        asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
        asm_x64_write_word32(as, src_i32);
    }
}

/* shifts not tested */
void asm_x64_shl_r32_by_imm(asm_x64_t* as, int r32, int imm) {
    asm_x64_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R64(4) | MODRM_RM_REG | MODRM_RM_R64(r32));
    asm_x64_write_byte_1(as, imm);
}

void asm_x64_shr_r32_by_imm(asm_x64_t* as, int r32, int imm) {
    asm_x64_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(r32));
    asm_x64_write_byte_1(as, imm);
}

void asm_x64_sar_r32_by_imm(asm_x64_t* as, int r32, int imm) {
    asm_x64_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(r32));
    asm_x64_write_byte_1(as, imm);
}

void asm_x64_cmp_r64_with_r64(asm_x64_t* as, int src_r64_a, int src_r64_b) {
    asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_CMP_R64_WITH_RM64, MODRM_R64(src_r64_a) | MODRM_RM_REG | MODRM_RM_R64(src_r64_b));
}

void asm_x64_cmp_r32_with_disp(asm_x64_t* as, int src_r32_a, int src_r32_b, int src_disp_b) {
    assert(0); // not yet implemented
    asm_x64_write_byte_1(as, OPCODE_CMP_R64_WITH_RM64);
    //asm_x64_write_r32_disp(as, src_r32_a, src_r32_b, src_disp_b);
}

void asm_x64_cmp_disp_with_r32(asm_x64_t* as, int src_r32_a, int src_disp_a, int src_r32_b) {
    assert(0); // not yet implemented
    asm_x64_write_byte_1(as, OPCODE_CMP_RM32_WITH_R32);
    //asm_x64_write_r32_disp(as, src_r32_b, src_r32_a, src_disp_a);
}

void asm_x64_cmp_i32_with_r32(asm_x64_t* as, int src_i32, int src_r32) {
    if (SIGNED_FIT8(src_i32)) {
        asm_x64_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
        asm_x64_write_byte_1(as, src_i32 & 0xff);
    } else {
        asm_x64_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
        asm_x64_write_word32(as, src_i32);
    }
}

void asm_x64_test_r8_with_r8(asm_x64_t* as, int src_r64_a, int src_r64_b) {
    // TODO implement for other registers
    assert(src_r64_a == REG_RAX);
    assert(src_r64_b == REG_RAX);
    asm_x64_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R64(src_r64_a) | MODRM_RM_REG | MODRM_RM_R64(src_r64_b));
}

void asm_x64_setcc_r8(asm_x64_t* as, int jcc_type, int dest_r8) {
    asm_x64_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r8));
}

void asm_x64_label_assign(asm_x64_t* as, int label) {
    assert(label < as->max_num_labels);
    if (as->pass == ASM_X64_PASS_2) {
        // assign label offset
        assert(as->label_offsets[label] == -1);
        as->label_offsets[label] = as->code_offset;
    } else if (as->pass == ASM_X64_PASS_3) {
        // ensure label offset has not changed from PASS_2 to PASS_3
        //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset);
        assert(as->label_offsets[label] == as->code_offset);
    }
}

static int get_label_dest(asm_x64_t *as, int label) {
    assert(label < as->max_num_labels);
    return as->label_offsets[label];
}

void asm_x64_jmp_label(asm_x64_t *as, int label) {
    int dest = get_label_dest(as, label);
    int rel = dest - as->code_offset;
    if (dest >= 0 && rel < 0) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 8 bit relative jump
        rel -= 2;
        if (SIGNED_FIT8(rel)) {
            asm_x64_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
        } else {
            rel += 2;
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
        large_jump:
        rel -= 5;
        asm_x64_write_byte_1(as, OPCODE_JMP_REL32);
        asm_x64_write_word32(as, rel);
    }
}

void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, int label) {
    int dest = get_label_dest(as, label);
    int rel = dest - as->code_offset;
    if (dest >= 0 && rel < 0) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 8 bit relative jump
        rel -= 2;
        if (SIGNED_FIT8(rel)) {
            asm_x64_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
        } else {
            rel += 2;
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
        large_jump:
        rel -= 6;
        asm_x64_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
        asm_x64_write_word32(as, rel);
    }
}

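// Note on the sizing logic above: relative offsets are measured from the end
// of the jump instruction, hence rel -= 2 for the 2 byte short form, rel -= 5
// for jmp rel32, and rel -= 6 for the 0x0f-prefixed jcc rel32. A forwards
// jump's destination is still unresolved (-1) on the sizing pass, so the
// large form is always assumed, keeping offsets identical across passes.
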
void asm_x64_entry(asm_x64_t* as, int num_locals) {
    asm_x64_push_r64(as, REG_RBP);
    asm_x64_mov_r64_to_r64(as, REG_RSP, REG_RBP);
    if (num_locals < 0) {
        num_locals = 0;
    }
    num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary
    asm_x64_sub_i32_from_r64(as, num_locals * WORD_SIZE, REG_RSP);
    asm_x64_push_r64(as, REG_RBX);
}

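// As a concrete example, asm_x64_entry(as, 2) emits the equivalent of:
//     push rbp
//     mov rbp, rsp
//     sub rsp, 24   ; 2 locals rounded up to an odd count (3) * WORD_SIZE
//     push rbx
// After this prologue rsp is again 16-byte aligned: it was misaligned by 8 at
// entry (the caller's return address), and the two pushes plus an odd number
// of word-sized locals restore the alignment.
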
void asm_x64_exit(asm_x64_t* as) {
    asm_x64_pop_r64(as, REG_RBX);
    asm_x64_write_byte_1(as, OPCODE_LEAVE);
    asm_x64_ret(as);
}

void asm_x64_push_arg(asm_x64_t* as, int src_arg_num) {
    assert(0); // not yet implemented
    asm_x64_push_disp(as, REG_RBP, 8 + src_arg_num * WORD_SIZE);
}

void asm_x64_mov_arg_to_r32(asm_x64_t* as, int src_arg_num, int dest_r32) {
    assert(0); // not yet implemented
    //asm_x64_mov_disp_to_r32(as, REG_RBP, 8 + src_arg_num * WORD_SIZE, dest_r32);
}

void asm_x64_mov_r32_to_arg(asm_x64_t* as, int src_r32, int dest_arg_num) {
    assert(0); // not yet implemented
    //asm_x64_mov_r32_to_disp(as, src_r32, REG_RBP, 8 + dest_arg_num * WORD_SIZE);
}

static int asm_x64_local_offset_from_ebp(int local_num) {
    return -(local_num + 1) * WORD_SIZE;
}

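// The stack frame layout implied by the offset above (for illustration):
//   [rbp-8]  local 0
//   [rbp-16] local 1
//   [rbp-24] local 2, and so on, growing down from the saved rbp
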
void asm_x64_mov_local_to_r64(asm_x64_t* as, int src_local_num, int dest_r64) {
    asm_x64_mov_disp_to_r64(as, REG_RBP, asm_x64_local_offset_from_ebp(src_local_num), dest_r64);
}

void asm_x64_mov_r64_to_local(asm_x64_t* as, int src_r64, int dest_local_num) {
    asm_x64_mov_r64_to_disp(as, src_r64, REG_RBP, asm_x64_local_offset_from_ebp(dest_local_num));
}

void asm_x64_mov_local_addr_to_r64(asm_x64_t* as, int local_num, int dest_r64) {
    int offset = asm_x64_local_offset_from_ebp(local_num);
    if (offset == 0) {
        asm_x64_mov_r64_to_r64(as, REG_RBP, dest_r64);
    } else {
        asm_x64_lea_disp_to_r64(as, REG_RBP, offset, dest_r64);
    }
}

void asm_x64_push_local(asm_x64_t* as, int local_num) {
    asm_x64_push_disp(as, REG_RBP, asm_x64_local_offset_from_ebp(local_num));
}

void asm_x64_push_local_addr(asm_x64_t* as, int local_num, int temp_r64) {
    asm_x64_mov_r64_to_r64(as, REG_RBP, temp_r64);
    asm_x64_add_i32_to_r32(as, asm_x64_local_offset_from_ebp(local_num), temp_r64);
    asm_x64_push_r64(as, temp_r64);
}

/*
can't use these because code might be relocated when resized

void asm_x64_call(asm_x64_t* as, void* func)
{
    asm_x64_sub_i32_from_r32(as, 8, REG_RSP);
    asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
    asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
    asm_x64_mov_r64_to_r64(as, REG_RBP, REG_RSP);
}

void asm_x64_call_i1(asm_x64_t* as, void* func, int i1)
{
    asm_x64_sub_i32_from_r32(as, 8, REG_RSP);
    asm_x64_sub_i32_from_r32(as, 12, REG_RSP);
    asm_x64_push_i32(as, i1);
    asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
    asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
    asm_x64_add_i32_to_r32(as, 16, REG_RSP);
    asm_x64_mov_r64_to_r64(as, REG_RBP, REG_RSP);
}
*/

void asm_x64_call_ind(asm_x64_t* as, void *ptr, int temp_r64) {
    asm_x64_mov_i64_to_r64_optimised(as, (int64_t)ptr, temp_r64);
    asm_x64_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R64(2) | MODRM_RM_REG | MODRM_RM_R64(temp_r64));
    // this reduces code size by 2 bytes per call, but doesn't seem to speed it up at all
    // doesn't work anymore because calls are 64 bits away
    /*
    asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
    asm_x64_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4));
    */
}
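
// Example: asm_x64_call_ind(as, f, REG_RAX) materialises the pointer f in rax
// and then emits "call rax" (ff d0, assuming REG_RAX == 0), which works no
// matter how far away the target is, unlike the rel32 form commented out above.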