Assembler-x86.h (39265B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_x86_Assembler_x86_h
#define jit_x86_Assembler_x86_h

#include "jit/CompactBuffer.h"
#include "jit/JitCode.h"
#include "jit/shared/Assembler-shared.h"
#include "jit/x86-shared/Constants-x86-shared.h"

namespace js {
namespace jit {

// The eight x86 general-purpose registers. The encodings are shared with
// x64 (rax..rdi name the same low-3-bit encodings).
static constexpr Register eax{X86Encoding::rax};
static constexpr Register ecx{X86Encoding::rcx};
static constexpr Register edx{X86Encoding::rdx};
static constexpr Register ebx{X86Encoding::rbx};
static constexpr Register esp{X86Encoding::rsp};
static constexpr Register ebp{X86Encoding::rbp};
static constexpr Register esi{X86Encoding::rsi};
static constexpr Register edi{X86Encoding::rdi};

// The eight SSE registers, viewed here with Double content type. The same
// physical registers reappear below with Single/Simd128 content types.
static constexpr FloatRegister xmm0 =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister xmm1 =
    FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
static constexpr FloatRegister xmm2 =
    FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
static constexpr FloatRegister xmm3 =
    FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
static constexpr FloatRegister xmm4 =
    FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
static constexpr FloatRegister xmm5 =
    FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
static constexpr FloatRegister xmm6 =
    FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
static constexpr FloatRegister xmm7 =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);

// Vector registers fixed for use with some instructions, e.g. PBLENDVB.
static constexpr FloatRegister vmm0 =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);

static constexpr Register InvalidReg{X86Encoding::invalid_reg};
static constexpr FloatRegister InvalidFloatReg = FloatRegister();

// The boxed-Value register pair: tag in ecx, payload in edx.
static constexpr Register JSReturnReg_Type = ecx;
static constexpr Register JSReturnReg_Data = edx;
static constexpr Register StackPointer = esp;
static constexpr Register FramePointer = ebp;
static constexpr Register ReturnReg = eax;
// xmm0 is the return register for every float width; the width lives in the
// FloatRegister's content type, not in the encoding.
static constexpr FloatRegister ReturnFloat32Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
static constexpr FloatRegister ReturnDoubleReg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister ReturnSimd128Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
// xmm7 is reserved as the scratch float register (all widths); see also
// ABINonArgDoubleReg below, which must avoid it.
static constexpr FloatRegister ScratchFloat32Reg_ =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
static constexpr FloatRegister ScratchDoubleReg_ =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
static constexpr FloatRegister ScratchSimd128Reg =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);

// Note, EDX:EAX is the system ABI 64-bit return register, and it is to our
// advantage to keep the SpiderMonkey ABI in sync with the system ABI.
//
// However, using EDX here means that we have to use a register that does not
// have a word or byte part (eg DX/DH/DL) in some other places; notably,
// ABINonArgReturnReg1 is EDI. If this becomes a problem and ReturnReg64 has to
// be something other than EDX:EAX, then jitted code that calls directly to C++
// will need to shuffle the return value from EDX:EAX into ReturnReg64 directly
// after the call. See bug 1730161 for discussion and a patch that does that.
static constexpr Register64 ReturnReg64(edx, eax);

// Avoid ebp, which is the FramePointer, which is unavailable in some modes.
// Call-clobbered temporaries (esp/ebp excluded per the comment above).
static constexpr Register CallTempReg0 = edi;
static constexpr Register CallTempReg1 = eax;
static constexpr Register CallTempReg2 = ebx;
static constexpr Register CallTempReg3 = ecx;
static constexpr Register CallTempReg4 = esi;
static constexpr Register CallTempReg5 = edx;

// We have no arg regs, so our NonArgRegs are just our CallTempReg*
static constexpr Register CallTempNonArgRegs[] = {edi, eax, ebx, ecx, esi, edx};
static constexpr uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);

// Assigns ABI argument locations one at a time; on x86 all arguments go on
// the stack (see next() in the .cpp).
class ABIArgGenerator : public ABIArgGeneratorShared {
  ABIArg current_;

 public:
  explicit ABIArgGenerator(ABIKind kind)
      : ABIArgGeneratorShared(kind), current_() {}

  // Returns the location for the next argument of the given type.
  ABIArg next(MIRType argType);
  // The location most recently returned by next().
  ABIArg& current() { return current_; }
};

// See "ABI special registers" in Assembler-shared.h for more information.
static constexpr Register ABINonArgReg0 = eax;
static constexpr Register ABINonArgReg1 = ebx;
static constexpr Register ABINonArgReg2 = ecx;
static constexpr Register ABINonArgReg3 = edx;

// See "ABI special registers" in Assembler-shared.h for more information.
// Avoid xmm7 which is the ScratchDoubleReg_.
static constexpr FloatRegister ABINonArgDoubleReg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);

// See "ABI special registers" in Assembler-shared.h for more information.
static constexpr Register ABINonArgReturnReg0 = ecx;
static constexpr Register ABINonArgReturnReg1 = edi;
static constexpr Register ABINonVolatileReg = ebx;

// See "ABI special registers" in Assembler-shared.h for more information.
static constexpr Register ABINonArgReturnVolatileReg = ecx;

// See "ABI special registers" in Assembler-shared.h, and "The WASM ABIs" in
// WasmFrame.h for more information.
static constexpr Register InstanceReg = esi;

// Registers used for asm.js/wasm table calls. These registers must be disjoint
// from the ABI argument registers, InstanceReg and each other.
static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;

// Registers used for ref calls.
static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
static constexpr Register WasmCallRefCallScratchReg2 = ABINonArgReg2;
static constexpr Register WasmCallRefReg = ABINonArgReg3;

// Registers used for wasm tail calls operations.
static constexpr Register WasmTailCallInstanceScratchReg = ABINonArgReg1;
static constexpr Register WasmTailCallRAScratchReg = ABINonArgReg2;
static constexpr Register WasmTailCallFPScratchReg = ABINonArgReg3;

// Register used as a scratch along the return path in the fast js -> wasm stub
// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
// It must be a volatile register.
static constexpr Register WasmJitEntryReturnScratch = ebx;

static constexpr Register OsrFrameReg = edx;
static constexpr Register PreBarrierReg = edx;

// Not enough registers for a PC register (R0-R2 use 2 registers each).
static constexpr Register InterpreterPCReg = InvalidReg;

// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
// JSReturnOperand).
static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
static constexpr Register RegExpMatcherStringReg = CallTempReg1;
static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;

// Registers used by RegExpExecTest stub (do not use ReturnReg).
static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
static constexpr Register RegExpExecTestStringReg = CallTempReg2;

// Registers used by RegExpSearcher stub (do not use ReturnReg).
static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
static constexpr Register RegExpSearcherStringReg = CallTempReg2;
static constexpr Register RegExpSearcherLastIndexReg = CallTempReg3;

// GCC stack is aligned on 16 bytes. Ion does not maintain this for internal
// calls. wasm code does.
#if defined(__GNUC__) && !defined(__MINGW32__)
static constexpr uint32_t ABIStackAlignment = 16;
#else
static constexpr uint32_t ABIStackAlignment = 4;
#endif
static constexpr uint32_t CodeAlignment = 16;
static constexpr uint32_t JitStackAlignment = 16;

static constexpr uint32_t JitStackValueAlignment =
    JitStackAlignment / sizeof(Value);
static_assert(JitStackAlignment % sizeof(Value) == 0 &&
                  JitStackValueAlignment >= 1,
              "Stack alignment should be a non-zero multiple of sizeof(Value)");

static constexpr uint32_t SimdMemoryAlignment = 16;

static_assert(CodeAlignment % SimdMemoryAlignment == 0,
              "Code alignment should be larger than any of the alignments "
              "which are used for "
              "the constant sections of the code buffer. Thus it should be "
              "larger than the "
              "alignment for SIMD constants.");

static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
              "Stack alignment should be larger than any of the alignments "
              "which are used for "
              "spilled values. Thus it should be larger than the alignment "
              "for SIMD accesses.");

static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
// x86 wasm traps are a 2-byte instruction (see the trap emitter).
static constexpr uint32_t WasmTrapInstructionLength = 2;

// See comments in wasm::GenerateFunctionPrologue. The difference between these
// is the size of the largest callable prologue on the platform. (We could make
// the tail offset 3, but I have opted for 4 as that results in a better-aligned
// branch target.)
static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;

// A 32-bit immediate holding a JSValueTag.
struct ImmTag : public Imm32 {
  explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
};

// A 32-bit immediate holding the tag for a JSValueType.
struct ImmType : public ImmTag {
  explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
};

// Pointers are 4 bytes on x86, so scaled-index addressing of pointer-sized
// elements uses a scale of four.
static constexpr Scale ScalePointer = TimesFour;

}  // namespace jit
}  // namespace js

// The shared assembler needs the constants above, so it is included here,
// between the two jit-namespace sections.
#include "jit/x86-shared/Assembler-x86-shared.h"

namespace js {
namespace jit {

// Select the low 32-bit word of a 64-bit memory operand; delegates to the
// LowWord(Address)/LowWord(BaseIndex) overloads for the actual offset math.
static inline Operand LowWord(const Operand& op) {
  switch (op.kind()) {
    case Operand::MEM_REG_DISP:
      return Operand(LowWord(op.toAddress()));
    case Operand::MEM_SCALE:
      return Operand(LowWord(op.toBaseIndex()));
    default:
      MOZ_CRASH("Invalid operand type");
  }
}

// Select the high 32-bit word of a 64-bit memory operand; delegates to the
// HighWord(Address)/HighWord(BaseIndex) overloads for the actual offset math.
static inline Operand HighWord(const Operand& op) {
  switch (op.kind()) {
    case Operand::MEM_REG_DISP:
      return Operand(HighWord(op.toAddress()));
    case Operand::MEM_SCALE:
      return Operand(HighWord(op.toBaseIndex()));
    default:
      MOZ_CRASH("Invalid operand type");
  }
}

// Return operand from a JS -> JS call.
static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
                                              JSReturnReg_Data};

// x86-specific assembler: adds immediate-pointer jumps/calls (patched in
// executableCopy), GC-pointer immediates with data relocations, and
// patchable absolute-address loads/stores, on top of AssemblerX86Shared.
class Assembler : public AssemblerX86Shared {
  // Jumps/calls whose absolute target must be written once the final code
  // address is known (see executableCopy).
  Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;

  // Record a pending absolute jump. JITCODE-kind targets are additionally
  // noted in jumpRelocations_ so TraceJumpRelocations can visit them.
  void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind kind) {
    enoughMemory_ &=
        jumps_.append(RelativePatch(src.offset(), target.value, kind));
    if (kind == RelocationKind::JITCODE) {
      jumpRelocations_.writeUnsigned(src.offset());
    }
  }

 public:
  using AssemblerX86Shared::call;
  using AssemblerX86Shared::cmpl;
  using AssemblerX86Shared::j;
  using AssemblerX86Shared::jmp;
  using AssemblerX86Shared::movl;
  using AssemblerX86Shared::pop;
  using AssemblerX86Shared::push;
  using AssemblerX86Shared::retarget;
  using AssemblerX86Shared::vmovsd;
  using AssemblerX86Shared::vmovss;

  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);

  // Copy the assembly code to the given buffer, and perform any pending
  // relocations relying on the target address.
  void executableCopy(uint8_t* buffer);

  // Debug-only check that this code embeds no GC pointers (no data
  // relocations and no JITCODE jumps).
  void assertNoGCThings() const {
#ifdef DEBUG
    MOZ_ASSERT(dataRelocations_.length() == 0);
    for (auto& j : jumps_) {
      MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
    }
#endif
  }

  // Actual assembly emitting functions.

  // Push a GC pointer immediate, recording a data relocation so the GC can
  // find (and update) the embedded pointer.
  void push(ImmGCPtr ptr) {
    masm.push_i32(int32_t(ptr.value));
    writeDataRelocation(ptr);
  }
  void push(const ImmWord imm) { push(Imm32(imm.value)); }
  void push(const ImmPtr imm) { push(ImmWord(uintptr_t(imm.value))); }
  void push(FloatRegister src) {
    // We allocate space for double even when storing a float.
    subl(Imm32(sizeof(double)), StackPointer);
    if (src.isDouble()) {
      vmovsd(src, Address(StackPointer, 0));
    } else {
      MOZ_ASSERT(src.isSingle(), "simd128 is not supported");
      vmovss(src, Address(StackPointer, 0));
    }
  }

  // Push a 32-bit immediate and return the offset just past it, so the
  // immediate can be patched later.
  CodeOffset pushWithPatch(ImmWord word) {
    masm.push_i32(int32_t(word.value));
    return CodeOffset(masm.currentOffset());
  }

  // Pop into a float register; mirrors push(FloatRegister) above.
  void pop(FloatRegister src) {
    if (src.isDouble()) {
      vmovsd(Address(StackPointer, 0), src);
    } else {
      MOZ_ASSERT(src.isSingle(), "simd128 is not supported");
      vmovss(Address(StackPointer, 0), src);
    }
    // We free space for double even when storing a float.
    addl(Imm32(sizeof(double)), StackPointer);
  }

  // Move a 32-bit immediate and return the offset just past it for patching.
  CodeOffset movWithPatch(ImmWord word, Register dest) {
    movl(Imm32(word.value), dest);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
  }

  // GC-pointer moves: each records a data relocation for the embedded value.
  void movl(ImmGCPtr ptr, Register dest) {
    masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
    writeDataRelocation(ptr);
  }
  void movl(ImmGCPtr ptr, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
        writeDataRelocation(ptr);
        break;
      case Operand::MEM_REG_DISP:
        masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
        writeDataRelocation(ptr);
        break;
      case Operand::MEM_SCALE:
        masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(),
                       dest.index(), dest.scale());
        writeDataRelocation(ptr);
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void movl(ImmWord imm, Register dest) {
    masm.movl_i32r(imm.value, dest.encoding());
  }
  void movl(ImmPtr imm, Register dest) {
    movl(ImmWord(uintptr_t(imm.value)), dest);
  }
  void mov(ImmWord imm, Register dest) {
    // Use xor for setting registers to zero, as it is specially optimized
    // for this purpose on modern hardware. Note that it does clobber FLAGS
    // though.
    if (imm.value == 0) {
      xorl(dest, dest);
    } else {
      movl(imm, dest);
    }
  }
  void mov(ImmPtr imm, Register dest) {
    mov(ImmWord(uintptr_t(imm.value)), dest);
  }
  void mov(wasm::SymbolicAddress imm, Register dest) {
    // -1 is a placeholder; the SymbolicAccess entry causes the real address
    // to be patched in later.
    masm.movl_i32r(-1, dest.encoding());
    append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
  }
  void mov(const Operand& src, Register dest) { movl(src, dest); }
  void mov(Register src, const Operand& dest) { movl(src, dest); }
  void mov(Imm32 imm, const Operand& dest) { movl(imm, dest); }
  void mov(CodeLabel* label, Register dest) {
    // Put a placeholder value in the instruction stream.
    masm.movl_i32r(0, dest.encoding());
    label->patchAt()->bind(masm.size());
  }
  void mov(Register src, Register dest) { movl(src, dest); }
  void xchg(Register src, Register dest) { xchgl(src, dest); }
  void lea(const Operand& src, Register dest) { return leal(src, dest); }
  // On x86, 32-bit and pointer-width conditional moves are the same insn.
  void cmovz32(const Operand& src, Register dest) { return cmovzl(src, dest); }
  void cmovzPtr(const Operand& src, Register dest) { return cmovzl(src, dest); }

  // x87: store the top of the FP stack as a 32-bit float and pop.
  void fstp32(const Operand& src) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.fstp32_m(src.disp(), src.base());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void faddp() { masm.faddp(); }

  void cmpl(ImmWord rhs, Register lhs) {
    masm.cmpl_ir(rhs.value, lhs.encoding());
  }
  void cmpl(ImmPtr rhs, Register lhs) {
    cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
  }
  void cmpl(ImmGCPtr rhs, Register lhs) {
    masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
    writeDataRelocation(rhs);
  }
  void cmpl(Register rhs, Register lhs) {
    masm.cmpl_rr(rhs.encoding(), lhs.encoding());
  }
  void cmpl(ImmGCPtr rhs, const Operand& lhs) {
    switch (lhs.kind()) {
      case Operand::REG:
        masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
        writeDataRelocation(rhs);
        break;
      case Operand::MEM_REG_DISP:
        masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
        writeDataRelocation(rhs);
        break;
      case Operand::MEM_ADDRESS32:
        masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
        writeDataRelocation(rhs);
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // Compare against memory at a symbolic address; (void*)-1 is a placeholder
  // patched via the recorded SymbolicAccess.
  void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
    masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
    append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), lhs));
  }
  void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
    JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
    append(wasm::SymbolicAccess(CodeOffset(src.offset()), lhs));
  }

  // Add/subtract with carry, used for 64-bit arithmetic on this 32-bit
  // target.
  void adcl(Imm32 imm, Register dest) {
    masm.adcl_ir(imm.value, dest.encoding());
  }
  void adcl(Register src, Register dest) {
    masm.adcl_rr(src.encoding(), dest.encoding());
  }
  void adcl(Operand src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.adcl_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.adcl_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  void sbbl(Imm32 imm, Register dest) {
    masm.sbbl_ir(imm.value, dest.encoding());
  }
  void sbbl(Register src, Register dest) {
    masm.sbbl_rr(src.encoding(), dest.encoding());
  }
  void sbbl(Operand src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.sbbl_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.sbbl_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  void mull(Register multiplier) { masm.mull_r(multiplier.encoding()); }

  // Double-width shifts, used for shifting 64-bit values held in register
  // pairs.
  void shldl(const Imm32 imm, Register src, Register dest) {
    masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
  }
  void shrdl(const Imm32 imm, Register src, Register dest) {
    masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
  }

  void vhaddpd(FloatRegister rhs, FloatRegister lhsDest) {
    MOZ_ASSERT(HasSSE3());
    MOZ_ASSERT(rhs.size() == 16);
    MOZ_ASSERT(lhsDest.size() == 16);
    masm.vhaddpd_rr(rhs.encoding(), lhsDest.encoding(), lhsDest.encoding());
  }

  // x87: load a signed integer from memory onto the FP stack.
  void fild(const Operand& src) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.fild_m(src.disp(), src.base());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  // Jumps/calls to absolute targets; the target is written during
  // executableCopy via the pending-jump list.
  void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
    JmpSrc src = masm.jmp();
    addPendingJump(src, target, reloc);
  }
  void j(Condition cond, ImmPtr target,
         RelocationKind reloc = RelocationKind::HARDCODED) {
    JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
    addPendingJump(src, target, reloc);
  }

  void jmp(JitCode* target) {
    jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  void j(Condition cond, JitCode* target) {
    j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  void call(JitCode* target) {
    JmpSrc src = masm.call();
    addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  void call(ImmWord target) { call(ImmPtr((void*)target.value)); }
  void call(ImmPtr target) {
    JmpSrc src = masm.call();
    addPendingJump(src, target, RelocationKind::HARDCODED);
  }

  // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
  // this instruction.
  CodeOffset toggledCall(JitCode* target, bool enabled) {
    CodeOffset offset(size());
    JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
    addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
    MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
    return offset;
  }

  static size_t ToggledCallSize(uint8_t* code) {
    // Size of a call instruction.
    return 5;
  }

  // Re-routes pending jumps to an external target, flushing the label in the
  // process.
  void retarget(Label* label, ImmPtr target, RelocationKind reloc) {
    if (label->used()) {
      bool more;
      // Walk the chain of jumps threaded through the unbound label,
      // converting each into a pending absolute jump.
      X86Encoding::JmpSrc jmp(label->offset());
      do {
        X86Encoding::JmpSrc next;
        more = masm.nextJump(jmp, &next);
        addPendingJump(jmp, target, reloc);
        jmp = next;
      } while (more);
    }
    label->reset();
  }

  // Move a 32-bit immediate into a register where the immediate can be
  // patched.
  CodeOffset movlWithPatch(Imm32 imm, Register dest) {
    masm.movl_i32r(imm.value, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }

  // Load from *(base + disp32) where disp32 can be patched.
  // Each of these emits a load whose disp32 (or absolute address) is forced
  // to full width so it can be patched later; the returned CodeOffset points
  // just past the instruction.
  CodeOffset movsblWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movsbl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzblWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movzbl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movswlWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movswl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movzwl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  // Non-patchable variant; also accepts base+index*scale operands.
  void vmovss(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_mr(src.address(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.vmovss_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovd_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  // Non-patchable variant; also accepts base+index*scale operands.
  void vmovsd(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_mr(src.address(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.vmovsd_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovups_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovdqu_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }

  // Store to *(base + disp32) where disp32 can be patched.
  // Store counterparts of the patchable loads above; the disp32 (or absolute
  // address) is emitted at full width so it can be patched, and the returned
  // CodeOffset points just past the instruction.
  CodeOffset movbWithPatch(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movb_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movwWithPatch(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movw_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movl_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  // Store one half of a 64-bit value: the destination is offset to address
  // the low (or high) 32-bit word.
  CodeOffset movlWithPatchLow(Register regLow, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP: {
        return movlWithPatch(regLow, LowWord(dest));
      }
      case Operand::MEM_ADDRESS32: {
        Operand low(
            PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64LOW_OFFSET));
        return movlWithPatch(regLow, low);
      }
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset movlWithPatchHigh(Register regHigh, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP: {
        return movlWithPatch(regHigh, HighWord(dest));
      }
      case Operand::MEM_ADDRESS32: {
        Operand high(PatchedAbsoluteAddress(uint32_t(dest.address()) +
                                            INT64HIGH_OFFSET));
        return movlWithPatch(regHigh, high);
      }
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovd_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovq_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  // Non-patchable variant; also accepts base+index*scale operands.
  void vmovss(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_rm(src.encoding(), dest.address());
        break;
      case Operand::MEM_SCALE:
        masm.vmovss_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                       dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  // Non-patchable variant; also accepts base+index*scale operands.
  void vmovsd(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_rm(src.encoding(), dest.address());
        break;
      case Operand::MEM_SCALE:
        masm.vmovsd_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                       dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovups_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovdqu_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }

  // Load from *(addr + index*scale) where addr can be patched.
  // Scaled-index load whose absolute base address can be patched later; the
  // returned CodeOffset is the position just past the instruction (the patch
  // site locator).
  CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index,
                           Scale scale, Register dest) {
    masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }

  // Load from *src where src can be patched.
  //
  // Each of the following emits a load from a patchable absolute address and
  // returns CodeOffset(masm.currentOffset()), i.e. the offset just past the
  // emitted instruction.
  CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movsbl_mr(src.addr, dest.encoding());  // sign-extending byte load
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movzbl_mr(src.addr, dest.encoding());  // zero-extending byte load
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movswl_mr(src.addr, dest.encoding());  // sign-extending word load
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movzwl_mr(src.addr, dest.encoding());  // zero-extending word load
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movl_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  // The SSE2 loads below follow the same pattern, selecting the vector/scalar
  // move appropriate to the data width and alignment expectations.
  CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovss_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovd_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovq_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovsd_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqa_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqu_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovaps_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovups_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }

  // Store to *dest where dest can be patched.
  //
  // Mirror images of the loads above: store register |src| to a patchable
  // absolute address, returning the offset just past the instruction.
  CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
    masm.movb_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
    masm.movw_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
    masm.movl_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovss_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovd_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovq_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovsd_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqa_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovaps_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqu_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovups_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
};

// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
//
// Returns true and writes the chosen register to |*out|, or returns false
// once all NumCallTempNonArgRegs temporaries are exhausted. Note that
// |usedFloatArgs| is not consulted by this implementation (presumably because
// this target passes arguments on the stack — confirm against the shared
// ABI code).
static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
                                       uint32_t usedFloatArgs, Register* out) {
  if (usedIntArgs >= NumCallTempNonArgRegs) {
    return false;  // All temporaries consumed.
  }
  *out = CallTempNonArgRegs[usedIntArgs];
  return true;
}

}  // namespace jit
}  // namespace js

#endif /* jit_x86_Assembler_x86_h */