/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_arm64_MacroAssembler_arm64_h
#define jit_arm64_MacroAssembler_arm64_h

#include "jit/arm64/Assembler-arm64.h"
#include "jit/arm64/vixl/Debugger-vixl.h"
#include "jit/arm64/vixl/MacroAssembler-vixl.h"
#include "jit/AtomicOp.h"
#include "jit/MoveResolver.h"
#include "vm/BigIntType.h"  // JS::BigInt
#include "wasm/WasmBuiltins.h"

#ifdef _M_ARM64
#  ifdef move32
#    undef move32
#  endif
#  ifdef move64
#    undef move64
#  endif
#endif

namespace js {
namespace jit {

// Import VIXL operands directly into the jit namespace for shared code.
using vixl::MemOperand;
using vixl::Operand;

struct ImmShiftedTag : public ImmWord {
  explicit ImmShiftedTag(JSValueType type)
      : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
  }
};

struct ImmTag : public Imm32 {
  explicit ImmTag(JSValueTag tag) : Imm32(tag) {}
};

class ScratchTagScope;

class MacroAssemblerCompat : public vixl::MacroAssembler {
 public:
  using Condition = vixl::Condition;

 private:
  // Perform a downcast. Should be removed by Bug 996602.
  js::jit::MacroAssembler& asMasm();
  const js::jit::MacroAssembler& asMasm() const;

 public:
  // Restrict to only VIXL-internal functions.
  vixl::MacroAssembler& asVIXL();
  const MacroAssembler& asVIXL() const;

 protected:
  bool enoughMemory_;

  MacroAssemblerCompat() : vixl::MacroAssembler(), enoughMemory_(true) {}

 protected:
  MoveResolver moveResolver_;

 public:
  bool oom() const { return Assembler::oom() || !enoughMemory_; }
  static ARMRegister toARMRegister(RegisterOrSP r, size_t size) {
    if (IsHiddenSP(r)) {
      MOZ_ASSERT(size == 64);
      return sp;
    }
    return ARMRegister(AsRegister(r), size);
  }
  static MemOperand toMemOperand(const Address& a) {
    return MemOperand(toARMRegister(a.base, 64), a.offset);
  }
  FaultingCodeOffset doBaseIndex(const vixl::CPURegister& rt,
                                 const BaseIndex& addr, vixl::LoadStoreOp op) {
    const ARMRegister base = toARMRegister(addr.base, 64);
    const ARMRegister index = ARMRegister(addr.index, 64);
    const unsigned scale = addr.scale;

    if (!addr.offset &&
        (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) {
      return LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op);
    }

    vixl::UseScratchRegisterScope temps(this);
    ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(!scratch64.Is(rt));
    MOZ_ASSERT(!scratch64.Is(base));
    MOZ_ASSERT(!scratch64.Is(index));

    Add(scratch64, base, Operand(index, vixl::LSL, scale));
    return LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op);
  }
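  // Illustrative sketch (not part of the interface): AArch64 can encode a
  // scaled register offset only when the shift amount matches the access
  // size, and it cannot combine that with an immediate offset. So, assuming
  // a BaseIndex of (x1 + x2 * 8 + 16) for a 64-bit load, doBaseIndex must
  // materialize the address first, roughly:
  //
  //   add x16, x1, x2, lsl #3   ; scratch = base + (index << scale)
  //   ldr x0, [x16, #16]        ; then apply the byte offset
  //
  // whereas (x1 + x2 * 8) with no offset encodes directly as
  //   ldr x0, [x1, x2, lsl #3]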
  void push(FloatRegister f) {
    MOZ_ASSERT(f.isDouble() || f.isSingle(), "simd128 is not supported");
    // We push the entire Dx register even when storing a Sx.
    vixl::MacroAssembler::Push(ARMFPRegister(f, 64));
  }
  void push(ARMFPRegister f) { vixl::MacroAssembler::Push(f); }
  void push(Imm32 imm) {
    if (imm.value == 0) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      move32(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ImmWord imm) {
    if (imm.value == 0) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      Mov(scratch64, imm.value);
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ImmPtr imm) {
    if (imm.value == nullptr) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      movePtr(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ImmGCPtr imm) {
    if (imm.value == nullptr) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      movePtr(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ARMRegister reg) { vixl::MacroAssembler::Push(reg); }
  void push(Address a) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(a.base != scratch64.asUnsized());
    loadPtr(a, scratch64.asUnsized());
    vixl::MacroAssembler::Push(scratch64);
  }

  // Push registers.
  void push(Register reg) { vixl::MacroAssembler::Push(ARMRegister(reg, 64)); }
  void push(RegisterOrSP reg) {
    if (IsHiddenSP(reg)) {
      vixl::MacroAssembler::Push(sp);
    } else {
      vixl::MacroAssembler::Push(toARMRegister(reg, 64));
    }
  }
  void push(Register r0, Register r1) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64));
  }
  void push(Register r0, Register r1, Register r2) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
                               ARMRegister(r2, 64));
  }
  void push(Register r0, Register r1, Register r2, Register r3) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
                               ARMRegister(r2, 64), ARMRegister(r3, 64));
  }
  void push(Register r0, ARMRegister r1) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), r1);
  }
  void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
            ARMFPRegister r3) {
    vixl::MacroAssembler::Push(r0, r1, r2, r3);
  }

  // Pop registers.
  void pop(Register reg) { vixl::MacroAssembler::Pop(ARMRegister(reg, 64)); }
  void pop(Register r0, Register r1) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64));
  }
  void pop(Register r0, Register r1, Register r2) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
                              ARMRegister(r2, 64));
  }
  void pop(Register r0, Register r1, Register r2, Register r3) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
                              ARMRegister(r2, 64), ARMRegister(r3, 64));
  }
  void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
           ARMFPRegister r3) {
    vixl::MacroAssembler::Pop(r0, r1, r2, r3);
  }
  void pop(ARMRegister r0, Register r1) {
    vixl::MacroAssembler::Pop(r0, ARMRegister(r1, 64));
  }

  void pop(const ValueOperand& v) { pop(v.valueReg()); }
  void pop(const FloatRegister& f) {
    MOZ_ASSERT(f.isDouble() || f.isSingle(), "simd128 is not supported");
    // We pop the entire Dx register even when loading a Sx.
    vixl::MacroAssembler::Pop(ARMFPRegister(f, 64));
  }

  // Update sp with the value of the current active stack pointer, if
  // necessary.
  void syncStackPtr() {
    if (!GetStackPointer64().Is(vixl::sp)) {
      Mov(vixl::sp, GetStackPointer64());
    }
  }
  void initPseudoStackPtr() {
    if (!GetStackPointer64().Is(vixl::sp)) {
      Mov(GetStackPointer64(), vixl::sp);
    }
  }
  // In debug builds only, cause a trap if PSP is active and PSP != SP.
  void assertStackPtrsSynced(uint32_t id) {
#ifdef DEBUG
    // The add and sub instructions below will only take a 12-bit immediate.
    MOZ_ASSERT(id <= 0xFFF);
    if (!GetStackPointer64().Is(vixl::sp)) {
      Label ok;
      // Add a marker, so we can figure out who requested the check when
      // inspecting the generated code. Note, a more concise way to encode
      // the marker would be to use it as an immediate for the `brk`
      // instruction as generated by `Unreachable()`, and removing the add/sub.
      Add(GetStackPointer64(), GetStackPointer64(), Operand(id));
      Sub(GetStackPointer64(), GetStackPointer64(), Operand(id));
      Cmp(vixl::sp, GetStackPointer64());
      B(Equal, &ok);
      Unreachable();
      bind(&ok);
    }
#endif
  }
  // In debug builds only, add a marker that doesn't change the machine's
  // state. Note these markers are x16-based, as opposed to the x28-based
  // ones made by `assertStackPtrsSynced`.
  void addMarker(uint32_t id) {
#ifdef DEBUG
    // Only 12 bits of immediate are allowed.
    MOZ_ASSERT(id <= 0xFFF);
    ARMRegister x16 = ARMRegister(r16, 64);
    Add(x16, x16, Operand(id));
    Sub(x16, x16, Operand(id));
#endif
  }
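  // Illustrative sketch: with an assumed id of 0x123, addMarker(0x123)
  // leaves a self-cancelling pair in the instruction stream that is easy to
  // spot when disassembling a debug build:
  //
  //   add x16, x16, #0x123
  //   sub x16, x16, #0x123
  //
  // x16 ends up with its original value and no flags are set, so the marker
  // is purely a breadcrumb for code inspection.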

  void storeValue(ValueOperand val, const Address& dest) {
    storePtr(val.valueReg(), dest);
  }

  template <typename T>
  void storeValue(JSValueType type, Register reg, const T& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != reg);
    boxValue(type, reg, scratch);
    storeValue(ValueOperand(scratch), dest);
  }
  template <typename T>
  void storeValue(const Value& val, const T& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    moveValue(val, ValueOperand(scratch));
    storeValue(ValueOperand(scratch), dest);
  }
  void storeValue(ValueOperand val, BaseIndex dest) {
    storePtr(val.valueReg(), dest);
  }
  void storeValue(const Address& src, const Address& dest, Register temp) {
    loadPtr(src, temp);
    storePtr(temp, dest);
  }

  void storePrivateValue(Register src, const Address& dest) {
    storePtr(src, dest);
  }
  void storePrivateValue(ImmGCPtr imm, const Address& dest) {
    storePtr(imm, dest);
  }

  void loadValue(Address src, Register val) {
    Ldr(ARMRegister(val, 64), MemOperand(src));
  }
  void loadValue(Address src, ValueOperand val) {
    Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src));
  }
  void loadValue(const BaseIndex& src, ValueOperand val) {
    doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x);
  }
  void loadUnalignedValue(const Address& src, ValueOperand dest) {
    loadValue(src, dest);
  }
  void tagValue(JSValueType type, Register payload, ValueOperand dest);
  void pushValue(ValueOperand val) {
    vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64));
  }
  void popValue(ValueOperand val) {
    vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64));
    // SP may be < PSP now (that's OK).
    // eg testcase: tests/backup-point-bug1315634.js
  }
  void pushValue(const Value& val) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    if (val.isGCThing()) {
      BufferOffset load =
          movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), scratch);
      writeDataRelocation(val, load);
      push(scratch);
    } else {
      moveValue(val, scratch);
      push(scratch);
    }
  }
  void pushValue(JSValueType type, Register reg) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != reg);
    boxValue(type, reg, scratch);
    push(scratch);
  }
  void pushValue(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != addr.base);
    loadValue(addr, scratch);
    push(scratch);
  }
  void pushValue(const BaseIndex& addr, Register scratch) {
    loadValue(addr, ValueOperand(scratch));
    pushValue(ValueOperand(scratch));
  }
  void moveValue(const Value& val, Register dest) {
    if (val.isGCThing()) {
      BufferOffset load =
          movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), dest);
      writeDataRelocation(val, load);
    } else {
      movePtr(ImmWord(val.asRawBits()), dest);
    }
  }
  void moveValue(const Value& src, const ValueOperand& dest) {
    moveValue(src, dest.valueReg());
  }

  CodeOffset pushWithPatch(ImmWord imm) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    CodeOffset label = movWithPatch(imm, scratch);
    push(scratch);
    return label;
  }

  CodeOffset movWithPatch(ImmWord imm, Register dest) {
    BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
    return CodeOffset(off.getOffset());
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
    return CodeOffset(off.getOffset());
  }

  void boxValue(JSValueType type, Register src, Register dest);
  void boxValue(Register type, Register src, Register dest);

  void splitSignExtTag(Register src, Register dest) {
    sbfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT,
         (64 - JSVAL_TAG_SHIFT));
  }
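  // Illustrative sketch: SBFX extracts the 64 - JSVAL_TAG_SHIFT tag bits
  // (17 bits, assuming JSVAL_TAG_SHIFT == 47) and sign-extends them. E.g.
  // for an int32 Value, whose tag is 0x1FFF1, the result is
  // 0xFFFF'FFFF'FFFF'FFF1, i.e. -15. This sign extension is what lets
  // cmpTag() below compare tags with a CMN against a small positive
  // immediate.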
  [[nodiscard]] Register extractTag(const Address& address, Register scratch) {
    loadPtr(address, scratch);
    splitSignExtTag(scratch, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractTag(const ValueOperand& value,
                                    Register scratch) {
    splitSignExtTag(value.valueReg(), scratch);
    return scratch;
  }
  [[nodiscard]] Register extractObject(const Address& address,
                                       Register scratch) {
    loadPtr(address, scratch);
    unboxObject(scratch, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractObject(const ValueOperand& value,
                                       Register scratch) {
    unboxObject(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractSymbol(const ValueOperand& value,
                                       Register scratch) {
    unboxSymbol(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractInt32(const ValueOperand& value,
                                      Register scratch) {
    unboxInt32(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractBoolean(const ValueOperand& value,
                                        Register scratch) {
    unboxBoolean(value, scratch);
    return scratch;
  }

  void emitSet(Condition cond, Register dest) {
    Cset(ARMRegister(dest, 64), cond);
  }

  void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
    cond = testNull(cond, value);
    emitSet(cond, dest);
  }
  void testObjectSet(Condition cond, const ValueOperand& value,
                     Register dest) {
    cond = testObject(cond, value);
    emitSet(cond, dest);
  }
  void testUndefinedSet(Condition cond, const ValueOperand& value,
                        Register dest) {
    cond = testUndefined(cond, value);
    emitSet(cond, dest);
  }

  void convertBoolToInt32(Register source, Register dest) {
    Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64));
  }

  void convertInt32ToDouble(Register src, FloatRegister dest) {
    Scvtf(ARMFPRegister(dest, 64),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertInt32ToDouble(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertInt32ToDouble(scratch, dest);
  }
  void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    MOZ_ASSERT(scratch != src.index);
    load32(src, scratch);
    convertInt32ToDouble(scratch, dest);
  }

  void convertInt32ToFloat32(Register src, FloatRegister dest) {
    Scvtf(ARMFPRegister(dest, 32),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertInt32ToFloat32(scratch, dest);
  }

  void convertUInt32ToDouble(Register src, FloatRegister dest) {
    Ucvtf(ARMFPRegister(dest, 64),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertUInt32ToDouble(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertUInt32ToDouble(scratch, dest);
  }

  void convertUInt32ToFloat32(Register src, FloatRegister dest) {
    Ucvtf(ARMFPRegister(dest, 32),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertUInt32ToFloat32(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertUInt32ToFloat32(scratch, dest);
  }

  void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32));
  }
  void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
  }

  void convertDoubleToFloat16(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 16), ARMFPRegister(src, 64));
  }
  void convertFloat16ToDouble(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 16));
  }
  void convertFloat32ToFloat16(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 16), ARMFPRegister(src, 32));
  }
  void convertFloat16ToFloat32(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 16));
  }
  void convertInt32ToFloat16(Register src, FloatRegister dest) {
    // A direct "32-bit to half-precision" move requires FEAT_FP16, so we
    // instead use a "32-bit to single-precision" move.
    convertInt32ToFloat32(src, dest);
    convertFloat32ToFloat16(dest, dest);
  }

  using vixl::MacroAssembler::B;

  bool hasFjcvtzs() const {
    return CPUHas(vixl::CPUFeatures::kFP, vixl::CPUFeatures::kJSCVT);
  }

  void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
                            bool negativeZeroCheck = true) {
    ARMFPRegister fsrc64(src, 64);
    ARMRegister dest32(dest, 32);

    // ARMv8.3 chips support the FJCVTZS instruction, which handles exactly
    // this logic.
    if (hasFjcvtzs()) {
      // Convert double to integer, rounding toward zero.
      // The Z-flag is set iff the conversion is exact. -0 unsets the Z-flag.
      Fjcvtzs(dest32, fsrc64);

      if (negativeZeroCheck) {
        B(fail, Assembler::NonZero);
      } else {
        Label done;
        B(&done, Assembler::Zero);  // If conversion was exact, go to end.

        // The conversion was inexact, but the caller intends to allow -0.

        // Compare fsrc64 to 0.
        // If fsrc64 == 0 and the FJCVTZS conversion was inexact, then fsrc64
        // is -0.
        Fcmp(fsrc64, 0.0);
        B(fail, Assembler::NotEqual);  // Pass through -0; fail otherwise.

        bind(&done);
      }
    } else {
      // Older processors use a significantly slower path.
      ARMRegister dest64(dest, 64);

      vixl::UseScratchRegisterScope temps(this);
      const ARMFPRegister scratch64 = temps.AcquireD();
      MOZ_ASSERT(!scratch64.Is(fsrc64));

      Fcvtzs(dest32, fsrc64);    // Convert, rounding toward zero.
      Scvtf(scratch64, dest32);  // Convert back, using FPCR rounding mode.
      Fcmp(scratch64, fsrc64);
      B(fail, Assembler::NotEqual);

      if (negativeZeroCheck) {
        Label nonzero;
        Cbnz(dest32, &nonzero);
        // dest32 is 0, so fail if fsrc64 is negative (i.e. -0).
        Fmov(dest64, fsrc64);
        Cmp(dest64, xzr);
        B(fail, Assembler::Signed);
        Mov(dest64, xzr);
        bind(&nonzero);
      }
    }
  }
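  // Illustrative sketch of the FJCVTZS fast path above, with hypothetical
  // inputs:
  //
  //   input 3.0  -> dest = 3, Z = 1 (exact)   : conversion accepted
  //   input 3.5  -> dest = 3, Z = 0 (inexact) : branches to fail
  //   input -0.0 -> dest = 0, Z = 0           : fails the negativeZero check,
  //                                             or is let through by the
  //                                             Fcmp-with-0.0 path
  //
  // This mirrors the comments above; FJCVTZS was added in ARMv8.3 precisely
  // to implement JavaScript's double-to-int32 conversion semantics.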
  void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
                             bool negativeZeroCheck = true) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMFPRegister scratch32 = temps.AcquireS();

    ARMFPRegister fsrc(src, 32);
    ARMRegister dest32(dest, 32);
    ARMRegister dest64(dest, 64);

    MOZ_ASSERT(!scratch32.Is(fsrc));

    Fcvtzs(dest64, fsrc);      // Convert, rounding toward zero.
    Scvtf(scratch32, dest32);  // Convert back, using FPCR rounding mode.
    Fcmp(scratch32, fsrc);
    B(fail, Assembler::NotEqual);

    if (negativeZeroCheck) {
      Label nonzero;
      Cbnz(dest32, &nonzero);
      // dest32 is 0, so fail if fsrc is negative (i.e. -0).
      Fmov(dest32, fsrc);
      Cmp(dest32, wzr);
      B(fail, Assembler::Signed);
      Mov(dest32, wzr);
      bind(&nonzero);
    }
    Uxtw(dest64, dest64);
  }

  void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
                          bool negativeZeroCheck = true) {
    ARMFPRegister fsrc64(src, 64);
    ARMRegister dest64(dest, 64);

    vixl::UseScratchRegisterScope temps(this);
    const ARMFPRegister scratch64 = temps.AcquireD();
    MOZ_ASSERT(!scratch64.Is(fsrc64));

    // Note: we can't use the FJCVTZS instruction here because that only
    // works for 32-bit values.

    Fcvtzs(dest64, fsrc64);    // Convert, rounding toward zero.
    Scvtf(scratch64, dest64);  // Convert back, using FPCR rounding mode.
    Fcmp(scratch64, fsrc64);
    B(fail, Assembler::NotEqual);

    if (negativeZeroCheck) {
      Label nonzero;
      Cbnz(dest64, &nonzero);
      // dest64 is 0, so fail if fsrc64 is negative (i.e. -0).
      Fmov(dest64, fsrc64);
      Cmp(dest64, xzr);
      B(fail, Assembler::Signed);
      Mov(dest64, xzr);
      bind(&nonzero);
    }
  }

  void truncateFloat32ModUint32(FloatRegister src, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();

    ARMFPRegister src32(src, 32);
    ARMRegister dest64(dest, 64);

    MOZ_ASSERT(!scratch64.Is(dest64));

    // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
    // In the case of overflow, the output is saturated.
    // In the case of NaN and -0, the output is zero.
    Fcvtzs(dest64, src32);

    // Zero if the result is saturated, i.e. it's either INT64_MIN or
    // INT64_MAX.
    Add(scratch64, dest64, Operand(0x7fff'ffff'ffff'ffff));
    Cmn(scratch64, 3);
    Csel(dest64, dest64, vixl::xzr, Assembler::BelowOrEqual);

    // Clear upper 32 bits.
    Uxtw(dest64, dest64);
  }
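  // Illustrative arithmetic for the saturation check above:
  //
  //   dest = INT64_MIN -> scratch = INT64_MIN + 0x7fff'ffff'ffff'ffff = -1
  //   dest = INT64_MAX -> scratch = INT64_MAX + 0x7fff'ffff'ffff'ffff = -2
  //   any other dest   -> scratch <= -3 when compared as unsigned
  //
  // Cmn(scratch, 3) sets flags for scratch - (-3), so BelowOrEqual (unsigned
  // <=) holds exactly when dest was not saturated; the Csel then keeps dest
  // in that case and zeroes it otherwise.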

  void jump(Label* label) { B(label); }
  void jump(JitCode* code) { branch(code); }
  void jump(ImmPtr ptr) {
    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/asm.js/testBug1046688.js.
    // * Removing the sync causes no failures in all of jit-tests.
    //
    // Also see branch(JitCode*) below. This version of jump() is called only
    // from jump(TrampolinePtr) which is called on various very slow paths,
    // probably only in JS.
    syncStackPtr();
    BufferOffset loc =
        b(-1,
          LabelDoc());  // The jump target will be patched by executableCopy().
    addPendingJump(loc, ptr, RelocationKind::HARDCODED);
  }
  void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
  void jump(Register reg) { Br(ARMRegister(reg, 64)); }
  void jump(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    const auto scratch = temps.AcquireX();
    MOZ_ASSERT(addr.base != scratch.asUnsized());
    loadPtr(addr, scratch.asUnsized());
    br(scratch);
  }

  void align(int alignment) { armbuffer_.align(alignment); }

  void haltingAlign(int alignment) {
    armbuffer_.align(alignment, vixl::HLT | ImmException(0xBAAD));
  }
  void nopAlign(int alignment) { armbuffer_.align(alignment); }

  void movePtr(Register src, Register dest) {
    Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
  }
  void movePtr(ImmWord imm, Register dest) {
    Mov(ARMRegister(dest, 64), int64_t(imm.value));
  }
  void movePtr(ImmPtr imm, Register dest) {
    Mov(ARMRegister(dest, 64), int64_t(imm.value));
  }
  void movePtr(wasm::SymbolicAddress imm, Register dest) {
    BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
    append(wasm::SymbolicAccess(CodeOffset(off.getOffset()), imm));
  }
  void movePtr(ImmGCPtr imm, Register dest) {
    BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
    writeDataRelocation(imm, load);
  }

  void mov(ImmWord imm, Register dest) { movePtr(imm, dest); }
  void mov(ImmPtr imm, Register dest) { movePtr(imm, dest); }
  void mov(wasm::SymbolicAddress imm, Register dest) { movePtr(imm, dest); }
  void mov(Register src, Register dest) { movePtr(src, dest); }
  void mov(CodeLabel* label, Register dest);

  void move32(Imm32 imm, Register dest) {
    Mov(ARMRegister(dest, 32), (int64_t)imm.value);
  }
  void move32(Register src, Register dest) {
    Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
  }

  // Move a pointer using a literal pool, so that the pointer
  // may be easily patched or traced.
  // Returns the BufferOffset of the load instruction emitted.
  BufferOffset movePatchablePtr(ImmWord ptr, Register dest);
  BufferOffset movePatchablePtr(ImmPtr ptr, Register dest);

  void loadPtr(wasm::SymbolicAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    movePtr(address, scratch.asUnsized());
    Ldr(ARMRegister(dest, 64), MemOperand(scratch));
  }
  void loadPtr(AbsoluteAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized());
    Ldr(ARMRegister(dest, 64), MemOperand(scratch));
  }
  FaultingCodeOffset loadPtr(const Address& address, Register dest) {
    return Ldr(ARMRegister(dest, 64), MemOperand(address));
  }
  FaultingCodeOffset loadPtr(const BaseIndex& src, Register dest) {
    ARMRegister base = toARMRegister(src.base, 64);
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    ARMRegister dest64(dest, 64);
    ARMRegister index64(src.index, 64);

    if (src.offset) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch = temps.AcquireX();
      MOZ_ASSERT(!scratch.Is(base));
      MOZ_ASSERT(!scratch.Is(dest64));
      MOZ_ASSERT(!scratch.Is(index64));

      Add(scratch, base, Operand(int64_t(src.offset)));
      return Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale));
    }

    return Ldr(dest64, MemOperand(base, index64, vixl::LSL, scale));
  }
  void loadPrivate(const Address& src, Register dest);

  FaultingCodeOffset store8(Register src, const Address& address) {
    return Strb(ARMRegister(src, 32), toMemOperand(address));
  }
  void store8(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    move32(imm, scratch32.asUnsized());
    Strb(scratch32, toMemOperand(address));
  }
  FaultingCodeOffset store8(Register src, const BaseIndex& address) {
    return doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w);
  }
  void store8(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, Operand(imm.value));
    doBaseIndex(scratch32, address, vixl::STRB_w);
  }

  FaultingCodeOffset store16(Register src, const Address& address) {
    return Strh(ARMRegister(src, 32), toMemOperand(address));
  }
  void store16(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    move32(imm, scratch32.asUnsized());
    Strh(scratch32, toMemOperand(address));
  }
  FaultingCodeOffset store16(Register src, const BaseIndex& address) {
    return doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w);
  }
  void store16(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, Operand(imm.value));
    doBaseIndex(scratch32, address, vixl::STRH_w);
  }
  template <typename S, typename T>
  void store16Unaligned(const S& src, const T& dest) {
    store16(src, dest);
  }

  void storePtr(ImmWord imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    movePtr(imm, scratch);
    storePtr(scratch, address);
  }
  void storePtr(ImmPtr imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != address.base);
    Mov(scratch64, uint64_t(imm.value));
    Str(scratch64, toMemOperand(address));
  }
  void storePtr(ImmGCPtr imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    movePtr(imm, scratch);
    storePtr(scratch, address);
  }
  FaultingCodeOffset storePtr(Register src, const Address& address) {
    return Str(ARMRegister(src, 64), toMemOperand(address));
  }

  void storePtr(ImmWord imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != address.base);
    MOZ_ASSERT(scratch64.asUnsized() != address.index);
    Mov(scratch64, Operand(imm.value));
    doBaseIndex(scratch64, address, vixl::STR_x);
  }
  void storePtr(ImmGCPtr imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    MOZ_ASSERT(scratch != address.index);
    movePtr(imm, scratch);
    doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x);
  }
  FaultingCodeOffset storePtr(Register src, const BaseIndex& address) {
    return doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x);
  }

  void storePtr(Register src, AbsoluteAddress address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    Mov(scratch64, uint64_t(address.addr));
    Str(ARMRegister(src, 64), MemOperand(scratch64));
  }

  void store32(Register src, AbsoluteAddress address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    Mov(scratch64, uint64_t(address.addr));
    Str(ARMRegister(src, 32), MemOperand(scratch64));
  }
  void store32(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    Mov(scratch32, uint64_t(imm.value));
    Str(scratch32, toMemOperand(address));
  }
  FaultingCodeOffset store32(Register r, const Address& address) {
    return Str(ARMRegister(r, 32), toMemOperand(address));
  }
  void store32(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, imm.value);
    doBaseIndex(scratch32, address, vixl::STR_w);
  }
  FaultingCodeOffset store32(Register r, const BaseIndex& address) {
    return doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w);
  }

  template <typename S, typename T>
  void store32Unaligned(const S& src, const T& dest) {
    store32(src, dest);
  }
  FaultingCodeOffset store64(Register64 src, Address address) {
    return storePtr(src.reg, address);
  }

  FaultingCodeOffset store64(Register64 src, const BaseIndex& address) {
    return storePtr(src.reg, address);
  }

  void store64(Imm64 imm, const BaseIndex& address) {
    storePtr(ImmWord(imm.value), address);
  }

  void store64(Imm64 imm, const Address& address) {
    storePtr(ImmWord(imm.value), address);
  }

  template <typename S, typename T>
  void store64Unaligned(const S& src, const T& dest) {
    store64(src, dest);
  }

  // StackPointer manipulation.
  inline void addToStackPtr(Register src);
  inline void addToStackPtr(Imm32 imm);
  inline void addToStackPtr(const Address& src);
  inline void addStackPtrTo(Register dest);

  inline void subFromStackPtr(Register src);
  inline void subFromStackPtr(Imm32 imm);
  inline void subStackPtrFrom(Register dest);

  inline void andToStackPtr(Imm32 t);

  inline void moveToStackPtr(Register src);
  inline void moveStackPtrTo(Register dest);

  inline void loadStackPtr(const Address& src);
  inline void storeStackPtr(const Address& dest);

  inline void loadStackPtrFromPrivateValue(const Address& src);
  inline void storeStackPtrToPrivateValue(const Address& dest);

  // StackPointer testing functions.
  inline void branchTestStackPtr(Condition cond, Imm32 rhs, Label* label);
  inline void branchStackPtr(Condition cond, Register rhs, Label* label);
  inline void branchStackPtrRhs(Condition cond, Address lhs, Label* label);
  inline void branchStackPtrRhs(Condition cond, AbsoluteAddress lhs,
                                Label* label);

  void testPtr(Register lhs, Register rhs) {
    Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
  }
  void test32(Register lhs, Register rhs) {
    Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32)));
  }
  void test32(const Address& addr, Imm32 imm) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != addr.base);
    load32(addr, scratch32.asUnsized());
    Tst(scratch32, Operand(imm.value));
  }
  void test32(Register lhs, Imm32 rhs) {
    Tst(ARMRegister(lhs, 32), Operand(rhs.value));
  }
  void cmp32(Register lhs, Imm32 rhs) {
    Cmp(ARMRegister(lhs, 32), Operand(rhs.value));
  }
  void cmp32(Register a, Register b) {
    Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32)));
  }
  void cmp32(const Address& lhs, Imm32 rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
    Ldr(scratch32, toMemOperand(lhs));
    Cmp(scratch32, Operand(rhs.value));
  }
  void cmp32(const Address& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
    MOZ_ASSERT(scratch32.asUnsized() != rhs);
    Ldr(scratch32, toMemOperand(lhs));
    Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
  }

  void cmn32(Register lhs, Imm32 rhs) {
    Cmn(ARMRegister(lhs, 32), Operand(rhs.value));
  }

  void cmpPtr(Register lhs, Imm32 rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
  }
  void cmpPtr(Register lhs, ImmWord rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
  }
  void cmpPtr(Register lhs, ImmPtr rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
  }
  void cmpPtr(Register lhs, Imm64 rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
  }
  void cmpPtr(Register lhs, Register rhs) {
    Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
  }
  void cmpPtr(Register lhs, ImmGCPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != lhs);
    movePtr(rhs, scratch);
    cmpPtr(lhs, scratch);
  }

  void cmpPtr(const Address& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    MOZ_ASSERT(scratch64.asUnsized() != rhs);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
  }
  void cmpPtr(const Address& lhs, ImmWord rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(rhs.value));
  }
  void cmpPtr(const Address& lhs, ImmPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(uint64_t(rhs.value)));
  }
  void cmpPtr(const Address& lhs, ImmGCPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != lhs.base);
    loadPtr(lhs, scratch);
    cmpPtr(scratch, rhs);
  }

  FaultingCodeOffset loadDouble(const Address& src, FloatRegister dest) {
    return Ldr(ARMFPRegister(dest, 64), MemOperand(src));
  }
  FaultingCodeOffset loadDouble(const BaseIndex& src, FloatRegister dest) {
    ARMRegister base = toARMRegister(src.base, 64);
    ARMRegister index(src.index, 64);

    if (src.offset == 0) {
      return Ldr(ARMFPRegister(dest, 64),
                 MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
    }

    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != src.base);
    MOZ_ASSERT(scratch64.asUnsized() != src.index);

    Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
    return Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset));
  }

  FaultingCodeOffset loadFloat32(const Address& addr, FloatRegister dest) {
    return Ldr(ARMFPRegister(dest, 32), toMemOperand(addr));
  }
  FaultingCodeOffset loadFloat32(const BaseIndex& src, FloatRegister dest) {
    ARMRegister base = toARMRegister(src.base, 64);
    ARMRegister index(src.index, 64);
    if (src.offset == 0) {
      return Ldr(ARMFPRegister(dest, 32),
                 MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != src.base);
      MOZ_ASSERT(scratch64.asUnsized() != src.index);

      Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
      return Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
    }
  }

  FaultingCodeOffset loadFloat16(const Address& addr, FloatRegister dest,
                                 Register) {
    return Ldr(ARMFPRegister(dest, 16), toMemOperand(addr));
  }
  FaultingCodeOffset loadFloat16(const BaseIndex& src, FloatRegister dest,
                                 Register) {
    ARMRegister base = toARMRegister(src.base, 64);
    ARMRegister index(src.index, 64);
    if (src.offset == 0) {
      return Ldr(ARMFPRegister(dest, 16),
                 MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != src.base);
      MOZ_ASSERT(scratch64.asUnsized() != src.index);

      Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
      return Ldr(ARMFPRegister(dest, 16), MemOperand(scratch64, src.offset));
    }
  }

  void moveDouble(FloatRegister src, FloatRegister dest) {
    fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
  }
  void zeroDouble(FloatRegister reg) {
    fmov(ARMFPRegister(reg, 64), vixl::xzr);
  }
  void zeroFloat32(FloatRegister reg) {
    fmov(ARMFPRegister(reg, 32), vixl::wzr);
  }

  void moveFloat32(FloatRegister src, FloatRegister dest) {
    fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
  }

  void moveSimd128(FloatRegister src, FloatRegister dest) {
    fmov(ARMFPRegister(dest, 128), ARMFPRegister(src, 128));
  }

  void splitSignExtTag(const ValueOperand& operand, Register dest) {
    splitSignExtTag(operand.valueReg(), dest);
  }
  void splitSignExtTag(const Address& operand, Register dest) {
    loadPtr(operand, dest);
    splitSignExtTag(dest, dest);
  }
  void splitSignExtTag(const BaseIndex& operand, Register dest) {
    loadPtr(operand, dest);
    splitSignExtTag(dest, dest);
  }

  // Extracts the tag of a value and places it in tag.
  inline void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
  void cmpTag(const ValueOperand& operand, ImmTag tag) { MOZ_CRASH("cmpTag"); }

  FaultingCodeOffset load32(const Address& address, Register dest) {
    return Ldr(ARMRegister(dest, 32), toMemOperand(address));
  }
  FaultingCodeOffset load32(const BaseIndex& src, Register dest) {
    return doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w);
  }
  void load32(AbsoluteAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized());
    ldr(ARMRegister(dest, 32), MemOperand(scratch64));
  }
  template <typename S>
  void load32Unaligned(const S& src, Register dest) {
    load32(src, dest);
  }
  FaultingCodeOffset load64(const Address& address, Register64 dest) {
    return loadPtr(address, dest.reg);
  }
  FaultingCodeOffset load64(const BaseIndex& address, Register64 dest) {
    return loadPtr(address, dest.reg);
  }
  template <typename S>
  void load64Unaligned(const S& src, Register64 dest) {
    load64(src, dest);
  }

  FaultingCodeOffset load8SignExtend(const Address& address, Register dest) {
    return Ldrsb(ARMRegister(dest, 32), toMemOperand(address));
  }
  FaultingCodeOffset load8SignExtend(const BaseIndex& src, Register dest) {
    return doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w);
  }

  FaultingCodeOffset load8ZeroExtend(const Address& address, Register dest) {
    return Ldrb(ARMRegister(dest, 32), toMemOperand(address));
  }
  FaultingCodeOffset load8ZeroExtend(const BaseIndex& src, Register dest) {
    return doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w);
  }

  FaultingCodeOffset load16SignExtend(const Address& address, Register dest) {
    return Ldrsh(ARMRegister(dest, 32), toMemOperand(address));
  }
  FaultingCodeOffset load16SignExtend(const BaseIndex& src, Register dest) {
    return doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w);
  }
  template <typename S>
  void load16UnalignedSignExtend(const S& src, Register dest) {
    load16SignExtend(src, dest);
  }

  FaultingCodeOffset load16ZeroExtend(const Address& address, Register dest) {
    return Ldrh(ARMRegister(dest, 32), toMemOperand(address));
  }
  FaultingCodeOffset load16ZeroExtend(const BaseIndex& src, Register dest) {
    return doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w);
  }
  template <typename S>
  void load16UnalignedZeroExtend(const S& src, Register dest) {
    load16ZeroExtend(src, dest);
  }

  void adds32(Register src, Register dest) {
    Adds(ARMRegister(dest, 32), ARMRegister(dest, 32),
         Operand(ARMRegister(src, 32)));
  }
  void adds32(Imm32 imm, Register dest) {
    Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
  }
  void adds32(Imm32 imm, const Address& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != dest.base);

    Ldr(scratch32, toMemOperand(dest));
    Adds(scratch32, scratch32, Operand(imm.value));
    Str(scratch32, toMemOperand(dest));
  }
  void adds64(Imm32 imm, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void adds64(ImmWord imm, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void adds64(Register src, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64),
         Operand(ARMRegister(src, 64)));
  }

  void subs32(Imm32 imm, Register dest) {
    Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
  }
  void subs32(Register src, Register dest) {
    Subs(ARMRegister(dest, 32), ARMRegister(dest, 32),
         Operand(ARMRegister(src, 32)));
  }
  void subs64(Imm32 imm, Register dest) {
    Subs(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void subs64(Register src, Register dest) {
    Subs(ARMRegister(dest, 64), ARMRegister(dest, 64),
         Operand(ARMRegister(src, 64)));
  }

  void negs32(Register reg) {
    Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
  }
  void negs64(Register reg) {
    Negs(ARMRegister(reg, 64), Operand(ARMRegister(reg, 64)));
  }

  void minMax32(Register lhs, Register rhs, Register dest, bool isMax);
  void minMax32(Register lhs, Imm32 rhs, Register dest, bool isMax);

  void minMaxPtr(Register lhs, Register rhs, Register dest, bool isMax);
  void minMaxPtr(Register lhs, ImmWord rhs, Register dest, bool isMax);

  BufferOffset ret() {
    pop(lr);
    BufferOffset ret(currentOffset());
    abiret();
    return ret;
  }

  void retn(Imm32 n) {
    vixl::UseScratchRegisterScope temps(this);
    const auto scratch = temps.AcquireX();
    // scratch <- [sp]; sp += n; ret scratch
    Ldr(scratch,
        MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex));
    syncStackPtr();  // SP is always used to transmit the stack between calls.
    Ret(scratch);
  }
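  // Illustrative sketch: assuming n == 16, that the PSP (x28) is the active
  // stack pointer, and that x16 is the acquired scratch, retn(Imm32(16))
  // emits roughly:
  //
  //   ldr x16, [x28], #16   ; pop the return address, then bump the pseudo
  //                         ; stack pointer by 16 in one instruction
  //   mov sp, x28           ; syncStackPtr()
  //   ret x16
  //
  // The post-index addressing mode folds the load and the stack adjustment
  // together.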

  void j(Condition cond, Label* dest) { B(dest, cond); }

  void branch(Condition cond, Label* label) { B(label, cond); }
  void branch(JitCode* target) {
    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/async/debugger-reject-after-fulfill.js
    // * Removing the sync causes no failures in all of jit-tests.
    //
    // Also see jump() above. This is used only to implement jump(JitCode*)
    // and only for JS, it appears.
    syncStackPtr();
    BufferOffset loc =
        b(-1,
          LabelDoc());  // The jump target will be patched by executableCopy().
    addPendingJump(loc, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }

  void compareDouble(FloatRegister lhs, FloatRegister rhs) {
    Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
  }

  void compareFloat(FloatRegister lhs, FloatRegister rhs) {
    Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
  }

  void compareSimd128Int(Assembler::Condition cond, ARMFPRegister dest,
                         ARMFPRegister lhs, ARMFPRegister rhs);
  void compareSimd128Float(Assembler::Condition cond, ARMFPRegister dest,
                           ARMFPRegister lhs, ARMFPRegister rhs);
  void rightShiftInt8x16(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt16x8(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt32x4(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt64x2(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);

  void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister) {
    Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64));
  }
  void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
    boxValue(type, src, dest.valueReg());
  }
  void boxNonDouble(Register type, Register src, const ValueOperand& dest) {
    boxValue(type, src, dest.valueReg());
  }

  // Note that the |dest| register here may be ScratchReg, so we shouldn't
  // use it.
  void unboxInt32(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  void unboxInt32(const Address& src, Register dest) { load32(src, dest); }
  void unboxInt32(const BaseIndex& src, Register dest) { load32(src, dest); }

  template <typename T>
  void unboxDouble(const T& src, FloatRegister dest) {
    loadDouble(src, dest);
  }
  void unboxDouble(const ValueOperand& src, FloatRegister dest) {
    Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64));
  }

  void unboxBoolean(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  void unboxBoolean(const Address& src, Register dest) { load32(src, dest); }
  void unboxBoolean(const BaseIndex& src, Register dest) { load32(src, dest); }

  void unboxMagic(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  void unboxNonDouble(const ValueOperand& src, Register dest,
                      JSValueType type) {
    unboxNonDouble(src.valueReg(), dest, type);
  }

  template <typename T>
  void unboxNonDouble(T src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      load32(src, dest);
      return;
    }
    loadPtr(src, dest);
    unboxNonDouble(dest, dest, type);
  }

  void unboxNonDouble(Register src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      move32(src, dest);
      return;
    }
    Eor(ARMRegister(dest, 64), ARMRegister(src, 64),
        Operand(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
  }
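  // Illustrative sketch: for pointer-typed Values the payload occupies only
  // the low bits below JSVAL_TAG_SHIFT, so the boxed word is exactly
  // (shifted_tag | payload). XORing with the same shifted tag therefore
  // strips the tag without needing a mask, e.g. for an object pointer p
  // whose high tag bits are zero:
  //
  //   boxed   = JSVAL_TYPE_TO_SHIFTED_TAG(JSVAL_TYPE_OBJECT) ^ p
  //   unboxed = boxed ^ JSVAL_TYPE_TO_SHIFTED_TAG(JSVAL_TYPE_OBJECT) == p
  //
  // which is why a single EOR suffices above.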
  void notBoolean(const ValueOperand& val) {
    ARMRegister r(val.valueReg(), 64);
    eor(r, r, Operand(1));
  }
  void unboxObject(const ValueOperand& src, Register dest) {
    unboxNonDouble(src.valueReg(), dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(Register src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const Address& src, Register dest) {
    loadPtr(src, dest);
    unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x);
    unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
  }

  // See comment in MacroAssembler-x64.h.
  void unboxGCThingForGCBarrier(const Address& src, Register dest) {
    loadPtr(src, dest);
    And(ARMRegister(dest, 64), ARMRegister(dest, 64),
        Operand(JS::detail::ValueGCThingPayloadMask));
  }
  void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
    And(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64),
        Operand(JS::detail::ValueGCThingPayloadMask));
  }

  void unboxWasmAnyRefGCThingForGCBarrier(const Address& src, Register dest) {
    loadPtr(src, dest);
    And(ARMRegister(dest, 64), ARMRegister(dest, 64),
        Operand(wasm::AnyRef::GCThingMask));
  }

  // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
  void getGCThingValueChunk(Register src, Register dest) {
    And(ARMRegister(dest, 64), ARMRegister(src, 64),
        Operand(JS::detail::ValueGCThingPayloadChunkMask));
  }
  void getGCThingValueChunk(const Address& src, Register dest) {
    loadPtr(src, dest);
    And(ARMRegister(dest, 64), ARMRegister(dest, 64),
        Operand(JS::detail::ValueGCThingPayloadChunkMask));
  }
  void getGCThingValueChunk(const ValueOperand& src, Register dest) {
    And(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64),
        Operand(JS::detail::ValueGCThingPayloadChunkMask));
  }

  void getWasmAnyRefGCThingChunk(Register src, Register dest) {
    And(ARMRegister(dest, 64), ARMRegister(src, 64),
        Operand(wasm::AnyRef::GCThingChunkMask));
  }

  inline void unboxValue(const ValueOperand& src, AnyRegister dest,
                         JSValueType type);

  void unboxString(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
  }
  void unboxString(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
  }
  void unboxSymbol(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxSymbol(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxBigInt(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
  }
  void unboxBigInt(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
  }

  void loadConstantDouble(double d, FloatRegister dest) {
    ARMFPRegister r(dest, 64);
    if (d == 0.0) {
      // Clang11 does movi for 0 and movi+fneg for -0, and this seems like a
      // good implementation-independent strategy as it avoids any gpr->fpr
      // moves or memory traffic.
      Movi(r, 0);
      if (std::signbit(d)) {
        Fneg(r, r);
      }
    } else {
      Fmov(r, d);
    }
  }
  void loadConstantFloat32(float f, FloatRegister dest) {
    ARMFPRegister r(dest, 32);
    if (f == 0.0) {
      // See comments above. There's not a movi variant for a single
      // register, so clear the double.
      Movi(ARMFPRegister(dest, 64), 0);
      if (std::signbit(f)) {
        Fneg(r, r);
      }
    } else {
      Fmov(r, f);
    }
  }

  void cmpTag(Register tag, ImmTag ref) {
    // As opposed to other architectures, splitTag is replaced by
    // splitSignExtTag, which extracts the tag with a sign extension. The
    // reason is that cmp32 with a tag value would be too large to fit as a
    // 12-bit immediate value, and would require the VIXL macro assembler to
    // add an extra instruction and an extra scratch register to load the
    // Tag value.
    //
    // Instead, we compare with the negative value of the sign-extended tag
    // using the CMN instruction. The sign-extended tag is expected to be a
    // negative value, therefore the negative of the sign-extended tag is
    // expected to be near 0 and fit in 12 bits.
    //
    // Ignoring the sign extension, the logic is the following:
    //
    //   CMP32(Reg, Tag) = Reg - Tag
    //                   = Reg + (-Tag)
    //                   = CMN32(Reg, -Tag)
    //
    // Note: testGCThing, testPrimitive and testNumber, which are checking
    // for inequalities, should use unsigned comparisons (as done by default)
    // in order to keep the same relation order after the sign extension,
    // i.e. using Above or Below, which are based on the carry flag.
    uint32_t hiShift = JSVAL_TAG_SHIFT - 32;
    int32_t seTag = int32_t(ref.value);
    seTag = (seTag << hiShift) >> hiShift;
    MOZ_ASSERT(seTag < 0);
    int32_t negTag = -seTag;
    // Check that negTag is encodable as a 12-bit immediate value.
    MOZ_ASSERT((negTag & ~0xFFF) == 0);
    cmn32(tag, Imm32(negTag));
  }
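  // Worked example (values assumed from the current punboxing layout, where
  // JSVAL_TAG_SHIFT == 47 and JSVAL_TAG_INT32 == 0x1FFF1): hiShift is 15, so
  // sign-extending the 17-bit tag gives seTag == -15 and negTag == 15, which
  // trivially fits in the 12-bit immediate of CMN. The comparison
  //
  //   cmn w0, #15   ; flags for w0 - (-15)
  //
  // then sets Z exactly when the sign-extended tag in w0 is that of an int32
  // Value.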

  // Register-based tests.
  Condition testUndefined(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_UNDEFINED));
    return cond;
  }
  Condition testInt32(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_INT32));
    return cond;
  }
  Condition testBoolean(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_BOOLEAN));
    return cond;
  }
  Condition testNull(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_NULL));
    return cond;
  }
  Condition testString(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_STRING));
    return cond;
  }
  Condition testSymbol(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_SYMBOL));
    return cond;
  }
  Condition testBigInt(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_BIGINT));
    return cond;
  }
  Condition testObject(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_OBJECT));
    return cond;
  }
  Condition testDouble(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? BelowOrEqual : Above;
  }
  Condition testNumber(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueUpperInclNumberTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? BelowOrEqual : Above;
  }
  Condition testGCThing(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? AboveOrEqual : Below;
  }
  Condition testMagic(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_MAGIC));
    return cond;
  }
  Condition testPrimitive(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? Below : AboveOrEqual;
  }
  Condition testError(Condition cond, Register tag) {
    return testMagic(cond, tag);
  }

  // ValueOperand-based tests.
  Condition testInt32(Condition cond, const ValueOperand& value) {
    // The incoming ValueOperand may use scratch registers.
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != value.valueReg());

    splitSignExtTag(value, scratch);
    return testInt32(cond, scratch);
  }

  // ValueOperand-based tests.
  Condition testInt32(Condition cond, const ValueOperand& value) {
    // The incoming ValueOperand may use scratch registers.
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != value.valueReg());

    splitSignExtTag(value, scratch);
    return testInt32(cond, scratch);
  }
  Condition testBoolean(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testDouble(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testDouble(cond, scratch);
  }
  Condition testNull(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testNull(cond, scratch);
  }
  Condition testUndefined(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testString(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testBigInt(cond, scratch);
  }
  Condition testObject(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testObject(cond, scratch);
  }
  Condition testNumber(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testNumber(cond, scratch);
  }
  Condition testPrimitive(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testPrimitive(cond, scratch);
  }
  Condition testMagic(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testMagic(cond, scratch);
  }
  Condition testGCThing(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testGCThing(cond, scratch);
  }
  Condition testError(Condition cond, const ValueOperand& value) {
    return testMagic(cond, value);
  }

  // Address-based tests.
  Condition testGCThing(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testGCThing(cond, scratch);
  }
  Condition testMagic(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testMagic(cond, scratch);
  }
  Condition testInt32(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testInt32(cond, scratch);
  }
  Condition testDouble(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testDouble(cond, scratch);
  }
  Condition testBoolean(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testNull(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testNull(cond, scratch);
  }
  Condition testUndefined(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testString(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testBigInt(cond, scratch);
  }
  Condition testObject(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testObject(cond, scratch);
  }
  Condition testNumber(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testNumber(cond, scratch);
  }

  // BaseIndex-based tests.
  Condition testUndefined(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testNull(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testNull(cond, scratch);
  }
  Condition testBoolean(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testString(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testBigInt(cond, scratch);
  }
  Condition testInt32(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testInt32(cond, scratch);
  }
  Condition testObject(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testObject(cond, scratch);
  }
  Condition testDouble(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testDouble(cond, scratch);
  }
  Condition testMagic(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testMagic(cond, scratch);
  }
  Condition testGCThing(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testGCThing(cond, scratch);
  }

  Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
    ARMRegister payload32(operand.valueReg(), 32);
    Tst(payload32, payload32);
    return truthy ? NonZero : Zero;
  }

  Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) {
    ARMRegister payload32(operand.valueReg(), 32);
    Tst(payload32, payload32);
    return truthy ? NonZero : Zero;
  }

  Condition testBigIntTruthy(bool truthy, const ValueOperand& value);
  Condition testStringTruthy(bool truthy, const ValueOperand& value);

  void int32OrDouble(Register src, ARMFPRegister dest) {
    Label isInt32;
    Label join;
    testInt32(Equal, ValueOperand(src));
    B(&isInt32, Equal);
    // It is a double: move the bits as-is.
    Fmov(dest, ARMRegister(src, 64));
    B(&join);
    bind(&isInt32);
    // It is an int32: convert while moving.
    Scvtf(dest, ARMRegister(src, 32));
    bind(&join);
  }
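
  // A note on int32OrDouble above: it relies on the NaN-boxing layout. When
  // the boxed value holds a double, the raw 64-bit word is the IEEE-754
  // double itself, so Fmov simply reinterprets the bits in the FP register;
  // when it holds an int32, the payload sits in the low 32 bits and Scvtf
  // converts it. Callers must guarantee that the value is one of the two;
  // loadUnboxedValue below takes this path only for float destinations,
  // where the value is expected to be a number.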
  void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != address.base);
      Ldr(scratch64, toMemOperand(address));
      int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
    } else {
      unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
    }
  }

  void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != address.base);
      MOZ_ASSERT(scratch64.asUnsized() != address.index);
      doBaseIndex(scratch64, address, vixl::LDR_x);
      int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
    } else {
      unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
    }
  }

  // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
  CodeOffset toggledJump(Label* label) {
    BufferOffset offset = b(label, Always);
    CodeOffset ret(offset.getOffset());
    return ret;
  }

  // load: offset to the load instruction obtained by movePatchablePtr().
  void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
    // Raw GC pointer relocations and Value relocations both end up in
    // Assembler::TraceDataRelocations.
    if (ptr.value) {
      if (gc::IsInsideNursery(ptr.value)) {
        embedsNurseryPointers_ = true;
      }
      dataRelocations_.writeUnsigned(load.getOffset());
    }
  }
  void writeDataRelocation(const Value& val, BufferOffset load) {
    MOZ_ASSERT(val.isGCThing(), "only called for gc-things");

    // Raw GC pointer relocations and Value relocations both end up in
    // Assembler::TraceDataRelocations.
    gc::Cell* cell = val.toGCThing();
    if (cell && gc::IsInsideNursery(cell)) {
      embedsNurseryPointers_ = true;
    }
    dataRelocations_.writeUnsigned(load.getOffset());
  }

  void computeEffectiveAddress(const Address& address, Register dest) {
    Add(ARMRegister(dest, 64), toARMRegister(address.base, 64),
        Operand(address.offset));
  }
  void computeEffectiveAddress(const Address& address, RegisterOrSP dest) {
    Add(toARMRegister(dest, 64), toARMRegister(address.base, 64),
        Operand(address.offset));
  }
  void computeEffectiveAddress(const BaseIndex& address, Register dest) {
    ARMRegister dest64(dest, 64);
    ARMRegister base64 = toARMRegister(address.base, 64);
    ARMRegister index64(address.index, 64);

    Add(dest64, base64, Operand(index64, vixl::LSL, address.scale));
    if (address.offset) {
      Add(dest64, dest64, Operand(address.offset));
    }
  }

 public:
  void handleFailureWithHandlerTail(Label* profilerExitTail,
                                    Label* bailoutTail,
                                    uint32_t* returnValueCheckOffset);

  void profilerEnterFrame(Register framePtr, Register scratch);
  void profilerExitFrame();

  void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
                    Register ptr, AnyRegister outany, Register64 out64);
  void wasmLoadImpl(const wasm::MemoryAccessDesc& access, MemOperand srcAddr,
                    AnyRegister outany, Register64 out64);
  void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valany,
                     Register64 val64, Register memoryBase, Register ptr);
  void wasmStoreImpl(const wasm::MemoryAccessDesc& access, MemOperand destAddr,
                     AnyRegister valany, Register64 val64);
  // The complete address is in `address`, and `access` is used for its type
  // attributes only; its `offset` is ignored.
  void wasmLoadAbsolute(const wasm::MemoryAccessDesc& access,
                        Register memoryBase, uint64_t address, AnyRegister out,
                        Register64 out64);
  void wasmStoreAbsolute(const wasm::MemoryAccessDesc& access,
                         AnyRegister value, Register64 value64,
                         Register memoryBase, uint64_t address);

  // Emit a BLR or NOP instruction. ToggleCall can be used to patch
  // this instruction.
  CodeOffset toggledCall(JitCode* target, bool enabled) {
    // The returned offset must be to the first instruction generated,
    // for the debugger to match offset with Baseline's pcMappingEntries_.
    BufferOffset offset = nextOffset();

    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/cacheir/bug1448136.js
    // * Removing the sync causes no failures in all of jit-tests.
    syncStackPtr();

    BufferOffset loadOffset;
    {
      vixl::UseScratchRegisterScope temps(this);

      // The register used for the load is hardcoded, so that ToggleCall
      // can patch in the branch instruction easily. This could be changed,
      // but then ToggleCall must read the target register from the load.
      MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64));
      temps.Exclude(ScratchReg2_64);

      loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));

      if (enabled) {
        blr(ScratchReg2_64);
      } else {
        nop();
      }
    }

    addPendingJump(loadOffset, ImmPtr(target->raw()), RelocationKind::JITCODE);
    CodeOffset ret(offset.getOffset());
    return ret;
  }
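
  // Illustrative shape of the sequence emitted by toggledCall (constant
  // pools may be flushed between any two of these instructions; see
  // ToggledCallSize below):
  //
  //   <sync stack pointer>   // see the comment in toggledCall
  //   ldr  ScratchReg2, [constant-pool entry holding target->raw()]
  //   blr  ScratchReg2       // or nop when !enabled
  //
  // ToggleCall patches the final instruction between BLR and NOP.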

  static size_t ToggledCallSize(uint8_t* code) {
    // The call site is a sequence of two or three instructions:
    //
    //   syncStack (optional)
    //   ldr/adr
    //   nop/blr
    //
    // Flushed constant pools can appear before any of the instructions.

    const Instruction* cur = (const Instruction*)code;
    cur = cur->skipPool();
    if (cur->IsStackPtrSync()) cur = cur->NextInstruction();
    cur = cur->skipPool();
    cur = cur->NextInstruction();  // LDR/ADR
    cur = cur->skipPool();
    cur = cur->NextInstruction();  // NOP/BLR
    return (uint8_t*)cur - code;
  }

  void checkARMRegAlignment(const ARMRegister& reg) {
#ifdef DEBUG
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT_IF(!reg.IsSP(), scratch64.asUnsized() != reg.asUnsized());
    Label aligned;
    Mov(scratch64, reg);
    Tst(scratch64, Operand(StackAlignment - 1));
    B(Zero, &aligned);
    breakpoint();
    bind(&aligned);
    Mov(scratch64, vixl::xzr);  // Clear the scratch register for sanity.
#endif
  }

  void checkStackAlignment() {
#ifdef DEBUG
    checkARMRegAlignment(GetStackPointer64());

    // If another register is being used to track pushes, check sp explicitly.
    if (!GetStackPointer64().Is(vixl::sp)) {
      checkARMRegAlignment(vixl::sp);
    }
#endif
  }

  void abiret() {
    // SP is always used to transmit the stack pointer between calls.
    syncStackPtr();
    vixl::MacroAssembler::Ret(vixl::lr);
  }

  void incrementInt32Value(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != addr.base);

    load32(addr, scratch32.asUnsized());
    Add(scratch32, scratch32, Operand(1));
    store32(scratch32.asUnsized(), addr);
  }

  void breakpoint();

  // Emits a simulator directive to save the current sp on an internal stack.
  void simulatorMarkSP() {
#ifdef JS_SIMULATOR_ARM64
    svc(vixl::kMarkStackPointer);
#endif
  }

  // Emits a simulator directive to pop from its internal stack
  // and assert that the value is equal to the current sp.
  void simulatorCheckSP() {
#ifdef JS_SIMULATOR_ARM64
    svc(vixl::kCheckStackPointer);
#endif
  }

 protected:
  bool buildOOLFakeExitFrame(void* fakeReturnAddr);
};

// See documentation for ScratchTagScope and ScratchTagScopeRelease in
// MacroAssembler-x64.h.
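//
// Illustrative usage sketch (hypothetical caller code, following the pattern
// documented in MacroAssembler-x64.h): the tag scratch must be released
// around code that may itself acquire scratch registers, and reacquired
// before the tag is used again.
//
//   ScratchTagScope tag(masm, value);
//   masm.splitTagForTest(value, tag);
//   // ... branch on `tag` ...
//   {
//     ScratchTagScopeRelease _(&tag);
//     // `tag` must not be used here; its scratch register is free.
//   }
//   // `tag` is valid again once the release scope ends.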

class ScratchTagScope {
  vixl::UseScratchRegisterScope temps_;
  ARMRegister scratch64_;
  bool owned_;
  mozilla::DebugOnly<bool> released_;

 public:
  ScratchTagScope(MacroAssemblerCompat& masm, const ValueOperand&)
      : temps_(&masm), owned_(true), released_(false) {
    scratch64_ = temps_.AcquireX();
  }

  operator Register() {
    MOZ_ASSERT(!released_);
    return scratch64_.asUnsized();
  }

  void release() {
    MOZ_ASSERT(!released_);
    released_ = true;
    if (owned_) {
      temps_.Release(scratch64_);
      owned_ = false;
    }
  }

  void reacquire() {
    MOZ_ASSERT(released_);
    released_ = false;
    if (!owned_) {
      scratch64_ = temps_.AcquireX();
      owned_ = true;
    }
  }
};

class ScratchTagScopeRelease {
  ScratchTagScope* ts_;

 public:
  explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
    ts_->release();
  }
  ~ScratchTagScopeRelease() { ts_->reacquire(); }
};

inline void MacroAssemblerCompat::splitTagForTest(const ValueOperand& value,
                                                  ScratchTagScope& tag) {
  splitSignExtTag(value, tag);
}

using MacroAssemblerSpecific = MacroAssemblerCompat;

}  // namespace jit
}  // namespace js

#endif  // jit_arm64_MacroAssembler_arm64_h