MacroAssembler-loong64.h (38411B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_loong64_MacroAssembler_loong64_h
#define jit_loong64_MacroAssembler_loong64_h

#include "jit/loong64/Assembler-loong64.h"
#include "jit/MoveResolver.h"
#include "wasm/WasmBuiltins.h"

namespace js {
namespace jit {

// Width, in bits, of a memory access emitted by ma_load/ma_store.
enum LoadStoreSize {
  SizeByte = 8,
  SizeHalfWord = 16,
  SizeWord = 32,
  SizeDouble = 64
};

// Whether a sub-word load zero- or sign-extends into the full register.
enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 };

// ShortJump emits a single PC-relative branch; LongJump emits a sequence
// that can reach targets beyond the short branch range.
enum JumpKind { LongJump = 0, ShortJump = 1 };

// Register conventionally used to hold an indirect call target.
static Register CallReg = t8;

// Width of the patchable immediate sequence emitted by ma_liPatchable:
// Li48 materializes a 48-bit value, Li64 a full 64-bit one.
enum LiFlags {
  Li64 = 0,
  Li48 = 1,
};

// A JS value type tag pre-shifted into tag position, as a 64-bit immediate.
struct ImmShiftedTag : public ImmWord {
  explicit ImmShiftedTag(JSValueType type)
      : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
  }
};

// An (unshifted) JS value tag as a 32-bit immediate.
struct ImmTag : public Imm32 {
  explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
};

// Scale used to index arrays of JS::Value (8-byte elements).
static const int defaultShift = 3;
static_assert(1 << defaultShift == sizeof(JS::Value),
              "The defaultShift is wrong");

// See documentation for ScratchTagScope and ScratchTagScopeRelease in
// MacroAssembler-x64.h.

// Holds a scratch register for value-tag manipulation. The register can be
// temporarily handed back to the scratch pool (release/reacquire) around
// code that needs its own scratch registers.
class ScratchTagScope {
  UseScratchRegisterScope temps_;
  Register scratch_;
  bool owned_;                         // do we currently hold scratch_?
  mozilla::DebugOnly<bool> released_;  // debug-only use-after-release guard

 public:
  ScratchTagScope(Assembler& masm, const ValueOperand&)
      : temps_(masm), owned_(true), released_(false) {
    scratch_ = temps_.Acquire();
  }

  // The register must not be used while released.
  operator Register() {
    MOZ_ASSERT(!released_);
    return scratch_;
  }

  // Return the register to the scratch pool so intervening code may use it.
  void release() {
    MOZ_ASSERT(!released_);
    released_ = true;
    if (owned_) {
      temps_.Release(scratch_);
      owned_ = false;
    }
  }

  // Re-acquire a scratch register after release(); note the register
  // obtained here may differ from the one held before release().
  void reacquire() {
    MOZ_ASSERT(released_);
    released_ = false;
    if (!owned_) {
      scratch_ = temps_.Acquire();
      owned_ = true;
    }
  }
};

// RAII helper: releases the scope's scratch register on construction and
// reacquires it on destruction.
class ScratchTagScopeRelease {
  ScratchTagScope* ts_;

 public:
  explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
    ts_->release();
  }
  ~ScratchTagScopeRelease() { ts_->reacquire(); }
};

class MacroAssemblerLOONG64 : public Assembler {
 protected:
  // Perform a downcast. Should be removed by Bug 996602.
  MacroAssembler& asMasm();
  const MacroAssembler& asMasm() const;

  // Materialize a comparison result into rd; returns the (possibly
  // simplified) condition the caller should branch on.
  Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
  Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);

  void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs,
                            FloatRegister rhs, DoubleCondition c,
                            FPConditionBit fcc = FCC0);

 public:
  // Load an immediate (or a label's address) into a register.
  void ma_li(Register dest, CodeLabel* label);
  void ma_li(Register dest, ImmWord imm);
  // Patchable forms: emit a fixed-length sequence so the immediate can be
  // rewritten after assembly.
  void ma_liPatchable(Register dest, ImmPtr imm);
  void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);

  // load
  FaultingCodeOffset ma_ld_b(Register dest, Address address);
  FaultingCodeOffset ma_ld_h(Register dest, Address address);
  FaultingCodeOffset ma_ld_w(Register dest, Address address);
  FaultingCodeOffset ma_ld_d(Register dest, Address address);
  FaultingCodeOffset ma_ld_bu(Register dest, Address address);
  FaultingCodeOffset ma_ld_hu(Register dest, Address address);
  FaultingCodeOffset ma_ld_wu(Register dest, Address address);
  FaultingCodeOffset ma_load(Register dest, Address address,
                             LoadStoreSize size = SizeWord,
                             LoadStoreExtension extension = SignExtend);

  // store
  FaultingCodeOffset ma_st_b(Register src, Address address);
  FaultingCodeOffset ma_st_h(Register src, Address address);
  FaultingCodeOffset ma_st_w(Register src, Address address);
  FaultingCodeOffset ma_st_d(Register src, Address address);
  FaultingCodeOffset ma_store(Register data, Address address,
                              LoadStoreSize size = SizeWord,
                              LoadStoreExtension extension = SignExtend);

  // arithmetic based ops
  // add
  void ma_add_d(Register rd, Register rj, Imm32 imm);
  void ma_add_d(Register rd, Register rj, ImmWord imm);
  // The *TestOverflow/*TestCarry/*TestSigned variants branch to the given
  // label when the corresponding condition occurs on the result.
  void ma_add32TestOverflow(Register rd, Register rj, Register rk,
                            Label* overflow);
  void ma_add32TestOverflow(Register rd, Register rj, Imm32 imm,
                            Label* overflow);
  void ma_addPtrTestOverflow(Register rd, Register rj, Register rk,
                             Label* overflow);
  void ma_addPtrTestOverflow(Register rd, Register rj, Imm32 imm,
                             Label* overflow);
  void ma_addPtrTestOverflow(Register rd, Register rj, ImmWord imm,
                             Label* overflow);
  void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Register rk,
                          Label* overflow);
  void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
                          Label* overflow);
  void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, ImmWord imm,
                          Label* overflow);
  void ma_addPtrTestSigned(Condition cond, Register rd, Register rj,
                           Register rk, Label* taken);
  void ma_addPtrTestSigned(Condition cond, Register rd, Register rj, Imm32 imm,
                           Label* taken);
  void ma_addPtrTestSigned(Condition cond, Register rd, Register rj,
                           ImmWord imm, Label* taken);

  // subtract
  void ma_sub_d(Register rd, Register rj, Imm32 imm);
  void ma_sub_d(Register rd, Register rj, ImmWord imm);
  void ma_sub32TestOverflow(Register rd, Register rj, Register rk,
                            Label* overflow);
  void ma_subPtrTestOverflow(Register rd, Register rj, Register rk,
                             Label* overflow);
  void ma_subPtrTestOverflow(Register rd, Register rj, Imm32 imm,
                             Label* overflow);

  // multiplies. For now, there are only few that we care about.
  void ma_mul_d(Register rd, Register rj, Imm32 imm);
  void ma_mul_d(Register rd, Register rj, ImmWord imm);
  void ma_mulh_d(Register rd, Register rj, Imm32 imm);
  void ma_mulPtrTestOverflow(Register rd, Register rj, Register rk,
                             Label* overflow);

  // stack
  void ma_pop(Register r);
  void ma_push(Register r);

  void branchWithCode(InstImm code, Label* label, JumpKind jumpKind,
                      Register scratch = Register::Invalid());
  // branches when done from within la-specific code
  void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
            JumpKind jumpKind = LongJump);
  void ma_b(Register lhs, Address addr, Label* l, Condition c,
            JumpKind jumpKind = LongJump);
  void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
            JumpKind jumpKind = LongJump);
  void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
            JumpKind jumpKind = LongJump);
  // Load the 64-bit value at addr and branch on its comparison with rhs.
  void ma_b(Address addr, Register rhs, Label* l, Condition c,
            JumpKind jumpKind = LongJump) {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(rhs != scratch);
    ma_ld_d(scratch, addr);
    ma_b(scratch, rhs, l, c, jumpKind);
  }

  void ma_bl(Label* l);

  // fp instructions
  void ma_lid(FloatRegister dest, double value);

  void ma_mv(FloatRegister src, ValueOperand dest);
  void ma_mv(ValueOperand src, FloatRegister dest);

  FaultingCodeOffset ma_fld_s(FloatRegister ft, Address address);
  FaultingCodeOffset ma_fld_d(FloatRegister ft, Address address);
  FaultingCodeOffset ma_fst_d(FloatRegister ft, Address address);
  FaultingCodeOffset ma_fst_s(FloatRegister ft, Address address);

  void ma_pop(FloatRegister f);
  void ma_push(FloatRegister f);

  // dst = (lhs <c> imm) ? 1 : 0
  void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
  void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
  void ma_cmp_set(Register dst, Register lhs, ImmGCPtr imm, Condition c);
225 void ma_cmp_set(Register dst, Address address, Register rhs, Condition c); 226 void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c); 227 void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c); 228 229 void moveIfZero(Register dst, Register src, Register cond) { 230 UseScratchRegisterScope temps(*this); 231 Register scratch = temps.Acquire(); 232 MOZ_ASSERT(dst != scratch && cond != scratch); 233 as_masknez(scratch, src, cond); 234 as_maskeqz(dst, dst, cond); 235 as_or(dst, dst, scratch); 236 } 237 void moveIfNotZero(Register dst, Register src, Register cond) { 238 UseScratchRegisterScope temps(*this); 239 Register scratch = temps.Acquire(); 240 MOZ_ASSERT(dst != scratch && cond != scratch); 241 as_maskeqz(scratch, src, cond); 242 as_masknez(dst, dst, cond); 243 as_or(dst, dst, scratch); 244 } 245 246 // These functions abstract the access to high part of the double precision 247 // float register. They are intended to work on both 32 bit and 64 bit 248 // floating point coprocessor. 
  void moveToDoubleHi(Register src, FloatRegister dest) {
    as_movgr2frh_w(dest, src);
  }
  void moveFromDoubleHi(FloatRegister src, Register dest) {
    as_movfrh2gr_s(dest, src);
  }

  // Full 64-bit GPR <-> FPR bit moves.
  void moveToDouble(Register src, FloatRegister dest) {
    as_movgr2fr_d(dest, src);
  }
  void moveFromDouble(FloatRegister src, Register dest) {
    as_movfr2gr_d(dest, src);
  }

 public:
  void ma_li(Register dest, ImmGCPtr ptr);

  void ma_li(Register dest, Imm32 imm);
  void ma_liPatchable(Register dest, Imm32 imm);

  void ma_rotr_w(Register rd, Register rj, Imm32 shift);

  // Conditional FP moves selected by integer register rk.
  void ma_fmovz(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
                Register rk);
  void ma_fmovn(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
                Register rk);

  void ma_and(Register rd, Register rj, Imm32 imm);

  void ma_or(Register rd, Register rj, Imm32 imm);

  void ma_xor(Register rd, Register rj, Imm32 imm);

  // load
  FaultingCodeOffset ma_load(Register dest, const BaseIndex& src,
                             LoadStoreSize size = SizeWord,
                             LoadStoreExtension extension = SignExtend);

  // store
  FaultingCodeOffset ma_store(Register data, const BaseIndex& dest,
                              LoadStoreSize size = SizeWord,
                              LoadStoreExtension extension = SignExtend);
  void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                LoadStoreExtension extension = SignExtend);

  // arithmetic based ops
  // add
  void ma_add_w(Register rd, Register rj, Imm32 imm);
  void ma_add32TestCarry(Condition cond, Register rd, Register rj, Register rk,
                         Label* overflow);
  void ma_add32TestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
                         Label* overflow);

  // subtract
  void ma_sub_w(Register rd, Register rj, Imm32 imm);
  void ma_sub_w(Register rd, Register rj, Register rk);
  void ma_sub32TestOverflow(Register rd, Register rj, Imm32 imm,
                            Label* overflow);

  // multiplies. For now, there are only few that we care about.
  void ma_mul(Register rd, Register rj, Imm32 imm);
  void ma_mul32TestOverflow(Register rd, Register rj, Register rk,
                            Label* overflow);
  void ma_mul32TestOverflow(Register rd, Register rj, Imm32 imm,
                            Label* overflow);

  // fast mod, uses scratch registers, and thus needs to be in the assembler
  // implicitly assumes that we can overwrite dest at the beginning of the
  // sequence
  void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
                   int32_t shift, Label* negZero = nullptr);

  // branches when done from within la-specific code
  void ma_b(Register lhs, Register rhs, Label* l, Condition c,
            JumpKind jumpKind = LongJump,
            Register scratch = Register::Invalid());
  void ma_b(Register lhs, Imm32 imm, Label* l, Condition c,
            JumpKind jumpKind = LongJump);
  void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,
            JumpKind jumpKind = LongJump);
  // Materialize the GC pointer into a scratch register, then compare.
  void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c,
            JumpKind jumpKind = LongJump) {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(lhs != scratch);
    ma_li(scratch, imm);
    ma_b(lhs, scratch, l, c, jumpKind);
  }

  void ma_b(Label* l, JumpKind jumpKind = LongJump);

  // fp instructions
  void ma_lis(FloatRegister dest, float value);

  FaultingCodeOffset ma_fst_d(FloatRegister src, BaseIndex address);
  FaultingCodeOffset ma_fst_s(FloatRegister src, BaseIndex address);

  FaultingCodeOffset ma_fld_d(FloatRegister dest, const BaseIndex& src);
  FaultingCodeOffset ma_fld_s(FloatRegister dest, const BaseIndex& src);

  // FP branches
  void ma_bc_s(FloatRegister lhs, FloatRegister rhs, Label* label,
               DoubleCondition c, JumpKind jumpKind = LongJump,
               FPConditionBit fcc = FCC0);
  void ma_bc_d(FloatRegister lhs, FloatRegister rhs, Label* label,
               DoubleCondition c, JumpKind jumpKind = LongJump,
               FPConditionBit fcc = FCC0);

  void ma_call(ImmPtr dest);

  void ma_jump(ImmPtr dest);

  // dst = (lhs <c> rhs) ? 1 : 0
  void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
  void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
  void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs,
                         DoubleCondition c);
  void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs,
                          DoubleCondition c);

  // 32-bit GPR <-> FPR bit moves.
  void moveToFloat32(Register src, FloatRegister dest) {
    as_movgr2fr_w(dest, src);
  }
  void moveFromFloat32(FloatRegister src, Register dest) {
    as_movfr2gr_s(dest, src);
  }

  void minMaxPtr(Register lhs, Register rhs, Register dest, bool isMax);
  void minMaxPtr(Register lhs, ImmWord rhs, Register dest, bool isMax);

  // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
  // Handle NaN specially if handleNaN is true.
  void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN,
                    bool isMax);
  void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN,
                     bool isMax);

  FaultingCodeOffset loadDouble(const Address& addr, FloatRegister dest);
  FaultingCodeOffset loadDouble(const BaseIndex& src, FloatRegister dest);

  FaultingCodeOffset loadFloat32(const Address& addr, FloatRegister dest);
  FaultingCodeOffset loadFloat32(const BaseIndex& src, FloatRegister dest);

  // Float16 is not implemented on LoongArch64.
  FaultingCodeOffset loadFloat16(const Address& addr, FloatRegister dest,
                                 Register) {
    MOZ_CRASH("Not supported for this target");
  }
  FaultingCodeOffset loadFloat16(const BaseIndex& src, FloatRegister dest,
                                 Register) {
    MOZ_CRASH("Not supported for this target");
  }

  // Out-of-line paths for wasm float->int truncation failures.
  void outOfLineWasmTruncateToInt32Check(
      FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
      Label* rejoin, const wasm::TrapSiteDesc& trapSiteDesc);
  void outOfLineWasmTruncateToInt64Check(
FloatRegister input, Register64 output, MIRType fromType, 405 TruncFlags flags, Label* rejoin, const wasm::TrapSiteDesc& trapSiteDesc); 406 407 protected: 408 void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase, 409 Register ptr, Register ptrScratch, AnyRegister output, 410 Register tmp); 411 void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value, 412 Register memoryBase, Register ptr, Register ptrScratch, 413 Register tmp); 414 }; 415 416 class MacroAssembler; 417 418 class MacroAssemblerLOONG64Compat : public MacroAssemblerLOONG64 { 419 public: 420 using MacroAssemblerLOONG64::call; 421 422 MacroAssemblerLOONG64Compat() {} 423 424 void convertBoolToInt32(Register src, Register dest) { 425 ma_and(dest, src, Imm32(0xff)); 426 }; 427 void convertInt32ToDouble(Register src, FloatRegister dest) { 428 as_movgr2fr_w(dest, src); 429 as_ffint_d_w(dest, dest); 430 }; 431 void convertInt32ToDouble(const Address& src, FloatRegister dest) { 432 ma_fld_s(dest, src); 433 as_ffint_d_w(dest, dest); 434 }; 435 void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) { 436 UseScratchRegisterScope temps(*this); 437 Register scratch = temps.Acquire(); 438 MOZ_ASSERT(scratch != src.base); 439 MOZ_ASSERT(scratch != src.index); 440 computeScaledAddress(src, scratch); 441 convertInt32ToDouble(Address(scratch, src.offset), dest); 442 }; 443 void convertUInt32ToDouble(Register src, FloatRegister dest); 444 void convertUInt32ToFloat32(Register src, FloatRegister dest); 445 void convertDoubleToFloat32(FloatRegister src, FloatRegister dest); 446 void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail, 447 bool negativeZeroCheck = true); 448 void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail, 449 bool negativeZeroCheck = true); 450 void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail, 451 bool negativeZeroCheck = true); 452 453 void convertFloat32ToDouble(FloatRegister src, 
FloatRegister dest); 454 void convertInt32ToFloat32(Register src, FloatRegister dest); 455 void convertInt32ToFloat32(const Address& src, FloatRegister dest); 456 457 void convertDoubleToFloat16(FloatRegister src, FloatRegister dest) { 458 MOZ_CRASH("Not supported for this target"); 459 } 460 void convertFloat16ToDouble(FloatRegister src, FloatRegister dest) { 461 MOZ_CRASH("Not supported for this target"); 462 } 463 void convertFloat32ToFloat16(FloatRegister src, FloatRegister dest) { 464 MOZ_CRASH("Not supported for this target"); 465 } 466 void convertFloat16ToFloat32(FloatRegister src, FloatRegister dest) { 467 MOZ_CRASH("Not supported for this target"); 468 } 469 void convertInt32ToFloat16(Register src, FloatRegister dest) { 470 MOZ_CRASH("Not supported for this target"); 471 } 472 473 void computeScaledAddress(const BaseIndex& address, Register dest); 474 void computeScaledAddress32(const BaseIndex& address, Register dest); 475 476 void computeEffectiveAddress(const Address& address, Register dest) { 477 ma_add_d(dest, address.base, Imm32(address.offset)); 478 } 479 480 void computeEffectiveAddress(const BaseIndex& address, Register dest) { 481 computeScaledAddress(address, dest); 482 if (address.offset) { 483 ma_add_d(dest, dest, Imm32(address.offset)); 484 } 485 } 486 487 void computeEffectiveAddress32(const Address& address, Register dest) { 488 ma_add_w(dest, address.base, Imm32(address.offset)); 489 } 490 491 void computeEffectiveAddress32(const BaseIndex& address, Register dest) { 492 computeScaledAddress32(address, dest); 493 if (address.offset) { 494 ma_add_w(dest, dest, Imm32(address.offset)); 495 } 496 } 497 498 void j(Label* dest) { ma_b(dest); } 499 500 void mov(Register src, Register dest) { as_ori(dest, src, 0); } 501 void mov(ImmWord imm, Register dest) { ma_li(dest, imm); } 502 void mov(ImmPtr imm, Register dest) { 503 mov(ImmWord(uintptr_t(imm.value)), dest); 504 } 505 void mov(CodeLabel* label, Register dest) { ma_li(dest, label); } 506 void 
mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); } 507 void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); } 508 509 void writeDataRelocation(const Value& val) { 510 MOZ_ASSERT(val.isGCThing(), "only called for gc-things"); 511 512 // Raw GC pointer relocations and Value relocations both end up in 513 // TraceOneDataRelocation. 514 gc::Cell* cell = val.toGCThing(); 515 if (cell && gc::IsInsideNursery(cell)) { 516 embedsNurseryPointers_ = true; 517 } 518 dataRelocations_.writeUnsigned(currentOffset()); 519 } 520 521 void branch(JitCode* c) { 522 UseScratchRegisterScope temps(*this); 523 Register scratch = temps.Acquire(); 524 BufferOffset bo = m_buffer.nextOffset(); 525 addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE); 526 ma_liPatchable(scratch, ImmPtr(c->raw())); 527 as_jirl(zero, scratch, BOffImm16(0)); 528 } 529 void branch(const Register reg) { as_jirl(zero, reg, BOffImm16(0)); } 530 void nop() { as_nop(); } 531 BufferOffset ret() { 532 ma_pop(ra); 533 return as_jirl(zero, ra, BOffImm16(0)); 534 } 535 inline void retn(Imm32 n); 536 void push(Imm32 imm) { 537 UseScratchRegisterScope temps(*this); 538 Register scratch = temps.Acquire(); 539 ma_li(scratch, imm); 540 ma_push(scratch); 541 } 542 void push(ImmWord imm) { 543 UseScratchRegisterScope temps(*this); 544 Register scratch = temps.Acquire(); 545 ma_li(scratch, imm); 546 ma_push(scratch); 547 } 548 void push(ImmGCPtr imm) { 549 UseScratchRegisterScope temps(*this); 550 Register scratch = temps.Acquire(); 551 ma_li(scratch, imm); 552 ma_push(scratch); 553 } 554 void push(const Address& address) { 555 UseScratchRegisterScope temps(*this); 556 Register scratch = temps.Acquire(); 557 loadPtr(address, scratch); 558 ma_push(scratch); 559 } 560 void push(Register reg) { ma_push(reg); } 561 void push(FloatRegister reg) { ma_push(reg); } 562 void pop(Register reg) { ma_pop(reg); } 563 void pop(FloatRegister reg) { ma_pop(reg); } 564 565 // Emit a branch that can be toggled to a 
non-operation. On LOONG64 we use 566 // "andi" instruction to toggle the branch. 567 // See ToggleToJmp(), ToggleToCmp(). 568 CodeOffset toggledJump(Label* label); 569 570 // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch 571 // this instruction. 572 CodeOffset toggledCall(JitCode* target, bool enabled); 573 574 static size_t ToggledCallSize(uint8_t* code) { 575 // Four instructions used in: MacroAssemblerLOONG64Compat::toggledCall 576 return 4 * sizeof(uint32_t); 577 } 578 579 CodeOffset pushWithPatch(ImmWord imm) { 580 UseScratchRegisterScope temps(*this); 581 Register scratch = temps.Acquire(); 582 CodeOffset offset = movWithPatch(imm, scratch); 583 ma_push(scratch); 584 return offset; 585 } 586 587 CodeOffset movWithPatch(ImmWord imm, Register dest) { 588 CodeOffset offset = CodeOffset(currentOffset()); 589 ma_liPatchable(dest, imm, Li64); 590 return offset; 591 } 592 CodeOffset movWithPatch(ImmPtr imm, Register dest) { 593 CodeOffset offset = CodeOffset(currentOffset()); 594 ma_liPatchable(dest, imm); 595 return offset; 596 } 597 598 void writeCodePointer(CodeLabel* label) { 599 label->patchAt()->bind(currentOffset()); 600 label->setLinkMode(CodeLabel::RawPointer); 601 m_buffer.ensureSpace(sizeof(void*)); 602 writeInst(-1); 603 writeInst(-1); 604 } 605 606 void jump(Label* label) { ma_b(label); } 607 void jump(Register reg) { as_jirl(zero, reg, BOffImm16(0)); } 608 void jump(const Address& address) { 609 UseScratchRegisterScope temps(*this); 610 Register scratch = temps.Acquire(); 611 loadPtr(address, scratch); 612 as_jirl(zero, scratch, BOffImm16(0)); 613 } 614 615 void jump(JitCode* code) { branch(code); } 616 617 void jump(ImmPtr ptr) { 618 BufferOffset bo = m_buffer.nextOffset(); 619 addPendingJump(bo, ptr, RelocationKind::HARDCODED); 620 ma_jump(ptr); 621 } 622 623 void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); } 624 625 void splitTag(Register src, Register dest) { 626 as_srli_d(dest, src, JSVAL_TAG_SHIFT); 627 } 628 629 
void splitTag(const ValueOperand& operand, Register dest) { 630 splitTag(operand.valueReg(), dest); 631 } 632 633 void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) { 634 splitTag(value, tag); 635 } 636 637 // unboxing code 638 void unboxNonDouble(const ValueOperand& operand, Register dest, 639 JSValueType type) { 640 unboxNonDouble(operand.valueReg(), dest, type); 641 } 642 643 template <typename T> 644 void unboxNonDouble(T src, Register dest, JSValueType type) { 645 MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE); 646 if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) { 647 load32(src, dest); 648 return; 649 } 650 loadPtr(src, dest); 651 unboxNonDouble(dest, dest, type); 652 } 653 654 void unboxNonDouble(Register src, Register dest, JSValueType type) { 655 MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE); 656 if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) { 657 as_slli_w(dest, src, 0); 658 return; 659 } 660 UseScratchRegisterScope temps(*this); 661 Register scratch = temps.Acquire(); 662 MOZ_ASSERT(scratch != src); 663 mov(ImmShiftedTag(type), scratch); 664 as_xor(dest, src, scratch); 665 } 666 667 void unboxGCThingForGCBarrier(const Address& src, Register dest) { 668 loadPtr(src, dest); 669 as_bstrpick_d(dest, dest, JSVAL_TAG_SHIFT - 1, 0); 670 } 671 void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) { 672 as_bstrpick_d(dest, src.valueReg(), JSVAL_TAG_SHIFT - 1, 0); 673 } 674 675 void unboxWasmAnyRefGCThingForGCBarrier(const Address& src, Register dest) { 676 UseScratchRegisterScope temps(*this); 677 Register scratch = temps.Acquire(); 678 MOZ_ASSERT(scratch != dest); 679 movePtr(ImmWord(wasm::AnyRef::GCThingMask), scratch); 680 loadPtr(src, dest); 681 as_and(dest, dest, scratch); 682 } 683 684 // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base. 
685 void getGCThingValueChunk(const Address& src, Register dest) { 686 UseScratchRegisterScope temps(*this); 687 Register scratch = temps.Acquire(); 688 MOZ_ASSERT(scratch != dest); 689 loadPtr(src, dest); 690 movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch); 691 as_and(dest, dest, scratch); 692 } 693 void getGCThingValueChunk(const ValueOperand& src, Register dest) { 694 MOZ_ASSERT(src.valueReg() != dest); 695 movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest); 696 as_and(dest, dest, src.valueReg()); 697 } 698 699 void getWasmAnyRefGCThingChunk(Register src, Register dest) { 700 MOZ_ASSERT(src != dest); 701 movePtr(ImmWord(wasm::AnyRef::GCThingChunkMask), dest); 702 as_and(dest, dest, src); 703 } 704 705 void unboxInt32(const ValueOperand& operand, Register dest); 706 void unboxInt32(Register src, Register dest); 707 void unboxInt32(const Address& src, Register dest); 708 void unboxInt32(const BaseIndex& src, Register dest); 709 void unboxBoolean(const ValueOperand& operand, Register dest); 710 void unboxBoolean(Register src, Register dest); 711 void unboxBoolean(const Address& src, Register dest); 712 void unboxBoolean(const BaseIndex& src, Register dest); 713 void unboxDouble(const ValueOperand& operand, FloatRegister dest); 714 void unboxDouble(Register src, Register dest); 715 void unboxDouble(const Address& src, FloatRegister dest); 716 void unboxDouble(const BaseIndex& src, FloatRegister dest); 717 void unboxString(const ValueOperand& operand, Register dest); 718 void unboxString(Register src, Register dest); 719 void unboxString(const Address& src, Register dest); 720 void unboxSymbol(const ValueOperand& src, Register dest); 721 void unboxSymbol(Register src, Register dest); 722 void unboxSymbol(const Address& src, Register dest); 723 void unboxBigInt(const ValueOperand& operand, Register dest); 724 void unboxBigInt(Register src, Register dest); 725 void unboxBigInt(const Address& src, Register dest); 726 void 
unboxObject(const ValueOperand& src, Register dest); 727 void unboxObject(Register src, Register dest); 728 void unboxObject(const Address& src, Register dest); 729 void unboxObject(const BaseIndex& src, Register dest) { 730 unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT); 731 } 732 void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type); 733 734 void notBoolean(const ValueOperand& val) { 735 as_xori(val.valueReg(), val.valueReg(), 1); 736 } 737 738 // boxing code 739 void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister); 740 void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) { 741 boxValue(type, src, dest.valueReg()); 742 } 743 void boxNonDouble(Register type, Register src, const ValueOperand& dest) { 744 boxValue(type, src, dest.valueReg()); 745 } 746 747 // Extended unboxing API. If the payload is already in a register, returns 748 // that register. Otherwise, provides a move to the given scratch register, 749 // and returns that. 
750 [[nodiscard]] Register extractObject(const Address& address, 751 Register scratch); 752 [[nodiscard]] Register extractObject(const ValueOperand& value, 753 Register scratch) { 754 unboxObject(value, scratch); 755 return scratch; 756 } 757 [[nodiscard]] Register extractString(const ValueOperand& value, 758 Register scratch) { 759 unboxString(value, scratch); 760 return scratch; 761 } 762 [[nodiscard]] Register extractSymbol(const ValueOperand& value, 763 Register scratch) { 764 unboxSymbol(value, scratch); 765 return scratch; 766 } 767 [[nodiscard]] Register extractInt32(const ValueOperand& value, 768 Register scratch) { 769 unboxInt32(value, scratch); 770 return scratch; 771 } 772 [[nodiscard]] Register extractBoolean(const ValueOperand& value, 773 Register scratch) { 774 unboxBoolean(value, scratch); 775 return scratch; 776 } 777 [[nodiscard]] Register extractTag(const Address& address, Register scratch); 778 [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch); 779 [[nodiscard]] Register extractTag(const ValueOperand& value, 780 Register scratch) { 781 splitTag(value, scratch); 782 return scratch; 783 } 784 785 void loadInt32OrDouble(const Address& src, FloatRegister dest); 786 void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest); 787 void loadConstantDouble(double dp, FloatRegister dest); 788 void loadConstantFloat32(float f, FloatRegister dest); 789 790 void testNullSet(Condition cond, const ValueOperand& value, Register dest); 791 792 void testObjectSet(Condition cond, const ValueOperand& value, Register dest); 793 794 void testUndefinedSet(Condition cond, const ValueOperand& value, 795 Register dest); 796 797 template <typename T> 798 void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) { 799 if (dest.isFloat()) { 800 loadInt32OrDouble(address, dest.fpu()); 801 } else { 802 unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type)); 803 } 804 } 805 806 void boxValue(JSValueType type, Register 
src, Register dest); 807 void boxValue(Register type, Register src, Register dest); 808 809 void storeValue(ValueOperand val, const Address& dest); 810 void storeValue(ValueOperand val, const BaseIndex& dest); 811 void storeValue(JSValueType type, Register reg, Address dest); 812 void storeValue(JSValueType type, Register reg, BaseIndex dest); 813 void storeValue(const Value& val, Address dest); 814 void storeValue(const Value& val, BaseIndex dest); 815 void storeValue(const Address& src, const Address& dest, Register temp) { 816 loadPtr(src, temp); 817 storePtr(temp, dest); 818 } 819 820 void storePrivateValue(Register src, const Address& dest) { 821 storePtr(src, dest); 822 } 823 void storePrivateValue(ImmGCPtr imm, const Address& dest) { 824 storePtr(imm, dest); 825 } 826 827 void loadValue(Address src, ValueOperand val); 828 void loadValue(const BaseIndex& src, ValueOperand val); 829 830 void loadUnalignedValue(const Address& src, ValueOperand dest) { 831 loadValue(src, dest); 832 } 833 834 void tagValue(JSValueType type, Register payload, ValueOperand dest); 835 836 void pushValue(ValueOperand val); 837 void popValue(ValueOperand val); 838 void pushValue(const Value& val) { 839 if (val.isGCThing()) { 840 UseScratchRegisterScope temps(*this); 841 Register scratch = temps.Acquire(); 842 writeDataRelocation(val); 843 movWithPatch(ImmWord(val.asRawBits()), scratch); 844 push(scratch); 845 } else { 846 push(ImmWord(val.asRawBits())); 847 } 848 } 849 void pushValue(JSValueType type, Register reg) { 850 UseScratchRegisterScope temps(*this); 851 Register scratch = temps.Acquire(); 852 boxValue(type, reg, scratch); 853 push(scratch); 854 } 855 void pushValue(const Address& addr); 856 void pushValue(const BaseIndex& addr, Register scratch) { 857 loadValue(addr, ValueOperand(scratch)); 858 pushValue(ValueOperand(scratch)); 859 } 860 861 void handleFailureWithHandlerTail(Label* profilerExitTail, Label* bailoutTail, 862 uint32_t* returnValueCheckOffset); 863 864 
  /////////////////////////////////////////////////////////////////
  // Common interface.
  /////////////////////////////////////////////////////////////////
 public:
  // The following functions are exposed for use in platform-shared code.

  inline void incrementInt32Value(const Address& addr);

  void move32(Imm32 imm, Register dest);
  void move32(Register src, Register dest);

  void movePtr(Register src, Register dest);
  void movePtr(ImmWord imm, Register dest);
  void movePtr(ImmPtr imm, Register dest);
  void movePtr(wasm::SymbolicAddress imm, Register dest);
  void movePtr(ImmGCPtr imm, Register dest);

  // Memory-access emitters returning a FaultingCodeOffset, which identifies
  // the access instruction — presumably consumed by wasm trap/fault
  // machinery (see the WasmBuiltins.h include); verify against callers.
  FaultingCodeOffset load8SignExtend(const Address& address, Register dest);
  FaultingCodeOffset load8SignExtend(const BaseIndex& src, Register dest);

  FaultingCodeOffset load8ZeroExtend(const Address& address, Register dest);
  FaultingCodeOffset load8ZeroExtend(const BaseIndex& src, Register dest);

  FaultingCodeOffset load16SignExtend(const Address& address, Register dest);
  FaultingCodeOffset load16SignExtend(const BaseIndex& src, Register dest);

  // Unaligned accesses are handled by the hardware on this target, so all
  // *Unaligned variants below simply forward to the aligned emitters.
  template <typename S>
  void load16UnalignedSignExtend(const S& src, Register dest) {
    load16SignExtend(src, dest);
  }

  FaultingCodeOffset load16ZeroExtend(const Address& address, Register dest);
  FaultingCodeOffset load16ZeroExtend(const BaseIndex& src, Register dest);

  template <typename S>
  void load16UnalignedZeroExtend(const S& src, Register dest) {
    load16ZeroExtend(src, dest);
  }

  FaultingCodeOffset load32(const Address& address, Register dest);
  FaultingCodeOffset load32(const BaseIndex& address, Register dest);
  void load32(AbsoluteAddress address, Register dest);
  void load32(wasm::SymbolicAddress address, Register dest);

  template <typename S>
  void load32Unaligned(const S& src, Register dest) {
    load32(src, dest);
  }

  // On this 64-bit target a Register64 wraps a single GPR (dest.reg), so
  // 64-bit loads/stores are plain pointer-width accesses.
  FaultingCodeOffset load64(const Address& address, Register64 dest) {
    return loadPtr(address, dest.reg);
  }
  FaultingCodeOffset load64(const BaseIndex& address, Register64 dest) {
    return loadPtr(address, dest.reg);
  }

  template <typename S>
  void load64Unaligned(const S& src, Register64 dest) {
    load64(src, dest);
  }

  FaultingCodeOffset loadPtr(const Address& address, Register dest);
  FaultingCodeOffset loadPtr(const BaseIndex& src, Register dest);
  void loadPtr(AbsoluteAddress address, Register dest);
  void loadPtr(wasm::SymbolicAddress address, Register dest);

  void loadPrivate(const Address& address, Register dest);

  FaultingCodeOffset store8(Register src, const Address& address);
  FaultingCodeOffset store8(Register src, const BaseIndex& address);
  void store8(Imm32 imm, const Address& address);
  void store8(Imm32 imm, const BaseIndex& address);

  FaultingCodeOffset store16(Register src, const Address& address);
  FaultingCodeOffset store16(Register src, const BaseIndex& address);
  void store16(Imm32 imm, const Address& address);
  void store16(Imm32 imm, const BaseIndex& address);

  template <typename T>
  void store16Unaligned(Register src, const T& dest) {
    store16(src, dest);
  }

  FaultingCodeOffset store32(Register src, const Address& address);
  FaultingCodeOffset store32(Register src, const BaseIndex& address);
  void store32(Register src, AbsoluteAddress address);
  void store32(Imm32 src, const Address& address);
  void store32(Imm32 src, const BaseIndex& address);

  template <typename T>
  void store32Unaligned(Register src, const T& dest) {
    store32(src, dest);
  }

  void store64(Imm64 imm, Address address) {
    storePtr(ImmWord(imm.value), address);
  }
  void store64(Imm64 imm, const BaseIndex& address) {
    storePtr(ImmWord(imm.value), address);
  }

  FaultingCodeOffset store64(Register64 src, Address address) {
    return storePtr(src.reg, address);
  }

  FaultingCodeOffset store64(Register64 src, const BaseIndex& address) {
    return storePtr(src.reg, address);
  }

  template <typename T>
  void store64Unaligned(Register64 src, const T& dest) {
    store64(src, dest);
  }

  template <typename T>
  void storePtr(ImmWord imm, T address);
  template <typename T>
  void storePtr(ImmPtr imm, T address);
  template <typename T>
  void storePtr(ImmGCPtr imm, T address);
  void storePtr(Register src, AbsoluteAddress dest);
  FaultingCodeOffset storePtr(Register src, const Address& address);
  FaultingCodeOffset storePtr(Register src, const BaseIndex& address);

  // Double-precision register move (fmov.d).
  void moveDouble(FloatRegister src, FloatRegister dest) {
    as_fmov_d(dest, src);
  }

  // Zero a double register by moving the integer zero register into it.
  void zeroDouble(FloatRegister reg) { moveToDouble(zero, reg); }

  void convertUInt64ToDouble(Register src, FloatRegister dest);

  void breakpoint(uint32_t value = 0);

  // Debug-only check that sp is ABI-aligned: mask sp with
  // ABIStackAlignment - 1 and trap via breakpoint() if any low bits are
  // set. Compiles to nothing in non-DEBUG builds.
  void checkStackAlignment() {
#ifdef DEBUG
    Label aligned;
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    as_andi(scratch, sp, ABIStackAlignment - 1);
    ma_b(scratch, zero, &aligned, Equal, ShortJump);
    breakpoint();
    bind(&aligned);
#endif
  };  // NOTE(review): stray ';' after this definition — a harmless empty
      // member declaration; could be dropped upstream.

  static void calculateAlignedStackPointer(void** stackPointer);

  // Compare-and-set helpers: |dest| <- (lhs cond rhs) as 0/1.
  void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
                 Register dest);
  void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
                 Register dest);
  void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs,
                 Register dest);

  void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
                Register dest);

 protected:
  bool buildOOLFakeExitFrame(void* fakeReturnAddr);

  // Shared implementations for the public wasm 64-bit load/store entry
  // points; defined out of line.
  void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
                       Register memoryBase, Register ptr, Register ptrScratch,
                       Register64 output, Register tmp);
  void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
                        Register memoryBase, Register ptr, Register ptrScratch,
                        Register tmp);

 public:
  // dest <- addr.base + addr.disp (effective-address computation).
  void lea(Operand addr, Register dest) {
    ma_add_d(dest, addr.baseReg(), Imm32(addr.disp()));
  }

  // Return from an ABI call: jirl with link register zero, i.e. an
  // unconditional jump to ra.
  void abiret() { as_jirl(zero, ra, BOffImm16(0)); }

  // Single-precision register move (fmov.s).
  void moveFloat32(FloatRegister src, FloatRegister dest) {
    as_fmov_s(dest, src);
  }

  // Instrumentation for entering and leaving the profiler.
  void profilerEnterFrame(Register framePtr, Register scratch);
  void profilerExitFrame();
};

typedef MacroAssemblerLOONG64Compat MacroAssemblerSpecific;

}  // namespace jit
}  // namespace js

#endif /* jit_loong64_MacroAssembler_loong64_h */