MacroAssembler-riscv64.h (48252B)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * vim: set ts=8 sts=2 et sw=2 tw=80: 3 * This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 // Copyright 2021 the V8 project authors. All rights reserved. 8 // Use of this source code is governed by a BSD-style license that can be 9 // found in the LICENSE file. 10 #ifndef jit_riscv64_MacroAssembler_riscv64_h 11 #define jit_riscv64_MacroAssembler_riscv64_h 12 13 #include "jit/MoveResolver.h" 14 #include "jit/riscv64/Assembler-riscv64.h" 15 #include "wasm/WasmTypeDecls.h" 16 17 namespace js { 18 namespace jit { 19 20 static Register CallReg = t6; 21 22 enum LiFlags { 23 Li64 = 0, 24 Li48 = 1, 25 }; 26 27 class CompactBufferReader; 28 enum LoadStoreSize { 29 SizeByte = 8, 30 SizeHalfWord = 16, 31 SizeWord = 32, 32 SizeDouble = 64 33 }; 34 35 enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 }; 36 enum JumpKind { LongJump = 0, ShortJump = 1 }; 37 enum FloatFormat { SingleFloat, DoubleFloat }; 38 class ScratchTagScope { 39 UseScratchRegisterScope temps_; 40 Register scratch_; 41 bool owned_; 42 mozilla::DebugOnly<bool> released_; 43 44 public: 45 ScratchTagScope(Assembler& masm, const ValueOperand&) 46 : temps_(masm), owned_(true), released_(false) { 47 scratch_ = temps_.Acquire(); 48 } 49 50 operator Register() { 51 MOZ_ASSERT(!released_); 52 return scratch_; 53 } 54 55 void release() { 56 MOZ_ASSERT(!released_); 57 released_ = true; 58 if (owned_) { 59 temps_.Release(scratch_); 60 owned_ = false; 61 } 62 } 63 64 void reacquire() { 65 MOZ_ASSERT(released_); 66 released_ = false; 67 if (!owned_) { 68 scratch_ = temps_.Acquire(); 69 owned_ = true; 70 } 71 } 72 }; 73 74 class ScratchTagScopeRelease { 75 ScratchTagScope* ts_; 76 77 public: 78 explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) { 79 ts_->release(); 80 } 
81 ~ScratchTagScopeRelease() { ts_->reacquire(); } 82 }; 83 84 struct ImmShiftedTag : public ImmWord { 85 explicit ImmShiftedTag(JSValueType type) 86 : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) { 87 } 88 }; 89 90 struct ImmTag : public Imm32 { 91 explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {} 92 }; 93 94 class MacroAssemblerRiscv64 : public Assembler { 95 public: 96 MacroAssemblerRiscv64() {} 97 98 #ifdef JS_SIMULATOR_RISCV64 99 // See riscv64/base-constants-riscv.h DebugParameters. 100 void Debug(uint32_t parameters) { break_(parameters, false); } 101 #endif 102 103 // Perform a downcast. Should be removed by Bug 996602. 104 MacroAssembler& asMasm(); 105 const MacroAssembler& asMasm() const; 106 107 MoveResolver moveResolver_; 108 109 static bool SupportsFloatingPoint() { return true; } 110 static bool SupportsUnalignedAccesses() { return true; } 111 static bool SupportsFastUnalignedFPAccesses() { return true; } 112 static bool SupportsFloat64To16() { return false; } 113 static bool SupportsFloat32To16() { return false; } 114 115 void haltingAlign(int alignment) { 116 // TODO(loong64): Implement a proper halting align. 117 nopAlign(alignment); 118 } 119 120 bool CalculateOffset(Label* L, OffsetSize bits, int32_t* offset); 121 int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); 122 123 inline void GenPCRelativeJump(Register rd, int32_t imm32) { 124 MOZ_ASSERT(is_int32(imm32 + 0x800)); 125 int32_t Hi20 = ((imm32 + 0x800) >> 12); 126 int32_t Lo12 = imm32 << 20 >> 20; 127 auipc(rd, Hi20); // Read PC + Hi20 into scratch. 
128 jr(rd, Lo12); // jump PC + Hi20 + Lo12 129 } 130 131 // load 132 FaultingCodeOffset ma_load(Register dest, Address address, 133 LoadStoreSize size = SizeWord, 134 LoadStoreExtension extension = SignExtend); 135 FaultingCodeOffset ma_load(Register dest, const BaseIndex& src, 136 LoadStoreSize size = SizeWord, 137 LoadStoreExtension extension = SignExtend); 138 FaultingCodeOffset ma_loadDouble(FloatRegister dest, Address address); 139 FaultingCodeOffset ma_loadFloat(FloatRegister dest, Address address); 140 // store 141 FaultingCodeOffset ma_store(Register data, Address address, 142 LoadStoreSize size = SizeWord, 143 LoadStoreExtension extension = SignExtend); 144 FaultingCodeOffset ma_store(Register data, const BaseIndex& dest, 145 LoadStoreSize size = SizeWord, 146 LoadStoreExtension extension = SignExtend); 147 FaultingCodeOffset ma_store(Imm32 imm, const BaseIndex& dest, 148 LoadStoreSize size = SizeWord, 149 LoadStoreExtension extension = SignExtend); 150 FaultingCodeOffset ma_store(Imm32 imm, Address address, 151 LoadStoreSize size = SizeWord, 152 LoadStoreExtension extension = SignExtend); 153 void ma_storeDouble(FloatRegister dest, Address address); 154 void ma_storeFloat(FloatRegister dest, Address address); 155 void ma_liPatchable(Register dest, Imm32 imm); 156 void ma_liPatchable(Register dest, ImmPtr imm); 157 void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48); 158 void ma_li(Register dest, ImmGCPtr ptr); 159 void ma_li(Register dest, Imm32 imm); 160 void ma_li(Register dest, Imm64 imm); 161 void ma_li(Register dest, intptr_t imm) { RV_li(dest, imm); } 162 void ma_li(Register dest, CodeLabel* label); 163 void ma_li(Register dest, ImmWord imm); 164 165 // branches when done from within la-specific code 166 void ma_b(Register lhs, Register rhs, Label* l, Condition c, 167 JumpKind jumpKind = LongJump); 168 void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, 169 JumpKind jumpKind = LongJump); 170 CodeOffset 
BranchAndLinkShort(Label* L); 171 CodeOffset BranchAndLink(Label* label); 172 CodeOffset BranchAndLinkShort(int32_t offset); 173 CodeOffset BranchAndLinkShortHelper(int32_t offset, Label* L); 174 CodeOffset BranchAndLinkLong(Label* L); 175 void GenPCRelativeJumpAndLink(Register rd, int32_t imm32); 176 177 #define DEFINE_INSTRUCTION(instr) \ 178 void instr(Register rd, Register rj, Operand rt); \ 179 void instr(Register rd, Register rj, Imm32 imm) { \ 180 instr(rd, rj, Operand(imm.value)); \ 181 } \ 182 void instr(Register rd, Imm32 imm) { instr(rd, rd, Operand(imm.value)); } \ 183 void instr(Register rd, Register rs) { instr(rd, rd, Operand(rs)); } 184 185 #define DEFINE_INSTRUCTION2(instr) \ 186 void instr(Register rs, const Operand& rt); \ 187 void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \ 188 void instr(Register rs, Imm32 j) { instr(rs, Operand(j.value)); } 189 190 DEFINE_INSTRUCTION(ma_and); 191 DEFINE_INSTRUCTION(ma_or); 192 DEFINE_INSTRUCTION(ma_xor); 193 DEFINE_INSTRUCTION(ma_nor); 194 DEFINE_INSTRUCTION(ma_sub32) 195 DEFINE_INSTRUCTION(ma_sub64) 196 DEFINE_INSTRUCTION(ma_add32) 197 DEFINE_INSTRUCTION(ma_add64) 198 DEFINE_INSTRUCTION(ma_div32) 199 DEFINE_INSTRUCTION(ma_divu32) 200 DEFINE_INSTRUCTION(ma_div64) 201 DEFINE_INSTRUCTION(ma_divu64) 202 DEFINE_INSTRUCTION(ma_mod32) 203 DEFINE_INSTRUCTION(ma_modu32) 204 DEFINE_INSTRUCTION(ma_mod64) 205 DEFINE_INSTRUCTION(ma_modu64) 206 DEFINE_INSTRUCTION(ma_mul32) 207 DEFINE_INSTRUCTION(ma_mulh32) 208 DEFINE_INSTRUCTION(ma_mulhu32) 209 DEFINE_INSTRUCTION(ma_mul64) 210 DEFINE_INSTRUCTION(ma_mulh64) 211 DEFINE_INSTRUCTION(ma_sll64) 212 DEFINE_INSTRUCTION(ma_sra64) 213 DEFINE_INSTRUCTION(ma_srl64) 214 DEFINE_INSTRUCTION(ma_sll32) 215 DEFINE_INSTRUCTION(ma_sra32) 216 DEFINE_INSTRUCTION(ma_srl32) 217 DEFINE_INSTRUCTION(ma_slt) 218 DEFINE_INSTRUCTION(ma_sltu) 219 DEFINE_INSTRUCTION(ma_sle) 220 DEFINE_INSTRUCTION(ma_sleu) 221 DEFINE_INSTRUCTION(ma_sgt) 222 DEFINE_INSTRUCTION(ma_sgtu) 223 
DEFINE_INSTRUCTION(ma_sge) 224 DEFINE_INSTRUCTION(ma_sgeu) 225 DEFINE_INSTRUCTION(ma_seq) 226 DEFINE_INSTRUCTION(ma_sne) 227 228 DEFINE_INSTRUCTION2(ma_seqz) 229 DEFINE_INSTRUCTION2(ma_snez) 230 DEFINE_INSTRUCTION2(ma_neg); 231 232 #undef DEFINE_INSTRUCTION2 233 #undef DEFINE_INSTRUCTION 234 // arithmetic based ops 235 void ma_add32TestOverflow(Register rd, Register rj, Register rk, 236 Label* overflow); 237 void ma_add32TestOverflow(Register rd, Register rj, Imm32 imm, 238 Label* overflow); 239 void ma_addPtrTestOverflow(Register rd, Register rj, Register rk, 240 Label* overflow); 241 void ma_addPtrTestOverflow(Register rd, Register rj, Imm32 imm, 242 Label* overflow); 243 void ma_addPtrTestOverflow(Register rd, Register rj, ImmWord imm, 244 Label* overflow); 245 void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Register rk, 246 Label* overflow); 247 void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Imm32 imm, 248 Label* overflow); 249 void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, ImmWord imm, 250 Label* overflow); 251 void ma_addPtrTestSigned(Condition cond, Register rd, Register rj, 252 Register rk, Label* taken); 253 void ma_addPtrTestSigned(Condition cond, Register rd, Register rj, Imm32 imm, 254 Label* taken); 255 void ma_addPtrTestSigned(Condition cond, Register rd, Register rj, 256 ImmWord imm, Label* taken); 257 258 // subtract 259 void ma_sub32TestOverflow(Register rd, Register rj, Register rk, 260 Label* overflow); 261 void ma_subPtrTestOverflow(Register rd, Register rj, Register rk, 262 Label* overflow); 263 void ma_subPtrTestOverflow(Register rd, Register rj, Imm32 imm, 264 Label* overflow); 265 266 // multiplies. For now, there are only few that we care about. 
267 void ma_mulPtrTestOverflow(Register rd, Register rj, Register rk, 268 Label* overflow); 269 270 // branches when done from within la-specific code 271 void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, 272 JumpKind jumpKind = LongJump); 273 void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, 274 JumpKind jumpKind = LongJump); 275 void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, 276 JumpKind jumpKind = LongJump) { 277 UseScratchRegisterScope temps(this); 278 Register scratch = temps.Acquire(); 279 ma_li(scratch, imm); 280 ma_b(lhs, scratch, l, c, jumpKind); 281 } 282 void ma_b(Register lhs, Address addr, Label* l, Condition c, 283 JumpKind jumpKind = LongJump); 284 void ma_b(Address addr, Imm32 imm, Label* l, Condition c, 285 JumpKind jumpKind = LongJump); 286 void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, 287 JumpKind jumpKind = LongJump); 288 void ma_b(Address addr, Register rhs, Label* l, Condition c, 289 JumpKind jumpKind = LongJump) { 290 UseScratchRegisterScope temps(this); 291 Register scratch = temps.Acquire(); 292 MOZ_ASSERT(rhs != scratch); 293 ma_load(scratch, addr, SizeDouble); 294 ma_b(scratch, rhs, l, c, jumpKind); 295 } 296 297 void ma_branch(Label* target, Condition cond, Register r1, const Operand& r2, 298 JumpKind jumpKind = ShortJump); 299 300 void ma_branch(Label* target, JumpKind jumpKind = ShortJump) { 301 ma_branch(target, Always, zero, zero, jumpKind); 302 } 303 304 // fp instructions 305 void ma_lid(FloatRegister dest, double value); 306 307 // fp instructions 308 void ma_lis(FloatRegister dest, float value); 309 310 FaultingCodeOffset ma_fst_d(FloatRegister src, BaseIndex address); 311 FaultingCodeOffset ma_fst_s(FloatRegister src, BaseIndex address); 312 313 void ma_fld_d(FloatRegister dest, const BaseIndex& src); 314 void ma_fld_s(FloatRegister dest, const BaseIndex& src); 315 316 void ma_fmv_d(FloatRegister src, ValueOperand dest); 317 void ma_fmv_d(ValueOperand src, FloatRegister dest); 318 
319 void ma_fmv_w(FloatRegister src, ValueOperand dest); 320 void ma_fmv_w(ValueOperand src, FloatRegister dest); 321 322 FaultingCodeOffset ma_fld_s(FloatRegister ft, Address address); 323 FaultingCodeOffset ma_fld_d(FloatRegister ft, Address address); 324 FaultingCodeOffset ma_fst_d(FloatRegister ft, Address address); 325 FaultingCodeOffset ma_fst_s(FloatRegister ft, Address address); 326 327 // stack 328 void ma_pop(Register r); 329 void ma_push(Register r); 330 void ma_pop(FloatRegister f); 331 void ma_push(FloatRegister f); 332 333 Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c); 334 Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c); 335 void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c); 336 void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c); 337 void ma_cmp_set(Register dst, Register lhs, ImmGCPtr imm, Condition c); 338 void ma_cmp_set(Register dst, Address address, Register rhs, Condition c); 339 void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c); 340 void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c); 341 342 void ma_rotr_w(Register rd, Register rj, Imm32 shift); 343 344 void ma_fmovz(FloatFormat fmt, FloatRegister fd, FloatRegister fj, 345 Register rk); 346 void ma_fmovn(FloatFormat fmt, FloatRegister fd, FloatRegister fj, 347 Register rk); 348 349 // arithmetic based ops 350 void ma_add32TestCarry(Condition cond, Register rd, Register rj, Register rk, 351 Label* overflow); 352 void ma_add32TestCarry(Condition cond, Register rd, Register rj, Imm32 imm, 353 Label* overflow); 354 355 // subtract 356 void ma_sub32TestOverflow(Register rd, Register rj, Imm32 imm, 357 Label* overflow); 358 359 // multiplies. For now, there are only few that we care about. 
360 void ma_mul32TestOverflow(Register rd, Register rj, Register rk, 361 Label* overflow); 362 void ma_mul32TestOverflow(Register rd, Register rj, Imm32 imm, 363 Label* overflow); 364 365 // fast mod, uses scratch registers, and thus needs to be in the assembler 366 // implicitly assumes that we can overwrite dest at the beginning of the 367 // sequence 368 void ma_mod_mask(Register src, Register dest, Register hold, Register remain, 369 int32_t shift, Label* negZero = nullptr); 370 371 // FP branches 372 void ma_compareF32(Register rd, DoubleCondition cc, FloatRegister cmp1, 373 FloatRegister cmp2); 374 void ma_compareF64(Register rd, DoubleCondition cc, FloatRegister cmp1, 375 FloatRegister cmp2); 376 377 void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2); 378 void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2); 379 void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2); 380 void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2); 381 382 void ma_call(ImmPtr dest); 383 384 void ma_jump(ImmPtr dest); 385 386 void jump(Label* label) { ma_branch(label); } 387 void jump(Register reg) { jr(reg); } 388 389 void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c); 390 void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c); 391 392 void computeScaledAddress(const BaseIndex& address, Register dest); 393 void computeScaledAddress32(const BaseIndex& address, Register dest); 394 395 void BranchShort(Label* L); 396 397 [[nodiscard]] bool BranchShort(int32_t offset, Condition cond, Register rs, 398 const Operand& rt); 399 [[nodiscard]] bool BranchShort(Label* L, Condition cond, Register rs, 400 const Operand& rt); 401 void BranchShortHelper(int32_t offset, Label* L); 402 bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, 403 const Operand& rt); 404 bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, 405 const Operand& rt); 406 
void BranchLong(Label* L); 407 408 // Floating point branches 409 void BranchFloat32(DoubleCondition cc, FloatRegister frs1, FloatRegister frs2, 410 Label* label, JumpKind jumpKind); 411 void BranchFloat64(DoubleCondition cc, FloatRegister frs1, FloatRegister frs2, 412 Label* label, JumpKind jumpKind); 413 414 void moveFromDoubleHi(FloatRegister src, Register dest) { 415 fmv_x_d(dest, src); 416 srli(dest, dest, 32); 417 } 418 // Bit field starts at bit pos and extending for size bits is extracted from 419 // rs and stored zero/sign-extended and right-justified in rt 420 void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size, 421 bool sign_extend = false); 422 void ExtractBits(Register dest, Register source, Register pos, int size, 423 bool sign_extend = false) { 424 sra(dest, source, pos); 425 ExtractBits(dest, dest, 0, size, sign_extend); 426 } 427 428 // Insert bits [0, size) of source to bits [pos, pos+size) of dest 429 void InsertBits(Register dest, Register source, Register pos, int size); 430 431 // Insert bits [0, size) of source to bits [pos, pos+size) of dest 432 void InsertBits(Register dest, Register source, int pos, int size); 433 434 template <typename F_TYPE> 435 void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch, 436 FPURoundingMode mode); 437 438 template <typename TruncFunc> 439 void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, 440 TruncFunc trunc, bool Inexact = false); 441 442 void Clear_if_nan_d(Register rd, FPURegister fs); 443 void Clear_if_nan_s(Register rd, FPURegister fs); 444 445 // Convert double to unsigned word. 446 void Trunc_uw_d(Register rd, FPURegister fs, Register result = InvalidReg, 447 bool Inexact = false); 448 449 // Convert double to signed word. 450 void Trunc_w_d(Register rd, FPURegister fs, Register result = InvalidReg, 451 bool Inexact = false); 452 453 // Convert double to unsigned long. 
454 void Trunc_ul_d(Register rd, FPURegister fs, Register result = InvalidReg, 455 bool Inexact = false); 456 457 // Convert single to signed long. 458 void Trunc_l_d(Register rd, FPURegister fs, Register result = InvalidReg, 459 bool Inexact = false); 460 461 // Convert single to signed word. 462 void Trunc_w_s(Register rd, FPURegister fs, Register result = InvalidReg, 463 bool Inexact = false); 464 465 // Convert single to unsigned word. 466 void Trunc_uw_s(Register rd, FPURegister fs, Register result = InvalidReg, 467 bool Inexact = false); 468 469 // Convert single to unsigned long. 470 void Trunc_ul_s(Register rd, FPURegister fs, Register result = InvalidReg, 471 bool Inexact = false); 472 473 // Convert single to signed long. 474 void Trunc_l_s(Register rd, FPURegister fs, Register result = InvalidReg, 475 bool Inexact = false); 476 477 // Round double functions 478 void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 479 void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 480 void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 481 void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 482 483 // Round float functions 484 void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 485 void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 486 void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 487 void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); 488 489 // Round single to signed word. 490 void Round_w_s(Register rd, FPURegister fs, Register result = InvalidReg, 491 bool Inexact = false); 492 493 // Round double to signed word. 494 void Round_w_d(Register rd, FPURegister fs, Register result = InvalidReg, 495 bool Inexact = false); 496 497 // Ceil single to signed word. 498 void Ceil_w_s(Register rd, FPURegister fs, Register result = InvalidReg, 499 bool Inexact = false); 500 501 // Ceil double to signed word. 
502 void Ceil_w_d(Register rd, FPURegister fs, Register result = InvalidReg, 503 bool Inexact = false); 504 505 // Ceil single to signed long. 506 void Ceil_l_s(Register rd, FPURegister fs, Register result = InvalidReg, 507 bool Inexact = false); 508 509 // Ceil double to signed long. 510 void Ceil_l_d(Register rd, FPURegister fs, Register result = InvalidReg, 511 bool Inexact = false); 512 513 // Floor single to signed word. 514 void Floor_w_s(Register rd, FPURegister fs, Register result = InvalidReg, 515 bool Inexact = false); 516 517 // Floor double to signed word. 518 void Floor_w_d(Register rd, FPURegister fs, Register result = InvalidReg, 519 bool Inexact = false); 520 521 // Floor single to signed long. 522 void Floor_l_s(Register rd, FPURegister fs, Register result = InvalidReg, 523 bool Inexact = false); 524 525 // Floor double to signed long. 526 void Floor_l_d(Register rd, FPURegister fs, Register result = InvalidReg, 527 bool Inexact = false); 528 529 // Round single to signed long, ties to max magnitude (or away from zero). 530 void RoundMaxMag_l_s(Register rd, FPURegister fs, 531 Register result = InvalidReg, bool Inexact = false); 532 533 // Round double to signed long, ties to max magnitude (or away from zero). 
534 void RoundMaxMag_l_d(Register rd, FPURegister fs, 535 Register result = InvalidReg, bool Inexact = false); 536 537 void Clz32(Register rd, Register rs); 538 void Ctz32(Register rd, Register rs); 539 void Popcnt32(Register rd, Register rs, Register scratch); 540 541 void Popcnt64(Register rd, Register rs, Register scratch); 542 void Ctz64(Register rd, Register rs); 543 void Clz64(Register rd, Register rs); 544 545 // Change endianness 546 void ByteSwap(Register dest, Register src, int operand_size, 547 Register scratch); 548 549 void Rol(Register rd, Register rs, const Operand& rt); 550 void Drol(Register rd, Register rs, const Operand& rt); 551 552 void Ror(Register rd, Register rs, const Operand& rt); 553 void Dror(Register rd, Register rs, const Operand& rt); 554 555 void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2); 556 void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2); 557 void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2); 558 void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2); 559 560 template <typename F> 561 void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2, 562 MaxMinKind kind); 563 564 inline void NegateBool(Register rd, Register rs) { xori(rd, rs, 1); } 565 566 protected: 567 void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase, 568 Register ptr, Register ptrScratch, AnyRegister output, 569 Register tmp); 570 void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value, 571 Register memoryBase, Register ptr, Register ptrScratch, 572 Register tmp); 573 }; 574 575 class MacroAssemblerRiscv64Compat : public MacroAssemblerRiscv64 { 576 public: 577 using MacroAssemblerRiscv64::call; 578 579 MacroAssemblerRiscv64Compat() {} 580 581 void convertBoolToInt32(Register src, Register dest) { 582 ma_and(dest, src, Imm32(0xff)); 583 }; 584 void convertInt32ToDouble(Register src, FloatRegister dest) { 585 fcvt_d_w(dest, src); 586 
}; 587 void convertInt32ToDouble(const Address& src, FloatRegister dest) { 588 UseScratchRegisterScope temps(this); 589 Register scratch = temps.Acquire(); 590 ma_load(scratch, src, SizeWord, SignExtend); 591 fcvt_d_w(dest, scratch); 592 }; 593 void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) { 594 UseScratchRegisterScope temps(this); 595 Register scratch = temps.Acquire(); 596 MOZ_ASSERT(scratch != src.base); 597 MOZ_ASSERT(scratch != src.index); 598 computeScaledAddress(src, scratch); 599 convertInt32ToDouble(Address(scratch, src.offset), dest); 600 }; 601 void convertUInt32ToDouble(Register src, FloatRegister dest); 602 void convertUInt32ToFloat32(Register src, FloatRegister dest); 603 void convertDoubleToFloat32(FloatRegister src, FloatRegister dest); 604 void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail, 605 bool negativeZeroCheck = true); 606 void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail, 607 bool negativeZeroCheck = true); 608 void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail, 609 bool negativeZeroCheck = true); 610 611 void convertFloat32ToDouble(FloatRegister src, FloatRegister dest); 612 void convertInt32ToFloat32(Register src, FloatRegister dest); 613 void convertInt32ToFloat32(const Address& src, FloatRegister dest); 614 615 void convertDoubleToFloat16(FloatRegister src, FloatRegister dest) { 616 MOZ_CRASH("Not supported for this target"); 617 } 618 void convertFloat16ToDouble(FloatRegister src, FloatRegister dest) { 619 MOZ_CRASH("Not supported for this target"); 620 } 621 void convertFloat32ToFloat16(FloatRegister src, FloatRegister dest) { 622 MOZ_CRASH("Not supported for this target"); 623 } 624 void convertFloat16ToFloat32(FloatRegister src, FloatRegister dest) { 625 MOZ_CRASH("Not supported for this target"); 626 } 627 void convertInt32ToFloat16(Register src, FloatRegister dest) { 628 MOZ_CRASH("Not supported for this target"); 629 } 630 631 void 
truncateFloat32ModUint32(FloatRegister src, Register dest); 632 633 void computeEffectiveAddress(const Address& address, Register dest) { 634 ma_add64(dest, address.base, Imm32(address.offset)); 635 } 636 637 void computeEffectiveAddress(const BaseIndex& address, Register dest) { 638 computeScaledAddress(address, dest); 639 if (address.offset) { 640 ma_add64(dest, dest, Imm32(address.offset)); 641 } 642 } 643 644 void computeEffectiveAddress32(const Address& address, Register dest) { 645 ma_add32(dest, address.base, Imm32(address.offset)); 646 } 647 648 void computeEffectiveAddress32(const BaseIndex& address, Register dest) { 649 computeScaledAddress32(address, dest); 650 if (address.offset) { 651 ma_add32(dest, dest, Imm32(address.offset)); 652 } 653 } 654 655 void j(Label* dest) { ma_branch(dest); } 656 657 void mov(Register src, Register dest) { addi(dest, src, 0); } 658 void mov(ImmWord imm, Register dest) { ma_li(dest, imm); } 659 void mov(ImmPtr imm, Register dest) { 660 mov(ImmWord(uintptr_t(imm.value)), dest); 661 } 662 void mov(CodeLabel* label, Register dest) { ma_li(dest, label); } 663 void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); } 664 void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); } 665 666 void writeDataRelocation(const Value& val) { 667 MOZ_ASSERT(val.isGCThing(), "only called for gc-things"); 668 669 // Raw GC pointer relocations and Value relocations both end up in 670 // TraceOneDataRelocation. 
671 gc::Cell* cell = val.toGCThing(); 672 if (cell && gc::IsInsideNursery(cell)) { 673 embedsNurseryPointers_ = true; 674 } 675 dataRelocations_.writeUnsigned(currentOffset()); 676 } 677 678 void branch(JitCode* c) { 679 BlockTrampolinePoolScope block_trampoline_pool(this, 7); 680 UseScratchRegisterScope temps(this); 681 Register scratch = temps.Acquire(); 682 BufferOffset bo = m_buffer.nextOffset(); 683 addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE); 684 ma_liPatchable(scratch, ImmPtr(c->raw())); 685 jr(scratch); 686 } 687 void branch(const Register reg) { jr(reg); } 688 BufferOffset ret() { 689 ma_pop(ra); 690 return jalr(zero_reg, ra, 0); 691 } 692 inline void retn(Imm32 n); 693 void push(Imm32 imm) { 694 UseScratchRegisterScope temps(this); 695 Register scratch = temps.Acquire(); 696 ma_li(scratch, imm); 697 ma_push(scratch); 698 } 699 void push(ImmWord imm) { 700 UseScratchRegisterScope temps(this); 701 Register scratch = temps.Acquire(); 702 ma_li(scratch, imm); 703 ma_push(scratch); 704 } 705 void push(ImmGCPtr imm) { 706 UseScratchRegisterScope temps(this); 707 Register scratch = temps.Acquire(); 708 ma_li(scratch, imm); 709 ma_push(scratch); 710 } 711 void push(const Address& address) { 712 UseScratchRegisterScope temps(this); 713 Register scratch = temps.Acquire(); 714 loadPtr(address, scratch); 715 ma_push(scratch); 716 } 717 void push(Register reg) { ma_push(reg); } 718 void push(FloatRegister reg) { ma_push(reg); } 719 void pop(Register reg) { ma_pop(reg); } 720 void pop(FloatRegister reg) { ma_pop(reg); } 721 722 // Emit a branch that can be toggled to a non-operation. On LOONG64 we use 723 // "andi" instruction to toggle the branch. 724 // See ToggleToJmp(), ToggleToCmp(). 725 CodeOffset toggledJump(Label* label); 726 727 // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch 728 // this instruction. 
729 CodeOffset toggledCall(JitCode* target, bool enabled); 730 731 static size_t ToggledCallSize(uint8_t* code) { 732 // Four instructions used in: MacroAssemblerRiscv64Compat::toggledCall 733 return 7 * sizeof(uint32_t); 734 } 735 736 CodeOffset pushWithPatch(ImmWord imm) { 737 UseScratchRegisterScope temps(this); 738 Register scratch = temps.Acquire(); 739 CodeOffset offset = movWithPatch(imm, scratch); 740 ma_push(scratch); 741 return offset; 742 } 743 744 CodeOffset movWithPatch(ImmWord imm, Register dest) { 745 BlockTrampolinePoolScope block_trampoline_pool(this, 8); 746 CodeOffset offset = CodeOffset(currentOffset()); 747 ma_liPatchable(dest, imm, Li64); 748 return offset; 749 } 750 CodeOffset movWithPatch(ImmPtr imm, Register dest) { 751 BlockTrampolinePoolScope block_trampoline_pool(this, 6); 752 CodeOffset offset = CodeOffset(currentOffset()); 753 ma_liPatchable(dest, imm); 754 return offset; 755 } 756 757 void writeCodePointer(CodeLabel* label) { 758 label->patchAt()->bind(currentOffset()); 759 label->setLinkMode(CodeLabel::RawPointer); 760 m_buffer.ensureSpace(sizeof(void*)); 761 emit(uint32_t(-1)); 762 emit(uint32_t(-1)); 763 } 764 765 void jump(Label* label) { ma_branch(label); } 766 void jump(Register reg) { jr(reg); } 767 void jump(const Address& address) { 768 UseScratchRegisterScope temps(this); 769 Register scratch = temps.Acquire(); 770 loadPtr(address, scratch); 771 jr(scratch); 772 } 773 774 void jump(JitCode* code) { branch(code); } 775 776 void jump(ImmPtr ptr) { 777 BufferOffset bo = m_buffer.nextOffset(); 778 addPendingJump(bo, ptr, RelocationKind::HARDCODED); 779 ma_jump(ptr); 780 } 781 782 void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); } 783 784 void splitTag(Register src, Register dest) { 785 srli(dest, src, JSVAL_TAG_SHIFT); 786 } 787 788 void splitTag(const ValueOperand& operand, Register dest) { 789 splitTag(operand.valueReg(), dest); 790 } 791 792 void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) { 793 
    splitTag(value, tag);
  }

  // dst <- src when cond == 0; dst is left untouched otherwise.
  // Synthesized as a branch-over-move sequence.
  void moveIfZero(Register dst, Register src, Register cond) {
    Label done;
    ma_branch(&done, NotEqual, cond, zero);
    mv(dst, src);
    bind(&done);
  }

  // dst <- src when cond != 0; dst is left untouched otherwise.
  void moveIfNotZero(Register dst, Register src, Register cond) {
    Label done;
    ma_branch(&done, Equal, cond, zero);
    mv(dst, src);
    bind(&done);
  }

  // unboxing code

  // Extract the payload of a non-double Value into |dest|.
  void unboxNonDouble(const ValueOperand& operand, Register dest,
                      JSValueType type) {
    unboxNonDouble(operand.valueReg(), dest, type);
  }

  // Memory-operand flavor. Int32/boolean payloads fit in the low 32 bits,
  // so a 32-bit load suffices; other types load the full word and strip
  // the tag via the register overload below.
  template <typename T>
  void unboxNonDouble(T src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      load32(src, dest);
      return;
    }
    loadPtr(src, dest);
    unboxNonDouble(dest, dest, type);
  }

  // Register flavor: int32/boolean are sign-extended to the full register;
  // other payloads have their tag xor-ed away.
  void unboxNonDouble(Register src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      SignExtendWord(dest, src);
      return;
    }
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(scratch != src);
    // xor with the shifted tag of |type| clears the tag bits, leaving the
    // payload in |dest|.
    mov(ImmShiftedTag(type), scratch);
    xor_(dest, src, scratch);
  }

  // Extract the GC-thing payload (the low JSVAL_TAG_SHIFT bits) of a Value
  // for the GC barrier code.
  void unboxGCThingForGCBarrier(const Address& src, Register dest) {
    loadPtr(src, dest);
    ExtractBits(dest, dest, 0, JSVAL_TAG_SHIFT);
  }
  void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
    ExtractBits(dest, src.valueReg(), 0, JSVAL_TAG_SHIFT);
  }

  // Mask a wasm AnyRef loaded from |src| down to its GC-thing pointer bits.
  void unboxWasmAnyRefGCThingForGCBarrier(const Address& src, Register dest) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(scratch != dest);
    movePtr(ImmWord(wasm::AnyRef::GCThingMask), scratch);
    loadPtr(src, dest);
    ma_and(dest, dest, scratch);
  }

  // Load the chunk base of the GC thing referenced by the wasm AnyRef in
  // |src| by masking with GCThingChunkMask.
  void getWasmAnyRefGCThingChunk(Register src, Register dest) {
    MOZ_ASSERT(src != dest);
    movePtr(ImmWord(wasm::AnyRef::GCThingChunkMask), dest);
    ma_and(dest, dest, src);
  }

  // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
  void getGCThingValueChunk(const Address& src, Register dest) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(scratch != dest);
    loadPtr(src, dest);
    movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch);
    and_(dest, dest, scratch);
  }
  void getGCThingValueChunk(const ValueOperand& src, Register dest) {
    MOZ_ASSERT(src.valueReg() != dest);
    movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
    and_(dest, dest, src.valueReg());
  }

  // Per-type unboxers; out-of-line definitions live in the .cpp file.
  void unboxInt32(const ValueOperand& operand, Register dest);
  void unboxInt32(Register src, Register dest);
  void unboxInt32(const Address& src, Register dest);
  void unboxInt32(const BaseIndex& src, Register dest);
  void unboxBoolean(const ValueOperand& operand, Register dest);
  void unboxBoolean(Register src, Register dest);
  void unboxBoolean(const Address& src, Register dest);
  void unboxBoolean(const BaseIndex& src, Register dest);
  void unboxDouble(const ValueOperand& operand, FloatRegister dest);
  void unboxDouble(Register src, Register dest);
  void unboxDouble(const Address& src, FloatRegister dest);
  void unboxDouble(const BaseIndex& src, FloatRegister dest);
  void unboxString(const ValueOperand& operand, Register dest);
  void unboxString(Register src, Register dest);
  void unboxString(const Address& src, Register dest);
  void unboxSymbol(const ValueOperand& src, Register dest);
  void unboxSymbol(Register src, Register dest);
  void unboxSymbol(const Address& src, Register dest);
  void unboxBigInt(const ValueOperand& operand, Register dest);
  void unboxBigInt(Register src, Register dest);
  void unboxBigInt(const Address& src, Register dest);
  void unboxObject(const ValueOperand& src, Register dest);
  void unboxObject(Register src, Register dest);
  void unboxObject(const Address& src, Register dest);
  void unboxObject(const BaseIndex& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);

  // Flip the low bit of a boolean Value in place (true <-> false).
  void notBoolean(const ValueOperand& val) {
    xori(val.valueReg(), val.valueReg(), 1);
  }

  // boxing code
  void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
  void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
    boxValue(type, src, dest.valueReg());
  }
  void boxNonDouble(Register type, Register src, const ValueOperand& dest) {
    boxValue(type, src, dest.valueReg());
  }

  // Extended unboxing API. If the payload is already in a register, returns
  // that register. Otherwise, provides a move to the given scratch register,
  // and returns that.
  [[nodiscard]] Register extractObject(const Address& address,
                                       Register scratch);
  [[nodiscard]] Register extractObject(const ValueOperand& value,
                                       Register scratch) {
    unboxObject(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractString(const ValueOperand& value,
                                       Register scratch) {
    unboxString(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractSymbol(const ValueOperand& value,
                                       Register scratch) {
    unboxSymbol(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractInt32(const ValueOperand& value,
                                      Register scratch) {
    unboxInt32(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractBoolean(const ValueOperand& value,
                                        Register scratch) {
    unboxBoolean(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractTag(const Address& address, Register scratch);
  [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
  [[nodiscard]] Register extractTag(const ValueOperand& value,
                                    Register scratch) {
    splitTag(value, scratch);
    return scratch;
  }

  void loadInt32OrDouble(const Address& src, FloatRegister dest);
  void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
  void loadConstantDouble(double dp, FloatRegister dest);
  void loadConstantFloat32(float f, FloatRegister dest);

  void testNullSet(Condition cond, const ValueOperand& value, Register dest);

  void testObjectSet(Condition cond, const ValueOperand& value, Register dest);

  void testUndefinedSet(Condition cond, const ValueOperand& value,
                        Register dest);

  // higher level tag testing code

  // A boxed Value is a single 64-bit word here, so the payload address is
  // the Value address itself.
  Address ToPayload(Address value) { return value; }

  // Load a Value's payload into |dest|: int32/double go through the FPR
  // path, every other non-double payload through the GPR path.
  template <typename T>
  void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      loadInt32OrDouble(address, dest.fpu());
    } else {
      unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
    }
  }

  void boxValue(JSValueType type, Register src, Register dest);
  void boxValue(Register type, Register src, Register dest);

  void storeValue(ValueOperand val, const Address& dest);
  void storeValue(ValueOperand val, const BaseIndex& dest);
  void storeValue(JSValueType type, Register reg, Address dest);
  void storeValue(JSValueType type, Register reg, BaseIndex dest);
  void storeValue(const Value& val, Address dest);
  void storeValue(const Value& val, BaseIndex dest);
  // Memory-to-memory Value copy, staged through |temp|.
  void storeValue(const Address& src, const Address& dest, Register temp) {
    loadPtr(src, temp);
    storePtr(temp, dest);
  }

  // Private values are stored as plain pointers; no tag manipulation.
  void storePrivateValue(Register src, const Address& dest) {
    storePtr(src, dest);
  }
  void storePrivateValue(ImmGCPtr imm, const Address& dest) {
    storePtr(imm, dest);
  }

  void loadValue(Address src, ValueOperand val);
  void loadValue(const BaseIndex& src, ValueOperand val);

  // Delegates to the ordinary loadValue. NOTE(review): assumes an unaligned
  // Value load needs no special handling on this target -- confirm.
  void loadUnalignedValue(const Address& src, ValueOperand dest) {
    loadValue(src, dest);
  }

  void tagValue(JSValueType type, Register payload, ValueOperand dest);

  void pushValue(ValueOperand val);
  void popValue(ValueOperand val);
  // Push a constant Value. GC things get a patchable move plus a data
  // relocation entry so the GC can update the embedded pointer.
  void pushValue(const Value& val) {
    if (val.isGCThing()) {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      writeDataRelocation(val);
      movWithPatch(ImmWord(val.asRawBits()), scratch);
      push(scratch);
    } else {
      push(ImmWord(val.asRawBits()));
    }
  }
  // Box |reg| with |type| into a scratch register, then push it.
  void pushValue(JSValueType type, Register reg) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    boxValue(type, reg, scratch);
    push(scratch);
  }
  void pushValue(const Address& addr);
  // |scratch| is clobbered with the loaded Value before the push.
  void pushValue(const BaseIndex& addr, Register scratch) {
    loadValue(addr, ValueOperand(scratch));
    pushValue(ValueOperand(scratch));
  }

  void handleFailureWithHandlerTail(Label* profilerExitTail, Label* bailoutTail,
                                    uint32_t* returnValueCheckOffset);

  /////////////////////////////////////////////////////////////////
  // Common interface.
  /////////////////////////////////////////////////////////////////
 public:
  // The following functions are exposed for use in platform-shared code.
1044 1045 inline void incrementInt32Value(const Address& addr); 1046 1047 void move32(Imm32 imm, Register dest); 1048 void move32(Register src, Register dest); 1049 1050 void movePtr(Register src, Register dest); 1051 void movePtr(ImmWord imm, Register dest); 1052 void movePtr(ImmPtr imm, Register dest); 1053 void movePtr(wasm::SymbolicAddress imm, Register dest); 1054 void movePtr(ImmGCPtr imm, Register dest); 1055 1056 FaultingCodeOffset load8SignExtend(const Address& address, Register dest); 1057 FaultingCodeOffset load8SignExtend(const BaseIndex& src, Register dest); 1058 1059 FaultingCodeOffset load8ZeroExtend(const Address& address, Register dest); 1060 FaultingCodeOffset load8ZeroExtend(const BaseIndex& src, Register dest); 1061 1062 FaultingCodeOffset load16SignExtend(const Address& address, Register dest); 1063 FaultingCodeOffset load16SignExtend(const BaseIndex& src, Register dest); 1064 1065 template <typename S> 1066 void load16UnalignedSignExtend(const S& src, Register dest) { 1067 load16SignExtend(src, dest); 1068 } 1069 1070 FaultingCodeOffset load16ZeroExtend(const Address& address, Register dest); 1071 FaultingCodeOffset load16ZeroExtend(const BaseIndex& src, Register dest); 1072 1073 void SignExtendByte(Register rd, Register rs) { 1074 if (HasZbbExtension()) { 1075 sext_b(rd, rs); 1076 return; 1077 } 1078 slli(rd, rs, xlen - 8); 1079 srai(rd, rd, xlen - 8); 1080 } 1081 1082 void SignExtendShort(Register rd, Register rs) { 1083 if (HasZbbExtension()) { 1084 sext_h(rd, rs); 1085 return; 1086 } 1087 slli(rd, rs, xlen - 16); 1088 srai(rd, rd, xlen - 16); 1089 } 1090 1091 void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); } 1092 void ZeroExtendWord(Register rd, Register rs) { 1093 if (HasZbaExtension()) { 1094 zext_w(rd, rs); 1095 return; 1096 } 1097 slli(rd, rs, 32); 1098 srli(rd, rd, 32); 1099 } 1100 1101 template <typename S> 1102 void load16UnalignedZeroExtend(const S& src, Register dest) { 1103 load16ZeroExtend(src, dest); 1104 } 1105 
  FaultingCodeOffset load32(const Address& address, Register dest);
  FaultingCodeOffset load32(const BaseIndex& address, Register dest);
  FaultingCodeOffset load32(AbsoluteAddress address, Register dest);
  FaultingCodeOffset load32(wasm::SymbolicAddress address, Register dest);

  // Unaligned variant simply forwards to the aligned implementation.
  template <typename S>
  void load32Unaligned(const S& src, Register dest) {
    load32(src, dest);
  }

  // 64-bit loads are plain pointer-width loads on this 64-bit target.
  FaultingCodeOffset load64(const Address& address, Register64 dest) {
    return loadPtr(address, dest.reg);
  }
  FaultingCodeOffset load64(const BaseIndex& address, Register64 dest) {
    return loadPtr(address, dest.reg);
  }

  FaultingCodeOffset loadDouble(const Address& addr, FloatRegister dest) {
    return ma_loadDouble(dest, addr);
  }
  // BaseIndex form: materialize the effective address into a scratch
  // register, then load from offset 0. The FaultingCodeOffset is captured
  // immediately before the fld so it names the potentially-faulting
  // memory instruction, not the address computation.
  FaultingCodeOffset loadDouble(const BaseIndex& src, FloatRegister dest) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    computeEffectiveAddress(src, scratch);
    FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
    fld(dest, scratch, 0);
    return fco;
  }

  FaultingCodeOffset loadFloat32(const Address& addr, FloatRegister dest) {
    return ma_loadFloat(dest, addr);
  }

  // Same pattern as loadDouble(BaseIndex): the offset recorded is that of
  // the flw itself.
  FaultingCodeOffset loadFloat32(const BaseIndex& src, FloatRegister dest) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    computeEffectiveAddress(src, scratch);
    FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
    flw(dest, scratch, 0);
    return fco;
  }

  // Float16 loads are not implemented for riscv64.
  FaultingCodeOffset loadFloat16(const Address& addr, FloatRegister dest,
                                 Register) {
    MOZ_CRASH("Not supported for this target");
  }
  FaultingCodeOffset loadFloat16(const BaseIndex& src, FloatRegister dest,
                                 Register) {
    MOZ_CRASH("Not supported for this target");
  }

  // Unaligned variant simply forwards to the aligned implementation.
  template <typename S>
  FaultingCodeOffset load64Unaligned(const S& src, Register64 dest) {
    return load64(src, dest);
  }

  FaultingCodeOffset loadPtr(const Address& address, Register dest);
  FaultingCodeOffset loadPtr(const BaseIndex& src, Register dest);
  FaultingCodeOffset loadPtr(AbsoluteAddress address, Register dest);
  FaultingCodeOffset loadPtr(wasm::SymbolicAddress address, Register dest);

  FaultingCodeOffset loadPrivate(const Address& address, Register dest);

  FaultingCodeOffset store8(Register src, const Address& address);
  FaultingCodeOffset store8(Imm32 imm, const Address& address);
  FaultingCodeOffset store8(Register src, const BaseIndex& address);
  FaultingCodeOffset store8(Imm32 imm, const BaseIndex& address);

  FaultingCodeOffset store16(Register src, const Address& address);
  FaultingCodeOffset store16(Imm32 imm, const Address& address);
  FaultingCodeOffset store16(Register src, const BaseIndex& address);
  FaultingCodeOffset store16(Imm32 imm, const BaseIndex& address);

  // Unaligned variant simply forwards to the aligned implementation.
  template <typename T>
  FaultingCodeOffset store16Unaligned(Register src, const T& dest) {
    return store16(src, dest);
  }

  FaultingCodeOffset store32(Register src, AbsoluteAddress address);
  FaultingCodeOffset store32(Register src, const Address& address);
  FaultingCodeOffset store32(Register src, const BaseIndex& address);
  FaultingCodeOffset store32(Imm32 src, const Address& address);
  FaultingCodeOffset store32(Imm32 src, const BaseIndex& address);

  // NOTE: This will use second scratch on LOONG64. Only ARM needs the
  // implementation without second scratch.
1192 void store32_NoSecondScratch(Imm32 src, const Address& address) { 1193 store32(src, address); 1194 } 1195 1196 template <typename T> 1197 void store32Unaligned(Register src, const T& dest) { 1198 store32(src, dest); 1199 } 1200 1201 FaultingCodeOffset store64(Imm64 imm, Address address) { 1202 return storePtr(ImmWord(imm.value), address); 1203 } 1204 FaultingCodeOffset store64(Imm64 imm, const BaseIndex& address) { 1205 return storePtr(ImmWord(imm.value), address); 1206 } 1207 1208 FaultingCodeOffset store64(Register64 src, Address address) { 1209 return storePtr(src.reg, address); 1210 } 1211 FaultingCodeOffset store64(Register64 src, const BaseIndex& address) { 1212 return storePtr(src.reg, address); 1213 } 1214 1215 template <typename T> 1216 FaultingCodeOffset store64Unaligned(Register64 src, const T& dest) { 1217 return store64(src, dest); 1218 } 1219 1220 template <typename T> 1221 FaultingCodeOffset storePtr(ImmWord imm, T address); 1222 template <typename T> 1223 FaultingCodeOffset storePtr(ImmPtr imm, T address); 1224 template <typename T> 1225 FaultingCodeOffset storePtr(ImmGCPtr imm, T address); 1226 FaultingCodeOffset storePtr(Register src, const Address& address); 1227 FaultingCodeOffset storePtr(Register src, const BaseIndex& address); 1228 FaultingCodeOffset storePtr(Register src, AbsoluteAddress dest); 1229 1230 void moveDouble(FloatRegister src, FloatRegister dest) { fmv_d(dest, src); } 1231 1232 void zeroDouble(FloatRegister reg) { fmv_d_x(reg, zero); } 1233 1234 void convertUInt64ToDouble(Register src, FloatRegister dest); 1235 1236 void breakpoint(uint32_t value = 0); 1237 1238 void checkStackAlignment() { 1239 #ifdef DEBUG 1240 Label aligned; 1241 UseScratchRegisterScope temps(this); 1242 Register scratch = temps.Acquire(); 1243 andi(scratch, sp, ABIStackAlignment - 1); 1244 ma_b(scratch, zero, &aligned, Equal, ShortJump); 1245 breakpoint(); 1246 bind(&aligned); 1247 #endif 1248 }; 1249 1250 static void calculateAlignedStackPointer(void** 
stackPointer); 1251 1252 void minMax32(Register lhs, Register rhs, Register dest, bool isMax); 1253 void minMax32(Register lhs, Imm32 rhs, Register dest, bool isMax); 1254 1255 void minMaxPtr(Register lhs, Register rhs, Register dest, bool isMax); 1256 void minMaxPtr(Register lhs, ImmWord rhs, Register dest, bool isMax); 1257 1258 void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs, 1259 Register dest); 1260 void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs, 1261 Register dest); 1262 void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs, 1263 Register dest); 1264 1265 void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs, 1266 Register dest); 1267 1268 protected: 1269 bool buildOOLFakeExitFrame(void* fakeReturnAddr); 1270 1271 void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access, 1272 Register memoryBase, Register ptr, Register ptrScratch, 1273 Register64 output, Register tmp); 1274 void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value, 1275 Register memoryBase, Register ptr, Register ptrScratch, 1276 Register tmp); 1277 1278 public: 1279 void abiret() { jr(ra); } 1280 1281 void moveFloat32(FloatRegister src, FloatRegister dest) { fmv_s(dest, src); } 1282 1283 // Instrumentation for entering and leaving the profiler. 1284 void profilerEnterFrame(Register framePtr, Register scratch); 1285 void profilerExitFrame(); 1286 }; 1287 1288 typedef MacroAssemblerRiscv64Compat MacroAssemblerSpecific; 1289 1290 } // namespace jit 1291 } // namespace js 1292 1293 #endif /* jit_riscv64_MacroAssembler_riscv64_h */