MacroAssembler-vixl.h (91787B)
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
26 27 #ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_ 28 #define VIXL_A64_MACRO_ASSEMBLER_A64_H_ 29 30 #include <algorithm> 31 #include <limits> 32 33 #include "jit/arm64/Assembler-arm64.h" 34 #include "jit/arm64/vixl/Debugger-vixl.h" 35 #include "jit/arm64/vixl/Globals-vixl.h" 36 #include "jit/arm64/vixl/Instrument-vixl.h" 37 #include "jit/arm64/vixl/Simulator-Constants-vixl.h" 38 39 #define LS_MACRO_LIST(V) \ 40 V(Ldrb, Register&, rt, LDRB_w) \ 41 V(Strb, Register&, rt, STRB_w) \ 42 V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \ 43 V(Ldrh, Register&, rt, LDRH_w) \ 44 V(Strh, Register&, rt, STRH_w) \ 45 V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \ 46 V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \ 47 V(Str, CPURegister&, rt, StoreOpFor(rt)) \ 48 V(Ldrsw, Register&, rt, LDRSW_x) 49 50 51 #define LSPAIR_MACRO_LIST(V) \ 52 V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \ 53 V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \ 54 V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x) 55 56 namespace vixl { 57 58 // Forward declaration 59 class MacroAssembler; 60 class UseScratchRegisterScope; 61 62 // This scope has the following purposes: 63 // * Acquire/Release the underlying assembler's code buffer. 64 // * This is mandatory before emitting. 65 // * Emit the literal or veneer pools if necessary before emitting the 66 // macro-instruction. 67 // * Ensure there is enough space to emit the macro-instruction. 68 class EmissionCheckScope { 69 public: 70 EmissionCheckScope(MacroAssembler* masm, size_t size) 71 : masm_(masm) 72 { } 73 74 protected: 75 MacroAssembler* masm_; 76 #ifdef DEBUG 77 Label start_; 78 size_t size_; 79 #endif 80 }; 81 82 83 // Helper for common Emission checks. 84 // The macro-instruction maps to a single instruction. 
85 class SingleEmissionCheckScope : public EmissionCheckScope { 86 public: 87 explicit SingleEmissionCheckScope(MacroAssembler* masm) 88 : EmissionCheckScope(masm, kInstructionSize) {} 89 }; 90 91 92 // The macro instruction is a "typical" macro-instruction. Typical macro- 93 // instruction only emit a few instructions, a few being defined as 8 here. 94 class MacroEmissionCheckScope : public EmissionCheckScope { 95 public: 96 explicit MacroEmissionCheckScope(MacroAssembler* masm) 97 : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {} 98 99 private: 100 static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize; 101 }; 102 103 104 enum BranchType { 105 // Copies of architectural conditions. 106 // The associated conditions can be used in place of those, the code will 107 // take care of reinterpreting them with the correct type. 108 integer_eq = eq, 109 integer_ne = ne, 110 integer_hs = hs, 111 integer_lo = lo, 112 integer_mi = mi, 113 integer_pl = pl, 114 integer_vs = vs, 115 integer_vc = vc, 116 integer_hi = hi, 117 integer_ls = ls, 118 integer_ge = ge, 119 integer_lt = lt, 120 integer_gt = gt, 121 integer_le = le, 122 integer_al = al, 123 integer_nv = nv, 124 125 // These two are *different* from the architectural codes al and nv. 126 // 'always' is used to generate unconditional branches. 127 // 'never' is used to not generate a branch (generally as the inverse 128 // branch type of 'always). 129 always, never, 130 // cbz and cbnz 131 reg_zero, reg_not_zero, 132 // tbz and tbnz 133 reg_bit_clear, reg_bit_set, 134 135 // Aliases. 
136 kBranchTypeFirstCondition = eq, 137 kBranchTypeLastCondition = nv, 138 kBranchTypeFirstUsingReg = reg_zero, 139 kBranchTypeFirstUsingBit = reg_bit_clear 140 }; 141 142 143 enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg }; 144 145 // The macro assembler supports moving automatically pre-shifted immediates for 146 // arithmetic and logical instructions, and then applying a post shift in the 147 // instruction to undo the modification, in order to reduce the code emitted for 148 // an operation. For example: 149 // 150 // Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1. 151 // 152 // This optimisation can be only partially applied when the stack pointer is an 153 // operand or destination, so this enumeration is used to control the shift. 154 enum PreShiftImmMode { 155 kNoShift, // Don't pre-shift. 156 kLimitShiftForSP, // Limit pre-shift for add/sub extend use. 157 kAnyShift // Allow any pre-shift. 158 }; 159 160 161 class MacroAssembler : public js::jit::Assembler { 162 public: 163 MacroAssembler(); 164 165 // Finalize a code buffer of generated instructions. This function must be 166 // called before executing or copying code from the buffer. 167 void FinalizeCode(); 168 169 170 // Constant generation helpers. 171 // These functions return the number of instructions required to move the 172 // immediate into the destination register. Also, if the masm pointer is 173 // non-null, it generates the code to do so. 174 // The two features are implemented using one function to avoid duplication of 175 // the logic. 176 // The function can be used to evaluate the cost of synthesizing an 177 // instruction using 'mov immediate' instructions. A user might prefer loading 178 // a constant using the literal pool instead of using multiple 'mov immediate' 179 // instructions. 
180 static int MoveImmediateHelper(MacroAssembler* masm, 181 const Register &rd, 182 uint64_t imm); 183 static bool OneInstrMoveImmediateHelper(MacroAssembler* masm, 184 const Register& dst, 185 int64_t imm); 186 187 188 // Logical macros. 189 void And(const Register& rd, 190 const Register& rn, 191 const Operand& operand); 192 void Ands(const Register& rd, 193 const Register& rn, 194 const Operand& operand); 195 void Bic(const Register& rd, 196 const Register& rn, 197 const Operand& operand); 198 void Bics(const Register& rd, 199 const Register& rn, 200 const Operand& operand); 201 void Orr(const Register& rd, 202 const Register& rn, 203 const Operand& operand); 204 void Orn(const Register& rd, 205 const Register& rn, 206 const Operand& operand); 207 void Eor(const Register& rd, 208 const Register& rn, 209 const Operand& operand); 210 void Eon(const Register& rd, 211 const Register& rn, 212 const Operand& operand); 213 void Tst(const Register& rn, const Operand& operand); 214 void LogicalMacro(const Register& rd, 215 const Register& rn, 216 const Operand& operand, 217 LogicalOp op); 218 219 // Add and sub macros. 220 void Add(const Register& rd, 221 const Register& rn, 222 const Operand& operand, 223 FlagsUpdate S = LeaveFlags); 224 void Adds(const Register& rd, 225 const Register& rn, 226 const Operand& operand); 227 void Sub(const Register& rd, 228 const Register& rn, 229 const Operand& operand, 230 FlagsUpdate S = LeaveFlags); 231 void Subs(const Register& rd, 232 const Register& rn, 233 const Operand& operand); 234 void Cmn(const Register& rn, const Operand& operand); 235 void Cmp(const Register& rn, const Operand& operand); 236 void Neg(const Register& rd, 237 const Operand& operand); 238 void Negs(const Register& rd, 239 const Operand& operand); 240 241 void AddSubMacro(const Register& rd, 242 const Register& rn, 243 const Operand& operand, 244 FlagsUpdate S, 245 AddSubOp op); 246 247 // Add/sub with carry macros. 
248 void Adc(const Register& rd, 249 const Register& rn, 250 const Operand& operand); 251 void Adcs(const Register& rd, 252 const Register& rn, 253 const Operand& operand); 254 void Sbc(const Register& rd, 255 const Register& rn, 256 const Operand& operand); 257 void Sbcs(const Register& rd, 258 const Register& rn, 259 const Operand& operand); 260 void Ngc(const Register& rd, 261 const Operand& operand); 262 void Ngcs(const Register& rd, 263 const Operand& operand); 264 void AddSubWithCarryMacro(const Register& rd, 265 const Register& rn, 266 const Operand& operand, 267 FlagsUpdate S, 268 AddSubWithCarryOp op); 269 270 // Move macros. 271 void Mov(const Register& rd, uint64_t imm); 272 void Mov(const Register& rd, 273 const Operand& operand, 274 DiscardMoveMode discard_mode = kDontDiscardForSameWReg); 275 void Mvn(const Register& rd, uint64_t imm) { 276 Mov(rd, (rd.size() == kXRegSize) ? ~imm : (~imm & kWRegMask)); 277 } 278 void Mvn(const Register& rd, const Operand& operand); 279 280 // Try to move an immediate into the destination register in a single 281 // instruction. Returns true for success, and updates the contents of dst. 282 // Returns false, otherwise. 283 bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); 284 285 // Move an immediate into register dst, and return an Operand object for 286 // use with a subsequent instruction that accepts a shift. The value moved 287 // into dst is not necessarily equal to imm; it may have had a shifting 288 // operation applied to it that will be subsequently undone by the shift 289 // applied in the Operand. 290 Operand MoveImmediateForShiftedOp(const Register& dst, 291 int64_t imm, 292 PreShiftImmMode mode); 293 294 // Synthesises the address represented by a MemOperand into a register. 295 void ComputeAddress(const Register& dst, const MemOperand& mem_op); 296 297 // Conditional macros. 
298 void Ccmp(const Register& rn, 299 const Operand& operand, 300 StatusFlags nzcv, 301 Condition cond); 302 void Ccmn(const Register& rn, 303 const Operand& operand, 304 StatusFlags nzcv, 305 Condition cond); 306 void ConditionalCompareMacro(const Register& rn, 307 const Operand& operand, 308 StatusFlags nzcv, 309 Condition cond, 310 ConditionalCompareOp op); 311 void Csel(const Register& rd, 312 const Register& rn, 313 const Operand& operand, 314 Condition cond); 315 316 // Load/store macros. 317 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \ 318 js::wasm::FaultingCodeOffset FN(const REGTYPE REG, const MemOperand& addr); 319 LS_MACRO_LIST(DECLARE_FUNCTION) 320 #undef DECLARE_FUNCTION 321 322 js::wasm::FaultingCodeOffset LoadStoreMacro(const CPURegister& rt, 323 const MemOperand& addr, 324 LoadStoreOp op); 325 326 #define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ 327 void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr); 328 LSPAIR_MACRO_LIST(DECLARE_FUNCTION) 329 #undef DECLARE_FUNCTION 330 331 void LoadStorePairMacro(const CPURegister& rt, 332 const CPURegister& rt2, 333 const MemOperand& addr, 334 LoadStorePairOp op); 335 336 void Prfm(PrefetchOperation op, const MemOperand& addr); 337 338 // Push or pop up to 4 registers of the same width to or from the stack, 339 // using the current stack pointer as set by SetStackPointer. 340 // 341 // If an argument register is 'NoReg', all further arguments are also assumed 342 // to be 'NoReg', and are thus not pushed or popped. 343 // 344 // Arguments are ordered such that "Push(a, b);" is functionally equivalent 345 // to "Push(a); Push(b);". 346 // 347 // It is valid to push the same register more than once, and there is no 348 // restriction on the order in which registers are specified. 349 // 350 // It is not valid to pop into the same register more than once in one 351 // operation, not even into the zero register. 
352 // 353 // If the current stack pointer (as set by SetStackPointer) is sp, then it 354 // must be aligned to 16 bytes on entry and the total size of the specified 355 // registers must also be a multiple of 16 bytes. 356 // 357 // Even if the current stack pointer is not the system stack pointer (sp), 358 // Push (and derived methods) will still modify the system stack pointer in 359 // order to comply with ABI rules about accessing memory below the system 360 // stack pointer. 361 // 362 // Other than the registers passed into Pop, the stack pointer and (possibly) 363 // the system stack pointer, these methods do not modify any other registers. 364 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg, 365 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg); 366 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg, 367 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg); 368 void PushStackPointer(); 369 370 // Alternative forms of Push and Pop, taking a RegList or CPURegList that 371 // specifies the registers that are to be pushed or popped. Higher-numbered 372 // registers are associated with higher memory addresses (as in the A32 push 373 // and pop instructions). 374 // 375 // (Push|Pop)SizeRegList allow you to specify the register size as a 376 // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are 377 // supported. 378 // 379 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred. 
380 void PushCPURegList(CPURegList registers); 381 void PopCPURegList(CPURegList registers); 382 383 void PushSizeRegList(RegList registers, unsigned reg_size, 384 CPURegister::RegisterType type = CPURegister::kRegister) { 385 PushCPURegList(CPURegList(type, reg_size, registers)); 386 } 387 void PopSizeRegList(RegList registers, unsigned reg_size, 388 CPURegister::RegisterType type = CPURegister::kRegister) { 389 PopCPURegList(CPURegList(type, reg_size, registers)); 390 } 391 void PushXRegList(RegList regs) { 392 PushSizeRegList(regs, kXRegSize); 393 } 394 void PopXRegList(RegList regs) { 395 PopSizeRegList(regs, kXRegSize); 396 } 397 void PushWRegList(RegList regs) { 398 PushSizeRegList(regs, kWRegSize); 399 } 400 void PopWRegList(RegList regs) { 401 PopSizeRegList(regs, kWRegSize); 402 } 403 void PushDRegList(RegList regs) { 404 PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister); 405 } 406 void PopDRegList(RegList regs) { 407 PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister); 408 } 409 void PushSRegList(RegList regs) { 410 PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister); 411 } 412 void PopSRegList(RegList regs) { 413 PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister); 414 } 415 416 // Push the specified register 'count' times. 417 void PushMultipleTimes(int count, Register src); 418 419 // Poke 'src' onto the stack. The offset is in bytes. 420 // 421 // If the current stack pointer (as set by SetStackPointer) is sp, then sp 422 // must be aligned to 16 bytes. 423 void Poke(const Register& src, const Operand& offset); 424 425 // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes. 426 // 427 // If the current stack pointer (as set by SetStackPointer) is sp, then sp 428 // must be aligned to 16 bytes. 429 void Peek(const Register& dst, const Operand& offset); 430 431 // Alternative forms of Peek and Poke, taking a RegList or CPURegList that 432 // specifies the registers that are to be pushed or popped. 
Higher-numbered 433 // registers are associated with higher memory addresses. 434 // 435 // (Peek|Poke)SizeRegList allow you to specify the register size as a 436 // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are 437 // supported. 438 // 439 // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred. 440 void PeekCPURegList(CPURegList registers, int64_t offset) { 441 LoadCPURegList(registers, MemOperand(StackPointer(), offset)); 442 } 443 void PokeCPURegList(CPURegList registers, int64_t offset) { 444 StoreCPURegList(registers, MemOperand(StackPointer(), offset)); 445 } 446 447 void PeekSizeRegList(RegList registers, int64_t offset, unsigned reg_size, 448 CPURegister::RegisterType type = CPURegister::kRegister) { 449 PeekCPURegList(CPURegList(type, reg_size, registers), offset); 450 } 451 void PokeSizeRegList(RegList registers, int64_t offset, unsigned reg_size, 452 CPURegister::RegisterType type = CPURegister::kRegister) { 453 PokeCPURegList(CPURegList(type, reg_size, registers), offset); 454 } 455 void PeekXRegList(RegList regs, int64_t offset) { 456 PeekSizeRegList(regs, offset, kXRegSize); 457 } 458 void PokeXRegList(RegList regs, int64_t offset) { 459 PokeSizeRegList(regs, offset, kXRegSize); 460 } 461 void PeekWRegList(RegList regs, int64_t offset) { 462 PeekSizeRegList(regs, offset, kWRegSize); 463 } 464 void PokeWRegList(RegList regs, int64_t offset) { 465 PokeSizeRegList(regs, offset, kWRegSize); 466 } 467 void PeekDRegList(RegList regs, int64_t offset) { 468 PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); 469 } 470 void PokeDRegList(RegList regs, int64_t offset) { 471 PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); 472 } 473 void PeekSRegList(RegList regs, int64_t offset) { 474 PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); 475 } 476 void PokeSRegList(RegList regs, int64_t offset) { 477 PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); 478 } 479 480 481 // Claim 
or drop stack space without actually accessing memory. 482 // 483 // If the current stack pointer (as set by SetStackPointer) is sp, then it 484 // must be aligned to 16 bytes and the size claimed or dropped must be a 485 // multiple of 16 bytes. 486 void Claim(const Operand& size); 487 void Drop(const Operand& size); 488 489 // Preserve the callee-saved registers (as defined by AAPCS64). 490 // 491 // Higher-numbered registers are pushed before lower-numbered registers, and 492 // thus get higher addresses. 493 // Floating-point registers are pushed before general-purpose registers, and 494 // thus get higher addresses. 495 // 496 // This method must not be called unless StackPointer() is sp, and it is 497 // aligned to 16 bytes. 498 void PushCalleeSavedRegisters(); 499 500 // Restore the callee-saved registers (as defined by AAPCS64). 501 // 502 // Higher-numbered registers are popped after lower-numbered registers, and 503 // thus come from higher addresses. 504 // Floating-point registers are popped after general-purpose registers, and 505 // thus come from higher addresses. 506 // 507 // This method must not be called unless StackPointer() is sp, and it is 508 // aligned to 16 bytes. 509 void PopCalleeSavedRegisters(); 510 511 void LoadCPURegList(CPURegList registers, const MemOperand& src); 512 void StoreCPURegList(CPURegList registers, const MemOperand& dst); 513 514 // Remaining instructions are simple pass-through calls to the assembler. 
515 void Adr(const Register& rd, Label* label) { 516 VIXL_ASSERT(!rd.IsZero()); 517 SingleEmissionCheckScope guard(this); 518 adr(rd, label); 519 } 520 void Adrp(const Register& rd, Label* label) { 521 VIXL_ASSERT(!rd.IsZero()); 522 SingleEmissionCheckScope guard(this); 523 adrp(rd, label); 524 } 525 void Asr(const Register& rd, const Register& rn, unsigned shift) { 526 VIXL_ASSERT(!rd.IsZero()); 527 VIXL_ASSERT(!rn.IsZero()); 528 SingleEmissionCheckScope guard(this); 529 asr(rd, rn, shift); 530 } 531 void Asr(const Register& rd, const Register& rn, const Register& rm) { 532 VIXL_ASSERT(!rd.IsZero()); 533 VIXL_ASSERT(!rn.IsZero()); 534 VIXL_ASSERT(!rm.IsZero()); 535 SingleEmissionCheckScope guard(this); 536 asrv(rd, rn, rm); 537 } 538 539 // Branch type inversion relies on these relations. 540 VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) && 541 (reg_bit_clear == (reg_bit_set ^ 1)) && 542 (always == (never ^ 1))); 543 544 BranchType InvertBranchType(BranchType type) { 545 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { 546 return static_cast<BranchType>( 547 InvertCondition(static_cast<Condition>(type))); 548 } else { 549 return static_cast<BranchType>(type ^ 1); 550 } 551 } 552 553 void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1); 554 555 void B(Label* label); 556 void B(Label* label, Condition cond); 557 void B(Condition cond, Label* label) { 558 B(label, cond); 559 } 560 void Bfm(const Register& rd, 561 const Register& rn, 562 unsigned immr, 563 unsigned imms) { 564 VIXL_ASSERT(!rd.IsZero()); 565 VIXL_ASSERT(!rn.IsZero()); 566 SingleEmissionCheckScope guard(this); 567 bfm(rd, rn, immr, imms); 568 } 569 void Bfi(const Register& rd, 570 const Register& rn, 571 unsigned lsb, 572 unsigned width) { 573 VIXL_ASSERT(!rd.IsZero()); 574 VIXL_ASSERT(!rn.IsZero()); 575 SingleEmissionCheckScope guard(this); 576 bfi(rd, rn, lsb, width); 577 } 578 void Bfxil(const Register& rd, 579 const Register& rn, 580 unsigned 
lsb, 581 unsigned width) { 582 VIXL_ASSERT(!rd.IsZero()); 583 VIXL_ASSERT(!rn.IsZero()); 584 SingleEmissionCheckScope guard(this); 585 bfxil(rd, rn, lsb, width); 586 } 587 void Bind(Label* label); 588 // Bind a label to a specified offset from the start of the buffer. 589 void BindToOffset(Label* label, ptrdiff_t offset); 590 void Bl(Label* label) { 591 SingleEmissionCheckScope guard(this); 592 bl(label); 593 } 594 void Blr(const Register& xn) { 595 VIXL_ASSERT(!xn.IsZero()); 596 SingleEmissionCheckScope guard(this); 597 blr(xn); 598 } 599 void Br(const Register& xn) { 600 VIXL_ASSERT(!xn.IsZero()); 601 SingleEmissionCheckScope guard(this); 602 br(xn); 603 } 604 void Brk(int code = 0) { 605 SingleEmissionCheckScope guard(this); 606 brk(code); 607 } 608 void Cbnz(const Register& rt, Label* label); 609 void Cbz(const Register& rt, Label* label); 610 void Cinc(const Register& rd, const Register& rn, Condition cond) { 611 VIXL_ASSERT(!rd.IsZero()); 612 VIXL_ASSERT(!rn.IsZero()); 613 SingleEmissionCheckScope guard(this); 614 cinc(rd, rn, cond); 615 } 616 void Cinv(const Register& rd, const Register& rn, Condition cond) { 617 VIXL_ASSERT(!rd.IsZero()); 618 VIXL_ASSERT(!rn.IsZero()); 619 SingleEmissionCheckScope guard(this); 620 cinv(rd, rn, cond); 621 } 622 void Clrex() { 623 SingleEmissionCheckScope guard(this); 624 clrex(); 625 } 626 void Cls(const Register& rd, const Register& rn) { 627 VIXL_ASSERT(!rd.IsZero()); 628 VIXL_ASSERT(!rn.IsZero()); 629 SingleEmissionCheckScope guard(this); 630 cls(rd, rn); 631 } 632 void Clz(const Register& rd, const Register& rn) { 633 VIXL_ASSERT(!rd.IsZero()); 634 VIXL_ASSERT(!rn.IsZero()); 635 SingleEmissionCheckScope guard(this); 636 clz(rd, rn); 637 } 638 void Cneg(const Register& rd, const Register& rn, Condition cond) { 639 VIXL_ASSERT(!rd.IsZero()); 640 VIXL_ASSERT(!rn.IsZero()); 641 SingleEmissionCheckScope guard(this); 642 cneg(rd, rn, cond); 643 } 644 void Cset(const Register& rd, Condition cond) { 645 
VIXL_ASSERT(!rd.IsZero()); 646 SingleEmissionCheckScope guard(this); 647 cset(rd, cond); 648 } 649 void Csetm(const Register& rd, Condition cond) { 650 VIXL_ASSERT(!rd.IsZero()); 651 SingleEmissionCheckScope guard(this); 652 csetm(rd, cond); 653 } 654 void Csinc(const Register& rd, 655 const Register& rn, 656 const Register& rm, 657 Condition cond) { 658 VIXL_ASSERT(!rd.IsZero()); 659 // The VIXL source code contains these assertions, but the AArch64 ISR 660 // explicitly permits the use of zero registers. CSET itself is defined 661 // in terms of CSINC with WZR/XZR. 662 // 663 // VIXL_ASSERT(!rn.IsZero()); 664 // VIXL_ASSERT(!rm.IsZero()); 665 VIXL_ASSERT((cond != al) && (cond != nv)); 666 SingleEmissionCheckScope guard(this); 667 csinc(rd, rn, rm, cond); 668 } 669 void Csinv(const Register& rd, 670 const Register& rn, 671 const Register& rm, 672 Condition cond) { 673 VIXL_ASSERT(!rd.IsZero()); 674 VIXL_ASSERT(!rn.IsZero()); 675 VIXL_ASSERT(!rm.IsZero()); 676 VIXL_ASSERT((cond != al) && (cond != nv)); 677 SingleEmissionCheckScope guard(this); 678 csinv(rd, rn, rm, cond); 679 } 680 void Csneg(const Register& rd, 681 const Register& rn, 682 const Register& rm, 683 Condition cond) { 684 VIXL_ASSERT(!rd.IsZero()); 685 VIXL_ASSERT(!rn.IsZero()); 686 VIXL_ASSERT(!rm.IsZero()); 687 VIXL_ASSERT((cond != al) && (cond != nv)); 688 SingleEmissionCheckScope guard(this); 689 csneg(rd, rn, rm, cond); 690 } 691 void Dmb(BarrierDomain domain, BarrierType type) { 692 SingleEmissionCheckScope guard(this); 693 dmb(domain, type); 694 } 695 void Dsb(BarrierDomain domain, BarrierType type) { 696 SingleEmissionCheckScope guard(this); 697 dsb(domain, type); 698 } 699 void Extr(const Register& rd, 700 const Register& rn, 701 const Register& rm, 702 unsigned lsb) { 703 VIXL_ASSERT(!rd.IsZero()); 704 VIXL_ASSERT(!rn.IsZero()); 705 VIXL_ASSERT(!rm.IsZero()); 706 SingleEmissionCheckScope guard(this); 707 extr(rd, rn, rm, lsb); 708 } 709 void Fadd(const VRegister& vd, const VRegister& vn, 
const VRegister& vm) { 710 SingleEmissionCheckScope guard(this); 711 fadd(vd, vn, vm); 712 } 713 void Fccmp(const VRegister& vn, 714 const VRegister& vm, 715 StatusFlags nzcv, 716 Condition cond, 717 FPTrapFlags trap = DisableTrap) { 718 VIXL_ASSERT((cond != al) && (cond != nv)); 719 SingleEmissionCheckScope guard(this); 720 FPCCompareMacro(vn, vm, nzcv, cond, trap); 721 } 722 void Fccmpe(const VRegister& vn, 723 const VRegister& vm, 724 StatusFlags nzcv, 725 Condition cond) { 726 Fccmp(vn, vm, nzcv, cond, EnableTrap); 727 } 728 void Fcmp(const VRegister& vn, const VRegister& vm, 729 FPTrapFlags trap = DisableTrap) { 730 SingleEmissionCheckScope guard(this); 731 FPCompareMacro(vn, vm, trap); 732 } 733 void Fcmp(const VRegister& vn, double value, 734 FPTrapFlags trap = DisableTrap); 735 void Fcmpe(const VRegister& vn, double value); 736 void Fcmpe(const VRegister& vn, const VRegister& vm) { 737 Fcmp(vn, vm, EnableTrap); 738 } 739 void Fcsel(const VRegister& vd, 740 const VRegister& vn, 741 const VRegister& vm, 742 Condition cond) { 743 VIXL_ASSERT((cond != al) && (cond != nv)); 744 SingleEmissionCheckScope guard(this); 745 fcsel(vd, vn, vm, cond); 746 } 747 void Fcvt(const VRegister& vd, const VRegister& vn) { 748 SingleEmissionCheckScope guard(this); 749 fcvt(vd, vn); 750 } 751 void Fcvtl(const VRegister& vd, const VRegister& vn) { 752 SingleEmissionCheckScope guard(this); 753 fcvtl(vd, vn); 754 } 755 void Fcvtl2(const VRegister& vd, const VRegister& vn) { 756 SingleEmissionCheckScope guard(this); 757 fcvtl2(vd, vn); 758 } 759 void Fcvtn(const VRegister& vd, const VRegister& vn) { 760 SingleEmissionCheckScope guard(this); 761 fcvtn(vd, vn); 762 } 763 void Fcvtn2(const VRegister& vd, const VRegister& vn) { 764 SingleEmissionCheckScope guard(this); 765 fcvtn2(vd, vn); 766 } 767 void Fcvtxn(const VRegister& vd, const VRegister& vn) { 768 SingleEmissionCheckScope guard(this); 769 fcvtxn(vd, vn); 770 } 771 void Fcvtxn2(const VRegister& vd, const VRegister& vn) { 772 
SingleEmissionCheckScope guard(this); 773 fcvtxn2(vd, vn); 774 } 775 void Fcvtas(const Register& rd, const VRegister& vn) { 776 VIXL_ASSERT(!rd.IsZero()); 777 SingleEmissionCheckScope guard(this); 778 fcvtas(rd, vn); 779 } 780 void Fcvtau(const Register& rd, const VRegister& vn) { 781 VIXL_ASSERT(!rd.IsZero()); 782 SingleEmissionCheckScope guard(this); 783 fcvtau(rd, vn); 784 } 785 void Fcvtms(const Register& rd, const VRegister& vn) { 786 VIXL_ASSERT(!rd.IsZero()); 787 SingleEmissionCheckScope guard(this); 788 fcvtms(rd, vn); 789 } 790 void Fcvtmu(const Register& rd, const VRegister& vn) { 791 VIXL_ASSERT(!rd.IsZero()); 792 SingleEmissionCheckScope guard(this); 793 fcvtmu(rd, vn); 794 } 795 void Fcvtns(const Register& rd, const VRegister& vn) { 796 VIXL_ASSERT(!rd.IsZero()); 797 SingleEmissionCheckScope guard(this); 798 fcvtns(rd, vn); 799 } 800 void Fcvtnu(const Register& rd, const VRegister& vn) { 801 VIXL_ASSERT(!rd.IsZero()); 802 SingleEmissionCheckScope guard(this); 803 fcvtnu(rd, vn); 804 } 805 void Fcvtps(const Register& rd, const VRegister& vn) { 806 VIXL_ASSERT(!rd.IsZero()); 807 SingleEmissionCheckScope guard(this); 808 fcvtps(rd, vn); 809 } 810 void Fcvtpu(const Register& rd, const VRegister& vn) { 811 VIXL_ASSERT(!rd.IsZero()); 812 SingleEmissionCheckScope guard(this); 813 fcvtpu(rd, vn); 814 } 815 void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) { 816 VIXL_ASSERT(!rd.IsZero()); 817 SingleEmissionCheckScope guard(this); 818 fcvtzs(rd, vn, fbits); 819 } 820 void Fjcvtzs(const Register& rd, const VRegister& vn) { 821 VIXL_ASSERT(!rd.IsZero()); 822 SingleEmissionCheckScope guard(this); 823 fjcvtzs(rd, vn); 824 } 825 void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) { 826 VIXL_ASSERT(!rd.IsZero()); 827 SingleEmissionCheckScope guard(this); 828 fcvtzu(rd, vn, fbits); 829 } 830 void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) { 831 SingleEmissionCheckScope guard(this); 832 fdiv(vd, vn, vm); 833 } 
834 void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) { 835 SingleEmissionCheckScope guard(this); 836 fmax(vd, vn, vm); 837 } 838 void Fmaxnm(const VRegister& vd, 839 const VRegister& vn, 840 const VRegister& vm) { 841 SingleEmissionCheckScope guard(this); 842 fmaxnm(vd, vn, vm); 843 } 844 void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) { 845 SingleEmissionCheckScope guard(this); 846 fmin(vd, vn, vm); 847 } 848 void Fminnm(const VRegister& vd, 849 const VRegister& vn, 850 const VRegister& vm) { 851 SingleEmissionCheckScope guard(this); 852 fminnm(vd, vn, vm); 853 } 854 void Fmov(VRegister vd, VRegister vn) { 855 SingleEmissionCheckScope guard(this); 856 // Only emit an instruction if vd and vn are different, and they are both D 857 // registers. fmov(s0, s0) is not a no-op because it clears the top word of 858 // d0. Technically, fmov(d0, d0) is not a no-op either because it clears 859 // the top of q0, but VRegister does not currently support Q registers. 860 if (!vd.Is(vn) || !vd.Is64Bits()) { 861 fmov(vd, vn); 862 } 863 } 864 void Fmov(VRegister vd, Register rn) { 865 SingleEmissionCheckScope guard(this); 866 fmov(vd, rn); 867 } 868 void Fmov(const VRegister& vd, int index, const Register& rn) { 869 SingleEmissionCheckScope guard(this); 870 fmov(vd, index, rn); 871 } 872 void Fmov(const Register& rd, const VRegister& vn, int index) { 873 SingleEmissionCheckScope guard(this); 874 fmov(rd, vn, index); 875 } 876 877 // Provide explicit double and float interfaces for FP immediate moves, rather 878 // than relying on implicit C++ casts. This allows signalling NaNs to be 879 // preserved when the immediate matches the format of vd. Most systems convert 880 // signalling NaNs to quiet NaNs when converting between float and double. 881 void Fmov(VRegister vd, double imm); 882 void Fmov(VRegister vd, float imm); 883 // Provide a template to allow other types to be converted automatically. 
  // Catch-all: any other immediate type is converted to double and routed to
  // Fmov(VRegister, double) above.
  template<typename T>
  void Fmov(VRegister vd, T imm) {
    Fmov(vd, static_cast<double>(imm));
  }
  void Fmov(Register rd, VRegister vn) {
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn);
  }
  void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    fmul(vd, vn, vm);
  }
  void Fnmul(const VRegister& vd, const VRegister& vn,
             const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    fnmul(vd, vn, vm);
  }
  // Fused multiply-add family (four-operand: vn, vm multiplied, va accumulated).
  void Fmadd(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    SingleEmissionCheckScope guard(this);
    fmadd(vd, vn, vm, va);
  }
  void Fmsub(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    SingleEmissionCheckScope guard(this);
    fmsub(vd, vn, vm, va);
  }
  void Fnmadd(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    SingleEmissionCheckScope guard(this);
    fnmadd(vd, vn, vm, va);
  }
  void Fnmsub(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    SingleEmissionCheckScope guard(this);
    fnmsub(vd, vn, vm, va);
  }
  void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    fsub(vd, vn, vm);
  }
  void Hint(SystemHint code) {
    SingleEmissionCheckScope guard(this);
    hint(code);
  }
  void Hlt(int code) {
    SingleEmissionCheckScope guard(this);
    hlt(code);
  }
  void Isb() {
    SingleEmissionCheckScope guard(this);
    isb();
  }
  // Load-acquire wrappers (word/byte/half-word).
  void Ldar(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldar(rt, src);
  }
  void Ldarb(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldarb(rt, src);
  }
  void Ldarh(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldarh(rt, src);
  }
  // Load-acquire exclusive pair; the two destination registers must differ.
  void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldaxp(rt, rt2, src);
  }
  void Ldaxr(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldaxr(rt, src);
  }
  void Ldaxrb(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldaxrb(rt, src);
  }
  void Ldaxrh(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldaxrh(rt, src);
  }

  // clang-format off
#define COMPARE_AND_SWAP_SINGLE_MACRO_LIST(V) \
  V(cas,    Cas)                              \
  V(casa,   Casa)                             \
  V(casl,   Casl)                             \
  V(casal,  Casal)                            \
  V(casb,   Casb)                             \
  V(casab,  Casab)                            \
  V(caslb,  Caslb)                            \
  V(casalb, Casalb)                           \
  V(cash,   Cash)                             \
  V(casah,  Casah)                            \
  V(caslh,  Caslh)                            \
  V(casalh, Casalh)
  // clang-format on

  // Generate one single-emission wrapper per compare-and-swap variant above.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(rs, rt, src);                                                        \
  }
  COMPARE_AND_SWAP_SINGLE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  // clang-format off
#define COMPARE_AND_SWAP_PAIR_MACRO_LIST(V) \
  V(casp,   Casp)                           \
  V(caspa,  Caspa)                          \
  V(caspl,  Caspl)                          \
  V(caspal, Caspal)
  // clang-format on

  // Pair variants take two comparison and two swap registers.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                 \
  void MASM(const Register& rs, const Register& rs2, const Register& rt, \
            const Register& rt2, const MemOperand& src) {                \
    SingleEmissionCheckScope guard(this);                                \
    ASM(rs, rs2, rt, rt2, src);                                          \
  }
  COMPARE_AND_SWAP_PAIR_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  // These macros generate all the variations of the atomic memory operations,
  // e.g. ldadd, ldadda, ldaddb, staddl, etc.

  // clang-format off
#define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \
  V(DEF, MASM_PRE##add,  ASM_PRE##add)                             \
  V(DEF, MASM_PRE##clr,  ASM_PRE##clr)                             \
  V(DEF, MASM_PRE##eor,  ASM_PRE##eor)                             \
  V(DEF, MASM_PRE##set,  ASM_PRE##set)                             \
  V(DEF, MASM_PRE##smax, ASM_PRE##smax)                            \
  V(DEF, MASM_PRE##smin, ASM_PRE##smin)                            \
  V(DEF, MASM_PRE##umax, ASM_PRE##umax)                            \
  V(DEF, MASM_PRE##umin, ASM_PRE##umin)

  // Store forms exist in plain, release, byte, and half-word modes.
#define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
  V(MASM,     ASM)                                    \
  V(MASM##l,  ASM##l)                                 \
  V(MASM##b,  ASM##b)                                 \
  V(MASM##lb, ASM##lb)                                \
  V(MASM##h,  ASM##h)                                 \
  V(MASM##lh, ASM##lh)

  // Load forms additionally exist in acquire and acquire-release modes.
#define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \
  ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM)      \
  V(MASM##a,   ASM##a)                               \
  V(MASM##al,  ASM##al)                              \
  V(MASM##ab,  ASM##ab)                              \
  V(MASM##alb, ASM##alb)                             \
  V(MASM##ah,  ASM##ah)                              \
  V(MASM##alh, ASM##alh)
  // clang-format on

#define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM)                                \
  void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(rs, rt, src);                                                        \
  }
#define DEFINE_MACRO_STORE_ASM_FUNC(MASM, ASM)           \
  void MASM(const Register& rs, const MemOperand& src) { \
    SingleEmissionCheckScope guard(this);                \
    ASM(rs, src);                                        \
  }

  ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES,
                                  DEFINE_MACRO_LOAD_ASM_FUNC,
                                  Ld,
                                  ld)
  ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES,
                                  DEFINE_MACRO_STORE_ASM_FUNC,
                                  St,
                                  st)

  // Swap (swp) uses the load modes but always takes source and destination.
#define DEFINE_MACRO_SWP_ASM_FUNC(MASM, ASM)                                 \
  void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(rs, rt, src);                                                        \
  }

  ATOMIC_MEMORY_LOAD_MACRO_MODES(DEFINE_MACRO_SWP_ASM_FUNC, Swp, swp)

#undef DEFINE_MACRO_LOAD_ASM_FUNC
#undef DEFINE_MACRO_STORE_ASM_FUNC
#undef DEFINE_MACRO_SWP_ASM_FUNC

  void Ldnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldnp(rt, rt2, src);
  }
  // Provide both double and float interfaces for FP immediate loads, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  void Ldr(const VRegister& vt, double imm) {
    SingleEmissionCheckScope guard(this);
    if (vt.Is64Bits()) {
      ldr(vt, imm);
    } else {
      ldr(vt, static_cast<float>(imm));
    }
  }
  void Ldr(const VRegister& vt, float imm) {
    SingleEmissionCheckScope guard(this);
    if (vt.Is32Bits()) {
      ldr(vt, imm);
    } else {
      ldr(vt, static_cast<double>(imm));
    }
  }
  /*
  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
    VIXL_ASSERT(vt.IsQ());
    SingleEmissionCheckScope guard(this);
    ldr(vt, new Literal<uint64_t>(high64, low64,
                                  &literal_pool_,
                                  RawLiteral::kDeletedOnPlacementByPool));
  }
  */
  void Ldr(const Register& rt, uint64_t imm) {
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    ldr(rt, imm);
  }
  void Ldrsw(const Register& rt, uint32_t imm) {
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    ldrsw(rt, imm);
  }
  // Load exclusive pair; the two destination registers must differ.
  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldxp(rt, rt2, src);
  }
  void Ldxr(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldxr(rt, src);
  }
  void Ldxrb(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldxrb(rt, src);
  }
  void Ldxrh(const Register& rt, const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ldxrh(rt, src);
  }
  // Shifts: the register-shift overloads map to the *v (variable) forms.
  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsl(rd, rn, shift);
  }
  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lslv(rd, rn, rm);
  }
  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsr(rd, rn, shift);
  }
  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lsrv(rd, rn, rm);
  }
  void Madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    madd(rd, rn, rm, ra);
  }
  void Mneg(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mneg(rd, rn, rm);
  }
  void Mov(const Register& rd,
           const Register& rn,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg) {
    // Emit a register move only if the registers are distinct, or if they are
    // not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(rn) ||
        (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
      SingleEmissionCheckScope guard(this);
      mov(rd, rn);
    }
  }
  void Movk(const Register& rd, uint64_t imm, int shift = -1) {
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    movk(rd, imm, shift);
  }
  void Mrs(const Register& rt, SystemRegister sysreg) {
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    mrs(rt, sysreg);
  }
  void Msr(SystemRegister sysreg, const Register& rt) {
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    msr(sysreg, rt);
  }
  void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
    SingleEmissionCheckScope guard(this);
    sys(op1, crn, crm, op2, rt);
  }
  // Data-cache and instruction-cache maintenance operations.
  void Dc(DataCacheOp op, const Register& rt) {
    SingleEmissionCheckScope guard(this);
    dc(op, rt);
  }
  void Ic(InstructionCacheOp op, const Register& rt) {
    SingleEmissionCheckScope guard(this);
    ic(op, rt);
  }
  void Msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    msub(rd, rn, rm, ra);
  }
  void Mul(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mul(rd, rn, rm);
  }
  void Nop() {
    SingleEmissionCheckScope guard(this);
    nop();
  }
  void Csdb() {
    SingleEmissionCheckScope guard(this);
    csdb();
  }
  void Rbit(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rbit(rd, rn);
  }
  // Return; defaults to the link register.
  void Ret(const Register& xn = lr) {
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    ret(xn);
  }
  void Rev(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev(rd, rn);
  }
  void Rev16(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev16(rd, rn);
  }
  void Rev32(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev32(rd, rn);
  }
  void Ror(const Register& rd, const Register& rs, unsigned shift) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rs.IsZero());
    SingleEmissionCheckScope guard(this);
    ror(rd, rs, shift);
  }
  // Register-amount rotate maps to the variable (rorv) form.
  void Ror(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    rorv(rd, rn, rm);
  }
  // Signed bitfield insert/move/extract.
  void Sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfiz(rd, rn, lsb, width);
  }
  void Sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfm(rd, rn, immr, imms);
  }
  void Sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfx(rd, rn, lsb, width);
  }
  void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    scvtf(vd, rn, fbits);
  }
  void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    sdiv(rd, rn, rm);
  }
  void Smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smaddl(rd, rn, rm, ra);
  }
  void Smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smsubl(rd, rn, rm, ra);
  }
  void Smull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    smull(rd, rn, rm);
  }
  void Smulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    smulh(xd, xn, xm);
  }
  // Store-release wrappers (word/byte/half-word).
  void Stlr(const Register& rt, const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    stlr(rt, dst);
  }
  void Stlrb(const Register& rt, const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    stlrb(rt, dst);
  }
  void Stlrh(const Register& rt, const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    stlrh(rt, dst);
  }
  // Store-release exclusive: the status register rs must not overlap the base
  // address register or either data register.
  void Stlxp(const Register& rs,
             const Register& rt,
             const Register& rt2,
             const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stlxp(rs, rt, rt2, dst);
  }
  void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxr(rs, rt, dst);
  }
  void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrb(rs, rt, dst);
  }
  void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrh(rs, rt, dst);
  }
  void Stnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    stnp(rt, rt2, dst);
  }
  // Store exclusive: same aliasing restrictions as the release forms above.
  void Stxp(const Register& rs,
            const Register& rt,
            const Register& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stxp(rs, rt, rt2, dst);
  }
  void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxr(rs, rt, dst);
  }
  void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrb(rs, rt, dst);
  }
  void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(!rs.Aliases(dst.base()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrh(rs, rt, dst);
  }
  void Svc(int code) {
    SingleEmissionCheckScope guard(this);
    svc(code);
  }
  void Sxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtb(rd, rn);
  }
  void Sxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxth(rd, rn);
  }
  void Sxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtw(rd, rn);
  }
  // Table lookups with one to four source table registers.
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vm);
  }
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vm);
  }
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vm);
  }
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }
  // Test-bit-and-branch; defined out of line.
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  // Unsigned bitfield insert/move/extract.
  void Ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfiz(rd, rn, lsb, width);
  }
  void Ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfm(rd, rn, immr, imms);
  }
  void Ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfx(rd, rn, lsb, width);
  }
  void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, rn, fbits);
  }
  void Udiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    udiv(rd, rn, rm);
  }
  void Umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umaddl(rd, rn, rm, ra);
  }
  void Umull(const Register& rd,
             const Register& rn,
             const Register& rm) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    umull(rd, rn, rm);
  }
  void Umulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    umulh(xd, xn, xm);
  }
  void Umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umsubl(rd, rn, rm, ra);
  }

  // Emit the undefined-instruction pattern; executing it traps.
  void Unreachable() {
    SingleEmissionCheckScope guard(this);
    Emit(UNDEFINED_INST_PATTERN);
  }

  void Uxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtb(rd, rn);
  }
  void Uxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxth(rd, rn);
  }
  void Uxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtw(rd, rn);
  }

  // NEON 3 vector register instructions.
  // (asm mnemonic, MacroAssembler name) pairs for all three-operand NEON
  // instructions; each expands to a wrapper via DEFINE_MACRO_ASM_FUNC below.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add)                    \
  V(addhn, Addhn)                \
  V(addhn2, Addhn2)              \
  V(addp, Addp)                  \
  V(and_, And)                   \
  V(bic, Bic)                    \
  V(bif, Bif)                    \
  V(bit, Bit)                    \
  V(bsl, Bsl)                    \
  V(cmeq, Cmeq)                  \
  V(cmge, Cmge)                  \
  V(cmgt, Cmgt)                  \
  V(cmhi, Cmhi)                  \
  V(cmhs, Cmhs)                  \
  V(cmtst, Cmtst)                \
  V(eor, Eor)                    \
  V(fabd, Fabd)                  \
  V(facge, Facge)                \
  V(facgt, Facgt)                \
  V(faddp, Faddp)                \
  V(fcmeq, Fcmeq)                \
  V(fcmge, Fcmge)                \
  V(fcmgt, Fcmgt)                \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxp, Fmaxp)                \
  V(fminnmp, Fminnmp)            \
  V(fminp, Fminp)                \
  V(fmla, Fmla)                  \
  V(fmls, Fmls)                  \
  V(fmulx, Fmulx)                \
  V(frecps, Frecps)              \
  V(frsqrts, Frsqrts)            \
  V(mla, Mla)                    \
  V(mls, Mls)                    \
  V(mul, Mul)                    \
  V(orn, Orn)                    \
  V(orr, Orr)                    \
  V(pmul, Pmul)                  \
  V(pmull, Pmull)                \
  V(pmull2, Pmull2)              \
  V(raddhn, Raddhn)              \
  V(raddhn2, Raddhn2)            \
  V(rsubhn, Rsubhn)              \
  V(rsubhn2, Rsubhn2)            \
  V(saba, Saba)                  \
  V(sabal, Sabal)                \
  V(sabal2, Sabal2)              \
  V(sabd, Sabd)                  \
  V(sabdl, Sabdl)                \
  V(sabdl2, Sabdl2)              \
  V(saddl, Saddl)                \
  V(saddl2, Saddl2)              \
  V(saddw, Saddw)                \
  V(saddw2, Saddw2)              \
  V(shadd, Shadd)                \
  V(shsub, Shsub)                \
  V(smax, Smax)                  \
  V(smaxp, Smaxp)                \
  V(smin, Smin)                  \
  V(sminp, Sminp)                \
  V(smlal, Smlal)                \
  V(smlal2, Smlal2)              \
  V(smlsl, Smlsl)                \
  V(smlsl2, Smlsl2)              \
  V(smull, Smull)                \
  V(smull2, Smull2)              \
  V(sqadd, Sqadd)                \
  V(sqdmlal, Sqdmlal)            \
  V(sqdmlal2, Sqdmlal2)          \
  V(sqdmlsl, Sqdmlsl)            \
  V(sqdmlsl2, Sqdmlsl2)          \
  V(sqdmulh, Sqdmulh)            \
  V(sqdmull, Sqdmull)            \
  V(sqdmull2, Sqdmull2)          \
  V(sqrdmulh, Sqrdmulh)          \
  V(sqrshl, Sqrshl)              \
  V(sqshl, Sqshl)                \
  V(sqsub, Sqsub)                \
  V(srhadd, Srhadd)              \
  V(srshl, Srshl)                \
  V(sshl, Sshl)                  \
  V(ssubl, Ssubl)                \
  V(ssubl2, Ssubl2)              \
  V(ssubw, Ssubw)                \
  V(ssubw2, Ssubw2)              \
  V(sub, Sub)                    \
  V(subhn, Subhn)                \
  V(subhn2, Subhn2)              \
  V(trn1, Trn1)                  \
  V(trn2, Trn2)                  \
  V(uaba, Uaba)                  \
  V(uabal, Uabal)                \
  V(uabal2, Uabal2)              \
  V(uabd, Uabd)                  \
  V(uabdl, Uabdl)                \
  V(uabdl2, Uabdl2)              \
  V(uaddl, Uaddl)                \
  V(uaddl2, Uaddl2)              \
  V(uaddw, Uaddw)                \
  V(uaddw2, Uaddw2)              \
  V(uhadd, Uhadd)                \
  V(uhsub, Uhsub)                \
  V(umax, Umax)                  \
  V(umaxp, Umaxp)                \
  V(umin, Umin)                  \
  V(uminp, Uminp)                \
  V(umlal, Umlal)                \
  V(umlal2, Umlal2)              \
  V(umlsl, Umlsl)                \
  V(umlsl2, Umlsl2)              \
  V(umull, Umull)                \
  V(umull2, Umull2)              \
  V(uqadd, Uqadd)                \
  V(uqrshl, Uqrshl)              \
  V(uqshl, Uqshl)                \
  V(uqsub, Uqsub)                \
  V(urhadd, Urhadd)              \
  V(urshl, Urshl)                \
  V(ushl, Ushl)                  \
  V(usubl, Usubl)                \
  V(usubl2, Usubl2)              \
  V(usubw, Usubw)                \
  V(usubw2, Usubw2)              \
  V(uzp1, Uzp1)                  \
  V(uzp2, Uzp2)                  \
  V(zip1, Zip1)                  \
  V(zip2, Zip2)

  // One single-emission wrapper per (vd, vn, vm) NEON instruction above.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
  void MASM(const VRegister& vd,            \
            const VRegister& vn,            \
            const VRegister& vm) {          \
    SingleEmissionCheckScope guard(this);   \
    ASM(vd, vn, vm);                        \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  // NEON 2 vector register instructions.
1776 #define NEON_2VREG_MACRO_LIST(V) \ 1777 V(abs, Abs) \ 1778 V(addp, Addp) \ 1779 V(addv, Addv) \ 1780 V(cls, Cls) \ 1781 V(clz, Clz) \ 1782 V(cnt, Cnt) \ 1783 V(fabs, Fabs) \ 1784 V(faddp, Faddp) \ 1785 V(fcvtas, Fcvtas) \ 1786 V(fcvtau, Fcvtau) \ 1787 V(fcvtms, Fcvtms) \ 1788 V(fcvtmu, Fcvtmu) \ 1789 V(fcvtns, Fcvtns) \ 1790 V(fcvtnu, Fcvtnu) \ 1791 V(fcvtps, Fcvtps) \ 1792 V(fcvtpu, Fcvtpu) \ 1793 V(fmaxnmp, Fmaxnmp) \ 1794 V(fmaxnmv, Fmaxnmv) \ 1795 V(fmaxp, Fmaxp) \ 1796 V(fmaxv, Fmaxv) \ 1797 V(fminnmp, Fminnmp) \ 1798 V(fminnmv, Fminnmv) \ 1799 V(fminp, Fminp) \ 1800 V(fminv, Fminv) \ 1801 V(fneg, Fneg) \ 1802 V(frecpe, Frecpe) \ 1803 V(frecpx, Frecpx) \ 1804 V(frinta, Frinta) \ 1805 V(frinti, Frinti) \ 1806 V(frintm, Frintm) \ 1807 V(frintn, Frintn) \ 1808 V(frintp, Frintp) \ 1809 V(frintx, Frintx) \ 1810 V(frintz, Frintz) \ 1811 V(frsqrte, Frsqrte) \ 1812 V(fsqrt, Fsqrt) \ 1813 V(mov, Mov) \ 1814 V(mvn, Mvn) \ 1815 V(neg, Neg) \ 1816 V(not_, Not) \ 1817 V(rbit, Rbit) \ 1818 V(rev16, Rev16) \ 1819 V(rev32, Rev32) \ 1820 V(rev64, Rev64) \ 1821 V(sadalp, Sadalp) \ 1822 V(saddlp, Saddlp) \ 1823 V(saddlv, Saddlv) \ 1824 V(smaxv, Smaxv) \ 1825 V(sminv, Sminv) \ 1826 V(sqabs, Sqabs) \ 1827 V(sqneg, Sqneg) \ 1828 V(sqxtn, Sqxtn) \ 1829 V(sqxtn2, Sqxtn2) \ 1830 V(sqxtun, Sqxtun) \ 1831 V(sqxtun2, Sqxtun2) \ 1832 V(suqadd, Suqadd) \ 1833 V(sxtl, Sxtl) \ 1834 V(sxtl2, Sxtl2) \ 1835 V(uadalp, Uadalp) \ 1836 V(uaddlp, Uaddlp) \ 1837 V(uaddlv, Uaddlv) \ 1838 V(umaxv, Umaxv) \ 1839 V(uminv, Uminv) \ 1840 V(uqxtn, Uqxtn) \ 1841 V(uqxtn2, Uqxtn2) \ 1842 V(urecpe, Urecpe) \ 1843 V(ursqrte, Ursqrte) \ 1844 V(usqadd, Usqadd) \ 1845 V(uxtl, Uxtl) \ 1846 V(uxtl2, Uxtl2) \ 1847 V(xtn, Xtn) \ 1848 V(xtn2, Xtn2) 1849 1850 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ 1851 void MASM(const VRegister& vd, \ 1852 const VRegister& vn) { \ 1853 SingleEmissionCheckScope guard(this); \ 1854 ASM(vd, vn); \ 1855 } 1856 NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) 1857 #undef 
DEFINE_MACRO_ASM_FUNC 1858 1859 // NEON 2 vector register with immediate instructions. 1860 #define NEON_2VREG_FPIMM_MACRO_LIST(V) \ 1861 V(fcmeq, Fcmeq) \ 1862 V(fcmge, Fcmge) \ 1863 V(fcmgt, Fcmgt) \ 1864 V(fcmle, Fcmle) \ 1865 V(fcmlt, Fcmlt) 1866 1867 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ 1868 void MASM(const VRegister& vd, \ 1869 const VRegister& vn, \ 1870 double imm) { \ 1871 SingleEmissionCheckScope guard(this); \ 1872 ASM(vd, vn, imm); \ 1873 } 1874 NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) 1875 #undef DEFINE_MACRO_ASM_FUNC 1876 1877 // NEON by element instructions. 1878 #define NEON_BYELEMENT_MACRO_LIST(V) \ 1879 V(fmul, Fmul) \ 1880 V(fmla, Fmla) \ 1881 V(fmls, Fmls) \ 1882 V(fmulx, Fmulx) \ 1883 V(mul, Mul) \ 1884 V(mla, Mla) \ 1885 V(mls, Mls) \ 1886 V(sqdmulh, Sqdmulh) \ 1887 V(sqrdmulh, Sqrdmulh) \ 1888 V(sqdmull, Sqdmull) \ 1889 V(sqdmull2, Sqdmull2) \ 1890 V(sqdmlal, Sqdmlal) \ 1891 V(sqdmlal2, Sqdmlal2) \ 1892 V(sqdmlsl, Sqdmlsl) \ 1893 V(sqdmlsl2, Sqdmlsl2) \ 1894 V(smull, Smull) \ 1895 V(smull2, Smull2) \ 1896 V(smlal, Smlal) \ 1897 V(smlal2, Smlal2) \ 1898 V(smlsl, Smlsl) \ 1899 V(smlsl2, Smlsl2) \ 1900 V(umull, Umull) \ 1901 V(umull2, Umull2) \ 1902 V(umlal, Umlal) \ 1903 V(umlal2, Umlal2) \ 1904 V(umlsl, Umlsl) \ 1905 V(umlsl2, Umlsl2) 1906 1907 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ 1908 void MASM(const VRegister& vd, \ 1909 const VRegister& vn, \ 1910 const VRegister& vm, \ 1911 int vm_index \ 1912 ) { \ 1913 SingleEmissionCheckScope guard(this); \ 1914 ASM(vd, vn, vm, vm_index); \ 1915 } 1916 NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) 1917 #undef DEFINE_MACRO_ASM_FUNC 1918 1919 #define NEON_2VREG_SHIFT_MACRO_LIST(V) \ 1920 V(rshrn, Rshrn) \ 1921 V(rshrn2, Rshrn2) \ 1922 V(shl, Shl) \ 1923 V(shll, Shll) \ 1924 V(shll2, Shll2) \ 1925 V(shrn, Shrn) \ 1926 V(shrn2, Shrn2) \ 1927 V(sli, Sli) \ 1928 V(sqrshrn, Sqrshrn) \ 1929 V(sqrshrn2, Sqrshrn2) \ 1930 V(sqrshrun, Sqrshrun) \ 1931 V(sqrshrun2, Sqrshrun2) \ 1932 V(sqshl, 
Sqshl) \ 1933 V(sqshlu, Sqshlu) \ 1934 V(sqshrn, Sqshrn) \ 1935 V(sqshrn2, Sqshrn2) \ 1936 V(sqshrun, Sqshrun) \ 1937 V(sqshrun2, Sqshrun2) \ 1938 V(sri, Sri) \ 1939 V(srshr, Srshr) \ 1940 V(srsra, Srsra) \ 1941 V(sshll, Sshll) \ 1942 V(sshll2, Sshll2) \ 1943 V(sshr, Sshr) \ 1944 V(ssra, Ssra) \ 1945 V(uqrshrn, Uqrshrn) \ 1946 V(uqrshrn2, Uqrshrn2) \ 1947 V(uqshl, Uqshl) \ 1948 V(uqshrn, Uqshrn) \ 1949 V(uqshrn2, Uqshrn2) \ 1950 V(urshr, Urshr) \ 1951 V(ursra, Ursra) \ 1952 V(ushll, Ushll) \ 1953 V(ushll2, Ushll2) \ 1954 V(ushr, Ushr) \ 1955 V(usra, Usra) \ 1956 1957 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ 1958 void MASM(const VRegister& vd, \ 1959 const VRegister& vn, \ 1960 int shift) { \ 1961 SingleEmissionCheckScope guard(this); \ 1962 ASM(vd, vn, shift); \ 1963 } 1964 NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) 1965 #undef DEFINE_MACRO_ASM_FUNC 1966 1967 void Bic(const VRegister& vd, 1968 const int imm8, 1969 const int left_shift = 0) { 1970 SingleEmissionCheckScope guard(this); 1971 bic(vd, imm8, left_shift); 1972 } 1973 void Cmeq(const VRegister& vd, 1974 const VRegister& vn, 1975 int imm) { 1976 SingleEmissionCheckScope guard(this); 1977 cmeq(vd, vn, imm); 1978 } 1979 void Cmge(const VRegister& vd, 1980 const VRegister& vn, 1981 int imm) { 1982 SingleEmissionCheckScope guard(this); 1983 cmge(vd, vn, imm); 1984 } 1985 void Cmgt(const VRegister& vd, 1986 const VRegister& vn, 1987 int imm) { 1988 SingleEmissionCheckScope guard(this); 1989 cmgt(vd, vn, imm); 1990 } 1991 void Cmle(const VRegister& vd, 1992 const VRegister& vn, 1993 int imm) { 1994 SingleEmissionCheckScope guard(this); 1995 cmle(vd, vn, imm); 1996 } 1997 void Cmlt(const VRegister& vd, 1998 const VRegister& vn, 1999 int imm) { 2000 SingleEmissionCheckScope guard(this); 2001 cmlt(vd, vn, imm); 2002 } 2003 void Dup(const VRegister& vd, 2004 const VRegister& vn, 2005 int index) { 2006 SingleEmissionCheckScope guard(this); 2007 dup(vd, vn, index); 2008 } 2009 void Dup(const VRegister& vd, 
2010 const Register& rn) { 2011 SingleEmissionCheckScope guard(this); 2012 dup(vd, rn); 2013 } 2014 void Ext(const VRegister& vd, 2015 const VRegister& vn, 2016 const VRegister& vm, 2017 int index) { 2018 SingleEmissionCheckScope guard(this); 2019 ext(vd, vn, vm, index); 2020 } 2021 void Ins(const VRegister& vd, 2022 int vd_index, 2023 const VRegister& vn, 2024 int vn_index) { 2025 SingleEmissionCheckScope guard(this); 2026 ins(vd, vd_index, vn, vn_index); 2027 } 2028 void Ins(const VRegister& vd, 2029 int vd_index, 2030 const Register& rn) { 2031 SingleEmissionCheckScope guard(this); 2032 ins(vd, vd_index, rn); 2033 } 2034 void Ld1(const VRegister& vt, 2035 const MemOperand& src) { 2036 SingleEmissionCheckScope guard(this); 2037 ld1(vt, src); 2038 } 2039 void Ld1(const VRegister& vt, 2040 const VRegister& vt2, 2041 const MemOperand& src) { 2042 SingleEmissionCheckScope guard(this); 2043 ld1(vt, vt2, src); 2044 } 2045 void Ld1(const VRegister& vt, 2046 const VRegister& vt2, 2047 const VRegister& vt3, 2048 const MemOperand& src) { 2049 SingleEmissionCheckScope guard(this); 2050 ld1(vt, vt2, vt3, src); 2051 } 2052 void Ld1(const VRegister& vt, 2053 const VRegister& vt2, 2054 const VRegister& vt3, 2055 const VRegister& vt4, 2056 const MemOperand& src) { 2057 SingleEmissionCheckScope guard(this); 2058 ld1(vt, vt2, vt3, vt4, src); 2059 } 2060 void Ld1(const VRegister& vt, 2061 int lane, 2062 const MemOperand& src) { 2063 SingleEmissionCheckScope guard(this); 2064 ld1(vt, lane, src); 2065 } 2066 void Ld1r(const VRegister& vt, 2067 const MemOperand& src) { 2068 SingleEmissionCheckScope guard(this); 2069 ld1r(vt, src); 2070 } 2071 void Ld2(const VRegister& vt, 2072 const VRegister& vt2, 2073 const MemOperand& src) { 2074 SingleEmissionCheckScope guard(this); 2075 ld2(vt, vt2, src); 2076 } 2077 void Ld2(const VRegister& vt, 2078 const VRegister& vt2, 2079 int lane, 2080 const MemOperand& src) { 2081 SingleEmissionCheckScope guard(this); 2082 ld2(vt, vt2, lane, src); 2083 } 
  // LD2R: load one 2-element structure, replicate to all lanes of {vt, vt2}.
  void Ld2r(const VRegister& vt,
            const VRegister& vt2,
            const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ld2r(vt, vt2, src);
  }
  // LD3: load de-interleaved 3-element structures into three registers.
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, src);
  }
  // LD3 (single structure): load one lane of {vt, vt2, vt3}.
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, lane, src);
  }
  // LD3R: load one 3-element structure, replicate to all lanes.
  void Ld3r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ld3r(vt, vt2, vt3, src);
  }
  // LD4: load de-interleaved 4-element structures into four registers.
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, src);
  }
  // LD4 (single structure): load one lane of {vt .. vt4}.
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  // LD4R: load one 4-element structure, replicate to all lanes.
  void Ld4r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const VRegister& vt4,
            const MemOperand& src) {
    SingleEmissionCheckScope guard(this);
    ld4r(vt, vt2, vt3, vt4, src);
  }
  // MOV (element): vd[vd_index] = vn[vn_index].
  void Mov(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, vn, vn_index);
  }
  // MOV (scalar): vd = vn[index].
  void Mov(const VRegister& vd,
           const VRegister& vn,
           int index) {
    SingleEmissionCheckScope guard(this);
    mov(vd, vn, index);
  }
  // MOV (from general): vd[vd_index] = rn.
  void Mov(const VRegister& vd,
           int vd_index,
           const Register& rn) {
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, rn);
  }
  // MOV (to general): rd = vn[vn_index].
  void Mov(const Register& rd,
           const VRegister& vn,
           int vn_index) {
    SingleEmissionCheckScope guard(this);
    mov(rd, vn, vn_index);
  }
  // MOVI: move an immediate into a vector. Implemented out of line; see also
  // the private Movi*bitHelper functions.
  void Movi(const VRegister& vd,
            uint64_t imm,
            Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
  // MVNI: move the inverse of a shifted 8-bit immediate into vd.
  void Mvni(const VRegister& vd,
            const int imm8,
            Shift shift = LSL,
            const int shift_amount = 0) {
    SingleEmissionCheckScope guard(this);
    mvni(vd, imm8, shift, shift_amount);
  }
  // ORR (vector, immediate).
  void Orr(const VRegister& vd,
           const int imm8,
           const int left_shift = 0) {
    SingleEmissionCheckScope guard(this);
    orr(vd, imm8, left_shift);
  }
  // Signed/unsigned fixed-point to floating-point conversion; `fbits` is the
  // number of fractional bits in the input.
  void Scvtf(const VRegister& vd,
             const VRegister& vn,
             int fbits = 0) {
    SingleEmissionCheckScope guard(this);
    scvtf(vd, vn, fbits);
  }
  void Ucvtf(const VRegister& vd,
             const VRegister& vn,
             int fbits = 0) {
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, vn, fbits);
  }
  // Floating-point to signed/unsigned fixed-point conversion, rounding toward
  // zero.
  void Fcvtzs(const VRegister& vd,
              const VRegister& vn,
              int fbits = 0) {
    SingleEmissionCheckScope guard(this);
    fcvtzs(vd, vn, fbits);
  }
  void Fcvtzu(const VRegister& vd,
              const VRegister& vn,
              int fbits = 0) {
    SingleEmissionCheckScope guard(this);
    fcvtzu(vd, vn, fbits);
  }
  // ST1: store multiple single-element structures, one to four registers.
  void St1(const VRegister& vt,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st1(vt, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, vt4, dst);
  }
  // ST1 (single structure): store one lane of vt.
  void St1(const VRegister& vt,
           int lane,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st1(vt, lane, dst);
  }
  // ST2/ST3/ST4: store interleaved 2-, 3- and 4-element structures.
  void St2(const VRegister& vt,
           const VRegister& vt2,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, dst);
  }
  // Single-structure (one lane) variants of ST2/ST3/ST4.
  void St2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& dst) {
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  // SMOV/UMOV: sign-/zero-extending move from a vector lane to a general
  // register.
  void Smov(const Register& rd,
            const VRegister& vn,
            int vn_index) {
    SingleEmissionCheckScope guard(this);
    smov(rd, vn, vn_index);
  }
  void Umov(const Register& rd,
            const VRegister& vn,
            int vn_index) {
    SingleEmissionCheckScope guard(this);
    umov(rd, vn, vn_index);
  }
  // CRC32 wrappers (byte/halfword/word/doubleword), followed by the CRC32C
  // (Castagnoli polynomial) variants.
  void Crc32b(const Register& rd,
              const Register& rn,
              const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32b(rd, rn, rm);
  }
  void Crc32h(const Register& rd,
              const Register& rn,
              const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32h(rd, rn, rm);
  }
  void Crc32w(const Register& rd,
              const Register& rn,
              const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32w(rd, rn, rm);
  }
  void Crc32x(const Register& rd,
              const Register& rn,
              const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32x(rd, rn, rm);
  }
  void Crc32cb(const Register& rd,
               const Register& rn,
               const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32cb(rd, rn, rm);
  }
  void Crc32ch(const Register& rd,
               const Register& rn,
               const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32ch(rd, rn, rm);
  }
  void Crc32cw(const Register& rd,
               const Register& rn,
               const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32cw(rd, rn, rm);
  }
  void Crc32cx(const Register& rd,
               const Register& rn,
               const Register& rm) {
    SingleEmissionCheckScope guard(this);
    crc32cx(rd, rn, rm);
  }

  // Scalar helpers on general registers. NOTE(review): semantics inferred from
  // the names (Abs = absolute value, Cnt = population count, Ctz = count
  // trailing zeros) — confirm against the Assembler implementations.
  void Abs(const Register& rd, const Register& rn) {
    SingleEmissionCheckScope guard(this);
    abs(rd, rn);
  }

  void Cnt(const Register& rd, const Register& rn) {
    SingleEmissionCheckScope guard(this);
    cnt(rd, rn);
  }

  void Ctz(const Register& rd, const Register& rn) {
    SingleEmissionCheckScope guard(this);
    ctz(rd, rn);
  }

  // Signed/unsigned integer min/max; implemented out of line.
  void Smax(const Register& rd, const Register& rn, const Operand& op);
  void Smin(const Register& rd, const Register& rn, const Operand& op);
  void Umax(const Register& rd, const Register& rn, const Operand& op);
  void Umin(const Register& rd, const Register& rn, const Operand& op);

  // Push the system stack pointer (sp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed.
  //
  // This method asserts that StackPointer() is not sp, since the call does
  // not make sense in that context.
  //
  // TODO: This method can only accept values of 'space' that can be encoded in
  // one instruction. Refer to the implementation for details.
  void BumpSystemStackPointer(const Operand& space);

  // Set the current stack pointer, but don't generate any code.
  // The new stack pointer must not be a scratch register (it could be
  // clobbered by macro expansions).
  void SetStackPointer64(const Register& stack_pointer) {
    VIXL_ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  const Register& StackPointer() const {
    return sp_;
  }

  const Register& GetStackPointer64() const {
    return sp_;
  }

  // SpiderMonkey-facing view of the current stack pointer register.
  js::jit::RegisterOrSP getStackPointer() const {
    return js::jit::RegisterOrSP(sp_.code());
  }

  // Scratch register pools (used by UseScratchRegisterScope).
  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  // Trace control when running the debug simulator.
  //
  // For example:
  //
  // __ Trace(LOG_REGS, TRACE_ENABLE);
  // Will add registers to the trace if it wasn't already the case.
  //
  // __ Trace(LOG_DISASM, TRACE_DISABLE);
  // Will stop logging disassembly. It has no effect if the disassembly wasn't
  // already being logged.
  void Trace(TraceParameters parameters, TraceCommand command);

  // Log the requested data independently of what is being traced.
  //
  // For example:
  //
  // __ Log(LOG_FLAGS)
  // Will output the flags.
  void Log(TraceParameters parameters);

  // Enable or disable instrumentation when an Instrument visitor is attached to
  // the simulator.
  void EnableInstrumentation();
  void DisableInstrumentation();

  // Add a marker to the instrumentation data produced by an Instrument visitor.
  // The name is a two character string that will be attached to the marker in
  // the output data.
  void AnnotateInstrumentation(const char* marker_name);

 private:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together setup code for a large block of
  // registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);

  // Helpers for Movi: synthesize a vector immediate, split by element width.
  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  // Perform necessary maintenance operations before a push or pop.
  //
  // Note that size is per register, and is specified in bytes.
  void PrepareForPush(int count, int size);
  void PrepareForPop(int count, int size);

  // The actual implementation of load and store operations for CPURegList.
  enum LoadStoreCPURegListAction {
    kLoad,
    kStore
  };
  void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
                                 CPURegList registers,
                                 const MemOperand& mem);
  // Returns a MemOperand suitable for loading or storing a CPURegList at `dst`.
  // This helper may allocate registers from `scratch_scope` and generate code
  // to compute an intermediate address. The resulting MemOperand is only valid
  // as long as `scratch_scope` remains valid.
  MemOperand BaseMemOperandForLoadStoreCPURegList(
      const CPURegList& registers,
      const MemOperand& mem,
      UseScratchRegisterScope* scratch_scope);

  // True if a branch of `branch_type` emitted at the next buffer offset could
  // not encode the PC-relative offset to `label`.
  bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
    return !Instruction::IsValidImmPCOffset(branch_type, nextOffset().getOffset() - label->offset());
  }

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;

  // NOTE(review): bookkeeping offsets; not referenced by any code visible in
  // this header — confirm their use in the implementation file.
  ptrdiff_t checkpoint_;
  ptrdiff_t recommended_checkpoint_;
};


// All Assembler emits MUST acquire/release the underlying code buffer. The
// helper scope below will do so and optionally ensure the buffer is big enough
// to receive the emit. It is possible to request the scope not to perform any
// checks (kNoCheck) if for example it is known in advance the buffer size is
// adequate or there is some other size checking mechanism in place.
class CodeBufferCheckScope {
 public:
  // Tell whether or not the scope needs to ensure the associated CodeBuffer
  // has enough space for the requested size.
  enum CheckPolicy {
    kNoCheck,
    kCheck
  };

  // Tell whether or not the scope should assert the amount of code emitted
  // within the scope is consistent with the requested amount.
  enum AssertPolicy {
    kNoAssert,    // No assert required.
    kExactSize,   // The code emitted must be exactly size bytes.
    kMaximumSize  // The code emitted must be at most size bytes.
  };

  // NOTE(review): in this port the constructors are stubs — the parameters are
  // accepted for interface compatibility but no check is performed (see also
  // the FIXME on InstructionAccurateScope below).
  CodeBufferCheckScope(Assembler* assm,
                       size_t size,
                       CheckPolicy check_policy = kCheck,
                       AssertPolicy assert_policy = kMaximumSize)
  { }

  // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
  explicit CodeBufferCheckScope(Assembler* assm) {}
};


// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
// FIXME: Because of the disabled calls below, this class asserts nothing.
class InstructionAccurateScope : public CodeBufferCheckScope {
 public:
  InstructionAccurateScope(MacroAssembler* masm,
                           int64_t count,
                           AssertPolicy policy = kExactSize)
      : CodeBufferCheckScope(masm,
                             (count * kInstructionSize),
                             kCheck,
                             policy) {
  }
};


// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  // This constructor implicitly calls the `Open` function to initialise the
  // scope, so it is ready to use immediately after it has been constructed.
  explicit UseScratchRegisterScope(MacroAssembler* masm);
  // This constructor allows deferred and optional initialisation of the scope.
  // The user is required to explicitly call the `Open` function before using
  // the scope.
  UseScratchRegisterScope();
  // This function performs the actual initialisation work.
  void Open(MacroAssembler* masm);

  // The destructor always implicitly calls the `Close` function.
  ~UseScratchRegisterScope();
  // This function performs the cleaning-up work. It must succeed even if the
  // scope has not been opened. It is safe to call multiple times.
  void Close();


  bool IsAvailable(const CPURegister& reg) const;
  bool HasAvailableRegister() const;

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }


  Register AcquireSameSizeAs(const Register& reg);
  VRegister AcquireSameSizeAs(const VRegister& reg);


  // Explicitly release an acquired (or excluded) register, putting it back in
  // the appropriate temps list.
  void Release(const CPURegister& reg);


  // Make the specified registers available as scratch registers for the
  // duration of this scope.
  void Include(const CPURegList& list);
  void Include(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Include(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);


  // Make sure that the specified registers are not available in this scope.
  // This can be used to prevent helper functions from using sensitive
  // registers, for example.
  void Exclude(const CPURegList& list);
  void Exclude(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Exclude(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);
  void Exclude(const CPURegister& reg1,
               const CPURegister& reg2 = NoCPUReg,
               const CPURegister& reg3 = NoCPUReg,
               const CPURegister& reg4 = NoCPUReg);


  // Prevent any scratch registers from being used in this scope.
  void ExcludeAll();


 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);

  static void ReleaseByCode(CPURegList* available, int code);

  static void ReleaseByRegList(CPURegList* available,
                               RegList regs);

  static void IncludeByRegList(CPURegList* available,
                               RegList exclude);

  static void ExcludeByRegList(CPURegList* available,
                               RegList exclude);

  // Available scratch registers.
  CPURegList* available_;    // kRegister
  CPURegList* availablefp_;  // kVRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;    // kRegister
  RegList old_availablefp_;  // kVRegister
#ifdef DEBUG
  bool initialised_;
#endif

  // Disallow copy constructor and operator=.
  UseScratchRegisterScope(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
  void operator=(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
};


}  // namespace vixl

#endif  // VIXL_A64_MACRO_ASSEMBLER_A64_H_