tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

Assembler-vixl.cpp (154054B)


// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "jit/arm64/vixl/Assembler-vixl.h"

#include <cmath>

#include "jit/arm64/vixl/MacroAssembler-vixl.h"

namespace vixl {

// Assembler
Assembler::Assembler(PositionIndependentCodeOption pic)
   : pic_(pic)
{
 // Mozilla change: query cpu features once and cache result.
 cpu_features_ = js::jit::ARM64Flags::GetCPUFeatures();
}


// Code generation.
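// br/blr/ret all take a 64-bit register holding the target address. Note
// that upstream VIXL declares ret() with lr (x30) as the default argument,
// so a bare ret() returns through the link register.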
void Assembler::br(const Register& xn) {
 VIXL_ASSERT(xn.Is64Bits());
 Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
 VIXL_ASSERT(xn.Is64Bits());
 Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
 VIXL_ASSERT(xn.Is64Bits());
 Emit(RET | Rn(xn));
}


void Assembler::NEONTable(const VRegister& vd,
                         const VRegister& vn,
                         const VRegister& vm,
                         NEONTableOp op) {
 VIXL_ASSERT(vd.Is16B() || vd.Is8B());
 VIXL_ASSERT(vn.Is16B());
 VIXL_ASSERT(AreSameFormat(vd, vm));
 Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
}


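// TBL writes zero to result bytes whose index is out of range; TBX (below)
// leaves the corresponding destination byte unchanged. The multi-register
// forms require a consecutive source register list, wrapping from v31 back
// to v0, which the asserts check via kNumberOfVRegisters.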
void Assembler::tbl(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vm) {
 NEONTable(vd, vn, vm, NEON_TBL_1v);
}


void Assembler::tbl(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vn2,
                   const VRegister& vm) {
 USE(vn2);
 VIXL_ASSERT(AreSameFormat(vn, vn2));
 VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));

 NEONTable(vd, vn, vm, NEON_TBL_2v);
}


void Assembler::tbl(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vn2,
                   const VRegister& vn3,
                   const VRegister& vm) {
 USE(vn2, vn3);
 VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
 VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
 VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));

 NEONTable(vd, vn, vm, NEON_TBL_3v);
}


void Assembler::tbl(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vn2,
                   const VRegister& vn3,
                   const VRegister& vn4,
                   const VRegister& vm) {
 USE(vn2, vn3, vn4);
 VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
 VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
 VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
 VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));

 NEONTable(vd, vn, vm, NEON_TBL_4v);
}


void Assembler::tbx(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vm) {
 NEONTable(vd, vn, vm, NEON_TBX_1v);
}


void Assembler::tbx(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vn2,
                   const VRegister& vm) {
 USE(vn2);
 VIXL_ASSERT(AreSameFormat(vn, vn2));
 VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));

 NEONTable(vd, vn, vm, NEON_TBX_2v);
}


void Assembler::tbx(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vn2,
                   const VRegister& vn3,
                   const VRegister& vm) {
 USE(vn2, vn3);
 VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
 VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
 VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));

 NEONTable(vd, vn, vm, NEON_TBX_3v);
}


void Assembler::tbx(const VRegister& vd,
                   const VRegister& vn,
                   const VRegister& vn2,
                   const VRegister& vn3,
                   const VRegister& vn4,
                   const VRegister& vm) {
 USE(vn2, vn3, vn4);
 VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
 VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
 VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
 VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));

 NEONTable(vd, vn, vm, NEON_TBX_4v);
}


void Assembler::add(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
 AddSub(rd, rn, operand, SetFlags, ADD);
}


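// cmn and cmp are simply adds/subs with the zero register as destination:
// only the NZCV flags survive.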
void Assembler::cmn(const Register& rn,
                   const Operand& operand) {
 Register zr = AppropriateZeroRegFor(rn);
 adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
 AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
 Register zr = AppropriateZeroRegFor(rn);
 subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
 Register zr = AppropriateZeroRegFor(rd);
 sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
 Register zr = AppropriateZeroRegFor(rd);
 subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
 AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
 AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
 Register zr = AppropriateZeroRegFor(rd);
 sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
 Register zr = AppropriateZeroRegFor(rd);
 sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
 Logical(rd, rn, operand, AND);
}


void Assembler::bic(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
 Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                   const Register& rn,
                   const Operand& operand) {
 Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
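// bfm/sbfm/ubfm are the generic bitfield primitives. Roughly: immr is the
// right-rotate applied to the source and imms marks the top bit of the
// selected field; sbfm sign-extends the result, ubfm zero-extends it, and
// bfm merges it into rd. The familiar aliases (lsl, lsr, asr, bfi, ubfx,
// sxtb, ...) are all defined in terms of these three instructions.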
void Assembler::bfm(const Register& rd,
                   const Register& rn,
                   unsigned immr,
                   unsigned imms) {
 VIXL_ASSERT(rd.size() == rn.size());
 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
 Emit(SF(rd) | BFM | N |
      ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
 VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
 Emit(SF(rd) | SBFM | N |
      ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
 VIXL_ASSERT(rd.size() == rn.size());
 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
 Emit(SF(rd) | UBFM | N |
      ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::extr(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    unsigned lsb) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
 Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond) {
 ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
 ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
 ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
 ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


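// The conditional aliases below build on csinc/csinv/csneg with the zero
// register and an inverted condition, e.g.
//   cset  rd, cond  ==  csinc rd, zr, zr, invert(cond)   // rd = cond ? 1 : 0
//   csetm rd, cond  ==  csinv rd, zr, zr, invert(cond)   // rd = cond ? -1 : 0
// al and nv are rejected because inverting them is not meaningful here.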
void Assembler::cset(const Register &rd, Condition cond) {
 VIXL_ASSERT((cond != al) && (cond != nv));
 Register zr = AppropriateZeroRegFor(rd);
 csinc(rd, zr, zr, InvertCondition(cond));
}


void Assembler::csetm(const Register &rd, Condition cond) {
 VIXL_ASSERT((cond != al) && (cond != nv));
 Register zr = AppropriateZeroRegFor(rd);
 csinv(rd, zr, zr, InvertCondition(cond));
}


void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
 VIXL_ASSERT((cond != al) && (cond != nv));
 csinc(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
 VIXL_ASSERT((cond != al) && (cond != nv));
 csinv(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
 VIXL_ASSERT((cond != al) && (cond != nv));
 csneg(rd, rn, rn, InvertCondition(cond));
}


void Assembler::ConditionalSelect(const Register& rd,
                                 const Register& rn,
                                 const Register& rm,
                                 Condition cond,
                                 ConditionalSelectOp op) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                    const Operand& operand,
                    StatusFlags nzcv,
                    Condition cond) {
 ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                    const Operand& operand,
                    StatusFlags nzcv,
                    Condition cond) {
 ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                     const Register& rn,
                                     const Register& rm,
                                     const Register& ra,
                                     DataProcessing3SourceOp op) {
 Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


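// CRC32* accumulate with the polynomial 0x04C11DB7 and CRC32C* with
// 0x1EDC6F41 (Castagnoli). The access size is encoded in the opcode; the
// sf bit, taken from rm via SF(rm), selects the 64-bit source variants
// (crc32x/crc32cx).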
void Assembler::crc32b(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
 Emit(SF(rm) | Rm(rm) | CRC32B | Rn(rn) | Rd(rd));
}


void Assembler::crc32h(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
 Emit(SF(rm) | Rm(rm) | CRC32H | Rn(rn) | Rd(rd));
}


void Assembler::crc32w(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
 Emit(SF(rm) | Rm(rm) | CRC32W | Rn(rn) | Rd(rd));
}


void Assembler::crc32x(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
 Emit(SF(rm) | Rm(rm) | CRC32X | Rn(rn) | Rd(rd));
}


void Assembler::crc32cb(const Register& rd,
                       const Register& rn,
                       const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
 Emit(SF(rm) | Rm(rm) | CRC32CB | Rn(rn) | Rd(rd));
}


void Assembler::crc32ch(const Register& rd,
                       const Register& rn,
                       const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
 Emit(SF(rm) | Rm(rm) | CRC32CH | Rn(rn) | Rd(rd));
}


void Assembler::crc32cw(const Register& rd,
                       const Register& rn,
                       const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
 Emit(SF(rm) | Rm(rm) | CRC32CW | Rn(rn) | Rd(rd));
}


void Assembler::crc32cx(const Register& rd,
                       const Register& rn,
                       const Register& rm) {
 VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
 Emit(SF(rm) | Rm(rm) | CRC32CX | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                   const Register& rn,
                   const Register& rm) {
 VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
 DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
}


void Assembler::madd(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    const Register& ra) {
 DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
 VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
 DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
}


void Assembler::msub(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    const Register& ra) {
 DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::umaddl(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      const Register& ra) {
 VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
 VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
 DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::smaddl(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      const Register& ra) {
 VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
 VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
 DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      const Register& ra) {
 VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
 VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
 DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smsubl(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      const Register& ra) {
 VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
 VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
 DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::smull(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
 VIXL_ASSERT(rd.Is64Bits());
 VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
 DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::sdiv(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


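// smulh/umulh return the high 64 bits of the 128-bit product. The Ra
// field is unused by these instructions and must be encoded as all ones,
// which passing xzr (register code 31) achieves.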
void Assembler::smulh(const Register& xd,
                     const Register& xn,
                     const Register& xm) {
 VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
 DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
}


void Assembler::umulh(const Register& xd,
                     const Register& xn,
                     const Register& xm) {
 VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
 DataProcessing3Source(xd, xn, xm, xzr, UMULH_x);
}


void Assembler::udiv(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
 VIXL_ASSERT(rd.size() == rn.size());
 VIXL_ASSERT(rd.size() == rm.size());
 Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                    const Register& rn) {
 DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                     const Register& rn) {
 DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                     const Register& rn) {
 VIXL_ASSERT(rd.Is64Bits());
 DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                   const Register& rn) {
 DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                   const Register& rn) {
 DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd,
                   const Register& rn) {
 DataProcessing1Source(rd, rn, CLS);
}


void Assembler::ldp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src) {
 LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst) {
 LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& rt,
                     const Register& rt2,
                     const MemOperand& src) {
 VIXL_ASSERT(rt.Is64Bits());
 LoadStorePair(rt, rt2, src, LDPSW_x);
}


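// The pair offset is a 7-bit signed immediate scaled by the access size,
// so e.g. `ldp x0, x1, [sp, #16]` encodes imm7 = 2; IsImmLSPair() checks
// that the byte offset is representable at that scale.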
void Assembler::LoadStorePair(const CPURegister& rt,
                             const CPURegister& rt2,
                             const MemOperand& addr,
                             LoadStorePairOp op) {
 // 'rt' and 'rt2' can only be aliased for stores.
 VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
 VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
 VIXL_ASSERT(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));

 int offset = static_cast<int>(addr.offset());
 Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
               ImmLSPair(offset, CalcLSPairDataSize(op));

 Instr addrmodeop;
 if (addr.IsImmediateOffset()) {
   addrmodeop = LoadStorePairOffsetFixed;
 } else {
   VIXL_ASSERT(addr.offset() != 0);
   if (addr.IsPreIndex()) {
     addrmodeop = LoadStorePairPreIndexFixed;
   } else {
     VIXL_ASSERT(addr.IsPostIndex());
     addrmodeop = LoadStorePairPostIndexFixed;
   }
 }
 Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
 LoadStorePairNonTemporal(rt, rt2, src,
                          LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
 LoadStorePairNonTemporal(rt, rt2, dst,
                          StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairNonTemporalOp op) {
 VIXL_ASSERT(!rt.Is(rt2));
 VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
 VIXL_ASSERT(addr.IsImmediateOffset());

 unsigned size = CalcLSPairDataSize(
   static_cast<LoadStorePairOp>(static_cast<Instr>(op) & LoadStorePairMask));
 VIXL_ASSERT(IsImmLSPair(addr.offset(), size));
 int offset = static_cast<int>(addr.offset());
 Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
}


// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src,
                    LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, src, LDRB_w, option);
}


void Assembler::strb(const Register& rt, const MemOperand& dst,
                    LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, dst, STRB_w, option);
}


void Assembler::ldrsb(const Register& rt, const MemOperand& src,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
}


void Assembler::ldrh(const Register& rt, const MemOperand& src,
                    LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, src, LDRH_w, option);
}


void Assembler::strh(const Register& rt, const MemOperand& dst,
                    LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, dst, STRH_w, option);
}


void Assembler::ldrsh(const Register& rt, const MemOperand& src,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
}


void Assembler::ldr(const CPURegister& rt, const MemOperand& src,
                   LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, src, LoadOpFor(rt), option);
}


void Assembler::str(const CPURegister& rt, const MemOperand& dst,
                   LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, dst, StoreOpFor(rt), option);
}


void Assembler::ldrsw(const Register& rt, const MemOperand& src,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(rt.Is64Bits());
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 LoadStore(rt, src, LDRSW_x, option);
}


void Assembler::ldurb(const Register& rt, const MemOperand& src,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, src, LDRB_w, option);
}


void Assembler::sturb(const Register& rt, const MemOperand& dst,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, dst, STRB_w, option);
}


void Assembler::ldursb(const Register& rt, const MemOperand& src,
                      LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
}


void Assembler::ldurh(const Register& rt, const MemOperand& src,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, src, LDRH_w, option);
}


void Assembler::sturh(const Register& rt, const MemOperand& dst,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, dst, STRH_w, option);
}


void Assembler::ldursh(const Register& rt, const MemOperand& src,
                      LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
}


void Assembler::ldur(const CPURegister& rt, const MemOperand& src,
                    LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, src, LoadOpFor(rt), option);
}


void Assembler::stur(const CPURegister& rt, const MemOperand& dst,
                    LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, dst, StoreOpFor(rt), option);
}


void Assembler::ldursw(const Register& rt, const MemOperand& src,
                      LoadStoreScalingOption option) {
 VIXL_ASSERT(rt.Is64Bits());
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 LoadStore(rt, src, LDRSW_x, option);
}


void Assembler::ldrsw(const Register& rt, int imm19) {
 Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
}


void Assembler::ldr(const CPURegister& rt, int imm19) {
 LoadLiteralOp op = LoadLiteralOpFor(rt);
 Emit(op | ImmLLiteral(imm19) | Rt(rt));
}

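// Compare-and-swap (LSE, ARMv8.1): rs holds the value to compare against
// [base]; on a match rt is stored, and in all cases the old memory value
// comes back in rs. The 'a'/'l' suffixes add acquire/release ordering.
// Note that, unlike the ld*/st* atomics further down, these do not assert
// CPUHas(kAtomics); presumably callers are expected to have checked.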
// clang-format off
#define COMPARE_AND_SWAP_W_X_LIST(V) \
 V(cas,   CAS)                      \
 V(casa,  CASA)                     \
 V(casl,  CASL)                     \
 V(casal, CASAL)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP)                                  \
 void Assembler::FN(const Register& rs, const Register& rt,     \
                    const MemOperand& src) {                    \
   VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
   LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w;     \
   Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base()));    \
 }
COMPARE_AND_SWAP_W_X_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

// clang-format off
#define COMPARE_AND_SWAP_W_LIST(V) \
 V(casb,   CASB)                  \
 V(casab,  CASAB)                 \
 V(caslb,  CASLB)                 \
 V(casalb, CASALB)                \
 V(cash,   CASH)                  \
 V(casah,  CASAH)                 \
 V(caslh,  CASLH)                 \
 V(casalh, CASALH)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP)                                  \
 void Assembler::FN(const Register& rs, const Register& rt,     \
                    const MemOperand& src) {                    \
   VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
   Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base()));    \
 }
COMPARE_AND_SWAP_W_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

// clang-format off
#define COMPARE_AND_SWAP_PAIR_LIST(V) \
 V(casp,   CASP)                     \
 V(caspa,  CASPA)                    \
 V(caspl,  CASPL)                    \
 V(caspal, CASPAL)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP)                                  \
 void Assembler::FN(const Register& rs, const Register& rs1,    \
                    const Register& rt, const Register& rt1,    \
                    const MemOperand& src) {                    \
   USE(rs1, rt1);                                               \
   VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
   VIXL_ASSERT(AreEven(rs, rt));                                \
   VIXL_ASSERT(AreConsecutive(rs, rs1));                        \
   VIXL_ASSERT(AreConsecutive(rt, rt1));                        \
   LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w;     \
   Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base()));    \
 }
COMPARE_AND_SWAP_PAIR_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

void Assembler::prfm(PrefetchOperation op, int imm19) {
 Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
}


// Exclusive-access instructions.
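// A load-exclusive marks the address for exclusive access; the matching
// store-exclusive writes a status result into rs (0 on success, 1 if the
// exclusive monitor was lost). Unused Rs/Rt2 fields must be encoded as all
// ones, hence the Rs_mask/Rt2_mask terms below.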
void Assembler::stxrb(const Register& rs,
                     const Register& rt,
                     const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::stxrh(const Register& rs,
                     const Register& rt,
                     const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::stxr(const Register& rs,
                    const Register& rt,
                    const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
 Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::ldxrb(const Register& rt,
                     const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::ldxrh(const Register& rt,
                     const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::ldxr(const Register& rt,
                    const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
 Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::stxp(const Register& rs,
                    const Register& rt,
                    const Register& rt2,
                    const MemOperand& dst) {
 VIXL_ASSERT(rt.size() == rt2.size());
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w;
 Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
}


void Assembler::ldxp(const Register& rt,
                    const Register& rt2,
                    const MemOperand& src) {
 VIXL_ASSERT(rt.size() == rt2.size());
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w;
 Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
}


void Assembler::stlxrb(const Register& rs,
                      const Register& rt,
                      const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::stlxrh(const Register& rs,
                      const Register& rt,
                      const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::stlxr(const Register& rs,
                     const Register& rt,
                     const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w;
 Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::ldaxrb(const Register& rt,
                      const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::ldaxrh(const Register& rt,
                      const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::ldaxr(const Register& rt,
                     const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
 Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::stlxp(const Register& rs,
                     const Register& rt,
                     const Register& rt2,
                     const MemOperand& dst) {
 VIXL_ASSERT(rt.size() == rt2.size());
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w;
 Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
}


void Assembler::ldaxp(const Register& rt,
                     const Register& rt2,
                     const MemOperand& src) {
 VIXL_ASSERT(rt.size() == rt2.size());
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w;
 Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
}


void Assembler::stlrb(const Register& rt,
                     const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::stlrh(const Register& rt,
                     const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::stlr(const Register& rt,
                    const MemOperand& dst) {
 VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w;
 Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
}


void Assembler::ldarb(const Register& rt,
                     const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::ldarh(const Register& rt,
                     const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}


void Assembler::ldar(const Register& rt,
                    const MemOperand& src) {
 VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
 LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w;
 Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
}

// These macros generate all the variations of the atomic memory operations,
// e.g. ldadd, ldadda, ldaddb, staddl, etc.
// For a full list of the methods with comments, see the assembler header file.
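// For example, expanding LDADD through ATOMIC_MEMORY_LOAD_MODES defines
// ldadd/ldadda/ldaddl/ldaddal plus byte ('b') and halfword ('h') variants,
// while the store list defines stadd/staddl etc. as the corresponding load
// with the zero register as destination (the loaded value is discarded).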

// clang-format off
#define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \
 V(DEF, add,  LDADD)                               \
 V(DEF, clr,  LDCLR)                               \
 V(DEF, eor,  LDEOR)                               \
 V(DEF, set,  LDSET)                               \
 V(DEF, smax, LDSMAX)                              \
 V(DEF, smin, LDSMIN)                              \
 V(DEF, umax, LDUMAX)                              \
 V(DEF, umin, LDUMIN)

#define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
 V(NAME,     OP##_x,   OP##_w)                \
 V(NAME##l,  OP##L_x,  OP##L_w)               \
 V(NAME##b,  OP##B,    OP##B)                 \
 V(NAME##lb, OP##LB,   OP##LB)                \
 V(NAME##h,  OP##H,    OP##H)                 \
 V(NAME##lh, OP##LH,   OP##LH)

#define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \
 ATOMIC_MEMORY_STORE_MODES(V, NAME, OP)      \
 V(NAME##a,   OP##A_x,  OP##A_w)             \
 V(NAME##al,  OP##AL_x, OP##AL_w)            \
 V(NAME##ab,  OP##AB,   OP##AB)              \
 V(NAME##alb, OP##ALB,  OP##ALB)             \
 V(NAME##ah,  OP##AH,   OP##AH)              \
 V(NAME##alh, OP##ALH,  OP##ALH)
// clang-format on

#define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W)                     \
 void Assembler::ld##FN(const Register& rs, const Register& rt, \
                        const MemOperand& src) {                \
   VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                  \
   VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
   AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W;             \
   Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base()));               \
 }
#define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W)                         \
 void Assembler::st##FN(const Register& rs, const MemOperand& src) { \
   VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                       \
   ld##FN(rs, AppropriateZeroRegFor(rs), src);                       \
 }

ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES,
                                   DEFINE_ASM_LOAD_FUNC)
ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES,
                                   DEFINE_ASM_STORE_FUNC)

#define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W)                      \
 void Assembler::FN(const Register& rs, const Register& rt,     \
                    const MemOperand& src) {                    \
   VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                  \
   VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); \
   AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W;             \
   Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base()));               \
 }

ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP)

#undef DEFINE_ASM_LOAD_FUNC
#undef DEFINE_ASM_STORE_FUNC
#undef DEFINE_ASM_SWP_FUNC

void Assembler::prfm(PrefetchOperation op, const MemOperand& address,
                    LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireUnscaledOffset);
 VIXL_ASSERT(option != PreferUnscaledOffset);
 Prefetch(op, address, option);
}


void Assembler::prfum(PrefetchOperation op, const MemOperand& address,
                     LoadStoreScalingOption option) {
 VIXL_ASSERT(option != RequireScaledOffset);
 VIXL_ASSERT(option != PreferScaledOffset);
 Prefetch(op, address, option);
}


void Assembler::sys(int op1, int crn, int crm, int op2, const Register& rt) {
 Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(rt));
}


void Assembler::sys(int op, const Register& rt) {
 Emit(SYS | SysOp(op) | Rt(rt));
}


void Assembler::dc(DataCacheOp op, const Register& rt) {
 VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA));
 sys(op, rt);
}


void Assembler::ic(InstructionCacheOp op, const Register& rt) {
 VIXL_ASSERT(op == IVAU);
 sys(op, rt);
}

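// CSSC (FEAT_CSSC) scalar instructions: abs (absolute value), cnt
// (population count) and ctz (count trailing zeros). The encodings below
// are hard-coded opcode constants, presumably because this import has no
// named constants for them yet.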
void Assembler::abs(const Register& rd, const Register& rn) {
 VIXL_ASSERT(CPUHas(CPUFeatures::kCSSC));
 VIXL_ASSERT(rd.IsSameSizeAndType(rn));

 Emit(0x5ac02000 | SF(rd) | Rd(rd) | Rn(rn));
}

void Assembler::cnt(const Register& rd, const Register& rn) {
 VIXL_ASSERT(CPUHas(CPUFeatures::kCSSC));
 VIXL_ASSERT(rd.IsSameSizeAndType(rn));

 Emit(0x5ac01c00 | SF(rd) | Rd(rd) | Rn(rn));
}

void Assembler::ctz(const Register& rd, const Register& rn) {
 VIXL_ASSERT(CPUHas(CPUFeatures::kCSSC));
 VIXL_ASSERT(rd.IsSameSizeAndType(rn));

 Emit(0x5ac01800 | SF(rd) | Rd(rd) | Rn(rn));
}

#define MINMAX(V)                        \
 V(smax, 0x11c00000, 0x1ac06000, true)  \
 V(smin, 0x11c80000, 0x1ac06800, true)  \
 V(umax, 0x11c40000, 0x1ac06400, false) \
 V(umin, 0x11cc0000, 0x1ac06c00, false)

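// The immediate forms of smax/smin/umax/umin take an 8-bit immediate in
// bits [17:10], signed for the s-variants and unsigned for the u-variants
// (hence the SIGNED column in the table above).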
   1330 #define VIXL_DEFINE_ASM_FUNC(FN, IMMOP, REGOP, SIGNED)                     \
   1331  void Assembler::FN(const Register& rd,                                   \
   1332                     const Register& rn,                                   \
   1333                     const Operand& op) {                                  \
   1334    VIXL_ASSERT(rd.IsSameSizeAndType(rn));                                 \
   1335    Instr i = SF(rd) | Rd(rd) | Rn(rn);                                    \
   1336    if (op.IsImmediate()) {                                                \
   1337      int64_t imm = op.GetImmediate();                                     \
   1338      i |= SIGNED ? ImmField<17, 10>(imm) : ImmUnsignedField<17, 10>(imm); \
   1339      Emit(IMMOP | i);                                                     \
   1340    } else {                                                               \
   1341      VIXL_ASSERT(op.IsPlainRegister());                                   \
   1342      VIXL_ASSERT(op.GetRegister().IsSameSizeAndType(rd));                 \
   1343      Emit(REGOP | i | Rm(op.GetRegister()));                              \
   1344    }                                                                      \
   1345  }
   1346 MINMAX(VIXL_DEFINE_ASM_FUNC)
   1347 #undef VIXL_DEFINE_ASM_FUNC
   1348 
   1349 // Mozilla change: Undefine MINMAX
   1350 #undef MINMAX
   1351 
   1352 // NEON structure loads and stores.
   1353 Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
   1354  Instr addr_field = RnSP(addr.base());
   1355 
   1356  if (addr.IsPostIndex()) {
   1357    VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex ==
   1358        static_cast<NEONLoadStoreMultiStructPostIndexOp>(
   1359            NEONLoadStoreSingleStructPostIndex));
   1360 
   1361    addr_field |= NEONLoadStoreMultiStructPostIndex;
   1362    if (addr.offset() == 0) {
   1363      addr_field |= RmNot31(addr.regoffset());
   1364    } else {
   1365      // The immediate post index addressing mode is indicated by rm = 31.
   1366      // The immediate is implied by the number of vector registers used.
   1367      addr_field |= (0x1f << Rm_offset);
   1368    }
   1369  } else {
   1370    VIXL_ASSERT(addr.IsImmediateOffset() && (addr.offset() == 0));
   1371  }
   1372  return addr_field;
   1373 }
   1374 
   1375 void Assembler::LoadStoreStructVerify(const VRegister& vt,
   1376                                      const MemOperand& addr,
   1377                                      Instr op) {
   1378 #ifdef DEBUG
   1379  // Assert that addressing mode is either offset (with immediate 0), post
   1380  // index by immediate of the size of the register list, or post index by a
   1381  // value in a core register.
   1382  if (addr.IsImmediateOffset()) {
   1383    VIXL_ASSERT(addr.offset() == 0);
   1384  } else {
   1385    int offset = vt.SizeInBytes();
   1386    switch (op) {
   1387      case NEON_LD1_1v:
   1388      case NEON_ST1_1v:
   1389        offset *= 1; break;
   1390      case NEONLoadStoreSingleStructLoad1:
   1391      case NEONLoadStoreSingleStructStore1:
   1392      case NEON_LD1R:
   1393        offset = (offset / vt.lanes()) * 1; break;
   1394 
   1395      case NEON_LD1_2v:
   1396      case NEON_ST1_2v:
   1397      case NEON_LD2:
   1398      case NEON_ST2:
   1399        offset *= 2;
   1400        break;
   1401      case NEONLoadStoreSingleStructLoad2:
   1402      case NEONLoadStoreSingleStructStore2:
   1403      case NEON_LD2R:
   1404        offset = (offset / vt.lanes()) * 2; break;
   1405 
   1406      case NEON_LD1_3v:
   1407      case NEON_ST1_3v:
   1408      case NEON_LD3:
   1409      case NEON_ST3:
   1410        offset *= 3; break;
   1411      case NEONLoadStoreSingleStructLoad3:
   1412      case NEONLoadStoreSingleStructStore3:
   1413      case NEON_LD3R:
   1414        offset = (offset / vt.lanes()) * 3; break;
   1415 
   1416      case NEON_LD1_4v:
   1417      case NEON_ST1_4v:
   1418      case NEON_LD4:
   1419      case NEON_ST4:
   1420        offset *= 4; break;
   1421      case NEONLoadStoreSingleStructLoad4:
   1422      case NEONLoadStoreSingleStructStore4:
   1423      case NEON_LD4R:
   1424        offset = (offset / vt.lanes()) * 4; break;
   1425      default:
   1426        VIXL_UNREACHABLE();
   1427    }
   1428    VIXL_ASSERT(!addr.regoffset().Is(NoReg) ||
   1429                addr.offset() == offset);
   1430  }
   1431 #else
   1432  USE(vt, addr, op);
   1433 #endif
   1434 }
   1435 
   1436 void Assembler::LoadStoreStruct(const VRegister& vt,
   1437                                const MemOperand& addr,
   1438                                NEONLoadStoreMultiStructOp op) {
   1439  LoadStoreStructVerify(vt, addr, op);
   1440  VIXL_ASSERT(vt.IsVector() || vt.Is1D());
   1441  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
   1442 }
   1443 
   1444 
   1445 void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
   1446                                              const MemOperand& addr,
   1447                                              NEONLoadStoreSingleStructOp op) {
   1448  LoadStoreStructVerify(vt, addr, op);
   1449  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
   1450 }
   1451 
   1452 
   1453 void Assembler::ld1(const VRegister& vt,
   1454                    const MemOperand& src) {
   1455  LoadStoreStruct(vt, src, NEON_LD1_1v);
   1456 }
   1457 
   1458 
   1459 void Assembler::ld1(const VRegister& vt,
   1460                    const VRegister& vt2,
   1461                    const MemOperand& src) {
   1462  USE(vt2);
   1463  VIXL_ASSERT(AreSameFormat(vt, vt2));
   1464  VIXL_ASSERT(AreConsecutive(vt, vt2));
   1465  LoadStoreStruct(vt, src, NEON_LD1_2v);
   1466 }
   1467 
   1468 
   1469 void Assembler::ld1(const VRegister& vt,
   1470                    const VRegister& vt2,
   1471                    const VRegister& vt3,
   1472                    const MemOperand& src) {
   1473  USE(vt2, vt3);
   1474  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   1475  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   1476  LoadStoreStruct(vt, src, NEON_LD1_3v);
   1477 }
   1478 
   1479 
   1480 void Assembler::ld1(const VRegister& vt,
   1481                    const VRegister& vt2,
   1482                    const VRegister& vt3,
   1483                    const VRegister& vt4,
   1484                    const MemOperand& src) {
   1485  USE(vt2, vt3, vt4);
   1486  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   1487  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   1488  LoadStoreStruct(vt, src, NEON_LD1_4v);
   1489 }
   1490 
   1491 
   1492 void Assembler::ld2(const VRegister& vt,
   1493                    const VRegister& vt2,
   1494                    const MemOperand& src) {
   1495  USE(vt2);
   1496  VIXL_ASSERT(AreSameFormat(vt, vt2));
   1497  VIXL_ASSERT(AreConsecutive(vt, vt2));
   1498  LoadStoreStruct(vt, src, NEON_LD2);
   1499 }
   1500 
   1501 
   1502 void Assembler::ld2(const VRegister& vt,
   1503                    const VRegister& vt2,
   1504                    int lane,
   1505                    const MemOperand& src) {
   1506  USE(vt2);
   1507  VIXL_ASSERT(AreSameFormat(vt, vt2));
   1508  VIXL_ASSERT(AreConsecutive(vt, vt2));
   1509  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
   1510 }
   1511 
   1512 
   1513 void Assembler::ld2r(const VRegister& vt,
   1514                     const VRegister& vt2,
   1515                     const MemOperand& src) {
   1516  USE(vt2);
   1517  VIXL_ASSERT(AreSameFormat(vt, vt2));
   1518  VIXL_ASSERT(AreConsecutive(vt, vt2));
   1519  LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
   1520 }
   1521 
   1522 
   1523 void Assembler::ld3(const VRegister& vt,
   1524                    const VRegister& vt2,
   1525                    const VRegister& vt3,
   1526                    const MemOperand& src) {
   1527  USE(vt2, vt3);
   1528  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   1529  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   1530  LoadStoreStruct(vt, src, NEON_LD3);
   1531 }
   1532 
   1533 
   1534 void Assembler::ld3(const VRegister& vt,
   1535                    const VRegister& vt2,
   1536                    const VRegister& vt3,
   1537                    int lane,
   1538                    const MemOperand& src) {
   1539  USE(vt2, vt3);
   1540  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   1541  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   1542  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
   1543 }
   1544 
   1545 
   1546 void Assembler::ld3r(const VRegister& vt,
   1547                    const VRegister& vt2,
   1548                    const VRegister& vt3,
   1549                    const MemOperand& src) {
   1550  USE(vt2, vt3);
   1551  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   1552  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   1553  LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
   1554 }
   1555 
   1556 
   1557 void Assembler::ld4(const VRegister& vt,
   1558                    const VRegister& vt2,
   1559                    const VRegister& vt3,
   1560                    const VRegister& vt4,
   1561                    const MemOperand& src) {
   1562  USE(vt2, vt3, vt4);
   1563  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   1564  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   1565  LoadStoreStruct(vt, src, NEON_LD4);
   1566 }
   1567 
   1568 
   1569 void Assembler::ld4(const VRegister& vt,
   1570                    const VRegister& vt2,
   1571                    const VRegister& vt3,
   1572                    const VRegister& vt4,
   1573                    int lane,
   1574                    const MemOperand& src) {
   1575  USE(vt2, vt3, vt4);
   1576  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   1577  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   1578  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
   1579 }
   1580 
   1581 
   1582 void Assembler::ld4r(const VRegister& vt,
   1583                    const VRegister& vt2,
   1584                    const VRegister& vt3,
   1585                    const VRegister& vt4,
   1586                    const MemOperand& src) {
   1587  USE(vt2, vt3, vt4);
   1588  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   1589  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   1590  LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
   1591 }
   1592 
   1593 
   1594 void Assembler::st1(const VRegister& vt,
   1595                    const MemOperand& src) {
   1596  LoadStoreStruct(vt, src, NEON_ST1_1v);
   1597 }
   1598 
   1599 
   1600 void Assembler::st1(const VRegister& vt,
   1601                    const VRegister& vt2,
   1602                    const MemOperand& src) {
   1603  USE(vt2);
   1604  VIXL_ASSERT(AreSameFormat(vt, vt2));
   1605  VIXL_ASSERT(AreConsecutive(vt, vt2));
   1606  LoadStoreStruct(vt, src, NEON_ST1_2v);
   1607 }
   1608 
   1609 
   1610 void Assembler::st1(const VRegister& vt,
   1611                    const VRegister& vt2,
   1612                    const VRegister& vt3,
   1613                    const MemOperand& src) {
   1614  USE(vt2, vt3);
   1615  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   1616  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   1617  LoadStoreStruct(vt, src, NEON_ST1_3v);
   1618 }
   1619 
   1620 
   1621 void Assembler::st1(const VRegister& vt,
   1622                    const VRegister& vt2,
   1623                    const VRegister& vt3,
   1624                    const VRegister& vt4,
   1625                    const MemOperand& src) {
   1626  USE(vt2, vt3, vt4);
   1627  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   1628  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   1629  LoadStoreStruct(vt, src, NEON_ST1_4v);
   1630 }
   1631 
   1632 
   1633 void Assembler::st2(const VRegister& vt,
   1634                    const VRegister& vt2,
   1635                    const MemOperand& dst) {
   1636  USE(vt2);
   1637  VIXL_ASSERT(AreSameFormat(vt, vt2));
   1638  VIXL_ASSERT(AreConsecutive(vt, vt2));
   1639  LoadStoreStruct(vt, dst, NEON_ST2);
   1640 }
   1641 
   1642 
   1643 void Assembler::st2(const VRegister& vt,
   1644                    const VRegister& vt2,
   1645                    int lane,
   1646                    const MemOperand& dst) {
   1647  USE(vt2);
   1648  VIXL_ASSERT(AreSameFormat(vt, vt2));
   1649  VIXL_ASSERT(AreConsecutive(vt, vt2));
   1650  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
   1651 }
   1652 
   1653 
   1654 void Assembler::st3(const VRegister& vt,
   1655                    const VRegister& vt2,
   1656                    const VRegister& vt3,
   1657                    const MemOperand& dst) {
   1658  USE(vt2, vt3);
   1659  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   1660  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   1661  LoadStoreStruct(vt, dst, NEON_ST3);
   1662 }
   1663 
   1664 
   1665 void Assembler::st3(const VRegister& vt,
   1666                    const VRegister& vt2,
   1667                    const VRegister& vt3,
   1668                    int lane,
   1669                    const MemOperand& dst) {
   1670  USE(vt2, vt3);
   1671  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   1672  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   1673  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
   1674 }
   1675 
   1676 
   1677 void Assembler::st4(const VRegister& vt,
   1678                    const VRegister& vt2,
   1679                    const VRegister& vt3,
   1680                    const VRegister& vt4,
   1681                    const MemOperand& dst) {
   1682  USE(vt2, vt3, vt4);
   1683  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   1684  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   1685  LoadStoreStruct(vt, dst, NEON_ST4);
   1686 }
   1687 
   1688 
   1689 void Assembler::st4(const VRegister& vt,
   1690                    const VRegister& vt2,
   1691                    const VRegister& vt3,
   1692                    const VRegister& vt4,
   1693                    int lane,
   1694                    const MemOperand& dst) {
   1695  USE(vt2, vt3, vt4);
   1696  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   1697  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   1698  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
   1699 }
   1700 
   1701 
   1702 void Assembler::LoadStoreStructSingle(const VRegister& vt,
   1703                                      uint32_t lane,
   1704                                      const MemOperand& addr,
   1705                                      NEONLoadStoreSingleStructOp op) {
   1706  LoadStoreStructVerify(vt, addr, op);
   1707 
   1708  // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
   1709  // number of lanes, and T is b, h, s or d.
   1710  unsigned lane_size = vt.LaneSizeInBytes();
   1711  VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size));
   1712 
   1713  // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
   1714  // S and size fields.
   1715  lane *= lane_size;
   1716  if (lane_size == 8) lane++;
   1717 
   1718  Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
   1719  Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
   1720  Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
   1721 
   1722  Instr instr = op;
   1723  switch (lane_size) {
   1724    case 1: instr |= NEONLoadStoreSingle_b; break;
   1725    case 2: instr |= NEONLoadStoreSingle_h; break;
   1726    case 4: instr |= NEONLoadStoreSingle_s; break;
   1727    default:
   1728      VIXL_ASSERT(lane_size == 8);
   1729      instr |= NEONLoadStoreSingle_d;
   1730  }
   1731 
   1732  Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
   1733 }
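        // Worked example (added for illustration): for a load to lane 3 of a
        // 4S register, lane_size is 4, so lane becomes 3 * 4 = 12 = 0b1100.
        // The low two bits land in the size field (0b00, as required for S
        // lanes), bit 2 in S (1) and bit 3 in Q (1), so lane index 3 is
        // encoded as Q:S = 0b11. For D lanes the lane++ above forces the
        // size field to 0b01.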
   1734 
   1735 
   1736 void Assembler::ld1(const VRegister& vt,
   1737                    int lane,
   1738                    const MemOperand& src) {
   1739  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
   1740 }
   1741 
   1742 
   1743 void Assembler::ld1r(const VRegister& vt,
   1744                     const MemOperand& src) {
   1745  LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
   1746 }
   1747 
   1748 
   1749 void Assembler::st1(const VRegister& vt,
   1750                    int lane,
   1751                    const MemOperand& dst) {
   1752  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
   1753 }
   1754 
   1755 
   1756 void Assembler::NEON3DifferentL(const VRegister& vd,
   1757                                const VRegister& vn,
   1758                                const VRegister& vm,
   1759                                NEON3DifferentOp vop) {
   1760  VIXL_ASSERT(AreSameFormat(vn, vm));
   1761  VIXL_ASSERT((vn.Is1H() && vd.Is1S()) ||
   1762              (vn.Is1S() && vd.Is1D()) ||
   1763              (vn.Is8B() && vd.Is8H()) ||
   1764              (vn.Is4H() && vd.Is4S()) ||
   1765              (vn.Is2S() && vd.Is2D()) ||
   1766              (vn.Is16B() && vd.Is8H()) ||
   1767              (vn.Is8H() && vd.Is4S()) ||
   1768              (vn.Is4S() && vd.Is2D()));
   1769  Instr format, op = vop;
   1770  if (vd.IsScalar()) {
   1771    op |= NEON_Q | NEONScalar;
   1772    format = SFormat(vn);
   1773  } else {
   1774    format = VFormat(vn);
   1775  }
   1776  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
   1777 }
   1778 
   1779 
   1780 void Assembler::NEON3DifferentW(const VRegister& vd,
   1781                                const VRegister& vn,
   1782                                const VRegister& vm,
   1783                                NEON3DifferentOp vop) {
   1784  VIXL_ASSERT(AreSameFormat(vd, vn));
   1785  VIXL_ASSERT((vm.Is8B() && vd.Is8H()) ||
   1786              (vm.Is4H() && vd.Is4S()) ||
   1787              (vm.Is2S() && vd.Is2D()) ||
   1788              (vm.Is16B() && vd.Is8H()) ||
   1789              (vm.Is8H() && vd.Is4S()) ||
   1790              (vm.Is4S() && vd.Is2D()));
   1791  Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
   1792 }
   1793 
   1794 
   1795 void Assembler::NEON3DifferentHN(const VRegister& vd,
   1796                                 const VRegister& vn,
   1797                                 const VRegister& vm,
   1798                                 NEON3DifferentOp vop) {
   1799  VIXL_ASSERT(AreSameFormat(vm, vn));
   1800  VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
   1801              (vd.Is4H() && vn.Is4S()) ||
   1802              (vd.Is2S() && vn.Is2D()) ||
   1803              (vd.Is16B() && vn.Is8H()) ||
   1804              (vd.Is8H() && vn.Is4S()) ||
   1805              (vd.Is4S() && vn.Is2D()));
   1806  Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
   1807 }
   1808 
   1809 
   1810 #define NEON_3DIFF_LONG_LIST(V) \
   1811  V(pmull,  NEON_PMULL,  vn.IsVector() && vn.Is8B())                           \
   1812  V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B())                          \
   1813  V(saddl,  NEON_SADDL,  vn.IsVector() && vn.IsD())                            \
   1814  V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ())                            \
   1815  V(sabal,  NEON_SABAL,  vn.IsVector() && vn.IsD())                            \
   1816  V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ())                            \
   1817  V(uabal,  NEON_UABAL,  vn.IsVector() && vn.IsD())                            \
   1818  V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ())                            \
   1819  V(sabdl,  NEON_SABDL,  vn.IsVector() && vn.IsD())                            \
   1820  V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ())                            \
   1821  V(uabdl,  NEON_UABDL,  vn.IsVector() && vn.IsD())                            \
   1822  V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ())                            \
   1823  V(smlal,  NEON_SMLAL,  vn.IsVector() && vn.IsD())                            \
   1824  V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ())                            \
   1825  V(umlal,  NEON_UMLAL,  vn.IsVector() && vn.IsD())                            \
   1826  V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ())                            \
   1827  V(smlsl,  NEON_SMLSL,  vn.IsVector() && vn.IsD())                            \
   1828  V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ())                            \
   1829  V(umlsl,  NEON_UMLSL,  vn.IsVector() && vn.IsD())                            \
   1830  V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ())                            \
   1831  V(smull,  NEON_SMULL,  vn.IsVector() && vn.IsD())                            \
   1832  V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ())                            \
   1833  V(umull,  NEON_UMULL,  vn.IsVector() && vn.IsD())                            \
   1834  V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ())                            \
   1835  V(ssubl,  NEON_SSUBL,  vn.IsVector() && vn.IsD())                            \
   1836  V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ())                            \
   1837  V(uaddl,  NEON_UADDL,  vn.IsVector() && vn.IsD())                            \
   1838  V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ())                            \
   1839  V(usubl,  NEON_USUBL,  vn.IsVector() && vn.IsD())                            \
   1840  V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ())                            \
   1841  V(sqdmlal,  NEON_SQDMLAL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
   1842  V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
   1843  V(sqdmlsl,  NEON_SQDMLSL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
   1844  V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
   1845  V(sqdmull,  NEON_SQDMULL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
   1846  V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
   1847 
   1848 
   1849 #define DEFINE_ASM_FUNC(FN, OP, AS)        \
   1850 void Assembler::FN(const VRegister& vd,    \
   1851                   const VRegister& vn,    \
   1852                   const VRegister& vm) {  \
   1853  VIXL_ASSERT(AS);                         \
   1854  NEON3DifferentL(vd, vn, vm, OP);         \
   1855 }
   1856 NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
   1857 #undef DEFINE_ASM_FUNC
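        // Usage sketch (illustrative addition): each generated "long" form
        // doubles the lane width, and the *2 variants read the high half of
        // the sources, e.g.:
        //
        //   __ smull(v0.V8H(), v1.V8B(), v2.V8B());     // low 8 bytes
        //   __ smull2(v0.V8H(), v1.V16B(), v2.V16B());  // high 8 bytes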
   1858 
   1859 #define NEON_3DIFF_HN_LIST(V)         \
   1860  V(addhn,   NEON_ADDHN,   vd.IsD())  \
   1861  V(addhn2,  NEON_ADDHN2,  vd.IsQ())  \
   1862  V(raddhn,  NEON_RADDHN,  vd.IsD())  \
   1863  V(raddhn2, NEON_RADDHN2, vd.IsQ())  \
   1864  V(subhn,   NEON_SUBHN,   vd.IsD())  \
   1865  V(subhn2,  NEON_SUBHN2,  vd.IsQ())  \
   1866  V(rsubhn,  NEON_RSUBHN,  vd.IsD())  \
   1867  V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
   1868 
   1869 #define DEFINE_ASM_FUNC(FN, OP, AS)        \
   1870 void Assembler::FN(const VRegister& vd,    \
   1871                   const VRegister& vn,    \
   1872                   const VRegister& vm) {  \
   1873  VIXL_ASSERT(AS);                         \
   1874  NEON3DifferentHN(vd, vn, vm, OP);        \
   1875 }
   1876 NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
   1877 #undef DEFINE_ASM_FUNC
   1878 
   1879 void Assembler::uaddw(const VRegister& vd,
   1880                      const VRegister& vn,
   1881                      const VRegister& vm) {
   1882  VIXL_ASSERT(vm.IsD());
   1883  NEON3DifferentW(vd, vn, vm, NEON_UADDW);
   1884 }
   1885 
   1886 
   1887 void Assembler::uaddw2(const VRegister& vd,
   1888                       const VRegister& vn,
   1889                       const VRegister& vm) {
   1890  VIXL_ASSERT(vm.IsQ());
   1891  NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
   1892 }
   1893 
   1894 
   1895 void Assembler::saddw(const VRegister& vd,
   1896                      const VRegister& vn,
   1897                      const VRegister& vm) {
   1898  VIXL_ASSERT(vm.IsD());
   1899  NEON3DifferentW(vd, vn, vm, NEON_SADDW);
   1900 }
   1901 
   1902 
   1903 void Assembler::saddw2(const VRegister& vd,
   1904                       const VRegister& vn,
   1905                       const VRegister& vm) {
   1906  VIXL_ASSERT(vm.IsQ());
   1907  NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
   1908 }
   1909 
   1910 
   1911 void Assembler::usubw(const VRegister& vd,
   1912                      const VRegister& vn,
   1913                      const VRegister& vm) {
   1914  VIXL_ASSERT(vm.IsD());
   1915  NEON3DifferentW(vd, vn, vm, NEON_USUBW);
   1916 }
   1917 
   1918 
   1919 void Assembler::usubw2(const VRegister& vd,
   1920                       const VRegister& vn,
   1921                       const VRegister& vm) {
   1922  VIXL_ASSERT(vm.IsQ());
   1923  NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
   1924 }
   1925 
   1926 
   1927 void Assembler::ssubw(const VRegister& vd,
   1928                      const VRegister& vn,
   1929                      const VRegister& vm) {
   1930  VIXL_ASSERT(vm.IsD());
   1931  NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
   1932 }
   1933 
   1934 
   1935 void Assembler::ssubw2(const VRegister& vd,
   1936                       const VRegister& vn,
   1937                       const VRegister& vm) {
   1938  VIXL_ASSERT(vm.IsQ());
   1939  NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
   1940 }
   1941 
   1942 
   1943 void Assembler::mov(const Register& rd, const Register& rm) {
   1944  // Moves involving the stack pointer are encoded as an add immediate with
   1945  // a zero second operand, because register 31 names sp there. Otherwise,
   1946  // orr with zr as the first operand is used.
   1947  if (rd.IsSP() || rm.IsSP()) {
   1948    add(rd, rm, 0);
   1949  } else {
   1950    orr(rd, AppropriateZeroRegFor(rd), rm);
   1951  }
   1952 }
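        // Example of the distinction (added note): register 31 names sp in
        // add-immediate but zr in logical instructions, so
        //
        //   __ mov(sp, x0);  // encoded as add sp, x0, #0
        //   __ mov(x0, x1);  // encoded as orr x0, xzr, x1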
   1953 
   1954 
   1955 void Assembler::mvn(const Register& rd, const Operand& operand) {
   1956  orn(rd, AppropriateZeroRegFor(rd), operand);
   1957 }
   1958 
   1959 
   1960 void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
   1961  VIXL_ASSERT(rt.Is64Bits());
   1962  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
   1963 }
   1964 
   1965 
   1966 void Assembler::msr(SystemRegister sysreg, const Register& rt) {
   1967  VIXL_ASSERT(rt.Is64Bits());
   1968  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
   1969 }
   1970 
   1971 
   1972 void Assembler::clrex(int imm4) {
   1973  Emit(CLREX | CRm(imm4));
   1974 }
   1975 
   1976 
   1977 void Assembler::dmb(BarrierDomain domain, BarrierType type) {
   1978  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
   1979 }
   1980 
   1981 
   1982 void Assembler::dsb(BarrierDomain domain, BarrierType type) {
   1983  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
   1984 }
   1985 
   1986 
   1987 void Assembler::isb() {
   1988  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
   1989 }
   1990 
   1991 
   1992 void Assembler::fmov(const VRegister& vd, double imm) {
   1993  if (vd.IsScalar()) {
   1994    VIXL_ASSERT(vd.Is1D());
   1995    Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
   1996  } else {
   1997    VIXL_ASSERT(vd.Is2D());
   1998    Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
   1999    Instr q = NEON_Q;
   2000    uint32_t encoded_imm = FP64ToImm8(imm);
   2001    Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
   2002  }
   2003 }
   2004 
   2005 
   2006 void Assembler::fmov(const VRegister& vd, float imm) {
   2007  if (vd.IsScalar()) {
   2008    VIXL_ASSERT(vd.Is1S());
   2009    Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
   2010  } else {
   2011    VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   2012    Instr op = NEONModifiedImmediate_MOVI;
   2013    Instr q = vd.Is4S() ? NEON_Q : 0;
   2014    uint32_t encoded_imm = FP32ToImm8(imm);
   2015    Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
   2016  }
   2017 }
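        // Note on encodable immediates (illustrative addition): FP32ToImm8 and
        // FP64ToImm8 pack the value into eight bits, so only immediates of the
        // form +/-(16..31)/16 * 2^n with n in [-3, 4] are representable, e.g.
        // 0.5, 1.0, 2.0 and 31.0. Anything else (say 0.1) must be materialized
        // another way, typically through the MacroAssembler.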
   2018 
   2019 
   2020 void Assembler::fmov(const Register& rd, const VRegister& vn) {
   2021  VIXL_ASSERT(vn.Is1S() || vn.Is1D());
   2022  VIXL_ASSERT(rd.size() == vn.size());
   2023  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
   2024  Emit(op | Rd(rd) | Rn(vn));
   2025 }
   2026 
   2027 
   2028 void Assembler::fmov(const VRegister& vd, const Register& rn) {
   2029  VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   2030  VIXL_ASSERT(vd.size() == rn.size());
   2031  FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
   2032  Emit(op | Rd(vd) | Rn(rn));
   2033 }
   2034 
   2035 
   2036 void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
   2037  VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   2038  VIXL_ASSERT(vd.IsSameFormat(vn));
   2039  Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
   2040 }
   2041 
   2042 
   2043 void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
   2044  VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
   2045  USE(index);
   2046  Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
   2047 }
   2048 
   2049 
   2050 void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
   2051  VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX());
   2052  USE(index);
   2053  Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
   2054 }
   2055 
   2056 
   2057 void Assembler::fmadd(const VRegister& vd,
   2058                      const VRegister& vn,
   2059                      const VRegister& vm,
   2060                      const VRegister& va) {
   2061  FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d);
   2062 }
   2063 
   2064 
   2065 void Assembler::fmsub(const VRegister& vd,
   2066                      const VRegister& vn,
   2067                      const VRegister& vm,
   2068                      const VRegister& va) {
   2069  FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d);
   2070 }
   2071 
   2072 
   2073 void Assembler::fnmadd(const VRegister& vd,
   2074                       const VRegister& vn,
   2075                       const VRegister& vm,
   2076                       const VRegister& va) {
   2077  FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMADD_s : FNMADD_d);
   2078 }
   2079 
   2080 
   2081 void Assembler::fnmsub(const VRegister& vd,
   2082                       const VRegister& vn,
   2083                       const VRegister& vm,
   2084                       const VRegister& va) {
   2085  FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d);
   2086 }
   2087 
   2088 
   2089 void Assembler::fnmul(const VRegister& vd,
   2090                      const VRegister& vn,
   2091                      const VRegister& vm) {
   2092  VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
   2093  Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
   2094  Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
   2095 }
   2096 
   2097 
   2098 void Assembler::FPCompareMacro(const VRegister& vn,
   2099                               double value,
   2100                               FPTrapFlags trap) {
   2101  USE(value);
   2102  // Although the fcmp{e} instructions can strictly only take an immediate
   2103  // value of +0.0, we don't need to check for -0.0: the sign of 0.0 does
   2104  // not affect the comparison, and -0.0 == 0.0 satisfies the assert below.
   2105  VIXL_ASSERT(value == 0.0);
   2106  VIXL_ASSERT(vn.Is1S() || vn.Is1D());
   2107  Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero;
   2108  Emit(FPType(vn) | op | Rn(vn));
   2109 }
   2110 
   2111 
   2112 void Assembler::FPCompareMacro(const VRegister& vn,
   2113                               const VRegister& vm,
   2114                               FPTrapFlags trap) {
   2115  VIXL_ASSERT(vn.Is1S() || vn.Is1D());
   2116  VIXL_ASSERT(vn.IsSameSizeAndType(vm));
   2117  Instr op = (trap == EnableTrap) ? FCMPE : FCMP;
   2118  Emit(FPType(vn) | op | Rm(vm) | Rn(vn));
   2119 }
   2120 
   2121 
   2122 void Assembler::fcmp(const VRegister& vn,
   2123                     const VRegister& vm) {
   2124  FPCompareMacro(vn, vm, DisableTrap);
   2125 }
   2126 
   2127 
   2128 void Assembler::fcmpe(const VRegister& vn,
   2129                      const VRegister& vm) {
   2130  FPCompareMacro(vn, vm, EnableTrap);
   2131 }
   2132 
   2133 
   2134 void Assembler::fcmp(const VRegister& vn,
   2135                     double value) {
   2136  FPCompareMacro(vn, value, DisableTrap);
   2137 }
   2138 
   2139 
   2140 void Assembler::fcmpe(const VRegister& vn,
   2141                      double value) {
   2142  FPCompareMacro(vn, value, EnableTrap);
   2143 }
   2144 
   2145 
   2146 void Assembler::FPCCompareMacro(const VRegister& vn,
   2147                                const VRegister& vm,
   2148                                StatusFlags nzcv,
   2149                                Condition cond,
   2150                                FPTrapFlags trap) {
   2151  VIXL_ASSERT(vn.Is1S() || vn.Is1D());
   2152  VIXL_ASSERT(vn.IsSameSizeAndType(vm));
   2153  Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP;
   2154  Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv));
   2155 }
   2156 
   2157 void Assembler::fccmp(const VRegister& vn,
   2158                      const VRegister& vm,
   2159                      StatusFlags nzcv,
   2160                      Condition cond) {
   2161  FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap);
   2162 }
   2163 
   2164 
   2165 void Assembler::fccmpe(const VRegister& vn,
   2166                       const VRegister& vm,
   2167                       StatusFlags nzcv,
   2168                       Condition cond) {
   2169  FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap);
   2170 }
   2171 
   2172 
   2173 void Assembler::fcsel(const VRegister& vd,
   2174                      const VRegister& vn,
   2175                      const VRegister& vm,
   2176                      Condition cond) {
   2177  VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   2178  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   2179  Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
   2180 }
   2181 
   2182 void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
   2183  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kJSCVT));
   2184  VIXL_ASSERT(rd.IsW() && vn.Is1D());
   2185  Emit(FJCVTZS | Rn(vn) | Rd(rd));
   2186 }
   2187 
   2188 
   2189 void Assembler::NEONFPConvertToInt(const Register& rd,
   2190                                   const VRegister& vn,
   2191                                   Instr op) {
   2192  Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
   2193 }
   2194 
   2195 
   2196 void Assembler::NEONFPConvertToInt(const VRegister& vd,
   2197                                   const VRegister& vn,
   2198                                   Instr op) {
   2199  if (vn.IsScalar()) {
   2200    VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
   2201    op |= NEON_Q | NEONScalar;
   2202  }
   2203  Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
   2204 }
   2205 
   2206 
   2207 void Assembler::fcvt(const VRegister& vd,
   2208                     const VRegister& vn) {
   2209  FPDataProcessing1SourceOp op;
   2210  if (vd.Is1D()) {
   2211    VIXL_ASSERT(vn.Is1S() || vn.Is1H());
   2212    op = vn.Is1S() ? FCVT_ds : FCVT_dh;
   2213  } else if (vd.Is1S()) {
   2214    VIXL_ASSERT(vn.Is1D() || vn.Is1H());
   2215    op = vn.Is1D() ? FCVT_sd : FCVT_sh;
   2216  } else {
   2217    VIXL_ASSERT(vd.Is1H());
   2218    VIXL_ASSERT(vn.Is1D() || vn.Is1S());
   2219    op = vn.Is1D() ? FCVT_hd : FCVT_hs;
   2220  }
   2221  FPDataProcessing1Source(vd, vn, op);
   2222 }
   2223 
   2224 
   2225 void Assembler::fcvtl(const VRegister& vd,
   2226                      const VRegister& vn) {
   2227  VIXL_ASSERT((vd.Is4S() && vn.Is4H()) ||
   2228              (vd.Is2D() && vn.Is2S()));
   2229  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
   2230  Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
   2231 }
   2232 
   2233 
   2234 void Assembler::fcvtl2(const VRegister& vd,
   2235                       const VRegister& vn) {
   2236  VIXL_ASSERT((vd.Is4S() && vn.Is8H()) ||
   2237              (vd.Is2D() && vn.Is4S()));
   2238  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
   2239  Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
   2240 }
   2241 
   2242 
   2243 void Assembler::fcvtn(const VRegister& vd,
   2244                      const VRegister& vn) {
   2245  VIXL_ASSERT((vn.Is4S() && vd.Is4H()) ||
   2246              (vn.Is2D() && vd.Is2S()));
   2247  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
   2248  Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
   2249 }
   2250 
   2251 
   2252 void Assembler::fcvtn2(const VRegister& vd,
   2253                       const VRegister& vn) {
   2254  VIXL_ASSERT((vn.Is4S() && vd.Is8H()) ||
   2255              (vn.Is2D() && vd.Is4S()));
   2256  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
   2257  Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
   2258 }
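        // Usage sketch (added for illustration): fcvtl widens each lane to the
        // next size up, fcvtn narrows with rounding, and the *2 forms address
        // the high half of the vector:
        //
        //   __ fcvtl(v0.V2D(), v1.V2S());   // two singles -> two doubles
        //   __ fcvtn(v0.V2S(), v1.V2D());   // two doubles -> two singles
        //   __ fcvtn2(v0.V4S(), v1.V2D());  // fills the upper two S lanes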
   2259 
   2260 
   2261 void Assembler::fcvtxn(const VRegister& vd,
   2262                       const VRegister& vn) {
   2263  Instr format = 1 << NEONSize_offset;
   2264  if (vd.IsScalar()) {
   2265    VIXL_ASSERT(vd.Is1S() && vn.Is1D());
   2266    Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
   2267  } else {
   2268    VIXL_ASSERT(vd.Is2S() && vn.Is2D());
   2269    Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
   2270  }
   2271 }
   2272 
   2273 
   2274 void Assembler::fcvtxn2(const VRegister& vd,
   2275                        const VRegister& vn) {
   2276  VIXL_ASSERT(vd.Is4S() && vn.Is2D());
   2277  Instr format = 1 << NEONSize_offset;
   2278  Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
   2279 }
   2280 
   2281 
   2282 #define NEON_FP2REGMISC_FCVT_LIST(V)  \
   2283  V(fcvtnu, NEON_FCVTNU, FCVTNU)      \
   2284  V(fcvtns, NEON_FCVTNS, FCVTNS)      \
   2285  V(fcvtpu, NEON_FCVTPU, FCVTPU)      \
   2286  V(fcvtps, NEON_FCVTPS, FCVTPS)      \
   2287  V(fcvtmu, NEON_FCVTMU, FCVTMU)      \
   2288  V(fcvtms, NEON_FCVTMS, FCVTMS)      \
   2289  V(fcvtau, NEON_FCVTAU, FCVTAU)      \
   2290  V(fcvtas, NEON_FCVTAS, FCVTAS)
   2291 
   2292 #define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP)  \
   2293 void Assembler::FN(const Register& rd,        \
   2294                   const VRegister& vn) {     \
   2295  NEONFPConvertToInt(rd, vn, SCA_OP);         \
   2296 }                                             \
   2297 void Assembler::FN(const VRegister& vd,       \
   2298                   const VRegister& vn) {     \
   2299  NEONFPConvertToInt(vd, vn, VEC_OP);         \
   2300 }
   2301 NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
   2302 #undef DEFINE_ASM_FUNCS
   2303 
   2304 
   2305 void Assembler::fcvtzs(const Register& rd,
   2306                       const VRegister& vn,
   2307                       int fbits) {
   2308  VIXL_ASSERT(vn.Is1S() || vn.Is1D());
   2309  VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
   2310  if (fbits == 0) {
   2311    Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
   2312  } else {
   2313    Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
   2314         Rd(rd));
   2315  }
   2316 }
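        // Worked example (illustrative): with fbits == 16,
        //
        //   __ fcvtzs(x0, d0, 16);  // x0 = (int64_t)(d0 * 65536), toward zero
        //
        // i.e. the result is a fixed-point value with 16 fractional bits.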
   2317 
   2318 
   2319 void Assembler::fcvtzs(const VRegister& vd,
   2320                       const VRegister& vn,
   2321                       int fbits) {
   2322  VIXL_ASSERT(fbits >= 0);
   2323  if (fbits == 0) {
   2324    NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
   2325  } else {
   2326    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
   2327    NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
   2328  }
   2329 }
   2330 
   2331 
   2332 void Assembler::fcvtzu(const Register& rd,
   2333                       const VRegister& vn,
   2334                       int fbits) {
   2335  VIXL_ASSERT(vn.Is1S() || vn.Is1D());
   2336  VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
   2337  if (fbits == 0) {
   2338    Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
   2339  } else {
   2340    Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
   2341         Rd(rd));
   2342  }
   2343 }
   2344 
   2345 
   2346 void Assembler::fcvtzu(const VRegister& vd,
   2347                       const VRegister& vn,
   2348                       int fbits) {
   2349  VIXL_ASSERT(fbits >= 0);
   2350  if (fbits == 0) {
   2351    NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
   2352  } else {
   2353    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
   2354    NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
   2355  }
   2356 }
   2357 
   2358 void Assembler::ucvtf(const VRegister& vd,
   2359                      const VRegister& vn,
   2360                      int fbits) {
   2361  VIXL_ASSERT(fbits >= 0);
   2362  if (fbits == 0) {
   2363    NEONFP2RegMisc(vd, vn, NEON_UCVTF);
   2364  } else {
   2365    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
   2366    NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
   2367  }
   2368 }
   2369 
   2370 void Assembler::scvtf(const VRegister& vd,
   2371                      const VRegister& vn,
   2372                      int fbits) {
   2373  VIXL_ASSERT(fbits >= 0);
   2374  if (fbits == 0) {
   2375    NEONFP2RegMisc(vd, vn, NEON_SCVTF);
   2376  } else {
   2377    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
   2378    NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
   2379  }
   2380 }
   2381 
   2382 
   2383 void Assembler::scvtf(const VRegister& vd,
   2384                      const Register& rn,
   2385                      int fbits) {
   2386  VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   2387  VIXL_ASSERT(fbits >= 0);
   2388  if (fbits == 0) {
   2389    Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
   2390  } else {
   2391    Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
   2392         Rd(vd));
   2393  }
   2394 }
   2395 
   2396 
   2397 void Assembler::ucvtf(const VRegister& vd,
   2398                      const Register& rn,
   2399                      int fbits) {
   2400  VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   2401  VIXL_ASSERT(fbits >= 0);
   2402  if (fbits == 0) {
   2403    Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
   2404  } else {
   2405    Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
   2406         Rd(vd));
   2407  }
   2408 }
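        // Worked example (illustrative): the fixed-point converts here are the
        // inverse of the fcvtz* forms above; with fbits == 16,
        //
        //   __ scvtf(d0, x0, 16);  // d0 = (double)x0 / 65536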
   2409 
   2410 
   2411 void Assembler::NEON3Same(const VRegister& vd,
   2412                          const VRegister& vn,
   2413                          const VRegister& vm,
   2414                          NEON3SameOp vop) {
   2415  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   2416  VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
   2417 
   2418  Instr format, op = vop;
   2419  if (vd.IsScalar()) {
   2420    op |= NEON_Q | NEONScalar;
   2421    format = SFormat(vd);
   2422  } else {
   2423    format = VFormat(vd);
   2424  }
   2425 
   2426  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
   2427 }
   2428 
   2429 
   2430 void Assembler::NEONFP3Same(const VRegister& vd,
   2431                            const VRegister& vn,
   2432                            const VRegister& vm,
   2433                            Instr op) {
   2434  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   2435  Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
   2436 }
   2437 
   2438 
   2439 #define NEON_FP2REGMISC_LIST(V)                 \
   2440  V(fabs,    NEON_FABS,    FABS)                \
   2441  V(fneg,    NEON_FNEG,    FNEG)                \
   2442  V(fsqrt,   NEON_FSQRT,   FSQRT)               \
   2443  V(frintn,  NEON_FRINTN,  FRINTN)              \
   2444  V(frinta,  NEON_FRINTA,  FRINTA)              \
   2445  V(frintp,  NEON_FRINTP,  FRINTP)              \
   2446  V(frintm,  NEON_FRINTM,  FRINTM)              \
   2447  V(frintx,  NEON_FRINTX,  FRINTX)              \
   2448  V(frintz,  NEON_FRINTZ,  FRINTZ)              \
   2449  V(frinti,  NEON_FRINTI,  FRINTI)              \
   2450  V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
   2451  V(frecpe,  NEON_FRECPE,  NEON_FRECPE_scalar)
   2452 
   2453 
   2454 #define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)            \
   2455 void Assembler::FN(const VRegister& vd,                \
   2456                   const VRegister& vn) {              \
   2457  Instr op;                                            \
   2458  if (vd.IsScalar()) {                                 \
   2459    VIXL_ASSERT(vd.Is1S() || vd.Is1D());               \
   2460    op = SCA_OP;                                       \
   2461  } else {                                             \
   2462    VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());  \
   2463    op = VEC_OP;                                       \
   2464  }                                                    \
   2465  NEONFP2RegMisc(vd, vn, op);                          \
   2466 }
   2467 NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
   2468 #undef DEFINE_ASM_FUNC
   2469 
   2470 
   2471 void Assembler::NEONFP2RegMisc(const VRegister& vd,
   2472                               const VRegister& vn,
   2473                               Instr op) {
   2474  VIXL_ASSERT(AreSameFormat(vd, vn));
   2475  Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
   2476 }
   2477 
   2478 
   2479 void Assembler::NEON2RegMisc(const VRegister& vd,
   2480                             const VRegister& vn,
   2481                             NEON2RegMiscOp vop,
   2482                             int value) {
   2483  VIXL_ASSERT(AreSameFormat(vd, vn));
   2484  VIXL_ASSERT(value == 0);
   2485  USE(value);
   2486 
   2487  Instr format, op = vop;
   2488  if (vd.IsScalar()) {
   2489    op |= NEON_Q | NEONScalar;
   2490    format = SFormat(vd);
   2491  } else {
   2492    format = VFormat(vd);
   2493  }
   2494 
   2495  Emit(format | op | Rn(vn) | Rd(vd));
   2496 }
   2497 
   2498 
   2499 void Assembler::cmeq(const VRegister& vd,
   2500                     const VRegister& vn,
   2501                     int value) {
   2502  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   2503  NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
   2504 }
   2505 
   2506 
   2507 void Assembler::cmge(const VRegister& vd,
   2508                     const VRegister& vn,
   2509                     int value) {
   2510  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   2511  NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
   2512 }
   2513 
   2514 
   2515 void Assembler::cmgt(const VRegister& vd,
   2516                     const VRegister& vn,
   2517                     int value) {
   2518  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   2519  NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
   2520 }
   2521 
   2522 
   2523 void Assembler::cmle(const VRegister& vd,
   2524                     const VRegister& vn,
   2525                     int value) {
   2526  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   2527  NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
   2528 }
   2529 
   2530 
   2531 void Assembler::cmlt(const VRegister& vd,
   2532                     const VRegister& vn,
   2533                     int value) {
   2534  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   2535  NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
   2536 }
   2537 
   2538 
   2539 void Assembler::shll(const VRegister& vd,
   2540                     const VRegister& vn,
   2541                     int shift) {
   2542  VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
   2543              (vd.Is4S() && vn.Is4H() && shift == 16) ||
   2544              (vd.Is2D() && vn.Is2S() && shift == 32));
   2545  USE(shift);
   2546  Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
   2547 }
   2548 
   2549 
   2550 void Assembler::shll2(const VRegister& vd,
   2551                      const VRegister& vn,
   2552                      int shift) {
   2553  USE(shift);
   2554  VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
   2555              (vd.Is4S() && vn.Is8H() && shift == 16) ||
   2556              (vd.Is2D() && vn.Is4S() && shift == 32));
   2557  Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
   2558 }
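        // Usage note (added): the shift must equal the source lane width and
        // is not encoded anywhere, which is why it is only asserted on:
        //
        //   __ shll(v0.V8H(), v1.V8B(), 8);    // each byte << 8 into a half
        //   __ shll2(v0.V4S(), v1.V8H(), 16);  // high four halves << 16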
   2559 
   2560 
   2561 void Assembler::NEONFP2RegMisc(const VRegister& vd,
   2562                               const VRegister& vn,
   2563                               NEON2RegMiscOp vop,
   2564                               double value) {
   2565  VIXL_ASSERT(AreSameFormat(vd, vn));
   2566  VIXL_ASSERT(value == 0.0);
   2567  USE(value);
   2568 
   2569  Instr op = vop;
   2570  if (vd.IsScalar()) {
   2571    VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   2572    op |= NEON_Q | NEONScalar;
   2573  } else {
   2574    VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
   2575  }
   2576 
   2577  Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
   2578 }
   2579 
   2580 
   2581 void Assembler::fcmeq(const VRegister& vd,
   2582                      const VRegister& vn,
   2583                      double value) {
   2584  NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
   2585 }
   2586 
   2587 
   2588 void Assembler::fcmge(const VRegister& vd,
   2589                      const VRegister& vn,
   2590                      double value) {
   2591  NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
   2592 }
   2593 
   2594 
   2595 void Assembler::fcmgt(const VRegister& vd,
   2596                      const VRegister& vn,
   2597                      double value) {
   2598  NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
   2599 }
   2600 
   2601 
   2602 void Assembler::fcmle(const VRegister& vd,
   2603                      const VRegister& vn,
   2604                      double value) {
   2605  NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
   2606 }
   2607 
   2608 
   2609 void Assembler::fcmlt(const VRegister& vd,
   2610                      const VRegister& vn,
   2611                      double value) {
   2612  NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
   2613 }
   2614 
   2615 
   2616 void Assembler::frecpx(const VRegister& vd,
   2617                       const VRegister& vn) {
   2618  VIXL_ASSERT(vd.IsScalar());
   2619  VIXL_ASSERT(AreSameFormat(vd, vn));
   2620  VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   2621  Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
   2622 }
   2623 
   2624 
   2625 #define NEON_3SAME_LIST(V) \
   2626  V(add,      NEON_ADD,      vd.IsVector() || vd.Is1D())            \
   2627  V(addp,     NEON_ADDP,     vd.IsVector() || vd.Is1D())            \
   2628  V(sub,      NEON_SUB,      vd.IsVector() || vd.Is1D())            \
   2629  V(cmeq,     NEON_CMEQ,     vd.IsVector() || vd.Is1D())            \
   2630  V(cmge,     NEON_CMGE,     vd.IsVector() || vd.Is1D())            \
   2631  V(cmgt,     NEON_CMGT,     vd.IsVector() || vd.Is1D())            \
   2632  V(cmhi,     NEON_CMHI,     vd.IsVector() || vd.Is1D())            \
   2633  V(cmhs,     NEON_CMHS,     vd.IsVector() || vd.Is1D())            \
   2634  V(cmtst,    NEON_CMTST,    vd.IsVector() || vd.Is1D())            \
   2635  V(sshl,     NEON_SSHL,     vd.IsVector() || vd.Is1D())            \
   2636  V(ushl,     NEON_USHL,     vd.IsVector() || vd.Is1D())            \
   2637  V(srshl,    NEON_SRSHL,    vd.IsVector() || vd.Is1D())            \
   2638  V(urshl,    NEON_URSHL,    vd.IsVector() || vd.Is1D())            \
   2639  V(sqdmulh,  NEON_SQDMULH,  vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
   2640  V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
   2641  V(shadd,    NEON_SHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2642  V(uhadd,    NEON_UHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2643  V(srhadd,   NEON_SRHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
   2644  V(urhadd,   NEON_URHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
   2645  V(shsub,    NEON_SHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2646  V(uhsub,    NEON_UHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2647  V(smax,     NEON_SMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2648  V(smaxp,    NEON_SMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2649  V(smin,     NEON_SMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2650  V(sminp,    NEON_SMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2651  V(umax,     NEON_UMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2652  V(umaxp,    NEON_UMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2653  V(umin,     NEON_UMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2654  V(uminp,    NEON_UMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   2655  V(saba,     NEON_SABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2656  V(sabd,     NEON_SABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2657  V(uaba,     NEON_UABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2658  V(uabd,     NEON_UABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
   2659  V(mla,      NEON_MLA,      vd.IsVector() && !vd.IsLaneSizeD())    \
   2660  V(mls,      NEON_MLS,      vd.IsVector() && !vd.IsLaneSizeD())    \
   2661  V(mul,      NEON_MUL,      vd.IsVector() && !vd.IsLaneSizeD())    \
   2662  V(and_,     NEON_AND,      vd.Is8B() || vd.Is16B())               \
   2663  V(orr,      NEON_ORR,      vd.Is8B() || vd.Is16B())               \
   2664  V(orn,      NEON_ORN,      vd.Is8B() || vd.Is16B())               \
   2665  V(eor,      NEON_EOR,      vd.Is8B() || vd.Is16B())               \
   2666  V(bic,      NEON_BIC,      vd.Is8B() || vd.Is16B())               \
   2667  V(bit,      NEON_BIT,      vd.Is8B() || vd.Is16B())               \
   2668  V(bif,      NEON_BIF,      vd.Is8B() || vd.Is16B())               \
   2669  V(bsl,      NEON_BSL,      vd.Is8B() || vd.Is16B())               \
   2670  V(pmul,     NEON_PMUL,     vd.Is8B() || vd.Is16B())               \
   2671  V(uqadd,    NEON_UQADD,    true)                                  \
   2672  V(sqadd,    NEON_SQADD,    true)                                  \
   2673  V(uqsub,    NEON_UQSUB,    true)                                  \
   2674  V(sqsub,    NEON_SQSUB,    true)                                  \
   2675  V(sqshl,    NEON_SQSHL,    true)                                  \
   2676  V(uqshl,    NEON_UQSHL,    true)                                  \
   2677  V(sqrshl,   NEON_SQRSHL,   true)                                  \
   2678  V(uqrshl,   NEON_UQRSHL,   true)
   2679 
   2680 #define DEFINE_ASM_FUNC(FN, OP, AS)        \
   2681 void Assembler::FN(const VRegister& vd,    \
   2682                   const VRegister& vn,    \
   2683                   const VRegister& vm) {  \
   2684  VIXL_ASSERT(AS);                         \
   2685  NEON3Same(vd, vn, vm, OP);               \
   2686 }
   2687 NEON_3SAME_LIST(DEFINE_ASM_FUNC)
   2688 #undef DEFINE_ASM_FUNC
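        // Usage sketch (illustrative addition): the generated three-same forms
        // take identically formatted operands, e.g.
        //
        //   __ add(v0.V4S(), v1.V4S(), v2.V4S());
        //   __ cmeq(v0.V16B(), v1.V16B(), v2.V16B());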
   2689 
   2690 
   2691 #define NEON_FP3SAME_OP_LIST(V)                  \
   2692  V(fadd,    NEON_FADD,    FADD)                 \
   2693  V(fsub,    NEON_FSUB,    FSUB)                 \
   2694  V(fmul,    NEON_FMUL,    FMUL)                 \
   2695  V(fdiv,    NEON_FDIV,    FDIV)                 \
   2696  V(fmax,    NEON_FMAX,    FMAX)                 \
   2697  V(fmaxnm,  NEON_FMAXNM,  FMAXNM)               \
   2698  V(fmin,    NEON_FMIN,    FMIN)                 \
   2699  V(fminnm,  NEON_FMINNM,  FMINNM)               \
   2700  V(fmulx,   NEON_FMULX,   NEON_FMULX_scalar)    \
   2701  V(frecps,  NEON_FRECPS,  NEON_FRECPS_scalar)   \
   2702  V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar)  \
   2703  V(fabd,    NEON_FABD,    NEON_FABD_scalar)     \
   2704  V(fmla,    NEON_FMLA,    0)                    \
   2705  V(fmls,    NEON_FMLS,    0)                    \
   2706  V(facge,   NEON_FACGE,   NEON_FACGE_scalar)    \
   2707  V(facgt,   NEON_FACGT,   NEON_FACGT_scalar)    \
   2708  V(fcmeq,   NEON_FCMEQ,   NEON_FCMEQ_scalar)    \
   2709  V(fcmge,   NEON_FCMGE,   NEON_FCMGE_scalar)    \
   2710  V(fcmgt,   NEON_FCMGT,   NEON_FCMGT_scalar)    \
   2711  V(faddp,   NEON_FADDP,   0)                    \
   2712  V(fmaxp,   NEON_FMAXP,   0)                    \
   2713  V(fminp,   NEON_FMINP,   0)                    \
   2714  V(fmaxnmp, NEON_FMAXNMP, 0)                    \
   2715  V(fminnmp, NEON_FMINNMP, 0)
   2716 
   2717 #define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)            \
   2718 void Assembler::FN(const VRegister& vd,                \
   2719                   const VRegister& vn,                \
   2720                   const VRegister& vm) {              \
   2721  Instr op;                                            \
   2722  if ((SCA_OP != 0) && vd.IsScalar()) {                \
   2723    VIXL_ASSERT(vd.Is1S() || vd.Is1D());               \
   2724    op = SCA_OP;                                       \
   2725  } else {                                             \
   2726    VIXL_ASSERT(vd.IsVector());                        \
   2727    VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());  \
   2728    op = VEC_OP;                                       \
   2729  }                                                    \
   2730  NEONFP3Same(vd, vn, vm, op);                         \
   2731 }
   2732 NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC)
   2733 #undef DEFINE_ASM_FUNC
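        // Note (illustrative addition): entries defined with a 0 scalar op
        // (fmla, fmls, faddp, fmaxp, fminp, fmaxnmp, fminnmp) have no scalar
        // three-same encoding, so a scalar destination trips the
        // VIXL_ASSERT(vd.IsVector()) above: fadd(s0, s1, s2) is fine, while
        // fmla(s0, s1, s2) cannot be expressed through this path.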
   2734 
   2735 
   2736 void Assembler::addp(const VRegister& vd,
   2737                     const VRegister& vn) {
   2738  VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
   2739  Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
   2740 }
   2741 
   2742 
   2743 void Assembler::faddp(const VRegister& vd,
   2744                      const VRegister& vn) {
   2745  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
   2746              (vd.Is1D() && vn.Is2D()));
   2747  Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
   2748 }
   2749 
   2750 
   2751 void Assembler::fmaxp(const VRegister& vd,
   2752                      const VRegister& vn) {
   2753  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
   2754              (vd.Is1D() && vn.Is2D()));
   2755  Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
   2756 }
   2757 
   2758 
   2759 void Assembler::fminp(const VRegister& vd,
   2760                      const VRegister& vn) {
   2761  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
   2762              (vd.Is1D() && vn.Is2D()));
   2763  Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
   2764 }
   2765 
   2766 
   2767 void Assembler::fmaxnmp(const VRegister& vd,
   2768                        const VRegister& vn) {
   2769  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
   2770              (vd.Is1D() && vn.Is2D()));
   2771  Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
   2772 }
   2773 
   2774 
   2775 void Assembler::fminnmp(const VRegister& vd,
   2776                        const VRegister& vn) {
   2777  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
   2778              (vd.Is1D() && vn.Is2D()));
   2779  Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
   2780 }
   2781 
   2782 
   2783 void Assembler::orr(const VRegister& vd,
   2784                    const int imm8,
   2785                    const int left_shift) {
   2786  NEONModifiedImmShiftLsl(vd, imm8, left_shift,
   2787                          NEONModifiedImmediate_ORR);
   2788 }
   2789 
   2790 
   2791 void Assembler::mov(const VRegister& vd,
   2792                    const VRegister& vn) {
   2793  VIXL_ASSERT(AreSameFormat(vd, vn));
   2794  if (vd.IsD()) {
   2795    orr(vd.V8B(), vn.V8B(), vn.V8B());
   2796  } else {
   2797    VIXL_ASSERT(vd.IsQ());
   2798    orr(vd.V16B(), vn.V16B(), vn.V16B());
   2799  }
   2800 }
   2801 
   2802 
   2803 void Assembler::bic(const VRegister& vd,
   2804                    const int imm8,
   2805                    const int left_shift) {
   2806  NEONModifiedImmShiftLsl(vd, imm8, left_shift,
   2807                          NEONModifiedImmediate_BIC);
   2808 }
   2809 
   2810 
   2811 void Assembler::movi(const VRegister& vd,
   2812                     const uint64_t imm,
   2813                     Shift shift,
   2814                     const int shift_amount) {
   2815  VIXL_ASSERT((shift == LSL) || (shift == MSL));
   2816  if (vd.Is2D() || vd.Is1D()) {
   2817    VIXL_ASSERT(shift_amount == 0);
   2818    int imm8 = 0;
   2819    for (int i = 0; i < 8; ++i) {
   2820      int byte = (imm >> (i * 8)) & 0xff;
   2821      VIXL_ASSERT((byte == 0) || (byte == 0xff));
   2822      if (byte == 0xff) {
   2823        imm8 |= (1 << i);
   2824      }
   2825    }
   2826    int q = vd.Is2D() ? NEON_Q : 0;
   2827    Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
   2828         ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
   2829  } else if (shift == LSL) {
   2830    VIXL_ASSERT(IsUint8(imm));
   2831    NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
   2832                            NEONModifiedImmediate_MOVI);
   2833  } else {
   2834    VIXL_ASSERT(IsUint8(imm));
   2835    NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
   2836                            NEONModifiedImmediate_MOVI);
   2837  }
   2838 }
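        // Worked example (illustrative): in the 2D form every byte of imm must
        // be 0x00 or 0xff, and byte i sets bit i of imm8, so
        //
        //   __ movi(v0.V2D(), 0xff00ff00ff00ff00);  // imm8 = 0b10101010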
   2839 
   2840 
   2841 void Assembler::mvn(const VRegister& vd,
   2842                    const VRegister& vn) {
   2843  VIXL_ASSERT(AreSameFormat(vd, vn));
   2844  if (vd.IsD()) {
   2845    not_(vd.V8B(), vn.V8B());
   2846  } else {
   2847    VIXL_ASSERT(vd.IsQ());
   2848    not_(vd.V16B(), vn.V16B());
   2849  }
   2850 }
   2851 
   2852 
   2853 void Assembler::mvni(const VRegister& vd,
   2854                     const int imm8,
   2855                     Shift shift,
   2856                     const int shift_amount) {
   2857  VIXL_ASSERT((shift == LSL) || (shift == MSL));
   2858  if (shift == LSL) {
   2859    NEONModifiedImmShiftLsl(vd, imm8, shift_amount,
   2860                            NEONModifiedImmediate_MVNI);
   2861  } else {
   2862    NEONModifiedImmShiftMsl(vd, imm8, shift_amount,
   2863                            NEONModifiedImmediate_MVNI);
   2864  }
   2865 }
   2866 
   2867 
   2868 void Assembler::NEONFPByElement(const VRegister& vd,
   2869                                const VRegister& vn,
   2870                                const VRegister& vm,
   2871                                int vm_index,
   2872                                NEONByIndexedElementOp vop) {
   2873  VIXL_ASSERT(AreSameFormat(vd, vn));
   2874  VIXL_ASSERT((vd.Is2S() && vm.Is1S()) ||
   2875              (vd.Is4S() && vm.Is1S()) ||
   2876              (vd.Is1S() && vm.Is1S()) ||
   2877              (vd.Is2D() && vm.Is1D()) ||
   2878              (vd.Is1D() && vm.Is1D()));
   2879  VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) ||
   2880              (vm.Is1D() && (vm_index < 2)));
   2881 
   2882  Instr op = vop;
   2883  int index_num_bits = vm.Is1S() ? 2 : 1;
   2884  if (vd.IsScalar()) {
   2885    op |= NEON_Q | NEONScalar;
   2886  }
   2887 
   2888  Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) |
   2889       Rm(vm) | Rn(vn) | Rd(vd));
   2890 }
   2891 
   2892 
   2893 void Assembler::NEONByElement(const VRegister& vd,
   2894                              const VRegister& vn,
   2895                              const VRegister& vm,
   2896                              int vm_index,
   2897                              NEONByIndexedElementOp vop) {
   2898  VIXL_ASSERT(AreSameFormat(vd, vn));
   2899  VIXL_ASSERT((vd.Is4H() && vm.Is1H()) ||
   2900              (vd.Is8H() && vm.Is1H()) ||
   2901              (vd.Is1H() && vm.Is1H()) ||
   2902              (vd.Is2S() && vm.Is1S()) ||
   2903              (vd.Is4S() && vm.Is1S()) ||
   2904              (vd.Is1S() && vm.Is1S()));
   2905  VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
   2906              (vm.Is1S() && (vm_index < 4)));
   2907 
   2908  Instr format, op = vop;
   2909  int index_num_bits = vm.Is1H() ? 3 : 2;
   2910  if (vd.IsScalar()) {
   2911    op |= NEONScalar | NEON_Q;
   2912    format = SFormat(vn);
   2913  } else {
   2914    format = VFormat(vn);
   2915  }
   2916  Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
   2917       Rm(vm) | Rn(vn) | Rd(vd));
   2918 }
   2919 
   2920 
   2921 void Assembler::NEONByElementL(const VRegister& vd,
   2922                               const VRegister& vn,
   2923                               const VRegister& vm,
   2924                               int vm_index,
   2925                               NEONByIndexedElementOp vop) {
   2926  VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
   2927              (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
   2928              (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
   2929              (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
   2930              (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
   2931              (vd.Is1D() && vn.Is1S() && vm.Is1S()));
   2932 
   2933  VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
   2934              (vm.Is1S() && (vm_index < 4)));
   2935 
   2936  Instr format, op = vop;
   2937  int index_num_bits = vm.Is1H() ? 3 : 2;
   2938  if (vd.IsScalar()) {
   2939    op |= NEONScalar | NEON_Q;
   2940    format = SFormat(vn);
   2941  } else {
   2942    format = VFormat(vn);
   2943  }
   2944  Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
   2945       Rm(vm) | Rn(vn) | Rd(vd));
   2946 }
   2947 
   2948 
   2949 #define NEON_BYELEMENT_LIST(V)                         \
   2950  V(mul,      NEON_MUL_byelement,      vn.IsVector())  \
   2951  V(mla,      NEON_MLA_byelement,      vn.IsVector())  \
   2952  V(mls,      NEON_MLS_byelement,      vn.IsVector())  \
   2953  V(sqdmulh,  NEON_SQDMULH_byelement,  true)           \
   2954  V(sqrdmulh, NEON_SQRDMULH_byelement, true)
   2955 
   2956 
   2957 #define DEFINE_ASM_FUNC(FN, OP, AS)        \
   2958 void Assembler::FN(const VRegister& vd,    \
   2959                   const VRegister& vn,    \
   2960                   const VRegister& vm,    \
   2961                   int vm_index) {         \
   2962  VIXL_ASSERT(AS);                         \
   2963  NEONByElement(vd, vn, vm, vm_index, OP); \
   2964 }
   2965 NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
   2966 #undef DEFINE_ASM_FUNC
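        // For reference, each entry in the list above expands to an ordinary
        // member definition; e.g. DEFINE_ASM_FUNC(mul, NEON_MUL_byelement,
        // vn.IsVector()) produces:
        //
        //   void Assembler::mul(const VRegister& vd,
        //                       const VRegister& vn,
        //                       const VRegister& vm,
        //                       int vm_index) {
        //     VIXL_ASSERT(vn.IsVector());
        //     NEONByElement(vd, vn, vm, vm_index, NEON_MUL_byelement);
        //   }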
   2967 
   2968 
   2969 #define NEON_FPBYELEMENT_LIST(V) \
   2970  V(fmul,  NEON_FMUL_byelement)  \
   2971  V(fmla,  NEON_FMLA_byelement)  \
   2972  V(fmls,  NEON_FMLS_byelement)  \
   2973  V(fmulx, NEON_FMULX_byelement)
   2974 
   2975 
   2976 #define DEFINE_ASM_FUNC(FN, OP)              \
   2977 void Assembler::FN(const VRegister& vd,      \
   2978                   const VRegister& vn,      \
   2979                   const VRegister& vm,      \
   2980                   int vm_index) {           \
   2981  NEONFPByElement(vd, vn, vm, vm_index, OP); \
   2982 }
   2983 NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
   2984 #undef DEFINE_ASM_FUNC
   2985 
   2986 
   2987 #define NEON_BYELEMENT_LONG_LIST(V)                               \
   2988  V(sqdmull,  NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD())  \
   2989  V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ())  \
   2990  V(sqdmlal,  NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD())  \
   2991  V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ())  \
   2992  V(sqdmlsl,  NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD())  \
   2993  V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ())  \
   2994  V(smull,    NEON_SMULL_byelement,   vn.IsVector() && vn.IsD())  \
   2995  V(smull2,   NEON_SMULL_byelement,   vn.IsVector() && vn.IsQ())  \
   2996  V(umull,    NEON_UMULL_byelement,   vn.IsVector() && vn.IsD())  \
   2997  V(umull2,   NEON_UMULL_byelement,   vn.IsVector() && vn.IsQ())  \
   2998  V(smlal,    NEON_SMLAL_byelement,   vn.IsVector() && vn.IsD())  \
   2999  V(smlal2,   NEON_SMLAL_byelement,   vn.IsVector() && vn.IsQ())  \
   3000  V(umlal,    NEON_UMLAL_byelement,   vn.IsVector() && vn.IsD())  \
   3001  V(umlal2,   NEON_UMLAL_byelement,   vn.IsVector() && vn.IsQ())  \
   3002  V(smlsl,    NEON_SMLSL_byelement,   vn.IsVector() && vn.IsD())  \
   3003  V(smlsl2,   NEON_SMLSL_byelement,   vn.IsVector() && vn.IsQ())  \
   3004  V(umlsl,    NEON_UMLSL_byelement,   vn.IsVector() && vn.IsD())  \
   3005  V(umlsl2,   NEON_UMLSL_byelement,   vn.IsVector() && vn.IsQ())
   3006 
   3007 
   3008 #define DEFINE_ASM_FUNC(FN, OP, AS)         \
   3009 void Assembler::FN(const VRegister& vd,     \
   3010                   const VRegister& vn,     \
   3011                   const VRegister& vm,     \
   3012                   int vm_index) {          \
   3013  VIXL_ASSERT(AS);                          \
   3014  NEONByElementL(vd, vn, vm, vm_index, OP); \
   3015 }
   3016 NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
   3017 #undef DEFINE_ASM_FUNC
   3018 
   3019 
   3020 void Assembler::suqadd(const VRegister& vd,
   3021                       const VRegister& vn) {
   3022  NEON2RegMisc(vd, vn, NEON_SUQADD);
   3023 }
   3024 
   3025 
   3026 void Assembler::usqadd(const VRegister& vd,
   3027                       const VRegister& vn) {
   3028  NEON2RegMisc(vd, vn, NEON_USQADD);
   3029 }
   3030 
   3031 
   3032 void Assembler::abs(const VRegister& vd,
   3033                    const VRegister& vn) {
   3034  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3035  NEON2RegMisc(vd, vn, NEON_ABS);
   3036 }
   3037 
   3038 
   3039 void Assembler::sqabs(const VRegister& vd,
   3040                      const VRegister& vn) {
   3041  NEON2RegMisc(vd, vn, NEON_SQABS);
   3042 }
   3043 
   3044 
   3045 void Assembler::neg(const VRegister& vd,
   3046                    const VRegister& vn) {
   3047  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3048  NEON2RegMisc(vd, vn, NEON_NEG);
   3049 }
   3050 
   3051 
   3052 void Assembler::sqneg(const VRegister& vd,
   3053                      const VRegister& vn) {
   3054  NEON2RegMisc(vd, vn, NEON_SQNEG);
   3055 }
   3056 
   3057 
   3058 void Assembler::NEONXtn(const VRegister& vd,
   3059                        const VRegister& vn,
   3060                        NEON2RegMiscOp vop) {
   3061  Instr format, op = vop;
   3062  if (vd.IsScalar()) {
   3063    VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
   3064                (vd.Is1H() && vn.Is1S()) ||
   3065                (vd.Is1S() && vn.Is1D()));
   3066    op |= NEON_Q | NEONScalar;
   3067    format = SFormat(vd);
   3068  } else {
   3069    VIXL_ASSERT((vd.Is8B() && vn.Is8H())  ||
   3070                (vd.Is4H() && vn.Is4S())  ||
   3071                (vd.Is2S() && vn.Is2D())  ||
   3072                (vd.Is16B() && vn.Is8H()) ||
   3073                (vd.Is8H() && vn.Is4S())  ||
   3074                (vd.Is4S() && vn.Is2D()));
   3075    format = VFormat(vd);
   3076  }
   3077  Emit(format | op | Rn(vn) | Rd(vd));
   3078 }
   3079 
   3080 
   3081 void Assembler::xtn(const VRegister& vd,
   3082                    const VRegister& vn) {
   3083  VIXL_ASSERT(vd.IsVector() && vd.IsD());
   3084  NEONXtn(vd, vn, NEON_XTN);
   3085 }
   3086 
   3087 
   3088 void Assembler::xtn2(const VRegister& vd,
   3089                     const VRegister& vn) {
   3090  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   3091  NEONXtn(vd, vn, NEON_XTN);
   3092 }
   3093 
   3094 
   3095 void Assembler::sqxtn(const VRegister& vd,
   3096                      const VRegister& vn) {
   3097  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
   3098  NEONXtn(vd, vn, NEON_SQXTN);
   3099 }
   3100 
   3101 
   3102 void Assembler::sqxtn2(const VRegister& vd,
   3103                       const VRegister& vn) {
   3104  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   3105  NEONXtn(vd, vn, NEON_SQXTN);
   3106 }
   3107 
   3108 
   3109 void Assembler::sqxtun(const VRegister& vd,
   3110                       const VRegister& vn) {
   3111  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
   3112  NEONXtn(vd, vn, NEON_SQXTUN);
   3113 }
   3114 
   3115 
   3116 void Assembler::sqxtun2(const VRegister& vd,
   3117                        const VRegister& vn) {
   3118  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   3119  NEONXtn(vd, vn, NEON_SQXTUN);
   3120 }
   3121 
   3122 
   3123 void Assembler::uqxtn(const VRegister& vd,
   3124                      const VRegister& vn) {
   3125  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
   3126  NEONXtn(vd, vn, NEON_UQXTN);
   3127 }
   3128 
   3129 
   3130 void Assembler::uqxtn2(const VRegister& vd,
   3131                       const VRegister& vn) {
   3132  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   3133  NEONXtn(vd, vn, NEON_UQXTN);
   3134 }
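        // Editorial note: xtn(v0.V2S(), v1.V2D()) truncates each 64-bit lane
        // of v1 to its low 32 bits; the "2" variants (xtn2, sqxtn2, ...) write
        // the narrowed lanes into the high half of a Q destination and leave
        // the low half intact.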
   3135 
   3136 
   3137 // NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
   3138 void Assembler::not_(const VRegister& vd,
   3139                     const VRegister& vn) {
   3140  VIXL_ASSERT(AreSameFormat(vd, vn));
   3141  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   3142  Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
   3143 }
   3144 
   3145 
   3146 void Assembler::rbit(const VRegister& vd,
   3147                     const VRegister& vn) {
   3148  VIXL_ASSERT(AreSameFormat(vd, vn));
   3149  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   3150  Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
   3151 }
   3152 
   3153 
   3154 void Assembler::ext(const VRegister& vd,
   3155                    const VRegister& vn,
   3156                    const VRegister& vm,
   3157                    int index) {
   3158  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   3159  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   3160  VIXL_ASSERT((0 <= index) && (index < vd.lanes()));
   3161  Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
   3162 }
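        // Editorial note: ext(v0.V16B(), v1.V16B(), v2.V16B(), 3) builds v0
        // from bytes 3..15 of v1 followed by bytes 0..2 of v2, i.e. a
        // byte-granular extract from the concatenated pair v2:v1.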
   3163 
   3164 
   3165 void Assembler::dup(const VRegister& vd,
   3166                    const VRegister& vn,
   3167                    int vn_index) {
   3168  Instr q, scalar;
   3169 
   3170  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
   3171  // number of lanes, and T is b, h, s or d.
   3172  int lane_size = vn.LaneSizeInBytes();
   3173  NEONFormatField format;
   3174  switch (lane_size) {
   3175    case 1: format = NEON_16B; break;
   3176    case 2: format = NEON_8H;  break;
   3177    case 4: format = NEON_4S;  break;
   3178    default:
   3179      VIXL_ASSERT(lane_size == 8);
   3180      format = NEON_2D;
   3181      break;
   3182  }
   3183 
   3184  if (vd.IsScalar()) {
   3185    q = NEON_Q;
   3186    scalar = NEONScalar;
   3187  } else {
   3188    VIXL_ASSERT(!vd.Is1D());
   3189    q = vd.IsD() ? 0 : NEON_Q;
   3190    scalar = 0;
   3191  }
   3192  Emit(q | scalar | NEON_DUP_ELEMENT |
   3193       ImmNEON5(format, vn_index) | Rn(vn) | Rd(vd));
   3194 }
   3195 
   3196 
   3197 void Assembler::mov(const VRegister& vd,
   3198                    const VRegister& vn,
   3199                    int vn_index) {
   3200  VIXL_ASSERT(vn.IsScalar());
   3201  dup(vd, vn, vn_index);
   3202 }
   3203 
   3204 
   3205 void Assembler::dup(const VRegister& vd, const Register& rn) {
   3206  VIXL_ASSERT(!vd.Is1D());
   3207  VIXL_ASSERT(vd.Is2D() == rn.IsX());
   3208  int q = vd.IsD() ? 0 : NEON_Q;
   3209  Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
   3210 }
   3211 
   3212 
   3213 void Assembler::ins(const VRegister& vd,
   3214                    int vd_index,
   3215                    const VRegister& vn,
   3216                    int vn_index) {
   3217  VIXL_ASSERT(AreSameFormat(vd, vn));
   3218  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
   3219  // number of lanes, and T is b, h, s or d.
   3220  int lane_size = vd.LaneSizeInBytes();
   3221  NEONFormatField format;
   3222  switch (lane_size) {
   3223    case 1: format = NEON_16B; break;
   3224    case 2: format = NEON_8H;  break;
   3225    case 4: format = NEON_4S;  break;
   3226    default:
   3227      VIXL_ASSERT(lane_size == 8);
   3228      format = NEON_2D;
   3229      break;
   3230  }
   3231 
   3232  VIXL_ASSERT((0 <= vd_index) &&
   3233          (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   3234  VIXL_ASSERT((0 <= vn_index) &&
   3235          (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   3236  Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
   3237       ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
   3238 }
   3239 
   3240 
   3241 void Assembler::mov(const VRegister& vd,
   3242                    int vd_index,
   3243                    const VRegister& vn,
   3244                    int vn_index) {
   3245  ins(vd, vd_index, vn, vn_index);
   3246 }
   3247 
   3248 
   3249 void Assembler::ins(const VRegister& vd,
   3250                    int vd_index,
   3251                    const Register& rn) {
   3252  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
   3253  // number of lanes, and T is b, h, s or d.
   3254  int lane_size = vd.LaneSizeInBytes();
   3255  NEONFormatField format;
   3256  switch (lane_size) {
   3257    case 1: format = NEON_16B; VIXL_ASSERT(rn.IsW()); break;
   3258    case 2: format = NEON_8H;  VIXL_ASSERT(rn.IsW()); break;
   3259    case 4: format = NEON_4S;  VIXL_ASSERT(rn.IsW()); break;
   3260    default:
   3261      VIXL_ASSERT(lane_size == 8);
   3262      VIXL_ASSERT(rn.IsX());
   3263      format = NEON_2D;
   3264      break;
   3265  }
   3266 
   3267  VIXL_ASSERT((0 <= vd_index) &&
   3268          (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   3269  Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
   3270 }
   3271 
   3272 
   3273 void Assembler::mov(const VRegister& vd,
   3274                    int vd_index,
   3275                    const Register& rn) {
   3276  ins(vd, vd_index, rn);
   3277 }
   3278 
   3279 
   3280 void Assembler::umov(const Register& rd,
   3281                     const VRegister& vn,
   3282                     int vn_index) {
   3283  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
   3284  // number of lanes, and T is b, h, s or d.
   3285  int lane_size = vn.LaneSizeInBytes();
   3286  NEONFormatField format;
   3287  Instr q = 0;
   3288  switch (lane_size) {
   3289    case 1: format = NEON_16B; VIXL_ASSERT(rd.IsW()); break;
   3290    case 2: format = NEON_8H;  VIXL_ASSERT(rd.IsW()); break;
   3291    case 4: format = NEON_4S;  VIXL_ASSERT(rd.IsW()); break;
   3292    default:
   3293      VIXL_ASSERT(lane_size == 8);
   3294      VIXL_ASSERT(rd.IsX());
   3295      format = NEON_2D;
   3296      q = NEON_Q;
   3297      break;
   3298  }
   3299 
   3300  VIXL_ASSERT((0 <= vn_index) &&
   3301          (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   3302  Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
   3303 }
   3304 
   3305 
   3306 void Assembler::mov(const Register& rd,
   3307                    const VRegister& vn,
   3308                    int vn_index) {
   3309  VIXL_ASSERT(vn.SizeInBytes() >= 4);
   3310  umov(rd, vn, vn_index);
   3311 }
   3312 
   3313 
   3314 void Assembler::smov(const Register& rd,
   3315                     const VRegister& vn,
   3316                     int vn_index) {
   3317  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
   3318  // number of lanes, and T is b, h or s.
   3319  int lane_size = vn.LaneSizeInBytes();
   3320  NEONFormatField format;
   3321  Instr q = 0;
   3322  VIXL_ASSERT(lane_size != 8);
   3323  switch (lane_size) {
   3324    case 1: format = NEON_16B; break;
   3325    case 2: format = NEON_8H;  break;
   3326    default:
   3327      VIXL_ASSERT(lane_size == 4);
   3328      VIXL_ASSERT(rd.IsX());
   3329      format = NEON_4S;
   3330      break;
   3331  }
   3332  q = rd.IsW() ? 0 : NEON_Q;
   3333  VIXL_ASSERT((0 <= vn_index) &&
   3334          (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   3335  Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
   3336 }
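        // Editorial note: smov(x0, v1.V16B(), 2) sign-extends byte lane 2 of
        // v1 into all 64 bits of x0; umov zero-extends instead, and the mov
        // overload above simply forwards to umov.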
   3337 
   3338 
   3339 void Assembler::cls(const VRegister& vd,
   3340                    const VRegister& vn) {
   3341  VIXL_ASSERT(AreSameFormat(vd, vn));
   3342  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
   3343  Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
   3344 }
   3345 
   3346 
   3347 void Assembler::clz(const VRegister& vd,
   3348                    const VRegister& vn) {
   3349  VIXL_ASSERT(AreSameFormat(vd, vn));
   3350  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
   3351  Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
   3352 }
   3353 
   3354 
   3355 void Assembler::cnt(const VRegister& vd,
   3356                    const VRegister& vn) {
   3357  VIXL_ASSERT(AreSameFormat(vd, vn));
   3358  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   3359  Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
   3360 }
   3361 
   3362 
   3363 void Assembler::rev16(const VRegister& vd,
   3364                      const VRegister& vn) {
   3365  VIXL_ASSERT(AreSameFormat(vd, vn));
   3366  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   3367  Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
   3368 }
   3369 
   3370 
   3371 void Assembler::rev32(const VRegister& vd,
   3372                      const VRegister& vn) {
   3373  VIXL_ASSERT(AreSameFormat(vd, vn));
   3374  VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
   3375  Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
   3376 }
   3377 
   3378 
   3379 void Assembler::rev64(const VRegister& vd,
   3380                      const VRegister& vn) {
   3381  VIXL_ASSERT(AreSameFormat(vd, vn));
   3382  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
   3383  Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
   3384 }
   3385 
   3386 
   3387 void Assembler::ursqrte(const VRegister& vd,
   3388                        const VRegister& vn) {
   3389  VIXL_ASSERT(AreSameFormat(vd, vn));
   3390  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   3391  Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
   3392 }
   3393 
   3394 
   3395 void Assembler::urecpe(const VRegister& vd,
   3396                       const VRegister& vn) {
   3397  VIXL_ASSERT(AreSameFormat(vd, vn));
   3398  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   3399  Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
   3400 }
   3401 
   3402 
   3403 void Assembler::NEONAddlp(const VRegister& vd,
   3404                          const VRegister& vn,
   3405                          NEON2RegMiscOp op) {
   3406  VIXL_ASSERT((op == NEON_SADDLP) ||
   3407              (op == NEON_UADDLP) ||
   3408              (op == NEON_SADALP) ||
   3409              (op == NEON_UADALP));
   3410 
   3411  VIXL_ASSERT((vn.Is8B() && vd.Is4H()) ||
   3412              (vn.Is4H() && vd.Is2S()) ||
   3413              (vn.Is2S() && vd.Is1D()) ||
   3414              (vn.Is16B() && vd.Is8H())||
   3415              (vn.Is8H() && vd.Is4S()) ||
   3416              (vn.Is4S() && vd.Is2D()));
   3417  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
   3418 }
   3419 
   3420 
   3421 void Assembler::saddlp(const VRegister& vd,
   3422                       const VRegister& vn) {
   3423  NEONAddlp(vd, vn, NEON_SADDLP);
   3424 }
   3425 
   3426 
   3427 void Assembler::uaddlp(const VRegister& vd,
   3428                       const VRegister& vn) {
   3429  NEONAddlp(vd, vn, NEON_UADDLP);
   3430 }
   3431 
   3432 
   3433 void Assembler::sadalp(const VRegister& vd,
   3434                       const VRegister& vn) {
   3435  NEONAddlp(vd, vn, NEON_SADALP);
   3436 }
   3437 
   3438 
   3439 void Assembler::uadalp(const VRegister& vd,
   3440                       const VRegister& vn) {
   3441  NEONAddlp(vd, vn, NEON_UADALP);
   3442 }
   3443 
   3444 
   3445 void Assembler::NEONAcrossLanesL(const VRegister& vd,
   3446                                 const VRegister& vn,
   3447                                 NEONAcrossLanesOp op) {
   3448  VIXL_ASSERT((vn.Is8B()  && vd.Is1H()) ||
   3449              (vn.Is16B() && vd.Is1H()) ||
   3450              (vn.Is4H()  && vd.Is1S()) ||
   3451              (vn.Is8H()  && vd.Is1S()) ||
   3452              (vn.Is4S()  && vd.Is1D()));
   3453  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
   3454 }
   3455 
   3456 
   3457 void Assembler::saddlv(const VRegister& vd,
   3458                       const VRegister& vn) {
   3459  NEONAcrossLanesL(vd, vn, NEON_SADDLV);
   3460 }
   3461 
   3462 
   3463 void Assembler::uaddlv(const VRegister& vd,
   3464                       const VRegister& vn) {
   3465  NEONAcrossLanesL(vd, vn, NEON_UADDLV);
   3466 }
   3467 
   3468 
   3469 void Assembler::NEONAcrossLanes(const VRegister& vd,
   3470                                const VRegister& vn,
   3471                                NEONAcrossLanesOp op) {
   3472  VIXL_ASSERT((vn.Is8B()  && vd.Is1B()) ||
   3473              (vn.Is16B() && vd.Is1B()) ||
   3474              (vn.Is4H()  && vd.Is1H()) ||
   3475              (vn.Is8H()  && vd.Is1H()) ||
   3476              (vn.Is4S()  && vd.Is1S()));
   3477  if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
   3478    Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
   3479  } else {
   3480    Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
   3481  }
   3482 }
   3483 
   3484 
   3485 #define NEON_ACROSSLANES_LIST(V) \
   3486  V(fmaxv,   NEON_FMAXV,   vd.Is1S()) \
   3487  V(fminv,   NEON_FMINV,   vd.Is1S()) \
   3488  V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
   3489  V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
   3490  V(addv,    NEON_ADDV,    true)      \
   3491  V(smaxv,   NEON_SMAXV,   true)      \
   3492  V(sminv,   NEON_SMINV,   true)      \
   3493  V(umaxv,   NEON_UMAXV,   true)      \
   3494  V(uminv,   NEON_UMINV,   true)
   3495 
   3496 
   3497 #define DEFINE_ASM_FUNC(FN, OP, AS)        \
   3498 void Assembler::FN(const VRegister& vd,    \
   3499                   const VRegister& vn) {  \
   3500  VIXL_ASSERT(AS);                         \
   3501  NEONAcrossLanes(vd, vn, OP);             \
   3502 }
   3503 NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
   3504 #undef DEFINE_ASM_FUNC
   3505 
   3506 
   3507 void Assembler::NEONPerm(const VRegister& vd,
   3508                         const VRegister& vn,
   3509                         const VRegister& vm,
   3510                         NEONPermOp op) {
   3511  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   3512  VIXL_ASSERT(!vd.Is1D());
   3513  Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
   3514 }
   3515 
   3516 
   3517 void Assembler::trn1(const VRegister& vd,
   3518                     const VRegister& vn,
   3519                     const VRegister& vm) {
   3520  NEONPerm(vd, vn, vm, NEON_TRN1);
   3521 }
   3522 
   3523 
   3524 void Assembler::trn2(const VRegister& vd,
   3525                     const VRegister& vn,
   3526                     const VRegister& vm) {
   3527  NEONPerm(vd, vn, vm, NEON_TRN2);
   3528 }
   3529 
   3530 
   3531 void Assembler::uzp1(const VRegister& vd,
   3532                     const VRegister& vn,
   3533                     const VRegister& vm) {
   3534  NEONPerm(vd, vn, vm, NEON_UZP1);
   3535 }
   3536 
   3537 
   3538 void Assembler::uzp2(const VRegister& vd,
   3539                     const VRegister& vn,
   3540                     const VRegister& vm) {
   3541  NEONPerm(vd, vn, vm, NEON_UZP2);
   3542 }
   3543 
   3544 
   3545 void Assembler::zip1(const VRegister& vd,
   3546                     const VRegister& vn,
   3547                     const VRegister& vm) {
   3548  NEONPerm(vd, vn, vm, NEON_ZIP1);
   3549 }
   3550 
   3551 
   3552 void Assembler::zip2(const VRegister& vd,
   3553                     const VRegister& vn,
   3554                     const VRegister& vm) {
   3555  NEONPerm(vd, vn, vm, NEON_ZIP2);
   3556 }
   3557 
   3558 
   3559 void Assembler::NEONShiftImmediate(const VRegister& vd,
   3560                                   const VRegister& vn,
   3561                                   NEONShiftImmediateOp op,
   3562                                   int immh_immb) {
   3563  VIXL_ASSERT(AreSameFormat(vd, vn));
   3564  Instr q, scalar;
   3565  if (vn.IsScalar()) {
   3566    q = NEON_Q;
   3567    scalar = NEONScalar;
   3568  } else {
   3569    q = vd.IsD() ? 0 : NEON_Q;
   3570    scalar = 0;
   3571  }
   3572  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
   3573 }
   3574 
   3575 
   3576 void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
   3577                                       const VRegister& vn,
   3578                                       int shift,
   3579                                       NEONShiftImmediateOp op) {
   3580  int laneSizeInBits = vn.LaneSizeInBits();
   3581  VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
   3582  NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
   3583 }
   3584 
   3585 
   3586 void Assembler::NEONShiftRightImmediate(const VRegister& vd,
   3587                                        const VRegister& vn,
   3588                                        int shift,
   3589                                        NEONShiftImmediateOp op) {
   3590  int laneSizeInBits = vn.LaneSizeInBits();
   3591  VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
   3592  NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
   3593 }
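        // Editorial note, worked encodings for the two helpers above: the
        // shift is folded into the combined immh:immb field (bit 16 upwards).
        // For 32-bit lanes (laneSizeInBits == 32):
        //   left shift by 5   ->  immh:immb = 32 + 5     = 0b0100101
        //   right shift by 5  ->  immh:immb = 2 * 32 - 5 = 0b0111011
        // so the leading set bit of immh selects the lane size and the
        // remaining bits recover the shift amount.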
   3594 
   3595 
   3596 void Assembler::NEONShiftImmediateL(const VRegister& vd,
   3597                                    const VRegister& vn,
   3598                                    int shift,
   3599                                    NEONShiftImmediateOp op) {
   3600  int laneSizeInBits = vn.LaneSizeInBits();
   3601  VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
   3602  int immh_immb = (laneSizeInBits + shift) << 16;
   3603 
   3604  VIXL_ASSERT((vn.Is8B() && vd.Is8H()) ||
   3605              (vn.Is4H() && vd.Is4S()) ||
   3606              (vn.Is2S() && vd.Is2D()) ||
   3607              (vn.Is16B() && vd.Is8H())||
   3608              (vn.Is8H() && vd.Is4S()) ||
   3609              (vn.Is4S() && vd.Is2D()));
   3610  Instr q;
   3611  q = vn.IsD() ? 0 : NEON_Q;
   3612  Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
   3613 }
   3614 
   3615 
   3616 void Assembler::NEONShiftImmediateN(const VRegister& vd,
   3617                                    const VRegister& vn,
   3618                                    int shift,
   3619                                    NEONShiftImmediateOp op) {
   3620  Instr q, scalar;
   3621  int laneSizeInBits = vd.LaneSizeInBits();
   3622  VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
   3623  int immh_immb = (2 * laneSizeInBits - shift) << 16;
   3624 
   3625  if (vn.IsScalar()) {
   3626    VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
   3627                (vd.Is1H() && vn.Is1S()) ||
   3628                (vd.Is1S() && vn.Is1D()));
   3629    q = NEON_Q;
   3630    scalar = NEONScalar;
   3631  } else {
   3632    VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
   3633                (vd.Is4H() && vn.Is4S()) ||
   3634                (vd.Is2S() && vn.Is2D()) ||
   3635                (vd.Is16B() && vn.Is8H())||
   3636                (vd.Is8H() && vn.Is4S()) ||
   3637                (vd.Is4S() && vn.Is2D()));
   3638    scalar = 0;
   3639    q = vd.IsD() ? 0 : NEON_Q;
   3640  }
   3641  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
   3642 }
   3643 
   3644 
   3645 void Assembler::shl(const VRegister& vd,
   3646                    const VRegister& vn,
   3647                    int shift) {
   3648  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3649  NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
   3650 }
   3651 
   3652 
   3653 void Assembler::sli(const VRegister& vd,
   3654                    const VRegister& vn,
   3655                    int shift) {
   3656  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3657  NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
   3658 }
   3659 
   3660 
   3661 void Assembler::sqshl(const VRegister& vd,
   3662                      const VRegister& vn,
   3663                      int shift) {
   3664  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
   3665 }
   3666 
   3667 
   3668 void Assembler::sqshlu(const VRegister& vd,
   3669                       const VRegister& vn,
   3670                       int shift) {
   3671  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
   3672 }
   3673 
   3674 
   3675 void Assembler::uqshl(const VRegister& vd,
   3676                      const VRegister& vn,
   3677                      int shift) {
   3678  NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
   3679 }
   3680 
   3681 
   3682 void Assembler::sshll(const VRegister& vd,
   3683                      const VRegister& vn,
   3684                      int shift) {
   3685  VIXL_ASSERT(vn.IsD());
   3686  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
   3687 }
   3688 
   3689 
   3690 void Assembler::sshll2(const VRegister& vd,
   3691                       const VRegister& vn,
   3692                       int shift) {
   3693  VIXL_ASSERT(vn.IsQ());
   3694  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
   3695 }
   3696 
   3697 
   3698 void Assembler::sxtl(const VRegister& vd,
   3699                     const VRegister& vn) {
   3700  sshll(vd, vn, 0);
   3701 }
   3702 
   3703 
   3704 void Assembler::sxtl2(const VRegister& vd,
   3705                      const VRegister& vn) {
   3706  sshll2(vd, vn, 0);
   3707 }
   3708 
   3709 
   3710 void Assembler::ushll(const VRegister& vd,
   3711                      const VRegister& vn,
   3712                      int shift) {
   3713  VIXL_ASSERT(vn.IsD());
   3714  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
   3715 }
   3716 
   3717 
   3718 void Assembler::ushll2(const VRegister& vd,
   3719                       const VRegister& vn,
   3720                       int shift) {
   3721  VIXL_ASSERT(vn.IsQ());
   3722  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
   3723 }
   3724 
   3725 
   3726 void Assembler::uxtl(const VRegister& vd,
   3727                     const VRegister& vn) {
   3728  ushll(vd, vn, 0);
   3729 }
   3730 
   3731 
   3732 void Assembler::uxtl2(const VRegister& vd,
   3733                      const VRegister& vn) {
   3734  ushll2(vd, vn, 0);
   3735 }
   3736 
   3737 
   3738 void Assembler::sri(const VRegister& vd,
   3739                    const VRegister& vn,
   3740                    int shift) {
   3741  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3742  NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
   3743 }
   3744 
   3745 
   3746 void Assembler::sshr(const VRegister& vd,
   3747                     const VRegister& vn,
   3748                     int shift) {
   3749  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3750  NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
   3751 }
   3752 
   3753 
   3754 void Assembler::ushr(const VRegister& vd,
   3755                     const VRegister& vn,
   3756                     int shift) {
   3757  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3758  NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
   3759 }
   3760 
   3761 
   3762 void Assembler::srshr(const VRegister& vd,
   3763                      const VRegister& vn,
   3764                      int shift) {
   3765  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3766  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
   3767 }
   3768 
   3769 
   3770 void Assembler::urshr(const VRegister& vd,
   3771                      const VRegister& vn,
   3772                      int shift) {
   3773  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3774  NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
   3775 }
   3776 
   3777 
   3778 void Assembler::ssra(const VRegister& vd,
   3779                     const VRegister& vn,
   3780                     int shift) {
   3781  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3782  NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
   3783 }
   3784 
   3785 
   3786 void Assembler::usra(const VRegister& vd,
   3787                     const VRegister& vn,
   3788                     int shift) {
   3789  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3790  NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
   3791 }
   3792 
   3793 
   3794 void Assembler::srsra(const VRegister& vd,
   3795                      const VRegister& vn,
   3796                      int shift) {
   3797  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3798  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
   3799 }
   3800 
   3801 
   3802 void Assembler::ursra(const VRegister& vd,
   3803                      const VRegister& vn,
   3804                      int shift) {
   3805  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   3806  NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
   3807 }
   3808 
   3809 
   3810 void Assembler::shrn(const VRegister& vd,
   3811                     const VRegister& vn,
   3812                     int shift) {
   3813  VIXL_ASSERT(vn.IsVector() && vd.IsD());
   3814  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
   3815 }
   3816 
   3817 
   3818 void Assembler::shrn2(const VRegister& vd,
   3819                      const VRegister& vn,
   3820                      int shift) {
   3821  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3822  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
   3823 }
   3824 
   3825 
   3826 void Assembler::rshrn(const VRegister& vd,
   3827                      const VRegister& vn,
   3828                      int shift) {
   3829  VIXL_ASSERT(vn.IsVector() && vd.IsD());
   3830  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
   3831 }
   3832 
   3833 
   3834 void Assembler::rshrn2(const VRegister& vd,
   3835                       const VRegister& vn,
   3836                       int shift) {
   3837  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3838  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
   3839 }
   3840 
   3841 
   3842 void Assembler::sqshrn(const VRegister& vd,
   3843                       const VRegister& vn,
   3844                       int shift) {
   3845  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   3846  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
   3847 }
   3848 
   3849 
   3850 void Assembler::sqshrn2(const VRegister& vd,
   3851                        const VRegister& vn,
   3852                        int shift) {
   3853  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3854  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
   3855 }
   3856 
   3857 
   3858 void Assembler::sqrshrn(const VRegister& vd,
   3859                        const VRegister& vn,
   3860                        int shift) {
   3861  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   3862  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
   3863 }
   3864 
   3865 
   3866 void Assembler::sqrshrn2(const VRegister& vd,
   3867                         const VRegister& vn,
   3868                         int shift) {
   3869  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3870  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
   3871 }
   3872 
   3873 
   3874 void Assembler::sqshrun(const VRegister& vd,
   3875                        const VRegister& vn,
   3876                        int shift) {
   3877  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   3878  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
   3879 }
   3880 
   3881 
   3882 void Assembler::sqshrun2(const VRegister& vd,
   3883                         const VRegister& vn,
   3884                         int shift) {
   3885  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3886  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
   3887 }
   3888 
   3889 
   3890 void Assembler::sqrshrun(const VRegister& vd,
   3891                         const VRegister& vn,
   3892                         int shift) {
   3893  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   3894  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
   3895 }
   3896 
   3897 
   3898 void Assembler::sqrshrun2(const VRegister& vd,
   3899                          const VRegister& vn,
   3900                          int shift) {
   3901  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3902  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
   3903 }
   3904 
   3905 
   3906 void Assembler::uqshrn(const VRegister& vd,
   3907                       const VRegister& vn,
   3908                       int shift) {
   3909  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   3910  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
   3911 }
   3912 
   3913 
   3914 void Assembler::uqshrn2(const VRegister& vd,
   3915                        const VRegister& vn,
   3916                        int shift) {
   3917  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3918  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
   3919 }
   3920 
   3921 
   3922 void Assembler::uqrshrn(const VRegister& vd,
   3923                        const VRegister& vn,
   3924                        int shift) {
   3925  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   3926  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
   3927 }
   3928 
   3929 
   3930 void Assembler::uqrshrn2(const VRegister& vd,
   3931                         const VRegister& vn,
   3932                         int shift) {
   3933  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   3934  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
   3935 }
   3936 
   3937 
   3938 // Note:
   3939 // Below, a difference in case for the same letter indicates a
   3940 // negated bit.
   3941 // If b is 1, then B is 0.
   3942 uint32_t Assembler::FP32ToImm8(float imm) {
   3943  VIXL_ASSERT(IsImmFP32(imm));
   3944  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
   3945  uint32_t bits = FloatToRawbits(imm);
   3946  // bit7: a000.0000
   3947  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
   3948  // bit6: 0b00.0000
   3949  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
   3950  // bit5_to_0: 00cd.efgh
   3951  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
   3952 
   3953  return bit7 | bit6 | bit5_to_0;
   3954 }
   3955 
   3956 
   3957 Instr Assembler::ImmFP32(float imm) {
   3958  return FP32ToImm8(imm) << ImmFP_offset;
   3959 }
   3960 
   3961 
   3962 uint32_t Assembler::FP64ToImm8(double imm) {
   3963  VIXL_ASSERT(IsImmFP64(imm));
   3964  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   3965  //       0000.0000.0000.0000.0000.0000.0000.0000
   3966  uint64_t bits = DoubleToRawbits(imm);
   3967  // bit7: a000.0000
   3968  uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
   3969  // bit6: 0b00.0000
   3970  uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
   3971  // bit5_to_0: 00cd.efgh
   3972  uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
   3973 
   3974  return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
   3975 }
   3976 
   3977 
   3978 Instr Assembler::ImmFP64(double imm) {
   3979  return FP64ToImm8(imm) << ImmFP_offset;
   3980 }
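        // Editorial sketch, standalone and assuming only <cstdint> and
        // <cstring>: the imm8 extraction above maps IEEE-754 1.0f
        // (0x3f800000) to the A64 8-bit FP immediate 0x70.
        //
        //   #include <cstdint>
        //   #include <cstring>
        //
        //   uint32_t FloatBits(float f) {  // FloatToRawbits equivalent.
        //     uint32_t bits;
        //     std::memcpy(&bits, &f, sizeof(bits));
        //     return bits;
        //   }
        //
        //   uint32_t Fp32ToImm8(float f) {
        //     uint32_t bits = FloatBits(f);
        //     return (((bits >> 31) & 0x1) << 7) |  // a: sign bit.
        //            (((bits >> 29) & 0x1) << 6) |  // b: exponent bit 29.
        //            ((bits >> 19) & 0x3f);         // cdefgh: bits 24..19.
        //   }
        //
        //   // Fp32ToImm8(1.0f) == 0x70, Fp32ToImm8(2.0f) == 0x00.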
   3981 
   3982 
   3983 // Code generation helpers.
   3984 void Assembler::MoveWide(const Register& rd,
   3985                         uint64_t imm,
   3986                         int shift,
   3987                         MoveWideImmediateOp mov_op) {
   3988  // Ignore the top 32 bits of an immediate if we're moving to a W register.
   3989  if (rd.Is32Bits()) {
   3990    // Check that the top 32 bits are zero (a positive 32-bit number) or the
   3991    // top 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
   3992    VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
   3993                ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
   3994    imm &= kWRegMask;
   3995  }
   3996 
   3997  if (shift >= 0) {
   3998    // Explicit shift specified.
   3999    VIXL_ASSERT((shift == 0) || (shift == 16) ||
   4000                (shift == 32) || (shift == 48));
   4001    VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
   4002    shift /= 16;
   4003  } else {
   4004    // Calculate a new immediate and shift combination to encode the immediate
   4005    // argument.
   4006    shift = 0;
   4007    if ((imm & 0xffffffffffff0000) == 0) {
   4008      // Nothing to do.
   4009    } else if ((imm & 0xffffffff0000ffff) == 0) {
   4010      imm >>= 16;
   4011      shift = 1;
   4012    } else if ((imm & 0xffff0000ffffffff) == 0) {
   4013      VIXL_ASSERT(rd.Is64Bits());
   4014      imm >>= 32;
   4015      shift = 2;
   4016    } else if ((imm & 0x0000ffffffffffff) == 0) {
   4017      VIXL_ASSERT(rd.Is64Bits());
   4018      imm >>= 48;
   4019      shift = 3;
   4020    }
   4021  }
   4022 
   4023  VIXL_ASSERT(IsUint16(imm));
   4024 
   4025  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
   4026       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
   4027 }
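        // Editorial note: with a negative shift the else-branch above infers
        // the encoding; e.g. MoveWide(x0, 0x0000123400000000, -1, MOVZ)
        // matches the (imm & 0xffff0000ffffffff) == 0 case and emits
        // movz x0, #0x1234, lsl #32 (imm16 = 0x1234, hw = 2).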
   4028 
   4029 
   4030 void Assembler::AddSub(const Register& rd,
   4031                       const Register& rn,
   4032                       const Operand& operand,
   4033                       FlagsUpdate S,
   4034                       AddSubOp op) {
   4035  VIXL_ASSERT(rd.size() == rn.size());
   4036  if (operand.IsImmediate()) {
   4037    int64_t immediate = operand.immediate();
   4038    VIXL_ASSERT(IsImmAddSub(immediate));
   4039    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   4040    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
   4041         ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
   4042  } else if (operand.IsShiftedRegister()) {
   4043    VIXL_ASSERT(operand.reg().size() == rd.size());
   4044    VIXL_ASSERT(operand.shift() != ROR);
   4045 
   4046    // For instructions of the form:
   4047    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
   4048    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
   4049    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
   4050    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
   4051    // or their 64-bit register equivalents, convert the operand from shifted to
   4052    // extended register mode, and emit an add/sub extended instruction.
   4053    if (rn.IsSP() || rd.IsSP()) {
   4054      VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
   4055      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
   4056                               AddSubExtendedFixed | static_cast<Instr>(op));
   4057    } else {
   4058      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | static_cast<Instr>(op));
   4059    }
   4060  } else {
   4061    VIXL_ASSERT(operand.IsExtendedRegister());
   4062    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | static_cast<Instr>(op));
   4063  }
   4064 }
   4065 
   4066 
   4067 void Assembler::AddSubWithCarry(const Register& rd,
   4068                                const Register& rn,
   4069                                const Operand& operand,
   4070                                FlagsUpdate S,
   4071                                AddSubWithCarryOp op) {
   4072  VIXL_ASSERT(rd.size() == rn.size());
   4073  VIXL_ASSERT(rd.size() == operand.reg().size());
   4074  VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
   4075  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
   4076 }
   4077 
   4078 
   4079 void Assembler::hlt(int code) {
   4080  VIXL_ASSERT(IsUint16(code));
   4081  Emit(HLT | ImmException(code));
   4082 }
   4083 
   4084 
   4085 void Assembler::brk(int code) {
   4086  VIXL_ASSERT(IsUint16(code));
   4087  Emit(BRK | ImmException(code));
   4088 }
   4089 
   4090 
   4091 void Assembler::svc(int code) {
   4092  Emit(SVC | ImmException(code));
   4093 }
   4094 
   4095 
   4096 void Assembler::ConditionalCompare(const Register& rn,
   4097                                   const Operand& operand,
   4098                                   StatusFlags nzcv,
   4099                                   Condition cond,
   4100                                   ConditionalCompareOp op) {
   4101  Instr ccmpop;
   4102  if (operand.IsImmediate()) {
   4103    int64_t immediate = operand.immediate();
   4104    VIXL_ASSERT(IsImmConditionalCompare(immediate));
   4105    ccmpop = ConditionalCompareImmediateFixed | static_cast<Instr>(op) |
   4106        ImmCondCmp(static_cast<unsigned>(immediate));
   4107  } else {
   4108    VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
   4109    ccmpop = ConditionalCompareRegisterFixed | static_cast<Instr>(op) | Rm(operand.reg());
   4110  }
   4111  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
   4112 }
   4113 
   4114 
   4115 void Assembler::DataProcessing1Source(const Register& rd,
   4116                                      const Register& rn,
   4117                                      DataProcessing1SourceOp op) {
   4118  VIXL_ASSERT(rd.size() == rn.size());
   4119  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
   4120 }
   4121 
   4122 
   4123 void Assembler::FPDataProcessing1Source(const VRegister& vd,
   4124                                        const VRegister& vn,
   4125                                        FPDataProcessing1SourceOp op) {
   4126  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   4127  Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
   4128 }
   4129 
   4130 
   4131 void Assembler::FPDataProcessing3Source(const VRegister& vd,
   4132                                        const VRegister& vn,
   4133                                        const VRegister& vm,
   4134                                        const VRegister& va,
   4135                                        FPDataProcessing3SourceOp op) {
   4136  VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   4137  VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
   4138  Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
   4139 }
   4140 
   4141 
   4142 void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
   4143                                        const int imm8,
   4144                                        const int left_shift,
   4145                                        NEONModifiedImmediateOp op) {
   4146  VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() ||
   4147              vd.Is2S() || vd.Is4S());
   4148  VIXL_ASSERT((left_shift == 0) || (left_shift == 8) ||
   4149              (left_shift == 16) || (left_shift == 24));
   4150  VIXL_ASSERT(IsUint8(imm8));
   4151 
   4152  int cmode_1, cmode_2, cmode_3;
   4153  if (vd.Is8B() || vd.Is16B()) {
   4154    VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
   4155    cmode_1 = 1;
   4156    cmode_2 = 1;
   4157    cmode_3 = 1;
   4158  } else {
   4159    cmode_1 = (left_shift >> 3) & 1;
   4160    cmode_2 = left_shift >> 4;
   4161    cmode_3 = 0;
   4162    if (vd.Is4H() || vd.Is8H()) {
   4163      VIXL_ASSERT((left_shift == 0) || (left_shift == 8));
   4164      cmode_3 = 1;
   4165    }
   4166  }
   4167  int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
   4168 
   4169  int q = vd.IsQ() ? NEON_Q : 0;
   4170 
   4171  Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
   4172 }
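        // Editorial note, worked cmode values for the helper above (cmode_3,
        // cmode_2 and cmode_1 land in cmode bits 3, 2 and 1):
        //   2S/4S, left_shift 0/8/16/24  ->  cmode 0b0000/0b0010/0b0100/0b0110
        //   4H/8H, left_shift 0/8        ->  cmode 0b1000/0b1010
        //   8B/16B                       ->  cmode 0b1110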
   4173 
   4174 
   4175 void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
   4176                                        const int imm8,
   4177                                        const int shift_amount,
   4178                                        NEONModifiedImmediateOp op) {
   4179  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   4180  VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
   4181  VIXL_ASSERT(IsUint8(imm8));
   4182 
   4183  int cmode_0 = (shift_amount >> 4) & 1;
   4184  int cmode = 0xc | cmode_0;
   4185 
   4186  int q = vd.IsQ() ? NEON_Q : 0;
   4187 
   4188  Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
   4189 }
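        // Editorial note: the MSL form above always uses cmode 0b110x, with
        // cmode_0 selecting shift 8 (0b1100) or shift 16 (0b1101).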
   4190 
   4191 
   4192 void Assembler::EmitShift(const Register& rd,
   4193                          const Register& rn,
   4194                          Shift shift,
   4195                          unsigned shift_amount) {
   4196  switch (shift) {
   4197    case LSL:
   4198      lsl(rd, rn, shift_amount);
   4199      break;
   4200    case LSR:
   4201      lsr(rd, rn, shift_amount);
   4202      break;
   4203    case ASR:
   4204      asr(rd, rn, shift_amount);
   4205      break;
   4206    case ROR:
   4207      ror(rd, rn, shift_amount);
   4208      break;
   4209    default:
   4210      VIXL_UNREACHABLE();
   4211  }
   4212 }
   4213 
   4214 
   4215 void Assembler::EmitExtendShift(const Register& rd,
   4216                                const Register& rn,
   4217                                Extend extend,
   4218                                unsigned left_shift) {
   4219  VIXL_ASSERT(rd.size() >= rn.size());
   4220  unsigned reg_size = rd.size();
   4221  // Use the correct size of register.
   4222  Register rn_ = Register(rn.code(), rd.size());
   4223  // Bits extracted are high_bit:0.
   4224  unsigned high_bit = (8 << (extend & 0x3)) - 1;
   4225  // Number of bits left in the result that are not introduced by the shift.
   4226  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
   4227 
   4228  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
   4229    switch (extend) {
   4230      case UXTB:
   4231      case UXTH:
   4232      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
   4233      case SXTB:
   4234      case SXTH:
   4235      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
   4236      case UXTX:
   4237      case SXTX: {
   4238        VIXL_ASSERT(rn.size() == kXRegSize);
   4239        // Nothing to extend. Just shift.
   4240        lsl(rd, rn_, left_shift);
   4241        break;
   4242      }
   4243      default: VIXL_UNREACHABLE();
   4244    }
   4245  } else {
   4246    // No need to extend as the extended bits would be shifted away.
   4247    lsl(rd, rn_, left_shift);
   4248  }
   4249 }
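        // Editorial note: extending w1 with UXTB and left_shift == 4 into w0
        // gives high_bit == 7 and non_shift_bits == 28, so the first branch
        // emits ubfm(w0, w1, 28, 7), i.e. zero-extend the low byte and shift
        // it left by four bits.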
   4250 
   4251 
   4252 void Assembler::DataProcExtendedRegister(const Register& rd,
   4253                                         const Register& rn,
   4254                                         const Operand& operand,
   4255                                         FlagsUpdate S,
   4256                                         Instr op) {
   4257  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   4258  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
   4259       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
   4260       dest_reg | RnSP(rn));
   4261 }
   4262 
   4263 
   4264 Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
   4265                                     unsigned access_size,
   4266                                     LoadStoreScalingOption option) {
   4267  Instr base = RnSP(addr.base());
   4268  int64_t offset = addr.offset();
   4269 
   4270  if (addr.IsImmediateOffset()) {
   4271    bool prefer_unscaled = (option == PreferUnscaledOffset) ||
   4272                           (option == RequireUnscaledOffset);
   4273    if (prefer_unscaled && IsImmLSUnscaled(offset)) {
   4274      // Use the unscaled addressing mode.
   4275      return base | LoadStoreUnscaledOffsetFixed |
   4276          ImmLS(static_cast<int>(offset));
   4277    }
   4278 
   4279    if ((option != RequireUnscaledOffset) &&
   4280        IsImmLSScaled(offset, access_size)) {
   4281      // Use the scaled addressing mode.
   4282      return base | LoadStoreUnsignedOffsetFixed |
   4283          ImmLSUnsigned(static_cast<int>(offset) >> access_size);
   4284    }
   4285 
   4286    if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
   4287      // Use the unscaled addressing mode.
   4288      return base | LoadStoreUnscaledOffsetFixed |
   4289          ImmLS(static_cast<int>(offset));
   4290    }
   4291  }
   4292 
   4293  // All remaining addressing modes are register-offset, pre-indexed or
   4294  // post-indexed modes.
   4295  VIXL_ASSERT((option != RequireUnscaledOffset) &&
   4296              (option != RequireScaledOffset));
   4297 
   4298  if (addr.IsRegisterOffset()) {
   4299    Extend ext = addr.extend();
   4300    Shift shift = addr.shift();
   4301    unsigned shift_amount = addr.shift_amount();
   4302 
   4303    // LSL is encoded in the option field as UXTX.
   4304    if (shift == LSL) {
   4305      ext = UXTX;
   4306    }
   4307 
   4308    // Shifts are encoded in one bit, indicating a left shift by the memory
   4309    // access size.
   4310    VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size));
   4311    return base | LoadStoreRegisterOffsetFixed | Rm(addr.regoffset()) |
   4312        ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
   4313  }
   4314 
   4315  if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
   4316    return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset));
   4317  }
   4318 
   4319  if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
   4320    return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset));
   4321  }
   4322 
   4323  // If this point is reached, the MemOperand (addr) cannot be encoded.
   4324  VIXL_UNREACHABLE();
   4325  return 0;
   4326 }
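        // Editorial note: for a word access (access_size == 2, i.e. log2 of
        // the size in bytes), offset 8 is a size multiple with 8 >> 2 == 2
        // fitting in imm12, so the scaled unsigned-offset form is chosen;
        // offset -4 fails the unsigned check after scaling and falls back to
        // the unscaled form, which takes a signed 9-bit byte offset.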
   4327 
   4328 
   4329 void Assembler::LoadStore(const CPURegister& rt,
   4330                          const MemOperand& addr,
   4331                          LoadStoreOp op,
   4332                          LoadStoreScalingOption option) {
   4333  Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
   4334 }
   4335 
   4336 
   4337 void Assembler::Prefetch(PrefetchOperation op,
   4338                         const MemOperand& addr,
   4339                         LoadStoreScalingOption option) {
   4340  VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
   4341 
   4342  Instr prfop = ImmPrefetchOperation(op);
   4343  Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
   4344 }
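         // Editorial note (not in upstream VIXL): Prefetch() reuses the
         // load/store addressing encoding with an 8-byte access size, so e.g.
         // prfm(PLDL1KEEP, MemOperand(x0, 8)) scales #8 as a multiple of 8.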
   4345 
   4346 
   4347 bool Assembler::IsImmAddSub(int64_t immediate) {
   4348  return IsUint12(immediate) ||
   4349         (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
   4350 }
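         // Editorial examples (not in upstream VIXL): immediates accepted by
         // IsImmAddSub are 12-bit values, optionally shifted left by 12:
         //   Assembler::IsImmAddSub(0xfff)     // true: fits in 12 bits.
         //   Assembler::IsImmAddSub(0x123000)  // true: 12 bits, LSL #12.
         //   Assembler::IsImmAddSub(0x123456)  // false: needs two add/sub
         //                                     // instructions (or a move).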
   4351 
   4352 
   4353 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
   4354  return IsUint5(immediate);
   4355 }
   4356 
   4357 
   4358 bool Assembler::IsImmFP32(float imm) {
   4359  // Valid values will have the form:
   4360  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
   4361  uint32_t bits = FloatToRawbits(imm);
    4362  // bits[18..0] are cleared.
   4363  if ((bits & 0x7ffff) != 0) {
   4364    return false;
   4365  }
   4366 
   4367  // bits[29..25] are all set or all cleared.
   4368  uint32_t b_pattern = (bits >> 16) & 0x3e00;
   4369  if (b_pattern != 0 && b_pattern != 0x3e00) {
   4370    return false;
   4371  }
   4372 
   4373  // bit[30] and bit[29] are opposite.
   4374  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
   4375    return false;
   4376  }
   4377 
   4378  return true;
   4379 }
   4380 
   4381 
   4382 bool Assembler::IsImmFP64(double imm) {
   4383  // Valid values will have the form:
   4384  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   4385  // 0000.0000.0000.0000.0000.0000.0000.0000
   4386  uint64_t bits = DoubleToRawbits(imm);
   4387  // bits[47..0] are cleared.
   4388  if ((bits & 0x0000ffffffffffff) != 0) {
   4389    return false;
   4390  }
   4391 
   4392  // bits[61..54] are all set or all cleared.
   4393  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
   4394  if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
   4395    return false;
   4396  }
   4397 
   4398  // bit[62] and bit[61] are opposite.
   4399  if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
   4400    return false;
   4401  }
   4402 
   4403  return true;
   4404 }
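         // Editorial note (not in upstream VIXL): the encodable FMOV
         // immediates are +/-(n/16) * 2^e with n in [16, 31] and e in
         // [-3, 4], so for example:
         //   Assembler::IsImmFP64(1.0)   // true  (n == 16, e == 0)
         //   Assembler::IsImmFP64(-2.5)  // true  (n == 20, e == 1)
         //   Assembler::IsImmFP64(31.0)  // true  (largest magnitude)
         //   Assembler::IsImmFP64(0.1)   // false (not exactly representable)
         //   Assembler::IsImmFP64(0.0)   // false (zero is handled separately)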
   4405 
   4406 
   4407 bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
   4408  VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
   4409  bool offset_is_size_multiple =
   4410      (((offset >> access_size) << access_size) == offset);
   4411  return offset_is_size_multiple && IsInt7(offset >> access_size);
   4412 }
   4413 
   4414 
   4415 bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
   4416  VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
   4417  bool offset_is_size_multiple =
   4418      (((offset >> access_size) << access_size) == offset);
   4419  return offset_is_size_multiple && IsUint12(offset >> access_size);
   4420 }
   4421 
   4422 
   4423 bool Assembler::IsImmLSUnscaled(int64_t offset) {
   4424  return IsInt9(offset);
   4425 }
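         // Editorial examples (not in upstream VIXL), for an 8-byte access
         // (access_size == 3):
         //   IsImmLSUnscaled(-256)    // true:  9-bit signed, [-256, 255].
         //   IsImmLSScaled(32760, 3)  // true:  8 * uint12, [0, 32760].
         //   IsImmLSScaled(4, 3)      // false: not a multiple of 8.
         //   IsImmLSPair(-512, 3)     // true:  8 * int7, [-512, 504].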
   4426 
   4427 
   4428 // The movn instruction can generate immediates containing an arbitrary 16-bit
    4429 // value, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
   4430 bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
   4431  return IsImmMovz(~imm, reg_size);
   4432 }
   4433 
   4434 
   4435 // The movz instruction can generate immediates containing an arbitrary 16-bit
    4436 // value, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
   4437 bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
   4438  VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
   4439  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
   4440 }
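         // Editorial examples (not in upstream VIXL), for 64-bit registers:
         //   IsImmMovz(0x0000123400000000, kXRegSize)  // true:
         //       movz x0, #0x1234, lsl #32
         //   IsImmMovn(0xffffffffedcbffff, kXRegSize)  // true:
         //       movn x0, #0x1234, lsl #16  (gives ~0x12340000)
         //   IsImmMovz(0x0000123400005678, kXRegSize)  // false: two non-zero
         //                                             // halfwords.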
   4441 
   4442 
   4443 // Test if a given value can be encoded in the immediate field of a logical
   4444 // instruction.
   4445 // If it can be encoded, the function returns true, and values pointed to by n,
   4446 // imm_s and imm_r are updated with immediates encoded in the format required
   4447 // by the corresponding fields in the logical instruction.
    4448 // If it cannot be encoded, the function returns false, and the values pointed
    4449 // to by n, imm_s and imm_r are left unchanged.
   4450 bool Assembler::IsImmLogical(uint64_t value,
   4451                             unsigned width,
   4452                             unsigned* n,
   4453                             unsigned* imm_s,
   4454                             unsigned* imm_r) {
   4455  VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));
   4456 
   4457  bool negate = false;
   4458 
   4459  // Logical immediates are encoded using parameters n, imm_s and imm_r using
   4460  // the following table:
   4461  //
   4462  //    N   imms    immr    size        S             R
   4463  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
   4464  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
   4465  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
   4466  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
   4467  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
   4468  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
   4469  // (s bits must not be all set)
   4470  //
   4471  // A pattern is constructed of size bits, where the least significant S+1 bits
   4472  // are set. The pattern is rotated right by R, and repeated across a 32 or
   4473  // 64-bit value, depending on destination register width.
   4474  //
   4475  // Put another way: the basic format of a logical immediate is a single
   4476  // contiguous stretch of 1 bits, repeated across the whole word at intervals
   4477  // given by a power of 2. To identify them quickly, we first locate the
   4478  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
   4479  // is different for every logical immediate, so it gives us all the
   4480  // information we need to identify the only logical immediate that our input
   4481  // could be, and then we simply check if that's the value we actually have.
   4482  //
   4483  // (The rotation parameter does give the possibility of the stretch of 1 bits
   4484  // going 'round the end' of the word. To deal with that, we observe that in
   4485  // any situation where that happens the bitwise NOT of the value is also a
   4486  // valid logical immediate. So we simply invert the input whenever its low bit
   4487  // is set, and then we know that the rotated case can't arise.)
   4488 
   4489  if (value & 1) {
    4490    // If the low bit is 1, invert the value (bitwise NOT), and set a flag to
    4491    // remember that we did (so that we can adjust the return values later).
   4492    negate = true;
   4493    value = ~value;
   4494  }
   4495 
   4496  if (width == kWRegSize) {
   4497    // To handle 32-bit logical immediates, the very easiest thing is to repeat
   4498    // the input value twice to make a 64-bit word. The correct encoding of that
   4499    // as a logical immediate will also be the correct encoding of the 32-bit
   4500    // value.
   4501 
   4502    // Avoid making the assumption that the most-significant 32 bits are zero by
   4503    // shifting the value left and duplicating it.
   4504    value <<= kWRegSize;
   4505    value |= value >> kWRegSize;
   4506  }
   4507 
   4508  // The basic analysis idea: imagine our input word looks like this.
   4509  //
   4510  //    0011111000111110001111100011111000111110001111100011111000111110
   4511  //                                                          c  b    a
   4512  //                                                          |<--d-->|
   4513  //
   4514  // We find the lowest set bit (as an actual power-of-2 value, not its index)
   4515  // and call it a. Then we add a to our original number, which wipes out the
   4516  // bottommost stretch of set bits and replaces it with a 1 carried into the
   4517  // next zero bit. Then we look for the new lowest set bit, which is in
   4518  // position b, and subtract it, so now our number is just like the original
   4519  // but with the lowest stretch of set bits completely gone. Now we find the
   4520  // lowest set bit again, which is position c in the diagram above. Then we'll
   4521  // measure the distance d between bit positions a and c (using CLZ), and that
   4522  // tells us that the only valid logical immediate that could possibly be equal
   4523  // to this number is the one in which a stretch of bits running from a to just
   4524  // below b is replicated every d bits.
   4525  uint64_t a = LowestSetBit(value);
   4526  uint64_t value_plus_a = value + a;
   4527  uint64_t b = LowestSetBit(value_plus_a);
   4528  uint64_t value_plus_a_minus_b = value_plus_a - b;
   4529  uint64_t c = LowestSetBit(value_plus_a_minus_b);
   4530 
   4531  int d, clz_a, out_n;
   4532  uint64_t mask;
   4533 
   4534  if (c != 0) {
   4535    // The general case, in which there is more than one stretch of set bits.
   4536    // Compute the repeat distance d, and set up a bitmask covering the basic
   4537    // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
   4538    // of these cases the N bit of the output will be zero.
   4539    clz_a = CountLeadingZeros(a, kXRegSize);
   4540    int clz_c = CountLeadingZeros(c, kXRegSize);
   4541    d = clz_a - clz_c;
   4542    mask = ((UINT64_C(1) << d) - 1);
   4543    out_n = 0;
   4544  } else {
   4545    // Handle degenerate cases.
   4546    //
   4547    // If any of those 'find lowest set bit' operations didn't find a set bit at
   4548    // all, then the word will have been zero thereafter, so in particular the
   4549    // last lowest_set_bit operation will have returned zero. So we can test for
   4550    // all the special case conditions in one go by seeing if c is zero.
   4551    if (a == 0) {
   4552      // The input was zero (or all 1 bits, which will come to here too after we
   4553      // inverted it at the start of the function), for which we just return
   4554      // false.
   4555      return false;
   4556    } else {
   4557      // Otherwise, if c was zero but a was not, then there's just one stretch
   4558      // of set bits in our word, meaning that we have the trivial case of
   4559      // d == 64 and only one 'repetition'. Set up all the same variables as in
   4560      // the general case above, and set the N bit in the output.
   4561      clz_a = CountLeadingZeros(a, kXRegSize);
   4562      d = 64;
   4563      mask = ~UINT64_C(0);
   4564      out_n = 1;
   4565    }
   4566  }
   4567 
   4568  // If the repeat period d is not a power of two, it can't be encoded.
   4569  if (!IsPowerOf2(d)) {
   4570    return false;
   4571  }
   4572 
   4573  if (((b - a) & ~mask) != 0) {
   4574    // If the bit stretch (b - a) does not fit within the mask derived from the
   4575    // repeat period, then fail.
   4576    return false;
   4577  }
   4578 
   4579  // The only possible option is b - a repeated every d bits. Now we're going to
   4580  // actually construct the valid logical immediate derived from that
   4581  // specification, and see if it equals our original input.
   4582  //
   4583  // To repeat a value every d bits, we multiply it by a number of the form
   4584  // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
   4585  // be derived using a table lookup on CLZ(d).
   4586  static const uint64_t multipliers[] = {
   4587    0x0000000000000001UL,
   4588    0x0000000100000001UL,
   4589    0x0001000100010001UL,
   4590    0x0101010101010101UL,
   4591    0x1111111111111111UL,
   4592    0x5555555555555555UL,
   4593  };
   4594  uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57];
   4595  uint64_t candidate = (b - a) * multiplier;
   4596 
   4597  if (value != candidate) {
   4598    // The candidate pattern doesn't match our input value, so fail.
   4599    return false;
   4600  }
   4601 
   4602  // We have a match! This is a valid logical immediate, so now we have to
   4603  // construct the bits and pieces of the instruction encoding that generates
   4604  // it.
   4605 
   4606  // Count the set bits in our basic stretch. The special case of clz(0) == -1
   4607  // makes the answer come out right for stretches that reach the very top of
   4608  // the word (e.g. numbers like 0xffffc00000000000).
   4609  int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize);
   4610  int s = clz_a - clz_b;
   4611 
   4612  // Decide how many bits to rotate right by, to put the low bit of that basic
   4613  // stretch in position a.
   4614  int r;
   4615  if (negate) {
   4616    // If we inverted the input right at the start of this function, here's
   4617    // where we compensate: the number of set bits becomes the number of clear
   4618    // bits, and the rotation count is based on position b rather than position
   4619    // a (since b is the location of the 'lowest' 1 bit after inversion).
   4620    s = d - s;
   4621    r = (clz_b + 1) & (d - 1);
   4622  } else {
   4623    r = (clz_a + 1) & (d - 1);
   4624  }
   4625 
   4626  // Now we're done, except for having to encode the S output in such a way that
   4627  // it gives both the number of set bits and the length of the repeated
   4628  // segment. The s field is encoded like this:
   4629  //
   4630  //     imms    size        S
   4631  //    ssssss    64    UInt(ssssss)
   4632  //    0sssss    32    UInt(sssss)
   4633  //    10ssss    16    UInt(ssss)
   4634  //    110sss     8    UInt(sss)
   4635  //    1110ss     4    UInt(ss)
   4636  //    11110s     2    UInt(s)
   4637  //
   4638  // So we 'or' (-d << 1) with our computed s to form imms.
    4639  // Check each output pointer individually, so that callers may pass NULL
    4640  // for any output they do not need.
    4641  if (n != NULL) *n = out_n;
    4642  if (imm_s != NULL) *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
    4643  if (imm_r != NULL) *imm_r = r;
   4644 
   4645  return true;
   4646 }
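         // Editorial worked example (not in upstream VIXL):
         // IsImmLogical(0xf0f0f0f0f0f0f0f0, kXRegSize, &n, &imm_s, &imm_r):
         //   low bit is clear, so no inversion; negate == false.
         //   a == 1 << 4, b == 1 << 8, c == 1 << 12.
         //   d == clz(a) - clz(c) == 59 - 51 == 8; mask == 0xff; out_n == 0.
         //   candidate == (b - a) * 0x0101010101010101
         //             == 0xf0 * 0x0101010101010101 == value, so it matches.
         //   s == clz(a) - clz(b) == 59 - 55 == 4; r == (59 + 1) & 7 == 4.
         //   Outputs: n == 0, imm_s == ((-8 << 1) | 3) & 0x3f == 0x33
         //   (0b110011: a size-8 pattern with four set bits), imm_r == 4,
         //   which encodes e.g. "and x0, x1, #0xf0f0f0f0f0f0f0f0".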
   4647 
   4648 
   4649 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
   4650  VIXL_ASSERT(rt.IsValid());
   4651  if (rt.IsRegister()) {
   4652    return rt.Is64Bits() ? LDR_x : LDR_w;
   4653  } else {
   4654    VIXL_ASSERT(rt.IsVRegister());
   4655    switch (rt.SizeInBits()) {
   4656      case kBRegSize: return LDR_b;
   4657      case kHRegSize: return LDR_h;
   4658      case kSRegSize: return LDR_s;
   4659      case kDRegSize: return LDR_d;
   4660      default:
   4661        VIXL_ASSERT(rt.IsQ());
   4662        return LDR_q;
   4663    }
   4664  }
   4665 }
   4666 
   4667 
   4668 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
   4669  VIXL_ASSERT(rt.IsValid());
   4670  if (rt.IsRegister()) {
   4671    return rt.Is64Bits() ? STR_x : STR_w;
   4672  } else {
   4673    VIXL_ASSERT(rt.IsVRegister());
   4674    switch (rt.SizeInBits()) {
   4675      case kBRegSize: return STR_b;
   4676      case kHRegSize: return STR_h;
   4677      case kSRegSize: return STR_s;
   4678      case kDRegSize: return STR_d;
   4679      default:
   4680        VIXL_ASSERT(rt.IsQ());
   4681        return STR_q;
   4682    }
   4683  }
   4684 }
   4685 
   4686 
   4687 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
   4688    const CPURegister& rt2) {
   4689  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   4690  USE(rt2);
   4691  if (rt.IsRegister()) {
   4692    return rt.Is64Bits() ? STP_x : STP_w;
   4693  } else {
   4694    VIXL_ASSERT(rt.IsVRegister());
   4695    switch (rt.SizeInBytes()) {
   4696      case kSRegSizeInBytes: return STP_s;
   4697      case kDRegSizeInBytes: return STP_d;
   4698      default:
   4699        VIXL_ASSERT(rt.IsQ());
   4700        return STP_q;
   4701    }
   4702  }
   4703 }
   4704 
   4705 
   4706 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
   4707                                         const CPURegister& rt2) {
   4708  VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
   4709  return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
   4710                                      LoadStorePairLBit);
   4711 }
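         // Editorial note (not in upstream VIXL): as the assertion above
         // documents, a load pair op is its store counterpart with the L bit
         // set, e.g. LoadPairOpFor(x0, x1) == LDP_x == (STP_x | LoadStorePairLBit).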
   4712 
   4713 
   4714 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
   4715    const CPURegister& rt, const CPURegister& rt2) {
   4716  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   4717  USE(rt2);
   4718  if (rt.IsRegister()) {
   4719    return rt.Is64Bits() ? STNP_x : STNP_w;
   4720  } else {
   4721    VIXL_ASSERT(rt.IsVRegister());
   4722    switch (rt.SizeInBytes()) {
   4723      case kSRegSizeInBytes: return STNP_s;
   4724      case kDRegSizeInBytes: return STNP_d;
   4725      default:
   4726        VIXL_ASSERT(rt.IsQ());
   4727        return STNP_q;
   4728    }
   4729  }
   4730 }
   4731 
   4732 
   4733 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
   4734    const CPURegister& rt, const CPURegister& rt2) {
   4735  VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
   4736  return static_cast<LoadStorePairNonTemporalOp>(
   4737      StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
   4738 }
   4739 
   4740 
   4741 LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
   4742  if (rt.IsRegister()) {
   4743    return rt.IsX() ? LDR_x_lit : LDR_w_lit;
   4744  } else {
   4745    VIXL_ASSERT(rt.IsVRegister());
   4746    switch (rt.SizeInBytes()) {
   4747      case kSRegSizeInBytes: return LDR_s_lit;
   4748      case kDRegSizeInBytes: return LDR_d_lit;
   4749      default:
   4750        VIXL_ASSERT(rt.IsQ());
   4751        return LDR_q_lit;
   4752    }
   4753  }
   4754 }
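         // Editorial examples (not in upstream VIXL): literal-load op
         // selection by register type:
         //   LoadLiteralOpFor(x0)  // LDR_x_lit
         //   LoadLiteralOpFor(w0)  // LDR_w_lit
         //   LoadLiteralOpFor(d0)  // LDR_d_lit
         //   LoadLiteralOpFor(q0)  // LDR_q_lit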
   4755 
   4756 
   4757 bool Assembler::CPUHas(const CPURegister& rt) const {
   4758  // Core registers are available without any particular CPU features.
   4759  if (rt.IsRegister()) return true;
   4760  VIXL_ASSERT(rt.IsVRegister());
   4761  // The architecture does not allow FP and NEON to be implemented separately,
   4762  // but we can crudely categorise them based on register size, since FP only
   4763  // uses D, S and (occasionally) H registers.
   4764  if (rt.IsH() || rt.IsS() || rt.IsD()) {
   4765    return CPUHas(CPUFeatures::kFP) || CPUHas(CPUFeatures::kNEON);
   4766  }
   4767  VIXL_ASSERT(rt.IsB() || rt.IsQ());
   4768  return CPUHas(CPUFeatures::kNEON);
   4769 }
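         // Editorial examples (not in upstream VIXL):
         //   CPUHas(x0)  // always true: core register.
         //   CPUHas(d0)  // true with either kFP or kNEON (scalar double).
         //   CPUHas(q0)  // true only with kNEON (128-bit vector).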
   4770 
   4771 
   4772 bool Assembler::CPUHas(const CPURegister& rt, const CPURegister& rt2) const {
   4773  // This is currently only used for loads and stores, where rt and rt2 must
   4774  // have the same size and type. We could extend this to cover other cases if
   4775  // necessary, but for now we can avoid checking both registers.
   4776  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   4777  USE(rt2);
   4778  return CPUHas(rt);
   4779 }
   4780 
   4781 
   4782 bool Assembler::CPUHas(SystemRegister sysreg) const {
   4783  switch (sysreg) {
   4784    case RNDR:
   4785    case RNDRRS:
   4786      return CPUHas(CPUFeatures::kRNG);
   4787    case FPCR:
   4788    case NZCV:
   4789    case DCZID_EL0:
   4790      break;
   4791  }
   4792  return true;
   4793 }
   4794 }  // namespace vixl