tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Operands-vixl.h (35696B)


      1 // Copyright 2016, VIXL authors
      2 // All rights reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are met:
      6 //
      7 //   * Redistributions of source code must retain the above copyright notice,
      8 //     this list of conditions and the following disclaimer.
      9 //   * Redistributions in binary form must reproduce the above copyright notice,
     10 //     this list of conditions and the following disclaimer in the documentation
     11 //     and/or other materials provided with the distribution.
     12 //   * Neither the name of ARM Limited nor the names of its contributors may be
     13 //     used to endorse or promote products derived from this software without
     14 //     specific prior written permission.
     15 //
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
     17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
     20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26 
     27 #ifndef VIXL_A64_OPERANDS_A64_H_
     28 #define VIXL_A64_OPERANDS_A64_H_
     29 
     30 #include "jit/arm64/vixl/Instructions-vixl.h"
     31 #include "jit/arm64/vixl/Registers-vixl.h"
     32 
     33 #include "jit/shared/Assembler-shared.h"
     34 
     35 namespace vixl {
     36 
// Lists of registers.
//
// A CPURegList describes a set of registers that all share a single type
// (e.g. kRegister or kVRegister) and a single size in bits. The set itself is
// stored as a RegList bitfield, with one bit per register code.
class CPURegList {
public:
 // Build a list from up to four registers. All given registers must have the
 // same size and type; unused slots default to NoCPUReg.
 explicit CPURegList(CPURegister reg1,
                     CPURegister reg2 = NoCPUReg,
                     CPURegister reg3 = NoCPUReg,
                     CPURegister reg4 = NoCPUReg)
     : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
       size_(reg1.GetSizeInBits()),
       type_(reg1.GetType()) {
   VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
   VIXL_ASSERT(IsValid());
 }

 // Build a list directly from a raw bitfield of register codes.
 CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
     : list_(list), size_(size), type_(type) {
   VIXL_ASSERT(IsValid());
 }

 // Build a list containing the contiguous range of register codes
 // [first_reg, last_reg], inclusive at both ends.
 CPURegList(CPURegister::RegisterType type,
            unsigned size,
            unsigned first_reg,
            unsigned last_reg)
     : size_(size), type_(type) {
   VIXL_ASSERT(
       ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
       ((type == CPURegister::kVRegister) &&
        (last_reg < kNumberOfVRegisters)));
   VIXL_ASSERT(last_reg >= first_reg);
   // Set bits [0, last_reg], then clear the bits below first_reg.
   list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
   list_ &= ~((UINT64_C(1) << first_reg) - 1);
   VIXL_ASSERT(IsValid());
 }

 // Construct an empty CPURegList with the specified size and type. If `size`
 // is CPURegister::kUnknownSize and the register type requires a size, a valid
 // but unspecified default will be picked.
 static CPURegList Empty(CPURegister::RegisterType type,
                         unsigned size = CPURegister::kUnknownSize) {
   return CPURegList(type, GetDefaultSizeFor(type, size), 0);
 }

 // Construct a CPURegList with all possible registers with the specified size
 // and type. If `size` is CPURegister::kUnknownSize and the register type
 // requires a size, a valid but unspecified default will be picked.
 static CPURegList All(CPURegister::RegisterType type,
                       unsigned size = CPURegister::kUnknownSize) {
   unsigned number_of_registers = (CPURegister::GetMaxCodeFor(type) + 1);
   RegList list = (static_cast<RegList>(1) << number_of_registers) - 1;
   if (type == CPURegister::kRegister) {
     // GetMaxCodeFor(kRegister) ignores SP, so explicitly include it.
     list |= (static_cast<RegList>(1) << kSPRegInternalCode);
   }
   return CPURegList(type, GetDefaultSizeFor(type, size), list);
 }

 CPURegister::RegisterType GetType() const {
   VIXL_ASSERT(IsValid());
   return type_;
 }
 // Alias for GetType().
 CPURegister::RegisterType type() const {
   return GetType();
 }

 // Return the register bank that this list's type belongs to.
 CPURegister::RegisterBank GetBank() const {
   return CPURegister::GetBankFor(GetType());
 }

 // Combine another CPURegList into this one. Registers that already exist in
 // this list are left unchanged. The type and size of the registers in the
 // 'other' list must match those in this list.
 void Combine(const CPURegList& other) {
   VIXL_ASSERT(IsValid());
   VIXL_ASSERT(other.GetType() == type_);
   VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
   list_ |= other.GetList();
 }

 // Remove every register in the other CPURegList from this one. Registers that
 // do not exist in this list are ignored. The type and size of the registers
 // in the 'other' list must match those in this list.
 void Remove(const CPURegList& other) {
   VIXL_ASSERT(IsValid());
   VIXL_ASSERT(other.GetType() == type_);
   VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
   list_ &= ~other.GetList();
 }

 // Variants of Combine and Remove which take a single register.
 void Combine(const CPURegister& other) {
   VIXL_ASSERT(other.GetType() == type_);
   VIXL_ASSERT(other.GetSizeInBits() == size_);
   Combine(other.GetCode());
 }

 void Remove(const CPURegister& other) {
   VIXL_ASSERT(other.GetType() == type_);
   VIXL_ASSERT(other.GetSizeInBits() == size_);
   Remove(other.GetCode());
 }

 // Variants of Combine and Remove which take a single register by its code;
 // the type and size of the register is inferred from this list.
 void Combine(int code) {
   VIXL_ASSERT(IsValid());
   VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
   list_ |= (UINT64_C(1) << code);
 }

 void Remove(int code) {
   VIXL_ASSERT(IsValid());
   VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
   list_ &= ~(UINT64_C(1) << code);
 }

 // Return the set union of two lists. Both lists must have the same type and
 // register size.
 static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
   VIXL_ASSERT(list_1.type_ == list_2.type_);
   VIXL_ASSERT(list_1.size_ == list_2.size_);
   return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
 }
 static CPURegList Union(const CPURegList& list_1,
                         const CPURegList& list_2,
                         const CPURegList& list_3);
 static CPURegList Union(const CPURegList& list_1,
                         const CPURegList& list_2,
                         const CPURegList& list_3,
                         const CPURegList& list_4);

 // Return the set intersection of two lists. Both lists must have the same
 // type and register size.
 static CPURegList Intersection(const CPURegList& list_1,
                                const CPURegList& list_2) {
   VIXL_ASSERT(list_1.type_ == list_2.type_);
   VIXL_ASSERT(list_1.size_ == list_2.size_);
   return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
 }
 static CPURegList Intersection(const CPURegList& list_1,
                                const CPURegList& list_2,
                                const CPURegList& list_3);
 static CPURegList Intersection(const CPURegList& list_1,
                                const CPURegList& list_2,
                                const CPURegList& list_3,
                                const CPURegList& list_4);

 // True if `other` has the same type as this list and at least one register
 // in common with it.
 bool Overlaps(const CPURegList& other) const {
   return (type_ == other.type_) && ((list_ & other.list_) != 0);
 }

 RegList GetList() const {
   VIXL_ASSERT(IsValid());
   return list_;
 }
 // Alias for GetList().
 RegList list() const { return GetList(); }

 // Replace the underlying bitfield wholesale.
 void SetList(RegList new_list) {
   VIXL_ASSERT(IsValid());
   list_ = new_list;
 }
 // Alias for SetList().
 void set_list(RegList new_list) {
   return SetList(new_list);
 }

 // Remove all callee-saved registers from the list. This can be useful when
 // preparing registers for an AAPCS64 function call, for example.
 void RemoveCalleeSaved();

 // Find the register in this list that appears in `mask` with the lowest or
 // highest code, remove it from the list and return it as a CPURegister. If
 // the list is empty, leave it unchanged and return NoCPUReg.
 CPURegister PopLowestIndex(RegList mask = ~static_cast<RegList>(0));
 CPURegister PopHighestIndex(RegList mask = ~static_cast<RegList>(0));

 // AAPCS64 callee-saved registers.
 static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
 static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);

 // AAPCS64 caller-saved registers. Note that this includes lr.
 // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
 // 64-bits being caller-saved.
 static CPURegList GetCallerSaved(unsigned size = kXRegSize);
 static CPURegList GetCallerSavedV(unsigned size = kDRegSize);

 bool IsEmpty() const {
   VIXL_ASSERT(IsValid());
   return list_ == 0;
 }

 // True if this list contains a register that aliases `other`: same register
 // bank and same code, regardless of size.
 bool IncludesAliasOf(const CPURegister& other) const {
   VIXL_ASSERT(IsValid());
   return (GetBank() == other.GetBank()) && IncludesAliasOf(other.GetCode());
 }

 bool IncludesAliasOf(int code) const {
   VIXL_ASSERT(IsValid());
   return (((static_cast<RegList>(1) << code) & list_) != 0);
 }

 // Number of registers in the list.
 int GetCount() const {
   VIXL_ASSERT(IsValid());
   return CountSetBits(list_);
 }
 // Alias for GetCount().
 int Count() const { return GetCount(); }

 int GetRegisterSizeInBits() const {
   VIXL_ASSERT(IsValid());
   return size_;
 }
 // Alias for GetRegisterSizeInBits().
 int RegisterSizeInBits() const {
   return GetRegisterSizeInBits();
 }

 int GetRegisterSizeInBytes() const {
   int size_in_bits = GetRegisterSizeInBits();
   VIXL_ASSERT((size_in_bits % 8) == 0);
   return size_in_bits / 8;
 }
 // Alias for GetRegisterSizeInBytes().
 int RegisterSizeInBytes() const {
   return GetRegisterSizeInBytes();
 }

 // Combined size of all registers in the list, in bytes.
 unsigned GetTotalSizeInBytes() const {
   VIXL_ASSERT(IsValid());
   return GetRegisterSizeInBytes() * GetCount();
 }
 // Alias for GetTotalSizeInBytes().
 unsigned TotalSizeInBytes() const {
   return GetTotalSizeInBytes();
 }

private:
 // If `size` is CPURegister::kUnknownSize and the type requires a known size,
 // then return an arbitrary-but-valid size.
 //
 // Otherwise, the size is checked for validity and returned unchanged.
 static unsigned GetDefaultSizeFor(CPURegister::RegisterType type,
                                   unsigned size) {
   if (size == CPURegister::kUnknownSize) {
     if (type == CPURegister::kRegister) size = kXRegSize;
     if (type == CPURegister::kVRegister) size = kQRegSize;
     // All other types require kUnknownSize.
   }
   VIXL_ASSERT(CPURegister(0, size, type).IsValid());
   return size;
 }

 RegList list_;  // Bitfield: one bit per register code present in the list.
 int size_;      // Size, in bits, of every register in the list.
 CPURegister::RegisterType type_;  // Common type of all registers in the list.

 bool IsValid() const;
};
    285 
    286 
// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;


// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;

// Forward declaration: needed by Operand's implicit IntegerOperand
// constructor below. The class is defined later in this header.
class IntegerOperand;
    297 
// Operand.
//
// Wraps the second operand of a data-processing instruction: an immediate, a
// shifted register, or an extended register. The Is*() predicates report which
// form an instance holds; the asserting accessors are only legal for the
// matching form.
class Operand {
public:
 // #<immediate>
 // where <immediate> is int64_t.
 // This is allowed to be an implicit constructor because Operand is
 // a wrapper class that doesn't normally perform any type conversion.
 Operand(int64_t immediate);  // NOLINT(runtime/explicit)

 Operand(IntegerOperand immediate);  // NOLINT(runtime/explicit)

 // rm, {<shift> #<shift_amount>}
 // where <shift> is one of {LSL, LSR, ASR, ROR}.
 //       <shift_amount> is uint6_t.
 // This is allowed to be an implicit constructor because Operand is
 // a wrapper class that doesn't normally perform any type conversion.
 Operand(Register reg,
         Shift shift = LSL,
         unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

 // rm, {<extend> {#<shift_amount>}}
 // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
 //       <shift_amount> is uint2_t.
 explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);

 // Queries on the operand's form.
 bool IsImmediate() const;
 bool IsPlainRegister() const;
 bool IsShiftedRegister() const;
 bool IsExtendedRegister() const;
 bool IsZero() const;

 // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
 // which helps in the encoding of instructions that use the stack pointer.
 Operand ToExtendedRegister() const;

 int64_t GetImmediate() const {
   VIXL_ASSERT(IsImmediate());
   return immediate_;
 }
 // Alias for GetImmediate().
 int64_t immediate() const {
   return GetImmediate();
 }

 // Like GetImmediate(), but maps any operand for which IsZero() holds to 0
 // without requiring IsImmediate() — presumably so zero-register operands can
 // be treated as the immediate 0 (TODO confirm against Operand::IsZero()).
 int64_t GetEquivalentImmediate() const {
   return IsZero() ? 0 : GetImmediate();
 }

 Register GetRegister() const {
   VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
   return reg_;
 }
 // Aliases for GetRegister().
 Register reg() const { return GetRegister(); }
 Register GetBaseRegister() const { return GetRegister(); }

 // Non-asserting variant of GetRegister(): returns NoCPUReg when the operand
 // holds no register form.
 CPURegister maybeReg() const {
   if (IsShiftedRegister() || IsExtendedRegister())
     return reg_;
   return NoCPUReg;
 }

 Shift GetShift() const {
   VIXL_ASSERT(IsShiftedRegister());
   return shift_;
 }
 // Alias for GetShift().
 Shift shift() const { return GetShift(); }

 Extend GetExtend() const {
   VIXL_ASSERT(IsExtendedRegister());
   return extend_;
 }
 // Alias for GetExtend().
 Extend extend() const { return GetExtend(); }

 unsigned GetShiftAmount() const {
   VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
   return shift_amount_;
 }
 // Alias for GetShiftAmount().
 unsigned shift_amount() const {
   return GetShiftAmount();
 }

private:
 int64_t immediate_;      // Meaningful when IsImmediate().
 Register reg_;           // Meaningful for the register forms.
 Shift shift_;            // Meaningful when IsShiftedRegister().
 Extend extend_;          // Meaningful when IsExtendedRegister().
 unsigned shift_amount_;  // Shift/extend amount for the register forms.
};
    385 
    386 
// MemOperand represents the addressing mode of a load or store instruction.
// In assembly syntax, MemOperands are normally denoted by one or more elements
// inside or around square brackets.
class MemOperand {
public:
 // Creates an invalid `MemOperand`.
 MemOperand();
 // [base, #offset], with the given addressing mode.
 explicit MemOperand(Register base,
                     int64_t offset = 0,
                     AddrMode addrmode = Offset);
 // [base, regoffset, {<shift> #<shift_amount>}]
 MemOperand(Register base,
            Register regoffset,
            Shift shift = LSL,
            unsigned shift_amount = 0);
 // [base, regoffset, <extend> {#<shift_amount>}]
 MemOperand(Register base,
            Register regoffset,
            Extend extend,
            unsigned shift_amount = 0);
 // [base, <offset operand>], where the Operand supplies the offset form.
 MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);

 // Adapter constructors using C++11 delegating.
 // TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary.
 explicit MemOperand(js::jit::Address addr)
   : MemOperand(IsHiddenSP(addr.base) ? sp : Register(AsRegister(addr.base), 64),
                (ptrdiff_t)addr.offset) {
 }

 const Register& GetBaseRegister() const { return base_; }
 // Alias for GetBaseRegister().
 const Register& base() const { return base_; }

 // If the MemOperand has a register offset, return it. (This also applies to
 // pre- and post-index modes.) Otherwise, return NoReg.
 const Register& GetRegisterOffset() const { return regoffset_; }
 // Alias for GetRegisterOffset().
 const Register& regoffset() const { return regoffset_; }

 // If the MemOperand has an immediate offset, return it. (This also applies to
 // pre- and post-index modes.) Otherwise, return 0.
 int64_t GetOffset() const { return offset_; }
 // Alias for GetOffset().
 int64_t offset() const { return offset_; }

 AddrMode GetAddrMode() const { return addrmode_; }
 AddrMode addrmode() const { return addrmode_; }
 Shift GetShift() const { return shift_; }
 Shift shift() const { return shift_; }
 Extend GetExtend() const { return extend_; }
 Extend extend() const { return extend_; }

 unsigned GetShiftAmount() const {
   // Extend modes can also encode a shift for some instructions.
   VIXL_ASSERT((GetShift() != NO_SHIFT) || (GetExtend() != NO_EXTEND));
   return shift_amount_;
 }
 // Like GetShiftAmount(), but without the shift/extend assertion.
 unsigned shift_amount() const { return shift_amount_; }

 // True for MemOperands which represent something like [x0].
 // Currently, this will also return true for [x0, #0], because MemOperand has
 // no way to distinguish the two.
 bool IsPlainRegister() const;

 // True for MemOperands which represent something like [x0], or for compound
 // MemOperands which are functionally equivalent, such as [x0, #0], [x0, xzr]
 // or [x0, wzr, UXTW #3].
 bool IsEquivalentToPlainRegister() const;

 // True for immediate-offset (but not indexed) MemOperands.
 bool IsImmediateOffset() const;
 // True for register-offset (but not indexed) MemOperands.
 bool IsRegisterOffset() const;
 // True for immediate or register pre-indexed MemOperands.
 bool IsPreIndex() const;
 // True for immediate or register post-indexed MemOperands.
 bool IsPostIndex() const;
 // True for immediate pre-indexed MemOperands, [reg, #imm]!
 bool IsImmediatePreIndex() const;
 // True for immediate post-indexed MemOperands, [reg], #imm
 bool IsImmediatePostIndex() const;

 void AddOffset(int64_t offset);

 // A MemOperand is valid when it has a valid base register, a recognised
 // addressing mode, at most one of {shift, extend}, and does not combine a
 // non-zero immediate offset with a register offset.
 bool IsValid() const {
   return base_.IsValid() &&
          ((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
           (addrmode_ == PostIndex)) &&
          ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
          ((offset_ == 0) || !regoffset_.IsValid());
 }

 // Field-by-field equality.
 bool Equals(const MemOperand& other) const {
   return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
          (offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
          (shift_ == other.shift_) && (extend_ == other.extend_) &&
          (shift_amount_ == other.shift_amount_);
 }

private:
 Register base_;          // Base address register.
 Register regoffset_;     // Register offset; NoReg when unused.
 int64_t offset_;         // Immediate offset; 0 when unused.
 AddrMode addrmode_;      // Offset, PreIndex or PostIndex.
 Shift shift_;            // Shift applied to the register offset, if any.
 Extend extend_;          // Extend applied to the register offset, if any.
 unsigned shift_amount_;  // Amount for the shift or extend, if any.
};
    490 
// SVE supports memory operands which don't make sense to the core ISA, such as
// scatter-gather forms, in which either the base or offset registers are
// vectors. This class exists to avoid complicating core-ISA code with
// SVE-specific behaviour.
//
// Note that SVE does not support any pre- or post-index modes.
class SVEMemOperand {
public:
 // "vector-plus-immediate", like [z0.s, #21]
 explicit SVEMemOperand(ZRegister base, uint64_t offset = 0)
     : base_(base),
       regoffset_(NoReg),
       offset_(RawbitsToInt64(offset)),
       mod_(NO_SVE_OFFSET_MODIFIER),
       shift_amount_(0) {
   VIXL_ASSERT(IsVectorPlusImmediate());
   VIXL_ASSERT(IsValid());
 }

 // "scalar-plus-immediate", like [x0], [x0, #42] or [x0, #42, MUL_VL]
 // The only supported modifiers are NO_SVE_OFFSET_MODIFIER or SVE_MUL_VL.
 //
 // Note that VIXL cannot currently distinguish between `SVEMemOperand(x0)` and
 // `SVEMemOperand(x0, 0)`. This is only significant in scalar-plus-scalar
 // instructions where xm defaults to xzr. However, users should not rely on
 // `SVEMemOperand(x0, 0)` being accepted in such cases.
 explicit SVEMemOperand(Register base,
                        uint64_t offset = 0,
                        SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER)
     : base_(base),
       regoffset_(NoReg),
       offset_(RawbitsToInt64(offset)),
       mod_(mod),
       shift_amount_(0) {
   VIXL_ASSERT(IsScalarPlusImmediate());
   VIXL_ASSERT(IsValid());
 }

 // "scalar-plus-scalar", like [x0, x1]
 // "scalar-plus-vector", like [x0, z1.d]
 SVEMemOperand(Register base, CPURegister offset)
     : base_(base),
       regoffset_(offset),
       offset_(0),
       mod_(NO_SVE_OFFSET_MODIFIER),
       shift_amount_(0) {
   VIXL_ASSERT(IsScalarPlusScalar() || IsScalarPlusVector());
   if (offset.IsZero()) VIXL_ASSERT(IsEquivalentToScalar());
   VIXL_ASSERT(IsValid());
 }

 // "scalar-plus-vector", like [x0, z1.d, UXTW]
 // The type of `mod` can be any `SVEOffsetModifier` (other than LSL), or a
 // corresponding `Extend` value.
 template <typename M>
 SVEMemOperand(Register base, ZRegister offset, M mod)
     : base_(base),
       regoffset_(offset),
       offset_(0),
       mod_(GetSVEOffsetModifierFor(mod)),
       shift_amount_(0) {
   VIXL_ASSERT(mod_ != SVE_LSL);  // LSL requires an explicit shift amount.
   VIXL_ASSERT(IsScalarPlusVector());
   VIXL_ASSERT(IsValid());
 }

 // "scalar-plus-scalar", like [x0, x1, LSL #1]
 // "scalar-plus-vector", like [x0, z1.d, LSL #2]
 // The type of `mod` can be any `SVEOffsetModifier`, or a corresponding
 // `Shift` or `Extend` value.
 template <typename M>
 SVEMemOperand(Register base, CPURegister offset, M mod, unsigned shift_amount)
     : base_(base),
       regoffset_(offset),
       offset_(0),
       mod_(GetSVEOffsetModifierFor(mod)),
       shift_amount_(shift_amount) {
   VIXL_ASSERT(IsValid());
 }

 // "vector-plus-scalar", like [z0.d, x0]
 SVEMemOperand(ZRegister base, Register offset)
     : base_(base),
       regoffset_(offset),
       offset_(0),
       mod_(NO_SVE_OFFSET_MODIFIER),
       shift_amount_(0) {
   VIXL_ASSERT(IsValid());
   VIXL_ASSERT(IsVectorPlusScalar());
 }

 // "vector-plus-vector", like [z0.d, z1.d, UXTW]
 template <typename M = SVEOffsetModifier>
 SVEMemOperand(ZRegister base,
               ZRegister offset,
               M mod = NO_SVE_OFFSET_MODIFIER,
               unsigned shift_amount = 0)
     : base_(base),
       regoffset_(offset),
       offset_(0),
       mod_(GetSVEOffsetModifierFor(mod)),
       shift_amount_(shift_amount) {
   VIXL_ASSERT(IsValid());
   VIXL_ASSERT(IsVectorPlusVector());
 }

 // True for SVEMemOperands which represent something like [x0].
 // This will also return true for [x0, #0], because there is no way
 // to distinguish the two.
 bool IsPlainScalar() const {
   return IsScalarPlusImmediate() && (offset_ == 0);
 }

 // True for SVEMemOperands which represent something like [x0], or for
 // compound SVEMemOperands which are functionally equivalent, such as
 // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
 bool IsEquivalentToScalar() const;

 // True for SVEMemOperands like [x0], [x0, #0], false for [x0, xzr] and
 // similar.
 bool IsPlainRegister() const;

 bool IsScalarPlusImmediate() const {
   return base_.IsX() && regoffset_.IsNone() &&
          ((mod_ == NO_SVE_OFFSET_MODIFIER) || IsMulVl());
 }

 bool IsScalarPlusScalar() const {
   // SVE offers no extend modes for scalar-plus-scalar, so both registers must
   // be X registers.
   return base_.IsX() && regoffset_.IsX() &&
          ((mod_ == NO_SVE_OFFSET_MODIFIER) || (mod_ == SVE_LSL));
 }

 bool IsScalarPlusVector() const {
   // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike
   // in the core ISA, these extend modes do not imply an S-sized lane, so the
   // modifier is independent from the lane size. The architecture describes
   // [US]XTW with a D-sized lane as an "unpacked" offset.
   return base_.IsX() && regoffset_.IsZRegister() &&
          (regoffset_.IsLaneSizeS() || regoffset_.IsLaneSizeD()) && !IsMulVl();
 }

 bool IsVectorPlusImmediate() const {
   return base_.IsZRegister() &&
          (base_.IsLaneSizeS() || base_.IsLaneSizeD()) &&
          regoffset_.IsNone() && (mod_ == NO_SVE_OFFSET_MODIFIER);
 }

 bool IsVectorPlusScalar() const {
   return base_.IsZRegister() && regoffset_.IsX() &&
          (base_.IsLaneSizeS() || base_.IsLaneSizeD());
 }

 bool IsVectorPlusVector() const {
   return base_.IsZRegister() && regoffset_.IsZRegister() && (offset_ == 0) &&
          AreSameFormat(base_, regoffset_) &&
          (base_.IsLaneSizeS() || base_.IsLaneSizeD());
 }

 // Any operand with a vector base or offset is a scatter-gather form; the
 // rest are contiguous.
 bool IsContiguous() const { return !IsScatterGather(); }
 bool IsScatterGather() const {
   return base_.IsZRegister() || regoffset_.IsZRegister();
 }

 // TODO: If necessary, add helpers like `HasScalarBase()`.

 Register GetScalarBase() const {
   VIXL_ASSERT(base_.IsX());
   return Register(base_);
 }

 ZRegister GetVectorBase() const {
   VIXL_ASSERT(base_.IsZRegister());
   VIXL_ASSERT(base_.HasLaneSize());
   return ZRegister(base_);
 }

 Register GetScalarOffset() const {
   VIXL_ASSERT(regoffset_.IsRegister());
   return Register(regoffset_);
 }

 ZRegister GetVectorOffset() const {
   VIXL_ASSERT(regoffset_.IsZRegister());
   VIXL_ASSERT(regoffset_.HasLaneSize());
   return ZRegister(regoffset_);
 }

 int64_t GetImmediateOffset() const {
   VIXL_ASSERT(regoffset_.IsNone());
   return offset_;
 }

 SVEOffsetModifier GetOffsetModifier() const { return mod_; }
 unsigned GetShiftAmount() const { return shift_amount_; }

 // True if the modifier and amount are equivalent to "LSL #<amount>".
 bool IsEquivalentToLSL(unsigned amount) const {
   if (shift_amount_ != amount) return false;
   if (amount == 0) {
     // No-shift is equivalent to "LSL #0".
     return ((mod_ == SVE_LSL) || (mod_ == NO_SVE_OFFSET_MODIFIER));
   }
   return mod_ == SVE_LSL;
 }

 bool IsMulVl() const { return mod_ == SVE_MUL_VL; }

 bool IsValid() const;

private:
 // Allow standard `Shift` and `Extend` arguments to be used.
 SVEOffsetModifier GetSVEOffsetModifierFor(Shift shift) {
   if (shift == LSL) return SVE_LSL;
   if (shift == NO_SHIFT) return NO_SVE_OFFSET_MODIFIER;
   // SVE does not accept any other shift.
   VIXL_UNIMPLEMENTED();
   return NO_SVE_OFFSET_MODIFIER;
 }

 SVEOffsetModifier GetSVEOffsetModifierFor(Extend extend = NO_EXTEND) {
   if (extend == UXTW) return SVE_UXTW;
   if (extend == SXTW) return SVE_SXTW;
   if (extend == NO_EXTEND) return NO_SVE_OFFSET_MODIFIER;
   // SVE does not accept any other extend mode.
   VIXL_UNIMPLEMENTED();
   return NO_SVE_OFFSET_MODIFIER;
 }

 SVEOffsetModifier GetSVEOffsetModifierFor(SVEOffsetModifier mod) {
   return mod;
 }

 CPURegister base_;       // X-register or Z-register base.
 CPURegister regoffset_;  // X- or Z-register offset; NoReg when unused.
 int64_t offset_;         // Immediate offset; 0 when unused.
 SVEOffsetModifier mod_;  // LSL, UXTW, SXTW or MUL VL modifier, if any.
 unsigned shift_amount_;  // Amount for an LSL or extend modifier.
};
    730 
    731 // Represent a signed or unsigned integer operand.
    732 //
    733 // This is designed to make instructions which naturally accept a _signed_
    734 // immediate easier to implement and use, when we also want users to be able to
    735 // specify raw-bits values (such as with hexadecimal constants). The advantage
    736 // of this class over a simple uint64_t (with implicit C++ sign-extension) is
    737 // that this class can strictly check the range of allowed values. With a simple
    738 // uint64_t, it is impossible to distinguish -1 from UINT64_MAX.
    739 //
    740 // For example, these instructions are equivalent:
    741 //
    742 //     __ Insr(z0.VnB(), -1);
    743 //     __ Insr(z0.VnB(), 0xff);
    744 //
    745 // ... as are these:
    746 //
    747 //     __ Insr(z0.VnD(), -1);
    748 //     __ Insr(z0.VnD(), 0xffffffffffffffff);
    749 //
    750 // ... but this is invalid:
    751 //
    752 //     __ Insr(z0.VnB(), 0xffffffffffffffff);  // Too big for B-sized lanes.
class IntegerOperand {
public:
#define VIXL_INT_TYPES(V) \
 V(char) V(short) V(int) V(long) V(long long)  // NOLINT(google-runtime-int)
#define VIXL_DECL_INT_OVERLOADS(T)                                        \
 /* These are allowed to be implicit constructors because this is a */   \
 /* wrapper class that doesn't normally perform any type conversion. */  \
 IntegerOperand(signed T immediate) /* NOLINT(runtime/explicit) */       \
     : raw_bits_(immediate),        /* Allow implicit sign-extension. */ \
       is_negative_(immediate < 0) {}                                    \
 IntegerOperand(unsigned T immediate) /* NOLINT(runtime/explicit) */     \
     : raw_bits_(immediate), is_negative_(false) {}
 VIXL_INT_TYPES(VIXL_DECL_INT_OVERLOADS)
#undef VIXL_DECL_INT_OVERLOADS
#undef VIXL_INT_TYPES

 // TODO: `Operand` can currently only hold an int64_t, so some large, unsigned
 // values will be misrepresented here.
 explicit IntegerOperand(const Operand& operand)
     : raw_bits_(operand.GetEquivalentImmediate()),
       is_negative_(operand.GetEquivalentImmediate() < 0) {}

 // Return true if the arithmetic value is representable as a signed n-bit
 // integer. Negative values are checked via their sign-extended int64_t form.
 bool IsIntN(unsigned n) const {
   return is_negative_ ? vixl::IsIntN(n, RawbitsToInt64(raw_bits_))
                       : vixl::IsIntN(n, raw_bits_);
 }
 // Return true if the arithmetic value is representable as an unsigned n-bit
 // integer. Negative values never qualify, whatever their magnitude.
 bool IsUintN(unsigned n) const {
   return !is_negative_ && vixl::IsUintN(n, raw_bits_);
 }

 // Convenience range checks for common unsigned field widths.
 bool IsUint8() const { return IsUintN(8); }
 bool IsUint16() const { return IsUintN(16); }
 bool IsUint32() const { return IsUintN(32); }
 bool IsUint64() const { return IsUintN(64); }

 // Convenience range checks for common signed field widths.
 bool IsInt8() const { return IsIntN(8); }
 bool IsInt16() const { return IsIntN(16); }
 bool IsInt32() const { return IsIntN(32); }
 bool IsInt64() const { return IsIntN(64); }

 // The value fits in n bits if it is in [INT<n>_MIN, UINT<n>_MAX]: signed
 // range for negative values, unsigned range otherwise.
 bool FitsInBits(unsigned n) const {
   return is_negative_ ? IsIntN(n) : IsUintN(n);
 }
 // As above, sized by the lane width of `zd`.
 bool FitsInLane(const CPURegister& zd) const {
   return FitsInBits(zd.GetLaneSizeInBits());
 }
 bool FitsInSignedLane(const CPURegister& zd) const {
   return IsIntN(zd.GetLaneSizeInBits());
 }
 bool FitsInUnsignedLane(const CPURegister& zd) const {
   return IsUintN(zd.GetLaneSizeInBits());
 }

 // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to an unsigned integer
 // in the range [0, UINT<n>_MAX] (using two's complement mapping).
 uint64_t AsUintN(unsigned n) const {
   VIXL_ASSERT(FitsInBits(n));
   return raw_bits_ & GetUintMask(n);
 }

 uint8_t AsUint8() const { return static_cast<uint8_t>(AsUintN(8)); }
 uint16_t AsUint16() const { return static_cast<uint16_t>(AsUintN(16)); }
 uint32_t AsUint32() const { return static_cast<uint32_t>(AsUintN(32)); }
 uint64_t AsUint64() const { return AsUintN(64); }

 // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to a signed integer in
 // the range [INT<n>_MIN, INT<n>_MAX] (using two's complement mapping).
 int64_t AsIntN(unsigned n) const {
   VIXL_ASSERT(FitsInBits(n));
   return ExtractSignedBitfield64(n - 1, 0, raw_bits_);
 }

 int8_t AsInt8() const { return static_cast<int8_t>(AsIntN(8)); }
 int16_t AsInt16() const { return static_cast<int16_t>(AsIntN(16)); }
 int32_t AsInt32() const { return static_cast<int32_t>(AsIntN(32)); }
 int64_t AsInt64() const { return AsIntN(64); }

 // Several instructions encode a signed int<N>_t, which is then (optionally)
 // left-shifted and sign-extended to a Z register lane with a size which may
 // be larger than N. This helper tries to find an int<N>_t such that the
 // IntegerOperand's arithmetic value is reproduced in each lane.
 //
 // This is the mechanism that allows `Insr(z0.VnB(), 0xff)` to be treated as
 // `Insr(z0.VnB(), -1)`.
 template <unsigned N, unsigned kShift, typename T>
 bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd, T* imm) const {
   VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
   VIXL_ASSERT(FitsInLane(zd));
   // The low `kShift` bits must be clear; they would be lost when the encoded
   // value is left-shifted back.
   if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

   // Reverse the specified left-shift.
   IntegerOperand unshifted(*this);
   unshifted.ArithmeticShiftRight(kShift);

   if (unshifted.IsIntN(N)) {
     // This is trivial, since sign-extension produces the same arithmetic
     // value irrespective of the destination size.
     *imm = static_cast<T>(unshifted.AsIntN(N));
     return true;
   }

   // Otherwise, we might be able to use the sign-extension to produce the
   // desired bit pattern. We can only do this for values in the range
   // [INT<N>_MAX + 1, UINT<N>_MAX], where the highest set bit is the sign bit.
   //
   // The lane size has to be adjusted to compensate for `kShift`, since the
   // high bits will be dropped when the encoded value is left-shifted.
   if (unshifted.IsUintN(zd.GetLaneSizeInBits() - kShift)) {
     int64_t encoded = unshifted.AsIntN(zd.GetLaneSizeInBits() - kShift);
     if (vixl::IsIntN(N, encoded)) {
       *imm = static_cast<T>(encoded);
       return true;
     }
   }
   return false;
 }

 // As above, but `kShift` is written to the `*shift` parameter on success, so
 // that it is easy to chain calls like this:
 //
 //     if (imm.TryEncodeAsShiftedIntNForLane<8, 0>(zd, &imm8, &shift) ||
 //         imm.TryEncodeAsShiftedIntNForLane<8, 8>(zd, &imm8, &shift)) {
 //       insn(zd, imm8, shift)
 //     }
 template <unsigned N, unsigned kShift, typename T, typename S>
 bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd,
                                    T* imm,
                                    S* shift) const {
   if (TryEncodeAsShiftedIntNForLane<N, kShift>(zd, imm)) {
     *shift = kShift;
     return true;
   }
   return false;
 }

 // As above, but assume that `kShift` is 0.
 template <unsigned N, typename T>
 bool TryEncodeAsIntNForLane(const CPURegister& zd, T* imm) const {
   return TryEncodeAsShiftedIntNForLane<N, 0>(zd, imm);
 }

 // As above, but for unsigned fields. This is usually a simple operation, but
 // is provided for symmetry.
 template <unsigned N, unsigned kShift, typename T>
 bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd, T* imm) const {
   VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
   VIXL_ASSERT(FitsInLane(zd));

   // TODO: Should we convert -1 to 0xff here?
   if (is_negative_) return false;
   // `zd` is only read by the assertion above; this suppresses unused-argument
   // warnings when assertions are compiled out.
   USE(zd);

   if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

   if (vixl::IsUintN(N, raw_bits_ >> kShift)) {
     *imm = static_cast<T>(raw_bits_ >> kShift);
     return true;
   }
   return false;
 }

 template <unsigned N, unsigned kShift, typename T, typename S>
 bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd,
                                     T* imm,
                                     S* shift) const {
   if (TryEncodeAsShiftedUintNForLane<N, kShift>(zd, imm)) {
     *shift = kShift;
     return true;
   }
   return false;
 }

 // Simple predicates on the arithmetic value.
 bool IsZero() const { return raw_bits_ == 0; }
 bool IsNegative() const { return is_negative_; }
 bool IsPositiveOrZero() const { return !is_negative_; }

 // Return the absolute value of the operand, as an unsigned bit pattern.
 uint64_t GetMagnitude() const {
   return is_negative_ ? UnsignedNegate(raw_bits_) : raw_bits_;
 }

private:
 // Shift the arithmetic value right, with sign extension if is_negative_.
 void ArithmeticShiftRight(int shift) {
   VIXL_ASSERT((shift >= 0) && (shift < 64));
   if (shift == 0) return;
   if (is_negative_) {
     raw_bits_ = ExtractSignedBitfield64(63, shift, raw_bits_);
   } else {
     raw_bits_ >>= shift;
   }
 }

 // The operand's value, as raw (two's complement) bits.
 uint64_t raw_bits_;
 // Whether the arithmetic value is negative. This distinguishes e.g. -1 from
 // UINT64_MAX, which share the same raw bits.
 bool is_negative_;
};
    948 
// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
    951 class GenericOperand {
    952 public:
    953  GenericOperand() { VIXL_ASSERT(!IsValid()); }
    954  GenericOperand(const CPURegister& reg);  // NOLINT(runtime/explicit)
    955  GenericOperand(const MemOperand& mem_op,
    956                 size_t mem_op_size = 0);  // NOLINT(runtime/explicit)
    957 
    958  bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }
    959 
    960  bool Equals(const GenericOperand& other) const;
    961 
    962  bool IsCPURegister() const {
    963    VIXL_ASSERT(IsValid());
    964    return cpu_register_.IsValid();
    965  }
    966 
    967  bool IsRegister() const {
    968    return IsCPURegister() && cpu_register_.IsRegister();
    969  }
    970 
    971  bool IsVRegister() const {
    972    return IsCPURegister() && cpu_register_.IsVRegister();
    973  }
    974 
    975  bool IsSameCPURegisterType(const GenericOperand& other) {
    976    return IsCPURegister() && other.IsCPURegister() &&
    977           GetCPURegister().IsSameType(other.GetCPURegister());
    978  }
    979 
    980  bool IsMemOperand() const {
    981    VIXL_ASSERT(IsValid());
    982    return mem_op_.IsValid();
    983  }
    984 
    985  CPURegister GetCPURegister() const {
    986    VIXL_ASSERT(IsCPURegister());
    987    return cpu_register_;
    988  }
    989 
    990  MemOperand GetMemOperand() const {
    991    VIXL_ASSERT(IsMemOperand());
    992    return mem_op_;
    993  }
    994 
    995  size_t GetMemOperandSizeInBytes() const {
    996    VIXL_ASSERT(IsMemOperand());
    997    return mem_op_size_;
    998  }
    999 
   1000  size_t GetSizeInBytes() const {
   1001    return IsCPURegister() ? cpu_register_.GetSizeInBytes()
   1002                           : GetMemOperandSizeInBytes();
   1003  }
   1004 
   1005  size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }
   1006 
   1007 private:
   1008  CPURegister cpu_register_;
   1009  MemOperand mem_op_;
   1010  // The size of the memory region pointed to, in bytes.
   1011  // We only support sizes up to X/D register sizes.
   1012  size_t mem_op_size_;
   1013 };
   1014 }  // namespace vixl
   1015 
   1016 #endif  // VIXL_A64_OPERANDS_A64_H_