tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Operands-vixl.cpp (14412B)


      1 // Copyright 2016, VIXL authors
      2 // All rights reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are met:
      6 //
      7 //   * Redistributions of source code must retain the above copyright notice,
      8 //     this list of conditions and the following disclaimer.
      9 //   * Redistributions in binary form must reproduce the above copyright notice,
     10 //     this list of conditions and the following disclaimer in the documentation
     11 //     and/or other materials provided with the distribution.
     12 //   * Neither the name of ARM Limited nor the names of its contributors may be
     13 //     used to endorse or promote products derived from this software without
     14 //     specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
     17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
     20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26 
     27 #include "jit/arm64/vixl/Operands-vixl.h"
     28 
     29 namespace vixl {
     30 
     31 // CPURegList utilities.
     32 CPURegister CPURegList::PopLowestIndex(RegList mask) {
     33  RegList list = list_ & mask;
     34  if (list == 0) return NoCPUReg;
     35  int index = CountTrailingZeros(list);
     36  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
     37  Remove(index);
     38  return CPURegister(index, size_, type_);
     39 }
     40 
     41 
     42 CPURegister CPURegList::PopHighestIndex(RegList mask) {
     43  RegList list = list_ & mask;
     44  if (list == 0) return NoCPUReg;
     45  int index = CountLeadingZeros(list);
     46  index = kRegListSizeInBits - 1 - index;
     47  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
     48  Remove(index);
     49  return CPURegister(index, size_, type_);
     50 }
     51 
     52 
     53 bool CPURegList::IsValid() const {
     54  if (type_ == CPURegister::kNoRegister) {
     55    // We can't use IsEmpty here because that asserts IsValid().
     56    return list_ == 0;
     57  } else {
     58    bool is_valid = true;
     59    // Try to create a CPURegister for each element in the list.
     60    for (int i = 0; i < kRegListSizeInBits; i++) {
     61      if (((list_ >> i) & 1) != 0) {
     62        is_valid &= CPURegister(i, size_, type_).IsValid();
     63      }
     64    }
     65    return is_valid;
     66  }
     67 }
     68 
     69 
     70 void CPURegList::RemoveCalleeSaved() {
     71  if (GetType() == CPURegister::kRegister) {
     72    Remove(GetCalleeSaved(GetRegisterSizeInBits()));
     73  } else if (GetType() == CPURegister::kVRegister) {
     74    Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
     75  } else {
     76    VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
     77    VIXL_ASSERT(IsEmpty());
     78    // The list must already be empty, so do nothing.
     79  }
     80 }
     81 
     82 
     83 CPURegList CPURegList::Union(const CPURegList& list_1,
     84                             const CPURegList& list_2,
     85                             const CPURegList& list_3) {
     86  return Union(list_1, Union(list_2, list_3));
     87 }
     88 
     89 
     90 CPURegList CPURegList::Union(const CPURegList& list_1,
     91                             const CPURegList& list_2,
     92                             const CPURegList& list_3,
     93                             const CPURegList& list_4) {
     94  return Union(Union(list_1, list_2), Union(list_3, list_4));
     95 }
     96 
     97 
     98 CPURegList CPURegList::Intersection(const CPURegList& list_1,
     99                                    const CPURegList& list_2,
    100                                    const CPURegList& list_3) {
    101  return Intersection(list_1, Intersection(list_2, list_3));
    102 }
    103 
    104 
    105 CPURegList CPURegList::Intersection(const CPURegList& list_1,
    106                                    const CPURegList& list_2,
    107                                    const CPURegList& list_3,
    108                                    const CPURegList& list_4) {
    109  return Intersection(Intersection(list_1, list_2),
    110                      Intersection(list_3, list_4));
    111 }
    112 
    113 
// Callee-saved general-purpose registers: x19-x29 (AAPCS64).
CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}
    117 
    118 
// Callee-saved vector registers: v8-v15 (AAPCS64; only the low 64 bits are
// callee-saved — the caller picks `size` accordingly).
CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
  return CPURegList(CPURegister::kVRegister, size, 8, 15);
}
    122 
    123 
    124 CPURegList CPURegList::GetCallerSaved(unsigned size) {
    125  // Registers x0-x18 and lr (x30) are caller-saved.
    126  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
    127  // Do not use lr directly to avoid initialisation order fiasco bugs for users.
    128  list.Combine(Register(30, kXRegSize));
    129  return list;
    130 }
    131 
    132 
    133 CPURegList CPURegList::GetCallerSavedV(unsigned size) {
    134  // Registers d0-d7 and d16-d31 are caller-saved.
    135  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
    136  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
    137  return list;
    138 }
    139 
    140 
// Convenience instances of the standard AArch64 register lists, built with
// the default-size overloads.
// NOTE(review): these are dynamically-initialized globals; their
// initialisation order relative to globals in other translation units is
// unspecified — confirm no other global depends on them at start-up.
const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
    145 
    146 // Operand.
// Immediate operand: no register, no shift, no extend.
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}
    153 
// Immediate operand from an IntegerOperand, sign-extended/truncated to
// 64 bits via AsIntN(64).
Operand::Operand(IntegerOperand immediate)
    : immediate_(immediate.AsIntN(64)),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}
    160 
// Shifted-register operand, e.g. `x1, LSL #3`.
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  // MSL is not a valid shift for a register operand.
  VIXL_ASSERT(shift != MSL);
  // The shift amount must be strictly less than the register width.
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  // sp cannot be used as a shifted-register operand.
  VIXL_ASSERT(!reg.IsSP());
}
    171 
    172 
// Extended-register operand, e.g. `w1, UXTW #2`.
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  // Extended operands allow a left shift of at most 4.
  VIXL_ASSERT(shift_amount <= 4);
  // sp cannot be used as an extended-register operand.
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
    185 
    186 
    187 bool Operand::IsImmediate() const { return reg_.Is(NoReg); }
    188 
    189 
    190 bool Operand::IsPlainRegister() const {
    191  return reg_.IsValid() &&
    192         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
    193          // No-op shifts.
    194          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
    195          // No-op extend operations.
    196          // We can't include [US]XTW here without knowing more about the
    197          // context; they are only no-ops for 32-bit operations.
    198          //
    199          // For example, this operand could be replaced with w1:
    200          //   __ Add(w0, w0, Operand(w1, UXTW));
    201          // However, no plain register can replace it in this context:
    202          //   __ Add(x0, x0, Operand(w1, UXTW));
    203          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
    204 }
    205 
    206 
    207 bool Operand::IsShiftedRegister() const {
    208  return reg_.IsValid() && (shift_ != NO_SHIFT);
    209 }
    210 
    211 
    212 bool Operand::IsExtendedRegister() const {
    213  return reg_.IsValid() && (extend_ != NO_EXTEND);
    214 }
    215 
    216 
    217 bool Operand::IsZero() const {
    218  if (IsImmediate()) {
    219    return GetImmediate() == 0;
    220  } else {
    221    return GetRegister().IsZero();
    222  }
    223 }
    224 
    225 
    226 Operand Operand::ToExtendedRegister() const {
    227  VIXL_ASSERT(IsShiftedRegister());
    228  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
    229  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
    230 }
    231 
    232 
    233 // MemOperand
    234 MemOperand::MemOperand()
    235    : base_(NoReg),
    236      regoffset_(NoReg),
    237      offset_(0),
    238      addrmode_(Offset),
    239      shift_(NO_SHIFT),
    240      extend_(NO_EXTEND) {}
    241 
    242 
// Immediate-offset address: `[base, #offset]`, `[base, #offset]!` or
// `[base], #offset` depending on `addrmode`.
MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  // The base must be a 64-bit register and not the zero register.
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}
    253 
    254 
// Extended-register-offset address:
// `[base, regoffset, {UXTW|SXTW|SXTX} {#shift_amount}]`.
MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  // The base must be a 64-bit register and not the zero register.
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  // sp cannot be the offset register.
  VIXL_ASSERT(!regoffset.IsSP());
  // Only these three extend modes are valid for a register offset.
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}
    273 
    274 
// Shifted-register-offset address: `[base, regoffset, LSL #shift_amount]`.
MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  // The base must be a 64-bit register and not the zero register.
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  // The offset register must be a 64-bit register other than sp.
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  // LSL is the only shift allowed for a register offset.
  VIXL_ASSERT(shift == LSL);
}
    290 
    291 
// Build a MemOperand from a generic Operand offset, which may be an
// immediate, a shifted register or an extended register; the Operand's
// components are copied into the corresponding MemOperand fields.
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  // The base must be a 64-bit register and not the zero register.
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.GetImmediate();
  } else if (offset.IsShiftedRegister()) {
    // Shifted-register offsets only combine with Offset or PostIndex modes.
    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.GetRegister();
    shift_ = offset.GetShift();
    shift_amount_ = offset.GetShiftAmount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    // Extended-register offsets only combine with plain Offset mode.
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.GetRegister();
    extend_ = offset.GetExtend();
    shift_amount_ = offset.GetShiftAmount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
    333 
    334 
    335 bool MemOperand::IsPlainRegister() const {
    336  return IsImmediateOffset() && (GetOffset() == 0);
    337 }
    338 
    339 
    340 bool MemOperand::IsEquivalentToPlainRegister() const {
    341  if (regoffset_.Is(NoReg)) {
    342    // Immediate offset, pre-index or post-index.
    343    return GetOffset() == 0;
    344  } else if (GetRegisterOffset().IsZero()) {
    345    // Zero register offset, pre-index or post-index.
    346    // We can ignore shift and extend options because they all result in zero.
    347    return true;
    348  }
    349  return false;
    350 }
    351 
    352 
    353 bool MemOperand::IsImmediateOffset() const {
    354  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
    355 }
    356 
    357 
    358 bool MemOperand::IsRegisterOffset() const {
    359  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
    360 }
    361 
    362 bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
    363 bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
    364 
    365 bool MemOperand::IsImmediatePreIndex() const {
    366  return IsPreIndex() && regoffset_.Is(NoReg);
    367 }
    368 
    369 bool MemOperand::IsImmediatePostIndex() const {
    370  return IsPostIndex() && regoffset_.Is(NoReg);
    371 }
    372 
// Accumulate `offset` into the immediate offset. Only valid for plain
// immediate-offset addressing.
void MemOperand::AddOffset(int64_t offset) {
  VIXL_ASSERT(IsImmediateOffset());
  offset_ += offset;
}
    377 
    378 
// An SVEMemOperand is valid if it matches exactly one of the supported
// addressing forms, has no simultaneous register-and-immediate offset, and
// any shift amount is paired with a shift/extend modifier.
bool SVEMemOperand::IsValid() const {
#ifdef VIXL_DEBUG
  {
    // It should not be possible for an SVEMemOperand to match multiple types.
    int count = 0;
    if (IsScalarPlusImmediate()) count++;
    if (IsScalarPlusScalar()) count++;
    if (IsScalarPlusVector()) count++;
    if (IsVectorPlusImmediate()) count++;
    if (IsVectorPlusScalar()) count++;
    if (IsVectorPlusVector()) count++;
    VIXL_ASSERT(count <= 1);
  }
#endif

  // We can't have a register _and_ an immediate offset.
  if ((offset_ != 0) && (!regoffset_.IsNone())) return false;

  if (shift_amount_ != 0) {
    // Only shift and extend modifiers can take a shift amount.
    switch (mod_) {
      case NO_SVE_OFFSET_MODIFIER:
      case SVE_MUL_VL:
        return false;
      case SVE_LSL:
      case SVE_UXTW:
      case SVE_SXTW:
        // These modifiers accept a shift amount; keep checking below.
        break;
    }
  }

  // Finally, the operand must match one of the recognised forms.
  return IsScalarPlusImmediate() || IsScalarPlusScalar() ||
         IsScalarPlusVector() || IsVectorPlusImmediate() ||
         IsVectorPlusScalar() || IsVectorPlusVector();
}
    415 
    416 
    417 bool SVEMemOperand::IsEquivalentToScalar() const {
    418  if (IsScalarPlusImmediate()) {
    419    return GetImmediateOffset() == 0;
    420  }
    421  if (IsScalarPlusScalar()) {
    422    // We can ignore the shift because it will still result in zero.
    423    return GetScalarOffset().IsZero();
    424  }
    425  // Forms involving vectors are never equivalent to a single scalar.
    426  return false;
    427 }
    428 
    429 bool SVEMemOperand::IsPlainRegister() const {
    430  if (IsScalarPlusImmediate()) {
    431    return GetImmediateOffset() == 0;
    432  }
    433  return false;
    434 }
    435 
// Wrap a register in a GenericOperand. Q (128-bit) registers are rejected:
// generic operands only support values up to X-register size.
GenericOperand::GenericOperand(const CPURegister& reg)
    : cpu_register_(reg), mem_op_size_(0) {
  if (reg.IsQ()) {
    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
    // Support for Q registers is not implemented yet.
    VIXL_UNIMPLEMENTED();
  }
}
    444 
    445 
// Wrap a memory location of `mem_op_size` bytes in a GenericOperand.
GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
  if (mem_op_size_ > kXRegSizeInBytes) {
    // We only support generic operands up to the size of X registers.
    VIXL_UNIMPLEMENTED();
  }
}
    453 
    454 bool GenericOperand::Equals(const GenericOperand& other) const {
    455  if (!IsValid() || !other.IsValid()) {
    456    // Two invalid generic operands are considered equal.
    457    return !IsValid() && !other.IsValid();
    458  }
    459  if (IsCPURegister() && other.IsCPURegister()) {
    460    return GetCPURegister().Is(other.GetCPURegister());
    461  } else if (IsMemOperand() && other.IsMemOperand()) {
    462    return GetMemOperand().Equals(other.GetMemOperand()) &&
    463           (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
    464  }
    465  return false;
    466 }
    467 }  // namespace vixl