tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

RangeAnalysis.cpp (121814B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/RangeAnalysis.h"
      8 
      9 #include "mozilla/CheckedArithmetic.h"
     10 #include "mozilla/MathAlgorithms.h"
     11 
     12 #include <algorithm>
     13 
     14 #include "jsmath.h"
     15 
     16 #include "jit/CompileInfo.h"
     17 #include "jit/IonAnalysis.h"
     18 #include "jit/JitSpewer.h"
     19 #include "jit/MIR-wasm.h"
     20 #include "jit/MIR.h"
     21 #include "jit/MIRGenerator.h"
     22 #include "jit/MIRGraph.h"
     23 #include "js/Conversions.h"
     24 #include "js/ScalarType.h"  // js::Scalar::Type
     25 #include "util/Unicode.h"
     26 #include "vm/ArgumentsObject.h"
     27 #include "vm/Float16.h"
     28 #include "vm/TypedArrayObject.h"
     29 #include "vm/Uint8Clamped.h"
     30 
     31 #include "vm/BytecodeUtil-inl.h"
     32 
     33 using namespace js;
     34 using namespace js::jit;
     35 
     36 using JS::GenericNaN;
     37 using JS::ToInt32;
     38 using mozilla::Abs;
     39 using mozilla::CountLeadingZeroes32;
     40 using mozilla::ExponentComponent;
     41 using mozilla::FloorLog2;
     42 using mozilla::IsNegativeZero;
     43 using mozilla::NegativeInfinity;
     44 using mozilla::NumberEqualsInt32;
     45 using mozilla::PositiveInfinity;
     46 
     47 // [SMDOC] IonMonkey Range Analysis
     48 //
     49 // This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
     51 //
     52 // We associate a range object with each SSA name, and the ranges are consulted
     53 // in order to determine whether overflow is possible for arithmetic
     54 // computations.
     55 //
     56 // An important source of range information that requires care to take
     57 // advantage of is conditional control flow. Consider the code below:
     58 //
     59 // if (x < 0) {
     60 //   y = x + 2000000000;
     61 // } else {
     62 //   if (x < 1000000000) {
     63 //     y = x * 2;
     64 //   } else {
     65 //     y = x - 3000000000;
     66 //   }
     67 // }
     68 //
     69 // The arithmetic operations in this code cannot overflow, but it is not
     70 // sufficient to simply associate each name with a range, since the information
     71 // differs between basic blocks. The traditional dataflow approach would be
// to associate ranges with (name, basic block) pairs. This solution is not
     73 // satisfying, since we lose the benefit of SSA form: in SSA form, each
     74 // definition has a unique name, so there is no need to track information about
     75 // the control flow of the program.
     76 //
     77 // The approach used here is to add a new form of pseudo operation called a
     78 // beta node, which associates range information with a value. These beta
     79 // instructions take one argument and additionally have an auxiliary constant
     80 // range associated with them. Operationally, beta nodes are just copies, but
     81 // the invariant expressed by beta node copies is that the output will fall
     82 // inside the range given by the beta node.  Gough and Klaeren refer to SSA
     83 // extended with these beta nodes as XSA form. The following shows the example
     84 // code transformed into XSA form:
     85 //
     86 // if (x < 0) {
     87 //   x1 = Beta(x, [INT_MIN, -1]);
     88 //   y1 = x1 + 2000000000;
     89 // } else {
     90 //   x2 = Beta(x, [0, INT_MAX]);
     91 //   if (x2 < 1000000000) {
     92 //     x3 = Beta(x2, [INT_MIN, 999999999]);
     93 //     y2 = x3*2;
     94 //   } else {
     95 //     x4 = Beta(x2, [1000000000, INT_MAX]);
     96 //     y3 = x4 - 3000000000;
     97 //   }
     98 //   y4 = Phi(y2, y3);
     99 // }
    100 // y = Phi(y1, y4);
    101 //
    102 // We insert beta nodes for the purposes of range analysis (they might also be
    103 // usefully used for other forms of bounds check elimination) and remove them
    104 // after range analysis is performed. The remaining compiler phases do not ever
    105 // encounter beta nodes.
    106 
    107 static bool IsDominatedUse(const MBasicBlock* block, const MUse* use) {
    108  MNode* n = use->consumer();
    109  bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();
    110 
    111  if (isPhi) {
    112    MPhi* phi = n->toDefinition()->toPhi();
    113    return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
    114  }
    115 
    116  return block->dominates(n->block());
    117 }
    118 
// Debug-spew helper: print |def|'s name and its computed range to the
// JitSpew_Range channel. Compiled away entirely in non-spew builds.
static inline void SpewRange(const MDefinition* def) {
#ifdef JS_JITSPEW
  // Only print definitions that actually carry a range and a value.
  if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None &&
      def->range()) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf("  ");
    def->printName(out);
    out.printf(" has range ");
    def->range()->dump(out);
    out.printf("\n");
  }
#endif
}
    133 
#ifdef JS_JITSPEW
// Map a TruncateKind to a human-readable name for spew output.
static const char* TruncateKindString(TruncateKind kind) {
  switch (kind) {
    case TruncateKind::NoTruncate:
      return "NoTruncate";
    case TruncateKind::TruncateAfterBailouts:
      return "TruncateAfterBailouts";
    case TruncateKind::IndirectTruncate:
      return "IndirectTruncate";
    case TruncateKind::Truncate:
      return "Truncate";
    default:
      MOZ_CRASH("Unknown truncate kind.");
  }
}

// Debug-spew helper: report that |def| is being truncated with the given
// kind, and whether a clone of the instruction will be made.
static inline void SpewTruncate(const MDefinition* def, TruncateKind kind,
                                bool shouldClone) {
  if (JitSpewEnabled(JitSpew_Range)) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf("  ");
    out.printf("truncating ");
    def->printName(out);
    out.printf(" (kind: %s, clone: %d)\n", TruncateKindString(kind),
               shouldClone);
  }
}
#else
// No-op in non-spew builds.
static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
                                bool shouldClone) {}
#endif
    166 
// All range-analysis allocations come from the MIR graph's temp allocator.
TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }
    168 
    169 static void ReplaceDominatedUsesWith(const MDefinition* orig, MDefinition* dom,
    170                                     const MBasicBlock* block) {
    171  for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd();) {
    172    MUse* use = *i++;
    173    if (use->consumer() != dom && IsDominatedUse(block, use)) {
    174      use->replaceProducer(dom);
    175    }
    176  }
    177 }
    178 
// Insert MBeta nodes below each conditional branch whose condition is a
// numeric comparison, encoding the range implied by the branch direction
// (see the [SMDOC] comment above). Returns false on OOM or cancellation.
bool RangeAnalysis::addBetaNodes() {
  JitSpew(JitSpew_Range, "Adding beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    if (mir->shouldCancel("RangeAnalysis addBetaNodes")) {
      return false;
    }

    MBasicBlock* block = *i;
    JitSpew(JitSpew_Range, "Looking at block %u", block->id());

    // Find the test in the immediate dominator that controls entry to this
    // block, and which way the branch went to get here.
    BranchDirection branch_dir;
    MTest* test = block->immediateDominatorBranch(&branch_dir);

    if (!test || !test->getOperand(0)->isCompare()) {
      continue;
    }

    MCompare* compare = test->getOperand(0)->toCompare();

    if (!compare->isNumericComparison()) {
      continue;
    }

    // TODO: support unsigned comparisons
    if (compare->compareType() == MCompare::Compare_UInt32) {
      continue;
    }

    // isNumericComparison should return false for (U)IntPtr.
    MOZ_ASSERT(compare->compareType() != MCompare::Compare_IntPtr &&
               compare->compareType() != MCompare::Compare_UIntPtr);

    MDefinition* left = compare->getOperand(0);
    MDefinition* right = compare->getOperand(1);
    double bound;
    double conservativeLower = NegativeInfinity<double>();
    double conservativeUpper = PositiveInfinity<double>();
    MDefinition* val = nullptr;

    JSOp jsop = compare->jsop();

    if (branch_dir == FALSE_BRANCH) {
      // On the false branch the comparison is negated. A negated comparison
      // can also be reached via NaN operands, so use NaN as the
      // conservative open-ended bound.
      jsop = NegateCompareOp(jsop);
      conservativeLower = GenericNaN();
      conservativeUpper = GenericNaN();
    }

    // Normalize so that |val| is the non-constant operand and |bound| is the
    // constant, reversing the comparison op if the constant was on the left.
    MConstant* leftConst = left->maybeConstantValue();
    MConstant* rightConst = right->maybeConstantValue();
    if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
      bound = leftConst->numberToDouble();
      val = right;
      jsop = ReverseCompareOp(jsop);
    } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
      bound = rightConst->numberToDouble();
      val = left;
    } else if (left->type() == MIRType::Int32 &&
               right->type() == MIRType::Int32) {
      // Neither side is constant, but an int32 strict inequality still tells
      // us each operand avoids one endpoint of the int32 range.
      MDefinition* smaller = nullptr;
      MDefinition* greater = nullptr;
      if (jsop == JSOp::Lt) {
        smaller = left;
        greater = right;
      } else if (jsop == JSOp::Gt) {
        smaller = right;
        greater = left;
      }
      if (smaller && greater) {
        if (!alloc().ensureBallast()) {
          return false;
        }

        MBeta* beta;
        beta = MBeta::New(
            alloc(), smaller,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
        block->insertBefore(*block->begin(), beta);
        ReplaceDominatedUsesWith(smaller, beta, block);
        JitSpew(JitSpew_Range, "  Adding beta node for smaller %u",
                smaller->id());
        beta = MBeta::New(
            alloc(), greater,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
        block->insertBefore(*block->begin(), beta);
        ReplaceDominatedUsesWith(greater, beta, block);
        JitSpew(JitSpew_Range, "  Adding beta node for greater %u",
                greater->id());
      }
      continue;
    } else {
      continue;
    }

    // At this point, one of the operands of the compare is a constant, and
    // val is the other operand.
    MOZ_ASSERT(val);

    // Build the range implied by "val <op> bound".
    Range comp;
    switch (jsop) {
      case JSOp::Le:
        comp.setDouble(conservativeLower, bound);
        break;
      case JSOp::Lt:
        // For integers, if x < c, the upper bound of x is c-1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              mozilla::SafeSub(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(conservativeLower, bound);

        // Negative zero is not less than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::Ge:
        comp.setDouble(bound, conservativeUpper);
        break;
      case JSOp::Gt:
        // For integers, if x > c, the lower bound of x is c+1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              mozilla::SafeAdd(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(bound, conservativeUpper);

        // Negative zero is not greater than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::StrictEq:
      case JSOp::Eq:
        comp.setDouble(bound, bound);
        break;
      case JSOp::StrictNe:
      case JSOp::Ne:
        // Negative zero is not not-equal to zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
          break;
        }
        continue;  // well, we could have
                   // [-\inf, bound-1] U [bound+1, \inf] but we only use
                   // contiguous ranges.
      default:
        continue;
    }

    if (JitSpewEnabled(JitSpew_Range)) {
      JitSpewHeader(JitSpew_Range);
      Fprinter& out = JitSpewPrinter();
      out.printf("  Adding beta node for %u with range ", val->id());
      comp.dump(out);
      out.printf("\n");
    }

    if (!alloc().ensureBallast()) {
      return false;
    }

    MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
    block->insertBefore(*block->begin(), beta);
    ReplaceDominatedUsesWith(val, beta, block);
  }

  return true;
}
    354 
// Strip out all the MBeta nodes inserted by addBetaNodes(), forwarding their
// uses to the underlying operand. Later compiler phases never see betas.
bool RangeAnalysis::removeBetaNodes() {
  JitSpew(JitSpew_Range, "Removing beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    for (MDefinitionIterator iter(*i); iter;) {
      // Advance before discarding: discard() invalidates the current node.
      MDefinition* def = *iter++;
      if (def->isBeta()) {
        auto* beta = def->toBeta();
        MDefinition* op = beta->input();
        JitSpew(JitSpew_Range, "  Removing beta node %u for %u", beta->id(),
                op->id());
        beta->justReplaceAllUsesWith(op);
        block->discard(beta);
      } else {
        // We only place Beta nodes at the beginning of basic
        // blocks, so if we see something else, we can move on
        // to the next block.
        break;
      }
    }
  }
  return true;
}
    379 
// Print this symbolic bound; loop-dependent bounds are prefixed with "[loop]".
void SymbolicBound::dump(GenericPrinter& out) const {
  if (loop) {
    out.printf("[loop] ");
  }
  sum.dump(out);
}
    386 
// Convenience overload: dump to stderr with a trailing newline.
void SymbolicBound::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}
    393 
    394 // Test whether the given range's exponent tells us anything that its lower
    395 // and upper bound values don't.
    396 static bool IsExponentInteresting(const Range* r) {
    397  // If it lacks either a lower or upper bound, the exponent is interesting.
    398  if (!r->hasInt32Bounds()) {
    399    return true;
    400  }
    401 
    402  // Otherwise if there's no fractional part, the lower and upper bounds,
    403  // which are integers, are perfectly precise.
    404  if (!r->canHaveFractionalPart()) {
    405    return false;
    406  }
    407 
    408  // Otherwise, if the bounds are conservatively rounded across a power-of-two
    409  // boundary, the exponent may imply a tighter range.
    410  return FloorLog2(std::max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
    411 }
    412 
    413 void Range::dump(GenericPrinter& out) const {
    414  assertInvariants();
    415 
    416  // Floating-point or Integer subset.
    417  if (canHaveFractionalPart_) {
    418    out.printf("F");
    419  } else {
    420    out.printf("I");
    421  }
    422 
    423  out.printf("[");
    424 
    425  if (!hasInt32LowerBound_) {
    426    out.printf("?");
    427  } else {
    428    out.printf("%d", lower_);
    429  }
    430  if (symbolicLower_) {
    431    out.printf(" {");
    432    symbolicLower_->dump(out);
    433    out.printf("}");
    434  }
    435 
    436  out.printf(", ");
    437 
    438  if (!hasInt32UpperBound_) {
    439    out.printf("?");
    440  } else {
    441    out.printf("%d", upper_);
    442  }
    443  if (symbolicUpper_) {
    444    out.printf(" {");
    445    symbolicUpper_->dump(out);
    446    out.printf("}");
    447  }
    448 
    449  out.printf("]");
    450 
    451  bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
    452  bool includesNegativeInfinity =
    453      max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
    454  bool includesPositiveInfinity =
    455      max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
    456  bool includesNegativeZero = canBeNegativeZero_;
    457 
    458  if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
    459      includesNegativeZero) {
    460    out.printf(" (");
    461    bool first = true;
    462    if (includesNaN) {
    463      if (first) {
    464        first = false;
    465      } else {
    466        out.printf(" ");
    467      }
    468      out.printf("U NaN");
    469    }
    470    if (includesNegativeInfinity) {
    471      if (first) {
    472        first = false;
    473      } else {
    474        out.printf(" ");
    475      }
    476      out.printf("U -Infinity");
    477    }
    478    if (includesPositiveInfinity) {
    479      if (first) {
    480        first = false;
    481      } else {
    482        out.printf(" ");
    483      }
    484      out.printf("U Infinity");
    485    }
    486    if (includesNegativeZero) {
    487      if (first) {
    488        first = false;
    489      } else {
    490        out.printf(" ");
    491      }
    492      out.printf("U -0");
    493    }
    494    out.printf(")");
    495  }
    496  if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this)) {
    497    out.printf(" (< pow(2, %d+1))", max_exponent_);
    498  }
    499 }
    500 
// Convenience overload: dump to stderr with a trailing newline.
void Range::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}
    507 
// Compute the intersection of two ranges. A null operand means "unknown"
// (unconstrained); the result is null when the intersection is unknown or
// conservatively abandoned. *emptyRange is set to true when the constraints
// are provably contradictory, i.e. the code guarded by them is unreachable.
Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
                        const Range* rhs, bool* emptyRange) {
  *emptyRange = false;

  if (!lhs && !rhs) {
    return nullptr;
  }

  // With only one constraint, the intersection is a copy of it.
  if (!lhs) {
    return new (alloc) Range(*rhs);
  }
  if (!rhs) {
    return new (alloc) Range(*lhs);
  }

  int32_t newLower = std::max(lhs->lower_, rhs->lower_);
  int32_t newUpper = std::min(lhs->upper_, rhs->upper_);

  // If upper < lower, then we have conflicting constraints. Consider:
  //
  // if (x < 0) {
  //   if (x > 0) {
  //     [Some code.]
  //   }
  // }
  //
  // In this case, the block is unreachable.
  if (newUpper < newLower) {
    // If both ranges can be NaN, the result can still be NaN.
    if (!lhs->canBeNaN() || !rhs->canBeNaN()) {
      *emptyRange = true;
    }
    return nullptr;
  }

  // A bound is known if either operand knows it.
  bool newHasInt32LowerBound =
      lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;

  // A value in the intersection is fractional only if both operands allow it.
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);

  // As 0.0 == -0.0, the intersection should include negative zero if any of the
  // operands can be negative zero.
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag((lhs->canBeNegativeZero_ && rhs->canBeZero()) ||
                       (rhs->canBeNegativeZero_ && lhs->canBeZero()));

  uint16_t newExponent = std::min(lhs->max_exponent_, rhs->max_exponent_);

  // NaN is a special value which is neither greater than infinity or less than
  // negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we
  // can end up thinking we have both a lower and upper bound, even though NaN
  // is still possible. In this case, just be conservative, since any case where
  // we can have NaN is not especially interesting.
  if (newHasInt32LowerBound && newHasInt32UpperBound &&
      newExponent == IncludesInfinityAndNaN) {
    return nullptr;
  }

  // If one of the ranges has a fractional part and the other doesn't, it's
  // possible that we will have computed a newExponent that's more precise
  // than our newLower and newUpper. This is unusual, so we handle it here
  // instead of in optimize().
  //
  // For example, consider the range F[0,1.5]. Range analysis represents the
  // lower and upper bound as integers, so we'd actually have
  // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
  // more precise upper bound than the integer upper bound.
  //
  // When intersecting such a range with an integer range, the fractional part
  // of the range is dropped. The max exponent of 0 remains valid, so the
  // upper bound needs to be adjusted to 1.
  //
  // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
  // the naive intersection is I[2,2], but since the max exponent tells us
  // that the value is always less than 2, the intersection is actually empty.
  if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
      (lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
       newHasInt32UpperBound && newLower == newUpper)) {
    refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
                                &newUpper, &newHasInt32UpperBound);

    // If we're intersecting two ranges that don't overlap, this could also
    // push the bounds past each other, since the actual intersection is
    // the empty set.
    if (newLower > newUpper) {
      *emptyRange = true;
      return nullptr;
    }
  }

  return new (alloc)
      Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
            newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
}
    605 
    606 void Range::unionWith(const Range* other) {
    607  int32_t newLower = std::min(lower_, other->lower_);
    608  int32_t newUpper = std::max(upper_, other->upper_);
    609 
    610  bool newHasInt32LowerBound =
    611      hasInt32LowerBound_ && other->hasInt32LowerBound_;
    612  bool newHasInt32UpperBound =
    613      hasInt32UpperBound_ && other->hasInt32UpperBound_;
    614 
    615  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
    616      canHaveFractionalPart_ || other->canHaveFractionalPart_);
    617  NegativeZeroFlag newMayIncludeNegativeZero =
    618      NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);
    619 
    620  uint16_t newExponent = std::max(max_exponent_, other->max_exponent_);
    621 
    622  rawInitialize(newLower, newHasInt32LowerBound, newUpper,
    623                newHasInt32UpperBound, newCanHaveFractionalPart,
    624                newMayIncludeNegativeZero, newExponent);
    625 }
    626 
// Construct a range describing the possible values of |def|, starting from
// its computed range when present and falling back to what its MIR type
// alone guarantees.
Range::Range(const MDefinition* def)
    : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
  if (const Range* other = def->range()) {
    // The instruction has range information; use it.
    *this = *other;

    // Simulate the effect of converting the value to its type.
    // Note: we cannot clamp here, since ranges aren't allowed to shrink
    // and truncation can increase range again. So doing wrapAround to
    // mimic a possible truncation.
    switch (def->type()) {
      case MIRType::Int32:
        // MToNumberInt32 cannot truncate. So we can safely clamp.
        if (def->isToNumberInt32()) {
          clampToInt32();
        } else {
          wrapAroundToInt32();
        }
        break;
      case MIRType::Boolean:
        wrapAroundToBoolean();
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        break;
    }
  } else {
    // Otherwise just use type information. We can trust the type here
    // because we don't care what value the instruction actually produces,
    // but what value we might get after we get past the bailouts.
    switch (def->type()) {
      case MIRType::Int32:
        setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
        break;
      case MIRType::Boolean:
        setInt32(0, 1);
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        setUnknown();
        break;
    }
  }

  // As a special case, MUrsh is permitted to claim a result type of
  // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
  // bailouts. If range analysis hasn't ruled out values in
  // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
  // use as either a uint32 or an int32.
  if (!hasInt32UpperBound() && def->isUrsh() &&
      def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
    lower_ = INT32_MIN;
  }

  assertInvariants();
}
    685 
    686 static uint16_t ExponentImpliedByDouble(double d) {
    687  // Handle the special values.
    688  if (std::isnan(d)) {
    689    return Range::IncludesInfinityAndNaN;
    690  }
    691  if (std::isinf(d)) {
    692    return Range::IncludesInfinity;
    693  }
    694 
    695  // Otherwise take the exponent part and clamp it at zero, since the Range
    696  // class doesn't track fractional ranges.
    697  return uint16_t(std::max(int_fast16_t(0), ExponentComponent(d)));
    698 }
    699 
// Set this range to the closed interval of doubles [l, h], deriving the
// clamped int32 bounds, the max exponent, and the fractional-part and
// negative-zero flags conservatively.
void Range::setDouble(double l, double h) {
  // Written as !(l > h) rather than (l <= h) so NaN endpoints are accepted.
  MOZ_ASSERT(!(l > h));

  // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
  if (l >= INT32_MIN && l <= INT32_MAX) {
    // Round the lower bound down to keep it conservative.
    lower_ = int32_t(::floor(l));
    hasInt32LowerBound_ = true;
  } else if (l >= INT32_MAX) {
    lower_ = INT32_MAX;
    hasInt32LowerBound_ = true;
  } else {
    lower_ = INT32_MIN;
    hasInt32LowerBound_ = false;
  }
  if (h >= INT32_MIN && h <= INT32_MAX) {
    // Round the upper bound up to keep it conservative.
    upper_ = int32_t(::ceil(h));
    hasInt32UpperBound_ = true;
  } else if (h <= INT32_MIN) {
    upper_ = INT32_MIN;
    hasInt32UpperBound_ = true;
  } else {
    upper_ = INT32_MAX;
    hasInt32UpperBound_ = false;
  }

  // Infer max_exponent_.
  uint16_t lExp = ExponentImpliedByDouble(l);
  uint16_t hExp = ExponentImpliedByDouble(h);
  max_exponent_ = std::max(lExp, hExp);

  canHaveFractionalPart_ = ExcludesFractionalParts;
  canBeNegativeZero_ = ExcludesNegativeZero;

  // If denormals are disabled, any value with exponent 0 will be immediately
  // flushed to 0. This gives 2**53 bit patterns that compare equal to zero.
  //
  // Check whether the range [l .. h] can cross any of the 2^53 zeros. We have
  // to be conservative as the main thread might not interpret doubles the same
  // way as the compiler thread.
  const double doubleMin = mozilla::BitwiseCast<double>(
      mozilla::SpecificFloatingPointBits<double, 0, 1, 0>::value);
  bool includesNegative = std::isnan(l) || l < doubleMin;
  bool includesPositive = std::isnan(h) || h > -doubleMin;
  bool crossesZero = includesNegative && includesPositive;

  // Infer the canHaveFractionalPart_ setting. We can have a
  // fractional part if the range crosses through the neighborhood of zero. We
  // won't have a fractional value if the value is always beyond the point at
  // which double precision can't represent fractional values.
  uint16_t minExp = std::min(lExp, hExp);
  if (crossesZero || minExp < MaxTruncatableExponent) {
    canHaveFractionalPart_ = IncludesFractionalParts;
  }

  // Infer a conservative value for canBeNegativeZero_ setting. We can have a
  // negative zero value if the range crosses through the neighborhood of zero
  // and the lower bound can have a sign bit.
  if (crossesZero && (std::isnan(l) || mozilla::IsNegative(l))) {
    canBeNegativeZero_ = IncludesNegativeZero;
  }

  optimize();
}
    763 
// Set this range to contain exactly the double value |d|.
void Range::setDoubleSingleton(double d) {
  setDouble(d, d);
  assertInvariants();
}
    768 
    769 static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
    770  return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
    771 }
    772 
    773 Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
    774  int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
    775  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) {
    776    l = NoInt32LowerBound;
    777  }
    778 
    779  int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
    780  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound()) {
    781    h = NoInt32UpperBound;
    782  }
    783 
    784  // The exponent is at most one greater than the greater of the operands'
    785  // exponents, except for NaN and infinity cases.
    786  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
    787  if (e <= Range::MaxFiniteExponent) {
    788    ++e;
    789  }
    790 
    791  // Infinity + -Infinity is NaN.
    792  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    793    e = Range::IncludesInfinityAndNaN;
    794  }
    795 
    796  return new (alloc) Range(
    797      l, h,
    798      FractionalPartFlag(lhs->canHaveFractionalPart() ||
    799                         rhs->canHaveFractionalPart()),
    800      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
    801      e);
    802 }
    803 
// Compute a conservative range for lhs - rhs under double-subtraction
// semantics.
Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // The smallest difference pairs lhs's lower bound with rhs's upper bound;
  // 64-bit arithmetic avoids overflow of the endpoint difference.
  int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) {
    l = NoInt32LowerBound;
  }

  // The largest difference pairs lhs's upper bound with rhs's lower bound.
  int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity - Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  // -0 results only from (-0) - 0: lhs must admit -0 and rhs must admit
  // zero (of either sign; (-0) - (-0) is +0, but this stays conservative).
  return new (alloc)
      Range(l, h,
            FractionalPartFlag(lhs->canHaveFractionalPart() ||
                               rhs->canHaveFractionalPart()),
            NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
}
    833 
// Compute a conservative range for lhs & rhs on int32 operands, by case
// analysis on the operands' signs.
Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // If both numbers can be negative, result can be negative in the whole range
  if (lhs->lower() < 0 && rhs->lower() < 0) {
    return Range::NewInt32Range(alloc, INT32_MIN,
                                std::max(lhs->upper(), rhs->upper()));
  }

  // Only one of both numbers can be negative.
  // - result can't be negative
  // - Upper bound is minimum of both upper range,
  int32_t lower = 0;
  int32_t upper = std::min(lhs->upper(), rhs->upper());

  // EXCEPT when upper bound of non negative number is max value,
  // because negative value can return the whole max value.
  // -1 & 5 = 5
  if (lhs->lower() < 0) {
    upper = rhs->upper();
  }
  if (rhs->lower() < 0) {
    upper = lhs->upper();
  }

  return Range::NewInt32Range(alloc, lower, upper);
}
    862 
// Compute a conservative range for lhs | rhs on int32 operands, using
// leading-zero/leading-one counts to bound the result.
Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // When one operand is always 0 or always -1, it's a special case where we
  // can compute a fully precise result. Handling these up front also
  // protects the code below from calling CountLeadingZeroes32 with a zero
  // operand or from shifting an int32_t by 32.
  if (lhs->lower() == lhs->upper()) {
    if (lhs->lower() == 0) {
      // x | 0 == x.
      return new (alloc) Range(*rhs);
    }
    if (lhs->lower() == -1) {
      // x | -1 == -1, i.e. the lhs singleton itself.
      return new (alloc) Range(*lhs);
    }
  }
  if (rhs->lower() == rhs->upper()) {
    if (rhs->lower() == 0) {
      return new (alloc) Range(*lhs);
    }
    if (rhs->lower() == -1) {
      return new (alloc) Range(*rhs);
    }
  }

  // The code below uses CountLeadingZeroes32, which has undefined behavior
  // if its operand is 0. We rely on the code above to protect it.
  MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
  MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
  MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
  MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;

  if (lhs->lower() >= 0 && rhs->lower() >= 0) {
    // Both operands are non-negative, so the result won't be less than either.
    lower = std::max(lhs->lower(), rhs->lower());
    // The result will have leading zeros where both operands have leading
    // zeros. CountLeadingZeroes32 of a non-negative int32 will at least be 1 to
    // account for the bit of sign.
    upper = int32_t(UINT32_MAX >> std::min(CountLeadingZeroes32(lhs->upper()),
                                           CountLeadingZeroes32(rhs->upper())));
  } else {
    // The result will have leading ones where either operand has leading ones.
    if (lhs->upper() < 0) {
      // CountLeadingZeroes32(~x) counts x's leading one bits.
      unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
    if (rhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
  }

  return Range::NewInt32Range(alloc, lower, upper);
}
    921 
// Compute a conservative range for lhs ^ rhs on int32 operands. Negative
// operands are normalized away via bitwise negation first, then the result
// is un-negated if needed.
Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  int32_t lhsLower = lhs->lower();
  int32_t lhsUpper = lhs->upper();
  int32_t rhsLower = rhs->lower();
  int32_t rhsUpper = rhs->upper();
  bool invertAfter = false;

  // If either operand is negative, bitwise-negate it, and arrange to negate
  // the result; ~((~x)^y) == x^y. If both are negative the negations on the
  // result cancel each other out; effectively this is (~x)^(~y) == x^y.
  // These transformations reduce the number of cases we have to handle below.
  if (lhsUpper < 0) {
    // Note: ~ reverses ordering, hence the swap to keep lower <= upper.
    lhsLower = ~lhsLower;
    lhsUpper = ~lhsUpper;
    std::swap(lhsLower, lhsUpper);
    invertAfter = !invertAfter;
  }
  if (rhsUpper < 0) {
    rhsLower = ~rhsLower;
    rhsUpper = ~rhsUpper;
    std::swap(rhsLower, rhsUpper);
    invertAfter = !invertAfter;
  }

  // Handle cases where lhs or rhs is always zero specially, because they're
  // easy cases where we can be perfectly precise, and because it protects the
  // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
  // undefined behavior.
  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;
  if (lhsLower == 0 && lhsUpper == 0) {
    // 0 ^ y == y.
    upper = rhsUpper;
    lower = rhsLower;
  } else if (rhsLower == 0 && rhsUpper == 0) {
    // x ^ 0 == x.
    upper = lhsUpper;
    lower = lhsLower;
  } else if (lhsLower >= 0 && rhsLower >= 0) {
    // Both operands are non-negative. The result will be non-negative.
    lower = 0;
    // To compute the upper value, take each operand's upper value and
    // set all bits that don't correspond to leading zero bits in the
    // other to one. For each one, this gives an upper bound for the
    // result, so we can take the minimum between the two.
    unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
    unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
    upper = std::min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                     lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
  }

  // If we bitwise-negated one (but not both) of the operands above, apply the
  // bitwise-negate to the result, completing ~((~x)^y) == x^y.
  if (invertAfter) {
    lower = ~lower;
    upper = ~upper;
    std::swap(lower, upper);
  }

  return Range::NewInt32Range(alloc, lower, upper);
}
    983 
    984 Range* Range::not_(TempAllocator& alloc, const Range* op) {
    985  MOZ_ASSERT(op->isInt32());
    986  return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
    987 }
    988 
// Compute a conservative range for lhs * rhs under double-multiplication
// semantics, including -0, infinity, and NaN classification.
Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // A product can be fractional only if a factor can be.
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);

  // The product is -0 when a factor with a set sign bit meets a finite
  // non-negative factor, in either order.
  NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
      (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
      (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));

  uint16_t exponent;
  if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
    // Two finite values.
    exponent = lhs->numBits() + rhs->numBits() - 1;
    if (exponent > Range::MaxFiniteExponent) {
      exponent = Range::IncludesInfinity;
    }
  } else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
             !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
             !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
    // Two values that multiplied together won't produce a NaN.
    // (0 * Infinity is NaN, hence the zero/infinity exclusions above.)
    exponent = Range::IncludesInfinity;
  } else {
    // Could be anything.
    exponent = Range::IncludesInfinityAndNaN;
  }

  // Without int32 bounds on both sides, only the flags and exponent carry
  // information.
  if (MissingAnyInt32Bounds(lhs, rhs)) {
    return new (alloc)
        Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
              newMayIncludeNegativeZero, exponent);
  }
  // With both operands possibly spanning zero, the extreme products can be
  // any of the four endpoint combinations; 64-bit math avoids overflow.
  int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
  int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
  int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
  int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
  return new (alloc)
      Range(std::min(std::min(a, b), std::min(c, d)),
            std::max(std::max(a, b), std::max(c, d)), newCanHaveFractionalPart,
            newMayIncludeNegativeZero, exponent);
}
   1028 
// Compute a range for lhs << c with a constant shift count.
Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  // Only the low five bits of the count matter, as in the JS << operator.
  int32_t shift = c & 0x1f;

  // If the shift doesn't loose bits or shift bits into the sign bit, we
  // can simply compute the correct range by shifting.
  // The round trip `<< shift << 1 >> shift >> 1` shifts one extra position
  // each way so that a value landing in the sign bit also fails the test.
  if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
          lhs->lower() &&
      (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
          lhs->upper()) {
    return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
                                uint32_t(lhs->upper()) << shift);
  }

  // Otherwise bits may wrap around; any int32 value is possible.
  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
   1045 
   1046 Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
   1047  MOZ_ASSERT(lhs->isInt32());
   1048  int32_t shift = c & 0x1f;
   1049  return Range::NewInt32Range(alloc, lhs->lower() >> shift,
   1050                              lhs->upper() >> shift);
   1051 }
   1052 
   1053 Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
   1054  // ursh's left operand is uint32, not int32, but for range analysis we
   1055  // currently approximate it as int32. We assume here that the range has
   1056  // already been adjusted accordingly by our callers.
   1057  MOZ_ASSERT(lhs->isInt32());
   1058 
   1059  int32_t shift = c & 0x1f;
   1060 
   1061  // If the value is always non-negative or always negative, we can simply
   1062  // compute the correct range by shifting.
   1063  if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
   1064    return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
   1065                                 uint32_t(lhs->upper()) >> shift);
   1066  }
   1067 
   1068  // Otherwise return the most general range after the shift.
   1069  return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
   1070 }
   1071 
   1072 Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
   1073  MOZ_ASSERT(lhs->isInt32());
   1074  MOZ_ASSERT(rhs->isInt32());
   1075  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
   1076 }
   1077 
// Compute a range for lhs >> rhs with a variable shift count.
Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // Canonicalize the shift range to 0 to 31.
  int32_t shiftLower = rhs->lower();
  int32_t shiftUpper = rhs->upper();
  if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
    // The range covers a full cycle of shift counts mod 32; any count is
    // possible.
    shiftLower = 0;
    shiftUpper = 31;
  } else {
    // Masking can reorder the endpoints (the interval wraps mod 32); in
    // that case fall back to the full 0..31 count range.
    shiftLower &= 0x1f;
    shiftUpper &= 0x1f;
    if (shiftLower > shiftUpper) {
      shiftLower = 0;
      shiftUpper = 31;
    }
  }
  MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);

  // The lhs bounds are signed, thus the minimum is either the lower bound
  // shift by the smallest shift if negative or the lower bound shifted by the
  // biggest shift otherwise.  And the opposite for the maximum.
  int32_t lhsLower = lhs->lower();
  int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
  int32_t lhsUpper = lhs->upper();
  int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;

  return Range::NewInt32Range(alloc, min, max);
}
   1108 
   1109 Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
   1110  // ursh's left operand is uint32, not int32, but for range analysis we
   1111  // currently approximate it as int32. We assume here that the range has
   1112  // already been adjusted accordingly by our callers.
   1113  MOZ_ASSERT(lhs->isInt32());
   1114  MOZ_ASSERT(rhs->isInt32());
   1115  return Range::NewUInt32Range(
   1116      alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
   1117 }
   1118 
// Compute a range for the absolute value of op.
Range* Range::abs(TempAllocator& alloc, const Range* op) {
  int32_t l = op->lower_;
  int32_t u = op->upper_;
  FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;

  // Abs never produces a negative zero.
  NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;

  // New lower bound: 0 unless the whole range is positive (then l) or the
  // whole range is negative (then -u). New upper bound: the larger of u and
  // -l. Negating INT32_MIN would overflow, so it saturates to INT32_MAX,
  // and an INT32_MIN lower endpoint makes the new upper bound inexact
  // (second bool argument), since |INT32_MIN| doesn't fit in int32.
  return new (alloc) Range(
      std::max(std::max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
      std::max(std::max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
      op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
      canBeNegativeZero, op->max_exponent_);
}
   1133 
// Compute a range for Math.min(lhs, rhs); returns nullptr when the result
// may be NaN, which ranges cannot represent.
Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  // The lower bound is known only if both operands' lower bounds are (&&);
  // but the min can't exceed either operand, so one known upper bound
  // suffices (||).
  return new (alloc) Range(std::min(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                           std::min(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}
   1152 
// Compute a range for Math.max(lhs, rhs); returns nullptr when the result
// may be NaN, which ranges cannot represent.
Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  // Mirror of min(): the max is at least either operand, so one known lower
  // bound suffices (||); the upper bound is known only if both are (&&).
  return new (alloc) Range(std::max(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                           std::max(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}
   1171 
   1172 Range* Range::floor(TempAllocator& alloc, const Range* op) {
   1173  Range* copy = new (alloc) Range(*op);
   1174  // Decrement lower bound of copy range if op have a factional part and lower
   1175  // bound is Int32 defined. Also we avoid to decrement when op have a
   1176  // fractional part but lower_ >= JSVAL_INT_MAX.
   1177  if (op->canHaveFractionalPart() && op->hasInt32LowerBound()) {
   1178    copy->setLowerInit(int64_t(copy->lower_) - 1);
   1179  }
   1180 
   1181  // Also refine max_exponent_ because floor may have decremented int value
   1182  // If we've got int32 defined bounds, just deduce it using defined bounds.
   1183  // But, if we don't have those, value's max_exponent_ may have changed.
   1184  // Because we're looking to maintain an over estimation, if we can,
   1185  // we increment it.
   1186  if (copy->hasInt32Bounds())
   1187    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
   1188  else if (copy->max_exponent_ < MaxFiniteExponent)
   1189    copy->max_exponent_++;
   1190 
   1191  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
   1192  copy->assertInvariants();
   1193  return copy;
   1194 }
   1195 
   1196 Range* Range::ceil(TempAllocator& alloc, const Range* op) {
   1197  Range* copy = new (alloc) Range(*op);
   1198 
   1199  // We need to refine max_exponent_ because ceil may have incremented the int
   1200  // value. If we have got int32 bounds defined, just deduce it using the
   1201  // defined bounds. Else we can just increment its value, as we are looking to
   1202  // maintain an over estimation.
   1203  if (copy->hasInt32Bounds()) {
   1204    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
   1205  } else if (copy->max_exponent_ < MaxFiniteExponent) {
   1206    copy->max_exponent_++;
   1207  }
   1208 
   1209  // If the range is definitely above 0 or below -1, we don't need to include
   1210  // -0; otherwise we do.
   1211 
   1212  copy->canBeNegativeZero_ = ((copy->lower_ > 0) || (copy->upper_ <= -1))
   1213                                 ? copy->canBeNegativeZero_
   1214                                 : IncludesNegativeZero;
   1215 
   1216  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
   1217  copy->assertInvariants();
   1218  return copy;
   1219 }
   1220 
   1221 Range* Range::sign(TempAllocator& alloc, const Range* op) {
   1222  if (op->canBeNaN()) {
   1223    return nullptr;
   1224  }
   1225 
   1226  return new (alloc)
   1227      Range(std::clamp(op->lower_, -1, 1), std::clamp(op->upper_, -1, 1),
   1228            Range::ExcludesFractionalParts,
   1229            NegativeZeroFlag(op->canBeNegativeZero()), 0);
   1230 }
   1231 
// Compute a range for an operation that maps NaN to +0 (and -0 to +0).
Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  if (copy->canBeNaN()) {
    // NaN is mapped away, so the result can no longer be NaN — but it may
    // still be infinite.
    copy->max_exponent_ = Range::IncludesInfinity;
    // NaN becomes zero, so make sure zero is included in the range.
    if (!copy->canBeZero()) {
      Range zero;
      zero.setDoubleSingleton(0);
      copy->unionWith(&zero);
    }
  }
  // -0 is also normalized to +0.
  copy->refineToExcludeNegativeZero();
  return copy;
}
   1245 
   1246 bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
   1247  // The result can only be negative zero if both sides are finite and they
   1248  // have differing signs.
   1249  return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
   1250         (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
   1251 }
   1252 
   1253 bool Range::update(const Range* other) {
   1254  bool changed = lower_ != other->lower_ ||
   1255                 hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
   1256                 upper_ != other->upper_ ||
   1257                 hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
   1258                 canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
   1259                 canBeNegativeZero_ != other->canBeNegativeZero_ ||
   1260                 max_exponent_ != other->max_exponent_;
   1261  if (changed) {
   1262    lower_ = other->lower_;
   1263    hasInt32LowerBound_ = other->hasInt32LowerBound_;
   1264    upper_ = other->upper_;
   1265    hasInt32UpperBound_ = other->hasInt32UpperBound_;
   1266    canHaveFractionalPart_ = other->canHaveFractionalPart_;
   1267    canBeNegativeZero_ = other->canBeNegativeZero_;
   1268    max_exponent_ = other->max_exponent_;
   1269    assertInvariants();
   1270  }
   1271 
   1272  return changed;
   1273 }
   1274 
   1275 ///////////////////////////////////////////////////////////////////////////////
   1276 // Range Computation for MIR Nodes
   1277 ///////////////////////////////////////////////////////////////////////////////
   1278 
// A phi's range is the union of its reachable operands' ranges; if any
// reachable operand has no range, the phi gets none either.
void MPhi::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range* range = nullptr;
  for (size_t i = 0, e = numOperands(); i < e; i++) {
    // Operands flowing in from unreachable blocks can't contribute values.
    if (getOperand(i)->block()->unreachable()) {
      JitSpew(JitSpew_Range, "Ignoring unreachable input %u",
              getOperand(i)->id());
      continue;
    }

    // Peek at the pre-bailout range so we can take a short-cut; if any of
    // the operands has an unknown range, this phi has an unknown range.
    if (!getOperand(i)->range()) {
      return;
    }

    Range input(getOperand(i));

    if (range) {
      range->unionWith(&input);
    } else {
      range = new (alloc) Range(input);
    }
  }

  setRange(range);
}
   1309 
   1310 void MBeta::computeRange(TempAllocator& alloc) {
   1311  bool emptyRange = false;
   1312 
   1313  Range opRange(getOperand(0));
   1314  Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
   1315  if (emptyRange) {
   1316    JitSpew(JitSpew_Range, "Marking block for inst %u unreachable", id());
   1317    block()->setUnreachableUnchecked();
   1318  } else {
   1319    setRange(range);
   1320  }
   1321 }
   1322 
   1323 void MConstant::computeRange(TempAllocator& alloc) {
   1324  if (isTypeRepresentableAsDouble()) {
   1325    double d = numberToDouble();
   1326    setRange(Range::NewDoubleSingletonRange(alloc, d));
   1327  } else if (type() == MIRType::Boolean) {
   1328    bool b = toBoolean();
   1329    setRange(Range::NewInt32Range(alloc, b, b));
   1330  }
   1331 }
   1332 
   1333 void MCharCodeAt::computeRange(TempAllocator& alloc) {
   1334  // ECMA 262 says that the integer will be non-negative and at most 65535.
   1335  setRange(Range::NewInt32Range(alloc, 0, unicode::UTF16Max));
   1336 }
   1337 
   1338 void MCodePointAt::computeRange(TempAllocator& alloc) {
   1339  setRange(Range::NewInt32Range(alloc, 0, unicode::NonBMPMax));
   1340 }
   1341 
   1342 void MClampToUint8::computeRange(TempAllocator& alloc) {
   1343  setRange(Range::NewUInt32Range(alloc, 0, 255));
   1344 }
   1345 
   1346 void MBitAnd::computeRange(TempAllocator& alloc) {
   1347  if (type() != MIRType::Int32) {
   1348    return;
   1349  }
   1350 
   1351  Range left(getOperand(0));
   1352  Range right(getOperand(1));
   1353  left.wrapAroundToInt32();
   1354  right.wrapAroundToInt32();
   1355 
   1356  setRange(Range::and_(alloc, &left, &right));
   1357 }
   1358 
   1359 void MBitOr::computeRange(TempAllocator& alloc) {
   1360  if (type() != MIRType::Int32) {
   1361    return;
   1362  }
   1363 
   1364  Range left(getOperand(0));
   1365  Range right(getOperand(1));
   1366  left.wrapAroundToInt32();
   1367  right.wrapAroundToInt32();
   1368 
   1369  setRange(Range::or_(alloc, &left, &right));
   1370 }
   1371 
   1372 void MBitXor::computeRange(TempAllocator& alloc) {
   1373  if (type() != MIRType::Int32) {
   1374    return;
   1375  }
   1376 
   1377  Range left(getOperand(0));
   1378  Range right(getOperand(1));
   1379  left.wrapAroundToInt32();
   1380  right.wrapAroundToInt32();
   1381 
   1382  setRange(Range::xor_(alloc, &left, &right));
   1383 }
   1384 
   1385 void MBitNot::computeRange(TempAllocator& alloc) {
   1386  if (type() == MIRType::Int64) {
   1387    return;
   1388  }
   1389  MOZ_ASSERT(type() == MIRType::Int32);
   1390 
   1391  Range op(getOperand(0));
   1392  op.wrapAroundToInt32();
   1393 
   1394  setRange(Range::not_(alloc, &op));
   1395 }
   1396 
   1397 void MLsh::computeRange(TempAllocator& alloc) {
   1398  if (type() != MIRType::Int32) {
   1399    return;
   1400  }
   1401 
   1402  Range left(getOperand(0));
   1403  Range right(getOperand(1));
   1404  left.wrapAroundToInt32();
   1405 
   1406  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
   1407  if (rhsConst && rhsConst->type() == MIRType::Int32) {
   1408    int32_t c = rhsConst->toInt32();
   1409    setRange(Range::lsh(alloc, &left, c));
   1410    return;
   1411  }
   1412 
   1413  right.wrapAroundToShiftCount();
   1414  setRange(Range::lsh(alloc, &left, &right));
   1415 }
   1416 
   1417 void MRsh::computeRange(TempAllocator& alloc) {
   1418  if (type() != MIRType::Int32) {
   1419    return;
   1420  }
   1421 
   1422  Range left(getOperand(0));
   1423  Range right(getOperand(1));
   1424  left.wrapAroundToInt32();
   1425 
   1426  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
   1427  if (rhsConst && rhsConst->type() == MIRType::Int32) {
   1428    int32_t c = rhsConst->toInt32();
   1429    setRange(Range::rsh(alloc, &left, c));
   1430    return;
   1431  }
   1432 
   1433  right.wrapAroundToShiftCount();
   1434  setRange(Range::rsh(alloc, &left, &right));
   1435 }
   1436 
   1437 void MUrsh::computeRange(TempAllocator& alloc) {
   1438  if (type() != MIRType::Int32) {
   1439    return;
   1440  }
   1441 
   1442  Range left(getOperand(0));
   1443  Range right(getOperand(1));
   1444 
   1445  // ursh can be thought of as converting its left operand to uint32, or it
   1446  // can be thought of as converting its left operand to int32, and then
   1447  // reinterpreting the int32 bits as a uint32 value. Both approaches yield
   1448  // the same result. Since we lack support for full uint32 ranges, we use
   1449  // the second interpretation, though it does cause us to be conservative.
   1450  left.wrapAroundToInt32();
   1451  right.wrapAroundToShiftCount();
   1452 
   1453  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
   1454  if (rhsConst && rhsConst->type() == MIRType::Int32) {
   1455    int32_t c = rhsConst->toInt32();
   1456    setRange(Range::ursh(alloc, &left, c));
   1457  } else {
   1458    setRange(Range::ursh(alloc, &left, &right));
   1459  }
   1460 
   1461  MOZ_ASSERT(range()->lower() >= 0);
   1462 }
   1463 
   1464 void MAbs::computeRange(TempAllocator& alloc) {
   1465  if (type() != MIRType::Int32 && type() != MIRType::Double) {
   1466    return;
   1467  }
   1468 
   1469  Range other(getOperand(0));
   1470  Range* next = Range::abs(alloc, &other);
   1471  if (implicitTruncate_) {
   1472    next->wrapAroundToInt32();
   1473  }
   1474  setRange(next);
   1475 }
   1476 
   1477 void MFloor::computeRange(TempAllocator& alloc) {
   1478  Range other(getOperand(0));
   1479  setRange(Range::floor(alloc, &other));
   1480 }
   1481 
   1482 void MCeil::computeRange(TempAllocator& alloc) {
   1483  Range other(getOperand(0));
   1484  setRange(Range::ceil(alloc, &other));
   1485 }
   1486 
   1487 void MClz::computeRange(TempAllocator& alloc) {
   1488  if (type() != MIRType::Int32) {
   1489    return;
   1490  }
   1491  setRange(Range::NewUInt32Range(alloc, 0, 32));
   1492 }
   1493 
   1494 void MCtz::computeRange(TempAllocator& alloc) {
   1495  if (type() != MIRType::Int32) {
   1496    return;
   1497  }
   1498  setRange(Range::NewUInt32Range(alloc, 0, 32));
   1499 }
   1500 
   1501 void MPopcnt::computeRange(TempAllocator& alloc) {
   1502  if (type() != MIRType::Int32) {
   1503    return;
   1504  }
   1505  setRange(Range::NewUInt32Range(alloc, 0, 32));
   1506 }
   1507 
   1508 void MMinMax::computeRange(TempAllocator& alloc) {
   1509  if (type() != MIRType::Int32 && type() != MIRType::Double) {
   1510    return;
   1511  }
   1512 
   1513  Range left(getOperand(0));
   1514  Range right(getOperand(1));
   1515  setRange(isMax() ? Range::max(alloc, &left, &right)
   1516                   : Range::min(alloc, &left, &right));
   1517 }
   1518 
   1519 void MAdd::computeRange(TempAllocator& alloc) {
   1520  if (type() != MIRType::Int32 && type() != MIRType::Double) {
   1521    return;
   1522  }
   1523  Range left(getOperand(0));
   1524  Range right(getOperand(1));
   1525  Range* next = Range::add(alloc, &left, &right);
   1526  if (isTruncated()) {
   1527    next->wrapAroundToInt32();
   1528  }
   1529  setRange(next);
   1530 }
   1531 
   1532 void MSub::computeRange(TempAllocator& alloc) {
   1533  if (type() != MIRType::Int32 && type() != MIRType::Double) {
   1534    return;
   1535  }
   1536  Range left(getOperand(0));
   1537  Range right(getOperand(1));
   1538  Range* next = Range::sub(alloc, &left, &right);
   1539  if (isTruncated()) {
   1540    next->wrapAroundToInt32();
   1541  }
   1542  setRange(next);
   1543 }
   1544 
void MMul::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  // Refine this node's own negative-zero flag first: if the operand ranges
  // prove -0 is impossible, clear it before computing the result range.
  if (canBeNegativeZero()) {
    canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
  }
  Range* next = Range::mul(alloc, &left, &right);
  // The computed range may further rule out -0.
  if (!next->canBeNegativeZero()) {
    canBeNegativeZero_ = false;
  }
  // Truncated multiplications could overflow in both directions
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}
   1564 
void MMod::computeRange(TempAllocator& alloc) {
  // Derive a range for an int32/double modulus from its operands' ranges.
  // As a side effect, this may set the |unsigned_| flag when both operands
  // are provably non-negative integers.
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // If RHS can be zero, the result can be NaN.
  if (rhs.lower() <= 0 && rhs.upper() >= 0) {
    return;
  }

  // If both operands are non-negative integers, we can optimize this to an
  // unsigned mod.
  if (type() == MIRType::Int32 && rhs.lower() > 0) {
    bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
                      rhs.canHaveFractionalPart();
    // It is not possible to check that lhs.lower() >= 0, since the range
    // of a ursh with rhs a 0 constant is wrapped around the int32 range in
    // Range::Range(). However, IsUint32Type() will only return true for
    // nodes that lie in the range [0, UINT32_MAX].
    bool hasUint32s =
        IsUint32Type(getOperand(0)) &&
        getOperand(1)->type() == MIRType::Int32 &&
        (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
    if (!hasDoubles || hasUint32s) {
      unsigned_ = true;
    }
  }

  // For unsigned mod, we have to convert both operands to unsigned.
  // Note that we handled the case of a zero rhs above.
  if (unsigned_) {
    // The result of an unsigned mod will never be unsigned-greater than
    // either operand.
    uint32_t lhsBound = std::max<uint32_t>(lhs.lower(), lhs.upper());
    uint32_t rhsBound = std::max<uint32_t>(rhs.lower(), rhs.upper());

    // If either range crosses through -1 as a signed value, it could be
    // the maximum unsigned value when interpreted as unsigned. If the range
    // doesn't include -1, then the simple max value we computed above is
    // correct.
    if (lhs.lower() <= -1 && lhs.upper() >= -1) {
      lhsBound = UINT32_MAX;
    }
    if (rhs.lower() <= -1 && rhs.upper() >= -1) {
      rhsBound = UINT32_MAX;
    }

    // The result will never be equal to the rhs, and we shouldn't have
    // any rounding to worry about.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    --rhsBound;

    // This gives us two upper bounds, so we can take the best one.
    setRange(Range::NewUInt32Range(alloc, 0, std::min(lhsBound, rhsBound)));
    return;
  }

  // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
  // First, the absolute value of the result will always be less than the
  // absolute value of rhs. (And if rhs is zero, the result is NaN).
  int64_t a = Abs<int64_t>(rhs.lower());
  int64_t b = Abs<int64_t>(rhs.upper());
  if (a == 0 && b == 0) {
    return;
  }
  int64_t rhsAbsBound = std::max(a, b);

  // If the value is known to be integer, less-than abs(rhs) is equivalent
  // to less-than-or-equal abs(rhs)-1. This is important for being able to
  // say that the result of x%256 is an 8-bit unsigned number.
  if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) {
    --rhsAbsBound;
  }

  // Next, the absolute value of the result will never be greater than the
  // absolute value of lhs.
  int64_t lhsAbsBound =
      std::max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

  // This gives us two upper bounds, so we can take the best one.
  int64_t absBound = std::min(lhsAbsBound, rhsAbsBound);

  // Now consider the sign of the result.
  // If lhs is non-negative, the result will be non-negative.
  // If lhs is non-positive, the result will be non-positive.
  int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
  int64_t upper = lhs.upper() <= 0 ? 0 : absBound;

  Range::FractionalPartFlag newCanHaveFractionalPart =
      Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
                                rhs.canHaveFractionalPart());

  // If the lhs can have the sign bit set and we can return a zero, it'll be a
  // negative zero.
  Range::NegativeZeroFlag newMayIncludeNegativeZero =
      Range::NegativeZeroFlag(lhs.canHaveSignBitSet());

  setRange(new (alloc) Range(lower, upper, newCanHaveFractionalPart,
                             newMayIncludeNegativeZero,
                             std::min(lhs.exponent(), rhs.exponent())));
}
   1674 
   1675 void MDiv::computeRange(TempAllocator& alloc) {
   1676  if (type() != MIRType::Int32 && type() != MIRType::Double) {
   1677    return;
   1678  }
   1679  Range lhs(getOperand(0));
   1680  Range rhs(getOperand(1));
   1681 
   1682  // If either operand is a NaN, the result is NaN. This also conservatively
   1683  // handles Infinity cases.
   1684  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
   1685    return;
   1686  }
   1687 
   1688  // Something simple for now: When dividing by a positive rhs, the result
   1689  // won't be further from zero than lhs.
   1690  if (lhs.lower() >= 0 && rhs.lower() >= 1) {
   1691    setRange(new (alloc) Range(0, lhs.upper(), Range::IncludesFractionalParts,
   1692                               Range::IncludesNegativeZero, lhs.exponent()));
   1693  } else if (unsigned_ && rhs.lower() >= 1) {
   1694    // We shouldn't set the unsigned flag if the inputs can have
   1695    // fractional parts.
   1696    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
   1697    // We shouldn't set the unsigned flag if the inputs can be
   1698    // negative zero.
   1699    MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
   1700    // Unsigned division by a non-zero rhs will return a uint32 value.
   1701    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
   1702  }
   1703 }
   1704 
   1705 void MSqrt::computeRange(TempAllocator& alloc) {
   1706  Range input(getOperand(0));
   1707 
   1708  // If either operand is a NaN, the result is NaN. This also conservatively
   1709  // handles Infinity cases.
   1710  if (!input.hasInt32Bounds()) {
   1711    return;
   1712  }
   1713 
   1714  // Sqrt of a negative non-zero value is NaN.
   1715  if (input.lower() < 0) {
   1716    return;
   1717  }
   1718 
   1719  // Something simple for now: When taking the sqrt of a positive value, the
   1720  // result won't be further from zero than the input.
   1721  // And, sqrt of an integer may have a fractional part.
   1722  setRange(new (alloc) Range(0, input.upper(), Range::IncludesFractionalParts,
   1723                             input.canBeNegativeZero(), input.exponent()));
   1724 }
   1725 
   1726 void MToDouble::computeRange(TempAllocator& alloc) {
   1727  setRange(new (alloc) Range(getOperand(0)));
   1728 }
   1729 
// Intentionally computes no range. NOTE(review): presumably because float32
// rounding could make a forwarded double/int range inexact — confirm.
void MToFloat32::computeRange(TempAllocator& alloc) {}
   1731 
   1732 void MTruncateToInt32::computeRange(TempAllocator& alloc) {
   1733  Range* output = new (alloc) Range(getOperand(0));
   1734  output->wrapAroundToInt32();
   1735  setRange(output);
   1736 }
   1737 
   1738 void MToNumberInt32::computeRange(TempAllocator& alloc) {
   1739  // No clamping since this computes the range *before* bailouts.
   1740  setRange(new (alloc) Range(getOperand(0)));
   1741 }
   1742 
   1743 void MBooleanToInt32::computeRange(TempAllocator& alloc) {
   1744  setRange(Range::NewUInt32Range(alloc, 0, 1));
   1745 }
   1746 
   1747 void MLimitedTruncate::computeRange(TempAllocator& alloc) {
   1748  Range* output = new (alloc) Range(input());
   1749  setRange(output);
   1750 }
   1751 
   1752 static Range* GetArrayBufferViewRange(TempAllocator& alloc, Scalar::Type type) {
   1753  switch (type) {
   1754    case Scalar::Uint8Clamped:
   1755    case Scalar::Uint8:
   1756      return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
   1757    case Scalar::Uint16:
   1758      return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
   1759    case Scalar::Uint32:
   1760      return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
   1761 
   1762    case Scalar::Int8:
   1763      return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
   1764    case Scalar::Int16:
   1765      return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
   1766    case Scalar::Int32:
   1767      return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
   1768 
   1769    case Scalar::BigInt64:
   1770    case Scalar::BigUint64:
   1771    case Scalar::Int64:
   1772    case Scalar::Simd128:
   1773    case Scalar::Float16:
   1774    case Scalar::Float32:
   1775    case Scalar::Float64:
   1776    case Scalar::MaxTypedArrayViewType:
   1777      break;
   1778  }
   1779  return nullptr;
   1780 }
   1781 
   1782 void MLoadUnboxedScalar::computeRange(TempAllocator& alloc) {
   1783  // We have an Int32 type and if this is a UInt32 load it may produce a value
   1784  // outside of our range, but we have a bailout to handle those cases.
   1785  setRange(GetArrayBufferViewRange(alloc, storageType()));
   1786 }
   1787 
   1788 void MLoadDataViewElement::computeRange(TempAllocator& alloc) {
   1789  // We have an Int32 type and if this is a UInt32 load it may produce a value
   1790  // outside of our range, but we have a bailout to handle those cases.
   1791  setRange(GetArrayBufferViewRange(alloc, storageType()));
   1792 }
   1793 
   1794 void MArrayLength::computeRange(TempAllocator& alloc) {
   1795  // Array lengths can go up to UINT32_MAX. We will bail out if the array
   1796  // length > INT32_MAX.
   1797  MOZ_ASSERT(type() == MIRType::Int32);
   1798  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
   1799 }
   1800 
   1801 void MInitializedLength::computeRange(TempAllocator& alloc) {
   1802  setRange(
   1803      Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
   1804 }
   1805 
   1806 void MArrayBufferViewLength::computeRange(TempAllocator& alloc) {
   1807  if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
   1808    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
   1809  }
   1810 }
   1811 
   1812 void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
   1813  if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
   1814    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
   1815  }
   1816 }
   1817 
   1818 void MResizableTypedArrayLength::computeRange(TempAllocator& alloc) {
   1819  if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
   1820    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
   1821  }
   1822 }
   1823 
   1824 void MResizableDataViewByteLength::computeRange(TempAllocator& alloc) {
   1825  if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
   1826    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
   1827  }
   1828 }
   1829 
void MTypedArrayElementSize::computeRange(TempAllocator& alloc) {
  // Element sizes never exceed sizeof(double) (8 bytes); the macro below
  // statically asserts this for every typed array element type.
  constexpr auto MaxTypedArraySize = sizeof(double);

#define ASSERT_MAX_SIZE(_, T, N)                \
  static_assert(sizeof(T) <= MaxTypedArraySize, \
                "unexpected typed array type exceeding 64-bits storage");
  JS_FOR_EACH_TYPED_ARRAY(ASSERT_MAX_SIZE)
#undef ASSERT_MAX_SIZE

  setRange(Range::NewUInt32Range(alloc, 0, MaxTypedArraySize));
}
   1841 
   1842 void MStringLength::computeRange(TempAllocator& alloc) {
   1843  static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
   1844                "NewUInt32Range requires a uint32 value");
   1845  setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
   1846 }
   1847 
void MArgumentsLength::computeRange(TempAllocator& alloc) {
  // This is a conservative upper bound on what |TooManyActualArguments|
  // checks.  If exceeded, Ion will not be entered in the first place.
  static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
                "NewUInt32Range requires a uint32 value");
  setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
}
   1855 
   1856 void MBoundsCheck::computeRange(TempAllocator& alloc) {
   1857  // Just transfer the incoming index range to the output. The length() is
   1858  // also interesting, but it is handled as a bailout check, and we're
   1859  // computing a pre-bailout range here.
   1860  setRange(new (alloc) Range(index()));
   1861 }
   1862 
   1863 void MSpectreMaskIndex::computeRange(TempAllocator& alloc) {
   1864  // Just transfer the incoming index range to the output for now.
   1865  setRange(new (alloc) Range(index()));
   1866 }
   1867 
   1868 void MInt32ToIntPtr::computeRange(TempAllocator& alloc) {
   1869  setRange(new (alloc) Range(input()));
   1870 }
   1871 
   1872 void MNonNegativeIntPtrToInt32::computeRange(TempAllocator& alloc) {
   1873  // We will bail out if the IntPtr value > INT32_MAX.
   1874  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
   1875 }
   1876 
   1877 void MArrayPush::computeRange(TempAllocator& alloc) {
   1878  // MArrayPush returns the new array length. It bails out if the new length
   1879  // doesn't fit in an Int32.
   1880  MOZ_ASSERT(type() == MIRType::Int32);
   1881  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
   1882 }
   1883 
   1884 void MMathFunction::computeRange(TempAllocator& alloc) {
   1885  Range opRange(getOperand(0));
   1886  switch (function()) {
   1887    case UnaryMathFunction::SinNative:
   1888    case UnaryMathFunction::SinFdlibm:
   1889    case UnaryMathFunction::CosNative:
   1890    case UnaryMathFunction::CosFdlibm:
   1891      if (!opRange.canBeInfiniteOrNaN()) {
   1892        setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
   1893      }
   1894      break;
   1895    default:
   1896      break;
   1897  }
   1898 }
   1899 
   1900 void MSign::computeRange(TempAllocator& alloc) {
   1901  Range opRange(getOperand(0));
   1902  setRange(Range::sign(alloc, &opRange));
   1903 }
   1904 
   1905 void MRandom::computeRange(TempAllocator& alloc) {
   1906  Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);
   1907 
   1908  // Random never returns negative zero.
   1909  r->refineToExcludeNegativeZero();
   1910 
   1911  setRange(r);
   1912 }
   1913 
   1914 void MNaNToZero::computeRange(TempAllocator& alloc) {
   1915  Range other(input());
   1916  setRange(Range::NaNToZero(alloc, &other));
   1917 }
   1918 
   1919 ///////////////////////////////////////////////////////////////////////////////
   1920 // Range Analysis
   1921 ///////////////////////////////////////////////////////////////////////////////
   1922 
   1923 static BranchDirection NegateBranchDirection(BranchDirection dir) {
   1924  return (dir == FALSE_BRANCH) ? TRUE_BRANCH : FALSE_BRANCH;
   1925 }
   1926 
bool RangeAnalysis::analyzeLoop(const MBasicBlock* header) {
  // Analyze one natural loop: derive an iteration-count bound from a test
  // dominating the backedge, attach symbolic ranges to the header phis, and
  // hoist loop-invariant bounds checks. Returns false only on OOM or
  // compilation cancellation; analysis failures just leave the loop alone.
  MOZ_ASSERT(header->hasUniqueBackedge());

  // Try to compute an upper bound on the number of times the loop backedge
  // will be taken. Look for tests that dominate the backedge and which have
  // an edge leaving the loop body.
  MBasicBlock* backedge = header->backedge();

  // Ignore trivial infinite loops.
  if (backedge == header) {
    return true;
  }

  bool canOsr;
  size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);

  // Ignore broken loops.
  if (numBlocks == 0) {
    return true;
  }

  LoopIterationBound* iterationBound = nullptr;

  // Walk up the dominator tree from the backedge; a branch whose other
  // successor is unmarked (i.e. outside the loop) is a loop exit that can
  // bound the number of backedge executions.
  MBasicBlock* block = backedge;
  do {
    BranchDirection direction;
    MTest* branch = block->immediateDominatorBranch(&direction);

    if (block == block->immediateDominator()) {
      break;
    }

    block = block->immediateDominator();

    if (branch) {
      direction = NegateBranchDirection(direction);
      MBasicBlock* otherBlock = branch->branchSuccessor(direction);
      if (!otherBlock->isMarked()) {
        if (!alloc().ensureBallast()) {
          return false;
        }
        iterationBound = analyzeLoopIterationCount(header, branch, direction);
        if (iterationBound) {
          break;
        }
      }
    }
  } while (block != header);

  if (!iterationBound) {
    UnmarkLoopBlocks(graph_, header);
    return true;
  }

  if (!loopIterationBounds.append(iterationBound)) {
    return false;
  }

#ifdef DEBUG
  if (JitSpewEnabled(JitSpew_Range)) {
    Sprinter sp(GetJitContext()->cx);
    if (!sp.init()) {
      return false;
    }
    iterationBound->boundSum.dump(sp);
    JS::UniqueChars str = sp.release();
    if (!str) {
      return false;
    }
    JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
            str.get());
  }
#endif

  // Try to compute symbolic bounds for the phi nodes at the head of this
  // loop, expressed in terms of the iteration bound just computed.

  for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd();
       iter++) {
    analyzeLoopPhi(iterationBound, *iter);
  }

  // Hoisting is skipped for wasm and when a previous compilation already
  // bailed out on a hoisted bounds check.
  if (!mir->compilingWasm() && !mir->outerInfo().hadBoundsCheckBailout()) {
    // Try to hoist any bounds checks from the loop using symbolic bounds.

    Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());

    for (ReversePostorderIterator iter(graph_.rpoBegin(header));
         iter != graph_.rpoEnd(); iter++) {
      if (mir->shouldCancel("RangeAnalysis analyzeLoop")) {
        return false;
      }

      MBasicBlock* block = *iter;
      if (!block->isMarked()) {
        continue;
      }

      for (MDefinitionIterator iter(block); iter; iter++) {
        MDefinition* def = *iter;
        if (def->isBoundsCheck() && def->isMovable()) {
          if (!alloc().ensureBallast()) {
            return false;
          }
          if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
            if (!hoistedChecks.append(def->toBoundsCheck())) {
              return false;
            }
          }
        }
      }
    }

    // Note: replace all uses of the original bounds check with the
    // actual index. This is usually done during bounds check elimination,
    // but in this case it's safe to do it here since the load/store is
    // definitely not loop-invariant, so we will never move it before
    // one of the bounds checks we just added.
    for (size_t i = 0; i < hoistedChecks.length(); i++) {
      MBoundsCheck* ins = hoistedChecks[i];
      ins->replaceAllUsesWith(ins->index());
      ins->block()->discard(ins);
    }
  }

  UnmarkLoopBlocks(graph_, header);
  return true;
}
   2055 
   2056 // Unbox beta nodes in order to hoist instruction properly, and not be limited
   2057 // by the beta nodes which are added after each branch.
   2058 static inline MDefinition* DefinitionOrBetaInputDefinition(MDefinition* ins) {
   2059  while (ins->isBeta()) {
   2060    ins = ins->toBeta()->input();
   2061  }
   2062  return ins;
   2063 }
   2064 
LoopIterationBound* RangeAnalysis::analyzeLoopIterationCount(
    const MBasicBlock* header, const MTest* test, BranchDirection direction) {
  // Given a loop-exiting |test| (taken in |direction| to stay in the loop),
  // try to express an upper bound on the number of backedges taken as a
  // linear sum of loop-invariant terms. Returns nullptr when no such bound
  // can be established. All constant arithmetic is overflow-checked.
  SimpleLinearSum lhs(nullptr, 0);
  MDefinition* rhs;
  bool lessEqual;
  if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual)) {
    return nullptr;
  }

  // Ensure the rhs is a loop invariant term.
  if (rhs && rhs->block()->isMarked()) {
    if (lhs.term && lhs.term->block()->isMarked()) {
      return nullptr;
    }
    // Swap the sides of the inequality so the loop-varying term is the lhs.
    MDefinition* temp = lhs.term;
    lhs.term = rhs;
    rhs = temp;
    if (!mozilla::SafeSub(0, lhs.constant, &lhs.constant)) {
      return nullptr;
    }
    lessEqual = !lessEqual;
  }

  MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());

  // Ensure the lhs is a phi node from the start of the loop body.
  if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header) {
    return nullptr;
  }

  // Check that the value of the lhs changes by a constant amount with each
  // loop iteration. This requires that the lhs be written in every loop
  // iteration with a value that is a constant difference from its value at
  // the start of the iteration.

  if (lhs.term->toPhi()->numOperands() != 2) {
    return nullptr;
  }

  // The first operand of the phi should be the lhs' value at the start of
  // the first executed iteration, and not a value written which could
  // replace the second operand below during the middle of execution.
  MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
  if (lhsInitial->block()->isMarked()) {
    return nullptr;
  }

  // The second operand of the phi should be a value written by an add/sub
  // in every loop iteration, i.e. in a block which dominates the backedge.
  MDefinition* lhsWrite = DefinitionOrBetaInputDefinition(
      lhs.term->toPhi()->getLoopBackedgeOperand());
  if (!lhsWrite->isAdd() && !lhsWrite->isSub()) {
    return nullptr;
  }
  if (!lhsWrite->block()->isMarked()) {
    return nullptr;
  }
  // Walk the dominator tree from the backedge to confirm the write's block
  // dominates the backedge (i.e. it executes on every iteration).
  MBasicBlock* bb = header->backedge();
  for (; bb != lhsWrite->block() && bb != header;
       bb = bb->immediateDominator()) {
  }
  if (bb != lhsWrite->block()) {
    return nullptr;
  }

  SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

  // Check that the value of the lhs at the backedge is of the form
  // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
  // of the iteration, and not that written to lhs in a previous iteration,
  // as such a previous value could not appear directly in the addition:
  // it could not be stored in lhs as the lhs add/sub executes in every
  // iteration, and if it were stored in another variable its use here would
  // be as an operand to a phi node for that variable.
  if (lhsModified.term != lhs.term) {
    return nullptr;
  }

  LinearSum iterationBound(alloc());

  // Only unit steps (+1 with a '<'-style exit, or -1 with a '<='-style
  // exit) are handled below.
  if (lhsModified.constant == 1 && !lessEqual) {
    // The value of lhs is 'initial(lhs) + iterCount' and this will end
    // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
    // on the number of backedges executed is:
    //
    // initial(lhs) + iterCount + lhsN == rhs
    // iterCount == rhsN - initial(lhs) - lhsN

    if (rhs) {
      if (!iterationBound.add(rhs, 1)) {
        return nullptr;
      }
    }
    if (!iterationBound.add(lhsInitial, -1)) {
      return nullptr;
    }

    int32_t lhsConstant;
    if (!mozilla::SafeSub(0, lhs.constant, &lhsConstant)) {
      return nullptr;
    }
    if (!iterationBound.add(lhsConstant)) {
      return nullptr;
    }
  } else if (lhsModified.constant == -1 && lessEqual) {
    // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
    // case, an upper bound on the number of backedges executed is:
    //
    // initial(lhs) - iterCount + lhsN == rhs
    // iterCount == initial(lhs) - rhs + lhsN

    if (!iterationBound.add(lhsInitial, 1)) {
      return nullptr;
    }
    if (rhs) {
      if (!iterationBound.add(rhs, -1)) {
        return nullptr;
      }
    }
    if (!iterationBound.add(lhs.constant)) {
      return nullptr;
    }
  } else {
    return nullptr;
  }

  return new (alloc()) LoopIterationBound(test, iterationBound);
}
   2193 
void RangeAnalysis::analyzeLoopPhi(const LoopIterationBound* loopBound,
                                   MPhi* phi) {
  // Given a bound on the number of backedges taken, compute an upper and
  // lower bound for a phi node that may change by a constant amount each
  // iteration. Unlike for the case when computing the iteration bound
  // itself, the phi does not need to change the same amount every iteration,
  // but is required to change at most N and be either nondecreasing or
  // nonincreasing.

  MOZ_ASSERT(phi->numOperands() == 2);

  // The loop entry value must itself be loop invariant.
  MDefinition* initial = phi->getLoopPredecessorOperand();
  if (initial->block()->isMarked()) {
    return;
  }

  SimpleLinearSum modified =
      ExtractLinearSum(phi->getLoopBackedgeOperand(), MathSpace::Infinite);

  // Require the backedge value to be 'phi + constant' with a non-zero step.
  if (modified.term != phi || modified.constant == 0) {
    return;
  }

  if (!phi->range()) {
    phi->setRange(new (alloc()) Range(phi));
  }

  LinearSum initialSum(alloc());
  if (!initialSum.add(initial, 1)) {
    return;
  }

  // The phi may change by N each iteration, and is either nondecreasing or
  // nonincreasing. initial(phi) is either a lower or upper bound for the
  // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
  // at all points within the loop, provided that loopBound >= 0.
  //
  // We are more interested, however, in the bound for phi at points
  // dominated by the loop bound's test; if the test dominates e.g. a bounds
  // check we want to hoist from the loop, using the value of the phi at the
  // head of the loop for this will usually be too imprecise to hoist the
  // check. These points will execute only if the backedge executes at least
  // one more time (as the test passed and the test dominates the backedge),
  // so we know both that loopBound >= 1 and that the phi's value has changed
  // at most loopBound - 1 times. Thus, another upper or lower bound for the
  // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
  // ensure that loopBound >= 0.

  LinearSum limitSum(loopBound->boundSum);
  if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) {
    return;
  }

  // Subtract one step: this is the '(loopBound - 1) * N' adjustment above.
  int32_t negativeConstant;
  if (!mozilla::SafeSub(0, modified.constant, &negativeConstant) ||
      !limitSum.add(negativeConstant)) {
    return;
  }

  // Which of the two sums is the lower vs. upper bound depends on the sign
  // of the per-iteration step.
  Range* initRange = initial->range();
  if (modified.constant > 0) {
    if (initRange && initRange->hasInt32LowerBound()) {
      phi->range()->refineLower(initRange->lower());
    }
    phi->range()->setSymbolicLower(
        SymbolicBound::New(alloc(), nullptr, initialSum));
    phi->range()->setSymbolicUpper(
        SymbolicBound::New(alloc(), loopBound, limitSum));
  } else {
    if (initRange && initRange->hasInt32UpperBound()) {
      phi->range()->refineUpper(initRange->upper());
    }
    phi->range()->setSymbolicUpper(
        SymbolicBound::New(alloc(), nullptr, initialSum));
    phi->range()->setSymbolicLower(
        SymbolicBound::New(alloc(), loopBound, limitSum));
  }

  JitSpew(JitSpew_Range, "added symbolic range on %u", phi->id());
  SpewRange(phi);
}
   2275 
   2276 // Whether bound is valid at the specified bounds check instruction in a loop,
   2277 // and may be used to hoist ins.
   2278 static inline bool SymbolicBoundIsValid(const MBasicBlock* header,
   2279                                        const MBoundsCheck* ins,
   2280                                        const SymbolicBound* bound) {
   2281  if (!bound->loop) {
   2282    return true;
   2283  }
   2284  if (ins->block() == header) {
   2285    return false;
   2286  }
   2287  MBasicBlock* bb = ins->block()->immediateDominator();
   2288  while (bb != header && bb != bound->loop->test->block()) {
   2289    bb = bb->immediateDominator();
   2290  }
   2291  return bb == bound->loop->test->block();
   2292 }
   2293 
bool RangeAnalysis::tryHoistBoundsCheck(const MBasicBlock* header,
                                        const MBoundsCheck* ins) {
  // Try to replace the in-loop bounds check |ins| with equivalent
  // loop-invariant checks inserted into the loop preheader, using the
  // symbolic bounds computed for the check's index. Returns true when the
  // hoisted checks were inserted (the caller then removes |ins|).

  // The bounds check's length must be loop invariant or a constant.
  MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
  if (length->block()->isMarked() && !length->isConstant()) {
    return false;
  }

  // The bounds check's index should not be loop invariant (else we would
  // already have hoisted it during LICM).
  SimpleLinearSum index = ExtractLinearSum(ins->index());
  if (!index.term || !index.term->block()->isMarked()) {
    return false;
  }

  // Check for a symbolic lower and upper bound on the index. If either
  // condition depends on an iteration bound for the loop, only hoist if
  // the bounds check is dominated by the iteration bound's test.
  if (!index.term->range()) {
    return false;
  }
  const SymbolicBound* lower = index.term->range()->symbolicLower();
  if (!lower || !SymbolicBoundIsValid(header, ins, lower)) {
    return false;
  }
  const SymbolicBound* upper = index.term->range()->symbolicUpper();
  if (!upper || !SymbolicBoundIsValid(header, ins, upper)) {
    return false;
  }

  MBasicBlock* preLoop = header->loopPredecessor();
  MOZ_ASSERT(!preLoop->isMarked());

  // Materialize both symbolic sums as instructions in the preheader.
  MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum,
                                            BailoutKind::HoistBoundsCheck);
  if (!lowerTerm) {
    return false;
  }

  MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum,
                                            BailoutKind::HoistBoundsCheck);
  if (!upperTerm) {
    return false;
  }

  // We are checking that index + indexConstant >= 0, and know that
  // index >= lowerTerm + lowerConstant. Thus, check that:
  //
  // lowerTerm + lowerConstant + indexConstant >= 0
  // lowerTerm >= -lowerConstant - indexConstant

  int32_t lowerConstant = 0;
  if (!mozilla::SafeSub(lowerConstant, index.constant, &lowerConstant)) {
    return false;
  }
  if (!mozilla::SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) {
    return false;
  }

  // We are checking that index < boundsLength, and know that
  // index <= upperTerm + upperConstant. Thus, check that:
  //
  // upperTerm + upperConstant < boundsLength

  int32_t upperConstant = index.constant;
  if (!mozilla::SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) {
    return false;
  }

  // Hoist the loop invariant lower bounds checks.
  MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
  lowerCheck->setMinimum(lowerConstant);
  lowerCheck->computeRange(alloc());
  lowerCheck->collectRangeInfoPreTrunc();
  lowerCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
  preLoop->insertBefore(preLoop->lastIns(), lowerCheck);

  // A common pattern for iterating over typed arrays is this:
  //
  //   for (var i = 0; i < ta.length; i++) {
  //     use ta[i];
  //   }
  //
  // Here |upperTerm| (= ta.length) is a NonNegativeIntPtrToInt32 instruction.
  // Unwrap this if |length| is also an IntPtr so that we don't add an
  // unnecessary bounds check and Int32ToIntPtr below.
  if (upperTerm->isNonNegativeIntPtrToInt32() &&
      length->type() == MIRType::IntPtr) {
    upperTerm = upperTerm->toNonNegativeIntPtrToInt32()->input();
  }

  // Hoist the loop invariant upper bounds checks.
  if (upperTerm != length || upperConstant >= 0) {
    // Hoist the bound check's length if it isn't already loop invariant.
    if (length->block()->isMarked()) {
      MOZ_ASSERT(length->isConstant());
      MInstruction* lengthIns = length->toInstruction();
      lengthIns->block()->moveBefore(preLoop->lastIns(), lengthIns);
    }

    // If the length is IntPtr, convert the upperTerm to that as well for the
    // bounds check.
    if (length->type() == MIRType::IntPtr &&
        upperTerm->type() == MIRType::Int32) {
      upperTerm = MInt32ToIntPtr::New(alloc(), upperTerm);
      upperTerm->computeRange(alloc());
      upperTerm->collectRangeInfoPreTrunc();
      preLoop->insertBefore(preLoop->lastIns(), upperTerm->toInstruction());
    }

    MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
    upperCheck->setMinimum(upperConstant);
    upperCheck->setMaximum(upperConstant);
    upperCheck->computeRange(alloc());
    upperCheck->collectRangeInfoPreTrunc();
    upperCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
    preLoop->insertBefore(preLoop->lastIns(), upperCheck);
  }

  return true;
}
   2415 
// Propagate ranges through the graph in reverse postorder: compute a range
// for every definition, prune blocks dominated by unreachable blocks, run
// per-loop analysis at loop headers, and collect pre-truncation range info
// while beta nodes are still present.
bool RangeAnalysis::analyze() {
  JitSpew(JitSpew_Range, "Doing range propagation");

  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    if (mir->shouldCancel("RangeAnalysis analyze")) {
      return false;
    }

    MBasicBlock* block = *iter;
    // No blocks are supposed to be unreachable, except when we have an OSR
    // block, in which case the Value Numbering phase add fixup blocks which
    // are unreachable.
    MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());

    // If the block's immediate dominator is unreachable, the block is
    // unreachable. Iterating in RPO, we'll always see the immediate
    // dominator before the block.
    if (block->immediateDominator()->unreachable()) {
      block->setUnreachableUnchecked();
      continue;
    }

    for (MDefinitionIterator iter(block); iter; iter++) {
      MDefinition* def = *iter;
      // Keep ballast available so computeRange can allocate infallibly.
      if (!alloc().ensureBallast()) {
        return false;
      }

      def->computeRange(alloc());
      JitSpew(JitSpew_Range, "computing range on %u", def->id());
      SpewRange(def);
    }

    // Beta node range analysis may have marked this block unreachable. If
    // so, it's no longer interesting to continue processing it.
    if (block->unreachable()) {
      continue;
    }

    if (block->isLoopHeader()) {
      if (!analyzeLoop(block)) {
        return false;
      }
    }

    // First pass at collecting range info - while the beta nodes are still
    // around and before truncation.
    for (MInstructionIterator iter(block->begin()); iter != block->end();
         iter++) {
      iter->collectRangeInfoPreTrunc();
    }
  }

  return true;
}
   2472 
// Debug aid: when the checkRangeAnalysis option is enabled, insert an
// MAssertRange guard after every numeric definition with an interesting
// computed range so the range can be validated at runtime.
bool RangeAnalysis::addRangeAssertions() {
  if (!JitOptions.checkRangeAnalysis) {
    return true;
  }

  // Check the computed range for this instruction, if the option is set. Note
  // that this code is quite invasive; it adds numerous additional
  // instructions for each MInstruction with a computed range, and it uses
  // registers, so it also affects register allocation.
  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;

    // Do not add assertions in unreachable blocks.
    if (block->unreachable()) {
      continue;
    }

    for (MDefinitionIterator iter(block); iter; iter++) {
      MDefinition* ins = *iter;

      // Perform range checking for all numeric and numeric-like types.
      if (!IsNumberType(ins->type()) && ins->type() != MIRType::Boolean &&
          ins->type() != MIRType::Value) {
        continue;
      }

      // MIsNoIter is fused with the MTest that follows it and emitted as
      // LIsNoIterAndBranch. Similarly, MIteratorHasIndices is fused to
      // become LIteratorHasIndicesAndBranch and IteratorsMatchAndHaveIndices
      // becomes LIteratorsMatchAndHaveIndicesAndBranch. Skip them to avoid
      // complicating lowering.
      if (ins->isIsNoIter() || ins->isIteratorHasIndices() ||
          ins->isIteratorsMatchAndHaveIndices()) {
        MOZ_ASSERT(ins->hasOneUse());
        continue;
      }

      Range r(ins);

      // Int64 values are not tracked by range analysis.
      MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());

      // Don't insert assertions if there's nothing interesting to assert.
      if (r.isUnknown() ||
          (ins->type() == MIRType::Int32 && r.isUnknownInt32())) {
        continue;
      }

      // Don't add a use to an instruction that is recovered on bailout.
      if (ins->isRecoveredOnBailout()) {
        continue;
      }

      if (!alloc().ensureBallast()) {
        return false;
      }
      MAssertRange* guard =
          MAssertRange::New(alloc(), ins, new (alloc()) Range(r));

      // Beta nodes and interrupt checks are required to be located at the
      // beginnings of basic blocks, so we must insert range assertions
      // after any such instructions.
      MInstruction* insertAt = nullptr;
      if (block->graph().osrBlock() == block) {
        insertAt = ins->toInstruction();
      } else {
        insertAt = block->safeInsertTop(ins);
      }

      if (insertAt == *iter) {
        block->insertAfter(insertAt, guard);
      } else {
        block->insertBefore(insertAt, guard);
      }
    }
  }

  return true;
}
   2552 
   2553 ///////////////////////////////////////////////////////////////////////////////
   2554 // Range based Truncation
   2555 ///////////////////////////////////////////////////////////////////////////////
   2556 
   2557 void Range::clampToInt32() {
   2558  if (isInt32()) {
   2559    return;
   2560  }
   2561  int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
   2562  int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
   2563  setInt32(l, h);
   2564 }
   2565 
// Refine this range to describe the result of a wrap-around conversion to
// int32, clearing flags (fractional part, negative zero) that can no longer
// apply after such a conversion.
void Range::wrapAroundToInt32() {
  if (!hasInt32Bounds()) {
    // Without int32 bounds the wrapped value can be anything in int32.
    setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
  } else if (canHaveFractionalPart()) {
    // Clearing the fractional field may provide an opportunity to refine
    // lower_ or upper_.
    canHaveFractionalPart_ = ExcludesFractionalParts;
    canBeNegativeZero_ = ExcludesNegativeZero;
    refineInt32BoundsByExponent(max_exponent_, &lower_, &hasInt32LowerBound_,
                                &upper_, &hasInt32UpperBound_);

    assertInvariants();
  } else {
    // If nothing else, we can clear the negative zero flag.
    canBeNegativeZero_ = ExcludesNegativeZero;
  }
  MOZ_ASSERT(isInt32());
}
   2584 
   2585 void Range::wrapAroundToShiftCount() {
   2586  wrapAroundToInt32();
   2587  if (lower() < 0 || upper() >= 32) {
   2588    setInt32(0, 31);
   2589  }
   2590 }
   2591 
   2592 void Range::wrapAroundToBoolean() {
   2593  wrapAroundToInt32();
   2594  if (!isBoolean()) {
   2595    setInt32(0, 1);
   2596  }
   2597  MOZ_ASSERT(isBoolean());
   2598 }
   2599 
   2600 bool MDefinition::canTruncate() const {
   2601  // No procedure defined for truncating this instruction.
   2602  return false;
   2603 }
   2604 
// Default: unreachable — canTruncate() returned false, so callers must not
// attempt to truncate this instruction.
void MDefinition::truncate(TruncateKind kind) {
  MOZ_CRASH("No procedure defined for truncating this instruction.");
}
   2608 
   2609 bool MConstant::canTruncate() const { return IsFloatingPointType(type()); }
   2610 
   2611 void MConstant::truncate(TruncateKind kind) {
   2612  MOZ_ASSERT(canTruncate());
   2613 
   2614  // Truncate the double to int, since all uses truncates it.
   2615  int32_t res = ToInt32(numberToDouble());
   2616  payload_.asBits = 0;
   2617  payload_.i32 = res;
   2618  setResultType(MIRType::Int32);
   2619  if (range()) {
   2620    range()->setInt32(res, res);
   2621  }
   2622 }
   2623 
   2624 bool MPhi::canTruncate() const {
   2625  return type() == MIRType::Double || type() == MIRType::Int32;
   2626 }
   2627 
   2628 void MPhi::truncate(TruncateKind kind) {
   2629  MOZ_ASSERT(canTruncate());
   2630  truncateKind_ = kind;
   2631  setResultType(MIRType::Int32);
   2632  if (kind >= TruncateKind::IndirectTruncate && range()) {
   2633    range()->wrapAroundToInt32();
   2634  }
   2635 }
   2636 
   2637 bool MAdd::canTruncate() const {
   2638  return type() == MIRType::Double || type() == MIRType::Int32;
   2639 }
   2640 
   2641 void MAdd::truncate(TruncateKind kind) {
   2642  MOZ_ASSERT(canTruncate());
   2643 
   2644  // Remember analysis, needed for fallible checks.
   2645  setTruncateKind(kind);
   2646 
   2647  setSpecialization(MIRType::Int32);
   2648  if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
   2649    range()->wrapAroundToInt32();
   2650  }
   2651 }
   2652 
   2653 bool MSub::canTruncate() const {
   2654  return type() == MIRType::Double || type() == MIRType::Int32;
   2655 }
   2656 
   2657 void MSub::truncate(TruncateKind kind) {
   2658  MOZ_ASSERT(canTruncate());
   2659 
   2660  // Remember analysis, needed for fallible checks.
   2661  setTruncateKind(kind);
   2662  setSpecialization(MIRType::Int32);
   2663  if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
   2664    range()->wrapAroundToInt32();
   2665  }
   2666 }
   2667 
   2668 bool MMul::canTruncate() const {
   2669  return type() == MIRType::Double || type() == MIRType::Int32;
   2670 }
   2671 
   2672 void MMul::truncate(TruncateKind kind) {
   2673  MOZ_ASSERT(canTruncate());
   2674 
   2675  // Remember analysis, needed for fallible checks.
   2676  setTruncateKind(kind);
   2677  setSpecialization(MIRType::Int32);
   2678  if (truncateKind() >= TruncateKind::IndirectTruncate) {
   2679    setCanBeNegativeZero(false);
   2680    if (range()) {
   2681      range()->wrapAroundToInt32();
   2682    }
   2683  }
   2684 }
   2685 
   2686 bool MDiv::canTruncate() const {
   2687  return type() == MIRType::Double || type() == MIRType::Int32;
   2688 }
   2689 
   2690 void MDiv::truncate(TruncateKind kind) {
   2691  MOZ_ASSERT(canTruncate());
   2692 
   2693  // Remember analysis, needed for fallible checks.
   2694  setTruncateKind(kind);
   2695  setSpecialization(MIRType::Int32);
   2696 
   2697  // Divisions where the lhs and rhs are unsigned and the result is
   2698  // truncated can be lowered more efficiently.
   2699  if (unsignedOperands()) {
   2700    replaceWithUnsignedOperands();
   2701    unsigned_ = true;
   2702  }
   2703 }
   2704 
   2705 bool MMod::canTruncate() const {
   2706  return type() == MIRType::Double || type() == MIRType::Int32;
   2707 }
   2708 
   2709 void MMod::truncate(TruncateKind kind) {
   2710  // As for division, handle unsigned modulus with a truncated result.
   2711  MOZ_ASSERT(canTruncate());
   2712 
   2713  // Remember analysis, needed for fallible checks.
   2714  setTruncateKind(kind);
   2715  setSpecialization(MIRType::Int32);
   2716 
   2717  if (unsignedOperands()) {
   2718    replaceWithUnsignedOperands();
   2719    unsigned_ = true;
   2720  }
   2721 }
   2722 
   2723 bool MToDouble::canTruncate() const {
   2724  MOZ_ASSERT(type() == MIRType::Double);
   2725  return true;
   2726 }
   2727 
   2728 void MToDouble::truncate(TruncateKind kind) {
   2729  MOZ_ASSERT(canTruncate());
   2730  setTruncateKind(kind);
   2731 
   2732  // We use the return type to flag that this MToDouble should be replaced by
   2733  // a MTruncateToInt32 when modifying the graph.
   2734  setResultType(MIRType::Int32);
   2735  if (truncateKind() >= TruncateKind::IndirectTruncate) {
   2736    if (range()) {
   2737      range()->wrapAroundToInt32();
   2738    }
   2739  }
   2740 }
   2741 
   2742 bool MLimitedTruncate::canTruncate() const { return true; }
   2743 
   2744 void MLimitedTruncate::truncate(TruncateKind kind) {
   2745  MOZ_ASSERT(canTruncate());
   2746  setTruncateKind(kind);
   2747  setResultType(MIRType::Int32);
   2748  if (kind >= TruncateKind::IndirectTruncate && range()) {
   2749    range()->wrapAroundToInt32();
   2750  }
   2751 }
   2752 
   2753 bool MCompare::canTruncate() const {
   2754  if (!isDoubleComparison()) {
   2755    return false;
   2756  }
   2757 
   2758  // If both operands are naturally in the int32 range, we can convert from
   2759  // a double comparison to being an int32 comparison.
   2760  if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) {
   2761    return false;
   2762  }
   2763 
   2764  return true;
   2765 }
   2766 
   2767 void MCompare::truncate(TruncateKind kind) {
   2768  MOZ_ASSERT(canTruncate());
   2769  compareType_ = Compare_Int32;
   2770 
   2771  // Truncating the operands won't change their value because we don't force a
   2772  // truncation, but it will change their type, which we need because we
   2773  // now expect integer inputs.
   2774  truncateOperands_ = true;
   2775 }
   2776 
   2777 TruncateKind MDefinition::operandTruncateKind(size_t index) const {
   2778  // Generic routine: We don't know anything.
   2779  return TruncateKind::NoTruncate;
   2780 }
   2781 
   2782 TruncateKind MPhi::operandTruncateKind(size_t index) const {
   2783  // The truncation applied to a phi is effectively applied to the phi's
   2784  // operands.
   2785  return truncateKind_;
   2786 }
   2787 
   2788 TruncateKind MTruncateToInt32::operandTruncateKind(size_t index) const {
   2789  // This operator is an explicit truncate to int32.
   2790  return TruncateKind::Truncate;
   2791 }
   2792 
   2793 TruncateKind MBinaryBitwiseInstruction::operandTruncateKind(
   2794    size_t index) const {
   2795  // The bitwise operators truncate to int32.
   2796  return TruncateKind::Truncate;
   2797 }
   2798 
   2799 TruncateKind MLimitedTruncate::operandTruncateKind(size_t index) const {
   2800  return std::min(truncateKind(), truncateLimit_);
   2801 }
   2802 
   2803 TruncateKind MAdd::operandTruncateKind(size_t index) const {
   2804  // This operator is doing some arithmetic. If its result is truncated,
   2805  // it's an indirect truncate for its operands.
   2806  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
   2807 }
   2808 
   2809 TruncateKind MSub::operandTruncateKind(size_t index) const {
   2810  // See the comment in MAdd::operandTruncateKind.
   2811  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
   2812 }
   2813 
   2814 TruncateKind MMul::operandTruncateKind(size_t index) const {
   2815  // See the comment in MAdd::operandTruncateKind.
   2816  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
   2817 }
   2818 
   2819 TruncateKind MToDouble::operandTruncateKind(size_t index) const {
   2820  // MToDouble propagates its truncate kind to its operand.
   2821  return truncateKind();
   2822 }
   2823 
   2824 TruncateKind MStoreUnboxedScalar::operandTruncateKind(size_t index) const {
   2825  // An integer store truncates the stored value.
   2826  return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
   2827                                          : TruncateKind::NoTruncate;
   2828 }
   2829 
   2830 TruncateKind MStoreDataViewElement::operandTruncateKind(size_t index) const {
   2831  // An integer store truncates the stored value.
   2832  return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
   2833                                          : TruncateKind::NoTruncate;
   2834 }
   2835 
   2836 TruncateKind MStoreTypedArrayElementHole::operandTruncateKind(
   2837    size_t index) const {
   2838  // An integer store truncates the stored value.
   2839  return (index == 3 && isIntegerWrite()) ? TruncateKind::Truncate
   2840                                          : TruncateKind::NoTruncate;
   2841 }
   2842 
   2843 TruncateKind MTypedArrayFill::operandTruncateKind(size_t index) const {
   2844  // An integer store truncates the stored value.
   2845  return (index == 1 && isIntegerWrite()) ? TruncateKind::Truncate
   2846                                          : TruncateKind::NoTruncate;
   2847 }
   2848 
   2849 TruncateKind MDiv::operandTruncateKind(size_t index) const {
   2850  return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
   2851 }
   2852 
   2853 TruncateKind MMod::operandTruncateKind(size_t index) const {
   2854  return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
   2855 }
   2856 
   2857 TruncateKind MCompare::operandTruncateKind(size_t index) const {
   2858  // If we're doing an int32 comparison on operands which were previously
   2859  // floating-point, convert them!
   2860  MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
   2861  return truncateOperands_ ? TruncateKind::TruncateAfterBailouts
   2862                           : TruncateKind::NoTruncate;
   2863 }
   2864 
// Rewrite a Value-typed phi feeding an MTest so the test runs on raw int32
// values, provided every phi operand is a boxed int32 or boolean. Returns
// false only on allocation failure; "nothing to do" returns true.
static bool TruncateTest(TempAllocator& alloc, const MTest* test) {
  // If all possible inputs to the test are either int32 or boolean,
  // convert those inputs to int32 so that an int32 test can be performed.

  if (test->input()->type() != MIRType::Value) {
    return true;
  }

  // Only handle a single-use phi that isn't implicitly used elsewhere.
  if (!test->input()->isPhi() || !test->input()->hasOneDefUse() ||
      test->input()->isImplicitlyUsed()) {
    return true;
  }

  MPhi* phi = test->input()->toPhi();
  // First pass: verify every operand is a box of an int32 or boolean; bail
  // out (successfully) otherwise.
  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* def = phi->getOperand(i);
    if (!def->isBox()) {
      return true;
    }
    MDefinition* inner = def->getOperand(0);
    if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32) {
      return true;
    }
  }

  // Second pass: replace each boxed operand with its payload, converting
  // booleans to int32 at the end of the defining block.
  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* inner = phi->getOperand(i)->getOperand(0);
    if (inner->type() != MIRType::Int32) {
      if (!alloc.ensureBallast()) {
        return false;
      }
      MBasicBlock* block = inner->block();
      inner = MToNumberInt32::New(alloc, inner);
      block->insertBefore(block->lastIns(), inner->toInstruction());
    }
    MOZ_ASSERT(inner->type() == MIRType::Int32);
    phi->replaceOperand(i, inner);
  }

  phi->setResultType(MIRType::Int32);
  return true;
}
   2907 
// Truncating an instruction result is an optimization which implies
// knowing all uses of an instruction.  This implies that if one of
// the uses got removed, then Range Analysis is not allowed to do
// any modification which can change the result, especially if the
// result can be observed.
//
// This corner case can easily be understood with UCE examples, but it
// might also happen with type inference assumptions.  Note: Type
// inference implicitly adds branches where other types might be
// flowing into.
static bool CloneForDeadBranches(TempAllocator& alloc,
                                 MInstruction* candidate) {
  // Compare returns a boolean so it doesn't have to be recovered on bailout
  // because the output would remain correct.
  if (candidate->isCompare()) {
    return true;
  }

  MOZ_ASSERT(candidate->canClone());
  if (!alloc.ensureBallast()) {
    return false;
  }

  // Gather the candidate's operands so the clone shares them.
  MDefinitionVector operands(alloc);
  size_t end = candidate->numOperands();
  if (!operands.reserve(end)) {
    return false;
  }
  for (size_t i = 0; i < end; ++i) {
    operands.infallibleAppend(candidate->getOperand(i));
  }

  MInstruction* clone = candidate->clone(alloc, operands);
  if (!clone) {
    return false;
  }
  // The clone must not inherit the candidate's (soon possibly truncated)
  // range.
  clone->setRange(nullptr);

  // Set ImplicitlyUsed flag on the cloned instruction in order to chain recover
  // instruction for the bailout path.
  clone->setImplicitlyUsedUnchecked();

  candidate->block()->insertBefore(candidate, clone);

  if (!candidate->maybeConstantValue()) {
    MOZ_ASSERT(clone->canRecoverOnBailout());
    clone->setRecoveredOnBailout();
  }

  // Replace the candidate by its recovered on bailout clone within recovered
  // instructions and resume points operands.
  for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd();) {
    MUse* use = *i++;
    MNode* ins = use->consumer();
    if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout()) {
      continue;
    }

    use->replaceProducer(clone);
  }

  return true;
}
   2971 
// Result of the truncate-kind analysis: the strongest truncation all users
// permit, plus whether the candidate must be cloned so the original value
// can be recovered on bailout.
struct ComputedTruncateKind {
  TruncateKind kind = TruncateKind::NoTruncate;
  bool shouldClone = false;
};
   2976 
// Examine all the users of |candidate| and determine the most aggressive
// truncate kind that satisfies all of them.
static ComputedTruncateKind ComputeRequestedTruncateKind(
    const MDefinition* candidate) {
  // Don't call this method when truncation isn't supported, because the result
  // isn't used anyway.
  MOZ_ASSERT(candidate->canTruncate());

  bool isCapturedResult =
      false;  // Check if used by a recovered instruction or a resume point.
  bool isObservableResult =
      false;  // Check if it can be read from another frame.
  bool isRecoverableResult = true;  // Check if it can safely be reconstructed.
  bool isImplicitlyUsed = candidate->isImplicitlyUsed();
  bool hasTryBlock = candidate->block()->graph().hasTryBlock();

  // Start from the strongest kind and weaken it as consumers demand.
  TruncateKind kind = TruncateKind::Truncate;
  for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd();
       use++) {
    if (use->consumer()->isResumePoint()) {
      // Truncation is a destructive optimization, as such, we need to pay
      // attention to removed branches and prevent optimization
      // destructive optimizations if we have no alternative. (see
      // ImplicitlyUsed flag)
      isCapturedResult = true;
      isObservableResult =
          isObservableResult ||
          use->consumer()->toResumePoint()->isObservableOperand(*use);
      isRecoverableResult =
          isRecoverableResult &&
          use->consumer()->toResumePoint()->isRecoverableOperand(*use);
      continue;
    }

    MDefinition* consumer = use->consumer()->toDefinition();
    if (consumer->isRecoveredOnBailout()) {
      isCapturedResult = true;
      isImplicitlyUsed = isImplicitlyUsed || consumer->isImplicitlyUsed();
      continue;
    }

    // The overall kind is the weakest kind requested by any consumer.
    TruncateKind consumerKind =
        consumer->operandTruncateKind(consumer->indexOf(*use));
    kind = std::min(kind, consumerKind);
    if (kind == TruncateKind::NoTruncate) {
      break;
    }
  }

  // We cannot do full truncation on guarded instructions.
  if (candidate->isGuard() || candidate->isGuardRangeBailouts()) {
    kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
  }

  // If the value naturally produces an int32 value (before bailout checks)
  // that needs no conversion, we don't have to worry about resume points
  // seeing truncated values.
  bool needsConversion = !candidate->range() || !candidate->range()->isInt32();

  // If the instruction is explicitly truncated (not indirectly) by all its
  // uses and if it is not implicitly used, then we can safely encode its
  // truncated result as part of the resume point operands.  This is safe,
  // because even if we resume with a truncated double, the next baseline
  // instruction operating on this instruction is going to be a no-op.
  //
  // Note, that if the result can be observed from another frame, then this
  // optimization is not safe. Similarly, if this function contains a try
  // block, the result could be observed from a catch block, which we do
  // not compile.
  bool safeToConvert = kind == TruncateKind::Truncate && !isImplicitlyUsed &&
                       !isObservableResult && !hasTryBlock;

  // If the candidate instruction appears as operand of a resume point or a
  // recover instruction, and we have to truncate its result, then we might
  // have to either recover the result during the bailout, or avoid the
  // truncation.
  bool shouldClone = false;
  if (isCapturedResult && needsConversion && !safeToConvert) {
    // If the result can be recovered from all the resume points (not needed
    // for iterating over the inlined frames), and this instruction can be
    // recovered on bailout, then we can clone it and use the cloned
    // instruction to encode the recover instruction.  Otherwise, we should
    // keep the original result and bailout if the value is not in the int32
    // range.
    if (!JitOptions.disableRecoverIns && isRecoverableResult &&
        candidate->canRecoverOnBailout()) {
      shouldClone = true;
    } else {
      kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
    }
  }

  return {kind, shouldClone};
}
   3071 
// Determine whether |candidate| may be truncated at all (no rounding errors
// possible), and if so, delegate to ComputeRequestedTruncateKind to find the
// strongest kind its users allow.
static ComputedTruncateKind ComputeTruncateKind(const MDefinition* candidate) {
  // Don't call this method when truncation isn't supported, because the result
  // isn't used anyway.
  MOZ_ASSERT(candidate->canTruncate());

  // Compare operations might coerce its inputs to int32 if the ranges are
  // correct.  So we do not need to check if all uses are coerced.
  if (candidate->isCompare()) {
    return {TruncateKind::TruncateAfterBailouts};
  }

  // Set truncated flag if range analysis ensure that it has no
  // rounding errors and no fractional part. Note that we can't use
  // the MDefinition Range constructor, because we need to know if
  // the value will have rounding errors before any bailout checks.
  const Range* r = candidate->range();
  bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();

  // Special case integer division and modulo: a/b can be infinite, and a%b
  // can be NaN but cannot actually have rounding errors induced by truncation.
  if ((candidate->isDiv() || candidate->isMod()) &&
      candidate->type() == MIRType::Int32) {
    canHaveRoundingErrors = false;
  }

  if (canHaveRoundingErrors) {
    return {TruncateKind::NoTruncate};
  }

  // Ensure all observable uses are truncated.
  return ComputeRequestedTruncateKind(candidate);
}
   3104 
   3105 static void RemoveTruncatesOnOutput(MDefinition* truncated) {
   3106  // Compare returns a boolean so it doesn't have any output truncates.
   3107  if (truncated->isCompare()) {
   3108    return;
   3109  }
   3110 
   3111  MOZ_ASSERT(truncated->type() == MIRType::Int32);
   3112  MOZ_ASSERT(Range(truncated).isInt32());
   3113 
   3114  for (MUseDefIterator use(truncated); use; use++) {
   3115    MDefinition* def = use.def();
   3116    if (!def->isTruncateToInt32() || !def->isToNumberInt32()) {
   3117      continue;
   3118    }
   3119 
   3120    def->replaceAllUsesWith(truncated);
   3121  }
   3122 }
   3123 
// Rewrite the operands of a truncated instruction so they produce int32
// values: reuse the int32 input of an MToDouble when possible, otherwise
// insert an explicit conversion chosen by the operand's truncate kind.
void RangeAnalysis::adjustTruncatedInputs(MDefinition* truncated) {
  MBasicBlock* block = truncated->block();
  for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
    TruncateKind kind = truncated->operandTruncateKind(i);
    if (kind == TruncateKind::NoTruncate) {
      continue;
    }

    MDefinition* input = truncated->getOperand(i);
    if (input->type() == MIRType::Int32) {
      continue;
    }

    // An int32-to-double conversion can be bypassed entirely: consume its
    // int32 input directly.
    if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
      truncated->replaceOperand(i, input->getOperand(0));
    } else {
      MInstruction* op;
      if (kind == TruncateKind::TruncateAfterBailouts) {
        MOZ_ASSERT(!mir->outerInfo().hadEagerTruncationBailout());
        op = MToNumberInt32::New(alloc(), truncated->getOperand(i));
        op->setBailoutKind(BailoutKind::EagerTruncation);
      } else {
        op = MTruncateToInt32::New(alloc(), truncated->getOperand(i));
      }

      // A phi's i-th operand must be converted at the end of the i-th
      // predecessor block.
      if (truncated->isPhi()) {
        MBasicBlock* pred = block->getPredecessor(i);
        pred->insertBefore(pred->lastIns(), op);
      } else {
        block->insertBefore(truncated->toInstruction(), op);
      }
      truncated->replaceOperand(i, op);
    }
  }

  // An MToDouble flagged for truncation (see MToDouble::truncate) is now a
  // no-op: forward its input and discard it.
  if (truncated->isToDouble()) {
    truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
    block->discard(truncated->toToDouble());
  }
}
   3164 
   3165 bool RangeAnalysis::canTruncate(const MDefinition* def,
   3166                                TruncateKind kind) const {
   3167  // Don't call this method when truncation isn't supported, because the result
   3168  // isn't used anyway.
   3169  MOZ_ASSERT(def->canTruncate());
   3170 
   3171  if (kind == TruncateKind::NoTruncate) {
   3172    return false;
   3173  }
   3174 
   3175  // Range Analysis is sometimes eager to do optimizations, even if we
   3176  // are not able to truncate an instruction. In such case, we
   3177  // speculatively compile the instruction to an int32 instruction
   3178  // while adding a guard. This is what is implied by
   3179  // TruncateAfterBailout.
   3180  //
   3181  // If a previous compilation was invalidated because a speculative
   3182  // truncation bailed out, we no longer attempt to make this kind of
   3183  // eager optimization.
   3184  if (mir->outerInfo().hadEagerTruncationBailout()) {
   3185    if (kind == TruncateKind::TruncateAfterBailouts) {
   3186      return false;
   3187    }
   3188    // MDiv and MMod always require TruncateAfterBailout for their operands.
   3189    // See MDiv::operandTruncateKind and MMod::operandTruncateKind.
   3190    if (def->isDiv() || def->isMod()) {
   3191      return false;
   3192    }
   3193  }
   3194 
   3195  return true;
   3196 }
   3197 
// Iterate backward on all instruction and attempt to truncate operations for
// each instruction which respect the following list of predicates: Has been
// analyzed by range analysis, the range has no rounding errors, all uses cases
// are truncating the result.
//
// If the truncation of the operation is successful, then the instruction is
// queue for later updating the graph to restore the type correctness by
// converting the operands that need to be truncated.
//
// We iterate backward because it is likely that a truncated operation truncates
// some of its operands.
bool RangeAnalysis::truncate() {
  JitSpew(JitSpew_Range, "Do range-base truncation (backward loop)");

  // Automatic truncation is disabled for wasm because the truncation logic
  // is based on IonMonkey which assumes that we can bailout if the truncation
  // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
  // any automatic truncations.
  MOZ_ASSERT(!mir->compilingWasm());

  // Definitions whose truncation succeeded; their inputs and outputs are
  // fixed up in the dequeue pass below, once all decisions have been made.
  Vector<MDefinition*, 16, SystemAllocPolicy> worklist;

  for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd();
       block++) {
    if (mir->shouldCancel("RangeAnalysis truncate")) {
      return false;
    }

    for (MInstructionReverseIterator iter(block->rbegin());
         iter != block->rend(); iter++) {
      // Instructions kept only for recovering on bailout are not part of
      // the compiled code path, so there is nothing to truncate.
      if (iter->isRecoveredOnBailout()) {
        continue;
      }

      // Value-less instructions cannot be truncated themselves, but the
      // input of an MTest may still be simplified by TruncateTest.
      if (iter->type() == MIRType::None) {
        if (iter->isTest()) {
          if (!TruncateTest(alloc(), iter->toTest())) {
            return false;
          }
        }
        continue;
      }

      // Remember all bitop instructions for folding after range analysis.
      switch (iter->op()) {
        case MDefinition::Opcode::BitAnd:
        case MDefinition::Opcode::BitOr:
        case MDefinition::Opcode::BitXor:
        case MDefinition::Opcode::Lsh:
        case MDefinition::Opcode::Rsh:
        case MDefinition::Opcode::Ursh:
          if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter))) {
            return false;
          }
          break;
        default:;
      }

      // Skip instructions which can't be truncated.
      if (!iter->canTruncate()) {
        continue;
      }

      auto [kind, shouldClone] = ComputeTruncateKind(*iter);

      // Truncate this instruction if possible.
      if (!canTruncate(*iter, kind)) {
        continue;
      }

      SpewTruncate(*iter, kind, shouldClone);

      // If needed, clone the current instruction for keeping it for the
      // bailout path.  This give us the ability to truncate instructions
      // even after the removal of branches.
      if (shouldClone && !CloneForDeadBranches(alloc(), *iter)) {
        return false;
      }

      // TruncateAfterBailouts keeps the bailout code as-is and
      // continues with truncated operations, with the expectation
      // that we are unlikely to bail out. If we do bail out, then we
      // will set a flag in FinishBailoutToBaseline to prevent eager
      // truncation when we recompile, to avoid bailout loops.
      if (kind == TruncateKind::TruncateAfterBailouts) {
        iter->setBailoutKind(BailoutKind::EagerTruncation);
      }

      iter->truncate(kind);

      // Delay updates of inputs/outputs to avoid creating node which
      // would be removed by the truncation of the next operations.
      iter->setInWorklist();
      if (!worklist.append(*iter)) {
        return false;
      }
    }
    // Phis are visited separately from the instruction list. Note that,
    // unlike instructions above, a phi that would need cloning for dead
    // branches is simply not truncated (shouldClone check below).
    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
         iter != end; ++iter) {
      // Skip phis which can't be truncated.
      if (!iter->canTruncate()) {
        continue;
      }

      auto [kind, shouldClone] = ComputeTruncateKind(*iter);

      // Truncate this phi if possible.
      if (shouldClone || !canTruncate(*iter, kind)) {
        continue;
      }

      SpewTruncate(*iter, kind, shouldClone);

      iter->truncate(kind);

      // Delay updates of inputs/outputs to avoid creating node which
      // would be removed by the truncation of the next operations.
      iter->setInWorklist();
      if (!worklist.append(*iter)) {
        return false;
      }
    }
  }

  // Update inputs/outputs of truncated instructions.
  JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
  while (!worklist.empty()) {
    if (!alloc().ensureBallast()) {
      return false;
    }
    MDefinition* def = worklist.popCopy();
    def->setNotInWorklist();
    RemoveTruncatesOnOutput(def);
    adjustTruncatedInputs(def);
  }

  return true;
}
   3336 
   3337 bool RangeAnalysis::removeUnnecessaryBitops() {
   3338  JitSpew(JitSpew_Range, "Begin (removeUnnecessaryBitops)");
   3339  // Note: This operation change the semantic of the program in a way which
   3340  // uniquely works with Int32, Recover Instructions added by the Sink phase
   3341  // expects the MIR Graph to still have a valid flow as-if they were double
   3342  // operations instead of Int32 operations. Thus, this phase should be
   3343  // executed after the Sink phase, and before DCE.
   3344 
   3345  // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
   3346  // input. This is done after range analysis rather than during GVN as the
   3347  // presence of the bitop can change which instructions are truncated.
   3348  for (size_t i = 0; i < bitops.length(); i++) {
   3349    MBinaryBitwiseInstruction* ins = bitops[i];
   3350    if (ins->isRecoveredOnBailout()) {
   3351      continue;
   3352    }
   3353 
   3354    MDefinition* folded = ins->foldUnnecessaryBitop();
   3355    if (folded != ins) {
   3356      ins->replaceAllLiveUsesWith(folded);
   3357      ins->setRecoveredOnBailout();
   3358    }
   3359  }
   3360 
   3361  bitops.clear();
   3362  return true;
   3363 }
   3364 
   3365 ///////////////////////////////////////////////////////////////////////////////
   3366 // Collect Range information of operands
   3367 ///////////////////////////////////////////////////////////////////////////////
   3368 
   3369 void MInArray::collectRangeInfoPreTrunc() {
   3370  Range indexRange(index());
   3371  if (indexRange.isFiniteNonNegative()) {
   3372    needsNegativeIntCheck_ = false;
   3373    setNotGuard();
   3374  }
   3375 }
   3376 
   3377 void MLoadElementHole::collectRangeInfoPreTrunc() {
   3378  Range indexRange(index());
   3379  if (indexRange.isFiniteNonNegative()) {
   3380    needsNegativeIntCheck_ = false;
   3381    setNotGuard();
   3382  }
   3383 }
   3384 
   3385 void MInt32ToIntPtr::collectRangeInfoPreTrunc() {
   3386  Range inputRange(input());
   3387  if (inputRange.isFiniteNonNegative()) {
   3388    canBeNegative_ = false;
   3389  }
   3390 }
   3391 
   3392 void MClz::collectRangeInfoPreTrunc() {
   3393  Range inputRange(input());
   3394  if (!inputRange.canBeZero()) {
   3395    operandIsNeverZero_ = true;
   3396  }
   3397 }
   3398 
   3399 void MCtz::collectRangeInfoPreTrunc() {
   3400  Range inputRange(input());
   3401  if (!inputRange.canBeZero()) {
   3402    operandIsNeverZero_ = true;
   3403  }
   3404 }
   3405 
   3406 void MDiv::collectRangeInfoPreTrunc() {
   3407  Range lhsRange(lhs());
   3408  Range rhsRange(rhs());
   3409 
   3410  // Test if Dividend is non-negative.
   3411  if (lhsRange.isFiniteNonNegative()) {
   3412    canBeNegativeDividend_ = false;
   3413  }
   3414 
   3415  // Try removing divide by zero check.
   3416  if (!rhsRange.canBeZero()) {
   3417    canBeDivideByZero_ = false;
   3418  }
   3419 
   3420  // If lhsRange does not contain INT32_MIN in its range,
   3421  // negative overflow check can be skipped.
   3422  if (!lhsRange.contains(INT32_MIN)) {
   3423    canBeNegativeOverflow_ = false;
   3424  }
   3425 
   3426  // If rhsRange does not contain -1 likewise.
   3427  if (!rhsRange.contains(-1)) {
   3428    canBeNegativeOverflow_ = false;
   3429  }
   3430 
   3431  // If lhsRange does not contain a zero,
   3432  // negative zero check can be skipped.
   3433  if (!lhsRange.canBeZero()) {
   3434    canBeNegativeZero_ = false;
   3435  }
   3436 
   3437  // If rhsRange >= 0 negative zero check can be skipped.
   3438  if (rhsRange.isFiniteNonNegative()) {
   3439    canBeNegativeZero_ = false;
   3440  }
   3441 
   3442  if (type() == MIRType::Int32 && fallible()) {
   3443    setGuardRangeBailoutsUnchecked();
   3444  }
   3445 }
   3446 
   3447 void MMul::collectRangeInfoPreTrunc() {
   3448  Range lhsRange(lhs());
   3449  Range rhsRange(rhs());
   3450 
   3451  // If lhsRange contains only positive then we can skip negative zero check.
   3452  if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero()) {
   3453    setCanBeNegativeZero(false);
   3454  }
   3455 
   3456  // Likewise rhsRange.
   3457  if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero()) {
   3458    setCanBeNegativeZero(false);
   3459  }
   3460 
   3461  // If rhsRange and lhsRange contain Non-negative integers only,
   3462  // We skip negative zero check.
   3463  if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative()) {
   3464    setCanBeNegativeZero(false);
   3465  }
   3466 
   3467  // If rhsRange and lhsRange < 0. Then we skip negative zero check.
   3468  if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative()) {
   3469    setCanBeNegativeZero(false);
   3470  }
   3471 }
   3472 
   3473 void MMod::collectRangeInfoPreTrunc() {
   3474  Range lhsRange(lhs());
   3475  Range rhsRange(rhs());
   3476  if (lhsRange.isFiniteNonNegative()) {
   3477    canBeNegativeDividend_ = false;
   3478  }
   3479  if (!rhsRange.canBeZero()) {
   3480    canBeDivideByZero_ = false;
   3481  }
   3482  if (type() == MIRType::Int32 && fallible()) {
   3483    setGuardRangeBailoutsUnchecked();
   3484  }
   3485 }
   3486 
   3487 void MToNumberInt32::collectRangeInfoPreTrunc() {
   3488  Range inputRange(input());
   3489  if (!inputRange.canBeNegativeZero()) {
   3490    needsNegativeZeroCheck_ = false;
   3491  }
   3492 }
   3493 
   3494 void MBoundsCheck::collectRangeInfoPreTrunc() {
   3495  Range indexRange(index());
   3496  Range lengthRange(length());
   3497  if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound()) {
   3498    return;
   3499  }
   3500  if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN()) {
   3501    return;
   3502  }
   3503 
   3504  int64_t indexLower = indexRange.lower();
   3505  int64_t indexUpper = indexRange.upper();
   3506  int64_t lengthLower = lengthRange.lower();
   3507  int64_t min = minimum();
   3508  int64_t max = maximum();
   3509 
   3510  if (indexLower + min >= 0 && indexUpper + max < lengthLower) {
   3511    fallible_ = false;
   3512  }
   3513 }
   3514 
   3515 void MBoundsCheckLower::collectRangeInfoPreTrunc() {
   3516  Range indexRange(index());
   3517  if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) {
   3518    fallible_ = false;
   3519  }
   3520 }
   3521 
   3522 void MCompare::collectRangeInfoPreTrunc() {
   3523  if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN()) {
   3524    operandsAreNeverNaN_ = true;
   3525  }
   3526 }
   3527 
   3528 void MNot::collectRangeInfoPreTrunc() {
   3529  if (!Range(input()).canBeNaN()) {
   3530    operandIsNeverNaN_ = true;
   3531  }
   3532 }
   3533 
   3534 void MPowHalf::collectRangeInfoPreTrunc() {
   3535  Range inputRange(input());
   3536  if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound()) {
   3537    operandIsNeverNegativeInfinity_ = true;
   3538  }
   3539  if (!inputRange.canBeNegativeZero()) {
   3540    operandIsNeverNegativeZero_ = true;
   3541  }
   3542  if (!inputRange.canBeNaN()) {
   3543    operandIsNeverNaN_ = true;
   3544  }
   3545 }
   3546 
   3547 void MUrsh::collectRangeInfoPreTrunc() {
   3548  if (type() == MIRType::Int64) {
   3549    return;
   3550  }
   3551 
   3552  Range lhsRange(lhs()), rhsRange(rhs());
   3553 
   3554  // As in MUrsh::computeRange(), convert the inputs.
   3555  lhsRange.wrapAroundToInt32();
   3556  rhsRange.wrapAroundToShiftCount();
   3557 
   3558  // If the most significant bit of our result is always going to be zero,
   3559  // we can optimize by disabling bailout checks for enforcing an int32 range.
   3560  if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) {
   3561    bailoutsDisabled_ = true;
   3562  }
   3563 }
   3564 
   3565 static bool DoesMaskMatchRange(int32_t mask, const Range& range) {
   3566  // Check if range is positive, because the bitand operator in `(-3) & 0xff`
   3567  // can't be eliminated.
   3568  if (range.lower() >= 0) {
   3569    MOZ_ASSERT(range.isInt32());
   3570    // Check that the mask value has all bits set given the range upper bound.
   3571    // Note that the upper bound does not have to be exactly the mask value. For
   3572    // example, consider `x & 0xfff` where `x` is a uint8. That expression can
   3573    // still be optimized to `x`.
   3574    int bits = 1 + FloorLog2(range.upper());
   3575    uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
   3576    if ((mask & maskNeeded) == maskNeeded) {
   3577      return true;
   3578    }
   3579  }
   3580 
   3581  return false;
   3582 }
   3583 
   3584 void MBinaryBitwiseInstruction::collectRangeInfoPreTrunc() {
   3585  Range lhsRange(lhs());
   3586  Range rhsRange(rhs());
   3587 
   3588  if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
   3589      DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange)) {
   3590    maskMatchesRightRange = true;
   3591  }
   3592 
   3593  if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
   3594      DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange)) {
   3595    maskMatchesLeftRange = true;
   3596  }
   3597 }
   3598 
   3599 void MNaNToZero::collectRangeInfoPreTrunc() {
   3600  Range inputRange(input());
   3601 
   3602  if (!inputRange.canBeNaN()) {
   3603    operandIsNeverNaN_ = true;
   3604  }
   3605  if (!inputRange.canBeNegativeZero()) {
   3606    operandIsNeverNegativeZero_ = true;
   3607  }
   3608 }
   3609 
// Rewrite the condition of every test that branches into a block proven
// unreachable by range analysis into a boolean constant, so that a later
// dead-code-elimination pass can delete the dead branch. Sets
// *shouldRemoveDeadCode when at least one test was rewritten. Finishes by
// attempting to remove now-unneeded range guards (tryRemovingGuards).
bool RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode) {
  *shouldRemoveDeadCode = false;

  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;

    if (!block->unreachable()) {
      continue;
    }

    // Filter out unreachable fake entries.
    if (block->numPredecessors() == 0) {
      // Ignore fixup blocks added by the Value Numbering phase, in order
      // to keep the dominator tree as-is when we have OSR Block which are
      // no longer reachable from the main entry point of the graph.
      MOZ_ASSERT(graph_.osrBlock());
      continue;
    }

    MControlInstruction* cond = block->getPredecessor(0)->lastIns();
    if (!cond->isTest()) {
      continue;
    }

    // Replace the condition of the test control instruction by a constant
    // chosen based which of the successors has the unreachable flag which is
    // added by MBeta::computeRange on its own block.
    MTest* test = cond->toTest();
    MDefinition* condition = test->input();

    // If the false-branch is unreachable, then the test condition must be true.
    // If the true-branch is unreachable, then the test condition must be false.
    MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
    bool value = block == test->ifFalse();
    MConstant* constant =
        MConstant::New(alloc().fallible(), BooleanValue(value));
    if (!constant) {
      return false;
    }

    // Keep the bailout paths which narrowed the range of the condition's
    // operands: the unreachability proof relies on those narrowed ranges.
    condition->setGuardRangeBailoutsUnchecked();

    test->block()->insertBefore(test, constant);

    // The original condition is left in place (possibly dead); only the
    // test's operand is redirected to the constant.
    test->replaceOperand(0, constant);
    JitSpew(JitSpew_Range,
            "Update condition of %u to reflect unreachable branches.",
            test->id());

    *shouldRemoveDeadCode = true;
  }

  return tryRemovingGuards();
}
   3665 
// Try to clear the GuardRangeBailouts flag on definitions where it is not
// actually needed: a guard is kept only when the definition's MIRType itself
// restricts the computed range (so bailing out is what enforces the range);
// otherwise the flag is propagated down to the operands that contributed to
// the range instead. The worklist flag marks definitions already queued in
// `guards` so each is visited at most once.
bool RangeAnalysis::tryRemovingGuards() {
  MDefinitionVector guards(alloc());

  // Seed the worklist with every definition currently flagged as a range
  // guard (set by prepareForUCE).
  for (ReversePostorderIterator block = graph_.rpoBegin();
       block != graph_.rpoEnd(); block++) {
    if (mir->shouldCancel("RangeAnalysis tryRemovingGuards (block loop)")) {
      return false;
    }

    for (MDefinitionIterator iter(*block); iter; iter++) {
      if (!iter->isGuardRangeBailouts()) {
        continue;
      }

      iter->setInWorklist();
      if (!guards.append(*iter)) {
        return false;
      }
    }
  }

  // Flag all fallible instructions which were indirectly used in the
  // computation of the condition, such that we do not ignore
  // bailout-paths which are used to shrink the input range of the
  // operands of the condition.
  // Note: `guards` grows while this loop runs; newly appended operands are
  // processed by later iterations.
  for (size_t i = 0; i < guards.length(); i++) {
    if (mir->shouldCancel("RangeAnalysis tryRemovingGuards (guards loop)")) {
      return false;
    }

    MDefinition* guard = guards[i];

    // If this ins is a guard even without guardRangeBailouts,
    // there is no reason in trying to hoist the guardRangeBailouts check.
    // (The flag is briefly cleared so DeadIfUnused judges the instruction
    // without it, then restored.)
    guard->setNotGuardRangeBailouts();
    if (!DeadIfUnused(guard)) {
      guard->setGuardRangeBailouts();
      continue;
    }
    guard->setGuardRangeBailouts();

    if (!guard->isPhi()) {
      if (!guard->range()) {
        continue;
      }

      // Filter the range of the instruction based on its MIRType.
      Range typeFilteredRange(guard);

      // If the output range is updated by adding the inner range,
      // then the MIRType act as an effectful filter. As we do not know if
      // this filtered Range might change or not the result of the
      // previous comparison, we have to keep this instruction as a guard
      // because it has to bailout in order to restrict the Range to its
      // MIRType.
      if (typeFilteredRange.update(guard->range())) {
        continue;
      }
    }

    // The guard is removable here; push the responsibility down to the
    // operands that fed the range computation.
    guard->setNotGuardRangeBailouts();

    // Propagate the guard to its operands.
    for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
      MDefinition* operand = guard->getOperand(op);

      // Already marked.
      if (operand->isInWorklist()) {
        continue;
      }

      MOZ_ASSERT(!operand->isGuardRangeBailouts());

      operand->setInWorklist();
      operand->setGuardRangeBailouts();
      if (!guards.append(operand)) {
        return false;
      }
    }
  }

  // Clear the worklist marker on everything we queued.
  for (size_t i = 0; i < guards.length(); i++) {
    MDefinition* guard = guards[i];
    guard->setNotInWorklist();
  }

  return true;
}