tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

IonAnalysis.cpp (191877B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/IonAnalysis.h"
      8 
      9 #include "mozilla/CheckedArithmetic.h"
     10 #include "mozilla/HashFunctions.h"
     11 
     12 #include <algorithm>
     13 #include <utility>  // for ::std::pair
     14 
     15 #include "jit/AliasAnalysis.h"
     16 #include "jit/CompileInfo.h"
     17 #include "jit/DominatorTree.h"
     18 #include "jit/MIRGenerator.h"
     19 #include "jit/MIRGraph.h"
     20 
     21 #include "vm/BytecodeUtil-inl.h"
     22 
     23 using namespace js;
     24 using namespace js::jit;
     25 
     26 using mozilla::DebugOnly;
     27 
     28 // Stack used by FlagPhiInputsAsImplicitlyUsed. It stores the Phi instruction
     29 // pointer and the MUseIterator which should be visited next.
     30 using MPhiUseIteratorStack =
     31    Vector<std::pair<MPhi*, MUseIterator>, 16, SystemAllocPolicy>;
     32 
// Look for Phi uses with a depth-first search. If any uses are found the stack
// of MPhi instructions is returned in the |worklist| argument.
//
// Returns false on OOM or when compilation is cancelled. On success,
// |worklist| is either empty (no observable use was found, and every Phi
// visited has been flagged PhiUsage::Unused) or holds the chain of Phis
// leading to the first observable use (each still flagged as in-worklist;
// the caller pops them and marks them PhiUsage::Used).
[[nodiscard]] static bool DepthFirstSearchUse(const MIRGenerator* mir,
                                              MPhiUseIteratorStack& worklist,
                                              MPhi* phi) {
  // Push a Phi and the next use to iterate over in the worklist.
  auto push = [&worklist](MPhi* phi, MUseIterator use) -> bool {
    phi->setInWorklist();
    return worklist.append(std::make_pair(phi, use));
  };

#ifdef DEBUG
  // Used to assert that when we have no uses, we at least visited all the
  // transitive uses.
  size_t refUseCount = phi->useCount();
  size_t useCount = 0;
#endif
  MOZ_ASSERT(worklist.empty());
  if (!push(phi, phi->usesBegin())) {
    return false;
  }

  while (!worklist.empty()) {
    // Resume iterating over the last phi-use pair added by the next loop.
    auto pair = worklist.popCopy();
    MPhi* producer = pair.first;
    MUseIterator use = pair.second;
    MUseIterator end(producer->usesEnd());
    producer->setNotInWorklist();

    // Keep going down the tree of uses, skipping (continue)
    // non-observable/unused cases and Phi which are already listed in the
    // worklist. Stop (return) as soon as one use is found.
    while (use != end) {
      // Advance |use| before inspecting the consumer, so that the iterator
      // pushed on a "found" return already points at the next use to resume
      // from; |it| keeps the current use for the observability check below.
      MNode* consumer = (*use)->consumer();
      MUseIterator it = use;
      use++;
#ifdef DEBUG
      useCount++;
#endif
      if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed inner loop")) {
        return false;
      }

      if (consumer->isResumePoint()) {
        MResumePoint* rp = consumer->toResumePoint();
        // Observable operands are similar to potential uses.
        if (rp->isObservableOperand(*it)) {
          return push(producer, use);
        }
        continue;
      }

      MDefinition* cdef = consumer->toDefinition();
      if (!cdef->isPhi()) {
        // The producer is explicitly used by a definition.
        return push(producer, use);
      }

      MPhi* cphi = cdef->toPhi();
      if (cphi->getUsageAnalysis() == PhiUsage::Used ||
          cphi->isImplicitlyUsed()) {
        // The information got cached on the Phi the last time it
        // got visited, or when flagging operands of implicitly used
        // instructions.
        return push(producer, use);
      }

      if (cphi->isInWorklist() || cphi == producer) {
        // We are already iterating over the uses of this Phi instruction which
        // are part of a loop, instead of trying to handle loops, conservatively
        // mark them as used.
        return push(producer, use);
      }

      if (cphi->getUsageAnalysis() == PhiUsage::Unused) {
        // The instruction already got visited and is known to have
        // no uses. Skip it.
        continue;
      }

      // We found another Phi instruction, move the use iterator to
      // the next use push it to the worklist stack. Then, continue
      // with a depth search.
      if (!push(producer, use)) {
        return false;
      }
      producer = cphi;
      use = producer->usesBegin();
      end = producer->usesEnd();
#ifdef DEBUG
      refUseCount += producer->useCount();
#endif
    }

    // When unused, we cannot bubble up this information without iterating
    // over the rest of the previous Phi instruction consumers.
    MOZ_ASSERT(use == end);
    producer->setUsageAnalysis(PhiUsage::Unused);
  }

  MOZ_ASSERT(useCount == refUseCount);
  return true;
}
    137 
// For each Phi of |succ|, mark the input flowing across the (soon to be
// removed) |block| -> |succ| edge as implicitly used iff the Phi has any
// (transitive) observable use. |worklist| is scratch storage shared across
// calls; it must be empty on entry and is left empty on return.
[[nodiscard]] static bool FlagPhiInputsAsImplicitlyUsed(
    const MIRGenerator* mir, MBasicBlock* block, MBasicBlock* succ,
    MPhiUseIteratorStack& worklist) {
  // When removing an edge between 2 blocks, we might remove the ability of
  // later phases to figure out that the uses of a Phi should be considered as
  // a use of all its inputs. Thus we need to mark the Phi inputs as being
  // implicitly used iff the phi has any uses.
  //
  //
  //        +--------------------+         +---------------------+
  //        |12 MFoo 6           |         |32 MBar 5            |
  //        |                    |         |                     |
  //        |   ...              |         |   ...               |
  //        |                    |         |                     |
  //        |25 MGoto Block 4    |         |43 MGoto Block 4     |
  //        +--------------------+         +---------------------+
  //                   |                              |
  //             |     |                              |
  //             |     |                              |
  //             |     +-----X------------------------+
  //             |         Edge       |
  //             |        Removed     |
  //             |                    |
  //             |       +------------v-----------+
  //             |       |50 MPhi 12 32           |
  //             |       |                        |
  //             |       |   ...                  |
  //             |       |                        |
  //             |       |70 MReturn 50           |
  //             |       +------------------------+
  //             |
  //   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  //             |
  //             v
  //
  //    ^   +--------------------+         +---------------------+
  //   /!\  |12 MConst opt-out   |         |32 MBar 5            |
  //  '---' |                    |         |                     |
  //        |   ...              |         |   ...               |
  //        |78 MBail            |         |                     |
  //        |80 MUnreachable     |         |43 MGoto Block 4     |
  //        +--------------------+         +---------------------+
  //                                                  |
  //                                                  |
  //                                                  |
  //                                  +---------------+
  //                                  |
  //                                  |
  //                                  |
  //                     +------------v-----------+
  //                     |50 MPhi 32              |
  //                     |                        |
  //                     |   ...                  |
  //                     |                        |
  //                     |70 MReturn 50           |
  //                     +------------------------+
  //
  //
  // If the inputs of the Phi are not flagged as implicitly used, then
  // later compilation phase might optimize them out. The problem is that a
  // bailout will use this value and give it back to baseline, which will then
  // use the OptimizedOut magic value in a computation.
  //
  // Unfortunately, we cannot be too conservative about flagging Phi inputs as
  // having implicit uses, as this would prevent many optimizations from being
  // used. Thus, the following code is in charge of flagging Phi instructions
  // as Unused or Used, and setting ImplicitlyUsed accordingly.
  size_t predIndex = succ->getPredecessorIndex(block);
  MPhiIterator end = succ->phisEnd();
  MPhiIterator it = succ->phisBegin();
  for (; it != end; it++) {
    MPhi* phi = *it;

    if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed outer loop")) {
      return false;
    }

    // We are looking to mark the Phi inputs which are used across the edge
    // between the |block| and its successor |succ|.
    MDefinition* def = phi->getOperand(predIndex);
    if (def->isImplicitlyUsed()) {
      // Already flagged; nothing more to record for this Phi.
      continue;
    }

    // If the Phi is either Used or Unused, set the ImplicitlyUsed flag
    // accordingly.
    if (phi->getUsageAnalysis() == PhiUsage::Used || phi->isImplicitlyUsed()) {
      def->setImplicitlyUsedUnchecked();
      continue;
    } else if (phi->getUsageAnalysis() == PhiUsage::Unused) {
      continue;
    }

    // We do not know if the Phi was Used or Unused, iterate over all uses
    // with a depth-search of uses. Returns the matching stack in the
    // worklist as soon as one use is found.
    MOZ_ASSERT(worklist.empty());
    if (!DepthFirstSearchUse(mir, worklist, phi)) {
      return false;
    }

    MOZ_ASSERT_IF(worklist.empty(),
                  phi->getUsageAnalysis() == PhiUsage::Unused);
    if (!worklist.empty()) {
      // One of the Phis is used, set Used flags on all the Phis which are
      // in the use chain.
      def->setImplicitlyUsedUnchecked();
      do {
        auto pair = worklist.popCopy();
        MPhi* producer = pair.first;
        producer->setUsageAnalysis(PhiUsage::Used);
        producer->setNotInWorklist();
      } while (!worklist.empty());
    }
    // Either the search or the cached flags must have resolved this Phi.
    MOZ_ASSERT(phi->getUsageAnalysis() != PhiUsage::Unknown);
  }

  return true;
}
    257 
    258 static MInstructionIterator FindFirstInstructionAfterBail(MBasicBlock* block) {
    259  MOZ_ASSERT(block->alwaysBails());
    260  for (MInstructionIterator it = block->begin(); it != block->end(); it++) {
    261    MInstruction* ins = *it;
    262    if (ins->isBail()) {
    263      it++;
    264      return it;
    265    }
    266  }
    267  MOZ_CRASH("Expected MBail in alwaysBails block");
    268 }
    269 
// Given an iterator pointing to the first removed instruction, mark
// the operands of each removed instruction as having implicit uses.
//
// Also flags observable resume-point operands of the removed instructions,
// and the Phi inputs of all successors of |block|, since those values may
// still be needed by baseline after a bailout.
[[nodiscard]] static bool FlagOperandsAsImplicitlyUsedAfter(
    const MIRGenerator* mir, MBasicBlock* block,
    MInstructionIterator firstRemoved) {
  MOZ_ASSERT(firstRemoved->block() == block);

  const CompileInfo& info = block->info();

  // Flag operands of removed instructions as having implicit uses.
  MInstructionIterator end = block->end();
  for (MInstructionIterator it = firstRemoved; it != end; it++) {
    if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 1)")) {
      return false;
    }

    MInstruction* ins = *it;
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->getOperand(i)->setImplicitlyUsedUnchecked();
    }

    // Flag observable resume point operands as having implicit uses.
    if (MResumePoint* rp = ins->resumePoint()) {
      // Note: no need to iterate over the caller's of the resume point as
      // this is the same as the entry resume point.
      MOZ_ASSERT(&rp->block()->info() == &info);
      for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
        if (info.isObservableSlot(i)) {
          rp->getOperand(i)->setImplicitlyUsedUnchecked();
        }
      }
    }
  }

  // Flag Phi inputs of the successors as having implicit uses.
  MPhiUseIteratorStack worklist;
  for (size_t i = 0, e = block->numSuccessors(); i < e; i++) {
    if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 2)")) {
      return false;
    }

    if (!FlagPhiInputsAsImplicitlyUsed(mir, block, block->getSuccessor(i),
                                       worklist)) {
      return false;
    }
  }

  return true;
}
    319 
    320 [[nodiscard]] static bool FlagEntryResumePointOperands(const MIRGenerator* mir,
    321                                                       MBasicBlock* block) {
    322  // Flag observable operands of the entry resume point as having implicit uses.
    323  MResumePoint* rp = block->entryResumePoint();
    324  while (rp) {
    325    if (mir->shouldCancel("FlagEntryResumePointOperands")) {
    326      return false;
    327    }
    328 
    329    const CompileInfo& info = rp->block()->info();
    330    for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
    331      if (info.isObservableSlot(i)) {
    332        rp->getOperand(i)->setImplicitlyUsedUnchecked();
    333      }
    334    }
    335 
    336    rp = rp->caller();
    337  }
    338 
    339  return true;
    340 }
    341 
    342 [[nodiscard]] static bool FlagAllOperandsAsImplicitlyUsed(
    343    const MIRGenerator* mir, MBasicBlock* block) {
    344  return FlagEntryResumePointOperands(mir, block) &&
    345         FlagOperandsAsImplicitlyUsedAfter(mir, block, block->begin());
    346 }
    347 
// WarpBuilder sets the alwaysBails flag on blocks that contain an
// unconditional bailout. We trim any instructions in those blocks
// after the first unconditional bailout, and remove any blocks that
// are only reachable through bailing blocks.
bool jit::PruneUnusedBranches(const MIRGenerator* mir, MIRGraph& graph) {
  JitSpew(JitSpew_Prune, "Begin");

  // Pruning is guided by unconditional bailouts. Wasm does not have bailouts.
  MOZ_ASSERT(!mir->compilingWasm());

  Vector<MBasicBlock*, 16, SystemAllocPolicy> worklist;
  uint32_t numMarked = 0;
  bool needsTrim = false;

  // Mark a block reachable and queue it for successor traversal. Also record
  // whether any reachable block needs its post-bail tail trimmed.
  auto markReachable = [&](MBasicBlock* block) -> bool {
    block->mark();
    numMarked++;
    if (block->alwaysBails()) {
      needsTrim = true;
    }
    return worklist.append(block);
  };

  // The entry block is always reachable.
  if (!markReachable(graph.entryBlock())) {
    return false;
  }

  // The OSR entry block is always reachable if it exists.
  if (graph.osrBlock() && !markReachable(graph.osrBlock())) {
    return false;
  }

  // Iteratively mark all reachable blocks.
  while (!worklist.empty()) {
    if (mir->shouldCancel("Prune unused branches (marking reachable)")) {
      return false;
    }
    MBasicBlock* block = worklist.popCopy();

    JitSpew(JitSpew_Prune, "Visit block %u:", block->id());
    JitSpewIndent indent(JitSpew_Prune);

    // If this block always bails, then it does not reach its successors.
    if (block->alwaysBails()) {
      continue;
    }

    for (size_t i = 0; i < block->numSuccessors(); i++) {
      MBasicBlock* succ = block->getSuccessor(i);
      if (succ->isMarked()) {
        continue;
      }
      JitSpew(JitSpew_Prune, "Reaches block %u", succ->id());
      if (!markReachable(succ)) {
        return false;
      }
    }
  }

  if (!needsTrim && numMarked == graph.numBlocks()) {
    // There is nothing to prune.
    graph.unmarkBlocks();
    return true;
  }

  JitSpew(JitSpew_Prune, "Remove unreachable instructions and blocks:");
  JitSpewIndent indent(JitSpew_Prune);

  // The operands of removed instructions may be needed in baseline
  // after bailing out.
  for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
    if (mir->shouldCancel("Prune unused branches (marking operands)")) {
      return false;
    }

    MBasicBlock* block = *it++;
    if (!block->isMarked()) {
      // If we are removing the block entirely, mark the operands of every
      // instruction as being implicitly used.
      if (!FlagAllOperandsAsImplicitlyUsed(mir, block)) {
        return false;
      }
    } else if (block->alwaysBails()) {
      // If we are only trimming instructions after a bail, only mark operands
      // of removed instructions.
      MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
      if (!FlagOperandsAsImplicitlyUsedAfter(mir, block, firstRemoved)) {
        return false;
      }
    }
  }

  // Remove the blocks in post-order such that consumers are visited before
  // the predecessors, the only exception being the Phi nodes of loop headers.
  for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
    if (mir->shouldCancel("Prune unused branches (removal loop)")) {
      return false;
    }
    if (!graph.alloc().ensureBallast()) {
      return false;
    }

    // Advance the iterator before we potentially remove |block| from the
    // graph.
    MBasicBlock* block = *it++;
    if (block->isMarked() && !block->alwaysBails()) {
      continue;
    }

    // As we are going to replace/remove the last instruction, we first have
    // to remove this block from the predecessor list of its successors.
    size_t numSucc = block->numSuccessors();
    for (uint32_t i = 0; i < numSucc; i++) {
      MBasicBlock* succ = block->getSuccessor(i);
      if (succ->isDead()) {
        continue;
      }

      // Our dominators code expects all loop headers to have two predecessors.
      // If we are removing the normal entry to a loop, but can still reach
      // the loop header via OSR, we create a fake unreachable predecessor.
      if (succ->isLoopHeader() && block != succ->backedge()) {
        MOZ_ASSERT(graph.osrBlock());
        if (!graph.alloc().ensureBallast()) {
          return false;
        }

        MBasicBlock* fake = MBasicBlock::NewFakeLoopPredecessor(graph, succ);
        if (!fake) {
          return false;
        }
        // Mark the block to avoid removing it as unreachable.
        fake->mark();

        JitSpew(JitSpew_Prune,
                "Header %u only reachable by OSR. Add fake predecessor %u",
                succ->id(), fake->id());
      }

      JitSpew(JitSpew_Prune, "Remove block edge %u -> %u.", block->id(),
              succ->id());
      succ->removePredecessor(block);
    }

    if (!block->isMarked()) {
      // Remove unreachable blocks from the CFG.
      JitSpew(JitSpew_Prune, "Remove block %u.", block->id());
      graph.removeBlock(block);
    } else {
      // Remove unreachable instructions after unconditional bailouts.
      JitSpew(JitSpew_Prune, "Trim block %u.", block->id());

      // Discard all instructions after the first MBail.
      MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
      block->discardAllInstructionsStartingAt(firstRemoved);

      if (block->outerResumePoint()) {
        block->clearOuterResumePoint();
      }

      // The trimmed block can no longer fall through; terminate it.
      block->end(MUnreachable::New(graph.alloc()));
    }
  }
  graph.unmarkBlocks();

  return true;
}
    514 
    515 [[nodiscard]] static bool SplitCriticalEdgesForBlock(MIRGraph& graph,
    516                                                     MBasicBlock* block) {
    517  if (block->numSuccessors() < 2) {
    518    return true;
    519  }
    520  for (size_t i = 0; i < block->numSuccessors(); i++) {
    521    MBasicBlock* target = block->getSuccessor(i);
    522    if (target->numPredecessors() < 2) {
    523      continue;
    524    }
    525 
    526    // Create a simple new block which contains a goto and which split the
    527    // edge between block and target.
    528    MBasicBlock* split = MBasicBlock::NewSplitEdge(graph, block, i, target);
    529    if (!split) {
    530      return false;
    531    }
    532  }
    533  return true;
    534 }
    535 
    536 // A critical edge is an edge which is neither its successor's only predecessor
    537 // nor its predecessor's only successor. Critical edges must be split to
    538 // prevent copy-insertion and code motion from affecting other edges.
    539 bool jit::SplitCriticalEdges(MIRGraph& graph) {
    540  for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); iter++) {
    541    MBasicBlock* block = *iter;
    542    if (!SplitCriticalEdgesForBlock(graph, block)) {
    543      return false;
    544    }
    545  }
    546  return true;
    547 }
    548 
    549 bool jit::IsUint32Type(const MDefinition* def) {
    550  if (def->isBeta()) {
    551    def = def->getOperand(0);
    552  }
    553 
    554  if (def->type() != MIRType::Int32) {
    555    return false;
    556  }
    557 
    558  return def->isUrsh() && def->getOperand(1)->isConstant() &&
    559         def->getOperand(1)->toConstant()->type() == MIRType::Int32 &&
    560         def->getOperand(1)->toConstant()->toInt32() == 0;
    561 }
    562 
// Determine whether phiBlock/testBlock simply compute a phi and perform a
// test on it.
//
// On success, |*pphi| receives the phi and |*ptest| the test; on failure both
// are left null. |phiBlock| and |testBlock| may be the same block. An
// even-length chain of MNot between the phi and the test (the '!!' idiom) is
// accepted as long as the MNots have no other uses.
static bool BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock,
                              MPhi** pphi, MTest** ptest) {
  *pphi = nullptr;
  *ptest = nullptr;

  if (phiBlock != testBlock) {
    // When split, phiBlock must fall straight through to testBlock.
    MOZ_ASSERT(phiBlock->numSuccessors() == 1 &&
               phiBlock->getSuccessor(0) == testBlock);
    if (!phiBlock->begin()->isGoto()) {
      return false;
    }
  }

  // The test must be the last instruction of testBlock.
  auto iter = testBlock->rbegin();
  if (!iter->isTest()) {
    return false;
  }
  MTest* test = iter->toTest();

  // Unwrap boolean conversion performed through the '!!' idiom.
  MInstruction* testOrNot = test;
  bool hasOddNumberOfNots = false;
  while (++iter != testBlock->rend()) {
    if (iter->isNot()) {
      // The MNot must only be used by |testOrNot|.
      auto* notIns = iter->toNot();
      if (testOrNot->getOperand(0) != notIns) {
        return false;
      }
      if (!notIns->hasOneUse()) {
        return false;
      }

      testOrNot = notIns;
      hasOddNumberOfNots = !hasOddNumberOfNots;
    } else {
      // Fail if there are any other instructions than MNot.
      return false;
    }
  }

  // There's an odd number of MNot, so this can't be the '!!' idiom.
  if (hasOddNumberOfNots) {
    return false;
  }

  MOZ_ASSERT(testOrNot->isTest() || testOrNot->isNot());

  // The innermost consumer must read a phi defined in phiBlock.
  MDefinition* testInput = testOrNot->getOperand(0);
  if (!testInput->isPhi()) {
    return false;
  }
  MPhi* phi = testInput->toPhi();
  if (phi->block() != phiBlock) {
    return false;
  }

  // The phi must be used only by the test (or MNot chain), except for resume
  // points inside the two blocks we are folding away.
  for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
    MUse* use = *iter;
    if (use->consumer() == testOrNot) {
      continue;
    }
    if (use->consumer()->isResumePoint()) {
      MBasicBlock* useBlock = use->consumer()->block();
      if (useBlock == phiBlock || useBlock == testBlock) {
        continue;
      }
    }
    return false;
  }

  // |phi| must be the only phi of phiBlock.
  for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd();
       ++iter) {
    if (*iter != phi) {
      return false;
    }
  }

  // When split, testBlock must carry no phis of its own.
  if (phiBlock != testBlock && !testBlock->phisEmpty()) {
    return false;
  }

  *pphi = phi;
  *ptest = test;

  return true;
}
    652 
    653 // Determine if value is directly or indirectly the test input.
    654 static bool IsTestInputMaybeToBool(MTest* test, MDefinition* value) {
    655  auto* input = test->input();
    656  bool hasEvenNumberOfNots = true;
    657  while (true) {
    658    // Only accept if there's an even number of MNot.
    659    if (input == value && hasEvenNumberOfNots) {
    660      return true;
    661    }
    662 
    663    // Unwrap boolean conversion performed through the '!!' idiom.
    664    if (input->isNot()) {
    665      input = input->toNot()->input();
    666      hasEvenNumberOfNots = !hasEvenNumberOfNots;
    667      continue;
    668    }
    669 
    670    return false;
    671  }
    672 }
    673 
    674 // Change |block| so that it ends in a goto to the specific |target| block.
    675 // |existingPred| is an existing predecessor of the block.
    676 //
    677 // |blockResult| is the value computed by |block|. This was a phi input but the
    678 // caller has determined that |blockResult| matches the input of an earlier
    679 // MTest instruction and we don't need to test it a second time. Mark it as
    680 // implicitly-used because we're removing a use.
    681 [[nodiscard]] static bool UpdateGotoSuccessor(TempAllocator& alloc,
    682                                              MBasicBlock* block,
    683                                              MDefinition* blockResult,
    684                                              MBasicBlock* target,
    685                                              MBasicBlock* existingPred) {
    686  blockResult->setImplicitlyUsedUnchecked();
    687 
    688  MInstruction* ins = block->lastIns();
    689  MOZ_ASSERT(ins->isGoto());
    690  ins->toGoto()->target()->removePredecessor(block);
    691  block->discardLastIns();
    692 
    693  MGoto* newGoto = MGoto::New(alloc, target);
    694  block->end(newGoto);
    695 
    696  return target->addPredecessorSameInputsAs(block, existingPred);
    697 }
    698 
// Change block so that it ends in a test of the specified value, going to
// either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
// or ifFalse with the same values incoming to ifTrue/ifFalse as block.
// existingPred is not required to be a predecessor of ifTrue/ifFalse if block
// already ends in a test going to that block on a true/false result.
//
// Returns false on OOM. Note the ordering below: the old successor's
// predecessor link is removed before the new predecessor link is added and
// before the test's successor slot is rewritten.
[[nodiscard]] static bool UpdateTestSuccessors(
    TempAllocator& alloc, MBasicBlock* block, MDefinition* value,
    MBasicBlock* ifTrue, MBasicBlock* ifFalse, MBasicBlock* existingPred) {
  MInstruction* ins = block->lastIns();
  if (ins->isTest()) {
    // The block already ends in a test of |value|; only retarget the arms
    // that changed.
    MTest* test = ins->toTest();
    MOZ_ASSERT(test->input() == value);

    if (ifTrue != test->ifTrue()) {
      test->ifTrue()->removePredecessor(block);
      if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
        return false;
      }
      MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
      test->replaceSuccessor(0, ifTrue);
    }

    if (ifFalse != test->ifFalse()) {
      test->ifFalse()->removePredecessor(block);
      if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
        return false;
      }
      MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
      test->replaceSuccessor(1, ifFalse);
    }

    return true;
  }

  // The block ends in a goto: replace it with a two-way test on |value|.
  MOZ_ASSERT(ins->isGoto());
  ins->toGoto()->target()->removePredecessor(block);
  block->discardLastIns();

  MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
  block->end(test);

  if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
    return false;
  }
  if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
    return false;
  }
  return true;
}
    748 
    749 /*
    750 * Look for a diamond pattern:
    751 *
    752 *        initialBlock
    753 *          /     \
    754 *  trueBranch  falseBranch
    755 *          \     /
    756 *          phiBlock
    757 *             |
    758 *         testBlock
    759 */
    760 static bool IsDiamondPattern(MBasicBlock* initialBlock) {
    761  MInstruction* ins = initialBlock->lastIns();
    762  if (!ins->isTest()) {
    763    return false;
    764  }
    765  MTest* initialTest = ins->toTest();
    766 
    767  MBasicBlock* trueBranch = initialTest->ifTrue();
    768  if (trueBranch->numPredecessors() != 1 || !trueBranch->lastIns()->isGoto()) {
    769    return false;
    770  }
    771 
    772  MBasicBlock* falseBranch = initialTest->ifFalse();
    773  if (falseBranch->numPredecessors() != 1 ||
    774      !falseBranch->lastIns()->isGoto()) {
    775    return false;
    776  }
    777 
    778  MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
    779  if (phiBlock != falseBranch->getSuccessor(0)) {
    780    return false;
    781  }
    782  if (phiBlock->numPredecessors() != 2) {
    783    return false;
    784  }
    785  return true;
    786 }
    787 
// Fold the diamond rooted at |initialBlock| so each arm jumps straight to the
// final test's targets, eliminating the intermediate phi. Returns false only
// on OOM; returning true covers both "folded" and "pattern rejected".
[[nodiscard]] static bool MaybeFoldDiamondConditionBlock(
    MIRGraph& graph, MBasicBlock* initialBlock) {
  MOZ_ASSERT(IsDiamondPattern(initialBlock));

  // Optimize the MIR graph to improve the code generated for conditional
  // operations. A test like 'if (a ? b : c)' normally requires four blocks,
  // with a phi for the intermediate value. This can be improved to use three
  // blocks with no phi value.

  /*
   * Look for a diamond pattern:
   *
   *        initialBlock
   *          /     \
   *  trueBranch  falseBranch
   *          \     /
   *          phiBlock
   *             |
   *         testBlock
   *
   * Where phiBlock contains a single phi combining values pushed onto the
   * stack by trueBranch and falseBranch, and testBlock contains a test on
   * that phi. phiBlock and testBlock may be the same block; generated code
   * will use different blocks if the (?:) op is in an inlined function.
   */

  MTest* initialTest = initialBlock->lastIns()->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();
  // Don't touch loop backedges; folding them would corrupt loop structure.
  if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
      falseBranch->isLoopBackedge()) {
    return true;
  }

  // phiBlock and testBlock may be distinct (e.g. when the (?:) op was
  // inlined); follow the single goto edge in that case.
  MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    if (testBlock->isLoopBackedge()) {
      return true;
    }
    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MPhi* phi;
  MTest* finalTest;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  // The diamond shape guarantees exactly two incoming values.
  MOZ_ASSERT(phi->numOperands() == 2);

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  MDefinition* trueResult =
      phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
  MDefinition* falseResult =
      phi->getOperand(phiBlock->indexForPredecessor(falseBranch));

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Change the end of the block to a test that jumps directly to successors of
  // testBlock, rather than to testBlock itself.

  // If the arm's value matches the initial test's input (possibly through a
  // ToBool), its outcome is already known: jump straight to one target.
  // Otherwise the arm needs its own test on the phi operand.
  if (IsTestInputMaybeToBool(initialTest, trueResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, trueResult,
                             finalTest->ifTrue(), testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  if (IsTestInputMaybeToBool(initialTest, falseResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, falseResult,
                             finalTest->ifFalse(), testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}
    900 
    901 /*
    902 * Look for a triangle pattern:
    903 *
    904 *        initialBlock
    905 *          /     \
    906 *  trueBranch     |
    907 *          \     /
    908 *     phiBlock+falseBranch
    909 *             |
    910 *         testBlock
    911 *
    912 * Or:
    913 *
    914 *        initialBlock
    915 *          /     \
    916 *         |    falseBranch
    917 *          \     /
    918 *     phiBlock+trueBranch
    919 *             |
    920 *         testBlock
    921 */
    922 static bool IsTrianglePattern(MBasicBlock* initialBlock) {
    923  MInstruction* ins = initialBlock->lastIns();
    924  if (!ins->isTest()) {
    925    return false;
    926  }
    927  MTest* initialTest = ins->toTest();
    928 
    929  MBasicBlock* trueBranch = initialTest->ifTrue();
    930  MBasicBlock* falseBranch = initialTest->ifFalse();
    931 
    932  if (trueBranch->numSuccessors() == 1 &&
    933      trueBranch->getSuccessor(0) == falseBranch) {
    934    if (trueBranch->numPredecessors() != 1) {
    935      return false;
    936    }
    937    if (falseBranch->numPredecessors() != 2) {
    938      return false;
    939    }
    940    return true;
    941  }
    942 
    943  if (falseBranch->numSuccessors() == 1 &&
    944      falseBranch->getSuccessor(0) == trueBranch) {
    945    if (trueBranch->numPredecessors() != 2) {
    946      return false;
    947    }
    948    if (falseBranch->numPredecessors() != 1) {
    949      return false;
    950    }
    951    return true;
    952  }
    953 
    954  return false;
    955 }
    956 
// Fold the triangle rooted at |initialBlock| (either orientation) so control
// jumps straight to the final test's targets, eliminating the intermediate
// phi. Returns false only on OOM; returning true covers both "folded" and
// "pattern rejected".
[[nodiscard]] static bool MaybeFoldTriangleConditionBlock(
    MIRGraph& graph, MBasicBlock* initialBlock) {
  MOZ_ASSERT(IsTrianglePattern(initialBlock));

  // Optimize the MIR graph to improve the code generated for boolean
  // operations. A test like 'if (a && b)' normally requires three blocks, with
  // a phi for the intermediate value. This can be improved to use no phi value.

  /*
   * Look for a triangle pattern:
   *
   *        initialBlock
   *          /     \
   *  trueBranch     |
   *          \     /
   *     phiBlock+falseBranch
   *             |
   *         testBlock
   *
   * Or:
   *
   *        initialBlock
   *          /     \
   *         |    falseBranch
   *          \     /
   *     phiBlock+trueBranch
   *             |
   *         testBlock
   *
   * Where phiBlock contains a single phi combining values pushed onto the stack
   * by trueBranch and falseBranch, and testBlock contains a test on that phi.
   * phiBlock and testBlock may be the same block; generated code will use
   * different blocks if the (&&) op is in an inlined function.
   */

  MTest* initialTest = initialBlock->lastIns()->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();
  // Don't touch loop backedges; folding them would corrupt loop structure.
  if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
      falseBranch->isLoopBackedge()) {
    return true;
  }

  // Determine which arm doubles as the join (phi) block. This encodes the
  // two orientations shown in the diagram above.
  MBasicBlock* phiBlock;
  if (trueBranch->numSuccessors() == 1 &&
      trueBranch->getSuccessor(0) == falseBranch) {
    phiBlock = falseBranch;
  } else {
    MOZ_ASSERT(falseBranch->getSuccessor(0) == trueBranch);
    phiBlock = trueBranch;
  }

  // phiBlock and testBlock may be distinct (e.g. for an inlined (&&) op);
  // follow the single goto edge in that case.
  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    MOZ_ASSERT(!testBlock->isLoopBackedge());

    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MPhi* phi;
  MTest* finalTest;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phi->numOperands() == 2);

  // If the phi-operand doesn't match the initial input, we can't fold the test.
  auto* phiInputForInitialBlock =
      phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
    return true;
  }

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  // Pick each arm's incoming phi value; the edge from initialBlock supplies
  // the value for whichever arm coincides with phiBlock.
  MDefinition* trueResult;
  MDefinition* falseResult;
  if (phiBlock == trueBranch) {
    trueResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
    falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
  } else {
    trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
    falseResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  }

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Change the end of the block to a test that jumps directly to successors of
  // testBlock, rather than to testBlock itself.

  // When an arm coincides with phiBlock, retarget the initial test directly;
  // otherwise rewrite the arm's terminator like the diamond case: a goto if
  // the value's truthiness is already known, a test otherwise.
  if (phiBlock == trueBranch) {
    if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
                              finalTest->ifTrue(), initialTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  } else if (IsTestInputMaybeToBool(initialTest, trueResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, trueResult,
                             finalTest->ifTrue(), testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  if (phiBlock == falseBranch) {
    if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
                              initialTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  } else if (IsTestInputMaybeToBool(initialTest, falseResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, falseResult,
                             finalTest->ifFalse(), testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}
   1109 
   1110 [[nodiscard]] static bool MaybeFoldConditionBlock(MIRGraph& graph,
   1111                                                  MBasicBlock* initialBlock) {
   1112  if (IsDiamondPattern(initialBlock)) {
   1113    return MaybeFoldDiamondConditionBlock(graph, initialBlock);
   1114  }
   1115  if (IsTrianglePattern(initialBlock)) {
   1116    return MaybeFoldTriangleConditionBlock(graph, initialBlock);
   1117  }
   1118  return true;
   1119 }
   1120 
// Fold a multi-input short-circuit test (three or more operands feeding one
// phi) so each predecessor's test jumps straight to the final targets.
// Returns false only on OOM; returning true covers both "folded" and
// "pattern rejected".
[[nodiscard]] static bool MaybeFoldTestBlock(MIRGraph& graph,
                                             MBasicBlock* initialBlock) {
  // Handle test expressions on more than two inputs. For example
  // |if ((x > 10) && (y > 20) && (z > 30)) { ... }|, which results in the below
  // pattern.
  //
  // Look for the pattern:
  //                       ┌─────────────────┐
  //                    1  │ 1 compare       │
  //                 ┌─────┤ 2 test compare1 │
  //                 │     └──────┬──────────┘
  //                 │            │0
  //         ┌───────▼─────────┐  │
  //         │ 3 compare       │  │
  //         │ 4 test compare3 │  └──────────┐
  //         └──┬──────────┬───┘             │
  //           1│          │0                │
  // ┌──────────▼──────┐   │                 │
  // │ 5 compare       │   └─────────┐       │
  // │ 6 goto          │             │       │
  // └───────┬─────────┘             │       │
  //         │                       │       │
  //         │    ┌──────────────────▼───────▼───────┐
  //         └───►│ 9 phi compare1 compare3 compare5 │
  //              │10 goto                           │
  //              └────────────────┬─────────────────┘
  //                               │
  //                      ┌────────▼────────┐
  //                      │11 test phi9     │
  //                      └─────┬─────┬─────┘
  //                           1│     │0
  //         ┌────────────┐     │     │      ┌─────────────┐
  //         │ TrueBranch │◄────┘     └─────►│ FalseBranch │
  //         └────────────┘                  └─────────────┘
  //
  // And transform it to:
  //
  //                      ┌─────────────────┐
  //                   1  │ 1 compare       │
  //                  ┌───┤ 2 test compare1 │
  //                  │   └──────────┬──────┘
  //                  │              │0
  //          ┌───────▼─────────┐    │
  //          │ 3 compare       │    │
  //          │ 4 test compare3 │    │
  //          └──┬─────────┬────┘    │
  //            1│         │0        │
  //  ┌──────────▼──────┐  │         │
  //  │ 5 compare       │  └──────┐  │
  //  │ 6 test compare5 │         │  │
  //  └────┬────────┬───┘         │  │
  //      1│        │0            │  │
  // ┌─────▼──────┐ │         ┌───▼──▼──────┐
  // │ TrueBranch │ └─────────► FalseBranch │
  // └────────────┘           └─────────────┘

  auto* ins = initialBlock->lastIns();
  if (!ins->isTest()) {
    return true;
  }
  auto* initialTest = ins->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();

  // MaybeFoldConditionBlock handles the case for two operands.
  MBasicBlock* phiBlock;
  if (trueBranch->numPredecessors() > 2) {
    phiBlock = trueBranch;
  } else if (falseBranch->numPredecessors() > 2) {
    phiBlock = falseBranch;
  } else {
    return true;
  }

  // phiBlock and testBlock may be distinct; follow the single goto edge in
  // that case.
  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    if (testBlock->isLoopBackedge()) {
      return true;
    }
    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MOZ_ASSERT(!phiBlock->isLoopBackedge());

  MPhi* phi = nullptr;
  MTest* finalTest = nullptr;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phiBlock->numPredecessors() == phi->numOperands());

  // If the phi-operand doesn't match the initial input, we can't fold the test.
  auto* phiInputForInitialBlock =
      phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
    return true;
  }

  MBasicBlock* newTestBlock = nullptr;
  MDefinition* newTestInput = nullptr;

  // The block of each phi operand must either end with a test instruction on
  // that phi operand or it's the sole block which ends with a goto instruction.
  for (size_t i = 0; i < phiBlock->numPredecessors(); i++) {
    auto* pred = phiBlock->getPredecessor(i);
    auto* operand = phi->getOperand(i);

    // Each predecessor must end with either a test or goto instruction.
    auto* lastIns = pred->lastIns();
    if (lastIns->isGoto() && !newTestBlock) {
      newTestBlock = pred;
      newTestInput = operand;
    } else if (lastIns->isTest()) {
      if (!IsTestInputMaybeToBool(lastIns->toTest(), operand)) {
        return true;
      }
    } else {
      return true;
    }

    MOZ_ASSERT(!pred->isLoopBackedge());
  }

  // Ensure we found the single goto block.
  if (!newTestBlock) {
    return true;
  }

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Create the new test instruction.
  if (!UpdateTestSuccessors(graph.alloc(), newTestBlock, newTestInput,
                            finalTest->ifTrue(), finalTest->ifFalse(),
                            testBlock)) {
    return false;
  }

  // Update all test instructions to point to the final target.
  while (phiBlock->numPredecessors()) {
    mozilla::DebugOnly<size_t> oldNumPred = phiBlock->numPredecessors();

    // UpdateTestSuccessors removes the edge into phiBlock, so predecessor 0
    // is a different block on each iteration.
    auto* pred = phiBlock->getPredecessor(0);
    auto* test = pred->lastIns()->toTest();
    if (test->ifTrue() == phiBlock) {
      if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
                                finalTest->ifTrue(), test->ifFalse(),
                                testBlock)) {
        return false;
      }
    } else {
      MOZ_ASSERT(test->ifFalse() == phiBlock);
      if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
                                test->ifTrue(), finalTest->ifFalse(),
                                testBlock)) {
        return false;
      }
    }

    // Ensure we've made progress.
    MOZ_ASSERT(phiBlock->numPredecessors() + 1 == oldNumPred);
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}
   1309 
   1310 bool jit::FoldTests(MIRGraph& graph) {
   1311  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
   1312       block++) {
   1313    if (!MaybeFoldConditionBlock(graph, *block)) {
   1314      return false;
   1315    }
   1316    if (!MaybeFoldTestBlock(graph, *block)) {
   1317      return false;
   1318    }
   1319  }
   1320  return true;
   1321 }
   1322 
// Remove blocks that merely forward control: a single instruction, one
// predecessor, one successor, and no phis. |*changed| is set when at least
// one block was removed. Returns false only on OOM.
bool jit::FoldEmptyBlocks(MIRGraph& graph, bool* changed) {
  *changed = false;

  for (MBasicBlockIterator iter(graph.begin()); iter != graph.end();) {
    MBasicBlock* block = *iter;
    // Advance before any mutation so removing |block| can't invalidate the
    // iterator.
    iter++;

    if (block->numPredecessors() != 1 || block->numSuccessors() != 1) {
      continue;
    }

    if (!block->phisEmpty()) {
      continue;
    }

    if (block->outerResumePoint()) {
      continue;
    }

    // The block must contain exactly one instruction (its terminator).
    if (*block->begin() != *block->rbegin()) {
      continue;
    }

    MBasicBlock* succ = block->getSuccessor(0);
    MBasicBlock* pred = block->getPredecessor(0);

    if (succ->numPredecessors() != 1) {
      continue;
    }

    // Redirect pred's terminator to jump over |block| straight to |succ|.
    size_t pos = pred->getSuccessorIndex(block);
    pred->lastIns()->replaceSuccessor(pos, succ);

    graph.removeBlock(block);

    // Splice pred in as succ's predecessor with the same phi inputs |block|
    // provided, then drop the now-dead edge from |block|. The order of these
    // three mutations matters.
    if (!succ->addPredecessorSameInputsAs(pred, block)) {
      return false;
    }
    succ->removePredecessor(block);

    *changed = true;
  }
  return true;
}
   1367 
   1368 static void EliminateTriviallyDeadResumePointOperands(MIRGraph& graph,
   1369                                                      MResumePoint* rp) {
   1370  // If we will pop the top of the stack immediately after resuming,
   1371  // then don't preserve the top value in the resume point.
   1372  if (rp->mode() != ResumeMode::ResumeAt) {
   1373    return;
   1374  }
   1375 
   1376  jsbytecode* pc = rp->pc();
   1377  if (JSOp(*pc) == JSOp::JumpTarget) {
   1378    pc += JSOpLength_JumpTarget;
   1379  }
   1380  if (JSOp(*pc) != JSOp::Pop) {
   1381    return;
   1382  }
   1383 
   1384  size_t top = rp->stackDepth() - 1;
   1385  MOZ_ASSERT(!rp->isObservableOperand(top));
   1386 
   1387  MDefinition* def = rp->getOperand(top);
   1388  if (def->isConstant()) {
   1389    return;
   1390  }
   1391 
   1392  MConstant* constant = rp->block()->optimizedOutConstant(graph.alloc());
   1393  rp->replaceOperand(top, constant);
   1394 }
   1395 
   1396 // Operands to a resume point which are dead at the point of the resume can be
   1397 // replaced with a magic value. This pass only replaces resume points which are
   1398 // trivially dead.
   1399 //
   1400 // This is intended to ensure that extra resume points within a basic block
   1401 // will not artificially extend the lifetimes of any SSA values. This could
   1402 // otherwise occur if the new resume point captured a value which is created
   1403 // between the old and new resume point and is dead at the new resume point.
   1404 bool jit::EliminateTriviallyDeadResumePointOperands(const MIRGenerator* mir,
   1405                                                    MIRGraph& graph) {
   1406  for (auto* block : graph) {
   1407    if (MResumePoint* rp = block->entryResumePoint()) {
   1408      if (!graph.alloc().ensureBallast()) {
   1409        return false;
   1410      }
   1411      ::EliminateTriviallyDeadResumePointOperands(graph, rp);
   1412    }
   1413  }
   1414  return true;
   1415 }
   1416 
// Operands to a resume point which are dead at the point of the resume can be
// replaced with a magic value. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool jit::EliminateDeadResumePointOperands(const MIRGenerator* mir,
                                           MIRGraph& graph) {
  // If we are compiling try blocks, locals and arguments may be observable
  // from catch or finally blocks (which Ion does not compile). For now just
  // disable the pass in this case.
  if (graph.hasTryBlock()) {
    return true;
  }

  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)")) {
      return false;
    }

    if (MResumePoint* rp = block->entryResumePoint()) {
      if (!graph.alloc().ensureBallast()) {
        return false;
      }
      ::EliminateTriviallyDeadResumePointOperands(graph, rp);
    }

    // The logic below can get confused on infinite loops.
    if (block->isLoopHeader() && block->backedge() == *block) {
      continue;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (MResumePoint* rp = ins->resumePoint()) {
        if (!graph.alloc().ensureBallast()) {
          return false;
        }
        ::EliminateTriviallyDeadResumePointOperands(graph, rp);
      }

      // No benefit to replacing constant operands with other constants.
      if (ins->isConstant()) {
        continue;
      }

      // Scanning uses does not give us sufficient information to tell
      // where instructions that are involved in box/unbox operations or
      // parameter passing might be live. Rewriting uses of these terms
      // in resume points may affect the interpreter's behavior. Rather
      // than doing a more sophisticated analysis, just ignore these.
      if (ins->isUnbox() || ins->isParameter() || ins->isBoxNonStrictThis()) {
        continue;
      }

      // Early intermediate values captured by resume points, such as
      // ArrayState and its allocation, may be legitimately dead in Ion code,
      // but are still needed if we bail out. They can recover on bailout.
      if (ins->isRecoveredOnBailout()) {
        MOZ_ASSERT(ins->canRecoverOnBailout());
        continue;
      }

      // If the instruction's behavior has been constant folded into a
      // separate instruction, we can't determine precisely where the
      // instruction becomes dead and can't eliminate its uses.
      if (ins->isImplicitlyUsed()) {
        continue;
      }

      // Check if this instruction's result is only used within the
      // current block, and keep track of its last use in a definition
      // (not resume point). This requires the instructions in the block
      // to be numbered, ensured by running this immediately after alias
      // analysis.
      uint32_t maxDefinition = 0;
      for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();
           uses++) {
        MNode* consumer = uses->consumer();
        if (consumer->isResumePoint()) {
          // If the instruction is captured by one of the resume points, then
          // it might be observed indirectly while the frame is live on the
          // stack, so it has to be computed.
          MResumePoint* resume = consumer->toResumePoint();
          if (resume->isObservableOperand(*uses)) {
            maxDefinition = UINT32_MAX;
            break;
          }
          continue;
        }

        // Uses outside the block, or by boxes/phis (whose id numbering is
        // not comparable here), force the value to stay live everywhere.
        MDefinition* def = consumer->toDefinition();
        if (def->block() != *block || def->isBox() || def->isPhi()) {
          maxDefinition = UINT32_MAX;
          break;
        }
        maxDefinition = std::max(maxDefinition, def->id());
      }
      if (maxDefinition == UINT32_MAX) {
        continue;
      }

      // Walk the uses a second time, removing any in resume points after
      // the last use in a definition.
      for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();) {
        MUse* use = *uses++;
        if (use->consumer()->isDefinition()) {
          continue;
        }
        MResumePoint* mrp = use->consumer()->toResumePoint();
        if (mrp->block() != *block || !mrp->instruction() ||
            mrp->instruction() == *ins ||
            mrp->instruction()->id() <= maxDefinition) {
          continue;
        }

        if (!graph.alloc().ensureBallast()) {
          return false;
        }

        // Store an optimized out magic value in place of all dead
        // resume point operands. Making any such substitution can in
        // general alter the interpreter's behavior, even though the
        // code is dead, as the interpreter will still execute opcodes
        // whose effects cannot be observed. If the magic value
        // were to flow to, say, a dead property access the
        // interpreter could throw an exception; we avoid this problem
        // by removing dead operands before removing dead code.
        MConstant* constant =
            MConstant::NewMagic(graph.alloc(), JS_OPTIMIZED_OUT);
        block->insertBefore(*(block->begin()), constant);
        use->replaceProducer(constant);
      }
    }
  }

  return true;
}
   1560 
   1561 // Test whether |def| would be needed if it had no uses.
   1562 bool js::jit::DeadIfUnused(const MDefinition* def) {
   1563  // Effectful instructions of course cannot be removed.
   1564  if (def->isEffectful()) {
   1565    return false;
   1566  }
   1567 
   1568  // Never eliminate guard instructions.
   1569  if (def->isGuard()) {
   1570    return false;
   1571  }
   1572 
   1573  // Required to be preserved, as the type guard related to this instruction
   1574  // is part of the semantics of a transformation.
   1575  if (def->isGuardRangeBailouts()) {
   1576    return false;
   1577  }
   1578 
   1579  // Control instructions have no uses, but also shouldn't be optimized out
   1580  if (def->isControlInstruction()) {
   1581    return false;
   1582  }
   1583 
   1584  // Used when lowering to generate the corresponding snapshots and aggregate
   1585  // the list of recover instructions to be repeated.
   1586  if (def->isInstruction() && def->toInstruction()->resumePoint()) {
   1587    return false;
   1588  }
   1589 
   1590  return true;
   1591 }
   1592 
   1593 // Similar to DeadIfUnused(), but additionally allows effectful instructions.
   1594 bool js::jit::DeadIfUnusedAllowEffectful(const MDefinition* def) {
   1595  // Never eliminate guard instructions.
   1596  if (def->isGuard()) {
   1597    return false;
   1598  }
   1599 
   1600  // Required to be preserved, as the type guard related to this instruction
   1601  // is part of the semantics of a transformation.
   1602  if (def->isGuardRangeBailouts()) {
   1603    return false;
   1604  }
   1605 
   1606  // Control instructions have no uses, but also shouldn't be optimized out
   1607  if (def->isControlInstruction()) {
   1608    return false;
   1609  }
   1610 
   1611  // Used when lowering to generate the corresponding snapshots and aggregate
   1612  // the list of recover instructions to be repeated.
   1613  if (def->isInstruction() && def->toInstruction()->resumePoint()) {
   1614    // All effectful instructions must have a resume point attached. We're
   1615    // allowing effectful instructions here, so we have to ignore any resume
   1616    // points if we want to consider effectful instructions as dead.
   1617    if (!def->isEffectful()) {
   1618      return false;
   1619    }
   1620  }
   1621 
   1622  return true;
   1623 }
   1624 
   1625 // Test whether |def| may be safely discarded, due to being dead or due to being
   1626 // located in a basic block which has itself been marked for discarding.
   1627 bool js::jit::IsDiscardable(const MDefinition* def) {
   1628  return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
   1629 }
   1630 
   1631 // Similar to IsDiscardable(), but additionally allows effectful instructions.
   1632 bool js::jit::IsDiscardableAllowEffectful(const MDefinition* def) {
   1633  return !def->hasUses() &&
   1634         (DeadIfUnusedAllowEffectful(def) || def->block()->isMarked());
   1635 }
   1636 
   1637 // Instructions are useless if they are unused and have no side effects.
   1638 // This pass eliminates useless instructions.
   1639 // The graph itself is unchanged.
   1640 bool jit::EliminateDeadCode(const MIRGenerator* mir, MIRGraph& graph) {
   1641  // Traverse in postorder so that we hit uses before definitions.
   1642  // Traverse instruction list backwards for the same reason.
   1643  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
   1644       block++) {
   1645    if (mir->shouldCancel("Eliminate Dead Code (main loop)")) {
   1646      return false;
   1647    }
   1648 
   1649    // Remove unused instructions.
   1650    for (MInstructionReverseIterator iter = block->rbegin();
   1651         iter != block->rend();) {
   1652      MInstruction* inst = *iter++;
   1653      if (js::jit::IsDiscardable(inst)) {
   1654        block->discard(inst);
   1655      }
   1656    }
   1657  }
   1658 
   1659  return true;
   1660 }
   1661 
   1662 static inline bool IsPhiObservable(MPhi* phi, Observability observe) {
   1663  // If the phi has uses which are not reflected in SSA, then behavior in the
   1664  // interpreter may be affected by removing the phi.
   1665  if (phi->isImplicitlyUsed()) {
   1666    return true;
   1667  }
   1668 
   1669  // Check for uses of this phi node outside of other phi nodes.
   1670  // Note that, initially, we skip reading resume points, which we
   1671  // don't count as actual uses. If the only uses are resume points,
   1672  // then the SSA name is never consumed by the program.  However,
   1673  // after optimizations have been performed, it's possible that the
   1674  // actual uses in the program have been (incorrectly) optimized
   1675  // away, so we must be more conservative and consider resume
   1676  // points as well.
   1677  for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
   1678    MNode* consumer = iter->consumer();
   1679    if (consumer->isResumePoint()) {
   1680      MResumePoint* resume = consumer->toResumePoint();
   1681      if (observe == ConservativeObservability) {
   1682        return true;
   1683      }
   1684      if (resume->isObservableOperand(*iter)) {
   1685        return true;
   1686      }
   1687    } else {
   1688      MDefinition* def = consumer->toDefinition();
   1689      if (!def->isPhi()) {
   1690        return true;
   1691      }
   1692    }
   1693  }
   1694 
   1695  return false;
   1696 }
   1697 
   1698 // Handles cases like:
   1699 //    x is phi(a, x) --> a
   1700 //    x is phi(a, a) --> a
   1701 static inline MDefinition* IsPhiRedundant(MPhi* phi) {
   1702  MDefinition* first = phi->operandIfRedundant();
   1703  if (first == nullptr) {
   1704    return nullptr;
   1705  }
   1706 
   1707  // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
   1708  if (phi->isImplicitlyUsed()) {
   1709    first->setImplicitlyUsedUnchecked();
   1710  }
   1711 
   1712  return first;
   1713 }
   1714 
bool jit::EliminatePhis(const MIRGenerator* mir, MIRGraph& graph,
                        Observability observe) {
  // Eliminates redundant or unobservable phis from the graph.  A
  // redundant phi is something like b = phi(a, a) or b = phi(a, b),
  // both of which can be replaced with a.  An unobservable phi is
  // one that whose value is never used in the program.
  //
  // Note that we must be careful not to eliminate phis representing
  // values that the interpreter will require later.  When the graph
  // is first constructed, we can be more aggressive, because there
  // is a greater correspondence between the CFG and the bytecode.
  // After optimizations such as GVN have been performed, however,
  // the bytecode and CFG may not correspond as closely to one
  // another.  In that case, we must be more conservative.  The flag
  // |conservativeObservability| is used to indicate that eliminate
  // phis is being run after some optimizations have been performed,
  // and thus we should use more conservative rules about
  // observability.  The particular danger is that we can optimize
  // away uses of a phi because we think they are not executable,
  // but the foundation for that assumption is false TI information
  // that will eventually be invalidated.  Therefore, if
  // |conservativeObservability| is set, we will consider any use
  // from a resume point to be observable.  Otherwise, we demand a
  // use from an actual instruction.
  //
  // The pass runs in three phases: (1) seed a worklist with directly
  // observable phis, (2) iterate to a fixed point marking everything
  // transitively reachable from them as live, (3) sweep the rest.

  Vector<MPhi*, 16, SystemAllocPolicy> worklist;

  // Phase 1: Add all observable phis to a worklist. We use the "in worklist"
  // bit to mean "this phi is live".
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    MPhiIterator iter = block->phisBegin();
    while (iter != block->phisEnd()) {
      MPhi* phi = *iter++;

      if (mir->shouldCancel("Eliminate Phis (populate loop)")) {
        return false;
      }

      // Flag all as unused, only observable phis would be marked as used
      // when processed by the work list.
      phi->setUnused();

      // If the phi is redundant, remove it here.
      if (MDefinition* redundant = IsPhiRedundant(phi)) {
        phi->justReplaceAllUsesWith(redundant);
        block->discardPhi(phi);
        continue;
      }

      // Enqueue observable Phis.
      if (IsPhiObservable(phi, observe)) {
        phi->setInWorklist();
        if (!worklist.append(phi)) {
          return false;
        }
      }
    }
  }

  // Phase 2: Iteratively mark all phis reachable from live phis.
  while (!worklist.empty()) {
    if (mir->shouldCancel("Eliminate Phis (worklist)")) {
      return false;
    }

    MPhi* phi = worklist.popCopy();
    MOZ_ASSERT(phi->isUnused());
    phi->setNotInWorklist();

    // The removal of Phis can produce newly redundant phis.
    if (MDefinition* redundant = IsPhiRedundant(phi)) {
      // Add to the worklist the used phis which are impacted.
      for (MUseDefIterator it(phi); it; it++) {
        if (it.def()->isPhi()) {
          MPhi* use = it.def()->toPhi();
          if (!use->isUnused()) {
            use->setUnusedUnchecked();
            use->setInWorklist();
            if (!worklist.append(use)) {
              return false;
            }
          }
        }
      }
      phi->justReplaceAllUsesWith(redundant);
    } else {
      // Otherwise flag them as used.
      phi->setNotUnused();
    }

    // The current phi is/was used, so all its operands are used.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
      MDefinition* in = phi->getOperand(i);
      if (!in->isPhi() || !in->isUnused() || in->isInWorklist()) {
        continue;
      }
      in->setInWorklist();
      if (!worklist.append(in->toPhi())) {
        return false;
      }
    }
  }

  // Phase 3: Sweep dead phis.
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Eliminate Phis (sweep dead phis)")) {
      return false;
    }

    MPhiIterator iter = block->phisBegin();
    while (iter != block->phisEnd()) {
      MPhi* phi = *iter++;
      if (phi->isUnused()) {
        // Detach remaining (resume-point) uses before discarding.
        if (!phi->optimizeOutAllUses(graph.alloc())) {
          return false;
        }
        block->discardPhi(phi);
      }
    }
  }

  return true;
}
   1840 
namespace {

// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
//
class TypeAnalyzer {
  const MIRGenerator* mir;
  MIRGraph& graph;
  // Worklist of phis whose (re)specialization still needs to be propagated
  // to dependent phis.
  Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;

  TempAllocator& alloc() const { return graph.alloc(); }

  // Append |phi| to the worklist unless it is already queued; the
  // in-worklist flag prevents duplicate entries.
  bool addPhiToWorklist(MPhi* phi) {
    if (phi->isInWorklist()) {
      return true;
    }
    if (!phiWorklist_.append(phi)) {
      return false;
    }
    phi->setInWorklist();
    return true;
  }
  // Pop the next phi from the worklist and clear its in-worklist flag.
  MPhi* popPhi() {
    MPhi* phi = phiWorklist_.popCopy();
    phi->setNotInWorklist();
    return phi;
  }

  [[nodiscard]] bool propagateAllPhiSpecializations();

  bool respecialize(MPhi* phi, MIRType type);
  bool propagateSpecialization(MPhi* phi);
  bool specializePhis();
  bool specializeOsrOnlyPhis();
  void replaceRedundantPhi(MPhi* phi);
  bool adjustPhiInputs(MPhi* phi);
  bool adjustInputs(MDefinition* def);
  bool insertConversions();

  bool checkFloatCoherency();
  bool graphContainsFloat32();
  bool markPhiConsumers();
  bool markPhiProducers();
  bool specializeValidFloatOps();
  bool tryEmitFloatOperations();
  bool propagateUnbox();

  bool shouldSpecializeOsrPhis() const;
  MIRType guessPhiType(MPhi* phi) const;

 public:
  TypeAnalyzer(const MIRGenerator* mir, MIRGraph& graph)
      : mir(mir), graph(graph) {}

  bool analyze();
};

} /* anonymous namespace */
   1906 
   1907 bool TypeAnalyzer::shouldSpecializeOsrPhis() const {
   1908  // [SMDOC] OSR Phi Type Specialization
   1909  //
   1910  // Without special handling for OSR phis, we end up with unspecialized phis
   1911  // (MIRType::Value) in the loop (pre)header and other blocks, resulting in
   1912  // unnecessary boxing and unboxing in the loop body.
   1913  //
   1914  // To fix this, phi type specialization needs special code to deal with the
   1915  // OSR entry block. Recall that OSR results in the following basic block
   1916  // structure:
   1917  //
   1918  //  +------------------+                 +-----------------+
   1919  //  | Code before loop |                 | OSR entry block |
   1920  //  +------------------+                 +-----------------+
   1921  //          |                                       |
   1922  //          |                                       |
   1923  //          |           +---------------+           |
   1924  //          +---------> | OSR preheader | <---------+
   1925  //                      +---------------+
   1926  //                              |
   1927  //                              V
   1928  //                      +---------------+
   1929  //                      | Loop header   |<-----+
   1930  //                      +---------------+      |
   1931  //                              |              |
   1932  //                             ...             |
   1933  //                              |              |
   1934  //                      +---------------+      |
   1935  //                      | Loop backedge |------+
   1936  //                      +---------------+
   1937  //
   1938  // OSR phi specialization happens in three steps:
   1939  //
   1940  // (1) Specialize phis but ignore MOsrValue phi inputs. In other words,
   1941  //     pretend the OSR entry block doesn't exist. See guessPhiType.
   1942  //
   1943  // (2) Once phi specialization is done, look at the types of loop header phis
   1944  //     and add these types to the corresponding preheader phis. This way, the
   1945  //     types of the preheader phis are based on the code before the loop and
   1946  //     the code in the loop body. These are exactly the types we expect for
   1947  //     the OSR Values. See the last part of TypeAnalyzer::specializePhis.
   1948  //
   1949  // (3) For type-specialized preheader phis, add guard/unbox instructions to
   1950  //     the OSR entry block to guard the incoming Value indeed has this type.
   1951  //     This happens in:
   1952  //
   1953  //     * TypeAnalyzer::adjustPhiInputs: adds a fallible unbox for values that
   1954  //       can be unboxed.
   1955  //
   1956  //     * TypeAnalyzer::replaceRedundantPhi: adds a type guard for values that
   1957  //       can't be unboxed (null/undefined/magic Values).
   1958  if (!graph.osrBlock()) {
   1959    return false;
   1960  }
   1961 
   1962  return !mir->outerInfo().hadSpeculativePhiBailout();
   1963 }
   1964 
// Try to specialize this phi based on its non-cyclic inputs.
// Returns MIRType::None when no input provides a type yet (all inputs are
// unspecialized phis and/or OSR values), and MIRType::Value when the inputs
// have irreconcilable types.
MIRType TypeAnalyzer::guessPhiType(MPhi* phi) const {
#ifdef DEBUG
  // Check that different magic constants aren't flowing together. Ignore
  // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
  // away.
  MIRType magicType = MIRType::None;
  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* in = phi->getOperand(i);
    if (in->type() == MIRType::MagicHole ||
        in->type() == MIRType::MagicIsConstructing) {
      if (magicType == MIRType::None) {
        magicType = in->type();
      }
      MOZ_ASSERT(magicType == in->type());
    }
  }
#endif

  MIRType type = MIRType::None;
  // True while every type-contributing input so far can produce Float32.
  bool convertibleToFloat32 = false;
  bool hasOSRValueInput = false;
  DebugOnly<bool> hasSpecializableInput = false;
  for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
    MDefinition* in = phi->getOperand(i);
    if (in->isPhi()) {
      hasSpecializableInput = true;
      if (!in->toPhi()->triedToSpecialize()) {
        continue;
      }
      if (in->type() == MIRType::None) {
        // The operand is a phi we tried to specialize, but we were
        // unable to guess its type. propagateSpecialization will
        // propagate the type to this phi when it becomes known.
        continue;
      }
    }

    // See shouldSpecializeOsrPhis comment. This is the first step mentioned
    // there.
    if (shouldSpecializeOsrPhis() && in->isOsrValue()) {
      hasOSRValueInput = true;
      hasSpecializableInput = true;
      continue;
    }

    // First input with a known type: adopt it as the tentative guess.
    if (type == MIRType::None) {
      type = in->type();
      if (in->canProduceFloat32() &&
          !mir->outerInfo().hadSpeculativePhiBailout()) {
        convertibleToFloat32 = true;
      }
      continue;
    }

    // Merge this input's type with the guess accumulated so far.
    if (type == in->type()) {
      convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
    } else {
      if (convertibleToFloat32 && in->type() == MIRType::Float32) {
        // If we only saw definitions that can be converted into Float32 before
        // and encounter a Float32 value, promote previous values to Float32
        type = MIRType::Float32;
      } else if (IsTypeRepresentableAsDouble(type) &&
                 IsTypeRepresentableAsDouble(in->type())) {
        // Specialize phis with int32 and double operands as double.
        type = MIRType::Double;
        convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
      } else {
        // Incompatible types: fall back to a boxed Value.
        return MIRType::Value;
      }
    }
  }

  if (hasOSRValueInput && type == MIRType::Float32) {
    // TODO(post-Warp): simplify float32 handling in this function or (better)
    // make the float32 analysis a stand-alone optimization pass instead of
    // complicating type analysis. See bug 1655773.
    type = MIRType::Double;
  }

  MOZ_ASSERT_IF(type == MIRType::None, hasSpecializableInput);
  return type;
}
   2048 
   2049 bool TypeAnalyzer::respecialize(MPhi* phi, MIRType type) {
   2050  if (phi->type() == type) {
   2051    return true;
   2052  }
   2053  phi->specialize(type);
   2054  return addPhiToWorklist(phi);
   2055 }
   2056 
// Propagate |phi|'s (known) type to the phis that use it, widening their
// specialization where the types disagree.
bool TypeAnalyzer::propagateSpecialization(MPhi* phi) {
  MOZ_ASSERT(phi->type() != MIRType::None);

  // Verify that this specialization matches any phis depending on it.
  for (MUseDefIterator iter(phi); iter; iter++) {
    if (!iter.def()->isPhi()) {
      continue;
    }
    MPhi* use = iter.def()->toPhi();
    if (!use->triedToSpecialize()) {
      continue;
    }
    if (use->type() == MIRType::None) {
      // We tried to specialize this phi, but were unable to guess its
      // type. Now that we know the type of one of its operands, we can
      // specialize it. If it can't be specialized as float32, specialize
      // as double.
      MIRType type = phi->type();
      if (type == MIRType::Float32 && !use->canProduceFloat32()) {
        type = MIRType::Double;
      }
      if (!respecialize(use, type)) {
        return false;
      }
      continue;
    }
    if (use->type() != phi->type()) {
      // Specialize phis with int32 that can be converted to float and float
      // operands as floats.
      if ((use->type() == MIRType::Int32 && use->canProduceFloat32() &&
           phi->type() == MIRType::Float32) ||
          (phi->type() == MIRType::Int32 && phi->canProduceFloat32() &&
           use->type() == MIRType::Float32)) {
        if (!respecialize(use, MIRType::Float32)) {
          return false;
        }
        continue;
      }

      // Specialize phis with int32 and double operands as double.
      if (IsTypeRepresentableAsDouble(use->type()) &&
          IsTypeRepresentableAsDouble(phi->type())) {
        if (!respecialize(use, MIRType::Double)) {
          return false;
        }
        continue;
      }

      // This phi in our use chain can now no longer be specialized.
      if (!respecialize(use, MIRType::Value)) {
        return false;
      }
    }
  }

  return true;
}
   2114 
   2115 bool TypeAnalyzer::propagateAllPhiSpecializations() {
   2116  while (!phiWorklist_.empty()) {
   2117    if (mir->shouldCancel("Specialize Phis (worklist)")) {
   2118      return false;
   2119    }
   2120 
   2121    MPhi* phi = popPhi();
   2122    if (!propagateSpecialization(phi)) {
   2123      return false;
   2124    }
   2125  }
   2126 
   2127  return true;
   2128 }
   2129 
   2130 // If branch pruning removes the path from the entry block to the OSR
   2131 // preheader, we may have phis (or chains of phis) with no operands
   2132 // other than OsrValues. These phis will still have MIRType::None.
   2133 // Since we don't have any information about them, we specialize them
   2134 // as MIRType::Value.
   2135 bool TypeAnalyzer::specializeOsrOnlyPhis() {
   2136  MOZ_ASSERT(graph.osrBlock());
   2137  MOZ_ASSERT(graph.osrPreHeaderBlock()->numPredecessors() == 1);
   2138 
   2139  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
   2140       block++) {
   2141    if (mir->shouldCancel("Specialize osr-only phis (main loop)")) {
   2142      return false;
   2143    }
   2144 
   2145    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
   2146      if (mir->shouldCancel("Specialize osr-only phis (inner loop)")) {
   2147        return false;
   2148      }
   2149 
   2150      if (phi->type() == MIRType::None) {
   2151        phi->specialize(MIRType::Value);
   2152      }
   2153    }
   2154  }
   2155  return true;
   2156 }
   2157 
// Specialize every phi in the graph from its operand types, then run the
// extra OSR steps described in shouldSpecializeOsrPhis.
bool TypeAnalyzer::specializePhis() {
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Specialize Phis (main loop)")) {
      return false;
    }

    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
      if (mir->shouldCancel("Specialize Phis (inner loop)")) {
        return false;
      }

      MIRType type = guessPhiType(*phi);
      phi->specialize(type);
      if (type == MIRType::None) {
        // We tried to guess the type but failed because all operands are
        // phis we still have to visit. Set the triedToSpecialize flag but
        // don't propagate the type to other phis, propagateSpecialization
        // will do that once we know the type of one of the operands.
        continue;
      }
      if (!propagateSpecialization(*phi)) {
        return false;
      }
    }
  }

  if (!propagateAllPhiSpecializations()) {
    return false;
  }

  if (shouldSpecializeOsrPhis()) {
    // See shouldSpecializeOsrPhis comment. This is the second step, propagating
    // loop header phi types to preheader phis.
    MBasicBlock* preHeader = graph.osrPreHeaderBlock();
    MBasicBlock* header = preHeader->getSingleSuccessor();

    if (preHeader->numPredecessors() == 1) {
      MOZ_ASSERT(preHeader->getPredecessor(0) == graph.osrBlock());
      // Branch pruning has removed the path from the entry block
      // to the preheader. Specialize any phis with no non-osr inputs.
      if (!specializeOsrOnlyPhis()) {
        return false;
      }
    } else if (header->isLoopHeader()) {
      // Operand 0 of a loop header phi is the value flowing in from the
      // preheader; widen the preheader phi to include the loop's type.
      for (MPhiIterator phi(header->phisBegin()); phi != header->phisEnd();
           phi++) {
        MPhi* preHeaderPhi = phi->getOperand(0)->toPhi();
        MOZ_ASSERT(preHeaderPhi->block() == preHeader);

        if (preHeaderPhi->type() == MIRType::Value) {
          // Already includes everything.
          continue;
        }

        MIRType loopType = phi->type();
        if (!respecialize(preHeaderPhi, loopType)) {
          return false;
        }
      }
      if (!propagateAllPhiSpecializations()) {
        return false;
      }
    } else {
      // Edge case: there is no backedge in this loop. This can happen
      // if the header is a 'pending' loop header when control flow in
      // the loop body is terminated unconditionally, or if a block
      // that dominates the backedge unconditionally bails out. In
      // this case the header only has the preheader as predecessor
      // and we don't need to do anything.
      MOZ_ASSERT(header->numPredecessors() == 1);
    }
  }

  MOZ_ASSERT(phiWorklist_.empty());
  return true;
}
   2235 
// Rewrite |phi|'s operands so each one matches the phi's specialized type,
// inserting conversions/unboxes (for typed phis) or boxes (for Value phis) at
// the end of the corresponding predecessor block.
bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
  MIRType phiType = phi->type();
  MOZ_ASSERT(phiType != MIRType::None);

  // If we specialized a type that's not Value, there are 3 cases:
  // 1. Every input is of that type.
  // 2. Every observed input is of that type (i.e., some inputs haven't been
  // executed yet).
  // 3. Inputs were numbers, and was specialized to floating point type.
  if (phiType != MIRType::Value) {
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
      MDefinition* in = phi->getOperand(i);
      if (in->type() == phiType) {
        continue;
      }

      // Peek through a Box whose payload already has the right type.
      if (in->isBox() && in->toBox()->input()->type() == phiType) {
        phi->replaceOperand(i, in->toBox()->input());
        continue;
      }

      if (!alloc().ensureBallast()) {
        return false;
      }

      MBasicBlock* predecessor = phi->block()->getPredecessor(i);

      MInstruction* replacement;
      if (IsFloatingPointType(phiType) &&
          IsTypeRepresentableAsDouble(in->type())) {
        // Convert number operands to |phiType|.
        if (phiType == MIRType::Double) {
          replacement = MToDouble::New(alloc(), in);
        } else {
          MOZ_ASSERT(phiType == MIRType::Float32);
          replacement = MToFloat32::New(alloc(), in);
        }
      } else {
        // If we know this branch will fail to convert to phiType, insert a box
        // that'll immediately fail in the fallible unbox below.
        if (in->type() != MIRType::Value) {
          auto* box = MBox::New(alloc(), in);
          predecessor->insertAtEnd(box);
          in = box;
        }

        // Be optimistic and insert unboxes when the operand is a value.
        if (phiType == MIRType::Float32) {
          // Float32 is unboxed as Double, then converted.
          auto* unbox =
              MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
          unbox->setBailoutKind(BailoutKind::SpeculativePhi);
          predecessor->insertAtEnd(unbox);
          replacement = MToFloat32::New(alloc(), unbox);
        } else {
          replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
          replacement->setBailoutKind(BailoutKind::SpeculativePhi);
        }
      }
      MOZ_ASSERT(replacement->type() == phiType);

      predecessor->insertAtEnd(replacement);
      phi->replaceOperand(i, replacement);
    }

    return true;
  }

  // Box every typed input.
  for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
    MDefinition* in = phi->getOperand(i);
    if (in->type() == MIRType::Value) {
      continue;
    }

    // The input is being explicitly unboxed, so sneak past and grab the
    // original box. Don't bother optimizing if magic values are involved.
    if (in->isUnbox()) {
      MDefinition* unboxInput = in->toUnbox()->input();
      if (!IsMagicType(unboxInput->type()) && phi->typeIncludes(unboxInput)) {
        in = in->toUnbox()->input();
      }
    }

    // Still typed after the unbox shortcut: box it in the predecessor.
    if (in->type() != MIRType::Value) {
      if (!alloc().ensureBallast()) {
        return false;
      }

      MBasicBlock* pred = phi->block()->getPredecessor(i);
      in = BoxAt(alloc(), pred->lastIns(), in);
    }

    phi->replaceOperand(i, in);
  }

  return true;
}
   2334 
   2335 bool TypeAnalyzer::adjustInputs(MDefinition* def) {
   2336  // Definitions such as MPhi have no type policy.
   2337  if (!def->isInstruction()) {
   2338    return true;
   2339  }
   2340 
   2341  MInstruction* ins = def->toInstruction();
   2342  const TypePolicy* policy = ins->typePolicy();
   2343  if (policy && !policy->adjustInputs(alloc(), ins)) {
   2344    return false;
   2345  }
   2346  return true;
   2347 }
   2348 
// Replace a phi whose type admits only a single value (null, undefined, or a
// magic value) with the equivalent constant, and for OSR compilations guard
// that the incoming OSR value really is that value.
void TypeAnalyzer::replaceRedundantPhi(MPhi* phi) {
  MBasicBlock* block = phi->block();
  js::Value v;
  switch (phi->type()) {
    case MIRType::Undefined:
      v = UndefinedValue();
      break;
    case MIRType::Null:
      v = NullValue();
      break;
    case MIRType::MagicOptimizedOut:
      v = MagicValue(JS_OPTIMIZED_OUT);
      break;
    case MIRType::MagicUninitializedLexical:
      v = MagicValue(JS_UNINITIALIZED_LEXICAL);
      break;
    case MIRType::MagicIsConstructing:
      v = MagicValue(JS_IS_CONSTRUCTING);
      break;
    case MIRType::MagicHole:
    default:
      MOZ_CRASH("unexpected type");
  }
  MConstant* c = MConstant::New(alloc(), v);
  // The instruction pass will insert the box
  block->insertBefore(*(block->begin()), c);
  phi->justReplaceAllUsesWith(c);

  if (shouldSpecializeOsrPhis()) {
    // See shouldSpecializeOsrPhis comment. This is part of the third step,
    // guard the incoming MOsrValue is of this type.
    for (uint32_t i = 0; i < phi->numOperands(); i++) {
      MDefinition* def = phi->getOperand(i);
      if (def->type() != phi->type()) {
        MOZ_ASSERT(def->isOsrValue() || def->isPhi());
        MOZ_ASSERT(def->type() == MIRType::Value);
        MGuardValue* guard = MGuardValue::New(alloc(), def, v);
        guard->setBailoutKind(BailoutKind::SpeculativePhi);
        def->block()->insertBefore(def->block()->lastIns(), guard);
      }
    }
  }
}
   2392 
// Walk the whole graph inserting the conversions phi specialization decided
// on, and let each instruction's type policy adjust its inputs.
bool TypeAnalyzer::insertConversions() {
  // Instructions are processed in reverse postorder: all defs are
  // seen before uses. This ensures that output adjustment (which may rewrite
  // inputs of uses) does not conflict with input adjustment.
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); block++) {
    if (mir->shouldCancel("Insert Conversions")) {
      return false;
    }

    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
         iter != end;) {
      MPhi* phi = *iter++;
      if (IsNullOrUndefined(phi->type()) || IsMagicType(phi->type())) {
        // We can replace this phi with a constant.
        if (!alloc().ensureBallast()) {
          return false;
        }
        replaceRedundantPhi(phi);
        block->discardPhi(phi);
      } else {
        if (!adjustPhiInputs(phi)) {
          return false;
        }
      }
    }

    // AdjustInputs can add/remove/mutate instructions before and after the
    // current instruction. Only increment the iterator after it is finished.
    for (MInstructionIterator iter(block->begin()); iter != block->end();
         iter++) {
      if (!alloc().ensureBallast()) {
        return false;
      }

      if (!adjustInputs(*iter)) {
        return false;
      }
    }
  }
  return true;
}
   2435 
   2436 /* clang-format off */
   2437 //
   2438 // This function tries to emit Float32 specialized operations whenever it's possible.
   2439 // MIR nodes are flagged as:
   2440 // - Producers, when they can create Float32 that might need to be coerced into a Double.
   2441 //   Loads in Float32 arrays and conversions to Float32 are producers.
   2442 // - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
   2443 //   Stores in Float32 arrays and conversions to Float32 are consumers.
   2444 // - Float32 commutative, when using the Float32 instruction instead of the Double instruction
   2445 //   does not result in a compound loss of precision. This is the case for +, -, /, * with 2
   2446 //   operands, for instance. However, an addition with 3 operands is not commutative anymore,
   2447 //   so an intermediate coercion is needed.
   2448 // Except for phis, all these flags are known after Ion building, so they cannot change during
   2449 // the process.
   2450 //
   2451 // The idea behind the algorithm is easy: whenever we can prove that a commutative operation
   2452 // has only producers as inputs and consumers as uses, we can specialize the operation as a
   2453 // float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
   2454 // if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
   2455 //
   2456 // Phis have a special status. Phis need to be flagged as producers or consumers as they can
   2457 // be inputs or outputs of commutative instructions. Fortunately, producers and consumers
   2458 // properties are such that we can deduce the property using all non phis inputs first (which form
   2459 // an initial phi graph) and then propagate all properties from one phi to another using a
   2460 // fixed point algorithm. The algorithm is ensured to terminate as each iteration has less or as
   2461 // many flagged phis as the previous iteration (so the worst steady state case is all phis being
   2462 // flagged as false).
   2463 //
   2464 // In a nutshell, the algorithm applies three passes:
   2465 // 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
   2466 // all its non-phi inputs. Then each phi propagates its value to other phis. If after propagation,
   2467 // the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
   2468 // consumer if all of its uses are consumers.
   2469 // 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
   2470 // the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
   2471 // 3 - Go through all commutative operations and ensure their inputs are all producers and their
   2472 // uses are all consumers.
   2473 //
   2474 /* clang-format on */
// Pass 1 of the Float32 specialization algorithm (see the comment above):
// compute, for every phi, whether all of its uses can consume a Float32
// value. Each phi starts from an optimistic state derived from its non-phi
// uses; a fixed-point loop then invalidates phis whose phi uses were
// invalidated. Returns false on OOM or cancellation.
bool TypeAnalyzer::markPhiConsumers() {
  MOZ_ASSERT(phiWorklist_.empty());

  // Iterate in postorder so worklist is initialized to RPO.
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       ++block) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Consumer Phis - Initial state")) {
      return false;
    }

    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
      MOZ_ASSERT(!phi->isInWorklist());
      // An implicitly-used phi may have uses that are not visible in the
      // graph, so it can never be proven to be a consumer.
      bool canConsumeFloat32 = !phi->isImplicitlyUsed();
      // AND over all non-phi uses; phi uses are resolved by the fixed-point
      // loop below.
      for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
        MDefinition* usedef = use.def();
        canConsumeFloat32 &=
            usedef->isPhi() || usedef->canConsumeFloat32(use.use());
      }
      phi->setCanConsumeFloat32(canConsumeFloat32);
      if (canConsumeFloat32 && !addPhiToWorklist(*phi)) {
        return false;
      }
    }
  }

  // Fixed point: a consumer phi is invalidated when one of its phi uses has
  // been invalidated; the invalidation is then propagated to its phi
  // operands. Terminates because the set of consumer phis only shrinks.
  while (!phiWorklist_.empty()) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Consumer Phis - Fixed point")) {
      return false;
    }

    MPhi* phi = popPhi();
    MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));

    bool validConsumer = true;
    for (MUseDefIterator use(phi); use; use++) {
      MDefinition* def = use.def();
      if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
        validConsumer = false;
        break;
      }
    }

    if (validConsumer) {
      continue;
    }

    // Propagate invalidated phis
    phi->setCanConsumeFloat32(false);
    for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
      MDefinition* input = phi->getOperand(i);
      if (input->isPhi() && !input->isInWorklist() &&
          input->canConsumeFloat32(nullptr /* unused */)) {
        if (!addPhiToWorklist(input->toPhi())) {
          return false;
        }
      }
    }
  }
  return true;
}
   2537 
// Pass 2 of the Float32 specialization algorithm (see the comment above):
// compute, for every phi, whether all of its operands can produce a Float32
// value. Symmetric to markPhiConsumers, but invalidation propagates from a
// phi's operands to its uses. Returns false on OOM or cancellation.
bool TypeAnalyzer::markPhiProducers() {
  MOZ_ASSERT(phiWorklist_.empty());

  // Iterate in reverse postorder so worklist is initialized to PO.
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); ++block) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Producer Phis - initial state")) {
      return false;
    }

    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
      MOZ_ASSERT(!phi->isInWorklist());
      // AND over all non-phi operands; phi operands are resolved by the
      // fixed-point loop below.
      bool canProduceFloat32 = true;
      for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e;
           ++i) {
        MDefinition* input = phi->getOperand(i);
        canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
      }
      phi->setCanProduceFloat32(canProduceFloat32);
      if (canProduceFloat32 && !addPhiToWorklist(*phi)) {
        return false;
      }
    }
  }

  // Fixed point: a producer phi is invalidated when one of its phi operands
  // has been invalidated; the invalidation is then propagated to its phi
  // uses. Terminates because the set of producer phis only shrinks.
  while (!phiWorklist_.empty()) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Producer Phis - Fixed point")) {
      return false;
    }

    MPhi* phi = popPhi();
    MOZ_ASSERT(phi->canProduceFloat32());

    bool validProducer = true;
    for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
      MDefinition* input = phi->getOperand(i);
      if (input->isPhi() && !input->canProduceFloat32()) {
        validProducer = false;
        break;
      }
    }

    if (validProducer) {
      continue;
    }

    // Propagate invalidated phis
    phi->setCanProduceFloat32(false);
    for (MUseDefIterator use(phi); use; use++) {
      MDefinition* def = use.def();
      if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32()) {
        if (!addPhiToWorklist(def->toPhi())) {
          return false;
        }
      }
    }
  }
  return true;
}
   2599 
   2600 bool TypeAnalyzer::specializeValidFloatOps() {
   2601  for (ReversePostorderIterator block(graph.rpoBegin());
   2602       block != graph.rpoEnd(); ++block) {
   2603    if (mir->shouldCancel("Ensure Float32 commutativity - Instructions")) {
   2604      return false;
   2605    }
   2606 
   2607    for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
   2608      if (!ins->isFloat32Commutative()) {
   2609        continue;
   2610      }
   2611 
   2612      if (ins->type() == MIRType::Float32) {
   2613        continue;
   2614      }
   2615 
   2616      if (!alloc().ensureBallast()) {
   2617        return false;
   2618      }
   2619 
   2620      // This call will try to specialize the instruction iff all uses are
   2621      // consumers and all inputs are producers.
   2622      ins->trySpecializeFloat32(alloc());
   2623    }
   2624  }
   2625  return true;
   2626 }
   2627 
   2628 bool TypeAnalyzer::graphContainsFloat32() {
   2629  for (ReversePostorderIterator block(graph.rpoBegin());
   2630       block != graph.rpoEnd(); ++block) {
   2631    for (MDefinitionIterator def(*block); def; def++) {
   2632      if (mir->shouldCancel(
   2633              "Ensure Float32 commutativity - Graph contains Float32")) {
   2634        return false;
   2635      }
   2636 
   2637      if (def->type() == MIRType::Float32) {
   2638        return true;
   2639      }
   2640    }
   2641  }
   2642  return false;
   2643 }
   2644 
   2645 bool TypeAnalyzer::tryEmitFloatOperations() {
   2646  // Asm.js uses the ahead of time type checks to specialize operations, no need
   2647  // to check them again at this point.
   2648  if (mir->compilingWasm()) {
   2649    return true;
   2650  }
   2651 
   2652  // Check ahead of time that there is at least one definition typed as Float32,
   2653  // otherwise we don't need this pass.
   2654  if (!graphContainsFloat32()) {
   2655    return true;
   2656  }
   2657 
   2658  // WarpBuilder skips over code that can't be reached except through
   2659  // a catch block. Locals and arguments may be observable in such
   2660  // code after bailing out, so we can't rely on seeing all uses.
   2661  if (graph.hasTryBlock()) {
   2662    return true;
   2663  }
   2664 
   2665  if (!markPhiConsumers()) {
   2666    return false;
   2667  }
   2668  if (!markPhiProducers()) {
   2669    return false;
   2670  }
   2671  if (!specializeValidFloatOps()) {
   2672    return false;
   2673  }
   2674  return true;
   2675 }
   2676 
// Debug-only verification: after Float32 specialization, every Float32-typed
// definition must flow only into uses that accept a Float32 input. In
// release builds this compiles to a trivial |return true|; it returns false
// only when compilation is cancelled.
bool TypeAnalyzer::checkFloatCoherency() {
#ifdef DEBUG
  // Asserts that all Float32 instructions are flowing into Float32 consumers or
  // specialized operations
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); ++block) {
    if (mir->shouldCancel("Check Float32 coherency")) {
      return false;
    }

    for (MDefinitionIterator def(*block); def; def++) {
      if (def->type() != MIRType::Float32) {
        continue;
      }

      for (MUseDefIterator use(*def); use; use++) {
        MDefinition* consumer = use.def();
        MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
      }
    }
  }
#endif
  return true;
}
   2701 
   2702 static bool HappensBefore(const MDefinition* earlier,
   2703                          const MDefinition* later) {
   2704  MOZ_ASSERT(earlier->block() == later->block());
   2705 
   2706  for (auto* ins : *earlier->block()) {
   2707    if (ins == earlier) {
   2708      return true;
   2709    }
   2710    if (ins == later) {
   2711      return false;
   2712    }
   2713  }
   2714  MOZ_CRASH("earlier and later are instructions in the block");
   2715 }
   2716 
   2717 // Propagate type information from dominating unbox instructions.
   2718 //
   2719 // This optimization applies for example for self-hosted String.prototype
   2720 // functions.
   2721 //
   2722 // Example:
   2723 // ```
   2724 // String.prototype.id = function() {
   2725 //   // Strict mode to avoid ToObject on primitive this-values.
   2726 //   "use strict";
   2727 //
   2728 //   // Template string to apply ToString on the this-value.
   2729 //   return `${this}`;
   2730 // };
   2731 //
   2732 // function f(s) {
   2733 //   // Assume |s| is a string value.
   2734 //   return s.id();
   2735 // }
   2736 // ```
   2737 //
   2738 // Compiles into: (Graph after Scalar Replacement)
   2739 //
   2740 // ┌───────────────────────────────────────────────────────────────────────────┐
   2741 // │                             Block 0                                       │
   2742 // │ resumepoint 1 0 2 2                                                       │
   2743 // │ 0 parameter THIS_SLOT                                           Value     │
   2744 // │ 1 parameter 0                                                   Value     │
   2745 // │ 2 constant undefined                                            Undefined │
   2746 // │ 3 start                                                                   │
   2747 // │ 4 checkoverrecursed                                                       │
   2748 // │ 5 unbox parameter1 to String (fallible)                         String    │
   2749 // │ 6 constant object 1d908053e088 (String)                         Object    │
   2750 // │ 7 guardshape constant6:Object                                   Object    │
   2751 // │ 8 slots guardshape7:Object                                      Slots     │
   2752 // │ 9 loaddynamicslot slots8:Slots (slot 53)                        Value     │
   2753 // │ 10 constant 0x0                                                 Int32     │
   2754 // │ 11 unbox loaddynamicslot9 to Object (fallible)                  Object    │
   2755 // │ 12 nurseryobject                                                Object    │
   2756 // │ 13 guardspecificfunction unbox11:Object nurseryobject12:Object  Object    │
   2757 // │ 14 goto block1                                                            │
   2758 // └──────────────────────────────────┬────────────────────────────────────────┘
   2759 //                                    │
   2760 // ┌──────────────────────────────────▼────────────────────────────────────────┐
   2761 // │                               Block 1                                     │
   2762 // │ ((0)) resumepoint 15 1 15 15 | 1 13 1 0 2 2                               │
   2763 // │ 15 constant undefined                                           Undefined │
   2764 // │ 16 tostring parameter1:Value                                    String    │
   2765 // │ 18 goto block2                                                            │
   2766 // └──────────────────────────────────┬────────────────────────────────────────┘
   2767 //                                    │
   2768 //                     ┌──────────────▼──────────────┐
   2769 //                     │           Block 2           │
   2770 //                     │ resumepoint 16 1 0 2 2      │
   2771 //                     │ 19 return tostring16:String │
   2772 //                     └─────────────────────────────┘
   2773 //
   2774 // The Unbox instruction is used as a type guard. The ToString instruction
   2775 // doesn't use the type information from the preceding Unbox instruction and
   2776 // therefore has to assume its operand can be any value.
   2777 //
   2778 // When instead propagating the type information from the preceding Unbox
   2779 // instruction, this graph is constructed after the "Apply types" phase:
   2780 //
   2781 // ┌───────────────────────────────────────────────────────────────────────────┐
   2782 // │                             Block 0                                       │
   2783 // │ resumepoint 1 0 2 2                                                       │
   2784 // │ 0 parameter THIS_SLOT                                           Value     │
   2785 // │ 1 parameter 0                                                   Value     │
   2786 // │ 2 constant undefined                                            Undefined │
   2787 // │ 3 start                                                                   │
   2788 // │ 4 checkoverrecursed                                                       │
   2789 // │ 5 unbox parameter1 to String (fallible)                         String    │
   2790 // │ 6 constant object 1d908053e088 (String)                         Object    │
   2791 // │ 7 guardshape constant6:Object                                   Object    │
   2792 // │ 8 slots guardshape7:Object                                      Slots     │
   2793 // │ 9 loaddynamicslot slots8:Slots (slot 53)                        Value     │
   2794 // │ 10 constant 0x0                                                 Int32     │
   2795 // │ 11 unbox loaddynamicslot9 to Object (fallible)                  Object    │
   2796 // │ 12 nurseryobject                                                Object    │
   2797 // │ 13 guardspecificfunction unbox11:Object nurseryobject12:Object  Object    │
   2798 // │ 14 goto block1                                                            │
   2799 // └──────────────────────────────────┬────────────────────────────────────────┘
   2800 //                                    │
   2801 // ┌──────────────────────────────────▼────────────────────────────────────────┐
   2802 // │                               Block 1                                     │
   2803 // │ ((0)) resumepoint 15 1 15 15 | 1 13 1 0 2 2                               │
   2804 // │ 15 constant undefined                                           Undefined │
   2805 // │ 20 unbox parameter1 to String (fallible)                        String    │
   2806 // │ 16 tostring parameter1:Value                                    String    │
   2807 // │ 18 goto block2                                                            │
   2808 // └──────────────────────────────────┬────────────────────────────────────────┘
   2809 //                                    │
   2810 //                     ┌──────────────▼─────────────────────┐
   2811 //                     │           Block 2                  │
   2812 //                     │ resumepoint 16 1 0 2 2             │
   2813 //                     │ 21 box tostring16:String     Value │
   2814 //                     │ 19 return box21:Value              │
   2815 //                     └────────────────────────────────────┘
   2816 //
   2817 // GVN will later merge both Unbox instructions and fold away the ToString
   2818 // instruction, so we get this final graph:
   2819 //
   2820 // ┌───────────────────────────────────────────────────────────────────────────┐
   2821 // │                             Block 0                                       │
   2822 // │ resumepoint 1 0 2 2                                                       │
   2823 // │ 0 parameter THIS_SLOT                                           Value     │
   2824 // │ 1 parameter 0                                                   Value     │
   2825 // │ 2 constant undefined                                            Undefined │
   2826 // │ 3 start                                                                   │
   2827 // │ 4 checkoverrecursed                                                       │
   2828 // │ 5 unbox parameter1 to String (fallible)                         String    │
   2829 // │ 6 constant object 1d908053e088 (String)                         Object    │
   2830 // │ 7 guardshape constant6:Object                                   Object    │
   2831 // │ 8 slots guardshape7:Object                                      Slots     │
   2832 // │ 22 loaddynamicslotandunbox slots8:Slots (slot 53)               Object    │
   2833 // │ 11 nurseryobject                                                Object    │
   2834 // │ 12 guardspecificfunction load22:Object nurseryobject11:Object   Object    │
   2835 // │ 13 goto block1                                                            │
   2836 // └──────────────────────────────────┬────────────────────────────────────────┘
   2837 //                                    │
   2838 // ┌──────────────────────────────────▼────────────────────────────────────────┐
   2839 // │                               Block 1                                     │
   2840 // │ ((0)) resumepoint 2 1 2 2 | 1 12 1 0 2 2                                  │
   2841 // │ 14 goto block2                                                            │
   2842 // └──────────────────────────────────┬────────────────────────────────────────┘
   2843 //                                    │
   2844 //                     ┌──────────────▼─────────────────────┐
   2845 //                     │           Block 2                  │
   2846 //                     │ resumepoint 5 1 0 2 2              │
   2847 //                     │ 15 return parameter1:Value         │
   2848 //                     └────────────────────────────────────┘
   2849 //
bool TypeAnalyzer::propagateUnbox() {
  // Visit the blocks in post-order, so that the type information of the closest
  // unbox operation is used.
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Propagate Unbox")) {
      return false;
    }

    // Iterate over all instructions to look for unbox instructions.
    for (MInstructionIterator iter(block->begin()); iter != block->end();
         iter++) {
      if (!iter->isUnbox()) {
        continue;
      }

      auto* unbox = iter->toUnbox();
      auto* input = unbox->input();

      // Ignore unbox operations on typed values.
      if (input->type() != MIRType::Value) {
        continue;
      }

      // Ignore unbox to floating point types, because propagating boxed Int32
      // values as Double can lead to repeated bailouts when later instructions
      // expect Int32 inputs.
      if (IsFloatingPointType(unbox->type())) {
        continue;
      }

      // Inspect other uses of |input| to propagate the unboxed type information
      // from |unbox|. The iterator is advanced before each body runs because
      // replaceProducer() below removes |use| from |input|'s use list.
      for (auto uses = input->usesBegin(); uses != input->usesEnd();) {
        auto* use = *uses++;

        // Ignore resume points.
        if (!use->consumer()->isDefinition()) {
          continue;
        }
        auto* def = use->consumer()->toDefinition();

        // Ignore any unbox operations, including the current |unbox|.
        if (def->isUnbox()) {
          continue;
        }

        // Ignore phi nodes, because we don't yet support them.
        if (def->isPhi()) {
          continue;
        }

        // The unbox operation needs to happen before the other use, otherwise
        // we can't propagate the type information.
        if (unbox->block() == def->block()) {
          // Same block: fall back to comparing positions in the block.
          if (!HappensBefore(unbox, def)) {
            continue;
          }
        } else {
          if (!unbox->block()->dominates(def->block())) {
            continue;
          }
        }

        // Replace the use with |unbox|, so that GVN knows about the actual
        // value type and can more easily fold unnecessary operations. If the
        // instruction actually needs a boxed input, the BoxPolicy type policy
        // will simply unwrap the unbox instruction.
        use->replaceProducer(unbox);

        // The uses in the MIR graph no longer reflect the uses in the bytecode,
        // so we have to mark |input| as implicitly used.
        input->setImplicitlyUsedUnchecked();
      }
    }
  }
  return true;
}
   2928 
   2929 bool TypeAnalyzer::analyze() {
   2930  if (!tryEmitFloatOperations()) {
   2931    return false;
   2932  }
   2933  if (!specializePhis()) {
   2934    return false;
   2935  }
   2936  if (!propagateUnbox()) {
   2937    return false;
   2938  }
   2939  if (!insertConversions()) {
   2940    return false;
   2941  }
   2942  if (!checkFloatCoherency()) {
   2943    return false;
   2944  }
   2945  return true;
   2946 }
   2947 
   2948 bool jit::ApplyTypeInformation(const MIRGenerator* mir, MIRGraph& graph) {
   2949  TypeAnalyzer analyzer(mir, graph);
   2950 
   2951  if (!analyzer.analyze()) {
   2952    return false;
   2953  }
   2954 
   2955  return true;
   2956 }
   2957 
   2958 void jit::RenumberBlocks(MIRGraph& graph) {
   2959  size_t id = 0;
   2960  for (ReversePostorderIterator block(graph.rpoBegin());
   2961       block != graph.rpoEnd(); block++) {
   2962    block->setId(id++);
   2963  }
   2964 }
   2965 
   2966 // A utility for code which adds/deletes blocks. Renumber the remaining blocks,
   2967 // recompute dominators, and optionally recompute AliasAnalysis dependencies.
// Renumber all blocks, rebuild the dominator tree, and (when
// |updateAliasAnalysis| is set) rerun alias analysis. Returns false on OOM
// or cancellation inside the recomputation passes.
bool jit::AccountForCFGChanges(const MIRGenerator* mir, MIRGraph& graph,
                               bool updateAliasAnalysis,
                               bool underValueNumberer) {
  // Renumber the blocks and clear out the old dominator info.
  size_t id = 0;
  for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e;
       ++i) {
    i->clearDominatorInfo();
    i->setId(id++);
  }

  // Recompute dominator info.
  if (!BuildDominatorTree(mir, graph)) {
    return false;
  }

  // If needed, update alias analysis dependencies.
  if (updateAliasAnalysis) {
    if (!AliasAnalysis(mir, graph).analyze()) {
      return false;
    }
  }

  // NOTE(review): |underValueNumberer| appears to relax the coherency
  // assertions for intermediate states produced by the value numberer —
  // confirm against AssertExtendedGraphCoherency.
  AssertExtendedGraphCoherency(graph, underValueNumberer);
  return true;
}
   2994 
   2995 // Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
   2996 // Alias analysis dependencies may be invalid after calling this function.
// Sweep phase for block marking: delete every block not marked, unmark the
// survivors, then renumber blocks and rebuild the dominator tree.
// |numMarkedBlocks| lets the all-marked case skip the removal scan entirely.
bool jit::RemoveUnmarkedBlocks(const MIRGenerator* mir, MIRGraph& graph,
                               uint32_t numMarkedBlocks) {
  if (numMarkedBlocks == graph.numBlocks()) {
    // If all blocks are marked, no blocks need removal. Just clear the
    // marks. We'll still need to update the dominator tree below though,
    // since we may have removed edges even if we didn't remove any blocks.
    graph.unmarkBlocks();
  } else {
    // As we are going to remove edges and basic blocks, we have to mark
    // instructions which would be needed by baseline if we were to
    // bailout.
    for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
      MBasicBlock* block = *it++;
      if (block->isMarked()) {
        continue;
      }

      if (!FlagAllOperandsAsImplicitlyUsed(mir, block)) {
        return false;
      }
    }

    // Find unmarked blocks and remove them.
    for (ReversePostorderIterator iter(graph.rpoBegin());
         iter != graph.rpoEnd();) {
      // Advance the iterator before the block might be removed below.
      MBasicBlock* block = *iter++;

      if (block->isMarked()) {
        block->unmark();
        continue;
      }

      // The block is unreachable. Clear out the loop header flag, as
      // we're doing the sweep of a mark-and-sweep here, so we no longer
      // need to worry about whether an unmarked block is a loop or not.
      if (block->isLoopHeader()) {
        block->clearLoopHeader();
      }

      // Detach the block from its successors' predecessor lists before
      // removing it from the graph.
      for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
        block->getSuccessor(i)->removePredecessor(block);
      }
      graph.removeBlock(block);
    }
  }

  // Renumber the blocks and update the dominator tree.
  return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
}
   3046 
bool jit::BuildPhiReverseMapping(MIRGraph& graph) {
  // Build a mapping such that given a basic block, whose successor has one or
  // more phis, we can find our specific input to that phi. To make this fast
  // mapping work we rely on a specific property of our structured control
  // flow graph: For a block with phis, its predecessors each have only one
  // successor with phis. Consider each case:
  //   * Blocks with less than two predecessors cannot have phis.
  //   * Breaks. A break always has exactly one successor, and the break
  //             catch block has exactly one predecessor for each break, as
  //             well as a final predecessor for the actual loop exit.
  //   * Continues. A continue always has exactly one successor, and the
  //             continue catch block has exactly one predecessor for each
  //             continue, as well as a final predecessor for the actual
  //             loop continuation. The continue itself has exactly one
  //             successor.
  //   * An if. Each branch has exactly one predecessor.
  //   * A switch. Each branch has exactly one predecessor.
  //   * Loop tail. A new block is always created for the exit, and if a
  //             break statement is present, the exit block will forward
  //             directly to the break block.
  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    if (block->phisEmpty()) {
      continue;
    }

    // Assert on the above.
    for (size_t j = 0; j < block->numPredecessors(); j++) {
      MBasicBlock* pred = block->getPredecessor(j);

#ifdef DEBUG
      // Verify the structured-CFG property stated above: at most one
      // successor of |pred| has phis.
      size_t numSuccessorsWithPhis = 0;
      for (size_t k = 0; k < pred->numSuccessors(); k++) {
        MBasicBlock* successor = pred->getSuccessor(k);
        if (!successor->phisEmpty()) {
          numSuccessorsWithPhis++;
        }
      }
      MOZ_ASSERT(numSuccessorsWithPhis <= 1);
#endif

      // Record that |pred|'s phi-carrying successor is |block|, reached as
      // predecessor index |j|.
      pred->setSuccessorWithPhis(*block, j);
    }
  }

  return true;
}
   3094 
   3095 #ifdef DEBUG
   3096 static bool CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B) {
   3097  // Assuming B = succ(A), verify A = pred(B).
   3098  for (size_t i = 0; i < B->numPredecessors(); i++) {
   3099    if (A == B->getPredecessor(i)) {
   3100      return true;
   3101    }
   3102  }
   3103  return false;
   3104 }
   3105 
   3106 static bool CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B) {
   3107  // Assuming B = pred(A), verify A = succ(B).
   3108  for (size_t i = 0; i < B->numSuccessors(); i++) {
   3109    if (A == B->getSuccessor(i)) {
   3110      return true;
   3111    }
   3112  }
   3113  return false;
   3114 }
   3115 
   3116 // If you have issues with the usesBalance assertions, then define the macro
   3117 // _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
   3118 // This output can then be processed with the following awk script to filter and
   3119 // highlight which checks are missing or if there is an unexpected operand /
   3120 // use.
   3121 //
   3122 // define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
   3123 /*
   3124 
   3125 $ ./js 2>stderr.log
   3126 $ gawk '
   3127    /^==Check/ { context = ""; state = $2; }
   3128    /^[a-z]/ { context = context "\n\t" $0; }
   3129    /^==End/ {
   3130      if (state == "Operand") {
   3131        list[context] = list[context] - 1;
   3132      } else if (state == "Use") {
   3133        list[context] = list[context] + 1;
   3134      }
   3135    }
   3136    END {
   3137      for (ctx in list) {
   3138        if (list[ctx] > 0) {
   3139          print "Missing operand check", ctx, "\n"
   3140        }
   3141        if (list[ctx] < 0) {
   3142          print "Missing use check", ctx, "\n"
   3143        }
   3144      };
   3145    }'  < stderr.log
   3146 
   3147 */
   3148 
// Account for one operand edge of |consumer|: assert the producer side of
// |use| is alive and correctly linked back to |consumer|, then decrement
// the operand/use balance counter. Each CheckOperand call must be matched
// by exactly one CheckUse call for the graph to be coherent.
static void CheckOperand(const MNode* consumer, const MUse* use,
                        int32_t* usesBalance) {
 MOZ_ASSERT(use->hasProducer());
 MDefinition* producer = use->producer();
 MOZ_ASSERT(!producer->isDiscarded());
 MOZ_ASSERT(producer->block() != nullptr);
 MOZ_ASSERT(use->consumer() == consumer);
#  ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
 Fprinter print(stderr);
 print.printf("==Check Operand\n");
 use->producer()->dump(print);
 print.printf("  index: %zu\n", use->consumer()->indexOf(use));
 use->consumer()->dump(print);
 print.printf("==End\n");
#  endif
 // Balanced by the increment in CheckUse.
 --*usesBalance;
}
   3166 
// Account for one use edge of |producer|: assert the consumer side of
// |use| is alive and that the consumer's operand slot points back at
// |producer|, then increment the operand/use balance counter.
static void CheckUse(const MDefinition* producer, const MUse* use,
                    int32_t* usesBalance) {
 MOZ_ASSERT(!use->consumer()->block()->isDead());
 MOZ_ASSERT_IF(use->consumer()->isDefinition(),
               !use->consumer()->toDefinition()->isDiscarded());
 MOZ_ASSERT(use->consumer()->block() != nullptr);
 MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
#  ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
 Fprinter print(stderr);
 print.printf("==Check Use\n");
 use->producer()->dump(print);
 print.printf("  index: %zu\n", use->consumer()->indexOf(use));
 use->consumer()->dump(print);
 print.printf("==End\n");
#  endif
 // Balanced by the decrement in CheckOperand.
 ++*usesBalance;
}
   3184 
   3185 // To properly encode entry resume points, we have to ensure that all the
   3186 // operands of the entry resume point are located before the safeInsertTop
   3187 // location.
static void AssertOperandsBeforeSafeInsertTop(MResumePoint* resume) {
 MBasicBlock* block = resume->block();
 // The ordering constraint is not enforced for the OSR block.
 if (block == block->graph().osrBlock()) {
   return;
 }
 MInstruction* stop = block->safeInsertTop();
 for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
   MDefinition* def = resume->getOperand(i);
   // Only non-phi operands defined in this very block are checked.
   if (def->block() != block) {
     continue;
   }
   if (def->isPhi()) {
     continue;
   }

   // Walk the block from the top: we must reach |def| before reaching
   // |stop|, otherwise the operand comes after the safeInsertTop point.
   for (MInstructionIterator ins = block->begin(); true; ins++) {
     if (*ins == def) {
       break;
     }
     MOZ_ASSERT(
         *ins != stop,
         "Resume point operand located after the safeInsertTop location");
   }
 }
}
   3213 #endif  // DEBUG
   3214 
// Verify the basic structural invariants of the MIR graph: entry/OSR block
// shape, successor/predecessor list symmetry, resume point linkage, phi
// arity, and that every operand edge is matched by exactly one use edge
// (tracked via |usesBalance|). Debug-only; a no-op unless full debug
// checks are enabled or |force| is set.
void jit::AssertBasicGraphCoherency(MIRGraph& graph, bool force) {
#ifdef DEBUG
 if (!JitOptions.fullDebugChecks && !force) {
   return;
 }

 MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
 MOZ_ASSERT(graph.entryBlock()->phisEmpty());
 MOZ_ASSERT(!graph.entryBlock()->unreachable());

 if (MBasicBlock* osrBlock = graph.osrBlock()) {
   MOZ_ASSERT(osrBlock->numPredecessors() == 0);
   MOZ_ASSERT(osrBlock->phisEmpty());
   MOZ_ASSERT(osrBlock != graph.entryBlock());
   MOZ_ASSERT(!osrBlock->unreachable());
 }

 if (MResumePoint* resumePoint = graph.entryResumePoint()) {
   MOZ_ASSERT(resumePoint->block() == graph.entryBlock());
 }

 // Assert successor and predecessor list coherency.
 uint32_t count = 0;
 int32_t usesBalance = 0;
 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
      block++) {
   count++;

   MOZ_ASSERT(&block->graph() == &graph);
   MOZ_ASSERT(!block->isDead());
   MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
                 block->entryResumePoint() != nullptr);

   for (size_t i = 0; i < block->numSuccessors(); i++) {
     MOZ_ASSERT(
         CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));
   }

   for (size_t i = 0; i < block->numPredecessors(); i++) {
     MOZ_ASSERT(
         CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));
   }

   // Entry and outer resume points belong to the block itself, not to any
   // instruction.
   if (MResumePoint* resume = block->entryResumePoint()) {
     MOZ_ASSERT(!resume->instruction());
     MOZ_ASSERT(resume->block() == *block);
     AssertOperandsBeforeSafeInsertTop(resume);
   }
   if (MResumePoint* resume = block->outerResumePoint()) {
     MOZ_ASSERT(!resume->instruction());
     MOZ_ASSERT(resume->block() == *block);
   }
   for (MResumePointIterator iter(block->resumePointsBegin());
        iter != block->resumePointsEnd(); iter++) {
     // We cannot yet assert that if there is no instruction then this is
     // the entry resume point, because we are still storing resume points
     // in the InlinePropertyTable.
     MOZ_ASSERT_IF(iter->instruction(),
                   iter->instruction()->block() == *block);
     for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
       CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
     }
   }
   for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
     MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
     MOZ_ASSERT(!phi->isRecoveredOnBailout());
     MOZ_ASSERT(phi->type() != MIRType::None);
     MOZ_ASSERT(phi->dependency() == nullptr);
   }
   for (MDefinitionIterator iter(*block); iter; iter++) {
     MOZ_ASSERT(iter->block() == *block);
     MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
     MOZ_ASSERT(!iter->isDiscarded());
     // Start/Parameter/Osr* instructions may only appear in the entry or
     // OSR blocks.
     MOZ_ASSERT_IF(iter->isStart(),
                   *block == graph.entryBlock() || *block == graph.osrBlock());
     MOZ_ASSERT_IF(iter->isParameter(),
                   *block == graph.entryBlock() || *block == graph.osrBlock());
     MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
     MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());

     // Assert that use chains are valid for this instruction.
     for (uint32_t i = 0, end = iter->numOperands(); i < end; i++) {
       CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
     }
     for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd(); use++) {
       CheckUse(*iter, *use, &usesBalance);
     }

     if (iter->isInstruction()) {
       if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
         MOZ_ASSERT(resume->instruction() == *iter);
         MOZ_ASSERT(resume->block() == *block);
         MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
       }
     }

     if (iter->isRecoveredOnBailout()) {
       MOZ_ASSERT(!iter->hasLiveDefUses());
     }
   }

   // The control instruction is not visited by the MDefinitionIterator.
   MControlInstruction* control = block->lastIns();
   MOZ_ASSERT(control->block() == *block);
   MOZ_ASSERT(!control->hasUses());
   MOZ_ASSERT(control->type() == MIRType::None);
   MOZ_ASSERT(!control->isDiscarded());
   MOZ_ASSERT(!control->isRecoveredOnBailout());
   MOZ_ASSERT(control->resumePoint() == nullptr);
   for (uint32_t i = 0, end = control->numOperands(); i < end; i++) {
     CheckOperand(control, control->getUseFor(i), &usesBalance);
   }
   for (size_t i = 0; i < control->numSuccessors(); i++) {
     MOZ_ASSERT(control->getSuccessor(i));
   }
 }

 // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
 MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
 MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
 MOZ_ASSERT(graph.numBlocks() == count);
#endif
}
   3338 
   3339 #ifdef DEBUG
   3340 static void AssertReversePostorder(MIRGraph& graph) {
   3341  // Check that every block is visited after all its predecessors (except
   3342  // backedges).
   3343  for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();
   3344       ++iter) {
   3345    MBasicBlock* block = *iter;
   3346    MOZ_ASSERT(!block->isMarked());
   3347 
   3348    for (size_t i = 0; i < block->numPredecessors(); i++) {
   3349      MBasicBlock* pred = block->getPredecessor(i);
   3350      if (!pred->isMarked()) {
   3351        MOZ_ASSERT(pred->isLoopBackedge());
   3352        MOZ_ASSERT(block->backedge() == pred);
   3353      }
   3354    }
   3355 
   3356    block->mark();
   3357  }
   3358 
   3359  graph.unmarkBlocks();
   3360 }
   3361 #endif
   3362 
   3363 #ifdef DEBUG
// Verify the dominator tree invariants: immediate-dominator links, the
// immediately-dominated child lists, and the numDominated() subtree sizes.
static void AssertDominatorTree(MIRGraph& graph) {
 // Check dominators.

 // The entry block (and the OSR block, if present) are tree roots and
 // dominate themselves.
 MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
 if (MBasicBlock* osrBlock = graph.osrBlock()) {
   MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
 } else {
   // Without an OSR block, the entry block dominates everything.
   MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
 }

 // |i| counts the blocks remaining to visit; no block can dominate more
 // blocks than remain from its position onward.
 size_t i = graph.numBlocks();
 size_t totalNumDominated = 0;
 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
      block++) {
   MOZ_ASSERT(block->dominates(*block));

   MBasicBlock* idom = block->immediateDominator();
   MOZ_ASSERT(idom->dominates(*block));
   MOZ_ASSERT(idom == *block || idom->id() < block->id());

   if (idom == *block) {
     // Self-dominating blocks are tree roots; sum their subtree sizes.
     totalNumDominated += block->numDominated();
   } else {
     // A non-root block must be listed among its idom's children.
     bool foundInParent = false;
     for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
       if (idom->getImmediatelyDominatedBlock(j) == *block) {
         foundInParent = true;
         break;
       }
     }
     MOZ_ASSERT(foundInParent);
   }

   // numDominated() must equal 1 (the block itself) plus the sizes of all
   // immediately dominated subtrees.
   size_t numDominated = 1;
   for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
     MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
     MOZ_ASSERT(block->dominates(dom));
     MOZ_ASSERT(dom->id() > block->id());
     MOZ_ASSERT(dom->immediateDominator() == *block);

     numDominated += dom->numDominated();
   }
   MOZ_ASSERT(block->numDominated() == numDominated);
   MOZ_ASSERT(block->numDominated() <= i);
   MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
   i--;
 }
 MOZ_ASSERT(i == 0);
 // Every block must belong to exactly one root's subtree.
 MOZ_ASSERT(totalNumDominated == graph.numBlocks());
}
   3414 #endif
   3415 
   3416 void jit::AssertGraphCoherency(MIRGraph& graph, bool force) {
   3417 #ifdef DEBUG
   3418  if (!JitOptions.checkGraphConsistency) {
   3419    return;
   3420  }
   3421  if (!JitOptions.fullDebugChecks && !force) {
   3422    return;
   3423  }
   3424  AssertBasicGraphCoherency(graph, force);
   3425  AssertReversePostorder(graph);
   3426 #endif
   3427 }
   3428 
   3429 #ifdef DEBUG
   3430 static bool IsResumableMIRType(MIRType type) {
   3431  // see CodeGeneratorShared::encodeAllocation
   3432  switch (type) {
   3433    case MIRType::Undefined:
   3434    case MIRType::Null:
   3435    case MIRType::Boolean:
   3436    case MIRType::Int32:
   3437    case MIRType::Double:
   3438    case MIRType::Float32:
   3439    case MIRType::String:
   3440    case MIRType::Symbol:
   3441    case MIRType::BigInt:
   3442    case MIRType::Object:
   3443    case MIRType::Shape:
   3444    case MIRType::MagicOptimizedOut:
   3445    case MIRType::MagicUninitializedLexical:
   3446    case MIRType::MagicIsConstructing:
   3447    case MIRType::Value:
   3448    case MIRType::Int64:
   3449    case MIRType::IntPtr:
   3450      return true;
   3451 
   3452    case MIRType::Simd128:
   3453    case MIRType::MagicHole:
   3454    case MIRType::None:
   3455    case MIRType::Slots:
   3456    case MIRType::Elements:
   3457    case MIRType::Pointer:
   3458    case MIRType::WasmAnyRef:
   3459    case MIRType::WasmStructData:
   3460    case MIRType::WasmArrayData:
   3461    case MIRType::StackResults:
   3462      return false;
   3463  }
   3464  MOZ_CRASH("Unknown MIRType.");
   3465 }
   3466 
   3467 static void AssertResumableOperands(MNode* node) {
   3468  for (size_t i = 0, e = node->numOperands(); i < e; ++i) {
   3469    MDefinition* op = node->getOperand(i);
   3470    if (op->isRecoveredOnBailout()) {
   3471      continue;
   3472    }
   3473    MOZ_ASSERT(IsResumableMIRType(op->type()),
   3474               "Resume point cannot encode its operands");
   3475  }
   3476 }
   3477 
   3478 static void AssertIfResumableInstruction(MDefinition* def) {
   3479  if (!def->isRecoveredOnBailout()) {
   3480    return;
   3481  }
   3482  AssertResumableOperands(def);
   3483 }
   3484 
   3485 static void AssertResumePointDominatedByOperands(MResumePoint* resume) {
   3486  for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
   3487    MDefinition* op = resume->getOperand(i);
   3488    MOZ_ASSERT(op->block()->dominates(resume->block()),
   3489               "Resume point is not dominated by its operands");
   3490  }
   3491 }
   3492 #endif  // DEBUG
   3493 
   3494 // Checks the basic GraphCoherency but also other conditions that
   3495 // do not hold immediately (such as the fact that critical edges
   3496 // are split, or conditions related to wasm semantics)
void jit::AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer,
                                      bool force) {
#ifdef DEBUG
 if (!JitOptions.checkGraphConsistency) {
   return;
 }
 if (!JitOptions.fullDebugChecks && !force) {
   return;
 }

 AssertGraphCoherency(graph, force);

 AssertDominatorTree(graph);

 // Block ids must be consecutive in iteration (RPO) order.
 DebugOnly<uint32_t> idx = 0;
 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
      block++) {
   MOZ_ASSERT(block->id() == idx);
   ++idx;

   // No critical edges: a block with multiple successors may only flow
   // into blocks with a single predecessor.
   if (block->numSuccessors() > 1) {
     for (size_t i = 0; i < block->numSuccessors(); i++) {
       MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
     }
   }

   if (block->isLoopHeader()) {
     if (underValueNumberer && block->numPredecessors() == 3) {
       // Fixup block.
       MOZ_ASSERT(block->getPredecessor(1)->numPredecessors() == 0);
       MOZ_ASSERT(graph.osrBlock(),
                  "Fixup blocks should only exists if we have an osr block.");
     } else {
       // A loop header normally has exactly the entry edge and the
       // backedge.
       MOZ_ASSERT(block->numPredecessors() == 2);
     }
     MBasicBlock* backedge = block->backedge();
     MOZ_ASSERT(backedge->id() >= block->id());
     MOZ_ASSERT(backedge->numSuccessors() == 1);
     MOZ_ASSERT(backedge->getSuccessor(0) == *block);
   }

   // Every predecessor of a block with phis must point back at this block
   // as its unique phi successor, at the right operand position.
   if (!block->phisEmpty()) {
     for (size_t i = 0; i < block->numPredecessors(); i++) {
       MBasicBlock* pred = block->getPredecessor(i);
       MOZ_ASSERT(pred->successorWithPhis() == *block);
       MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
     }
   }

   // At most one successor of any block may have phis.
   uint32_t successorWithPhis = 0;
   for (size_t i = 0; i < block->numSuccessors(); i++) {
     if (!block->getSuccessor(i)->phisEmpty()) {
       successorWithPhis++;
     }
   }

   MOZ_ASSERT(successorWithPhis <= 1);
   MOZ_ASSERT((successorWithPhis != 0) ==
              (block->successorWithPhis() != nullptr));

   // Verify that phi operands dominate the corresponding CFG predecessor
   // edges.
   for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
        iter != end; ++iter) {
     MPhi* phi = *iter;
     for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
       MOZ_ASSERT(
           phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
           "Phi input is not dominated by its operand");
     }
   }

   // Verify that instructions are dominated by their operands.
   for (MInstructionIterator iter(block->begin()), end(block->end());
        iter != end; ++iter) {
     MInstruction* ins = *iter;
     for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
       MDefinition* op = ins->getOperand(i);
       MBasicBlock* opBlock = op->block();
       MOZ_ASSERT(opBlock->dominates(*block),
                  "Instruction is not dominated by its operands");

       // If the operand is an instruction in the same block, check
       // that it comes first.
       if (opBlock == *block && !op->isPhi()) {
         MInstructionIterator opIter = block->begin(op->toInstruction());
         do {
           ++opIter;
           MOZ_ASSERT(opIter != block->end(),
                      "Operand in same block as instruction does not precede");
         } while (*opIter != ins);
       }
     }
     AssertIfResumableInstruction(ins);
     if (MResumePoint* resume = ins->resumePoint()) {
       AssertResumePointDominatedByOperands(resume);
       AssertResumableOperands(resume);
     }
   }

   // Verify that the block resume points are dominated by their operands.
   if (MResumePoint* resume = block->entryResumePoint()) {
     AssertResumePointDominatedByOperands(resume);
     AssertResumableOperands(resume);
   }
   if (MResumePoint* resume = block->outerResumePoint()) {
     AssertResumePointDominatedByOperands(resume);
     AssertResumableOperands(resume);
   }

   // Verify that any nodes with a wasm ref type have MIRType WasmAnyRef.
   for (MDefinitionIterator def(*block); def; def++) {
     MOZ_ASSERT_IF(def->wasmRefType().isSome(),
                   def->type() == MIRType::WasmAnyRef);
   }
 }
#endif
}
   3616 
// A registered bounds check plus the dominator-tree traversal index past
// which it no longer dominates the blocks being visited.
struct BoundsCheckInfo {
 MBoundsCheck* check;
 // First pre-order traversal index at which |check| stops dominating.
 uint32_t validEnd;
};

// Maps the offset-insensitive hash of a bounds check (see
// BoundsCheckHashIgnoreOffset) to the most recently registered check.
using BoundsCheckMap =
    HashMap<uint32_t, BoundsCheckInfo, DefaultHasher<uint32_t>, JitAllocPolicy>;
   3624 
   3625 // Compute a hash for bounds checks which ignores constant offsets in the index.
   3626 static HashNumber BoundsCheckHashIgnoreOffset(MBoundsCheck* check) {
   3627  SimpleLinearSum indexSum = ExtractLinearSum(check->index());
   3628  uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
   3629  uintptr_t length = uintptr_t(check->length());
   3630  return index ^ length;
   3631 }
   3632 
   3633 static MBoundsCheck* FindDominatingBoundsCheck(BoundsCheckMap& checks,
   3634                                               MBoundsCheck* check,
   3635                                               size_t index) {
   3636  // Since we are traversing the dominator tree in pre-order, when we
   3637  // are looking at the |index|-th block, the next numDominated() blocks
   3638  // we traverse are precisely the set of blocks that are dominated.
   3639  //
   3640  // So, this value is visible in all blocks if:
   3641  // index <= index + ins->block->numDominated()
   3642  // and becomes invalid after that.
   3643  HashNumber hash = BoundsCheckHashIgnoreOffset(check);
   3644  BoundsCheckMap::Ptr p = checks.lookup(hash);
   3645  if (!p || index >= p->value().validEnd) {
   3646    // We didn't find a dominating bounds check.
   3647    BoundsCheckInfo info;
   3648    info.check = check;
   3649    info.validEnd = index + check->block()->numDominated();
   3650 
   3651    if (!checks.put(hash, info)) return nullptr;
   3652 
   3653    return check;
   3654  }
   3655 
   3656  return p->value().check;
   3657 }
   3658 
   3659 static MathSpace ExtractMathSpace(MDefinition* ins) {
   3660  MOZ_ASSERT(ins->isAdd() || ins->isSub());
   3661  MBinaryArithInstruction* arith = nullptr;
   3662  if (ins->isAdd()) {
   3663    arith = ins->toAdd();
   3664  } else {
   3665    arith = ins->toSub();
   3666  }
   3667  switch (arith->truncateKind()) {
   3668    case TruncateKind::NoTruncate:
   3669    case TruncateKind::TruncateAfterBailouts:
   3670      // TruncateAfterBailouts is considered as infinite space because the
   3671      // LinearSum will effectively remove the bailout check.
   3672      return MathSpace::Infinite;
   3673    case TruncateKind::IndirectTruncate:
   3674    case TruncateKind::Truncate:
   3675      return MathSpace::Modulo;
   3676  }
   3677  MOZ_CRASH("Unknown TruncateKind");
   3678 }
   3679 
   3680 static bool MonotoneAdd(int32_t lhs, int32_t rhs) {
   3681  return (lhs >= 0 && rhs >= 0) || (lhs <= 0 && rhs <= 0);
   3682 }
   3683 
   3684 static bool MonotoneSub(int32_t lhs, int32_t rhs) {
   3685  return (lhs >= 0 && rhs <= 0) || (lhs <= 0 && rhs >= 0);
   3686 }
   3687 
   3688 // Extract a linear sum from ins, if possible (otherwise giving the
   3689 // sum 'ins + 0').
SimpleLinearSum jit::ExtractLinearSum(MDefinition* ins, MathSpace space,
                                     int32_t recursionDepth) {
 // Bound the recursion over nested add/sub chains; past the limit, treat
 // |ins| as an opaque term.
 const int32_t SAFE_RECURSION_LIMIT = 100;
 if (recursionDepth > SAFE_RECURSION_LIMIT) {
   return SimpleLinearSum(ins, 0);
 }

 // Unwrap Int32ToIntPtr. This instruction only changes the representation
 // (int32_t to intptr_t) without affecting the value.
 if (ins->isInt32ToIntPtr()) {
   ins = ins->toInt32ToIntPtr()->input();
 }

 // Look through Beta nodes to the underlying definition.
 if (ins->isBeta()) {
   ins = ins->getOperand(0);
 }

 MOZ_ASSERT(!ins->isInt32ToIntPtr());

 if (ins->type() != MIRType::Int32) {
   return SimpleLinearSum(ins, 0);
 }

 // A constant is the sum 'nullptr + n'.
 if (ins->isConstant()) {
   return SimpleLinearSum(nullptr, ins->toConstant()->toInt32());
 }

 if (!ins->isAdd() && !ins->isSub()) {
   return SimpleLinearSum(ins, 0);
 }

 // Only allow math which are in the same space.
 MathSpace insSpace = ExtractMathSpace(ins);
 if (space == MathSpace::Unknown) {
   space = insSpace;
 } else if (space != insSpace) {
   return SimpleLinearSum(ins, 0);
 }
 MOZ_ASSERT(space == MathSpace::Modulo || space == MathSpace::Infinite);

 // Truncated (modulo 2^32) math is not decomposed any further.
 if (space == MathSpace::Modulo) {
   return SimpleLinearSum(ins, 0);
 }

 MDefinition* lhs = ins->getOperand(0);
 MDefinition* rhs = ins->getOperand(1);
 if (lhs->type() != MIRType::Int32 || rhs->type() != MIRType::Int32) {
   return SimpleLinearSum(ins, 0);
 }

 // Extract linear sums of each operand.
 SimpleLinearSum lsum = ExtractLinearSum(lhs, space, recursionDepth + 1);
 SimpleLinearSum rsum = ExtractLinearSum(rhs, space, recursionDepth + 1);

 // LinearSum only considers a single term operand, if both sides have
 // terms, then ignore extracted linear sums.
 if (lsum.term && rsum.term) {
   return SimpleLinearSum(ins, 0);
 }

 // Check if this is of the form <SUM> + n or n + <SUM>.
 if (ins->isAdd()) {
   int32_t constant;
   // NOTE(review): given the early Modulo return above, |space| is always
   // Infinite by this point, so this branch looks unreachable — confirm
   // whether it is kept deliberately.
   if (space == MathSpace::Modulo) {
     constant = uint32_t(lsum.constant) + uint32_t(rsum.constant);
   } else if (!mozilla::SafeAdd(lsum.constant, rsum.constant, &constant) ||
              !MonotoneAdd(lsum.constant, rsum.constant)) {
     return SimpleLinearSum(ins, 0);
   }
   return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
 }

 MOZ_ASSERT(ins->isSub());
 // Check if this is of the form <SUM> - n.
 if (lsum.term) {
   int32_t constant;
   // NOTE(review): same as the add case — |space| cannot be Modulo here.
   if (space == MathSpace::Modulo) {
     constant = uint32_t(lsum.constant) - uint32_t(rsum.constant);
   } else if (!mozilla::SafeSub(lsum.constant, rsum.constant, &constant) ||
              !MonotoneSub(lsum.constant, rsum.constant)) {
     return SimpleLinearSum(ins, 0);
   }
   return SimpleLinearSum(lsum.term, constant);
 }

 // Ignore any of the form n - <SUM>.
 return SimpleLinearSum(ins, 0);
}
   3778 
   3779 // Extract a linear inequality holding when a boolean test goes in the
   3780 // specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
bool jit::ExtractLinearInequality(const MTest* test, BranchDirection direction,
                                 SimpleLinearSum* plhs, MDefinition** prhs,
                                 bool* plessEqual) {
 // Only tests branching directly on an MCompare are handled.
 if (!test->getOperand(0)->isCompare()) {
   return false;
 }

 MCompare* compare = test->getOperand(0)->toCompare();

 MDefinition* lhs = compare->getOperand(0);
 MDefinition* rhs = compare->getOperand(1);

 // TODO: optimize Compare_UInt32
 if (!compare->isInt32Comparison()) {
   return false;
 }

 MOZ_ASSERT(lhs->type() == MIRType::Int32);
 MOZ_ASSERT(rhs->type() == MIRType::Int32);

 // On the false branch, the negated comparison holds instead.
 JSOp jsop = compare->jsop();
 if (direction == FALSE_BRANCH) {
   jsop = NegateCompareOp(jsop);
 }

 SimpleLinearSum lsum = ExtractLinearSum(lhs);
 SimpleLinearSum rsum = ExtractLinearSum(rhs);

 // Fold both constants onto the left-hand side: lhs + C <op> rhs.
 if (!mozilla::SafeSub(lsum.constant, rsum.constant, &lsum.constant)) {
   return false;
 }

 // Normalize operations to use <= or >=.
 switch (jsop) {
   case JSOp::Le:
     *plessEqual = true;
     break;
   case JSOp::Lt:
     /* x < y ==> x + 1 <= y */
     if (!mozilla::SafeAdd(lsum.constant, 1, &lsum.constant)) {
       return false;
     }
     *plessEqual = true;
     break;
   case JSOp::Ge:
     *plessEqual = false;
     break;
   case JSOp::Gt:
     /* x > y ==> x - 1 >= y */
     if (!mozilla::SafeSub(lsum.constant, 1, &lsum.constant)) {
       return false;
     }
     *plessEqual = false;
     break;
   default:
     // Other operators (equality, etc.) do not yield a linear inequality.
     return false;
 }

 *plhs = lsum;
 *prhs = rsum.term;

 return true;
}
   3844 
// Try to fold |dominated| into an equivalent dominating bounds check found
// through |checks|. On success *eliminated is set and the dominating check
// is widened to cover both index ranges. Returning false aborts the pass
// (OOM or range-arithmetic overflow); returning true with *eliminated
// still false means the check simply could not be coalesced.
static bool TryEliminateBoundsCheck(BoundsCheckMap& checks, size_t blockIndex,
                                   MBoundsCheck* dominated, bool* eliminated) {
 MOZ_ASSERT(!*eliminated);

 // Replace all uses of the bounds check with the actual index.
 // This is (a) necessary, because we can coalesce two different
 // bounds checks and would otherwise use the wrong index and
 // (b) helps register allocation. Note that this is safe since
 // no other pass after bounds check elimination moves instructions.
 dominated->replaceAllUsesWith(dominated->index());

 if (!dominated->isMovable()) {
   return true;
 }

 if (!dominated->fallible()) {
   return true;
 }

 MBoundsCheck* dominating =
     FindDominatingBoundsCheck(checks, dominated, blockIndex);
 if (!dominating) {
   // OOM while registering the check in the map.
   return false;
 }

 if (dominating == dominated) {
   // We didn't find a dominating bounds check.
   return true;
 }

 // We found two bounds checks with the same hash number, but we still have
 // to make sure the lengths and index terms are equal.
 if (dominating->length() != dominated->length()) {
   return true;
 }

 SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
 SimpleLinearSum sumB = ExtractLinearSum(dominated->index());

 // Both terms should be nullptr or the same definition.
 if (sumA.term != sumB.term) {
   return true;
 }

 // This bounds check is redundant.
 *eliminated = true;

 // Normalize the ranges according to the constant offsets in the two indexes.
 int32_t minimumA, maximumA, minimumB, maximumB;
 if (!mozilla::SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
     !mozilla::SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
     !mozilla::SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
     !mozilla::SafeAdd(sumB.constant, dominated->maximum(), &maximumB)) {
   // NOTE(review): overflow aborts the whole pass even though *eliminated
   // has already been set — confirm callers treat a false return as fatal.
   return false;
 }

 // Update the dominating check to cover both ranges, denormalizing the
 // result per the constant offset in the index.
 int32_t newMinimum, newMaximum;
 if (!mozilla::SafeSub(std::min(minimumA, minimumB), sumA.constant,
                       &newMinimum) ||
     !mozilla::SafeSub(std::max(maximumA, maximumB), sumA.constant,
                       &newMaximum)) {
   return false;
 }

 dominating->setMinimum(newMinimum);
 dominating->setMaximum(newMaximum);
 dominating->setBailoutKind(BailoutKind::HoistBoundsCheck);

 return true;
}
   3917 
// Eliminate checks which are redundant given each other or other instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool jit::EliminateRedundantChecks(MIRGraph& graph) {
  // Map from (length, index-term) hashes to the first bounds check seen,
  // keyed so that checks differing only in constant offset collide.
  BoundsCheckMap checks(graph.alloc());

  // Stack for pre-order CFG traversal.
  Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());

  // The index of the current block in the CFG traversal.
  size_t index = 0;

  // Add all self-dominating blocks to the worklist.
  // This includes all roots. Order does not matter.
  for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
    MBasicBlock* block = *i;
    if (block->immediateDominator() == block) {
      if (!worklist.append(block)) {
        return false;
      }
    }
  }

  // Starting from each self-dominating block, traverse the CFG in pre-order.
  // Pre-order over the dominator tree guarantees a dominating check is
  // visited before any check it dominates.
  while (!worklist.empty()) {
    MBasicBlock* block = worklist.popCopy();

    // Add all immediate dominators to the front of the worklist.
    if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                         block->immediatelyDominatedBlocksEnd())) {
      return false;
    }

    for (MDefinitionIterator iter(block); iter;) {
      // Advance the iterator before potentially discarding |def| below, so
      // the removal cannot invalidate the iteration.
      MDefinition* def = *iter++;

      if (!def->isBoundsCheck()) {
        continue;
      }
      auto* boundsCheck = def->toBoundsCheck();

      bool eliminated = false;
      if (!TryEliminateBoundsCheck(checks, index, boundsCheck, &eliminated)) {
        return false;
      }

      // TryEliminateBoundsCheck already redirected the check's uses, so the
      // instruction can be discarded without leaving dangling uses.
      if (eliminated) {
        block->discard(boundsCheck);
      }
    }
    index++;
  }

  MOZ_ASSERT(index == graph.numBlocks());

  return true;
}
   3982 
   3983 static bool ShapeGuardIsRedundant(MGuardShape* guard,
   3984                                  const MDefinition* storeObject,
   3985                                  const Shape* storeShape) {
   3986  const MDefinition* guardObject = guard->object()->skipObjectGuards();
   3987  if (guardObject != storeObject) {
   3988    JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different objects (%d vs %d)",
   3989            guardObject->id(), storeObject->id());
   3990    return false;
   3991  }
   3992 
   3993  const Shape* guardShape = guard->shape();
   3994  if (guardShape != storeShape) {
   3995    JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different shapes");
   3996    return false;
   3997  }
   3998 
   3999  return true;
   4000 }
   4001 
// Eliminate shape guards which are redundant given other instructions.
//
// A shape guard is redundant if we can prove that the object being
// guarded already has the correct shape. The conditions for doing so
// are as follows:
//
// 1. We can see the most recent change to the shape of this object.
//    (This can be an AddAndStoreSlot, an AllocateAndStoreSlot, or the
//    creation of the object itself.
// 2. That mutation dominates the shape guard.
// 3. The shape that was assigned at that point matches the shape
//    we expect.
//
// If all of these conditions hold, then we can remove the shape guard.
// In debug, we replace it with an AssertShape to help verify correctness.
bool jit::EliminateRedundantShapeGuards(MIRGraph& graph) {
  JitSpew(JitSpew_RedundantShapeGuards, "Begin");

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      // Advance the iterator before potentially discarding |ins| below.
      MInstruction* ins = *insIter;
      insIter++;

      // Skip instructions that aren't shape guards.
      if (!ins->isGuardShape()) {
        continue;
      }
      MGuardShape* guard = ins->toGuardShape();
      // The guard's dependency is the last instruction that could have
      // changed the guarded object's shape (condition 1).
      MDefinition* lastStore = guard->dependency();

      JitSpew(JitSpew_RedundantShapeGuards, "Visit shape guard %d",
              guard->id());
      JitSpewIndent spewIndent(JitSpew_RedundantShapeGuards);

      // Condition 2: the mutation must still be in the graph and must
      // dominate the guard.
      if (lastStore->isDiscarded() || lastStore->block()->isDead() ||
          !lastStore->block()->dominates(guard->block())) {
        JitSpew(JitSpew_RedundantShapeGuards,
                "SKIP: ins %d does not dominate block %d", lastStore->id(),
                guard->block()->id());
        continue;
      }

      // Condition 3: the shape assigned by the mutation must match the
      // guard's expected shape.
      if (lastStore->isAddAndStoreSlot()) {
        auto* add = lastStore->toAddAndStoreSlot();
        auto* addObject = add->object()->skipObjectGuards();
        if (!ShapeGuardIsRedundant(guard, addObject, add->shape())) {
          continue;
        }
      } else if (lastStore->isAllocateAndStoreSlot()) {
        auto* allocate = lastStore->toAllocateAndStoreSlot();
        auto* allocateObject = allocate->object()->skipObjectGuards();
        if (!ShapeGuardIsRedundant(guard, allocateObject, allocate->shape())) {
          continue;
        }
      } else if (lastStore->isStart()) {
        // The guard doesn't depend on any other instruction that is modifying
        // the object operand, so we check the object operand directly.
        auto* obj = guard->object()->skipObjectGuards();

        const Shape* initialShape = nullptr;
        if (obj->isNewObject()) {
          auto* templateObject = obj->toNewObject()->templateObject();
          if (!templateObject) {
            JitSpew(JitSpew_RedundantShapeGuards, "SKIP: no template");
            continue;
          }
          initialShape = templateObject->shape();
        } else if (obj->isNewPlainObject()) {
          initialShape = obj->toNewPlainObject()->shape();
        } else {
          JitSpew(JitSpew_RedundantShapeGuards,
                  "SKIP: not NewObject or NewPlainObject (%d)", obj->id());
          continue;
        }
        if (initialShape != guard->shape()) {
          JitSpew(JitSpew_RedundantShapeGuards, "SKIP: shapes don't match");
          continue;
        }
      } else {
        JitSpew(JitSpew_RedundantShapeGuards,
                "SKIP: Last store not supported (%d)", lastStore->id());
        continue;
      }

#ifdef DEBUG
      // In debug builds, keep a runtime check of the elided guard's claim.
      if (!graph.alloc().ensureBallast()) {
        return false;
      }
      auto* assert = MAssertShape::New(graph.alloc(), guard->object(),
                                       const_cast<Shape*>(guard->shape()));
      guard->block()->insertBefore(guard, assert);
#endif

      JitSpew(JitSpew_RedundantShapeGuards, "SUCCESS: Removing shape guard %d",
              guard->id());
      // Forward the guard's uses to its input before discarding it.
      guard->replaceAllUsesWith(guard->input());
      guard->block()->discard(guard);
    }
  }

  return true;
}
   4106 
// Scan the instructions following |allocation| in its block and elide GC
// barriers for stores into the freshly allocated object. The scan stops at
// the first instruction that touches a different object or is not on the
// whitelist of GC-safe opcodes below. Returns false only on OOM (in the
// DEBUG instrumentation path); stopping early is not an error.
[[nodiscard]] static bool TryEliminateGCBarriersForAllocation(
    TempAllocator& alloc, MInstruction* allocation) {
  MOZ_ASSERT(allocation->type() == MIRType::Object);

  JitSpew(JitSpew_RedundantGCBarriers, "Analyzing allocation %s",
          allocation->opName());

  MBasicBlock* block = allocation->block();
  MInstructionIterator insIter(block->begin(allocation));

  // Skip `allocation`.
  MOZ_ASSERT(*insIter == allocation);
  insIter++;

  // Try to optimize the other instructions in the block.
  while (insIter != block->end()) {
    // Advance the iterator before potentially discarding |ins| below.
    MInstruction* ins = *insIter;
    insIter++;
    switch (ins->op()) {
      case MDefinition::Opcode::Constant:
      case MDefinition::Opcode::Box:
      case MDefinition::Opcode::Unbox:
      case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
        // These instructions can't trigger GC or affect this analysis in other
        // ways.
        break;
      case MDefinition::Opcode::StoreFixedSlot: {
        auto* store = ins->toStoreFixedSlot();
        if (store->object() != allocation) {
          JitSpew(JitSpew_RedundantGCBarriers,
                  "Stopped at StoreFixedSlot for other object");
          return true;
        }
        // Storing into the new object's own slots needs no pre-barrier.
        store->setNeedsBarrier(false);
        JitSpew(JitSpew_RedundantGCBarriers, "Elided StoreFixedSlot barrier");
        break;
      }
      case MDefinition::Opcode::PostWriteBarrier: {
        auto* barrier = ins->toPostWriteBarrier();
        if (barrier->object() != allocation) {
          JitSpew(JitSpew_RedundantGCBarriers,
                  "Stopped at PostWriteBarrier for other object");
          return true;
        }
#ifdef DEBUG
        // In debug builds, replace the barrier with an assertion that
        // eliding it was indeed safe. The asserted value must be boxed.
        if (!alloc.ensureBallast()) {
          return false;
        }
        MDefinition* value = barrier->value();
        if (value->type() != MIRType::Value) {
          value = MBox::New(alloc, value);
          block->insertBefore(barrier, value->toInstruction());
        }
        auto* assert =
            MAssertCanElidePostWriteBarrier::New(alloc, allocation, value);
        block->insertBefore(barrier, assert);
#endif
        block->discard(barrier);
        JitSpew(JitSpew_RedundantGCBarriers, "Elided PostWriteBarrier");
        break;
      }
      default:
        // Conservatively stop at any opcode not known to be safe.
        JitSpew(JitSpew_RedundantGCBarriers,
                "Stopped at unsupported instruction %s", ins->opName());
        return true;
    }
  }

  return true;
}
   4177 
   4178 bool jit::EliminateRedundantGCBarriers(MIRGraph& graph) {
   4179  // Peephole optimization for the following pattern:
   4180  //
   4181  //   0: MNewCallObject
   4182  //   1: MStoreFixedSlot(0, ...)
   4183  //   2: MStoreFixedSlot(0, ...)
   4184  //   3: MPostWriteBarrier(0, ...)
   4185  //
   4186  // If the instructions immediately following the allocation instruction can't
   4187  // trigger GC and we are storing to the new object's slots, we can elide the
   4188  // pre-barrier.
   4189  //
   4190  // We also eliminate the post barrier and (in debug builds) replace it with an
   4191  // assertion.
   4192  //
   4193  // See also the similar optimizations in WarpBuilder::buildCallObject.
   4194 
   4195  JitSpew(JitSpew_RedundantGCBarriers, "Begin");
   4196 
   4197  for (ReversePostorderIterator block = graph.rpoBegin();
   4198       block != graph.rpoEnd(); block++) {
   4199    for (MInstructionIterator insIter(block->begin()); insIter != block->end();
   4200         insIter++) {
   4201      MInstruction* ins = *insIter;
   4202      if (ins->isNewCallObject()) {
   4203        MNewCallObject* allocation = ins->toNewCallObject();
   4204        // We can only eliminate the post barrier if we know the call object
   4205        // will be allocated in the nursery.
   4206        if (allocation->initialHeap() == gc::Heap::Default) {
   4207          if (!TryEliminateGCBarriersForAllocation(graph.alloc(), allocation)) {
   4208            return false;
   4209          }
   4210        }
   4211      }
   4212    }
   4213  }
   4214 
   4215  return true;
   4216 }
   4217 
bool jit::MarkLoadsUsedAsPropertyKeys(MIRGraph& graph) {
  // When a string is used as a property key, or as the key for a Map or Set, we
  // require it to be atomized. To avoid repeatedly atomizing the same string,
  // this analysis looks for cases where we are loading a value from the slot of
  // an object (which includes access to global variables and global lexicals)
  // and using it as a property key, and marks those loads. During codegen,
  // marked loads will check whether the value loaded is a non-atomized string.
  // If it is, we will atomize the string and update the stored value, ensuring
  // that future loads from the same slot will not have to atomize again.
  JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys, "Begin");

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;

      // Extract the key operand from every instruction kind that consumes a
      // value as a property key (or Map/Set key); skip everything else.
      MDefinition* idVal = nullptr;
      if (ins->isGetPropertyCache()) {
        idVal = ins->toGetPropertyCache()->idval();
      } else if (ins->isHasOwnCache()) {
        idVal = ins->toHasOwnCache()->idval();
      } else if (ins->isSetPropertyCache()) {
        idVal = ins->toSetPropertyCache()->idval();
      } else if (ins->isGetPropSuperCache()) {
        idVal = ins->toGetPropSuperCache()->idval();
      } else if (ins->isMegamorphicLoadSlotByValue()) {
        idVal = ins->toMegamorphicLoadSlotByValue()->idVal();
      } else if (ins->isMegamorphicLoadSlotByValuePermissive()) {
        idVal = ins->toMegamorphicLoadSlotByValuePermissive()->idVal();
      } else if (ins->isMegamorphicHasProp()) {
        idVal = ins->toMegamorphicHasProp()->idVal();
      } else if (ins->isMegamorphicSetElement()) {
        idVal = ins->toMegamorphicSetElement()->index();
      } else if (ins->isProxyGetByValue()) {
        idVal = ins->toProxyGetByValue()->idVal();
      } else if (ins->isProxyHasProp()) {
        idVal = ins->toProxyHasProp()->idVal();
      } else if (ins->isProxySetByValue()) {
        idVal = ins->toProxySetByValue()->idVal();
      } else if (ins->isIdToStringOrSymbol()) {
        idVal = ins->toIdToStringOrSymbol()->idVal();
      } else if (ins->isGuardSpecificAtom()) {
        idVal = ins->toGuardSpecificAtom()->input();
      } else if (ins->isToHashableString()) {
        idVal = ins->toToHashableString()->input();
      } else if (ins->isToHashableValue()) {
        idVal = ins->toToHashableValue()->input();
      } else if (ins->isMapObjectHasValueVMCall()) {
        idVal = ins->toMapObjectHasValueVMCall()->value();
      } else if (ins->isMapObjectGetValueVMCall()) {
        idVal = ins->toMapObjectGetValueVMCall()->value();
      } else if (ins->isSetObjectHasValueVMCall()) {
        idVal = ins->toSetObjectHasValueVMCall()->value();
      } else {
        continue;
      }
      JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
              "Analyzing property access %s%d with idVal %s%d", ins->opName(),
              ins->id(), idVal->opName(), idVal->id());

      // Skip intermediate nodes.
      // Look through value-preserving wrappers (lexical checks, and unboxes
      // that produce a string) to find the underlying definition.
      do {
        if (idVal->isLexicalCheck()) {
          idVal = idVal->toLexicalCheck()->input();
          JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                  "- Skipping lexical check. idVal is now %s%d",
                  idVal->opName(), idVal->id());
          continue;
        }
        if (idVal->isUnbox() && idVal->type() == MIRType::String) {
          idVal = idVal->toUnbox()->input();
          JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                  "- Skipping unbox. idVal is now %s%d", idVal->opName(),
                  idVal->id());
          continue;
        }
        break;
      } while (true);

      // Only slot loads can be marked; other producers are left alone.
      if (idVal->isLoadFixedSlot()) {
        JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                "- SUCCESS: Marking fixed slot");
        idVal->toLoadFixedSlot()->setUsedAsPropertyKey();
      } else if (idVal->isLoadDynamicSlot()) {
        JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                "- SUCCESS: Marking dynamic slot");
        idVal->toLoadDynamicSlot()->setUsedAsPropertyKey();
      } else {
        JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys, "- SKIP: %s not supported",
                idVal->opName());
      }
    }
  }

  return true;
}
   4316 
   4317 // Updates the wasm ref type of a node and verifies that in this pass we only
   4318 // narrow types, and never widen.
   4319 static bool UpdateWasmRefType(MDefinition* def) {
   4320  wasm::MaybeRefType newRefType = def->computeWasmRefType();
   4321  bool changed = newRefType != def->wasmRefType();
   4322 
   4323  // Ensure that we do not regress from Some to Nothing.
   4324  MOZ_ASSERT(!(def->wasmRefType().isSome() && newRefType.isNothing()));
   4325  // Ensure that the new ref type is a subtype of the previous one (i.e. we
   4326  // only narrow ref types).
   4327  MOZ_ASSERT_IF(def->wasmRefType().isSome(),
   4328                wasm::RefType::isSubTypeOf(newRefType.value(),
   4329                                           def->wasmRefType().value()));
   4330 
   4331  def->setWasmRefType(newRefType);
   4332  return changed;
   4333 }
   4334 
// Since wasm has a fairly rich type system enforced in validation, we can use
// this type system within MIR to robustly track the types of ref values. This
// allows us to make MIR-level optimizations such as eliding null checks or
// omitting redundant casts.
//
// This analysis pass performs simple data flow analysis by assigning ref types
// to each definition, then revisiting phis and their uses as necessary until
// the types have narrowed to a fixed point.
bool jit::TrackWasmRefTypes(MIRGraph& graph) {
  // The worklist tracks nodes whose types have changed and whose uses must
  // therefore be re-evaluated.
  Vector<MDefinition*, 16, SystemAllocPolicy> worklist;

  // Assign an initial ref type to each definition. Reverse postorder ensures
  // that nodes are always visited before their uses, with the exception of loop
  // backedge phis.
  for (ReversePostorderIterator blockIter = graph.rpoBegin();
       blockIter != graph.rpoEnd(); blockIter++) {
    MBasicBlock* block = *blockIter;
    for (MDefinitionIterator def(block); def; def++) {
      // Set the initial type on all nodes. If a type is produced, then any
      // loop backedge phis that use this node must have been previously
      // visited, and must be updated and possibly added to the worklist. (Any
      // other uses of this node will be visited later in this first pass.)

      // Only wasm anyref-typed definitions participate in this analysis.
      if (def->type() != MIRType::WasmAnyRef) {
        continue;
      }

      bool hasType = UpdateWasmRefType(*def);
      if (hasType) {
        for (MUseIterator use(def->usesBegin()); use != def->usesEnd(); use++) {
          MNode* consumer = use->consumer();
          if (!consumer->isDefinition() || !consumer->toDefinition()->isPhi()) {
            continue;
          }
          MPhi* phi = consumer->toDefinition()->toPhi();
          // A loop-header phi fed by this node through the backedge was
          // visited before its operand; re-evaluate it now.
          if (phi->block()->isLoopHeader() &&
              *def == phi->getLoopBackedgeOperand()) {
            bool changed = UpdateWasmRefType(phi);
            if (changed && !worklist.append(phi)) {
              return false;
            }
          } else {
            // Any other type of use must not have a ref type yet, because we
            // are yet to hit it in this forward pass.
            MOZ_ASSERT(consumer->toDefinition()->wasmRefType().isNothing());
          }
        }
      }
    }
  }

  // Until the worklist is empty, update the uses of any worklist nodes and
  // track the ones whose types change. Termination is implied by the
  // narrowing-only invariant asserted in UpdateWasmRefType.
  while (!worklist.empty()) {
    MDefinition* def = worklist.popCopy();

    for (MUseIterator use(def->usesBegin()); use != def->usesEnd(); use++) {
      if (!use->consumer()->isDefinition()) {
        continue;
      }
      bool changed = UpdateWasmRefType(use->consumer()->toDefinition());
      if (changed && !worklist.append(use->consumer()->toDefinition())) {
        return false;
      }
    }
  }

  return true;
}
   4406 
// Decide whether |use| of a slots/elements pointer needs an explicit
// MKeepAliveObject for the owning object. Returns false only when every
// instruction between |slotsOrElements| and |use| (inclusive, same block)
// is on the whitelist of opcodes below that cannot trigger GC; any other
// opcode — or the two instructions being in different blocks — means the
// raw pointer could be invalidated and a keep-alive is required.
static bool NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use) {
  MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
             slotsOrElements->type() == MIRType::Slots);

  // Conservatively require a keep-alive across block boundaries.
  if (slotsOrElements->block() != use->block()) {
    return true;
  }

  MBasicBlock* block = use->block();
  MInstructionIterator iter(block->begin(slotsOrElements));
  MOZ_ASSERT(*iter == slotsOrElements);
  ++iter;

  while (true) {
    MInstruction* ins = *iter;
    switch (ins->op()) {
      case MDefinition::Opcode::Nop:
      case MDefinition::Opcode::Constant:
      case MDefinition::Opcode::KeepAliveObject:
      case MDefinition::Opcode::Unbox:
      case MDefinition::Opcode::LoadDynamicSlot:
      case MDefinition::Opcode::LoadDynamicSlotAndUnbox:
      case MDefinition::Opcode::StoreDynamicSlot:
      case MDefinition::Opcode::LoadFixedSlot:
      case MDefinition::Opcode::LoadFixedSlotAndUnbox:
      case MDefinition::Opcode::StoreFixedSlot:
      case MDefinition::Opcode::LoadElement:
      case MDefinition::Opcode::LoadElementAndUnbox:
      case MDefinition::Opcode::LoadElementHole:
      case MDefinition::Opcode::StoreElement:
      case MDefinition::Opcode::StoreHoleValueElement:
      case MDefinition::Opcode::LoadUnboxedScalar:
      case MDefinition::Opcode::StoreUnboxedScalar:
      case MDefinition::Opcode::StoreTypedArrayElementHole:
      case MDefinition::Opcode::LoadDataViewElement:
      case MDefinition::Opcode::StoreDataViewElement:
      case MDefinition::Opcode::AtomicTypedArrayElementBinop:
      case MDefinition::Opcode::AtomicExchangeTypedArrayElement:
      case MDefinition::Opcode::CompareExchangeTypedArrayElement:
      case MDefinition::Opcode::InitializedLength:
      case MDefinition::Opcode::SetInitializedLength:
      case MDefinition::Opcode::ArrayLength:
      case MDefinition::Opcode::BoundsCheck:
      case MDefinition::Opcode::GuardElementNotHole:
      case MDefinition::Opcode::GuardElementsArePacked:
      case MDefinition::Opcode::InArray:
      case MDefinition::Opcode::SpectreMaskIndex:
      case MDefinition::Opcode::DebugEnterGCUnsafeRegion:
      case MDefinition::Opcode::DebugLeaveGCUnsafeRegion:
        // These opcodes are assumed not to trigger GC; keep scanning.
        break;
      case MDefinition::Opcode::LoadTypedArrayElementHole: {
        // Allocating a BigInt can GC, so we have to keep the object alive.
        auto* loadIns = ins->toLoadTypedArrayElementHole();
        if (Scalar::isBigIntType(loadIns->arrayType())) {
          return true;
        }
        break;
      }
      default:
        // Any unlisted opcode is conservatively treated as able to GC.
        return true;
    }

    if (ins == use) {
      // We didn't find any instructions in range [slotsOrElements, use] that
      // can GC.
      return false;
    }
    iter++;
  }

  MOZ_CRASH("Unreachable");
}
   4479 
// For every instruction producing a raw Slots/Elements pointer, insert an
// MKeepAliveObject after each use that might span a GC, so the owning object
// (and thus the pointed-to storage) stays alive. Uses that provably cannot
// span a GC are instead bracketed (in DEBUG builds) with GC-unsafe-region
// markers. Returns false on OOM.
bool jit::AddKeepAliveInstructions(MIRGraph& graph) {
  for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
    MBasicBlock* block = *i;

    for (MInstructionIterator insIter(block->begin()); insIter != block->end();
         insIter++) {
      MInstruction* ins = *insIter;
      if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots) {
        continue;
      }

      // Find the object that owns the slots/elements storage.
      MDefinition* ownerObject;
      switch (ins->op()) {
        case MDefinition::Opcode::Elements:
        case MDefinition::Opcode::ArrayBufferViewElements:
          MOZ_ASSERT(ins->numOperands() == 1);
          ownerObject = ins->getOperand(0);
          break;
        case MDefinition::Opcode::ArrayBufferViewElementsWithOffset:
          MOZ_ASSERT(ins->numOperands() == 2);
          ownerObject = ins->getOperand(0);
          break;
        case MDefinition::Opcode::Slots:
          ownerObject = ins->toSlots()->object();
          break;
        default:
          MOZ_CRASH("Unexpected op");
      }

      MOZ_ASSERT(ownerObject->type() == MIRType::Object);

      const MDefinition* unwrapped = ownerObject->skipObjectGuards();
      if (unwrapped->isConstant() || unwrapped->isNurseryObject()) {
        // Constants are kept alive by other pointers, for instance ImmGCPtr in
        // JIT code. NurseryObjects will be kept alive by the IonScript.
        continue;
      }

      for (MUseDefIterator uses(ins); uses; uses++) {
        MInstruction* use = uses.def()->toInstruction();

        if (use->isStoreElementHole()) {
          // StoreElementHole has an explicit object operand. If GVN
          // is disabled, we can get different unbox instructions with
          // the same object as input, so we check for that case.
          MOZ_ASSERT_IF(!use->toStoreElementHole()->object()->isUnbox() &&
                            !ownerObject->isUnbox(),
                        use->toStoreElementHole()->object() == ownerObject);
          continue;
        }

        if (!NeedsKeepAlive(ins, use)) {
#ifdef DEBUG
          if (!graph.alloc().ensureBallast()) {
            return false;
          }

          // Enter a GC unsafe region while the elements/slots are on the stack.
          auto* enter = MDebugEnterGCUnsafeRegion::New(graph.alloc());
          use->block()->insertAfter(ins, enter);

          // Leave the region after the use.
          auto* leave = MDebugLeaveGCUnsafeRegion::New(graph.alloc());
          use->block()->insertAfter(use, leave);
#endif
          continue;
        }

        if (!graph.alloc().ensureBallast()) {
          return false;
        }
        // Pin the owner object across this use of its raw storage pointer.
        MKeepAliveObject* keepAlive =
            MKeepAliveObject::New(graph.alloc(), ownerObject);
        use->block()->insertAfter(use, keepAlive);
      }
    }
  }

  return true;
}
   4560 
   4561 bool LinearSum::multiply(int32_t scale) {
   4562  for (size_t i = 0; i < terms_.length(); i++) {
   4563    if (!mozilla::SafeMul(scale, terms_[i].scale, &terms_[i].scale)) {
   4564      return false;
   4565    }
   4566  }
   4567  return mozilla::SafeMul(scale, constant_, &constant_);
   4568 }
   4569 
   4570 bool LinearSum::divide(uint32_t scale) {
   4571  MOZ_ASSERT(scale > 0);
   4572 
   4573  for (size_t i = 0; i < terms_.length(); i++) {
   4574    if (terms_[i].scale % scale != 0) {
   4575      return false;
   4576    }
   4577  }
   4578  if (constant_ % scale != 0) {
   4579    return false;
   4580  }
   4581 
   4582  for (size_t i = 0; i < terms_.length(); i++) {
   4583    terms_[i].scale /= scale;
   4584  }
   4585  constant_ /= scale;
   4586 
   4587  return true;
   4588 }
   4589 
   4590 bool LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */) {
   4591  for (size_t i = 0; i < other.terms_.length(); i++) {
   4592    int32_t newScale = scale;
   4593    if (!mozilla::SafeMul(scale, other.terms_[i].scale, &newScale)) {
   4594      return false;
   4595    }
   4596    if (!add(other.terms_[i].term, newScale)) {
   4597      return false;
   4598    }
   4599  }
   4600  int32_t newConstant = scale;
   4601  if (!mozilla::SafeMul(scale, other.constant_, &newConstant)) {
   4602    return false;
   4603  }
   4604  return add(newConstant);
   4605 }
   4606 
   4607 bool LinearSum::add(SimpleLinearSum other, int32_t scale) {
   4608  if (other.term && !add(other.term, scale)) {
   4609    return false;
   4610  }
   4611 
   4612  int32_t constant;
   4613  if (!mozilla::SafeMul(other.constant, scale, &constant)) {
   4614    return false;
   4615  }
   4616 
   4617  return add(constant);
   4618 }
   4619 
   4620 bool LinearSum::add(MDefinition* term, int32_t scale) {
   4621  MOZ_ASSERT(term);
   4622 
   4623  if (scale == 0) {
   4624    return true;
   4625  }
   4626 
   4627  if (MConstant* termConst = term->maybeConstantValue()) {
   4628    int32_t constant = termConst->toInt32();
   4629    if (!mozilla::SafeMul(constant, scale, &constant)) {
   4630      return false;
   4631    }
   4632    return add(constant);
   4633  }
   4634 
   4635  for (size_t i = 0; i < terms_.length(); i++) {
   4636    if (term == terms_[i].term) {
   4637      if (!mozilla::SafeAdd(scale, terms_[i].scale, &terms_[i].scale)) {
   4638        return false;
   4639      }
   4640      if (terms_[i].scale == 0) {
   4641        terms_[i] = terms_.back();
   4642        terms_.popBack();
   4643      }
   4644      return true;
   4645    }
   4646  }
   4647 
   4648  AutoEnterOOMUnsafeRegion oomUnsafe;
   4649  if (!terms_.append(LinearTerm(term, scale))) {
   4650    oomUnsafe.crash("LinearSum::add");
   4651  }
   4652 
   4653  return true;
   4654 }
   4655 
   4656 bool LinearSum::add(int32_t constant) {
   4657  return mozilla::SafeAdd(constant, constant_, &constant_);
   4658 }
   4659 
   4660 void LinearSum::dump(GenericPrinter& out) const {
   4661  for (size_t i = 0; i < terms_.length(); i++) {
   4662    int32_t scale = terms_[i].scale;
   4663    int32_t id = terms_[i].term->id();
   4664    MOZ_ASSERT(scale);
   4665    if (scale > 0) {
   4666      if (i) {
   4667        out.printf("+");
   4668      }
   4669      if (scale == 1) {
   4670        out.printf("#%d", id);
   4671      } else {
   4672        out.printf("%d*#%d", scale, id);
   4673      }
   4674    } else if (scale == -1) {
   4675      out.printf("-#%d", id);
   4676    } else {
   4677      out.printf("%d*#%d", scale, id);
   4678    }
   4679  }
   4680  if (constant_ > 0) {
   4681    out.printf("+%d", constant_);
   4682  } else if (constant_ < 0) {
   4683    out.printf("%d", constant_);
   4684  }
   4685 }
   4686 
   4687 void LinearSum::dump() const {
   4688  Fprinter out(stderr);
   4689  dump(out);
   4690  out.finish();
   4691 }
   4692 
   4693 MDefinition* jit::ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block,
   4694                                   const LinearSum& sum,
   4695                                   BailoutKind bailoutKind) {
   4696  MDefinition* def = nullptr;
   4697 
   4698  for (size_t i = 0; i < sum.numTerms(); i++) {
   4699    LinearTerm term = sum.term(i);
   4700    MOZ_ASSERT(!term.term->isConstant());
   4701    if (term.scale == 1) {
   4702      if (def) {
   4703        def = MAdd::New(alloc, def, term.term, MIRType::Int32);
   4704        def->setBailoutKind(bailoutKind);
   4705        block->insertAtEnd(def->toInstruction());
   4706        def->computeRange(alloc);
   4707      } else {
   4708        def = term.term;
   4709      }
   4710    } else if (term.scale == -1) {
   4711      if (!def) {
   4712        def = MConstant::NewInt32(alloc, 0);
   4713        block->insertAtEnd(def->toInstruction());
   4714        def->computeRange(alloc);
   4715      }
   4716      def = MSub::New(alloc, def, term.term, MIRType::Int32);
   4717      def->setBailoutKind(bailoutKind);
   4718      block->insertAtEnd(def->toInstruction());
   4719      def->computeRange(alloc);
   4720    } else {
   4721      MOZ_ASSERT(term.scale != 0);
   4722      MConstant* factor = MConstant::NewInt32(alloc, term.scale);
   4723      block->insertAtEnd(factor);
   4724      MMul* mul = MMul::New(alloc, term.term, factor, MIRType::Int32);
   4725      mul->setBailoutKind(bailoutKind);
   4726      block->insertAtEnd(mul);
   4727      mul->computeRange(alloc);
   4728      if (def) {
   4729        def = MAdd::New(alloc, def, mul, MIRType::Int32);
   4730        def->setBailoutKind(bailoutKind);
   4731        block->insertAtEnd(def->toInstruction());
   4732        def->computeRange(alloc);
   4733      } else {
   4734        def = mul;
   4735      }
   4736    }
   4737  }
   4738 
   4739  if (!def) {
   4740    def = MConstant::NewInt32(alloc, 0);
   4741    block->insertAtEnd(def->toInstruction());
   4742    def->computeRange(alloc);
   4743  }
   4744 
   4745  return def;
   4746 }
   4747 
// Mark all the blocks that are in the loop with the given header.
// Returns the number of blocks marked (0 if the "loop" turns out not to be
// one). Sets *canOsr to true if the loop is reachable from both the normal
// entry and the OSR entry.
size_t jit::MarkLoopBlocks(MIRGraph& graph, const MBasicBlock* header,
                           bool* canOsr) {
#ifdef DEBUG
  // Marking must start from a clean slate: no block may be pre-marked.
  for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
       i != e; ++i) {
    MOZ_ASSERT(!i->isMarked(), "Some blocks already marked");
  }
#endif

  MBasicBlock* osrBlock = graph.osrBlock();
  *canOsr = false;

  // The blocks are in RPO; start at the loop backedge, which marks the bottom
  // of the loop, and walk up until we get to the header. Loops may be
  // discontiguous, so we trace predecessors to determine which blocks are
  // actually part of the loop. The backedge is always part of the loop, and
  // so are its predecessors, transitively, up to the loop header or an OSR
  // entry.
  MBasicBlock* backedge = header->backedge();
  backedge->mark();
  size_t numMarked = 1;
  for (PostorderIterator i = graph.poBegin(backedge);; ++i) {
    MOZ_ASSERT(
        i != graph.poEnd(),
        "Reached the end of the graph while searching for the loop header");
    MBasicBlock* block = *i;
    // If we've reached the loop header, we're done.
    if (block == header) {
      break;
    }
    // A block not marked by the time we reach it is not in the loop.
    if (!block->isMarked()) {
      continue;
    }

    // This block is in the loop; trace to its predecessors.
    for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
      MBasicBlock* pred = block->getPredecessor(p);
      if (pred->isMarked()) {
        continue;
      }

      // Blocks dominated by the OSR entry are not part of the loop
      // (unless they aren't reachable from the normal entry).
      if (osrBlock && pred != header && osrBlock->dominates(pred) &&
          !osrBlock->dominates(header)) {
        *canOsr = true;
        continue;
      }

      MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
                 "Loop block not between loop header and loop backedge");

      pred->mark();
      ++numMarked;

      // A nested loop may not exit back to the enclosing loop at its
      // bottom. If we just marked its header, then the whole nested loop
      // is part of the enclosing loop.
      if (pred->isLoopHeader()) {
        MBasicBlock* innerBackedge = pred->backedge();
        if (!innerBackedge->isMarked()) {
          // Mark its backedge so that we add all of its blocks to the
          // outer loop as we walk upwards.
          innerBackedge->mark();
          ++numMarked;

          // If the nested loop is not contiguous, we may have already
          // passed its backedge. If this happens, back up.
          if (innerBackedge->id() > block->id()) {
            i = graph.poBegin(innerBackedge);
            --i;
            // (After the loop's ++i, iteration resumes at the inner
            // backedge itself, so it gets visited next.)
          }
        }
      }
    }
  }

  // If there's no path connecting the header to the backedge, then this isn't
  // actually a loop. This can happen when the code starts with a loop but GVN
  // folds some branches away.
  if (!header->isMarked()) {
    jit::UnmarkLoopBlocks(graph, header);
    return 0;
  }

  return numMarked;
}
   4839 
   4840 // Unmark all the blocks that are in the loop with the given header.
   4841 void jit::UnmarkLoopBlocks(MIRGraph& graph, const MBasicBlock* header) {
   4842  MBasicBlock* backedge = header->backedge();
   4843  for (ReversePostorderIterator i = graph.rpoBegin(header);; ++i) {
   4844    MOZ_ASSERT(i != graph.rpoEnd(),
   4845               "Reached the end of the graph while searching for the backedge");
   4846    MBasicBlock* block = *i;
   4847    if (block->isMarked()) {
   4848      block->unmark();
   4849      if (block == backedge) {
   4850        break;
   4851      }
   4852    }
   4853  }
   4854 
   4855 #ifdef DEBUG
   4856  for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
   4857       i != e; ++i) {
   4858    MOZ_ASSERT(!i->isMarked(), "Not all blocks got unmarked");
   4859  }
   4860 #endif
   4861 }
   4862 
// Fold a Value-producing load whose sole use is an MUnbox into one combined
// Load*AndUnbox instruction. Returns false on allocation failure or
// compilation cancellation.
bool jit::FoldLoadsWithUnbox(const MIRGenerator* mir, MIRGraph& graph) {
  // This pass folds MLoadFixedSlot, MLoadDynamicSlot, MLoadElement instructions
  // followed by MUnbox into a single instruction. For LoadElement this allows
  // us to fuse the hole check with the type check for the unbox. It may also
  // allow us to remove some GuardElementsArePacked nodes.

  // Elements definitions whose loads got folded; scanned afterwards for
  // removable packed-elements guards.
  Vector<MInstruction*, 16, SystemAllocPolicy> optimizedElements;
  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    if (mir->shouldCancel("FoldLoadsWithUnbox")) {
      return false;
    }

    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;

      // We're only interested in loads producing a Value.
      if (!ins->isLoadFixedSlot() && !ins->isLoadDynamicSlot() &&
          !ins->isLoadElement() && !ins->isSuperFunction()) {
        continue;
      }
      if (ins->type() != MIRType::Value) {
        continue;
      }

      MInstruction* load = ins;

      // Ensure there's a single def-use (ignoring resume points) and it's an
      // unbox. Unwrap MLexicalCheck because it's redundant if we have a
      // fallible unbox (checked below).
      MDefinition* defUse = load->maybeSingleDefUse();
      if (!defUse) {
        continue;
      }
      MLexicalCheck* lexicalCheck = nullptr;
      if (defUse->isLexicalCheck()) {
        lexicalCheck = defUse->toLexicalCheck();
        defUse = lexicalCheck->maybeSingleDefUse();
        if (!defUse) {
          continue;
        }
      }
      if (!defUse->isUnbox()) {
        continue;
      }

      // For now require the load and unbox to be in the same block. This isn't
      // strictly necessary but it's the common case and could prevent bailouts
      // when moving the unbox before a loop.
      MUnbox* unbox = defUse->toUnbox();
      if (unbox->block() != *block) {
        continue;
      }
      MOZ_ASSERT_IF(lexicalCheck, lexicalCheck->block() == *block);

      MOZ_ASSERT(!IsMagicType(unbox->type()));

      // If this is a LoadElement or if we have a lexical check between the load
      // and unbox, we only support folding the load with a fallible unbox so
      // that we can eliminate the MagicValue check.
      if ((load->isLoadElement() || lexicalCheck) && !unbox->fallible()) {
        continue;
      }

      // If this is a SuperFunction, we only support folding the load when the
      // unbox is fallible and its type is Object.
      //
      // SuperFunction is currently only used for `super()` constructor calls
      // in classes, which always use fallible unbox to Object.
      if (load->isSuperFunction() &&
          !(unbox->type() == MIRType::Object && unbox->fallible())) {
        continue;
      }

      // Combine the load and unbox into a single MIR instruction.
      if (!graph.alloc().ensureBallast()) {
        return false;
      }

      MIRType type = unbox->type();
      MUnbox::Mode mode = unbox->mode();

      MInstruction* replacement;
      switch (load->op()) {
        case MDefinition::Opcode::LoadFixedSlot: {
          auto* loadIns = load->toLoadFixedSlot();
          replacement = MLoadFixedSlotAndUnbox::New(
              graph.alloc(), loadIns->object(), loadIns->slot(), mode, type,
              loadIns->usedAsPropertyKey());
          break;
        }
        case MDefinition::Opcode::LoadDynamicSlot: {
          auto* loadIns = load->toLoadDynamicSlot();
          replacement = MLoadDynamicSlotAndUnbox::New(
              graph.alloc(), loadIns->slots(), loadIns->slot(), mode, type,
              loadIns->usedAsPropertyKey());
          break;
        }
        case MDefinition::Opcode::LoadElement: {
          auto* loadIns = load->toLoadElement();
          MOZ_ASSERT(unbox->fallible());
          replacement = MLoadElementAndUnbox::New(
              graph.alloc(), loadIns->elements(), loadIns->index(), mode, type);
          MOZ_ASSERT(!IsMagicType(type));
          // FoldElementAndUnbox will implicitly check for holes by unboxing. We
          // may be able to remove a GuardElementsArePacked check. Add this
          // Elements to a list to check later (unless we just added it for
          // a different load).
          if ((optimizedElements.empty() ||
               optimizedElements.back() != loadIns) &&
              !optimizedElements.append(loadIns->elements()->toInstruction())) {
            return false;
          }
          break;
        }
        case MDefinition::Opcode::SuperFunction: {
          auto* loadIns = load->toSuperFunction();
          MOZ_ASSERT(unbox->fallible());
          MOZ_ASSERT(unbox->type() == MIRType::Object);
          replacement =
              MSuperFunctionAndUnbox::New(graph.alloc(), loadIns->callee());
          break;
        }
        default:
          MOZ_CRASH("Unexpected instruction");
      }
      replacement->setBailoutKind(BailoutKind::UnboxFolding);

      // Splice in the replacement and redirect all uses of the old nodes.
      block->insertBefore(load, replacement);
      unbox->replaceAllUsesWith(replacement);
      if (lexicalCheck) {
        lexicalCheck->replaceAllUsesWith(replacement);
      }
      load->replaceAllUsesWith(replacement);

      // Step the iterator past any instruction we're about to discard, so
      // discarding does not invalidate it.
      if (lexicalCheck && *insIter == lexicalCheck) {
        insIter++;
      }
      if (*insIter == unbox) {
        insIter++;
      }
      block->discard(unbox);
      if (lexicalCheck) {
        block->discard(lexicalCheck);
      }
      block->discard(load);
    }
  }

  // For each Elements that had a load folded with an unbox, check to see if
  // there is a GuardElementsArePacked node that can be removed. It can't be
  // removed if:
  //     1. There is a loadElement/storeElement use that will not emit a
  //        hole check.
  //     2. There is another use that has not been allow-listed.
  // It is safe to add additional operations to the allow list if they don't
  // require a packed Elements array as input.
  for (auto* elements : optimizedElements) {
    bool canRemovePackedChecks = true;
    Vector<MInstruction*, 4, SystemAllocPolicy> guards;
    for (MUseDefIterator uses(elements); uses; uses++) {
      MInstruction* use = uses.def()->toInstruction();
      if (use->isGuardElementsArePacked()) {
        // Collect the guards; only discard them once all uses are vetted.
        if (!guards.append(use)) {
          return false;
        }
      } else if (use->isLoadElement()) {
        if (!use->toLoadElement()->needsHoleCheck()) {
          canRemovePackedChecks = false;
          break;
        }
      } else if (use->isStoreElement()) {
        if (!use->toStoreElement()->needsHoleCheck()) {
          canRemovePackedChecks = false;
          break;
        }
      } else if (use->isLoadElementAndUnbox() || use->isInitializedLength() ||
                 use->isArrayLength()) {
        // These operations are not affected by the packed flag.
        continue;
      } else {
        canRemovePackedChecks = false;
        break;
      }
    }
    if (!canRemovePackedChecks) {
      continue;
    }
    for (auto* guard : guards) {
      guard->block()->discard(guard);
    }
  }

  return true;
}
   5060 
// Reorder the blocks in the loop starting at the given header to be contiguous.
// |numMarked| is the block count previously computed by MarkLoopBlocks; all
// loop members must still be marked on entry, and are unmarked here.
static void MakeLoopContiguous(MIRGraph& graph, MBasicBlock* header,
                               size_t numMarked) {
  MBasicBlock* backedge = header->backedge();

  MOZ_ASSERT(header->isMarked(), "Loop header is not part of loop");
  MOZ_ASSERT(backedge->isMarked(), "Loop backedge is not part of loop");

  // If there are any blocks between the loop header and the loop backedge
  // that are not part of the loop, prepare to move them to the end. We keep
  // them in order, which preserves RPO.
  ReversePostorderIterator insertIter = graph.rpoBegin(backedge);
  insertIter++;
  // |insertPt| is the first block after the backedge; displaced blocks are
  // moved immediately before it, preserving their relative order.
  MBasicBlock* insertPt = *insertIter;

  // Visit all the blocks from the loop header to the loop backedge.
  // In-loop blocks are renumbered [headerId, headerId + numMarked); the
  // displaced blocks take the ids immediately after that range.
  size_t headerId = header->id();
  size_t inLoopId = headerId;
  size_t notInLoopId = inLoopId + numMarked;
  ReversePostorderIterator i = graph.rpoBegin(header);
  for (;;) {
    MBasicBlock* block = *i++;
    MOZ_ASSERT(block->id() >= header->id() && block->id() <= backedge->id(),
               "Loop backedge should be last block in loop");

    if (block->isMarked()) {
      // This block is in the loop.
      block->unmark();
      block->setId(inLoopId++);
      // If we've reached the loop backedge, we're done!
      if (block == backedge) {
        break;
      }
    } else {
      // This block is not in the loop. Move it to the end.
      graph.moveBlockBefore(insertPt, block);
      block->setId(notInLoopId++);
    }
  }
  MOZ_ASSERT(header->id() == headerId, "Loop header id changed");
  MOZ_ASSERT(inLoopId == headerId + numMarked,
             "Wrong number of blocks kept in loop");
  MOZ_ASSERT(notInLoopId == (insertIter != graph.rpoEnd() ? insertPt->id()
                                                          : graph.numBlocks()),
             "Wrong number of blocks moved out of loop");
}
   5107 
   5108 // Reorder the blocks in the graph so that loops are contiguous.
   5109 bool jit::MakeLoopsContiguous(MIRGraph& graph) {
   5110  // Visit all loop headers (in any order).
   5111  for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
   5112    MBasicBlock* header = *i;
   5113    if (!header->isLoopHeader()) {
   5114      continue;
   5115    }
   5116 
   5117    // Mark all blocks that are actually part of the loop.
   5118    bool canOsr;
   5119    size_t numMarked = MarkLoopBlocks(graph, header, &canOsr);
   5120 
   5121    // If the loop isn't a loop, don't try to optimize it.
   5122    if (numMarked == 0) {
   5123      continue;
   5124    }
   5125 
   5126    // If there's an OSR block entering the loop in the middle, it's tricky,
   5127    // so don't try to handle it, for now.
   5128    if (canOsr) {
   5129      UnmarkLoopBlocks(graph, header);
   5130      continue;
   5131    }
   5132 
   5133    // Move all blocks between header and backedge that aren't marked to
   5134    // the end of the loop, making the loop itself contiguous.
   5135    MakeLoopContiguous(graph, header, numMarked);
   5136  }
   5137 
   5138  return true;
   5139 }
   5140 
   5141 static MDefinition* SkipIterObjectUnbox(MDefinition* ins) {
   5142  if (ins->isGuardIsNotProxy()) {
   5143    ins = ins->toGuardIsNotProxy()->input();
   5144  }
   5145  if (ins->isUnbox()) {
   5146    ins = ins->toUnbox()->input();
   5147  }
   5148  return ins;
   5149 }
   5150 
   5151 static MDefinition* SkipBox(MDefinition* ins) {
   5152  if (ins->isBox()) {
   5153    return ins->toBox()->input();
   5154  }
   5155  return ins;
   5156 }
   5157 
   5158 static MObjectToIterator* FindObjectToIteratorUse(MDefinition* ins) {
   5159  for (MUseIterator use(ins->usesBegin()); use != ins->usesEnd(); use++) {
   5160    if (!(*use)->consumer()->isDefinition()) {
   5161      continue;
   5162    }
   5163    MDefinition* def = (*use)->consumer()->toDefinition();
   5164    if (def->isGuardIsNotProxy()) {
   5165      MObjectToIterator* recursed = FindObjectToIteratorUse(def);
   5166      if (recursed) {
   5167        return recursed;
   5168      }
   5169    } else if (def->isUnbox()) {
   5170      MObjectToIterator* recursed = FindObjectToIteratorUse(def);
   5171      if (recursed) {
   5172        return recursed;
   5173      }
   5174    } else if (def->isObjectToIterator()) {
   5175      return def->toObjectToIterator();
   5176    }
   5177  }
   5178 
   5179  return nullptr;
   5180 }
   5181 
// Recognize megamorphic property accesses keyed by the current value of a
// for-in iterator (or an Object.keys result) and wrap them in a fast path
// that uses the iterator's indices array when available. See the large
// comment below for the exact patterns matched. Returns false on OOM.
bool jit::OptimizeIteratorIndices(const MIRGenerator* mir, MIRGraph& graph) {
  bool changed = false;

  for (ReversePostorderIterator blockIter = graph.rpoBegin();
       blockIter != graph.rpoEnd();) {
    MBasicBlock* block = *blockIter++;
    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;
      if (!graph.alloc().ensureBallast()) {
        return false;
      }

      // Extract the (receiver, key[, value]) operands from the supported
      // access instructions; |setValue| is non-null only for stores.
      MDefinition* receiver = nullptr;
      MDefinition* idVal = nullptr;
      MDefinition* setValue = nullptr;
      if (ins->isMegamorphicHasProp() &&
          ins->toMegamorphicHasProp()->hasOwn()) {
        receiver = ins->toMegamorphicHasProp()->object();
        idVal = ins->toMegamorphicHasProp()->idVal();
      } else if (ins->isHasOwnCache()) {
        receiver = ins->toHasOwnCache()->value();
        idVal = ins->toHasOwnCache()->idval();
      } else if (ins->isMegamorphicLoadSlotByValue()) {
        receiver = ins->toMegamorphicLoadSlotByValue()->object();
        idVal = ins->toMegamorphicLoadSlotByValue()->idVal();
      } else if (ins->isMegamorphicLoadSlotByValuePermissive()) {
        receiver = ins->toMegamorphicLoadSlotByValuePermissive()->object();
        idVal = ins->toMegamorphicLoadSlotByValuePermissive()->idVal();
      } else if (ins->isGetPropertyCache()) {
        receiver = ins->toGetPropertyCache()->value();
        idVal = ins->toGetPropertyCache()->idval();
      } else if (ins->isMegamorphicSetElement()) {
        receiver = ins->toMegamorphicSetElement()->object();
        idVal = ins->toMegamorphicSetElement()->index();
        setValue = ins->toMegamorphicSetElement()->value();
      } else if (ins->isSetPropertyCache()) {
        receiver = ins->toSetPropertyCache()->object();
        idVal = ins->toSetPropertyCache()->idval();
        setValue = ins->toSetPropertyCache()->value();
      }

      if (!receiver) {
        continue;
      }

      // Given the following structure (that occurs inside for-in loops or
      // when iterating a scalar-replaced Object.keys result):
      //   obj: some object
      //   iter: ObjectToIterator <obj>
      //   iterLoad: IteratorMore <iter> | LoadIteratorElement <iter, index>
      //   access: HasProp/GetElem <obj> <iterLoad>
      // If the iterator object has an indices array, we can speed up the
      // property access:
      // 1. If the property access is a HasProp looking for own properties,
      //    then the result will always be true if the iterator has indices,
      //    because we only populate the indices array for objects with no
      //    enumerable properties on the prototype.
      // 2. If the property access is a GetProp, then we can use the contents
      //    of the indices array to find the correct property faster than
      //    the megamorphic cache.
      // 3. If the property access is a SetProp, then we can use the contents
      //    of the indices array to find the correct slots faster than the
      //    megamorphic cache.
      //
      // In some cases involving Object.keys, we can also end up with a pattern
      // like this:
      //
      //   obj1: some object
      //   obj2: some object
      //   iter1: ObjectToIterator <obj1>
      //   iter2: ObjectToIterator <obj2>
      //   iterLoad: LoadIteratorElement <iter1>
      //   access: GetElem <obj2> <iterLoad>
      //
      // This corresponds to `obj2[Object.keys(obj1)[index]]`. In the general
      // case we can't do much with this, but if obj1 and obj2 have the same
      // shape, then we may reuse the iterator, in which case iter1 == iter2.
      // In that case, we can optimize the access as if it were using iter2,
      // at the cost of a single comparison to see if iter1 == iter2.
#ifdef JS_CODEGEN_X86
      // The ops required for this want more registers than is convenient on
      // x86
      bool supportObjectKeys = false;
#else
      bool supportObjectKeys = true;
#endif

      MObjectToIterator* iter = nullptr;
      MObjectToIterator* otherIter = nullptr;
      MDefinition* iterElementIndex = nullptr;
      if (idVal->isIteratorMore()) {
        // The for-in pattern: the key comes straight from IteratorMore.
        auto* iterNext = idVal->toIteratorMore();

        if (!iterNext->iterator()->isObjectToIterator()) {
          continue;
        }

        iter = iterNext->iterator()->toObjectToIterator();
        if (SkipIterObjectUnbox(iter->object()) !=
            SkipIterObjectUnbox(receiver)) {
          continue;
        }
      } else if (supportObjectKeys && SkipBox(idVal)->isLoadIteratorElement()) {
        // The Object.keys pattern: the key is an element of the iterator.
        auto* iterLoad = SkipBox(idVal)->toLoadIteratorElement();

        if (!iterLoad->iter()->isObjectToIterator()) {
          continue;
        }

        iter = iterLoad->iter()->toObjectToIterator();
        if (SkipIterObjectUnbox(iter->object()) !=
            SkipIterObjectUnbox(receiver)) {
          // Different receiver: look for an iterator over the receiver
          // itself that dominates this access (the iter1 == iter2 case).
          if (!setValue) {
            otherIter = FindObjectToIteratorUse(SkipIterObjectUnbox(receiver));
          }

          if (!otherIter || !otherIter->block()->dominates(ins->block())) {
            continue;
          }
        }
        iterElementIndex = iterLoad->index();
      } else {
        continue;
      }

      MOZ_ASSERT_IF(iterElementIndex, supportObjectKeys);
      MOZ_ASSERT_IF(otherIter, supportObjectKeys);

      // Build the runtime guard for the fast path.
      MInstruction* indicesCheck = nullptr;
      if (otherIter) {
        indicesCheck = MIteratorsMatchAndHaveIndices::New(
            graph.alloc(), otherIter->object(), iter, otherIter);
      } else {
        indicesCheck =
            MIteratorHasIndices::New(graph.alloc(), iter->object(), iter);
      }

      // Build the fast-path replacement for the access itself.
      MInstruction* replacement;
      if (ins->isHasOwnCache() || ins->isMegamorphicHasProp()) {
        MOZ_ASSERT(!setValue);
        replacement = MConstant::NewBoolean(graph.alloc(), true);
      } else if (ins->isMegamorphicLoadSlotByValue() ||
                 ins->isMegamorphicLoadSlotByValuePermissive() ||
                 ins->isGetPropertyCache()) {
        MOZ_ASSERT(!setValue);
        if (iterElementIndex) {
          replacement = MLoadSlotByIteratorIndexIndexed::New(
              graph.alloc(), receiver, iter, iterElementIndex);
        } else {
          replacement =
              MLoadSlotByIteratorIndex::New(graph.alloc(), receiver, iter);
        }
      } else {
        MOZ_ASSERT(ins->isMegamorphicSetElement() || ins->isSetPropertyCache());
        MOZ_ASSERT(setValue);
        if (iterElementIndex) {
          replacement = MStoreSlotByIteratorIndexIndexed::New(
              graph.alloc(), receiver, iter, iterElementIndex, setValue);
        } else {
          replacement = MStoreSlotByIteratorIndex::New(graph.alloc(), receiver,
                                                       iter, setValue);
        }
      }

      // Guard on |indicesCheck|: take |replacement| on the fast path and keep
      // the original |ins| as the slow path. This introduces new blocks that
      // rejoin below (see the iterator advance).
      if (!block->wrapInstructionInFastpath(ins, replacement, indicesCheck)) {
        return false;
      }

      iter->setWantsIndices(true);
      changed = true;

      // Advance to join block.
      blockIter = graph.rpoBegin(block->getSuccessor(0)->getSuccessor(0));
      break;
    }
  }
  // The CFG changed; recompute the affected analyses.
  if (changed && !AccountForCFGChanges(mir, graph,
                                       /*updateAliasAnalysis=*/false)) {
    return false;
  }

  return true;
}
   5367 
   5368 // =====================================================================
   5369 //
   5370 // Debug printing
   5371 
   5372 void jit::DumpHashedPointer(GenericPrinter& out, const void* p) {
   5373 #ifdef JS_JITSPEW
   5374  if (!p) {
   5375    out.printf("NULL");
   5376    return;
   5377  }
   5378  char tab[27] = "abcdefghijklmnopqrstuvwxyz";
   5379  MOZ_ASSERT(tab[26] == '\0');
   5380  mozilla::HashNumber hash = mozilla::AddToHash(mozilla::HashNumber(0), p);
   5381  hash %= (26 * 26 * 26 * 26 * 26);
   5382  char buf[6];
   5383  for (int i = 0; i <= 4; i++) {
   5384    buf[i] = tab[hash % 26];
   5385    hash /= 26;
   5386  }
   5387  buf[5] = '\0';
   5388  out.printf("%s", buf);
   5389 #endif
   5390 }
   5391 
   5392 void jit::DumpMIRDefinitionID(GenericPrinter& out, const MDefinition* def,
   5393                              bool showDetails) {
   5394 #ifdef JS_JITSPEW
   5395  if (!def) {
   5396    out.printf("(null)");
   5397    return;
   5398  }
   5399  if (showDetails) {
   5400    DumpHashedPointer(out, def);
   5401    out.printf(".");
   5402  }
   5403  out.printf("%u", def->id());
   5404 #endif
   5405 }
   5406 
   5407 void jit::DumpMIRDefinition(GenericPrinter& out, const MDefinition* def,
   5408                            bool showDetails) {
   5409 #ifdef JS_JITSPEW
   5410  DumpMIRDefinitionID(out, def, showDetails);
   5411  out.printf(" = %s.", StringFromMIRType(def->type()));
   5412  if (def->isConstant()) {
   5413    def->printOpcode(out);
   5414  } else {
   5415    MDefinition::PrintOpcodeName(out, def->op());
   5416  }
   5417 
   5418  // Get any extra bits of text that the MIR node wants to show us.  Both the
   5419  // vector and the strings added to it belong to this function, so both will
   5420  // be automatically freed at exit.
   5421  ExtrasCollector extras;
   5422  def->getExtras(&extras);
   5423  for (size_t i = 0; i < extras.count(); i++) {
   5424    out.printf(" %s", extras.get(i).get());
   5425  }
   5426 
   5427  for (size_t i = 0; i < def->numOperands(); i++) {
   5428    out.printf(" ");
   5429    DumpMIRDefinitionID(out, def->getOperand(i), showDetails);
   5430  }
   5431 
   5432  if (def->dependency() && showDetails) {
   5433    out.printf(" DEP=");
   5434    DumpMIRDefinitionID(out, def->dependency(), showDetails);
   5435  }
   5436 
   5437  if (def->hasUses()) {
   5438    out.printf("   uses=");
   5439    bool first = true;
   5440    for (auto use = def->usesBegin(); use != def->usesEnd(); use++) {
   5441      MNode* consumer = (*use)->consumer();
   5442      if (!first) {
   5443        out.printf(",");
   5444      }
   5445      if (consumer->isDefinition()) {
   5446        out.printf("%d", consumer->toDefinition()->id());
   5447      } else {
   5448        out.printf("?");
   5449      }
   5450      first = false;
   5451    }
   5452  }
   5453 
   5454  if (def->hasAnyFlags() && showDetails) {
   5455    out.printf("   flags=");
   5456    bool first = true;
   5457 #  define OUTPUT_FLAG(_F)                          \
   5458    do {                                           \
   5459      if (def->is##_F()) {                         \
   5460        out.printf("%s%s", first ? "" : ",", #_F); \
   5461        first = false;                             \
   5462      }                                            \
   5463    } while (0);
   5464    MIR_FLAG_LIST(OUTPUT_FLAG);
   5465 #  undef OUTPUT_FLAG
   5466  }
   5467 #endif
   5468 }
   5469 
   5470 void jit::DumpMIRBlockID(GenericPrinter& out, const MBasicBlock* block,
   5471                         bool showDetails) {
   5472 #ifdef JS_JITSPEW
   5473  if (!block) {
   5474    out.printf("Block(null)");
   5475    return;
   5476  }
   5477  out.printf("Block");
   5478  if (showDetails) {
   5479    out.printf(".");
   5480    DumpHashedPointer(out, block);
   5481    out.printf(".");
   5482  }
   5483  out.printf("%u", block->id());
   5484 #endif
   5485 }
   5486 
   5487 void jit::DumpMIRBlock(GenericPrinter& out, MBasicBlock* block,
   5488                       bool showDetails) {
   5489 #ifdef JS_JITSPEW
   5490  out.printf("  ");
   5491  DumpMIRBlockID(out, block, showDetails);
   5492  out.printf(" -- preds=[");
   5493  for (uint32_t i = 0; i < block->numPredecessors(); i++) {
   5494    MBasicBlock* pred = block->getPredecessor(i);
   5495    out.printf("%s", i == 0 ? "" : ", ");
   5496    DumpMIRBlockID(out, pred, showDetails);
   5497  }
   5498  out.printf("] -- LD=%u -- K=%s -- s-w-phis=", block->loopDepth(),
   5499             block->nameOfKind());
   5500  if (block->successorWithPhis()) {
   5501    DumpMIRBlockID(out, block->successorWithPhis(), showDetails);
   5502    out.printf(",#%u\n", block->positionInPhiSuccessor());
   5503  } else {
   5504    out.printf("(null)\n");
   5505  }
   5506  for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
   5507       iter != end; iter++) {
   5508    out.printf("    ");
   5509    jit::DumpMIRDefinition(out, *iter, showDetails);
   5510    out.printf("\n");
   5511  }
   5512  for (MInstructionIterator iter(block->begin()), end(block->end());
   5513       iter != end; iter++) {
   5514    out.printf("    ");
   5515    DumpMIRDefinition(out, *iter, showDetails);
   5516    out.printf("\n");
   5517  }
   5518 #endif
   5519 }
   5520 
   5521 void jit::DumpMIRGraph(GenericPrinter& out, MIRGraph& graph, bool showDetails) {
   5522 #ifdef JS_JITSPEW
   5523  for (ReversePostorderIterator block(graph.rpoBegin());
   5524       block != graph.rpoEnd(); block++) {
   5525    DumpMIRBlock(out, *block, showDetails);
   5526  }
   5527 #endif
   5528 }
   5529 
   5530 void jit::DumpMIRExpressions(GenericPrinter& out, MIRGraph& graph,
   5531                             const CompileInfo& info, const char* phase,
   5532                             bool showDetails) {
   5533 #ifdef JS_JITSPEW
   5534  if (!JitSpewEnabled(JitSpew_MIRExpressions)) {
   5535    return;
   5536  }
   5537 
   5538  out.printf("===== %s =====\n", phase);
   5539 
   5540  DumpMIRGraph(out, graph, showDetails);
   5541 
   5542  if (info.compilingWasm()) {
   5543    out.printf("===== end wasm MIR dump =====\n");
   5544  } else {
   5545    out.printf("===== %s:%u =====\n", info.filename(), info.lineno());
   5546  }
   5547 #endif
   5548 }