tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MIRGraph.cpp (43295B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/MIRGraph.h"
      8 
      9 #include "jit/CompileInfo.h"
     10 #include "jit/InlineScriptTree.h"
     11 #include "jit/IonOptimizationLevels.h"
     12 #include "jit/JitSpewer.h"
     13 #include "jit/MIR-wasm.h"
     14 #include "jit/MIR.h"
     15 #include "jit/MIRGenerator.h"
     16 #include "wasm/WasmMetadata.h"
     17 
     18 using namespace js;
     19 using namespace js::jit;
     20 
// Set up a MIR generator for a single compilation. |realm| may be null (e.g.
// for wasm compilations, whose metadata arrives via |wasmCodeMeta|); in that
// case the realm/zone-derived fields fall back to null/false defaults.
MIRGenerator::MIRGenerator(CompileRealm* realm,
                           const JitCompileOptions& options,
                           TempAllocator* alloc, MIRGraph* graph,
                           const CompileInfo* info,
                           const OptimizationInfo* optimizationInfo,
                           const wasm::CodeMetadata* wasmCodeMeta)
    : realm(realm),
      // Cache the runtime pointer; only available when a realm is given.
      runtime(realm ? realm->runtime() : nullptr),
      outerInfo_(info),
      optimizationInfo_(optimizationInfo),
      wasmCodeMeta_(wasmCodeMeta),
      alloc_(alloc),
      graph_(graph),
      offThreadStatus_(Ok()),
      cancelBuild_(false),
      wasmMaxStackArgBytes_(0),
      needsOverrecursedCheck_(false),
      needsStaticStackAlignment_(false),
      instrumentedProfiling_(false),
      instrumentedProfilingIsCached_(false),
      // Nursery-allocation policy comes from the zone; assume "no" when
      // compiling without a realm.
      stringsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateStrings()
                                   : false),
      bigIntsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateBigInts()
                                   : false),
      options(options),
      jitSpewer_(alloc, wasmCodeMeta) {}
     47 
     48 bool MIRGenerator::licmEnabled() const {
     49  return optimizationInfo().licmEnabled() && !disableLICM_ &&
     50         !outerInfo().hadLICMInvalidation();
     51 }
     52 
     53 bool MIRGenerator::branchHintingEnabled() const {
     54  return outerInfo().branchHintingEnabled();
     55 }
     56 
     57 mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(AbortReason r) {
     58  if (JitSpewEnabled(JitSpew_IonAbort)) {
     59    switch (r) {
     60      case AbortReason::Alloc:
     61        JitSpew(JitSpew_IonAbort, "AbortReason::Alloc");
     62        break;
     63      case AbortReason::Disable:
     64        JitSpew(JitSpew_IonAbort, "AbortReason::Disable");
     65        break;
     66      case AbortReason::Error:
     67        JitSpew(JitSpew_IonAbort, "AbortReason::Error");
     68        break;
     69      case AbortReason::NoAbort:
     70        MOZ_CRASH("Abort with AbortReason::NoAbort");
     71        break;
     72    }
     73  }
     74  return Err(std::move(r));
     75 }
     76 
// Notify the attached spewers that compilation of |function| is starting.
void MIRGenerator::spewBeginFunction(JSScript* function) {
  jitSpewer_.init(&graph(), function);
  jitSpewer_.beginFunction(function);
#ifdef JS_JITSPEW
  // The graph spewer only exists in JS_JITSPEW builds.
  if (graphSpewer_) {
    graphSpewer_->beginFunction(function);
  }
#endif
  perfSpewer().startRecording();
}
     87 
// Notify the attached spewers that compilation of wasm function |funcIndex|
// is starting. There is no JSScript for wasm, hence the null script.
void MIRGenerator::spewBeginWasmFunction(unsigned funcIndex) {
  jitSpewer_.init(&graph(), nullptr);
  jitSpewer_.beginWasmFunction(funcIndex);
#ifdef JS_JITSPEW
  if (graphSpewer_) {
    graphSpewer_->beginWasmFunction(funcIndex);
  }
#endif
  perfSpewer().startRecording(wasmCodeMeta_);
}
     98 
// Record the state of the graph after the compiler pass |name|. |ra| is the
// register allocator state, if the pass has one (may be null).
void MIRGenerator::spewPass(const char* name, BacktrackingAllocator* ra) {
  jitSpewer_.spewPass(name, ra);
#ifdef JS_JITSPEW
  if (graphSpewer_) {
    graphSpewer_->spewPass(name, &graph(), ra);
  }
#endif
  perfSpewer().recordPass(name, &graph(), ra);
}
    108 
// Notify the attached spewers that compilation of the current function is
// done. Counterpart to spewBeginFunction / spewBeginWasmFunction.
void MIRGenerator::spewEndFunction() {
  jitSpewer_.endFunction();
#ifdef JS_JITSPEW
  if (graphSpewer_) {
    graphSpewer_->endFunction();
  }
#endif
  perfSpewer().endRecording();
}
    118 
    119 AutoSpewEndFunction::~AutoSpewEndFunction() {
    120  if (mir_) {
    121    mir_->spewEndFunction();
    122  }
    123 }
    124 
// Abort the current compilation with reason |r|, spewing a printf-style
// formatted message. The caller owns |ap| (va_start/va_end).
mozilla::GenericErrorResult<AbortReason> MIRGenerator::abortFmt(
    AbortReason r, const char* message, va_list ap) {
  JitSpewVA(JitSpew_IonAbort, message, ap);
  return Err(std::move(r));
}
    130 
    131 mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(
    132    AbortReason r, const char* message, ...) {
    133  va_list ap;
    134  va_start(ap, message);
    135  auto forward = abortFmt(r, message, ap);
    136  va_end(ap);
    137  return forward;
    138 }
    139 
    140 void MIRGraph::addBlock(MBasicBlock* block) {
    141  MOZ_ASSERT(block);
    142  block->setId(blockIdGen_++);
    143  blocks_.pushBack(block);
    144  numBlocks_++;
    145 }
    146 
    147 void MIRGraph::insertBlockAfter(MBasicBlock* at, MBasicBlock* block) {
    148  block->setId(blockIdGen_++);
    149  blocks_.insertAfter(at, block);
    150  numBlocks_++;
    151 }
    152 
    153 void MIRGraph::insertBlockBefore(MBasicBlock* at, MBasicBlock* block) {
    154  block->setId(blockIdGen_++);
    155  blocks_.insertBefore(at, block);
    156  numBlocks_++;
    157 }
    158 
    159 void MIRGraph::removeBlock(MBasicBlock* block) {
    160  // Remove a block from the graph. It will also cleanup the block.
    161 
    162  if (block == osrBlock_) {
    163    osrBlock_ = nullptr;
    164  }
    165 
    166  if (returnAccumulator_) {
    167    size_t i = 0;
    168    while (i < returnAccumulator_->length()) {
    169      if ((*returnAccumulator_)[i] == block) {
    170        returnAccumulator_->erase(returnAccumulator_->begin() + i);
    171      } else {
    172        i++;
    173      }
    174    }
    175  }
    176 
    177  block->clear();
    178  block->markAsDead();
    179 
    180  if (block->isInList()) {
    181    blocks_.remove(block);
    182    numBlocks_--;
    183  }
    184 }
    185 
    186 void MIRGraph::unmarkBlocks() {
    187  for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++) {
    188    i->unmark();
    189  }
    190 }
    191 
    192 MBasicBlock* MBasicBlock::New(MIRGraph& graph, size_t stackDepth,
    193                              const CompileInfo& info, MBasicBlock* maybePred,
    194                              BytecodeSite* site, Kind kind) {
    195  MOZ_ASSERT(site->pc() != nullptr);
    196 
    197  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
    198  if (!block->init()) {
    199    return nullptr;
    200  }
    201 
    202  if (!block->inherit(graph.alloc(), stackDepth, maybePred, 0)) {
    203    return nullptr;
    204  }
    205 
    206  return block;
    207 }
    208 
    209 MBasicBlock* MBasicBlock::NewPopN(MIRGraph& graph, const CompileInfo& info,
    210                                  MBasicBlock* pred, BytecodeSite* site,
    211                                  Kind kind, uint32_t popped) {
    212  MOZ_ASSERT(site->pc() != nullptr);
    213 
    214  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
    215  if (!block->init()) {
    216    return nullptr;
    217  }
    218 
    219  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, popped)) {
    220    return nullptr;
    221  }
    222 
    223  return block;
    224 }
    225 
    226 MBasicBlock* MBasicBlock::NewPendingLoopHeader(MIRGraph& graph,
    227                                               const CompileInfo& info,
    228                                               MBasicBlock* pred,
    229                                               BytecodeSite* site) {
    230  MOZ_ASSERT(site->pc() != nullptr);
    231 
    232  MBasicBlock* block =
    233      new (graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
    234  if (!block->init()) {
    235    return nullptr;
    236  }
    237 
    238  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, 0)) {
    239    return nullptr;
    240  }
    241 
    242  return block;
    243 }
    244 
// Create a SPLIT_EDGE block on the edge from |pred| (at successor index
// |predEdgeIdx|) to |succ|, and splice it into the graph so that the edge
// becomes pred -> split -> succ. Returns null on OOM.
MBasicBlock* MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
                                       size_t predEdgeIdx, MBasicBlock* succ) {
  MBasicBlock* split = nullptr;
  if (!succ->pc()) {
    // The predecessor does not have a PC, this is a Wasm compilation.
    split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
    if (!split) {
      return nullptr;
    }

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));
  } else {
    // The predecessor has a PC, this is a Warp compilation.
    MResumePoint* succEntry = succ->entryResumePoint();

    BytecodeSite* site =
        new (graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
    split =
        new (graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);

    if (!split->init()) {
      return nullptr;
    }

    // A split edge is used to simplify the graph to avoid having a
    // predecessor with multiple successors as well as a successor with
    // multiple predecessors.  As instructions can be moved in this
    // split-edge block, we need to give this block a resume point. To do
    // so, we copy the entry resume points of the successor and filter the
    // phis to keep inputs from the current edge.

    // Propagate the caller resume point from the inherited block.
    split->callerResumePoint_ = succ->callerResumePoint();

    // Split-edge are created after the interpreter stack emulation. Thus,
    // there is no need for creating slots.
    split->stackPosition_ = succEntry->stackDepth();

    // Create a resume point using our initial stack position.
    MResumePoint* splitEntry = new (graph.alloc())
        MResumePoint(split, succEntry->pc(), ResumeMode::ResumeAt);
    if (!splitEntry->init(graph.alloc())) {
      return nullptr;
    }
    split->entryResumePoint_ = splitEntry;

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));

    // The target entry resume point might have phi operands, keep the
    // operands of the phi coming from our edge.
    size_t succEdgeIdx = succ->indexForPredecessor(pred);

    for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
      MDefinition* def = succEntry->getOperand(i);
      // This early in the pipeline, we have no recover instructions in
      // any entry resume point.
      if (def->block() == succ) {
        if (def->isPhi()) {
          // Take the phi input that flows along the pred->succ edge.
          def = def->toPhi()->getOperand(succEdgeIdx);
        } else {
          // The phi-operand may already have been optimized out.
          MOZ_ASSERT(def->isConstant());
          MOZ_ASSERT(def->type() == MIRType::MagicOptimizedOut);

          def = split->optimizedOutConstant(graph.alloc());
        }
      }

      splitEntry->initOperand(i, def);
    }

    // This is done in the New variant for wasm, so we cannot keep this
    // line below, where the rest of the graph is modified.
    if (!split->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  split->setLoopDepth(succ->loopDepth());

  // Wire the new block into the graph: pred now jumps to split, and succ
  // sees split instead of pred.
  graph.insertBlockAfter(pred, split);

  pred->replaceSuccessor(predEdgeIdx, split);
  succ->replacePredecessor(pred, split);
  return split;
}
    333 
// Move |ins| (and its resume point, if any) from this block to the end of
// |dst|. |dst| must not yet have a control instruction.
void MBasicBlock::moveToNewBlock(MInstruction* ins, MBasicBlock* dst) {
  MOZ_ASSERT(ins->block() == this);
  MOZ_ASSERT(!dst->hasLastIns());
  instructions_.remove(ins);
  ins->setInstructionBlock(dst, dst->trackedSite());
  // Re-home the attached resume point as well.
  if (MResumePoint* rp = ins->resumePoint()) {
    removeResumePoint(rp);
    dst->addResumePoint(rp);
    rp->setBlock(dst);
  }
  dst->instructions_.pushBack(ins);
}
    346 
// Transfer this block's outer resume point (if any) to |dest|, updating the
// bookkeeping on both blocks. No-op when there is no outer resume point.
void MBasicBlock::moveOuterResumePointTo(MBasicBlock* dest) {
  if (MResumePoint* outer = outerResumePoint()) {
    removeResumePoint(outer);
    outerResumePoint_ = nullptr;
    dest->setOuterResumePoint(outer);
    dest->addResumePoint(outer);
    outer->setBlock(dest);
  }
}
    356 
// Rewrite this block into a diamond around |ins|: the block now ends in a
// test of |condition|; the true branch executes |fastpath|, the false branch
// executes |ins|, and both jump to a join block holding the instructions
// that originally followed |ins|. If |ins| has uses, a phi of
// (fastpath, ins) replaces them (except the phi itself and |ins|'s resume
// point). Returns false on OOM.
bool MBasicBlock::wrapInstructionInFastpath(MInstruction* ins,
                                            MInstruction* fastpath,
                                            MInstruction* condition) {
  MOZ_ASSERT(ins->block() == this);
  MOZ_ASSERT(!ins->isControlInstruction());

  // |rest| points at the instruction immediately after |ins|.
  MInstructionIterator rest(begin(ins));
  rest++;

  MResumePoint* resumeBeforeIns = activeResumePoint(ins);
  MResumePoint* resumeAfterIns = activeResumePoint(*rest);

  // Create the join block.
  MBasicBlock* join = MBasicBlock::NewInternal(graph_, this, resumeAfterIns);
  if (!join) {
    return false;
  }

  // Update the successors of the current block.
  for (uint32_t i = 0; i < numSuccessors(); i++) {
    getSuccessor(i)->replacePredecessor(this, join);
  }
  if (successorWithPhis()) {
    join->setSuccessorWithPhis(successorWithPhis(), positionInPhiSuccessor());
    clearSuccessorWithPhis();
  }

  // Copy all instructions after |ins| into the join block.
  while (rest != end()) {
    MInstruction* ins = *rest++;
    moveToNewBlock(ins, join);
  }
  MOZ_ASSERT(!hasLastIns());
  MOZ_ASSERT(join->hasLastIns());

  graph_.insertBlockAfter(this, join);

  // Create the fast path block.
  MBasicBlock* fastpathBlock =
      MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
  if (!fastpathBlock) {
    return false;
  }
  graph_.insertBlockAfter(this, fastpathBlock);
  fastpathBlock->add(fastpath);
  fastpathBlock->end(MGoto::New(graph_.alloc(), join));

  // Create the slowpath block.
  MBasicBlock* slowpathBlock =
      MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
  if (!slowpathBlock) {
    return false;
  }
  graph_.insertBlockAfter(fastpathBlock, slowpathBlock);
  moveToNewBlock(ins, slowpathBlock);
  slowpathBlock->end(MGoto::New(graph_.alloc(), join));

  // Connect current block to fastpath and slowpath.
  add(condition);
  end(MTest::New(graph_.alloc(), condition, fastpathBlock, slowpathBlock));

  // Update predecessors.
  if (!fastpathBlock->addPredecessorWithoutPhis(this) ||
      !slowpathBlock->addPredecessorWithoutPhis(this) ||
      !join->addPredecessorWithoutPhis(fastpathBlock) ||
      !join->addPredecessorWithoutPhis(slowpathBlock)) {
    return false;
  }

  if (ins->hasUses()) {
    // Insert phi.
    MPhi* phi = MPhi::New(graph_.alloc());
    if (!phi->reserveLength(2)) {
      return false;
    }
    phi->addInput(fastpath);
    fastpathBlock->setSuccessorWithPhis(join, 0);
    phi->addInput(ins);
    slowpathBlock->setSuccessorWithPhis(join, 1);
    join->addPhi(phi);

    // Redirect all uses of |ins| (except the phi we just built and |ins|'s
    // own resume point) to the phi.
    for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e;) {
      MUse* use = *i++;
      if (use->consumer() != phi && use->consumer() != ins->resumePoint()) {
        use->replaceProducer(phi);
      }
    }
  }

  moveOuterResumePointTo(join);

  return true;
}
    450 
// Create an INTERNAL block that continues |orig| at |resumePoint|: its entry
// resume point is a copy of |resumePoint|'s operands at the matching pc.
// Returns null on OOM.
MBasicBlock* MBasicBlock::NewInternal(MIRGraph& graph, MBasicBlock* orig,
                                      MResumePoint* resumePoint) {
  // For resume-after points, the new block logically starts at the next pc.
  jsbytecode* pc = IsResumeAfter(resumePoint->mode())
                       ? GetNextPc(resumePoint->pc())
                       : resumePoint->pc();

  BytecodeSite* site =
      new (graph.alloc()) BytecodeSite(orig->trackedTree(), pc);
  MBasicBlock* block =
      new (graph.alloc()) MBasicBlock(graph, orig->info(), site, INTERNAL);
  if (!block->init()) {
    return nullptr;
  }

  // Propagate the caller resume point from the original block.
  block->callerResumePoint_ = orig->callerResumePoint();

  // Copy the resume point.
  block->stackPosition_ = resumePoint->stackDepth();
  MResumePoint* entryResumePoint =
      new (graph.alloc()) MResumePoint(block, pc, ResumeMode::ResumeAt);
  if (!entryResumePoint->init(graph.alloc())) {
    return nullptr;
  }
  for (size_t i = 0; i < resumePoint->stackDepth(); i++) {
    entryResumePoint->initOperand(i, resumePoint->getOperand(i));
  }
  block->entryResumePoint_ = entryResumePoint;

  block->setLoopDepth(orig->loopDepth());

  return block;
}
    484 
// Create a new block with an empty bytecode site (no pc). When |pred| is
// given, the new block inherits its stack; for PENDING_LOOP_HEADER blocks
// every inherited slot becomes a phi (recycling phis from the graph's free
// list where possible). Returns null on OOM.
MBasicBlock* MBasicBlock::New(MIRGraph& graph, const CompileInfo& info,
                              MBasicBlock* pred, Kind kind) {
  BytecodeSite* site = new (graph.alloc()) BytecodeSite();
  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (pred) {
    block->stackPosition_ = pred->stackPosition_;

    if (block->kind_ == PENDING_LOOP_HEADER) {
      size_t nphis = block->stackPosition_;

      size_t nfree = graph.phiFreeListLength();

      // Allocate storage only for the phis the free list cannot supply.
      TempAllocator& alloc = graph.alloc();
      MPhi* phis = nullptr;
      if (nphis > nfree) {
        phis = alloc.allocateArray<MPhi>(nphis - nfree);
        if (!phis) {
          return nullptr;
        }
      }

      // Note: Phis are inserted in the same order as the slots.
      for (size_t i = 0; i < nphis; i++) {
        MDefinition* predSlot = pred->getSlot(i);

        MOZ_ASSERT(predSlot->type() != MIRType::Value);

        // Recycle a free-list phi when available, otherwise use the array
        // allocated above; either way placement-new constructs it.
        MPhi* phi;
        if (i < nfree) {
          phi = graph.takePhiFromFreeList();
        } else {
          phi = phis + (i - nfree);
        }
        new (phi) MPhi(alloc, predSlot->type());

        phi->addInlineInput(predSlot);

        // Add append Phis in the block.
        block->addPhi(phi);
        block->setSlot(i, phi);
      }
    } else {
      if (!block->ensureHasSlots(0)) {
        return nullptr;
      }
      block->copySlots(pred);
    }

    if (!block->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  return block;
}
    544 
    545 // Create an empty and unreachable block which jumps to |header|. Used
    546 // when the normal entry into a loop is removed (but the loop is still
    547 // reachable due to OSR) to preserve the invariant that every loop
    548 // header has two predecessors, which is needed for building the
    549 // dominator tree. The new block is inserted immediately before the
    550 // header, which preserves the graph ordering (post-order/RPO). These
    551 // blocks will all be removed before lowering.
MBasicBlock* MBasicBlock::NewFakeLoopPredecessor(MIRGraph& graph,
                                                 MBasicBlock* header) {
  MOZ_ASSERT(graph.osrBlock());

  // Remember the backedge: adding the fake predecessor displaces it from the
  // "last predecessor" position, which is restored below.
  MBasicBlock* backedge = header->backedge();
  MBasicBlock* fake = MBasicBlock::New(graph, header->info(), nullptr,
                                       MBasicBlock::FAKE_LOOP_PRED);
  if (!fake) {
    return nullptr;
  }

  graph.insertBlockBefore(header, fake);
  fake->setUnreachable();

  // Create fake defs to use as inputs for any phis in |header|.
  for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd());
       iter != end; ++iter) {
    // Ensure the allocator has room before each phi-input append.
    if (!graph.alloc().ensureBallast()) {
      return nullptr;
    }
    MPhi* phi = *iter;
    auto* fakeDef = MUnreachableResult::New(graph.alloc(), phi->type());
    fake->add(fakeDef);
    if (!phi->addInputSlow(fakeDef)) {
      return nullptr;
    }
  }

  fake->end(MGoto::New(graph.alloc(), header));

  if (!header->addPredecessorWithoutPhis(fake)) {
    return nullptr;
  }

  // The backedge is always the last predecessor, but we have added a
  // new pred. Restore |backedge| as |header|'s loop backedge.
  header->clearLoopHeader();
  header->setLoopHeader(backedge);

  return fake;
}
    593 
// Remove the FAKE_LOOP_PRED blocks added by NewFakeLoopPredecessor and
// renumber the surviving blocks densely in RPO order.
void MIRGraph::removeFakeLoopPredecessors() {
  MOZ_ASSERT(osrBlock());
  size_t id = 0;
  for (ReversePostorderIterator it = rpoBegin(); it != rpoEnd();) {
    // Advance the iterator before |block| is potentially unlinked.
    MBasicBlock* block = *it++;
    if (block->isFakeLoopPred()) {
      MOZ_ASSERT(block->unreachable());
      MBasicBlock* succ = block->getSingleSuccessor();
      succ->removePredecessor(block);
      removeBlock(block);
    } else {
      block->setId(id++);
    }
  }
#ifdef DEBUG
  // With the fake predecessors gone, the two-predecessor loop-header
  // invariant needed for dominator building no longer holds.
  canBuildDominators_ = false;
#endif
}
    612 
// Construct an empty basic block. Slot storage is allocated separately by
// init(); the initial stack position is the first stack slot of |info|.
MBasicBlock::MBasicBlock(MIRGraph& graph, const CompileInfo& info,
                         BytecodeSite* site, Kind kind)
    : graph_(graph),
      info_(info),
      predecessors_(graph.alloc()),
      stackPosition_(info_.firstStackSlot()),
      id_(0),
      domIndex_(0),
      numDominated_(0),
      lir_(nullptr),
      callerResumePoint_(nullptr),
      entryResumePoint_(nullptr),
      outerResumePoint_(nullptr),
      successorWithPhis_(nullptr),
      positionInPhiSuccessor_(0),
      loopDepth_(0),
      kind_(kind),
      mark_(false),
      immediatelyDominated_(graph.alloc()),
      immediateDominator_(nullptr),
      trackedSite_(site) {
  // Every block must carry a bytecode site, even if it has no pc.
  MOZ_ASSERT(trackedSite_, "trackedSite_ is non-nullptr");
}
    636 
    637 bool MBasicBlock::init() { return slots_.init(graph_.alloc(), info_.nslots()); }
    638 
    639 bool MBasicBlock::increaseSlots(size_t num) {
    640  return slots_.growBy(graph_.alloc(), num);
    641 }
    642 
    643 bool MBasicBlock::ensureHasSlots(size_t num) {
    644  size_t depth = stackDepth() + num;
    645  if (depth > nslots()) {
    646    if (!increaseSlots(depth - nslots())) {
    647      return false;
    648    }
    649  }
    650  return true;
    651 }
    652 
    653 void MBasicBlock::copySlots(MBasicBlock* from) {
    654  MOZ_ASSERT(stackPosition_ <= from->stackPosition_);
    655  MOZ_ASSERT(stackPosition_ <= nslots());
    656 
    657  MDefinition** thisSlots = slots_.begin();
    658  MDefinition** fromSlots = from->slots_.begin();
    659  for (size_t i = 0, e = stackPosition_; i < e; ++i) {
    660    thisSlots[i] = fromSlots[i];
    661  }
    662 }
    663 
// Initialize this block's stack slots and entry resume point from
// |maybePred| (if any), after popping |popped| values. Pending loop headers
// get a phi per slot instead of copied definitions. Returns false on OOM.
bool MBasicBlock::inherit(TempAllocator& alloc, size_t stackDepth,
                          MBasicBlock* maybePred, uint32_t popped) {
  MOZ_ASSERT_IF(maybePred, maybePred->stackDepth() == stackDepth);

  MOZ_ASSERT(stackDepth >= popped);
  stackDepth -= popped;
  stackPosition_ = stackDepth;

  // Loop headers get phis below instead of copied slots.
  if (maybePred && kind_ != PENDING_LOOP_HEADER) {
    copySlots(maybePred);
  }

  MOZ_ASSERT(info_.nslots() >= stackPosition_);
  MOZ_ASSERT(!entryResumePoint_);

  // Propagate the caller resume point from the inherited block.
  callerResumePoint_ = maybePred ? maybePred->callerResumePoint() : nullptr;

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      new (alloc) MResumePoint(this, pc(), ResumeMode::ResumeAt);
  if (!entryResumePoint_->init(alloc)) {
    return false;
  }

  if (maybePred) {
    if (!predecessors_.append(maybePred)) {
      return false;
    }

    if (kind_ == PENDING_LOOP_HEADER) {
      // One phi per slot, seeded with the predecessor's definition; the
      // entry resume point observes the phi, not the raw definition.
      for (size_t i = 0; i < stackDepth; i++) {
        MPhi* phi = MPhi::New(alloc.fallible());
        if (!phi) {
          return false;
        }
        phi->addInlineInput(maybePred->getSlot(i));
        addPhi(phi);
        setSlot(i, phi);
        entryResumePoint()->initOperand(i, phi);
      }
    } else {
      for (size_t i = 0; i < stackDepth; i++) {
        entryResumePoint()->initOperand(i, getSlot(i));
      }
    }
  } else {
    /*
     * Don't leave the operands uninitialized for the caller, as it may not
     * initialize them later on.
     */
    for (size_t i = 0; i < stackDepth; i++) {
      entryResumePoint()->clearOperand(i);
    }
  }

  return true;
}
    722 
    723 void MBasicBlock::inheritSlots(MBasicBlock* parent) {
    724  stackPosition_ = parent->stackPosition_;
    725  copySlots(parent);
    726 }
    727 
    728 bool MBasicBlock::initEntrySlots(TempAllocator& alloc) {
    729  // Remove the previous resume point.
    730  discardResumePoint(entryResumePoint_);
    731 
    732  // Create a resume point using our initial stack state.
    733  entryResumePoint_ =
    734      MResumePoint::New(alloc, this, pc(), ResumeMode::ResumeAt);
    735  if (!entryResumePoint_) {
    736    return false;
    737  }
    738  return true;
    739 }
    740 
    741 MDefinition* MBasicBlock::environmentChain() {
    742  return getSlot(info().environmentChainSlot());
    743 }
    744 
    745 MDefinition* MBasicBlock::argumentsObject() {
    746  return getSlot(info().argsObjSlot());
    747 }
    748 
    749 void MBasicBlock::setEnvironmentChain(MDefinition* scopeObj) {
    750  setSlot(info().environmentChainSlot(), scopeObj);
    751 }
    752 
    753 void MBasicBlock::setArgumentsObject(MDefinition* argsObj) {
    754  setSlot(info().argsObjSlot(), argsObj);
    755 }
    756 
    757 void MBasicBlock::pick(int32_t depth) {
    758  // pick takes a value and moves it to the top.
    759  // pick(-2):
    760  //   A B C D E
    761  //   A B D C E [ swapAt(-2) ]
    762  //   A B D E C [ swapAt(-1) ]
    763  for (; depth < 0; depth++) {
    764    swapAt(depth);
    765  }
    766 }
    767 
    768 void MBasicBlock::unpick(int32_t depth) {
    769  // unpick takes the value on top of the stack and moves it under the depth-th
    770  // element;
    771  // unpick(-2):
    772  //   A B C D E
    773  //   A B C E D [ swapAt(-1) ]
    774  //   A B E C D [ swapAt(-2) ]
    775  for (int32_t n = -1; n >= depth; n--) {
    776    swapAt(n);
    777  }
    778 }
    779 
    780 void MBasicBlock::swapAt(int32_t depth) {
    781  uint32_t lhsDepth = stackPosition_ + depth - 1;
    782  uint32_t rhsDepth = stackPosition_ + depth;
    783 
    784  MDefinition* temp = slots_[lhsDepth];
    785  slots_[lhsDepth] = slots_[rhsDepth];
    786  slots_[rhsDepth] = temp;
    787 }
    788 
    789 void MBasicBlock::discardLastIns() { discard(lastIns()); }
    790 
    791 MConstant* MBasicBlock::optimizedOutConstant(TempAllocator& alloc) {
    792  // If the first instruction is a MConstant(MagicValue(JS_OPTIMIZED_OUT))
    793  // then reuse it.
    794  MInstruction* ins = *begin();
    795  if (ins->type() == MIRType::MagicOptimizedOut) {
    796    return ins->toConstant();
    797  }
    798 
    799  MConstant* constant = MConstant::NewMagic(alloc, JS_OPTIMIZED_OUT);
    800  insertBefore(ins, constant);
    801  return constant;
    802 }
    803 
// Move |ins| out of this block and insert it immediately before |at|, which
// may belong to a different block. Uses and operands are untouched.
void MBasicBlock::moveBefore(MInstruction* at, MInstruction* ins) {
  // Remove |ins| from the current block.
  MOZ_ASSERT(ins->block() == this);
  instructions_.remove(ins);

  // Insert into new block, which may be distinct.
  // Uses and operands are untouched.
  ins->setInstructionBlock(at->block(), at->trackedSite());
  at->block()->instructions_.insertBefore(at, ins);
}
    814 
// Return the first instruction at (or after) |ins| — or at the block start
// when |ins| is null or a phi — before which it is safe to insert new
// instructions. Beta nodes, interrupt checks, constants, parameters and
// (unless IgnoreRecover is set) recovered-on-bailout instructions are
// skipped.
MInstruction* MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore) {
  MOZ_ASSERT(graph().osrBlock() != this,
             "We are not supposed to add any instruction in OSR blocks.");

  // Beta nodes and interrupt checks are required to be located at the
  // beginnings of basic blocks, so we must insert new instructions after any
  // such instructions.
  MInstructionIterator insertIter =
      !ins || ins->isPhi() ? begin() : begin(ins->toInstruction());
  while (insertIter->isBeta() || insertIter->isInterruptCheck() ||
         insertIter->isConstant() || insertIter->isParameter() ||
         (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout())) {
    insertIter++;
  }

  return *insertIter;
}
    832 
// Mark |rp| as discarded and detach it from this block. When
// RefType_DiscardOperands is set, also release the uses held by its
// operands.
void MBasicBlock::discardResumePoint(
    MResumePoint* rp, ReferencesType refType /* = RefType_Default */) {
  if (refType & RefType_DiscardOperands) {
    rp->releaseUses();
  }
  rp->setDiscarded();
  removeResumePoint(rp);
}
    841 
// Unlink |rp| from this block's resume point list. The whole body is guarded
// by DEBUG, so in release builds this is a no-op — presumably the list is
// debug-only bookkeeping; confirm against the resumePoints_ declaration.
void MBasicBlock::removeResumePoint(MResumePoint* rp) {
#ifdef DEBUG
  MResumePointIterator iter = resumePointsBegin();
  while (*iter != rp) {
    // We should reach it before reaching the end.
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
  }
  resumePoints_.removeAt(iter);
#endif
}
    853 
// Common bookkeeping before removing |ins| from this block: optionally
// discard its resume point and release its operands, then flag it as
// discarded. The caller is responsible for unlinking it from the
// instruction list.
void MBasicBlock::prepareForDiscard(
    MInstruction* ins, ReferencesType refType /* = RefType_Default */) {
  // Only remove instructions from the same basic block.  This is needed for
  // correctly removing the resume point if any.
  MOZ_ASSERT(ins->block() == this);

  MResumePoint* rp = ins->resumePoint();
  if ((refType & RefType_DiscardResumePoint) && rp) {
    discardResumePoint(rp, refType);
  }

  // We need to assert that instructions have no uses after removing the their
  // resume points operands as they could be captured by their own resume
  // point.
  MOZ_ASSERT_IF(refType & RefType_AssertNoUses, !ins->hasUses());

  // Operands are released only when both discard flags are requested.
  const uint32_t InstructionOperands =
      RefType_DiscardOperands | RefType_DiscardInstruction;
  if ((refType & InstructionOperands) == InstructionOperands) {
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->releaseOperand(i);
    }
  }

  ins->setDiscarded();
}
    880 
// Remove |ins| from this block entirely, discarding its resume point and
// operand references (default refType) before unlinking it.
void MBasicBlock::discard(MInstruction* ins) {
 prepareForDiscard(ins);
 instructions_.remove(ins);
}
    885 
// Like discard(), but the caller must have already released every operand
// of |ins| (verified in debug builds), so operand release is skipped.
void MBasicBlock::discardIgnoreOperands(MInstruction* ins) {
#ifdef DEBUG
 for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
   MOZ_ASSERT(!ins->hasOperand(i));
 }
#endif

 prepareForDiscard(ins, RefType_IgnoreOperands);
 instructions_.remove(ins);
}
    896 
    897 void MBasicBlock::discardAllInstructions() {
    898  MInstructionIterator iter = begin();
    899  discardAllInstructionsStartingAt(iter);
    900 }
    901 
// Discard every instruction from |iter| to the end of the block.  The
// iterator is advanced before the current instruction is unlinked.
void MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator iter) {
 while (iter != end()) {
   // Discard operands and resume point operands and flag the instruction
   // as discarded.  Also we do not assert that we have no uses as blocks
   // might be removed in reverse post order.
   MInstruction* ins = *iter++;
   prepareForDiscard(ins, RefType_DefaultNoAssert);
   instructions_.remove(ins);
 }
}
    912 
    913 void MBasicBlock::discardAllPhis() {
    914  for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
    915    iter->removeAllOperands();
    916  }
    917 
    918  for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end();
    919       pred++) {
    920    (*pred)->clearSuccessorWithPhis();
    921  }
    922 
    923  phis_.clear();
    924 }
    925 
// Drop this block's outer resume point, and (when |discardEntry| is true)
// its entry resume point as well.  In debug builds, verify that afterwards
// the only resume point possibly left in the list is the entry one.
void MBasicBlock::discardAllResumePoints(bool discardEntry) {
 if (outerResumePoint_) {
   clearOuterResumePoint();
 }

 if (discardEntry && entryResumePoint_) {
   clearEntryResumePoint();
 }

#ifdef DEBUG
 if (!entryResumePoint()) {
   MOZ_ASSERT(resumePointsEmpty());
 } else {
   // Exactly one resume point (the entry one) may remain.
   MResumePointIterator iter(resumePointsBegin());
   MOZ_ASSERT(iter != resumePointsEnd());
   iter++;
   MOZ_ASSERT(iter == resumePointsEnd());
 }
#endif
}
    946 
// Strip the block of its contents: instructions first (which also discards
// the resume points attached to them), then any remaining resume points,
// and finally phis.
void MBasicBlock::clear() {
 discardAllInstructions();
 discardAllResumePoints();
 discardAllPhis();
}
    952 
// Insert |ins| immediately before |at|, inheriting |at|'s tracked site and
// giving |ins| a fresh definition id.
void MBasicBlock::insertBefore(MInstruction* at, MInstruction* ins) {
 MOZ_ASSERT(at->block() == this);
 ins->setInstructionBlock(this, at->trackedSite());
 graph().allocDefinitionId(ins);
 instructions_.insertBefore(at, ins);
}
    959 
// Insert |ins| immediately after |at|, inheriting |at|'s tracked site and
// giving |ins| a fresh definition id.
void MBasicBlock::insertAfter(MInstruction* at, MInstruction* ins) {
 MOZ_ASSERT(at->block() == this);
 ins->setInstructionBlock(this, at->trackedSite());
 graph().allocDefinitionId(ins);
 instructions_.insertAfter(at, ins);
}
    966 
    967 void MBasicBlock::insertAtEnd(MInstruction* ins) {
    968  if (hasLastIns()) {
    969    insertBefore(lastIns(), ins);
    970  } else {
    971    add(ins);
    972  }
    973 }
    974 
// Append |phi| to this block's phi list and give it a fresh definition id.
void MBasicBlock::addPhi(MPhi* phi) {
 phis_.pushBack(phi);
 phi->setPhiBlock(this);
 graph().allocDefinitionId(phi);
}
    980 
// Remove a single phi from this block: drop its operands, mark it
// discarded, and unlink it.  If it was the last phi, predecessors must stop
// treating this block as their successor-with-phis.
void MBasicBlock::discardPhi(MPhi* phi) {
 MOZ_ASSERT(!phis_.empty());

 phi->removeAllOperands();
 phi->setDiscarded();

 phis_.remove(phi);

 if (phis_.empty()) {
   for (MBasicBlock* pred : predecessors_) {
     pred->clearSuccessorWithPhis();
   }
 }
}
    995 
    996 MResumePoint* MBasicBlock::activeResumePoint(MInstruction* ins) {
    997  for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
    998    if (iter->resumePoint() && *iter != ins) {
    999      return iter->resumePoint();
   1000    }
   1001  }
   1002 
   1003  // If none, take the entry resume point.
   1004  return entryResumePoint();
   1005 }
   1006 
// Mark every operand of the resume point active at |ins| (and of all its
// caller resume points up the inline chain) as implicitly used, so they are
// kept alive even though the branch using them is being pruned.
void MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins) {
 MResumePoint* rp = activeResumePoint(ins);

 // The only blocks which do not have any entryResumePoint in Ion, are the
 // SplitEdge blocks.  SplitEdge blocks only have a Goto instruction before
 // Range Analysis phase.  In adjustInputs, we are manipulating instructions
 // which have a TypePolicy.  So, as a Goto has no operand and no type
 // policy, the entry resume point should exist.
 MOZ_ASSERT(rp);

 // Flag all operands as being potentially used.
 while (rp) {
   for (size_t i = 0, end = rp->numOperands(); i < end; i++) {
     rp->getOperand(i)->setImplicitlyUsedUnchecked();
   }
   rp = rp->caller();
 }
}
   1025 
// Add |pred| as a predecessor at the same stack depth (no slots popped).
// Returns false on OOM.
bool MBasicBlock::addPredecessor(TempAllocator& alloc, MBasicBlock* pred) {
 return addPredecessorPopN(alloc, pred, 0);
}
   1029 
// Add |pred| as a predecessor whose stack is |popped| entries deeper than
// this block's.  For every stack slot where the incoming definition differs
// from the current one, merge the values through a phi (creating one, or
// appending to a phi already created for this block).  Returns false on OOM.
bool MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred,
                                    uint32_t popped) {
 MOZ_ASSERT(pred);
 MOZ_ASSERT(predecessors_.length() > 0);

 // Predecessors must be finished, and at the correct stack depth.
 MOZ_ASSERT(pred->hasLastIns());
 MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

 for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
   MDefinition* mine = getSlot(i);
   MDefinition* other = pred->getSlot(i);

   if (mine != other) {
     // When the incoming types disagree, the merged phi falls back to
     // MIRType::Value.
     MIRType phiType = mine->type();
     if (phiType != other->type()) {
       phiType = MIRType::Value;
     }

     // If the current instruction is a phi, and it was created in this
     // basic block, then we have already placed this phi and should
     // instead append to its operands.
     if (mine->isPhi() && mine->block() == this) {
       MOZ_ASSERT(predecessors_.length());
       MOZ_ASSERT(!mine->hasDefUses(),
                  "should only change type of newly created phis");
       mine->setResultType(phiType);
       if (!mine->toPhi()->addInputSlow(other)) {
         return false;
       }
     } else {
       // Otherwise, create a new phi node.
       MPhi* phi = MPhi::New(alloc.fallible(), phiType);
       if (!phi) {
         return false;
       }
       addPhi(phi);

       // Prime the phi for each predecessor, so input(x) comes from
       // predecessor(x).
       if (!phi->reserveLength(predecessors_.length() + 1)) {
         return false;
       }

       for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds;
            ++j) {
         MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
         phi->addInput(mine);
       }
       phi->addInput(other);

       setSlot(i, phi);
       // Keep the entry resume point in sync with the new slot value.
       if (entryResumePoint()) {
         entryResumePoint()->replaceOperand(i, phi);
       }
     }
   }
 }

 return predecessors_.append(pred);
}
   1091 
   1092 bool MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred,
   1093                                             MBasicBlock* existingPred) {
   1094  MOZ_ASSERT(pred);
   1095  MOZ_ASSERT(predecessors_.length() > 0);
   1096 
   1097  // Predecessors must be finished, and at the correct stack depth.
   1098  MOZ_ASSERT(pred->hasLastIns());
   1099  MOZ_ASSERT(!pred->successorWithPhis());
   1100 
   1101  if (!phisEmpty()) {
   1102    size_t existingPosition = indexForPredecessor(existingPred);
   1103    for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
   1104      if (!iter->addInputSlow(iter->getOperand(existingPosition))) {
   1105        return false;
   1106      }
   1107    }
   1108  }
   1109 
   1110  if (!predecessors_.append(pred)) {
   1111    return false;
   1112  }
   1113  return true;
   1114 }
   1115 
// Add |pred| as a predecessor without touching phis; the caller is
// responsible for any phi bookkeeping.  Returns false on OOM.
bool MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred) {
 // Predecessors must be finished.
 MOZ_ASSERT(pred && pred->hasLastIns());
 return predecessors_.append(pred);
}
   1121 
// Record |child| as immediately dominated by this block.  Returns false on
// OOM.
bool MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock* child) {
 return immediatelyDominated_.append(child);
}
   1125 
   1126 void MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock* child) {
   1127  for (size_t i = 0;; ++i) {
   1128    MOZ_ASSERT(i < immediatelyDominated_.length(),
   1129               "Dominated block to remove not present");
   1130    if (immediatelyDominated_[i] == child) {
   1131      immediatelyDominated_[i] = immediatelyDominated_.back();
   1132      immediatelyDominated_.popBack();
   1133      return;
   1134    }
   1135  }
   1136 }
   1137 
// Attach |pred| as the backedge of this pending loop header, completing the
// loop phis with the values flowing around the backedge.  Returns false on
// OOM.
bool MBasicBlock::setBackedge(MBasicBlock* pred) {
 // Predecessors must be finished, and at the correct stack depth.
 MOZ_ASSERT(hasLastIns());
 MOZ_ASSERT(pred->hasLastIns());
 MOZ_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

 // We must be a pending loop header
 MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

 // Add exit definitions to each corresponding phi at the entry.
 if (!inheritPhisFromBackedge(pred)) {
   return false;
 }

 // We are now a loop header proper
 kind_ = LOOP_HEADER;

 return predecessors_.append(pred);
}
   1157 
// Wasm variant of setBackedge(): attach |pred| as the backedge of this
// pending loop header.  Phis were pre-allocated (one per slot, plus
// |paramCount| loop parameters) and already have room for the second input,
// so this path is infallible apart from the final append.
bool MBasicBlock::setBackedgeWasm(MBasicBlock* pred, size_t paramCount) {
 // Predecessors must be finished, and at the correct stack depth.
 MOZ_ASSERT(hasLastIns());
 MOZ_ASSERT(pred->hasLastIns());
 MOZ_ASSERT(stackDepth() + paramCount == pred->stackDepth());

 // We must be a pending loop header
 MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

 // Add exit definitions to each corresponding phi at the entry.
 // Note: Phis are inserted in the same order as the slots. (see
 // MBasicBlock::New)
 size_t slot = 0;
 for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
   MPhi* entryDef = *phi;
   MDefinition* exitDef = pred->getSlot(slot);

   // Assert that we already placed phis for each slot.
   MOZ_ASSERT(entryDef->block() == this);

   // Assert that the phi already has the correct type.
   MOZ_ASSERT(entryDef->type() == exitDef->type());
   MOZ_ASSERT(entryDef->type() != MIRType::Value);

   if (entryDef == exitDef) {
     // If the exit def is the same as the entry def, make a redundant
     // phi. Since loop headers have exactly two incoming edges, we
     // know that that's just the first input.
     //
     // Note that we eliminate later rather than now, to avoid any
     // weirdness around pending continue edges which might still hold
     // onto phis.
     exitDef = entryDef->getOperand(0);
   }

   // Phis always have room for 2 operands, so this can't fail.
   MOZ_ASSERT(phi->numOperands() == 1);
   entryDef->addInlineInput(exitDef);

   // Two cases here: phis that correspond to locals, and phis that correspond
   // to loop parameters.  Only phis for locals go in slots.
   if (slot < stackDepth()) {
     setSlot(slot, entryDef);
   }
 }

 // We are now a loop header proper
 kind_ = LOOP_HEADER;

 return predecessors_.append(pred);
}
   1209 
// Demote this loop header back to a normal block (e.g. after its last
// backedge has been removed).
void MBasicBlock::clearLoopHeader() {
 MOZ_ASSERT(isLoopHeader());
 kind_ = NORMAL;
}
   1214 
// Promote this block to a loop header with |newBackedge| as its backedge.
// The backedge must already be a predecessor; it is moved to the last
// predecessor position (where backedge() expects it), and phi operands are
// swapped to match the new predecessor order.
void MBasicBlock::setLoopHeader(MBasicBlock* newBackedge) {
 MOZ_ASSERT(!isLoopHeader());
 kind_ = LOOP_HEADER;

 size_t numPreds = numPredecessors();
 MOZ_ASSERT(numPreds != 0);

 // Find |newBackedge| in the predecessor list; it must be present.
 size_t lastIndex = numPreds - 1;
 size_t oldIndex = 0;
 for (;; ++oldIndex) {
   MOZ_ASSERT(oldIndex < numPreds);
   MBasicBlock* pred = getPredecessor(oldIndex);
   if (pred == newBackedge) {
     break;
   }
 }

 // Set the loop backedge to be the last element in predecessors_.
 std::swap(predecessors_[oldIndex], predecessors_[lastIndex]);

 // If we have phis, reorder their operands accordingly.
 if (!phisEmpty()) {
   getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
   getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
   for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
     MPhi* phi = *iter;
     MDefinition* last = phi->getOperand(oldIndex);
     MDefinition* old = phi->getOperand(lastIndex);
     phi->replaceOperand(oldIndex, old);
     phi->replaceOperand(lastIndex, last);
   }
 }

 MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
 MOZ_ASSERT(backedge() == newBackedge);
}
   1251 
   1252 size_t MBasicBlock::getSuccessorIndex(MBasicBlock* block) const {
   1253  MOZ_ASSERT(lastIns());
   1254  for (size_t i = 0; i < numSuccessors(); i++) {
   1255    if (getSuccessor(i) == block) {
   1256      return i;
   1257    }
   1258  }
   1259  MOZ_CRASH("Invalid successor");
   1260 }
   1261 
   1262 size_t MBasicBlock::getPredecessorIndex(MBasicBlock* block) const {
   1263  for (size_t i = 0, e = numPredecessors(); i < e; ++i) {
   1264    if (getPredecessor(i) == block) {
   1265      return i;
   1266    }
   1267  }
   1268  MOZ_CRASH("Invalid predecessor");
   1269 }
   1270 
// Redirect the successor edge at index |pos| to |split| by rewriting the
// control instruction's target.
void MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock* split) {
 MOZ_ASSERT(lastIns());

 // Note, during split-critical-edges, successors-with-phis is not yet set.
 // During PAA, this case is handled before we enter.
 MOZ_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));

 lastIns()->replaceSuccessor(pos, split);
}
   1280 
// Replace predecessor |old| with |split| in the predecessor list; crashes
// if |old| is not found.
void MBasicBlock::replacePredecessor(MBasicBlock* old, MBasicBlock* split) {
 for (size_t i = 0; i < numPredecessors(); i++) {
   if (getPredecessor(i) == old) {
     predecessors_[i] = split;

#ifdef DEBUG
     // The same block should not appear twice in the predecessor list.
     for (size_t j = i; j < numPredecessors(); j++) {
       MOZ_ASSERT(predecessors_[j] != old);
     }
#endif

     return;
   }
 }

 MOZ_CRASH("predecessor was not found");
}
   1299 
// Reset all dominator-tree state for this block, typically before the
// dominator tree is recomputed.
void MBasicBlock::clearDominatorInfo() {
 setImmediateDominator(nullptr);
 immediatelyDominated_.clear();
 numDominated_ = 0;
}
   1305 
// Remove |pred| (at index |predIndex|) from the predecessor list.  The
// caller must already have removed the corresponding phi operands; see
// removePredecessor() for the variant that does both.
void MBasicBlock::removePredecessorWithoutPhiOperands(MBasicBlock* pred,
                                                     size_t predIndex) {
 // If we're removing the last backedge, this is no longer a loop.
 if (isLoopHeader() && hasUniqueBackedge() && backedge() == pred) {
   clearLoopHeader();
 }

 // Adjust phis.  Note that this can leave redundant phis behind.
 // Don't adjust successorWithPhis() if we haven't constructed this
 // information yet.
 if (pred->successorWithPhis()) {
   MOZ_ASSERT(pred->positionInPhiSuccessor() == predIndex);
   pred->clearSuccessorWithPhis();
   // Later predecessors shift down by one position.
   for (size_t j = predIndex + 1; j < numPredecessors(); j++) {
     getPredecessor(j)->setSuccessorWithPhis(this, j - 1);
   }
 }

 // Remove from pred list.
 predecessors_.erase(predecessors_.begin() + predIndex);
}
   1327 
// Remove |pred| as a predecessor: first drop its operand from every phi,
// then unlink it from the predecessor list.
void MBasicBlock::removePredecessor(MBasicBlock* pred) {
 size_t predIndex = getPredecessorIndex(pred);

 // Remove the phi operands.
 for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
   iter->removeOperand(predIndex);
 }

 // Now we can call the underlying function, which expects that phi
 // operands have been removed.
 removePredecessorWithoutPhiOperands(pred, predIndex);
}
   1340 
// For each stack slot of this pending loop header, add the value flowing in
// from |backedge| as the second input of the corresponding entry phi.
// Returns false on OOM.
bool MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge) {
 // We must be a pending loop header
 MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

 size_t stackDepth = entryResumePoint()->stackDepth();
 for (size_t slot = 0; slot < stackDepth; slot++) {
   // Get the value stack-slot of the back edge.
   MDefinition* exitDef = backedge->getSlot(slot);

   // Get the value of the loop header.
   MDefinition* loopDef = entryResumePoint()->getOperand(slot);
   if (loopDef->block() != this) {
     // If we are finishing a pending loop header, then we need to ensure
     // that all operands are phis. This is usually the case, except for
     // object/arrays build with generators, in which case we share the
     // same allocations across all blocks.
     MOZ_ASSERT(loopDef->block()->id() < id());
     MOZ_ASSERT(loopDef == exitDef);
     continue;
   }

   // Phis are allocated by NewPendingLoopHeader.
   MPhi* entryDef = loopDef->toPhi();
   MOZ_ASSERT(entryDef->block() == this);

   if (entryDef == exitDef) {
     // If the exit def is the same as the entry def, make a redundant
     // phi. Since loop headers have exactly two incoming edges, we
     // know that that's just the first input.
     //
     // Note that we eliminate later rather than now, to avoid any
     // weirdness around pending continue edges which might still hold
     // onto phis.
     exitDef = entryDef->getOperand(0);
   }

   if (!entryDef->addInputSlow(exitDef)) {
     return false;
   }
 }

 return true;
}
   1384 
   1385 MTest* MBasicBlock::immediateDominatorBranch(BranchDirection* pdirection) {
   1386  *pdirection = FALSE_BRANCH;
   1387 
   1388  if (numPredecessors() != 1) {
   1389    return nullptr;
   1390  }
   1391 
   1392  MBasicBlock* dom = immediateDominator();
   1393  if (dom != getPredecessor(0)) {
   1394    return nullptr;
   1395  }
   1396 
   1397  // Look for a trailing MTest branching to this block.
   1398  MInstruction* ins = dom->lastIns();
   1399  if (ins->isTest()) {
   1400    MTest* test = ins->toTest();
   1401 
   1402    MOZ_ASSERT(test->ifTrue() == this || test->ifFalse() == this);
   1403    if (test->ifTrue() == this && test->ifFalse() == this) {
   1404      return nullptr;
   1405    }
   1406 
   1407    *pdirection = (test->ifTrue() == this) ? TRUE_BRANCH : FALSE_BRANCH;
   1408    return test;
   1409  }
   1410 
   1411  return nullptr;
   1412 }
   1413 
// Debug-only: print the current stack slots (index and definition pointer)
// to |out|.  No-op in release builds.
void MBasicBlock::dumpStack(GenericPrinter& out) {
#ifdef DEBUG
 out.printf(" %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next");
 out.printf("-------------------------------------------\n");
 for (uint32_t i = 0; i < stackPosition_; i++) {
   out.printf(" %-3u", i);
   out.printf(" %-16p\n", (void*)slots_[i]);
 }
#endif
}
   1424 
// Convenience overload: dump the stack to stderr.
void MBasicBlock::dumpStack() {
 Fprinter out(stderr);
 dumpStack(out);
 out.finish();
}
   1430 
// Dump every block of the graph to |out|, separated by blank lines.
// Compiled out unless JS_JITSPEW is enabled.
void MIRGraph::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
 for (MBasicBlockIterator iter(begin()); iter != end(); iter++) {
   iter->dump(out);
   out.printf("\n");
 }
#endif
}
   1439 
// Convenience overload: dump the graph to stderr.
void MIRGraph::dump() {
 Fprinter out(stderr);
 dump(out);
 out.finish();
}
   1445 
// Dump this block to |out|: header line with id and flags, then the entry
// resume point, phis, and instructions.  Compiled out unless JS_JITSPEW is
// enabled.
void MBasicBlock::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
 out.printf("block%u:%s%s%s\n", id(), isLoopHeader() ? " (loop header)" : "",
            unreachable() ? " (unreachable)" : "",
            isMarked() ? " (marked)" : "");
 if (MResumePoint* resume = entryResumePoint()) {
   resume->dump(out);
 }
 for (MPhiIterator iter(phisBegin()); iter != phisEnd(); iter++) {
   iter->dump(out);
 }
 for (MInstructionIterator iter(begin()); iter != end(); iter++) {
   iter->dump(out);
 }
#endif
}
   1462 
// Convenience overload: dump this block to stderr.
void MBasicBlock::dump() {
 Fprinter out(stderr);
 dump(out);
 out.finish();
}