tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

RegisterAllocator.cpp (20822B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/RegisterAllocator.h"
      8 
      9 using namespace js;
     10 using namespace js::jit;
     11 
     12 #ifdef DEBUG
     13 bool AllocationIntegrityState::record() {
     14  // Ignore repeated record() calls.
     15  if (!instructions.empty()) {
     16    return true;
     17  }
     18 
     19  if (!instructions.appendN(InstructionInfo(), graph.numInstructions())) {
     20    return false;
     21  }
     22 
     23  if (!virtualRegisters.appendN((LDefinition*)nullptr,
     24                                graph.numVirtualRegisters())) {
     25    return false;
     26  }
     27 
     28  if (!blocks.reserve(graph.numBlocks())) {
     29    return false;
     30  }
     31  for (size_t i = 0; i < graph.numBlocks(); i++) {
     32    blocks.infallibleAppend(BlockInfo());
     33    LBlock* block = graph.getBlock(i);
     34    MOZ_ASSERT(block->mir()->id() == i);
     35 
     36    BlockInfo& blockInfo = blocks[i];
     37    if (!blockInfo.phis.reserve(block->numPhis())) {
     38      return false;
     39    }
     40 
     41    for (size_t j = 0; j < block->numPhis(); j++) {
     42      blockInfo.phis.infallibleAppend(InstructionInfo());
     43      InstructionInfo& info = blockInfo.phis[j];
     44      LPhi* phi = block->getPhi(j);
     45      MOZ_ASSERT(phi->numDefs() == 1);
     46      uint32_t vreg = phi->getDef(0)->virtualRegister();
     47      virtualRegisters[vreg] = phi->getDef(0);
     48      if (!info.outputs.append(*phi->getDef(0))) {
     49        return false;
     50      }
     51      for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
     52        if (!info.inputs.append(*phi->getOperand(k))) {
     53          return false;
     54        }
     55      }
     56    }
     57 
     58    for (LInstructionIterator iter = block->begin(); iter != block->end();
     59         iter++) {
     60      LInstruction* ins = *iter;
     61      InstructionInfo& info = instructions[ins->id()];
     62 
     63      for (size_t k = 0; k < ins->numTemps(); k++) {
     64        if (!ins->getTemp(k)->isBogusTemp()) {
     65          uint32_t vreg = ins->getTemp(k)->virtualRegister();
     66          virtualRegisters[vreg] = ins->getTemp(k);
     67        }
     68        if (!info.temps.append(*ins->getTemp(k))) {
     69          return false;
     70        }
     71      }
     72      for (size_t k = 0; k < ins->numDefs(); k++) {
     73        if (!ins->getDef(k)->isBogusTemp()) {
     74          uint32_t vreg = ins->getDef(k)->virtualRegister();
     75          virtualRegisters[vreg] = ins->getDef(k);
     76        }
     77        if (!info.outputs.append(*ins->getDef(k))) {
     78          return false;
     79        }
     80      }
     81      for (LInstruction::InputIter alloc(*ins); alloc.more(); alloc.next()) {
     82        if (!info.inputs.append(**alloc)) {
     83          return false;
     84        }
     85      }
     86    }
     87  }
     88 
     89  return true;
     90 }
     91 
// Validate the register allocator's output against the state captured by
// record(). Asserts on any integrity violation; returns false only on OOM
// while traversing predecessor blocks.
bool AllocationIntegrityState::check() {
  MOZ_ASSERT(!instructions.empty());

#  ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_RegAlloc)) {
    dump();
  }
#  endif
  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);

    // Check that all instruction inputs and outputs have been assigned an
    // allocation.
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;

      // An input still marked as a use means the allocator never assigned
      // it a physical location.
      for (LInstruction::InputIter alloc(*ins); alloc.more(); alloc.next()) {
        MOZ_ASSERT(!alloc->isUse());
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        MOZ_ASSERT(!def->output()->isUse());

        // A MUST_REUSE_INPUT def must end up in the same location as the
        // operand it reuses.
        LDefinition oldDef = instructions[ins->id()].outputs[i];
        MOZ_ASSERT_IF(
            oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
            *def->output() == *ins->getOperand(oldDef.getReusedInput()));
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isAnyRegister());

        LDefinition oldTemp = instructions[ins->id()].temps[i];
        MOZ_ASSERT_IF(
            oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
            *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
      }
    }
  }

  // Check that the register assignment and move groups preserve the original
  // semantics of the virtual registers. Each virtual register has a single
  // write (owing to the SSA representation), but the allocation may move the
  // written value around between registers and memory locations along
  // different paths through the script.
  //
  // For each use of an allocation, follow the physical value which is read
  // backward through the script, along all paths to the value's virtual
  // register's definition.
  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;
      const InstructionInfo& info = instructions[ins->id()];

      LSafepoint* safepoint = ins->safepoint();
      if (safepoint) {
        // Call instructions have empty liveRegs/clobberedRegs sets.
        MOZ_ASSERT_IF(ins->isCall(), safepoint->liveRegs().empty());
#  ifdef CHECK_OSIPOINT_REGISTERS
        MOZ_ASSERT_IF(ins->isCall(), safepoint->clobberedRegs().empty());
#  endif

        // Temps are included in safepoints.
        for (LInstruction::TempIter temp(ins); !temp.done(); temp++) {
          uint32_t vreg = info.temps[temp.index()].virtualRegister();
          LAllocation* alloc = temp->output();
          checkSafepointAllocation(ins, vreg, *alloc);
        }
      }

      size_t inputIndex = 0;
      for (LInstruction::InputIter alloc(*ins); alloc.more();
           inputIndex++, alloc.next()) {
        // Only uses of virtual registers need to be traced; constants and
        // fixed allocations recorded before allocation are skipped.
        LAllocation oldInput = info.inputs[inputIndex];
        if (!oldInput.isUse()) {
          continue;
        }

        uint32_t vreg = oldInput.toUse()->virtualRegister();

        if (safepoint) {
          checkSafepointAllocation(ins, vreg, **alloc);
        }

        // Temps must never alias inputs (even at-start uses) unless explicitly
        // requested.
        for (LInstruction::TempIter temp(ins); !temp.done(); temp++) {
          LAllocation* tempAlloc = temp->output();

          // Fixed uses and fixed temps are allowed to alias.
          size_t i = temp.index();
          if (oldInput.toUse()->isFixedRegister() && info.temps[i].isFixed()) {
            continue;
          }

          // MUST_REUSE_INPUT temps will alias their input.
          if (info.temps[i].policy() == LDefinition::MUST_REUSE_INPUT &&
              info.temps[i].getReusedInput() == inputIndex) {
            continue;
          }

          MOZ_ASSERT(!tempAlloc->aliases(**alloc));
        }

        // Start checking at the previous instruction, in case this
        // instruction reuses its input register for an output.
        LInstructionReverseIterator riter = block->rbegin(ins);
        riter++;
        if (!checkIntegrity(block, *riter, vreg, **alloc)) {
          return false;
        }

        // Drain the predecessor-block worklist that checkIntegrity /
        // addPredecessor populate, scanning each queued block from its end.
        while (!worklist.empty()) {
          IntegrityItem item = worklist.popCopy();
          if (!checkIntegrity(item.block, *item.block->rbegin(), item.vreg,
                              item.alloc)) {
            return false;
          }
        }
      }
    }
  }

  return true;
}
    222 
// Trace the physical location |alloc| backward through |block|, starting at
// |ins|, verifying that along the path it holds the value defined by |vreg|
// and is not clobbered before the definition is reached. Predecessor blocks
// still needing a scan are queued on the worklist via addPredecessor().
// Returns false only on OOM; integrity violations assert.
bool AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
                                             uint32_t vreg,
                                             LAllocation alloc) {
  for (LInstructionReverseIterator iter(block->rbegin(ins));
       iter != block->rend(); iter++) {
    ins = *iter;

    // Follow values through assignments in move groups. All assignments in
    // a move group are considered to happen simultaneously, so stop after
    // the first matching move is found.
    if (ins->isMoveGroup()) {
      LMoveGroup* group = ins->toMoveGroup();
      for (int i = group->numMoves() - 1; i >= 0; i--) {
        if (group->getMove(i).to() == alloc) {
          alloc = group->getMove(i).from();
          break;
        }
      }
    }

    const InstructionInfo& info = instructions[ins->id()];

    // Make sure the physical location being tracked is not clobbered by
    // another instruction, and that if the originating vreg definition is
    // found that it is writing to the tracked location.

    for (LInstruction::OutputIter output(ins); !output.done(); output++) {
      LDefinition* def = *output;
      if (info.outputs[output.index()].virtualRegister() == vreg) {
#  ifdef JS_JITSPEW
        // If the following assertion is about to fail, print some useful info.
        if (!(*def->output() == alloc) && JitSpewEnabled(JitSpew_RegAlloc)) {
          CodePosition inputPos(ins->id(), CodePosition::INPUT);
          CodePosition outputPos(ins->id(), CodePosition::OUTPUT);
          JitSpew(JitSpew_RegAlloc,
                  "Instruction at %u-%u, output number %u:", inputPos.bits(),
                  outputPos.bits(), unsigned(output.index()));
          JitSpew(JitSpew_RegAlloc,
                  "  Error: conflicting allocations: %s vs %s",
                  (*def->output()).toString().get(), alloc.toString().get());
        }
#  endif
        MOZ_ASSERT(*def->output() == alloc);

        // Found the original definition, done scanning.
        return true;
      } else {
        // Any other definition writing to the tracked location would
        // clobber the value we are following.
        MOZ_ASSERT(*def->output() != alloc);
      }
    }

    // Temps must not clobber the tracked location either.
    for (LInstruction::TempIter temp(ins); !temp.done(); temp++) {
      MOZ_ASSERT(*temp->output() != alloc);
    }

    if (ins->safepoint()) {
      checkSafepointAllocation(ins, vreg, alloc);
    }
  }

  // Phis are effectless, but change the vreg we are tracking. Check if there
  // is one which produced this vreg. We need to follow back through the phi
  // inputs as it is not guaranteed the register allocator filled in physical
  // allocations for the inputs and outputs of the phis.
  for (size_t i = 0; i < block->numPhis(); i++) {
    const InstructionInfo& info = blocks[block->mir()->id()].phis[i];
    LPhi* phi = block->getPhi(i);
    if (info.outputs[0].virtualRegister() == vreg) {
      for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
        uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
        LBlock* predecessor = block->mir()->getPredecessor(j)->lir();
        if (!addPredecessor(predecessor, newvreg, alloc)) {
          return false;
        }
      }
      return true;
    }
  }

  // No phi which defined the vreg we are tracking, follow back through all
  // predecessors with the existing vreg.
  for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
    LBlock* predecessor = block->mir()->getPredecessor(i)->lir();
    if (!addPredecessor(predecessor, vreg, alloc)) {
      return false;
    }
  }

  return true;
}
    313 
// Assert that |alloc|, which holds the value of |vreg| live across |ins|,
// is properly accounted for by the instruction's safepoint (live registers
// and the type-specific GC tracking sets).
void AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
                                                       uint32_t vreg,
                                                       LAllocation alloc) {
  LSafepoint* safepoint = ins->safepoint();
  MOZ_ASSERT(safepoint);

  // Call instructions have empty liveRegs sets (asserted in check()), so
  // there is nothing to verify for a register allocation at a call.
  if (ins->isCall() && alloc.isAnyRegister()) {
    return;
  }

  if (alloc.isAnyRegister()) {
    MOZ_ASSERT(safepoint->liveRegs().has(alloc.toAnyRegister()));
  }

  // If no definition was recorded for this vreg, fall back to GENERAL,
  // which needs no type-specific safepoint tracking below.
  LDefinition::Type type = virtualRegisters[vreg]
                               ? virtualRegisters[vreg]->type()
                               : LDefinition::GENERAL;

  // GC-traced types must appear in the safepoint's corresponding set.
  switch (type) {
    case LDefinition::OBJECT:
      MOZ_ASSERT(safepoint->hasGcPointer(alloc));
      break;
    case LDefinition::STACKRESULTS:
      MOZ_ASSERT(safepoint->hasAllWasmAnyRefsFromStackArea(alloc));
      break;
    case LDefinition::SLOTS:
      MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
      break;
    case LDefinition::WASM_ANYREF:
      MOZ_ASSERT(safepoint->hasWasmAnyRef(alloc));
      break;
#  ifdef JS_NUNBOX32
    // On nunbox platforms a Value is split into separate type/payload parts.
    case LDefinition::TYPE:
      MOZ_ASSERT(safepoint->hasNunboxPart(/* isType = */ true, alloc));
      break;
    case LDefinition::PAYLOAD:
      MOZ_ASSERT(safepoint->hasNunboxPart(/* isType = */ false, alloc));
      break;
#  else
    case LDefinition::BOX:
      MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
      break;
#  endif
    default:
      break;
  }
}
    361 
    362 bool AllocationIntegrityState::addPredecessor(LBlock* block, uint32_t vreg,
    363                                              LAllocation alloc) {
    364  // There is no need to reanalyze if we have already seen this predecessor.
    365  // We share the seen allocations across analysis of each use, as there will
    366  // likely be common ground between different uses of the same vreg.
    367  IntegrityItem item;
    368  item.block = block;
    369  item.vreg = vreg;
    370  item.alloc = alloc;
    371  item.index = seen.count();
    372 
    373  IntegrityItemSet::AddPtr p = seen.lookupForAdd(item);
    374  if (p) {
    375    return true;
    376  }
    377  if (!seen.add(p, item)) {
    378    return false;
    379  }
    380 
    381  return worklist.append(item);
    382 }
    383 
    384 void AllocationIntegrityState::dump() {
    385 #  ifdef JS_JITSPEW
    386  JitSpewCont(JitSpew_RegAlloc, "\n");
    387  JitSpew(JitSpew_RegAlloc, "Register Allocation Integrity State:");
    388 
    389  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    390    LBlock* block = graph.getBlock(blockIndex);
    391    MBasicBlock* mir = block->mir();
    392 
    393    JitSpewHeader(JitSpew_RegAlloc);
    394    JitSpewCont(JitSpew_RegAlloc, "  Block %lu",
    395                static_cast<unsigned long>(blockIndex));
    396    for (size_t i = 0; i < mir->numSuccessors(); i++) {
    397      JitSpewCont(JitSpew_RegAlloc, " [successor %u]",
    398                  mir->getSuccessor(i)->id());
    399    }
    400    JitSpewCont(JitSpew_RegAlloc, "\n");
    401 
    402    for (size_t i = 0; i < block->numPhis(); i++) {
    403      const InstructionInfo& info = blocks[blockIndex].phis[i];
    404      LPhi* phi = block->getPhi(i);
    405      CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
    406      CodePosition output(block->getPhi(block->numPhis() - 1)->id(),
    407                          CodePosition::OUTPUT);
    408 
    409      JitSpewHeader(JitSpew_RegAlloc);
    410      JitSpewCont(JitSpew_RegAlloc, "    %u-%u Phi [def %s] ", input.bits(),
    411                  output.bits(), phi->getDef(0)->toString().get());
    412      for (size_t j = 0; j < phi->numOperands(); j++) {
    413        JitSpewCont(JitSpew_RegAlloc, " [use %s]",
    414                    info.inputs[j].toString().get());
    415      }
    416      JitSpewCont(JitSpew_RegAlloc, "\n");
    417    }
    418 
    419    for (LInstructionIterator iter = block->begin(); iter != block->end();
    420         iter++) {
    421      LInstruction* ins = *iter;
    422      const InstructionInfo& info = instructions[ins->id()];
    423 
    424      CodePosition input(ins->id(), CodePosition::INPUT);
    425      CodePosition output(ins->id(), CodePosition::OUTPUT);
    426 
    427      JitSpewHeader(JitSpew_RegAlloc);
    428      JitSpewCont(JitSpew_RegAlloc, "    ");
    429      if (input != CodePosition::MIN) {
    430        JitSpewCont(JitSpew_RegAlloc, "%u-%u ", input.bits(), output.bits());
    431      }
    432      JitSpewCont(JitSpew_RegAlloc, "%s", ins->opName());
    433 
    434      if (ins->isMoveGroup()) {
    435        LMoveGroup* group = ins->toMoveGroup();
    436        for (int i = group->numMoves() - 1; i >= 0; i--) {
    437          JitSpewCont(JitSpew_RegAlloc, " [%s <- %s]",
    438                      group->getMove(i).to().toString().get(),
    439                      group->getMove(i).from().toString().get());
    440        }
    441        JitSpewCont(JitSpew_RegAlloc, "\n");
    442        continue;
    443      }
    444 
    445      for (LInstruction::OutputIter output(ins); !output.done(); output++) {
    446        JitSpewCont(JitSpew_RegAlloc, " [def %s]", output->toString().get());
    447      }
    448      for (LInstruction::TempIter temp(ins); !temp.done(); temp++) {
    449        JitSpewCont(JitSpew_RegAlloc, " [temp v%u %s]",
    450                    info.temps[temp.index()].virtualRegister(),
    451                    temp->toString().get());
    452      }
    453 
    454      size_t index = 0;
    455      for (LInstruction::InputIter alloc(*ins); alloc.more(); alloc.next()) {
    456        JitSpewCont(JitSpew_RegAlloc, " [use %s",
    457                    info.inputs[index++].toString().get());
    458        if (!alloc->isConstant()) {
    459          JitSpewCont(JitSpew_RegAlloc, " %s", alloc->toString().get());
    460        }
    461        JitSpewCont(JitSpew_RegAlloc, "]");
    462      }
    463 
    464      JitSpewCont(JitSpew_RegAlloc, "\n");
    465    }
    466  }
    467 
    468  // Print discovered allocations at the ends of blocks, in the order they
    469  // were discovered.
    470 
    471  Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
    472  if (!seenOrdered.appendN(IntegrityItem(), seen.count())) {
    473    fprintf(stderr, "OOM while dumping allocations\n");
    474    return;
    475  }
    476 
    477  for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
    478    IntegrityItem item = iter.front();
    479    seenOrdered[item.index] = item;
    480  }
    481 
    482  if (!seenOrdered.empty()) {
    483    fprintf(stderr, "Intermediate Allocations:\n");
    484 
    485    for (size_t i = 0; i < seenOrdered.length(); i++) {
    486      IntegrityItem item = seenOrdered[i];
    487      fprintf(stderr, "  block %u reg v%u alloc %s\n", item.block->mir()->id(),
    488              item.vreg, item.alloc.toString().get());
    489    }
    490  }
    491 
    492  fprintf(stderr, "\n");
    493 #  endif
    494 }
    495 #endif  // DEBUG
    496 
// Sentinel code positions bounding the full range of instruction ids.
const CodePosition CodePosition::MAX(UINT_MAX);
const CodePosition CodePosition::MIN(0);
    499 
    500 LMoveGroup* RegisterAllocator::getInputMoveGroup(LInstruction* ins) {
    501  MOZ_ASSERT(!ins->fixReuseMoves());
    502  if (ins->inputMoves()) {
    503    return ins->inputMoves();
    504  }
    505 
    506  LMoveGroup* moves = LMoveGroup::New(alloc());
    507  ins->setInputMoves(moves);
    508  ins->block()->insertBefore(ins, moves);
    509  return moves;
    510 }
    511 
    512 LMoveGroup* RegisterAllocator::getFixReuseMoveGroup(LInstruction* ins) {
    513  if (ins->fixReuseMoves()) {
    514    return ins->fixReuseMoves();
    515  }
    516 
    517  LMoveGroup* moves = LMoveGroup::New(alloc());
    518  ins->setFixReuseMoves(moves);
    519  ins->block()->insertBefore(ins, moves);
    520  return moves;
    521 }
    522 
    523 LMoveGroup* RegisterAllocator::getMoveGroupAfter(LInstruction* ins) {
    524  if (ins->movesAfter()) {
    525    return ins->movesAfter();
    526  }
    527 
    528  LMoveGroup* moves = LMoveGroup::New(alloc());
    529  ins->setMovesAfter(moves);
    530 
    531  ins->block()->insertAfter(ins, moves);
    532  return moves;
    533 }
    534 
// Spew the LIR graph (blocks, phis, instructions) in its current state;
// |who| labels the dump (e.g. which allocation phase it was taken at).
// A no-op unless built with JS_JITSPEW.
void RegisterAllocator::dumpInstructions(const char* who) {
#ifdef JS_JITSPEW
  JitSpew(JitSpew_RegAlloc, "LIR instructions %s", who);

  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);
    MBasicBlock* mir = block->mir();

    JitSpewHeader(JitSpew_RegAlloc);
    JitSpewCont(JitSpew_RegAlloc, "  Block %lu",
                static_cast<unsigned long>(blockIndex));
    for (size_t i = 0; i < mir->numSuccessors(); i++) {
      JitSpewCont(JitSpew_RegAlloc, " [successor %u]",
                  mir->getSuccessor(i)->id());
    }
    JitSpewCont(JitSpew_RegAlloc, "\n");

    for (size_t i = 0; i < block->numPhis(); i++) {
      LPhi* phi = block->getPhi(i);

      JitSpewHeader(JitSpew_RegAlloc);
      JitSpewCont(JitSpew_RegAlloc, "    %u-%u Phi [def %s]",
                  inputOf(phi).bits(), outputOf(phi).bits(),
                  phi->getDef(0)->toString().get());
      for (size_t j = 0; j < phi->numOperands(); j++) {
        JitSpewCont(JitSpew_RegAlloc, " [use %s]",
                    phi->getOperand(j)->toString().get());
      }
      JitSpewCont(JitSpew_RegAlloc, "\n");
    }

    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;

      JitSpewHeader(JitSpew_RegAlloc);
      JitSpewCont(JitSpew_RegAlloc, "    ");
      if (ins->id() != 0) {
        JitSpewCont(JitSpew_RegAlloc, "%u-%u ", inputOf(ins).bits(),
                    outputOf(ins).bits());
      }
      JitSpewCont(JitSpew_RegAlloc, "%s", ins->opName());

      if (ins->isMoveGroup()) {
        LMoveGroup* group = ins->toMoveGroup();
        for (int i = group->numMoves() - 1; i >= 0; i--) {
          // Use two printfs, as LAllocation::toString is not reentrant.
          JitSpewCont(JitSpew_RegAlloc, " [%s",
                      group->getMove(i).to().toString().get());
          JitSpewCont(JitSpew_RegAlloc, " <- %s]",
                      group->getMove(i).from().toString().get());
        }
        JitSpewCont(JitSpew_RegAlloc, "\n");
        continue;
      }

      for (LInstruction::OutputIter output(ins); !output.done(); output++) {
        JitSpewCont(JitSpew_RegAlloc, " [def %s]", output->toString().get());
      }

      for (LInstruction::TempIter temp(ins); !temp.done(); temp++) {
        JitSpewCont(JitSpew_RegAlloc, " [temp %s]", temp->toString().get());
      }

      for (LInstruction::InputIter alloc(*ins); alloc.more(); alloc.next()) {
        if (!alloc->isBogus()) {
          JitSpewCont(JitSpew_RegAlloc, " [use %s]", alloc->toString().get());
        }
      }

      JitSpewCont(JitSpew_RegAlloc, "\n");
    }
  }
  JitSpewCont(JitSpew_RegAlloc, "\n");
#endif  // JS_JITSPEW
}