tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MoveResolver.cpp (14054B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/MoveResolver.h"
      8 
      9 #include "mozilla/ScopeExit.h"
     10 
     11 #include "jit/MacroAssembler.h"
     12 #include "jit/RegisterSets.h"
     13 
     14 using namespace js;
     15 using namespace js::jit;
     16 
// Translate an ABI argument location (register, register pair, FP register,
// or stack slot) into a MoveOperand so the resolver can schedule moves into
// call-argument positions.
MoveOperand::MoveOperand(MacroAssembler& masm, const ABIArg& arg) : disp_(0) {
  switch (arg.kind()) {
    case ABIArg::GPR:
      kind_ = Kind::Reg;
      code_ = arg.gpr().code();
      break;
#ifdef JS_CODEGEN_REGISTER_PAIR
    case ABIArg::GPR_PAIR:
      kind_ = Kind::RegPair;
      code_ = arg.evenGpr().code();
      // Pairs must start on an even register and occupy the adjacent odd one;
      // code_ alone is then enough to identify both halves.
      MOZ_ASSERT(code_ % 2 == 0);
      MOZ_ASSERT(code_ + 1 == arg.oddGpr().code());
      break;
#endif
    case ABIArg::FPU:
      kind_ = Kind::FloatReg;
      code_ = arg.fpu().code();
      break;
    case ABIArg::Stack:
      kind_ = Kind::Memory;
      // Stack arguments are addressed as (stack pointer + displacement); a
      // platform whose SP is not exposed as a numbered register cannot
      // encode that base here.
      if (IsHiddenSP(masm.getStackPointer())) {
        MOZ_CRASH(
            "Hidden SP cannot be represented as register code on this "
            "platform");
      } else {
        code_ = AsRegister(masm.getStackPointer()).code();
      }
      disp_ = arg.offsetFromArgBase();
      break;
    case ABIArg::Uninitialized:
      MOZ_CRASH("Uninitialized ABIArg kind");
  }
}
     50 
     51 MoveResolver::MoveResolver() : numCycles_(0), curCycles_(0) {}
     52 
     53 void MoveResolver::resetState() {
     54  numCycles_ = 0;
     55  curCycles_ = 0;
     56 }
     57 
     58 bool MoveResolver::addMove(const MoveOperand& from, const MoveOperand& to,
     59                           MoveOp::Type type) {
     60  // Assert that we're not doing no-op moves.
     61  MOZ_ASSERT(!(from == to));
     62  PendingMove* pm = movePool_.allocate(from, to, type);
     63  if (!pm) {
     64    return false;
     65  }
     66  pending_.pushBack(pm);
     67  return true;
     68 }
     69 
     70 // Given move (A -> B), this function attempts to find any move (B -> *) in the
     71 // pending move list, and returns the first one.
     72 MoveResolver::PendingMove* MoveResolver::findBlockingMove(
     73    const PendingMove* last) {
     74  for (PendingMoveIterator iter = pending_.begin(); iter != pending_.end();
     75       iter++) {
     76    PendingMove* other = *iter;
     77 
     78    if (other->from().aliases(last->to())) {
     79      // We now have pairs in the form (A -> X) (X -> y). The second pair
     80      // blocks the move in the first pair, so return it.
     81      return other;
     82    }
     83  }
     84 
     85  // No blocking moves found.
     86  return nullptr;
     87 }
     88 
     89 // Given move (A -> B), this function attempts to find any move (B -> *) in the
     90 // move list iterator, and returns the first one.
     91 // N.B. It is unclear if a single move can complete more than one cycle, so to
     92 // be conservative, this function operates on iterators, so the caller can
     93 // process all instructions that start a cycle.
     94 MoveResolver::PendingMove* MoveResolver::findCycledMove(
     95    PendingMoveIterator* iter, PendingMoveIterator end,
     96    const PendingMove* last) {
     97  for (; *iter != end; (*iter)++) {
     98    PendingMove* other = **iter;
     99    if (other->from().aliases(last->to())) {
    100      // We now have pairs in the form (A -> X) (X -> y). The second pair
    101      // blocks the move in the first pair, so return it.
    102      (*iter)++;
    103      return other;
    104    }
    105  }
    106  // No blocking moves found.
    107  return nullptr;
    108 }
    109 
    110 #ifdef JS_CODEGEN_ARM
    111 static inline bool MoveIsDouble(const MoveOperand& move) {
    112  if (!move.isFloatReg()) {
    113    return false;
    114  }
    115  return move.floatReg().isDouble();
    116 }
    117 #endif
    118 
    119 #ifdef JS_CODEGEN_ARM
    120 static inline bool MoveIsSingle(const MoveOperand& move) {
    121  if (!move.isFloatReg()) {
    122    return false;
    123  }
    124  return move.floatReg().isSingle();
    125 }
    126 #endif
    127 
    128 #ifdef JS_CODEGEN_ARM
    129 bool MoveResolver::isDoubleAliasedAsSingle(const MoveOperand& move) {
    130  if (!MoveIsDouble(move)) {
    131    return false;
    132  }
    133 
    134  for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
    135    PendingMove* other = *iter;
    136    if (other->from().aliases(move) && MoveIsSingle(other->from())) {
    137      return true;
    138    }
    139    if (other->to().aliases(move) && MoveIsSingle(other->to())) {
    140      return true;
    141    }
    142  }
    143  return false;
    144 }
    145 #endif
    146 
    147 #ifdef JS_CODEGEN_ARM
    148 static MoveOperand SplitIntoLowerHalf(const MoveOperand& move) {
    149  if (MoveIsDouble(move)) {
    150    FloatRegister lowerSingle = move.floatReg().asSingle();
    151    return MoveOperand(lowerSingle);
    152  }
    153 
    154  MOZ_ASSERT(move.isMemoryOrEffectiveAddress());
    155  return move;
    156 }
    157 #endif
    158 
    159 #ifdef JS_CODEGEN_ARM
    160 static MoveOperand SplitIntoUpperHalf(const MoveOperand& move) {
    161  if (MoveIsDouble(move)) {
    162    FloatRegister lowerSingle = move.floatReg().asSingle();
    163    FloatRegister upperSingle =
    164        VFPRegister(lowerSingle.code() + 1, VFPRegister::Single);
    165    return MoveOperand(upperSingle);
    166  }
    167 
    168  MOZ_ASSERT(move.isMemoryOrEffectiveAddress());
    169  return MoveOperand(move.base(), move.disp() + sizeof(float));
    170 }
    171 #endif
    172 
// Resolves the pending_ list into orderedMoves_: an emission order in which
// every move's source is read before any other move clobbers it, with
// unavoidable cycles annotated via setCycleBegin/setCycleEnd for the move
// emitter to break with a temporary. Returns false on allocation failure.
bool MoveResolver::resolve() {
  resetState();
  orderedMoves_.clear();

  // Upon return from this function, the pending_ list must be cleared.
  auto clearPending = mozilla::MakeScopeExit([this]() { pending_.clear(); });

#ifdef JS_CODEGEN_ARM
  // Some of ARM's double registers alias two of its single registers,
  // but the algorithm below assumes that every register can participate
  // in at most one cycle. To satisfy the algorithm, any double registers
  // that may conflict are split into their single-register halves.
  //
  // This logic is only applicable because ARM only uses registers d0-d15,
  // all of which alias s0-s31. Double registers d16-d31 are unused.
  // Therefore there is never a double move that cannot be split.
  // If this changes in the future, the algorithm will have to be fixed.

  // First pass: only scan for a conflict, so the common conflict-free case
  // does no list mutation.
  bool splitDoubles = false;
  for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
    PendingMove* pm = *iter;

    if (isDoubleAliasedAsSingle(pm->from()) ||
        isDoubleAliasedAsSingle(pm->to())) {
      splitDoubles = true;
      break;
    }
  }

  // Second pass: rewrite every double move as two FLOAT32 moves (lower and
  // upper halves) so no operand spans more than one aliasing class.
  if (splitDoubles) {
    for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
      PendingMove* pm = *iter;

      if (!MoveIsDouble(pm->from()) && !MoveIsDouble(pm->to())) {
        continue;
      }

      MoveOperand fromLower = SplitIntoLowerHalf(pm->from());
      MoveOperand toLower = SplitIntoLowerHalf(pm->to());

      PendingMove* lower =
          movePool_.allocate(fromLower, toLower, MoveOp::FLOAT32);
      if (!lower) {
        return false;
      }

      // Insert the new node before the current position to not affect
      // iteration.
      pending_.insertBefore(pm, lower);

      // Overwrite pm in place for the upper move. Iteration proceeds as normal.
      MoveOperand fromUpper = SplitIntoUpperHalf(pm->from());
      MoveOperand toUpper = SplitIntoUpperHalf(pm->to());
      pm->overwrite(fromUpper, toUpper, MoveOp::FLOAT32);
    }
  }
#endif

  InlineList<PendingMove> stack;

  // This is a depth-first-search without recursion, which tries to find
  // cycles in a list of moves.
  //
  // Algorithm.
  //
  // S = Traversal stack.
  // P = Pending move list.
  // O = Ordered list of moves.
  //
  // As long as there are pending moves in P:
  //      Let |root| be any pending move removed from P
  //      Add |root| to the traversal stack.
  //      As long as S is not empty:
  //          Let |L| be the most recent move added to S.
  //
  //          Find any pending move M whose source is L's destination, thus
  //          preventing L's move until M has completed.
  //
  //          If a move M was found,
  //              Remove M from the pending list.
  //              If M's destination is |root|,
  //                  Annotate M and |root| as cycles.
  //                  Add M to S.
  //                  do not Add M to O, since M may have other conflictors in P
  //                  that have not yet been processed.
  //              Otherwise,
  //                  Add M to S.
  //         Otherwise,
  //              Remove L from S.
  //              Add L to O.
  //
  while (!pending_.empty()) {
    PendingMove* pm = pending_.popBack();

    // Add this pending move to the cycle detection stack.
    stack.pushBack(pm);

    while (!stack.empty()) {
      PendingMove* blocking = findBlockingMove(stack.peekBack());

      if (blocking) {
        // Does the blocking move close a cycle back into the current chain?
        PendingMoveIterator stackiter = stack.begin();
        PendingMove* cycled = findCycledMove(&stackiter, stack.end(), blocking);
        if (cycled) {
          // Find the cycle's start.
          // We annotate cycles at each move in the cycle, and
          // assert that we do not find two cycles in one move chain
          // traversal (which would indicate two moves to the same
          // destination).
          // Since there can be more than one cycle, find them all.
          do {
            cycled->setCycleEnd(curCycles_);
            cycled = findCycledMove(&stackiter, stack.end(), blocking);
          } while (cycled);

          blocking->setCycleBegin(pm->type(), curCycles_);
          curCycles_++;
          pending_.remove(blocking);
          stack.pushBack(blocking);
        } else {
          // This is a new link in the move chain, so keep
          // searching for a cycle.
          pending_.remove(blocking);
          stack.pushBack(blocking);
        }
      } else {
        // Otherwise, pop the last move on the search stack because it's
        // complete and not participating in a cycle. The resulting
        // move can safely be added to the ordered move list.
        PendingMove* done = stack.popBack();
        if (!addOrderedMove(*done)) {
          return false;
        }
        movePool_.free(done);
      }
    }
    // The stack is drained, so this chain is fully ordered: cycles found in
    // it cannot conflict with cycles found in later chains. Reset the
    // in-flight cycle counter, keeping numCycles_ as a high-water mark so
    // the emitter can reserve enough cycle slots.
    if (numCycles_ < curCycles_) {
      numCycles_ = curCycles_;
    }
    curCycles_ = 0;
  }

  return true;
}
    321 
// Append |move| to orderedMoves_, opportunistically rewriting duplicated
// memory loads: if an earlier compatible move already reads the same memory
// source into a register, reuse that register instead of touching memory
// again. Returns false on allocation failure.
bool MoveResolver::addOrderedMove(const MoveOp& move) {
  // Sometimes the register allocator generates move groups where multiple
  // moves have the same source. Try to optimize these cases when the source
  // is in memory and the target of one of the moves is in a register.
  MOZ_ASSERT(!move.from().aliases(move.to()));

  // Only non-cycle moves with a memory source are candidates; everything
  // else is appended verbatim.
  if (!move.from().isMemory() || move.isCycleBegin() || move.isCycleEnd()) {
    return orderedMoves_.append(move);
  }

  // Look for an earlier move with the same source, where no intervening move
  // touches either the source or destination of the new move.
  for (int i = orderedMoves_.length() - 1; i >= 0; i--) {
    const MoveOp& existing = orderedMoves_[i];

    if (existing.from() == move.from() && !existing.to().aliases(move.to()) &&
        existing.type() == move.type() && !existing.isCycleBegin() &&
        !existing.isCycleEnd()) {
      MoveOp* after = orderedMoves_.begin() + i + 1;
      if (existing.to().isGeneralReg() || existing.to().isFloatReg()) {
        // The earlier move already loads the value into a register: copy
        // register-to-register right after it instead of re-reading memory.
        MoveOp nmove(existing.to(), move.to(), move.type());
        return orderedMoves_.insert(after, nmove);
      } else if (move.to().isGeneralReg() || move.to().isFloatReg()) {
        // The new move's destination is the register: perform it in the
        // earlier slot, then copy the register to the earlier destination.
        MoveOp nmove(move.to(), existing.to(), move.type());
        orderedMoves_[i] = move;
        return orderedMoves_.insert(after, nmove);
      }
    }

    // An intervening move touches this move's source or destination, so any
    // rewrite past this point would change observable results; stop.
    if (existing.aliases(move)) {
      break;
    }
  }

  return orderedMoves_.append(move);
}
    358 
    359 void MoveResolver::reorderMove(size_t from, size_t to) {
    360  MOZ_ASSERT(from != to);
    361 
    362  MoveOp op = orderedMoves_[from];
    363  if (from < to) {
    364    for (size_t i = from; i < to; i++) {
    365      orderedMoves_[i] = orderedMoves_[i + 1];
    366    }
    367  } else {
    368    for (size_t i = from; i > to; i--) {
    369      orderedMoves_[i] = orderedMoves_[i - 1];
    370    }
    371  }
    372  orderedMoves_[to] = op;
    373 }
    374 
void MoveResolver::sortMemoryToMemoryMoves() {
  // Try to reorder memory->memory moves so that they are executed right
  // before a move that clobbers some register. This will allow the move
  // emitter to use that clobbered register as a scratch register for the
  // memory->memory move, if necessary.
  for (size_t i = 0; i < orderedMoves_.length(); i++) {
    const MoveOp& base = orderedMoves_[i];
    // Only plain integer/pointer memory->memory moves benefit; FP moves use
    // a different scratch strategy.
    if (!base.from().isMemory() || !base.to().isMemory()) {
      continue;
    }
    if (base.type() != MoveOp::GENERAL && base.type() != MoveOp::INT32) {
      continue;
    }

    // Look for an earlier move clobbering a register. Walking backwards is
    // safe only until a move that aliases |base| (or a cycle marker), since
    // moving |base| across it would change results.
    bool found = false;
    for (int j = i - 1; j >= 0; j--) {
      const MoveOp& previous = orderedMoves_[j];
      if (previous.aliases(base) || previous.isCycleBegin() ||
          previous.isCycleEnd()) {
        break;
      }

      if (previous.to().isGeneralReg()) {
        // Place |base| immediately before the clobbering move; the clobbered
        // register then becomes available as scratch.
        reorderMove(i, j);
        found = true;
        break;
      }
    }
    if (found) {
      continue;
    }

    // Look for a later move clobbering a register.
    if (i + 1 < orderedMoves_.length()) {
      bool found = false, skippedRegisterUse = false;
      for (size_t j = i + 1; j < orderedMoves_.length(); j++) {
        const MoveOp& later = orderedMoves_[j];
        if (later.aliases(base) || later.isCycleBegin() || later.isCycleEnd()) {
          break;
        }

        if (later.to().isGeneralReg()) {
          if (skippedRegisterUse) {
            reorderMove(i, j);
            found = true;
          } else {
            // There is no move that uses a register between the
            // original memory->memory move and this move that
            // clobbers a register. The move should already be able
            // to use a scratch register, so don't shift anything
            // around.
          }
          break;
        }

        if (later.from().isGeneralReg()) {
          skippedRegisterUse = true;
        }
      }

      if (found) {
        // Redo the search for memory->memory moves at the current
        // index, so we don't skip the move just shifted back.
        // (When i == 0 the unsigned decrement wraps, but the loop's i++
        // immediately wraps it back to 0, which is well-defined.)
        i--;
      }
    }
  }
}