tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Lowering-shared.cpp (9478B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/shared/Lowering-shared-inl.h"
      8 
      9 #include "jit/LIR.h"
     10 #include "jit/Lowering.h"
     11 #include "jit/MIR-wasm.h"
     12 #include "jit/MIR.h"
     13 #include "jit/ScalarTypeUtils.h"
     14 
     15 #include "vm/SymbolType.h"
     16 
     17 using namespace js;
     18 using namespace jit;
     19 
     20 using mozilla::Maybe;
     21 using mozilla::Nothing;
     22 using mozilla::Some;
     23 
     24 bool LIRGeneratorShared::ShouldReorderCommutative(MDefinition* lhs,
     25                                                  MDefinition* rhs,
     26                                                  MInstruction* ins) {
     27  // lhs and rhs are used by the commutative operator.
     28  MOZ_ASSERT(lhs->hasDefUses());
     29  MOZ_ASSERT(rhs->hasDefUses());
     30 
     31  // Ensure that if there is a constant, then it is in rhs.
     32  if (rhs->isConstant()) {
     33    return false;
     34  }
     35  if (lhs->isConstant()) {
     36    return true;
     37  }
     38 
     39  // Since clobbering binary operations clobber the left operand, prefer a
     40  // non-constant lhs operand with no further uses. To be fully precise, we
     41  // should check whether this is the *last* use, but checking hasOneDefUse()
     42  // is a decent approximation which doesn't require any extra analysis.
     43  bool rhsSingleUse = rhs->hasOneDefUse();
     44  bool lhsSingleUse = lhs->hasOneDefUse();
     45  if (rhsSingleUse) {
     46    if (!lhsSingleUse) {
     47      return true;
     48    }
     49  } else {
     50    if (lhsSingleUse) {
     51      return false;
     52    }
     53  }
     54 
     55  // If this is a reduction-style computation, such as
     56  //
     57  //   sum = 0;
     58  //   for (...)
     59  //      sum += ...;
     60  //
     61  // put the phi on the left to promote coalescing. This is fairly specific.
     62  if (rhsSingleUse && rhs->isPhi() && rhs->block()->isLoopHeader() &&
     63      ins == rhs->toPhi()->getLoopBackedgeOperand()) {
     64    return true;
     65  }
     66 
     67  return false;
     68 }
     69 
     70 void LIRGeneratorShared::ReorderCommutative(MDefinition** lhsp,
     71                                            MDefinition** rhsp,
     72                                            MInstruction* ins) {
     73  MDefinition* lhs = *lhsp;
     74  MDefinition* rhs = *rhsp;
     75 
     76  if (ShouldReorderCommutative(lhs, rhs, ins)) {
     77    *rhsp = lhs;
     78    *lhsp = rhs;
     79  }
     80 }
     81 
     82 void LIRGeneratorShared::definePhiOneRegister(MPhi* phi, size_t lirIndex) {
     83  LPhi* lir = current->getPhi(lirIndex);
     84 
     85  uint32_t vreg = getVirtualRegister();
     86 
     87  phi->setVirtualRegister(vreg);
     88  lir->setDef(0, LDefinition(vreg, LDefinition::TypeFrom(phi->type())));
     89  annotate(lir);
     90 }
     91 
     92 #ifdef JS_NUNBOX32
     93 void LIRGeneratorShared::definePhiTwoRegisters(MPhi* phi, size_t lirIndex) {
     94  LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
     95  LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
     96 
     97  uint32_t typeVreg = getVirtualRegister();
     98  phi->setVirtualRegister(typeVreg);
     99 
    100  uint32_t payloadVreg = getVirtualRegister();
    101  MOZ_ASSERT_IF(!errored(), typeVreg + 1 == payloadVreg);
    102 
    103  type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
    104  payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
    105  annotate(type);
    106  annotate(payload);
    107 }
    108 #endif
    109 
    110 void LIRGeneratorShared::lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition,
    111                                            LBlock* block, size_t lirIndex) {
    112  MDefinition* operand = phi->getOperand(inputPosition);
    113  LPhi* lir = block->getPhi(lirIndex);
    114  lir->setOperand(inputPosition, LUse(operand->virtualRegister(), LUse::ANY));
    115 }
    116 
    117 LRecoverInfo* LIRGeneratorShared::getRecoverInfo(MResumePoint* rp) {
    118  if (cachedRecoverInfo_ && cachedRecoverInfo_->mir() == rp) {
    119    return cachedRecoverInfo_;
    120  }
    121 
    122  LRecoverInfo* recoverInfo = LRecoverInfo::New(gen, rp);
    123  if (!recoverInfo) {
    124    return nullptr;
    125  }
    126 
    127  cachedRecoverInfo_ = recoverInfo;
    128  return recoverInfo;
    129 }
    130 
    131 #ifdef DEBUG
    132 bool LRecoverInfo::OperandIter::canOptimizeOutIfUnused() {
    133  MDefinition* ins = **this;
    134 
    135  // We check ins->type() in addition to ins->isUnused() because
    136  // EliminateDeadResumePointOperands may replace nodes with the constant
    137  // MagicValue(JS_OPTIMIZED_OUT).
    138  if ((ins->isUnused() || ins->type() == MIRType::MagicOptimizedOut) &&
    139      (*it_)->isResumePoint()) {
    140    return !(*it_)->toResumePoint()->isObservableOperand(op_);
    141  }
    142 
    143  return true;
    144 }
    145 #endif
    146 
    147 LAllocation LIRGeneratorShared::useRegisterOrIndexConstant(MDefinition* mir,
    148                                                           Scalar::Type type) {
    149  if (CanUseInt32Constant(mir)) {
    150    MConstant* cst = mir->toConstant();
    151    int32_t val =
    152        cst->type() == MIRType::Int32 ? cst->toInt32() : cst->toIntPtr();
    153    int32_t offset;
    154    if (ArrayOffsetFitsInInt32(val, type, &offset)) {
    155      return LAllocation(mir->toConstant());
    156    }
    157  }
    158  return useRegister(mir);
    159 }
    160 
#ifdef JS_NUNBOX32
// Build an LSnapshot for resume point |rp| on NUNBOX32 targets, where every
// snapshot slot is a (type, payload) pair of allocations. Returns nullptr on
// OOM. The register allocator later fills the placeholder allocations with
// concrete register/stack assignments.
LSnapshot* LIRGeneratorShared::buildSnapshot(MResumePoint* rp,
                                             BailoutKind kind) {
  LRecoverInfo* recoverInfo = getRecoverInfo(rp);
  if (!recoverInfo) {
    return nullptr;
  }

  LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
  if (!snapshot) {
    return nullptr;
  }

  size_t index = 0;
  // NOTE(review): OperandIter appears to use operator! as its "not done"
  // test, so this iterates all operands — confirm against its declaration.
  for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
    // Check that optimized out operands are in eliminable slots.
    MOZ_ASSERT(it.canOptimizeOutIfUnused());

    MDefinition* ins = *it;

    // Operands recovered from the recover instructions need no allocation.
    if (ins->isRecoveredOnBailout()) {
      continue;
    }

    LAllocation* type = snapshot->typeOfSlot(index);
    LAllocation* payload = snapshot->payloadOfSlot(index);
    ++index;

    // Look through boxes to the underlying typed definition.
    if (ins->isBox()) {
      ins = ins->toBox()->getOperand(0);
    }

    // Guards should never be eliminated.
    MOZ_ASSERT_IF(ins->isUnused(), !ins->isGuard());

    // Snapshot operands other than constants should never be
    // emitted-at-uses. Try-catch support depends on there being no
    // code between an instruction and the LOsiPoint that follows it.
    MOZ_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());

    // The register allocation will fill these fields in with actual
    // register/stack assignments. During code generation, we can restore
    // interpreter state with the given information. Note that for
    // constants, including known types, we record a dummy placeholder,
    // since we can recover the same information, much cleaner, from MIR.
    if (ins->isConstant() || ins->isUnused()) {
      *type = LAllocation();
      *payload = LAllocation();
    } else if (ins->type() == MIRType::Int64) {
      // Int64 values occupy both halves of the slot as a low/high pair.
      LInt64Allocation alloc = useInt64(ins, LUse::KEEPALIVE);
      *type = *alloc.low().toUse();
      *payload = *alloc.high().toUse();
    } else if (ins->type() != MIRType::Value) {
      // Typed (unboxed) values only need the payload half; the type is
      // statically known.
      *type = LAllocation();
      *payload = use(ins, LUse(LUse::KEEPALIVE));
    } else {
      *type = useType(ins, LUse::KEEPALIVE);
      *payload = usePayload(ins, LUse::KEEPALIVE);
    }
  }

  return snapshot;
}
    224 
#elif JS_PUNBOX64

// Build an LSnapshot for resume point |rp| on PUNBOX64 targets, where every
// snapshot slot is a single allocation (Values fit in one 64-bit register).
// Returns nullptr on OOM. The register allocator later fills the placeholder
// allocations with concrete register/stack assignments.
LSnapshot* LIRGeneratorShared::buildSnapshot(MResumePoint* rp,
                                             BailoutKind kind) {
  LRecoverInfo* recoverInfo = getRecoverInfo(rp);
  if (!recoverInfo) {
    return nullptr;
  }

  LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
  if (!snapshot) {
    return nullptr;
  }

  size_t index = 0;
  // NOTE(review): OperandIter appears to use operator! as its "not done"
  // test, so this iterates all operands — confirm against its declaration.
  for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
    // Check that optimized out operands are in eliminable slots.
    MOZ_ASSERT(it.canOptimizeOutIfUnused());

    MDefinition* def = *it;

    // Operands recovered from the recover instructions need no allocation.
    if (def->isRecoveredOnBailout()) {
      continue;
    }

    // Look through boxes to the underlying typed definition.
    if (def->isBox()) {
      def = def->toBox()->getOperand(0);
    }

    // Guards should never be eliminated.
    MOZ_ASSERT_IF(def->isUnused(), !def->isGuard());

    // Snapshot operands other than constants should never be
    // emitted-at-uses. Try-catch support depends on there being no
    // code between an instruction and the LOsiPoint that follows it.
    MOZ_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());

    LAllocation* a = snapshot->getEntry(index++);

    // Unused operands get a dummy placeholder; their value is recoverable
    // from MIR.
    if (def->isUnused()) {
      *a = LAllocation();
      continue;
    }

    *a = useKeepaliveOrConstant(def);
  }

  return snapshot;
}
#endif
    275 
    276 void LIRGeneratorShared::assignSnapshot(LInstruction* ins, BailoutKind kind) {
    277  // assignSnapshot must be called before define/add, since
    278  // it may add new instructions for emitted-at-use operands.
    279  MOZ_ASSERT(ins->id() == 0);
    280  MOZ_ASSERT(kind != BailoutKind::Unknown);
    281 
    282  LSnapshot* snapshot = buildSnapshot(lastResumePoint_, kind);
    283  if (!snapshot) {
    284    abort(AbortReason::Alloc, "buildSnapshot failed");
    285    return;
    286  }
    287 
    288  ins->assignSnapshot(snapshot);
    289 }
    290 
    291 void LIRGeneratorShared::assignSafepoint(LInstruction* ins, MInstruction* mir,
    292                                         BailoutKind kind) {
    293  MOZ_ASSERT(!osiPoint_);
    294  MOZ_ASSERT(!ins->safepoint());
    295 
    296  ins->initSafepoint(alloc());
    297 
    298  MResumePoint* mrp =
    299      mir->resumePoint() ? mir->resumePoint() : lastResumePoint_;
    300  LSnapshot* postSnapshot = buildSnapshot(mrp, kind);
    301  if (!postSnapshot) {
    302    abort(AbortReason::Alloc, "buildSnapshot failed");
    303    return;
    304  }
    305 
    306  osiPoint_ = new (alloc()) LOsiPoint(ins->safepoint(), postSnapshot);
    307  lirGraph_.noteNeedsSafepoint(ins);
    308 }
    309 
// Give |ins| a safepoint for wasm code. Unlike assignSafepoint, no snapshot
// or LOsiPoint is built — only the safepoint itself is recorded.
void LIRGeneratorShared::assignWasmSafepoint(LInstruction* ins) {
  MOZ_ASSERT(!osiPoint_);
  MOZ_ASSERT(!ins->safepoint());

  ins->initSafepoint(alloc());
  lirGraph_.noteNeedsSafepoint(ins);
}