tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CodeGenerator-shared.cpp (36777B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/shared/CodeGenerator-shared-inl.h"
      8 
      9 #include "mozilla/DebugOnly.h"
     10 
     11 #include <utility>
     12 
     13 #include "jit/CodeGenerator.h"
     14 #include "jit/CompactBuffer.h"
     15 #include "jit/CompileInfo.h"
     16 #include "jit/InlineScriptTree.h"
     17 #include "jit/JitcodeMap.h"
     18 #include "jit/JitFrames.h"
     19 #include "jit/JitSpewer.h"
     20 #include "jit/MacroAssembler.h"
     21 #include "jit/MIR-wasm.h"
     22 #include "jit/MIR.h"
     23 #include "jit/MIRGenerator.h"
     24 #include "jit/SafepointIndex.h"
     25 #include "js/Conversions.h"
     26 #include "util/Memory.h"
     27 
     28 #include "jit/MacroAssembler-inl.h"
     29 #include "vm/JSScript-inl.h"
     30 
     31 using namespace js;
     32 using namespace js::jit;
     33 
     34 using mozilla::BitwiseCast;
     35 using mozilla::DebugOnly;
     36 
     37 namespace js {
     38 namespace jit {
     39 
     40 MacroAssembler& CodeGeneratorShared::ensureMasm(MacroAssembler* masmArg,
     41                                                TempAllocator& alloc,
     42                                                CompileRealm* realm) {
     43  if (masmArg) {
     44    return *masmArg;
     45  }
     46  maybeMasm_.emplace(alloc, realm);
     47  return *maybeMasm_;
     48 }
     49 
// Shared code-generator constructor: wires up the (possibly caller-owned)
// MacroAssembler and computes the static frame depth.  |wasmCodeMeta| is
// only provided for wasm compilations; the frame layout differs between
// the wasm and JS (Ion) paths below.
CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph,
                                         MacroAssembler* masmArg,
                                         const wasm::CodeMetadata* wasmCodeMeta)
    : masm(ensureMasm(masmArg, gen->alloc(), gen->realm)),
      gen(gen),
      graph(*graph),
      wasmCodeMeta_(wasmCodeMeta),
      current(nullptr),
      recovers_(),
#ifdef DEBUG
      pushedArgs_(0),
#endif
      lastOsiPointOffset_(0),
      // Safepoints cover the local slots plus the explicit arguments and
      // the implicit |this| (hence nargs() + 1).
      safepoints_(graph->localSlotsSize(),
                  (gen->outerInfo().nargs() + 1) * sizeof(Value)),
      returnLabel_(),
      inboundStackArgBytes_(0),
      safepointIndices_(gen->alloc()),
      nativeToBytecodeMap_(nullptr),
      nativeToBytecodeMapSize_(0),
      nativeToBytecodeTableOffset_(0),
#ifdef CHECK_OSIPOINT_REGISTERS
      checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
#endif
      frameDepth_(0) {
  if (gen->isProfilerInstrumentationEnabled()) {
    masm.enableProfilingInstrumentation();
  }

  if (gen->compilingWasm()) {
    // Wasm arguments live just past the wasm::Frame header.
    offsetOfArgsFromFP_ = sizeof(wasm::Frame);

#ifdef JS_CODEGEN_ARM64
    // Ensure SP is aligned to 16 bytes.
    frameDepth_ = AlignBytes(graph->localSlotsSize(), WasmStackAlignment);
#else
    frameDepth_ = AlignBytes(graph->localSlotsSize(), sizeof(uintptr_t));
#endif

#ifdef ENABLE_WASM_SIMD
#  if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
      defined(JS_CODEGEN_ARM64)
    // On X64/x86 and ARM64, we don't need alignment for Wasm SIMD at this time.
#  else
#    error \
        "we may need padding so that local slots are SIMD-aligned and the stack must be kept SIMD-aligned too."
#  endif
#endif

    if (gen->needsStaticStackAlignment()) {
      // Since wasm uses the system ABI which does not necessarily use a
      // regular array where all slots are sizeof(Value), it maintains the max
      // argument stack depth separately.
      MOZ_ASSERT(graph->argumentSlotCount() == 0);

      // An MWasmCall does not align the stack pointer at calls sites but
      // instead relies on the a priori stack adjustment. We need to insert
      // padding so that pushing the callee's frame maintains frame alignment.
      uint32_t calleeFramePadding = ComputeByteAlignment(
          sizeof(wasm::Frame) + frameDepth_, WasmStackAlignment);

      // Tail calls expect the size of stack arguments to be a multiple of
      // stack alignment when collapsing frames. This ensures that future tail
      // calls don't overwrite any locals.
      uint32_t stackArgsWithPadding =
          AlignBytes(gen->wasmMaxStackArgBytes(), WasmStackAlignment);

      // Add the callee frame padding and stack args to frameDepth.
      frameDepth_ += calleeFramePadding + stackArgsWithPadding;
    }

#ifdef JS_CODEGEN_ARM64
    MOZ_ASSERT((frameDepth_ % WasmStackAlignment) == 0,
               "Trap exit stub needs 16-byte aligned stack pointer");
#endif
  } else {
    // JS (Ion) path: arguments live just past the JitFrameLayout header.
    offsetOfArgsFromFP_ = sizeof(JitFrameLayout);

    // Allocate space for local slots (register allocator spills). Round to
    // JitStackAlignment, and implicitly to sizeof(Value) as JitStackAlignment
    // is a multiple of sizeof(Value). This was originally implemented for
    // SIMD.js, but now lets us use faster ABI calls via setupAlignedABICall.
    frameDepth_ = AlignBytes(graph->localSlotsSize(), JitStackAlignment);

    // Allocate space for argument Values passed to callee functions.
    offsetOfPassedArgSlots_ = frameDepth_;
    MOZ_ASSERT((offsetOfPassedArgSlots_ % sizeof(JS::Value)) == 0);
    frameDepth_ += graph->argumentSlotCount() * sizeof(JS::Value);

    MOZ_ASSERT((frameDepth_ % JitStackAlignment) == 0);
  }
}
    142 
// Emit the standard Ion frame prologue: save and establish the frame
// pointer, then reserve the statically-sized frame.  Not used for wasm,
// which emits its own prologue elsewhere.
bool CodeGeneratorShared::generatePrologue() {
  MOZ_ASSERT(masm.framePushed() == 0);
  MOZ_ASSERT(!gen->compilingWasm());

#ifdef JS_USE_LINK_REGISTER
  // On link-register architectures, push the return address so the frame
  // layout matches platforms where the call pushed it on the stack.
  masm.pushReturnAddress();
#endif

  // Frame prologue.
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // Ensure that the Ion frame is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, CallTempReg0);
  }

  // Note that this automatically sets MacroAssembler::framePushed().
  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.checkStackAlignment();

  return true;
}
    170 
// Emit the Ion frame epilogue: tear down the frame established by
// generatePrologue() and return to the caller.  All normal returns in the
// compiled body jump to returnLabel_, bound here.
bool CodeGeneratorShared::generateEpilogue() {
  MOZ_ASSERT(!gen->compilingWasm());
  masm.bind(&returnLabel_);

  // If profiling, jump to a trampoline to reset the JitActivation's
  // lastProfilingFrame to point to the previous frame and return to the caller.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerExitFrame();
  }

  // Pop the frame in one step by restoring SP from FP, then restore the
  // caller's frame pointer.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.moveToStackPtr(FramePointer);
  masm.pop(FramePointer);
  masm.setFramePushed(0);

  masm.ret();

  // On systems that use a constant pool, this is a good time to emit.
  masm.flushBuffer();
  return true;
}
    192 
// Emit all out-of-line code paths registered via addOutOfLineCode().
// Returns false on cancellation or assembler OOM.
bool CodeGeneratorShared::generateOutOfLineCode() {
  AutoCreatedBy acb(masm, "CodeGeneratorShared::generateOutOfLineCode");

  // OOL paths should not attempt to use |current| as it's the last block
  // instead of the block corresponding to the OOL path.
  current = nullptr;

  for (OutOfLineCode* ool : outOfLineCode_) {
    if (gen->shouldCancel("Generate Code (OOL code loop)")) {
      return false;
    }

    // Add native => bytecode mapping entries for OOL->sites.
    // Not enabled on wasm yet since it doesn't contain bytecode mappings.
    if (!gen->compilingWasm()) {
      if (!addNativeToBytecodeEntry(ool->bytecodeSite())) {
        return false;
      }
    }

    if (!gen->alloc().ensureBallast()) {
      return false;
    }

    JitSpew(JitSpew_Codegen, "# Emitting out of line code");

    // Restore the frame-depth the in-line code had at the point that
    // jumps to this OOL path, then bind its entry label and emit it.
    masm.setFramePushed(ool->framePushed());
    ool->bind(&masm);

    ool->generate(this);
  }

  return !masm.oom();
}
    227 
    228 void CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code,
    229                                           const MInstruction* mir) {
    230  MOZ_ASSERT(mir);
    231  addOutOfLineCode(code, mir->trackedSite());
    232 }
    233 
    234 void CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code,
    235                                           const BytecodeSite* site) {
    236  MOZ_ASSERT_IF(!gen->compilingWasm(), site->script()->containsPC(site->pc()));
    237  code->setFramePushed(masm.framePushed());
    238  code->setBytecodeSite(site);
    239  outOfLineCode_.pushBack(code);
    240 }
    241 
// Record a (native offset -> bytecode site) mapping entry for profiling.
// Consecutive entries for the same site are coalesced, and zero-length
// native regions are overwritten/merged rather than appended.  Returns
// false on assembler OOM or list-append failure.
bool CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site) {
  MOZ_ASSERT(site);
  MOZ_ASSERT(site->tree());
  MOZ_ASSERT(site->pc());

  // Skip the table entirely if profiling is not enabled.
  if (!isProfilerInstrumentationEnabled()) {
    return true;
  }

  // Fails early if the last added instruction caused the macro assembler to
  // run out of memory as continuity assumption below do not hold.
  if (masm.oom()) {
    return false;
  }

  InlineScriptTree* tree = site->tree();
  jsbytecode* pc = site->pc();
  uint32_t nativeOffset = masm.currentOffset();

  MOZ_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);

  if (!nativeToBytecodeList_.empty()) {
    size_t lastIdx = nativeToBytecodeList_.length() - 1;
    NativeToBytecode& lastEntry = nativeToBytecodeList_[lastIdx];

    // Native offsets are appended in monotonically increasing order.
    MOZ_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());

    // If the new entry is for the same inlineScriptTree and same
    // bytecodeOffset, but the nativeOffset has changed, do nothing.
    // The same site just generated some more code.
    if (lastEntry.tree == tree && lastEntry.pc == pc) {
      JitSpew(JitSpew_Profiling, " => In-place update [%zu-%" PRIu32 "]",
              lastEntry.nativeOffset.offset(), nativeOffset);
      return true;
    }

    // If the new entry is for the same native offset, then update the
    // previous entry with the new bytecode site, since the previous
    // bytecode site did not generate any native code.
    if (lastEntry.nativeOffset.offset() == nativeOffset) {
      lastEntry.tree = tree;
      lastEntry.pc = pc;
      JitSpew(JitSpew_Profiling, " => Overwriting zero-length native region.");

      // This overwrite might have made the entry merge-able with a
      // previous one.  If so, merge it.
      if (lastIdx > 0) {
        NativeToBytecode& nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
        if (nextToLastEntry.tree == lastEntry.tree &&
            nextToLastEntry.pc == lastEntry.pc) {
          JitSpew(JitSpew_Profiling, " => Merging with previous region");
          nativeToBytecodeList_.erase(&lastEntry);
        }
      }

      dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
      return true;
    }
  }

  // Otherwise, some native code was generated for the previous bytecode site.
  // Add a new entry for code that is about to be generated.
  NativeToBytecode entry;
  entry.nativeOffset = CodeOffset(nativeOffset);
  entry.tree = tree;
  entry.pc = pc;
  if (!nativeToBytecodeList_.append(entry)) {
    return false;
  }

  JitSpew(JitSpew_Profiling, " => Push new entry.");
  dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
  return true;
}
    317 
    318 void CodeGeneratorShared::dumpNativeToBytecodeEntries() {
    319 #ifdef JS_JITSPEW
    320  InlineScriptTree* topTree = gen->outerInfo().inlineScriptTree();
    321  JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%u:%u\n",
    322               topTree->script()->filename(), topTree->script()->lineno(),
    323               topTree->script()->column().oneOriginValue());
    324  for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++) {
    325    dumpNativeToBytecodeEntry(i);
    326  }
    327 #endif
    328 }
    329 
// Spew a single native->bytecode entry, including the native/pc deltas to
// the following entry and the inline caller chain (JS_JITSPEW only).
void CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx) {
#ifdef JS_JITSPEW
  NativeToBytecode& ref = nativeToBytecodeList_[idx];
  InlineScriptTree* tree = ref.tree;
  JSScript* script = tree->script();
  uint32_t nativeOffset = ref.nativeOffset.offset();
  unsigned nativeDelta = 0;
  unsigned pcDelta = 0;
  if (idx + 1 < nativeToBytecodeList_.length()) {
    // Entries are contiguous in the vector, so the next entry is &ref + 1.
    NativeToBytecode* nextRef = &ref + 1;
    nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
    // A pc delta only makes sense within the same inline script.
    if (nextRef->tree == ref.tree) {
      pcDelta = nextRef->pc - ref.pc;
    }
  }
  JitSpewStart(
      JitSpew_Profiling, "    %08zx [+%-6u] => %-6ld [%-4u] {%-10s} (%s:%u:%u",
      ref.nativeOffset.offset(), nativeDelta, (long)(ref.pc - script->code()),
      pcDelta, CodeName(JSOp(*ref.pc)), script->filename(), script->lineno(),
      script->column().oneOriginValue());

  // Walk up the inline caller chain and append each caller's location.
  for (tree = tree->caller(); tree; tree = tree->caller()) {
    JitSpewCont(JitSpew_Profiling, " <= %s:%u:%u", tree->script()->filename(),
                tree->script()->lineno(),
                tree->script()->column().oneOriginValue());
  }
  JitSpewCont(JitSpew_Profiling, ")");
  JitSpewFin(JitSpew_Profiling);
#endif
}
    360 
    361 // see OffsetOfFrameSlot
    362 static inline int32_t ToStackIndex(LAllocation* a) {
    363  if (a->isStackSlot()) {
    364    MOZ_ASSERT(a->toStackSlot()->slot() >= 1);
    365    return a->toStackSlot()->slot();
    366  }
    367  return -int32_t(sizeof(JitFrameLayout) + a->toArgument()->index());
    368 }
    369 
// Encode one snapshot slot: translate |mir|'s LIR allocation (register,
// stack slot, or constant) into an RValueAllocation and append it to
// snapshots_.  |*allocIndex| is advanced past the slot unless the value is
// recovered on bailout (recovered values consume no allocation slot).
void CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot,
                                           MDefinition* mir,
                                           uint32_t* allocIndex,
                                           bool hasSideEffects) {
  // Look through box wrappers to the underlying payload definition.
  if (mir->isBox()) {
    mir = mir->toBox()->getOperand(0);
  }

  // MIRType::None marks a value reconstructed by a recover instruction;
  // unused values are encoded as the "optimized out" magic value.
  MIRType type = mir->isRecoveredOnBailout() ? MIRType::None
                 : mir->isUnused()           ? MIRType::MagicOptimizedOut
                                             : mir->type();

  RValueAllocation alloc;

  switch (type) {
    case MIRType::None: {
      MOZ_ASSERT(mir->isRecoveredOnBailout());
      // Find this definition's index within the recover instruction list.
      uint32_t index = 0;
      LRecoverInfo* recoverInfo = snapshot->recoverInfo();
      MNode** it = recoverInfo->begin();
      MNode** end = recoverInfo->end();
      while (it != end && mir != *it) {
        ++it;
        ++index;
      }

      // This MDefinition is recovered, thus it should be listed in the
      // LRecoverInfo.
      MOZ_ASSERT(it != end && mir == *it);

      // Lambda should have a default value readable for iterating over the
      // inner frames.
      MConstant* functionOperand = nullptr;
      if (mir->isLambda()) {
        functionOperand = mir->toLambda()->functionOperand();
      } else if (mir->isFunctionWithProto()) {
        functionOperand = mir->toFunctionWithProto()->functionOperand();
      }
      if (functionOperand) {
        uint32_t cstIndex;
        masm.propagateOOM(
            graph.addConstantToPool(functionOperand->toJSValue(), &cstIndex));
        alloc = RValueAllocation::RecoverInstruction(index, cstIndex);
        break;
      }

      alloc = RValueAllocation::RecoverInstruction(index);
      break;
    }
    case MIRType::Undefined:
      alloc = RValueAllocation::Undefined();
      break;
    case MIRType::Null:
      alloc = RValueAllocation::Null();
      break;
    case MIRType::Int32:
    case MIRType::String:
    case MIRType::Symbol:
    case MIRType::BigInt:
    case MIRType::Object:
    case MIRType::Shape:
    case MIRType::Boolean:
    case MIRType::Double: {
      // Typed payloads: constants go in the constant pool, otherwise
      // encode the register or stack location that holds the payload.
      LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
      if (payload->isConstant()) {
        MConstant* constant = mir->toConstant();
        uint32_t index;
        masm.propagateOOM(
            graph.addConstantToPool(constant->toJSValue(), &index));
        alloc = RValueAllocation::ConstantPool(index);
        break;
      }

      JSValueType valueType = ValueTypeFromMIRType(type);

      MOZ_DIAGNOSTIC_ASSERT(payload->isMemory() || payload->isAnyRegister());
      if (payload->isMemory()) {
        MOZ_ASSERT_IF(payload->isStackSlot(),
                      payload->toStackSlot()->width() ==
                          LStackSlot::width(LDefinition::TypeFrom(type)));
        alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
      } else if (payload->isGeneralReg()) {
        alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
      } else if (payload->isFloatReg()) {
        alloc = RValueAllocation::Double(ToFloatRegister(payload));
      } else {
        MOZ_CRASH("Unexpected payload type.");
      }
      break;
    }
    case MIRType::Float32: {
      LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
      if (payload->isConstant()) {
        MConstant* constant = mir->toConstant();
        uint32_t index;
        masm.propagateOOM(
            graph.addConstantToPool(constant->toJSValue(), &index));
        alloc = RValueAllocation::ConstantPool(index);
        break;
      }

      if (payload->isFloatReg()) {
        alloc = RValueAllocation::Float32(ToFloatRegister(payload));
      } else if (payload->isMemory()) {
        MOZ_ASSERT_IF(payload->isStackSlot(),
                      payload->toStackSlot()->width() ==
                          LStackSlot::width(LDefinition::TypeFrom(type)));
        alloc = RValueAllocation::Float32(ToStackIndex(payload));
      } else {
        MOZ_CRASH("Unexpected payload type.");
      }
      break;
    }
    case MIRType::IntPtr: {
      LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
      if (payload->isConstant()) {
        intptr_t constant = mir->toConstant()->toIntPtr();
#if !defined(JS_64BIT)
        // 32-bit: the whole intptr fits in a single Int32 pool entry.
        uint32_t index;
        masm.propagateOOM(
            graph.addConstantToPool(Int32Value(constant), &index));

        alloc = RValueAllocation::IntPtrConstant(index);
#else
        // 64-bit: split the constant into low and high 32-bit pool entries.
        uint32_t lowIndex;
        masm.propagateOOM(
            graph.addConstantToPool(Int32Value(constant), &lowIndex));

        uint32_t highIndex;
        masm.propagateOOM(
            graph.addConstantToPool(Int32Value(constant >> 32), &highIndex));

        alloc = RValueAllocation::IntPtrConstant(lowIndex, highIndex);
#endif
        break;
      }

      if (payload->isGeneralReg()) {
        alloc = RValueAllocation::IntPtr(ToRegister(payload));
      } else if (payload->isStackSlot()) {
        // An IntPtr may be spilled as a full pointer-width slot or as a
        // narrower int32 slot; pick the matching encoding.
        LStackSlot::Width width = payload->toStackSlot()->width();
        MOZ_ASSERT(width == LStackSlot::width(LDefinition::GENERAL) ||
                   width == LStackSlot::width(LDefinition::INT32));

        if (width == LStackSlot::width(LDefinition::GENERAL)) {
          alloc = RValueAllocation::IntPtr(ToStackIndex(payload));
        } else {
          alloc = RValueAllocation::IntPtrInt32(ToStackIndex(payload));
        }
      } else {
        MOZ_CRASH("Unexpected payload type.");
      }
      break;
    }
    case MIRType::MagicOptimizedOut:
    case MIRType::MagicUninitializedLexical:
    case MIRType::MagicIsConstructing: {
      // Magic values are materialized through the constant pool.
      uint32_t index;
      JSWhyMagic why = JS_GENERIC_MAGIC;
      switch (type) {
        case MIRType::MagicOptimizedOut:
          why = JS_OPTIMIZED_OUT;
          break;
        case MIRType::MagicUninitializedLexical:
          why = JS_UNINITIALIZED_LEXICAL;
          break;
        case MIRType::MagicIsConstructing:
          why = JS_IS_CONSTRUCTING;
          break;
        default:
          MOZ_CRASH("Invalid Magic MIRType");
      }

      Value v = MagicValue(why);
      masm.propagateOOM(graph.addConstantToPool(v, &index));
      alloc = RValueAllocation::ConstantPool(index);
      break;
    }
    case MIRType::Int64: {
      LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
      if (payload->isConstant()) {
        // Constants are always pooled as two 32-bit halves.
        int64_t constant = mir->toConstant()->toInt64();

        uint32_t lowIndex;
        masm.propagateOOM(
            graph.addConstantToPool(Int32Value(constant), &lowIndex));

        uint32_t highIndex;
        masm.propagateOOM(
            graph.addConstantToPool(Int32Value(constant >> 32), &highIndex));

        alloc = RValueAllocation::Int64Constant(lowIndex, highIndex);
        break;
      }

#ifdef JS_NUNBOX32
      // NUNBOX32: an Int64 occupies two 32-bit locations, reusing the
      // type/payload slot pair for the high/low halves.
      LAllocation* type = snapshot->typeOfSlot(*allocIndex);

      MOZ_ASSERT_IF(payload->isStackSlot(),
                    payload->toStackSlot()->width() ==
                        LStackSlot::width(LDefinition::GENERAL));
      MOZ_ASSERT_IF(type->isStackSlot(),
                    type->toStackSlot()->width() ==
                        LStackSlot::width(LDefinition::GENERAL));

      if (payload->isGeneralReg()) {
        if (type->isGeneralReg()) {
          alloc =
              RValueAllocation::Int64(ToRegister(type), ToRegister(payload));
        } else if (type->isStackSlot()) {
          alloc =
              RValueAllocation::Int64(ToStackIndex(type), ToRegister(payload));
        } else {
          MOZ_CRASH("Unexpected payload type.");
        }
      } else if (payload->isStackSlot()) {
        if (type->isGeneralReg()) {
          alloc =
              RValueAllocation::Int64(ToRegister(type), ToStackIndex(payload));
        } else if (type->isStackSlot()) {
          alloc = RValueAllocation::Int64(ToStackIndex(type),
                                          ToStackIndex(payload));
        } else {
          MOZ_CRASH("Unexpected payload type.");
        }
      } else {
        MOZ_CRASH("Unexpected payload type.");
      }
#elif JS_PUNBOX64
      // PUNBOX64: the full 64-bit value fits in one register or slot.
      if (payload->isGeneralReg()) {
        alloc = RValueAllocation::Int64(ToRegister(payload));
      } else if (payload->isStackSlot()) {
        MOZ_ASSERT(payload->toStackSlot()->width() ==
                   LStackSlot::width(LDefinition::GENERAL));
        alloc = RValueAllocation::Int64(ToStackIndex(payload));
      } else {
        MOZ_CRASH("Unexpected payload type.");
      }
#endif
      break;
    }
    case MIRType::Value: {
      // Untyped boxed Value: on NUNBOX32 the type tag and payload have
      // separate locations; on PUNBOX64 a single location holds the box.
      LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
#ifdef JS_NUNBOX32
      LAllocation* type = snapshot->typeOfSlot(*allocIndex);
      if (type->isGeneralReg()) {
        if (payload->isGeneralReg()) {
          alloc =
              RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
        } else {
          alloc = RValueAllocation::Untyped(ToRegister(type),
                                            ToStackIndex(payload));
        }
      } else {
        if (payload->isGeneralReg()) {
          alloc = RValueAllocation::Untyped(ToStackIndex(type),
                                            ToRegister(payload));
        } else {
          alloc = RValueAllocation::Untyped(ToStackIndex(type),
                                            ToStackIndex(payload));
        }
      }
#elif JS_PUNBOX64
      if (payload->isGeneralReg()) {
        alloc = RValueAllocation::Untyped(ToRegister(payload));
      } else {
        alloc = RValueAllocation::Untyped(ToStackIndex(payload));
      }
#endif
      break;
    }
    default:
      MOZ_CRASH("Unexpected MIR type");
  }
  MOZ_DIAGNOSTIC_ASSERT(alloc.valid());

  // This set an extra bit as part of the RValueAllocation, such that we know
  // that recover instruction have to be executed without wrapping the
  // instruction in a no-op recover instruction.
  //
  // If the instruction claims to have side-effect but none are registered in
  // the list of recover instructions, then omit the annotation of the
  // RValueAllocation as requiring the execution of these side effects before
  // being readable.
  if (mir->isIncompleteObject() && hasSideEffects) {
    alloc.setNeedSideEffect();
  }

  masm.propagateOOM(snapshots_.add(alloc));

  // Recovered values consume no allocation slot in the snapshot.
  *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
}
    662 
    663 void CodeGeneratorShared::encode(LRecoverInfo* recover) {
    664  if (recover->recoverOffset() != INVALID_RECOVER_OFFSET) {
    665    return;
    666  }
    667 
    668  uint32_t numInstructions = recover->numInstructions();
    669  JitSpew(JitSpew_IonSnapshots,
    670          "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
    671          (void*)recover, recover->mir()->frameCount(), numInstructions);
    672 
    673  RecoverOffset offset = recovers_.startRecover(numInstructions);
    674 
    675  for (MNode* insn : *recover) {
    676    recovers_.writeInstruction(insn);
    677  }
    678 
    679  recovers_.endRecover();
    680  recover->setRecoverOffset(offset);
    681  masm.propagateOOM(!recovers_.oom());
    682 }
    683 
// Serialize a snapshot (and, first, its recover info) into the snapshots_
// buffer.  Idempotent: a snapshot that already has an offset is skipped.
void CodeGeneratorShared::encode(LSnapshot* snapshot) {
  if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET) {
    return;
  }

  // The recover info must be encoded before the snapshot that refers to it.
  LRecoverInfo* recoverInfo = snapshot->recoverInfo();
  encode(recoverInfo);

  RecoverOffset recoverOffset = recoverInfo->recoverOffset();
  MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);

  JitSpew(JitSpew_IonSnapshots, "Encoding LSnapshot %p (LRecover %p)",
          (void*)snapshot, (void*)recoverInfo);

  SnapshotOffset offset =
      snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());

#ifdef TRACK_SNAPSHOTS
  // Debug-only provenance: record which LIR/MIR instruction and bytecode
  // op produced this snapshot.
  uint32_t pcOpcode = 0;
  uint32_t lirOpcode = 0;
  uint32_t lirId = 0;
  uint32_t mirOpcode = 0;
  uint32_t mirId = 0;

  if (LInstruction* ins = instruction()) {
    lirOpcode = uint32_t(ins->op());
    lirId = ins->id();
    if (MDefinition* mir = ins->mirRaw()) {
      mirOpcode = uint32_t(mir->op());
      mirId = mir->id();
      if (jsbytecode* pc = mir->trackedSite()->pc()) {
        pcOpcode = *pc;
      }
    }
  }
  snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
#endif

  // Encode one allocation per recover operand.  NOTE: |!it| here appears
  // to mean "not at end" per OperandIter's iteration operator — verify
  // against LRecoverInfo::OperandIter's definition.
  bool hasSideEffects = recoverInfo->hasSideEffects();
  uint32_t allocIndex = 0;
  for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
    DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
    encodeAllocation(snapshot, *it, &allocIndex, hasSideEffects);
    // Each call must write exactly one allocation (unless OOM occurred).
    MOZ_ASSERT_IF(!snapshots_.oom(),
                  allocWritten + 1 == snapshots_.allocWritten());
  }

  MOZ_ASSERT(allocIndex == snapshot->numSlots());
  snapshots_.endSnapshot();
  snapshot->setSnapshotOffset(offset);
  masm.propagateOOM(!snapshots_.oom());
}
    736 
    737 bool CodeGeneratorShared::encodeSafepoints() {
    738  for (CodegenSafepointIndex& index : safepointIndices_) {
    739    LSafepoint* safepoint = index.safepoint();
    740 
    741    if (!safepoint->encoded()) {
    742      safepoints_.encode(safepoint);
    743    }
    744  }
    745 
    746  return !safepoints_.oom();
    747 }
    748 
// Build the deduplicated list of every script in the inline script tree,
// each paired with its profiler display string.  Walks the tree with an
// explicit depth-first traversal (children first, then siblings, backing
// up through callers).  Returns false on OOM.
bool CodeGeneratorShared::createNativeToBytecodeScriptList(
    JSContext* cx, IonEntry::ScriptList& scripts) {
  MOZ_ASSERT(scripts.empty());

  InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
  for (;;) {
    // Add script from current tree.
    bool found = false;
    for (uint32_t i = 0; i < scripts.length(); i++) {
      if (scripts[i].sourceAndExtent.matches(tree->script())) {
        found = true;
        break;
      }
    }
    if (!found) {
      UniqueChars str =
          GeckoProfilerRuntime::allocProfileString(cx, tree->script());
      if (!str) {
        return false;
      }
      if (!scripts.emplaceBack(tree->script(), std::move(str))) {
        return false;
      }
    }

    // Process rest of tree

    // If children exist, emit children.
    if (tree->hasChildren()) {
      tree = tree->firstChild();
      continue;
    }

    // Otherwise, find the first tree up the chain (including this one)
    // that contains a next sibling.
    while (!tree->hasNextCallee() && tree->hasCaller()) {
      tree = tree->caller();
    }

    // If we found a sibling, use it.
    if (tree->hasNextCallee()) {
      tree = tree->nextCallee();
      continue;
    }

    // Otherwise, we must have reached the top without finding any siblings.
    MOZ_ASSERT(tree->isOutermostCaller());
    break;
  }

  return true;
}
    801 
// Build the compact native-to-bytecode map for profiling: collect the script
// list, serialize the accumulated nativeToBytecodeList_ entries into a
// compact buffer via JitcodeIonTable, then copy the result into an exactly
// sized allocation owned by nativeToBytecodeMap_.
//
// Returns false on failure; on success the map pointer, its size, and the
// table offset members are populated.
bool CodeGeneratorShared::generateCompactNativeToBytecodeMap(
    JSContext* cx, JitCode* code, IonEntry::ScriptList& scripts) {
  // Must not have been generated already for this codegen.
  MOZ_ASSERT(nativeToBytecodeMap_ == nullptr);
  MOZ_ASSERT(nativeToBytecodeMapSize_ == 0);
  MOZ_ASSERT(nativeToBytecodeTableOffset_ == 0);

  if (!createNativeToBytecodeScriptList(cx, scripts)) {
    return false;
  }

  CompactBufferWriter writer;
  uint32_t tableOffset = 0;
  uint32_t numRegions = 0;

  // Serialize the [begin, end) range of native-to-bytecode entries.
  if (!JitcodeIonTable::WriteIonTable(
          writer, scripts, &nativeToBytecodeList_[0],
          &nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
          &tableOffset, &numRegions)) {
    return false;
  }

  MOZ_ASSERT(tableOffset > 0);
  MOZ_ASSERT(numRegions > 0);

  // Writer is done, copy it to sized buffer.
  uint8_t* data = cx->pod_malloc<uint8_t>(writer.length());
  if (!data) {
    return false;
  }

  memcpy(data, writer.buffer(), writer.length());
  nativeToBytecodeMap_.reset(data);
  nativeToBytecodeMapSize_ = writer.length();
  nativeToBytecodeTableOffset_ = tableOffset;

  // Debug-only sanity pass over the encoded map (no-op in release builds).
  verifyCompactNativeToBytecodeMap(code, scripts, numRegions);

  JitSpew(JitSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]", data,
          data + nativeToBytecodeMapSize_);

  return true;
}
    844 
// DEBUG-only consistency check of the compact native-to-bytecode map just
// produced by generateCompactNativeToBytecodeMap: validates table alignment,
// region count, the monotonic back-offsets of each region, and that every
// native offset (including after applying deltas) stays within the jitcode.
// Compiles to nothing in release builds.
void CodeGeneratorShared::verifyCompactNativeToBytecodeMap(
    JitCode* code, const IonEntry::ScriptList& scripts, uint32_t numRegions) {
#ifdef DEBUG
  MOZ_ASSERT(nativeToBytecodeMap_ != nullptr);
  MOZ_ASSERT(nativeToBytecodeMapSize_ > 0);
  MOZ_ASSERT(nativeToBytecodeTableOffset_ > 0);
  MOZ_ASSERT(numRegions > 0);

  // The pointer to the table must be 4-byte aligned
  const uint8_t* tablePtr =
      nativeToBytecodeMap_.get() + nativeToBytecodeTableOffset_;
  MOZ_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);

  // Verify that numRegions was encoded correctly.
  const JitcodeIonTable* ionTable =
      reinterpret_cast<const JitcodeIonTable*>(tablePtr);
  MOZ_ASSERT(ionTable->numRegions() == numRegions);

  // Region offset for first region should be at the start of the payload
  // region. Since the offsets are backward from the start of the table, the
  // first entry backoffset should be equal to the forward table offset from the
  // start of the allocated data.
  MOZ_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);

  // Verify each region.
  for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
    // Back-offset must point into the payload region preceding the table, not
    // before it.
    MOZ_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);

    // Back-offset must point to a later area in the payload region than
    // previous back-offset.  This means that back-offsets decrease
    // monotonically.
    MOZ_ASSERT_IF(i > 0,
                  ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));

    JitcodeRegionEntry entry = ionTable->regionEntry(i);

    // Ensure native code offset for region falls within jitcode.
    MOZ_ASSERT(entry.nativeOffset() <= code->instructionsSize());

    // Obtain the original nativeOffset.
    uint32_t curNativeOffset = entry.nativeOffset();

    // Read out nativeDeltas and pcDeltas and verify.
    JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
    while (deltaIter.hasMore()) {
      uint32_t nativeDelta = 0;
      int32_t pcDelta = 0;
      deltaIter.readNext(&nativeDelta, &pcDelta);

      curNativeOffset += nativeDelta;

      // Ensure that nativeOffset still falls within jitcode after delta.
      MOZ_ASSERT(curNativeOffset <= code->instructionsSize());
    }
  }
#endif  // DEBUG
}
    904 
// Record a safepoint for |ins| at the assembler's current code offset.
void CodeGeneratorShared::markSafepoint(LInstruction* ins) {
  markSafepointAt(masm.currentOffset(), ins);
}
    908 
// Record a safepoint for |ins| at the given native code |offset|, appending
// a CodegenSafepointIndex and propagating any append OOM to the assembler.
void CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction* ins) {
  // Consecutive safepoints must be at least sizeof(uint32_t) code bytes
  // apart (the assert is skipped once the assembler has OOM'd, since
  // offsets are meaningless then).
  MOZ_ASSERT_IF(
      !safepointIndices_.empty() && !masm.oom(),
      offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
  masm.propagateOOM(safepointIndices_.append(
      CodegenSafepointIndex(offset, ins->safepoint())));
}
    916 
// Pad the instruction stream with nops so that the code emitted since the
// previous OSI point is at least PatchWrite_NearCallSize() bytes long,
// guaranteeing an invalidation patch cannot overwrite an earlier OSI point.
void CodeGeneratorShared::ensureOsiSpace() {
  // For a refresher, an invalidation point is of the form:
  // 1: call <target>
  // 2: ...
  // 3: <osipoint>
  //
  // The four bytes *before* instruction 2 are overwritten with an offset.
  // Callers must ensure that the instruction itself has enough bytes to
  // support this.
  //
  // The bytes *at* instruction 3 are overwritten with an invalidation
  // jump. These bytes may be in a completely different IR sequence, but
  // represent the join point of the call out of the function.
  //
  // At points where we want to ensure that invalidation won't corrupt an
  // important instruction, we make sure to pad with nops.
  if (masm.currentOffset() - lastOsiPointOffset_ <
      Assembler::PatchWrite_NearCallSize()) {
    int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
    paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
    for (int32_t i = 0; i < paddingSize; ++i) {
      masm.nop();
    }
  }
  MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() - lastOsiPointOffset_ >=
                                 Assembler::PatchWrite_NearCallSize());
}
    944 
    945 uint32_t CodeGeneratorShared::markOsiPoint(LOsiPoint* ins) {
    946  encode(ins->snapshot());
    947  ensureOsiSpace();
    948 
    949  uint32_t offset = masm.currentOffset();
    950  SnapshotOffset so = ins->snapshot()->snapshotOffset();
    951  masm.propagateOOM(osiIndices_.append(OsiIndex(offset, so)));
    952  lastOsiPointOffset_ = offset;
    953 
    954  return offset;
    955 }
    956 
// Out-of-line path for float->int32 truncation: carries the source float
// register, destination register, whether the float32 source must first be
// widened to double, and the wasm bytecode offset for trap reporting.
class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared> {
  FloatRegister src_;
  Register dest_;
  bool widenFloatToDouble_;
  wasm::BytecodeOffset bytecodeOffset_;

 public:
  OutOfLineTruncateSlow(FloatRegister src, Register dest,
                        bool widenFloatToDouble,
                        wasm::BytecodeOffset bytecodeOffset)
      : src_(src),
        dest_(dest),
        widenFloatToDouble_(widenFloatToDouble),
        bytecodeOffset_(bytecodeOffset) {}

  // Dispatch to the codegen's visitor for this OOL path.
  void accept(CodeGeneratorShared* codegen) override {
    codegen->visitOutOfLineTruncateSlow(this);
  }
  FloatRegister src() const { return src_; }
  Register dest() const { return dest_; }
  bool widenFloatToDouble() const { return widenFloatToDouble_; }
  wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
};
    980 
// Allocate and register the out-of-line slow path for truncating a double in
// |src| to an int32 in |dest|. |bytecodeOffset| must be valid when compiling
// wasm so the slow path can report traps.
OutOfLineCode* CodeGeneratorShared::oolTruncateDouble(
    FloatRegister src, Register dest, MInstruction* mir,
    wasm::BytecodeOffset bytecodeOffset) {
  MOZ_ASSERT_IF(IsCompilingWasm(), bytecodeOffset.isValid());

  // The source is already a double, so no float32->double widening.
  OutOfLineTruncateSlow* ool = new (alloc())
      OutOfLineTruncateSlow(src, dest, /* float32 */ false, bytecodeOffset);
  addOutOfLineCode(ool, mir);
  return ool;
}
    991 
// Emit a double->int32 truncation: the fast inline path handles the common
// case, branching to an out-of-line slow path otherwise.
void CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest,
                                             MInstruction* mir) {
  MOZ_ASSERT(mir->isTruncateToInt32() || mir->isWasmBuiltinTruncateToInt32());
  // Fetch the trap bytecode offset from whichever truncate MIR node this is.
  wasm::BytecodeOffset bytecodeOffset =
      mir->isTruncateToInt32()
          ? mir->toTruncateToInt32()->trapSiteDesc().bytecodeOffset
          : mir->toWasmBuiltinTruncateToInt32()->trapSiteDesc().bytecodeOffset;
  OutOfLineCode* ool = oolTruncateDouble(src, dest, mir, bytecodeOffset);

  masm.branchTruncateDoubleMaybeModUint32(src, dest, ool->entry());
  masm.bind(ool->rejoin());
}
   1004 
// Emit a float32->int32 truncation: fast inline path with an out-of-line
// slow path that first widens the float32 to double.
void CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest,
                                              MInstruction* mir) {
  MOZ_ASSERT(mir->isTruncateToInt32() || mir->isWasmBuiltinTruncateToInt32());
  // Fetch the trap bytecode offset from whichever truncate MIR node this is.
  wasm::BytecodeOffset bytecodeOffset =
      mir->isTruncateToInt32()
          ? mir->toTruncateToInt32()->trapSiteDesc().bytecodeOffset
          : mir->toWasmBuiltinTruncateToInt32()->trapSiteDesc().bytecodeOffset;
  OutOfLineTruncateSlow* ool = new (alloc())
      OutOfLineTruncateSlow(src, dest, /* float32 */ true, bytecodeOffset);
  addOutOfLineCode(ool, mir);

  masm.branchTruncateFloat32MaybeModUint32(src, dest, ool->entry());
  masm.bind(ool->rejoin());
}
   1019 
   1020 void CodeGeneratorShared::visitOutOfLineTruncateSlow(
   1021    OutOfLineTruncateSlow* ool) {
   1022  FloatRegister src = ool->src();
   1023  Register dest = ool->dest();
   1024 
   1025  saveVolatile(dest);
   1026  masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(),
   1027                             gen->compilingWasm(), ool->bytecodeOffset());
   1028  restoreVolatile(dest);
   1029 
   1030  masm.jump(ool->rejoin());
   1031 }
   1032 
   1033 bool CodeGeneratorShared::omitOverRecursedStackCheck() const {
   1034  // If the current function makes no calls (which means it isn't recursive)
   1035  // and it uses only a small amount of stack space, it doesn't need a
   1036  // stack overflow check. Note that the actual number here is somewhat
   1037  // arbitrary, and codegen actually uses small bounded amounts of
   1038  // additional stack space in some cases too.
   1039  return frameSize() < MAX_UNCHECKED_LEAF_FRAME_SIZE &&
   1040         !gen->needsOverrecursedCheck();
   1041 }
   1042 
// The interrupt variant of the over-recursion check is needed only when the
// function can recurse at all.
bool CodeGeneratorShared::omitOverRecursedInterruptCheck() const {
  return !gen->needsOverrecursedCheck();
}
   1046 
// Emit a guarded GC pre-write barrier for a Value stored at |address|.
void CodeGeneratorShared::emitPreBarrier(Address address) {
  masm.guardedCallPreBarrier(address, MIRType::Value);
}
   1050 
// Emit a guarded GC pre-write barrier for a Value stored in an object's
// elements at the given index.
void CodeGeneratorShared::emitPreBarrier(BaseObjectElementIndex address) {
  masm.guardedCallPreBarrier(address, MIRType::Value);
}
   1054 
   1055 void CodeGeneratorShared::jumpToBlock(MBasicBlock* mir) {
   1056  // Skip past trivial blocks.
   1057  mir = skipTrivialBlocks(mir);
   1058 
   1059  // No jump necessary if we can fall through to the next block.
   1060  if (isNextBlock(mir->lir())) {
   1061    return;
   1062  }
   1063 
   1064  masm.jump(mir->lir()->label());
   1065 }
   1066 
   1067 Label* CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block) {
   1068  // Skip past trivial blocks.
   1069  return skipTrivialBlocks(block)->lir()->label();
   1070 }
   1071 
// This function is not used for MIPS64/LOONG64/RISCV64. They have
// branchToBlock.
#if !defined(JS_CODEGEN_MIPS64) && !defined(JS_CODEGEN_LOONG64) && \
    !defined(JS_CODEGEN_RISCV64)
// Emit a conditional jump to |mir|'s code, resolving through any trivial
// pass-through blocks first.
void CodeGeneratorShared::jumpToBlock(MBasicBlock* mir,
                                      Assembler::Condition cond) {
  // Skip past trivial blocks.
  masm.j(cond, skipTrivialBlocks(mir)->lir()->label());
}
#endif
   1082 
   1083 }  // namespace jit
   1084 }  // namespace js