tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WasmGenerator.cpp (52648B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 *
      4 * Copyright 2015 Mozilla Foundation
      5 *
      6 * Licensed under the Apache License, Version 2.0 (the "License");
      7 * you may not use this file except in compliance with the License.
      8 * You may obtain a copy of the License at
      9 *
     10 *     http://www.apache.org/licenses/LICENSE-2.0
     11 *
     12 * Unless required by applicable law or agreed to in writing, software
     13 * distributed under the License is distributed on an "AS IS" BASIS,
     14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     15 * See the License for the specific language governing permissions and
     16 * limitations under the License.
     17 */
     18 
     19 #include "wasm/WasmGenerator.h"
     20 
     21 #include <algorithm>
     22 
     23 #include "jit/Assembler.h"
     24 #include "jit/JitOptions.h"
     25 #include "js/Printf.h"
     26 #include "threading/Thread.h"
     27 #include "util/Memory.h"
     28 #include "util/Text.h"
     29 #include "vm/HelperThreads.h"
     30 #include "vm/Time.h"
     31 #include "wasm/WasmBaselineCompile.h"
     32 #include "wasm/WasmCompile.h"
     33 #include "wasm/WasmGC.h"
     34 #include "wasm/WasmIonCompile.h"
     35 #include "wasm/WasmStubs.h"
     36 
     37 using namespace js;
     38 using namespace js::jit;
     39 using namespace js::wasm;
     40 
// Move all compiled artifacts (code bytes plus every metadata side-table)
// out of `masm` into this CompiledCode. Returns false if the code buffer
// could not be swapped out (this object must hold no bytes on entry).
bool CompiledCode::swap(MacroAssembler& masm) {
  MOZ_ASSERT(bytes.empty());
  if (!masm.swapBuffer(bytes)) {
    return false;
  }

  // Take ownership of each per-compilation side-table recorded by the
  // MacroAssembler, leaving the masm's tables empty for reuse.
  inliningContext.swap(masm.inliningContext());
  callSites.swap(masm.callSites());
  callSiteTargets.swap(masm.callSiteTargets());
  trapSites.swap(masm.trapSites());
  symbolicAccesses.swap(masm.symbolicAccesses());
  tryNotes.swap(masm.tryNotes());
  codeRangeUnwindInfos.swap(masm.codeRangeUnwindInfos());
  callRefMetricsPatches.swap(masm.callRefMetricsPatches());
  allocSitesPatches.swap(masm.allocSitesPatches());
  codeLabels.swap(masm.codeLabels());
  return true;
}
     59 
     60 // ****************************************************************************
     61 // ModuleGenerator
     62 
// Default LifoAlloc chunk sizes: a small one for the ModuleGenerator's own
// allocations (lifo_), and a larger one used by each CompileTask's
// compilation arena.
static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
     65 
// Bind a MacroAssembler to the caller-provided LifoAlloc for the lifetime of
// this scope; constructed without the limited-size restriction
// (limitedSize = false) since it assembles whole-module code.
ModuleGenerator::MacroAssemblerScope::MacroAssemblerScope(LifoAlloc& lifo)
    : masmAlloc(&lifo), masm(masmAlloc, /* limitedSize= */ false) {}
     68 
// Construct a generator for one tier of compilation of the module described
// by `codeMeta`. `cancelled`, `error`, and `warnings` are optional
// caller-owned out-channels; any task error message is propagated to
// `error` in the destructor. `codeMeta` must already be prepared for
// compilation.
ModuleGenerator::ModuleGenerator(const CodeMetadata& codeMeta,
                                 const CompilerEnvironment& compilerEnv,
                                 CompileState compileState,
                                 const mozilla::Atomic<bool>* cancelled,
                                 UniqueChars* error,
                                 UniqueCharsVector* warnings)
    : compileArgs_(codeMeta.compileArgs.get()),
      compileState_(compileState),
      error_(error),
      warnings_(warnings),
      cancelled_(cancelled),
      codeMeta_(&codeMeta),
      compilerEnv_(&compilerEnv),
      featureUsage_(FeatureUsage::None),
      codeBlock_(nullptr),
      linkData_(nullptr),
      lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE, js::MallocArena),
      masm_(nullptr),
      debugStubCodeOffset_(0),
      requestTierUpStubCodeOffset_(0),
      updateCallRefMetricsStubCodeOffset_(0),
      lastPatchedCallSite_(0),
      startOfUnpatchedCallsites_(0),
      numCallRefMetrics_(0),
      numAllocSites_(0),
      parallel_(false),
      outstanding_(0),
      currentTask_(nullptr),
      batchedBytecode_(0),
      finishedFuncDefs_(false) {
  MOZ_ASSERT(codeMeta_->isPreparedForCompile());
}
    101 
// Tear down the generator. If parallel compilation is in flight, dequeue any
// not-yet-started tasks and then block until all in-progress tasks drain, so
// no helper thread can touch this object after destruction. Finally,
// propagate any task-recorded error message to the caller's `error_` slot.
ModuleGenerator::~ModuleGenerator() {
  MOZ_ASSERT_IF(finishedFuncDefs_, !batchedBytecode_);
  MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_);

  if (parallel_) {
    if (outstanding_) {
      AutoLockHelperThreadState lock;

      // Remove any pending compilation tasks from the worklist.
      size_t removed =
          RemovePendingWasmCompileTasks(taskState_, compileState_, lock);
      MOZ_ASSERT(outstanding_ >= removed);
      outstanding_ -= removed;

      // Wait until all active compilation tasks have finished.
      while (true) {
        // Drain both the finished list and the failure count each time we
        // are woken, until nothing remains outstanding.
        MOZ_ASSERT(outstanding_ >= taskState_.finished().length());
        outstanding_ -= taskState_.finished().length();
        taskState_.finished().clear();

        MOZ_ASSERT(outstanding_ >= taskState_.numFailed());
        outstanding_ -= taskState_.numFailed();
        taskState_.numFailed() = 0;

        if (!outstanding_) {
          break;
        }

        taskState_.condVar().wait(lock); /* failed or finished */
      }
    }
  } else {
    MOZ_ASSERT(!outstanding_);
  }

  // Propagate error state.
  if (error_ && !*error_) {
    AutoLockHelperThreadState lock;
    *error_ = std::move(taskState_.errorMessage());
  }
}
    143 
// Set up compilation of a complete tier (all functions in the module).
// `codeMetaForAsmJS` is non-null exactly when compiling asm.js. Returns
// false on failure.
bool ModuleGenerator::initializeCompleteTier(
    CodeMetadataForAsmJS* codeMetaForAsmJS) {
  MOZ_ASSERT(compileState_ != CompileState::LazyTier2);

  // Initialize our task system
  if (!initTasks()) {
    return false;
  }

  // If codeMetaForAsmJS is null, we're compiling wasm; else we're compiling
  // asm.js, in which case it contains wasm::Code-lifetime asm.js-specific
  // information.
  MOZ_ASSERT(isAsmJS() == !!codeMetaForAsmJS);
  codeMetaForAsmJS_ = codeMetaForAsmJS;

  // Generate the shared stubs block, if we're compiling tier-1
  if (compilingTier1() && !prepareTier1()) {
    return false;
  }

  return startCompleteTier();
}
    166 
// Set up lazy tier-2 compilation of the single function `funcIndex` within
// the already-compiled module `code`. Not used for asm.js.
bool ModuleGenerator::initializePartialTier(const Code& code,
                                            uint32_t funcIndex) {
  MOZ_ASSERT(compileState_ == CompileState::LazyTier2);
  MOZ_ASSERT(!isAsmJS());

  // The implied codeMeta must be consistent with the one we already have.
  MOZ_ASSERT(&code.codeMeta() == codeMeta_);

  MOZ_ASSERT(!partialTieringCode_);
  partialTieringCode_ = &code;

  // Initialize our task system and start this partial tier
  return initTasks() && startPartialTier(funcIndex);
}
    181 
// Whether `funcIndex` already has a code range in the code block currently
// being generated (funcToCodeRange holds BAD_CODE_RANGE until the function
// has been compiled and linked).
bool ModuleGenerator::funcIsCompiledInBlock(uint32_t funcIndex) const {
  return codeBlock_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE;
}
    185 
// Return the function code range for `funcIndex` in the code block being
// generated. The function must already be compiled in this block (see
// funcIsCompiledInBlock).
const CodeRange& ModuleGenerator::funcCodeRangeInBlock(
    uint32_t funcIndex) const {
  MOZ_ASSERT(funcIsCompiledInBlock(funcIndex));
  const CodeRange& cr =
      codeBlock_->codeRanges[codeBlock_->funcToCodeRange[funcIndex]];
  MOZ_ASSERT(cr.isFunction());
  return cr;
}
    194 
    195 static bool InRange(uint32_t caller, uint32_t callee) {
    196  // We assume JumpImmediateRange is defined conservatively enough that the
    197  // slight difference between 'caller' (which is really the return address
    198  // offset) and the actual base of the relative displacement computation
    199  // isn't significant.
    200  uint32_t range = std::min(JitOptions.jumpThreshold, JumpImmediateRange);
    201  if (caller < callee) {
    202    return callee - caller < range;
    203  }
    204  return caller - callee < range;
    205 }
    206 
// Maps a callee function index to the offset of an already-emitted far-jump
// island for it, so linkCallSites can share one island among many call
// sites.
using OffsetMap =
    HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
    209 
// Patch every call site recorded since the previous invocation
// (lastPatchedCallSite_ tracks progress). Direct calls whose callee is
// compiled and within jump range are patched straight to the callee;
// otherwise a far-jump island is emitted (one per callee, shared via
// `existingCallFarJumps`) and the call is patched to target the island.
bool ModuleGenerator::linkCallSites() {
  AutoCreatedBy acb(*masm_, "linkCallSites");

  masm_->haltingAlign(CodeAlignment);

  // Create far jumps for calls that have relative offsets that may otherwise
  // go out of range. This method is called both between function bodies (at a
  // frequency determined by the ISA's jump range) and once at the very end of
  // a module's codegen after all possible calls/traps have been emitted.

  OffsetMap existingCallFarJumps;
  for (; lastPatchedCallSite_ < codeBlock_->callSites.length();
       lastPatchedCallSite_++) {
    CallSiteKind kind = codeBlock_->callSites.kind(lastPatchedCallSite_);
    uint32_t callerOffset =
        codeBlock_->callSites.returnAddressOffset(lastPatchedCallSite_);
    const CallSiteTarget& target = callSiteTargets_[lastPatchedCallSite_];
    switch (kind) {
      // These kinds require no patching here.
      case CallSiteKind::Import:
      case CallSiteKind::Indirect:
      case CallSiteKind::IndirectFast:
      case CallSiteKind::Symbolic:
      case CallSiteKind::Breakpoint:
      case CallSiteKind::EnterFrame:
      case CallSiteKind::LeaveFrame:
      case CallSiteKind::CollapseFrame:
      case CallSiteKind::FuncRef:
      case CallSiteKind::FuncRefFast:
      case CallSiteKind::ReturnStub:
      case CallSiteKind::StackSwitch:
      case CallSiteKind::RequestTierUp:
        break;
      case CallSiteKind::ReturnFunc:
      case CallSiteKind::Func: {
        // ReturnFunc sites are far jumps; Func sites are ordinary calls.
        auto patch = [this, kind](uint32_t callerOffset,
                                  uint32_t calleeOffset) {
          if (kind == CallSiteKind::ReturnFunc) {
            masm_->patchFarJump(CodeOffset(callerOffset), calleeOffset);
          } else {
            MOZ_ASSERT(kind == CallSiteKind::Func);
            masm_->patchCall(callerOffset, calleeOffset);
          }
        };
        if (funcIsCompiledInBlock(target.funcIndex())) {
          uint32_t calleeOffset =
              funcCodeRangeInBlock(target.funcIndex()).funcUncheckedCallEntry();
          if (InRange(callerOffset, calleeOffset)) {
            patch(callerOffset, calleeOffset);
            break;
          }
        }

        // Callee not compiled yet or out of range: route the call through a
        // far-jump island, creating it on first use for this callee.
        OffsetMap::AddPtr p =
            existingCallFarJumps.lookupForAdd(target.funcIndex());
        if (!p) {
          Offsets offsets;
          offsets.begin = masm_->currentOffset();
          if (!callFarJumps_.emplaceBack(target.funcIndex(),
                                         masm_->farJumpWithPatch().offset())) {
            return false;
          }
          offsets.end = masm_->currentOffset();
          if (masm_->oom()) {
            return false;
          }
          if (!codeBlock_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
                                                  offsets)) {
            return false;
          }
          if (!existingCallFarJumps.add(p, target.funcIndex(), offsets.begin)) {
            return false;
          }
        }

        patch(callerOffset, p->value());
        break;
      }
    }
  }

  masm_->flushBuffer();
  return !masm_->oom();
}
    293 
// Record the side-table information implied by a newly-linked CodeRange:
// function ranges populate the func-to-code-range index, entry/exit stubs
// record their offsets in the relevant per-function metadata, and singleton
// stubs record their unique module-level offsets.
void ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex,
                                    const CodeRange& codeRange) {
  switch (codeRange.kind()) {
    case CodeRange::Function:
      MOZ_ASSERT(codeBlock_->funcToCodeRange[codeRange.funcIndex()] ==
                 BAD_CODE_RANGE);
      codeBlock_->funcToCodeRange.insertInfallible(codeRange.funcIndex(),
                                                   codeRangeIndex);
      break;
    case CodeRange::InterpEntry:
      codeBlock_->lookupFuncExport(codeRange.funcIndex())
          .initEagerInterpEntryOffset(codeRange.begin());
      break;
    case CodeRange::JitEntry:
      // Nothing to do: jit entries are linked in the jump tables.
      break;
    case CodeRange::ImportJitExit:
      funcImports_[codeRange.funcIndex()].initJitExitOffset(codeRange.begin());
      break;
    case CodeRange::ImportInterpExit:
      funcImports_[codeRange.funcIndex()].initInterpExitOffset(
          codeRange.begin());
      break;
    // The following stubs are singletons: each offset may be set only once.
    case CodeRange::DebugStub:
      MOZ_ASSERT(!debugStubCodeOffset_);
      debugStubCodeOffset_ = codeRange.begin();
      break;
    case CodeRange::RequestTierUpStub:
      MOZ_ASSERT(!requestTierUpStubCodeOffset_);
      requestTierUpStubCodeOffset_ = codeRange.begin();
      break;
    case CodeRange::UpdateCallRefMetricsStub:
      MOZ_ASSERT(!updateCallRefMetricsStubCodeOffset_);
      updateCallRefMetricsStubCodeOffset_ = codeRange.begin();
      break;
    case CodeRange::TrapExit:
      MOZ_ASSERT(!linkData_->trapOffset);
      linkData_->trapOffset = codeRange.begin();
      break;
    case CodeRange::Throw:
      // Jumped to by other stubs, so nothing to do.
      break;
    case CodeRange::FarJumpIsland:
    case CodeRange::BuiltinThunk:
      MOZ_CRASH("Unexpected CodeRange kind");
  }
}
    341 
    342 // Append every element from `srcVec` where `filterOp(srcElem) == true`.
    343 // Applies `mutateOp(dstElem)` to every element that is appended.
    344 template <class Vec, class FilterOp, class MutateOp>
    345 static bool AppendForEach(Vec* dstVec, const Vec& srcVec, FilterOp filterOp,
    346                          MutateOp mutateOp) {
    347  // Eagerly grow the vector to the whole src vector. Any filtered elements
    348  // will be trimmed later.
    349  if (!dstVec->growByUninitialized(srcVec.length())) {
    350    return false;
    351  }
    352 
    353  using T = typename Vec::ElementType;
    354 
    355  T* dstBegin = dstVec->begin();
    356  T* dstEnd = dstVec->end();
    357 
    358  // We appended srcVec.length() elements at the beginning, so we append
    359  // elements starting at the first uninitialized element.
    360  T* dst = dstEnd - srcVec.length();
    361 
    362  for (const T* src = srcVec.begin(); src != srcVec.end(); src++) {
    363    if (!filterOp(src)) {
    364      continue;
    365    }
    366    new (dst) T(*src);
    367    mutateOp(dst - dstBegin, dst);
    368    dst++;
    369  }
    370 
    371  // Trim off the filtered out elements that were eagerly added at the
    372  // beginning
    373  size_t newSize = dst - dstBegin;
    374  if (newSize != dstVec->length()) {
    375    dstVec->shrinkTo(newSize);
    376  }
    377 
    378  return true;
    379 }
    380 
    381 template <typename T>
    382 bool FilterNothing(const T* element) {
    383  return true;
    384 }
    385 
// The same as the above `AppendForEach`, without performing any filtering:
// every source element is appended and passed through `mutateOp`.
template <class Vec, class MutateOp>
static bool AppendForEach(Vec* dstVec, const Vec& srcVec, MutateOp mutateOp) {
  using T = typename Vec::ElementType;
  return AppendForEach(dstVec, srcVec, &FilterNothing<T>, mutateOp);
}
    392 
// Merge one unit of compiled output (a batch of functions, or stub code)
// into the module: append its machine code to masm_ and rebase every
// recorded offset (code ranges, call sites, trap sites, metric/alloc-site
// patches, code labels, stackmaps, unwind info, try notes) by the code's
// final placement in the module. Returns false on OOM.
bool ModuleGenerator::linkCompiledCode(CompiledCode& code) {
  AutoCreatedBy acb(*masm_, "ModuleGenerator::linkCompiledCode");
  JitContext jcx;

  // Combine observed features from the compiled code into the metadata
  featureUsage_ |= code.featureUsage;

  // Fold in compilation stats from all compiled functions in this block
  tierStats_.mergeCompileStats(code.compileStats);

  if (compilingTier1() && mode() == CompileMode::LazyTiering) {
    // All the CallRefMetrics from this batch of functions will start indexing
    // at our current length of metrics.
    uint32_t startOfCallRefMetrics = numCallRefMetrics_;

    for (const FuncCompileOutput& func : code.funcs) {
      // We only compile defined functions, not imported functions
      MOZ_ASSERT(func.index >= codeMeta_->numFuncImports);
      uint32_t funcDefIndex = func.index - codeMeta_->numFuncImports;

      // This function should only be compiled once
      MOZ_ASSERT(funcDefFeatureUsages_[funcDefIndex] == FeatureUsage::None);

      // Track the feature usage for this function
      funcDefFeatureUsages_[funcDefIndex] = func.featureUsage;

      // Record the range of CallRefMetrics this function owns. The metrics
      // will be processed below when we patch the offsets into code.
      MOZ_ASSERT(func.callRefMetricsRange.begin +
                     func.callRefMetricsRange.length <=
                 code.callRefMetricsPatches.length());
      funcDefCallRefMetrics_[funcDefIndex] = func.callRefMetricsRange;
      funcDefCallRefMetrics_[funcDefIndex].offsetBy(startOfCallRefMetrics);
    }
  } else {
    // Outside tier-1 lazy tiering there must be no call-ref metrics at all.
    MOZ_ASSERT(funcDefFeatureUsages_.empty());
    MOZ_ASSERT(funcDefCallRefMetrics_.empty());
    MOZ_ASSERT(code.callRefMetricsPatches.empty());
#ifdef DEBUG
    for (const FuncCompileOutput& func : code.funcs) {
      MOZ_ASSERT(func.callRefMetricsRange.length == 0);
    }
#endif
  }

  if (compilingTier1()) {
    // All the AllocSites from this batch of functions will start indexing
    // at our current length.
    uint32_t startOfAllocSites = numAllocSites_;

    for (const FuncCompileOutput& func : code.funcs) {
      // We only compile defined functions, not imported functions
      MOZ_ASSERT(func.index >= codeMeta_->numFuncImports);
      uint32_t funcDefIndex = func.index - codeMeta_->numFuncImports;

      MOZ_ASSERT(func.allocSitesRange.begin + func.allocSitesRange.length <=
                 code.allocSitesPatches.length());
      funcDefAllocSites_[funcDefIndex] = func.allocSitesRange;
      funcDefAllocSites_[funcDefIndex].offsetBy(startOfAllocSites);
    }
  } else {
    MOZ_ASSERT(funcDefAllocSites_.empty());
    MOZ_ASSERT(code.allocSitesPatches.empty());
#ifdef DEBUG
    for (const FuncCompileOutput& func : code.funcs) {
      MOZ_ASSERT(func.allocSitesRange.length == 0);
    }
#endif
  }

  // Grab the perf spewers that were generated for these functions.
  if (!funcIonSpewers_.appendAll(std::move(code.funcIonSpewers)) ||
      !funcBaselineSpewers_.appendAll(std::move(code.funcBaselineSpewers))) {
    return false;
  }

  // Before merging in new code, if calls in a prior code range might go out of
  // range, insert far jumps to extend the range.

  if (!InRange(startOfUnpatchedCallsites_,
               masm_->size() + code.bytes.length())) {
    startOfUnpatchedCallsites_ = masm_->size();
    if (!linkCallSites()) {
      return false;
    }
  }

  // All code offsets in 'code' must be incremented by their position in the
  // overall module when the code was appended.

  masm_->haltingAlign(CodeAlignment);
  const size_t offsetInModule = masm_->size();
  if (code.bytes.length() != 0 &&
      !masm_->appendRawCode(code.bytes.begin(), code.bytes.length())) {
    return false;
  }

  // Rebase each code range and record its side-table info as it is appended.
  auto codeRangeOp = [offsetInModule, this](uint32_t codeRangeIndex,
                                            CodeRange* codeRange) {
    codeRange->offsetBy(offsetInModule);
    noteCodeRange(codeRangeIndex, *codeRange);
  };
  if (!AppendForEach(&codeBlock_->codeRanges, code.codeRanges, codeRangeOp)) {
    return false;
  }

  // Inlining-context entries are appended after the existing ones, so call
  // and trap sites must rebase their inlined-caller indices by this base.
  InlinedCallerOffsetIndex baseInlinedCallerOffsetIndex =
      InlinedCallerOffsetIndex(codeBlock_->inliningContext.length());
  if (!codeBlock_->inliningContext.appendAll(std::move(code.inliningContext))) {
    return false;
  }

  if (!codeBlock_->callSites.appendAll(std::move(code.callSites),
                                       offsetInModule,
                                       baseInlinedCallerOffsetIndex)) {
    return false;
  }

  if (!callSiteTargets_.appendAll(code.callSiteTargets)) {
    return false;
  }

  if (!codeBlock_->trapSites.appendAll(std::move(code.trapSites),
                                       offsetInModule,
                                       baseInlinedCallerOffsetIndex)) {
    return false;
  }

  // Record each symbolic access patch location, rebased into the module.
  for (const SymbolicAccess& access : code.symbolicAccesses) {
    uint32_t patchAt = offsetInModule + access.patchAt.offset();
    if (!linkData_->symbolicLinks[access.target].append(patchAt)) {
      return false;
    }
  }

  // Assign each CallRefMetrics a module-wide index and patch the metric's
  // byte offset into the code. Patchless entries still consume an index.
  for (const CallRefMetricsPatch& patch : code.callRefMetricsPatches) {
    if (!patch.hasOffsetOfOffsetPatch()) {
      numCallRefMetrics_ += 1;
      continue;
    }

    CodeOffset offset = CodeOffset(patch.offsetOfOffsetPatch());
    offset.offsetBy(offsetInModule);

    size_t callRefIndex = numCallRefMetrics_;
    numCallRefMetrics_ += 1;
    size_t callRefMetricOffset = callRefIndex * sizeof(CallRefMetrics);

    // Compute the offset of the metrics, and patch it. This may overflow,
    // in which case we report an OOM. We might need to do something smarter
    // here.
    if (callRefMetricOffset > (INT32_MAX / sizeof(CallRefMetrics))) {
      return false;
    }

    masm_->patchMove32(offset, Imm32(int32_t(callRefMetricOffset)));
  }

  // Use numAllocSites_ to patch bytecode specific AllocSite to its index in
  // the map.
  for (const AllocSitePatch& patch : code.allocSitesPatches) {
    uint32_t index = numAllocSites_;
    numAllocSites_ += 1;
    if (!patch.hasPatchOffset()) {
      continue;
    }

    CodeOffset offset = CodeOffset(patch.patchOffset());
    offset.offsetBy(offsetInModule);

    // Compute the offset of the AllocSite, and patch it. This may overflow,
    // in which case we report an OOM.
    if (index > INT32_MAX / sizeof(gc::AllocSite)) {
      return false;
    }
    uintptr_t allocSiteOffset = uintptr_t(index) * sizeof(gc::AllocSite);
    masm_->patchMove32(offset, Imm32(allocSiteOffset));
  }

  // Rebase internal code labels and record them for the linker.
  for (const CodeLabel& codeLabel : code.codeLabels) {
    LinkData::InternalLink link;
    link.patchAtOffset = offsetInModule + codeLabel.patchAt().offset();
    link.targetOffset = offsetInModule + codeLabel.target().offset();
#ifdef JS_CODELABEL_LINKMODE
    link.mode = codeLabel.linkMode();
#endif
    if (!linkData_->internalLinks.append(link)) {
      return false;
    }
  }

  // Transfer all stackmaps with the offset in module.
  if (!codeBlock_->stackMaps.appendAll(code.stackMaps, offsetInModule)) {
    return false;
  }

  auto unwindInfoOp = [=](uint32_t, CodeRangeUnwindInfo* i) {
    i->offsetBy(offsetInModule);
  };
  if (!AppendForEach(&codeBlock_->codeRangeUnwindInfos,
                     code.codeRangeUnwindInfos, unwindInfoOp)) {
    return false;
  }

  auto tryNoteFilter = [](const TryNote* tn) {
    // Filter out all try notes that were never given a try body. This may
    // happen due to dead code elimination.
    return tn->hasTryBody();
  };
  auto tryNoteOp = [=](uint32_t, TryNote* tn) { tn->offsetBy(offsetInModule); };
  return AppendForEach(&codeBlock_->tryNotes, code.tryNotes, tryNoteFilter,
                       tryNoteOp);
}
    606 
// Compile every function batched in `task->inputs` with the tier's compiler
// (Ion for Optimized, baseline for Baseline), filling `task->output`. On
// success the inputs are cleared; `task->lifo` is empty on entry and exit.
// Runs either on a helper thread or on the generator's own thread.
static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
  MOZ_ASSERT(task->lifo.isEmpty());
  MOZ_ASSERT(task->output.empty());

  switch (task->compilerEnv.tier()) {
    case Tier::Optimized:
      if (!IonCompileFunctions(task->codeMeta, task->codeTailMeta,
                               task->compilerEnv, task->lifo, task->inputs,
                               &task->output, error)) {
        return false;
      }
      break;
    case Tier::Baseline:
      if (!BaselineCompileFunctions(task->codeMeta, task->compilerEnv,
                                    task->lifo, task->inputs, &task->output,
                                    error)) {
        return false;
      }
      break;
  }

  MOZ_ASSERT(task->lifo.isEmpty());
  // Every batched input must have produced exactly one code range.
  MOZ_ASSERT(task->inputs.length() == task->output.codeRanges.length());
  task->inputs.clear();
  return true;
}
    633 
// Helper-thread entry point for a wasm compile task: run the compiler with
// the helper-thread lock released, then publish success (finished list) or
// failure (numFailed plus the first error message) and wake any waiter.
void CompileTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
  UniqueChars error;
  bool ok;

  {
    AutoUnlockHelperThreadState unlock(lock);
    ok = ExecuteCompileTask(this, &error);
  }

  // Don't release the lock between updating our state and returning from this
  // method.

  // A failed append to the finished list also counts as a task failure, so
  // the task is never silently lost.
  if (!ok || !state.finished().append(this)) {
    state.numFailed()++;
    if (!state.errorMessage()) {
      state.errorMessage() = std::move(error);
    }
  }

  state.condVar().notify_one(); /* failed or finished */
}
    655 
    656 ThreadType CompileTask::threadType() {
    657  switch (compileState) {
    658    case CompileState::Once:
    659    case CompileState::EagerTier1:
    660    case CompileState::LazyTier1:
    661      return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER1;
    662    case CompileState::EagerTier2:
    663    case CompileState::LazyTier2:
    664      return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER2;
    665    default:
    666      MOZ_CRASH();
    667  }
    668 }
    669 
bool ModuleGenerator::initTasks() {
  // Determine whether parallel or sequential compilation is to be used and
  // initialize the CompileTasks that will be used in either mode.

  MOZ_ASSERT(GetHelperThreadCount() > 1);

  MOZ_ASSERT(!parallel_);
  uint32_t numTasks = 1;
  if (  // "obvious" prerequisites for doing off-thread compilation
      CanUseExtraThreads() && GetHelperThreadCPUCount() > 1 &&
      // For lazy tier 2 compilations, the current thread -- running a
      // WasmPartialTier2CompileTask -- is already dedicated to compiling the
      // to-be-tiered-up function.  So don't create a new task for it.
      compileState_ != CompileState::LazyTier2) {
    parallel_ = true;
    // Twice the max compilation threads — presumably to keep the pipeline
    // full while results are being linked; confirm before changing.
    numTasks = 2 * GetMaxWasmCompilationThreads();
  }

  // Lazy tier-2 compilation has existing tier-1 code whose tail metadata is
  // passed through to the tasks.
  const CodeTailMetadata* codeTailMeta = nullptr;
  if (partialTieringCode_) {
    codeTailMeta = &partialTieringCode_->codeTailMeta();
  }

  if (!tasks_.initCapacity(numTasks)) {
    return false;
  }
  for (size_t i = 0; i < numTasks; i++) {
    tasks_.infallibleEmplaceBack(*codeMeta_, codeTailMeta, *compilerEnv_,
                                 compileState_, taskState_,
                                 COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
  }

  // Initially every task is on the free list.
  if (!freeTasks_.reserve(numTasks)) {
    return false;
  }
  for (size_t i = 0; i < numTasks; i++) {
    freeTasks_.infallibleAppend(&tasks_[i]);
  }
  return true;
}
    710 
    711 bool ModuleGenerator::locallyCompileCurrentTask() {
    712  if (!ExecuteCompileTask(currentTask_, error_)) {
    713    return false;
    714  }
    715  if (!finishTask(currentTask_)) {
    716    return false;
    717  }
    718  currentTask_ = nullptr;
    719  batchedBytecode_ = 0;
    720  return true;
    721 }
    722 
// Merge a completed task's compiled output into the module, then reset the
// task and return it to the free list for reuse.
bool ModuleGenerator::finishTask(CompileTask* task) {
  AutoCreatedBy acb(*masm_, "ModuleGenerator::finishTask");

  masm_->haltingAlign(CodeAlignment);

  if (!linkCompiledCode(task->output)) {
    return false;
  }

  task->output.clear();

  // The task must be fully drained before it is recycled.
  MOZ_ASSERT(task->inputs.empty());
  MOZ_ASSERT(task->output.empty());
  MOZ_ASSERT(task->lifo.isEmpty());
  freeTasks_.infallibleAppend(task);
  return true;
}
    740 
// Dispatch the current batch of function bodies for compilation: inline on
// this thread in sequential mode, or queued to the helper-thread pool in
// parallel mode. On success the generator is ready to start a fresh batch.
bool ModuleGenerator::launchBatchCompile() {
  MOZ_ASSERT(currentTask_);

  // Bail out promptly if compilation has been cancelled.
  if (cancelled_ && *cancelled_) {
    return false;
  }

  if (!parallel_) {
    return locallyCompileCurrentTask();
  }

  if (!StartOffThreadWasmCompile(currentTask_, compileState_)) {
    return false;
  }
  outstanding_++;
  currentTask_ = nullptr;
  batchedBytecode_ = 0;
  return true;
}
    760 
// Wait (under the helper-thread lock) until some outstanding parallel task
// completes, then link its output on this thread. Returns false as soon as
// any task has failed.
bool ModuleGenerator::finishOutstandingTask() {
  MOZ_ASSERT(parallel_);

  CompileTask* task = nullptr;
  {
    AutoLockHelperThreadState lock;
    while (true) {
      MOZ_ASSERT(outstanding_ > 0);

      if (taskState_.numFailed() > 0) {
        return false;
      }

      if (!taskState_.finished().empty()) {
        outstanding_--;
        task = taskState_.finished().popCopy();
        break;
      }

      taskState_.condVar().wait(lock); /* failed or finished */
    }
  }

  // Call outside of the compilation lock.
  return finishTask(task);
}
    787 
    788 bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
    789                                     uint32_t lineOrBytecode,
    790                                     const uint8_t* begin, const uint8_t* end,
    791                                     Uint32Vector&& lineNums) {
    792  MOZ_ASSERT(!finishedFuncDefs_);
    793  MOZ_ASSERT(funcIndex < codeMeta_->numFuncs());
    794 
    795  if (compilingTier1()) {
    796    static_assert(MaxFunctionBytes < UINT32_MAX);
    797    uint32_t bodyLength = (uint32_t)(end - begin);
    798    funcDefRanges_.infallibleAppend(BytecodeRange(lineOrBytecode, bodyLength));
    799  }
    800 
    801  uint32_t threshold;
    802  switch (tier()) {
    803    case Tier::Baseline:
    804      threshold = JitOptions.wasmBatchBaselineThreshold;
    805      break;
    806    case Tier::Optimized:
    807      threshold = JitOptions.wasmBatchIonThreshold;
    808      break;
    809    default:
    810      MOZ_CRASH("Invalid tier value");
    811      break;
    812  }
    813 
    814  uint32_t funcBytecodeLength = end - begin;
    815 
    816  // Do not go over the threshold if we can avoid it: spin off the compilation
    817  // before appending the function if we would go over.  (Very large single
    818  // functions may still exceed the threshold but this is fine; it'll be very
    819  // uncommon and is in any case safely handled by the MacroAssembler's buffer
    820  // limit logic.)
    821 
    822  if (currentTask_ && currentTask_->inputs.length() &&
    823      batchedBytecode_ + funcBytecodeLength > threshold) {
    824    if (!launchBatchCompile()) {
    825      return false;
    826    }
    827  }
    828 
    829  if (!currentTask_) {
    830    if (freeTasks_.empty() && !finishOutstandingTask()) {
    831      return false;
    832    }
    833    currentTask_ = freeTasks_.popCopy();
    834  }
    835 
    836  if (!currentTask_->inputs.emplaceBack(funcIndex, lineOrBytecode, begin, end,
    837                                        std::move(lineNums))) {
    838    return false;
    839  }
    840 
    841  batchedBytecode_ += funcBytecodeLength;
    842  MOZ_ASSERT(batchedBytecode_ <= MaxCodeSectionBytes);
    843  return true;
    844 }
    845 
    846 bool ModuleGenerator::finishFuncDefs() {
    847  MOZ_ASSERT(!finishedFuncDefs_);
    848 
    849  if (currentTask_ && !locallyCompileCurrentTask()) {
    850    return false;
    851  }
    852 
    853  finishedFuncDefs_ = true;
    854  return true;
    855 }
    856 
// Debug-only sanity checks on a freshly finished CodeBlock: verifies that
// offset-sorted metadata really is sorted and delegates to the per-structure
// invariant checkers.  Compiles to nothing in release builds.
static void CheckCodeBlock(const CodeBlock& codeBlock) {
#if defined(DEBUG)
  // Assert all sorted metadata is sorted.
  uint32_t last = 0;
  for (const CodeRange& codeRange : codeBlock.codeRanges) {
    MOZ_ASSERT(codeRange.begin() >= last);
    last = codeRange.end();
  }

  codeBlock.callSites.checkInvariants();
  codeBlock.trapSites.checkInvariants(codeBlock.base());

  // Unwind info entries must be in non-decreasing offset order.
  last = 0;
  for (const CodeRangeUnwindInfo& info : codeBlock.codeRangeUnwindInfos) {
    MOZ_ASSERT(info.offset() >= last);
    last = info.offset();
  }

  // Try notes should be sorted so that the end of ranges are in rising order
  // so that the innermost catch handler is chosen.
  last = 0;
  for (const wasm::TryNote& tryNote : codeBlock.tryNotes) {
    MOZ_ASSERT(tryNote.tryBodyEnd() >= last);
    MOZ_ASSERT(tryNote.tryBodyEnd() > tryNote.tryBodyBegin());
    // NOTE(review): `last` is set to the *begin* of the previous note here,
    // so each end is compared against the previous begin — confirm intended.
    last = tryNote.tryBodyBegin();
  }

  codeBlock.stackMaps.checkInvariants(codeBlock.base());

#endif
}
    888 
    889 bool ModuleGenerator::startCodeBlock(CodeBlockKind kind) {
    890  MOZ_ASSERT(!masmScope_ && !linkData_ && !codeBlock_);
    891  masmScope_.emplace(lifo_);
    892  masm_ = &masmScope_->masm;
    893  linkData_ = js::MakeUnique<LinkData>();
    894  codeBlock_ = js::MakeUnique<CodeBlock>(kind);
    895  return !!linkData_ && !!codeBlock_;
    896 }
    897 
// Finalize the in-flight code block: patch calls and far jumps, finish the
// MacroAssembler, trim metadata vectors, allocate executable memory and copy
// the code into it, then hand the completed CodeBlock/LinkData back through
// `result`.  Returns false on OOM or failed executable allocation.
bool ModuleGenerator::finishCodeBlock(CodeBlockResult* result) {
  // Now that all functions and stubs are generated and their CodeRanges
  // known, patch all calls (which can emit far jumps) and far jumps. Linking
  // can emit tiny far-jump stubs, so there is an ordering dependency here.

  if (!linkCallSites()) {
    return false;
  }

  // Far jumps whose target was compiled in this block are patched now;
  // the rest are recorded in the link data for later resolution.
  for (CallFarJump far : callFarJumps_) {
    if (funcIsCompiledInBlock(far.targetFuncIndex)) {
      masm_->patchFarJump(
          jit::CodeOffset(far.jumpOffset),
          funcCodeRangeInBlock(far.targetFuncIndex).funcUncheckedCallEntry());
    } else if (!linkData_->callFarJumps.append(far)) {
      return false;
    }
  }

  // Reset patching state for the next code block.
  lastPatchedCallSite_ = 0;
  startOfUnpatchedCallsites_ = 0;
  callSiteTargets_.clear();
  callFarJumps_.clear();

  // None of the linking or far-jump operations should emit masm metadata.

  MOZ_ASSERT(masm_->inliningContext().empty());
  MOZ_ASSERT(masm_->callSites().empty());
  MOZ_ASSERT(masm_->callSiteTargets().empty());
  MOZ_ASSERT(masm_->trapSites().empty());
  MOZ_ASSERT(masm_->symbolicAccesses().empty());
  MOZ_ASSERT(masm_->tryNotes().empty());
  MOZ_ASSERT(masm_->codeLabels().empty());

  masm_->finish();
  if (masm_->oom()) {
    return false;
  }

  // The try notes also need to be sorted to simplify lookup.
  std::sort(codeBlock_->tryNotes.begin(), codeBlock_->tryNotes.end());

  // These Vectors can get large and the excess capacity can be significant,
  // so realloc them down to size.

  codeBlock_->funcToCodeRange.shrinkStorageToFit();
  codeBlock_->codeRanges.shrinkStorageToFit();
  codeBlock_->inliningContext.shrinkStorageToFit();
  codeBlock_->callSites.shrinkStorageToFit();
  codeBlock_->trapSites.shrinkStorageToFit();
  codeBlock_->tryNotes.shrinkStorageToFit();

  // Mark the inlining context as done.
  codeBlock_->inliningContext.setImmutable();

  // Allocate the code storage, copy/link the code from `masm_` into it, set up
  // `codeBlock_->segment / codeBase / codeLength`, and adjust the metadata
  // offsets on `codeBlock_` accordingly.
  uint8_t* codeStart = nullptr;
  uint32_t codeLength = 0;
  if (partialTieringCode_) {
    // We're compiling a single function during tiering.  Place it in its own
    // hardware page, inside an existing CodeSegment if possible, or allocate a
    // new one and use that.  Either way, the chosen CodeSegment will be owned
    // by Code::lazyFuncSegments.
    MOZ_ASSERT(mode() == CompileMode::LazyTiering);

    // Try to allocate from Code::lazyFuncSegments. We do not allow a last-ditch
    // GC here as we may be running in OOL-code that is not ready for a GC.
    codeBlock_->segment = partialTieringCode_->createFuncCodeSegmentFromPool(
        *masm_, *linkData_, /* allowLastDitchGC = */ false, &codeStart,
        &codeLength);
  } else {
    // Create a new CodeSegment for the code and use that.
    CodeSource codeSource(*masm_, linkData_.get(), nullptr);
    codeLength = codeSource.lengthBytes();
    uint32_t allocationLength;
    codeBlock_->segment = CodeSegment::allocate(codeSource, nullptr,
                                                /* allowLastDitchGC */ true,
                                                &codeStart, &allocationLength);
    // Record the code usage for this tier.
    tierStats_.codeBytesUsed += codeLength;
    tierStats_.codeBytesMapped += allocationLength;
  }

  if (!codeBlock_->segment) {
    warnf("failed to allocate executable memory for module");
    return false;
  }

  codeBlock_->codeBase = codeStart;
  codeBlock_->codeLength = codeLength;

  // Check that metadata is consistent with the actual code we generated,
  // linked, and loaded.
  CheckCodeBlock(*codeBlock_);

  // Free the macro assembler scope, and reset our masm pointer
  masm_ = nullptr;
  masmScope_ = mozilla::Nothing();

  // Transfer ownership of the finished block and its link data to the caller.
  result->codeBlock = std::move(codeBlock_);
  result->linkData = std::move(linkData_);
  result->funcIonSpewers = std::move(funcIonSpewers_);
  result->funcBaselineSpewers = std::move(funcBaselineSpewers_);
  return true;
}
   1005 
// Prepare tier-1 compilation: size the per-function-definition metadata
// vectors, collect exported imports, generate the shared stubs code block,
// and store it in `sharedStubs_`.  Returns false on OOM.
bool ModuleGenerator::prepareTier1() {
  if (!startCodeBlock(CodeBlockKind::SharedStubs)) {
    return false;
  }

  // Initialize function definition ranges
  if (!funcDefRanges_.reserve(codeMeta_->numFuncDefs())) {
    return false;
  }

  // Initialize function definition feature usages (only used for lazy tiering
  // and inlining right now).
  if (mode() == CompileMode::LazyTiering &&
      (!funcDefFeatureUsages_.resize(codeMeta_->numFuncDefs()) ||
       !funcDefCallRefMetrics_.resize(codeMeta_->numFuncDefs()))) {
    return false;
  }

  // Initialize function definition alloc site ranges
  if (!funcDefAllocSites_.resize(codeMeta_->numFuncDefs())) {
    return false;
  }

  // Initialize function import metadata
  if (!funcImports_.resize(codeMeta_->numFuncImports)) {
    return false;
  }

  // The shared stubs code will contains function definitions for each imported
  // function.
  if (!FuncToCodeRangeMap::createDense(0, codeMeta_->numFuncImports,
                                       &codeBlock_->funcToCodeRange)) {
    return false;
  }

  // First pass: count exported imports so the funcExports vector can be
  // reserved up front and appended to infallibly below.
  uint32_t exportedFuncCount = 0;
  for (uint32_t funcIndex = 0; funcIndex < codeMeta_->numFuncImports;
       funcIndex++) {
    const FuncDesc& func = codeMeta_->funcs[funcIndex];
    if (func.isExported()) {
      exportedFuncCount++;
    }
  }
  if (!codeBlock_->funcExports.reserve(exportedFuncCount)) {
    return false;
  }

  // Second pass: record a FuncExport for each exported import.
  for (uint32_t funcIndex = 0; funcIndex < codeMeta_->numFuncImports;
       funcIndex++) {
    const FuncDesc& func = codeMeta_->funcs[funcIndex];
    if (!func.isExported()) {
      continue;
    }

    codeBlock_->funcExports.infallibleEmplaceBack(
        FuncExport(funcIndex, func.isEager()));
  }

  // Generate the stubs for the module first
  CompiledCode& stubCode = tasks_[0].output;
  MOZ_ASSERT(stubCode.empty());

  if (!GenerateStubs(*codeMeta_, funcImports_, codeBlock_->funcExports,
                     &stubCode) ||
      !linkCompiledCode(stubCode)) {
    return false;
  }
  stubCode.clear();

  return finishCodeBlock(&sharedStubs_);
}
   1077 
// Begin a complete-tier compilation (all function definitions): start the
// tier's code block, pre-reserve large vectors based on code-section size
// heuristics, and collect the set of exported function definitions.
// Returns false on OOM.
bool ModuleGenerator::startCompleteTier() {
#ifdef JS_JITSPEW
  completeTierStartTime_ = mozilla::TimeStamp::Now();
  JS_LOG(wasmPerf, Info,
         "CM=..%06lx  ModuleGenerator::startCompleteTier (%s, %u imports, %u "
         "functions)",
         (unsigned long)(uintptr_t(codeMeta_) & 0xFFFFFFL),
         tier() == Tier::Baseline ? "baseline" : "optimizing",
         (uint32_t)codeMeta_->numFuncImports,
         (uint32_t)codeMeta_->numFuncDefs());
#endif

  if (!startCodeBlock(CodeBlock::kindFromTier(tier()))) {
    return false;
  }

  // funcToCodeRange maps function indices to code-range indices and all
  // elements will be initialized by the time module generation is finished.

  if (!FuncToCodeRangeMap::createDense(
          codeMeta_->numFuncImports,
          codeMeta_->funcs.length() - codeMeta_->numFuncImports,
          &codeBlock_->funcToCodeRange)) {
    return false;
  }

  // Pre-reserve space for large Vectors to avoid the significant cost of the
  // final reallocs. In particular, the MacroAssembler can be enormous, so be
  // extra conservative. Since large over-reservations may fail when the
  // actual allocations will succeed, ignore OOM failures. Note,
  // shrinkStorageToFit calls at the end will trim off unneeded capacity.

  size_t codeSectionSize =
      codeMeta_->codeSectionRange ? codeMeta_->codeSectionRange->size() : 0;

  // 1.2x fudge factor over the estimated compiled size; capped at the
  // process-wide code limit.
  size_t estimatedCodeSize =
      size_t(1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize));
  (void)masm_->reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess));

  (void)codeBlock_->codeRanges.reserve(2 * codeMeta_->numFuncDefs());

  // Heuristic: roughly one call site per this many bytecode bytes.
  const size_t ByteCodesPerCallSite = 50;
  (void)codeBlock_->callSites.reserve(codeSectionSize / ByteCodesPerCallSite);

  // Heuristic: roughly one out-of-bounds trap site per this many bytes.
  const size_t ByteCodesPerOOBTrap = 10;
  (void)codeBlock_->trapSites.reserve(Trap::OutOfBounds,
                                      codeSectionSize / ByteCodesPerOOBTrap);

  // Accumulate all exported functions:
  // - explicitly marked as such;
  // - implicitly exported by being an element of function tables;
  // - implicitly exported by being the start function;
  // - implicitly exported by being used in global ref.func initializer
  // ModuleEnvironment accumulates this information for us during decoding,
  // transfer it to the FuncExportVector stored in Metadata.

  // First pass: count, so the reserve below lets the second pass append
  // infallibly.
  uint32_t exportedFuncCount = 0;
  for (uint32_t funcIndex = codeMeta_->numFuncImports;
       funcIndex < codeMeta_->funcs.length(); funcIndex++) {
    const FuncDesc& func = codeMeta_->funcs[funcIndex];
    if (func.isExported()) {
      exportedFuncCount++;
    }
  }
  if (!codeBlock_->funcExports.reserve(exportedFuncCount)) {
    return false;
  }

  for (uint32_t funcIndex = codeMeta_->numFuncImports;
       funcIndex < codeMeta_->funcs.length(); funcIndex++) {
    const FuncDesc& func = codeMeta_->funcs[funcIndex];

    if (!func.isExported()) {
      continue;
    }

    codeBlock_->funcExports.infallibleEmplaceBack(
        FuncExport(funcIndex, func.isEager()));
  }

  return true;
}
   1160 
// Begin a partial-tier compilation of a single function (lazy tiering):
// start the tier's code block, create a one-entry func-to-code-range map for
// `funcIndex`, and record its export entry if the function is exported.
// Returns false on OOM.
bool ModuleGenerator::startPartialTier(uint32_t funcIndex) {
#ifdef JS_JITSPEW
  // Spew-only: look up the function's name and bytecode size for the log.
  UTF8Bytes name;
  if (!codeMeta_->getFuncNameForWasm(
          NameContext::Standalone, funcIndex,
          partialTieringCode_->codeTailMeta().nameSectionPayload.get(),
          &name) ||
      !name.append("\0", 1)) {
    return false;
  }
  uint32_t bytecodeLength =
      partialTieringCode_->codeTailMeta().funcDefRange(funcIndex).size();
  JS_LOG(wasmPerf, Info,
         "CM=..%06lx  ModuleGenerator::startPartialTier  fI=%-5u  sz=%-5u  %s",
         (unsigned long)(uintptr_t(codeMeta_) & 0xFFFFFFL), funcIndex,
         bytecodeLength, name.length() > 0 ? name.begin() : "(unknown-name)");
#endif

  if (!startCodeBlock(CodeBlock::kindFromTier(tier()))) {
    return false;
  }

  // Only this single function is compiled in this block.
  if (!FuncToCodeRangeMap::createDense(funcIndex, 1,
                                       &codeBlock_->funcToCodeRange)) {
    return false;
  }

  const FuncDesc& func = codeMeta_->funcs[funcIndex];
  if (func.isExported() && !codeBlock_->funcExports.emplaceBack(
                               FuncExport(funcIndex, func.isEager()))) {
    return false;
  }

  return true;
}
   1196 
// Complete a tier: drain all outstanding helper-thread batches, generate
// entry stubs for exported functions, return the accumulated compile/link
// stats through `tierStats`, and finish the code block into `result`.
bool ModuleGenerator::finishTier(CompileAndLinkStats* tierStats,
                                 CodeBlockResult* result) {
  MOZ_ASSERT(finishedFuncDefs_);

  // Wait for every batch still in flight on helper threads.
  while (outstanding_ > 0) {
    if (!finishOutstandingTask()) {
      return false;
    }
  }

#ifdef DEBUG
  // The full-initialization check is skipped under lazy tiering —
  // presumably because not every function is compiled in that mode; confirm.
  if (mode() != CompileMode::LazyTiering) {
    codeBlock_->funcToCodeRange.assertAllInitialized();
  }
#endif

  // Now that all funcs have been compiled, we can generate entry stubs for
  // the ones that have been exported.

  CompiledCode& stubCode = tasks_[0].output;
  MOZ_ASSERT(stubCode.empty());

  if (!GenerateEntryStubs(*codeMeta_, codeBlock_->funcExports, &stubCode)) {
    return false;
  }

  if (!linkCompiledCode(stubCode)) {
    return false;
  }

  // Return the tier statistics and clear them
  *tierStats = tierStats_;
  tierStats_.clear();

  return finishCodeBlock(result);
}
   1233 
   1234 // Complete all tier-1 construction and return the resulting Module.  For this
   1235 // we will need both codeMeta_ (and maybe codeMetaForAsmJS_) and moduleMeta_.
SharedModule ModuleGenerator::finishModule(
    const BytecodeBufferOrSource& bytecode, ModuleMetadata& moduleMeta,
    JS::OptimizedEncodingListener* maybeCompleteTier2Listener) {
  MOZ_ASSERT(compilingTier1());

  // Finish the tier-1 code block first; everything below assembles the
  // Module around it.
  CodeBlockResult tier1Result;
  CompileAndLinkStats tier1Stats;
  if (!finishTier(&tier1Stats, &tier1Result)) {
    return nullptr;
  }

  // Record what features we encountered in this module
  moduleMeta.featureUsage = featureUsage_;

  // Copy over data from the Bytecode, which is going away at the end of
  // compilation.
  //
  // In particular, convert the data- and custom-section ranges in the
  // ModuleMetadata into their full-fat versions by copying the underlying
  // data blocks.

  const BytecodeSource& bytecodeSource = bytecode.source();
  MOZ_ASSERT(moduleMeta.dataSegments.empty());
  if (!moduleMeta.dataSegments.reserve(moduleMeta.dataSegmentRanges.length())) {
    return nullptr;
  }
  for (const DataSegmentRange& srcRange : moduleMeta.dataSegmentRanges) {
    MutableDataSegment dstSeg = js_new<DataSegment>();
    if (!dstSeg) {
      return nullptr;
    }
    if (!dstSeg->init(bytecodeSource, srcRange)) {
      return nullptr;
    }
    moduleMeta.dataSegments.infallibleAppend(std::move(dstSeg));
  }

  // Materialize each custom section (name + payload) from its bytecode range.
  MOZ_ASSERT(moduleMeta.customSections.empty());
  if (!moduleMeta.customSections.reserve(
          codeMeta_->customSectionRanges.length())) {
    return nullptr;
  }
  for (const CustomSectionRange& srcRange : codeMeta_->customSectionRanges) {
    BytecodeSpan nameSpan = bytecodeSource.getSpan(srcRange.name);
    CustomSection sec;
    if (!sec.name.append(nameSpan.data(), nameSpan.size())) {
      return nullptr;
    }
    MutableBytes payload = js_new<ShareableBytes>();
    if (!payload) {
      return nullptr;
    }
    BytecodeSpan payloadSpan = bytecodeSource.getSpan(srcRange.payload);
    if (!payload->append(payloadSpan.data(), payloadSpan.size())) {
      return nullptr;
    }
    sec.payload = std::move(payload);
    moduleMeta.customSections.infallibleAppend(std::move(sec));
  }

  // Allocate and initialize the code tail metadata now that we have seen the
  // entire module.
  MutableCodeTailMetadata codeTailMeta =
      js_new<CodeTailMetadata>(*moduleMeta.codeMeta);
  if (!codeTailMeta) {
    return nullptr;
  }
  moduleMeta.codeTailMeta = codeTailMeta;

  // Transfer the function definition ranges
  MOZ_ASSERT(funcDefRanges_.length() == codeMeta_->numFuncDefs());
  codeTailMeta->funcDefRanges = std::move(funcDefRanges_);

  // Transfer the function definition feature usages
  codeTailMeta->funcDefFeatureUsages = std::move(funcDefFeatureUsages_);
  codeTailMeta->funcDefCallRefs = std::move(funcDefCallRefMetrics_);
  codeTailMeta->funcDefAllocSites = std::move(funcDefAllocSites_);
  MOZ_ASSERT_IF(mode() != CompileMode::LazyTiering, numCallRefMetrics_ == 0);
  codeTailMeta->numCallRefMetrics = numCallRefMetrics_;

  if (tier() == Tier::Baseline) {
    codeTailMeta->numAllocSites = numAllocSites_;
  } else {
    MOZ_ASSERT(numAllocSites_ == 0);
    // Even if funcDefAllocSites were not created, e.g. single tier of
    // optimized compilation, the AllocSite array will exist.
    codeTailMeta->numAllocSites = codeMeta_->numTypes();
  }

  // Initialize debuggable module state
  if (debugEnabled()) {
    // We cannot use lazy or eager tiering with debugging
    MOZ_ASSERT(mode() == CompileMode::Once);

    // Mark the flag
    codeTailMeta->debugEnabled = true;

    // Grab or allocate a full copy of the bytecode of this module
    if (!bytecode.getOrCreateBuffer(&codeTailMeta->debugBytecode)) {
      return nullptr;
    }
    codeTailMeta->codeSectionBytecode =
        codeTailMeta->debugBytecode.codeSection();

    // Compute the hash for this module
    static_assert(sizeof(ModuleHash) <= sizeof(mozilla::SHA1Sum::Hash),
                  "The ModuleHash size shall not exceed the SHA1 hash size.");
    mozilla::SHA1Sum::Hash hash;
    bytecodeSource.computeHash(&hash);
    memcpy(codeTailMeta->debugHash, hash, sizeof(ModuleHash));
  }

  // Initialize lazy tiering module state
  if (mode() == CompileMode::LazyTiering) {
    // We cannot debug and use lazy tiering
    MOZ_ASSERT(!debugEnabled());

    // Grab or allocate a reference to the code section for this module
    if (bytecodeSource.hasCodeSection()) {
      codeTailMeta->codeSectionBytecode = bytecode.getOrCreateCodeSection();
      if (!codeTailMeta->codeSectionBytecode) {
        return nullptr;
      }
    }

    // Create call_ref hints
    codeTailMeta->callRefHints = MutableCallRefHints(
        js_pod_calloc<MutableCallRefHint>(numCallRefMetrics_));
    if (!codeTailMeta->callRefHints) {
      return nullptr;
    }
  }

  // Store a reference to the name section on the code metadata
  if (codeMeta_->nameSection) {
    codeTailMeta->nameSectionPayload =
        moduleMeta.customSections[codeMeta_->nameSection->customSectionIndex]
            .payload;
  } else {
    MOZ_ASSERT(codeTailMeta->nameSectionPayload == nullptr);
  }

  // Now that we have the name section we can send our blocks to the profiler.
  sharedStubs_.codeBlock->sendToProfiler(
      *codeMeta_, *codeTailMeta, codeMetaForAsmJS_,
      FuncIonPerfSpewerSpan(sharedStubs_.funcIonSpewers),
      FuncBaselinePerfSpewerSpan(sharedStubs_.funcBaselineSpewers));
  tier1Result.codeBlock->sendToProfiler(
      *codeMeta_, *codeTailMeta, codeMetaForAsmJS_,
      FuncIonPerfSpewerSpan(tier1Result.funcIonSpewers),
      FuncBaselinePerfSpewerSpan(tier1Result.funcBaselineSpewers));

  // Create the Code holding both the shared stubs and tier-1 code blocks.
  MutableCode code =
      js_new<Code>(mode(), *codeMeta_, *codeTailMeta, codeMetaForAsmJS_);
  if (!code || !code->initialize(std::move(funcImports_),
                                 std::move(sharedStubs_.codeBlock),
                                 std::move(sharedStubs_.linkData),
                                 std::move(tier1Result.codeBlock),
                                 std::move(tier1Result.linkData), tier1Stats)) {
    return nullptr;
  }

  // Copy in a couple of offsets.
  code->setDebugStubOffset(debugStubCodeOffset_);
  code->setRequestTierUpStubOffset(requestTierUpStubCodeOffset_);
  code->setUpdateCallRefMetricsStubOffset(updateCallRefMetricsStubCodeOffset_);

  // All the components are finished, so create the complete Module and start
  // tier-2 compilation if requested.

  MutableModule module = js_new<Module>(moduleMeta, *code);
  if (!module) {
    return nullptr;
  }

  // If we can serialize (not asm.js), are not planning on serializing already
  // and are testing serialization, then do a roundtrip through serialization
  // to test it out.
  if (!isAsmJS() && compileArgs_->features.testSerialization &&
      module->canSerialize()) {
    MOZ_RELEASE_ASSERT(mode() == CompileMode::Once &&
                       tier() == Tier::Serialized);

    Bytes serializedBytes;
    if (!module->serialize(&serializedBytes)) {
      return nullptr;
    }

    MutableModule deserializedModule =
        Module::deserialize(serializedBytes.begin(), serializedBytes.length());
    if (!deserializedModule) {
      return nullptr;
    }
    module = deserializedModule;

    // Perform storeOptimizedEncoding here instead of below so we don't have to
    // re-serialize the module.
    if (maybeCompleteTier2Listener && module->canSerialize()) {
      maybeCompleteTier2Listener->storeOptimizedEncoding(
          serializedBytes.begin(), serializedBytes.length());
      maybeCompleteTier2Listener = nullptr;
    }
  }

  if (compileState_ == CompileState::EagerTier1) {
    // Grab or allocate a copy of the code section bytecode
    SharedBytes codeSection;
    if (bytecodeSource.hasCodeSection()) {
      codeSection = bytecode.getOrCreateCodeSection();
      if (!codeSection) {
        return nullptr;
      }
    }

    // Kick off a background tier-2 compile task
    module->startTier2(codeSection, maybeCompleteTier2Listener);
  } else if (tier() == Tier::Serialized && maybeCompleteTier2Listener &&
             module->canSerialize()) {
    Bytes bytes;
    if (module->serialize(&bytes)) {
      maybeCompleteTier2Listener->storeOptimizedEncoding(bytes.begin(),
                                                         bytes.length());
    }
  }

#ifdef JS_JITSPEW
  // Spew-only: report compile throughput for this tier.
  size_t bytecodeSize = codeMeta_->codeSectionSize();
  double wallclockSeconds =
      (mozilla::TimeStamp::Now() - completeTierStartTime_).ToSeconds();
  JS_LOG(wasmPerf, Info,
         "CM=..%06lx  ModuleGenerator::finishModule      "
         "(%s, %.2f MB in %.3fs = %.2f MB/s)",
         (unsigned long)(uintptr_t(codeMeta_) & 0xFFFFFFL),
         tier() == Tier::Baseline ? "baseline" : "optimizing",
         double(bytecodeSize) / 1.0e6, wallclockSeconds,
         double(bytecodeSize) / 1.0e6 / wallclockSeconds);
#endif

  return module;
}
   1476 
   1477 // Complete all tier-2 construction.  This merely augments the existing Code
   1478 // and does not require moduleMeta_.
// Finish an eager tier-2 (optimized) compilation and install its code block
// into the existing Module.  Returns false on cancellation or failure.
bool ModuleGenerator::finishTier2(const Module& module) {
  MOZ_ASSERT(!compilingTier1());
  MOZ_ASSERT(compileState_ == CompileState::EagerTier2);
  MOZ_ASSERT(tier() == Tier::Optimized);
  MOZ_ASSERT(!compilerEnv_->debugEnabled());

  if (cancelled_ && *cancelled_) {
    return false;
  }

  CodeBlockResult tier2Result;
  CompileAndLinkStats tier2Stats;
  if (!finishTier(&tier2Stats, &tier2Result)) {
    return false;
  }

  if (MOZ_UNLIKELY(JitOptions.wasmDelayTier2)) {
    // Introduce an artificial delay when testing wasmDelayTier2, since we
    // want to exercise both tier1 and tier2 code in this case.
    ThisThread::SleepMilliseconds(500);
  }

  // While we still have the func spewers, send the code block to the profiler.
  tier2Result.codeBlock->sendToProfiler(
      *codeMeta_, module.codeTailMeta(), codeMetaForAsmJS_,
      FuncIonPerfSpewerSpan(tier2Result.funcIonSpewers),
      FuncBaselinePerfSpewerSpan(tier2Result.funcBaselineSpewers));

  return module.finishTier2(std::move(tier2Result.codeBlock),
                            std::move(tier2Result.linkData), tier2Stats);
}
   1510 
// Finish a lazy (partial) tier-2 compilation of a single function and install
// its code block into `partialTieringCode_`.  Mirrors finishTier2() but for
// the LazyTier2 compile state.  Returns false on cancellation or failure.
bool ModuleGenerator::finishPartialTier2() {
  MOZ_ASSERT(!compilingTier1());
  MOZ_ASSERT(compileState_ == CompileState::LazyTier2);
  MOZ_ASSERT(tier() == Tier::Optimized);
  MOZ_ASSERT(!compilerEnv_->debugEnabled());

  if (cancelled_ && *cancelled_) {
    return false;
  }

  CodeBlockResult tier2Result;
  CompileAndLinkStats tier2Stats;
  if (!finishTier(&tier2Stats, &tier2Result)) {
    return false;
  }

  // While we still have the func spewers, send the code block to the profiler.
  tier2Result.codeBlock->sendToProfiler(
      *codeMeta_, partialTieringCode_->codeTailMeta(), codeMetaForAsmJS_,
      FuncIonPerfSpewerSpan(tier2Result.funcIonSpewers),
      FuncBaselinePerfSpewerSpan(tier2Result.funcBaselineSpewers));

  return partialTieringCode_->finishTier2(std::move(tier2Result.codeBlock),
                                          std::move(tier2Result.linkData),
                                          tier2Stats);
}
   1537 
   1538 void ModuleGenerator::warnf(const char* msg, ...) {
   1539  if (!warnings_) {
   1540    return;
   1541  }
   1542 
   1543  va_list ap;
   1544  va_start(ap, msg);
   1545  UniqueChars str(JS_vsmprintf(msg, ap));
   1546  va_end(ap);
   1547  if (!str) {
   1548    return;
   1549  }
   1550 
   1551  (void)warnings_->append(std::move(str));
   1552 }
   1553 
   1554 size_t CompiledCode::sizeOfExcludingThis(
   1555    mozilla::MallocSizeOf mallocSizeOf) const {
   1556  return funcs.sizeOfExcludingThis(mallocSizeOf) +
   1557         funcIonSpewers.sizeOfExcludingThis(mallocSizeOf) +
   1558         funcBaselineSpewers.sizeOfExcludingThis(mallocSizeOf) +
   1559         bytes.sizeOfExcludingThis(mallocSizeOf) +
   1560         codeRanges.sizeOfExcludingThis(mallocSizeOf) +
   1561         inliningContext.sizeOfExcludingThis(mallocSizeOf) +
   1562         callSites.sizeOfExcludingThis(mallocSizeOf) +
   1563         callSiteTargets.sizeOfExcludingThis(mallocSizeOf) +
   1564         trapSites.sizeOfExcludingThis(mallocSizeOf) +
   1565         symbolicAccesses.sizeOfExcludingThis(mallocSizeOf) +
   1566         tryNotes.sizeOfExcludingThis(mallocSizeOf) +
   1567         codeRangeUnwindInfos.sizeOfExcludingThis(mallocSizeOf) +
   1568         callRefMetricsPatches.sizeOfExcludingThis(mallocSizeOf) +
   1569         allocSitesPatches.sizeOfExcludingThis(mallocSizeOf) +
   1570         codeLabels.sizeOfExcludingThis(mallocSizeOf);
   1571 }
   1572 
   1573 size_t CompileTask::sizeOfExcludingThis(
   1574    mozilla::MallocSizeOf mallocSizeOf) const {
   1575  return lifo.sizeOfExcludingThis(mallocSizeOf) +
   1576         inputs.sizeOfExcludingThis(mallocSizeOf) +
   1577         output.sizeOfExcludingThis(mallocSizeOf);
   1578 }