tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WasmCode.cpp (56019B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 *
      4 * Copyright 2016 Mozilla Foundation
      5 *
      6 * Licensed under the Apache License, Version 2.0 (the "License");
      7 * you may not use this file except in compliance with the License.
      8 * You may obtain a copy of the License at
      9 *
     10 *     http://www.apache.org/licenses/LICENSE-2.0
     11 *
     12 * Unless required by applicable law or agreed to in writing, software
     13 * distributed under the License is distributed on an "AS IS" BASIS,
     14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     15 * See the License for the specific language governing permissions and
     16 * limitations under the License.
     17 */
     18 
     19 #include "wasm/WasmCode.h"
     20 
     21 #include "mozilla/Atomics.h"
     22 #include "mozilla/BinarySearch.h"
     23 #include "mozilla/EnumeratedRange.h"
     24 #include "mozilla/Sprintf.h"
     25 
     26 #include <algorithm>
     27 
     28 #include "jsnum.h"
     29 
     30 #include "jit/Disassemble.h"
     31 #include "jit/ExecutableAllocator.h"
     32 #include "jit/FlushICache.h"  // for FlushExecutionContextForAllThreads
     33 #include "jit/MacroAssembler.h"
     34 #include "jit/PerfSpewer.h"
     35 #include "util/Poison.h"
     36 #include "vm/HelperThreadState.h"  // PartialTier2CompileTask
     37 #ifdef MOZ_VTUNE
     38 #  include "vtune/VTuneWrapper.h"
     39 #endif
     40 #include "wasm/WasmModule.h"
     41 #include "wasm/WasmProcess.h"
     42 #include "wasm/WasmSerialize.h"
     43 #include "wasm/WasmStubs.h"
     44 #include "wasm/WasmUtility.h"
     45 
     46 using namespace js;
     47 using namespace js::jit;
     48 using namespace js::wasm;
     49 using mozilla::Atomic;
     50 using mozilla::BinarySearch;
     51 using mozilla::BinarySearchIf;
     52 using mozilla::DebugOnly;
     53 using mozilla::MakeEnumeratedRange;
     54 using mozilla::MallocSizeOf;
     55 using mozilla::Maybe;
     56 
     57 size_t LinkData::SymbolicLinkArray::sizeOfExcludingThis(
     58    MallocSizeOf mallocSizeOf) const {
     59  size_t size = 0;
     60  for (const Uint32Vector& offsets : *this) {
     61    size += offsets.sizeOfExcludingThis(mallocSizeOf);
     62  }
     63  return size;
     64 }
     65 
     66 static uint32_t RoundupExecutableCodePageSize(uint32_t codeLength) {
     67  static_assert(MaxCodeBytesPerProcess <= INT32_MAX, "rounding won't overflow");
     68  // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
     69  return RoundUp(codeLength, ExecutableCodePageSize);
     70 }
     71 
// Allocate `codeLength` bytes of executable (initially writable) memory for
// wasm code.  `codeLength` must already be rounded to the executable code
// page size.  On success, `writable` is emplaced so this thread may write to
// JIT memory; on failure (over the per-process limit, or OOM even after an
// optional last-ditch purge) returns nullptr and leaves `writable` empty.
UniqueCodeBytes wasm::AllocateCodeBytes(
    Maybe<AutoMarkJitCodeWritableForThread>& writable, uint32_t codeLength,
    bool allowLastDitchGC) {
  // Enforce the per-process executable code limit up front.
  if (codeLength > MaxCodeBytesPerProcess) {
    return nullptr;
  }

  MOZ_RELEASE_ASSERT(codeLength == RoundupExecutableCodePageSize(codeLength));
  void* p = AllocateExecutableMemory(codeLength, ProtectionSetting::Writable,
                                     MemCheckKind::MakeUndefined);

  // If the allocation failed and the embedding gives us a last-ditch attempt
  // to purge all memory (which, in gecko, does a purging GC/CC/GC), do that
  // then retry the allocation.
  if (!p && allowLastDitchGC) {
    if (OnLargeAllocationFailure) {
      OnLargeAllocationFailure();
      p = AllocateExecutableMemory(codeLength, ProtectionSetting::Writable,
                                   MemCheckKind::MakeUndefined);
    }
  }

  if (!p) {
    return nullptr;
  }

  // Construct AutoMarkJitCodeWritableForThread after allocating memory, to
  // ensure it's not nested (OnLargeAllocationFailure can trigger GC).
  writable.emplace();

  // We account for the bytes allocated in WasmModuleObject::create, where we
  // have the necessary JSContext.
  return UniqueCodeBytes((uint8_t*)p, FreeCode(codeLength));
}
    106 
// Deleter for UniqueCodeBytes: unregisters the range from VTune (when built
// with VTune support) and returns the executable pages to the system.
// `codeLength` was captured at allocation time and must be page-rounded.
void FreeCode::operator()(uint8_t* bytes) {
  MOZ_ASSERT(codeLength);
  MOZ_ASSERT(codeLength == RoundupExecutableCodePageSize(codeLength));

#ifdef MOZ_VTUNE
  vtune::UnmarkBytes(bytes, codeLength);
#endif
  DeallocateExecutableMemory(bytes, codeLength);
}
    116 
// Apply all static relocations recorded in `linkData` to freshly-copied code
// at `base`: internal code labels, far jumps to per-function targets (lazy
// tiering only), and absolute addresses of runtime builtins
// (SymbolicAddress).  `maybeCode` is only consulted to resolve far-jump
// targets.  Returns false only if builtin thunks could not be initialized.
bool wasm::StaticallyLink(jit::AutoMarkJitCodeWritableForThread& writable,
                          uint8_t* base, const LinkData& linkData,
                          const Code* maybeCode) {
  // Builtin thunks must exist before calls to them can be patched in.
  if (!EnsureBuiltinThunksInitialized(writable)) {
    return false;
  }

  for (LinkData::InternalLink link : linkData.internalLinks) {
    CodeLabel label;
    label.patchAt()->bind(link.patchAtOffset);
    label.target()->bind(link.targetOffset);
#ifdef JS_CODELABEL_LINKMODE
    label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
#endif
    Assembler::Bind(base, label);
  }

  for (CallFarJump far : linkData.callFarJumps) {
    // Far jumps are only recorded under lazy tiering; they target the
    // unchecked call entry of the function's current best code block.
    MOZ_ASSERT(maybeCode && maybeCode->mode() == CompileMode::LazyTiering);
    const CodeBlock& bestBlock = maybeCode->funcCodeBlock(far.targetFuncIndex);
    uint32_t stubRangeIndex = bestBlock.funcToCodeRange[far.targetFuncIndex];
    const CodeRange& stubRange = bestBlock.codeRanges[stubRangeIndex];
    uint8_t* stubBase = bestBlock.base();
    MacroAssembler::patchFarJump(base + far.jumpOffset,
                                 stubBase + stubRange.funcUncheckedCallEntry());
  }

  for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
    const Uint32Vector& offsets = linkData.symbolicLinks[imm];
    if (offsets.empty()) {
      continue;
    }

    void* target = SymbolicAddressTarget(imm);
    for (uint32_t offset : offsets) {
      // Each patch site holds the sentinel (void*)-1 until linked; replace it
      // with the real builtin address (the old value is checked).
      uint8_t* patchAt = base + offset;
      Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                         PatchedImmPtr(target),
                                         PatchedImmPtr((void*)-1));
    }
  }

  return true;
}
    161 
// Inverse of StaticallyLink: rewrite every patched location back to its
// pre-link value (null immediates for internal links, the (void*)-1 sentinel
// for symbolic links) so the code image no longer embeds absolute addresses.
void wasm::StaticallyUnlink(uint8_t* base, const LinkData& linkData) {
  for (LinkData::InternalLink link : linkData.internalLinks) {
    CodeLabel label;
    label.patchAt()->bind(link.patchAtOffset);
    label.target()->bind(-size_t(base));  // to reset immediate to null
#ifdef JS_CODELABEL_LINKMODE
    label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
#endif
    Assembler::Bind(base, label);
  }

  for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
    const Uint32Vector& offsets = linkData.symbolicLinks[imm];
    if (offsets.empty()) {
      continue;
    }

    void* target = SymbolicAddressTarget(imm);
    for (uint32_t offset : offsets) {
      // Restore the sentinel, checking the current value is the builtin
      // address that StaticallyLink installed.
      uint8_t* patchAt = base + offset;
      Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                         PatchedImmPtr((void*)-1),
                                         PatchedImmPtr(target));
    }
  }
}
    188 
// Source code to be copied straight out of a MacroAssembler.  `linkData` and
// `code` may be null; see copyAndLink() for how their absence is handled.
CodeSource::CodeSource(jit::MacroAssembler& masm, const LinkData* linkData,
                       const Code* code)
    : masm_(&masm),
      bytes_(nullptr),
      length_(masm.bytesNeeded()),
      linkData_(linkData),
      code_(code) {}
    196 
// Source code to be copied from a raw byte buffer; in this form link data is
// mandatory (copyAndLink() asserts this indirectly via the masm_ fallback).
CodeSource::CodeSource(const uint8_t* bytes, uint32_t length,
                       const LinkData& linkData, const Code* code)
    : masm_(nullptr),
      bytes_(bytes),
      length_(length),
      linkData_(&linkData),
      code_(code) {}
    204 
// Copy this source's machine code to `codeStart` (writable per `writable`)
// and link it: either full static linking via LinkData, or — when only a
// MacroAssembler was provided — basic code-label binding and debug symbolic
// patching.
bool CodeSource::copyAndLink(jit::AutoMarkJitCodeWritableForThread& writable,
                             uint8_t* codeStart) const {
  // Copy the machine code over
  if (masm_) {
    masm_->executableCopy(codeStart);
  } else {
    memcpy(codeStart, bytes_, length_);
  }

  // Use link data if we have it, or else fall back to basic linking using the
  // MacroAssembler.
  if (linkData_) {
    return StaticallyLink(writable, codeStart, *linkData_, code_);
  }

  // We must always have link data if we're coming from raw bytes.
  MOZ_ASSERT(masm_);
  // If we didn't provide link data, then we shouldn't have provided the code
  // object.
  MOZ_ASSERT(!code_);
  PatchDebugSymbolicAccesses(codeStart, *masm_);
  for (const CodeLabel& label : masm_->codeLabels()) {
    Assembler::Bind(codeStart, label);
  }
  return true;
}
    231 
    232 size_t CodeSegment::AllocationAlignment() {
    233  // If we are write-protecting code, all new code allocations must be rounded
    234  // to the system page size.
    235  if (JitOptions.writeProtectCode) {
    236    return gc::SystemPageSize();
    237  }
    238 
    239  // Otherwise we can just use the standard JIT code alignment.
    240  return jit::CodeAlignment;
    241 }
    242 
    243 size_t CodeSegment::AlignAllocationBytes(uintptr_t bytes) {
    244  return AlignBytes(bytes, AllocationAlignment());
    245 }
    246 
    247 bool CodeSegment::IsAligned(uintptr_t bytes) {
    248  return bytes == AlignAllocationBytes(bytes);
    249 }
    250 
// Returns true if `bytes` more bytes can still be claimed from this segment.
// The condition is written in two parts so that `capacityBytes() - bytes`
// can never underflow.
bool CodeSegment::hasSpace(size_t bytes) const {
  MOZ_ASSERT(CodeSegment::IsAligned(bytes));
  return bytes <= capacityBytes() && lengthBytes_ <= capacityBytes() - bytes;
}
    255 
// Reserve `bytes` (already aligned) at the end of this segment, returning the
// start of the reserved region via `claimedBase`.  The caller must have
// checked capacity; insufficient space here is a release assert.
void CodeSegment::claimSpace(size_t bytes, uint8_t** claimedBase) {
  MOZ_RELEASE_ASSERT(hasSpace(bytes));
  *claimedBase = base() + lengthBytes_;
  lengthBytes_ += bytes;
}
    261 
/* static */
// Create a segment backed by `capacityBytes` of executable memory (which
// must be rounded to the executable code page size).  A capacity of zero
// yields a valid, empty segment with no backing allocation.  Returns nullptr
// on OOM.
SharedCodeSegment CodeSegment::create(
    mozilla::Maybe<jit::AutoMarkJitCodeWritableForThread>& writable,
    size_t capacityBytes, bool allowLastDitchGC) {
  MOZ_RELEASE_ASSERT(capacityBytes ==
                     RoundupExecutableCodePageSize(capacityBytes));

  UniqueCodeBytes codeBytes;
  if (capacityBytes != 0) {
    codeBytes = AllocateCodeBytes(writable, capacityBytes, allowLastDitchGC);
    if (!codeBytes) {
      return nullptr;
    }
  }

  return js_new<CodeSegment>(std::move(codeBytes), /*lengthBytes=*/0,
                             capacityBytes);
}
    280 
    281 // When allocating a single stub to a page, we should not always place the stub
    282 // at the beginning of the page as the stubs will tend to thrash the icache by
    283 // creating conflicts (everything ends up in the same cache set).  Instead,
    284 // locate stubs at different line offsets up to 3/4 the system page size (the
    285 // code allocation quantum).
    286 //
    287 // This may be called on background threads, hence the atomic.
    288 static uint32_t RandomPaddingForCodeLength(uint32_t codeLength) {
    289  // The counter serves only to spread the code out, it has no other meaning and
    290  // can wrap around.
    291  static mozilla::Atomic<uint32_t, mozilla::MemoryOrdering::ReleaseAcquire>
    292      counter(0);
    293  // We assume that the icache line size is 64 bytes, which is close to
    294  // universally true.
    295  const size_t cacheLineSize = 64;
    296  const size_t systemPageSize = gc::SystemPageSize();
    297 
    298  // If we're not write-protecting code, then we do not need to add any padding
    299  if (!JitOptions.writeProtectCode) {
    300    return 0;
    301  }
    302 
    303  // Don't add more than 3/4 of a page of padding
    304  size_t maxPadBytes = ((systemPageSize * 3) / 4);
    305  size_t maxPadLines = maxPadBytes / cacheLineSize;
    306 
    307  // If code length is close to a page boundary, avoid pushing it to a new page
    308  size_t remainingBytesInPage =
    309      AlignBytes(codeLength, systemPageSize) - codeLength;
    310  size_t remainingLinesInPage = remainingBytesInPage / cacheLineSize;
    311 
    312  // Limit padding to the smallest of the above
    313  size_t padLinesAvailable = std::min(maxPadLines, remainingLinesInPage);
    314 
    315  // Don't add any padding if none is available
    316  if (padLinesAvailable == 0) {
    317    return 0;
    318  }
    319 
    320  uint32_t random = counter++;
    321  uint32_t padding = (random % padLinesAvailable) * cacheLineSize;
    322  // "adding on the padding area doesn't change the total number of pages
    323  //  required"
    324  MOZ_ASSERT(AlignBytes(codeLength + padding, systemPageSize) ==
    325             AlignBytes(codeLength, systemPageSize));
    326  return padding;
    327 }
    328 
/* static */
// Allocate and initialize space for `codeSource` in an executable segment.
// If `segmentPool` is given, the last pooled segment is reused when it has
// room; otherwise a new segment is created (and appended to the pool, if
// any).  Outputs the address the code was copied to (`codeStart`, after any
// random icache-spreading padding) and the total claimed size
// (`allocationLength`, padding included).  Returns the owning segment, or
// nullptr on failure.
SharedCodeSegment CodeSegment::allocate(const CodeSource& codeSource,
                                        SharedCodeSegmentVector* segmentPool,
                                        bool allowLastDitchGC,
                                        uint8_t** codeStart,
                                        uint32_t* allocationLength) {
  mozilla::Maybe<AutoMarkJitCodeWritableForThread> writable;
  uint32_t codeLength = codeSource.lengthBytes();
  uint32_t paddingLength = RandomPaddingForCodeLength(codeLength);
  *allocationLength =
      CodeSegment::AlignAllocationBytes(paddingLength + codeLength);

  // If we have a pool of segments, try to find one that has enough space. We
  // just check the last segment in the pool for simplicity.
  SharedCodeSegment segment;
  if (segmentPool && !segmentPool->empty() &&
      segmentPool->back()->hasSpace(*allocationLength)) {
    segment = segmentPool->back();
  } else {
    uint32_t newSegmentCapacity =
        RoundupExecutableCodePageSize(*allocationLength);
    segment =
        CodeSegment::create(writable, newSegmentCapacity, allowLastDitchGC);
    if (!segment) {
      return nullptr;
    }
    if (segmentPool && !segmentPool->append(segment)) {
      return nullptr;
    }
  }

  // Claim space in the segment we found or created
  uint8_t* allocationStart = nullptr;
  segment->claimSpace(*allocationLength, &allocationStart);
  *codeStart = allocationStart + paddingLength;

  // Check our constraints
  MOZ_ASSERT(CodeSegment::IsAligned(uintptr_t(segment->base())));
  MOZ_ASSERT(CodeSegment::IsAligned(allocationStart - segment->base()));
  MOZ_ASSERT(CodeSegment::IsAligned(uintptr_t(allocationStart)));
  MOZ_ASSERT(*codeStart >= allocationStart);
  MOZ_ASSERT(codeLength <= *allocationLength);
  MOZ_ASSERT_IF(JitOptions.writeProtectCode,
                uintptr_t(allocationStart) % gc::SystemPageSize() == 0 &&
                    *allocationLength % gc::SystemPageSize() == 0);
  MOZ_ASSERT(uintptr_t(*codeStart) % jit::CodeAlignment == 0);

  // `writable` is already emplaced if we allocated a fresh segment above;
  // when reusing a pooled segment we must mark the thread writable here.
  if (!writable) {
    writable.emplace();
  }
  if (!codeSource.copyAndLink(*writable, *codeStart)) {
    return nullptr;
  }

  // Clear the padding between the end of the code and the end of the
  // allocation.
  uint8_t* allocationEnd = allocationStart + *allocationLength;
  uint8_t* codeEnd = *codeStart + codeLength;
  MOZ_ASSERT(codeEnd <= allocationEnd);
  size_t paddingAfterCode = allocationEnd - codeEnd;
  // The swept code pattern is guaranteed to crash if it is ever executed.
  memset(codeEnd, JS_SWEPT_CODE_PATTERN, paddingAfterCode);

  // Optimized compilation finishes on a background thread, so we must make sure
  // to flush the icaches of all the executing threads.
  // Reprotect the whole region to avoid having separate RW and RX mappings.
  if (*allocationLength != 0 &&
      !ExecutableAllocator::makeExecutableAndFlushICache(allocationStart,
                                                         *allocationLength)) {
    return nullptr;
  }

  return segment;
}
    403 
    404 void CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
    405                                size_t* data) const {
    406  *code += capacityBytes();
    407  *data += mallocSizeOf(this);
    408 }
    409 
    410 size_t CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
    411  return mallocSizeOf(get());
    412 }
    413 
// Chunk size of the LifoAlloc backing lazy entry-stub compilation below.
static constexpr unsigned LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE = 8 * 1024;
    415 
// Compile entry stubs (interp entry, plus a jit entry where the function
// type supports one) for every function export listed in `funcExportIndices`
// of `tierCodeBlock`, place them in a new lazy-stub code block, and record a
// LazyFuncExport for each function in guard->lazyExports (kept sorted by
// function index).  On success *stubBlockIndex is the index of the new block
// in guard->blocks.  Must be called with the Code write lock held.
bool Code::createManyLazyEntryStubs(const WriteGuard& guard,
                                    const Uint32Vector& funcExportIndices,
                                    const CodeBlock& tierCodeBlock,
                                    size_t* stubBlockIndex) const {
  MOZ_ASSERT(funcExportIndices.length());

  LifoAlloc lifo(LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE, js::MallocArena);
  TempAllocator alloc(&lifo);
  JitContext jitContext;
  WasmMacroAssembler masm(alloc);

  const FuncExportVector& funcExports = tierCodeBlock.funcExports;
  uint8_t* codeBase = tierCodeBlock.base();

  // Generate the stub code for each export into `masm`, collecting the code
  // ranges it emits.
  CodeRangeVector codeRanges;
  DebugOnly<uint32_t> numExpectedRanges = 0;
  for (uint32_t funcExportIndex : funcExportIndices) {
    const FuncExport& fe = funcExports[funcExportIndex];
    const FuncType& funcType = codeMeta_->getFuncType(fe.funcIndex());
    // Exports that don't support a jit entry get only the interp entry.
    numExpectedRanges += (funcType.canHaveJitEntry() ? 2 : 1);
    void* calleePtr =
        codeBase + tierCodeBlock.codeRange(fe).funcUncheckedCallEntry();
    Maybe<ImmPtr> callee;
    callee.emplace(calleePtr, ImmPtr::NoCheckToken());
    if (!GenerateEntryStubs(masm, funcExportIndex, fe, funcType, callee,
                            /* asmjs */ false, &codeRanges)) {
      return false;
    }
  }
  MOZ_ASSERT(codeRanges.length() == numExpectedRanges,
             "incorrect number of entries per function");

  masm.finish();

  // Entry stubs carry no metadata beyond their code ranges.
  MOZ_ASSERT(masm.inliningContext().empty());
  MOZ_ASSERT(masm.callSites().empty());
  MOZ_ASSERT(masm.callSiteTargets().empty());
  MOZ_ASSERT(masm.trapSites().empty());
  MOZ_ASSERT(masm.tryNotes().empty());
  MOZ_ASSERT(masm.codeRangeUnwindInfos().empty());

  if (masm.oom()) {
    return false;
  }

  UniqueCodeBlock stubCodeBlock =
      MakeUnique<CodeBlock>(CodeBlockKind::LazyStubs);
  if (!stubCodeBlock) {
    return false;
  }

  // Allocate space in a code segment we can use
  uint32_t codeLength = masm.bytesNeeded();
  uint8_t* codeStart;
  uint32_t allocationLength;
  CodeSource codeSource(masm, nullptr, nullptr);
  stubCodeBlock->segment = CodeSegment::allocate(
      codeSource, &guard->lazyStubSegments,
      /* allowLastDitchGC = */ true, &codeStart, &allocationLength);
  if (!stubCodeBlock->segment) {
    return false;
  }

  stubCodeBlock->codeBase = codeStart;
  stubCodeBlock->codeLength = codeLength;
  stubCodeBlock->codeRanges = std::move(codeRanges);

  *stubBlockIndex = guard->blocks.length();

  // Reserve lazyExports capacity up front so the inserts below can't fail.
  if (!guard->lazyExports.reserve(guard->lazyExports.length() +
                                  funcExportIndices.length()) ||
      !addCodeBlock(guard, std::move(stubCodeBlock), nullptr)) {
    return false;
  }

  // Everything after this point must be guaranteed to succeed. A failure after
  // this point can leave things in an inconsistent state, and be observed if we
  // retry to create a lazy stub.

  uint32_t codeRangeIndex = 0;
  for (uint32_t funcExportIndex : funcExportIndices) {
    const FuncExport& fe = funcExports[funcExportIndex];
    const FuncType& funcType = codeMeta_->getFuncType(fe.funcIndex());

    LazyFuncExport lazyExport(fe.funcIndex(), *stubBlockIndex, codeRangeIndex,
                              tierCodeBlock.kind);

    // Advance past the range(s) this function produced: interp entry, plus
    // jit entry when present.
    codeRangeIndex += 1;

    if (funcType.canHaveJitEntry()) {
      codeRangeIndex += 1;
    }

    size_t exportIndex;
    const uint32_t targetFunctionIndex = fe.funcIndex();

    // NOTE(review): unsigned subtraction used as a three-way comparator;
    // relies on conversion to a signed result inside BinarySearchIf — confirm
    // against the mfbt BinarySearch.h contract.
    if (BinarySearchIf(
            guard->lazyExports, 0, guard->lazyExports.length(),
            [targetFunctionIndex](const LazyFuncExport& funcExport) {
              return targetFunctionIndex - funcExport.funcIndex;
            },
            &exportIndex)) {
      // Replacing an existing entry: only allowed when upgrading from a
      // shared-stub or baseline-tier stub.
      DebugOnly<CodeBlockKind> oldKind =
          guard->lazyExports[exportIndex].funcKind;
      MOZ_ASSERT(oldKind == CodeBlockKind::SharedStubs ||
                 oldKind == CodeBlockKind::BaselineTier);
      guard->lazyExports[exportIndex] = std::move(lazyExport);
    } else {
      // We reserved memory earlier, this should not fail.
      MOZ_RELEASE_ASSERT(guard->lazyExports.insert(
          guard->lazyExports.begin() + exportIndex, std::move(lazyExport)));
    }
  }

  guard->blocks[*stubBlockIndex]->sendToProfiler(
      *codeMeta_, *codeTailMeta_, codeMetaForAsmJS_, FuncIonPerfSpewerSpan(),
      FuncBaselinePerfSpewerSpan());
  return true;
}
    536 
// Create the lazy entry stub(s) for a single exported function and return
// its interp entry via `interpEntry`.  If the function type supports a jit
// entry, the newly created jit entry is also published to the jump table.
// Must be called with the Code write lock held.
bool Code::createOneLazyEntryStub(const WriteGuard& guard,
                                  uint32_t funcExportIndex,
                                  const CodeBlock& tierCodeBlock,
                                  void** interpEntry) const {
  Uint32Vector funcExportIndexes;
  if (!funcExportIndexes.append(funcExportIndex)) {
    return false;
  }

  size_t stubBlockIndex;
  if (!createManyLazyEntryStubs(guard, funcExportIndexes, tierCodeBlock,
                                &stubBlockIndex)) {
    return false;
  }

  const CodeBlock& block = *guard->blocks[stubBlockIndex];
  const CodeRangeVector& codeRanges = block.codeRanges;

  const FuncExport& fe = tierCodeBlock.funcExports[funcExportIndex];
  const FuncType& funcType = codeMeta_->getFuncType(fe.funcIndex());

  // We created one or two stubs, depending on the function type.
  uint32_t funcEntryRanges = funcType.canHaveJitEntry() ? 2 : 1;
  MOZ_ASSERT(codeRanges.length() >= funcEntryRanges);

  // The first created range is the interp entry
  const CodeRange& interpRange =
      codeRanges[codeRanges.length() - funcEntryRanges];
  MOZ_ASSERT(interpRange.isInterpEntry());
  *interpEntry = block.base() + interpRange.begin();

  // The second created range is the jit entry
  if (funcType.canHaveJitEntry()) {
    const CodeRange& jitRange =
        codeRanges[codeRanges.length() - funcEntryRanges + 1];
    MOZ_ASSERT(jitRange.isJitEntry());
    jumpTables_.setJitEntry(jitRange.funcIndex(),
                            block.base() + jitRange.begin());
  }
  return true;
}
    578 
// Look up (or lazily create) the interp entry for `funcIndex`, returning the
// function's FuncExport as well.  Eager stubs are returned without locking;
// otherwise the write lock is taken to find or compile a lazy stub.
bool Code::getOrCreateInterpEntry(uint32_t funcIndex,
                                  const FuncExport** funcExport,
                                  void** interpEntry) const {
  size_t funcExportIndex;
  const CodeBlock& codeBlock = funcCodeBlock(funcIndex);
  *funcExport = &codeBlock.lookupFuncExport(funcIndex, &funcExportIndex);

  const FuncExport& fe = **funcExport;
  if (fe.hasEagerStubs()) {
    *interpEntry = codeBlock.base() + fe.eagerInterpEntryOffset();
    return true;
  }

  MOZ_ASSERT(!codeMetaForAsmJS_, "only wasm can lazily export functions");

  auto guard = data_.writeLock();
  // Another thread may have created the stub while we were unlocked.
  *interpEntry = lookupLazyInterpEntry(guard, funcIndex);
  if (*interpEntry) {
    return true;
  }

  return createOneLazyEntryStub(guard, funcExportIndex, codeBlock, interpEntry);
}
    602 
// After tier-2 code arrives, regenerate lazy entry stubs against `tier2Code`
// for every function that already has a baseline-tier lazy stub, so exported
// functions keep pointing at the best tier.  *outStubBlockIndex is set only
// if any stubs were created.  Must be called with the Code write lock held.
bool Code::createTier2LazyEntryStubs(const WriteGuard& guard,
                                     const CodeBlock& tier2Code,
                                     Maybe<size_t>* outStubBlockIndex) const {
  if (!guard->lazyExports.length()) {
    return true;
  }

  Uint32Vector funcExportIndices;
  if (!funcExportIndices.reserve(guard->lazyExports.length())) {
    return false;
  }

  // Collect the exports of tier2Code that currently have a lazy stub.
  for (size_t i = 0; i < tier2Code.funcExports.length(); i++) {
    const FuncExport& fe = tier2Code.funcExports[i];
    const LazyFuncExport* lfe = lookupLazyFuncExport(guard, fe.funcIndex());
    if (lfe) {
      MOZ_ASSERT(lfe->funcKind == CodeBlockKind::BaselineTier);
      funcExportIndices.infallibleAppend(i);
    }
  }

  if (funcExportIndices.length() == 0) {
    return true;
  }

  size_t stubBlockIndex;
  if (!createManyLazyEntryStubs(guard, funcExportIndices, tier2Code,
                                &stubBlockIndex)) {
    return false;
  }

  outStubBlockIndex->emplace(stubBlockIndex);
  return true;
}
    637 
// Helper-thread task that compiles a single function at tier 2 for lazy
// tiering.  The task deletes itself after running; cancel() turns the run
// into a no-op (the task is still deleted).
class Module::PartialTier2CompileTaskImpl : public PartialTier2CompileTask {
  // Keeps the Code alive for the lifetime of the task.
  const SharedCode code_;
  // Index of the function to compile.
  uint32_t funcIndex_;
  // Set by cancel(); also passed into compilation so it can abort mid-way.
  Atomic<bool> cancelled_;

 public:
  PartialTier2CompileTaskImpl(const Code& code, uint32_t funcIndex)
      : code_(&code), funcIndex_(funcIndex), cancelled_(false) {}

  void cancel() override { cancelled_ = true; }

  void runHelperThreadTask(AutoLockHelperThreadState& locked) override {
    if (!cancelled_) {
      // Compile without holding the helper-thread lock.
      AutoUnlockHelperThreadState unlock(locked);

      // In the case `!success && !cancelled_`, compilation has failed
      // and this function will be stuck in state TierUpState::Requested
      // forever.
      UniqueChars error;
      UniqueCharsVector warnings;
      bool success = CompilePartialTier2(*code_, funcIndex_, &error, &warnings,
                                         &cancelled_);
      ReportTier2ResultsOffThread(
          cancelled_, success, mozilla::Some(funcIndex_),
          code_->codeMeta().scriptedCaller(), error, warnings);
    }

    // The task is finished, release it.
    js_delete(this);
  }

  ThreadType threadType() override {
    return ThreadType::THREAD_TYPE_WASM_COMPILE_PARTIAL_TIER2;
  }
};
    673 
// Request off-thread tier-2 compilation of `funcIndex`.  Only the first
// request per function starts a task (the state CAS NotRequested->Requested
// succeeds once); subsequent requests are no-ops that return true.
bool Code::requestTierUp(uint32_t funcIndex) const {
  // Note: this runs on the requesting (wasm-running) thread, not on a
  // compilation-helper thread.
  MOZ_ASSERT(mode_ == CompileMode::LazyTiering);
  FuncState& state = funcStates_[funcIndex - codeMeta_->numFuncImports];
  if (!state.tierUpState.compareExchange(TierUpState::NotRequested,
                                         TierUpState::Requested)) {
    // Tier-up was already requested for this function.
    return true;
  }

  auto task =
      js::MakeUnique<Module::PartialTier2CompileTaskImpl>(*this, funcIndex);
  if (!task) {
    // Effect is (I think), if we OOM here, the request is ignored.
    // See bug 1911060.
    return false;
  }

  StartOffThreadWasmPartialTier2Compile(std::move(task));
  return true;
}
    695 
// Publish a completed tier-2 code block: merge stats, register the block,
// compile tier-2 lazy entry stubs for extant exports, flush execution
// contexts, flip the best-tier state (module-wide for eager tiering,
// per-function for lazy tiering), and finally redirect the jump tables to
// the tier-2 entries.
bool Code::finishTier2(UniqueCodeBlock tier2CodeBlock,
                       UniqueLinkData tier2LinkData,
                       const CompileAndLinkStats& tier2Stats) const {
  MOZ_RELEASE_ASSERT(mode_ == CompileMode::EagerTiering ||
                     mode_ == CompileMode::LazyTiering);
  MOZ_RELEASE_ASSERT(hasCompleteTier2_ == false &&
                     tier2CodeBlock->tier() == Tier::Optimized);
  // Acquire the write guard before we start mutating anything. We hold this
  // for the minimum amount of time necessary.
  CodeBlock* tier2CodePointer;
  {
    auto guard = data_.writeLock();

    // Record the tier2 stats.
    guard->tier2Stats.merge(tier2Stats);

    // Borrow the tier2 pointer before moving it into the block vector. This
    // ensures we maintain the invariant that completeTier2_ is never read if
    // hasCompleteTier2_ is false.
    tier2CodePointer = tier2CodeBlock.get();

    // Publish this code to the process wide map.
    if (!addCodeBlock(guard, std::move(tier2CodeBlock),
                      std::move(tier2LinkData))) {
      return false;
    }

    // Before we can make tier-2 live, we need to compile tier2 versions of any
    // extant tier1 lazy stubs (otherwise, tiering would break the assumption
    // that any extant exported wasm function has had a lazy entry stub already
    // compiled for it).
    //
    // Also see doc block for stubs in WasmJS.cpp.
    Maybe<size_t> stub2Index;
    if (!createTier2LazyEntryStubs(guard, *tier2CodePointer, &stub2Index)) {
      return false;
    }

    // Initializing the code above will have flushed the icache for all cores.
    // However, there could still be stale data in the execution pipeline of
    // other cores on some platforms. Force an execution context flush on all
    // threads to fix this before we commit the code.
    //
    // This is safe due to the check in `PlatformCanTier` in WasmCompile.cpp
    jit::FlushExecutionContextForAllThreads();

    // Now that we can't fail or otherwise abort tier2, make it live.
    if (mode_ == CompileMode::EagerTiering) {
      completeTier2_ = tier2CodePointer;
      hasCompleteTier2_ = true;

      // We don't need to update funcStates, because we're doing eager tiering
      MOZ_ASSERT(!funcStates_.get());
    } else {
      // Lazy tiering: mark every function in the block as finished, with
      // tier-2 as its best tier.
      for (const CodeRange& cr : tier2CodePointer->codeRanges) {
        if (!cr.isFunction()) {
          continue;
        }
        FuncState& state =
            funcStates_.get()[cr.funcIndex() - codeMeta_->numFuncImports];
        state.bestTier = tier2CodePointer;
        state.tierUpState = TierUpState::Finished;
      }
    }

    // Update jump vectors with pointers to tier-2 lazy entry stubs, if any.
    if (stub2Index) {
      const CodeBlock& block = *guard->blocks[*stub2Index];
      for (const CodeRange& cr : block.codeRanges) {
        if (!cr.isJitEntry()) {
          continue;
        }
        jumpTables_.setJitEntry(cr.funcIndex(), block.base() + cr.begin());
      }
    }
  }

  // And we update the jump vectors with pointers to tier-2 functions and eager
  // stubs.  Callers will continue to invoke tier-1 code until, suddenly, they
  // will invoke tier-2 code.  This is benign.
  uint8_t* base = tier2CodePointer->base();
  for (const CodeRange& cr : tier2CodePointer->codeRanges) {
    // These are racy writes that we just want to be visible, atomically,
    // eventually.  All hardware we care about will do this right.  But
    // we depend on the compiler not splitting the stores hidden inside the
    // set*Entry functions.
    if (cr.isFunction()) {
      jumpTables_.setTieringEntry(cr.funcIndex(), base + cr.funcTierEntry());
    } else if (cr.isJitEntry()) {
      jumpTables_.setJitEntry(cr.funcIndex(), base + cr.begin());
    }
  }
  return true;
}
    790 
// Take ownership of a fully-built CodeBlock (and, optionally, its link data),
// append it to this Code's block list, and publish it in the code-block map.
// `guard` is the caller-held write lock over this Code's protected data.
// Returns false on OOM, leaving the Code in a consistent state.
bool Code::addCodeBlock(const WriteGuard& guard, UniqueCodeBlock block,
                        UniqueLinkData maybeLinkData) const {
  // Don't bother saving the link data if the block won't be serialized
  if (maybeLinkData && !block->isSerializable()) {
    maybeLinkData = nullptr;
  }

  // Borrow a raw pointer before `block` is moved below; the block's index is
  // its future position at the end of the vector.
  CodeBlock* blockPtr = block.get();
  size_t codeBlockIndex = guard->blocks.length();

  // Reserve space in both vectors up front so the appends at the bottom are
  // infallible; this keeps `blocks` and `blocksLinkData` the same length even
  // if a later step fails.
  if (!guard->blocks.reserve(guard->blocks.length() + 1) ||
      !guard->blocksLinkData.reserve(guard->blocksLinkData.length() + 1)) {
    return false;
  }

  // If anything fails here, be careful to reset our state back so that we are
  // not in an inconsistent state.
  if (!blockPtr->initialize(*this, codeBlockIndex)) {
    return false;
  }

  if (!blockMap_.insert(blockPtr)) {
    // We don't need to deinitialize the blockPtr, because that will be
    // automatically handled by its destructor.
    return false;
  }

  // Cannot fail, thanks to the reserve() calls above.
  guard->blocks.infallibleAppend(std::move(block));
  guard->blocksLinkData.infallibleAppend(std::move(maybeLinkData));

  return true;
}
    823 
// Allocate executable memory for the code assembled in `masm` from this
// Code's pool of lazily-compiled-function segments.  On success,
// *codeStartOut points at the reserved code area and *codeLengthOut is the
// number of bytes `masm` needs.  Returns nullptr on allocation failure.
// This path is only used for tier-2 code, so bytes are charged to the
// tier-2 stats.
SharedCodeSegment Code::createFuncCodeSegmentFromPool(
    jit::MacroAssembler& masm, const LinkData& linkData, bool allowLastDitchGC,
    uint8_t** codeStartOut, uint32_t* codeLengthOut) const {
  uint32_t codeLength = masm.bytesNeeded();

  // Allocate the code segment
  uint8_t* codeStart;
  uint32_t allocationLength;
  SharedCodeSegment segment;
  {
    // Hold the write lock only for the allocation and the stats update.
    auto guard = data_.writeLock();
    CodeSource codeSource(masm, &linkData, this);
    segment =
        CodeSegment::allocate(codeSource, &guard->lazyFuncSegments,
                              allowLastDitchGC, &codeStart, &allocationLength);
    if (!segment) {
      return nullptr;
    }

    // This function is always used with tier-2
    guard->tier2Stats.codeBytesMapped += allocationLength;
    guard->tier2Stats.codeBytesUsed += codeLength;
  }

  *codeStartOut = codeStart;
  *codeLengthOut = codeLength;
  return segment;
}
    852 
// Binary-search the sorted lazyExports vector for the entry describing
// `funcIndex`.  Returns nullptr if no lazy stub exists for that function.
const LazyFuncExport* Code::lookupLazyFuncExport(const WriteGuard& guard,
                                                 uint32_t funcIndex) const {
  size_t match;
  if (!BinarySearchIf(
          guard->lazyExports, 0, guard->lazyExports.length(),
          // NOTE(review): the unsigned subtraction wraps to a negative int
          // when funcIndex < funcExport.funcIndex, yielding the tri-state
          // value BinarySearchIf expects — assumes indices differ by less
          // than 2^31.
          [funcIndex](const LazyFuncExport& funcExport) {
            return funcIndex - funcExport.funcIndex;
          },
          &match)) {
    return nullptr;
  }
  return &guard->lazyExports[match];
}
    866 
    867 void* Code::lookupLazyInterpEntry(const WriteGuard& guard,
    868                                  uint32_t funcIndex) const {
    869  const LazyFuncExport* fe = lookupLazyFuncExport(guard, funcIndex);
    870  if (!fe) {
    871    return nullptr;
    872  }
    873  const CodeBlock& block = *guard->blocks[fe->lazyStubBlockIndex];
    874  return block.base() + block.codeRanges[fe->funcCodeRangeIndex].begin();
    875 }
    876 
    877 CodeBlock::~CodeBlock() {
    878  if (unregisterOnDestroy_) {
    879    UnregisterCodeBlock(this);
    880  }
    881 }
    882 
// Bind this block to its owning Code and register it in the process-wide
// code-block map.  Returns false on failure, after which the block is still
// safe to destroy.
bool CodeBlock::initialize(const Code& code, size_t codeBlockIndex) {
  MOZ_ASSERT(!initialized());
  this->code = &code;
  this->codeBlockIndex = codeBlockIndex;
  segment->setCode(code);

  // In the case of tiering, RegisterCodeBlock() immediately makes this code
  // block live to access from other threads executing the containing
  // module. So only call once the CodeBlock is fully initialized.
  if (!RegisterCodeBlock(this)) {
    return false;
  }

  // This bool is only used by the destructor which cannot be called racily
  // and so it is not a problem to mutate it after RegisterCodeBlock().
  MOZ_ASSERT(!unregisterOnDestroy_);
  unregisterOnDestroy_ = true;

  MOZ_ASSERT(initialized());
  return true;
}
    904 
// Build a human-readable profiler label for `codeRange`, of the form
// "<category>: <filename>: Function <name><suffix> (WASM:<index>)".
// Returns nullptr on OOM or if the name lookup fails.
static JS::UniqueChars DescribeCodeRangeForProfiler(
    const wasm::CodeMetadata& codeMeta,
    const wasm::CodeTailMetadata& codeTailMeta,
    const CodeMetadataForAsmJS* codeMetaForAsmJS, const CodeRange& codeRange,
    CodeBlockKind codeBlockKind) {
  uint32_t funcIndex = codeRange.funcIndex();
  // asm.js modules carry their names separately from the wasm name section.
  UTF8Bytes name;
  bool ok;
  if (codeMetaForAsmJS) {
    ok = codeMetaForAsmJS->getFuncNameForAsmJS(funcIndex, &name);
  } else {
    ok = codeMeta.getFuncNameForWasm(NameContext::Standalone, funcIndex,
                                     codeTailMeta.nameSectionPayload.get(),
                                     &name);
  }
  if (!ok) {
    return nullptr;
  }
  // NUL-terminate so name.begin() can be passed to printf-style formatting.
  if (!name.append('\0')) {
    return nullptr;
  }

  // Classify the range; ranges not matched below keep empty strings.
  const char* category = "";
  const char* filename = codeMeta.scriptedCaller().filename.get();
  const char* suffix = "";
  if (codeRange.isFunction()) {
    category = "Wasm";
    if (codeBlockKind == CodeBlockKind::BaselineTier) {
      suffix = " [baseline]";
    } else if (codeBlockKind == CodeBlockKind::OptimizedTier) {
      suffix = " [optimized]";
    }
  } else if (codeRange.isInterpEntry()) {
    category = "WasmTrampoline";
    suffix = " slow entry";
  } else if (codeRange.isJitEntry()) {
    category = "WasmTrampoline";
    suffix = " fast entry";
  } else if (codeRange.isImportInterpExit()) {
    category = "WasmTrampoline";
    suffix = " slow exit";
  } else if (codeRange.isImportJitExit()) {
    category = "WasmTrampoline";
    suffix = " fast exit";
  }

  return JS_smprintf("%s: %s: Function %s%s (WASM:%u)", category, filename,
                     name.begin(), suffix, funcIndex);
}
    954 
// Report this block's code ranges to any active profiler (perf and/or
// VTune).  `ionSpewers`/`baselineSpewers` carry per-function info collected
// during compilation; functions covered by a spewer are reported through it,
// and every other range with a func index gets a plain descriptive label.
void CodeBlock::sendToProfiler(
    const CodeMetadata& codeMeta, const CodeTailMetadata& codeTailMeta,
    const CodeMetadataForAsmJS* codeMetaForAsmJS,
    FuncIonPerfSpewerSpan ionSpewers,
    FuncBaselinePerfSpewerSpan baselineSpewers) const {
  // Bail out quickly when no profiler wants the data.
  bool enabled = false;
  enabled |= PerfEnabled();
#ifdef MOZ_VTUNE
  enabled |= vtune::IsProfilingActive();
#endif
  if (!enabled) {
    return;
  }

  // We only ever have ion or baseline spewers, and they correspond with our
  // code block kind.
  MOZ_ASSERT(ionSpewers.empty() || baselineSpewers.empty());
  MOZ_ASSERT_IF(kind == CodeBlockKind::BaselineTier, ionSpewers.empty());
  MOZ_ASSERT_IF(kind == CodeBlockKind::OptimizedTier, baselineSpewers.empty());
  bool hasSpewers = !ionSpewers.empty() || !baselineSpewers.empty();

  // Save the collected Ion perf spewers with their IR/source information.
  for (FuncIonPerfSpewer& funcIonSpewer : ionSpewers) {
    const CodeRange& codeRange = this->codeRange(funcIonSpewer.funcIndex);
    UniqueChars desc = DescribeCodeRangeForProfiler(
        codeMeta, codeTailMeta, codeMetaForAsmJS, codeRange, kind);
    if (!desc) {
      // OOM while building the label; drop the remaining reports.
      return;
    }
    uintptr_t start = uintptr_t(base() + codeRange.begin());
    uintptr_t size = codeRange.end() - codeRange.begin();
    funcIonSpewer.spewer.saveWasmProfile(start, size, desc);
  }

  // Save the collected baseline perf spewers with their IR/source information.
  for (FuncBaselinePerfSpewer& funcBaselineSpewer : baselineSpewers) {
    const CodeRange& codeRange = this->codeRange(funcBaselineSpewer.funcIndex);
    UniqueChars desc = DescribeCodeRangeForProfiler(
        codeMeta, codeTailMeta, codeMetaForAsmJS, codeRange, kind);
    if (!desc) {
      return;
    }
    uintptr_t start = uintptr_t(base() + codeRange.begin());
    uintptr_t size = codeRange.end() - codeRange.begin();
    funcBaselineSpewer.spewer.saveProfile(start, size, desc);
  }

  // Save the rest of the code ranges.
  for (const CodeRange& codeRange : codeRanges) {
    if (!codeRange.hasFuncIndex()) {
      continue;
    }

    // Skip functions when they have corresponding spewers, as they will have
    // already handled the function.
    if (codeRange.isFunction() && hasSpewers) {
      continue;
    }

    UniqueChars desc = DescribeCodeRangeForProfiler(
        codeMeta, codeTailMeta, codeMetaForAsmJS, codeRange, kind);
    if (!desc) {
      return;
    }

    uintptr_t start = uintptr_t(base() + codeRange.begin());
    uintptr_t size = codeRange.end() - codeRange.begin();

#ifdef MOZ_VTUNE
    if (vtune::IsProfilingActive()) {
      vtune::MarkWasm(vtune::GenerateUniqueMethodID(), desc.get(), (void*)start,
                      size);
    }
#endif

    if (PerfEnabled()) {
      CollectPerfSpewerWasmMap(start, size, std::move(desc));
    }
  }
}
   1035 
   1036 void CodeBlock::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
   1037                              size_t* data) const {
   1038  segment->addSizeOfMisc(mallocSizeOf, code, data);
   1039  *data += funcToCodeRange.sizeOfExcludingThis(mallocSizeOf) +
   1040           codeRanges.sizeOfExcludingThis(mallocSizeOf) +
   1041           inliningContext.sizeOfExcludingThis(mallocSizeOf) +
   1042           callSites.sizeOfExcludingThis(mallocSizeOf) +
   1043           tryNotes.sizeOfExcludingThis(mallocSizeOf) +
   1044           codeRangeUnwindInfos.sizeOfExcludingThis(mallocSizeOf) +
   1045           trapSites.sizeOfExcludingThis(mallocSizeOf) +
   1046           stackMaps.sizeOfExcludingThis(mallocSizeOf) +
   1047           funcExports.sizeOfExcludingThis(mallocSizeOf);
   1048  ;
   1049 }
   1050 
   1051 const CodeRange* CodeBlock::lookupRange(const void* pc) const {
   1052  CodeRange::OffsetInCode target((uint8_t*)pc - base());
   1053  return LookupInSorted(codeRanges, target);
   1054 }
   1055 
   1056 bool CodeBlock::lookupCallSite(void* pc, CallSite* callSite) const {
   1057  uint32_t target = ((uint8_t*)pc) - base();
   1058  return callSites.lookup(target, inliningContext, callSite);
   1059 }
   1060 
   1061 const StackMap* CodeBlock::lookupStackMap(uint8_t* pc) const {
   1062  // We need to subtract the offset from the beginning of the codeblock.
   1063  uint32_t offsetInCodeBlock = pc - base();
   1064  return stackMaps.lookup(offsetInCodeBlock);
   1065 }
   1066 
   1067 const wasm::TryNote* CodeBlock::lookupTryNote(const void* pc) const {
   1068  size_t target = (uint8_t*)pc - base();
   1069 
   1070  // We find the first hit (there may be multiple) to obtain the innermost
   1071  // handler, which is why we cannot binary search here.
   1072  for (const auto& tryNote : tryNotes) {
   1073    if (tryNote.offsetWithinTryBody(target)) {
   1074      return &tryNote;
   1075    }
   1076  }
   1077 
   1078  return nullptr;
   1079 }
   1080 
   1081 bool CodeBlock::lookupTrap(void* pc, Trap* kindOut, TrapSite* trapOut) const {
   1082  MOZ_ASSERT(containsCodePC(pc));
   1083  uint32_t target = ((uint8_t*)pc) - base();
   1084  return trapSites.lookup(target, inliningContext, kindOut, trapOut);
   1085 }
   1086 
// Adapter that lets mozilla::BinarySearch treat the unwind-info vector as a
// random-access array of code offsets.
struct UnwindInfoPCOffset {
  const CodeRangeUnwindInfoVector& info;
  explicit UnwindInfoPCOffset(const CodeRangeUnwindInfoVector& info)
      : info(info) {}
  uint32_t operator[](size_t index) const { return info[index].offset(); }
};
   1093 
// Find the unwind info in effect at `pc`, or nullptr if the code there
// unwinds normally.  codeRangeUnwindInfos is sorted by offset; an entry is
// in effect from its offset until the next entry's offset.
const CodeRangeUnwindInfo* CodeBlock::lookupUnwindInfo(void* pc) const {
  uint32_t target = ((uint8_t*)pc) - base();
  size_t match;
  const CodeRangeUnwindInfo* info = nullptr;
  if (BinarySearch(UnwindInfoPCOffset(codeRangeUnwindInfos), 0,
                   codeRangeUnwindInfos.length(), target, &match)) {
    info = &codeRangeUnwindInfos[match];
  } else {
    // Exact match is not found, using insertion point to get the previous
    // info entry; skip if info is outside of codeRangeUnwindInfos.
    if (match == 0) return nullptr;
    if (match == codeRangeUnwindInfos.length()) {
      // Past the last entry: the final entry must be a return to normal
      // unwinding, so nothing special is in effect at `pc`.
      MOZ_ASSERT(
          codeRangeUnwindInfos[codeRangeUnwindInfos.length() - 1].unwindHow() ==
          CodeRangeUnwindInfo::Normal);
      return nullptr;
    }
    info = &codeRangeUnwindInfos[match - 1];
  }
  // A `Normal` entry only terminates a preceding special region.
  return info->unwindHow() == CodeRangeUnwindInfo::Normal ? nullptr : info;
}
   1115 
// Adapter that lets mozilla::BinarySearch treat the func-export vector as a
// random-access array of function indices.
struct ProjectFuncIndex {
  const FuncExportVector& funcExports;
  explicit ProjectFuncIndex(const FuncExportVector& funcExports)
      : funcExports(funcExports) {}
  uint32_t operator[](size_t index) const {
    return funcExports[index].funcIndex();
  }
};
   1124 
   1125 FuncExport& CodeBlock::lookupFuncExport(
   1126    uint32_t funcIndex, size_t* funcExportIndex /* = nullptr */) {
   1127  size_t match;
   1128  if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(),
   1129                    funcIndex, &match)) {
   1130    MOZ_CRASH("missing function export");
   1131  }
   1132  if (funcExportIndex) {
   1133    *funcExportIndex = match;
   1134  }
   1135  return funcExports[match];
   1136 }
   1137 
   1138 const FuncExport& CodeBlock::lookupFuncExport(uint32_t funcIndex,
   1139                                              size_t* funcExportIndex) const {
   1140  return const_cast<CodeBlock*>(this)->lookupFuncExport(funcIndex,
   1141                                                        funcExportIndex);
   1142 }
   1143 
   1144 bool JumpTables::initialize(CompileMode mode, const CodeMetadata& codeMeta,
   1145                            const CodeBlock& sharedStubs,
   1146                            const CodeBlock& tier1) {
   1147  static_assert(JSScript::offsetOfJitCodeRaw() == 0,
   1148                "wasm fast jit entry is at (void*) jit[funcIndex]");
   1149 
   1150  mode_ = mode;
   1151  numFuncs_ = codeMeta.numFuncs();
   1152 
   1153  if (mode_ != CompileMode::Once) {
   1154    tiering_ = TablePointer(js_pod_calloc<void*>(numFuncs_));
   1155    if (!tiering_) {
   1156      return false;
   1157    }
   1158  }
   1159 
   1160  // The number of jit entries is overestimated, but it is simpler when
   1161  // filling/looking up the jit entries and safe (worst case we'll crash
   1162  // because of a null deref when trying to call the jit entry of an
   1163  // unexported function).
   1164  jit_ = TablePointer(js_pod_calloc<void*>(numFuncs_));
   1165  if (!jit_) {
   1166    return false;
   1167  }
   1168 
   1169  uint8_t* codeBase = sharedStubs.base();
   1170  for (const CodeRange& cr : sharedStubs.codeRanges) {
   1171    if (cr.isFunction()) {
   1172      setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
   1173    } else if (cr.isJitEntry()) {
   1174      setJitEntry(cr.funcIndex(), codeBase + cr.begin());
   1175    }
   1176  }
   1177 
   1178  codeBase = tier1.base();
   1179  for (const CodeRange& cr : tier1.codeRanges) {
   1180    if (cr.isFunction()) {
   1181      setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
   1182    } else if (cr.isJitEntry()) {
   1183      setJitEntry(cr.funcIndex(), codeBase + cr.begin());
   1184    }
   1185  }
   1186  return true;
   1187 }
   1188 
// Construct a Code holding only metadata references; all code-block pointers
// start null and initialize() must be called before use.
Code::Code(CompileMode mode, const CodeMetadata& codeMeta,
           const CodeTailMetadata& codeTailMeta,
           const CodeMetadataForAsmJS* codeMetaForAsmJS)
    : mode_(mode),
      data_(mutexid::WasmCodeProtected),
      codeMeta_(&codeMeta),
      codeTailMeta_(&codeTailMeta),
      codeMetaForAsmJS_(codeMetaForAsmJS),
      completeTier1_(nullptr),
      completeTier2_(nullptr),
      profilingLabels_(mutexid::WasmCodeProfilingLabels,
                       CacheableCharsVector()),
      trapCode_(nullptr),
      debugStubOffset_(0),
      requestTierUpStubOffset_(0),
      updateCallRefMetricsStubOffset_(0) {}
   1205 
   1206 Code::~Code() { printStats(); }
   1207 
// Log per-module compile statistics (function count, bytecode size, call_ref
// count, and per-tier stats).  Compiles to a no-op unless JS_JITSPEW is
// enabled.
void Code::printStats() const {
#ifdef JS_JITSPEW
  auto guard = data_.readLock();

  // Tag the output with the low bits of the CodeMetadata pointer so logs from
  // different modules can be told apart.
  JS_LOG(wasmPerf, Info, "CM=..%06lx  Code::~Code <<<<",
         0xFFFFFF & (unsigned long)uintptr_t(codeMeta_.get()));

  // Module information
  JS_LOG(wasmPerf, Info, "    %7zu functions in module", codeMeta_->numFuncs());
  JS_LOG(wasmPerf, Info, "    %7zu bytecode bytes in module",
         codeMeta_->codeSectionSize());
  // UINT32_MAX is mapped to zero here — presumably a "not counted" sentinel;
  // confirm against CodeTailMetadata.
  uint32_t numCallRefs = codeTailMeta_->numCallRefMetrics == UINT32_MAX
                             ? 0
                             : codeTailMeta_->numCallRefMetrics;
  JS_LOG(wasmPerf, Info, "    %7u call_refs in module", numCallRefs);

  // Tier information
  JS_LOG(wasmPerf, Info, "            ------ Tier 1 ------");
  guard->tier1Stats.print();
  if (mode() != CompileMode::Once) {
    JS_LOG(wasmPerf, Info, "            ------ Tier 2 ------");
    guard->tier2Stats.print();
  }

  JS_LOG(wasmPerf, Info, ">>>>");
#endif
}
   1235 
// Install the tier-1 compilation results into this Code: the shared-stubs
// block, the complete tier-1 block, the jump tables, and (for lazy tiering)
// the per-function tiering state.  Returns false on OOM.
bool Code::initialize(FuncImportVector&& funcImports,
                      UniqueCodeBlock sharedStubs,
                      UniqueLinkData sharedStubsLinkData,
                      UniqueCodeBlock tier1CodeBlock,
                      UniqueLinkData tier1LinkData,
                      const CompileAndLinkStats& tier1Stats) {
  funcImports_ = std::move(funcImports);

  auto guard = data_.writeLock();

  MOZ_ASSERT(guard->tier1Stats.empty());
  guard->tier1Stats = tier1Stats;

  // Borrow raw pointers before the unique pointers are moved into
  // addCodeBlock() below.
  sharedStubs_ = sharedStubs.get();
  completeTier1_ = tier1CodeBlock.get();
  trapCode_ = sharedStubs_->base() + sharedStubsLinkData->trapOffset;
  if (!jumpTables_.initialize(mode_, *codeMeta_, *sharedStubs_,
                              *completeTier1_) ||
      !addCodeBlock(guard, std::move(sharedStubs),
                    std::move(sharedStubsLinkData)) ||
      !addCodeBlock(guard, std::move(tier1CodeBlock),
                    std::move(tier1LinkData))) {
    return false;
  }

  if (mode_ == CompileMode::LazyTiering) {
    // Every defined (non-imported) function starts at tier 1 with no tier-up
    // request outstanding.
    uint32_t numFuncDefs = codeMeta_->numFuncs() - codeMeta_->numFuncImports;
    funcStates_ = FuncStatesPointer(js_pod_calloc<FuncState>(numFuncDefs));
    if (!funcStates_) {
      return false;
    }
    for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs;
         funcDefIndex++) {
      funcStates_.get()[funcDefIndex].bestTier = completeTier1_;
      funcStates_.get()[funcDefIndex].tierUpState = TierUpState::NotRequested;
    }
  }

  return true;
}
   1276 
   1277 Tiers Code::completeTiers() const {
   1278  if (hasCompleteTier2_) {
   1279    return Tiers(completeTier1_->tier(), completeTier2_->tier());
   1280  }
   1281  return Tiers(completeTier1_->tier());
   1282 }
   1283 
   1284 bool Code::hasCompleteTier(Tier t) const {
   1285  if (hasCompleteTier2_ && completeTier2_->tier() == t) {
   1286    return true;
   1287  }
   1288  return completeTier1_->tier() == t;
   1289 }
   1290 
   1291 Tier Code::stableCompleteTier() const { return completeTier1_->tier(); }
   1292 
   1293 Tier Code::bestCompleteTier() const {
   1294  if (hasCompleteTier2_) {
   1295    return completeTier2_->tier();
   1296  }
   1297  return completeTier1_->tier();
   1298 }
   1299 
// Return the complete code block at exactly `tier`.  Crashes if no such
// block exists or (for the optimized tier) if it has not been committed yet.
const CodeBlock& Code::completeTierCodeBlock(Tier tier) const {
  switch (tier) {
    case Tier::Baseline:
      if (completeTier1_->tier() == Tier::Baseline) {
        MOZ_ASSERT(completeTier1_->initialized());
        return *completeTier1_;
      }
      MOZ_CRASH("No code segment at this tier");
    case Tier::Optimized:
      // An Ion-only module stores its optimized code as tier 1.
      if (completeTier1_->tier() == Tier::Optimized) {
        MOZ_ASSERT(completeTier1_->initialized());
        return *completeTier1_;
      }
      // It is incorrect to ask for the optimized tier without there being such
      // a tier and the tier having been committed.  The guard here could
      // instead be `if (hasCompleteTier2_) ... ` but codeBlock(t) should not be
      // called in contexts where that test is necessary.
      MOZ_RELEASE_ASSERT(hasCompleteTier2_);
      MOZ_ASSERT(completeTier2_->initialized());
      return *completeTier2_;
  }
  MOZ_CRASH();
}
   1323 
   1324 const LinkData* Code::codeBlockLinkData(const CodeBlock& block) const {
   1325  auto guard = data_.readLock();
   1326  MOZ_ASSERT(block.initialized() && block.code == this);
   1327  return guard->blocksLinkData[block.codeBlockIndex].get();
   1328 }
   1329 
   1330 void Code::clearLinkData() const {
   1331  auto guard = data_.writeLock();
   1332  for (UniqueLinkData& linkData : guard->blocksLinkData) {
   1333    linkData = nullptr;
   1334  }
   1335 }
   1336 
   1337 // When enabled, generate profiling labels for every name in funcNames_ that is
   1338 // the name of some Function CodeRange. This involves malloc() so do it now
   1339 // since, once we start sampling, we'll be in a signal-handing context where we
   1340 // cannot malloc.
   1341 void Code::ensureProfilingLabels(bool profilingEnabled) const {
   1342  auto labels = profilingLabels_.lock();
   1343 
   1344  if (!profilingEnabled) {
   1345    labels->clear();
   1346    return;
   1347  }
   1348 
   1349  if (!labels->empty()) {
   1350    return;
   1351  }
   1352 
   1353  // Any tier will do, we only need tier-invariant data that are incidentally
   1354  // stored with the code ranges.
   1355  const CodeBlock& sharedStubsCodeBlock = sharedStubs();
   1356  const CodeBlock& tier1CodeBlock = completeTierCodeBlock(stableCompleteTier());
   1357 
   1358  // Ignore any OOM failures, nothing we can do about it
   1359  (void)appendProfilingLabels(labels, sharedStubsCodeBlock);
   1360  (void)appendProfilingLabels(labels, tier1CodeBlock);
   1361 }
   1362 
// Build and store a profiling label of the form "name (filename:bytecode)"
// for every function CodeRange in `codeBlock`, indexed by function index in
// `labels`.  Returns false on OOM (labels built so far are kept).
bool Code::appendProfilingLabels(
    const ExclusiveData<CacheableCharsVector>::Guard& labels,
    const CodeBlock& codeBlock) const {
  for (const CodeRange& codeRange : codeBlock.codeRanges) {
    if (!codeRange.isFunction()) {
      continue;
    }

    // Render the function's bytecode offset as a decimal string.
    Int32ToCStringBuf cbuf;
    size_t bytecodeStrLen;
    const char* bytecodeStr = Uint32ToCString(
        &cbuf, codeTailMeta().funcBytecodeOffset(codeRange.funcIndex()),
        &bytecodeStrLen);
    MOZ_ASSERT(bytecodeStr);

    // asm.js modules carry names separately from the wasm name section.
    UTF8Bytes name;
    bool ok;
    if (codeMetaForAsmJS()) {
      ok =
          codeMetaForAsmJS()->getFuncNameForAsmJS(codeRange.funcIndex(), &name);
    } else {
      ok = codeMeta().getFuncNameForWasm(
          NameContext::Standalone, codeRange.funcIndex(),
          codeTailMeta().nameSectionPayload.get(), &name);
    }
    if (!ok || !name.append(" (", 2)) {
      return false;
    }

    // Append the source filename, or "?" when there is none.
    if (const char* filename = codeMeta().scriptedCaller().filename.get()) {
      if (!name.append(filename, strlen(filename))) {
        return false;
      }
    } else {
      if (!name.append('?')) {
        return false;
      }
    }

    // Close the "(filename:offset)" suffix and NUL-terminate.
    if (!name.append(':') || !name.append(bytecodeStr, bytecodeStrLen) ||
        !name.append(")\0", 2)) {
      return false;
    }

    UniqueChars label(name.extractOrCopyRawBuffer());
    if (!label) {
      return false;
    }

    // Grow the label vector on demand; slots for unexported/unnamed indices
    // stay null.
    if (codeRange.funcIndex() >= labels->length()) {
      if (!labels->resize(codeRange.funcIndex() + 1)) {
        return false;
      }
    }

    ((CacheableCharsVector&)labels)[codeRange.funcIndex()] = std::move(label);
  }
  return true;
}
   1422 
   1423 const char* Code::profilingLabel(uint32_t funcIndex) const {
   1424  auto labels = profilingLabels_.lock();
   1425 
   1426  if (funcIndex >= labels->length() ||
   1427      !((CacheableCharsVector&)labels)[funcIndex]) {
   1428    return "?";
   1429  }
   1430  return ((CacheableCharsVector&)labels)[funcIndex].get();
   1431 }
   1432 
// Accumulate this Code's memory footprint into *code/*data, using the Seen
// sets to count each shared structure at most once across modules.
void Code::addSizeOfMiscIfNotSeen(
    MallocSizeOf mallocSizeOf, CodeMetadata::SeenSet* seenCodeMeta,
    CodeMetadataForAsmJS::SeenSet* seenCodeMetaForAsmJS,
    Code::SeenSet* seenCode, size_t* code, size_t* data) const {
  // Already counted this Code; nothing to do.
  auto p = seenCode->lookupForAdd(this);
  if (p) {
    return;
  }
  bool ok = seenCode->add(p, this);
  (void)ok;  // oh well

  auto guard = data_.readLock();
  *data +=
      mallocSizeOf(this) + guard->blocks.sizeOfExcludingThis(mallocSizeOf) +
      guard->blocksLinkData.sizeOfExcludingThis(mallocSizeOf) +
      guard->lazyExports.sizeOfExcludingThis(mallocSizeOf) +
      (codeMetaForAsmJS() ? codeMetaForAsmJS()->sizeOfIncludingThisIfNotSeen(
                                mallocSizeOf, seenCodeMetaForAsmJS)
                          : 0) +
      funcImports_.sizeOfExcludingThis(mallocSizeOf) +
      profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf) +
      jumpTables_.sizeOfMiscExcludingThis();
  // Lazy stub segments are owned here, not by any code block.
  for (const SharedCodeSegment& stub : guard->lazyStubSegments) {
    stub->addSizeOfMisc(mallocSizeOf, code, data);
  }

  sharedStubs().addSizeOfMisc(mallocSizeOf, code, data);
  for (auto t : completeTiers()) {
    completeTierCodeBlock(t).addSizeOfMisc(mallocSizeOf, code, data);
  }
}
   1464 
// Print a disassembly of every code range whose kind is selected by the
// `kindSelection` bitmask (bit position == CodeRange kind), each preceded by
// a header naming the kind and, when available, the function.
void CodeBlock::disassemble(JSContext* cx, int kindSelection,
                            PrintCallback printString) const {
  for (const CodeRange& range : codeRanges) {
    if (kindSelection & (1 << range.kind())) {
      MOZ_ASSERT(range.begin() < segment->lengthBytes());
      // NOTE(review): `<=` may be the intended bound here if a range can end
      // exactly at the segment's end — confirm.
      MOZ_ASSERT(range.end() < segment->lengthBytes());

      // Map the kind to a printable label; unknown kinds get a numeric form.
      const char* kind;
      char kindbuf[128];
      switch (range.kind()) {
        case CodeRange::Function:
          kind = "Function";
          break;
        case CodeRange::InterpEntry:
          kind = "InterpEntry";
          break;
        case CodeRange::JitEntry:
          kind = "JitEntry";
          break;
        case CodeRange::ImportInterpExit:
          kind = "ImportInterpExit";
          break;
        case CodeRange::ImportJitExit:
          kind = "ImportJitExit";
          break;
        default:
          SprintfLiteral(kindbuf, "CodeRange::Kind(%d)", range.kind());
          kind = kindbuf;
          break;
      }
      const char* separator =
          "\n--------------------------------------------------\n";
      // The buffer is quite large in order to accomodate mangled C++ names;
      // lengths over 3500 have been observed in the wild.
      char buf[4096];
      if (range.hasFuncIndex()) {
        // Resolve the function name; fall back to "(unknown)" on failure.
        const char* funcName = "(unknown)";
        UTF8Bytes namebuf;
        bool ok;
        if (code->codeMetaForAsmJS()) {
          ok = code->codeMetaForAsmJS()->getFuncNameForAsmJS(range.funcIndex(),
                                                             &namebuf);
        } else {
          ok = code->codeMeta().getFuncNameForWasm(
              NameContext::Standalone, range.funcIndex(),
              code->codeTailMeta().nameSectionPayload.get(), &namebuf);
        }
        if (ok && namebuf.append('\0')) {
          funcName = namebuf.begin();
        }
        SprintfLiteral(buf, "%sKind = %s, index = %d, name = %s:\n", separator,
                       kind, range.funcIndex(), funcName);
      } else {
        SprintfLiteral(buf, "%sKind = %s\n", separator, kind);
      }
      printString(buf);

      uint8_t* theCode = base() + range.begin();
      jit::Disassemble(theCode, range.end() - range.begin(), printString);
    }
  }
}
   1527 
   1528 void Code::disassemble(JSContext* cx, Tier tier, int kindSelection,
   1529                       PrintCallback printString) const {
   1530  this->sharedStubs().disassemble(cx, kindSelection, printString);
   1531  this->completeTierCodeBlock(tier).disassemble(cx, kindSelection, printString);
   1532 }
   1533 
   1534 // Return a map with names and associated statistics
   1535 MetadataAnalysisHashMap Code::metadataAnalysis(JSContext* cx) const {
   1536  MetadataAnalysisHashMap hashmap;
   1537  if (!hashmap.reserve(14)) {
   1538    return hashmap;
   1539  }
   1540 
   1541  for (auto t : completeTiers()) {
   1542    const CodeBlock& codeBlock = completeTierCodeBlock(t);
   1543    size_t length = codeBlock.funcToCodeRange.numEntries();
   1544    length += codeBlock.codeRanges.length();
   1545    length += codeBlock.callSites.length();
   1546    length += codeBlock.trapSites.sumOfLengths();
   1547    length += codeBlock.funcExports.length();
   1548    length += codeBlock.stackMaps.length();
   1549    length += codeBlock.tryNotes.length();
   1550 
   1551    hashmap.putNewInfallible("metadata length", length);
   1552 
   1553    // Iterate over the Code Ranges and accumulate all pieces of code.
   1554    size_t code_size = 0;
   1555    for (const CodeRange& codeRange : codeBlock.codeRanges) {
   1556      if (!codeRange.isFunction()) {
   1557        continue;
   1558      }
   1559      code_size += codeRange.end() - codeRange.begin();
   1560    }
   1561 
   1562    hashmap.putNewInfallible("stackmaps number", codeBlock.stackMaps.length());
   1563    hashmap.putNewInfallible("trapSites number",
   1564                             codeBlock.trapSites.sumOfLengths());
   1565    hashmap.putNewInfallible("codeRange size in bytes", code_size);
   1566    hashmap.putNewInfallible("code segment capacity",
   1567                             codeBlock.segment->capacityBytes());
   1568 
   1569    auto mallocSizeOf = cx->runtime()->debuggerMallocSizeOf;
   1570 
   1571    hashmap.putNewInfallible(
   1572        "funcToCodeRange size",
   1573        codeBlock.funcToCodeRange.sizeOfExcludingThis(mallocSizeOf));
   1574    hashmap.putNewInfallible(
   1575        "codeRanges size",
   1576        codeBlock.codeRanges.sizeOfExcludingThis(mallocSizeOf));
   1577    hashmap.putNewInfallible(
   1578        "callSites size",
   1579        codeBlock.callSites.sizeOfExcludingThis(mallocSizeOf));
   1580    hashmap.putNewInfallible(
   1581        "tryNotes size", codeBlock.tryNotes.sizeOfExcludingThis(mallocSizeOf));
   1582    hashmap.putNewInfallible(
   1583        "trapSites size",
   1584        codeBlock.trapSites.sizeOfExcludingThis(mallocSizeOf));
   1585    hashmap.putNewInfallible(
   1586        "stackMaps size",
   1587        codeBlock.stackMaps.sizeOfExcludingThis(mallocSizeOf));
   1588    hashmap.putNewInfallible(
   1589        "funcExports size",
   1590        codeBlock.funcExports.sizeOfExcludingThis(mallocSizeOf));
   1591  }
   1592 
   1593  return hashmap;
   1594 }
   1595 
   1596 void wasm::PatchDebugSymbolicAccesses(uint8_t* codeBase, MacroAssembler& masm) {
   1597 #ifdef WASM_CODEGEN_DEBUG
   1598  for (auto& access : masm.symbolicAccesses()) {
   1599    switch (access.target) {
   1600      case SymbolicAddress::PrintI32:
   1601      case SymbolicAddress::PrintPtr:
   1602      case SymbolicAddress::PrintF32:
   1603      case SymbolicAddress::PrintF64:
   1604      case SymbolicAddress::PrintText:
   1605        break;
   1606      default:
   1607        MOZ_CRASH("unexpected symbol in PatchDebugSymbolicAccesses");
   1608    }
   1609    ABIFunctionType abiType;
   1610    void* target = AddressOf(access.target, &abiType);
   1611    uint8_t* patchAt = codeBase + access.patchAt.offset();
   1612    Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
   1613                                       PatchedImmPtr(target),
   1614                                       PatchedImmPtr((void*)-1));
   1615  }
   1616 #else
   1617  MOZ_ASSERT(masm.symbolicAccesses().empty());
   1618 #endif
   1619 }