tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WasmCode.h (46217B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 *
      4 * Copyright 2016 Mozilla Foundation
      5 *
      6 * Licensed under the Apache License, Version 2.0 (the "License");
      7 * you may not use this file except in compliance with the License.
      8 * You may obtain a copy of the License at
      9 *
     10 *     http://www.apache.org/licenses/LICENSE-2.0
     11 *
     12 * Unless required by applicable law or agreed to in writing, software
     13 * distributed under the License is distributed on an "AS IS" BASIS,
     14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     15 * See the License for the specific language governing permissions and
     16 * limitations under the License.
     17 */
     18 
     19 #ifndef wasm_code_h
     20 #define wasm_code_h
     21 
     22 #include "mozilla/Assertions.h"
     23 #include "mozilla/Atomics.h"
     24 #include "mozilla/Attributes.h"
     25 #include "mozilla/BinarySearch.h"
     26 #include "mozilla/DebugOnly.h"
     27 #include "mozilla/EnumeratedArray.h"
     28 #include "mozilla/Maybe.h"
     29 #include "mozilla/MemoryReporting.h"
     30 #include "mozilla/RefPtr.h"
     31 #include "mozilla/ScopeExit.h"
     32 #include "mozilla/UniquePtr.h"
     33 
     34 #include <stddef.h>
     35 #include <stdint.h>
     36 #include <string.h>
     37 #include <utility>
     38 
     39 #include "jstypes.h"
     40 
     41 #include "gc/Memory.h"
     42 #include "jit/ProcessExecutableMemory.h"
     43 #include "js/AllocPolicy.h"
     44 #include "js/UniquePtr.h"
     45 #include "js/Utility.h"
     46 #include "js/Vector.h"
     47 #include "threading/ExclusiveData.h"
     48 #include "util/Memory.h"
     49 #include "vm/MutexIDs.h"
     50 #include "wasm/AsmJS.h"  // CodeMetadataForAsmJS::SeenSet
     51 #include "wasm/WasmBuiltinModule.h"
     52 #include "wasm/WasmBuiltins.h"
     53 #include "wasm/WasmCodegenConstants.h"
     54 #include "wasm/WasmCodegenTypes.h"
     55 #include "wasm/WasmCompileArgs.h"
     56 #include "wasm/WasmConstants.h"
     57 #include "wasm/WasmExprType.h"
     58 #include "wasm/WasmGC.h"
     59 #include "wasm/WasmLog.h"
     60 #include "wasm/WasmMetadata.h"
     61 #include "wasm/WasmModuleTypes.h"
     62 #include "wasm/WasmSerialize.h"
     63 #include "wasm/WasmShareable.h"
     64 #include "wasm/WasmTypeDecls.h"
     65 #include "wasm/WasmTypeDef.h"
     66 #include "wasm/WasmValType.h"
     67 
     68 struct JS_PUBLIC_API JSContext;
     69 class JSFunction;
     70 
     71 namespace js {
     72 
     73 namespace jit {
     74 class MacroAssembler;
     75 };
     76 
     77 namespace wasm {
     78 
     79 // LinkData contains all the metadata necessary to patch all the locations
     80 // that depend on the absolute address of a CodeSegment. This happens in a
     81 // "linking" step after compilation and after the module's code is serialized.
     82 // The LinkData is serialized along with the Module but does not (normally, see
     83 // Module::debugLinkData_ comment) persist after (de)serialization, which
     84 // distinguishes it from Metadata, which is stored in the Code object.
     85 
// POD portion of LinkData, split out so it can be (de)serialized by plain
// byte copy (see the WASM_DECLARE_CACHEABLE_POD invocation just below).
struct LinkDataCacheablePod {
  // Code offset recorded for trap handling during linking.
  // NOTE(review): the exact consumer is not visible in this header —
  // confirm against WasmCode.cpp.
  uint32_t trapOffset = 0;

  WASM_CHECK_CACHEABLE_POD(trapOffset);

  LinkDataCacheablePod() = default;
};
     93 
     94 WASM_DECLARE_CACHEABLE_POD(LinkDataCacheablePod);
     95 
     96 WASM_CHECK_CACHEABLE_POD_PADDING(LinkDataCacheablePod)
     97 
     98 struct LinkData : LinkDataCacheablePod {
     99  LinkData() = default;
    100 
    101  LinkDataCacheablePod& pod() { return *this; }
    102  const LinkDataCacheablePod& pod() const { return *this; }
    103 
    104  struct InternalLink {
    105    uint32_t patchAtOffset;
    106    uint32_t targetOffset;
    107 #ifdef JS_CODELABEL_LINKMODE
    108    uint32_t mode;
    109 #endif
    110 
    111    WASM_CHECK_CACHEABLE_POD(patchAtOffset, targetOffset);
    112 #ifdef JS_CODELABEL_LINKMODE
    113    WASM_CHECK_CACHEABLE_POD(mode)
    114 #endif
    115  };
    116  using InternalLinkVector = Vector<InternalLink, 0, SystemAllocPolicy>;
    117 
    118  struct SymbolicLinkArray
    119      : mozilla::EnumeratedArray<SymbolicAddress, Uint32Vector,
    120                                 size_t(SymbolicAddress::Limit)> {
    121    bool isEmpty() const {
    122      for (const Uint32Vector& symbolicLinks : *this) {
    123        if (symbolicLinks.length() != 0) {
    124          return false;
    125        }
    126      }
    127      return true;
    128    }
    129    void clear() {
    130      for (SymbolicAddress symbolicAddress :
    131           mozilla::MakeEnumeratedRange(SymbolicAddress::Limit)) {
    132        (*this)[symbolicAddress].clear();
    133      }
    134    }
    135 
    136    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
    137  };
    138 
    139  InternalLinkVector internalLinks;
    140  CallFarJumpVector callFarJumps;
    141  SymbolicLinkArray symbolicLinks;
    142 
    143  bool isEmpty() const {
    144    return internalLinks.length() == 0 && callFarJumps.length() == 0 &&
    145           symbolicLinks.isEmpty();
    146  }
    147  void clear() {
    148    internalLinks.clear();
    149    callFarJumps.clear();
    150    symbolicLinks.clear();
    151  }
    152 
    153  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
    154 };
    155 
    156 WASM_DECLARE_CACHEABLE_POD(LinkData::InternalLink);
    157 
    158 using UniqueLinkData = UniquePtr<LinkData>;
    159 using UniqueLinkDataVector = Vector<UniqueLinkData, 0, SystemAllocPolicy>;
    160 
    161 // Executable code must be deallocated specially.
    162 
    163 struct FreeCode {
    164  uint32_t codeLength;
    165  FreeCode() : codeLength(0) {}
    166  explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
    167  void operator()(uint8_t* codeBytes);
    168 };
    169 
    170 using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;
    171 
    172 class Code;
    173 class CodeBlock;
    174 
    175 using UniqueCodeBlock = UniquePtr<CodeBlock>;
    176 using UniqueConstCodeBlock = UniquePtr<const CodeBlock>;
    177 using UniqueConstCodeBlockVector =
    178    Vector<UniqueConstCodeBlock, 0, SystemAllocPolicy>;
    179 using RawCodeBlockVector = Vector<const CodeBlock*, 0, SystemAllocPolicy>;
    180 
// The kind of code a CodeBlock holds (see CodeBlock::kind).
enum class CodeBlockKind {
  // Stubs shared across the module (not tier-specific).
  SharedStubs,
  // Code compiled at the baseline tier (maps to Tier::Baseline).
  BaselineTier,
  // Code compiled at the optimized tier (maps to Tier::Optimized).
  OptimizedTier,
  // Lazily generated stub code (see LazyFuncExport below).
  LazyStubs
};
    187 
    188 // A source of machine code for creating an executable code segment.
    189 class CodeSource {
    190  // The macro assembler to use as the source. If this is set then there is
    191  // no `bytes_` pointer.
    192  jit::MacroAssembler* masm_ = nullptr;
    193  // A raw pointer to the unlinked machine code bytes. If this is set then
    194  // there is no `masm_` pointer.
    195  const uint8_t* bytes_ = nullptr;
    196 
    197  // The length in bytes for either case. This is always valid and set to
    198  // masm.bytesNeeded() if masm_ is present.
    199  uint32_t length_ = 0;
    200 
    201  // The link data to use, if any. This is always present if we are linking
    202  // from raw bytes. Otherwise it may or may not be present when we are linking
    203  // masm. If it is not present for masm we will fall back to basic linking of
    204  // code labels and debug symbolic accesses.
    205  const LinkData* linkData_;
    206 
    207  // The code object to use, if any, for linking. This is optionally present
    208  // in either case. This will not be present if we are doing basic linking
    209  // without a link data.
    210  const Code* code_;
    211 
    212 public:
    213  // Get the machine code from a macro assembler, optional link data, and
    214  // optional code object.
    215  CodeSource(jit::MacroAssembler& masm, const LinkData* linkData,
    216             const Code* code);
    217 
    218  // Get the machine code from a raw bytes range, link data, and optional code
    219  // object.
    220  CodeSource(const uint8_t* bytes, uint32_t length, const LinkData& linkData,
    221             const Code* code);
    222 
    223  // The length of machine code in bytes.
    224  uint32_t lengthBytes() const { return length_; }
    225 
    226  // Copy and link the machine code into `codeStart`.
    227  bool copyAndLink(jit::AutoMarkJitCodeWritableForThread& writable,
    228                   uint8_t* codeStart) const;
    229 };
    230 
// CodeSegment is a fixed-size chunk of executable memory that we can
// bump-allocate smaller allocations from.
class CodeSegment : public ShareableBase<CodeSegment> {
 private:
  // Owning pointer to the executable memory; released through FreeCode.
  const UniqueCodeBytes bytes_;
  // Current allocated length in bytes (advanced by claimSpace).
  uint32_t lengthBytes_;
  // Total capacity of the segment in bytes.
  const uint32_t capacityBytes_;
  // Weak back-pointer to the owning Code object; null until setCode().
  const Code* code_;

  // Create a new, empty code segment with a given capacity. The capacity must
  // have granularity of ExecutableCodePageSize (64KB).
  static RefPtr<CodeSegment> create(
      mozilla::Maybe<jit::AutoMarkJitCodeWritableForThread>& writable,
      size_t capacityBytes, bool allowLastDitchGC = true);

  // Returns the alignment that all allocations within a code segment must be.
  //
  // If we are write-protecting code, then we must start every new allocation
  // on a new system page, otherwise we can re-use system pages for new
  // allocations.
  static size_t AllocationAlignment();
  // Align `bytes` up to at least the allocation alignment. See above.
  static size_t AlignAllocationBytes(uintptr_t bytes);
  // Returns whether `bytes` is aligned to the allocation alignment.
  static bool IsAligned(uintptr_t bytes);

  // Checks if this code segment has enough room for an allocation of bytes.
  // The bytes must be aligned to allocation alignment.
  bool hasSpace(size_t bytes) const;

  // Claims space in this code segment for an allocation of bytes. The bytes
  // must be aligned to allocation alignment.
  void claimSpace(size_t bytes, uint8_t** claimedBase);

 public:
  CodeSegment(UniqueCodeBytes bytes, uint32_t lengthBytes,
              uint32_t capacityBytes)
      : bytes_(std::move(bytes)),
        lengthBytes_(lengthBytes),
        capacityBytes_(capacityBytes),
        code_(nullptr) {}

  // Copies, links, and makes the machine code executable from the given code
  // source. Returns the code segment the code was allocated into. An optional
  // pool of code segments may be provided to allocate from.
  //
  // There are two important ranges created, an 'allocation' range and a 'code
  // range'.
  //
  // The allocation range is a superset of the code range. The allocation start
  // offset will be aligned to `AllocationAlignment` which is either the system
  // page size or just executable code alignment.
  //
  // The code range will be within the allocation range and may have some
  // padding inserted before the start of the allocation. The code start offset
  // will always be aligned to the executable code alignment.
  //
  // Random padding is added before the code range when we are aligning to the
  // system page size, so that the start addresses of all the code memories
  // will not conflict in associative icaches.
  //
  // Here's a picture that illustrates the resulting structure of allocations:
  //
  // This is an example for a machine with a 4KB page size, for a codeLength
  // which requires more than one page but less than two, in a segment where
  // the first page is already allocated.
  //
  // Note: if !JitOptions.writeProtectCode, then allocationStart and
  //   allocationLength will be a multiple of jit::CodeAlignment, not the
  //   system page size.
  //
  // segment->base() (aligned at 4K = hardware page size)
  // :
  // :                      +4k                     +8k                    +12k
  // :                       :                       :                       :
  // +-----------------------+          +---------------------------------+   :
  // |        IN USE         |          |   CODE              CODE        |   :
  // +-----------------------+----------+---------------------------------+---+
  // .                       :          :                                 :
  // :                       :          :     allocationLength            :
  // :                       :<------------------------------------------>:
  // .                       :          :                                 :
  // :                       :  padding :           codeLength            :
  // :<--------------------->:<-------->:<------------------------------->:
  // :                       :          :
  // :                       :          :
  // :<-------------------------------->:
  //                         :          :
  //                         :          codeStart
  //                         :
  //                         allocationStart
  static RefPtr<CodeSegment> allocate(
      const CodeSource& codeSource,
      Vector<RefPtr<CodeSegment>, 0, SystemAllocPolicy>* segmentPool,
      bool allowLastDitchGC, uint8_t** codeStartOut,
      uint32_t* allocationLengthOut);

  // Record the owning Code object (may only be done after construction).
  void setCode(const Code& code) { code_ = &code; }

  uint8_t* base() const { return bytes_.get(); }
  uint32_t lengthBytes() const {
    // UINT32_MAX appears to serve as an invalid/sentinel value here —
    // NOTE(review): confirm where it is set.
    MOZ_ASSERT(lengthBytes_ != UINT32_MAX);
    return lengthBytes_;
  }
  uint32_t capacityBytes() const {
    MOZ_ASSERT(capacityBytes_ != UINT32_MAX);
    return capacityBytes_;
  }

  // The owning Code; only valid after setCode() has been called.
  const Code& code() const { return *code_; }

  void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
  WASM_DECLARE_FRIEND_SERIALIZE(CodeSegment);
};
    346 
    347 using SharedCodeSegment = RefPtr<CodeSegment>;
    348 using SharedCodeSegmentVector = Vector<SharedCodeSegment, 0, SystemAllocPolicy>;
    349 
// Allocate `codeLength` bytes of executable memory, filling in `writable`
// so the caller may write to it. `allowLastDitchGC` controls whether a
// last-ditch GC may be attempted on allocation failure.
extern UniqueCodeBytes AllocateCodeBytes(
    mozilla::Maybe<jit::AutoMarkJitCodeWritableForThread>& writable,
    uint32_t codeLength, bool allowLastDitchGC);
// Patch the code at `base` using `linkData` (see the LinkData comment
// above); `maybeCode` optionally provides a Code object for linking.
extern bool StaticallyLink(jit::AutoMarkJitCodeWritableForThread& writable,
                           uint8_t* base, const LinkData& linkData,
                           const Code* maybeCode);
// Undo the patches applied by StaticallyLink.
extern void StaticallyUnlink(uint8_t* base, const LinkData& linkData);
    357 
// State of a request to recompile a function at a higher tier. Stored
// atomically in FuncState so it can be read/written across threads.
enum class TierUpState : uint32_t {
  // No tier-up has been requested.
  NotRequested,
  // A tier-up has been requested but has not yet finished.
  Requested,
  // The tier-up has completed.
  Finished,
};

// Atomically updated per-function state: the code block holding the best
// available tier for the function, plus its tier-up request state.
struct FuncState {
  mozilla::Atomic<const CodeBlock*> bestTier;
  mozilla::Atomic<TierUpState> tierUpState;
};
// Heap-allocated array of FuncStates — presumably one entry per function;
// confirm at the allocation site.
using FuncStatesPointer = mozilla::UniquePtr<FuncState[], JS::FreePolicy>;
    369 
// LazyFuncExport helps to efficiently lookup a CodeRange from a given function
// index. It is inserted in a vector sorted by function index, to perform
// binary search on it later.

struct LazyFuncExport {
  // Index of the exported function definition.
  size_t funcIndex;
  // Index of the code block that holds this function's lazy stub.
  size_t lazyStubBlockIndex;
  // Index of the function's CodeRange within that block.
  size_t funcCodeRangeIndex;
  // Used to make sure we only upgrade a lazy stub from baseline to ion.
  // (Debug-only; compiles away in release builds.)
  mozilla::DebugOnly<CodeBlockKind> funcKind;

  LazyFuncExport(size_t funcIndex, size_t lazyStubBlockIndex,
                 size_t funcCodeRangeIndex, CodeBlockKind funcKind)
      : funcIndex(funcIndex),
        lazyStubBlockIndex(lazyStubBlockIndex),
        funcCodeRangeIndex(funcCodeRangeIndex),
        funcKind(funcKind) {}
};
    388 
    389 using LazyFuncExportVector = Vector<LazyFuncExport, 0, SystemAllocPolicy>;
    390 
    391 // A FuncExport represents a single function definition inside a wasm Module
    392 // that has been exported one or more times. A FuncExport represents an
    393 // internal entry point that can be called via function definition index by
    394 // Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
    395 // function definition index, the FuncExportVector is stored sorted by
    396 // function definition index.
    397 
    398 class FuncExport {
    399  uint32_t funcIndex_;
    400  uint32_t eagerInterpEntryOffset_;  // Machine code offset
    401 
    402  WASM_CHECK_CACHEABLE_POD(funcIndex_, eagerInterpEntryOffset_);
    403 
    404  // Sentinel value that this FuncExport will get eager stubs
    405  static constexpr uint32_t PENDING_EAGER_STUBS = UINT32_MAX - 1;
    406 
    407  // Sentinel value that this FuncExport will not eager stubs
    408  static constexpr uint32_t NO_EAGER_STUBS = UINT32_MAX;
    409 
    410 public:
    411  FuncExport() = default;
    412  explicit FuncExport(uint32_t funcIndex, bool hasEagerStubs) {
    413    funcIndex_ = funcIndex;
    414    eagerInterpEntryOffset_ =
    415        hasEagerStubs ? PENDING_EAGER_STUBS : NO_EAGER_STUBS;
    416  }
    417  void initEagerInterpEntryOffset(uint32_t entryOffset) {
    418    MOZ_ASSERT(eagerInterpEntryOffset_ == PENDING_EAGER_STUBS);
    419    MOZ_ASSERT(entryOffset != PENDING_EAGER_STUBS &&
    420               entryOffset != NO_EAGER_STUBS);
    421    MOZ_ASSERT(hasEagerStubs());
    422    eagerInterpEntryOffset_ = entryOffset;
    423  }
    424 
    425  bool hasEagerStubs() const {
    426    return eagerInterpEntryOffset_ != NO_EAGER_STUBS;
    427  }
    428  uint32_t funcIndex() const { return funcIndex_; }
    429  uint32_t eagerInterpEntryOffset() const {
    430    MOZ_ASSERT(eagerInterpEntryOffset_ != PENDING_EAGER_STUBS);
    431    MOZ_ASSERT(hasEagerStubs());
    432    return eagerInterpEntryOffset_;
    433  }
    434  void offsetBy(uint32_t delta) {
    435    if (hasEagerStubs()) {
    436      eagerInterpEntryOffset_ += delta;
    437    }
    438  }
    439 };
    440 
    441 WASM_DECLARE_CACHEABLE_POD(FuncExport);
    442 
    443 using FuncExportVector = Vector<FuncExport, 0, SystemAllocPolicy>;
    444 
// A FuncImport contains the runtime metadata needed to implement a call to an
// imported function. Each function import has two call stubs: an optimized
// path into JIT code and a slow path into the generic C++ js::Invoke, and the
// offsets of these stubs are stored so that function-import callsites can be
// dynamically patched at runtime.

class FuncImport {
 private:
  uint32_t interpExitCodeOffset_;  // Machine code offset (slow interp path)
  uint32_t jitExitCodeOffset_;     // Machine code offset (fast JIT path)

  WASM_CHECK_CACHEABLE_POD(interpExitCodeOffset_, jitExitCodeOffset_);

 public:
  // Both offsets start as 0, meaning "not yet initialized" (enforced by the
  // asserts in the init methods below).
  FuncImport() : interpExitCodeOffset_(0), jitExitCodeOffset_(0) {}

  // Record the interp exit stub offset; may only be done once.
  void initInterpExitOffset(uint32_t off) {
    MOZ_ASSERT(!interpExitCodeOffset_);
    interpExitCodeOffset_ = off;
  }
  // Record the JIT exit stub offset; may only be done once.
  void initJitExitOffset(uint32_t off) {
    MOZ_ASSERT(!jitExitCodeOffset_);
    jitExitCodeOffset_ = off;
  }

  uint32_t interpExitCodeOffset() const { return interpExitCodeOffset_; }
  uint32_t jitExitCodeOffset() const { return jitExitCodeOffset_; }
};
    473 
    474 WASM_DECLARE_CACHEABLE_POD(FuncImport)
    475 
    476 using FuncImportVector = Vector<FuncImport, 0, SystemAllocPolicy>;
    477 
    478 static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
    479 
    480 class FuncToCodeRangeMap {
    481  uint32_t startFuncIndex_ = 0;
    482  Uint32Vector funcToCodeRange_;
    483 
    484  bool denseHasFuncIndex(uint32_t funcIndex) const {
    485    return funcIndex >= startFuncIndex_ &&
    486           funcIndex - startFuncIndex_ < funcToCodeRange_.length();
    487  }
    488 
    489  FuncToCodeRangeMap(uint32_t startFuncIndex, Uint32Vector&& funcToCodeRange)
    490      : startFuncIndex_(startFuncIndex),
    491        funcToCodeRange_(std::move(funcToCodeRange)) {}
    492 
    493 public:
    494  [[nodiscard]] static bool createDense(uint32_t startFuncIndex,
    495                                        uint32_t numFuncs,
    496                                        FuncToCodeRangeMap* result) {
    497    Uint32Vector funcToCodeRange;
    498    if (!funcToCodeRange.appendN(BAD_CODE_RANGE, numFuncs)) {
    499      return false;
    500    }
    501    *result = FuncToCodeRangeMap(startFuncIndex, std::move(funcToCodeRange));
    502    return true;
    503  }
    504 
    505  FuncToCodeRangeMap() = default;
    506  FuncToCodeRangeMap(FuncToCodeRangeMap&& rhs) = default;
    507  FuncToCodeRangeMap& operator=(FuncToCodeRangeMap&& rhs) = default;
    508  FuncToCodeRangeMap(const FuncToCodeRangeMap& rhs) = delete;
    509  FuncToCodeRangeMap& operator=(const FuncToCodeRangeMap& rhs) = delete;
    510 
    511  uint32_t lookup(uint32_t funcIndex) const {
    512    if (!denseHasFuncIndex(funcIndex)) {
    513      return BAD_CODE_RANGE;
    514    }
    515    return funcToCodeRange_[funcIndex - startFuncIndex_];
    516  }
    517 
    518  uint32_t operator[](uint32_t funcIndex) const { return lookup(funcIndex); }
    519 
    520  [[nodiscard]] bool insert(uint32_t funcIndex, uint32_t codeRangeIndex) {
    521    if (!denseHasFuncIndex(funcIndex)) {
    522      return false;
    523    }
    524    funcToCodeRange_[funcIndex - startFuncIndex_] = codeRangeIndex;
    525    return true;
    526  }
    527  void insertInfallible(uint32_t funcIndex, uint32_t codeRangeIndex) {
    528    bool result = insert(funcIndex, codeRangeIndex);
    529    MOZ_RELEASE_ASSERT(result);
    530  }
    531 
    532  void shrinkStorageToFit() { funcToCodeRange_.shrinkStorageToFit(); }
    533 
    534  void assertAllInitialized() {
    535 #ifdef DEBUG
    536    for (uint32_t codeRangeIndex : funcToCodeRange_) {
    537      MOZ_ASSERT(codeRangeIndex != BAD_CODE_RANGE);
    538    }
    539 #endif
    540  }
    541 
    542  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    543    return funcToCodeRange_.sizeOfExcludingThis(mallocSizeOf);
    544  }
    545 
    546  size_t numEntries() const { return funcToCodeRange_.length(); }
    547 
    548  WASM_DECLARE_FRIEND_SERIALIZE(FuncToCodeRangeMap);
    549 };
    550 
// CodeBlock contains all the data related to a given compilation tier. It is
// built during module generation and then immutably stored in a Code.
//
// Code contains a map from PC to containing code block. The map is thread-safe
// to support lookups from multiple threads (see ThreadSafeCodeBlockMap). This
// is safe because code blocks are immutable after creation, so there won't
// be any concurrent modification during a metadata lookup.

class CodeBlock {
 public:
  // Weak reference to the code that owns us, not serialized.
  const Code* code;
  // The index we are held inside our containing Code::data::blocks_ vector.
  size_t codeBlockIndex;

  // The following information is all serialized.
  // Which kind of code is being stored in this block. Most consumers don't
  // care about this.
  const CodeBlockKind kind;

  // The code segment our JIT code is within.
  SharedCodeSegment segment;

  // Pointer to the beginning of the CodeBlock.
  uint8_t* codeBase;
  // Length in bytes of this block's code within the segment.
  size_t codeLength;

  // Metadata about the code we have contributed to the segment.
  //
  // All offsets are relative to `codeBase` not the segment base.
  FuncToCodeRangeMap funcToCodeRange;
  CodeRangeVector codeRanges;
  InliningContext inliningContext;
  CallSites callSites;
  TrapSites trapSites;
  FuncExportVector funcExports;
  StackMaps stackMaps;
  TryNoteVector tryNotes;
  CodeRangeUnwindInfoVector codeRangeUnwindInfos;

  // Track whether we are registered in the process map of code blocks.
  bool unregisterOnDestroy_;

  // Map a compilation tier to the CodeBlockKind that stores it.
  static constexpr CodeBlockKind kindFromTier(Tier tier) {
    if (tier == Tier::Optimized) {
      return CodeBlockKind::OptimizedTier;
    }
    MOZ_ASSERT(tier == Tier::Baseline);
    return CodeBlockKind::BaselineTier;
  }

  // Constructs an empty block; initialize() must be called to attach it to
  // an owning Code. (size_t)-1 marks the index as not-yet-assigned.
  explicit CodeBlock(CodeBlockKind kind)
      : code(nullptr),
        codeBlockIndex((size_t)-1),
        kind(kind),
        codeBase(nullptr),
        codeLength(0),
        unregisterOnDestroy_(false) {}
  ~CodeBlock();

  // True once initialize() has attached us to an owning Code.
  bool initialized() const {
    if (code) {
      // Initialize should have given us an index too.
      MOZ_ASSERT(codeBlockIndex != (size_t)-1);
      return true;
    }
    return false;
  }

  bool initialize(const Code& code, size_t codeBlockIndex);
  // Report this block's code to profilers (defined out of line).
  void sendToProfiler(const CodeMetadata& codeMeta,
                      const CodeTailMetadata& codeTailMeta,
                      const CodeMetadataForAsmJS* codeMetaForAsmJS,
                      FuncIonPerfSpewerSpan ionSpewers,
                      FuncBaselinePerfSpewerSpan baselineSpewers) const;

  // Gets the tier for this code block. Only valid for non-lazy stub code.
  Tier tier() const {
    switch (kind) {
      case CodeBlockKind::BaselineTier:
        return Tier::Baseline;
      case CodeBlockKind::OptimizedTier:
        return Tier::Optimized;
      default:
        MOZ_CRASH();
    }
  }

  // Returns whether this code block should be considered for serialization.
  bool isSerializable() const {
    return kind == CodeBlockKind::SharedStubs ||
           kind == CodeBlockKind::OptimizedTier;
  }

  uint8_t* base() const { return codeBase; }
  // NOTE(review): codeLength is size_t but is narrowed to uint32_t here —
  // presumably block lengths always fit in 32 bits; confirm.
  uint32_t length() const { return codeLength; }
  // Whether `pc` points into this block's [base, base + length) range.
  bool containsCodePC(const void* pc) const {
    return pc >= base() && pc < (base() + length());
  }

  // Look up a function's CodeRange by index. The index must be mapped: a
  // BAD_CODE_RANGE result from funcToCodeRange would index out of bounds.
  const CodeRange& codeRange(uint32_t funcIndex) const {
    return codeRanges[funcToCodeRange[funcIndex]];
  }
  const CodeRange& codeRange(const FuncExport& funcExport) const {
    return codeRanges[funcToCodeRange[funcExport.funcIndex()]];
  }

  // PC-based metadata lookups (defined out of line).
  const CodeRange* lookupRange(const void* pc) const;
  bool lookupCallSite(void* pc, CallSite* callSite) const;
  const StackMap* lookupStackMap(uint8_t* pc) const;
  const TryNote* lookupTryNote(const void* pc) const;
  bool lookupTrap(void* pc, Trap* kindOut, TrapSite* trapOut) const;
  const CodeRangeUnwindInfo* lookupUnwindInfo(void* pc) const;
  FuncExport& lookupFuncExport(uint32_t funcIndex,
                               size_t* funcExportIndex = nullptr);
  const FuncExport& lookupFuncExport(uint32_t funcIndex,
                                     size_t* funcExportIndex = nullptr) const;

  void disassemble(JSContext* cx, int kindSelection,
                   PrintCallback printString) const;

  void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;

  WASM_DECLARE_FRIEND_SERIALIZE_ARGS(CodeBlock, const wasm::LinkData& data);
};
    677 
    678 // Because of profiling, the thread running wasm might need to know to which
    679 // CodeBlock the current PC belongs, during a call to lookup(). A lookup
    680 // is a read-only operation, and we don't want to take a lock then
    681 // (otherwise, we could have a deadlock situation if an async lookup
    682 // happened on a given thread that was holding mutatorsMutex_ while getting
    683 // sampled). Since the writer could be modifying the data that is getting
    684 // looked up, the writer functions use spin-locks to know if there are any
    685 // observers (i.e. calls to lookup()) of the atomic data.
    686 
    687 class ThreadSafeCodeBlockMap {
    688  // Since writes (insertions or removals) can happen on any background
    689  // thread at the same time, we need a lock here.
    690 
    691  Mutex mutatorsMutex_ MOZ_UNANNOTATED;
    692 
    693  RawCodeBlockVector segments1_;
    694  RawCodeBlockVector segments2_;
    695 
    696  // Except during swapAndWait(), there are no lookup() observers of the
    697  // vector pointed to by mutableCodeBlocks_
    698 
    699  RawCodeBlockVector* mutableCodeBlocks_;
    700  mozilla::Atomic<const RawCodeBlockVector*> readonlyCodeBlocks_;
    701  mozilla::Atomic<size_t> numActiveLookups_;
    702 
    703  struct CodeBlockPC {
    704    const void* pc;
    705    explicit CodeBlockPC(const void* pc) : pc(pc) {}
    706    int operator()(const CodeBlock* cb) const {
    707      if (cb->containsCodePC(pc)) {
    708        return 0;
    709      }
    710      if (pc < cb->base()) {
    711        return -1;
    712      }
    713      return 1;
    714    }
    715  };
    716 
    717  void swapAndWait() {
    718    // Both vectors are consistent for lookup at this point although their
    719    // contents are different: there is no way for the looked up PC to be
    720    // in the code segment that is getting registered, because the code
    721    // segment is not even fully created yet.
    722 
    723    // If a lookup happens before this instruction, then the
    724    // soon-to-become-former read-only pointer is used during the lookup,
    725    // which is valid.
    726 
    727    mutableCodeBlocks_ = const_cast<RawCodeBlockVector*>(
    728        readonlyCodeBlocks_.exchange(mutableCodeBlocks_));
    729 
    730    // If a lookup happens after this instruction, then the updated vector
    731    // is used, which is valid:
    732    // - in case of insertion, it means the new vector contains more data,
    733    // but it's fine since the code segment is getting registered and thus
    734    // isn't even fully created yet, so the code can't be running.
    735    // - in case of removal, it means the new vector contains one less
    736    // entry, but it's fine since unregistering means the code segment
    737    // isn't used by any live instance anymore, thus PC can't be in the
    738    // to-be-removed code segment's range.
    739 
    740    // A lookup could have happened on any of the two vectors. Wait for
    741    // observers to be done using any vector before mutating.
    742 
    743    while (numActiveLookups_ > 0) {
    744    }
    745  }
    746 
    747 public:
  // Starts with segments1_ playing the mutable role and segments2_ the
  // read-only role; the two swap roles on every insert()/remove() via
  // swapAndWait().
  ThreadSafeCodeBlockMap()
      : mutatorsMutex_(mutexid::WasmCodeBlockMap),
        mutableCodeBlocks_(&segments1_),
        readonlyCodeBlocks_(&segments2_),
        numActiveLookups_(0) {}
    753 
  ~ThreadSafeCodeBlockMap() {
    // No lookup may still be in flight when the map is destroyed.
    MOZ_RELEASE_ASSERT(numActiveLookups_ == 0);
    segments1_.clearAndFree();
    segments2_.clearAndFree();
  }
    759 
  // Number of lookup() calls currently in progress; swapAndWait() spins until
  // this reaches zero before reusing a vector for mutation.
  size_t numActiveLookups() const { return numActiveLookups_; }
    761 
  // Inserts `cs` into the map, keeping both vectors sorted by base address.
  // `cs` must not already be present (the MOZ_ALWAYS_FALSE search asserts
  // this).  Returns false on OOM, in which case the map is unchanged.
  bool insert(const CodeBlock* cs) {
    LockGuard<Mutex> lock(mutatorsMutex_);

    size_t index;
    MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeBlocks_, 0,
                                    mutableCodeBlocks_->length(),
                                    CodeBlockPC(cs->base()), &index));

    if (!mutableCodeBlocks_->insert(mutableCodeBlocks_->begin() + index, cs)) {
      return false;
    }

    // Publish the updated vector and drain in-flight lookups; afterwards
    // mutableCodeBlocks_ points at the other vector, which does not yet
    // contain `cs` and must be updated identically below.
    swapAndWait();

#ifdef DEBUG
    size_t otherIndex;
    MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeBlocks_, 0,
                                    mutableCodeBlocks_->length(),
                                    CodeBlockPC(cs->base()), &otherIndex));
    MOZ_ASSERT(index == otherIndex);
#endif

    // Although we could simply revert the insertion in the read-only
    // vector, it is simpler to just crash and given that each CodeBlock
    // consumes multiple pages, it is unlikely this insert() would OOM in
    // practice.
    AutoEnterOOMUnsafeRegion oom;
    if (!mutableCodeBlocks_->insert(mutableCodeBlocks_->begin() + index, cs)) {
      oom.crash("when inserting a CodeBlock in the process-wide map");
    }

    return true;
  }
    795 
  // Removes `cs` (which must be present; asserted by MOZ_ALWAYS_TRUE) from
  // both vectors.  Returns the number of code blocks remaining in the map
  // after the removal.
  size_t remove(const CodeBlock* cs) {
    LockGuard<Mutex> lock(mutatorsMutex_);

    size_t index;
    MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeBlocks_, 0,
                                   mutableCodeBlocks_->length(),
                                   CodeBlockPC(cs->base()), &index));

    mutableCodeBlocks_->erase(mutableCodeBlocks_->begin() + index);
    size_t newCodeBlockCount = mutableCodeBlocks_->length();

    // Publish the shrunken vector and drain in-flight lookups; afterwards
    // mutableCodeBlocks_ points at the other vector, which still contains
    // `cs` and must be erased from identically below.
    swapAndWait();

#ifdef DEBUG
    size_t otherIndex;
    MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeBlocks_, 0,
                                   mutableCodeBlocks_->length(),
                                   CodeBlockPC(cs->base()), &otherIndex));
    MOZ_ASSERT(index == otherIndex);
#endif

    mutableCodeBlocks_->erase(mutableCodeBlocks_->begin() + index);
    return newCodeBlockCount;
  }
    820 
  // Finds the CodeBlock whose code range contains `pc`, or returns nullptr.
  // If `codeRange` is non-null it receives the CodeRange for `pc` (or
  // nullptr on a miss).  Takes no lock; safe to call concurrently with
  // insert()/remove() thanks to the numActiveLookups_ protocol.
  const CodeBlock* lookup(const void* pc,
                          const CodeRange** codeRange = nullptr) {
    auto decObserver = mozilla::MakeScopeExit([&] {
      MOZ_ASSERT(numActiveLookups_ > 0);
      numActiveLookups_--;
    });
    // Register as an active observer *before* reading readonlyCodeBlocks_,
    // so swapAndWait() cannot start mutating the vector we are about to
    // read until we are done.
    numActiveLookups_++;

    const RawCodeBlockVector* readonly = readonlyCodeBlocks_;

    size_t index;
    if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeBlockPC(pc),
                        &index)) {
      if (codeRange) {
        *codeRange = nullptr;
      }
      return nullptr;
    }

    // It is fine returning a raw CodeBlock*, because we assume we are
    // looking up a live PC in code which is on the stack, keeping the
    // CodeBlock alive.

    const CodeBlock* result = (*readonly)[index];
    if (codeRange) {
      *codeRange = result->lookupRange(pc);
    }
    return result;
  }
    850 };
    851 
    852 // Jump tables that implement function tiering and fast js-to-wasm calls.
    853 //
    854 // There is one JumpTable object per Code object, holding two jump tables: the
    855 // tiering jump table and the jit-entry jump table.  The JumpTable is not
    856 // serialized with its Code, but is a run-time entity only.  At run-time it is
    857 // shared across threads with its owning Code (and the Module that owns the
    858 // Code).  Values in the JumpTable /must/ /always/ be JSContext-agnostic and
    859 // Instance-agnostic, because of this sharing.
    860 //
    861 // Both jump tables have a number of entries equal to the number of functions in
    862 // their Module, including imports.  In the tiering table, the elements
    863 // corresponding to the Module's imported functions are unused; in the jit-entry
    864 // table, the elements corresponding to the Module's non-exported functions are
    865 // unused.  (Functions can be exported explicitly via the exports section or
    866 // implicitly via a mention of their indices outside function bodies.)  See
    867 // comments at JumpTables::init() and WasmInstanceObject::getExportedFunction().
    868 // The entries are void*.  Unused entries are null.
    869 //
    870 // The tiering jump table.
    871 //
    872 // This table holds code pointers that are used by baseline functions to enter
    873 // optimized code.  See the large comment block in WasmCompile.cpp for
    874 // information about how tiering works.
    875 //
    876 // The jit-entry jump table.
    877 //
    878 // The jit-entry jump table entry for a function holds a stub that allows Jitted
    879 // JS code to call wasm using the JS JIT ABI.  See large comment block at
    880 // WasmInstanceObject::getExportedFunction() for more about exported functions
    881 // and stubs and the lifecycle of the entries in the jit-entry table - there are
    882 // complex invariants.
    883 
    884 class JumpTables {
    885  using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;
    886 
    887  CompileMode mode_;
    888  TablePointer tiering_;
    889  TablePointer jit_;
    890  size_t numFuncs_;
    891 
    892  static_assert(
    893      JumpTableJitEntryOffset == 0,
    894      "Each jit entry in table must have compatible layout with BaseScript and"
    895      "SelfHostedLazyScript");
    896 
    897 public:
    898  bool initialize(CompileMode mode, const CodeMetadata& codeMeta,
    899                  const CodeBlock& sharedStubs, const CodeBlock& tier1);
    900 
    901  void setJitEntry(size_t i, void* target) const {
    902    // Make sure that write is atomic; see comment in wasm::Module::finishTier2
    903    // to that effect.
    904    MOZ_ASSERT(i < numFuncs_);
    905    __atomic_store_n(&jit_.get()[i], target, __ATOMIC_RELAXED);
    906  }
    907  void setJitEntryIfNull(size_t i, void* target) const {
    908    // Make sure that compare-and-write is atomic; see comment in
    909    // wasm::Module::finishTier2 to that effect.
    910    MOZ_ASSERT(i < numFuncs_);
    911    void* expected = nullptr;
    912    (void)__atomic_compare_exchange_n(&jit_.get()[i], &expected, target,
    913                                      /*weak=*/false,
    914                                      /*success_memorder=*/__ATOMIC_RELAXED,
    915                                      /*failure_memorder=*/__ATOMIC_RELAXED);
    916  }
    917  void** getAddressOfJitEntry(size_t i) const {
    918    MOZ_ASSERT(i < numFuncs_);
    919    MOZ_ASSERT(jit_.get()[i]);
    920    return &jit_.get()[i];
    921  }
    922  uint32_t funcIndexFromJitEntry(void** target) const {
    923    MOZ_ASSERT(target >= &jit_.get()[0]);
    924    MOZ_ASSERT(target <= &(jit_.get()[numFuncs_ - 1]));
    925    size_t index = (intptr_t*)target - (intptr_t*)&jit_.get()[0];
    926    MOZ_ASSERT(index < wasm::MaxFuncs);
    927    return (uint32_t)index;
    928  }
    929 
    930  void setTieringEntry(size_t i, void* target) const {
    931    MOZ_ASSERT(i < numFuncs_);
    932    // See comment in wasm::Module::finishTier2.
    933    if (mode_ != CompileMode::Once) {
    934      tiering_.get()[i] = target;
    935    }
    936  }
    937  void** tiering() const { return tiering_.get(); }
    938 
    939  size_t sizeOfMiscExcludingThis() const {
    940    // 2 words per function for the jit entry table, plus maybe 1 per
    941    // function if we're tiering.
    942    return sizeof(void*) * (2 + (tiering_ ? 1 : 0)) * numFuncs_;
    943  }
    944 };
    945 
    946 // Code objects own executable code and the metadata that describe it. A single
    947 // Code object is normally shared between a module and all its instances.
    948 //
    949 // profilingLabels_ is lazily initialized, but behind a lock.
    950 
    951 using SharedCode = RefPtr<const Code>;
    952 using MutableCode = RefPtr<Code>;
    953 using MetadataAnalysisHashMap =
    954    HashMap<const char*, uint32_t, mozilla::CStringHasher, SystemAllocPolicy>;
    955 
class Code : public ShareableBase<Code> {
  // All mutable state of the Code, guarded by the reader/writer lock data_.
  struct ProtectedData {
    // A vector of all of the code blocks owned by this code. Each code block
    // is immutable once added to the vector, but this vector may grow.
    UniqueConstCodeBlockVector blocks;
    // A vector of link data paired 1:1 with `blocks`. Entries may be null if
    // the code block is not serializable. This is separate from CodeBlock so
    // that we may clear it out after serialization has happened.
    UniqueLinkDataVector blocksLinkData;

    // A vector of code segments that we can allocate lazy segments into
    SharedCodeSegmentVector lazyStubSegments;
    // A sorted vector of LazyFuncExport
    LazyFuncExportVector lazyExports;

    // A vector of code segments that we can lazily allocate functions into
    SharedCodeSegmentVector lazyFuncSegments;

    // Statistics for tiers of code.
    CompileAndLinkStats tier1Stats;
    CompileAndLinkStats tier2Stats;
  };
  using ReadGuard = RWExclusiveData<ProtectedData>::ReadGuard;
  using WriteGuard = RWExclusiveData<ProtectedData>::WriteGuard;

  // The compile mode this code is used with.
  const CompileMode mode_;

  // Core data that is not thread-safe and must acquire a lock in order to
  // access.
  RWExclusiveData<ProtectedData> data_;

  // Thread-safe mutable map from code pointer to code block that contains it.
  mutable ThreadSafeCodeBlockMap blockMap_;

  // Metadata for this module that is needed for the lifetime of Code. This is
  // always non-null.
  SharedCodeMetadata codeMeta_;
  // Metadata for this module that is needed for the lifetime of Code, and is
  // only available after the whole module has been decoded. This is always
  // non-null.
  SharedCodeTailMetadata codeTailMeta_;
  // This is null for a wasm module, non-null for asm.js
  SharedCodeMetadataForAsmJS codeMetaForAsmJS_;

  // Stubs shared by all tiers, and the complete tier-1 code block.  Both are
  // set by initialize() (presumably pointing into data_.blocks — confirm).
  const CodeBlock* sharedStubs_;
  const CodeBlock* completeTier1_;

  // [SMDOC] Tier-2 data
  //
  // hasCompleteTier2_ and completeTier2_ implement a three-state protocol for
  // broadcasting tier-2 data; this also amounts to a single-writer/
  // multiple-reader setup.
  //
  // Initially hasCompleteTier2_ is false and completeTier2_ is null.
  //
  // While hasCompleteTier2_ is false, *no* thread may read completeTier2_, but
  // one thread may make completeTier2_ non-null (this will be the tier-2
  // compiler thread).  That same thread must then later set hasCompleteTier2_
  // to true to broadcast the completeTier2_ value and its availability.  Note
  // that the writing thread may not itself read completeTier2_ before setting
  // hasCompleteTier2_, in order to simplify reasoning about global invariants.
  //
  // Once hasCompleteTier2_ is true, *no* thread may write completeTier2_ and
  // *no* thread may read completeTier2_ without having observed
  // hasCompleteTier2_ as true first.  Once hasCompleteTier2_ is true, it stays
  // true.
  mutable const CodeBlock* completeTier2_;
  mutable mozilla::Atomic<bool> hasCompleteTier2_;

  // State for every defined function (not imported) in this module. This is
  // only needed if we're doing partial tiering.
  mutable FuncStatesPointer funcStates_;

  FuncImportVector funcImports_;
  // Lazily-generated per-function labels for profilers; see
  // ensureProfilingLabels().
  ExclusiveData<CacheableCharsVector> profilingLabels_;
  JumpTables jumpTables_;

  // Where to redirect PC to for handling traps from the signal handler.
  uint8_t* trapCode_;

  // Offset of the debug stub in the `sharedStubs_` CodeBlock.  Not serialized.
  uint32_t debugStubOffset_;

  // Offset of the request-tier-up stub in the `sharedStubs_` CodeBlock.
  uint32_t requestTierUpStubOffset_;

  // Offset of the update-call-ref-metrics stub in the `sharedStubs_`
  // CodeBlock.
  uint32_t updateCallRefMetricsStubOffset_;

  // Methods for getting complete tiers, private while we're moving to partial
  // tiering.
  Tiers completeTiers() const;

  // Takes ownership of `block` (and its optional link data), adding it to
  // the protected block list.
  [[nodiscard]] bool addCodeBlock(const WriteGuard& guard,
                                  UniqueCodeBlock block,
                                  UniqueLinkData maybeLinkData) const;

  [[nodiscard]] const LazyFuncExport* lookupLazyFuncExport(
      const WriteGuard& guard, uint32_t funcIndex) const;

  // Returns a pointer to the raw interpreter entry of a given function for
  // which stubs have been lazily generated.
  [[nodiscard]] void* lookupLazyInterpEntry(const WriteGuard& guard,
                                            uint32_t funcIndex) const;

  [[nodiscard]] bool createOneLazyEntryStub(const WriteGuard& guard,
                                            uint32_t funcExportIndex,
                                            const CodeBlock& tierCodeBlock,
                                            void** interpEntry) const;
  [[nodiscard]] bool createManyLazyEntryStubs(
      const WriteGuard& guard, const Uint32Vector& funcExportIndices,
      const CodeBlock& tierCodeBlock, size_t* stubBlockIndex) const;
  // Create one lazy stub for all the functions in funcExportIndices, putting
  // them in a single stub. Jit entries won't be used until
  // setJitEntries() is actually called, after the Code owner has committed
  // tier2.
  [[nodiscard]] bool createTier2LazyEntryStubs(
      const WriteGuard& guard, const CodeBlock& tier2Code,
      mozilla::Maybe<size_t>* outStubBlockIndex) const;
  [[nodiscard]] bool appendProfilingLabels(
      const ExclusiveData<CacheableCharsVector>::Guard& labels,
      const CodeBlock& codeBlock) const;

  void printStats() const;

 public:
  Code(CompileMode mode, const CodeMetadata& codeMeta,
       const CodeTailMetadata& codeTailMeta,
       const CodeMetadataForAsmJS* codeMetaForAsmJS);
  ~Code();

  // Installs the shared stubs and tier-1 code; must succeed before the Code
  // is usable.
  [[nodiscard]] bool initialize(FuncImportVector&& funcImports,
                                UniqueCodeBlock sharedStubs,
                                UniqueLinkData sharedStubsLinkData,
                                UniqueCodeBlock tier1CodeBlock,
                                UniqueLinkData tier1LinkData,
                                const CompileAndLinkStats& tier1Stats);
  // Publishes tier-2 code per the [SMDOC] Tier-2 data protocol above.
  [[nodiscard]] bool finishTier2(UniqueCodeBlock tier2CodeBlock,
                                 UniqueLinkData tier2LinkData,
                                 const CompileAndLinkStats& tier2Stats) const;

  [[nodiscard]] bool getOrCreateInterpEntry(uint32_t funcIndex,
                                            const FuncExport** funcExport,
                                            void** interpEntry) const;

  SharedCodeSegment createFuncCodeSegmentFromPool(
      jit::MacroAssembler& masm, const LinkData& linkData,
      bool allowLastDitchGC, uint8_t** codeStartOut,
      uint32_t* codeLengthOut) const;

  bool requestTierUp(uint32_t funcIndex) const;

  CompileMode mode() const { return mode_; }

  void** tieringJumpTable() const { return jumpTables_.tiering(); }

  // Thin forwarders to jumpTables_; see JumpTables for the atomicity rules.
  void setJitEntryIfNull(size_t i, void* target) const {
    jumpTables_.setJitEntryIfNull(i, target);
  }
  void** getAddressOfJitEntry(size_t i) const {
    return jumpTables_.getAddressOfJitEntry(i);
  }
  uint32_t funcIndexFromJitEntry(void** jitEntry) const {
    return jumpTables_.funcIndexFromJitEntry(jitEntry);
  }

  uint8_t* trapCode() const { return trapCode_; }

  uint32_t debugStubOffset() const { return debugStubOffset_; }
  void setDebugStubOffset(uint32_t offs) { debugStubOffset_ = offs; }

  uint32_t requestTierUpStubOffset() const { return requestTierUpStubOffset_; }
  void setRequestTierUpStubOffset(uint32_t offs) {
    requestTierUpStubOffset_ = offs;
  }

  uint32_t updateCallRefMetricsStubOffset() const {
    return updateCallRefMetricsStubOffset_;
  }
  void setUpdateCallRefMetricsStubOffset(uint32_t offs) {
    updateCallRefMetricsStubOffset_ = offs;
  }

  const FuncImport& funcImport(uint32_t funcIndex) const {
    return funcImports_[funcIndex];
  }
  const FuncImportVector& funcImports() const { return funcImports_; }

  bool hasCompleteTier(Tier tier) const;
  // The 'stable' complete tier of code. This is stable during a run.
  Tier stableCompleteTier() const;
  // The 'best' complete tier of code. This may transition from baseline to ion
  // at any time.
  Tier bestCompleteTier() const;
  bool hasSerializableCode() const { return hasCompleteTier(Tier::Serialized); }

  const CodeMetadata& codeMeta() const { return *codeMeta_; }
  const CodeMetadataForAsmJS* codeMetaForAsmJS() const {
    return codeMetaForAsmJS_;
  }
  const CodeTailMetadata& codeTailMeta() const { return *codeTailMeta_; }
  bool debugEnabled() const { return codeTailMeta_->debugEnabled; }

  const CodeBlock& sharedStubs() const { return *sharedStubs_; }
  const CodeBlock& debugCodeBlock() const {
    MOZ_ASSERT(debugEnabled());
    MOZ_ASSERT(completeTier1_->tier() == Tier::Debug);
    return *completeTier1_;
  }
  const CodeBlock& completeTierCodeBlock(Tier tier) const;
  // The code block holding the current best code for `funcIndex`: the shared
  // stubs for imports, the per-function best tier under lazy tiering,
  // otherwise the best complete tier.
  const CodeBlock& funcCodeBlock(uint32_t funcIndex) const {
    if (funcIndex < funcImports_.length()) {
      return *sharedStubs_;
    }
    if (mode_ == CompileMode::LazyTiering) {
      return *funcStates_.get()[funcIndex - codeMeta_->numFuncImports].bestTier;
    }
    return completeTierCodeBlock(bestCompleteTier());
  }
  bool funcHasTier(uint32_t funcIndex, Tier tier) const {
    if (funcIndex < funcImports_.length()) {
      return false;
    }
    return funcCodeBlock(funcIndex).tier() == tier;
  }
  Tier funcTier(uint32_t funcIndex) const {
    MOZ_ASSERT(funcIndex >= funcImports_.length());
    return funcCodeBlock(funcIndex).tier();
  }
  void funcCodeRange(uint32_t funcIndex, const wasm::CodeRange** range,
                     uint8_t** codeBase) const {
    const CodeBlock& codeBlock = funcCodeBlock(funcIndex);
    *range = &codeBlock.codeRanges[codeBlock.funcToCodeRange[funcIndex]];
    *codeBase = codeBlock.base();
  }

  const LinkData* codeBlockLinkData(const CodeBlock& block) const;
  void clearLinkData() const;

  // Code metadata lookup: each method first maps `pc` to its owning
  // CodeBlock via blockMap_, then delegates to that block.
  bool lookupCallSite(void* pc, CallSite* callSite) const {
    const CodeBlock* block = blockMap_.lookup(pc);
    if (!block) {
      return false;
    }
    return block->lookupCallSite(pc, callSite);
  }
  const CodeRange* lookupFuncRange(void* pc) const {
    const CodeBlock* block = blockMap_.lookup(pc);
    if (!block) {
      return nullptr;
    }
    // Only function ranges are interesting here; other range kinds are
    // filtered out.
    const CodeRange* result = block->lookupRange(pc);
    if (result && result->isFunction()) {
      return result;
    }
    return nullptr;
  }
  const StackMap* lookupStackMap(uint8_t* pc) const {
    const CodeBlock* block = blockMap_.lookup(pc);
    if (!block) {
      return nullptr;
    }
    return block->lookupStackMap(pc);
  }
  const wasm::TryNote* lookupTryNote(void* pc, const CodeBlock** block) const {
    *block = blockMap_.lookup(pc);
    if (!*block) {
      return nullptr;
    }
    return (*block)->lookupTryNote(pc);
  }
  bool lookupTrap(void* pc, Trap* kindOut, TrapSite* trapOut) const {
    const CodeBlock* block = blockMap_.lookup(pc);
    if (!block) {
      return false;
    }
    return block->lookupTrap(pc, kindOut, trapOut);
  }
  const CodeRangeUnwindInfo* lookupUnwindInfo(void* pc) const {
    const CodeBlock* block = blockMap_.lookup(pc);
    if (!block) {
      return nullptr;
    }
    return block->lookupUnwindInfo(pc);
  }

  // To save memory, profilingLabels_ are generated lazily when profiling mode
  // is enabled.

  void ensureProfilingLabels(bool profilingEnabled) const;
  const char* profilingLabel(uint32_t funcIndex) const;

  // Wasm disassembly support

  void disassemble(JSContext* cx, Tier tier, int kindSelection,
                   PrintCallback printString) const;

  // Wasm metadata size analysis
  MetadataAnalysisHashMap metadataAnalysis(JSContext* cx) const;

  // about:memory reporting:

  void addSizeOfMiscIfNotSeen(
      mozilla::MallocSizeOf mallocSizeOf, CodeMetadata::SeenSet* seenCodeMeta,
      CodeMetadataForAsmJS::SeenSet* seenCodeMetaForAsmJS,
      Code::SeenSet* seenCode, size_t* code, size_t* data) const;

  size_t tier1CodeMemoryUsed() const {
    return completeTier1_->segment->capacityBytes();
  }

  WASM_DECLARE_FRIEND_SERIALIZE_ARGS(SharedCode,
                                     const wasm::LinkData& sharedStubsLinkData,
                                     const wasm::LinkData& optimizedLinkData);
};
   1274 
   1275 void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm);
   1276 
   1277 }  // namespace wasm
   1278 }  // namespace js
   1279 
   1280 #endif  // wasm_code_h