tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WasmCodegenTypes.h (60568B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 *
      4 * Copyright 2021 Mozilla Foundation
      5 *
      6 * Licensed under the Apache License, Version 2.0 (the "License");
      7 * you may not use this file except in compliance with the License.
      8 * You may obtain a copy of the License at
      9 *
     10 *     http://www.apache.org/licenses/LICENSE-2.0
     11 *
     12 * Unless required by applicable law or agreed to in writing, software
     13 * distributed under the License is distributed on an "AS IS" BASIS,
     14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     15 * See the License for the specific language governing permissions and
     16 * limitations under the License.
     17 */
     18 
     19 #ifndef wasm_codegen_types_h
     20 #define wasm_codegen_types_h
     21 
     22 #include "mozilla/CheckedInt.h"
     23 #include "mozilla/EnumeratedArray.h"
     24 #include "mozilla/Span.h"
     25 
     26 #include <stdint.h>
     27 
     28 #include "jit/IonTypes.h"
     29 #include "jit/PerfSpewer.h"
     30 #include "threading/ExclusiveData.h"
     31 #include "wasm/WasmBuiltins.h"
     32 #include "wasm/WasmCodegenConstants.h"
     33 #include "wasm/WasmConstants.h"
     34 #include "wasm/WasmInstanceData.h"
     35 #include "wasm/WasmSerialize.h"
     36 #include "wasm/WasmShareable.h"
     37 #include "wasm/WasmTypeDef.h"
     38 #include "wasm/WasmUtility.h"
     39 
     40 namespace js {
     41 
     42 namespace jit {
     43 template <class VecT>
     44 class ABIArgIter;
     45 }  // namespace jit
     46 
     47 namespace wasm {
     48 
     49 struct CodeMetadata;
     50 struct TableDesc;
     51 struct V128;
     52 
// ArgTypeVector type.
//
// Functions usually receive one ABI argument per WebAssembly argument.  However
// if a function has multiple results and some of those results go to the stack,
// then it additionally receives a synthetic ABI argument holding a pointer to
// the stack result area.
//
// Given the presence of synthetic arguments, sometimes we need a name for
// non-synthetic arguments.  We call those "natural" arguments.

// Whether a function signature requires the synthetic stack-result-pointer
// argument described above.
enum class StackResults { HasStackResults, NoStackResults };
     64 
     65 class ArgTypeVector {
     66  const ValTypeVector& args_;
     67  bool hasStackResults_;
     68 
     69  // To allow ABIArgIter<VecT, jit::ABIKind>, we define a private
     70  // length() method.  To prevent accidental errors, other users need to be
     71  // explicit and call lengthWithStackResults() or
     72  // lengthWithoutStackResults().
     73  size_t length() const { return args_.length() + size_t(hasStackResults_); }
     74  template <class VecT>
     75  friend class jit::ABIArgIter;
     76 
     77 public:
     78  ArgTypeVector(const ValTypeVector& args, StackResults stackResults)
     79      : args_(args),
     80        hasStackResults_(stackResults == StackResults::HasStackResults) {}
     81  explicit ArgTypeVector(const FuncType& funcType);
     82 
     83  bool hasSyntheticStackResultPointerArg() const { return hasStackResults_; }
     84  StackResults stackResults() const {
     85    return hasSyntheticStackResultPointerArg() ? StackResults::HasStackResults
     86                                               : StackResults::NoStackResults;
     87  }
     88  size_t lengthWithoutStackResults() const { return args_.length(); }
     89  bool isSyntheticStackResultPointerArg(size_t idx) const {
     90    // The pointer to stack results area, if present, is a synthetic argument
     91    // tacked on at the end.
     92    MOZ_ASSERT(idx < lengthWithStackResults());
     93    return idx == args_.length();
     94  }
     95  bool isNaturalArg(size_t idx) const {
     96    return !isSyntheticStackResultPointerArg(idx);
     97  }
     98  size_t naturalIndex(size_t idx) const {
     99    MOZ_ASSERT(isNaturalArg(idx));
    100    // Because the synthetic argument, if present, is tacked on the end, an
    101    // argument index that isn't synthetic is natural.
    102    return idx;
    103  }
    104 
    105  size_t lengthWithStackResults() const { return length(); }
    106  jit::MIRType operator[](size_t i) const {
    107    MOZ_ASSERT(i < lengthWithStackResults());
    108    if (isSyntheticStackResultPointerArg(i)) {
    109      return jit::MIRType::StackResults;
    110    }
    111    return args_[naturalIndex(i)].toMIRType();
    112  }
    113 };
    114 
    115 // A wrapper around the bytecode offset of a wasm instruction within a whole
    116 // module, used for trap offsets or call offsets. These offsets should refer to
    117 // the first byte of the instruction that triggered the trap / did the call and
    118 // should ultimately derive from OpIter::bytecodeOffset.
    119 
    120 class BytecodeOffset {
    121  static const uint32_t INVALID = UINT32_MAX;
    122  static_assert(INVALID > wasm::MaxModuleBytes);
    123  uint32_t offset_;
    124 
    125  WASM_CHECK_CACHEABLE_POD(offset_);
    126 
    127 public:
    128  BytecodeOffset() : offset_(INVALID) {}
    129  explicit BytecodeOffset(uint32_t offset) : offset_(offset) {}
    130 
    131  bool isValid() const { return offset_ != INVALID; }
    132  uint32_t offset() const {
    133    MOZ_ASSERT(isValid());
    134    return offset_;
    135  }
    136 };
    137 
WASM_DECLARE_CACHEABLE_POD(BytecodeOffset);

// Convenience aliases for collections of BytecodeOffset: a plain vector, a
// read-only view, and a refcounted (shareable) vector in immutable and
// mutable flavors.
using BytecodeOffsetVector =
    mozilla::Vector<BytecodeOffset, 4, SystemAllocPolicy>;
using BytecodeOffsetSpan = mozilla::Span<const BytecodeOffset>;
using ShareableBytecodeOffsetVector =
    ShareableVector<BytecodeOffset, 4, SystemAllocPolicy>;
using SharedBytecodeOffsetVector = RefPtr<const ShareableBytecodeOffsetVector>;
using MutableBytecodeOffsetVector = RefPtr<ShareableBytecodeOffsetVector>;
    146 
// A TrapMachineInsn describes roughly what kind of machine instruction has
// caused a trap.  This is used only for validation of trap placement in debug
// builds, in ModuleGenerator::finishMetadataTier, and is not necessary for
// execution of wasm code.
enum class TrapMachineInsn {
  // The "official" undefined insn for the target, or something equivalent
  // that we use for that purpose.  The key property is that it always raises
  // SIGILL when executed.  For example, UD2 on Intel.
  OfficialUD,
  // Loads and stores that move 8, 16, 32, 64 or 128 bits of data, regardless
  // of their type and how they are subsequently used (widened or duplicated).
  Load8,
  Load16,
  Load32,
  Load64,
  Load128,
  Store8,
  Store16,
  Store32,
  Store64,
  Store128,
  // Any kind of atomic r-m-w or CAS memory transaction, but not including
  // Load-Linked or Store-Checked style insns -- those count as plain LoadX
  // and StoreX.
  Atomic
};
// Debug-only per-trap-site storage of TrapMachineInsn (see TrapSitesForKind).
using TrapMachineInsnVector =
    mozilla::Vector<TrapMachineInsn, 0, SystemAllocPolicy>;
    175 
    176 static inline TrapMachineInsn TrapMachineInsnForLoad(int byteSize) {
    177  switch (byteSize) {
    178    case 1:
    179      return TrapMachineInsn::Load8;
    180    case 2:
    181      return TrapMachineInsn::Load16;
    182    case 4:
    183      return TrapMachineInsn::Load32;
    184    case 8:
    185      return TrapMachineInsn::Load64;
    186    case 16:
    187      return TrapMachineInsn::Load128;
    188    default:
    189      MOZ_CRASH("TrapMachineInsnForLoad");
    190  }
    191 }
    192 static inline TrapMachineInsn TrapMachineInsnForLoadWord() {
    193  return TrapMachineInsnForLoad(sizeof(void*));
    194 }
    195 
    196 static inline TrapMachineInsn TrapMachineInsnForStore(int byteSize) {
    197  switch (byteSize) {
    198    case 1:
    199      return TrapMachineInsn::Store8;
    200    case 2:
    201      return TrapMachineInsn::Store16;
    202    case 4:
    203      return TrapMachineInsn::Store32;
    204    case 8:
    205      return TrapMachineInsn::Store64;
    206    case 16:
    207      return TrapMachineInsn::Store128;
    208    default:
    209      MOZ_CRASH("TrapMachineInsnForStore");
    210  }
    211 }
    212 static inline TrapMachineInsn TrapMachineInsnForStoreWord() {
    213  return TrapMachineInsnForStore(sizeof(void*));
    214 }
    215 #ifdef DEBUG
    216 const char* ToString(Trap trap);
    217 const char* ToString(TrapMachineInsn tmi);
    218 #endif
    219 
    220 // This holds an assembler buffer offset, which indicates the offset of a
    221 // faulting instruction, and is used for the construction of TrapSites below.
    222 // It is wrapped up as a new type only to avoid getting it confused with any
    223 // other uint32_t or with CodeOffset.
    224 
    225 class FaultingCodeOffset {
    226  static constexpr uint32_t INVALID = UINT32_MAX;
    227  uint32_t offset_;
    228 
    229 public:
    230  FaultingCodeOffset() : offset_(INVALID) {}
    231  explicit FaultingCodeOffset(uint32_t offset) : offset_(offset) {
    232    MOZ_ASSERT(offset != INVALID);
    233  }
    234  bool isValid() const { return offset_ != INVALID; }
    235  uint32_t get() const {
    236    MOZ_ASSERT(isValid());
    237    return offset_;
    238  }
    239 };
    240 static_assert(sizeof(FaultingCodeOffset) == 4);
    241 
// And this holds two such offsets.  Needed for 64-bit integer transactions on
// 32-bit targets.
using FaultingCodeOffsetPair =
    std::pair<FaultingCodeOffset, FaultingCodeOffset>;
// The static_assert documents that std::pair introduces no padding here.
static_assert(sizeof(FaultingCodeOffsetPair) == 8);

// The bytecode offsets of all the callers of a function that has been inlined.
// See CallSiteDesc/TrapSiteDesc for uses of this.
using InlinedCallerOffsets = BytecodeOffsetVector;
    251 
    252 // An index into InliningContext to get an InlinedCallerOffsets. This may be
    253 // 'None' to indicate an empty InlinedCallerOffsets.
    254 struct InlinedCallerOffsetIndex {
    255 private:
    256  // Sentinel value for an empty InlinedCallerOffsets.
    257  static constexpr uint32_t NONE = UINT32_MAX;
    258 
    259  uint32_t value_;
    260 
    261 public:
    262  // The maximum value allowed here, checked by assertions. InliningContext
    263  // will OOM if this value is exceeded.
    264  static constexpr uint32_t MAX = UINT32_MAX - 1;
    265 
    266  // Construct 'none'.
    267  InlinedCallerOffsetIndex() : value_(NONE) {}
    268 
    269  // Construct a non-'none' value. The value must be less than or equal to MAX.
    270  explicit InlinedCallerOffsetIndex(uint32_t index) : value_(index) {
    271    MOZ_RELEASE_ASSERT(index <= MAX);
    272  }
    273 
    274  // The value of this index, if it is not nothing.
    275  uint32_t value() const {
    276    MOZ_RELEASE_ASSERT(!isNone());
    277    return value_;
    278  }
    279 
    280  // Whether this value is none or not.
    281  bool isNone() const { return value_ == NONE; }
    282 };
    283 static_assert(sizeof(InlinedCallerOffsetIndex) == sizeof(uint32_t));
    284 
// A hash map from some index (either call site or trap site) to
// InlinedCallerOffsetIndex.
//
// The map is sparse: sites whose index is 'none' get no entry at all (see
// TrapSitesForKind::append).
using InlinedCallerOffsetsIndexHashMap =
    mozilla::HashMap<uint32_t, InlinedCallerOffsetIndex,
                     mozilla::DefaultHasher<uint32_t>, SystemAllocPolicy>;
    290 
// A collection of InlinedCallerOffsets for a code block.
//
// The collection is mutable while being built and must be transitioned to
// immutable (setImmutable) before interior pointers are handed out via
// operator[].
class InliningContext {
  using Storage = mozilla::Vector<InlinedCallerOffsets, 0, SystemAllocPolicy>;
  Storage storage_;
  // Guards against mutation after interior pointers may have escaped.
  bool mutable_ = true;

 public:
  InliningContext() = default;

  bool empty() const { return storage_.empty(); }
  uint32_t length() const { return storage_.length(); }

  // One-way transition; asserts if called twice.
  void setImmutable() {
    MOZ_RELEASE_ASSERT(mutable_);
    mutable_ = false;
  }

  // Look up the offsets for `index`.  `index` must not be 'none' (value()
  // release-asserts this) and must be in bounds.
  const InlinedCallerOffsets* operator[](InlinedCallerOffsetIndex index) const {
    // Don't give out interior pointers into the vector until we've
    // transitioned to immutable.
    MOZ_RELEASE_ASSERT(!mutable_);
    // Index must be in bounds.
    MOZ_RELEASE_ASSERT(index.value() < length());
    return &storage_[index.value()];
  }

  // Append an entry, returning its index through `index`.  An empty offsets
  // vector is not stored; it is represented by the 'none' index.  Returns
  // false on OOM or when the index space is exhausted.
  [[nodiscard]] bool append(InlinedCallerOffsets&& inlinedCallerOffsets,
                            InlinedCallerOffsetIndex* index) {
    MOZ_RELEASE_ASSERT(mutable_);

    // Skip adding an entry if the offset vector is empty and just return an
    // 'none' index.
    if (inlinedCallerOffsets.empty()) {
      *index = InlinedCallerOffsetIndex();
      return true;
    }

    // OOM if we'll be growing beyond the maximum index allowed, or if we
    // fail to append.
    if (storage_.length() == InlinedCallerOffsetIndex::MAX ||
        !storage_.append(std::move(inlinedCallerOffsets))) {
      return false;
    }
    *index = InlinedCallerOffsetIndex(storage_.length() - 1);
    return true;
  }

  // Move all of `other`'s entries onto the end of this context.  Returns
  // false on OOM or if the combined length exceeds the maximum index.
  [[nodiscard]] bool appendAll(InliningContext&& other) {
    MOZ_RELEASE_ASSERT(mutable_);
    if (!storage_.appendAll(std::move(other.storage_))) {
      return false;
    }

    // OOM if we just grew beyond the maximum index allowed.
    return storage_.length() <= InlinedCallerOffsetIndex::MAX;
  }

  void swap(InliningContext& other) {
    MOZ_RELEASE_ASSERT(mutable_);
    storage_.swap(other.storage_);
  }

  void shrinkStorageToFit() { storage_.shrinkStorageToFit(); }

  void clear() {
    MOZ_RELEASE_ASSERT(mutable_);
    storage_.clear();
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return storage_.sizeOfExcludingThis(mallocSizeOf);
  }
};
    364 
    365 // The fields of a TrapSite that do not depend on code generation.
    366 
    367 struct TrapSiteDesc {
    368  explicit TrapSiteDesc(BytecodeOffset bytecodeOffset,
    369                        InlinedCallerOffsetIndex inlinedCallerOffsetsIndex =
    370                            InlinedCallerOffsetIndex())
    371      : bytecodeOffset(bytecodeOffset),
    372        inlinedCallerOffsetsIndex(inlinedCallerOffsetsIndex) {}
    373  TrapSiteDesc() : TrapSiteDesc(BytecodeOffset(0)) {};
    374 
    375  bool isValid() const { return bytecodeOffset.isValid(); }
    376 
    377  BytecodeOffset bytecodeOffset;
    378  InlinedCallerOffsetIndex inlinedCallerOffsetsIndex;
    379 };
    380 
    381 using MaybeTrapSiteDesc = mozilla::Maybe<TrapSiteDesc>;
    382 
    383 // A TrapSite represents a wasm instruction at a given bytecode offset that
    384 // can fault at the given pc offset.  When such a fault occurs, a signal/
    385 // exception handler looks up the TrapSite to confirm the fault is intended/
    386 // safe and redirects pc to the trap stub.
    387 
    388 struct TrapSite : TrapSiteDesc {
    389  // If this trap site is in a function that was inlined, these are the call
    390  // site bytecode offsets of the caller functions that this trap site was
    391  // inlined into. The direct ancestor of this function (i.e. the one
    392  // directly above it on the stack) is the last entry in the vector.
    393  const InlinedCallerOffsets* inlinedCallerOffsets = nullptr;
    394 
    395  BytecodeOffsetSpan inlinedCallerOffsetsSpan() const {
    396    if (!inlinedCallerOffsets) {
    397      return BytecodeOffsetSpan();
    398    }
    399    return BytecodeOffsetSpan(inlinedCallerOffsets->begin(),
    400                              inlinedCallerOffsets->end());
    401  }
    402 };
    403 
    404 // A collection of TrapSite for a specific trap kind that is optimized for
    405 // compact storage.
    406 //
    407 // The individual fields are split to be in their own vectors to minimize
    408 // overhead due to alignment for small fields like TrapMachineInsn.
    409 class TrapSitesForKind {
    410  // Define our own vectors without any inline storage so they can be used
    411  // with swap.
    412  using Uint32Vector = Vector<uint32_t, 0, SystemAllocPolicy>;
    413  using BytecodeOffsetVector =
    414      mozilla::Vector<BytecodeOffset, 0, SystemAllocPolicy>;
    415 
    416 #ifdef DEBUG
    417  TrapMachineInsnVector machineInsns_;
    418 #endif
    419  Uint32Vector pcOffsets_;
    420  BytecodeOffsetVector bytecodeOffsets_;
    421  InlinedCallerOffsetsIndexHashMap inlinedCallerOffsetsMap_;
    422 
    423 public:
    424  explicit TrapSitesForKind() = default;
    425 
    426  // We limit the maximum amount of trap sites to fit in a uint32_t for better
    427  // compaction of the sparse hash map. This is dynamically enforced, but
    428  // should be safe. The maximum executable memory in a process is at most
    429  // ~2GiB, a trapping machine instruction is at least a byte (realistically
    430  // much more), which would put the limit of trap sites far below UINT32_MAX.
    431  // We subtract one so that this check is not idempotent on 32-bit systems.
    432  static constexpr size_t MAX_LENGTH = UINT32_MAX - 1;
    433 
    434  uint32_t length() const {
    435    size_t result = pcOffsets_.length();
    436    // Enforced by dynamic checks in mutation functions.
    437    MOZ_ASSERT(result <= MAX_LENGTH);
    438    return (uint32_t)result;
    439  }
    440 
    441  bool empty() const { return pcOffsets_.empty(); }
    442 
    443  [[nodiscard]]
    444  bool reserve(size_t length) {
    445    // See comment on MAX_LENGTH for details.
    446    if (length > MAX_LENGTH) {
    447      return false;
    448    }
    449 
    450 #ifdef DEBUG
    451    if (!machineInsns_.reserve(length)) {
    452      return false;
    453    }
    454 #endif
    455    return pcOffsets_.reserve(length) && bytecodeOffsets_.reserve(length);
    456  }
    457 
    458  [[nodiscard]]
    459  bool append(TrapMachineInsn insn, uint32_t pcOffset,
    460              const TrapSiteDesc& desc) {
    461    MOZ_ASSERT(desc.bytecodeOffset.isValid());
    462 
    463 #ifdef DEBUG
    464    if (!machineInsns_.append(insn)) {
    465      return false;
    466    }
    467 #endif
    468 
    469    uint32_t index = length();
    470 
    471    // Add an entry in our map for the trap's inlined caller offsets.
    472    if (!desc.inlinedCallerOffsetsIndex.isNone() &&
    473        !inlinedCallerOffsetsMap_.putNew(index,
    474                                         desc.inlinedCallerOffsetsIndex)) {
    475      return false;
    476    }
    477 
    478    return pcOffsets_.append(pcOffset) &&
    479           bytecodeOffsets_.append(desc.bytecodeOffset);
    480  }
    481 
    482  [[nodiscard]]
    483  bool appendAll(TrapSitesForKind&& other, uint32_t baseCodeOffset,
    484                 InlinedCallerOffsetIndex baseInlinedCallerOffsetIndex) {
    485    // See comment on MAX_LENGTH for details.
    486    mozilla::CheckedUint32 newLength =
    487        mozilla::CheckedUint32(length()) + other.length();
    488    if (!newLength.isValid() || newLength.value() > MAX_LENGTH) {
    489      return false;
    490    }
    491 
    492 #ifdef DEBUG
    493    if (!machineInsns_.appendAll(other.machineInsns_)) {
    494      return false;
    495    }
    496 #endif
    497 
    498    // Copy over the map of `other`s inlined caller offsets. The keys are trap
    499    // site indices, and must be updated for the base index that `other` is
    500    // being inserted into. The values are inlined caller offsets and must be
    501    // updated for the base inlined caller offset that the associated inlining
    502    // context was added to. See ModuleGenerator::linkCompiledCode.
    503    uint32_t baseTrapSiteIndex = length();
    504    for (auto iter = other.inlinedCallerOffsetsMap_.modIter(); !iter.done();
    505         iter.next()) {
    506      uint32_t newTrapSiteIndex = baseTrapSiteIndex + iter.get().key();
    507      uint32_t newInlinedCallerOffsetIndex =
    508          iter.get().value().value() + baseInlinedCallerOffsetIndex.value();
    509 
    510      if (!inlinedCallerOffsetsMap_.putNew(newTrapSiteIndex,
    511                                           newInlinedCallerOffsetIndex)) {
    512        return false;
    513      }
    514    }
    515 
    516    // Add the baseCodeOffset to the pcOffsets that we are adding to ourselves.
    517    for (uint32_t& pcOffset : other.pcOffsets_) {
    518      pcOffset += baseCodeOffset;
    519    }
    520 
    521    return pcOffsets_.appendAll(other.pcOffsets_) &&
    522           bytecodeOffsets_.appendAll(other.bytecodeOffsets_);
    523  }
    524 
    525  void clear() {
    526 #ifdef DEBUG
    527    machineInsns_.clear();
    528 #endif
    529    pcOffsets_.clear();
    530    bytecodeOffsets_.clear();
    531    inlinedCallerOffsetsMap_.clear();
    532  }
    533 
    534  void swap(TrapSitesForKind& other) {
    535 #ifdef DEBUG
    536    machineInsns_.swap(other.machineInsns_);
    537 #endif
    538    pcOffsets_.swap(other.pcOffsets_);
    539    bytecodeOffsets_.swap(other.bytecodeOffsets_);
    540    inlinedCallerOffsetsMap_.swap(other.inlinedCallerOffsetsMap_);
    541  }
    542 
    543  void shrinkStorageToFit() {
    544 #ifdef DEBUG
    545    machineInsns_.shrinkStorageToFit();
    546 #endif
    547    pcOffsets_.shrinkStorageToFit();
    548    bytecodeOffsets_.shrinkStorageToFit();
    549    inlinedCallerOffsetsMap_.compact();
    550  }
    551 
    552  bool lookup(uint32_t trapInstructionOffset,
    553              const InliningContext& inliningContext, TrapSite* trapOut) const;
    554 
    555  void checkInvariants(const uint8_t* codeBase) const;
    556 
    557  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    558    size_t result = 0;
    559 #ifdef DEBUG
    560    result += machineInsns_.sizeOfExcludingThis(mallocSizeOf);
    561 #endif
    562    ShareableBytecodeOffsetVector::SeenSet seen;
    563    return result + pcOffsets_.sizeOfExcludingThis(mallocSizeOf) +
    564           bytecodeOffsets_.sizeOfExcludingThis(mallocSizeOf) +
    565           inlinedCallerOffsetsMap_.shallowSizeOfExcludingThis(mallocSizeOf);
    566  }
    567 
    568  WASM_DECLARE_FRIEND_SERIALIZE(TrapSitesForKind);
    569 };
    570 
// A collection of TrapSite for any kind of trap and optimized for
// compact storage.
//
// This is a thin dispatcher: it holds one TrapSitesForKind per Trap value
// and forwards every operation to each of them.
class TrapSites {
  using TrapSiteVectorArray =
      mozilla::EnumeratedArray<Trap, TrapSitesForKind, size_t(Trap::Limit)>;

  TrapSiteVectorArray array_;

 public:
  explicit TrapSites() = default;

  // True iff no trap sites of any kind have been recorded.
  bool empty() const {
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      if (!array_[trap].empty()) {
        return false;
      }
    }

    return true;
  }

  // Pre-reserve storage for `length` sites of kind `trap`.
  [[nodiscard]]
  bool reserve(Trap trap, size_t length) {
    return array_[trap].reserve(length);
  }

  // Record one trap site of kind `trap`; returns false on OOM.
  [[nodiscard]]
  bool append(Trap trap, TrapMachineInsn insn, uint32_t pcOffset,
              const TrapSiteDesc& desc) {
    return array_[trap].append(insn, pcOffset, desc);
  }

  // Append all of `other`'s sites, per kind, rebasing offsets/indices.
  [[nodiscard]]
  bool appendAll(TrapSites&& other, uint32_t baseCodeOffset,
                 InlinedCallerOffsetIndex baseInlinedCallerOffsetIndex) {
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      if (!array_[trap].appendAll(std::move(other.array_[trap]), baseCodeOffset,
                                  baseInlinedCallerOffsetIndex)) {
        return false;
      }
    }
    return true;
  }

  void clear() {
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      array_[trap].clear();
    }
  }

  void swap(TrapSites& rhs) {
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      array_[trap].swap(rhs.array_[trap]);
    }
  }

  void shrinkStorageToFit() {
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      array_[trap].shrinkStorageToFit();
    }
  }

  // Search all kinds for a site at `trapInstructionOffset`; on success,
  // reports the kind through `kindOut` and the site through `trapOut`.
  [[nodiscard]]
  bool lookup(uint32_t trapInstructionOffset,
              const InliningContext& inliningContext, Trap* kindOut,
              TrapSite* trapOut) const {
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      const TrapSitesForKind& trapSitesForKind = array_[trap];
      if (trapSitesForKind.lookup(trapInstructionOffset, inliningContext,
                                  trapOut)) {
        *kindOut = trap;
        return true;
      }
    }
    return false;
  }

  void checkInvariants(const uint8_t* codeBase) const {
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      array_[trap].checkInvariants(codeBase);
    }
  }

  // Total number of sites across all kinds.
  size_t sumOfLengths() const {
    size_t result = 0;
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      result += array_[trap].length();
    }
    return result;
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    size_t result = 0;
    for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
      result += array_[trap].sizeOfExcludingThis(mallocSizeOf);
    }
    return result;
  }

  WASM_DECLARE_FRIEND_SERIALIZE(TrapSites);
};
    672 
// A record of a jump instruction (at `jumpOffset` within the compiled code)
// targeting the function with index `targetFuncIndex`.
// NOTE(review): presumably patched when final code addresses are known (cf.
// CodeRange::FarJumpIsland) -- confirm against ModuleGenerator.
struct CallFarJump {
  // Index of the function the jump must reach.
  uint32_t targetFuncIndex;
  // Code offset of the jump instruction.
  uint32_t jumpOffset;
  WASM_CHECK_CACHEABLE_POD(targetFuncIndex, jumpOffset);

  CallFarJump(uint32_t targetFuncIndex, uint32_t jumpOffset)
      : targetFuncIndex(targetFuncIndex), jumpOffset(jumpOffset) {}
};
WASM_DECLARE_CACHEABLE_POD(CallFarJump);

using CallFarJumpVector = Vector<CallFarJump, 0, SystemAllocPolicy>;
    684 
// Records where in generated code the offset of a CallRefMetrics must be
// patched in, or nothing at all until setOffset() is called.
class CallRefMetricsPatch {
 private:
  // The offset of where to patch in the offset of the CallRefMetrics.
  uint32_t offsetOfOffsetPatch_;
  // Sentinel meaning "no patch location recorded yet".
  static constexpr uint32_t NO_OFFSET = UINT32_MAX;

  WASM_CHECK_CACHEABLE_POD(offsetOfOffsetPatch_);

 public:
  explicit CallRefMetricsPatch() : offsetOfOffsetPatch_(NO_OFFSET) {}

  bool hasOffsetOfOffsetPatch() const {
    return offsetOfOffsetPatch_ != NO_OFFSET;
  }
  // NOTE: does not assert validity; callers should check
  // hasOffsetOfOffsetPatch() first.
  uint32_t offsetOfOffsetPatch() const { return offsetOfOffsetPatch_; }
  // Record the patch location.  May be called at most once; the sentinel
  // value is rejected.
  void setOffset(uint32_t indexOffset) {
    MOZ_ASSERT(!hasOffsetOfOffsetPatch());
    MOZ_ASSERT(indexOffset != NO_OFFSET);
    offsetOfOffsetPatch_ = indexOffset;
  }
};

using CallRefMetricsPatchVector =
    Vector<CallRefMetricsPatch, 0, SystemAllocPolicy>;
    709 
    710 class AllocSitePatch {
    711 private:
    712  uint32_t patchOffset_;
    713  static constexpr uint32_t NO_OFFSET = UINT32_MAX;
    714 
    715 public:
    716  explicit AllocSitePatch() : patchOffset_(NO_OFFSET) {}
    717 
    718  bool hasPatchOffset() const { return patchOffset_ != NO_OFFSET; }
    719  uint32_t patchOffset() const { return patchOffset_; }
    720  void setPatchOffset(uint32_t offset) {
    721    MOZ_ASSERT(!hasPatchOffset());
    722    MOZ_ASSERT(offset != NO_OFFSET);
    723    patchOffset_ = offset;
    724  }
    725 };
    726 
    727 using AllocSitePatchVector = Vector<AllocSitePatch, 0, SystemAllocPolicy>;
    728 
// On trap, the bytecode offset to be reported in callstacks is saved.

// Data describing a single wasm trap, filled in by the trap machinery for
// use when reporting the trap and resuming or unwinding.
struct TrapData {
  // The resumePC indicates where, if the trap doesn't throw, the trap stub
  // should jump to after restoring all register state.
  void* resumePC;

  // The unwoundPC is the PC after adjustment by wasm::StartUnwinding(), which
  // basically unwinds partially-constructed wasm::Frames when pc is in the
  // prologue/epilogue. Stack traces during a trap should use this PC since
  // it corresponds to the JitActivation::wasmExitFP.
  void* unwoundPC;

  // The kind of trap that occurred and its site metadata.
  Trap trap;
  TrapSite trapSite;

  // A return_call_indirect from the first function in an activation into
  // a signature mismatch may leave us with only one frame. This frame is
  // validly constructed, but has no debug frame yet.
  bool failedUnwindSignatureMismatch;
};
    750 
// The (,Callable,Func)Offsets classes are used to record the offsets of
// different key points in a CodeRange during compilation.

// Base case: just the extent of the code range.
struct Offsets {
  explicit Offsets(uint32_t begin = 0, uint32_t end = 0)
      : begin(begin), end(end) {}

  // These define a [begin, end) contiguous range of instructions compiled
  // into a CodeRange.
  uint32_t begin;
  uint32_t end;

  WASM_CHECK_CACHEABLE_POD(begin, end);
};

WASM_DECLARE_CACHEABLE_POD(Offsets);
    767 
// Offsets for a callable code range: adds the return-point offset.
struct CallableOffsets : Offsets {
  // The Offsets base is implicitly constructed with its (0, 0) defaults.
  MOZ_IMPLICIT CallableOffsets(uint32_t ret = 0) : ret(ret) {}

  // The offset of the return instruction precedes 'end' by a variable number
  // of instructions due to out-of-line codegen.
  uint32_t ret;

  WASM_CHECK_CACHEABLE_POD_WITH_PARENT(Offsets, ret);
};

WASM_DECLARE_CACHEABLE_POD(CallableOffsets);
    779 
// Offsets for an import exit: adds the entry point that skips the initial
// prologue check.
struct ImportOffsets : CallableOffsets {
  // The CallableOffsets/Offsets bases are implicitly zero-constructed via
  // their default arguments.
  MOZ_IMPLICIT ImportOffsets() : afterFallbackCheck(0) {}

  // The entry point after initial prologue check.
  uint32_t afterFallbackCheck;

  WASM_CHECK_CACHEABLE_POD_WITH_PARENT(CallableOffsets, afterFallbackCheck);
};

WASM_DECLARE_CACHEABLE_POD(ImportOffsets);
    790 
    791 struct FuncOffsets : CallableOffsets {
    792  MOZ_IMPLICIT FuncOffsets() : uncheckedCallEntry(0), tierEntry(0) {}
    793 
    794  // Function CodeRanges have a checked call entry which takes an extra
    795  // signature argument which is checked against the callee's signature before
    796  // falling through to the normal prologue. The checked call entry is thus at
    797  // the beginning of the CodeRange and the unchecked call entry is at some
    798  // offset after the checked call entry.
    799  //
    800  // Note that there won't always be a checked call entry because not all
    801  // functions require them. See GenerateFunctionPrologue.
    802  uint32_t uncheckedCallEntry;
    803 
    804  // The tierEntry is the point within a function to which the patching code
    805  // within a Tier-1 function jumps.  It could be the instruction following
    806  // the jump in the Tier-1 function, or the point following the standard
    807  // prologue within a Tier-2 function.
    808  uint32_t tierEntry;
    809 
    810  WASM_CHECK_CACHEABLE_POD_WITH_PARENT(CallableOffsets, uncheckedCallEntry,
    811                                       tierEntry);
    812 };
    813 
    814 WASM_DECLARE_CACHEABLE_POD(FuncOffsets);
    815 
// Vector of per-function offsets with no inline storage (heap-only), one
// entry per compiled function.
using FuncOffsetsVector = Vector<FuncOffsets, 0, SystemAllocPolicy>;
    817 
    818 // A CodeRange describes a single contiguous range of code within a wasm
    819 // module's code segment. A CodeRange describes what the code does and, for
    820 // function bodies, the name and source coordinates of the function.
    821 
class CodeRange {
 public:
  enum Kind {
    Function,                  // function definition
    InterpEntry,               // calls into wasm from C++
    JitEntry,                  // calls into wasm from jit code
    ImportInterpExit,          // slow-path calling from wasm into C++ interp
    ImportJitExit,             // fast-path calling from wasm into jit code
    BuiltinThunk,              // fast-path calling from wasm into a C++ native
    TrapExit,                  // calls C++ to report and jumps to throw stub
    DebugStub,                 // calls C++ to handle debug event
    RequestTierUpStub,         // calls C++ to request tier-2 compilation
    UpdateCallRefMetricsStub,  // updates a CallRefMetrics
    FarJumpIsland,  // inserted to connect otherwise out-of-range insns
    Throw           // special stack-unwinding stub jumped to by other stubs
  };

 private:
  // All fields are treated as cacheable POD:
  uint32_t begin_;
  uint32_t ret_;
  uint32_t end_;
  // Kind-dependent payload. Which alternative is valid is discriminated by
  // kind_; each accessor below asserts the matching kind before reading. The
  // WASM_CHECK_CACHEABLE_POD list names every alternative so the whole union
  // is covered by the POD check.
  union {
    struct {
      uint32_t funcIndex_;
      union {
        struct {
          // Entry points are stored as 16-bit deltas from begin_ to keep
          // sizeof(CodeRange) small; accessors add begin_ back in.
          uint16_t beginToUncheckedCallEntry_;
          uint16_t beginToTierEntry_;
          bool hasUnwindInfo_;
        } func;
        uint16_t jitExitEntry_;
      };
    };
    Trap trap_;
  } u;
  // 8-bit bitfield keeps the discriminant compact next to the union.
  Kind kind_ : 8;

  WASM_CHECK_CACHEABLE_POD(begin_, ret_, end_, u.funcIndex_,
                           u.func.beginToUncheckedCallEntry_,
                           u.func.beginToTierEntry_, u.func.hasUnwindInfo_,
                           u.trap_, kind_);

 public:
  CodeRange() = default;
  CodeRange(Kind kind, Offsets offsets);
  CodeRange(Kind kind, uint32_t funcIndex, Offsets offsets);
  CodeRange(Kind kind, CallableOffsets offsets);
  CodeRange(Kind kind, uint32_t funcIndex, CallableOffsets);
  CodeRange(Kind kind, uint32_t funcIndex, ImportOffsets offsets);
  CodeRange(uint32_t funcIndex, FuncOffsets offsets, bool hasUnwindInfo);

  // Shift every absolute code offset in this range by `offset` (the 16-bit
  // begin-relative deltas in the union need no adjustment).
  void offsetBy(uint32_t offset) {
    begin_ += offset;
    end_ += offset;
    if (hasReturn()) {
      ret_ += offset;
    }
  }

  // All CodeRanges have a begin and end.

  uint32_t begin() const { return begin_; }
  uint32_t end() const { return end_; }

  // Other fields are only available for certain CodeRange::Kinds.

  Kind kind() const { return kind_; }

  bool isFunction() const { return kind() == Function; }
  bool isImportExit() const {
    return kind() == ImportJitExit || kind() == ImportInterpExit ||
           kind() == BuiltinThunk;
  }
  bool isImportInterpExit() const { return kind() == ImportInterpExit; }
  bool isImportJitExit() const { return kind() == ImportJitExit; }
  bool isTrapExit() const { return kind() == TrapExit; }
  bool isDebugStub() const { return kind() == DebugStub; }
  bool isRequestTierUpStub() const { return kind() == RequestTierUpStub; }
  bool isUpdateCallRefMetricsStub() const {
    return kind() == UpdateCallRefMetricsStub;
  }
  bool isThunk() const { return kind() == FarJumpIsland; }

  // Functions, import exits, debug stubs and JitEntry stubs have standard
  // callable prologues and epilogues. Asynchronous frame iteration needs to
  // know the offset of the return instruction to calculate the frame pointer.

  bool hasReturn() const {
    return isFunction() || isImportExit() || isDebugStub() ||
           isRequestTierUpStub() || isUpdateCallRefMetricsStub() ||
           isJitEntry();
  }
  uint32_t ret() const {
    MOZ_ASSERT(hasReturn());
    return ret_;
  }

  // Functions, export stubs and import stubs all have an associated function
  // index.

  bool isJitEntry() const { return kind() == JitEntry; }
  bool isInterpEntry() const { return kind() == InterpEntry; }
  bool isEntry() const { return isInterpEntry() || isJitEntry(); }
  bool hasFuncIndex() const {
    return isFunction() || isImportExit() || isEntry();
  }
  uint32_t funcIndex() const {
    MOZ_ASSERT(hasFuncIndex());
    return u.funcIndex_;
  }

  // TrapExit CodeRanges have a Trap field.

  Trap trap() const {
    MOZ_ASSERT(isTrapExit());
    return u.trap_;
  }

  // Function CodeRanges have two entry points: one for normal calls (with a
  // known signature) and one for table calls (which involves dynamic
  // signature checking).

  uint32_t funcCheckedCallEntry() const {
    MOZ_ASSERT(isFunction());
    // not all functions have the checked call prologue;
    // see GenerateFunctionPrologue
    MOZ_ASSERT(u.func.beginToUncheckedCallEntry_ != 0);
    return begin_;
  }
  uint32_t funcUncheckedCallEntry() const {
    MOZ_ASSERT(isFunction());
    return begin_ + u.func.beginToUncheckedCallEntry_;
  }
  uint32_t funcTierEntry() const {
    MOZ_ASSERT(isFunction());
    return begin_ + u.func.beginToTierEntry_;
  }
  bool funcHasUnwindInfo() const {
    MOZ_ASSERT(isFunction());
    return u.func.hasUnwindInfo_;
  }
  uint32_t importJitExitEntry() const {
    MOZ_ASSERT(isImportJitExit());
    return begin_ + u.jitExitEntry_;
  }

  // A sorted array of CodeRanges can be looked up via BinarySearch and
  // OffsetInCode.

  // Comparator adapter: an OffsetInCode compares equal to any CodeRange whose
  // half-open [begin, end) interval contains the offset.
  struct OffsetInCode {
    size_t offset;
    explicit OffsetInCode(size_t offset) : offset(offset) {}
    bool operator==(const CodeRange& rhs) const {
      return offset >= rhs.begin() && offset < rhs.end();
    }
    bool operator<(const CodeRange& rhs) const { return offset < rhs.begin(); }
  };
};
    981 
    982 WASM_DECLARE_CACHEABLE_POD(CodeRange);
    983 WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
    984 
// Binary-search `codeRanges` (which must be sorted by code offset) for the
// CodeRange whose [begin, end) interval contains `target`; see
// CodeRange::OffsetInCode for the matching rule. Defined elsewhere —
// presumably returns nullptr when no range matches; confirm at the
// definition before relying on that.
extern const CodeRange* LookupInSorted(const CodeRangeVector& codeRanges,
                                       CodeRange::OffsetInCode target);
    987 
    988 // While the frame-pointer chain allows the stack to be unwound without
    989 // metadata, Error.stack still needs to know the line/column of every call in
    990 // the chain. A CallSiteDesc describes a single callsite to which CallSite adds
    991 // the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
    992 // adds the function index of the callee.
    993 
enum class CallSiteKind : uint8_t {
  Func,           // pc-relative call to a specific function
  Import,         // wasm import call
  Indirect,       // dynamic callee called via register, context on stack
  IndirectFast,   // dynamically determined to be same-instance
  FuncRef,        // call using direct function reference
  FuncRefFast,    // call using direct function reference within same-instance
  ReturnFunc,     // return call to a specific function
  ReturnStub,     // return call trampoline
  Symbolic,       // call to a single symbolic callee
  EnterFrame,     // call to an enter-frame handler
  LeaveFrame,     // call to a leave-frame handler
  CollapseFrame,  // call to a leave-frame handler during tail call
  StackSwitch,    // stack switch point
  Breakpoint,     // call to instruction breakpoint
  RequestTierUp   // call to request tier-2 compilation of this function
};
   1011 
   1012 WASM_DECLARE_CACHEABLE_POD(CallSiteKind);
   1013 WASM_DECLARE_POD_VECTOR(CallSiteKind, CallSiteKindVector)
   1014 
class CallSiteDesc {
  // The source line number or wasm bytecode offset this call site is at.
  uint32_t lineOrBytecode_;
  // If this call site has been inlined into another function, the inlined
  // caller functions. The direct ancestor of this function (i.e. the one
  // directly above it on the stack) is the last entry in the vector.
  InlinedCallerOffsetIndex inlinedCallerOffsetsIndex_;
  CallSiteKind kind_;

 public:
  // Some call sites do not have a bytecode offset associated with them
  // (such as ones in import function wrappers). We represent them using '0' as
  // the bytecode offset. This should never be confused with a real offset,
  // because the binary format has overhead from the magic number and section
  // headers.
  static constexpr uint32_t NO_LINE_OR_BYTECODE = 0;
  static constexpr uint32_t FIRST_VALID_BYTECODE_OFFSET =
      NO_LINE_OR_BYTECODE + 1;
  static_assert(NO_LINE_OR_BYTECODE < sizeof(wasm::MagicNumber));
  // Limit lines or bytecodes to the maximum module size.
  static constexpr uint32_t MAX_LINE_OR_BYTECODE_VALUE = wasm::MaxModuleBytes;

  // The MOZ_ASSERTs in the constructors below guard against truncation: the
  // stored fields must round-trip the values they were handed.
  CallSiteDesc()
      : lineOrBytecode_(NO_LINE_OR_BYTECODE), kind_(CallSiteKind::Func) {}
  explicit CallSiteDesc(CallSiteKind kind)
      : lineOrBytecode_(NO_LINE_OR_BYTECODE), kind_(kind) {
    MOZ_ASSERT(kind == CallSiteKind(kind_));
  }
  CallSiteDesc(uint32_t lineOrBytecode, CallSiteKind kind)
      : lineOrBytecode_(lineOrBytecode), kind_(kind) {
    MOZ_ASSERT(kind == CallSiteKind(kind_));
    MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
  }
  CallSiteDesc(BytecodeOffset bytecodeOffset, CallSiteKind kind)
      : lineOrBytecode_(bytecodeOffset.offset()), kind_(kind) {
    MOZ_ASSERT(kind == CallSiteKind(kind_));
    MOZ_ASSERT(bytecodeOffset.offset() == lineOrBytecode_);
  }
  CallSiteDesc(uint32_t lineOrBytecode,
               InlinedCallerOffsetIndex inlinedCallerOffsetsIndex,
               CallSiteKind kind)
      : lineOrBytecode_(lineOrBytecode),
        inlinedCallerOffsetsIndex_(inlinedCallerOffsetsIndex),
        kind_(kind) {
    MOZ_ASSERT(kind == CallSiteKind(kind_));
    MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
  }
  // NOTE(review): unlike the overload above, this one takes the inlined
  // caller index as a raw uint32_t rather than InlinedCallerOffsetIndex,
  // presumably relying on an implicit conversion — consider unifying.
  CallSiteDesc(BytecodeOffset bytecodeOffset,
               uint32_t inlinedCallerOffsetsIndex, CallSiteKind kind)
      : lineOrBytecode_(bytecodeOffset.offset()),
        inlinedCallerOffsetsIndex_(inlinedCallerOffsetsIndex),
        kind_(kind) {
    MOZ_ASSERT(kind == CallSiteKind(kind_));
    MOZ_ASSERT(bytecodeOffset.offset() == lineOrBytecode_);
  }
  uint32_t lineOrBytecode() const { return lineOrBytecode_; }
  InlinedCallerOffsetIndex inlinedCallerOffsetsIndex() const {
    return inlinedCallerOffsetsIndex_;
  }
  // Convert to the equivalent trap-site descriptor (same bytecode offset and
  // inlined-caller chain).
  TrapSiteDesc toTrapSiteDesc() const {
    return TrapSiteDesc(wasm::BytecodeOffset(lineOrBytecode()),
                        inlinedCallerOffsetsIndex_);
  }
  CallSiteKind kind() const { return kind_; }
  bool isImportCall() const { return kind() == CallSiteKind::Import; }
  bool isIndirectCall() const { return kind() == CallSiteKind::Indirect; }
  bool isFuncRefCall() const { return kind() == CallSiteKind::FuncRef; }
  bool isReturnStub() const { return kind() == CallSiteKind::ReturnStub; }
  bool isStackSwitch() const { return kind() == CallSiteKind::StackSwitch; }
  bool mightBeCrossInstance() const {
    return isImportCall() || isIndirectCall() || isFuncRefCall() ||
           isReturnStub() || isStackSwitch();
  }
};
   1089 
   1090 using CallSiteDescVector = mozilla::Vector<CallSiteDesc, 0, SystemAllocPolicy>;
   1091 
   1092 class CallSite : public CallSiteDesc {
   1093  uint32_t returnAddressOffset_;
   1094  const InlinedCallerOffsets* inlinedCallerOffsets_;
   1095 
   1096  CallSite(const CallSiteDesc& desc, uint32_t returnAddressOffset,
   1097           const InlinedCallerOffsets* inlinedCallerOffsets)
   1098      : CallSiteDesc(desc),
   1099        returnAddressOffset_(returnAddressOffset),
   1100        inlinedCallerOffsets_(inlinedCallerOffsets) {}
   1101  friend class CallSites;
   1102 
   1103 public:
   1104  CallSite() : returnAddressOffset_(0), inlinedCallerOffsets_(nullptr) {}
   1105 
   1106  uint32_t returnAddressOffset() const { return returnAddressOffset_; }
   1107  BytecodeOffsetSpan inlinedCallerOffsetsSpan() const {
   1108    if (!inlinedCallerOffsets_) {
   1109      return BytecodeOffsetSpan();
   1110    }
   1111    return BytecodeOffsetSpan(inlinedCallerOffsets_->begin(),
   1112                              inlinedCallerOffsets_->end());
   1113  }
   1114  const InlinedCallerOffsets* inlinedCallerOffsets() const {
   1115    return inlinedCallerOffsets_;
   1116  }
   1117 };
   1118 
   1119 // A collection of CallSite that is optimized for compact storage.
   1120 //
   1121 // The individual fields are split to be in their own vectors to minimize
   1122 // overhead due to alignment for small fields like CallSiteKind.
   1123 //
   1124 // The `inlinedCallerOffsets` field is split into a sparse hash map as it's
   1125 // expected that many call sites will not be in inlined functions.
class CallSites {
  // Define our own Uint32Vector without any inline storage so it can be used
  // with swap.
  using Uint32Vector = Vector<uint32_t, 0, SystemAllocPolicy>;

  // Parallel arrays: entry i of each vector together describes call site i.
  CallSiteKindVector kinds_;
  Uint32Vector lineOrBytecodes_;
  Uint32Vector returnAddressOffsets_;
  // Sparse map keyed by call site index; only call sites inside inlined
  // functions have an entry.
  InlinedCallerOffsetsIndexHashMap inlinedCallerOffsetsMap_;

 public:
  explicit CallSites() = default;

  // We limit the maximum amount of call sites to fit in a uint32_t for better
  // compaction of the sparse hash map. This is dynamically enforced, but
  // should be safe. The maximum executable memory in a process is at most
  // ~2GiB, a machine call instruction is at least two bytes (realistically
  // much more), which would put the limit of call sites far below UINT32_MAX.
  // We subtract one so that the `> MAX_LENGTH` checks below are not vacuously
  // false on 32-bit systems, where a length can never exceed UINT32_MAX
  // anyway.
  static constexpr size_t MAX_LENGTH = UINT32_MAX - 1;

  uint32_t length() const {
    size_t result = kinds_.length();
    // Enforced by dynamic checks in mutation functions.
    MOZ_ASSERT(result <= MAX_LENGTH);
    return (uint32_t)result;
  }

  bool empty() const { return kinds_.empty(); }

  // Field-wise accessors for call site `index`.
  CallSiteKind kind(size_t index) const { return kinds_[index]; }
  BytecodeOffset bytecodeOffset(size_t index) const {
    return BytecodeOffset(lineOrBytecodes_[index]);
  }
  uint32_t returnAddressOffset(size_t index) const {
    return returnAddressOffsets_[index];
  }

  // Materialize a complete CallSite for `index`, resolving its inlined
  // caller offsets (if any) through `inliningContext`.
  CallSite get(size_t index, const InliningContext& inliningContext) const {
    InlinedCallerOffsetIndex inlinedCallerOffsetsIndex;
    const InlinedCallerOffsets* inlinedCallerOffsets = nullptr;
    if (auto entry = inlinedCallerOffsetsMap_.readonlyThreadsafeLookup(index)) {
      inlinedCallerOffsetsIndex = entry->value();
      inlinedCallerOffsets = inliningContext[entry->value()];
    }
    return CallSite(CallSiteDesc(lineOrBytecodes_[index],
                                 inlinedCallerOffsetsIndex, kinds_[index]),
                    returnAddressOffsets_[index], inlinedCallerOffsets);
  }

  // Find the call site whose return address offset is `returnAddressOffset`;
  // defined out of line.
  [[nodiscard]]
  bool lookup(uint32_t returnAddressOffset,
              const InliningContext& inliningContext, CallSite* callSite) const;

  // Append one call site; returns false on OOM or when MAX_LENGTH is reached.
  // NOTE(review): if a vector append fails after the hash map insertion
  // succeeded, the map keeps an entry for an index that was never added —
  // callers are presumably expected to discard the whole object on failure.
  [[nodiscard]]
  bool append(const CallSiteDesc& callSiteDesc, uint32_t returnAddressOffset) {
    // See comment on MAX_LENGTH for details.
    if (length() == MAX_LENGTH) {
      return false;
    }

    uint32_t index = length();

    // If there are inline caller offsets, then insert an entry in our hash map.
    InlinedCallerOffsetIndex inlinedCallerOffsetsIndex =
        callSiteDesc.inlinedCallerOffsetsIndex();
    if (!inlinedCallerOffsetsIndex.isNone() &&
        !inlinedCallerOffsetsMap_.putNew(index, inlinedCallerOffsetsIndex)) {
      return false;
    }

    return kinds_.append(callSiteDesc.kind()) &&
           lineOrBytecodes_.append(callSiteDesc.lineOrBytecode()) &&
           returnAddressOffsets_.append(returnAddressOffset);
  }

  // Append the contents of `other`, rebasing its code offsets by
  // `baseCodeOffset` and its inlined caller offset indices by
  // `baseInlinedCallerOffsetIndex`. `other` is consumed (its offsets are
  // mutated in place before copying).
  [[nodiscard]]
  bool appendAll(CallSites&& other, uint32_t baseCodeOffset,
                 InlinedCallerOffsetIndex baseInlinedCallerOffsetIndex) {
    // See comment on MAX_LENGTH for details.
    mozilla::CheckedUint32 newLength =
        mozilla::CheckedUint32(length()) + other.length();
    if (!newLength.isValid() || newLength.value() > MAX_LENGTH) {
      return false;
    }

    // Copy over the map of `other`s inlined caller offsets. The keys are call
    // site indices, and must be updated for the base index that `other` is
    // being inserted into. The values are inlined caller offsets and must be
    // updated for the base inlined caller offset that the associated inlining
    // context was added to. See ModuleGenerator::linkCompiledCode.
    uint32_t baseCallSiteIndex = length();
    for (auto iter = other.inlinedCallerOffsetsMap_.modIter(); !iter.done();
         iter.next()) {
      uint32_t newCallSiteIndex = iter.get().key() + baseCallSiteIndex;
      uint32_t newInlinedCallerOffsetIndex =
          iter.get().value().value() + baseInlinedCallerOffsetIndex.value();

      if (!inlinedCallerOffsetsMap_.putNew(newCallSiteIndex,
                                           newInlinedCallerOffsetIndex)) {
        return false;
      }
    }

    // Add the baseCodeOffset to the pcOffsets that we are adding to ourselves.
    for (uint32_t& pcOffset : other.returnAddressOffsets_) {
      pcOffset += baseCodeOffset;
    }

    return kinds_.appendAll(other.kinds_) &&
           lineOrBytecodes_.appendAll(other.lineOrBytecodes_) &&
           returnAddressOffsets_.appendAll(other.returnAddressOffsets_);
  }

  void swap(CallSites& other) {
    kinds_.swap(other.kinds_);
    lineOrBytecodes_.swap(other.lineOrBytecodes_);
    returnAddressOffsets_.swap(other.returnAddressOffsets_);
    inlinedCallerOffsetsMap_.swap(other.inlinedCallerOffsetsMap_);
  }

  void clear() {
    kinds_.clear();
    lineOrBytecodes_.clear();
    returnAddressOffsets_.clear();
    inlinedCallerOffsetsMap_.clear();
  }

  // Pre-allocate storage for `length` call sites in the parallel vectors
  // (the sparse map is not reserved — its population is unknown up front).
  [[nodiscard]]
  bool reserve(size_t length) {
    // See comment on MAX_LENGTH for details.
    if (length > MAX_LENGTH) {
      return false;
    }

    return kinds_.reserve(length) && lineOrBytecodes_.reserve(length) &&
           returnAddressOffsets_.reserve(length);
  }

  void shrinkStorageToFit() {
    kinds_.shrinkStorageToFit();
    lineOrBytecodes_.shrinkStorageToFit();
    returnAddressOffsets_.shrinkStorageToFit();
    inlinedCallerOffsetsMap_.compact();
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return kinds_.sizeOfExcludingThis(mallocSizeOf) +
           lineOrBytecodes_.sizeOfExcludingThis(mallocSizeOf) +
           returnAddressOffsets_.sizeOfExcludingThis(mallocSizeOf) +
           inlinedCallerOffsetsMap_.shallowSizeOfExcludingThis(mallocSizeOf);
  }

  // Debug-only consistency checks: parallel vectors have equal lengths,
  // return address offsets are non-decreasing, and every map key refers to a
  // valid call site with a real (non-None) value.
  void checkInvariants() const {
#ifdef DEBUG
    MOZ_ASSERT(kinds_.length() == lineOrBytecodes_.length());
    MOZ_ASSERT(kinds_.length() == returnAddressOffsets_.length());
    uint32_t last = 0;
    for (uint32_t returnAddressOffset : returnAddressOffsets_) {
      MOZ_ASSERT(returnAddressOffset >= last);
      last = returnAddressOffset;
    }
    for (auto iter = inlinedCallerOffsetsMap_.iter(); !iter.done();
         iter.next()) {
      MOZ_ASSERT(iter.get().key() < length());
      MOZ_ASSERT(!iter.get().value().isNone());
    }
#endif
  }

  WASM_DECLARE_FRIEND_SERIALIZE(CallSites);
};
   1298 
   1299 // A CallSiteTarget describes the callee of a CallSite, either a function or a
   1300 // trap exit. Although checked in debug builds, a CallSiteTarget doesn't
   1301 // officially know whether it targets a function or trap, relying on the Kind of
   1302 // the CallSite to discriminate.
   1303 
class CallSiteTarget {
  // Either a function index or a Trap enumerator value, depending on which
  // constructor was used; release builds carry no discriminant (see the class
  // comment above), so the debug-only kind_ checks correct usage.
  uint32_t packed_;

  WASM_CHECK_CACHEABLE_POD(packed_);
#ifdef DEBUG
  enum Kind { None, FuncIndex, TrapExit } kind_;
  WASM_CHECK_CACHEABLE_POD(kind_);
#endif

 public:
  // No target; accessors will assert in debug builds.
  explicit CallSiteTarget()
      : packed_(UINT32_MAX)
#ifdef DEBUG
        ,
        kind_(None)
#endif
  {
  }

  // Target a function by its index.
  explicit CallSiteTarget(uint32_t funcIndex)
      : packed_(funcIndex)
#ifdef DEBUG
        ,
        kind_(FuncIndex)
#endif
  {
  }

  // Target a trap exit.
  explicit CallSiteTarget(Trap trap)
      : packed_(uint32_t(trap))
#ifdef DEBUG
        ,
        kind_(TrapExit)
#endif
  {
  }

  uint32_t funcIndex() const {
    MOZ_ASSERT(kind_ == FuncIndex);
    return packed_;
  }

  Trap trap() const {
    MOZ_ASSERT(kind_ == TrapExit);
    MOZ_ASSERT(packed_ < uint32_t(Trap::Limit));
    return Trap(packed_);
  }
};
   1352 
   1353 WASM_DECLARE_CACHEABLE_POD(CallSiteTarget);
   1354 
   1355 using CallSiteTargetVector = Vector<CallSiteTarget, 0, SystemAllocPolicy>;
   1356 
   1357 // TryNotes are stored in a vector that acts as an exception table for
   1358 // wasm try-catch blocks. These represent the information needed to take
   1359 // exception handling actions after a throw is executed.
struct TryNote {
 private:
  // Sentinel value to detect a try note that has not been given a try body.
  static const uint32_t BEGIN_NONE = UINT32_MAX;

  // Sentinel value used in `entryPointOrIsDelegate_`.
  static const uint32_t IS_DELEGATE = UINT32_MAX;

  // Begin code offset of the try body.
  uint32_t begin_;
  // Exclusive end code offset of the try body.
  uint32_t end_;
  // Either a marker that this is a 'delegate' or else the code offset of the
  // landing pad to jump to.
  uint32_t entryPointOrIsDelegate_;
  // If this is a delegate, then this is the code offset to delegate to,
  // otherwise this is the offset from the frame pointer of the stack pointer
  // to use when jumping to the landing pad.
  uint32_t framePushedOrDelegateOffset_;

  WASM_CHECK_CACHEABLE_POD(begin_, end_, entryPointOrIsDelegate_,
                           framePushedOrDelegateOffset_);

 public:
  explicit TryNote()
      : begin_(BEGIN_NONE),
        end_(0),
        entryPointOrIsDelegate_(0),
        framePushedOrDelegateOffset_(0) {}

  // Returns whether a try note has been assigned a range for the try body.
  bool hasTryBody() const { return begin_ != BEGIN_NONE; }

  // The code offset of the beginning of the try body.
  uint32_t tryBodyBegin() const { return begin_; }

  // The code offset of the exclusive end of the try body.
  uint32_t tryBodyEnd() const { return end_; }

  // Returns whether an offset is within this try note's body. Note the
  // interval tested is (begin, end] — exclusive at begin, inclusive at end —
  // presumably because the offset checked is a return address pointing just
  // past a call; confirm against the unwinder before changing.
  bool offsetWithinTryBody(uint32_t offset) const {
    return offset > begin_ && offset <= end_;
  }

  // Check if the unwinder should delegate the handling of this try note to the
  // try note given at the delegate offset.
  bool isDelegate() const { return entryPointOrIsDelegate_ == IS_DELEGATE; }

  // The code offset to delegate the handling of this try note to.
  uint32_t delegateOffset() const {
    MOZ_ASSERT(isDelegate());
    return framePushedOrDelegateOffset_;
  }

  // The code offset of the entry to the landing pad.
  uint32_t landingPadEntryPoint() const {
    MOZ_ASSERT(!isDelegate());
    return entryPointOrIsDelegate_;
  }

  // The stack frame pushed amount at the entry to the landing pad.
  uint32_t landingPadFramePushed() const {
    MOZ_ASSERT(!isDelegate());
    return framePushedOrDelegateOffset_;
  }

  // Set the beginning of the try body.
  void setTryBodyBegin(uint32_t begin) {
    // There must not be a begin to the try body yet
    MOZ_ASSERT(begin_ == BEGIN_NONE);
    begin_ = begin;
  }

  // Set the end of the try body.
  void setTryBodyEnd(uint32_t end) {
    // There must be a begin to the try body
    MOZ_ASSERT(begin_ != BEGIN_NONE);
    end_ = end;
    // We do not allow empty try bodies
    MOZ_ASSERT(end_ > begin_);
  }

  // Mark this try note as a delegate, requesting the unwinder to use the try
  // note found at the delegate offset.
  void setDelegate(uint32_t delegateOffset) {
    entryPointOrIsDelegate_ = IS_DELEGATE;
    framePushedOrDelegateOffset_ = delegateOffset;
  }

  // Set the entry point and frame pushed of the landing pad.
  void setLandingPad(uint32_t entryPoint, uint32_t framePushed) {
    MOZ_ASSERT(!isDelegate());
    entryPointOrIsDelegate_ = entryPoint;
    framePushedOrDelegateOffset_ = framePushed;
  }

  // Adjust all code offsets in this try note by a delta. Note that for a
  // delegate the code offset lives in framePushedOrDelegateOffset_, hence the
  // branch below.
  void offsetBy(uint32_t offset) {
    begin_ += offset;
    end_ += offset;
    if (isDelegate()) {
      framePushedOrDelegateOffset_ += offset;
    } else {
      entryPointOrIsDelegate_ += offset;
    }
  }

  bool operator<(const TryNote& other) const {
    // Special case comparison with self. This avoids triggering the assertion
    // about non-intersection below. This case can arise in std::sort.
    if (this == &other) {
      return false;
    }
    // Try notes must be properly nested without touching at begin and end
    MOZ_ASSERT(end_ <= other.begin_ || begin_ >= other.end_ ||
               (begin_ > other.begin_ && end_ < other.end_) ||
               (other.begin_ > begin_ && other.end_ < end_));
    // A total order is therefore given solely by comparing end points. This
    // order will be such that the first try note to intersect a point is the
    // innermost try note for that point.
    return end_ < other.end_;
  }
};
   1483 
   1484 WASM_DECLARE_CACHEABLE_POD(TryNote);
   1485 WASM_DECLARE_POD_VECTOR(TryNote, TryNoteVector)
   1486 
// Associates a code offset with a stack-unwinding strategy.
class CodeRangeUnwindInfo {
 public:
  // NOTE(review): the precise register-restore semantics of each enumerator
  // are defined by the frame iteration code that consumes these entries;
  // consult it before relying on a particular meaning.
  enum UnwindHow {
    Normal,
    RestoreFpRa,
    RestoreFp,
    UseFpLr,
    UseFp,
  };

 private:
  uint32_t offset_;
  UnwindHow unwindHow_;

  WASM_CHECK_CACHEABLE_POD(offset_, unwindHow_);

 public:
  CodeRangeUnwindInfo(uint32_t offset, UnwindHow unwindHow)
      : offset_(offset), unwindHow_(unwindHow) {}

  // The code offset this entry refers to.
  uint32_t offset() const { return offset_; }
  UnwindHow unwindHow() const { return unwindHow_; }

  // Adjust all code offsets in this info by a delta.
  void offsetBy(uint32_t offset) { offset_ += offset; }
};
   1513 
   1514 WASM_DECLARE_CACHEABLE_POD(CodeRangeUnwindInfo);
   1515 WASM_DECLARE_POD_VECTOR(CodeRangeUnwindInfo, CodeRangeUnwindInfoVector)
   1516 
enum class CallIndirectIdKind {
  // Generate a no-op signature check prologue, asm.js function tables are
  // homogeneous.
  AsmJS,
  // Use a machine code immediate for the signature check, only works on simple
  // function types, without super types, and without siblings in their
  // recursion group.
  Immediate,
  // Use the full type definition and subtyping machinery when performing the
  // signature check.
  Global,
  // Don't generate any signature check prologue, for functions that cannot be
  // stored in tables.
  None
};
   1532 
// CallIndirectId describes how to compile a call_indirect and matching
// signature check in the function prologue for a given function type.

class CallIndirectId {
  // Discriminant selecting which union member below (if any) is active.
  CallIndirectIdKind kind_;
  union {
    // Active when kind_ == Immediate.
    size_t immediate_;
    // Active when kind_ == Global.
    struct {
      size_t instanceDataOffset_;
      bool hasSuperType_;
    } global_;
  };

  explicit CallIndirectId(CallIndirectIdKind kind) : kind_(kind) {}

 public:
  // Default-constructed ids are None: no signature check is generated.
  CallIndirectId() : kind_(CallIndirectIdKind::None) {}

  // Get a CallIndirectId for an asm.js function which will generate a no-op
  // checked call prologue.
  static CallIndirectId forAsmJSFunc();

  // Get the CallIndirectId for a function in a specific module.
  static CallIndirectId forFunc(const CodeMetadata& codeMeta,
                                uint32_t funcIndex);

  // Get the CallIndirectId for a function type in a specific module.
  static CallIndirectId forFuncType(const CodeMetadata& codeMeta,
                                    uint32_t funcTypeIndex);

  CallIndirectIdKind kind() const { return kind_; }
  bool isGlobal() const { return kind_ == CallIndirectIdKind::Global; }

  // The bit-packed representation of simple function types. See FuncType in
  // WasmTypeDef.h for more information.
  // NOTE(review): stored as size_t but returned as uint32_t; presumably the
  // packed value always fits in 32 bits -- confirm against FuncType packing.
  uint32_t immediate() const {
    MOZ_ASSERT(kind_ == CallIndirectIdKind::Immediate);
    return immediate_;
  }

  // The offset of the TypeDefInstanceData for the function type.
  uint32_t instanceDataOffset() const {
    MOZ_ASSERT(kind_ == CallIndirectIdKind::Global);
    return global_.instanceDataOffset_;
  }

  // Whether the TypeDef has any super types.
  bool hasSuperType() const {
    MOZ_ASSERT(kind_ == CallIndirectIdKind::Global);
    return global_.hasSuperType_;
  }
};
   1585 
// CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
// This is hoisted into WasmCodegenTypes.h for sharing between Ion and Baseline.

class CalleeDesc {
 public:
  enum Which {
    // Calls a function defined in the same module by its index.
    Func,

    // Calls the import identified by the offset of its FuncImportInstanceData
    // in thread-local data.
    Import,

    // Calls a WebAssembly table (heterogeneous, index must be bounds
    // checked, callee instance depends on TableDesc).
    WasmTable,

    // Calls an asm.js table (homogeneous, masked index, same-instance).
    AsmJSTable,

    // Call a C++ function identified by SymbolicAddress.
    Builtin,

    // Like Builtin, but automatically passes Instance* as first argument.
    BuiltinInstanceMethod,

    // Calls a function reference.
    FuncRef,
  };

 private:
  // which_ shall be initialized in the static constructors
  MOZ_INIT_OUTSIDE_CTOR Which which_;
  // Payload for the call; the active member is determined by which_.
  union U {
    U() : funcIndex_(0) {}
    uint32_t funcIndex_;
    struct {
      uint32_t instanceDataOffset_;
    } import;
    struct {
      uint32_t instanceDataOffset_;
      uint64_t minLength_;
      mozilla::Maybe<uint64_t> maxLength_;
      CallIndirectId callIndirectId_;
    } table;
    SymbolicAddress builtin_;
  } u;

 public:
  CalleeDesc() = default;
  // Static constructors; each one sets which_ and the matching union member.
  static CalleeDesc function(uint32_t funcIndex);
  static CalleeDesc import(uint32_t instanceDataOffset);
  static CalleeDesc wasmTable(const CodeMetadata& codeMeta,
                              const TableDesc& desc, uint32_t tableIndex,
                              CallIndirectId callIndirectId);
  static CalleeDesc asmJSTable(const CodeMetadata& codeMeta,
                               uint32_t tableIndex);
  static CalleeDesc builtin(SymbolicAddress callee);
  static CalleeDesc builtinInstanceMethod(SymbolicAddress callee);
  static CalleeDesc wasmFuncRef();
  Which which() const { return which_; }
  // Index of the callee function; only valid for Func calls.
  uint32_t funcIndex() const {
    MOZ_ASSERT(which_ == Func);
    return u.funcIndex_;
  }
  // Offset of the FuncImportInstanceData; only valid for Import calls.
  uint32_t importInstanceDataOffset() const {
    MOZ_ASSERT(which_ == Import);
    return u.import.instanceDataOffset_;
  }
  bool isTable() const { return which_ == WasmTable || which_ == AsmJSTable; }
  // Instance-data offset of the table's current length field.
  uint32_t tableLengthInstanceDataOffset() const {
    MOZ_ASSERT(isTable());
    return u.table.instanceDataOffset_ + offsetof(TableInstanceData, length);
  }
  // Instance-data offset of the table's elements pointer.
  uint32_t tableFunctionBaseInstanceDataOffset() const {
    MOZ_ASSERT(isTable());
    return u.table.instanceDataOffset_ + offsetof(TableInstanceData, elements);
  }
  // Signature-check id to use for the indirect call; wasm tables only.
  CallIndirectId wasmTableSigId() const {
    MOZ_ASSERT(which_ == WasmTable);
    return u.table.callIndirectId_;
  }
  uint64_t wasmTableMinLength() const {
    MOZ_ASSERT(which_ == WasmTable);
    return u.table.minLength_;
  }
  mozilla::Maybe<uint64_t> wasmTableMaxLength() const {
    MOZ_ASSERT(which_ == WasmTable);
    return u.table.maxLength_;
  }
  // The C++ function to call; Builtin and BuiltinInstanceMethod only.
  SymbolicAddress builtin() const {
    MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
    return u.builtin_;
  }
  bool isFuncRef() const { return which_ == FuncRef; }
};
   1683 
   1684 struct FuncIonPerfSpewer {
   1685  uint32_t funcIndex = 0;
   1686  jit::IonPerfSpewer spewer;
   1687 
   1688  FuncIonPerfSpewer() = default;
   1689  FuncIonPerfSpewer(uint32_t funcIndex, jit::IonPerfSpewer&& spewer)
   1690      : funcIndex(funcIndex), spewer(std::move(spewer)) {}
   1691  FuncIonPerfSpewer(FuncIonPerfSpewer&) = delete;
   1692  FuncIonPerfSpewer(FuncIonPerfSpewer&&) = default;
   1693  FuncIonPerfSpewer& operator=(FuncIonPerfSpewer&) = delete;
   1694  FuncIonPerfSpewer& operator=(FuncIonPerfSpewer&&) = default;
   1695 };
   1696 
   1697 using FuncIonPerfSpewerVector = Vector<FuncIonPerfSpewer, 8, SystemAllocPolicy>;
   1698 using FuncIonPerfSpewerSpan = mozilla::Span<FuncIonPerfSpewer>;
   1699 
   1700 struct FuncBaselinePerfSpewer {
   1701  uint32_t funcIndex = 0;
   1702  jit::WasmBaselinePerfSpewer spewer;
   1703 
   1704  FuncBaselinePerfSpewer() = default;
   1705  FuncBaselinePerfSpewer(uint32_t funcIndex,
   1706                         jit::WasmBaselinePerfSpewer&& spewer)
   1707      : funcIndex(funcIndex), spewer(std::move(spewer)) {}
   1708  FuncBaselinePerfSpewer(FuncBaselinePerfSpewer&) = delete;
   1709  FuncBaselinePerfSpewer(FuncBaselinePerfSpewer&&) = default;
   1710  FuncBaselinePerfSpewer& operator=(FuncBaselinePerfSpewer&) = delete;
   1711  FuncBaselinePerfSpewer& operator=(FuncBaselinePerfSpewer&&) = default;
   1712 };
   1713 
   1714 using FuncBaselinePerfSpewerVector =
   1715    Vector<FuncBaselinePerfSpewer, 8, SystemAllocPolicy>;
   1716 using FuncBaselinePerfSpewerSpan = mozilla::Span<FuncBaselinePerfSpewer>;
   1717 
   1718 // This holds stats relating to compilation of some arbitrary set of functions.
   1719 // If you add fields, don't forget to update its `clear` and `empty` methods.
   1720 struct CompileStats {
   1721  // number of functions in the set
   1722  size_t numFuncs;
   1723  // bytecode size of the functions
   1724  size_t bytecodeSize;
   1725  // number of direct-call / call-ref sites inlined
   1726  size_t inlinedDirectCallCount;
   1727  size_t inlinedCallRefCount;
   1728  // total extra bytecode size from direct-call / call-ref inlining
   1729  size_t inlinedDirectCallBytecodeSize;
   1730  size_t inlinedCallRefBytecodeSize;
   1731  // number of funcs for which inlining stopped due to budget overrun
   1732  size_t numInliningBudgetOverruns;
   1733  // number of funcs for which inlining was made less aggressive because the
   1734  // function was already large
   1735  size_t numLargeFunctionBackoffs = 0;
   1736 
   1737  void clear() {
   1738    numFuncs = 0;
   1739    bytecodeSize = 0;
   1740    inlinedDirectCallCount = 0;
   1741    inlinedCallRefCount = 0;
   1742    inlinedDirectCallBytecodeSize = 0;
   1743    inlinedCallRefBytecodeSize = 0;
   1744    numInliningBudgetOverruns = 0;
   1745    numLargeFunctionBackoffs = 0;
   1746  }
   1747  CompileStats() { clear(); }
   1748 
   1749  bool empty() const {
   1750    return 0 == (numFuncs | bytecodeSize | inlinedDirectCallCount |
   1751                 inlinedCallRefCount | inlinedDirectCallBytecodeSize |
   1752                 inlinedCallRefBytecodeSize | numInliningBudgetOverruns |
   1753                 numLargeFunctionBackoffs);
   1754  }
   1755 
   1756  // Merge in the counts from `other`.  When using this, be careful to avoid
   1757  // double-accounting bugs -- conceptually, `other` should be zeroed out as a
   1758  // result of the merge.  Doing that as part of this routine would be nice but
   1759  // unfortunately interferes with `const` qualification and thread-safety, so
   1760  // that isn't done.
   1761  void merge(const CompileStats& other);
   1762 };
   1763 
   1764 // Same as CompileStats, but includes info about compiled-code size.
   1765 struct CompileAndLinkStats : public CompileStats {
   1766  // total mapped addr space for generated code (a multiple of the page size)
   1767  size_t codeBytesMapped;
   1768  // total used space for generated code (will be less than the above)
   1769  size_t codeBytesUsed;
   1770 
   1771  void clear() {
   1772    CompileStats::clear();
   1773    codeBytesMapped = 0;
   1774    codeBytesUsed = 0;
   1775  }
   1776  CompileAndLinkStats() { clear(); }
   1777 
   1778  bool empty() const {
   1779    return 0 == (codeBytesMapped | codeBytesUsed) && CompileStats::empty();
   1780  }
   1781 
   1782  // Same comments as for CompileStats::merge apply.
   1783  void merge(const CompileAndLinkStats& other);
   1784 
   1785  // Merge in just CompileStats from `other`.
   1786  void mergeCompileStats(const CompileStats& other) {
   1787    CompileStats::merge(other);
   1788  }
   1789 
   1790  void print() const;
   1791 };
   1792 
   1793 }  // namespace wasm
   1794 }  // namespace js
   1795 
   1796 #endif  // wasm_codegen_types_h