tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WasmGC.h (23690B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 *
      4 * Copyright 2019 Mozilla Foundation
      5 *
      6 * Licensed under the Apache License, Version 2.0 (the "License");
      7 * you may not use this file except in compliance with the License.
      8 * You may obtain a copy of the License at
      9 *
     10 *     http://www.apache.org/licenses/LICENSE-2.0
     11 *
     12 * Unless required by applicable law or agreed to in writing, software
     13 * distributed under the License is distributed on an "AS IS" BASIS,
     14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     15 * See the License for the specific language governing permissions and
     16 * limitations under the License.
     17 */
     18 
     19 #ifndef wasm_gc_h
     20 #define wasm_gc_h
     21 
     22 #include "jit/ABIArgGenerator.h"  // For ABIArgIter
     23 #include "js/AllocPolicy.h"
     24 #include "js/Vector.h"
     25 #include "util/Memory.h"
     26 #include "wasm/WasmBuiltins.h"
     27 #include "wasm/WasmFrame.h"
     28 #include "wasm/WasmSerialize.h"
     29 
     30 namespace js {
     31 
     32 namespace jit {
     33 class Label;
     34 class MacroAssembler;
     35 }  // namespace jit
     36 
     37 namespace wasm {
     38 
     39 class ArgTypeVector;
     40 class BytecodeOffset;
     41 
     42 // Definitions for stackmaps.
     43 
     44 using ExitStubMapVector = Vector<bool, 32, SystemAllocPolicy>;
     45 
// The fixed-size portion of a StackMap: everything that precedes the
// variable-length bitmap.  This is cacheable POD (see the
// WASM_CHECK_CACHEABLE_POD list below) and is serialized verbatim, so the
// field and bitfield layout must stay stable.
struct StackMapHeader {
  explicit StackMapHeader(uint32_t numMappedWords = 0)
      : numMappedWords(numMappedWords),
#ifdef DEBUG
        numExitStubWords(0),
#endif
        frameOffsetFromTop(0),
        hasDebugFrameWithLiveRefs(0) {
    MOZ_ASSERT(numMappedWords <= maxMappedWords);
  }

  // The total number of stack words covered by the map ..
  static constexpr size_t MappedWordsBits = 18;
  // The largest representable word count must be able to span MaxFrameSize.
  static_assert(((1 << MappedWordsBits) - 1) * sizeof(void*) >= MaxFrameSize);
  uint32_t numMappedWords : MappedWordsBits;

  // .. of which this many are "exit stub" extras.  Debug-only bookkeeping;
  // the field does not exist in release builds.
  static constexpr size_t ExitStubWordsBits = 6;
#ifdef DEBUG
  uint32_t numExitStubWords : ExitStubWordsBits;
#endif

  // Where is Frame* relative to the top?  This is an offset in words.  On every
  // platform, FrameOffsetBits needs to be at least
  // ceil(log2(MaxParams*sizeof-biggest-param-type-in-words)).  The most
  // constraining platforms are 32-bit with SIMD support, currently x86-32.
  static constexpr size_t FrameOffsetBits = 12;
  uint32_t frameOffsetFromTop : FrameOffsetBits;

  // Notes the presence of a DebugFrame with possibly-live references.  A
  // DebugFrame may or may not contain GC-managed data; in situations when it is
  // possible that any pointers in the DebugFrame are non-null, the DebugFrame
  // gets a stackmap.
  uint32_t hasDebugFrameWithLiveRefs : 1;

  WASM_CHECK_CACHEABLE_POD(numMappedWords,
#ifdef DEBUG
                           numExitStubWords,
#endif
                           frameOffsetFromTop, hasDebugFrameWithLiveRefs);

  // Saturation limits implied by the bitfield widths above.
  static constexpr uint32_t maxMappedWords = (1 << MappedWordsBits) - 1;
  static constexpr uint32_t maxExitStubWords = (1 << ExitStubWordsBits) - 1;
  static constexpr uint32_t maxFrameOffsetFromTop = (1 << FrameOffsetBits) - 1;

  // Size in bytes of the largest single ABI argument slot (the larger of a
  // GPR or FPR/SIMD register's content).
  static constexpr size_t MaxParamSize =
      std::max(sizeof(jit::FloatRegisters::RegisterContent),
               sizeof(jit::Registers::RegisterContent));

  // Add 16 words to account for the size of FrameWithInstances including any
  // shadow stack (at worst 8 words total), and then a little headroom in case
  // the argument area had to be aligned.
  static_assert(sizeof(FrameWithInstances) / sizeof(void*) <= 8);
  static_assert(maxFrameOffsetFromTop >=
                    (MaxParams * MaxParamSize / sizeof(void*)) + 16,
                "limited size of the offset field");

  // Field-by-field equality; numExitStubWords participates only in DEBUG
  // builds, where the field exists.
  bool operator==(const StackMapHeader& rhs) const {
    return numMappedWords == rhs.numMappedWords &&
#ifdef DEBUG
           numExitStubWords == rhs.numExitStubWords &&
#endif
           frameOffsetFromTop == rhs.frameOffsetFromTop &&
           hasDebugFrameWithLiveRefs == rhs.hasDebugFrameWithLiveRefs;
  }
  bool operator!=(const StackMapHeader& rhs) const { return !(*this == rhs); }
};
    113 
    114 WASM_DECLARE_CACHEABLE_POD(StackMapHeader);
    115 
    116 #ifndef DEBUG
    117 // This is the expected size for the header, when in release builds
    118 static_assert(sizeof(StackMapHeader) == 4,
    119              "wasm::StackMapHeader has unexpected size");
    120 #endif
    121 
    122 // A StackMap is a bit-array containing numMappedWords*2 bits, two bits per
    123 // word of stack. Index zero is for the lowest addressed word in the range.
    124 //
    125 // This is a variable-length structure whose size must be known at creation
    126 // time.
    127 //
    128 // Users of the map will know the address of the wasm::Frame that is covered
    129 // by this map. In order that they can calculate the exact address range
    130 // covered by the map, the map also stores the offset, from the highest
    131 // addressed word of the map, of the embedded wasm::Frame. This is an offset
    132 // down from the highest address, rather than up from the lowest, so as to
    133 // limit its range to FrameOffsetBits bits.
    134 //
    135 // The stackmap may also cover a DebugFrame (all DebugFrames which may
    136 // potentially contain live pointers into the JS heap get a map). If so, that
    137 // can be noted, since users of the map need to trace pointers in a
    138 // DebugFrame.
    139 //
    140 // Finally, for sanity checking only, for stackmaps associated with a wasm
    141 // trap exit stub, the number of words used by the trap exit stub save area
    142 // is also noted.  This is used in Instance::traceFrame to check that the
    143 // TrapExitDummyValue is in the expected place in the frame.
    144 struct StackMap final {
    145  friend class StackMaps;
    146 
    147  // The header contains the constant-sized fields before the variable-sized
    148  // bitmap that follows.
    149  StackMapHeader header;
    150 
    151  enum Kind : uint32_t {
    152    POD = 0,
    153    AnyRef = 1,
    154 
    155    // The data pointer for a WasmStructObject that requires OOL storage.
    156    StructDataPointer = 2,
    157 
    158    // The data pointer for a WasmArrayObject, which is either an interior
    159    // pointer to the object itself, or a pointer to OOL storage managed by
    160    // BufferAllocator. See WasmArrayObject::data_/inlineStorage.
    161    ArrayDataPointer = 3,
    162 
    163    Limit,
    164  };
    165 
    166 private:
    167  // The variable-sized bitmap.
    168  uint32_t bitmap[1];
    169 
    170  explicit StackMap(uint32_t numMappedWords) : header(numMappedWords) {
    171    const uint32_t nBitmap = calcBitmapNumElems(header.numMappedWords);
    172    memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
    173  }
    174 
    175 public:
    176  // Returns the size of a `StackMap` allocated with `numMappedWords`.
    177  static size_t allocationSizeInBytes(uint32_t numMappedWords) {
    178    uint32_t nBitmap = calcBitmapNumElems(numMappedWords);
    179    return sizeof(StackMap) + (nBitmap - 1) * sizeof(bitmap[0]);
    180  }
    181 
    182  // Returns the allocated size of this `StackMap`.
    183  size_t allocationSizeInBytes() const {
    184    return allocationSizeInBytes(header.numMappedWords);
    185  }
    186 
    187  // Record the number of words in the map used as a wasm trap exit stub
    188  // save area.  See comment above.
    189  void setExitStubWords(uint32_t nWords) {
    190    MOZ_RELEASE_ASSERT(nWords <= header.maxExitStubWords);
    191 #ifdef DEBUG
    192    MOZ_ASSERT(header.numExitStubWords == 0);
    193    MOZ_ASSERT(nWords <= header.numMappedWords);
    194    header.numExitStubWords = nWords;
    195 #endif
    196  }
    197 
    198  // Record the offset from the highest-addressed word of the map, that the
    199  // wasm::Frame lives at.  See comment above.
    200  void setFrameOffsetFromTop(uint32_t nWords) {
    201    MOZ_ASSERT(header.frameOffsetFromTop == 0);
    202    MOZ_RELEASE_ASSERT(nWords <= StackMapHeader::maxFrameOffsetFromTop);
    203    MOZ_ASSERT(header.frameOffsetFromTop < header.numMappedWords);
    204    header.frameOffsetFromTop = nWords;
    205  }
    206 
    207  // If the frame described by this StackMap includes a DebugFrame, call here to
    208  // record that fact.
    209  void setHasDebugFrameWithLiveRefs() {
    210    MOZ_ASSERT(header.hasDebugFrameWithLiveRefs == 0);
    211    header.hasDebugFrameWithLiveRefs = 1;
    212  }
    213 
    214  inline void set(uint32_t index, Kind kind) {
    215    MOZ_ASSERT(index < header.numMappedWords);
    216    MOZ_ASSERT(kind < Kind::Limit);
    217    // Because we don't zero out the field before writing it ..
    218    MOZ_ASSERT(get(index) == (Kind)0);
    219    uint32_t wordIndex = index / mappedWordsPerBitmapElem;
    220    uint32_t wordOffset = index % mappedWordsPerBitmapElem * bitsPerMappedWord;
    221    bitmap[wordIndex] |= (kind << wordOffset);
    222  }
    223 
    224  inline Kind get(uint32_t index) const {
    225    MOZ_ASSERT(index < header.numMappedWords);
    226    uint32_t wordIndex = index / mappedWordsPerBitmapElem;
    227    uint32_t wordOffset = index % mappedWordsPerBitmapElem * bitsPerMappedWord;
    228    Kind result = Kind((bitmap[wordIndex] >> wordOffset) & valueMask);
    229    return result;
    230  }
    231 
    232  inline uint8_t* rawBitmap() { return (uint8_t*)&bitmap; }
    233  inline const uint8_t* rawBitmap() const { return (const uint8_t*)&bitmap; }
    234  inline size_t rawBitmapLengthInBytes() const {
    235    return calcBitmapNumElems(header.numMappedWords) * sizeof(bitmap[0]);
    236  }
    237 
    238 private:
    239  static constexpr uint32_t bitsPerMappedWord = 2;
    240  static constexpr uint32_t mappedWordsPerBitmapElem =
    241      sizeof(bitmap[0]) * CHAR_BIT / bitsPerMappedWord;
    242  static constexpr uint32_t valueMask = js::BitMask(bitsPerMappedWord);
    243  static_assert(8 % bitsPerMappedWord == 0);
    244  static_assert(Kind::Limit - 1 <= valueMask);
    245 
    246  static uint32_t calcBitmapNumElems(uint32_t numMappedWords) {
    247    MOZ_RELEASE_ASSERT(numMappedWords <= StackMapHeader::maxMappedWords);
    248    uint32_t nBitmap = js::HowMany(numMappedWords, mappedWordsPerBitmapElem);
    249    return nBitmap == 0 ? 1 : nBitmap;
    250  }
    251 
    252 public:
    253  bool operator==(const StackMap& rhs) const {
    254    // Check the header first, as it determines the bitmap length
    255    if (header != rhs.header) {
    256      return false;
    257    }
    258    // Compare the bitmap data
    259    return memcmp(bitmap, rhs.bitmap, rawBitmapLengthInBytes()) == 0;
    260  }
    261 };
    262 
    263 #ifndef DEBUG
    264 // This is the expected size for a map that covers 32 or fewer words.
    265 static_assert(sizeof(StackMap) == 8, "wasm::StackMap has unexpected size");
    266 #endif
    267 
    268 // A map from an offset relative to the beginning of a code block to a StackMap
    269 using StackMapHashMap =
    270    HashMap<uint32_t, StackMap*, DefaultHasher<uint32_t>, SystemAllocPolicy>;
    271 
// Owning collection of StackMaps for a compilation, keyed by code offset.
// Maps are bump-allocated from a LifoAlloc (pointer-stable, no per-map
// malloc) and deduplicated against the most recently finalized map via the
// create()/finalize()/add() protocol below.
class StackMaps {
 private:
  // The primary allocator for stack maps. The LifoAlloc will malloc chunks of
  // memory to be linearly allocated as stack maps, giving us pointer stability
  // while avoiding lock contention from malloc across compilation threads. It
  // also allows us to undo a stack map allocation.
  LifoAlloc stackMaps_;
  // Map for finding a stack map at a specific code offset.
  StackMapHashMap codeOffsetToStackMap_;

  // The StackMap most recently finalized. Used for deduplication.
  StackMap* lastAdded_ = nullptr;
  // A LifoAlloc marker before the most recently allocated StackMap. Will be set
  // by create() and cleared by finalize().
  LifoAlloc::Mark beforeLastCreated_;
#ifdef DEBUG
  // The StackMap that will be undone by `beforeLastCreated_`. Used to validate
  // correct usage of this class.
  StackMap* createdButNotFinalized_ = nullptr;
#endif

 public:
  StackMaps() : stackMaps_(4096, js::BackgroundMallocArena) {}

  // Allocates a new empty stack map. After configuring the stack map to your
  // liking, you must call finalize().
  [[nodiscard]] StackMap* create(uint32_t numMappedWords) {
    MOZ_ASSERT(!createdButNotFinalized_,
               "a previous StackMap has been created but not finalized");

    // Take the mark before allocating, so finalize() can undo the allocation
    // if the new map turns out to duplicate lastAdded_.
    beforeLastCreated_ = stackMaps_.mark();
    void* mem =
        stackMaps_.alloc(StackMap::allocationSizeInBytes(numMappedWords));
    if (!mem) {
      return nullptr;
    }
    StackMap* newMap = new (mem) StackMap(numMappedWords);
#ifdef DEBUG
    createdButNotFinalized_ = newMap;
#endif
    return newMap;
  }

  // Allocates a new stack map with a given header, e.g. one that had been
  // previously serialized. After configuring the stack map to your liking, you
  // must call finalize().
  [[nodiscard]] StackMap* create(const StackMapHeader& header) {
    StackMap* map = create(header.numMappedWords);
    if (!map) {
      return nullptr;
    }
    map->header = header;
    return map;
  }

  // Finalizes a stack map allocated by create(). The `map` is no longer valid
  // to access as it may have been deduplicated. The returned stack map must be
  // used instead. This operation is infallible.
  [[nodiscard]] StackMap* finalize(StackMap* map) {
#ifdef DEBUG
    MOZ_ASSERT(
        map == createdButNotFinalized_,
        "the provided stack map was not from the most recent call to create()");
    createdButNotFinalized_ = nullptr;
#endif

    if (lastAdded_ && *map == *lastAdded_) {
      // This stack map is a duplicate of the last one we added. Unwind the
      // allocation that created the new map and add the existing one to the
      // hash map.
      stackMaps_.release(beforeLastCreated_);
      return lastAdded_;
    }

    // This stack map is new.
    lastAdded_ = map;
    stackMaps_.cancelMark(beforeLastCreated_);
    return map;
  }

  // Add a finalized stack map with a given code offset.  The map must have
  // been allocated (and still be owned) by this collection.
  [[nodiscard]] bool add(uint32_t codeOffset, StackMap* map) {
    MOZ_ASSERT(!createdButNotFinalized_);
    MOZ_ASSERT(stackMaps_.contains(map));
    return codeOffsetToStackMap_.put(codeOffset, map);
  }

  // Finalizes a stack map created by create() and adds it to the given code
  // offset. The `map` is no longer valid to use as it may be deduplicated and
  // freed.
  [[nodiscard]] bool finalize(uint32_t codeOffset, StackMap* map) {
    return add(codeOffset, finalize(map));
  }

  // Drop all maps and their backing storage; resets the dedup cache too.
  void clear() {
    MOZ_ASSERT(!createdButNotFinalized_);
    codeOffsetToStackMap_.clear();
    stackMaps_.freeAll();
    lastAdded_ = nullptr;
  }
  bool empty() const { return length() == 0; }
  // Return the number of stack maps contained in this.
  size_t length() const { return codeOffsetToStackMap_.count(); }

  // Add all the stack maps from the other collection to this collection.
  // Apply an optional offset while adding the stack maps.
  [[nodiscard]] bool appendAll(StackMaps& other, uint32_t offsetInModule) {
    MOZ_ASSERT(!other.createdButNotFinalized_);

    // Reserve space for the new mappings so that we don't have to handle
    // failure in the loop below.
    if (!codeOffsetToStackMap_.reserve(codeOffsetToStackMap_.count() +
                                       other.codeOffsetToStackMap_.count())) {
      return false;
    }

    // Transfer chunks from other LifoAlloc for ownership. Pointers will stay
    // stable. We must not fail from this point onward.
    stackMaps_.transferFrom(&other.stackMaps_);

    // Copy hash map entries. This is safe because we took ownership of the
    // underlying storage.
    for (auto iter = other.codeOffsetToStackMap_.modIter(); !iter.done();
         iter.next()) {
      uint32_t newOffset = iter.get().key() + offsetInModule;
      StackMap* stackMap = iter.get().value();
      codeOffsetToStackMap_.putNewInfallible(newOffset, stackMap);
    }

    // `other` no longer owns anything; leave it empty and reusable.
    other.clear();
    return true;
  }

  // Find the stack map registered at `codeOffset`, or nullptr if none.
  const StackMap* lookup(uint32_t codeOffset) const {
    auto ptr = codeOffsetToStackMap_.readonlyThreadsafeLookup(codeOffset);
    if (!ptr) {
      return nullptr;
    }

    return ptr->value();
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return codeOffsetToStackMap_.shallowSizeOfExcludingThis(mallocSizeOf) +
           stackMaps_.sizeOfExcludingThis(mallocSizeOf);
  }

  void checkInvariants(const uint8_t* base) const;

  WASM_DECLARE_FRIEND_SERIALIZE(StackMaps);
};
    423 
    424 // Supporting code for creation of stackmaps.
    425 
    426 // StackArgAreaSizeUnaligned returns the size, in bytes, of the stack arg area
    427 // size needed to pass |argTypes|, excluding any alignment padding beyond the
// size of the area as a whole.  The size is as determined by the platform's
// native ABI.
    430 //
    431 // StackArgAreaSizeAligned returns the same, but rounded up to the nearest 16
    432 // byte boundary.
    433 //
    434 // Note, StackArgAreaSize{Unaligned,Aligned}() must process all the arguments
    435 // in order to take into account all necessary alignment constraints.  The
    436 // signature must include any receiver argument -- in other words, it must be
    437 // the complete native-ABI-level call signature.
    438 template <class T>
    439 static inline size_t StackArgAreaSizeUnaligned(const T& argTypes,
    440                                               jit::ABIKind kind) {
    441  jit::ABIArgIter<const T> i(argTypes, kind);
    442  while (!i.done()) {
    443    i++;
    444  }
    445  return i.stackBytesConsumedSoFar();
    446 }
    447 
    448 static inline size_t StackArgAreaSizeUnaligned(
    449    const SymbolicAddressSignature& saSig, jit::ABIKind kind) {
    450  // WasmABIArgIter::ABIArgIter wants the items to be iterated over to be
    451  // presented in some type that has methods length() and operator[].  So we
    452  // have to wrap up |saSig|'s array of types in this API-matching class.
    453  class MOZ_STACK_CLASS ItemsAndLength {
    454    const jit::MIRType* items_;
    455    size_t length_;
    456 
    457   public:
    458    ItemsAndLength(const jit::MIRType* items, size_t length)
    459        : items_(items), length_(length) {}
    460    size_t length() const { return length_; }
    461    jit::MIRType operator[](size_t i) const { return items_[i]; }
    462  };
    463 
    464  // Assert, at least crudely, that we're not accidentally going to run off
    465  // the end of the array of types, nor into undefined parts of it, while
    466  // iterating.
    467  MOZ_ASSERT(saSig.numArgs <
    468             sizeof(saSig.argTypes) / sizeof(saSig.argTypes[0]));
    469  MOZ_ASSERT(saSig.argTypes[saSig.numArgs] ==
    470             jit::MIRType::None /*the end marker*/);
    471 
    472  ItemsAndLength itemsAndLength(saSig.argTypes, saSig.numArgs);
    473  return StackArgAreaSizeUnaligned(itemsAndLength, kind);
    474 }
    475 
    476 static inline size_t AlignStackArgAreaSize(size_t unalignedSize) {
    477  return AlignBytes(unalignedSize, jit::WasmStackAlignment);
    478 }
    479 
    480 // Generate a stackmap for a function's stack-overflow-at-entry trap, with
    481 // the structure:
    482 //
    483 //    <reg dump area>
    484 //    |       ++ <space reserved before trap, if any>
    485 //    |               ++ <space for Frame>
    486 //    |                       ++ <inbound arg area>
    487 //    |                                           |
    488 //    Lowest Addr                                 Highest Addr
    489 //
    490 // The caller owns the resulting stackmap.  This assumes a grow-down stack.
    491 //
    492 // For non-debug builds, if the stackmap would contain no pointers, no
    493 // stackmap is created, and nullptr is returned.  For a debug build, a
    494 // stackmap is always created and returned.
    495 //
    496 // The "space reserved before trap" is the space reserved by
    497 // MacroAssembler::wasmReserveStackChecked, in the case where the frame is
    498 // "small", as determined by that function.
    499 [[nodiscard]] bool CreateStackMapForFunctionEntryTrap(
    500    const ArgTypeVector& argTypes, const jit::RegisterOffsets& trapExitLayout,
    501    size_t trapExitLayoutWords, size_t nBytesReservedBeforeTrap,
    502    size_t nInboundStackArgBytes, wasm::StackMaps& stackMaps,
    503    wasm::StackMap** result);
    504 
    505 // At a resumable wasm trap, the machine's registers are saved on the stack by
    506 // (code generated by) GenerateTrapExit().  This function writes into |args| a
    507 // vector of booleans describing the ref-ness of the saved integer registers.
    508 // |args[0]| corresponds to the low addressed end of the described section of
    509 // the save area.
    510 [[nodiscard]] bool GenerateStackmapEntriesForTrapExit(
    511    const ArgTypeVector& args, const jit::RegisterOffsets& trapExitLayout,
    512    const size_t trapExitLayoutNumWords, ExitStubMapVector* extras);
    513 
    514 // Shared write barrier code.
    515 //
    516 // A barriered store looks like this:
    517 //
    518 //   Label skipPreBarrier;
    519 //   EmitWasmPreBarrierGuard(..., &skipPreBarrier);
    520 //   <COMPILER-SPECIFIC ACTIONS HERE>
    521 //   EmitWasmPreBarrierCall(...);
    522 //   bind(&skipPreBarrier);
    523 //
    524 //   <STORE THE VALUE IN MEMORY HERE>
    525 //
    526 //   Label skipPostBarrier;
    527 //   <COMPILER-SPECIFIC ACTIONS HERE>
    528 //   EmitWasmPostBarrierGuard(..., &skipPostBarrier);
    529 //   <CALL POST-BARRIER HERE IN A COMPILER-SPECIFIC WAY>
    530 //   bind(&skipPostBarrier);
    531 //
    532 // The actions are divided up to allow other actions to be placed between
    533 // them, such as saving and restoring live registers.  The postbarrier call
    534 // invokes C++ and will kill all live registers.
    535 
    536 // Before storing a GC pointer value in memory, skip to `skipBarrier` if the
    537 // prebarrier is not needed.  Will clobber `scratch`.
    538 //
    539 // It is OK for `instance` and `scratch` to be the same register.
    540 //
    541 // If `trapSiteDesc` is something, then metadata to catch a null access and
    542 // emit a null pointer exception will be emitted. This will only catch a null
    543 // access due to an incremental GC being in progress, the write that follows
    544 // this pre-barrier guard must also be guarded against null.
    545 template <class Addr>
    546 void EmitWasmPreBarrierGuard(jit::MacroAssembler& masm, jit::Register instance,
    547                             jit::Register scratch, Addr addr,
    548                             jit::Label* skipBarrier,
    549                             MaybeTrapSiteDesc trapSiteDesc);
    550 
    551 // Before storing a GC pointer value in memory, call out-of-line prebarrier
    552 // code. This assumes `PreBarrierReg` contains the address that will be
// updated. On ARM64 it also assumes that x28 (the PseudoStackPointer) has the
    554 // same value as SP.  `PreBarrierReg` is preserved by the barrier function.
    555 // Will clobber `scratch`.
    556 //
    557 // It is OK for `instance` and `scratch` to be the same register.
    558 void EmitWasmPreBarrierCallImmediate(jit::MacroAssembler& masm,
    559                                     jit::Register instance,
    560                                     jit::Register scratch,
    561                                     jit::Register valueAddr,
    562                                     size_t valueOffset);
    563 // The equivalent of EmitWasmPreBarrierCallImmediate, but for a
    564 // jit::BaseIndex. Will clobber `scratch1` and `scratch2`.
    565 //
    566 // It is OK for `instance` and `scratch1` to be the same register.
    567 void EmitWasmPreBarrierCallIndex(jit::MacroAssembler& masm,
    568                                 jit::Register instance, jit::Register scratch1,
    569                                 jit::Register scratch2, jit::BaseIndex addr);
    570 
    571 // After storing a GC pointer value in memory, skip to `skipBarrier` if a
    572 // postbarrier is not needed.  If the location being set is in an
    573 // heap-allocated object then `object` must reference that object; otherwise
    574 // it should be None. The value that was stored is `setValue`.  Will clobber
    575 // `otherScratch` and will use other available scratch registers.
    576 //
    577 // `otherScratch` cannot be a designated scratch register.
    578 void EmitWasmPostBarrierGuard(jit::MacroAssembler& masm,
    579                              const mozilla::Maybe<jit::Register>& object,
    580                              jit::Register otherScratch,
    581                              jit::Register setValue, jit::Label* skipBarrier);
    582 
    583 // Before calling Instance::postBarrierWholeCell, we can check the object
    584 // against the store buffer's last element cache, skipping the post barrier if
    585 // that object had already been barriered.
    586 //
    587 // `instance` and `temp` can be the same register; if so, instance will be
    588 // clobbered, otherwise instance will be preserved.
    589 void CheckWholeCellLastElementCache(jit::MacroAssembler& masm,
    590                                    jit::Register instance,
    591                                    jit::Register object, jit::Register temp,
    592                                    jit::Label* skipBarrier);
    593 
    594 #ifdef DEBUG
    595 // Check (approximately) whether `nextPC` is a valid code address for a
    596 // stackmap created by this compiler.  This is done by examining the
    597 // instruction at `nextPC`.  The matching is inexact, so it may err on the
    598 // side of returning `true` if it doesn't know.  Doing so reduces the
    599 // effectiveness of the MOZ_ASSERTs that use this function, so at least for
    600 // the four primary platforms we should keep it as exact as possible.
    601 
    602 bool IsPlausibleStackMapKey(const uint8_t* nextPC);
    603 #endif
    604 
    605 }  // namespace wasm
    606 }  // namespace js
    607 
    608 #endif  // wasm_gc_h