commit 6402dbb7d0ff4bc558210accad2e4f99ecbd0822
parent d80b64a01e501747c6ebf914861cb0f32ff2dd72
Author: Ben Visness <bvisness@mozilla.com>
Date: Mon, 13 Oct 2025 12:29:31 +0000
Bug 1992014: Allocate StackMaps using LifoAlloc, deduplicate. r=rhunt
Stack maps were individually malloced, which produced significant
amounts of lock contention and cost a significant amount of time in
baseline compiles.
This patch updates the StackMaps class to use a LifoAlloc to allocate
stack maps. The LifoAlloc will malloc chunks of memory to fill with
stack maps; after being added to the LifoAlloc, the pointer to the stack
map is added to the already-existing hash map and remains stable.
Stack maps are also now deduplicated on add. They are compared to the
most recently added stack map, and if they are identical, the existing
stack map is used again, shrinking the number of stack maps we have to
store.
Differential Revision: https://phabricator.services.mozilla.com/D267110
Diffstat:
7 files changed, 169 insertions(+), 96 deletions(-)
diff --git a/js/src/ds/LifoAlloc.h b/js/src/ds/LifoAlloc.h
@@ -979,10 +979,8 @@ class LifoAlloc {
void release(Mark mark);
- private:
void cancelMark(Mark mark) { markCount--; }
- public:
void releaseAll() {
MOZ_ASSERT(!markCount);
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
@@ -16770,6 +16770,7 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
const RegisterOffsets& trapExitLayout,
size_t trapExitLayoutNumWords,
size_t nInboundStackArgBytes,
+ wasm::StackMaps& stackMaps,
wasm::StackMap** result) {
// Ensure this is defined on all return paths.
*result = nullptr;
@@ -16824,8 +16825,7 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
}
#endif
- wasm::StackMap* stackMap =
- wasm::StackMap::create(nTotalBytes / sizeof(void*));
+ wasm::StackMap* stackMap = stackMaps.create(nTotalBytes / sizeof(void*));
if (!stackMap) {
return false;
}
@@ -17011,7 +17011,7 @@ bool CodeGenerator::generateWasm(wasm::CallIndirectId callIndirectId,
wasm::StackMap* functionEntryStackMap = nullptr;
if (!CreateStackMapForFunctionEntryTrap(
argTypes, trapExitLayout, trapExitLayoutNumWords,
- nBytesReservedBeforeTrap, nInboundStackArgBytes,
+ nBytesReservedBeforeTrap, nInboundStackArgBytes, *stackMaps,
&functionEntryStackMap)) {
return false;
}
@@ -17021,8 +17021,7 @@ bool CodeGenerator::generateWasm(wasm::CallIndirectId callIndirectId,
MOZ_ASSERT(functionEntryStackMap);
if (functionEntryStackMap &&
- !stackMaps->add(trapInsnOffset.offset(), functionEntryStackMap)) {
- functionEntryStackMap->destroy();
+ !stackMaps->finalize(trapInsnOffset.offset(), functionEntryStackMap)) {
return false;
}
}
@@ -17063,9 +17062,9 @@ bool CodeGenerator::generateWasm(wasm::CallIndirectId callIndirectId,
// collection thereof.
for (CodegenSafepointIndex& index : safepointIndices_) {
wasm::StackMap* stackMap = nullptr;
- if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
- trapExitLayoutNumWords,
- nInboundStackArgBytes, &stackMap)) {
+ if (!CreateStackMapFromLSafepoint(
+ *index.safepoint(), trapExitLayout, trapExitLayoutNumWords,
+ nInboundStackArgBytes, *stackMaps, &stackMap)) {
return false;
}
@@ -17075,8 +17074,7 @@ bool CodeGenerator::generateWasm(wasm::CallIndirectId callIndirectId,
continue;
}
- if (!stackMaps->add(index.displacement(), stackMap)) {
- stackMap->destroy();
+ if (!stackMaps->finalize(index.displacement(), stackMap)) {
return false;
}
}
diff --git a/js/src/wasm/WasmBCFrame.cpp b/js/src/wasm/WasmBCFrame.cpp
@@ -369,7 +369,7 @@ bool StackMapGenerator::createStackMap(
const uint32_t augmentedMstWords = augmentedMst.length();
const uint32_t numMappedWords =
numStackArgPaddingWords + extraWords + augmentedMstWords;
- StackMap* stackMap = StackMap::create(numMappedWords);
+ StackMap* stackMap = stackMaps_->create(numMappedWords);
if (!stackMap) {
return false;
}
@@ -422,12 +422,6 @@ bool StackMapGenerator::createStackMap(
stackMap->setHasDebugFrameWithLiveRefs();
}
- // Add the completed map to the running collection thereof.
- if (!stackMaps_->add(assemblerOffset, stackMap)) {
- stackMap->destroy();
- return false;
- }
-
#ifdef DEBUG
{
// Crosscheck the map pointer counting.
@@ -442,7 +436,8 @@ bool StackMapGenerator::createStackMap(
}
#endif
- return true;
+ // Add the completed map to the running collection thereof.
+ return stackMaps_->finalize(assemblerOffset, stackMap);
}
//////////////////////////////////////////////////////////////////////////////
diff --git a/js/src/wasm/WasmGC.cpp b/js/src/wasm/WasmGC.cpp
@@ -46,7 +46,8 @@ using namespace js::wasm;
bool wasm::CreateStackMapForFunctionEntryTrap(
const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
size_t trapExitLayoutWords, size_t nBytesReservedBeforeTrap,
- size_t nInboundStackArgBytes, wasm::StackMap** result) {
+ size_t nInboundStackArgBytes, wasm::StackMaps& stackMaps,
+ wasm::StackMap** result) {
// Ensure this is defined on all return paths.
*result = nullptr;
@@ -82,8 +83,7 @@ bool wasm::CreateStackMapForFunctionEntryTrap(
}
#endif
- wasm::StackMap* stackMap =
- wasm::StackMap::create(nTotalBytes / sizeof(void*));
+ wasm::StackMap* stackMap = stackMaps.create(nTotalBytes / sizeof(void*));
if (!stackMap) {
return false;
}
@@ -329,7 +329,7 @@ void StackMaps::checkInvariants(const uint8_t* base) const {
#ifdef DEBUG
// Check that each entry in the stackmap structure points
// to a plausible instruction.
- for (auto iter = mapping_.iter(); !iter.done(); iter.next()) {
+ for (auto iter = codeOffsetToStackMap_.iter(); !iter.done(); iter.next()) {
MOZ_ASSERT(IsPlausibleStackMapKey(base + iter.get().key()),
"wasm stackmap does not reference a valid insn");
}
diff --git a/js/src/wasm/WasmGC.h b/js/src/wasm/WasmGC.h
@@ -99,6 +99,16 @@ struct StackMapHeader {
static_assert(maxFrameOffsetFromTop >=
(MaxParams * MaxParamSize / sizeof(void*)) + 16,
"limited size of the offset field");
+
+ bool operator==(const StackMapHeader& rhs) const {
+ return numMappedWords == rhs.numMappedWords &&
+#ifdef DEBUG
+ numExitStubWords == rhs.numExitStubWords &&
+#endif
+ frameOffsetFromTop == rhs.frameOffsetFromTop &&
+ hasDebugFrameWithLiveRefs == rhs.hasDebugFrameWithLiveRefs;
+ }
+ bool operator!=(const StackMapHeader& rhs) const { return !(*this == rhs); }
};
WASM_DECLARE_CACHEABLE_POD(StackMapHeader);
@@ -132,6 +142,8 @@ static_assert(sizeof(StackMapHeader) == 4,
// is also noted. This is used in Instance::traceFrame to check that the
// TrapExitDummyValue is in the expected place in the frame.
struct StackMap final {
+ friend class StackMaps;
+
// The header contains the constant-sized fields before the variable-sized
// bitmap that follows.
StackMapHeader header;
@@ -155,31 +167,8 @@ struct StackMap final {
const uint32_t nBitmap = calcBitmapNumElems(header.numMappedWords);
memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
}
- explicit StackMap(const StackMapHeader& header) : header(header) {
- const uint32_t nBitmap = calcBitmapNumElems(header.numMappedWords);
- memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
- }
public:
- static StackMap* create(uint32_t numMappedWords) {
- size_t size = allocationSizeInBytes(numMappedWords);
- char* buf = (char*)js_malloc(size);
- if (!buf) {
- return nullptr;
- }
- return ::new (buf) StackMap(numMappedWords);
- }
- static StackMap* create(const StackMapHeader& header) {
- size_t size = allocationSizeInBytes(header.numMappedWords);
- char* buf = (char*)js_malloc(size);
- if (!buf) {
- return nullptr;
- }
- return ::new (buf) StackMap(header);
- }
-
- void destroy() { js_free((char*)this); }
-
// Returns the size of a `StackMap` allocated with `numMappedWords`.
static size_t allocationSizeInBytes(uint32_t numMappedWords) {
uint32_t nBitmap = calcBitmapNumElems(numMappedWords);
@@ -253,6 +242,16 @@ struct StackMap final {
uint32_t nBitmap = js::HowMany(numMappedWords, mappedWordsPerBitmapElem);
return nBitmap == 0 ? 1 : nBitmap;
}
+
+ public:
+ bool operator==(const StackMap& rhs) const {
+ // Check the header first, as it determines the bitmap length
+ if (header != rhs.header) {
+ return false;
+ }
+ // Compare the bitmap data
+ return memcmp(bitmap, rhs.bitmap, rawBitmapLengthInBytes()) == 0;
+ }
};
#ifndef DEBUG
@@ -266,48 +265,126 @@ using StackMapHashMap =
class StackMaps {
private:
+ // The primary allocator for stack maps. The LifoAlloc will malloc chunks of
+ // memory to be linearly allocated as stack maps, giving us pointer stability
+ // while avoiding lock contention from malloc across compilation threads. It
+ // also allows us to undo a stack map allocation.
+ LifoAlloc stackMaps_;
// Map for finding a stack map at a specific code offset.
- StackMapHashMap mapping_;
+ StackMapHashMap codeOffsetToStackMap_;
+
+ // The StackMap most recently finalized. Used for deduplication.
+ StackMap* lastAdded_ = nullptr;
+ // A LifoAlloc marker before the most recently allocated StackMap. Will be set
+ // by create() and cleared by finalize().
+ LifoAlloc::Mark beforeLastCreated_;
+#ifdef DEBUG
+ // The StackMap that will be undone by `beforeLastCreated_`. Used to validate
+ // correct usage of this class.
+ StackMap* createdButNotFinalized_ = nullptr;
+#endif
public:
- StackMaps() {}
- ~StackMaps() {
- for (auto iter = mapping_.modIter(); !iter.done(); iter.next()) {
- StackMap* stackmap = iter.getMutable().value();
- stackmap->destroy();
+ StackMaps() : stackMaps_(4096, js::BackgroundMallocArena) {}
+
+ // Allocates a new empty StackMap. After configuring the StackMap to your
+ // liking, you must call finalize().
+ StackMap* create(uint32_t numMappedWords) {
+ MOZ_ASSERT(!createdButNotFinalized_,
+ "a previous StackMap has been created but not finalized");
+
+ beforeLastCreated_ = stackMaps_.mark();
+ void* mem =
+ stackMaps_.alloc(StackMap::allocationSizeInBytes(numMappedWords));
+ if (!mem) {
+ return nullptr;
+ }
+ StackMap* newMap = new (mem) StackMap(numMappedWords);
+#ifdef DEBUG
+ createdButNotFinalized_ = newMap;
+#endif
+ return newMap;
+ }
+
+ // Allocates a new StackMap with a given header, e.g. one that had been
+ // previously serialized. After configuring the StackMap to your liking, you
+ // must call finalize().
+ StackMap* create(const StackMapHeader& header) {
+ StackMap* map = create(header.numMappedWords);
+ if (!map) {
+ return nullptr;
+ }
+ map->header = header;
+ return map;
+ }
+
+ // Finalizes a StackMap allocated by create(), adding it to the hash map
+ // with a particular code offset. Upon calling finalize(), `map` is "moved"
+ // into the StackMaps class and must no longer be accessed. (This is because
+ // it may be deduplicated.)
+ [[nodiscard]] bool finalize(uint32_t codeOffset, StackMap* map) {
+#ifdef DEBUG
+ MOZ_ASSERT(
+ map == createdButNotFinalized_,
+ "the provided stack map was not from the most recent call to create()");
+ createdButNotFinalized_ = nullptr;
+#endif
+
+ if (lastAdded_ && *map == *lastAdded_) {
+ // This stack map is a duplicate of the last one we added. Unwind the
+ // allocation that created the new map and add the existing one to the
+ // hash map.
+ stackMaps_.release(beforeLastCreated_);
+ return codeOffsetToStackMap_.put(codeOffset, lastAdded_);
}
- mapping_.clear();
+
+ // This stack map is new.
+ lastAdded_ = map;
+ stackMaps_.cancelMark(beforeLastCreated_);
+ return codeOffsetToStackMap_.put(codeOffset, map);
}
- [[nodiscard]] bool add(uint32_t codeOffset, StackMap* map) {
- return mapping_.put(codeOffset, map);
+ void clear() {
+ MOZ_ASSERT(!createdButNotFinalized_);
+ codeOffsetToStackMap_.clear();
+ stackMaps_.freeAll();
+ lastAdded_ = nullptr;
}
- void clear() { mapping_.clear(); }
- bool empty() const { return mapping_.empty(); }
+ bool empty() const { return length() == 0; }
// Return the number of stack maps contained in this.
- size_t length() const { return mapping_.count(); }
+ size_t length() const { return codeOffsetToStackMap_.count(); }
// Add all the stack maps from the other collection to this collection.
// Apply an optional offset while adding the stack maps.
[[nodiscard]] bool appendAll(StackMaps& other, uint32_t offsetInModule) {
+ MOZ_ASSERT(!other.createdButNotFinalized_);
+
// Reserve space for the new mappings so that we don't have to handle
// failure in the loop below.
- if (!mapping_.reserve(mapping_.count() + other.mapping_.count())) {
+ if (!codeOffsetToStackMap_.reserve(codeOffsetToStackMap_.count() +
+ other.codeOffsetToStackMap_.count())) {
return false;
}
- for (auto iter = other.mapping_.modIter(); !iter.done(); iter.next()) {
+ // Transfer chunks from other LifoAlloc for ownership. Pointers will stay
+ // stable. We must not fail from this point onward.
+ stackMaps_.transferFrom(&other.stackMaps_);
+
+ // Copy hash map entries. This is safe because we took ownership of the
+ // underlying storage.
+ for (auto iter = other.codeOffsetToStackMap_.modIter(); !iter.done();
+ iter.next()) {
uint32_t newOffset = iter.get().key() + offsetInModule;
StackMap* stackMap = iter.get().value();
- mapping_.putNewInfallible(newOffset, stackMap);
+ codeOffsetToStackMap_.putNewInfallible(newOffset, stackMap);
}
- other.mapping_.clear();
+ other.clear();
return true;
}
const StackMap* lookup(uint32_t codeOffset) const {
- auto ptr = mapping_.readonlyThreadsafeLookup(codeOffset);
+ auto ptr = codeOffsetToStackMap_.readonlyThreadsafeLookup(codeOffset);
if (!ptr) {
return nullptr;
}
@@ -316,7 +393,8 @@ class StackMaps {
}
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
- return mapping_.shallowSizeOfExcludingThis(mallocSizeOf);
+ return codeOffsetToStackMap_.shallowSizeOfExcludingThis(mallocSizeOf) +
+ stackMaps_.sizeOfExcludingThis(mallocSizeOf);
}
void checkInvariants(const uint8_t* base) const;
@@ -402,7 +480,8 @@ static inline size_t AlignStackArgAreaSize(size_t unalignedSize) {
[[nodiscard]] bool CreateStackMapForFunctionEntryTrap(
const ArgTypeVector& argTypes, const jit::RegisterOffsets& trapExitLayout,
size_t trapExitLayoutWords, size_t nBytesReservedBeforeTrap,
- size_t nInboundStackArgBytes, wasm::StackMap** result);
+ size_t nInboundStackArgBytes, wasm::StackMaps& stackMaps,
+ wasm::StackMap** result);
// At a resumable wasm trap, the machine's registers are saved on the stack by
// (code generated by) GenerateTrapExit(). This function writes into |args| a
@@ -431,19 +510,19 @@ static inline size_t AlignStackArgAreaSize(size_t unalignedSize) {
// <CALL POST-BARRIER HERE IN A COMPILER-SPECIFIC WAY>
// bind(&skipPostBarrier);
//
-// The actions are divided up to allow other actions to be placed between them,
-// such as saving and restoring live registers. The postbarrier call invokes
-// C++ and will kill all live registers.
+// The actions are divided up to allow other actions to be placed between
+// them, such as saving and restoring live registers. The postbarrier call
+// invokes C++ and will kill all live registers.
// Before storing a GC pointer value in memory, skip to `skipBarrier` if the
// prebarrier is not needed. Will clobber `scratch`.
//
// It is OK for `instance` and `scratch` to be the same register.
//
-// If `trapSiteDesc` is something, then metadata to catch a null access and emit
-// a null pointer exception will be emitted. This will only catch a null access
-// due to an incremental GC being in progress, the write that follows this
-// pre-barrier guard must also be guarded against null.
+// If `trapSiteDesc` is something, then metadata to catch a null access and
+// emit a null pointer exception will be emitted. This will only catch a null
+// access due to an incremental GC being in progress; the write that follows
+// this pre-barrier guard must also be guarded against null.
template <class Addr>
void EmitWasmPreBarrierGuard(jit::MacroAssembler& masm, jit::Register instance,
jit::Register scratch, Addr addr,
@@ -451,10 +530,10 @@ void EmitWasmPreBarrierGuard(jit::MacroAssembler& masm, jit::Register instance,
MaybeTrapSiteDesc trapSiteDesc);
// Before storing a GC pointer value in memory, call out-of-line prebarrier
-// code. This assumes `PreBarrierReg` contains the address that will be updated.
-// On ARM64 it also assums that x28 (the PseudoStackPointer) has the same value
-// as SP. `PreBarrierReg` is preserved by the barrier function. Will clobber
-// `scratch`.
+// code. This assumes `PreBarrierReg` contains the address that will be
+// updated. On ARM64 it also assumes that x28 (the PseudoStackPointer) has the
+// same value as SP. `PreBarrierReg` is preserved by the barrier function.
+// Will clobber `scratch`.
//
// It is OK for `instance` and `scratch` to be the same register.
void EmitWasmPreBarrierCallImmediate(jit::MacroAssembler& masm,
@@ -462,8 +541,8 @@ void EmitWasmPreBarrierCallImmediate(jit::MacroAssembler& masm,
jit::Register scratch,
jit::Register valueAddr,
size_t valueOffset);
-// The equivalent of EmitWasmPreBarrierCallImmediate, but for a jit::BaseIndex.
-// Will clobber `scratch1` and `scratch2`.
+// The equivalent of EmitWasmPreBarrierCallImmediate, but for a
+// jit::BaseIndex. Will clobber `scratch1` and `scratch2`.
//
// It is OK for `instance` and `scratch1` to be the same register.
void EmitWasmPreBarrierCallIndex(jit::MacroAssembler& masm,
@@ -471,10 +550,10 @@ void EmitWasmPreBarrierCallIndex(jit::MacroAssembler& masm,
jit::Register scratch2, jit::BaseIndex addr);
// After storing a GC pointer value in memory, skip to `skipBarrier` if a
-// postbarrier is not needed. If the location being set is in an heap-allocated
-// object then `object` must reference that object; otherwise it should be None.
-// The value that was stored is `setValue`. Will clobber `otherScratch` and
-// will use other available scratch registers.
+// postbarrier is not needed. If the location being set is in a
+// heap-allocated object then `object` must reference that object; otherwise
+// it should be None. The value that was stored is `setValue`. Will clobber
+// `otherScratch` and will use other available scratch registers.
//
// `otherScratch` cannot be a designated scratch register.
void EmitWasmPostBarrierGuard(jit::MacroAssembler& masm,
diff --git a/js/src/wasm/WasmSerialize.cpp b/js/src/wasm/WasmSerialize.cpp
@@ -1019,14 +1019,15 @@ CoderResult CodeCompileArgs(Coder<mode>& coder,
// WasmGC.h
CoderResult CodeStackMap(Coder<MODE_DECODE>& coder,
- CoderArg<MODE_DECODE, wasm::StackMap*> item) {
+ CoderArg<MODE_DECODE, wasm::StackMap*> item,
+ wasm::StackMaps* stackMaps) {
WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMap, 12);
// Decode the stack map header
StackMapHeader header;
MOZ_TRY(CodePod(coder, &header));
// Allocate a stack map for the header
- StackMap* map = StackMap::create(header);
+ StackMap* map = stackMaps->create(header);
if (!map) {
return Err(OutOfMemory());
}
@@ -1055,7 +1056,7 @@ CoderResult CodeStackMap(Coder<mode>& coder,
CoderResult CodeStackMaps(Coder<MODE_DECODE>& coder,
CoderArg<MODE_DECODE, wasm::StackMaps> item) {
- WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMaps, 40);
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMaps, 200);
// Decode the amount of stack maps
size_t length;
MOZ_TRY(CodePod(coder, &length));
@@ -1067,10 +1068,10 @@ CoderResult CodeStackMaps(Coder<MODE_DECODE>& coder,
// Decode the stack map
StackMap* map;
- MOZ_TRY(CodeStackMap(coder, &map));
+ MOZ_TRY(CodeStackMap(coder, &map, item));
// Add it to the map
- if (!item->add(codeOffset, map)) {
+ if (!item->finalize(codeOffset, map)) {
return Err(OutOfMemory());
}
}
@@ -1081,14 +1082,15 @@ CoderResult CodeStackMaps(Coder<MODE_DECODE>& coder,
template <CoderMode mode>
CoderResult CodeStackMaps(Coder<mode>& coder,
CoderArg<mode, wasm::StackMaps> item) {
- WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMaps, 40);
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMaps, 200);
STATIC_ASSERT_ENCODING_OR_SIZING;
// Encode the amount of stack maps
size_t length = item->length();
MOZ_TRY(CodePod(coder, &length));
- for (auto iter = item->mapping_.iter(); !iter.done(); iter.next()) {
+ for (auto iter = item->codeOffsetToStackMap_.iter(); !iter.done();
+ iter.next()) {
uint32_t codeOffset = iter.get().key();
// Encode the offset
@@ -1299,7 +1301,7 @@ CoderResult CodeFuncToCodeRangeMap(
CoderResult CodeCodeBlock(Coder<MODE_DECODE>& coder,
wasm::UniqueCodeBlock* item,
const wasm::LinkData& linkData) {
- WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeBlock, 2624);
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeBlock, 2784);
*item = js::MakeUnique<CodeBlock>(CodeBlock::kindFromTier(Tier::Serialized));
if (!*item) {
return Err(OutOfMemory());
@@ -1340,7 +1342,7 @@ template <CoderMode mode>
CoderResult CodeCodeBlock(Coder<mode>& coder,
CoderArg<mode, wasm::CodeBlock> item,
const wasm::LinkData& linkData) {
- WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeBlock, 2624);
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeBlock, 2784);
STATIC_ASSERT_ENCODING_OR_SIZING;
MOZ_TRY(Magic(coder, Marker::CodeBlock));
diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
@@ -1889,17 +1889,18 @@ static bool AddStackCheckForImportFunctionEntry(jit::MacroAssembler& masm,
wasm::StackMap* stackMap = nullptr;
if (!CreateStackMapForFunctionEntryTrap(
argTypes, trapExitLayout, trapExitLayoutNumWords,
- nBytesReservedBeforeTrap, nInboundStackArgBytes, &stackMap)) {
+ nBytesReservedBeforeTrap, nInboundStackArgBytes, *stackMaps,
+ &stackMap)) {
return false;
}
// In debug builds, we'll always have a stack map, even if there are no
// refs to track.
MOZ_ASSERT(stackMap);
- if (stackMap && !stackMaps->add(trapInsnOffset.offset(), stackMap)) {
- stackMap->destroy();
- return false;
+ if (stackMap) {
+ return stackMaps->finalize(trapInsnOffset.offset(), stackMap);
}
+
return true;
}