tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 76eabb0febd4db7e9faf628147606c168ae38460
parent 2526b984dc979690a82d1d9759bd0d507d22b0bb
Author: Jon Coppeard <jcoppeard@mozilla.com>
Date:   Tue,  6 Jan 2026 11:12:02 +0000

Bug 2005448 - Make Wasm GC array postbarriers less conservative so they work with semispace nursery r=sfink,rhunt

The problem here is that wasm GC barriers for arrays work by casting elements
to GCPtr, and that is conservative about what it considers tenured. Without
semispace this is suboptimal but works. It doesn't work with semispace enabled.

The conservative part comes from CellPtrEdge::maybeInRememberedSet which checks
the address against the nursery. This will return true for allocations owned
by nursery things that are not allocated in nursery memory, in this case wasm
GC array trailers.

This has the effect that we will add store buffer entries for edges we don't
need to. Without semispace enabled this means we will tenure more things than
we need to in minor GC, including things only reachable through dead nursery
things. With semispace enabled we may keep storebuffer edges in dead nursery
things which results in UAF on the next nursery collection.

The approach of using individual cell pointer edges for the array element post
barrier is itself suboptimal which may be why this problem has not shown up
elsewhere. For JSObject arrays we use the slots edge store buffer where an
entry can apply to a range of slots, rather than add an entry for each
element. The JIT may just use the whole cell store buffer.

The patch fixes this by selecting the barrier wrapper class to use based on
whether the owner is tenured or not. GCPtr is appropriate for tenured things
(and has the postbarrier) and PreBarriered is used for nursery things (and
doesn't have the postbarrier). This approach is kind of hacky, but is used in
other places in the engine (e.g. weakmaps). I previously filed bug 2003769
about this.

This also adds some storebuffer APIs to add edges you know are from tenured
objects to skip the maybeInRememberedSet check. For wasm GC this means we can
check this in JIT code and maybe skip the VM call for the precise barrier
(which turned out not to be so precise).

There were some performance changes for the shell when I tested on macOS but
Linux wasn't much affected. I'm in the process of running performance tests for
the browser.

Differential Revision: https://phabricator.services.mozilla.com/D276638

Diffstat:
Mjs/src/gc/Cell.h | 6++++--
Mjs/src/gc/StoreBuffer.h | 69++++++++++++++++++++++++++++++++++++++++++++++++---------------------
Mjs/src/jit/ABIFunctionType.yaml | 3+++
Mjs/src/wasm/WasmBaselineCompile.cpp | 7+++++++
Mjs/src/wasm/WasmBuiltins.cpp | 36+++++++++++++++++++++++++-----------
Mjs/src/wasm/WasmGcObject.cpp | 15++++++++++-----
Mjs/src/wasm/WasmInstance.cpp | 31+++++++++++++++++++++++--------
Mjs/src/wasm/WasmInstance.h | 2--
Mjs/src/wasm/WasmIonCompile.cpp | 6+++---
Mjs/src/wasm/WasmValue.cpp | 7+++++++
Mjs/src/wasm/WasmValue.h | 14+++++++++++++-
11 files changed, 143 insertions(+), 53 deletions(-)

diff --git a/js/src/gc/Cell.h b/js/src/gc/Cell.h @@ -358,8 +358,10 @@ inline uintptr_t Cell::address() const { ChunkBase* Cell::chunk() const { uintptr_t addr = uintptr_t(this); MOZ_ASSERT(addr % CellAlignBytes == 0); - addr &= ~ChunkMask; - return reinterpret_cast<ChunkBase*>(addr); + auto* chunk = reinterpret_cast<ChunkBase*>(addr & ~ChunkMask); + MOZ_ASSERT(chunk->isNurseryChunk() || + chunk->kind == ChunkKind::TenuredArenas); + return chunk; } inline StoreBuffer* Cell::storeBuffer() const { return chunk()->storeBuffer; } diff --git a/js/src/gc/StoreBuffer.h b/js/src/gc/StoreBuffer.h @@ -406,7 +406,7 @@ class StoreBuffer { #endif template <typename Buffer, typename Edge> - void unput(Buffer& buffer, const Edge& edge) { + void unputEdge(Buffer& buffer, const Edge& edge) { checkAccess(); if (!isEnabled()) { return; @@ -418,7 +418,7 @@ class StoreBuffer { } template <typename Buffer, typename Edge> - void put(Buffer& buffer, const Edge& edge, JS::GCReason overflowReason) { + void putEdge(Buffer& buffer, const Edge& edge, JS::GCReason overflowReason) { checkAccess(); if (!isEnabled()) { return; @@ -437,6 +437,23 @@ class StoreBuffer { } } + template <typename Buffer, typename Edge> + void putEdgeFromTenured(Buffer& buffer, const Edge& edge, + JS::GCReason overflowReason) { + MOZ_ASSERT(edge.maybeInRememberedSet(nursery_)); + checkAccess(); + + if (!isEnabled()) { + return; + } + + mozilla::ReentrancyGuard g(*this); + PutResult r = buffer.put(edge); + if (MOZ_UNLIKELY(r == PutResult::AboutToOverflow)) { + setAboutToOverflow(overflowReason); + } + } + MonoTypeBuffer<ValueEdge> bufferVal; MonoTypeBuffer<StringPtrEdge> bufStrCell; MonoTypeBuffer<BigIntPtrEdge> bufBigIntCell; @@ -492,34 +509,40 @@ class StoreBuffer { /* Insert a single edge into the buffer/remembered set. 
*/ void putValue(JS::Value* vp) { - put(bufferVal, ValueEdge(vp), JS::GCReason::FULL_VALUE_BUFFER); + putEdge(bufferVal, ValueEdge(vp), JS::GCReason::FULL_VALUE_BUFFER); } - void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); } + void unputValue(JS::Value* vp) { unputEdge(bufferVal, ValueEdge(vp)); } void putCell(JSString** strp) { - put(bufStrCell, StringPtrEdge(strp), - JS::GCReason::FULL_CELL_PTR_STR_BUFFER); + putEdge(bufStrCell, StringPtrEdge(strp), + JS::GCReason::FULL_CELL_PTR_STR_BUFFER); + } + void unputCell(JSString** strp) { + unputEdge(bufStrCell, StringPtrEdge(strp)); } - void unputCell(JSString** strp) { unput(bufStrCell, StringPtrEdge(strp)); } void putCell(JS::BigInt** bip) { - put(bufBigIntCell, BigIntPtrEdge(bip), - JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER); + putEdge(bufBigIntCell, BigIntPtrEdge(bip), + JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER); + } + void unputCell(JS::BigInt** bip) { + unputEdge(bufBigIntCell, BigIntPtrEdge(bip)); } - void unputCell(JS::BigInt** bip) { unput(bufBigIntCell, BigIntPtrEdge(bip)); } void putCell(JSObject** strp) { - put(bufObjCell, ObjectPtrEdge(strp), - JS::GCReason::FULL_CELL_PTR_OBJ_BUFFER); + putEdge(bufObjCell, ObjectPtrEdge(strp), + JS::GCReason::FULL_CELL_PTR_OBJ_BUFFER); + } + void unputCell(JSObject** strp) { + unputEdge(bufObjCell, ObjectPtrEdge(strp)); } - void unputCell(JSObject** strp) { unput(bufObjCell, ObjectPtrEdge(strp)); } void putCell(js::GetterSetter** gsp) { - put(bufGetterSetterCell, GetterSetterPtrEdge(gsp), - JS::GCReason::FULL_CELL_PTR_GETTER_SETTER_BUFFER); + putEdge(bufGetterSetterCell, GetterSetterPtrEdge(gsp), + JS::GCReason::FULL_CELL_PTR_GETTER_SETTER_BUFFER); } void unputCell(js::GetterSetter** gsp) { - unput(bufGetterSetterCell, GetterSetterPtrEdge(gsp)); + unputEdge(bufGetterSetterCell, GetterSetterPtrEdge(gsp)); } void putSlot(NativeObject* obj, int kind, uint32_t start, uint32_t count) { @@ -527,16 +550,20 @@ class StoreBuffer { if 
(bufferSlot.last_.touches(edge)) { bufferSlot.last_.merge(edge); } else { - put(bufferSlot, edge, JS::GCReason::FULL_SLOT_BUFFER); + putEdge(bufferSlot, edge, JS::GCReason::FULL_SLOT_BUFFER); } } void putWasmAnyRef(wasm::AnyRef* vp) { - put(bufferWasmAnyRef, WasmAnyRefEdge(vp), - JS::GCReason::FULL_WASM_ANYREF_BUFFER); + putEdge(bufferWasmAnyRef, WasmAnyRefEdge(vp), + JS::GCReason::FULL_WASM_ANYREF_BUFFER); + } + void putWasmAnyRefEdgeFromTenured(wasm::AnyRef* vp) { + putEdgeFromTenured(bufferWasmAnyRef, WasmAnyRefEdge(vp), + JS::GCReason::FULL_WASM_ANYREF_BUFFER); } void unputWasmAnyRef(wasm::AnyRef* vp) { - unput(bufferWasmAnyRef, WasmAnyRefEdge(vp)); + unputEdge(bufferWasmAnyRef, WasmAnyRefEdge(vp)); } static inline bool isInWholeCellBuffer(Cell* cell); @@ -549,7 +576,7 @@ class StoreBuffer { /* Insert an entry into the generic buffer. */ template <typename T> void putGeneric(const T& t) { - put(bufferGeneric, t, JS::GCReason::FULL_GENERIC_BUFFER); + putEdge(bufferGeneric, t, JS::GCReason::FULL_GENERIC_BUFFER); } void setMayHavePointersToDeadCells() { mayHavePointersToDeadCells_ = true; } diff --git a/js/src/jit/ABIFunctionType.yaml b/js/src/jit/ABIFunctionType.yaml @@ -321,4 +321,7 @@ args: [General, Int32, General, Int32, Int32] - ret: Void + args: [General, General, Int32, General, Int32, Int32] + +- ret: Void args: [General, Int32, General, Int32, Int32, Int32] diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp @@ -7337,6 +7337,13 @@ bool BaseCompiler::emitPostBarrierEdgeImprecise(const Maybe<RegRef>& object, bool BaseCompiler::emitPostBarrierEdgePrecise(const Maybe<RegRef>& object, RegPtr valueAddr, RegRef prevValue, RegRef value) { + // Currently this is only called to write into wasm tables. 
+ // + // If this changes and we use this method to write into objects which might be + // in the nursery then we need to check for that here and skip the barrier (we + // only need to record pointers from the tenured heap into the nursery). + MOZ_ASSERT(object.isNothing()); + // Push `object` and `value` to preserve them across the call. if (object) { pushRef(*object); diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp @@ -144,8 +144,12 @@ constexpr SymbolicAddressSignature SASigArrayMemMove = { 6, {_WAD, _I32, _WAD, _I32, _I32, _I32, _END}}; constexpr SymbolicAddressSignature SASigArrayRefsMove = { - SymbolicAddress::ArrayRefsMove, _VOID, _Infallible, _NoTrap, 5, - {_WAD, _I32, _WAD, _I32, _I32, _END}}; + SymbolicAddress::ArrayRefsMove, + _VOID, + _Infallible, + _NoTrap, + 6, + {_RoN, _WAD, _I32, _WAD, _I32, _I32, _END}}; constexpr SymbolicAddressSignature SASigMemoryGrowM32 = { SymbolicAddress::MemoryGrowM32, _I32, _Infallible, _NoTrap, 3, {_PTR, _I32, _I32, _END}}; @@ -1323,17 +1327,27 @@ static void WasmArrayMemMove(uint8_t* destArrayData, uint32_t destIndex, size_t(elementSize) * count); } -static void WasmArrayRefsMove(GCPtr<AnyRef>* destArrayData, uint32_t destIndex, - AnyRef* srcArrayData, uint32_t srcIndex, - uint32_t count) { +static void WasmArrayRefsMove(WasmArrayObject* destArrayObject, + WriteBarriered<AnyRef>* destArrayData, + uint32_t destIndex, AnyRef* srcArrayData, + uint32_t srcIndex, uint32_t count) { AutoUnsafeCallWithABI unsafe; - GCPtr<AnyRef>* dstBegin = destArrayData + destIndex; + + // Using std::copy will call set() on the barrier wrapper under the hood. 
+ auto copyElements = [count](auto* dstBegin, auto* srcBegin) { + if (uintptr_t(dstBegin) < uintptr_t(srcBegin)) { + std::copy(srcBegin, srcBegin + count, dstBegin); + } else { + std::copy_backward(srcBegin, srcBegin + count, dstBegin + count); + } + }; + + WriteBarriered<AnyRef>* dstBegin = destArrayData + destIndex; AnyRef* srcBegin = srcArrayData + srcIndex; - // The std::copy performs GCPtr::set() operation under the hood. - if (uintptr_t(dstBegin) < uintptr_t(srcBegin)) { - std::copy(srcBegin, srcBegin + count, dstBegin); + if (destArrayObject->isTenured()) { + copyElements((GCPtr<AnyRef>*)dstBegin, srcBegin); } else { - std::copy_backward(srcBegin, srcBegin + count, dstBegin + count); + copyElements((PreBarriered<AnyRef>*)dstBegin, srcBegin); } } @@ -1514,7 +1528,7 @@ void* wasm::AddressOf(SymbolicAddress imm, ABIFunctionType* abiType) { *abiType = Args_Void_GeneralInt32GeneralInt32Int32Int32; return FuncCast(WasmArrayMemMove, *abiType); case SymbolicAddress::ArrayRefsMove: - *abiType = Args_Void_GeneralInt32GeneralInt32Int32; + *abiType = Args_Void_GeneralGeneralInt32GeneralInt32Int32; return FuncCast(WasmArrayRefsMove, *abiType); case SymbolicAddress::MemoryGrowM32: diff --git a/js/src/wasm/WasmGcObject.cpp b/js/src/wasm/WasmGcObject.cpp @@ -217,7 +217,8 @@ bool WasmGcObject::obj_newEnumerate(JSContext* cx, HandleObject obj, return true; } -static void WriteValTo(const Val& val, StorageType ty, void* dest) { +static void WriteValTo(WasmGcObject* owner, const Val& val, StorageType ty, + void* dest) { switch (ty.kind()) { case StorageType::I8: *((uint8_t*)dest) = val.i32(); @@ -241,7 +242,11 @@ static void WriteValTo(const Val& val, StorageType ty, void* dest) { *((V128*)dest) = val.v128(); break; case StorageType::Ref: - *((GCPtr<AnyRef>*)dest) = val.ref(); + if (owner->isTenured()) { + *((GCPtr<AnyRef>*)dest) = val.ref(); + } else { + *((PreBarriered<AnyRef>*)dest) = val.ref(); + } break; } } @@ -371,7 +376,7 @@ void WasmArrayObject::storeVal(const Val& 
val, uint32_t itemIndex) { size_t elementSize = arrayType.elementType().size(); MOZ_ASSERT(itemIndex < numElements_); uint8_t* data = data_ + elementSize * itemIndex; - WriteValTo(val, arrayType.elementType(), data); + WriteValTo(this, val, arrayType.elementType(), data); } void WasmArrayObject::fillVal(const Val& val, uint32_t itemIndex, @@ -381,7 +386,7 @@ void WasmArrayObject::fillVal(const Val& val, uint32_t itemIndex, uint8_t* data = data_ + elementSize * itemIndex; MOZ_ASSERT(itemIndex <= numElements_ && len <= numElements_ - itemIndex); for (uint32_t i = 0; i < len; i++) { - WriteValTo(val, arrayType.elementType(), data); + WriteValTo(this, val, arrayType.elementType(), data); data += elementSize; } } @@ -551,7 +556,7 @@ void WasmStructObject::storeVal(const Val& val, uint32_t fieldIndex) { StorageType fieldType = structType.fields_[fieldIndex].type; uint8_t* data = fieldIndexToAddress(fieldIndex); - WriteValTo(val, fieldType, data); + WriteValTo(this, val, fieldType, data); } static const JSClassOps WasmStructObjectOutlineClassOps = { diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp @@ -1619,9 +1619,16 @@ static bool ArrayCopyFromElem(JSContext* cx, Handle<WasmArrayObject*> arrayObj, return false; } - GCPtr<AnyRef>* dst = reinterpret_cast<GCPtr<AnyRef>*>(arrayObj->data_); - for (uint32_t i = 0; i < numElements; i++) { - dst[arrayIndex + i] = seg[segOffset + i]; + auto copyElements = [&](auto* dst) { + for (uint32_t i = 0; i < numElements; i++) { + dst[arrayIndex + i] = seg[segOffset + i]; + } + }; + + if (arrayObj->isTenured()) { + copyElements(reinterpret_cast<GCPtr<AnyRef>*>(arrayObj->data_)); + } else { + copyElements(reinterpret_cast<PreBarriered<AnyRef>*>(arrayObj->data_)); } return true; @@ -1906,14 +1913,22 @@ static bool ArrayCopyFromElem(JSContext* cx, Handle<WasmArrayObject*> arrayObj, return 0; } - GCPtr<AnyRef>* dst = (GCPtr<AnyRef>*)dstBase; AnyRef* src = (AnyRef*)srcBase; - // The std::copy performs GCPtr::set() 
operation under the hood. - if (uintptr_t(dstBase) < uintptr_t(srcBase)) { - std::copy(src, src + numElements, dst); + // Using std::copy will call set() on the barrier wrapper under the hood. + auto copyElements = [&](auto* dst) { + if (uintptr_t(dst) < uintptr_t(src)) { + std::copy(src, src + numElements, dst); + } else { + std::copy_backward(src, src + numElements, dst + numElements); + } + }; + + if (dstArrayObj->isTenured()) { + copyElements((GCPtr<AnyRef>*)dstBase); } else { - std::copy_backward(src, src + numElements, dst + numElements); + copyElements((PreBarriered<AnyRef>*)dstBase); } + return 0; } diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h @@ -618,8 +618,6 @@ class alignas(16) Instance { static int32_t arrayCopy(Instance* instance, void* dstArray, uint32_t dstIndex, void* srcArray, uint32_t srcIndex, uint32_t numElements, uint32_t elementSize); - static int32_t arrayFill(Instance* instance, void* array, uint32_t index, - uint32_t numElements); static int32_t refTest(Instance* instance, void* refPtr, const wasm::TypeDef* typeDef); static int32_t intrI8VecMul(Instance* instance, uint32_t dest, uint32_t src1, diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp @@ -5550,9 +5550,9 @@ class FunctionCompiler { if (elemsAreRefTyped) { MOZ_RELEASE_ASSERT(elemSize == sizeof(void*)); - if (!builtinCall5(SASigArrayRefsMove, lineOrBytecode, dstData, - dstArrayIndex, srcData, srcArrayIndex, numElements, - nullptr)) { + if (!builtinCall6(SASigArrayRefsMove, lineOrBytecode, dstArrayObject, + dstData, dstArrayIndex, srcData, srcArrayIndex, + numElements, nullptr)) { return false; } } else { diff --git a/js/src/wasm/WasmValue.cpp b/js/src/wasm/WasmValue.cpp @@ -900,3 +900,10 @@ Value wasm::UnboxFuncRef(FuncRef val) { result.setObjectOrNull(fn); return result; } + +#ifdef DEBUG +void wasm::AssertEdgeSourceNotInsideNursery(void* vp) { + Nursery& nursery = TlsContext.get()->runtime()->gc.nursery(); + 
MOZ_ASSERT(!nursery.isInside(vp)); +} +#endif diff --git a/js/src/wasm/WasmValue.h b/js/src/wasm/WasmValue.h @@ -405,6 +405,11 @@ extern bool ToJSValue(JSContext* cx, const void* src, ValType type, CoercionLevel level = CoercionLevel::Spec); template <typename Debug = NoDebug> extern bool ToJSValueMayGC(ValType type); + +#ifdef DEBUG +void AssertEdgeSourceNotInsideNursery(void* vp); +#endif + } // namespace wasm template <> @@ -420,6 +425,13 @@ struct InternalBarrierMethods<wasm::AnyRef> { static MOZ_ALWAYS_INLINE void postBarrier(wasm::AnyRef* vp, const wasm::AnyRef prev, const wasm::AnyRef next) { + // This assumes that callers have already checked that |vp| is in the + // tenured heap. Don't use GCPtr<AnyRef> for things that could be in the + // nursery! +#ifdef DEBUG + AssertEdgeSourceNotInsideNursery(vp); +#endif + // If the target needs an entry, add it. gc::StoreBuffer* sb; if (next.isGCThing() && (sb = next.toGCThing()->storeBuffer())) { @@ -430,7 +442,7 @@ struct InternalBarrierMethods<wasm::AnyRef> { if (prev.isGCThing() && prev.toGCThing()->storeBuffer()) { return; } - sb->putWasmAnyRef(vp); + sb->putWasmAnyRefEdgeFromTenured(vp); return; } // Remove the prev entry if the new value does not need it.