tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

commit 1e3056aabe563607243f09c16028da94443f6f03
parent 9b4615ce61d33853b7cd054f0a11463a1a54aedd
Author: Hannes Verschore <hv1989@gmail.com>
Date:   Thu, 11 Dec 2025 22:43:12 +0000

Bug 1999828: Part 3 - Implement GuardMultipleShapesToOffset. r=iain

Implement a smarter version of GuardMultipleShapes. Instead of only guarding on the shapes,
this variant also keeps track of the specific slot offset that is accessed for each shape.
The output of GuardMultipleShapesToOffset can be forwarded to
{Load|Store}{Fixed|Dynamic}SlotFromOffset, which allows more cases to avoid a megamorphic call.
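The sketch below (not part of the patch; ShapeId, ShapeOffsetList and lookupOffset are
hypothetical names, not SpiderMonkey APIs) illustrates the lookup the new guard performs:
the folded stub stores interleaved [shape, offset] pairs and, on a shape match, hands the
matched slot offset to the slot load instead of falling back to a megamorphic lookup.

// Standalone C++ sketch, assuming the interleaved layout described above.
#include <cstdint>
#include <optional>
#include <vector>

using ShapeId = uintptr_t;

struct ShapeOffsetList {
  // Stored as [shape1, offset1, shape2, offset2, ...], mirroring
  // ShapeListWithOffsetsObject in this patch.
  std::vector<uintptr_t> elems;

  // Guard: if the object's shape is in the list, return the slot offset to
  // load from; otherwise the guard fails and the stub bails out.
  std::optional<uint32_t> lookupOffset(ShapeId shape) const {
    for (size_t i = 0; i + 1 < elems.size(); i += 2) {
      if (elems[i] == shape) {
        return static_cast<uint32_t>(elems[i + 1]);
      }
    }
    return std::nullopt;  // guard failure
  }
};

With this, a property access site that sees several shapes storing the property at
different slots can stay in one folded stub rather than transitioning to a megamorphic IC.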

Differential Revision: https://phabricator.services.mozilla.com/D274204

Diffstat:
M js/src/jit/CacheIR.h                 |   2 ++
M js/src/jit/CacheIRCompiler.cpp       |  48 +++++++++++++++++++++++++++++++++++++++++++-----
M js/src/jit/CacheIROps.yaml           |  10 ++++++++++
M js/src/jit/CacheIRWriter.h           |  11 +++++++++--
M js/src/jit/CodeGenerator.cpp         |  20 ++++++++++++++++++--
M js/src/jit/JitScript.cpp             |  16 ++++++++++++++++
M js/src/jit/LIROps.yaml               |   8 ++++++++
M js/src/jit/Lowering.cpp              |  11 +++++++++++
M js/src/jit/MIR.cpp                   |   8 ++++++++
M js/src/jit/MIROps.yaml               |  10 ++++++++++
M js/src/jit/MacroAssembler.cpp        |  40 +++++++++++++++++++++++++++-------------
M js/src/jit/MacroAssembler.h          |  19 +++++++++++++++----
M js/src/jit/ShapeList.cpp             |  96 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M js/src/jit/ShapeList.h               |  22 ++++++++++++++++++++++
M js/src/jit/StubFolding.cpp           | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
M js/src/jit/WarpCacheIRTranspiler.cpp |  15 +++++++++++++++
16 files changed, 475 insertions(+), 41 deletions(-)

diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
@@ -81,6 +81,8 @@ class ValOperandId : public OperandId {
  public:
   ValOperandId() = default;
   explicit ValOperandId(uint16_t id) : OperandId(id) {}
+
+  bool operator==(const ValOperandId& other) const { return id_ == other.id_; }
 };

 class ValueTagOperandId : public OperandId {
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
@@ -1478,19 +1478,26 @@ bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
   return true;
 }

-bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
-                                           uint32_t ignoreOffset) const {
+bool CacheIRWriter::stubDataEqualsIgnoringShapeAndOffset(
+    const uint8_t* stubData, uint32_t shapeFieldOffset,
+    mozilla::Maybe<uint32_t> offsetFieldOffset) const {
   MOZ_ASSERT(!failed());

   uint32_t offset = 0;
   for (const StubField& field : stubFields_) {
-    if (offset != ignoreOffset) {
+    if (offset == shapeFieldOffset) {
+      // Don't compare shapeField.
+    } else if (offsetFieldOffset.isSome() && offset == *offsetFieldOffset) {
+      // Skip offsetField, the "FromOffset" variant doesn't have this.
+      continue;
+    } else {
       if (field.sizeIsWord()) {
         uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
         if (field.asWord() != raw) {
           return false;
         }
       } else {
+        MOZ_ASSERT(field.sizeIsInt64());
         uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
         if (field.asInt64() != raw) {
           return false;
@@ -9919,8 +9926,39 @@ bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
   emitLoadStubField(shapeArray, shapes);
   masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);

-  masm.branchTestObjShapeList(Assembler::NotEqual, obj, shapes, scratch,
-                              scratch2, spectreScratch, failure->label());
+  masm.branchTestObjShapeList(obj, shapes, scratch, scratch2, spectreScratch,
+                              failure->label());
+  return true;
+}
+
+bool CacheIRCompiler::emitGuardMultipleShapesToOffset(ObjOperandId objId,
+                                                      uint32_t shapesOffset,
+                                                      Int32OperandId offsetId) {
+  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+  Register obj = allocator.useRegister(masm, objId);
+  Register offset = allocator.defineRegister(masm, offsetId);
+  AutoScratchRegister shapes(allocator, masm);
+  AutoScratchRegister scratch(allocator, masm);
+  AutoScratchRegister scratch2(allocator, masm);
+
+  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+  // We can re-use the output (offset) as scratch spectre register,
+  // since the output is only set after all branches.
+  Register spectreScratch = needSpectreMitigations ? offset : InvalidReg;
+
+  FailurePath* failure;
+  if (!addFailurePath(&failure)) {
+    return false;
+  }
+
+  // The stub field contains a ListObject. Load its elements.
+  StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
+  emitLoadStubField(shapeArray, shapes);
+  masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);
+
+  masm.branchTestObjShapeListSetOffset(obj, shapes, offset, scratch, scratch2,
+                                       spectreScratch, failure->label());
   return true;
 }
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
@@ -248,6 +248,16 @@
     obj: ObjId
     shapes: ObjectField

+- name: GuardMultipleShapesToOffset
+  shared: true
+  transpile: true
+  cost_estimate: 2
+  custom_writer: true
+  args:
+    obj: ObjId
+    shapes: ObjectField
+  result: Int32Id
+
 - name: GuardProto
   shared: false
   transpile: true
diff --git a/js/src/jit/CacheIRWriter.h b/js/src/jit/CacheIRWriter.h
@@ -393,8 +393,9 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter {
   size_t stubDataSize() const { return stubDataSize_; }
   void copyStubData(uint8_t* dest) const;
   bool stubDataEquals(const uint8_t* stubData) const;
-  bool stubDataEqualsIgnoring(const uint8_t* stubData,
-                              uint32_t ignoreOffset) const;
+  bool stubDataEqualsIgnoringShapeAndOffset(
+      const uint8_t* stubData, uint32_t shapeFieldOffset,
+      mozilla::Maybe<uint32_t> offsetFieldOffset) const;

   bool operandIsDead(uint32_t operandId, uint32_t currentInstruction) const {
     if (operandId >= operandLastUsed_.length()) {
@@ -729,6 +730,12 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter {
     guardMultipleShapes_(obj, shapes);
   }

+  Int32OperandId guardMultipleShapesToOffset(ObjOperandId obj,
+                                             ListObject* shapes) {
+    MOZ_ASSERT(shapes->length() > 0);
+    return guardMultipleShapesToOffset_(obj, shapes);
+  }
+
   friend class CacheIRCloner;

   CACHE_IR_WRITER_GENERATED
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
@@ -4496,8 +4496,7 @@ void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {

   Label bail;
   masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
-  masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
-                              spectre, &bail);
+  masm.branchTestObjShapeList(obj, temp, temp2, temp3, spectre, &bail);
   bailoutFrom(&bail, guard->snapshot());
 }

@@ -4539,6 +4538,23 @@ void CodeGenerator::visitGuardShapeList(LGuardShapeList* guard) {
   bailoutFrom(&bail, guard->snapshot());
 }

+void CodeGenerator::visitGuardMultipleShapesToOffset(
+    LGuardMultipleShapesToOffset* guard) {
+  Register obj = ToRegister(guard->object());
+  Register shapeList = ToRegister(guard->shapeList());
+  Register temp = ToRegister(guard->temp0());
+  Register temp1 = ToRegister(guard->temp1());
+  Register temp2 = ToRegister(guard->temp2());
+  Register offset = ToRegister(guard->output());
+  Register spectre =
+      JitOptions.spectreObjectMitigations ? offset : InvalidReg;
+
+  Label bail;
+  masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
+  masm.branchTestObjShapeListSetOffset(obj, temp, offset, temp1, temp2, spectre,
+                                       &bail);
+  bailoutFrom(&bail, guard->snapshot());
+}
+
 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
   Register obj = ToRegister(guard->object());
   Register expected = ToRegister(guard->expected());
diff --git a/js/src/jit/JitScript.cpp b/js/src/jit/JitScript.cpp
@@ -1024,6 +1024,22 @@ HashNumber ICScript::hash(JSContext* cx) {
           }
           break;
         }
+        case CacheOp::GuardMultipleShapesToOffset: {
+          auto args = reader.argsForGuardMultipleShapesToOffset();
+          JSObject* shapes =
+              stubInfo->getStubField<StubField::Type::JSObject>(
+                  stub->toCacheIRStub(), args.shapesOffset);
+          auto* shapesObject = &shapes->as<ShapeListWithOffsetsObject>();
+          size_t numShapes = shapesObject->numShapes();
+          if (ShapeListSnapshot::shouldSnapshot(numShapes)) {
+            for (size_t i = 0; i < numShapes; i++) {
+              Shape* shape = shapesObject->getShapeUnbarriered(i);
+              h = mozilla::AddToHash(h, shape);
+              h = mozilla::AddToHash(h, shapesObject->getOffset(i));
+            }
+          }
+          break;
+        }
         default:
           reader.skip(CacheIROpInfos[size_t(op)].argLength);
           break;
diff --git a/js/src/jit/LIROps.yaml b/js/src/jit/LIROps.yaml
@@ -3034,6 +3034,14 @@
     num: WordSized
   mir_op: true

+- name: GuardMultipleShapesToOffset
+  result_type: WordSized
+  operands:
+    object: WordSized
+    shapeList: WordSized
+  num_temps: 3
+  mir_op: true
+
 - name: GuardProto
   operands:
     object: WordSized
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
@@ -5825,6 +5825,17 @@ void LIRGenerator::visitGuardShapeList(MGuardShapeList* ins) {
   }
 }

+void LIRGenerator::visitGuardMultipleShapesToOffset(
+    MGuardMultipleShapesToOffset* ins) {
+  MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+  auto* lir = new (alloc()) LGuardMultipleShapesToOffset(
+      useRegister(ins->object()), useRegister(ins->shapeList()), temp(), temp(),
+      temp());
+  assignSnapshot(lir, ins->bailoutKind());
+  define(lir, ins);
+}
+
 void LIRGenerator::visitGuardProto(MGuardProto* ins) {
   MOZ_ASSERT(ins->object()->type() == MIRType::Object);
   MOZ_ASSERT(ins->expected()->type() == MIRType::Object);
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
@@ -570,6 +570,10 @@ const MDefinition* MDefinition::skipObjectGuards() const {
       result = result->toGuardMultipleShapes()->object();
       continue;
     }
+    if (result->isGuardMultipleShapesToOffset()) {
+      result = result->toGuardMultipleShapesToOffset()->object();
+      continue;
+    }
     if (result->isGuardNullProto()) {
       result = result->toGuardNullProto()->object();
       continue;
@@ -7307,6 +7311,10 @@ AliasSet MGuardMultipleShapes::getAliasSet() const {
   return AliasSet::Load(AliasSet::ObjectFields);
 }

+AliasSet MGuardMultipleShapesToOffset::getAliasSet() const {
+  return AliasSet::Load(AliasSet::ObjectFields);
+}
+
 AliasSet MGuardGlobalGeneration::getAliasSet() const {
   return AliasSet::Load(AliasSet::GlobalGenerationCounter);
 }
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
@@ -2238,6 +2238,16 @@
   generate_lir: true
   lir_temps: 2

+- name: GuardMultipleShapesToOffset
+  operands:
+    object: Object
+    shapeList: Object
+  result_type: Int32
+  guard: true
+  movable: true
+  congruent_to: if_operands_equal
+  alias_set: custom
+
 - name: GuardProto
   gen_boilerplate: false
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
@@ -5763,16 +5763,13 @@ void MacroAssembler::branchTestType(Condition cond, Register tag,
   }
 }

-void MacroAssembler::branchTestObjShapeList(
-    Condition cond, Register obj, Register shapeElements, Register shapeScratch,
-    Register endScratch, Register spectreScratch, Label* label) {
-  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
-
+void MacroAssembler::branchTestObjShapeListImpl(
+    Register obj, Register shapeElements, size_t itemSize,
+    Register shapeScratch, Register endScratch, Register spectreScratch,
+    Label* fail) {
   bool needSpectreMitigations = spectreScratch != InvalidReg;

   Label done;
-  Label* onMatch = cond == Assembler::Equal ? label : &done;
-  Label* onNoMatch = cond == Assembler::Equal ? &done : label;

   // Load the object's shape pointer into shapeScratch, and prepare to compare
   // it with the shapes in the list. The shapes are stored as private values so
@@ -5783,7 +5780,7 @@ void MacroAssembler::branchTestObjShapeList(
   Address lengthAddr(shapeElements,
                      ObjectElements::offsetOfInitializedLength());
   load32(lengthAddr, endScratch);
-  branch32(Assembler::Equal, endScratch, Imm32(0), onNoMatch);
+  branch32(Assembler::Equal, endScratch, Imm32(0), fail);

   BaseObjectElementIndex endPtrAddr(shapeElements, endScratch);
   computeEffectiveAddress(endPtrAddr, endScratch);
@@ -5797,21 +5794,38 @@ void MacroAssembler::branchTestObjShapeList(
   if (needSpectreMitigations) {
     move32(Imm32(0), spectreScratch);
   }
-  branchPtr(Assembler::Equal, Address(shapeElements, 0), shapeScratch, onMatch);
+  branchPtr(Assembler::Equal, Address(shapeElements, 0), shapeScratch, &done);
   if (needSpectreMitigations) {
     spectreMovePtr(Assembler::Equal, spectreScratch, obj);
   }

   // Advance to next shape and loop if not finished.
-  addPtr(Imm32(sizeof(Value)), shapeElements);
+  addPtr(Imm32(itemSize), shapeElements);
   branchPtr(Assembler::Below, shapeElements, endScratch, &loop);

-  if (cond == Assembler::NotEqual) {
-    jump(label);
-  }
+  jump(fail);

   bind(&done);
 }

+void MacroAssembler::branchTestObjShapeList(
+    Register obj, Register shapeElements, Register shapeScratch,
+    Register endScratch, Register spectreScratch, Label* fail) {
+  branchTestObjShapeListImpl(obj, shapeElements, sizeof(Value), shapeScratch,
+                             endScratch, spectreScratch, fail);
+}
+
+void MacroAssembler::branchTestObjShapeListSetOffset(
+    Register obj, Register shapeElements, Register offset,
+    Register shapeScratch, Register endScratch, Register spectreScratch,
+    Label* fail) {
+  branchTestObjShapeListImpl(obj, shapeElements, 2 * sizeof(Value),
+                             shapeScratch, endScratch, spectreScratch, fail);
+
+  // The shapeElements register points to the matched shape (if found).
+  // The corresponding offset is saved in the array as the next value.
+  load32(Address(shapeElements, sizeof(Value)), offset);
+}
+
 void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
                                               const Address& compartment,
                                               Register scratch, Label* label) {
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
@@ -1843,10 +1843,21 @@ class MacroAssembler : public MacroAssemblerSpecific {
                                            const Shape* shape, Label* label);

-  void branchTestObjShapeList(Condition cond, Register obj,
-                              Register shapeElements, Register shapeScratch,
-                              Register endScratch, Register spectreScratch,
-                              Label* label);
+ private:
+  void branchTestObjShapeListImpl(Register obj, Register shapeElements,
+                                  size_t itemSize, Register shapeScratch,
+                                  Register endScratch, Register spectreScratch,
+                                  Label* fail);
+
+ public:
+  void branchTestObjShapeList(Register obj, Register shapeElements,
+                              Register shapeScratch, Register endScratch,
+                              Register spectreScratch, Label* fail);
+
+  void branchTestObjShapeListSetOffset(Register obj, Register shapeElements,
+                                       Register offset, Register shapeScratch,
+                                       Register endScratch,
+                                       Register spectreScratch, Label* fail);

   inline void branchTestClassIsFunction(Condition cond, Register clasp,
                                         Label* label);
diff --git a/js/src/jit/ShapeList.cpp b/js/src/jit/ShapeList.cpp
@@ -94,3 +94,99 @@ bool ShapeListObject::traceWeak(JSTracer* trc) {

   return length != 0;
 }
+
+const JSClass ShapeListWithOffsetsObject::class_ = {
+    "JIT ShapeList",
+    0,
+    &classOps_,
+};
+
+const JSClassOps ShapeListWithOffsetsObject::classOps_ = {
+    nullptr,                            // addProperty
+    nullptr,                            // delProperty
+    nullptr,                            // enumerate
+    nullptr,                            // newEnumerate
+    nullptr,                            // resolve
+    nullptr,                            // mayResolve
+    nullptr,                            // finalize
+    nullptr,                            // call
+    nullptr,                            // construct
+    ShapeListWithOffsetsObject::trace,  // trace
+};
+
+/* static */ ShapeListWithOffsetsObject* ShapeListWithOffsetsObject::create(
+    JSContext* cx) {
+  NativeObject* obj = NewTenuredObjectWithGivenProto(cx, &class_, nullptr);
+  if (!obj) {
+    return nullptr;
+  }
+
+  // Register this object so the GC can sweep its weak pointers.
+  if (!cx->zone()->registerObjectWithWeakPointers(obj)) {
+    ReportOutOfMemory(cx);
+    return nullptr;
+  }
+
+  return &obj->as<ShapeListWithOffsetsObject>();
+}
+
+Shape* ShapeListWithOffsetsObject::getShape(uint32_t index) const {
+  Shape* shape = getShapeUnbarriered(index);
+  gc::ReadBarrier(shape);
+  return shape;
+}
+
+Shape* ShapeListWithOffsetsObject::getShapeUnbarriered(uint32_t index) const {
+  Value value = ListObject::get(index * 2);
+  return static_cast<Shape*>(value.toPrivate());
+}
+
+uint32_t ShapeListWithOffsetsObject::getOffset(uint32_t index) const {
+  Value value = ListObject::get(index * 2 + 1);
+  return value.toPrivateUint32();
+}
+
+uint32_t ShapeListWithOffsetsObject::numShapes() const {
+  MOZ_ASSERT(length() % 2 == 0);
+  return length() / 2;
+};
+
+void ShapeListWithOffsetsObject::trace(JSTracer* trc, JSObject* obj) {
+  if (trc->traceWeakEdges()) {
+    obj->as<ShapeListWithOffsetsObject>().traceWeak(trc);
+  }
+}
+
+bool ShapeListWithOffsetsObject::traceWeak(JSTracer* trc) {
+  uint32_t length = getDenseInitializedLength();
+  if (length == 0) {
+    return false;  // Object may be uninitialized.
+  }
+
+  const HeapSlot* src = elements_;
+  const HeapSlot* end = src + length;
+  HeapSlot* dst = elements_;
+  while (src != end) {
+    Shape* shape = static_cast<Shape*>(src[0].toPrivate());
+    uint32_t offset = src[1].toPrivateUint32();
+    MOZ_ASSERT(shape->is<Shape>());
+    if (TraceManuallyBarrieredWeakEdge(trc, &shape,
+                                       "ShapeListWithOffsetsObject shape")) {
+      dst[0].unbarrieredSet(PrivateValue(shape));
+      dst[1].unbarrieredSet(PrivateUint32Value(offset));
+      dst += 2;
+    }
+    src += 2;
+  }
+
+  MOZ_ASSERT(dst <= end);
+  uint32_t newLength = dst - elements_;
+  setDenseInitializedLength(newLength);
+
+  if (length != newLength) {
+    JitSpew(JitSpew_StubFolding, "Cleared %u/%u shapes from %p",
+            (length - newLength) / 2, (length) / 2, this);
+  }
+
+  return length != 0;
+}
diff --git a/js/src/jit/ShapeList.h b/js/src/jit/ShapeList.h
@@ -35,6 +35,28 @@ class ShapeListObject : public ListObject {
   bool traceWeak(JSTracer* trc);
 };

+// Similar to ShapeListObject. But here we keep a list of the shape and the
+// corresponding offset of a particular access (e.g. obj.a).
+// The values are saved as: [shape1, offset1, shape2, offset2 ...].
+class ShapeListWithOffsetsObject : public ListObject {
+ public:
+  static const JSClass class_;
+  static const JSClassOps classOps_;
+
+  static constexpr size_t MaxLength = 16;
+
+  static ShapeListWithOffsetsObject* create(JSContext* cx);
+  static void trace(JSTracer* trc, JSObject* obj);
+
+  uint32_t numShapes() const;
+  Shape* getShape(uint32_t index) const;
+  Shape* getShapeUnbarriered(uint32_t index) const;
+
+  uint32_t getOffset(uint32_t index) const;
+
+  bool traceWeak(JSTracer* trc);
+};
+
 }  // namespace js::jit

 #endif  // jit_ShapeList_h
diff --git a/js/src/jit/StubFolding.cpp b/js/src/jit/StubFolding.cpp
@@ -27,7 +27,7 @@ using namespace js::jit;
 static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
                                   JSScript* script, ICScript* icScript) {
   // Try folding similar stubs with GuardShapes
-  // into GuardMultipleShapes
+  // into GuardMultipleShapes or GuardMultipleShapesToOffset
   ICEntry* icEntry = icScript->icEntryForStub(fallback);
   ICStub* entryStub = icEntry->firstStub();
@@ -42,18 +42,20 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,

   // Check to see if:
   // a) all of the stubs in this chain have the exact same code.
-  // b) all of the stubs have the same stub field data, except
-  //    for a single GuardShape where they differ.
+  // b) all of the stubs have the same stub field data, except for a single
+  //    GuardShape (and/or consecutive RawInt32) where they differ.
   // c) at least one stub after the first has a non-zero entry count.
   // d) All shapes in the GuardShape have the same realm.
   //
   // If all of these conditions hold, then we generate a single stub
-  // that covers all the existing cases by replacing GuardShape with
-  // GuardMultipleShapes.
+  // that covers all the existing cases by
+  // 1) replacing GuardShape with GuardMultipleShapes.

   uint32_t numActive = 0;
   mozilla::Maybe<uint32_t> foldableShapeOffset;
+  mozilla::Maybe<uint32_t> foldableOffsetOffset;
   GCVector<Value, 8> shapeList(cx);
+  GCVector<Value, 8> offsetList(cx);

   // Helper function: Keep list of different shapes.
   // Can fail on OOM or for cross-realm shapes.
@@ -76,6 +78,30 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
     return true;
   };

+  // Helper function: Keep list of "possible" different offsets (slotOffset).
+  // At this stage we don't know if they differ. Therefore only keep track
+  // of the first offset until we see a different offset and fill list equal to
+  // shapeList if that happens.
+  auto lazyAddOffset = [&offsetList, &shapeList, cx](uintptr_t slotOffset) {
+    Value v = PrivateUint32Value(static_cast<uint32_t>(slotOffset));
+    if (offsetList.length() == 1) {
+      if (v == offsetList[0]) return true;
+
+      while (offsetList.length() + 1 < shapeList.length()) {
+        if (!offsetList.append(offsetList[0])) {
+          cx->recoverFromOutOfMemory();
+          return false;
+        }
+      }
+    }
+
+    if (!offsetList.append(v)) {
+      cx->recoverFromOutOfMemory();
+      return false;
+    }
+    return true;
+  };
+
 #ifdef JS_JITSPEW
   JitSpew(JitSpew_StubFolding, "Trying to fold stubs at offset %u @ %s:%u:%u",
           fallback->pcOffset(), script->filename(), script->lineno(),
@@ -96,6 +122,8 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
 #endif

   // Find the offset of the first Shape that differs.
+  // Also see if the next field is RawInt32, which is
+  // the case for a Fixed/Dynamic slot if it follows the ShapeGuard.
   for (ICCacheIRStub* other = firstStub->nextCacheIR(); other;
        other = other->nextCacheIR()) {
     // Verify that the stubs share the same code.
@@ -144,6 +172,14 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,

       // Save the offset
       foldableShapeOffset.emplace(offset);
+
+      // Test if the consecutive field is potentially Load{Fixed|Dynamic}Slot
+      offset += StubField::sizeInBytes(fieldType);
+      fieldIndex++;
+      if (stubInfo->fieldType(fieldIndex) == StubField::Type::RawInt32) {
+        foldableOffsetOffset.emplace(offset);
+      }
+
       break;
     }
   }
@@ -156,8 +192,8 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
     return true;
   }

-  // Make sure the shape is the only value that differ.
-  // Collect the shape values at the same time.
+  // Make sure the shape and offset is the only value that differ.
+  // Collect the shape and offset values at the same time.
   for (ICCacheIRStub* stub = firstStub; stub; stub = stub->nextCacheIR()) {
     const uint8_t* stubData = stub->stubDataStart();
     uint32_t fieldIndex = 0;
@@ -172,6 +208,14 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
         if (!addShape(raw)) {
           return true;
         }
+      } else if (foldableOffsetOffset.isSome() &&
+                 offset == *foldableOffsetOffset) {
+        // Save the offsets of all stubs.
+        MOZ_ASSERT(fieldType == StubField::Type::RawInt32);
+        uintptr_t raw = stubInfo->getStubRawWord(stubData, offset);
+        if (!lazyAddOffset(raw)) {
+          return true;
+        }
       } else {
         // Check all other fields are the same.
         if (StubField::sizeIsInt64(fieldType)) {
@@ -195,9 +239,13 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,

   // Clone the CacheIR and replace
   // - specific GuardShape with GuardMultipleShapes.
+  // or
+  // (multiple distinct values in offsetList)
+  // - specific GuardShape with GuardMultipleShapesToOffset.
   CacheIRWriter writer(cx);
   CacheIRReader reader(stubInfo);
   CacheIRCloner cloner(firstStub);
+  bool hasSlotOffsets = offsetList.length() > 1;

   // Initialize the operands.
   CacheKind cacheKind = stubInfo->kind();
@@ -210,24 +258,39 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
   {
     gc::AutoSuppressGC suppressGC(cx);

-    shapeObj.set(ShapeListObject::create(cx));
+    if (!hasSlotOffsets) {
+      shapeObj.set(ShapeListObject::create(cx));
+    } else {
+      shapeObj.set(ShapeListWithOffsetsObject::create(cx));
+    }
     if (!shapeObj) {
       return false;
     }

+    MOZ_ASSERT_IF(hasSlotOffsets, shapeList.length() == offsetList.length());
+
     for (uint32_t i = 0; i < shapeList.length(); i++) {
       if (!shapeObj->append(cx, shapeList[i])) {
         cx->recoverFromOutOfMemory();
         return false;
       }
+      if (hasSlotOffsets) {
+        if (!shapeObj->append(cx, offsetList[i])) {
+          cx->recoverFromOutOfMemory();
+          return false;
+        }
+      }
+
       MOZ_ASSERT(static_cast<Shape*>(shapeList[i].toPrivate())->realm() ==
                  shapeObj->realm());
     }
   }

+  mozilla::Maybe<Int32OperandId> offsetId;
   bool shapeSuccess = false;
+  bool offsetSuccess = false;
   while (reader.more()) {
     CacheOp op = reader.readOp();
     switch (op) {
@@ -242,7 +305,11 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
           break;
         }

-        writer.guardMultipleShapes(objId, shapeObj);
+        if (hasSlotOffsets) {
+          offsetId.emplace(writer.guardMultipleShapesToOffset(objId, shapeObj));
+        } else {
+          writer.guardMultipleShapes(objId, shapeObj);
+        }
         shapeSuccess = true;
         break;
       }
@@ -264,6 +331,20 @@ static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
     return true;
   }

+  if (hasSlotOffsets && !offsetSuccess) {
+    // If we found a differing offset field but it was not part of the
+    // Load{Fixed | Dynamic}SlotResult then we can't fold these stubs
+    // together.
+    JitSpew(JitSpew_StubFolding,
+            "Failed to fold GuardShape into GuardMultipleShapesToOffset at "
+            "offset %u "
+            "(icScript: %p) with %zu shapes (%s:%u:%u)",
+            fallback->pcOffset(), icScript, shapeList.length(),
+            script->filename(), script->lineno(),
+            script->column().oneOriginValue());
+    return true;
+  }
+
   // Replace the existing stubs with the new folded stub.
   fallback->discardStubs(cx->zone(), icEntry);
@@ -336,7 +417,9 @@ bool js::jit::AddToFoldedStub(JSContext* cx, const CacheIRWriter& writer,
   const uint8_t* stubData = stub->stubDataStart();

   mozilla::Maybe<uint32_t> shapeFieldOffset;
+  mozilla::Maybe<uint32_t> offsetFieldOffset;
   RootedValue newShape(cx);
+  RootedValue newOffset(cx);
   Rooted<ListObject*> shapeList(cx);

   CacheIRReader stubReader(stubInfo);
@@ -398,6 +481,61 @@ bool js::jit::AddToFoldedStub(JSContext* cx, const CacheIRWriter& writer,
         break;
       }
+      case CacheOp::GuardMultipleShapesToOffset: {
+        // Check that the new stub has a corresponding GuardShape.
+        if (newOp != CacheOp::GuardShape) {
+          return false;
+        }
+        // Check that the object being guarded is the same.
+        if (newReader.objOperandId() != stubReader.objOperandId()) {
+          return false;
+        }
+
+        // Check that the shape offset is the same.
+        uint32_t newShapeOffset = newReader.stubOffset();
+        uint32_t stubShapesOffset = stubReader.stubOffset();
+        if (newShapeOffset != stubShapesOffset) {
+          return false;
+        }
+
+        MOZ_ASSERT(shapeList == nullptr);
+        shapeFieldOffset.emplace(newShapeOffset);
+
+        // Get the shape from the new stub
+        StubField shapeField =
+            writer.readStubField(newShapeOffset, StubField::Type::WeakShape);
+        Shape* shape = reinterpret_cast<Shape*>(shapeField.asWord());
+        newShape = PrivateValue(shape);
+
+        // Get the shape array from the old stub.
+        JSObject* obj = stubInfo->getStubField<StubField::Type::JSObject>(
+            stub, stubShapesOffset);
+        shapeList = &obj->as<ShapeListWithOffsetsObject>();
+        MOZ_ASSERT(shapeList->compartment() == shape->compartment());
+
+        // Don't add a shape if it's from a different realm than the first
+        // shape.
+        //
+        // Since the list was created in the realm which guarded all the shapes
+        // added to it, we can use its realm to check and ensure we're not
+        // adding a cross-realm shape.
+        //
+        // The assert verifies this property by checking the first element has
+        // the same realm (and since everything in the list has the same realm,
+        // checking the first element suffices)
+        Realm* shapesRealm = shapeList->realm();
+        MOZ_ASSERT_IF(
+            !shapeList->isEmpty(),
+            shapeList->as<ShapeListWithOffsetsObject>().getShape(0)->realm() ==
+                shapesRealm);
+        if (shapesRealm != shape->realm()) {
+          return false;
+        }
+
+        // Consume the offsetId argument.
+        stubReader.skip();
+        break;
+      }
       default: {
         // Check that the op is the same.
         if (newOp != stubOp) {
@@ -421,28 +559,40 @@ bool js::jit::AddToFoldedStub(JSContext* cx, const CacheIRWriter& writer,
     return false;
   }

-  if (!writer.stubDataEqualsIgnoring(stubData, *shapeFieldOffset)) {
+  if (!writer.stubDataEqualsIgnoringShapeAndOffset(stubData, *shapeFieldOffset,
+                                                   offsetFieldOffset)) {
     return false;
   }

-  ShapeListObject* obj = &shapeList->as<ShapeListObject>();
+  // ShapeListWithSlotsObject uses two spaces per shape.
+  uint32_t numShapes = offsetFieldOffset.isNothing() ? shapeList->length()
+                                                     : shapeList->length() / 2;

   // Limit the maximum number of shapes we will add before giving up.
   // If we give up, transition the stub.
-  if (obj->length() == ShapeListObject::MaxLength) {
+  if (numShapes == ShapeListObject::MaxLength) {
     MOZ_ASSERT(fallback->state().mode() != ICState::Mode::Generic);
     fallback->state().forceTransition();
     fallback->discardStubs(cx->zone(), icEntry);
     return false;
   }

-  if (!obj->append(cx, newShape)) {
+  if (!shapeList->append(cx, newShape)) {
     cx->recoverFromOutOfMemory();
     return false;
   }

-  JitSpew(JitSpew_StubFolding, "ShapeListObject %p: new length: %u",
-          shapeList.get(), shapeList->length());
+  if (offsetFieldOffset.isSome()) {
+    if (!shapeList->append(cx, newOffset)) {
+      // Drop corresponding shape if we failed adding offset.
+      shapeList->shrinkElements(cx, shapeList->length() - 1);
+      cx->recoverFromOutOfMemory();
+      return false;
+    }
+  }
+
+  JitSpew(JitSpew_StubFolding, "ShapeList%sObject %p: new length: %u",
+          offsetFieldOffset.isNothing() ? "" : "WithOffset", shapeList.get(),
+          shapeList->length());

   return true;
 }
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -529,6 +529,21 @@ bool WarpCacheIRTranspiler::emitGuardMultipleShapes(ObjOperandId objId,
   return true;
 }

+bool WarpCacheIRTranspiler::emitGuardMultipleShapesToOffset(
+    ObjOperandId objId, uint32_t shapesOffset, Int32OperandId offsetId) {
+  MDefinition* obj = getOperand(objId);
+
+  // Use MGuardShapeListToOffset if we snapshotted the list of shapes on the
+  // main thread.
+  MInstruction* ins;
+  MInstruction* shapeList = objectStubField(shapesOffset);
+  ins = MGuardMultipleShapesToOffset::New(alloc(), obj, shapeList);
+  ins->setBailoutKind(BailoutKind::StubFoldingGuardMultipleShapes);
+  add(ins);
+
+  return defineOperand(offsetId, ins);
+}
+
 bool WarpCacheIRTranspiler::emitGuardNullProto(ObjOperandId objId) {
   MDefinition* def = getOperand(objId);