commit 7ef21c323fbce385ee230dc6be03658081af012d
parent 52eb4078f83b37f8a5400d81d7564230fb492e1b
Author: alexical <dothayer@mozilla.com>
Date: Wed, 19 Nov 2025 21:05:46 +0000
Bug 1995077 - Scalar replace Object.keys r=iain
Differential Revision: https://phabricator.services.mozilla.com/D269127
Diffstat:
25 files changed, 865 insertions(+), 331 deletions(-)
diff --git a/js/src/jit-test/tests/ion/object-keys-04.js b/js/src/jit-test/tests/ion/object-keys-04.js
@@ -26,13 +26,6 @@ function objKeysLength(obj, expected, i) {
return len;
}
-// This is the same test as above, except that the branch which is being removed
-// cause the introduction of a different resume point to be inserted in the
-// middle. At the moment we expect this circustances to to disable the
-// optimization.
-//
-// Removing this limitation would be useful but would require more verification
-// when applying the optimization.
function objKeysLengthDiffBlock(obj, expected, i) {
var keys = Object.keys(obj);
if (i >= 99) {
@@ -42,19 +35,19 @@ function objKeysLengthDiffBlock(obj, expected, i) {
assertEq(arraysEqual(keys, expected), true);
}
let len = keys.length;
- assertRecoveredOnBailout(keys, false);
+ assertRecoveredOnBailout(keys, true);
return len;
}
// Mutating the object in-between the call from Object.keys and the evaluation
-// of the length property should prevent the optimization from being reflected
-// as the mutation of the object would cause the a different result of
-// Object.keys evaluation.
+// of the length property should still allow the optimization to take place
+// since the iterator we held onto should be from the previous shape of the
+// object.
function objKeysLengthMutate0(obj, expected, i) {
var keys = Object.keys(obj);
obj.foo = 42;
let len = keys.length;
- assertRecoveredOnBailout(keys, false);
+ assertRecoveredOnBailout(keys, true);
if (i >= 99) {
bailout();
assertEq(arraysEqual(keys, expected), true);
@@ -65,7 +58,7 @@ function objKeysLengthMutate0(obj, expected, i) {
function objKeysLengthMutate1(obj, expected, i) {
var keys = Object.keys(obj);
let len = keys.length;
- assertRecoveredOnBailout(keys, false);
+ assertRecoveredOnBailout(keys, true);
obj.foo = 42;
if (i >= 99) {
bailout();
@@ -77,7 +70,7 @@ function objKeysLengthMutate1(obj, expected, i) {
function objKeysLengthMutate2(obj, expected, i) {
var keys = Object.keys(obj);
let len = keys.length;
- assertRecoveredOnBailout(keys, false);
+ assertRecoveredOnBailout(keys, true);
if (i >= 99) {
bailout();
assertEq(arraysEqual(keys, expected), true);
@@ -91,8 +84,6 @@ function objKeysLengthMutate3(obj, expected, i) {
let len = keys.length;
assertRecoveredOnBailout(keys, true);
if (i >= 99) {
- // When branches are pruned, Warp/Ion is not aware and would recover the
- // keys on bailout, and this is fine.
obj.foo = 42;
bailout();
assertEq(arraysEqual(keys, expected), true);
@@ -101,9 +92,6 @@ function objKeysLengthMutate3(obj, expected, i) {
}
function objKeysLengthMutate4(obj, expected, i) {
- // Mutating the objects ahead of keying the keys does not prevent optimizing
- // the keys length query, given that all side-effects are already acted by
- // the time we query the keys.
obj.foo = 42;
var keys = Object.keys(obj);
let len = keys.length;
@@ -123,7 +111,7 @@ function doNotInlineSideEffect() {
function objKeysLengthSideEffect0(obj, expected, i) {
var keys = Object.keys(obj);
let len = keys.length;
- assertRecoveredOnBailout(keys, false);
+ assertRecoveredOnBailout(keys, true);
doNotInlineSideEffect();
if (i >= 99) {
bailout();
@@ -135,7 +123,7 @@ function objKeysLengthSideEffect0(obj, expected, i) {
function objKeysLengthSideEffect1(obj, expected, i) {
var keys = Object.keys(obj);
let len = keys.length;
- assertRecoveredOnBailout(keys, false);
+ assertRecoveredOnBailout(keys, true);
if (i >= 99) {
bailout();
assertEq(arraysEqual(keys, expected), true);
@@ -170,6 +158,48 @@ function objKeysLengthSideEffect3(obj, expected, i) {
return len;
}
+function objKeysLengthMutateElements0(obj, expected, i) {
+ var keys = Object.keys(obj);
+ obj[0] = 42;
+ obj[1] = 42;
+ obj[2] = 42;
+ let len = keys.length;
+ assertRecoveredOnBailout(keys, true);
+ if (i >= 99) {
+ bailout();
+ assertEq(arraysEqual(keys, expected), true);
+ }
+ return len;
+}
+
+function objKeysLengthMutateElements1(obj, expected, i) {
+ obj[0] = 42;
+ var keys = Object.keys(obj);
+ obj[1] = 42;
+ obj[2] = 42;
+ let len = keys.length;
+ assertRecoveredOnBailout(keys, true);
+ if (i >= 99) {
+ bailout();
+ assertEq(arraysEqual(keys, expected), true);
+ }
+ return len;
+}
+
+function objKeysLengthMutateElements2(obj, expected, i) {
+ obj[0] = 42;
+ obj[1] = 42;
+ var keys = Object.keys(obj);
+ obj[2] = 42;
+ let len = keys.length;
+ assertRecoveredOnBailout(keys, true);
+ if (i >= 99) {
+ bailout();
+ assertEq(arraysEqual(keys, expected), true);
+ }
+ return len;
+}
+
// Check what we are doing as optimizations when the object is fully known and
// when it does not escape.
//
@@ -204,5 +234,8 @@ for (let i = 0; i < 100; i++) {
objKeysLengthSideEffect1({...obj}, ["a", "b", "c", "d"], i);
objKeysLengthSideEffect2({...obj}, ["a", "b", "c", "d"], i);
objKeysLengthSideEffect3({...obj}, ["a", "b", "c", "d"], i);
+ objKeysLengthMutateElements0({...obj}, ["a", "b", "c", "d"], i);
+ objKeysLengthMutateElements1({...obj}, ["0", "a", "b", "c", "d"], i);
+ objKeysLengthMutateElements2({...obj}, ["0", "1", "a", "b", "c", "d"], i);
nonEscapedObjKeysLength(i);
}
diff --git a/js/src/jit-test/tests/ion/object-keys-06.js b/js/src/jit-test/tests/ion/object-keys-06.js
@@ -0,0 +1,40 @@
+load(libdir + 'array-compare.js'); // arraysEqual
+
+// Ion eager fails the test below because we have not yet created any
+// CacheIR ICs in Baseline before running the content of the top-level
+// function.
+if (getJitCompilerOptions()["ion.warmup.trigger"] <= 100)
+ setJitCompilerOption("ion.warmup.trigger", 100);
+
+// This test case checks that we are capable of recovering the Object.keys array
+// even if we optimized it away. It also checks that we indeed optimize it away
+// as we would expect.
+
+// Prevent GC from cancelling/discarding Ion compilations.
+gczeal(0);
+
+function objKeysIterate(obj, expected, i) {
+ var keys = Object.keys(obj);
+ for (let i = 0; i < keys.length; i++) {
+ assertEq(keys[i], expected[i]);
+ }
+ assertRecoveredOnBailout(keys, true);
+}
+
+function objKeysIterateMutated(obj, expected, i) {
+ var keys = Object.keys(obj);
+ obj.foo = 42;
+ for (let i = 0; i < keys.length; i++) {
+ assertEq(keys[i], expected[i]);
+ }
+ assertRecoveredOnBailout(keys, true);
+}
+
+// Prevent compilation of the top-level.
+eval(`${arraysEqual}`);
+let obj = {a: 0, b: 1, c: 2, d: 3};
+
+for (let i = 0; i < 100; i++) {
+ objKeysIterate({...obj}, ["a", "b", "c", "d"], i);
+ objKeysIterateMutated({...obj}, ["a", "b", "c", "d"], i);
+}
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
@@ -10463,8 +10463,15 @@ AttachDecision InlinableNativeIRGenerator::tryAttachObjectKeys() {
// Guard against proxies.
writer.guardIsNotProxy(argObjId);
+ Shape* expectedObjKeysShape =
+ GlobalObject::getArrayShapeWithDefaultProto(cx_);
+ if (!expectedObjKeysShape) {
+ cx_->recoverFromOutOfMemory();
+ return AttachDecision::NoAction;
+ }
+
// Compute the keys array.
- writer.objectKeysResult(argObjId);
+ writer.objectKeysResult(argObjId, expectedObjKeysShape);
writer.returnFromIC();
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
@@ -6340,7 +6340,7 @@ bool CacheIRCompiler::emitObjectToIteratorResult(
Label callVM, done;
masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
- &callVM);
+ &callVM, /* exclusive = */ true);
masm.loadPrivate(
Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
@@ -6434,7 +6434,8 @@ bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
return true;
}
-bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId) {
+bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId,
+ uint32_t resultShapeOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
@@ -1188,6 +1188,7 @@
cost_estimate: 6
args:
obj: ObjId
+ resultShape: ShapeField
- name: PackedArrayPopResult
shared: true
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
@@ -16652,6 +16652,24 @@ void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
masm.iteratorMore(obj, output, temp);
}
+void CodeGenerator::visitIteratorLength(LIteratorLength* lir) {
+ Register obj = ToRegister(lir->iter());
+ Register output = ToRegister(lir->output());
+ masm.iteratorLength(obj, output);
+}
+
+void CodeGenerator::visitLoadIteratorElement(LLoadIteratorElement* lir) {
+ Register obj = ToRegister(lir->iter());
+ Register output = ToRegister(lir->output());
+ if (lir->index()->isConstant()) {
+ int32_t index = ToInt32(lir->index());
+ masm.iteratorLoadElement(obj, index, output);
+ } else {
+ Register index = ToRegister(lir->index());
+ masm.iteratorLoadElement(obj, index, output);
+ }
+}
+
void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
ValueOperand input = ToValue(lir->input());
Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
@@ -18470,14 +18488,39 @@ void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
Register temp3 = ToRegister(lir->temp2());
using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
- OutOfLineCode* ool = (lir->mir()->wantsIndices())
- ? oolCallVM<Fn, GetIteratorWithIndices>(
- lir, ArgList(obj), StoreRegisterTo(iterObj))
- : oolCallVM<Fn, GetIterator>(
- lir, ArgList(obj), StoreRegisterTo(iterObj));
+ OutOfLineCode* ool = nullptr;
+
+ if (lir->mir()->skipRegistration()) {
+ if (lir->mir()->wantsIndices()) {
+ ool = oolCallVM<Fn, GetIteratorWithIndicesUnregistered>(
+ lir, ArgList(obj), StoreRegisterTo(iterObj));
+ } else {
+ ool = oolCallVM<Fn, GetIteratorUnregistered>(lir, ArgList(obj),
+ StoreRegisterTo(iterObj));
+ }
+ } else {
+ if (lir->mir()->wantsIndices()) {
+ ool = oolCallVM<Fn, GetIteratorWithIndices>(lir, ArgList(obj),
+ StoreRegisterTo(iterObj));
+ } else {
+ ool = oolCallVM<Fn, GetIterator>(lir, ArgList(obj),
+ StoreRegisterTo(iterObj));
+ }
+ }
+
+#ifdef DEBUG
+ if (!lir->mir()->getAliasSet().isStore()) {
+ MOZ_ASSERT(lir->mir()->skipRegistration());
+ Label done;
+ masm.branchTestObjectIsProxy(false, obj, temp, &done);
+ masm.assumeUnreachable("ObjectToIterator on a proxy must be a store.");
+ masm.bind(&done);
+ }
+#endif
masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
- ool->entry());
+ ool->entry(),
+ !lir->mir()->skipRegistration());
Register nativeIter = temp;
masm.loadPrivate(
@@ -18492,36 +18535,38 @@ void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
// do a VM call to replace the cached iterator with a fresh iterator
// including indices.
masm.branchTest32(Assembler::NonZero, iterFlagsAddr,
- Imm32(NativeIterator::Flags::IndicesSupported),
- ool->entry());
+ Imm32(NativeIterator::Flags::IndicesSupported), ool->entry());
}
- masm.storePtr(
- obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
- masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
+ if (!lir->mir()->skipRegistration()) {
+ masm.storePtr(obj, Address(nativeIter,
+ NativeIterator::offsetOfObjectBeingIterated()));
+ masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
- Register enumeratorsAddr = temp2;
- masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
- masm.registerIterator(enumeratorsAddr, nativeIter, temp3);
+ Register enumeratorsAddr = temp2;
+ masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
+ masm.registerIterator(enumeratorsAddr, nativeIter, temp3);
- // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
- // We already know that |iterObj| is tenured, so we only have to check |obj|.
- Label skipBarrier;
- masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
- {
- LiveRegisterSet save = liveVolatileRegs(lir);
- save.takeUnchecked(temp);
- save.takeUnchecked(temp2);
- save.takeUnchecked(temp3);
- if (iterObj.volatile_()) {
- save.addUnchecked(iterObj);
- }
+ // Generate post-write barrier for storing to
+ // |iterObj->objectBeingIterated_|. We already know that |iterObj| is
+ // tenured, so we only have to check |obj|.
+ Label skipBarrier;
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
+ {
+ LiveRegisterSet save = liveVolatileRegs(lir);
+ save.takeUnchecked(temp);
+ save.takeUnchecked(temp2);
+ save.takeUnchecked(temp3);
+ if (iterObj.volatile_()) {
+ save.addUnchecked(iterObj);
+ }
- masm.PushRegsInMask(save);
- emitPostWriteBarrier(iterObj);
- masm.PopRegsInMask(save);
+ masm.PushRegsInMask(save);
+ emitPostWriteBarrier(iterObj);
+ masm.PopRegsInMask(save);
+ }
+ masm.bind(&skipBarrier);
}
- masm.bind(&skipBarrier);
masm.bind(ool->rejoin());
}
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
@@ -132,6 +132,9 @@ DefaultJitOptions::DefaultJitOptions() {
// Whether the Baseline Interpreter is enabled.
SET_DEFAULT(baselineInterpreter, true);
+ // Whether replacing Object.keys with NativeIterators is globally disabled.
+ SET_DEFAULT(disableObjectKeysScalarReplacement, false);
+
#ifdef ENABLE_PORTABLE_BASELINE_INTERP
// Whether the Portable Baseline Interpreter is enabled.
SET_DEFAULT(portableBaselineInterpreter, false);
diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
@@ -70,6 +70,7 @@ struct DefaultJitOptions {
bool disableRedundantShapeGuards;
bool disableRedundantGCBarriers;
bool disableBailoutLoopCheck;
+ bool disableObjectKeysScalarReplacement;
#ifdef ENABLE_PORTABLE_BASELINE_INTERP
bool portableBaselineInterpreter;
#endif
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
@@ -5040,6 +5040,10 @@ void LIRGenerator::visitObjectKeys(MObjectKeys* ins) {
assignSafepoint(lir, ins);
}
+void LIRGenerator::visitObjectKeysFromIterator(MObjectKeysFromIterator* ins) {
+ MOZ_CRASH("ObjectKeysFromIterator is purely for recovery purposes.");
+}
+
void LIRGenerator::visitObjectKeysLength(MObjectKeysLength* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
MOZ_ASSERT(ins->type() == MIRType::Int32);
@@ -6365,6 +6369,24 @@ void LIRGenerator::visitCloseIterCache(MCloseIterCache* ins) {
assignSafepoint(lir, ins);
}
+void LIRGenerator::visitLoadIteratorElement(MLoadIteratorElement* ins) {
+ MOZ_ASSERT(ins->iter()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->type() == MIRType::String);
+
+ auto* lir = new (alloc()) LLoadIteratorElement(
+ useRegister(ins->iter()), useRegisterOrConstant(ins->index()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitIteratorLength(MIteratorLength* ins) {
+ MOZ_ASSERT(ins->iter()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Int32);
+
+ auto* lir = new (alloc()) LIteratorLength(useRegister(ins->iter()));
+ define(lir, ins);
+}
+
void LIRGenerator::visitOptimizeGetIteratorCache(
MOptimizeGetIteratorCache* ins) {
MDefinition* value = ins->value();
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
@@ -9710,6 +9710,7 @@ class MObjectToIterator : public MUnaryInstruction,
public ObjectPolicy<0>::Data {
NativeIteratorListHead* enumeratorsAddr_;
bool wantsIndices_ = false;
+ bool skipRegistration_ = false;
explicit MObjectToIterator(MDefinition* object,
NativeIteratorListHead* enumeratorsAddr)
@@ -9724,8 +9725,17 @@ class MObjectToIterator : public MUnaryInstruction,
TRIVIAL_NEW_WRAPPERS
NAMED_OPERANDS((0, object))
+ AliasSet getAliasSet() const override {
+ return skipRegistration_
+ ? AliasSet::Load(AliasSet::ObjectFields | AliasSet::Element)
+ : AliasSet::Store(AliasSet::Any);
+ }
+
bool wantsIndices() const { return wantsIndices_; }
void setWantsIndices(bool value) { wantsIndices_ = value; }
+
+ bool skipRegistration() const { return skipRegistration_; }
+ void setSkipRegistration(bool value) { skipRegistration_ = value; }
};
class MPostIntPtrConversion : public MUnaryInstruction,
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
@@ -2073,14 +2073,17 @@
- name: ObjectKeys
operands:
object: Object
+ arguments:
+ resultShape: Shape*
result_type: Object
-
- # We can recover Object.keys on bailout as long as the object is not escaped
- # or that the object is not mutated by any aliased calls in-between the
- # instruction and the recovered location, as the recovered keys array might be
- # different.
can_recover: custom
+- name: ObjectKeysFromIterator
+ operands:
+ iterator: Object
+ result_type: Object
+ can_recover: true
+
# Used to fold Object.keys(obj).length into a single operation.
#
# This should not be used with a Proxy, as proxies can have a user-defined
@@ -2797,6 +2800,20 @@
- name: ObjectToIterator
gen_boilerplate: false
+- name: IteratorLength
+ operands:
+ iter: Object
+ result_type: Int32
+ generate_lir: true
+
+- name: LoadIteratorElement
+ operands:
+ iter: Object
+ index: Int32
+ result_type: String
+ alias_set: none
+ generate_lir: true
+
- name: ValueToIterator
operands:
value: Value
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
@@ -9404,8 +9404,8 @@ static void LoadNativeIterator(MacroAssembler& masm, Register obj,
// Otherwise, jump to |failure|.
void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
Register temp, Register temp2,
- Register temp3,
- Label* failure) {
+ Register temp3, Label* failure,
+ bool exclusive) {
// Register usage:
// obj: always contains the input object
// temp: walks the obj->shape->baseshape->proto->shape->... chain
@@ -9441,7 +9441,10 @@ void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
// Load the native iterator and verify that it's reusable.
andPtr(Imm32(~ShapeCachePtr::MASK), dest);
LoadNativeIterator(*this, dest, nativeIterator);
- branchIfNativeIteratorNotReusable(nativeIterator, failure);
+
+ if (exclusive) {
+ branchIfNativeIteratorNotReusable(nativeIterator, failure);
+ }
Label skipIndices;
load32(Address(nativeIterator, NativeIterator::offsetOfPropertyCount()),
@@ -9539,6 +9542,29 @@ void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
bind(&done);
}
+void MacroAssembler::iteratorLength(Register obj, Register output) {
+ LoadNativeIterator(*this, obj, output);
+ load32(Address(output, NativeIterator::offsetOfOwnPropertyCount()), output);
+}
+
+void MacroAssembler::iteratorLoadElement(Register obj, Register index,
+ Register output) {
+ LoadNativeIterator(*this, obj, output);
+ loadPtr(BaseIndex(output, index, ScalePointer,
+ NativeIterator::offsetOfFirstProperty()),
+ output);
+ andPtr(Imm32(int32_t(~IteratorProperty::DeletedBit)), output);
+}
+
+void MacroAssembler::iteratorLoadElement(Register obj, int32_t index,
+ Register output) {
+ LoadNativeIterator(*this, obj, output);
+ loadPtr(Address(output, index * sizeof(IteratorProperty) +
+ NativeIterator::offsetOfFirstProperty()),
+ output);
+ andPtr(Imm32(int32_t(~IteratorProperty::DeletedBit)), output);
+}
+
void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
Register temp3) {
LoadNativeIterator(*this, obj, temp1);
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
@@ -5397,11 +5397,14 @@ class MacroAssembler : public MacroAssemblerSpecific {
void maybeLoadIteratorFromShape(Register obj, Register dest, Register temp,
Register temp2, Register temp3,
- Label* failure);
+ Label* failure, bool exclusive);
void iteratorMore(Register obj, ValueOperand output, Register temp);
void iteratorClose(Register obj, Register temp1, Register temp2,
Register temp3);
+ void iteratorLength(Register obj, Register output);
+ void iteratorLoadElement(Register obj, Register index, Register output);
+ void iteratorLoadElement(Register obj, int32_t index, Register output);
void registerIterator(Register enumeratorsList, Register iter, Register temp);
void prepareOOBStoreElement(Register object, Register index,
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
@@ -2279,6 +2279,28 @@ bool RObjectKeys::recover(JSContext* cx, SnapshotIterator& iter) const {
return true;
}
+bool MObjectKeysFromIterator::writeRecoverData(
+ CompactBufferWriter& writer) const {
+ MOZ_ASSERT(canRecoverOnBailout());
+ writer.writeUnsigned(uint32_t(RInstruction::Recover_ObjectKeysFromIterator));
+ return true;
+}
+
+RObjectKeysFromIterator::RObjectKeysFromIterator(CompactBufferReader& reader) {}
+
+bool RObjectKeysFromIterator::recover(JSContext* cx,
+ SnapshotIterator& iter) const {
+ Rooted<JSObject*> iterObj(cx, iter.readObject());
+
+ JSObject* resultKeys = ObjectKeysFromIterator(cx, iterObj);
+ if (!resultKeys) {
+ return false;
+ }
+
+ iter.storeInstructionResult(ObjectValue(*resultKeys));
+ return true;
+}
+
bool MObjectState::writeRecoverData(CompactBufferWriter& writer) const {
MOZ_ASSERT(canRecoverOnBailout());
writer.writeUnsigned(uint32_t(RInstruction::Recover_ObjectState));
diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h
@@ -145,6 +145,7 @@ namespace jit {
_(Callee) \
_(FunctionEnvironment) \
_(ObjectKeys) \
+ _(ObjectKeysFromIterator) \
_(ObjectState) \
_(ArrayState) \
_(AtomicIsLockFree) \
@@ -1005,6 +1006,14 @@ class RObjectKeys final : public RInstruction {
SnapshotIterator& iter) const override;
};
+class RObjectKeysFromIterator final : public RInstruction {
+ public:
+ RINSTRUCTION_HEADER_NUM_OP_(ObjectKeysFromIterator, 1)
+
+ [[nodiscard]] bool recover(JSContext* cx,
+ SnapshotIterator& iter) const override;
+};
+
class RObjectState final : public RInstruction {
private:
uint32_t numSlots_; // Number of slots.
diff --git a/js/src/jit/ScalarReplacement.cpp b/js/src/jit/ScalarReplacement.cpp
@@ -1262,22 +1262,146 @@ static bool IsArrayEscaped(MInstruction* ins, MInstruction* newArray) {
return false;
}
+// This class extracts the common elements shared across several different
+// Array replacement strategies, to avoid code duplication. There is nothing
+// essential or sacred about it; it simply collects the basic operations we
+// often want to perform when replacing a true JS array with something that
+// cheaply approximates it. When inheriting from this in the future, please
+// validate that each of its core visit functions is safe to use in your
+// new context.
+class GenericArrayReplacer : public MDefinitionVisitorDefaultNoop {
+ protected:
+ TempAllocator& alloc_;
+ MInstruction* arr_;
+
+ bool isTargetElements(MDefinition* elements);
+ void discardInstruction(MInstruction* ins, MDefinition* elements);
+ void visitLength(MInstruction* ins, MDefinition* elements);
+
+ GenericArrayReplacer(TempAllocator& alloc, MInstruction* arr)
+ : alloc_(alloc), arr_(arr) {}
+
+ public:
+ void visitGuardToClass(MGuardToClass* ins);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
+ void visitUnbox(MUnbox* ins);
+ void visitCompare(MCompare* ins);
+ void visitGuardElementsArePacked(MGuardElementsArePacked* ins);
+};
+
+bool GenericArrayReplacer::isTargetElements(MDefinition* elements) {
+ return elements->isElements() && elements->toElements()->object() == arr_;
+}
+
+void GenericArrayReplacer::discardInstruction(MInstruction* ins,
+ MDefinition* elements) {
+ MOZ_ASSERT(elements->isElements());
+ ins->block()->discard(ins);
+ if (!elements->hasLiveDefUses()) {
+ elements->block()->discard(elements->toInstruction());
+ }
+}
+
+void GenericArrayReplacer::visitGuardToClass(MGuardToClass* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != arr_) {
+ return;
+ }
+ MOZ_ASSERT(ins->getClass() == &ArrayObject::class_);
+
+ // Replace the guard with the array object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove the guard.
+ ins->block()->discard(ins);
+}
+
+void GenericArrayReplacer::visitGuardShape(MGuardShape* ins) {
+ // Skip guards on other objects.
+ if (ins->object() != arr_) {
+ return;
+ }
+
+ // Replace the guard with the array object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove the guard.
+ ins->block()->discard(ins);
+}
+
+void GenericArrayReplacer::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
+ // Skip guards on other objects.
+ if (ins->array() != arr_) {
+ return;
+ }
+
+ // Replace the guard by its object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void GenericArrayReplacer::visitUnbox(MUnbox* ins) {
+ // Skip unrelated unboxes.
+ if (ins->input() != arr_) {
+ return;
+ }
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ // Replace the unbox with the array object.
+ ins->replaceAllUsesWith(arr_);
+
+ // Remove the unbox.
+ ins->block()->discard(ins);
+}
+
+void GenericArrayReplacer::visitCompare(MCompare* ins) {
+ // Skip unrelated comparisons.
+ if (ins->lhs() != arr_ && ins->rhs() != arr_) {
+ return;
+ }
+
+ bool folded;
+ MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
+
+ auto* cst = MConstant::NewBoolean(alloc_, folded);
+ ins->block()->insertBefore(ins, cst);
+
+ // Replace the comparison with a constant.
+ ins->replaceAllUsesWith(cst);
+
+ // Remove original instruction.
+ ins->block()->discard(ins);
+}
+
+void GenericArrayReplacer::visitGuardElementsArePacked(
+ MGuardElementsArePacked* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isTargetElements(elements)) {
+ return;
+ }
+
+ // Remove original instruction.
+ discardInstruction(ins, elements);
+}
+
// This class replaces every MStoreElement and MSetInitializedLength by an
// MArrayState which emulates the content of the array. All MLoadElement,
// MInitializedLength and MArrayLength are replaced by the corresponding value.
//
// In order to restore the value of the array correctly in case of bailouts, we
// replace all reference of the allocation by the MArrayState definition.
-class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
+class ArrayMemoryView : public GenericArrayReplacer {
public:
using BlockState = MArrayState;
static const char* phaseName;
private:
- TempAllocator& alloc_;
MConstant* undefinedVal_;
MConstant* length_;
- MInstruction* arr_;
MBasicBlock* startBlock_;
BlockState* state_;
@@ -1315,15 +1439,9 @@ class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
void visitLoadElement(MLoadElement* ins);
void visitSetInitializedLength(MSetInitializedLength* ins);
void visitInitializedLength(MInitializedLength* ins);
- void visitGuardElementsArePacked(MGuardElementsArePacked* ins);
void visitArrayLength(MArrayLength* ins);
void visitPostWriteBarrier(MPostWriteBarrier* ins);
void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins);
- void visitGuardShape(MGuardShape* ins);
- void visitGuardToClass(MGuardToClass* ins);
- void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
- void visitUnbox(MUnbox* ins);
- void visitCompare(MCompare* ins);
void visitApplyArray(MApplyArray* ins);
void visitConstructArray(MConstructArray* ins);
};
@@ -1331,10 +1449,9 @@ class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";
ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
- : alloc_(alloc),
+ : GenericArrayReplacer(alloc, arr),
undefinedVal_(nullptr),
length_(nullptr),
- arr_(arr),
startBlock_(arr->block()),
state_(nullptr),
lastResumePoint_(nullptr),
@@ -1584,18 +1701,6 @@ void ArrayMemoryView::visitInitializedLength(MInitializedLength* ins) {
discardInstruction(ins, elements);
}
-void ArrayMemoryView::visitGuardElementsArePacked(
- MGuardElementsArePacked* ins) {
- // Skip other array objects.
- MDefinition* elements = ins->elements();
- if (!isArrayStateElements(elements)) {
- return;
- }
-
- // Remove original instruction.
- discardInstruction(ins, elements);
-}
-
void ArrayMemoryView::visitArrayLength(MArrayLength* ins) {
// Skip other array objects.
MDefinition* elements = ins->elements();
@@ -1635,78 +1740,6 @@ void ArrayMemoryView::visitPostWriteElementBarrier(
ins->block()->discard(ins);
}
-void ArrayMemoryView::visitGuardShape(MGuardShape* ins) {
- // Skip guards on other objects.
- if (ins->object() != arr_) {
- return;
- }
-
- // Replace the guard by its object.
- ins->replaceAllUsesWith(arr_);
-
- // Remove original instruction.
- ins->block()->discard(ins);
-}
-
-void ArrayMemoryView::visitGuardToClass(MGuardToClass* ins) {
- // Skip guards on other objects.
- if (ins->object() != arr_) {
- return;
- }
-
- // Replace the guard by its object.
- ins->replaceAllUsesWith(arr_);
-
- // Remove original instruction.
- ins->block()->discard(ins);
-}
-
-void ArrayMemoryView::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
- // Skip guards on other objects.
- if (ins->array() != arr_) {
- return;
- }
-
- // Replace the guard by its object.
- ins->replaceAllUsesWith(arr_);
-
- // Remove original instruction.
- ins->block()->discard(ins);
-}
-
-void ArrayMemoryView::visitUnbox(MUnbox* ins) {
- // Skip unrelated unboxes.
- if (ins->getOperand(0) != arr_) {
- return;
- }
- MOZ_ASSERT(ins->type() == MIRType::Object);
-
- // Replace the unbox with the array object.
- ins->replaceAllUsesWith(arr_);
-
- // Remove the unbox.
- ins->block()->discard(ins);
-}
-
-void ArrayMemoryView::visitCompare(MCompare* ins) {
- // Skip unrelated comparisons.
- if (ins->lhs() != arr_ && ins->rhs() != arr_) {
- return;
- }
-
- bool folded;
- MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
-
- auto* cst = MConstant::NewBoolean(alloc_, folded);
- ins->block()->insertBefore(ins, cst);
-
- // Replace the comparison with a constant.
- ins->replaceAllUsesWith(cst);
-
- // Remove original instruction.
- ins->block()->discard(ins);
-}
-
void ArrayMemoryView::visitApplyArray(MApplyArray* ins) {
// Skip other array objects.
MDefinition* elements = ins->getElements();
@@ -2571,29 +2604,18 @@ static inline bool IsOptimizableRestInstruction(MInstruction* ins) {
return ins->isRest();
}
-class RestReplacer : public MDefinitionVisitorDefaultNoop {
+class RestReplacer : public GenericArrayReplacer {
private:
const MIRGenerator* mir_;
MIRGraph& graph_;
- MInstruction* rest_;
-
- TempAllocator& alloc() { return graph_.alloc(); }
- MRest* rest() const { return rest_->toRest(); }
- bool isRestElements(MDefinition* elements);
- void discardInstruction(MInstruction* ins, MDefinition* elements);
+ MRest* rest() const { return arr_->toRest(); }
MDefinition* restLength(MInstruction* ins);
- void visitLength(MInstruction* ins, MDefinition* elements);
- void visitGuardToClass(MGuardToClass* ins);
- void visitGuardShape(MGuardShape* ins);
- void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
- void visitUnbox(MUnbox* ins);
- void visitCompare(MCompare* ins);
+ void visitLength(MInstruction* ins, MDefinition* elements);
void visitLoadElement(MLoadElement* ins);
void visitArrayLength(MArrayLength* ins);
void visitInitializedLength(MInitializedLength* ins);
- void visitGuardElementsArePacked(MGuardElementsArePacked* ins);
void visitApplyArray(MApplyArray* ins);
void visitConstructArray(MConstructArray* ins);
@@ -2601,8 +2623,8 @@ class RestReplacer : public MDefinitionVisitorDefaultNoop {
public:
RestReplacer(const MIRGenerator* mir, MIRGraph& graph, MInstruction* rest)
- : mir_(mir), graph_(graph), rest_(rest) {
- MOZ_ASSERT(IsOptimizableRestInstruction(rest_));
+ : GenericArrayReplacer(graph.alloc(), rest), mir_(mir), graph_(graph) {
+ MOZ_ASSERT(IsOptimizableRestInstruction(arr_));
}
bool escapes(MInstruction* ins);
@@ -2610,6 +2632,11 @@ class RestReplacer : public MDefinitionVisitorDefaultNoop {
void assertSuccess();
};
+void RestReplacer::assertSuccess() {
+ MOZ_ASSERT(arr_->canRecoverOnBailout());
+ MOZ_ASSERT(!arr_->hasLiveDefUses());
+}
+
// Returns false if the rest array object does not escape.
bool RestReplacer::escapes(MInstruction* ins) {
MOZ_ASSERT(ins->type() == MIRType::Object);
@@ -2779,7 +2806,7 @@ bool RestReplacer::escapes(MElements* ins) {
// Replacing the rest array object is simpler than replacing an object or array,
// because the rest array object does not change state.
bool RestReplacer::run() {
- MBasicBlock* startBlock = rest_->block();
+ MBasicBlock* startBlock = arr_->block();
// Iterate over each basic block.
for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
@@ -2803,7 +2830,7 @@ bool RestReplacer::run() {
MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
}
- if (!graph_.alloc().ensureBallast()) {
+ if (!alloc_.ensureBallast()) {
return false;
}
}
@@ -2813,143 +2840,52 @@ bool RestReplacer::run() {
return true;
}
-void RestReplacer::assertSuccess() {
- MOZ_ASSERT(rest_->canRecoverOnBailout());
- MOZ_ASSERT(!rest_->hasLiveDefUses());
-}
-
-bool RestReplacer::isRestElements(MDefinition* elements) {
- return elements->isElements() && elements->toElements()->object() == rest_;
-}
-
-void RestReplacer::discardInstruction(MInstruction* ins,
- MDefinition* elements) {
- MOZ_ASSERT(elements->isElements());
- ins->block()->discard(ins);
- if (!elements->hasLiveDefUses()) {
- elements->block()->discard(elements->toInstruction());
- }
-}
-
-void RestReplacer::visitGuardToClass(MGuardToClass* ins) {
- // Skip guards on other objects.
- if (ins->object() != rest_) {
+void RestReplacer::visitLoadElement(MLoadElement* ins) {
+ // Skip other array objects.
+ MDefinition* elements = ins->elements();
+ if (!isTargetElements(elements)) {
return;
}
- MOZ_ASSERT(ins->getClass() == &ArrayObject::class_);
-
- // Replace the guard with the array object.
- ins->replaceAllUsesWith(rest_);
-
- // Remove the guard.
- ins->block()->discard(ins);
-}
-void RestReplacer::visitGuardShape(MGuardShape* ins) {
- // Skip guards on other objects.
- if (ins->object() != rest_) {
- return;
- }
+ MDefinition* index = ins->index();
- // Replace the guard with the array object.
- ins->replaceAllUsesWith(rest_);
+ // Adjust the index to skip any extra formals.
+ if (uint32_t formals = rest()->numFormals()) {
+ auto* numFormals = MConstant::NewInt32(alloc_, formals);
+ ins->block()->insertBefore(ins, numFormals);
- // Remove the guard.
- ins->block()->discard(ins);
-}
+ auto* add = MAdd::New(alloc_, index, numFormals, TruncateKind::Truncate);
+ ins->block()->insertBefore(ins, add);
-void RestReplacer::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
- // Skip guards on other objects.
- if (ins->array() != rest_) {
- return;
+ index = add;
}
- // Replace the guard by its object.
- ins->replaceAllUsesWith(rest_);
+ auto* loadArg = MGetFrameArgument::New(alloc_, index);
+
+ ins->block()->insertBefore(ins, loadArg);
+ ins->replaceAllUsesWith(loadArg);
// Remove original instruction.
- ins->block()->discard(ins);
+ discardInstruction(ins, elements);
}
-void RestReplacer::visitUnbox(MUnbox* ins) {
- // Skip unrelated unboxes.
- if (ins->input() != rest_) {
- return;
- }
- MOZ_ASSERT(ins->type() == MIRType::Object);
+MDefinition* RestReplacer::restLength(MInstruction* ins) {
+ // Compute |Math.max(numActuals - numFormals, 0)| for the rest array length.
- // Replace the unbox with the array object.
- ins->replaceAllUsesWith(rest_);
-
- // Remove the unbox.
- ins->block()->discard(ins);
-}
-
-void RestReplacer::visitCompare(MCompare* ins) {
- // Skip unrelated comparisons.
- if (ins->lhs() != rest_ && ins->rhs() != rest_) {
- return;
- }
-
- bool folded;
- MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
-
- auto* cst = MConstant::NewBoolean(alloc(), folded);
- ins->block()->insertBefore(ins, cst);
-
- // Replace the comparison with a constant.
- ins->replaceAllUsesWith(cst);
-
- // Remove original instruction.
- ins->block()->discard(ins);
-}
-
-void RestReplacer::visitLoadElement(MLoadElement* ins) {
- // Skip other array objects.
- MDefinition* elements = ins->elements();
- if (!isRestElements(elements)) {
- return;
- }
-
- MDefinition* index = ins->index();
-
- // Adjust the index to skip any extra formals.
- if (uint32_t formals = rest()->numFormals()) {
- auto* numFormals = MConstant::NewInt32(alloc(), formals);
- ins->block()->insertBefore(ins, numFormals);
-
- auto* add = MAdd::New(alloc(), index, numFormals, TruncateKind::Truncate);
- ins->block()->insertBefore(ins, add);
-
- index = add;
- }
-
- auto* loadArg = MGetFrameArgument::New(alloc(), index);
-
- ins->block()->insertBefore(ins, loadArg);
- ins->replaceAllUsesWith(loadArg);
-
- // Remove original instruction.
- discardInstruction(ins, elements);
-}
-
-MDefinition* RestReplacer::restLength(MInstruction* ins) {
- // Compute |Math.max(numActuals - numFormals, 0)| for the rest array length.
-
- auto* numActuals = rest()->numActuals();
+ auto* numActuals = rest()->numActuals();
if (uint32_t formals = rest()->numFormals()) {
- auto* numFormals = MConstant::NewInt32(alloc(), formals);
+ auto* numFormals = MConstant::NewInt32(alloc_, formals);
ins->block()->insertBefore(ins, numFormals);
- auto* length = MSub::New(alloc(), numActuals, numFormals, MIRType::Int32);
+ auto* length = MSub::New(alloc_, numActuals, numFormals, MIRType::Int32);
length->setTruncateKind(TruncateKind::Truncate);
ins->block()->insertBefore(ins, length);
- auto* zero = MConstant::NewInt32(alloc(), 0);
+ auto* zero = MConstant::NewInt32(alloc_, 0);
ins->block()->insertBefore(ins, zero);
- auto* minmax = MMinMax::NewMax(alloc(), length, zero, MIRType::Int32);
+ auto* minmax = MMinMax::NewMax(alloc_, length, zero, MIRType::Int32);
ins->block()->insertBefore(ins, minmax);
return minmax;
@@ -2962,7 +2898,7 @@ void RestReplacer::visitLength(MInstruction* ins, MDefinition* elements) {
MOZ_ASSERT(ins->isArrayLength() || ins->isInitializedLength());
// Skip other array objects.
- if (!isRestElements(elements)) {
+ if (!isTargetElements(elements)) {
return;
}
@@ -2983,28 +2919,17 @@ void RestReplacer::visitInitializedLength(MInitializedLength* ins) {
visitLength(ins, ins->elements());
}
-void RestReplacer::visitGuardElementsArePacked(MGuardElementsArePacked* ins) {
- // Skip other array objects.
- MDefinition* elements = ins->elements();
- if (!isRestElements(elements)) {
- return;
- }
-
- // Remove original instruction.
- discardInstruction(ins, elements);
-}
-
void RestReplacer::visitApplyArray(MApplyArray* ins) {
// Skip other array objects.
MDefinition* elements = ins->getElements();
- if (!isRestElements(elements)) {
+ if (!isTargetElements(elements)) {
return;
}
auto* numActuals = restLength(ins);
auto* apply =
- MApplyArgs::New(alloc(), ins->getSingleTarget(), ins->getFunction(),
+ MApplyArgs::New(alloc_, ins->getSingleTarget(), ins->getFunction(),
numActuals, ins->getThis(), rest()->numFormals());
apply->setBailoutKind(ins->bailoutKind());
if (!ins->maybeCrossRealm()) {
@@ -3026,14 +2951,14 @@ void RestReplacer::visitApplyArray(MApplyArray* ins) {
void RestReplacer::visitConstructArray(MConstructArray* ins) {
// Skip other array objects.
MDefinition* elements = ins->getElements();
- if (!isRestElements(elements)) {
+ if (!isTargetElements(elements)) {
return;
}
auto* numActuals = restLength(ins);
auto* construct = MConstructArgs::New(
- alloc(), ins->getSingleTarget(), ins->getFunction(), numActuals,
+ alloc_, ins->getSingleTarget(), ins->getFunction(), numActuals,
ins->getThis(), ins->getNewTarget(), rest()->numFormals());
construct->setBailoutKind(ins->bailoutKind());
if (!ins->maybeCrossRealm()) {
@@ -3898,6 +3823,282 @@ WasmStructMemoryView::WasmStructMemoryView(TempAllocator& alloc,
state_(nullptr),
oom_(false) {}
+static inline bool IsOptimizableObjectKeysInstruction(MInstruction* ins) {
+ return ins->isObjectKeys();
+}
+
+class ObjectKeysReplacer : public GenericArrayReplacer {
+ private:
+ const MIRGenerator* mir_;
+ MIRGraph& graph_;
+ MObjectToIterator* objToIter_ = nullptr;
+
+ MObjectKeys* objectKeys() const { return arr_->toObjectKeys(); }
+
+ MDefinition* objectKeysLength(MInstruction* ins);
+ void visitLength(MInstruction* ins, MDefinition* elements);
+
+ void visitLoadElement(MLoadElement* ins);
+ void visitArrayLength(MArrayLength* ins);
+ void visitInitializedLength(MInitializedLength* ins);
+
+ bool escapes(MElements* ins);
+
+ public:
+ ObjectKeysReplacer(const MIRGenerator* mir, MIRGraph& graph,
+ MInstruction* arr)
+ : GenericArrayReplacer(graph.alloc(), arr), mir_(mir), graph_(graph) {
+ MOZ_ASSERT(IsOptimizableObjectKeysInstruction(arr_));
+ }
+
+ bool escapes(MInstruction* ins);
+ bool run(MInstructionIterator& outerIterator);
+ void assertSuccess();
+};
+
+// Returns false if the Object.keys array object does not escape.
+bool ObjectKeysReplacer::escapes(MInstruction* ins) {
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+
+ JitSpewDef(JitSpew_Escape, "Check Object.keys array\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ // Check all uses to see whether they can be supported without allocating an
+ // ArrayObject for the Object.keys parameter.
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ MNode* consumer = (*i)->consumer();
+
+ // If a resume point can observe this instruction, we can only optimize
+ // if it is recoverable.
+ if (consumer->isResumePoint()) {
+ if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
+ JitSpew(JitSpew_Escape,
+ "Observable Object.keys array cannot be recovered");
+ return true;
+ }
+ continue;
+ }
+
+ MDefinition* def = consumer->toDefinition();
+ switch (def->op()) {
+ case MDefinition::Opcode::Elements: {
+ auto* elem = def->toElements();
+ MOZ_ASSERT(elem->object() == ins);
+ if (escapes(elem)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardShape: {
+ const Shape* shape = objectKeys()->resultShape();
+ MOZ_DIAGNOSTIC_ASSERT(shape);
+ auto* guard = def->toGuardShape();
+ MOZ_DIAGNOSTIC_ASSERT(shape == guard->shape());
+ if (shape != guard->shape()) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", def);
+ return true;
+ }
+ if (escapes(guard)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardToClass: {
+ auto* guard = def->toGuardToClass();
+ if (guard->getClass() != &ArrayObject::class_) {
+ JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", def);
+ return true;
+ }
+ if (escapes(guard)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::GuardArrayIsPacked: {
+ // Object.keys arrays are always packed as long as they aren't modified.
+ auto* guard = def->toGuardArrayIsPacked();
+ if (escapes(guard)) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ case MDefinition::Opcode::Unbox: {
+ if (def->type() != MIRType::Object) {
+ JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
+ return true;
+ }
+ if (escapes(def->toInstruction())) {
+ JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
+ return true;
+ }
+ break;
+ }
+
+ // This instruction is supported for |JSOp::OptimizeSpreadCall|.
+ case MDefinition::Opcode::Compare: {
+ bool canFold;
+ if (!def->toCompare()->tryFold(&canFold)) {
+ JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
+ return true;
+ }
+ break;
+ }
+
+ // This instruction is a no-op used to test that scalar replacement is
+ // working as expected.
+ case MDefinition::Opcode::AssertRecoveredOnBailout:
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Object.keys array object is not escaped");
+ return false;
+}
+
+bool ObjectKeysReplacer::escapes(MElements* ins) {
+ JitSpewDef(JitSpew_Escape, "Check Object.keys array elements\n", ins);
+ JitSpewIndent spewIndent(JitSpew_Escape);
+
+ for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
+ // The MIRType::Elements cannot be captured in a resume point as it does not
+ // represent a value allocation.
+ MDefinition* def = (*i)->consumer()->toDefinition();
+
+ switch (def->op()) {
+ case MDefinition::Opcode::LoadElement: {
+ MOZ_ASSERT(def->toLoadElement()->elements() == ins);
+ break;
+ }
+
+ case MDefinition::Opcode::ArrayLength:
+ MOZ_ASSERT(def->toArrayLength()->elements() == ins);
+ break;
+
+ case MDefinition::Opcode::InitializedLength:
+ MOZ_ASSERT(def->toInitializedLength()->elements() == ins);
+ break;
+
+ case MDefinition::Opcode::GuardElementsArePacked:
+ MOZ_ASSERT(def->toGuardElementsArePacked()->elements() == ins);
+ break;
+
+ default:
+ JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
+ return true;
+ }
+ }
+
+ JitSpew(JitSpew_Escape, "Object.keys array object is not escaped");
+ return false;
+}
+
+bool ObjectKeysReplacer::run(MInstructionIterator& outerIterator) {
+ MBasicBlock* startBlock = arr_->block();
+
+ objToIter_ = MObjectToIterator::New(alloc_, objectKeys()->object(), nullptr);
+ objToIter_->setSkipRegistration(true);
+ arr_->block()->insertBefore(arr_, objToIter_);
+
+ // Iterate over each basic block.
+ for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
+ block != graph_.rpoEnd(); block++) {
+ if (mir_->shouldCancel("Scalar replacement of Object.keys array object")) {
+ return false;
+ }
+
+ // Iterates over phis and instructions.
+ // We do not have to visit resume points. Any resume points that capture the
+ // Object.keys array object will be handled by the Sink pass.
+ for (MDefinitionIterator iter(*block); iter;) {
+ // Increment the iterator before visiting the instruction, as the visit
+ // function might discard itself from the basic block.
+ MDefinition* def = *iter++;
+ switch (def->op()) {
+#define MIR_OP(op) \
+ case MDefinition::Opcode::op: \
+ visit##op(def->to##op()); \
+ break;
+ MIR_OPCODE_LIST(MIR_OP)
+#undef MIR_OP
+ }
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+ }
+ }
+
+ assertSuccess();
+
+ auto* forRecovery = MObjectKeysFromIterator::New(alloc_, objToIter_);
+ arr_->block()->insertBefore(arr_, forRecovery);
+ forRecovery->stealResumePoint(arr_);
+ arr_->replaceAllUsesWith(forRecovery);
+
+ // We need to explicitly discard the instruction since it's marked as
+ // effectful and we stole its resume point, which will trip assertion
+ // failures later. We can't discard the instruction out from underneath
+ // the iterator though, and we can't do the trick where we increment the
+ // iterator at the top of the loop because we might discard the *next*
+ // instruction, so we do this goofiness.
+ outerIterator--;
+ arr_->block()->discard(arr_);
+
+ if (!graph_.alloc().ensureBallast()) {
+ return false;
+ }
+
+ return true;
+}
+
+void ObjectKeysReplacer::assertSuccess() {
+ MOZ_ASSERT(!arr_->hasLiveDefUses());
+}
+
+void ObjectKeysReplacer::visitLoadElement(MLoadElement* ins) {
+ if (!isTargetElements(ins->elements())) {
+ return;
+ }
+
+ auto* load = MLoadIteratorElement::New(alloc_, objToIter_, ins->index());
+ ins->block()->insertBefore(ins, load);
+
+ ins->replaceAllUsesWith(load);
+ discardInstruction(ins, ins->elements());
+}
+
+void ObjectKeysReplacer::visitLength(MInstruction* ins, MDefinition* elements) {
+ if (!isTargetElements(elements)) {
+ return;
+ }
+
+ auto* newLen = MIteratorLength::New(alloc_, objToIter_);
+ ins->block()->insertBefore(ins, newLen);
+
+ ins->replaceAllUsesWith(newLen);
+ discardInstruction(ins, elements);
+}
+
+void ObjectKeysReplacer::visitArrayLength(MArrayLength* ins) {
+ visitLength(ins, ins->elements());
+}
+
+void ObjectKeysReplacer::visitInitializedLength(MInitializedLength* ins) {
+ // The initialized length of an Object.keys array is equal to its length.
+ visitLength(ins, ins->elements());
+}
+
bool ScalarReplacement(const MIRGenerator* mir, MIRGraph& graph) {
JitSpew(JitSpew_Escape, "Begin (ScalarReplacement)");
@@ -3995,5 +4196,32 @@ bool ScalarReplacement(const MIRGenerator* mir, MIRGraph& graph) {
return true;
}
+bool ReplaceObjectKeys(const MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_Escape, "Begin (Object.Keys Replacement)");
+
+ for (ReversePostorderIterator block = graph.rpoBegin();
+ block != graph.rpoEnd(); block++) {
+ if (mir->shouldCancel("Object.Keys Replacement (main loop)")) {
+ return false;
+ }
+
+ for (MInstructionIterator ins = block->begin(); ins != block->end();
+ ins++) {
+ if (IsOptimizableObjectKeysInstruction(*ins)) {
+ ObjectKeysReplacer replacer(mir, graph, *ins);
+ if (replacer.escapes(*ins)) {
+ continue;
+ }
+ if (!replacer.run(ins)) {
+ return false;
+ }
+ continue;
+ }
+ }
+ }
+
+ return true;
+}
+
} /* namespace jit */
} /* namespace js */
diff --git a/js/src/jit/ScalarReplacement.h b/js/src/jit/ScalarReplacement.h
@@ -15,6 +15,7 @@ class MIRGenerator;
class MIRGraph;
[[nodiscard]] bool ScalarReplacement(const MIRGenerator* mir, MIRGraph& graph);
+[[nodiscard]] bool ReplaceObjectKeys(const MIRGenerator* mir, MIRGraph& graph);
} // namespace jit
} // namespace js
diff --git a/js/src/jit/VMFunctionList-inl.h b/js/src/jit/VMFunctionList-inl.h
@@ -205,7 +205,10 @@ namespace jit {
_(GetImportOperation, js::GetImportOperation) \
_(GetIntrinsicValue, js::jit::GetIntrinsicValue) \
_(GetIterator, js::GetIterator) \
+ _(GetIteratorUnregistered, js::GetIteratorUnregistered) \
_(GetIteratorWithIndices, js::GetIteratorWithIndices) \
+ _(GetIteratorWithIndicesUnregistered, \
+ js::GetIteratorWithIndicesUnregistered) \
_(GetNonSyntacticGlobalThis, js::GetNonSyntacticGlobalThis) \
_(GetOrCreateModuleMetaObject, js::GetOrCreateModuleMetaObject) \
_(GetPendingExceptionStack, js::GetPendingExceptionStack) \
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
@@ -1492,6 +1492,27 @@ JSObject* ObjectKeys(JSContext* cx, HandleObject obj) {
return argv[0].toObjectOrNull();
}
+JSObject* ObjectKeysFromIterator(JSContext* cx, HandleObject iterObj) {
+ MOZ_RELEASE_ASSERT(iterObj->is<PropertyIteratorObject>());
+ NativeIterator* iter =
+ iterObj->as<PropertyIteratorObject>().getNativeIterator();
+
+ size_t length = iter->ownPropertyCount();
+ Rooted<ArrayObject*> array(cx, NewDenseFullyAllocatedArray(cx, length));
+ if (!array) {
+ return nullptr;
+ }
+
+ array->ensureDenseInitializedLength(0, length);
+
+ for (size_t i = 0; i < length; ++i) {
+ array->initDenseElement(
+ i, StringValue((iter->propertiesBegin() + i)->asString()));
+ }
+
+ return array;
+}
+
bool ObjectKeysLength(JSContext* cx, HandleObject obj, int32_t* length) {
MOZ_ASSERT(!obj->is<ProxyObject>());
return js::obj_keys_length(cx, obj, *length);
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
@@ -521,6 +521,7 @@ void JitWasmAnyRefPreWriteBarrier(JSRuntime* rt, wasm::AnyRef* refp);
bool ObjectIsCallable(JSObject* obj);
bool ObjectIsConstructor(JSObject* obj);
JSObject* ObjectKeys(JSContext* cx, HandleObject obj);
+JSObject* ObjectKeysFromIterator(JSContext* cx, HandleObject iterObj);
bool ObjectKeysLength(JSContext* cx, HandleObject obj, int32_t* length);
[[nodiscard]] bool ThrowRuntimeLexicalError(JSContext* cx,
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -4328,10 +4328,12 @@ bool WarpCacheIRTranspiler::emitArrayJoinResult(ObjOperandId objId,
return resumeAfter(join);
}
-bool WarpCacheIRTranspiler::emitObjectKeysResult(ObjOperandId objId) {
+bool WarpCacheIRTranspiler::emitObjectKeysResult(ObjOperandId objId,
+ uint32_t resultShapeOffset) {
MDefinition* obj = getOperand(objId);
+ Shape* resultShape = shapeStubField(resultShapeOffset);
- auto* join = MObjectKeys::New(alloc(), obj);
+ auto* join = MObjectKeys::New(alloc(), obj, resultShape);
addEffectful(join);
pushResult(join);
diff --git a/js/src/shell/fuzz-flags.txt b/js/src/shell/fuzz-flags.txt
@@ -60,6 +60,8 @@
--monomorphic-inlining=always
--monomorphic-inlining=never
--disable-main-thread-denormals
+--object-keys-scalar-replacement=off
+--object-keys-scalar-replacement=on
# GC-related
# These 2 flags can cause the shell to slow down
diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp
@@ -13025,6 +13025,9 @@ bool InitOptionParser(OptionParser& op) {
!op.addBoolOption('\0', "disable-main-thread-denormals",
"Disable Denormals on the main thread only, to "
"emulate WebAudio worklets.") ||
+      !op.addStringOption('\0', "object-keys-scalar-replacement", "on/off",
+                          "Replace Object.keys arrays with NativeIterators "
+                          "(default: on)") ||
!op.addBoolOption('\0', "baseline",
"Enable baseline compiler (default)") ||
!op.addBoolOption('\0', "no-baseline", "Disable baseline compiler") ||
@@ -13989,6 +13992,16 @@ bool SetContextJITOptions(JSContext* cx, const OptionParser& op) {
#endif
}
+ if (const char* str = op.getStringOption("object-keys-scalar-replacement")) {
+ if (strcmp(str, "on") == 0) {
+ jit::JitOptions.disableObjectKeysScalarReplacement = false;
+ } else if (strcmp(str, "off") == 0) {
+ jit::JitOptions.disableObjectKeysScalarReplacement = true;
+ } else {
+ return OptionFailure("object-keys-scalar-replacement", str);
+ }
+ }
+
int32_t warmUpThreshold = op.getIntOption("ion-warmup-threshold");
if (warmUpThreshold >= 0) {
jit::JitOptions.setNormalIonWarmUpThreshold(warmUpThreshold);
diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp
@@ -735,8 +735,9 @@ JS_PUBLIC_API bool js::GetPropertyKeys(JSContext* cx, HandleObject obj,
return enumerator.snapshot(cx);
}
-static inline void RegisterEnumerator(JSContext* cx, NativeIterator* ni) {
- MOZ_ASSERT(ni->objectBeingIterated());
+static inline void RegisterEnumerator(JSContext* cx, NativeIterator* ni,
+ HandleObject obj) {
+ ni->initObjectBeingIterated(*obj);
// Register non-escaping native enumerators (for-in) with the current
// context.
@@ -853,7 +854,7 @@ NativeIterator::NativeIterator(JSContext* cx,
HandleIdVector props, bool supportsIndices,
PropertyIndexVector* indices, uint32_t numShapes,
uint32_t ownPropertyCount, bool* hadError)
- : objectBeingIterated_(objBeingIterated),
+ : objectBeingIterated_(nullptr),
iterObj_(propIter),
objShape_(numShapes > 0 ? objBeingIterated->shape() : nullptr),
// This holds the allocated property count until we're done with
@@ -1009,7 +1010,8 @@ static bool CanStoreInIteratorCache(JSObject* obj) {
}
static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInShapeIteratorCache(
- JSContext* cx, JSObject* obj, uint32_t* cacheableProtoChainLength) {
+ JSContext* cx, JSObject* obj, uint32_t* cacheableProtoChainLength,
+ bool exclusive) {
if (!obj->shape()->cache().isIterator() ||
!CanCompareIterableObjectToCache(obj)) {
return nullptr;
@@ -1017,7 +1019,7 @@ static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInShapeIteratorCache(
PropertyIteratorObject* iterobj = obj->shape()->cache().toIterator();
NativeIterator* ni = iterobj->getNativeIterator();
MOZ_ASSERT(ni->objShape() == obj->shape());
- if (!ni->isReusable()) {
+ if (exclusive && !ni->isReusable()) {
return nullptr;
}
@@ -1040,11 +1042,12 @@ static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInShapeIteratorCache(
}
static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInIteratorCache(
- JSContext* cx, JSObject* obj, uint32_t* cacheableProtoChainLength) {
+ JSContext* cx, JSObject* obj, uint32_t* cacheableProtoChainLength,
+ bool exclusive) {
MOZ_ASSERT(*cacheableProtoChainLength == 0);
- if (PropertyIteratorObject* shapeCached =
- LookupInShapeIteratorCache(cx, obj, cacheableProtoChainLength)) {
+ if (PropertyIteratorObject* shapeCached = LookupInShapeIteratorCache(
+ cx, obj, cacheableProtoChainLength, exclusive)) {
return shapeCached;
}
@@ -1082,7 +1085,7 @@ static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInIteratorCache(
MOZ_ASSERT(iterobj->compartment() == cx->compartment());
NativeIterator* ni = iterobj->getNativeIterator();
- if (!ni->isReusable()) {
+ if (exclusive && !ni->isReusable()) {
return nullptr;
}
@@ -1185,7 +1188,7 @@ static bool IndicesAreValid(NativeObject* obj, NativeIterator* ni) {
}
#endif
-template <bool WantIndices>
+template <bool WantIndices, bool SkipRegistration>
static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
HandleObject obj) {
MOZ_ASSERT(!obj->is<PropertyIteratorObject>());
@@ -1193,15 +1196,16 @@ static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
"We may end up allocating shapes in the wrong zone!");
uint32_t cacheableProtoChainLength = 0;
- if (PropertyIteratorObject* iterobj =
- LookupInIteratorCache(cx, obj, &cacheableProtoChainLength)) {
+ if (PropertyIteratorObject* iterobj = LookupInIteratorCache(
+ cx, obj, &cacheableProtoChainLength, !SkipRegistration)) {
NativeIterator* ni = iterobj->getNativeIterator();
bool recreateWithIndices = WantIndices && ni->indicesSupported();
if (!recreateWithIndices) {
MOZ_ASSERT_IF(WantIndices && ni->indicesAvailable(),
IndicesAreValid(&obj->as<NativeObject>(), ni));
- ni->initObjectBeingIterated(*obj);
- RegisterEnumerator(cx, ni);
+ if (!SkipRegistration) {
+ RegisterEnumerator(cx, ni, obj);
+ }
return iterobj;
}
}
@@ -1244,9 +1248,11 @@ static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
//
// In debug builds, AssertDenseElementsNotIterated is used to check the flag
// is set correctly.
- if (obj->is<NativeObject>() &&
- obj->as<NativeObject>().getDenseInitializedLength() > 0) {
- obj->as<NativeObject>().markDenseElementsMaybeInIteration();
+ if (!SkipRegistration) {
+ if (obj->is<NativeObject>() &&
+ obj->as<NativeObject>().getDenseInitializedLength() > 0) {
+ obj->as<NativeObject>().markDenseElementsMaybeInIteration();
+ }
}
PropertyIndexVector* indicesPtr =
@@ -1257,7 +1263,9 @@ static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
if (!iterobj) {
return nullptr;
}
- RegisterEnumerator(cx, iterobj->getNativeIterator());
+ if (!SkipRegistration) {
+ RegisterEnumerator(cx, iterobj->getNativeIterator(), obj);
+ }
cx->check(iterobj);
MOZ_ASSERT_IF(
@@ -1283,24 +1291,34 @@ static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
}
PropertyIteratorObject* js::GetIterator(JSContext* cx, HandleObject obj) {
- return GetIteratorImpl<false>(cx, obj);
+ return GetIteratorImpl<false, false>(cx, obj);
}
PropertyIteratorObject* js::GetIteratorWithIndices(JSContext* cx,
HandleObject obj) {
- return GetIteratorImpl<true>(cx, obj);
+ return GetIteratorImpl<true, false>(cx, obj);
+}
+
+PropertyIteratorObject* js::GetIteratorUnregistered(JSContext* cx,
+ HandleObject obj) {
+ return GetIteratorImpl<false, true>(cx, obj);
+}
+
+PropertyIteratorObject* js::GetIteratorWithIndicesUnregistered(
+ JSContext* cx, HandleObject obj) {
+ return GetIteratorImpl<true, true>(cx, obj);
}
PropertyIteratorObject* js::LookupInIteratorCache(JSContext* cx,
HandleObject obj) {
uint32_t dummy = 0;
- return LookupInIteratorCache(cx, obj, &dummy);
+ return LookupInIteratorCache(cx, obj, &dummy, true);
}
PropertyIteratorObject* js::LookupInShapeIteratorCache(JSContext* cx,
HandleObject obj) {
uint32_t dummy = 0;
- return LookupInShapeIteratorCache(cx, obj, &dummy);
+ return LookupInShapeIteratorCache(cx, obj, &dummy, true);
}
// ES 2017 draft 7.4.7.
diff --git a/js/src/vm/Iteration.h b/js/src/vm/Iteration.h
@@ -708,6 +708,11 @@ PropertyIteratorObject* LookupInShapeIteratorCache(JSContext* cx,
PropertyIteratorObject* GetIterator(JSContext* cx, HandleObject obj);
PropertyIteratorObject* GetIteratorWithIndices(JSContext* cx, HandleObject obj);
+PropertyIteratorObject* GetIteratorUnregistered(JSContext* cx,
+ HandleObject obj);
+PropertyIteratorObject* GetIteratorWithIndicesUnregistered(JSContext* cx,
+ HandleObject obj);
+
PropertyIteratorObject* ValueToIterator(JSContext* cx, HandleValue vp);
void CloseIterator(JSObject* obj);