commit ae8d1d79ec312f4e03bef28921041e1c0b72b0b7
parent 7e570d84ddd935650b6402c20269d9cd228b4cc4
Author: alexical <dothayer@mozilla.com>
Date: Wed, 12 Nov 2025 01:41:21 +0000
Bug 1992813 - Slim down NativeIterator r=iain
Differential Revision: https://phabricator.services.mozilla.com/D267641
Diffstat:
15 files changed, 311 insertions(+), 327 deletions(-)
diff --git a/js/src/jit-test/tests/basic/bug1884706.js b/js/src/jit-test/tests/basic/bug1884706.js
@@ -1,5 +0,0 @@
-const arr = new Int32Array(1 << 26);
-try {
- for (const key in arr) {
- }
-} catch {}
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
@@ -6309,7 +6309,7 @@ void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
#endif
// Mark iterator as active.
- Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
+ Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlags());
masm.storePtr(objBeingIterated, iterObjAddr);
masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
@@ -18358,18 +18358,18 @@ void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
nativeIter);
+ Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlags());
if (lir->mir()->wantsIndices()) {
// At least one consumer of the output of this iterator has been optimized
// to use iterator indices. If the cached iterator doesn't include indices,
// but it was marked to indicate that we can create them if needed, then we
// do a VM call to replace the cached iterator with a fresh iterator
// including indices.
- masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
- NativeIteratorIndices::AvailableOnRequest,
- ool->entry());
+ masm.branchTest32(Assembler::NonZero, iterFlagsAddr,
+ Imm32(NativeIterator::Flags::IndicesSupported),
+ ool->entry());
}
- Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
masm.storePtr(
obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
@@ -18420,13 +18420,14 @@ void CodeGenerator::visitIteratorHasIndicesAndBranch(
Address nativeIterAddr(iterator,
PropertyIteratorObject::offsetOfIteratorSlot());
masm.loadPrivate(nativeIterAddr, temp);
- masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
- NativeIteratorIndices::Valid, ifFalse);
+ masm.branchTest32(Assembler::Zero,
+ Address(temp, NativeIterator::offsetOfFlags()),
+ Imm32(NativeIterator::Flags::IndicesAvailable), ifFalse);
// Guard that the first shape stored in the iterator matches the current
// shape of the iterated object.
- Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
- masm.loadPtr(firstShapeAddr, temp);
+ Address objShapeAddr(temp, NativeIterator::offsetOfObjectShape());
+ masm.loadPtr(objShapeAddr, temp);
masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
ifFalse);
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
@@ -3243,24 +3243,33 @@ void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator,
PropertyIteratorObject::offsetOfIteratorSlot());
loadPrivate(nativeIterAddr, outIndex);
- // Compute offset of propertyCursor_ from propertiesBegin()
- loadPtr(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
- subPtr(Address(outIndex, NativeIterator::offsetOfShapesEnd()), outKind);
-
- // Compute offset of current index from indicesBegin(). Note that because
- // propertyCursor has already been incremented, this is actually the offset
- // of the next index. We adjust accordingly below.
- size_t indexAdjustment =
- sizeof(GCPtr<JSLinearString*>) / sizeof(PropertyIndex);
- if (indexAdjustment != 1) {
- MOZ_ASSERT(indexAdjustment == 2);
- rshift32(Imm32(1), outKind);
- }
-
- // Load current index.
- loadPtr(Address(outIndex, NativeIterator::offsetOfPropertiesEnd()), outIndex);
- load32(BaseIndex(outIndex, outKind, Scale::TimesOne,
- -int32_t(sizeof(PropertyIndex))),
+ // Load the property count into outKind.
+ load32(Address(outIndex, NativeIterator::offsetOfPropertyCount()), outKind);
+
+ // We need two bits of wiggle room in a u32 here for the logic below.
+ static_assert(NativeIterator::PropCountLimit <= 1 << 30);
+
+ // Shift up the property count on 64 bit. Ultimately we want
+ // sizeof(IteratorProperty) * count + sizeof(PropertyIndex) * cursor.
+ // If we shift up our count to be on the same scale as cursor right now,
+ // we can do this all with one register.
+ static_assert(sizeof(IteratorProperty) == sizeof(PropertyIndex) ||
+ sizeof(IteratorProperty) == sizeof(PropertyIndex) * 2);
+ if constexpr (sizeof(IteratorProperty) > sizeof(PropertyIndex)) {
+ lshift32(Imm32(1), outKind);
+ }
+
+ // Add the current cursor. This is a uint32_t which has already been
+ // incremented in iteration to index the *next* property, so we'll want to
+ // keep that in mind in our final address calculation.
+ add32(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
+
+ // outKind holds the offset in u32's to our PropertyIndex, so just multiply
+ // by four, add it to the offset of the first property, and subtract a
+ // PropertyIndex since we know we already incremented.
+ load32(BaseIndex(outIndex, outKind, Scale::TimesFour,
+ NativeIterator::offsetOfFirstProperty() -
+ int32_t(sizeof(PropertyIndex))),
outIndex);
// Extract kind.
@@ -9366,7 +9375,7 @@ void MacroAssembler::branchIfResizableArrayBufferViewInBounds(Register obj,
void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
Label* notReusable) {
// See NativeIterator::isReusable.
- Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
+ Address flagsAddr(ni, NativeIterator::offsetOfFlags());
#ifdef DEBUG
Label niIsInitialized;
@@ -9382,17 +9391,6 @@ void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
Imm32(NativeIterator::Flags::NotReusable), notReusable);
}
-void MacroAssembler::branchNativeIteratorIndices(Condition cond, Register ni,
- Register temp,
- NativeIteratorIndices kind,
- Label* label) {
- Address iterFlagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
- load32(iterFlagsAddr, temp);
- and32(Imm32(NativeIterator::IndicesMask), temp);
- uint32_t shiftedKind = uint32_t(kind) << NativeIterator::IndicesShift;
- branch32(cond, temp, Imm32(shiftedKind), label);
-}
-
static void LoadNativeIterator(MacroAssembler& masm, Register obj,
Register dest) {
MOZ_ASSERT(obj != dest);
@@ -9461,16 +9459,25 @@ void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
LoadNativeIterator(*this, dest, nativeIterator);
branchIfNativeIteratorNotReusable(nativeIterator, failure);
+ Label skipIndices;
+ load32(Address(nativeIterator, NativeIterator::offsetOfPropertyCount()),
+ temp3);
+ branchTest32(Assembler::Zero,
+ Address(nativeIterator, NativeIterator::offsetOfFlags()),
+ Imm32(NativeIterator::Flags::IndicesAllocated), &skipIndices);
+
+ computeEffectiveAddress(BaseIndex(nativeIterator, temp3, Scale::TimesFour),
+ nativeIterator);
+
+ bind(&skipIndices);
+ computeEffectiveAddress(BaseIndex(nativeIterator, temp3, ScalePointer,
+ NativeIterator::offsetOfFirstProperty()),
+ nativeIterator);
+
+ Register expectedProtoShape = nativeIterator;
+
// We have to compare the shapes in the native iterator with the shapes on the
- // proto chain to ensure the cached iterator is still valid. The shape array
- // always starts at a fixed offset from the base of the NativeIterator, so
- // instead of using an instruction outside the loop to initialize a pointer to
- // the shapes array, we can bake it into the offset and reuse the pointer to
- // the NativeIterator. We add |sizeof(Shape*)| to start at the second shape.
- // (The first shape corresponds to the object itself. We don't have to check
- // it, because we got the iterator via the shape.)
- size_t nativeIteratorProtoShapeOffset =
- NativeIterator::offsetOfFirstShape() + sizeof(Shape*);
+ // proto chain to ensure the cached iterator is still valid.
// Loop over the proto chain. At the head of the loop, |shape| is the shape of
// the current object, and |iteratorShapes| points to the expected shape of
@@ -9497,11 +9504,11 @@ void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
// Compare the shape of the proto to the expected shape.
loadPtr(Address(shapeAndProto, JSObject::offsetOfShape()), shapeAndProto);
- loadPtr(Address(nativeIterator, nativeIteratorProtoShapeOffset), temp3);
+ loadPtr(Address(expectedProtoShape, 0), temp3);
branchPtr(Assembler::NotEqual, shapeAndProto, temp3, failure);
// Increment |iteratorShapes| and jump back to the top of the loop.
- addPtr(Imm32(sizeof(Shape*)), nativeIterator);
+ addPtr(Imm32(sizeof(Shape*)), expectedProtoShape);
jump(&protoLoop);
#ifdef DEBUG
@@ -9523,15 +9530,17 @@ void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
Label iterDone, restart;
bind(&restart);
Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
- Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
- loadPtr(cursorAddr, temp);
- branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);
+ Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertyCount());
+ load32(cursorAddr, temp);
+ branch32(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);
// Get next string.
- loadPtr(Address(temp, 0), temp);
+ BaseIndex propAddr(outputScratch, temp, ScalePointer,
+ NativeIterator::offsetOfFirstProperty());
+ loadPtr(propAddr, temp);
// Increase the cursor.
- addPtr(Imm32(sizeof(IteratorProperty)), cursorAddr);
+ addPtr(Imm32(1), cursorAddr);
// Check if the property has been deleted while iterating. Skip it if so.
branchTestPtr(Assembler::NonZero, temp,
@@ -9550,7 +9559,7 @@ void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
Register temp3) {
LoadNativeIterator(*this, obj, temp1);
- Address flagsAddr(temp1, NativeIterator::offsetOfFlagsAndCount());
+ Address flagsAddr(temp1, NativeIterator::offsetOfFlags());
// The shared iterator used for for-in with null/undefined is immutable and
// unlinked. See NativeIterator::isEmptyIteratorSingleton.
@@ -9564,8 +9573,7 @@ void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
storePtr(ImmPtr(nullptr), iterObjAddr);
// Reset property cursor.
- loadPtr(Address(temp1, NativeIterator::offsetOfShapesEnd()), temp2);
- storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));
+ store32(Imm32(0), Address(temp1, NativeIterator::offsetOfPropertyCursor()));
// Clear deleted bits (only if we have unvisited deletions)
Label clearDeletedLoopStart, clearDeletedLoopEnd;
@@ -9573,7 +9581,13 @@ void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
Imm32(NativeIterator::Flags::HasUnvisitedPropertyDeletion),
&clearDeletedLoopEnd);
- loadPtr(Address(temp1, NativeIterator::offsetOfPropertiesEnd()), temp3);
+ load32(Address(temp1, NativeIterator::offsetOfPropertyCount()), temp3);
+
+ computeEffectiveAddress(BaseIndex(temp1, temp3, ScalePointer,
+ NativeIterator::offsetOfFirstProperty()),
+ temp3);
+ computeEffectiveAddress(
+ Address(temp1, NativeIterator::offsetOfFirstProperty()), temp2);
bind(&clearDeletedLoopStart);
and32(Imm32(~uint32_t(IteratorProperty::DeletedBit)), Address(temp2, 0));
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
@@ -221,8 +221,6 @@ namespace js {
class StaticStrings;
class FixedLengthTypedArrayObject;
-enum class NativeIteratorIndices : uint32_t;
-
namespace wasm {
class CalleeDesc;
class CallSiteDesc;
@@ -1114,6 +1112,7 @@ class MacroAssembler : public MacroAssemblerSpecific {
// explicitly requested. Instead use branch(Add|Sub|Mul|Neg) to test for
// condition flags after performing arithmetic operations.
+ inline void add32(const Address& src, Register dest) PER_SHARED_ARCH;
inline void add32(Register src, Register dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, Register src, Register dest) PER_SHARED_ARCH;
@@ -5396,8 +5395,6 @@ class MacroAssembler : public MacroAssemblerSpecific {
Label* label);
void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);
- void branchNativeIteratorIndices(Condition cond, Register ni, Register temp,
- NativeIteratorIndices kind, Label* label);
void maybeLoadIteratorFromShape(Register obj, Register dest, Register temp,
Register temp2, Register temp3,
diff --git a/js/src/jit/arm/MacroAssembler-arm-inl.h b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -327,6 +327,14 @@ void MacroAssembler::add32(Imm32 imm, const Address& dest) {
ma_str(scratch, dest, scratch2);
}
+void MacroAssembler::add32(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ SecondScratchRegisterScope scratch2(*this);
+
+ ma_ldr(src, scratch, scratch2);
+ ma_add(scratch, dest, SetCC);
+}
+
void MacroAssembler::addPtr(Register src, Register dest) { ma_add(src, dest); }
void MacroAssembler::addPtr(Imm32 imm, Register dest) {
diff --git a/js/src/jit/arm64/MacroAssembler-arm64-inl.h b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -314,6 +314,14 @@ void MacroAssembler::add32(Imm32 imm, const Address& dest) {
Str(scratch32, toMemOperand(dest));
}
+void MacroAssembler::add32(const Address& src, Register dest) {
+ vixl::UseScratchRegisterScope temps(this);
+ const ARMRegister scratch32 = temps.AcquireW();
+ MOZ_ASSERT(scratch32.asUnsized() != src.base);
+ load32(src, scratch32.asUnsized());
+ Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
+}
+
void MacroAssembler::addPtr(Register src, Register dest) {
addPtr(src, dest, dest);
}
diff --git a/js/src/jit/loong64/MacroAssembler-loong64-inl.h b/js/src/jit/loong64/MacroAssembler-loong64-inl.h
@@ -331,6 +331,13 @@ void MacroAssembler::add32(Imm32 imm, const Address& dest) {
store32(scratch, dest);
}
+void MacroAssembler::add32(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(asMasm());
+ Register scratch = temps.Acquire();
+ load32(src, scratch);
+ as_add_w(dest, dest, scratch);
+}
+
void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
UseScratchRegisterScope temps(asMasm());
Register scratch = temps.Acquire();
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -158,6 +158,13 @@ void MacroAssembler::add32(Imm32 imm, const Address& dest) {
store32(scratch2, dest);
}
+void MacroAssembler::add32(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(*this);
+ Register scratch2 = temps.Acquire();
+ load32(src, scratch2);
+ as_addu(dest, dest, scratch2);
+}
+
void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
UseScratchRegisterScope temps(*this);
Register scratch = temps.Acquire();
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h b/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h
@@ -294,6 +294,14 @@ void MacroAssembler::add32(Imm32 imm, const Address& dest) {
ma_add32(scratch2, scratch2, imm);
store32(scratch2, dest);
}
+
+void MacroAssembler::add32(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ load32(src, scratch);
+ ma_add32(dest, dest, scratch);
+}
+
void MacroAssembler::add64(Register64 src, Register64 dest) {
addPtr(src.reg, dest.reg);
}
diff --git a/js/src/jit/wasm32/MacroAssembler-wasm32-inl.h b/js/src/jit/wasm32/MacroAssembler-wasm32-inl.h
@@ -288,6 +288,8 @@ void MacroAssembler::add32(Imm32 imm, Register src, Register dest) {
void MacroAssembler::add32(Imm32 imm, const Address& dest) { MOZ_CRASH(); }
+void MacroAssembler::add32(const Address& src, Register dest) { MOZ_CRASH(); }
+
void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
MOZ_CRASH();
}
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -199,6 +199,10 @@ void MacroAssembler::byteSwap32(Register reg) { bswapl(reg); }
// ===============================================================
// Arithmetic instructions
+void MacroAssembler::add32(const Address& src, Register dest) {
+ addl(Operand(src), dest);
+}
+
void MacroAssembler::add32(Register src, Register dest) { addl(src, dest); }
void MacroAssembler::add32(Imm32 imm, Register dest) { addl(imm, dest); }
diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp
@@ -61,28 +61,18 @@ static const gc::AllocKind ITERATOR_FINALIZE_KIND =
// into this code.
void NativeIterator::trace(JSTracer* trc) {
TraceNullableEdge(trc, &objectBeingIterated_, "objectBeingIterated_");
- TraceNullableEdge(trc, &iterObj_, "iterObj");
+ TraceNullableEdge(trc, &iterObj_, "iterObj_");
+ TraceNullableEdge(trc, &objShape_, "objShape_");
// The limits below are correct at every instant of |NativeIterator|
// initialization, with the end-pointer incremented as each new shape is
// created, so they're safe to use here.
- std::for_each(shapesBegin(), shapesEnd(), [trc](GCPtr<Shape*>& shape) {
- TraceEdge(trc, &shape, "iterator_shape");
- });
-
- // But as properties must be created *before* shapes, |propertiesBegin()|
- // that depends on |shapesEnd()| having its final value can't safely be
- // used. Until this is fully initialized, use |propertyCursor_| instead,
- // which points at the start of properties even in partially initialized
- // |NativeIterator|s. (|propertiesEnd()| is safe at all times with respect
- // to the properly-chosen beginning.)
- //
- // Note that we must trace all properties (not just those not yet visited,
- // or just visited, due to |NativeIterator::previousPropertyWas|) for
- // |NativeIterator|s to be reusable.
- IteratorProperty* begin =
- MOZ_LIKELY(isInitialized()) ? propertiesBegin() : propertyCursor_;
- std::for_each(begin, propertiesEnd(),
+ std::for_each(protoShapesBegin(allocatedPropertyCount()), protoShapesEnd(),
+ [trc](GCPtr<Shape*>& shape) {
+ TraceEdge(trc, &shape, "iterator_proto_shape");
+ });
+
+ std::for_each(propertiesBegin(), propertiesEnd(),
[trc](IteratorProperty& prop) { prop.traceString(trc); });
}
@@ -110,7 +100,7 @@ class PropertyEnumerator {
// Every property that has been enumerated so far can be represented as a
// PropertyIndex, but we are not currently producing a list of indices. If
// the state is Valid when we are done enumerating, then the resulting
- // iterator can be marked as NativeIteratorIndices::AvailableOnRequest.
+ // iterator can be marked with NativeIterator::Flags::IndicesSupported.
Valid,
// Every property that has been enumerated so far can be represented as a
@@ -771,23 +761,23 @@ static PropertyIteratorObject* NewPropertyIteratorObject(JSContext* cx) {
return res;
}
-static inline size_t NumTrailingBytes(size_t propertyCount, size_t shapeCount,
- bool hasIndices) {
+static inline size_t NumTrailingBytes(size_t propertyCount,
+ size_t protoShapeCount, bool hasIndices) {
static_assert(alignof(IteratorProperty) <= alignof(NativeIterator));
static_assert(alignof(GCPtr<Shape*>) <= alignof(IteratorProperty));
static_assert(alignof(PropertyIndex) <= alignof(GCPtr<Shape*>));
size_t result = propertyCount * sizeof(IteratorProperty) +
- shapeCount * sizeof(GCPtr<Shape*>);
+ protoShapeCount * sizeof(GCPtr<Shape*>);
if (hasIndices) {
result += propertyCount * sizeof(PropertyIndex);
}
return result;
}
-static inline size_t AllocationSize(size_t propertyCount, size_t shapeCount,
- bool hasIndices) {
+static inline size_t AllocationSize(size_t propertyCount,
+ size_t protoShapeCount, bool hasIndices) {
return sizeof(NativeIterator) +
- NumTrailingBytes(propertyCount, shapeCount, hasIndices);
+ NumTrailingBytes(propertyCount, protoShapeCount, hasIndices);
}
static PropertyIteratorObject* CreatePropertyIterator(
@@ -810,6 +800,11 @@ static PropertyIteratorObject* CreatePropertyIterator(
if (numShapes == 0 && hasIndices) {
numShapes = 1;
}
+ if (numShapes > NativeIterator::ShapeCountLimit) {
+ ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+ uint32_t numProtoShapes = numShapes > 0 ? numShapes - 1 : 0;
Rooted<PropertyIteratorObject*> propIter(cx, NewPropertyIteratorObject(cx));
if (!propIter) {
@@ -817,7 +812,7 @@ static PropertyIteratorObject* CreatePropertyIterator(
}
void* mem = cx->pod_malloc_with_extra<NativeIterator, uint8_t>(
- NumTrailingBytes(props.length(), numShapes, hasIndices));
+ NumTrailingBytes(props.length(), numProtoShapes, hasIndices));
if (!mem) {
return nullptr;
}
@@ -852,17 +847,11 @@ NativeIterator::NativeIterator(JSContext* cx,
bool* hadError)
: objectBeingIterated_(objBeingIterated),
iterObj_(propIter),
- // NativeIterator initially acts (before full initialization) as if it
- // contains no shapes...
- shapesEnd_(shapesBegin()),
- // ...and no properties.
- propertyCursor_(
- reinterpret_cast<IteratorProperty*>(shapesBegin() + numShapes)),
- propertiesEnd_(propertyCursor_),
- shapesHash_(0),
- flagsAndCount_(
- initialFlagsAndCount(props.length())) // note: no Flags::Initialized
-{
+ objShape_(numShapes > 0 ? objBeingIterated->shape() : nullptr),
+ // This holds the allocated property count until we're done with
+ // initialization
+ propertyCursor_(props.length()),
+ shapesHash_(0) {
// If there are shapes, the object and all objects on its prototype chain must
// be native objects. See CanCompareIterableObjectToCache.
MOZ_ASSERT_IF(numShapes > 0,
@@ -873,6 +862,12 @@ NativeIterator::NativeIterator(JSContext* cx,
bool hasActualIndices = !!indices;
MOZ_ASSERT_IF(hasActualIndices, indices->length() == props.length());
+ if (hasActualIndices) {
+ flags_ |= Flags::IndicesAllocated;
+ } else if (supportsIndices) {
+ flags_ |= Flags::IndicesSupported;
+ }
+
// NOTE: This must be done first thing: The caller can't free `this` on error
// because it has GCPtr fields whose barriers have already fired; the
// store buffer has pointers to them. Only the GC can free `this` (via
@@ -885,22 +880,9 @@ NativeIterator::NativeIterator(JSContext* cx,
// shapes, and ensuring that indicesState_.allocated() is true if we've
// allocated space for indices. It's OK for the constructor to fail after
// that.
- size_t nbytes = AllocationSize(props.length(), numShapes, hasActualIndices);
+ size_t nbytes = AllocationSize(
+ props.length(), numShapes > 0 ? numShapes - 1 : 0, hasActualIndices);
AddCellMemory(propIter, nbytes, MemoryUse::NativeIterator);
- if (supportsIndices) {
- if (hasActualIndices) {
- // If the string allocation fails, indicesAllocated() must be true
- // so that this->allocationSize() is correct. Set it to Disabled. It will
- // be updated below.
- setIndicesState(NativeIteratorIndices::Disabled);
- } else {
- // This object supports indices (ie it only has own enumerable
- // properties), but we didn't allocate them because we haven't seen a
- // consumer yet. We mark the iterator so that potential consumers know to
- // request a fresh iterator with indices.
- setIndicesState(NativeIteratorIndices::AvailableOnRequest);
- }
- }
if (numShapes > 0) {
// Construct shapes into the shapes array. Also compute the shapesHash,
@@ -911,8 +893,10 @@ NativeIterator::NativeIterator(JSContext* cx,
for (uint32_t i = 0; i < numShapes; i++) {
MOZ_ASSERT(pobj->is<NativeObject>());
Shape* shape = pobj->shape();
- new (shapesEnd_) GCPtr<Shape*>(shape);
- shapesEnd_++;
+ if (i > 0) {
+ new (protoShapesEnd()) GCPtr<Shape*>(shape);
+ protoShapeCount_++;
+ }
shapesHash = mozilla::AddToHash(shapesHash, HashIteratorShape(shape));
pobj = pobj->staticPrototype();
}
@@ -926,8 +910,8 @@ NativeIterator::NativeIterator(JSContext* cx,
// shape of the iterated object itself (see IteratorHasIndicesAndBranch).
// In the former case, assert that we're storing the entire proto chain.
MOZ_ASSERT_IF(numShapes > 1, pobj == nullptr);
+ MOZ_ASSERT(uintptr_t(protoShapesEnd()) == uintptr_t(this) + nbytes);
}
- MOZ_ASSERT(static_cast<void*>(shapesEnd_) == propertyCursor_);
// Allocate any strings in the nursery until the first minor GC. After this
// point they will end up getting tenured anyway because they are reachable
@@ -951,8 +935,8 @@ NativeIterator::NativeIterator(JSContext* cx,
// We write to our IteratorProperty children only here and in
// PropertyIteratorObject::trace. Here we do not need a pre-barrier
// because we are not overwriting a previous value.
- new (propertiesEnd_) IteratorProperty(str);
- propertiesEnd_++;
+ new (propertiesEnd()) IteratorProperty(str);
+ propertyCount_++;
if (maybeNeedGC && gc::IsInsideNursery(str)) {
maybeNeedGC = false;
cx->runtime()->gc.storeBuffer().putWholeCell(propIter);
@@ -964,19 +948,18 @@ NativeIterator::NativeIterator(JSContext* cx,
for (size_t i = 0; i < numProps; i++) {
*cursor++ = (*indices)[i];
}
- MOZ_ASSERT(uintptr_t(cursor) == uintptr_t(this) + nbytes);
- setIndicesState(NativeIteratorIndices::Valid);
+ flags_ |= Flags::IndicesAvailable;
}
- markInitialized();
+ propertyCursor_ = 0;
+ flags_ |= Flags::Initialized;
MOZ_ASSERT(!*hadError);
}
inline size_t NativeIterator::allocationSize() const {
- size_t numShapes = shapesEnd() - shapesBegin();
-
- return AllocationSize(initialPropertyCount(), numShapes, indicesAllocated());
+ return AllocationSize(allocatedPropertyCount(), protoShapeCount_,
+ indicesAllocated());
}
/* static */
@@ -984,12 +967,13 @@ bool IteratorHashPolicy::match(PropertyIteratorObject* obj,
const Lookup& lookup) {
NativeIterator* ni = obj->getNativeIterator();
if (ni->shapesHash() != lookup.shapesHash ||
- ni->shapeCount() != lookup.numShapes) {
+ ni->protoShapeCount() != lookup.numProtoShapes ||
+ ni->objShape() != lookup.objShape) {
return false;
}
- return ArrayEqual(reinterpret_cast<Shape**>(ni->shapesBegin()), lookup.shapes,
- ni->shapeCount());
+ return ArrayEqual(reinterpret_cast<Shape**>(ni->protoShapesBegin()),
+ lookup.protoShapes, ni->protoShapeCount());
}
static inline bool CanCompareIterableObjectToCache(JSObject* obj) {
@@ -1023,14 +1007,15 @@ static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInShapeIteratorCache(
}
PropertyIteratorObject* iterobj = obj->shape()->cache().toIterator();
NativeIterator* ni = iterobj->getNativeIterator();
- MOZ_ASSERT(*ni->shapesBegin() == obj->shape());
+ MOZ_ASSERT(ni->objShape() == obj->shape());
if (!ni->isReusable()) {
return nullptr;
}
// Verify shapes of proto chain.
JSObject* pobj = obj;
- for (GCPtr<Shape*>* s = ni->shapesBegin() + 1; s != ni->shapesEnd(); s++) {
+ for (GCPtr<Shape*>* s = ni->protoShapesBegin(); s != ni->protoShapesEnd();
+ s++) {
Shape* shape = *s;
pobj = pobj->staticPrototype();
if (pobj->shape() != shape) {
@@ -1041,7 +1026,7 @@ static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInShapeIteratorCache(
}
}
MOZ_ASSERT(CanStoreInIteratorCache(obj));
- *cacheableProtoChainLength = ni->shapeCount();
+ *cacheableProtoChainLength = ni->objShape() ? ni->protoShapeCount() + 1 : 0;
return iterobj;
}
@@ -1077,8 +1062,8 @@ static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInIteratorCache(
MOZ_ASSERT(!shapes.empty());
*cacheableProtoChainLength = shapes.length();
- IteratorHashPolicy::Lookup lookup(shapes.begin(), shapes.length(),
- shapesHash);
+ IteratorHashPolicy::Lookup lookup(shapes[0], shapes.begin() + 1,
+ shapes.length() - 1, shapesHash);
auto p = ObjectRealm::get(obj).iteratorCache.lookup(lookup);
if (!p) {
return nullptr;
@@ -1100,13 +1085,13 @@ static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInIteratorCache(
MOZ_ASSERT(CanStoreInIteratorCache(obj));
NativeIterator* ni = iterobj->getNativeIterator();
- MOZ_ASSERT(ni->shapeCount() > 0);
+ MOZ_ASSERT(ni->objShape());
obj->shape()->maybeCacheIterator(cx, iterobj);
IteratorHashPolicy::Lookup lookup(
- reinterpret_cast<Shape**>(ni->shapesBegin()), ni->shapeCount(),
- ni->shapesHash());
+ ni->objShape(), reinterpret_cast<Shape**>(ni->protoShapesBegin()),
+ ni->protoShapeCount(), ni->shapesHash());
ObjectRealm::IteratorCache& cache = ObjectRealm::get(obj).iteratorCache;
bool ok;
@@ -1142,7 +1127,7 @@ bool js::EnumerateProperties(JSContext* cx, HandleObject obj,
#ifdef DEBUG
static bool IndicesAreValid(NativeObject* obj, NativeIterator* ni) {
- MOZ_ASSERT(ni->hasValidIndices());
+ MOZ_ASSERT(ni->indicesAvailable());
size_t numDenseElements = obj->getDenseInitializedLength();
size_t numFixedSlots = obj->numFixedSlots();
const Value* elements = obj->getDenseElements();
@@ -1202,9 +1187,9 @@ static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
if (PropertyIteratorObject* iterobj =
LookupInIteratorCache(cx, obj, &cacheableProtoChainLength)) {
NativeIterator* ni = iterobj->getNativeIterator();
- bool recreateWithIndices = WantIndices && ni->indicesAvailableOnRequest();
+ bool recreateWithIndices = WantIndices && ni->indicesSupported();
if (!recreateWithIndices) {
- MOZ_ASSERT_IF(WantIndices && ni->hasValidIndices(),
+ MOZ_ASSERT_IF(WantIndices && ni->indicesAvailable(),
IndicesAreValid(&obj->as<NativeObject>(), ni));
ni->initObjectBeingIterated(*obj);
RegisterEnumerator(cx, ni);
@@ -1215,6 +1200,9 @@ static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
if (cacheableProtoChainLength > 0 && !CanStoreInIteratorCache(obj)) {
cacheableProtoChainLength = 0;
}
+ if (cacheableProtoChainLength > NativeIterator::ShapeCountLimit) {
+ cacheableProtoChainLength = 0;
+ }
RootedIdVector keys(cx);
PropertyIndexVector indices(cx);
diff --git a/js/src/vm/Iteration.h b/js/src/vm/Iteration.h
@@ -193,27 +193,6 @@ class NativeIteratorListIter {
}
};
-// If an object only has own data properties, we can store a list of
-// PropertyIndex that can be used in Ion to more efficiently access those
-// properties in cases like `for (var key in obj) { ...obj[key]... }`.
-enum class NativeIteratorIndices : uint32_t {
- // The object being iterated does not support indices.
- Unavailable = 0,
-
- // The object being iterated supports indices, but none have been
- // allocated, because it has not yet been iterated by Ion code that
- // can use indices-based access.
- AvailableOnRequest = 1,
-
- // The object being iterated had indices allocated, but they were
- // disabled due to a deleted property.
- Disabled = 2,
-
- // The object being iterated had indices allocated, and they are
- // still valid.
- Valid = 3
-};
-
class IteratorProperty {
uintptr_t raw_ = 0;
@@ -251,31 +230,12 @@ struct NativeIterator : public NativeIteratorListNode {
// Internal iterator object.
const GCPtr<JSObject*> iterObj_ = {};
-
- // The end of GCPtr<Shape*>s that appear directly after |this|, as part of an
- // overall allocation that stores |*this|, shapes, iterated strings, and maybe
- // indices. Once this has been fully initialized, it also equals the start of
- // iterated strings.
- GCPtr<Shape*>* shapesEnd_; // initialized by constructor
-
- // The next property, pointing into an array of strings directly after any
- // GCPtr<Shape*>s that appear directly after |*this|, as part of an overall
- // allocation that stores |*this|, shapes, iterated strings, and maybe
- // indices. Strings are stored as a JSLinearString* with a low-bit tag
- // indicating whether they were deleted while iterating this object, in which
- // case they should be skipped. The post barrier for writing to this is
- // handled in NativeIterator::NativeIterator by adding iterObj_ to the
- // whole cell buffer, and no pre barrier is required because we never modify
- // these after initialization.
- IteratorProperty* propertyCursor_; // initialized by constructor
-
- // The limit/end of properties to iterate. Once |this| has been fully
- // initialized, it also equals the start of indices, if indices are present,
- // or the end of the full allocation storing |*this|, shapes, and strings, if
- // indices are not present.
- IteratorProperty* propertiesEnd_; // initialized by constructor
-
- HashNumber shapesHash_; // initialized by constructor
+ const GCPtr<Shape*> objShape_ = {};
+ uint32_t propertyCount_ = 0;
+ uint32_t propertyCursor_; // initialized by constructor
+ HashNumber shapesHash_; // initialized by constructor
+ uint16_t protoShapeCount_ = 0;
+ uint8_t flags_ = 0;
public:
// For cacheable native iterators, whether the iterator is currently
@@ -308,6 +268,21 @@ struct NativeIterator : public NativeIteratorListNode {
// null/undefined.
static constexpr uint32_t IsEmptyIteratorSingleton = 0x8;
+ // NOTE: the three flags below pertain to iterator indices optimizations.
+ // If an object only has own data properties, we can store a list of
+ // PropertyIndex that can be used in Ion to more efficiently access those
+ // properties in cases like `for (var key in obj) { ...obj[key]... }`.
+
+ // Whether the object supports indices, in the event that they are
+  // requested. Note that this is mutually exclusive with IndicesAvailable.
+ static constexpr uint32_t IndicesSupported = 0x10;
+
+ // Whether space was initially reserved for indices for this iterator.
+ static constexpr uint32_t IndicesAllocated = 0x20;
+
+  // Whether indices are actually valid in the reserved area.
+ static constexpr uint32_t IndicesAvailable = 0x40;
+
// If any of these bits are set on a |NativeIterator|, it isn't
// currently reusable. (An active |NativeIterator| can't be stolen
// *right now*; a |NativeIterator| that's had its properties mutated
@@ -316,27 +291,16 @@ struct NativeIterator : public NativeIteratorListNode {
Active | HasUnvisitedPropertyDeletion;
};
- private:
- static constexpr uint32_t FlagsBits = 4;
- static constexpr uint32_t IndicesBits = 2;
-
- static constexpr uint32_t FlagsMask = (1 << FlagsBits) - 1;
-
- static constexpr uint32_t PropCountShift = IndicesBits + FlagsBits;
- static constexpr uint32_t PropCountBits = 32 - PropCountShift;
+ // We have a full u32 for this, but due to the way we compute the address
+ // of indices in the MacroAssembler, we want to have a few extra bits of
+  // wiggle room for shifting.
+ static constexpr uint32_t PropCountLimit = 1 << 30;
- public:
- static constexpr uint32_t IndicesShift = FlagsBits;
- static constexpr uint32_t IndicesMask = ((1 << IndicesBits) - 1)
- << IndicesShift;
-
- static constexpr uint32_t PropCountLimit = 1 << PropCountBits;
+ // If it's really important we can increase the size of protoShapeCount_,
+ // but increasing it to 32 bits would add another word.
+ static constexpr uint32_t ShapeCountLimit = 1 << 16;
private:
- // Stores Flags bits and indices state in the lower bits and the initial
- // property count above them.
- uint32_t flagsAndCount_ = 0;
-
#ifdef DEBUG
// If true, this iterator may contain indexed properties that came from
// objects on the prototype chain. This is used by certain debug assertions.
@@ -347,9 +311,8 @@ struct NativeIterator : public NativeIteratorListNode {
// No further fields appear after here *in NativeIterator*, but this class is
// always allocated with space tacked on immediately after |this| to store
- // shapes p to |shapesEnd_|, iterated property names after that up to
- // |propertiesEnd_|, and maybe PropertyIndex values up to |indices_end()|.
-
+ // propertyCount_ IteratorProperty values, optionally propertyCount_
+ // PropertyIndex values, and protoShapeCount_ GCPtr<Shape*> values.
public:
/**
* Initialize a NativeIterator properly allocated for |props.length()|
@@ -377,22 +340,30 @@ struct NativeIterator : public NativeIteratorListNode {
objectBeingIterated_ = nullptr;
}
- GCPtr<Shape*>* shapesBegin() const {
- static_assert(
- alignof(GCPtr<Shape*>) <= alignof(NativeIterator),
- "NativeIterator must be aligned to begin storing "
- "GCPtr<Shape*>s immediately after it with no required padding");
- const NativeIterator* immediatelyAfter = this + 1;
- auto* afterNonConst = const_cast<NativeIterator*>(immediatelyAfter);
- return reinterpret_cast<GCPtr<Shape*>*>(afterNonConst);
+ const GCPtr<Shape*>& objShape() const { return objShape_; }
+
+ GCPtr<Shape*>* protoShapesBegin(size_t numProperties) const {
+ uintptr_t raw = reinterpret_cast<uintptr_t>(this);
+ uintptr_t propertiesStart = raw + offsetOfFirstProperty();
+ uintptr_t propertiesEnd =
+ propertiesStart + numProperties * sizeof(IteratorProperty);
+ uintptr_t result = propertiesEnd;
+ if (flags_ & Flags::IndicesAllocated) {
+ result += numProperties * sizeof(PropertyIndex);
+ }
+ return reinterpret_cast<GCPtr<Shape*>*>(result);
}
- GCPtr<Shape*>* shapesEnd() const { return shapesEnd_; }
+ GCPtr<Shape*>* protoShapesBegin() const {
+ return protoShapesBegin(allocatedPropertyCount());
+ }
- uint32_t shapeCount() const {
- return mozilla::PointerRangeSize(shapesBegin(), shapesEnd());
+ GCPtr<Shape*>* protoShapesEnd() const {
+ return protoShapesBegin() + protoShapeCount_;
}
+ uint32_t protoShapeCount() const { return protoShapeCount_; }
+
IteratorProperty* propertiesBegin() const {
static_assert(
alignof(GCPtr<Shape*>) >= alignof(IteratorProperty),
@@ -406,37 +377,32 @@ struct NativeIterator : public NativeIteratorListNode {
"present, with no padding space required for correct "
"alignment");
- // We *could* just check the assertion below if we wanted, but the
- // incompletely-initialized NativeIterator case matters for so little
- // code that we prefer not imposing the condition-check on every single
- // user.
- MOZ_ASSERT(isInitialized(),
- "NativeIterator must be initialized, or else |shapesEnd_| "
- "isn't necessarily the start of properties and instead "
- "|propertyCursor_| is");
-
- return reinterpret_cast<IteratorProperty*>(shapesEnd_);
+ return reinterpret_cast<IteratorProperty*>(uintptr_t(this) + sizeof(*this));
}
- IteratorProperty* propertiesEnd() const { return propertiesEnd_; }
+ IteratorProperty* propertiesEnd() const {
+ return propertiesBegin() + propertyCount_;
+ }
- IteratorProperty* nextProperty() const { return propertyCursor_; }
+ IteratorProperty* nextProperty() const {
+ return propertiesBegin() + propertyCursor_;
+ }
PropertyIndex* indicesBegin() const {
// PropertyIndex must be able to be appear directly after the properties
// array, with no padding required for correct alignment.
static_assert(alignof(IteratorProperty) >= alignof(PropertyIndex));
- return reinterpret_cast<PropertyIndex*>(propertiesEnd_);
+ return reinterpret_cast<PropertyIndex*>(propertiesEnd());
}
PropertyIndex* indicesEnd() const {
- MOZ_ASSERT(indicesState() == NativeIteratorIndices::Valid);
- return indicesBegin() + numKeys() * sizeof(PropertyIndex);
+ MOZ_ASSERT(flags_ & Flags::IndicesAllocated);
+    return indicesBegin() + propertyCount_;
}
MOZ_ALWAYS_INLINE JS::Value nextIteratedValueAndAdvance() {
- while (propertyCursor_ < propertiesEnd_) {
- IteratorProperty& prop = *propertyCursor_;
+ while (propertyCursor_ < propertyCount_) {
+ IteratorProperty& prop = *nextProperty();
incCursor();
if (prop.deleted()) {
continue;
@@ -444,7 +410,6 @@ struct NativeIterator : public NativeIteratorListNode {
return JS::StringValue(prop.asString());
}
- MOZ_ASSERT(propertyCursor_ == propertiesEnd_);
return JS::MagicValue(JS_NO_ITER_VALUE);
}
@@ -467,17 +432,25 @@ struct NativeIterator : public NativeIteratorListNode {
// Note: JIT code inlines |propertyCursor_| resetting when an iterator
// ends: see |MacroAssembler::iteratorClose|.
- propertyCursor_ = propertiesBegin();
+ propertyCursor_ = 0;
}
bool previousPropertyWas(JS::Handle<JSLinearString*> str) {
MOZ_ASSERT(isInitialized());
- return propertyCursor_ > propertiesBegin() &&
- propertyCursor_[-1].asString() == str;
+ return propertyCursor_ > 0 &&
+ propertiesBegin()[propertyCursor_ - 1].asString() == str;
}
- size_t numKeys() const {
- return mozilla::PointerRangeSize(propertiesBegin(), propertiesEnd());
+ size_t numKeys() const { return propertyCount_; }
+
+ size_t allocatedPropertyCount() const {
+ // propertyCursor_ holds the number of allocated properties until
+ // the iterator is initialized. This is so we can know the proper layout
+ // of the trailing bytes if we trigger a GC inside the constructor.
+ if (!isInitialized()) {
+ return propertyCursor_;
+ }
+ return propertyCount_;
}
JSObject* iterObj() const { return iterObj_; }
@@ -489,7 +462,7 @@ struct NativeIterator : public NativeIteratorListNode {
HashNumber shapesHash() const { return shapesHash_; }
- bool isInitialized() const { return flags() & Flags::Initialized; }
+ bool isInitialized() const { return flags_ & Flags::Initialized; }
size_t allocationSize() const;
@@ -503,60 +476,30 @@ struct NativeIterator : public NativeIteratorListNode {
#endif
private:
- uint32_t flags() const { return flagsAndCount_ & FlagsMask; }
-
- NativeIteratorIndices indicesState() const {
- return NativeIteratorIndices((flagsAndCount_ & IndicesMask) >>
- IndicesShift);
- }
-
- uint32_t initialPropertyCount() const {
- return flagsAndCount_ >> PropCountShift;
- }
-
- static uint32_t initialFlagsAndCount(uint32_t count) {
- // No flags are initially set.
- MOZ_ASSERT(count < PropCountLimit);
- return count << PropCountShift;
- }
-
- void setFlags(uint32_t flags) {
- MOZ_ASSERT((flags & ~FlagsMask) == 0);
- flagsAndCount_ = (flagsAndCount_ & ~FlagsMask) | flags;
- }
-
- void setIndicesState(NativeIteratorIndices indices) {
- uint32_t indicesBits = uint32_t(indices) << IndicesShift;
- flagsAndCount_ = (flagsAndCount_ & ~IndicesMask) | indicesBits;
- }
-
- bool indicesAllocated() const {
- return indicesState() >= NativeIteratorIndices::Disabled;
- }
-
- void markInitialized() {
- MOZ_ASSERT(flags() == 0);
- setFlags(Flags::Initialized);
- }
+ bool indicesAllocated() const { return flags_ & Flags::IndicesAllocated; }
bool isUnlinked() const { return !prev_ && !next_; }
public:
+ bool indicesAvailable() const { return flags_ & Flags::IndicesAvailable; }
+
+ bool indicesSupported() const { return flags_ & Flags::IndicesSupported; }
+
// Whether this is the shared empty iterator object used for iterating over
// null/undefined.
bool isEmptyIteratorSingleton() const {
// Note: equivalent code is inlined in MacroAssembler::iteratorClose.
- bool res = flags() & Flags::IsEmptyIteratorSingleton;
+ bool res = flags_ & Flags::IsEmptyIteratorSingleton;
MOZ_ASSERT_IF(
- res, flags() == (Flags::Initialized | Flags::IsEmptyIteratorSingleton));
+ res, flags_ == (Flags::Initialized | Flags::IsEmptyIteratorSingleton));
MOZ_ASSERT_IF(res, !objectBeingIterated_);
- MOZ_ASSERT_IF(res, initialPropertyCount() == 0);
- MOZ_ASSERT_IF(res, shapeCount() == 0);
+ MOZ_ASSERT_IF(res, propertyCount_ == 0);
+ MOZ_ASSERT_IF(res, protoShapeCount_ == 0);
MOZ_ASSERT_IF(res, isUnlinked());
return res;
}
void markEmptyIteratorSingleton() {
- flagsAndCount_ |= Flags::IsEmptyIteratorSingleton;
+ flags_ |= Flags::IsEmptyIteratorSingleton;
// isEmptyIteratorSingleton() has various debug assertions.
MOZ_ASSERT(isEmptyIteratorSingleton());
@@ -565,39 +508,40 @@ struct NativeIterator : public NativeIteratorListNode {
bool isActive() const {
MOZ_ASSERT(isInitialized());
- return flags() & Flags::Active;
+ return flags_ & Flags::Active;
}
void markActive() {
MOZ_ASSERT(isInitialized());
MOZ_ASSERT(!isEmptyIteratorSingleton());
- flagsAndCount_ |= Flags::Active;
+ flags_ |= Flags::Active;
}
void markInactive() {
MOZ_ASSERT(isInitialized());
MOZ_ASSERT(!isEmptyIteratorSingleton());
- flagsAndCount_ &= ~Flags::Active;
+ flags_ &= ~Flags::Active;
}
bool isReusable() const {
MOZ_ASSERT(isInitialized());
- // Cached NativeIterators are reusable if they're not currently active
- // and their properties array hasn't been mutated, i.e. if only
- // |Flags::Initialized| is set. Using |Flags::NotReusable| to test
- // would also work, but this formulation is safer against memory
- // corruption.
- return flags() == Flags::Initialized;
+ if (!(flags_ & Flags::Initialized)) {
+ return false;
+ }
+ if (flags_ & Flags::Active) {
+ return false;
+ }
+ return true;
}
void markHasUnvisitedPropertyDeletion() {
MOZ_ASSERT(isInitialized());
MOZ_ASSERT(!isEmptyIteratorSingleton());
- flagsAndCount_ |= Flags::HasUnvisitedPropertyDeletion;
+ flags_ |= Flags::HasUnvisitedPropertyDeletion;
}
void unmarkHasUnvisitedPropertyDeletion() {
@@ -605,21 +549,13 @@ struct NativeIterator : public NativeIteratorListNode {
MOZ_ASSERT(!isEmptyIteratorSingleton());
MOZ_ASSERT(hasUnvisitedPropertyDeletion());
- flagsAndCount_ &= ~Flags::HasUnvisitedPropertyDeletion;
+ flags_ &= ~Flags::HasUnvisitedPropertyDeletion;
}
bool hasUnvisitedPropertyDeletion() const {
MOZ_ASSERT(isInitialized());
- return flags() & Flags::HasUnvisitedPropertyDeletion;
- }
-
- bool hasValidIndices() const {
- return indicesState() == NativeIteratorIndices::Valid;
- }
-
- bool indicesAvailableOnRequest() const {
- return indicesState() == NativeIteratorIndices::AvailableOnRequest;
+ return flags_ & Flags::HasUnvisitedPropertyDeletion;
}
// Indicates the native iterator may walk prototype properties.
@@ -627,16 +563,16 @@ struct NativeIterator : public NativeIteratorListNode {
// If we can use indices for this iterator, we know it doesn't have
// prototype properties, and so we use this as a check for prototype
// properties.
- return !hasValidIndices() && !indicesAvailableOnRequest();
+ return !indicesAvailable() && !indicesSupported();
}
void disableIndices() {
- // If we have allocated indices, set the state to Disabled.
- // This will ensure that we don't use them, but we still
- // free them correctly.
- if (indicesState() == NativeIteratorIndices::Valid) {
- setIndicesState(NativeIteratorIndices::Disabled);
- }
+ // Clear the IndicesAvailable flag so we won't use the indices on this
+ // iterator, and ensure IndicesSupported is cleared as well, so we don't
+ // re-request an iterator with indices. However, we leave the
+ // IndicesAllocated flag because we need to free them later, and skip them
+ // when looking for shapes.
+ flags_ &= ~(Flags::IndicesAvailable | Flags::IndicesSupported);
}
void link(NativeIteratorListNode* other) {
@@ -671,24 +607,28 @@ struct NativeIterator : public NativeIteratorListNode {
return offsetof(NativeIterator, objectBeingIterated_);
}
- static constexpr size_t offsetOfShapesEnd() {
- return offsetof(NativeIterator, shapesEnd_);
+ static constexpr size_t offsetOfProtoShapeCount() {
+ return offsetof(NativeIterator, protoShapeCount_);
}
static constexpr size_t offsetOfPropertyCursor() {
return offsetof(NativeIterator, propertyCursor_);
}
- static constexpr size_t offsetOfPropertiesEnd() {
- return offsetof(NativeIterator, propertiesEnd_);
+ static constexpr size_t offsetOfPropertyCount() {
+ return offsetof(NativeIterator, propertyCount_);
+ }
+
+ static constexpr size_t offsetOfFlags() {
+ return offsetof(NativeIterator, flags_);
}
- static constexpr size_t offsetOfFlagsAndCount() {
- return offsetof(NativeIterator, flagsAndCount_);
+ static constexpr size_t offsetOfObjectShape() {
+ return offsetof(NativeIterator, objShape_);
}
- static constexpr size_t offsetOfFirstShape() {
- // Shapes are stored directly after |this|.
+ static constexpr size_t offsetOfFirstProperty() {
+ // Properties are stored directly after |this|.
return sizeof(NativeIterator);
}
};
diff --git a/js/src/vm/Realm.h b/js/src/vm/Realm.h
@@ -215,13 +215,18 @@ class PropertyIteratorObject;
struct IteratorHashPolicy {
struct Lookup {
- Shape** shapes;
- size_t numShapes;
+ Shape* objShape;
+ Shape** protoShapes;
+ size_t numProtoShapes;
HashNumber shapesHash;
- Lookup(Shape** shapes, size_t numShapes, HashNumber shapesHash)
- : shapes(shapes), numShapes(numShapes), shapesHash(shapesHash) {
- MOZ_ASSERT(numShapes > 0);
+ Lookup(Shape* objShape, Shape** protoShapes, size_t numProtoShapes,
+ HashNumber shapesHash)
+ : objShape(objShape),
+ protoShapes(protoShapes),
+ numProtoShapes(numProtoShapes),
+ shapesHash(shapesHash) {
+ MOZ_ASSERT(objShape);
}
};
static HashNumber hash(const Lookup& lookup) { return lookup.shapesHash; }