Caches.h (20937B)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * vim: set ts=8 sts=2 et sw=2 tw=80: 3 * This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #ifndef vm_Caches_h 8 #define vm_Caches_h 9 10 #include "mozilla/Array.h" 11 #include "mozilla/MathAlgorithms.h" 12 #include "mozilla/Maybe.h" 13 #include "mozilla/MruCache.h" 14 #include "mozilla/UniquePtr.h" 15 16 #include "frontend/ScopeBindingCache.h" 17 #include "gc/Tracer.h" 18 #include "js/RootingAPI.h" 19 #include "js/TypeDecls.h" 20 #include "vm/JSScript.h" 21 #include "vm/Shape.h" 22 #include "vm/StringType.h" 23 24 namespace js { 25 26 struct EvalCacheEntry { 27 JSLinearString* str; 28 JSScript* script; 29 JSScript* callerScript; 30 jsbytecode* pc; 31 32 // We sweep this cache after a nursery collection to update entries with 33 // string keys that have been tenured. 34 // 35 // The entire cache is purged on a major GC, so we don't need to sweep it 36 // then. 
37 bool traceWeak(JSTracer* trc) { 38 MOZ_ASSERT(trc->kind() == JS::TracerKind::MinorSweeping); 39 return TraceManuallyBarrieredWeakEdge(trc, &str, "EvalCacheEntry::str"); 40 } 41 }; 42 43 struct EvalCacheLookup { 44 JSLinearString* str = nullptr; 45 JSScript* callerScript = nullptr; 46 MOZ_INIT_OUTSIDE_CTOR jsbytecode* pc = nullptr; 47 48 EvalCacheLookup() = default; 49 EvalCacheLookup(JSLinearString* str, JSScript* callerScript, jsbytecode* pc) 50 : str(str), callerScript(callerScript), pc(pc) {} 51 52 void trace(JSTracer* trc); 53 }; 54 55 struct EvalCacheHashPolicy { 56 using Lookup = EvalCacheLookup; 57 58 static HashNumber hash(const Lookup& l); 59 static bool match(const EvalCacheEntry& entry, const EvalCacheLookup& l); 60 }; 61 62 using EvalCache = 63 GCHashSet<EvalCacheEntry, EvalCacheHashPolicy, SystemAllocPolicy>; 64 65 class MegamorphicCacheEntry { 66 // Receiver object's shape. 67 Shape* shape_ = nullptr; 68 69 // The atom or symbol property being accessed. 70 PropertyKey key_; 71 72 // Slot offset and isFixedSlot flag of the data property. 73 TaggedSlotOffset slotOffset_; 74 75 // This entry is valid iff the generation matches the cache's generation. 76 uint16_t generation_ = 0; 77 78 // This encodes the number of hops on the prototype chain to get to the holder 79 // object, along with information about the kind of property. If the high bit 80 // is 0, the property is a data property. If the high bit is 1 and the value 81 // is <= MaxHopsForGetterProperty, the property is a getter. Otherwise, this 82 // is a sentinel value indicating a missing property lookup. 
83 uint8_t hopsAndKind_ = 0; 84 85 friend class MegamorphicCache; 86 87 public: 88 // A simple flag for the JIT to check which, if false, lets it know that it's 89 // just a data property N hops up the prototype chain 90 static constexpr uint8_t NonDataPropertyFlag = 128; 91 92 static constexpr uint8_t MaxHopsForGetterProperty = 253; 93 static constexpr uint8_t NumHopsForMissingProperty = 254; 94 static constexpr uint8_t NumHopsForMissingOwnProperty = 255; 95 96 static constexpr uint8_t MaxHopsForDataProperty = 127; 97 static constexpr uint8_t MaxHopsForAccessorProperty = 125; 98 99 void init(Shape* shape, PropertyKey key, uint16_t generation, uint8_t numHops, 100 TaggedSlotOffset slotOffset) { 101 shape_ = shape; 102 key_ = key; 103 slotOffset_ = slotOffset; 104 generation_ = generation; 105 hopsAndKind_ = numHops; 106 MOZ_ASSERT(hopsAndKind_ == numHops, "numHops must fit in hopsAndKind_"); 107 } 108 bool isMissingProperty() const { 109 return hopsAndKind_ == NumHopsForMissingProperty; 110 } 111 bool isMissingOwnProperty() const { 112 return hopsAndKind_ == NumHopsForMissingOwnProperty; 113 } 114 bool isAccessorProperty() const { 115 return hopsAndKind_ >= NonDataPropertyFlag and 116 hopsAndKind_ <= MaxHopsForGetterProperty; 117 } 118 bool isDataProperty() const { return !(hopsAndKind_ & NonDataPropertyFlag); } 119 uint16_t numHops() const { 120 if (isDataProperty()) { 121 return hopsAndKind_; 122 } else { 123 MOZ_ASSERT(isAccessorProperty()); 124 return hopsAndKind_ & ~NonDataPropertyFlag; 125 } 126 } 127 TaggedSlotOffset slotOffset() const { 128 MOZ_ASSERT(hopsAndKind_ <= MaxHopsForGetterProperty); 129 return slotOffset_; 130 } 131 132 static constexpr size_t offsetOfShape() { 133 return offsetof(MegamorphicCacheEntry, shape_); 134 } 135 136 static constexpr size_t offsetOfKey() { 137 return offsetof(MegamorphicCacheEntry, key_); 138 } 139 140 static constexpr size_t offsetOfGeneration() { 141 return offsetof(MegamorphicCacheEntry, generation_); 142 } 143 144 
static constexpr size_t offsetOfSlotOffset() { 145 return offsetof(MegamorphicCacheEntry, slotOffset_); 146 } 147 148 static constexpr size_t offsetOfHopsAndKind() { 149 return offsetof(MegamorphicCacheEntry, hopsAndKind_); 150 } 151 }; 152 153 // [SMDOC] Megamorphic Property Lookup Cache (MegamorphicCache) 154 // 155 // MegamorphicCache is a data structure used to speed up megamorphic property 156 // lookups from JIT code. The same cache is currently used for both GetProp and 157 // HasProp (in, hasOwnProperty) operations. 158 // 159 // This is implemented as a fixed-size array of entries. Lookups are performed 160 // based on the receiver object's Shape + PropertyKey. If found in the cache, 161 // the result of a lookup represents either: 162 // 163 // * A data property on the receiver or on its proto chain (stored as number of 164 // 'hops' up the proto chain + the slot of the data property). 165 // 166 // * A missing property on the receiver or its proto chain. 167 // 168 // * A missing property on the receiver, but it might exist on the proto chain. 169 // This lets us optimize hasOwnProperty better. 170 // 171 // Collisions are handled by simply overwriting the previous entry stored in the 172 // slot. This is sufficient to achieve a high hit rate on typical web workloads 173 // while ensuring cache lookups are always fast and simple. 174 // 175 // Lookups always check the receiver object's shape (ensuring the properties and 176 // prototype are unchanged). Because the cache also caches lookups on the proto 177 // chain, Watchtower is used to invalidate the cache when prototype objects are 178 // mutated. This is done by incrementing the cache's generation counter to 179 // invalidate all entries. 180 // 181 // The cache is also invalidated on each major GC. 
182 class MegamorphicCache { 183 public: 184 using Entry = MegamorphicCacheEntry; 185 186 static constexpr size_t NumEntries = 1024; 187 static constexpr uint8_t ShapeHashShift1 = mozilla::FloorLog2(alignof(Shape)); 188 static constexpr uint8_t ShapeHashShift2 = 189 ShapeHashShift1 + mozilla::FloorLog2(NumEntries); 190 191 static_assert(mozilla::IsPowerOfTwo(alignof(Shape)) && 192 mozilla::IsPowerOfTwo(NumEntries), 193 "FloorLog2 is exact because alignof(Shape) and NumEntries are " 194 "both powers of two"); 195 196 private: 197 mozilla::Array<Entry, NumEntries> entries_; 198 199 // Generation counter used to invalidate all entries. 200 uint16_t generation_ = 0; 201 202 // NOTE: this logic is mirrored in MacroAssembler::emitMegamorphicCacheLookup 203 Entry& getEntry(Shape* shape, PropertyKey key) { 204 static_assert(mozilla::IsPowerOfTwo(NumEntries), 205 "NumEntries must be a power-of-two for fast modulo"); 206 uintptr_t hash = uintptr_t(shape) >> ShapeHashShift1; 207 hash ^= uintptr_t(shape) >> ShapeHashShift2; 208 hash += HashAtomOrSymbolPropertyKey(key); 209 return entries_[hash % NumEntries]; 210 } 211 212 public: 213 void bumpGeneration() { 214 generation_++; 215 if (generation_ == 0) { 216 // Generation overflowed. Invalidate the whole cache. 
217 for (size_t i = 0; i < NumEntries; i++) { 218 entries_[i].shape_ = nullptr; 219 } 220 } 221 } 222 bool isValidForLookup(const Entry& entry, Shape* shape, PropertyKey key) { 223 return (entry.shape_ == shape && entry.key_ == key && 224 entry.generation_ == generation_); 225 } 226 bool lookup(Shape* shape, PropertyKey key, Entry** entryp) { 227 Entry& entry = getEntry(shape, key); 228 *entryp = &entry; 229 return isValidForLookup(entry, shape, key); 230 } 231 232 void initEntryForMissingProperty(Entry* entry, Shape* shape, 233 PropertyKey key) { 234 entry->init(shape, key, generation_, Entry::NumHopsForMissingProperty, 235 TaggedSlotOffset()); 236 } 237 void initEntryForMissingOwnProperty(Entry* entry, Shape* shape, 238 PropertyKey key) { 239 entry->init(shape, key, generation_, Entry::NumHopsForMissingOwnProperty, 240 TaggedSlotOffset()); 241 } 242 void initEntryForDataProperty(Entry* entry, Shape* shape, PropertyKey key, 243 size_t numHops, TaggedSlotOffset slotOffset) { 244 if (numHops > Entry::MaxHopsForDataProperty) { 245 return; 246 } 247 entry->init(shape, key, generation_, numHops, slotOffset); 248 } 249 void initEntryForAccessorProperty(Entry* entry, Shape* shape, PropertyKey key, 250 size_t numHops, 251 TaggedSlotOffset slotOffset) { 252 if (numHops > Entry::MaxHopsForAccessorProperty) { 253 return; 254 } 255 numHops |= MegamorphicCacheEntry::NonDataPropertyFlag; 256 entry->init(shape, key, generation_, numHops, slotOffset); 257 } 258 259 static constexpr size_t offsetOfEntries() { 260 return offsetof(MegamorphicCache, entries_); 261 } 262 263 static constexpr size_t offsetOfGeneration() { 264 return offsetof(MegamorphicCache, generation_); 265 } 266 }; 267 268 class MegamorphicSetPropCacheEntry { 269 Shape* beforeShape_ = nullptr; 270 Shape* afterShape_ = nullptr; 271 272 // The atom or symbol property being accessed. 273 PropertyKey key_; 274 275 // Slot offset and isFixedSlot flag of the data property. 
276 TaggedSlotOffset slotOffset_; 277 278 // If slots need to be grown, this is the new capacity we need. 279 uint16_t newCapacity_ = 0; 280 281 // This entry is valid iff the generation matches the cache's generation. 282 uint16_t generation_ = 0; 283 284 friend class MegamorphicSetPropCache; 285 286 public: 287 void init(Shape* beforeShape, Shape* afterShape, PropertyKey key, 288 uint16_t generation, TaggedSlotOffset slotOffset, 289 uint16_t newCapacity) { 290 beforeShape_ = beforeShape; 291 afterShape_ = afterShape; 292 key_ = key; 293 slotOffset_ = slotOffset; 294 newCapacity_ = newCapacity; 295 generation_ = generation; 296 } 297 TaggedSlotOffset slotOffset() const { return slotOffset_; } 298 Shape* afterShape() const { return afterShape_; } 299 300 static constexpr size_t offsetOfShape() { 301 return offsetof(MegamorphicSetPropCacheEntry, beforeShape_); 302 } 303 static constexpr size_t offsetOfAfterShape() { 304 return offsetof(MegamorphicSetPropCacheEntry, afterShape_); 305 } 306 307 static constexpr size_t offsetOfKey() { 308 return offsetof(MegamorphicSetPropCacheEntry, key_); 309 } 310 311 static constexpr size_t offsetOfNewCapacity() { 312 return offsetof(MegamorphicSetPropCacheEntry, newCapacity_); 313 } 314 315 static constexpr size_t offsetOfGeneration() { 316 return offsetof(MegamorphicSetPropCacheEntry, generation_); 317 } 318 319 static constexpr size_t offsetOfSlotOffset() { 320 return offsetof(MegamorphicSetPropCacheEntry, slotOffset_); 321 } 322 }; 323 324 class MegamorphicSetPropCache { 325 public: 326 using Entry = MegamorphicSetPropCacheEntry; 327 // We can get more hits if we increase this, but this seems to be around 328 // the sweet spot where we are getting most of the hits we would get with 329 // an infinitely sized cache 330 static constexpr size_t NumEntries = 1024; 331 static constexpr uint8_t ShapeHashShift1 = mozilla::FloorLog2(alignof(Shape)); 332 static constexpr uint8_t ShapeHashShift2 = 333 ShapeHashShift1 + 
mozilla::FloorLog2(NumEntries); 334 335 static_assert(mozilla::IsPowerOfTwo(alignof(Shape)) && 336 mozilla::IsPowerOfTwo(NumEntries), 337 "FloorLog2 is exact because alignof(Shape) and NumEntries are " 338 "both powers of two"); 339 340 private: 341 mozilla::Array<Entry, NumEntries> entries_; 342 343 // Generation counter used to invalidate all entries. 344 uint16_t generation_ = 0; 345 346 Entry& getEntry(Shape* beforeShape, PropertyKey key) { 347 static_assert(mozilla::IsPowerOfTwo(NumEntries), 348 "NumEntries must be a power-of-two for fast modulo"); 349 uintptr_t hash = uintptr_t(beforeShape) >> ShapeHashShift1; 350 hash ^= uintptr_t(beforeShape) >> ShapeHashShift2; 351 hash += HashAtomOrSymbolPropertyKey(key); 352 return entries_[hash % NumEntries]; 353 } 354 355 public: 356 void bumpGeneration() { 357 generation_++; 358 if (generation_ == 0) { 359 // Generation overflowed. Invalidate the whole cache. 360 for (size_t i = 0; i < NumEntries; i++) { 361 entries_[i].beforeShape_ = nullptr; 362 } 363 } 364 } 365 void set(Shape* beforeShape, Shape* afterShape, PropertyKey key, 366 TaggedSlotOffset slotOffset, uint32_t newCapacity) { 367 uint16_t newSlots = (uint16_t)newCapacity; 368 if (newSlots != newCapacity) { 369 return; 370 } 371 Entry& entry = getEntry(beforeShape, key); 372 entry.init(beforeShape, afterShape, key, generation_, slotOffset, newSlots); 373 } 374 375 #ifdef DEBUG 376 bool lookup(Shape* beforeShape, PropertyKey key, Entry** entryp) { 377 Entry& entry = getEntry(beforeShape, key); 378 *entryp = &entry; 379 return (entry.beforeShape_ == beforeShape && entry.key_ == key && 380 entry.generation_ == generation_); 381 } 382 #endif 383 384 static constexpr size_t offsetOfEntries() { 385 return offsetof(MegamorphicSetPropCache, entries_); 386 } 387 388 static constexpr size_t offsetOfGeneration() { 389 return offsetof(MegamorphicSetPropCache, generation_); 390 } 391 }; 392 393 // Cache for AtomizeString, mapping JSString* or JS::Latin1Char* to the 394 // 
// corresponding JSAtom*. The cache has three different optimizations:
//
// * The two most recent lookups are cached. This has a hit rate of 30-65% on
//   typical web workloads.
//
// * MruCache is used for short JS::Latin1Char strings.
//
// * For longer strings, there's also a JSLinearString* => JSAtom* HashMap,
//   because hashing the string characters repeatedly can be slow.
//   This map is also used by nursery GC to de-duplicate strings to atoms.
//
// This cache is purged on minor and major GC.
class StringToAtomCache {
 public:
  // One of the most-recently-used (string, atom) pairs that lookup() checks
  // first. string == nullptr means the slot is empty.
  struct LastLookup {
    JSString* string = nullptr;
    JSAtom* atom = nullptr;

    // Field offsets for JIT code that probes the last-lookup entries inline.
    static constexpr size_t offsetOfString() {
      return offsetof(LastLookup, string);
    }

    static constexpr size_t offsetOfAtom() {
      return offsetof(LastLookup, atom);
    }
  };
  static constexpr size_t NumLastLookups = 2;

  // Key wrapping a Latin-1 character buffer. The hash is computed eagerly in
  // the constructor so it can be reused for both the lookup and a later
  // insertion (see lookupWithRopeChars/maybePut).
  struct AtomTableKey {
    explicit AtomTableKey(const JS::Latin1Char* str, size_t len)
        : string_(str), length_(len) {
      hash_ = mozilla::HashString(string_, length_);
    }

    const JS::Latin1Char* string_;
    size_t length_;
    uint32_t hash_;
  };

 private:
  // MRU cache keyed by the characters of short Latin-1 strings.
  struct RopeAtomCache
      : public mozilla::MruCache<AtomTableKey, JSAtom*, RopeAtomCache> {
    static HashNumber Hash(const AtomTableKey& key) { return key.hash_; }
    static bool Match(const AtomTableKey& key, const JSAtom* val) {
      JS::AutoCheckCannotGC nogc;
      return val->length() == key.length_ &&
             EqualChars(key.string_, val->latin1Chars(nogc), key.length_);
    }
  };
  using Map =
      HashMap<JSString*, JSAtom*, PointerHasher<JSString*>, SystemAllocPolicy>;
  Map map_;
  mozilla::Array<LastLookup, NumLastLookups> lastLookups_;
  RopeAtomCache ropeCharCache_;

 public:
  // Don't use the HashMap for short strings. Hashing them is less expensive.
  // But the length needs to be long enough to cover common identifiers in
  // React.
  static constexpr size_t MinStringLength = 39;

  // Slow-path map lookup. Only valid for strings that were flagged as present
  // in the map (see maybePut). Returns nullptr on a miss.
  JSAtom* lookupInMap(JSString* s) const {
    MOZ_ASSERT(s->inStringToAtomCache());
    MOZ_ASSERT(s->length() >= MinStringLength);

    auto p = map_.lookup(s);
    JSAtom* atom = p ? p->value() : nullptr;
    return atom;
  }

  // Fast-path lookup: check the last-lookup entries first, then fall back to
  // the map. Returns nullptr on a cache miss.
  MOZ_ALWAYS_INLINE JSAtom* lookup(JSString* s) const {
    MOZ_ASSERT(!s->isAtom());
    for (const LastLookup& entry : lastLookups_) {
      if (entry.string == s) {
        return entry.atom;
      }
    }

    if (!s->inStringToAtomCache()) {
      // The flag is authoritative: a string without it cannot be in the map.
      MOZ_ASSERT(!map_.lookup(s));
      return nullptr;
    }

    return lookupInMap(s);
  }

  // Look up a short Latin-1 character buffer in the rope-chars cache. The
  // computed key is left in |key| (even on a miss) so the caller can pass it
  // back to maybePut after atomizing.
  MOZ_ALWAYS_INLINE JSAtom* lookupWithRopeChars(
      const JS::Latin1Char* str, size_t len,
      mozilla::Maybe<AtomTableKey>& key) {
    MOZ_ASSERT(len < MinStringLength);
    key.emplace(str, len);
    if (auto p = ropeCharCache_.Lookup(key.value())) {
      return p.Data();
    }
    return nullptr;
  }

  static constexpr size_t offsetOfLastLookups() {
    return offsetof(StringToAtomCache, lastLookups_);
  }

  // Record s => atom in the applicable tiers: the rope-chars cache (if |key|
  // was filled in by lookupWithRopeChars), the last-lookup entries (newest
  // first), and — for strings of at least MinStringLength — the map. An OOM
  // while inserting into the map is ignored; the cache is best-effort.
  void maybePut(JSString* s, JSAtom* atom, mozilla::Maybe<AtomTableKey>& key) {
    if (key.isSome()) {
      ropeCharCache_.Put(key.value(), atom);
    }

    // Shift the older entries down one slot and insert the new pair at [0].
    for (size_t i = NumLastLookups - 1; i > 0; i--) {
      lastLookups_[i] = lastLookups_[i - 1];
    }
    lastLookups_[0].string = s;
    lastLookups_[0].atom = atom;

    if (s->length() < MinStringLength) {
      return;
    }
    if (!map_.putNew(s, atom)) {
      return;
    }
    // Only set the flag once the map insertion succeeded, so the flag implies
    // map membership.
    s->setInStringToAtomCache();
  }

  // Drop all three tiers of cached data (called on GC).
  void purge() {
    map_.clearAndCompact();
    for (LastLookup& entry : lastLookups_) {
      entry.string = nullptr;
      entry.atom = nullptr;
    }

    ropeCharCache_.Clear();
  }
};

#ifdef MOZ_EXECUTION_TRACING

// Holds a handful of caches used for tracing JS execution. These effectively
// hold onto IDs which let the tracer know that it has already recorded the
// entity in question. They need to be cleared on a compacting GC since they
// are keyed by pointers. However the IDs must continue incrementing until
// the tracer is turned off since entries containing the IDs in question may
// linger in the ExecutionTracer's buffer through a GC.
class TracingCaches {
  uint32_t shapeId_ = 0;
  uint32_t atomId_ = 0;
  using TracingPointerCache =
      HashMap<uintptr_t, uint32_t, DefaultHasher<uintptr_t>, SystemAllocPolicy>;
  TracingPointerCache shapes_;
  TracingPointerCache atoms_;

  // NOTE: this cache does not need to be cleared on compaction, but still
  // needs to be cleared at the end of tracing.
  using TracingU32Set =
      HashSet<uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
  TracingU32Set scriptSourcesSeen_;

 public:
  // Clear only the pointer-keyed caches; the ID counters keep incrementing
  // (see the class comment for why).
  void clearOnCompaction() {
    atoms_.clear();
    shapes_.clear();
  }

  // Full reset, including the ID counters.
  void clearAll() {
    shapeId_ = 0;
    atomId_ = 0;
    scriptSourcesSeen_.clear();
    atoms_.clear();
    shapes_.clear();
  }

  enum class GetOrPutResult {
    OOM,
    NewlyAdded,
    WasPresent,
  };

  // Store the ID for |atom| in *id, assigning the next free ID on first
  // sight. On OOM, *id has been consumed but the entry is not recorded.
  GetOrPutResult getOrPutAtom(JSAtom* atom, uint32_t* id) {
    TracingPointerCache::AddPtr p =
        atoms_.lookupForAdd(reinterpret_cast<uintptr_t>(atom));
    if (p) {
      *id = p->value();
      return GetOrPutResult::WasPresent;
    }
    *id = atomId_++;
    if (!atoms_.add(p, reinterpret_cast<uintptr_t>(atom), *id)) {
      return GetOrPutResult::OOM;
    }
    return GetOrPutResult::NewlyAdded;
  }

  // Store the ID for |shape| in *id, assigning the next free ID on first
  // sight. Mirrors getOrPutAtom.
  GetOrPutResult getOrPutShape(Shape* shape, uint32_t* id) {
    TracingPointerCache::AddPtr p =
        shapes_.lookupForAdd(reinterpret_cast<uintptr_t>(shape));
    if (p) {
      *id = p->value();
      return GetOrPutResult::WasPresent;
    }
    *id = shapeId_++;
    if (!shapes_.add(p, reinterpret_cast<uintptr_t>(shape), *id)) {
      return GetOrPutResult::OOM;
    }
    return GetOrPutResult::NewlyAdded;
  }

  // NOTE: scriptSourceId is js::ScriptSource::id value.
  GetOrPutResult putScriptSourceIfMissing(uint32_t scriptSourceId) {
    TracingU32Set::AddPtr p = scriptSourcesSeen_.lookupForAdd(scriptSourceId);
    if (p) {
      return GetOrPutResult::WasPresent;
    }
    if (!scriptSourcesSeen_.add(p, scriptSourceId)) {
      return GetOrPutResult::OOM;
    }
    return GetOrPutResult::NewlyAdded;
  }
};

#endif /* MOZ_EXECUTION_TRACING */

// Per-runtime collection of the caches above, plus the hooks the GC uses to
// sweep or purge them.
class RuntimeCaches {
 public:
  MegamorphicCache megamorphicCache;
  UniquePtr<MegamorphicSetPropCache> megamorphicSetPropCache;
  UncompressedSourceCache uncompressedSourceCache;
  EvalCache evalCache;
  StringToAtomCache stringToAtomCache;

#ifdef MOZ_EXECUTION_TRACING
  TracingCaches tracingCaches;
#endif

  // Delazification: Cache binding for runtime objects which are used during
  // delazification to quickly resolve NameLocation of bindings without linearly
  // iterating over the list of bindings.
  frontend::RuntimeScopeBindingCache scopeCache;

  // Update eval cache entries whose string keys were tenured by a nursery
  // collection (see EvalCacheEntry::traceWeak).
  void sweepAfterMinorGC(JSTracer* trc) { evalCache.traceWeak(trc); }
#ifdef JSGC_HASH_TABLE_CHECKS
  void checkEvalCacheAfterMinorGC();
#endif

  // Purge caches that hold (or are keyed by) pointers a compacting GC may
  // move. Note the megamorphic caches are invalidated via their generation
  // counters rather than cleared.
  void purgeForCompaction() {
    evalCache.clear();
    stringToAtomCache.purge();
    megamorphicCache.bumpGeneration();
    if (megamorphicSetPropCache) {
      // MegamorphicSetPropCache can be null if we failed out of
      // JSRuntime::init. We will then try to destroy the runtime which will
      // do a GC and land us here.
      megamorphicSetPropCache->bumpGeneration();
    }
    scopeCache.purge();
#ifdef MOZ_EXECUTION_TRACING
    tracingCaches.clearOnCompaction();
#endif
  }

  // Full purge: everything purgeForCompaction drops, plus the uncompressed
  // source cache.
  void purge() {
    purgeForCompaction();
    uncompressedSourceCache.purge();
  }
};

}  // namespace js

#endif /* vm_Caches_h */