tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

StubFolding.cpp (25119B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/StubFolding.h"
      8 
      9 #include "mozilla/Maybe.h"
     10 
     11 #include "gc/GC.h"
     12 #include "jit/BaselineCacheIRCompiler.h"
     13 #include "jit/BaselineIC.h"
     14 #include "jit/CacheIR.h"
     15 #include "jit/CacheIRCloner.h"
     16 #include "jit/CacheIRCompiler.h"
     17 #include "jit/CacheIRSpewer.h"
     18 #include "jit/CacheIRWriter.h"
     19 #include "jit/JitScript.h"
     20 #include "jit/ShapeList.h"
     21 
     22 #include "vm/List-inl.h"
     23 
     24 using namespace js;
     25 using namespace js::jit;
     26 
     27 static bool TryFoldingGuardShapes(JSContext* cx, ICFallbackStub* fallback,
     28                                  JSScript* script, ICScript* icScript) {
         // Scan the CacheIR stub chain attached to |fallback| and, when the
         // stubs differ only in one GuardShape shape field (and optionally
         // the RawInt32 slot offset that follows it), replace the whole
         // chain with a single folded stub. Returns false only on
         // unrecoverable OOM (shape-list creation or stub attachment);
         // returning true means "done", whether or not folding happened.
     29  // Try folding similar stubs with GuardShapes
     30  // into GuardMultipleShapes or GuardMultipleShapesToOffset
     31 
     32  ICEntry* icEntry = icScript->icEntryForStub(fallback);
     33  ICStub* entryStub = icEntry->firstStub();
     34  ICCacheIRStub* firstStub = entryStub->toCacheIRStub();
     35 
     36  // The caller guarantees that there are at least two stubs.
     37  MOZ_ASSERT(entryStub != fallback);
     38  MOZ_ASSERT(!firstStub->next()->isFallback());
     39 
     40  const uint8_t* firstStubData = firstStub->stubDataStart();
     41  const CacheIRStubInfo* stubInfo = firstStub->stubInfo();
     42 
     43  // Check to see if:
     44  //   a) all of the stubs in this chain have the exact same code.
     45  //   b) all of the stubs have the same stub field data, except for a single
     46  //      GuardShape (and/or consecutive RawInt32) where they differ.
     47  //   c) at least one stub after the first has a non-zero entry count.
     48  //   d) All shapes in the GuardShape have the same realm.
     49  //
     50  // If all of these conditions hold, then we generate a single stub
     51  // that covers all the existing cases by
     52  // 1) replacing GuardShape with GuardMultipleShapes.
     53  // 2) replacing Load/Store with equivalent LoadToOffset/StoreToOffset
     54 
     55  uint32_t numActive = 0;
     56  mozilla::Maybe<uint32_t> foldableShapeOffset;
     57  mozilla::Maybe<uint32_t> foldableOffsetOffset;
     58  GCVector<Value, 8> shapeList(cx);
     59  GCVector<Value, 8> offsetList(cx);
     60 
     61  // Helper function: Keep list of different shapes.
     62  // Can fail on OOM or for cross-realm shapes.
     63  // Returns true if the shape was successfully added to the list, and false
     64  // (with no pending exception) otherwise.
     65  auto addShape = [&shapeList, cx](uintptr_t rawShape) -> bool {
     66    Shape* shape = reinterpret_cast<Shape*>(rawShape);
     67 
     68    // Only add same realm shapes.
     69    if (shape->realm() != cx->realm()) {
     70      return false;
     71    }
     72 
     73    gc::ReadBarrier(shape);
     74 
     75    if (!shapeList.append(PrivateValue(shape))) {
     76      cx->recoverFromOutOfMemory();
     77      return false;
     78    }
     79    return true;
     80  };
     81 
     82  // Helper function: Keep list of "possible" different offsets (slotOffset).
     83  // At this stage we don't know if they differ. Therefore only keep track
     84  // of the first offset until we see a different offset and fill list equal to
     85  // shapeList if that happens.
     86  auto lazyAddOffset = [&offsetList, &shapeList, cx](uintptr_t slotOffset) {
     87    Value v = PrivateUint32Value(static_cast<uint32_t>(slotOffset));
     88    if (offsetList.length() == 1) {
     89      if (v == offsetList[0]) return true;
     90 
             // A second, distinct offset was seen: backfill one copy of the
             // first offset per already-collected shape so that offsetList
             // stays index-aligned with shapeList (the current stub's offset
             // is appended below).
     91      while (offsetList.length() + 1 < shapeList.length()) {
     92        if (!offsetList.append(offsetList[0])) {
     93          cx->recoverFromOutOfMemory();
     94          return false;
     95        }
     96      }
     97    }
     98 
     99    if (!offsetList.append(v)) {
    100      cx->recoverFromOutOfMemory();
    101      return false;
    102    }
    103    return true;
    104  };
    105 
    106 #ifdef JS_JITSPEW
    107  JitSpew(JitSpew_StubFolding, "Trying to fold stubs at offset %u @ %s:%u:%u",
    108          fallback->pcOffset(), script->filename(), script->lineno(),
    109          script->column().oneOriginValue());
    110 
    111  if (JitSpewEnabled(JitSpew_StubFoldingDetails)) {
    112    Fprinter& printer(JitSpewPrinter());
    113    uint32_t i = 0;
    114    for (ICCacheIRStub* stub = firstStub; stub; stub = stub->nextCacheIR()) {
    115      printer.printf("- stub %d (enteredCount: %d)\n", i, stub->enteredCount());
    116 
    117 #  ifdef JS_CACHEIR_SPEW
    118      ICCacheIRStub* cache_stub = stub->toCacheIRStub();
    119      SpewCacheIROps(printer, "  ", cache_stub->stubInfo());
    120 #  endif
    121      i++;
    122    }
    123  }
    124 #endif
    125 
    126  // Find the offset of the first Shape that differs.
    127  // Also see if the next field is RawInt32, which is
    128  // the case for a Fixed/Dynamic slot if it follows the ShapeGuard.
    129  for (ICCacheIRStub* other = firstStub->nextCacheIR(); other;
    130       other = other->nextCacheIR()) {
    131    // Verify that the stubs share the same code.
    132    if (other->stubInfo() != stubInfo) {
    133      return true;
    134    }
    135 
    136    if (other->enteredCount() > 0) {
    137      numActive++;
    138    }
    139 
    140    if (foldableShapeOffset.isSome()) {
    141      // Already found.
    142      // Continue through all stubs to run above code.
    143      continue;
    144    }
    145 
    146    const uint8_t* otherStubData = other->stubDataStart();
    147    uint32_t fieldIndex = 0;
    148    size_t offset = 0;
    149    while (stubInfo->fieldType(fieldIndex) != StubField::Type::Limit) {
    150      StubField::Type fieldType = stubInfo->fieldType(fieldIndex);
    151 
    152      // Continue if the fields have same value.
    153      if (StubField::sizeIsInt64(fieldType)) {
    154        if (stubInfo->getStubRawInt64(firstStubData, offset) ==
    155            stubInfo->getStubRawInt64(otherStubData, offset)) {
    156          offset += StubField::sizeInBytes(fieldType);
    157          fieldIndex++;
    158          continue;
    159        }
    160      } else {
    161        MOZ_ASSERT(StubField::sizeIsWord(fieldType));
    162        if (stubInfo->getStubRawWord(firstStubData, offset) ==
    163            stubInfo->getStubRawWord(otherStubData, offset)) {
    164          offset += StubField::sizeInBytes(fieldType);
    165          fieldIndex++;
    166          continue;
    167        }
    168      }
    169 
    170      // Early abort if it is a non-shape field that differs.
    171      if (fieldType != StubField::Type::WeakShape) {
    172        return true;
    173      }
    174 
    175      // Save the offset
    176      foldableShapeOffset.emplace(offset);
    177 
    178      // Test if the consecutive field is potentially Load{Fixed|Dynamic}Slot
    179      offset += StubField::sizeInBytes(fieldType);
    180      fieldIndex++;
    181      if (stubInfo->fieldType(fieldIndex) == StubField::Type::RawInt32) {
    182        foldableOffsetOffset.emplace(offset);
    183      }
    184 
    185      break;
    186    }
    187  }
    188 
         // No differing WeakShape field was found anywhere in the chain, so
         // there is nothing to fold.
    189  if (foldableShapeOffset.isNothing()) {
    190    return true;
    191  }
    192 
         // Condition c): require at least one stub after the first to have
         // actually been entered before paying for folding.
    193  if (numActive == 0) {
    194    return true;
    195  }
    196 
    197  // Make sure the shape and offset is the only value that differ.
    198  // Collect the shape and offset values at the same time.
    199  for (ICCacheIRStub* stub = firstStub; stub; stub = stub->nextCacheIR()) {
    200    const uint8_t* stubData = stub->stubDataStart();
    201    uint32_t fieldIndex = 0;
    202    size_t offset = 0;
    203 
    204    while (stubInfo->fieldType(fieldIndex) != StubField::Type::Limit) {
    205      StubField::Type fieldType = stubInfo->fieldType(fieldIndex);
    206      if (offset == *foldableShapeOffset) {
    207        // Save the shapes of all stubs.
    208        MOZ_ASSERT(fieldType == StubField::Type::WeakShape);
    209        uintptr_t raw = stubInfo->getStubRawWord(stubData, offset);
    210        if (!addShape(raw)) {
    211          return true;
    212        }
    213      } else if (foldableOffsetOffset.isSome() &&
    214                 offset == *foldableOffsetOffset) {
    215        // Save the offsets of all stubs.
    216        MOZ_ASSERT(fieldType == StubField::Type::RawInt32);
    217        uintptr_t raw = stubInfo->getStubRawWord(stubData, offset);
    218        if (!lazyAddOffset(raw)) {
    219          return true;
    220        }
    221      } else {
    222        // Check all other fields are the same.
    223        if (StubField::sizeIsInt64(fieldType)) {
    224          if (stubInfo->getStubRawInt64(firstStubData, offset) !=
    225              stubInfo->getStubRawInt64(stubData, offset)) {
    226            return true;
    227          }
    228        } else {
    229          MOZ_ASSERT(StubField::sizeIsWord(fieldType));
    230          if (stubInfo->getStubRawWord(firstStubData, offset) !=
    231              stubInfo->getStubRawWord(stubData, offset)) {
    232            return true;
    233          }
    234        }
    235      }
    236 
    237      offset += StubField::sizeInBytes(fieldType);
    238      fieldIndex++;
    239    }
    240  }
    241 
    242  // Clone the CacheIR and replace
    243  // - specific GuardShape with GuardMultipleShapes.
    244  // or
    245  // (multiple distinct values in offsetList)
    246  // - specific GuardShape with GuardMultipleShapesToOffset.
    247  // - subsequent Load / Store with LoadToOffset / StoreToOffset
    248  CacheIRWriter writer(cx);
    249  CacheIRReader reader(stubInfo);
    250  CacheIRCloner cloner(firstStub);
    251  bool hasSlotOffsets = offsetList.length() > 1;
    252 
    253  if (JitOptions.disableStubFoldingLoadsAndStores && hasSlotOffsets) {
    254    return true;
    255  }
    256 
    257  // Initialize the operands.
    258  CacheKind cacheKind = stubInfo->kind();
    259  for (uint32_t i = 0; i < NumInputsForCacheKind(cacheKind); i++) {
    260    writer.setInputOperandId(i);
    261  }
    262 
    263  // Create the shapeList to bake in the new stub.
    264  Rooted<ListObject*> shapeObj(cx);
    265  {
    266    gc::AutoSuppressGC suppressGC(cx);
    267 
    268    if (!hasSlotOffsets) {
    269      shapeObj.set(ShapeListObject::create(cx));
    270    } else {
    271      shapeObj.set(ShapeListWithOffsetsObject::create(cx));
    272    }
    273 
    274    if (!shapeObj) {
    275      return false;
    276    }
    277 
    278    MOZ_ASSERT_IF(hasSlotOffsets, shapeList.length() == offsetList.length());
    279 
    280    for (uint32_t i = 0; i < shapeList.length(); i++) {
    281      if (!shapeObj->append(cx, shapeList[i])) {
    282        return false;
    283      }
    284 
    285      if (hasSlotOffsets) {
    286        if (!shapeObj->append(cx, offsetList[i])) {
    287          return false;
    288        }
    289      }
    290 
    291      MOZ_ASSERT(static_cast<Shape*>(shapeList[i].toPrivate())->realm() ==
    292                 shapeObj->realm());
    293    }
    294  }
    295 
         // Rewrite the first stub's CacheIR, swapping the foldable GuardShape
         // for a multi-shape guard (and, when offsets differ, swapping the
         // matching loads/stores for their FromOffset variants).
    296  mozilla::Maybe<Int32OperandId> offsetId;
    297  bool shapeSuccess = false;
    298  bool offsetSuccess = false;
    299  while (reader.more()) {
    300    CacheOp op = reader.readOp();
    301    switch (op) {
    302      case CacheOp::GuardShape: {
    303        auto [objId, shapeOffset] = reader.argsForGuardShape();
    304        if (shapeOffset != *foldableShapeOffset) {
    305          // Unrelated GuardShape.
    306          WeakHeapPtr<Shape*>& ptr =
    307              stubInfo->getStubField<StubField::Type::WeakShape>(firstStub,
    308                                                                 shapeOffset);
    309          writer.guardShape(objId, ptr.unbarrieredGet());
    310          break;
    311        }
    312 
    313        if (hasSlotOffsets) {
    314          offsetId.emplace(writer.guardMultipleShapesToOffset(objId, shapeObj));
    315        } else {
    316          writer.guardMultipleShapes(objId, shapeObj);
    317        }
    318        shapeSuccess = true;
    319        break;
    320      }
    321      case CacheOp::LoadFixedSlotResult: {
    322        auto [objId, offsetOffset] = reader.argsForLoadFixedSlotResult();
    323        if (!hasSlotOffsets || offsetOffset != *foldableOffsetOffset) {
    324          // Unrelated LoadFixedSlotResult
    325          uint32_t offset = stubInfo->getStubRawWord(firstStub, offsetOffset);
    326          writer.loadFixedSlotResult(objId, offset);
    327          break;
    328        }
    329 
    330        MOZ_ASSERT(offsetId.isSome());
    331        writer.loadFixedSlotFromOffsetResult(objId, offsetId.value());
    332        offsetSuccess = true;
    333        break;
    334      }
    335      case CacheOp::StoreFixedSlot: {
    336        auto [objId, offsetOffset, rhsId] = reader.argsForStoreFixedSlot();
    337        if (!hasSlotOffsets || offsetOffset != *foldableOffsetOffset) {
    338          // Unrelated StoreFixedSlot
    339          uint32_t offset = stubInfo->getStubRawWord(firstStub, offsetOffset);
    340          writer.storeFixedSlot(objId, offset, rhsId);
    341          break;
    342        }
    343 
    344        MOZ_ASSERT(offsetId.isSome());
    345        writer.storeFixedSlotFromOffset(objId, offsetId.value(), rhsId);
    346        offsetSuccess = true;
    347        break;
    348      }
    349      case CacheOp::StoreDynamicSlot: {
    350        auto [objId, offsetOffset, rhsId] = reader.argsForStoreDynamicSlot();
    351        if (!hasSlotOffsets || offsetOffset != *foldableOffsetOffset) {
    352          // Unrelated StoreDynamicSlot
    353          uint32_t offset = stubInfo->getStubRawWord(firstStub, offsetOffset);
    354          writer.storeDynamicSlot(objId, offset, rhsId);
    355          break;
    356        }
    357 
    358        MOZ_ASSERT(offsetId.isSome());
    359        writer.storeDynamicSlotFromOffset(objId, offsetId.value(), rhsId);
    360        offsetSuccess = true;
    361        break;
    362      }
    363      case CacheOp::LoadDynamicSlotResult: {
    364        auto [objId, offsetOffset] = reader.argsForLoadDynamicSlotResult();
    365        if (!hasSlotOffsets || offsetOffset != *foldableOffsetOffset) {
    366          // Unrelated LoadDynamicSlotResult
    367          uint32_t offset = stubInfo->getStubRawWord(firstStub, offsetOffset);
    368          writer.loadDynamicSlotResult(objId, offset);
    369          break;
    370        }
    371 
    372        MOZ_ASSERT(offsetId.isSome());
    373        writer.loadDynamicSlotFromOffsetResult(objId, offsetId.value());
    374        offsetSuccess = true;
    375        break;
    376      }
    377      default:
    378        cloner.cloneOp(op, reader, writer);
    379        break;
    380    }
    381  }
    382 
    383  if (!shapeSuccess) {
    384    // If the shape field that differed was not part of a GuardShape,
    385    // we can't fold these stubs together.
    386    JitSpew(JitSpew_StubFolding,
    387            "Foldable shape field at offset %u was not a GuardShape "
    388            "(icScript: %p) with %zu shapes (%s:%u:%u)",
    389            fallback->pcOffset(), icScript, shapeList.length(),
    390            script->filename(), script->lineno(),
    391            script->column().oneOriginValue());
    392    return true;
    393  }
    394 
    395  if (hasSlotOffsets && !offsetSuccess) {
    396    // If we found a differing offset field but it was not part of the
    397    // Load{Fixed | Dynamic}SlotResult then we can't fold these stubs
    398    // together.
    399    JitSpew(JitSpew_StubFolding,
    400            "Failed to fold GuardShape into GuardMultipleShapesToOffset at "
    401            "offset %u (icScript: %p) with %zu shapes (%s:%u:%u)",
    402            fallback->pcOffset(), icScript, shapeList.length(),
    403            script->filename(), script->lineno(),
    404            script->column().oneOriginValue());
    405    return true;
    406  }
    407 
    408  // Replace the existing stubs with the new folded stub.
    409  fallback->discardStubs(cx->zone(), icEntry);
    410 
    411  ICAttachResult result = AttachBaselineCacheIRStub(
    412      cx, writer, cacheKind, script, icScript, fallback, "StubFold");
    413  if (result == ICAttachResult::OOM) {
    414    ReportOutOfMemory(cx);
    415    return false;
    416  }
    417  MOZ_ASSERT(result == ICAttachResult::Attached);
    418 
    419  JitSpew(JitSpew_StubFolding,
    420          "Folded stub at offset %u (icScript: %p) with %zu shapes (%s:%u:%u)",
    421          fallback->pcOffset(), icScript, shapeList.length(),
    422          script->filename(), script->lineno(),
    423          script->column().oneOriginValue());
    424 
    425 #ifdef JS_JITSPEW
    426  if (JitSpewEnabled(JitSpew_StubFoldingDetails)) {
    427    ICStub* newEntryStub = icEntry->firstStub();
    428 
    429    Fprinter& printer(JitSpewPrinter());
    430    printer.printf("- stub 0 (enteredCount: %d)\n",
    431                   newEntryStub->enteredCount());
    432 #  ifdef JS_CACHEIR_SPEW
    433    ICCacheIRStub* newStub = newEntryStub->toCacheIRStub();
    434    SpewCacheIROps(printer, "  ", newStub->stubInfo());
    435 #  endif
    436  }
    437 #endif
    438 
    439  fallback->setMayHaveFoldedStub();
    440 
    441  return true;
    442 }
    443 
    444 bool js::jit::TryFoldingStubs(JSContext* cx, ICFallbackStub* fallback,
    445                              JSScript* script, ICScript* icScript) {
         // Public entry point for stub folding: bail out early when folding
         // is disabled by JitOptions or the IC chain holds fewer than two
         // CacheIR stubs, then attempt GuardShape folding. Returns false
         // only when TryFoldingGuardShapes reports unrecoverable OOM.
    446  ICEntry* icEntry = icScript->icEntryForStub(fallback);
    447  ICStub* entryStub = icEntry->firstStub();
    448 
    449  if (JitOptions.disableStubFolding) {
    450    return true;
    451  }
    452 
    453  // Don't fold unless there are at least two stubs.
    454  if (entryStub == fallback) {
    455    return true;
    456  }
    457 
    458  ICCacheIRStub* firstStub = entryStub->toCacheIRStub();
    459  if (firstStub->next()->isFallback()) {
    460    return true;
    461  }
    462 
    463  if (!TryFoldingGuardShapes(cx, fallback, script, icScript)) return false;
    464 
    465  return true;
    466 }
    467 
    468 bool js::jit::AddToFoldedStub(JSContext* cx, const CacheIRWriter& writer,
    469                              ICScript* icScript, ICFallbackStub* fallback) {
         // Try to extend an already-folded stub (the sole CacheIR stub in
         // this IC) with the shape (and, for offset-folded stubs, the slot
         // offset) guarded by the newly generated CacheIR in |writer|,
         // instead of attaching a brand-new stub. Returns true if the shape
         // list was extended; false if the existing stub is not a compatible
         // folded stub, the new shape is cross-realm, the list is full, or
         // appending failed on (recovered) OOM.
    470  ICEntry* icEntry = icScript->icEntryForStub(fallback);
    471  ICStub* entryStub = icEntry->firstStub();
    472 
    473  // We only update folded stubs if they're the only stub in the IC.
    474  if (entryStub == fallback) {
    475    return false;
    476  }
    477  ICCacheIRStub* stub = entryStub->toCacheIRStub();
    478  if (!stub->next()->isFallback()) {
    479    return false;
    480  }
    481 
    482  const CacheIRStubInfo* stubInfo = stub->stubInfo();
    483  const uint8_t* stubData = stub->stubDataStart();
    484 
    485  mozilla::Maybe<uint32_t> shapeFieldOffset;
    486  mozilla::Maybe<uint32_t> offsetFieldOffset;
    487  RootedValue newShape(cx);
    488  RootedValue newOffset(cx);
    489  Rooted<ListObject*> shapeList(cx);
    490 
         // Walk the folded stub's CacheIR and the new CacheIR in lockstep,
         // verifying they match op-for-op apart from the folded guard and
         // offset-based load/store ops.
    491  CacheIRReader stubReader(stubInfo);
    492  CacheIRReader newReader(writer);
    493  while (newReader.more() && stubReader.more()) {
    494    CacheOp newOp = newReader.readOp();
    495    CacheOp stubOp = stubReader.readOp();
    496    switch (stubOp) {
    497      case CacheOp::GuardMultipleShapes: {
    498        // Check that the new stub has a corresponding GuardShape.
    499        if (newOp != CacheOp::GuardShape) {
    500          return false;
    501        }
    502        // Check that the object being guarded is the same.
    503        if (newReader.objOperandId() != stubReader.objOperandId()) {
    504          return false;
    505        }
    506 
    507        // Check that the shape offset is the same.
    508        uint32_t newShapeOffset = newReader.stubOffset();
    509        uint32_t stubShapesOffset = stubReader.stubOffset();
    510        if (newShapeOffset != stubShapesOffset) {
    511          return false;
    512        }
    513 
    514        MOZ_ASSERT(shapeList == nullptr);
    515        shapeFieldOffset.emplace(newShapeOffset);
    516 
    517        // Get the shape from the new stub
    518        StubField shapeField =
    519            writer.readStubField(newShapeOffset, StubField::Type::WeakShape);
    520        Shape* shape = reinterpret_cast<Shape*>(shapeField.asWord());
    521        newShape = PrivateValue(shape);
    522 
    523        // Get the shape array from the old stub.
    524        JSObject* obj = stubInfo->getStubField<StubField::Type::JSObject>(
    525            stub, stubShapesOffset);
    526        shapeList = &obj->as<ShapeListObject>();
    527        MOZ_ASSERT(shapeList->compartment() == shape->compartment());
    528 
    529        // Don't add a shape if it's from a different realm than the first
    530        // shape.
    531        //
    532        // Since the list was created in the realm which guarded all the shapes
    533        // added to it, we can use its realm to check and ensure we're not
    534        // adding a cross-realm shape.
    535        //
    536        // The assert verifies this property by checking the first element has
    537        // the same realm (and since everything in the list has the same realm,
    538        // checking the first element suffices)
    539        Realm* shapesRealm = shapeList->realm();
    540        MOZ_ASSERT_IF(
    541            !shapeList->isEmpty(),
    542            shapeList->as<ShapeListObject>().getUnbarriered(0)->realm() ==
    543                shapesRealm);
    544        if (shapesRealm != shape->realm()) {
    545          return false;
    546        }
    547 
    548        break;
    549      }
    550      case CacheOp::GuardMultipleShapesToOffset: {
    551        // Check that the new stub has a corresponding GuardShape.
    552        if (newOp != CacheOp::GuardShape) {
    553          return false;
    554        }
    555        // Check that the object being guarded is the same.
    556        if (newReader.objOperandId() != stubReader.objOperandId()) {
    557          return false;
    558        }
    559 
    560        // Check that the shape offset is the same.
    561        uint32_t newShapeOffset = newReader.stubOffset();
    562        uint32_t stubShapesOffset = stubReader.stubOffset();
    563        if (newShapeOffset != stubShapesOffset) {
    564          return false;
    565        }
    566 
    567        MOZ_ASSERT(shapeList == nullptr);
    568        shapeFieldOffset.emplace(newShapeOffset);
    569 
    570        // Get the shape from the new stub
    571        StubField shapeField =
    572            writer.readStubField(newShapeOffset, StubField::Type::WeakShape);
    573        Shape* shape = reinterpret_cast<Shape*>(shapeField.asWord());
    574        newShape = PrivateValue(shape);
    575 
    576        // Get the shape array from the old stub.
    577        JSObject* obj = stubInfo->getStubField<StubField::Type::JSObject>(
    578            stub, stubShapesOffset);
    579        shapeList = &obj->as<ShapeListWithOffsetsObject>();
    580        MOZ_ASSERT(shapeList->compartment() == shape->compartment());
    581 
    582        // Don't add a shape if it's from a different realm than the first
    583        // shape.
    584        //
    585        // Since the list was created in the realm which guarded all the shapes
    586        // added to it, we can use its realm to check and ensure we're not
    587        // adding a cross-realm shape.
    588        //
    589        // The assert verifies this property by checking the first element has
    590        // the same realm (and since everything in the list has the same realm,
    591        // checking the first element suffices)
    592        Realm* shapesRealm = shapeList->realm();
    593        MOZ_ASSERT_IF(
    594            !shapeList->isEmpty(),
    595            shapeList->as<ShapeListWithOffsetsObject>().getShape(0)->realm() ==
    596                shapesRealm);
    597        if (shapesRealm != shape->realm()) {
    598          return false;
    599        }
    600 
    601        // Consume the offsetId argument.
    602        stubReader.skip();
    603        break;
    604      }
    605      case CacheOp::LoadFixedSlotFromOffsetResult:
    606      case CacheOp::LoadDynamicSlotFromOffsetResult: {
    607        // Check that the new stub has a corresponding
    608        // Load{Fixed|Dynamic}SlotResult
    609        if (stubOp == CacheOp::LoadFixedSlotFromOffsetResult &&
    610            newOp != CacheOp::LoadFixedSlotResult) {
    611          return false;
    612        }
    613        if (stubOp == CacheOp::LoadDynamicSlotFromOffsetResult &&
    614            newOp != CacheOp::LoadDynamicSlotResult) {
    615          return false;
    616        }
    617 
    618        // Verify operand ID.
    619        if (newReader.objOperandId() != stubReader.objOperandId()) {
    620          return false;
    621        }
    622 
    623        MOZ_ASSERT(offsetFieldOffset.isNothing());
    624        offsetFieldOffset.emplace(newReader.stubOffset());
    625 
    626        // Get the offset from the new stub
    627        StubField offsetField =
    628            writer.readStubField(*offsetFieldOffset, StubField::Type::RawInt32);
    629        newOffset = PrivateUint32Value(offsetField.asWord());
    630 
    631        // Consume the offsetId argument.
    632        stubReader.skip();
    633        break;
    634      }
    635      case CacheOp::StoreFixedSlotFromOffset:
    636      case CacheOp::StoreDynamicSlotFromOffset: {
    637        // Check that the new stub has a corresponding Store{Fixed|Dynamic}Slot
    638        if (stubOp == CacheOp::StoreFixedSlotFromOffset &&
    639            newOp != CacheOp::StoreFixedSlot) {
    640          return false;
    641        }
    642        if (stubOp == CacheOp::StoreDynamicSlotFromOffset &&
    643            newOp != CacheOp::StoreDynamicSlot) {
    644          return false;
    645        }
    646 
    647        // Verify operand ID.
    648        if (newReader.objOperandId() != stubReader.objOperandId()) {
    649          return false;
    650        }
    651 
    652        MOZ_ASSERT(offsetFieldOffset.isNothing());
    653        offsetFieldOffset.emplace(newReader.stubOffset());
    654 
    655        // Get the offset from the new stub
    656        StubField offsetField =
    657            writer.readStubField(*offsetFieldOffset, StubField::Type::RawInt32);
    658        newOffset = PrivateUint32Value(offsetField.asWord());
    659 
    660        // Consume the offsetId argument.
    661        stubReader.skip();
    662 
    663        // Verify rhs ID.
    664        if (newReader.valOperandId() != stubReader.valOperandId()) {
    665          return false;
    666        }
    667 
    668        MOZ_ASSERT(stubReader.peekOp() == CacheOp::ReturnFromIC);
    669        MOZ_ASSERT(newReader.peekOp() == CacheOp::ReturnFromIC);
    670        break;
    671      }
    672      default: {
    673        // Check that the op is the same.
    674        if (newOp != stubOp) {
    675          return false;
    676        }
    677 
    678        // Check that the arguments are the same.
    679        uint32_t argLength = CacheIROpInfos[size_t(newOp)].argLength;
    680        for (uint32_t i = 0; i < argLength; i++) {
    681          if (newReader.readByte() != stubReader.readByte()) {
    682            return false;
    683          }
    684        }
    685      }
    686    }
    687  }
    688 
    689  if (shapeFieldOffset.isNothing()) {
    690    // The stub did not contain the GuardMultipleShapes op. This can happen if a
    691    // folded stub has been discarded by GC sweeping.
    692    return false;
    693  }
    694 
    695  if (!writer.stubDataEqualsIgnoringShapeAndOffset(stubData, *shapeFieldOffset,
    696                                                   offsetFieldOffset)) {
    697    return false;
    698  }
    699 
    700  // ShapeListWithSlotsObject uses two spaces per shape.
    701  uint32_t numShapes = offsetFieldOffset.isNothing() ? shapeList->length()
    702                                                     : shapeList->length() / 2;
    703 
    704  // Limit the maximum number of shapes we will add before giving up.
    705  // If we give up, transition the stub.
    706  if (numShapes == ShapeListObject::MaxLength) {
    707    MOZ_ASSERT(fallback->state().mode() != ICState::Mode::Generic);
    708    fallback->state().forceTransition();
    709    fallback->discardStubs(cx->zone(), icEntry);
    710    return false;
    711  }
    712 
         // Append the new shape (and, for offset lists, its paired slot
         // offset) to the baked-in list.
    713  if (!shapeList->append(cx, newShape)) {
    714    cx->recoverFromOutOfMemory();
    715    return false;
    716  }
    717 
    718  if (offsetFieldOffset.isSome()) {
    719    if (!shapeList->append(cx, newOffset)) {
    720      // Drop corresponding shape if we failed adding offset.
    721      shapeList->shrinkElements(cx, shapeList->length() - 1);
    722      cx->recoverFromOutOfMemory();
    723      return false;
    724    }
    725  }
    726 
    727  JitSpew(JitSpew_StubFolding, "ShapeList%sObject %p: new length: %u",
    728          offsetFieldOffset.isNothing() ? "" : "WithOffset", shapeList.get(),
    729          shapeList->length());
    730  return true;
    731 }