tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

JitcodeMap.cpp (35814B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/JitcodeMap.h"
      8 
      9 #include "mozilla/ArrayUtils.h"
     10 #include "mozilla/Maybe.h"
     11 
     12 #include "gc/Marking.h"
     13 #include "gc/Zone.h"
     14 #include "jit/BaselineJIT.h"
     15 #include "jit/InlineScriptTree.h"
     16 #include "jit/JitRuntime.h"
     17 #include "jit/JitSpewer.h"
     18 #include "js/Vector.h"
     19 #include "vm/BytecodeLocation.h"  // for BytecodeLocation
     20 #include "vm/GeckoProfiler.h"
     21 
     22 #include "vm/GeckoProfiler-inl.h"
     23 #include "vm/JSScript-inl.h"
     24 
     25 using mozilla::Maybe;
     26 
     27 namespace js {
     28 namespace jit {
     29 
     30 static inline JitcodeRegionEntry RegionAtAddr(const IonEntry& entry, void* ptr,
     31                                              uint32_t* ptrOffset) {
     32  MOZ_ASSERT(entry.containsPointer(ptr));
     33  *ptrOffset = reinterpret_cast<uint8_t*>(ptr) -
     34               reinterpret_cast<uint8_t*>(entry.nativeStartAddr());
     35 
     36  uint32_t regionIdx = entry.regionTable()->findRegionEntry(*ptrOffset);
     37  MOZ_ASSERT(regionIdx < entry.regionTable()->numRegions());
     38 
     39  return entry.regionTable()->regionEntry(regionIdx);
     40 }
     41 
     42 void* IonEntry::canonicalNativeAddrFor(void* ptr) const {
     43  uint32_t ptrOffset;
     44  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
     45  return (void*)(((uint8_t*)nativeStartAddr()) + region.nativeOffset());
     46 }
     47 
     48 uint32_t IonEntry::callStackAtAddr(void* ptr, CallStackFrameInfo* results,
     49                                   uint32_t maxResults) const {
     50  MOZ_ASSERT(maxResults >= 1);
     51 
     52  uint32_t ptrOffset;
     53  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
     54 
     55  JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
     56  MOZ_ASSERT(locationIter.hasMore());
     57  uint32_t count = 0;
     58  while (locationIter.hasMore()) {
     59    uint32_t scriptIdx, pcOffset;
     60 
     61    locationIter.readNext(&scriptIdx, &pcOffset);
     62    MOZ_ASSERT(getStr(scriptIdx));
     63 
     64    results[count].label = getStr(scriptIdx);
     65    results[count].sourceId = getScriptSource(scriptIdx).scriptSource->id();
     66    count++;
     67    if (count >= maxResults) {
     68      break;
     69    }
     70  }
     71 
     72  return count;
     73 }
     74 
IonEntry::~IonEntry() {
  // The region table is stored at the tail of the compacted data,
  // which means the start of the region table is a pointer to
  // the _middle_ of the memory space allocated for it.
  //
  // When freeing it, obtain the payload start pointer first; the whole
  // payload (including the table at its tail) is a single js_malloc'd
  // allocation.
  MOZ_ASSERT(regionTable_);
  js_free((void*)(regionTable_->payloadStart()));
  regionTable_ = nullptr;
}
     85 
     86 static IonEntry& IonEntryForIonIC(JSRuntime* rt, const IonICEntry* icEntry) {
     87  // The table must have an IonEntry for the IC's rejoin address.
     88  auto* table = rt->jitRuntime()->getJitcodeGlobalTable();
     89  auto* entry = table->lookup(icEntry->rejoinAddr());
     90  MOZ_ASSERT(entry);
     91  MOZ_RELEASE_ASSERT(entry->isIon());
     92  return entry->asIon();
     93 }
     94 
     95 void* IonICEntry::canonicalNativeAddrFor(void* ptr) const { return ptr; }
     96 
     97 uint32_t IonICEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
     98                                     CallStackFrameInfo* results,
     99                                     uint32_t maxResults) const {
    100  const IonEntry& entry = IonEntryForIonIC(rt, this);
    101  return entry.callStackAtAddr(rejoinAddr(), results, maxResults);
    102 }
    103 
    104 uint64_t IonICEntry::realmID(JSRuntime* rt) const {
    105  const IonEntry& entry = IonEntryForIonIC(rt, this);
    106  return entry.realmID();
    107 }
    108 
void* BaselineEntry::canonicalNativeAddrFor(void* ptr) const {
  // TODO: We can't yet normalize Baseline addresses until we unify
  // BaselineScript's PCMappingEntries with JitcodeGlobalTable.
  // Until then, return the address unchanged.
  return ptr;
}
    114 
    115 uint32_t BaselineEntry::callStackAtAddr(void* ptr, CallStackFrameInfo* results,
    116                                        uint32_t maxResults) const {
    117  MOZ_ASSERT(containsPointer(ptr));
    118  MOZ_ASSERT(maxResults >= 1);
    119 
    120  results[0].label = str();
    121  results[0].sourceId = scriptSource().scriptSource->id();
    122  return 1;
    123 }
    124 
    125 void* BaselineInterpreterEntry::canonicalNativeAddrFor(void* ptr) const {
    126  return ptr;
    127 }
    128 
uint32_t BaselineInterpreterEntry::callStackAtAddr(void* ptr,
                                                   CallStackFrameInfo* results,
                                                   uint32_t maxResults) const {
  // Callers are expected to special-case interpreter entries before
  // reaching this virtual-style dispatch; getting here is a bug.
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}
    134 
uint64_t BaselineInterpreterEntry::realmID() const {
  // Interpreter trampoline code has no single realm; callers must not ask.
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}
    138 
void* RealmIndependentSharedEntry::canonicalNativeAddrFor(void* ptr) const {
  // TODO: We can't yet normalize Baseline addresses until we unify
  // BaselineScript's PCMappingEntries with JitcodeGlobalTable.
  // Until then, return the address unchanged.
  return ptr;
}
    144 
// Shared stub code has no associated script, so no bytecode locations can
// be produced; spew a note and report success with no results appended.
bool RealmIndependentSharedEntry::callStackAtAddr(
    void* ptr, BytecodeLocationVector& results, uint32_t* depth) const {
  JitSpew(JitSpew_Profiling,
          "Unexpected call - without a script, what can we do here?");
  return true;
}
    151 
    152 uint32_t RealmIndependentSharedEntry::callStackAtAddr(
    153    void* ptr, CallStackFrameInfo* results, uint32_t maxResults) const {
    154  MOZ_ASSERT(containsPointer(ptr));
    155  MOZ_ASSERT(maxResults >= 1);
    156 
    157  results[0].label = str();
    158  results[0].sourceId = 0;
    159  return 1;
    160 }
    161 
    162 uint64_t RealmIndependentSharedEntry::realmID() const { return 0; }
    163 
// Sampler-side lookup: find the entry containing |ptr| and record the
// sample buffer position so the GC keeps the entry (and, for ICs, the
// owning Ion entry) alive while the sample may still be read.
const JitcodeGlobalEntry* JitcodeGlobalTable::lookupForSampler(
    void* ptr, JSRuntime* rt, uint64_t samplePosInBuffer) {
  JitcodeGlobalEntry* entry = lookupInternal(ptr);
  if (!entry) {
    return nullptr;
  }

  entry->setSamplePositionInBuffer(samplePosInBuffer);

  // IonIC entries must keep their corresponding Ion entries alive.
  if (entry->isIonIC()) {
    IonEntry& ionEntry = IonEntryForIonIC(rt, &entry->asIonIC());
    ionEntry.setSamplePositionInBuffer(samplePosInBuffer);
  }

  // JitcodeGlobalEntries are marked at the end of the mark phase. A read
  // barrier is not needed. Any JS frames sampled during the sweep phase of
  // the GC must be on stack, and on-stack frames must already be marked at
  // the beginning of the sweep phase. It's not possible to assert this here
  // as we may be off main thread when called from the gecko profiler.

  return entry;
}
    187 
    188 JitcodeGlobalEntry* JitcodeGlobalTable::lookupInternal(void* ptr) {
    189  // Search for an entry containing the one-byte range starting at |ptr|.
    190  JitCodeRange range(ptr, static_cast<uint8_t*>(ptr) + 1);
    191 
    192  if (JitCodeRange** entry = tree_.maybeLookup(&range)) {
    193    MOZ_ASSERT((*entry)->containsPointer(ptr));
    194    return static_cast<JitcodeGlobalEntry*>(*entry);
    195  }
    196 
    197  return nullptr;
    198 }
    199 
// Register a new jitcode entry in both the entry vector and the range
// tree.  Takes ownership of |entry|; returns false on OOM.
bool JitcodeGlobalTable::addEntry(UniqueJitcodeGlobalEntry entry) {
  MOZ_ASSERT(entry->isIon() || entry->isIonIC() || entry->isBaseline() ||
             entry->isBaselineInterpreter() || entry->isDummy() ||
             entry->isRealmIndependentShared());

  // Assert the new entry does not have a code range that's equal to (or
  // contained in) one of the existing entries, because that would confuse the
  // AVL tree.
  MOZ_ASSERT(!tree_.maybeLookup(entry.get()));

  // Suppress profiler sampling while data structures are being mutated.
  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  if (!entries_.append(std::move(entry))) {
    return false;
  }
  // Keep the vector and the tree in sync: if the tree insertion fails,
  // remove the just-appended entry again.
  if (!tree_.insert(entries_.back().get())) {
    entries_.popBack();
    return false;
  }

  return true;
}
    223 
    224 void JitcodeGlobalTable::setAllEntriesAsExpired() {
    225  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
    226  for (EntryVector::Range r(entries_.all()); !r.empty(); r.popFront()) {
    227    auto& entry = r.front();
    228    entry->setAsExpired();
    229  }
    230 }
    231 
// Mark entries that may still be referenced by the profiler's sample
// buffer.  Returns true if any new marking work was performed (so the GC
// can iterate to a fixed point).
bool JitcodeGlobalTable::markIteratively(GCMarker* marker) {
  // JitcodeGlobalTable must keep entries that are in the sampler buffer
  // alive. This conditionality is akin to holding the entries weakly.
  //
  // If this table were marked at the beginning of the mark phase, then
  // sampling would require a read barrier for sampling in between
  // incremental GC slices. However, invoking read barriers from the sampler
  // is wildly unsafe. The sampler may run at any time, including during GC
  // itself.
  //
  // Instead, JitcodeGlobalTable is marked at the beginning of the sweep
  // phase, along with weak references. The key assumption is the
  // following. At the beginning of the sweep phase, any JS frames that the
  // sampler may put in its buffer that are not already there at the
  // beginning of the mark phase must have already been marked, as either 1)
  // the frame was on-stack at the beginning of the sweep phase, or 2) the
  // frame was pushed between incremental sweep slices. Frames of case 1)
  // are already marked. Frames of case 2) must have been reachable to have
  // been newly pushed, and thus are already marked.
  //
  // The approach above obviates the need for read barriers. The assumption
  // above is checked in JitcodeGlobalTable::lookupForSampler.

  MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());

  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  // If the profiler is off, rangeStart will be Nothing() and all entries are
  // considered to be expired.
  Maybe<uint64_t> rangeStart =
      marker->runtime()->profilerSampleBufferRangeStart();

  bool markedAny = false;
  for (EntryVector::Range r(entries_.all()); !r.empty(); r.popFront()) {
    auto& entry = r.front();

    // If an entry is not sampled, reset its buffer position to the invalid
    // position, and conditionally mark the rest of the entry if its
    // JitCode is not already marked. This conditional marking ensures
    // that so long as the JitCode *may* be sampled, we keep any
    // information that may be handed out to the sampler, like tracked
    // types used by optimizations and scripts used for pc to line number
    // mapping, alive as well.
    if (!rangeStart || !entry->isSampled(*rangeStart)) {
      entry->setAsExpired();
      if (!entry->isJitcodeMarkedFromAnyThread(marker->runtime())) {
        continue;
      }
    }

    // The table is runtime-wide. Not all zones may be participating in
    // the GC.
    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      continue;
    }

    markedAny |= entry->trace(marker->tracer());
  }

  return markedAny;
}
    293 
// Sweep the table: drop entries whose JitCode died in this GC.  Entries in
// zones that are not being collected (or that have already finished) are
// kept untouched.
void JitcodeGlobalTable::traceWeak(JSRuntime* rt, JSTracer* trc) {
  AutoSuppressProfilerSampling suppressSampling(rt->mainContextFromOwnThread());

  entries_.eraseIf([&](auto& entry) {
    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      return false;
    }

    if (TraceManuallyBarrieredWeakEdge(
            trc, entry->jitcodePtr(),
            "JitcodeGlobalTable::JitcodeGlobalEntry::jitcode_")) {
      return false;
    }

    // We have to remove the entry.  A dying entry must no longer be
    // referenced from the sampler buffer.
#ifdef DEBUG
    Maybe<uint64_t> rangeStart = rt->profilerSampleBufferRangeStart();
    MOZ_ASSERT_IF(rangeStart, !entry->isSampled(*rangeStart));
#endif
    tree_.remove(entry.get());
    return true;
  });

  // The tree and the vector must stay in lock-step.
  MOZ_ASSERT(tree_.empty() == entries_.empty());
}
    319 
    320 bool JitcodeGlobalEntry::traceJitcode(JSTracer* trc) {
    321  if (!IsMarkedUnbarriered(trc->runtime(), jitcode_)) {
    322    TraceManuallyBarrieredEdge(trc, &jitcode_,
    323                               "jitcodglobaltable-baseentry-jitcode");
    324    return true;
    325  }
    326  return false;
    327 }
    328 
// Whether this entry's JitCode is already marked (checked without barriers,
// so safe to call from any thread's marking context).
bool JitcodeGlobalEntry::isJitcodeMarkedFromAnyThread(JSRuntime* rt) {
  return IsMarkedUnbarriered(rt, jitcode_);
}
    332 
// Dispatch-by-kind to the concrete entry type's callStackAtAddr.  |rt| is
// only consulted by kinds that need the global table (IonIC, Dummy).
uint32_t JitcodeGlobalEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                             CallStackFrameInfo* results,
                                             uint32_t maxResults) const {
  switch (kind()) {
    case Kind::Ion:
      return asIon().callStackAtAddr(ptr, results, maxResults);
    case Kind::IonIC:
      return asIonIC().callStackAtAddr(rt, ptr, results, maxResults);
    case Kind::Baseline:
      return asBaseline().callStackAtAddr(ptr, results, maxResults);
    case Kind::BaselineInterpreter:
      // Note: the BaselineInterpreter override crashes; see its definition.
      return asBaselineInterpreter().callStackAtAddr(ptr, results, maxResults);
    case Kind::Dummy:
      return asDummy().callStackAtAddr(rt, ptr, results, maxResults);
    case Kind::RealmIndependentShared:
      return asRealmIndependentShared().callStackAtAddr(ptr, results,
                                                        maxResults);
  }
  MOZ_CRASH("Invalid kind");
}
    353 
// Dispatch-by-kind to the concrete entry type's realmID.  BaselineInterpreter
// entries have no realm and fall through to the crash below.
uint64_t JitcodeGlobalEntry::realmID(JSRuntime* rt) const {
  switch (kind()) {
    case Kind::Ion:
      return asIon().realmID();
    case Kind::IonIC:
      return asIonIC().realmID(rt);
    case Kind::Baseline:
      return asBaseline().realmID();
    case Kind::Dummy:
      return asDummy().realmID();
    case Kind::RealmIndependentShared:
      return asRealmIndependentShared().realmID();
    case Kind::BaselineInterpreter:
      break;
  }
  MOZ_CRASH("Invalid kind");
}
    371 
    372 bool JitcodeGlobalEntry::trace(JSTracer* trc) { return traceJitcode(trc); }
    373 
// Dispatch-by-kind to the concrete entry type's address normalization.
// BaselineInterpreter entries are unsupported and fall through to the crash.
void* JitcodeGlobalEntry::canonicalNativeAddrFor(JSRuntime* rt,
                                                 void* ptr) const {
  switch (kind()) {
    case Kind::Ion:
      return asIon().canonicalNativeAddrFor(ptr);
    case Kind::IonIC:
      return asIonIC().canonicalNativeAddrFor(ptr);
    case Kind::Baseline:
      return asBaseline().canonicalNativeAddrFor(ptr);
    case Kind::Dummy:
      return asDummy().canonicalNativeAddrFor(rt, ptr);
    case Kind::RealmIndependentShared:
      return asRealmIndependentShared().canonicalNativeAddrFor(ptr);
    case Kind::BaselineInterpreter:
      break;
  }
  MOZ_CRASH("Invalid kind");
}
    392 
// static
// Deleter used by UniqueJitcodeGlobalEntry: dispatch on the dynamic kind so
// js_delete runs the correct derived destructor.
void JitcodeGlobalEntry::DestroyPolicy::operator()(JitcodeGlobalEntry* entry) {
  switch (entry->kind()) {
    case JitcodeGlobalEntry::Kind::Ion:
      js_delete(&entry->asIon());
      break;
    case JitcodeGlobalEntry::Kind::IonIC:
      js_delete(&entry->asIonIC());
      break;
    case JitcodeGlobalEntry::Kind::Baseline:
      js_delete(&entry->asBaseline());
      break;
    case JitcodeGlobalEntry::Kind::BaselineInterpreter:
      js_delete(&entry->asBaselineInterpreter());
      break;
    case JitcodeGlobalEntry::Kind::Dummy:
      js_delete(&entry->asDummy());
      break;
    case JitcodeGlobalEntry::Kind::RealmIndependentShared:
      js_delete(&entry->asRealmIndependentShared());
      break;
  }
}
    416 
/* static */
// Emit a region header: the region's starting native offset followed by the
// inline script depth (number of script/pc pairs that follow).
void JitcodeRegionEntry::WriteHead(CompactBufferWriter& writer,
                                   uint32_t nativeOffset, uint8_t scriptDepth) {
  writer.writeUnsigned(nativeOffset);
  writer.writeByte(scriptDepth);
}
    423 
/* static */
// Decode a region header written by WriteHead.
void JitcodeRegionEntry::ReadHead(CompactBufferReader& reader,
                                  uint32_t* nativeOffset,
                                  uint8_t* scriptDepth) {
  *nativeOffset = reader.readUnsigned();
  *scriptDepth = reader.readByte();
}
    431 
/* static */
// Emit one script/pc pair: the script's index into the entry's script list
// and the bytecode offset within that script.
void JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter& writer,
                                       uint32_t scriptIdx, uint32_t pcOffset) {
  writer.writeUnsigned(scriptIdx);
  writer.writeUnsigned(pcOffset);
}
    438 
/* static */
// Decode a script/pc pair written by WriteScriptPc.
void JitcodeRegionEntry::ReadScriptPc(CompactBufferReader& reader,
                                      uint32_t* scriptIdx, uint32_t* pcOffset) {
  *scriptIdx = reader.readUnsigned();
  *pcOffset = reader.readUnsigned();
}
    445 
/* static */
// Encode a (nativeDelta, pcDelta) pair in 1-4 bytes, picking the smallest
// format that fits.  In the bit-pattern comments below, N = nativeDelta
// bits, B = pcDelta bits, and the trailing constant bits are the format tag
// examined by ReadDelta.
void JitcodeRegionEntry::WriteDelta(CompactBufferWriter& writer,
                                    uint32_t nativeDelta, int32_t pcDelta) {
  if (pcDelta >= 0) {
    // 1 and 2-byte formats possible.

    //  NNNN-BBB0
    if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
      uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
                       (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal);
      return;
    }

    //  NNNN-NNNN BBBB-BB01
    if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
      uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
                        (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal & 0xff);
      writer.writeByte((encVal >> 8) & 0xff);
      return;
    }
  }

  // The 3- and 4-byte formats can also hold negative pcDeltas.

  //  NNNN-NNNN NNNB-BBBB BBBB-B011
  if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
      nativeDelta <= ENC3_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC3_MASK_VAL |
        ((uint32_t(pcDelta) << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
        (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    return;
  }

  //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
      nativeDelta <= ENC4_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC4_MASK_VAL |
        ((uint32_t(pcDelta) << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
        (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    writer.writeByte((encVal >> 24) & 0xff);
    return;
  }

  // Should never get here.
  MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
}
    500 
/* static */
// Decode a delta pair written by WriteDelta.  The low bits of the first
// byte identify which of the four variable-length formats was used.
void JitcodeRegionEntry::ReadDelta(CompactBufferReader& reader,
                                   uint32_t* nativeDelta, int32_t* pcDelta) {
  // NB:
  // It's possible to get nativeDeltas with value 0 in two cases:
  //
  // 1. The last region's run.  This is because the region table's start
  // must be 4-byte aligned, and we must insert padding bytes to align the
  // payload section before emitting the table.
  //
  // 2. A zero-offset nativeDelta with a negative pcDelta.
  //
  // So if nativeDelta is zero, then pcDelta must be <= 0.

  //  NNNN-BBB0
  const uint32_t firstByte = reader.readByte();
  if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
    uint32_t encVal = firstByte;
    *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN BBBB-BB01
  const uint32_t secondByte = reader.readByte();
  if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8;
    *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN NNNB-BBBB BBBB-B011
  const uint32_t thirdByte = reader.readByte();
  if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
    *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;

    uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
    // Fix sign if necessary (sign-extend the pcDelta field).
    if (pcDeltaU > static_cast<uint32_t>(ENC3_PC_DELTA_MAX)) {
      pcDeltaU |= ~ENC3_PC_DELTA_MAX;
    }
    *pcDelta = pcDeltaU;
    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
  const uint32_t fourthByte = reader.readByte();
  uint32_t encVal =
      firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
  *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;

  uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
  // fix sign if necessary (sign-extend the pcDelta field)
  if (pcDeltaU > static_cast<uint32_t>(ENC4_PC_DELTA_MAX)) {
    pcDeltaU |= ~ENC4_PC_DELTA_MAX;
  }
  *pcDelta = pcDeltaU;

  MOZ_ASSERT(*pcDelta != 0);
  MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
}
    570 
/* static */
// Compute how many consecutive NativeToBytecode entries starting at |entry|
// can be encoded as a single run: same inline site, encodeable deltas, and
// no longer than MAX_RUN_LENGTH.
uint32_t JitcodeRegionEntry::ExpectedRunLength(const NativeToBytecode* entry,
                                               const NativeToBytecode* end) {
  MOZ_ASSERT(entry < end);

  // We always use the first entry, so runLength starts at 1
  uint32_t runLength = 1;

  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
    // If the next run moves to a different inline site, stop the run.
    if (nextEntry->tree != entry->tree) {
      break;
    }

    uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        nextEntry->tree->script()->pcToOffset(nextEntry->pc);
    // Native offsets are expected to be non-decreasing within a run.
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);

    // If deltas are too large (very unlikely), stop the run.
    if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta)) {
      break;
    }

    runLength++;

    // If the run has grown to its maximum length, stop the run.
    if (runLength == MAX_RUN_LENGTH) {
      break;
    }

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  return runLength;
}
    615 
    616 struct JitcodeMapBufferWriteSpewer {
    617 #ifdef JS_JITSPEW
    618  CompactBufferWriter* writer;
    619  uint32_t startPos;
    620 
    621  static const uint32_t DumpMaxBytes = 50;
    622 
    623  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w)
    624      : writer(&w), startPos(writer->length()) {}
    625 
    626  void spewAndAdvance(const char* name) {
    627    if (writer->oom()) {
    628      return;
    629    }
    630 
    631    uint32_t curPos = writer->length();
    632    const uint8_t* start = writer->buffer() + startPos;
    633    const uint8_t* end = writer->buffer() + curPos;
    634    const char* MAP = "0123456789ABCDEF";
    635    uint32_t bytes = end - start;
    636 
    637    char buffer[DumpMaxBytes * 3];
    638    for (uint32_t i = 0; i < bytes; i++) {
    639      buffer[i * 3] = MAP[(start[i] >> 4) & 0xf];
    640      buffer[i * 3 + 1] = MAP[(start[i] >> 0) & 0xf];
    641      buffer[i * 3 + 2] = ' ';
    642    }
    643    if (bytes >= DumpMaxBytes) {
    644      buffer[DumpMaxBytes * 3 - 1] = '\0';
    645    } else {
    646      buffer[bytes * 3 - 1] = '\0';
    647    }
    648 
    649    JitSpew(JitSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos),
    650            int(bytes), buffer);
    651 
    652    // Move to the end of the current buffer.
    653    startPos = writer->length();
    654  }
    655 #else   // !JS_JITSPEW
    656  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w) {}
    657  void spewAndAdvance(const char* name) {}
    658 #endif  // JS_JITSPEW
    659 };
    660 
// Write a run, starting at the given NativeToBytecode entry, into the given
// buffer writer.  The format is: header (native offset + script depth),
// one script/pc pair per inline level (innermost first), then one encoded
// delta per subsequent entry in the run.  Returns false on writer OOM.
/* static */
bool JitcodeRegionEntry::WriteRun(CompactBufferWriter& writer,
                                  const IonEntry::ScriptList& scriptList,
                                  uint32_t runLength,
                                  const NativeToBytecode* entry) {
  MOZ_ASSERT(runLength > 0);
  MOZ_ASSERT(runLength <= MAX_RUN_LENGTH);

  // Calculate script depth.
  MOZ_ASSERT(entry->tree->depth() <= 0xff);
  uint8_t scriptDepth = entry->tree->depth();
  uint32_t regionNativeOffset = entry->nativeOffset.offset();

  JitcodeMapBufferWriteSpewer spewer(writer);

  // Write the head info.
  JitSpew(JitSpew_Profiling, "    Head Info: nativeOffset=%d scriptDepth=%d",
          int(regionNativeOffset), int(scriptDepth));
  WriteHead(writer, regionNativeOffset, scriptDepth);
  spewer.spewAndAdvance("      ");

  // Write each script/pc pair, walking outward from the innermost inline
  // frame to its callers.
  {
    InlineScriptTree* curTree = entry->tree;
    jsbytecode* curPc = entry->pc;
    for (uint8_t i = 0; i < scriptDepth; i++) {
      // Find the index of the script within the list.
      // NB: scriptList is guaranteed to contain curTree->script()
      uint32_t scriptIdx = 0;
      for (; scriptIdx < scriptList.length(); scriptIdx++) {
        if (scriptList[scriptIdx].sourceAndExtent.matches(curTree->script())) {
          break;
        }
      }
      MOZ_ASSERT(scriptIdx < scriptList.length());

      uint32_t pcOffset = curTree->script()->pcToOffset(curPc);

      JitSpew(JitSpew_Profiling, "    Script/PC %d: scriptIdx=%d pcOffset=%d",
              int(i), int(scriptIdx), int(pcOffset));
      WriteScriptPc(writer, scriptIdx, pcOffset);
      spewer.spewAndAdvance("      ");

      MOZ_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
      curPc = curTree->callerPc();
      curTree = curTree->caller();
    }
  }

  // Start writing runs.
  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  JitSpew(JitSpew_Profiling,
          "  Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
          int(curNativeOffset), int(curBytecodeOffset));

  // Skip first entry because it is implicit in the header.  Start at subsequent
  // entry.
  for (uint32_t i = 1; i < runLength; i++) {
    MOZ_ASSERT(entry[i].tree == entry->tree);

    uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        entry[i].tree->script()->pcToOffset(entry[i].pc);
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
    // ExpectedRunLength already guaranteed the deltas fit the encoding.
    MOZ_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));

    JitSpew(JitSpew_Profiling,
            "    RunEntry native: %d-%d [%d]  bytecode: %d-%d [%d]",
            int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
            int(curBytecodeOffset), int(nextBytecodeOffset),
            int(bytecodeDelta));
    WriteDelta(writer, nativeDelta, bytecodeDelta);

    // Spew the bytecode in these ranges.
    if (curBytecodeOffset < nextBytecodeOffset) {
      JitSpewStart(JitSpew_Profiling, "      OPS: ");
      uint32_t curBc = curBytecodeOffset;
      while (curBc < nextBytecodeOffset) {
        jsbytecode* pc = entry[i].tree->script()->offsetToPC(curBc);
#ifdef JS_JITSPEW
        JSOp op = JSOp(*pc);
        JitSpewCont(JitSpew_Profiling, "%s ", CodeName(op));
#endif
        curBc += GetBytecodeLength(pc);
      }
      JitSpewFin(JitSpew_Profiling);
    }
    spewer.spewAndAdvance("      ");

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  if (writer.oom()) {
    return false;
  }

  return true;
}
    768 
    769 void JitcodeRegionEntry::unpack() {
    770  CompactBufferReader reader(data_, end_);
    771  ReadHead(reader, &nativeOffset_, &scriptDepth_);
    772  MOZ_ASSERT(scriptDepth_ > 0);
    773 
    774  scriptPcStack_ = reader.currentPosition();
    775  // Skip past script/pc stack
    776  for (unsigned i = 0; i < scriptDepth_; i++) {
    777    uint32_t scriptIdx, pcOffset;
    778    ReadScriptPc(reader, &scriptIdx, &pcOffset);
    779  }
    780 
    781  deltaRun_ = reader.currentPosition();
    782 }
    783 
    784 uint32_t JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset,
    785                                          uint32_t startPcOffset) const {
    786  DeltaIterator iter = deltaIterator();
    787  uint32_t curNativeOffset = nativeOffset();
    788  uint32_t curPcOffset = startPcOffset;
    789  while (iter.hasMore()) {
    790    uint32_t nativeDelta;
    791    int32_t pcDelta;
    792    iter.readNext(&nativeDelta, &pcDelta);
    793 
    794    // The start address of the next delta-run entry is counted towards
    795    // the current delta-run entry, because return addresses should
    796    // associate with the bytecode op prior (the call) not the op after.
    797    if (queryNativeOffset <= curNativeOffset + nativeDelta) {
    798      break;
    799    }
    800    curNativeOffset += nativeDelta;
    801    curPcOffset += pcDelta;
    802  }
    803  return curPcOffset;
    804 }
    805 
    806 uint32_t JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const {
    807  static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
    808  uint32_t regions = numRegions();
    809  MOZ_ASSERT(regions > 0);
    810 
    811  // For small region lists, just search linearly.
    812  if (regions <= LINEAR_SEARCH_THRESHOLD) {
    813    JitcodeRegionEntry previousEntry = regionEntry(0);
    814    for (uint32_t i = 1; i < regions; i++) {
    815      JitcodeRegionEntry nextEntry = regionEntry(i);
    816      MOZ_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());
    817 
    818      // See note in binary-search code below about why we use '<=' here
    819      // instead of '<'.  Short explanation: regions are closed at their
    820      // ending addresses, and open at their starting addresses.
    821      if (nativeOffset <= nextEntry.nativeOffset()) {
    822        return i - 1;
    823      }
    824 
    825      previousEntry = nextEntry;
    826    }
    827    // If nothing found, assume it falls within last region.
    828    return regions - 1;
    829  }
    830 
    831  // For larger ones, binary search the region table.
    832  uint32_t idx = 0;
    833  uint32_t count = regions;
    834  while (count > 1) {
    835    uint32_t step = count / 2;
    836    uint32_t mid = idx + step;
    837    JitcodeRegionEntry midEntry = regionEntry(mid);
    838 
    839    // A region memory range is closed at its ending address, not starting
    840    // address.  This is because the return address for calls must associate
    841    // with the call's bytecode PC, not the PC of the bytecode operator after
    842    // the call.
    843    //
    844    // So a query is < an entry if the query nativeOffset is <= the start
    845    // address of the entry, and a query is >= an entry if the query
    846    // nativeOffset is > the start address of an entry.
    847    if (nativeOffset <= midEntry.nativeOffset()) {
    848      // Target entry is below midEntry.
    849      count = step;
    850    } else {  // if (nativeOffset > midEntry.nativeOffset())
    851      // Target entry is at midEntry or above.
    852      idx = mid;
    853      count -= step;
    854    }
    855  }
    856  return idx;
    857 }
    858 
// Serialize the native->bytecode map into |writer|.  The layout produced is:
//   [run 0][run 1]...[zero padding to a 4-byte boundary]
//   [numRuns : uint32][backOffset 0 : uint32]...[backOffset numRuns-1 : uint32]
// where each backOffset is the distance from the start of the table back to
// the corresponding run.  On success, *tableOffsetOut receives the byte
// offset of the table within the buffer and *numRegionsOut the number of
// runs written.  Returns false on OOM.
/* static */
bool JitcodeIonTable::WriteIonTable(CompactBufferWriter& writer,
                                    const IonEntry::ScriptList& scriptList,
                                    const NativeToBytecode* start,
                                    const NativeToBytecode* end,
                                    uint32_t* tableOffsetOut,
                                    uint32_t* numRegionsOut) {
  MOZ_ASSERT(tableOffsetOut != nullptr);
  MOZ_ASSERT(numRegionsOut != nullptr);
  // The encoding assumes it starts at offset 0 of the buffer: run offsets
  // recorded below are measured from the beginning of |writer|.
  MOZ_ASSERT(writer.length() == 0);
  MOZ_ASSERT(scriptList.length() > 0);

  JitSpew(JitSpew_Profiling,
          "Writing native to bytecode map for %s (offset %u-%u) (%zu entries)",
          scriptList[0].sourceAndExtent.scriptSource->filename(),
          scriptList[0].sourceAndExtent.toStringStart,
          scriptList[0].sourceAndExtent.toStringEnd,
          mozilla::PointerRangeSize(start, end));

  JitSpew(JitSpew_Profiling, "  ScriptList of size %u",
          unsigned(scriptList.length()));
  for (uint32_t i = 0; i < scriptList.length(); i++) {
    JitSpew(JitSpew_Profiling, "  Script %u - %s (offset %u-%u)", i,
            scriptList[i].sourceAndExtent.scriptSource->filename(),
            scriptList[i].sourceAndExtent.toStringStart,
            scriptList[i].sourceAndExtent.toStringEnd);
  }

  // Write out runs first.  Keep a vector tracking the positive offsets from
  // payload start to the run.
  const NativeToBytecode* curEntry = start;
  js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;

  while (curEntry != end) {
    // Calculate the length of the next run.
    uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
    MOZ_ASSERT(runLength > 0);
    MOZ_ASSERT(runLength <= uintptr_t(end - curEntry));
    JitSpew(JitSpew_Profiling, "  Run at entry %d, length %d, buffer offset %d",
            int(curEntry - start), int(runLength), int(writer.length()));

    // Store the offset of the run.
    if (!runOffsets.append(writer.length())) {
      return false;
    }

    // Encode the run.
    if (!JitcodeRegionEntry::WriteRun(writer, scriptList, runLength,
                                      curEntry)) {
      return false;
    }

    curEntry += runLength;
  }

  // Done encoding regions.  About to start table.  Ensure we are aligned to 4
  // bytes since table is composed of uint32_t values.
  uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
  if (padding == sizeof(uint32_t)) {
    // Already aligned; don't emit a full word of padding.
    padding = 0;
  }
  JitSpew(JitSpew_Profiling, "  Padding %d bytes after run @%d", int(padding),
          int(writer.length()));
  for (uint32_t i = 0; i < padding; i++) {
    writer.writeByte(0);
  }

  // Now at start of table.
  uint32_t tableOffset = writer.length();

  // The table being written at this point will be accessed directly via
  // uint32_t pointers, so all writes below use native endianness.

  // Write out numRegions
  JitSpew(JitSpew_Profiling, "  Writing numRuns=%d", int(runOffsets.length()));
  writer.writeNativeEndianUint32_t(runOffsets.length());

  // Write out region offset table.  The offsets in |runOffsets| are currently
  // forward offsets from the beginning of the buffer.  We convert them to
  // backwards offsets from the start of the table before writing them into
  // their table entries.
  for (uint32_t i = 0; i < runOffsets.length(); i++) {
    JitSpew(JitSpew_Profiling, "  Run %d offset=%d backOffset=%d @%d", int(i),
            int(runOffsets[i]), int(tableOffset - runOffsets[i]),
            int(writer.length()));
    writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
  }

  // All writes above silently no-op once the writer hits OOM; check once here.
  if (writer.oom()) {
    return false;
  }

  *tableOffsetOut = tableOffset;
  *numRegionsOut = runOffsets.length();
  return true;
}
    955 
    956 }  // namespace jit
    957 }  // namespace js
    958 
    959 JS::ProfiledFrameHandle::ProfiledFrameHandle(JSRuntime* rt,
    960                                             js::jit::JitcodeGlobalEntry& entry,
    961                                             void* addr, const char* label,
    962                                             uint32_t sourceId, uint32_t depth)
    963    : rt_(rt),
    964      entry_(entry),
    965      addr_(addr),
    966      canonicalAddr_(nullptr),
    967      label_(label),
    968      sourceId_(sourceId),
    969      depth_(depth) {
    970  if (!canonicalAddr_) {
    971    canonicalAddr_ = entry_.canonicalNativeAddrFor(rt_, addr_);
    972  }
    973 }
    974 
    975 JS_PUBLIC_API JS::ProfilingFrameIterator::FrameKind
    976 JS::ProfiledFrameHandle::frameKind() const {
    977  if (entry_.isBaselineInterpreter()) {
    978    return JS::ProfilingFrameIterator::Frame_BaselineInterpreter;
    979  }
    980  if (entry_.isBaseline()) {
    981    return JS::ProfilingFrameIterator::Frame_Baseline;
    982  }
    983  if (entry_.isRealmIndependentShared()) {
    984    return JS::ProfilingFrameIterator::Frame_Baseline;
    985  }
    986  return JS::ProfilingFrameIterator::Frame_Ion;
    987 }
    988 
// Forward to the jitcode entry, which knows the realm of the code this
// handle's address falls in.
JS_PUBLIC_API uint64_t JS::ProfiledFrameHandle::realmID() const {
  return entry_.realmID(rt_);
}
    992 
// Return the source id that was captured when this handle was constructed.
JS_PUBLIC_API uint32_t JS::ProfiledFrameHandle::sourceId() const {
  return sourceId_;
}
    996 
    997 JS_PUBLIC_API JS::ProfiledFrameRange JS::GetProfiledFrames(JSContext* cx,
    998                                                           void* addr) {
    999  // Ensure ProfiledFrameRange::MaxInliningDepth matches
   1000  // InlineScriptTree::MaxDepth. Please keep them in sync.
   1001  static_assert(ProfiledFrameRange::MaxInliningDepth ==
   1002                    js::jit::InlineScriptTree::MaxDepth,
   1003                "ProfiledFrameRange::MaxInliningDepth must match "
   1004                "InlineScriptTree::MaxDepth");
   1005 
   1006  JSRuntime* rt = cx->runtime();
   1007  js::jit::JitcodeGlobalTable* table =
   1008      rt->jitRuntime()->getJitcodeGlobalTable();
   1009  js::jit::JitcodeGlobalEntry* entry = table->lookup(addr);
   1010 
   1011  ProfiledFrameRange result(rt, addr, entry);
   1012 
   1013  if (entry) {
   1014    result.depth_ = entry->callStackAtAddr(rt, addr, result.frames_,
   1015                                           std::size(result.frames_));
   1016  }
   1017  return result;
   1018 }
   1019 
   1020 JS::ProfiledFrameHandle JS::ProfiledFrameRange::Iter::operator*() const {
   1021  // The iterator iterates in high depth to low depth order. index_ goes up,
   1022  // and the depth we need to pass to ProfiledFrameHandle goes down.
   1023  uint32_t depth = range_.depth_ - 1 - index_;
   1024  return ProfiledFrameHandle(range_.rt_, *range_.entry_, range_.addr_,
   1025                             range_.frames_[depth].label,
   1026                             range_.frames_[depth].sourceId, depth);
   1027 }