tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

ProfilingStack.h (23365B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef js_ProfilingStack_h
      8 #define js_ProfilingStack_h
      9 
     10 #include "mozilla/Atomics.h"
     11 #include "mozilla/BaseProfilerMarkersPrerequisites.h"
     12 #include "mozilla/TimeStamp.h"
     13 
     14 #include <stdint.h>
     15 
     16 #include "jstypes.h"
     17 
     18 #include "js/ProfilingCategory.h"
     19 #include "js/TypeDecls.h"
     20 
     21 class JS_PUBLIC_API JSTracer;
     22 class JS_PUBLIC_API ProfilingStack;
     23 
     24 // This file defines the classes ProfilingStack and ProfilingStackFrame.
     25 // The ProfilingStack manages an array of ProfilingStackFrames.
     26 // It keeps track of the "label stack" and the JS interpreter stack.
     27 // The two stack types are interleaved.
     28 //
     29 // Usage:
     30 //
     31 //  ProfilingStack* profilingStack = ...;
     32 //
     33 //  // For label frames:
     34 //  profilingStack->pushLabelFrame(...);
     35 //  // Execute some code. When finished, pop the frame:
     36 //  profilingStack->pop();
     37 //
     38 //  // For JS stack frames:
     39 //  profilingStack->pushJSFrame(...);
     40 //  // Execute some code. When finished, pop the frame:
     41 //  profilingStack->pop();
     42 //
     43 //
     44 // Concurrency considerations
     45 //
     46 // A thread's profiling stack (and the frames inside it) is only modified by
     47 // that thread. However, the profiling stack can be *read* by a different
     48 // thread, the sampler thread: Whenever the profiler wants to sample a given
     49 // thread A, the following happens:
     50 //  (1) Thread A is suspended.
     51 //  (2) The sampler thread (thread S) reads the ProfilingStack of thread A,
     52 //      including all ProfilingStackFrames that are currently in that stack
     53 //      (profilingStack->frames[0..profilingStack->stackSize()]).
     54 //  (3) Thread A is resumed.
     55 //
     56 // Thread suspension is achieved using platform-specific APIs; refer to each
     57 // platform's Sampler::SuspendAndSampleAndResumeThread implementation in
     58 // platform-*.cpp for details.
     59 //
     60 // When the thread is suspended, the values in profilingStack->stackPointer and
     61 // in the stack frame range
     62 // profilingStack->frames[0..profilingStack->stackPointer] need to be in a
      63 // consistent state, so that thread S does not read partially-constructed stack
     64 // frames. More specifically, we have two requirements:
     65 //  (1) When adding a new frame at the top of the stack, its ProfilingStackFrame
     66 //      data needs to be put in place *before* the stackPointer is incremented,
     67 //      and the compiler + CPU need to know that this order matters.
      68 //  (2) When popping a frame from the stack and then preparing the
      69 //      ProfilingStackFrame data for the next frame that is about to be pushed,
      70 //      the decrement of the stackPointer in pop() needs to happen *before* the
      71 //      ProfilingStackFrame for the new frame is being populated, and the
     72 //      compiler + CPU need to know that this order matters.
     73 //
     74 // We can express the relevance of these orderings in multiple ways.
     75 // Option A is to make stackPointer an atomic with SequentiallyConsistent
     76 // memory ordering. This would ensure that no writes in thread A would be
     77 // reordered across any writes to stackPointer, which satisfies requirements
     78 // (1) and (2) at the same time. Option A is the simplest.
     79 // Option B is to use ReleaseAcquire memory ordering both for writes to
     80 // stackPointer *and* for writes to ProfilingStackFrame fields. Release-stores
     81 // ensure that all writes that happened *before this write in program order* are
     82 // not reordered to happen after this write. ReleaseAcquire ordering places no
     83 // requirements on the ordering of writes that happen *after* this write in
     84 // program order.
     85 // Using release-stores for writes to stackPointer expresses requirement (1),
     86 // and using release-stores for writes to the ProfilingStackFrame fields
     87 // expresses requirement (2).
     88 //
     89 // Option B is more complicated than option A, but has much better performance
     90 // on x86/64: In a microbenchmark run on a Macbook Pro from 2017, switching
     91 // from option A to option B reduced the overhead of pushing+popping a
     92 // ProfilingStackFrame by 10 nanoseconds.
     93 // On x86/64, release-stores require no explicit hardware barriers or lock
     94 // instructions.
     95 // On ARM/64, option B may be slower than option A, because the compiler will
     96 // generate hardware barriers for every single release-store instead of just
     97 // for the writes to stackPointer. However, the actual performance impact of
     98 // this has not yet been measured on ARM, so we're currently using option B
     99 // everywhere. This is something that we may want to change in the future once
    100 // we've done measurements.
    101 
    102 namespace js {
    103 
    104 // A call stack can be specified to the JS engine such that all JS entry/exits
    105 // to functions push/pop a stack frame to/from the specified stack.
    106 //
    107 // For more detailed information, see vm/GeckoProfiler.h.
    108 //
class ProfilingStackFrame {
  // A ProfilingStackFrame represents either a label frame or a JS frame
  // (or an sp marker frame; see the Flags enum below for the three kinds).

  // WARNING WARNING WARNING
  //
  // All the fields below are Atomic<...,ReleaseAcquire>. This is needed so
  // that writes to these fields are release-writes, which ensures that
  // earlier writes in this thread don't get reordered after the writes to
  // these fields. In particular, the decrement of the stack pointer in
  // ProfilingStack::pop() is a write that *must* happen before the values in
  // this ProfilingStackFrame are changed. Otherwise, the sampler thread might
  // see an inconsistent state where the stack pointer still points to a
  // ProfilingStackFrame which has already been popped off the stack and whose
  // fields have now been partially repopulated with new values.
  // See the "Concurrency considerations" paragraph at the top of this file
  // for more details.

  // Descriptive label for this stack frame. Must be a static string! Can be
  // an empty string, but not a null pointer.
  mozilla::Atomic<const char*, mozilla::ReleaseAcquire> label_;

  // An additional descriptive string of this frame which is combined with
  // |label_| in profiler output. Need not be (and usually isn't) static. Can
  // be null.
  mozilla::Atomic<const char*, mozilla::ReleaseAcquire> dynamicString_;

  // Stack pointer for non-JS stack frames, the script pointer otherwise.
  mozilla::Atomic<void*, mozilla::ReleaseAcquire> spOrScript;

  // ID of the JS Realm for JS stack frames.
  // Must not be used on non-JS frames; it'll contain either the default 0,
  // or a leftover value from a previous JS stack frame that was using this
  // ProfilingStackFrame object.
  mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> realmID_;

  // ID of the script source for JS stack frames.
  // Must not be used on non-JS frames; it'll contain either the default 0,
  // or a leftover value from a previous JS stack frame that was using this
  // ProfilingStackFrame object.
  mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> sourceId_;

  // The bytecode offset for JS stack frames.
  // Must not be used on non-JS frames; it'll contain either the default 0,
  // or a leftover value from a previous JS stack frame that was using this
  // ProfilingStackFrame object.
  mozilla::Atomic<int32_t, mozilla::ReleaseAcquire> pcOffsetIfJS_;

  // Bits 0..15 hold the Flags (see FLAGS_BITCOUNT / FLAGS_MASK below).
  // Bits 16..31 hold the JS::ProfilingCategoryPair.
  mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> flagsAndCategoryPair_;

  // Translates |aPc| into an offset within |aScript|'s bytecode, or
  // NullPCOffset for a null pc. Defined out of line because the layout of
  // JSScript is not known in this header.
  static int32_t pcToOffset(JSScript* aScript, jsbytecode* aPc);

 public:
  ProfilingStackFrame() = default;

  // Copy assignment. Each atomic field is read into a non-atomic local first
  // so that the transfer is exactly one load and one release-store per field;
  // the fields themselves cannot be copied as a unit because Atomic<> is not
  // copyable.
  ProfilingStackFrame& operator=(const ProfilingStackFrame& other) {
    label_ = other.label();
    dynamicString_ = other.dynamicString();
    void* spScript = other.spOrScript;
    spOrScript = spScript;
    int32_t offsetIfJS = other.pcOffsetIfJS_;
    pcOffsetIfJS_ = offsetIfJS;
    uint64_t realmID = other.realmID_;
    realmID_ = realmID;
    uint32_t sourceId = other.sourceId_;
    sourceId_ = sourceId;
    uint32_t flagsAndCategory = other.flagsAndCategoryPair_;
    flagsAndCategoryPair_ = flagsAndCategory;
    return *this;
  }

  // Reserve up to 16 bits for flags, and 16 for category pair.
  enum class Flags : uint32_t {
    // The first three flags describe the kind of the frame and are
    // mutually exclusive. (We still give them individual bits for
    // simplicity.)

    // A regular label frame. These usually come from AutoProfilerLabel.
    IS_LABEL_FRAME = 1 << 0,

    // A special frame indicating the start of a run of JS profiling stack
    // frames. IS_SP_MARKER_FRAME frames are ignored, except for the sp
    // field. These frames are needed to get correct ordering between JS
    // and LABEL frames because JS frames don't carry sp information.
    // SP is short for "stack pointer".
    IS_SP_MARKER_FRAME = 1 << 1,

    // A JS frame.
    IS_JS_FRAME = 1 << 2,

    // An interpreter JS frame that has OSR-ed into baseline. IS_JS_FRAME
    // frames can have this flag set and unset during their lifetime.
    // JS_OSR frames are ignored.
    JS_OSR = 1 << 3,

    // The next three are mutually exclusive.
    // By default, for profiling stack frames that have both a label and a
    // dynamic string, the two strings are combined into one string of the
    // form "<label> <dynamicString>" during JSON serialization. The
    // following flags can be used to change this preset.
    STRING_TEMPLATE_METHOD = 1 << 4,  // "<label>.<dynamicString>"
    STRING_TEMPLATE_GETTER = 1 << 5,  // "get <label>.<dynamicString>"
    STRING_TEMPLATE_SETTER = 1 << 6,  // "set <label>.<dynamicString>"

    // If set, causes this stack frame to be marked as "relevantForJS" in
    // the profile JSON, which will make it show up in the "JS only" call
    // tree view.
    RELEVANT_FOR_JS = 1 << 7,

    // If set, causes the label on this ProfilingStackFrame to be ignored
    // and to be replaced by the subcategory's label.
    LABEL_DETERMINED_BY_CATEGORY_PAIR = 1 << 8,

    // Frame dynamic string does not contain user data.
    NONSENSITIVE = 1 << 9,

    // A JS Baseline Interpreter frame.
    IS_BLINTERP_FRAME = 1 << 10,

    FLAGS_BITCOUNT = 16,
    FLAGS_MASK = (1 << FLAGS_BITCOUNT) - 1
  };

  static_assert(
      uint32_t(JS::ProfilingCategoryPair::LAST) <=
          (UINT32_MAX >> uint32_t(Flags::FLAGS_BITCOUNT)),
      "Too many category pairs to fit into u32 with together with the "
      "reserved bits for the flags");

  bool isLabelFrame() const {
    return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::IS_LABEL_FRAME);
  }

  bool isNonsensitive() const {
    return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::NONSENSITIVE);
  }

  bool isSpMarkerFrame() const {
    return uint32_t(flagsAndCategoryPair_) &
           uint32_t(Flags::IS_SP_MARKER_FRAME);
  }

  bool isJsFrame() const {
    return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::IS_JS_FRAME);
  }

  bool isJsBlinterpFrame() const {
    return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::IS_BLINTERP_FRAME);
  }

  bool isOSRFrame() const {
    return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::JS_OSR);
  }

  // Sets or clears the JS_OSR bit, leaving all other flag/category bits
  // untouched (single read-modify-write of the atomic via a local).
  void setIsOSRFrame(bool isOSR) {
    if (isOSR) {
      flagsAndCategoryPair_ =
          uint32_t(flagsAndCategoryPair_) | uint32_t(Flags::JS_OSR);
    } else {
      flagsAndCategoryPair_ =
          uint32_t(flagsAndCategoryPair_) & ~uint32_t(Flags::JS_OSR);
    }
  }

  // Replaces the category pair of a label frame, preserving its flag bits.
  void setLabelCategory(JS::ProfilingCategoryPair aCategoryPair) {
    MOZ_ASSERT(isLabelFrame());
    flagsAndCategoryPair_ =
        (uint32_t(aCategoryPair) << uint32_t(Flags::FLAGS_BITCOUNT)) | flags();
  }

  // Returns the frame label; if LABEL_DETERMINED_BY_CATEGORY_PAIR is set,
  // the stored label_ is ignored and the category pair's label is used
  // instead.
  const char* label() const {
    uint32_t flagsAndCategoryPair = flagsAndCategoryPair_;
    if (flagsAndCategoryPair &
        uint32_t(Flags::LABEL_DETERMINED_BY_CATEGORY_PAIR)) {
      auto categoryPair = JS::ProfilingCategoryPair(
          flagsAndCategoryPair >> uint32_t(Flags::FLAGS_BITCOUNT));
      return JS::GetProfilingCategoryPairInfo(categoryPair).mLabel;
    }
    return label_;
  }

  const char* dynamicString() const { return dynamicString_; }

  // Initializes this frame as a label frame. flagsAndCategoryPair_ is written
  // last of the flag-bearing fields so the frame kind becomes visible only
  // once label/sp are in place (all stores are release-stores).
  void initLabelFrame(const char* aLabel, const char* aDynamicString, void* sp,
                      JS::ProfilingCategoryPair aCategoryPair,
                      uint32_t aFlags) {
    label_ = aLabel;
    dynamicString_ = aDynamicString;
    spOrScript = sp;
    // pcOffsetIfJS_ is not set and must not be used on label frames.
    flagsAndCategoryPair_ =
        uint32_t(Flags::IS_LABEL_FRAME) |
        (uint32_t(aCategoryPair) << uint32_t(Flags::FLAGS_BITCOUNT)) | aFlags;
    sourceId_ = 0;
    MOZ_ASSERT(isLabelFrame());
  }

  // Initializes this frame as an sp marker frame; only the sp field is
  // meaningful on such frames (category is fixed to OTHER).
  void initSpMarkerFrame(void* sp) {
    label_ = "";
    dynamicString_ = nullptr;
    spOrScript = sp;
    // pcOffsetIfJS_ is not set and must not be used on sp marker frames.
    flagsAndCategoryPair_ = uint32_t(Flags::IS_SP_MARKER_FRAME) |
                            (uint32_t(JS::ProfilingCategoryPair::OTHER)
                             << uint32_t(Flags::FLAGS_BITCOUNT));
    MOZ_ASSERT(isSpMarkerFrame());
  }

  // Initializes this frame as a JS frame. Category and extra flags are
  // template parameters so the combined flags word is a compile-time
  // constant.
  template <JS::ProfilingCategoryPair Category, uint32_t ExtraFlags = 0>
  void initJsFrame(const char* aLabel, const char* aDynamicString,
                   JSScript* aScript, jsbytecode* aPc, uint64_t aRealmID,
                   uint32_t aSourceId) {
    label_ = aLabel;
    dynamicString_ = aDynamicString;
    spOrScript = aScript;
    pcOffsetIfJS_ = pcToOffset(aScript, aPc);
    realmID_ = aRealmID;
    sourceId_ = aSourceId;
    flagsAndCategoryPair_ =
        (uint32_t(Category) << uint32_t(Flags::FLAGS_BITCOUNT)) |
        uint32_t(Flags::IS_JS_FRAME) | ExtraFlags;
    MOZ_ASSERT(isJsFrame());
  }

  // Returns just the flag bits (low FLAGS_BITCOUNT bits) of the packed word.
  uint32_t flags() const {
    return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::FLAGS_MASK);
  }

  // Returns just the category pair (high bits) of the packed word.
  JS::ProfilingCategoryPair categoryPair() const {
    return JS::ProfilingCategoryPair(flagsAndCategoryPair_ >>
                                     uint32_t(Flags::FLAGS_BITCOUNT));
  }

  uint64_t realmID() const { return realmID_; }

  // The stack address stored by non-JS frames (spOrScript holds a script
  // pointer on JS frames, hence the assertion).
  void* stackAddress() const {
    MOZ_ASSERT(!isJsFrame());
    return spOrScript;
  }

  JS_PUBLIC_API JSScript* script() const;

  JS_PUBLIC_API JSFunction* function() const;

  // Note that the pointer returned might be invalid.
  JSScript* rawScript() const {
    MOZ_ASSERT(isJsFrame());
    void* script = spOrScript;
    return static_cast<JSScript*>(script);
  }

  // We can't know the layout of JSScript, so look in vm/GeckoProfiler.cpp.
  JS_PUBLIC_API jsbytecode* pc() const;
  void setPC(jsbytecode* pc);

  void trace(JSTracer* trc);

  JS_PUBLIC_API uint32_t sourceId() const;

  // The offset of a pc into a script's code can actually be 0, so to
  // signify a nullptr pc, use a -1 index. This is checked against in
  // pc() and setPC() to set/get the right pc.
  static const int32_t NullPCOffset = -1;
};
    372 
    373 JS_PUBLIC_API void SetContextProfilingStack(JSContext* cx,
    374                                            ProfilingStack* profilingStack);
    375 
    376 // GetContextProfilingStack also exists, but it's defined in RootingAPI.h.
    377 
    378 JS_PUBLIC_API void EnableContextProfilingStack(JSContext* cx, bool enabled);
    379 
    380 JS_PUBLIC_API void RegisterContextProfilerMarkers(
    381    JSContext* cx,
    382    void (*eventMarker)(mozilla::MarkerCategory, const char*, const char*),
    383    void (*intervalMarker)(mozilla::MarkerCategory, const char*,
    384                           mozilla::TimeStamp, const char*),
    385    void (*flowMarker)(mozilla::MarkerCategory, const char*, uint64_t),
    386    void (*terminatingFlowMarker)(mozilla::MarkerCategory, const char*,
    387                                  uint64_t));
    388 
    389 }  // namespace js
    390 
    391 namespace JS {
    392 
    393 typedef ProfilingStack* (*RegisterThreadCallback)(const char* threadName,
    394                                                  void* stackBase);
    395 
    396 typedef void (*UnregisterThreadCallback)();
    397 
     398 // registerThread and unregisterThread callbacks are functions which are called
    399 // by other threads without any locking mechanism.
    400 JS_PUBLIC_API void SetProfilingThreadCallbacks(
    401    RegisterThreadCallback registerThread,
    402    UnregisterThreadCallback unregisterThread);
    403 
    404 }  // namespace JS
    405 
    406 // Each thread has its own ProfilingStack. That thread modifies the
    407 // ProfilingStack, pushing and popping elements as necessary.
    408 //
    409 // The ProfilingStack is also read periodically by the profiler's sampler
    410 // thread. This happens only when the thread that owns the ProfilingStack is
    411 // suspended. So there are no genuine parallel accesses.
    412 //
    413 // However, it is possible for pushing/popping to be interrupted by a periodic
    414 // sample. Because of this, we need pushing/popping to be effectively atomic.
    415 //
    416 // - When pushing a new frame, we increment the stack pointer -- making the new
    417 //   frame visible to the sampler thread -- only after the new frame has been
    418 //   fully written. The stack pointer is Atomic<uint32_t,ReleaseAcquire>, so
    419 //   the increment is a release-store, which ensures that this store is not
    420 //   reordered before the writes of the frame.
    421 //
    422 // - When popping an old frame, the only operation is the decrementing of the
    423 //   stack pointer, which is obviously atomic.
    424 //
class JS_PUBLIC_API ProfilingStack final {
 public:
  ProfilingStack() = default;

  // Out-of-line destructor; the frames buffer is owned by this object (see
  // the comment on |frames| below).
  ~ProfilingStack();

  // Pushes a label frame. The frame's data is written *before* the
  // stackPointer increment, so the sampler thread never observes a
  // partially-initialized frame (see "Concurrency considerations" above).
  void pushLabelFrame(const char* label, const char* dynamicString, void* sp,
                      JS::ProfilingCategoryPair categoryPair,
                      uint32_t flags = 0) {
    // This thread is the only one that ever changes the value of
    // stackPointer.
    // Store the value of the atomic in a non-atomic local variable so that
    // the compiler won't generate two separate loads from the atomic for
    // the size check and the frames[] array indexing operation.
    uint32_t stackPointerVal = stackPointer;

    if (MOZ_UNLIKELY(stackPointerVal >= capacity)) {
      ensureCapacitySlow();
    }
    frames[stackPointerVal].initLabelFrame(label, dynamicString, sp,
                                           categoryPair, flags);

    // This must happen at the end! The compiler will not reorder this
    // update because stackPointer is Atomic<..., ReleaseAcquire>, so the
    // writes above will not be reordered below the stackPointer store.
    // Do the read and the write as two separate statements, in order to
    // make it clear that we don't need an atomic increment, which would be
    // more expensive on x86 than the separate operations done here.
    // However, don't use stackPointerVal here; instead, allow the compiler
    // to turn this store into a non-atomic increment instruction which
    // takes up less code size.
    stackPointer = stackPointer + 1;
  }

  // Pushes an sp marker frame (carries only the sp; used to order JS frames
  // relative to label frames).
  void pushSpMarkerFrame(void* sp) {
    uint32_t oldStackPointer = stackPointer;

    if (MOZ_UNLIKELY(oldStackPointer >= capacity)) {
      ensureCapacitySlow();
    }
    frames[oldStackPointer].initSpMarkerFrame(sp);

    // This must happen at the end, see the comment in pushLabelFrame.
    stackPointer = oldStackPointer + 1;
  }

  // Pushes a JS interpreter frame. Same frame-before-pointer ordering as
  // pushLabelFrame.
  void pushJsFrame(const char* label, const char* dynamicString,
                   JSScript* script, jsbytecode* pc, uint64_t aRealmID,
                   uint32_t aSourceId = 0) {
    // This thread is the only one that ever changes the value of
    // stackPointer. Only load the atomic once.
    uint32_t oldStackPointer = stackPointer;

    if (MOZ_UNLIKELY(oldStackPointer >= capacity)) {
      ensureCapacitySlow();
    }
    frames[oldStackPointer]
        .initJsFrame<JS::ProfilingCategoryPair::JS_Interpreter>(
            label, dynamicString, script, pc, aRealmID, aSourceId);

    // This must happen at the end, see the comment in pushLabelFrame.
    stackPointer = stackPointer + 1;
  }

  // Pops the top frame. The release-store of the decremented stackPointer
  // must happen before the popped slot is ever rewritten; see the WARNING on
  // ProfilingStackFrame's fields.
  void pop() {
    MOZ_ASSERT(stackPointer > 0);
    // Do the read and the write as two separate statements, in order to
    // make it clear that we don't need an atomic decrement, which would be
    // more expensive on x86 than the separate operations done here.
    // This thread is the only one that ever changes the value of
    // stackPointer.
    uint32_t oldStackPointer = stackPointer;
    stackPointer = oldStackPointer - 1;
  }

  // Number of frames currently on the stack (== stackPointer).
  uint32_t stackSize() const { return stackPointer; }
  uint32_t stackCapacity() const { return capacity; }

 private:
  // Out of line path for expanding the buffer, since otherwise this would get
  // inlined in every DOM WebIDL call.
  MOZ_COLD void ensureCapacitySlow();

  // No copying.
  ProfilingStack(const ProfilingStack&) = delete;
  void operator=(const ProfilingStack&) = delete;

  // No moving either.
  ProfilingStack(ProfilingStack&&) = delete;
  void operator=(ProfilingStack&&) = delete;

  // Number of allocated slots in |frames|. Only grown, via
  // ensureCapacitySlow().
  uint32_t capacity = 0;

 public:
  // The pointer to the stack frames, this is read from the profiler thread and
  // written from the current thread.
  //
  // This is effectively a unique pointer.
  mozilla::Atomic<js::ProfilingStackFrame*, mozilla::SequentiallyConsistent>
      frames{nullptr};

  // This may exceed the capacity, so instead use the stackSize() method to
  // determine the number of valid frames in stackFrames. When this is less
  // than stackCapacity(), it refers to the first free stackframe past the top
  // of the in-use stack (i.e. frames[stackPointer - 1] is the top stack
  // frame).
  //
  // WARNING WARNING WARNING
  //
  // This is an atomic variable that uses ReleaseAcquire memory ordering.
  // See the "Concurrency considerations" paragraph at the top of this file
  // for more details.
  mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> stackPointer{0};
};
    539 
    540 namespace js {
    541 
    542 class AutoGeckoProfilerEntry;
    543 class GeckoProfilerEntryMarker;
    544 class GeckoProfilerBaselineOSRMarker;
    545 
// Per-thread bookkeeping that connects the JS engine's instrumentation
// (enter/exit/updatePC) to that thread's ProfilingStack.
class GeckoProfilerThread {
  friend class AutoGeckoProfilerEntry;
  friend class GeckoProfilerEntryMarker;
  friend class GeckoProfilerBaselineOSRMarker;

  // The thread's profiling stack; null until setProfilingStack() is called
  // (see infraInstalled()). Not owned by this class.
  ProfilingStack* profilingStack_;

  // Same as profilingStack_ if the profiler is currently active, otherwise
  // null.
  ProfilingStack* profilingStackIfEnabled_;

 public:
  GeckoProfilerThread();

  // Current stack depth; only valid once the infrastructure is installed.
  uint32_t stackPointer() {
    MOZ_ASSERT(infraInstalled());
    return profilingStack_->stackPointer;
  }
  // Raw pointer to the frames array of the thread's ProfilingStack.
  ProfilingStackFrame* stack() { return profilingStack_->frames; }
  ProfilingStack* getProfilingStack() { return profilingStack_; }
  ProfilingStack* getProfilingStackIfEnabled() {
    return profilingStackIfEnabled_;
  }

  /*
   * True if the profiler infrastructure is setup.  Should be true in builds
   * that include profiler support except during early startup or late
   * shutdown.  Unrelated to the presence of the Gecko Profiler addon.
   */
  bool infraInstalled() { return profilingStack_ != nullptr; }

  void setProfilingStack(ProfilingStack* profilingStack, bool enabled);
  // Toggles whether profilingStackIfEnabled_ mirrors profilingStack_.
  void enable(bool enable) {
    profilingStackIfEnabled_ = enable ? profilingStack_ : nullptr;
  }
  void trace(JSTracer* trc);

  /*
   * Functions which are the actual instrumentation to track run information
   *
   *   - enter: a function has started to execute
   *   - updatePC: updates the pc information about where a function
   *               is currently executing
   *   - exit: this function has ceased execution, and no further
   *           entries/exits will be made
   */
  bool enter(JSContext* cx, JSScript* script);
  void exit(JSContext* cx, JSScript* script);
  inline void updatePC(JSContext* cx, JSScript* script, jsbytecode* pc);
};
    596 
    597 }  // namespace js
    598 
    599 #endif /* js_ProfilingStack_h */