tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

ProfileChunkedBuffer.h (63088B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef ProfileChunkedBuffer_h
      8 #define ProfileChunkedBuffer_h
      9 
     10 #include "mozilla/Attributes.h"
     11 #include "mozilla/BaseProfilerDetail.h"
     12 #include "mozilla/NotNull.h"
     13 #include "mozilla/ProfileBufferChunkManager.h"
     14 #include "mozilla/ProfileBufferChunkManagerSingle.h"
     15 #include "mozilla/ProfileBufferEntrySerialization.h"
     16 #include "mozilla/ProfileChunkedBufferDetail.h"
     17 #include "mozilla/RefPtr.h"
     18 #include "mozilla/ScopeExit.h"
     19 
     20 #include <utility>
     21 
     22 #ifdef DEBUG
     23 #  include <cstdio>
     24 #endif
     25 
     26 namespace mozilla {
     27 
     28 // Thread-safe buffer that can store blocks of different sizes during defined
     29 // sessions, using Chunks (from a ChunkManager) as storage.
     30 //
     31 // Each *block* contains an *entry* and the entry size:
     32 // [ entry_size | entry ] [ entry_size | entry ] ...
     33 //
     34 // *In-session* is a period of time during which `ProfileChunkedBuffer` allows
     35 // reading and writing.
     36 // *Out-of-session*, the `ProfileChunkedBuffer` object is still valid, but
     37 // contains no data, and gracefully denies accesses.
     38 //
     39 // To write an entry, the buffer reserves a block of sufficient size (to contain
     40 // user data of predetermined size), writes the entry size, and lets the caller
     41 // fill the entry contents using a ProfileBufferEntryWriter. E.g.:
     42 // ```
     43 // ProfileChunkedBuffer cb(...);
     44 // cb.ReserveAndPut([]() { return sizeof(123); },
     45 //                  [&](Maybe<ProfileBufferEntryWriter>& aEW) {
     46 //                    if (aEW) { aEW->WriteObject(123); }
     47 //                  });
     48 // ```
     49 // Other `Put...` functions may be used as shortcuts for simple entries.
     50 // The objects given to the caller's callbacks should only be used inside the
     51 // callbacks and not stored elsewhere, because they keep their own references to
     52 // chunk memory and therefore should not live longer.
     53 // Different type of objects may be serialized into an entry, see
     54 // `ProfileBufferEntryWriter::Serializer` for more information.
     55 //
     56 // When reading data, the buffer iterates over blocks (it knows how to read the
     57 // entry size, and therefore move to the next block), and lets the caller read
     58 // the entry inside of each block. E.g.:
     59 // ```
     60 // cb.ReadEach([](ProfileBufferEntryReader& aER) {
     61 //   /* Use ProfileBufferEntryReader functions to read serialized objects. */
     62 //   int n = aER.ReadObject<int>();
     63 // });
     64 // ```
     65 // Different type of objects may be deserialized from an entry, see
     66 // `ProfileBufferEntryReader::Deserializer` for more information.
     67 //
     68 // Writers may retrieve the block index corresponding to an entry
     69 // (`ProfileBufferBlockIndex` is an opaque type preventing the user from easily
     70 // modifying it). That index may later be used with `ReadAt` to get back to the
     71 // entry in that particular block -- if it still exists.
     72 class ProfileChunkedBuffer {
     73 public:
     74  using Byte = ProfileBufferChunk::Byte;
     75  using Length = ProfileBufferChunk::Length;
     76 
     77  enum class ThreadSafety { WithoutMutex, WithMutex };
     78 
     79  // Default constructor starts out-of-session (nothing to read or write).
     80  explicit ProfileChunkedBuffer(ThreadSafety aThreadSafety)
     81      : mMutex(aThreadSafety != ThreadSafety::WithoutMutex) {}
     82 
     83  // Start in-session with external chunk manager.
     84  ProfileChunkedBuffer(ThreadSafety aThreadSafety,
     85                       ProfileBufferChunkManager& aChunkManager)
     86      : mMutex(aThreadSafety != ThreadSafety::WithoutMutex) {
     87    SetChunkManager(aChunkManager);
     88  }
     89 
     90  // Start in-session with owned chunk manager.
     91  ProfileChunkedBuffer(ThreadSafety aThreadSafety,
     92                       UniquePtr<ProfileBufferChunkManager>&& aChunkManager)
     93      : mMutex(aThreadSafety != ThreadSafety::WithoutMutex) {
     94    SetChunkManager(std::move(aChunkManager));
     95  }
     96 
     97  ~ProfileChunkedBuffer() {
     98    // Do proper clean-up by resetting the chunk manager.
     99    ResetChunkManager();
    100  }
    101 
  // Whether this buffer was created with an activated mutex.
  // This cannot change during the lifetime of this buffer, so there's no need
  // to lock.
  [[nodiscard]] bool IsThreadSafe() const { return mMutex.IsActivated(); }
    105 
    106  [[nodiscard]] bool IsInSession() const {
    107    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    108    return !!mChunkManager;
    109  }
    110 
  // Stop using the current chunk manager.
  // If we own the current chunk manager, it will be destroyed.
  // This will always clear currently-held chunks, if any.
  void ResetChunkManager() {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    // The locked overload's return value is intentionally discarded here.
    (void)ResetChunkManager(lock);
  }
    118 
  // Set the current chunk manager.
  // The caller is responsible for keeping the chunk manager alive as long as
  // it's used here (until the next (Re)SetChunkManager, or
  // ~ProfileChunkedBuffer).
  void SetChunkManager(ProfileBufferChunkManager& aChunkManager) {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    // Drop any previous manager (and currently-held chunks) before installing
    // the new one.
    (void)ResetChunkManager(lock);
    SetChunkManager(aChunkManager, lock);
  }
    128 
  // Set the current chunk manager, and keep ownership of it.
  // A null UniquePtr leaves the buffer out-of-session.
  void SetChunkManager(UniquePtr<ProfileBufferChunkManager>&& aChunkManager) {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    (void)ResetChunkManager(lock);
    // Keep the manager alive as a member, then install it like an external one.
    mOwnedChunkManager = std::move(aChunkManager);
    if (mOwnedChunkManager) {
      SetChunkManager(*mOwnedChunkManager, lock);
    }
  }
    138 
    139  // Set the current chunk manager, except if it's already the one provided.
    140  // The caller is responsible for keeping the chunk manager alive as along as
    141  // it's used here (until the next (Re)SetChunkManager, or
    142  // ~ProfileChunkedBuffer).
    143  void SetChunkManagerIfDifferent(ProfileBufferChunkManager& aChunkManager) {
    144    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    145    if (!mChunkManager || mChunkManager != &aChunkManager) {
    146      (void)ResetChunkManager(lock);
    147      SetChunkManager(aChunkManager, lock);
    148    }
    149  }
    150 
  // Clear the contents of this buffer, ready to receive new chunks.
  // Note that memory is not freed: No chunks are destroyed, they are all
  // recycled.
  // Also the range doesn't reset, instead it continues at some point after the
  // previous range. This may be useful if the caller may be keeping indexes
  // into old chunks that have now been cleared; using these indexes will fail
  // gracefully (instead of potentially pointing into new data).
  void Clear() {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    if (MOZ_UNLIKELY(!mChunkManager)) {
      // Out-of-session.
      return;
    }

    // Collapse the range just past the previously-used range, and reset the
    // statistics counters.
    mRangeStart = mRangeEnd = mNextChunkRangeStart;
    mPushedBlockCount = 0;
    mClearedBlockCount = 0;
    mFailedPutBytes = 0;

    // Recycle all released chunks as "next" chunks. This will reduce the number
    // of future allocations. Also, when using ProfileBufferChunkManagerSingle,
    // this retrieves the one chunk if it was released.
    UniquePtr<ProfileBufferChunk> releasedChunks =
        mChunkManager->GetExtantReleasedChunks();
    if (releasedChunks) {
      // Released chunks should be in the "Done" state, they need to be marked
      // "recycled" before they can be reused.
      for (ProfileBufferChunk* chunk = releasedChunks.get(); chunk;
           chunk = chunk->GetNext()) {
        chunk->MarkRecycled();
      }
      mNextChunks = ProfileBufferChunk::Join(std::move(mNextChunks),
                                             std::move(releasedChunks));
    }

    if (mCurrentChunk) {
      // We already have a current chunk (empty or in-use), mark it "done" and
      // then "recycled", ready to be reused.
      mCurrentChunk->MarkDone();
      mCurrentChunk->MarkRecycled();
    } else {
      if (!mNextChunks) {
        // No current chunk, and no next chunks to recycle, nothing more to do.
        // The next "Put" operation will try to allocate a chunk as needed.
        return;
      }

      // No current chunk, take the first next chunk (keeping the rest queued).
      mCurrentChunk = std::exchange(mNextChunks, mNextChunks->ReleaseNext());
    }

    // Here, there was already a current chunk, or one has just been taken.
    // Make sure it's ready to receive new entries.
    InitializeCurrentChunk(lock);
  }
    206 
    207  // Buffer maximum length in bytes.
    208  Maybe<size_t> BufferLength() const {
    209    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    210    if (!mChunkManager) {
    211      return Nothing{};
    212    }
    213    return Some(mChunkManager->MaxTotalSize());
    214  }
    215 
  // Heap memory used by this buffer (chunks, manager, etc.), excluding the
  // object itself; thread-safe wrapper around the locked overload.
  [[nodiscard]] size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    return SizeOfExcludingThis(aMallocSizeOf, lock);
  }
    220 
  // Like SizeOfExcludingThis, but also counts this object's own allocation.
  [[nodiscard]] size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf, lock);
  }
    225 
  // Snapshot of the buffer state, as returned by `GetState()`.
  struct State {
    // Index to/before the first block.
    ProfileBufferIndex mRangeStart = 1;

    // Index past the last block. Equals mRangeStart if empty.
    ProfileBufferIndex mRangeEnd = 1;

    // Number of blocks that have been pushed into this buffer
    // (including blocks that have since been cleared).
    uint64_t mPushedBlockCount = 0;

    // Number of blocks that have been removed from this buffer.
    // Note: Live entries = pushed - cleared.
    uint64_t mClearedBlockCount = 0;

    // Number of bytes that could not be put into this buffer
    // (e.g., failed reservations).
    uint64_t mFailedPutBytes = 0;
  };
    244 
    245  // Get a snapshot of the current state.
    246  // When out-of-session, mFirstReadIndex==mNextWriteIndex, and
    247  // mPushedBlockCount==mClearedBlockCount==0.
    248  // Note that these may change right after this thread-safe call, so they
    249  // should only be used for statistical purposes.
    250  [[nodiscard]] State GetState() const {
    251    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    252    return {mRangeStart, mRangeEnd, mPushedBlockCount, mClearedBlockCount,
    253            mFailedPutBytes};
    254  }
    255 
  // If in-session, return the start TimeStamp of the earliest chunk.
  // If out-of-session, return a null TimeStamp.
  [[nodiscard]] TimeStamp GetEarliestChunkStartTimeStamp() const {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    if (MOZ_UNLIKELY(!mChunkManager)) {
      // Out-of-session.
      return {};
    }
    return mChunkManager->PeekExtantReleasedChunks(
        [&](const ProfileBufferChunk* aOldestChunk) -> TimeStamp {
          if (aOldestChunk) {
            // The oldest released chunk carries the earliest start timestamp.
            return aOldestChunk->ChunkHeader().mStartTimeStamp;
          }
          if (mCurrentChunk) {
            // No released chunks; fall back to the chunk currently in use.
            return mCurrentChunk->ChunkHeader().mStartTimeStamp;
          }
          // No chunks at all: null TimeStamp.
          return {};
        });
  }
    275 
    276  [[nodiscard]] bool IsEmpty() const {
    277    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    278    return mRangeStart == mRangeEnd;
    279  }
    280 
  // True if this buffer is already locked on this thread.
  // This should be used if some functions may call an already-locked buffer,
  // e.g.: Put -> memory hook -> profiler_add_native_allocation_marker -> Put.
  [[nodiscard]] bool IsThreadSafeAndLockedOnCurrentThread() const {
    return mMutex.IsActivatedAndLockedOnCurrentThread();
  }
    287 
  // Lock the buffer mutex and run the provided callback, returning its result.
  // This can be useful when the caller needs to explicitly lock down this
  // buffer, but not do anything else with it.
  template <typename Callback>
  auto LockAndRun(Callback&& aCallback) const {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    return std::forward<Callback>(aCallback)();
  }
    296 
  // Reserve a block that can hold an entry of the given `aCallbackEntryBytes()`
  // size, write the entry size (ULEB128-encoded), and invoke and return
  // `aCallback(Maybe<ProfileBufferEntryWriter>&)`.
  // Note: `aCallbackEntryBytes` is a callback instead of a simple value, to
  // delay this potentially-expensive computation until after we've checked that
  // we're in-session; use `Put(Length, Callback)` below if you know the size
  // already.
  template <typename CallbackEntryBytes, typename Callback>
  auto ReserveAndPut(CallbackEntryBytes&& aCallbackEntryBytes,
                     Callback&& aCallback)
      -> decltype(std::forward<Callback>(aCallback)(
          std::declval<Maybe<ProfileBufferEntryWriter>&>())) {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);

    // This can only be read in the 2nd lambda below after it has been written
    // by the first lambda.
    Length entryBytes;

    return ReserveAndPutRaw(
        [&]() {
          // Full block size = ULEB128-encoded size prefix + entry content.
          entryBytes = std::forward<CallbackEntryBytes>(aCallbackEntryBytes)();
          MOZ_ASSERT(entryBytes != 0, "Empty entries are not allowed");
          return ULEB128Size(entryBytes) + entryBytes;
        },
        [&](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
          if (aMaybeEntryWriter.isSome()) {
            // Write the size prefix; the caller's callback then fills exactly
            // `entryBytes` bytes of entry content.
            aMaybeEntryWriter->WriteULEB128(entryBytes);
            MOZ_ASSERT(aMaybeEntryWriter->RemainingBytes() == entryBytes);
          }
          return std::forward<Callback>(aCallback)(aMaybeEntryWriter);
        },
        lock);
  }
    330 
    331  template <typename Callback>
    332  auto Put(Length aEntryBytes, Callback&& aCallback) {
    333    return ReserveAndPut([aEntryBytes]() { return aEntryBytes; },
    334                         std::forward<Callback>(aCallback));
    335  }
    336 
    337  // Add a new entry copied from the given buffer, return block index.
    338  ProfileBufferBlockIndex PutFrom(const void* aSrc, Length aBytes) {
    339    return ReserveAndPut(
    340        [aBytes]() { return aBytes; },
    341        [aSrc, aBytes](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
    342          if (aMaybeEntryWriter.isNothing()) {
    343            return ProfileBufferBlockIndex{};
    344          }
    345          aMaybeEntryWriter->WriteBytes(aSrc, aBytes);
    346          return aMaybeEntryWriter->CurrentBlockIndex();
    347        });
    348  }
    349 
    350  // Add a new single entry with *all* given object (using a Serializer for
    351  // each), return block index.
    352  template <typename... Ts>
    353  ProfileBufferBlockIndex PutObjects(const Ts&... aTs) {
    354    static_assert(sizeof...(Ts) > 0,
    355                  "PutObjects must be given at least one object.");
    356    return ReserveAndPut(
    357        [&]() { return ProfileBufferEntryWriter::SumBytes(aTs...); },
    358        [&](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
    359          if (aMaybeEntryWriter.isNothing()) {
    360            return ProfileBufferBlockIndex{};
    361          }
    362          aMaybeEntryWriter->WriteObjects(aTs...);
    363          return aMaybeEntryWriter->CurrentBlockIndex();
    364        });
    365  }
    366 
  // Add a new entry copied from the given object, return block index.
  // Convenience single-object form of `PutObjects`.
  template <typename T>
  ProfileBufferBlockIndex PutObject(const T& aOb) {
    return PutObjects(aOb);
  }
    372 
  // Get *all* chunks related to this buffer, including extant chunks in its
  // ChunkManager, and yet-unused new/recycled chunks.
  // We don't expect this buffer to be used again, though it's still possible
  // and will allocate the first buffer when needed.
  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetAllChunks() {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    if (MOZ_UNLIKELY(!mChunkManager)) {
      // Out-of-session.
      return nullptr;
    }
    // Start with the chunks already released to the manager (oldest data).
    UniquePtr<ProfileBufferChunk> chunks =
        mChunkManager->GetExtantReleasedChunks();
    // Deal with any pending chunk request first; result deliberately ignored.
    (void)HandleRequestedChunk_IsPending(lock);
    if (MOZ_LIKELY(!!mCurrentChunk)) {
      // Finish the chunk currently being written so it can be appended too.
      mCurrentChunk->MarkDone();
      chunks =
          ProfileBufferChunk::Join(std::move(chunks), std::move(mCurrentChunk));
    }
    // Append the yet-unused next chunks, if any.
    chunks =
        ProfileBufferChunk::Join(std::move(chunks), std::move(mNextChunks));
    mChunkManager->ForgetUnreleasedChunks();
    // Collapse the range: this buffer no longer holds any data.
    mRangeStart = mRangeEnd = mNextChunkRangeStart;
    return chunks;
  }
    397 
    398  // True if the given index points inside the current chunk (up to the last
    399  // written byte).
    400  // This could be used to check if an index written now would have a good
    401  // chance of referring to a previous block that has not been destroyed yet.
    402  // But use with extreme care: This information may become incorrect right
    403  // after this function returns, because new writes could start a new chunk.
    404  [[nodiscard]] bool IsIndexInCurrentChunk(ProfileBufferIndex aIndex) const {
    405    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    406    if (MOZ_UNLIKELY(!mChunkManager || !mCurrentChunk)) {
    407      // Out-of-session, or no current chunk.
    408      return false;
    409    }
    410    return (mCurrentChunk->RangeStart() <= aIndex) &&
    411           (aIndex < (mCurrentChunk->RangeStart() +
    412                      mCurrentChunk->OffsetPastLastBlock()));
    413  }
    414 
    415  class Reader;
    416 
  // Class that can iterate through blocks and provide
  // `ProfileBufferEntryReader`s.
  // Created through `Reader`, lives within a lock guard lifetime.
  class BlockIterator {
   public:
#ifdef DEBUG
    ~BlockIterator() {
      // No BlockIterator should live outside of a mutexed call.
      mBuffer->mMutex.AssertCurrentThreadOwns();
    }
#endif  // DEBUG

    // Comparison with other iterator, mostly used in range-for loops.
    // Only meaningful between iterators over the same buffer.
    [[nodiscard]] bool operator==(const BlockIterator& aRhs) const {
      MOZ_ASSERT(mBuffer == aRhs.mBuffer);
      return mCurrentBlockIndex == aRhs.mCurrentBlockIndex;
    }
    [[nodiscard]] bool operator!=(const BlockIterator& aRhs) const {
      MOZ_ASSERT(mBuffer == aRhs.mBuffer);
      return mCurrentBlockIndex != aRhs.mCurrentBlockIndex;
    }

    // Advance to next BlockIterator.
    BlockIterator& operator++() {
      mBuffer->mMutex.AssertCurrentThreadOwns();
      // `mNextBlockPointer` already sits at the following block; its position
      // becomes the new current index, and the entry there is pre-read.
      mCurrentBlockIndex =
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
              mNextBlockPointer.GlobalRangePosition());
      mCurrentEntry =
          mNextBlockPointer.EntryReader(mNextBlockPointer.ReadEntrySize());
      return *this;
    }

    // Dereferencing creates a `ProfileBufferEntryReader` object for the entry
    // inside this block.
    // (Note: It would be possible to return a `const
    // ProfileBufferEntryReader&`, but not useful in practice, because in most
    // case the user will want to read, which is non-const.)
    [[nodiscard]] ProfileBufferEntryReader operator*() const {
      return mCurrentEntry;
    }

    // True if this iterator is just past the last entry
    // (no entry bytes remain to be read at the current position).
    [[nodiscard]] bool IsAtEnd() const {
      return mCurrentEntry.RemainingBytes() == 0;
    }

    // Can be used as reference to come back to this entry with `GetEntryAt()`.
    [[nodiscard]] ProfileBufferBlockIndex CurrentBlockIndex() const {
      return mCurrentBlockIndex;
    }

    // Index past the end of this block, which is the start of the next block.
    [[nodiscard]] ProfileBufferBlockIndex NextBlockIndex() const {
      MOZ_ASSERT(!IsAtEnd());
      return ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
          mNextBlockPointer.GlobalRangePosition());
    }

    // Index of the first block in the whole buffer.
    [[nodiscard]] ProfileBufferBlockIndex BufferRangeStart() const {
      mBuffer->mMutex.AssertCurrentThreadOwns();
      return ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
          mBuffer->mRangeStart);
    }

    // Index past the last block in the whole buffer.
    [[nodiscard]] ProfileBufferBlockIndex BufferRangeEnd() const {
      mBuffer->mMutex.AssertCurrentThreadOwns();
      return ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
          mBuffer->mRangeEnd);
    }

   private:
    // Only a Reader can instantiate a BlockIterator.
    friend class Reader;

    // `aChunks0`/`aChunks1` are the two chunk lists to traverse in order
    // (either may be null); `aBlockIndex` is the starting position (null
    // starts at the first block).
    BlockIterator(const ProfileChunkedBuffer& aBuffer,
                  const ProfileBufferChunk* aChunks0,
                  const ProfileBufferChunk* aChunks1,
                  ProfileBufferBlockIndex aBlockIndex)
        : mNextBlockPointer(aChunks0, aChunks1, aBlockIndex),
          mCurrentBlockIndex(
              ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                  mNextBlockPointer.GlobalRangePosition())),
          mCurrentEntry(
              mNextBlockPointer.EntryReader(mNextBlockPointer.ReadEntrySize())),
          mBuffer(WrapNotNull(&aBuffer)) {
      // No BlockIterator should live outside of a mutexed call.
      mBuffer->mMutex.AssertCurrentThreadOwns();
    }

    // Pointer to the block after the current one (used by operator++).
    profiler::detail::InChunkPointer mNextBlockPointer;

    ProfileBufferBlockIndex mCurrentBlockIndex;

    // Pre-read reader over the current block's entry.
    ProfileBufferEntryReader mCurrentEntry;

    // Using a non-null pointer instead of a reference, to allow copying.
    // This BlockIterator should only live inside one of the thread-safe
    // ProfileChunkedBuffer functions, for this reference to stay valid.
    NotNull<const ProfileChunkedBuffer*> mBuffer;
  };
    520 
  // Class that can create `BlockIterator`s (e.g., for range-for), or just
  // iterate through entries; lives within a lock guard lifetime.
  class MOZ_RAII Reader {
   public:
    // Non-copyable and non-movable: a Reader must not escape the mutexed call
    // that created it.
    Reader(const Reader&) = delete;
    Reader& operator=(const Reader&) = delete;
    Reader(Reader&&) = delete;
    Reader& operator=(Reader&&) = delete;

#ifdef DEBUG
    ~Reader() {
      // No Reader should live outside of a mutexed call.
      mBuffer.mMutex.AssertCurrentThreadOwns();
    }
#endif  // DEBUG

    // Index of the first block in the whole buffer.
    [[nodiscard]] ProfileBufferBlockIndex BufferRangeStart() const {
      mBuffer.mMutex.AssertCurrentThreadOwns();
      return ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
          mBuffer.mRangeStart);
    }

    // Index past the last block in the whole buffer.
    [[nodiscard]] ProfileBufferBlockIndex BufferRangeEnd() const {
      mBuffer.mMutex.AssertCurrentThreadOwns();
      return ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
          mBuffer.mRangeEnd);
    }

    // Iterators to the first and past-the-last blocks.
    // Compatible with range-for (see `ForEach` below as example).
    [[nodiscard]] BlockIterator begin() const {
      return BlockIterator(mBuffer, mChunks0, mChunks1, nullptr);
    }
    // Note that a `BlockIterator` at the `end()` should not be dereferenced, as
    // there is no actual block there!
    [[nodiscard]] BlockIterator end() const {
      return BlockIterator(mBuffer, nullptr, nullptr, nullptr);
    }

    // Get a `BlockIterator` at the given `ProfileBufferBlockIndex`, clamped to
    // the stored range. Note that a `BlockIterator` at the `end()` should not
    // be dereferenced, as there is no actual block there!
    [[nodiscard]] BlockIterator At(ProfileBufferBlockIndex aBlockIndex) const {
      if (aBlockIndex < BufferRangeStart()) {
        // Anything before the range (including null ProfileBufferBlockIndex) is
        // clamped at the beginning.
        return begin();
      }
      // Otherwise we at least expect the index to be valid (pointing exactly at
      // a live block, or just past the end.)
      return BlockIterator(mBuffer, mChunks0, mChunks1, aBlockIndex);
    }

    // Run `aCallback(ProfileBufferEntryReader&)` on each entry from first to
    // last. Callback should not store `ProfileBufferEntryReader`, as it may
    // become invalid after this thread-safe call.
    template <typename Callback>
    void ForEach(Callback&& aCallback) const {
      for (ProfileBufferEntryReader reader : *this) {
        aCallback(reader);
      }
    }

    // If this reader only points at one chunk with some data, this data will be
    // exposed as a single entry. Otherwise (no data, or more than one non-empty
    // chunk) an empty ProfileBufferEntryReader is returned.
    [[nodiscard]] ProfileBufferEntryReader SingleChunkDataAsEntry() {
      const ProfileBufferChunk* onlyNonEmptyChunk = nullptr;
      // Scan both chunk lists for chunks containing at least one block.
      for (const ProfileBufferChunk* chunkList : {mChunks0, mChunks1}) {
        for (const ProfileBufferChunk* chunk = chunkList; chunk;
             chunk = chunk->GetNext()) {
          if (chunk->OffsetFirstBlock() != chunk->OffsetPastLastBlock()) {
            if (onlyNonEmptyChunk) {
              // More than one non-empty chunk.
              return ProfileBufferEntryReader();
            }
            onlyNonEmptyChunk = chunk;
          }
        }
      }
      if (!onlyNonEmptyChunk) {
        // No non-empty chunks.
        return ProfileBufferEntryReader();
      }
      // Here, we have found one chunk that had some data.
      // Expose its whole block span as one entry, bounded by block indices.
      return ProfileBufferEntryReader(
          onlyNonEmptyChunk->BufferSpan().FromTo(
              onlyNonEmptyChunk->OffsetFirstBlock(),
              onlyNonEmptyChunk->OffsetPastLastBlock()),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
              onlyNonEmptyChunk->RangeStart()),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
              onlyNonEmptyChunk->RangeStart() +
              (onlyNonEmptyChunk->OffsetPastLastBlock() -
               onlyNonEmptyChunk->OffsetFirstBlock())));
    }

   private:
    friend class ProfileChunkedBuffer;

    // `aChunks0` then `aChunks1` are traversed in order; either may be null.
    explicit Reader(const ProfileChunkedBuffer& aBuffer,
                    const ProfileBufferChunk* aChunks0,
                    const ProfileBufferChunk* aChunks1)
        : mBuffer(aBuffer), mChunks0(aChunks0), mChunks1(aChunks1) {
      // No Reader should live outside of a mutexed call.
      mBuffer.mMutex.AssertCurrentThreadOwns();
    }

    // This Reader should only live inside one of the thread-safe
    // ProfileChunkedBuffer functions, for this reference to stay valid.
    const ProfileChunkedBuffer& mBuffer;
    // Non-owning views of the chunk lists to read (oldest data first).
    const ProfileBufferChunk* mChunks0;
    const ProfileBufferChunk* mChunks1;
  };
    636 
  // Call `aCallback(ProfileChunkedBuffer::Reader*)` and return its result.
  // The callback receives a pointer to a valid Reader if in-session, or
  // nullptr if out-of-session. Callback should not store the `Reader`,
  // because it may become invalid after this call.
  template <typename Callback>
  [[nodiscard]] auto Read(Callback&& aCallback) const {
    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    if (MOZ_UNLIKELY(!mChunkManager)) {
      // Out-of-session: let the callback handle the absence of a Reader.
      return std::forward<Callback>(aCallback)(static_cast<Reader*>(nullptr));
    }
    return mChunkManager->PeekExtantReleasedChunks(
        [&](const ProfileBufferChunk* aOldestChunk) {
          // The Reader covers the released chunks (oldest data) followed by
          // the chunk currently being written.
          Reader reader(*this, aOldestChunk, mCurrentChunk.get());
          return std::forward<Callback>(aCallback)(&reader);
        });
  }
    654 
  // Invoke `aCallback(ProfileBufferEntryReader& [, ProfileBufferBlockIndex])`
  // on each entry, it must read or at least skip everything. Either/both chunk
  // pointers may be null.
  template <typename Callback>
  static void ReadEach(const ProfileBufferChunk* aChunks0,
                       const ProfileBufferChunk* aChunks1,
                       Callback&& aCallback) {
    static_assert(std::is_invocable_v<Callback, ProfileBufferEntryReader&> ||
                      std::is_invocable_v<Callback, ProfileBufferEntryReader&,
                                          ProfileBufferBlockIndex>,
                  "ReadEach callback must take ProfileBufferEntryReader& and "
                  "optionally a ProfileBufferBlockIndex");
    profiler::detail::InChunkPointer p{aChunks0, aChunks1};
    while (!p.IsNull()) {
      // The position right before an entry size *is* a block index.
      const ProfileBufferBlockIndex blockIndex =
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
              p.GlobalRangePosition());
      Length entrySize = p.ReadEntrySize();
      if (entrySize == 0) {
        // No more entries.
        return;
      }
      ProfileBufferEntryReader entryReader = p.EntryReader(entrySize);
      if (entryReader.RemainingBytes() == 0) {
        // Empty reader: nothing more to read.
        return;
      }
      MOZ_ASSERT(entryReader.RemainingBytes() == entrySize);
      // Invoke the callback with or without the block index, depending on its
      // signature (checked at compile time).
      if constexpr (std::is_invocable_v<Callback, ProfileBufferEntryReader&,
                                        ProfileBufferBlockIndex>) {
        aCallback(entryReader, blockIndex);
      } else {
        (void)blockIndex;
        aCallback(entryReader);
      }
      // The callback must have consumed (or skipped) the whole entry.
      MOZ_ASSERT(entryReader.RemainingBytes() == 0);
    }
  }
    692 
    693  // Invoke `aCallback(ProfileBufferEntryReader& [, ProfileBufferBlockIndex])`
    694  // on each entry, it must read or at least skip everything.
    695  template <typename Callback>
    696  void ReadEach(Callback&& aCallback) const {
    697    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    698    if (MOZ_UNLIKELY(!mChunkManager)) {
    699      // Out-of-session.
    700      return;
    701    }
    702    mChunkManager->PeekExtantReleasedChunks(
    703        [&](const ProfileBufferChunk* aOldestChunk) {
    704          ReadEach(aOldestChunk, mCurrentChunk.get(),
    705                   std::forward<Callback>(aCallback));
    706        });
    707  }
    708 
 // Call `aCallback(Maybe<ProfileBufferEntryReader>&&)` on the entry at
 // the given ProfileBufferBlockIndex; The `Maybe` will be `Nothing` if
 // out-of-session, or if that entry doesn't exist anymore, or if we've reached
 // just past the last entry. Return whatever `aCallback` returns. Callback
 // should not store `ProfileBufferEntryReader`, because it may become invalid
 // after this call.
 // Either/both chunk pointers may be null.
 template <typename Callback>
 [[nodiscard]] static auto ReadAt(ProfileBufferBlockIndex aMinimumBlockIndex,
                                  const ProfileBufferChunk* aChunks0,
                                  const ProfileBufferChunk* aChunks1,
                                  Callback&& aCallback) {
   static_assert(
       std::is_invocable_v<Callback, Maybe<ProfileBufferEntryReader>&&>,
       "ReadAt callback must take a Maybe<ProfileBufferEntryReader>&&");
   // Empty by default; only filled if a complete entry is found at (or after)
   // the requested index.
   Maybe<ProfileBufferEntryReader> maybeEntryReader;
   if (profiler::detail::InChunkPointer p{aChunks0, aChunks1}; !p.IsNull()) {
     // If the pointer position is before the given position, try to advance.
     // Note the short-circuit: `AdvanceToGlobalRangePosition` is only called
     // when we are not already at/after the requested index.
     if (p.GlobalRangePosition() >=
             aMinimumBlockIndex.ConvertToProfileBufferIndex() ||
         p.AdvanceToGlobalRangePosition(
             aMinimumBlockIndex.ConvertToProfileBufferIndex())) {
       MOZ_ASSERT(p.GlobalRangePosition() >=
                  aMinimumBlockIndex.ConvertToProfileBufferIndex());
       // Here we're pointing at the start of a block, try to read the entry
       // size. (Entries cannot be empty, so 0 means failure.)
       if (Length entrySize = p.ReadEntrySize(); entrySize != 0) {
         maybeEntryReader.emplace(p.EntryReader(entrySize));
         if (maybeEntryReader->RemainingBytes() == 0) {
           // An empty entry reader means there was no complete block at the
           // given index.
           maybeEntryReader.reset();
         } else {
           MOZ_ASSERT(maybeEntryReader->RemainingBytes() == entrySize);
         }
       }
     }
   }
 #ifdef DEBUG
   // Verify (after the callback has run) that it consumed the whole entry.
   auto assertAllRead = MakeScopeExit([&]() {
     MOZ_ASSERT(!maybeEntryReader || maybeEntryReader->RemainingBytes() == 0);
   });
 #endif  // DEBUG
   return std::forward<Callback>(aCallback)(std::move(maybeEntryReader));
 }
    754 
    755  // Call `aCallback(Maybe<ProfileBufferEntryReader>&&)` on the entry at
    756  // the given ProfileBufferBlockIndex; The `Maybe` will be `Nothing` if
    757  // out-of-session, or if that entry doesn't exist anymore, or if we've reached
    758  // just past the last entry. Return whatever `aCallback` returns. Callback
    759  // should not store `ProfileBufferEntryReader`, because it may become invalid
    760  // after this call.
    761  template <typename Callback>
    762  [[nodiscard]] auto ReadAt(ProfileBufferBlockIndex aBlockIndex,
    763                            Callback&& aCallback) const {
    764    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
    765    if (MOZ_UNLIKELY(!mChunkManager)) {
    766      // Out-of-session.
    767      return std::forward<Callback>(aCallback)(Nothing{});
    768    }
    769    return mChunkManager->PeekExtantReleasedChunks(
    770        [&](const ProfileBufferChunk* aOldestChunk) {
    771          return ReadAt(aBlockIndex, aOldestChunk, mCurrentChunk.get(),
    772                        std::forward<Callback>(aCallback));
    773        });
    774  }
    775 
    776  // Append the contents of another ProfileChunkedBuffer to this one.
    777  ProfileBufferBlockIndex AppendContents(const ProfileChunkedBuffer& aSrc) {
    778    ProfileBufferBlockIndex firstBlockIndex;
    779    // If we start failing, we'll stop writing.
    780    bool failed = false;
    781    aSrc.ReadEach([&](ProfileBufferEntryReader& aER) {
    782      if (failed) {
    783        return;
    784      }
    785      failed =
    786          !Put(aER.RemainingBytes(), [&](Maybe<ProfileBufferEntryWriter>& aEW) {
    787            if (aEW.isNothing()) {
    788              return false;
    789            }
    790            if (!firstBlockIndex) {
    791              firstBlockIndex = aEW->CurrentBlockIndex();
    792            }
    793            aEW->WriteFromReader(aER, aER.RemainingBytes());
    794            return true;
    795          });
    796    });
    797    return failed ? nullptr : firstBlockIndex;
    798  }
    799 
    800 #ifdef DEBUG
 // Dump a text representation of this buffer's state, and of all its chunks,
 // to `aFile` (stdout by default). Debug builds only.
 void Dump(std::FILE* aFile = stdout) const {
   baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
   // NOTE(review): `%p` formally expects a `void*`; consider casting `this`.
   fprintf(aFile,
           "ProfileChunkedBuffer[%p] State: range %u-%u pushed=%u cleared=%u "
           "(live=%u) failed-puts=%u bytes",
           this, unsigned(mRangeStart), unsigned(mRangeEnd),
           unsigned(mPushedBlockCount), unsigned(mClearedBlockCount),
           unsigned(mPushedBlockCount) - unsigned(mClearedBlockCount),
           unsigned(mFailedPutBytes));
   if (MOZ_UNLIKELY(!mChunkManager)) {
     fprintf(aFile, " - Out-of-session\n");
     return;
   }
   fprintf(aFile, " - chunks:\n");
   bool hasChunks = false;
   // "R " prefix: chunks already released to the chunk manager.
   mChunkManager->PeekExtantReleasedChunks(
       [&](const ProfileBufferChunk* aOldestChunk) {
         for (const ProfileBufferChunk* chunk = aOldestChunk; chunk;
              chunk = chunk->GetNext()) {
           fprintf(aFile, "R ");
           chunk->Dump(aFile);
           hasChunks = true;
         }
       });
   // "C " prefix: the chunk currently being written into.
   if (mCurrentChunk) {
     fprintf(aFile, "C ");
     mCurrentChunk->Dump(aFile);
     hasChunks = true;
   }
   // "N " prefix: chunks kept ready for future writes.
   for (const ProfileBufferChunk* chunk = mNextChunks.get(); chunk;
        chunk = chunk->GetNext()) {
     fprintf(aFile, "N ");
     chunk->Dump(aFile);
     hasChunks = true;
   }
   // Show whether a chunk request is currently outstanding.
   switch (mRequestedChunkHolder->GetState()) {
     case RequestedChunkRefCountedHolder::State::Unused:
       fprintf(aFile, " - No request pending.\n");
       break;
     case RequestedChunkRefCountedHolder::State::Requested:
       fprintf(aFile, " - Request pending.\n");
       break;
     case RequestedChunkRefCountedHolder::State::Fulfilled:
       fprintf(aFile, " - Request fulfilled.\n");
       break;
   }
   if (!hasChunks) {
     fprintf(aFile, " No chunks.\n");
   }
 }
    851 #endif  // DEBUG
    852 
    853 private:
    854  // Used to de/serialize a ProfileChunkedBuffer (e.g., containing a backtrace).
    855  friend ProfileBufferEntryWriter::Serializer<ProfileChunkedBuffer>;
    856  friend ProfileBufferEntryReader::Deserializer<ProfileChunkedBuffer>;
    857  friend ProfileBufferEntryWriter::Serializer<UniquePtr<ProfileChunkedBuffer>>;
    858  friend ProfileBufferEntryReader::Deserializer<
    859      UniquePtr<ProfileChunkedBuffer>>;
    860 
 // Stop using the current chunk manager (ending the session, if any), and
 // return ownership of the chunk manager if this buffer owned it.
 // All chunks are dropped and the block statistics are reset.
 [[nodiscard]] UniquePtr<ProfileBufferChunkManager> ResetChunkManager(
     const baseprofiler::detail::BaseProfilerMaybeAutoLock&) {
   UniquePtr<ProfileBufferChunkManager> chunkManager;
   if (mChunkManager) {
     // Release our reference to the request holder. An in-flight request can
     // still complete safely, because the request callback keeps its own
     // RefPtr to the holder (see `RequestChunk`).
     mRequestedChunkHolder = nullptr;
     mChunkManager->ForgetUnreleasedChunks();
 #ifdef DEBUG
     mChunkManager->DeregisteredFrom(this);
 #endif
     mChunkManager = nullptr;
     // If this buffer owned the manager, hand that ownership to the caller.
     chunkManager = std::move(mOwnedChunkManager);
     if (mCurrentChunk) {
       mCurrentChunk->MarkDone();
       mCurrentChunk = nullptr;
     }
     mNextChunks = nullptr;
     // Collapse the range to empty at the old end, so indices stay
     // monotonically increasing across sessions.
     mNextChunkRangeStart = mRangeEnd;
     mRangeStart = mRangeEnd;
     mPushedBlockCount = 0;
     mClearedBlockCount = 0;
     mFailedPutBytes = 0;
   }
   return chunkManager;
 }
    885 
 // Adopt `aChunkManager` (not owned by this buffer) as the chunk manager,
 // starting a new session. There must not already be a chunk manager.
 void SetChunkManager(
     ProfileBufferChunkManager& aChunkManager,
     const baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock) {
   MOZ_ASSERT(!mChunkManager);
   mChunkManager = &aChunkManager;
 #ifdef DEBUG
   mChunkManager->RegisteredWith(this);
 #endif
 
   // When the manager destroys a chunk, advance `mRangeStart` past that
   // chunk's range and count its blocks as cleared. `mRangeStart` is atomic
   // and updated with a CAS loop because this callback may be invoked from
   // any thread, possibly while our mutex is held elsewhere.
   mChunkManager->SetChunkDestroyedCallback(
       [this](const ProfileBufferChunk& aChunk) {
         for (;;) {
           ProfileBufferIndex rangeStart = mRangeStart;
           // NOTE(review): If `mRangeStart` were already past this chunk's
           // range start, this loop would spin forever; presumably chunks
           // are destroyed oldest-first — confirm with the chunk manager.
           if (MOZ_LIKELY(rangeStart <= aChunk.RangeStart())) {
             if (MOZ_LIKELY(mRangeStart.compareExchange(
                     rangeStart,
                     aChunk.RangeStart() + aChunk.BufferBytes()))) {
               break;
             }
           }
         }
         mClearedBlockCount += aChunk.BlockCount();
       });
 
   // We start with one chunk right away, and request a following one now
   // so it should be available before the current chunk is full.
   SetAndInitializeCurrentChunk(mChunkManager->GetChunk(), aLock);
   mRequestedChunkHolder = MakeRefPtr<RequestedChunkRefCountedHolder>();
   RequestChunk(aLock);
 }
    916 
    917  [[nodiscard]] size_t SizeOfExcludingThis(
    918      MallocSizeOf aMallocSizeOf,
    919      const baseprofiler::detail::BaseProfilerMaybeAutoLock&) const {
    920    if (MOZ_UNLIKELY(!mChunkManager)) {
    921      // Out-of-session.
    922      return 0;
    923    }
    924    size_t size = mChunkManager->SizeOfIncludingThis(aMallocSizeOf);
    925    if (mCurrentChunk) {
    926      size += mCurrentChunk->SizeOfIncludingThis(aMallocSizeOf);
    927    }
    928    if (mNextChunks) {
    929      size += mNextChunks->SizeOfIncludingThis(aMallocSizeOf);
    930    }
    931    return size;
    932  }
    933 
    934  void InitializeCurrentChunk(
    935      const baseprofiler::detail::BaseProfilerMaybeAutoLock&) {
    936    MOZ_ASSERT(!!mCurrentChunk);
    937    mCurrentChunk->SetRangeStart(mNextChunkRangeStart);
    938    mNextChunkRangeStart += mCurrentChunk->BufferBytes();
    939    (void)mCurrentChunk->ReserveInitialBlockAsTail(0);
    940  }
    941 
    942  void SetAndInitializeCurrentChunk(
    943      UniquePtr<ProfileBufferChunk>&& aChunk,
    944      const baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock) {
    945    mCurrentChunk = std::move(aChunk);
    946    if (mCurrentChunk) {
    947      InitializeCurrentChunk(aLock);
    948    }
    949  }
    950 
    951  void RequestChunk(
    952      const baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock) {
    953    if (HandleRequestedChunk_IsPending(aLock)) {
    954      // There is already a pending request, don't start a new one.
    955      return;
    956    }
    957 
    958    // Ensure the `RequestedChunkHolder` knows we're starting a request.
    959    mRequestedChunkHolder->StartRequest();
    960 
    961    // Request a chunk, the callback carries a `RefPtr` of the
    962    // `RequestedChunkHolder`, so it's guaranteed to live until it's invoked,
    963    // even if this `ProfileChunkedBuffer` changes its `ChunkManager` or is
    964    // destroyed.
    965    mChunkManager->RequestChunk(
    966        [requestedChunkHolder = RefPtr<RequestedChunkRefCountedHolder>(
    967             mRequestedChunkHolder)](UniquePtr<ProfileBufferChunk> aChunk) {
    968          requestedChunkHolder->AddRequestedChunk(std::move(aChunk));
    969        });
    970  }
    971 
 // Check the chunk-request holder and consume a fulfilled chunk if there is
 // one: it becomes the current chunk when there is none, otherwise it is
 // appended to the next-chunks list. Returns true if a request is still
 // pending after this call (including when consuming a chunk triggered a new
 // request), false otherwise.
 [[nodiscard]] bool HandleRequestedChunk_IsPending(
     const baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock) {
   MOZ_ASSERT(!!mChunkManager);
   MOZ_ASSERT(!!mRequestedChunkHolder);
 
   if (mRequestedChunkHolder->GetState() ==
       RequestedChunkRefCountedHolder::State::Unused) {
     // No request was ever started (or the last one was fully consumed).
     return false;
   }
 
   // A request is either in-flight or fulfilled.
   Maybe<UniquePtr<ProfileBufferChunk>> maybeChunk =
       mRequestedChunkHolder->GetChunkIfFulfilled();
   if (maybeChunk.isNothing()) {
     // Request is still pending.
     return true;
   }
 
   // Since we extracted the provided chunk, the holder should now be unused.
   MOZ_ASSERT(mRequestedChunkHolder->GetState() ==
              RequestedChunkRefCountedHolder::State::Unused);
 
   // Request has been fulfilled. Note the chunk may still be null if the
   // manager could not provide one.
   UniquePtr<ProfileBufferChunk>& chunk = *maybeChunk;
   if (chunk) {
     // Try to use as current chunk if needed.
     if (!mCurrentChunk) {
       SetAndInitializeCurrentChunk(std::move(chunk), aLock);
       // We've just received a chunk and made it current, request a next chunk
       // for later.
       MOZ_ASSERT(!mNextChunks);
       RequestChunk(aLock);
       return true;
     }
 
     // There is already a current chunk; keep this one for later.
     if (!mNextChunks) {
       mNextChunks = std::move(chunk);
     } else {
       mNextChunks->InsertNext(std::move(chunk));
     }
   }
 
   return false;
 }
   1016 
   1017  // Get a pointer to the next chunk available
   1018  [[nodiscard]] ProfileBufferChunk* GetOrCreateCurrentChunk(
   1019      const baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock) {
   1020    ProfileBufferChunk* current = mCurrentChunk.get();
   1021    if (MOZ_UNLIKELY(!current)) {
   1022      // No current chunk ready.
   1023      MOZ_ASSERT(!mNextChunks,
   1024                 "There shouldn't be next chunks when there is no current one");
   1025      // See if a request has recently been fulfilled, ignore pending status.
   1026      (void)HandleRequestedChunk_IsPending(aLock);
   1027      current = mCurrentChunk.get();
   1028      if (MOZ_UNLIKELY(!current)) {
   1029        // There was no pending chunk, try to get one right now.
   1030        // This may still fail, but we can't do anything else about it, the
   1031        // caller must handle the nullptr case.
   1032        // Attempt a request for later.
   1033        SetAndInitializeCurrentChunk(mChunkManager->GetChunk(), aLock);
   1034        current = mCurrentChunk.get();
   1035      }
   1036    }
   1037    return current;
   1038  }
   1039 
   1040  // Get a pointer to the next chunk available
   1041  [[nodiscard]] ProfileBufferChunk* GetOrCreateNextChunk(
   1042      const baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock) {
   1043    MOZ_ASSERT(!!mCurrentChunk,
   1044               "Why ask for a next chunk when there isn't even a current one?");
   1045    ProfileBufferChunk* next = mNextChunks.get();
   1046    if (MOZ_UNLIKELY(!next)) {
   1047      // No next chunk ready, see if a request has recently been fulfilled,
   1048      // ignore pending status.
   1049      (void)HandleRequestedChunk_IsPending(aLock);
   1050      next = mNextChunks.get();
   1051      if (MOZ_UNLIKELY(!next)) {
   1052        // There was no pending chunk, try to get one right now.
   1053        mNextChunks = mChunkManager->GetChunk();
   1054        next = mNextChunks.get();
   1055        // This may still fail, but we can't do anything else about it, the
   1056        // caller must handle the nullptr case.
   1057        if (MOZ_UNLIKELY(!next)) {
   1058          // Attempt a request for later.
   1059          RequestChunk(aLock);
   1060        }
   1061      }
   1062    }
   1063    return next;
   1064  }
   1065 
 // Reserve a block of `aCallbackBlockBytes()` size, and invoke and return
 // `aCallback(Maybe<ProfileBufferEntryWriter>&)`. Note that this is the "raw"
 // version that doesn't write the entry size at the beginning of the block.
 // Note: `aCallbackBlockBytes` is a callback instead of a simple value, to
 // delay this potentially-expensive computation until after we've checked that
 // we're in-session; use `Put(Length, Callback)` below if you know the size
 // already.
 template <typename CallbackBlockBytes, typename Callback>
 auto ReserveAndPutRaw(CallbackBlockBytes&& aCallbackBlockBytes,
                       Callback&& aCallback,
                       baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock,
                       uint64_t aBlockCount = 1) {
   // The entry writer that will point into one or two chunks to write
   // into, empty by default (failure).
   Maybe<ProfileBufferEntryWriter> maybeEntryWriter;
 
   // The current chunk will be filled if we need to write more than its
   // remaining space.
   bool currentChunkFilled = false;
 
   // If the current chunk gets filled, we may or may not initialize the next
   // chunk!
   bool nextChunkInitialized = false;
 
   if (MOZ_LIKELY(mChunkManager)) {
     // In-session.
 
     // Only now compute the requested size (possibly expensive).
     const Length blockBytes =
         std::forward<CallbackBlockBytes>(aCallbackBlockBytes)();
 
     if (ProfileBufferChunk* current = GetOrCreateCurrentChunk(aLock);
         MOZ_LIKELY(current)) {
       if (blockBytes <= current->RemainingBytes()) {
         // Block fits in current chunk with only one span.
         currentChunkFilled = blockBytes == current->RemainingBytes();
         const auto [mem0, blockIndex] = current->ReserveBlock(blockBytes);
         MOZ_ASSERT(mem0.LengthBytes() == blockBytes);
         maybeEntryWriter.emplace(
             mem0, blockIndex,
             ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                 blockIndex.ConvertToProfileBufferIndex() + blockBytes));
         MOZ_ASSERT(maybeEntryWriter->RemainingBytes() == blockBytes);
         mRangeEnd += blockBytes;
         mPushedBlockCount += aBlockCount;
       } else if (blockBytes >= current->BufferBytes()) {
         // Currently only two buffer chunks are held at a time and it is not
         // possible to write an object that takes up more space than this. In
         // this scenario, silently discard this block of data if it is unable
         // to fit into the two reserved profiler chunks.
         mFailedPutBytes += blockBytes;
       } else {
         // Block doesn't fit fully in current chunk, it needs to overflow into
         // the next one.
         // Whether or not we can write this entry, the current chunk is now
         // considered full, so it will be released. (Otherwise we could refuse
         // this entry, but later accept a smaller entry into this chunk, which
         // would be somewhat inconsistent.)
         currentChunkFilled = true;
         // Make sure the next chunk is available (from a previous request),
         // otherwise create one on the spot.
         if (ProfileBufferChunk* next = GetOrCreateNextChunk(aLock);
             MOZ_LIKELY(next)) {
           // Here, we know we have a current and a next chunk.
           // Reserve head of block at the end of the current chunk.
           const auto [mem0, blockIndex] =
               current->ReserveBlock(current->RemainingBytes());
           MOZ_ASSERT(mem0.LengthBytes() < blockBytes);
           MOZ_ASSERT(current->RemainingBytes() == 0);
           // Set the next chunk range, and reserve the needed space for the
           // tail of the block.
           next->SetRangeStart(mNextChunkRangeStart);
           mNextChunkRangeStart += next->BufferBytes();
           const auto mem1 = next->ReserveInitialBlockAsTail(
               blockBytes - mem0.LengthBytes());
           MOZ_ASSERT(next->RemainingBytes() != 0);
           nextChunkInitialized = true;
           // Block is split in two spans.
           maybeEntryWriter.emplace(
               mem0, mem1, blockIndex,
               ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                   blockIndex.ConvertToProfileBufferIndex() + blockBytes));
           MOZ_ASSERT(maybeEntryWriter->RemainingBytes() == blockBytes);
           mRangeEnd += blockBytes;
           mPushedBlockCount += aBlockCount;
         } else {
           // Cannot get a new chunk. Record put failure.
           mFailedPutBytes += blockBytes;
         }
       }
     } else {
       // Cannot get a current chunk. Record put failure.
       mFailedPutBytes += blockBytes;
     }
   }  // end of `if (MOZ_LIKELY(mChunkManager))`
 
   // Here, we either have a `Nothing` (failure), or a non-empty entry writer
   // pointing at the start of the block.
 
   // After we invoke the callback and return, we may need to handle the
   // current chunk being filled.
   auto handleFilledChunk = MakeScopeExit([&]() {
     // If the entry writer was not already empty, the callback *must* have
     // filled the full entry.
     MOZ_ASSERT(!maybeEntryWriter || maybeEntryWriter->RemainingBytes() == 0);
 
     if (currentChunkFilled) {
       // Extract current (now filled) chunk.
       UniquePtr<ProfileBufferChunk> filled = std::move(mCurrentChunk);
 
       if (mNextChunks) {
         // Cycle to the next chunk.
         mCurrentChunk =
             std::exchange(mNextChunks, mNextChunks->ReleaseNext());
 
         // Make sure it is initialized (it is now the current chunk).
         if (!nextChunkInitialized) {
           InitializeCurrentChunk(aLock);
         }
       }
 
       // And finally mark filled chunk done and release it.
       filled->MarkDone();
       mChunkManager->ReleaseChunk(std::move(filled));
 
       // Request another chunk if needed.
       // In most cases, here we should have one current chunk and no next
       // chunk, so we want to do a request so there hopefully will be a next
       // chunk available when the current one gets filled.
       // But we also send a request if we don't even have a current chunk (if
       // it's too late, it's ok because the next `ReserveAndPutRaw` will just
       // allocate one on the spot.)
       // And if we already have a next chunk, there's no need for more now.
       if (!mCurrentChunk || !mNextChunks) {
         RequestChunk(aLock);
       }
     }
   });
 
   return std::forward<Callback>(aCallback)(maybeEntryWriter);
 }
   1206 
   1207  // Reserve a block of `aBlockBytes` size, and invoke and return
   1208  // `aCallback(Maybe<ProfileBufferEntryWriter>&)`. Note that this is the "raw"
   1209  // version that doesn't write the entry size at the beginning of the block.
   1210  template <typename Callback>
   1211  auto ReserveAndPutRaw(Length aBlockBytes, Callback&& aCallback,
   1212                        uint64_t aBlockCount) {
   1213    baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
   1214    return ReserveAndPutRaw([aBlockBytes]() { return aBlockBytes; },
   1215                            std::forward<Callback>(aCallback), lock,
   1216                            aBlockCount);
   1217  }
   1218 
   1219  // Mutex guarding the following members.
   1220  mutable baseprofiler::detail::BaseProfilerMaybeMutex mMutex;
   1221 
   1222  // Pointer to the current Chunk Manager (or null when out-of-session.)
   1223  // It may be owned locally (see below) or externally.
   1224  ProfileBufferChunkManager* mChunkManager = nullptr;
   1225 
   1226  // Only non-null when we own the current Chunk Manager.
   1227  UniquePtr<ProfileBufferChunkManager> mOwnedChunkManager;
   1228 
   1229  UniquePtr<ProfileBufferChunk> mCurrentChunk;
   1230 
   1231  UniquePtr<ProfileBufferChunk> mNextChunks;
   1232 
 // Class used to transfer requested chunks from a `ChunkManager` to a
 // `ProfileChunkedBuffer`.
 // It needs to be ref-counted because the request may be fulfilled
 // asynchronously, and either side may be destroyed during the request.
 // It cannot use the `ProfileChunkedBuffer` mutex, because that buffer and its
 // mutex could be destroyed during the request.
 class RequestedChunkRefCountedHolder {
  public:
   // Lifecycle of a request: Unused -> Requested -> Fulfilled -> Unused.
   enum class State { Unused, Requested, Fulfilled };
 
   // Get the current state. Note that it may change after the function
   // returns, so it should be used carefully, e.g., `ProfileChunkedBuffer` can
   // see if a request is pending or fulfilled, to avoid starting another
   // request.
   [[nodiscard]] State GetState() const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
     return mState;
   }
 
   // Must be called by `ProfileChunkedBuffer` when it requests a chunk.
   // There cannot be more than one request in-flight.
   void StartRequest() {
     baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
     MOZ_ASSERT(mState == State::Unused, "Already requested or fulfilled");
     mState = State::Requested;
   }
 
   // Must be called by the `ChunkManager` with a chunk.
   // If the `ChunkManager` cannot provide a chunk (because of memory limits,
   // or it gets destroyed), it must call this anyway with a nullptr.
   void AddRequestedChunk(UniquePtr<ProfileBufferChunk>&& aChunk) {
     baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
     MOZ_ASSERT(mState == State::Requested);
     mState = State::Fulfilled;
     mRequestedChunk = std::move(aChunk);
   }
 
   // The `ProfileChunkedBuffer` can try to extract the provided chunk after a
   // request:
   // - Nothing -> Request is not fulfilled yet.
   // - Some(nullptr) -> The `ChunkManager` was not able to provide a chunk.
   // - Some(chunk) -> Requested chunk.
   // On success, the state returns to `Unused`, allowing a new request.
   [[nodiscard]] Maybe<UniquePtr<ProfileBufferChunk>> GetChunkIfFulfilled() {
     Maybe<UniquePtr<ProfileBufferChunk>> maybeChunk;
     baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
     MOZ_ASSERT(mState == State::Requested || mState == State::Fulfilled);
     if (mState == State::Fulfilled) {
       mState = State::Unused;
       maybeChunk.emplace(std::move(mRequestedChunk));
     }
     return maybeChunk;
   }
 
   // Ref-counting implementation. Hand-rolled, because mozilla::RefCounted
   // logs AddRefs and Releases in xpcom, but this object could be AddRef'd
   // by the Base Profiler before xpcom starts, then Release'd by the Gecko
   // Profiler in xpcom, leading to apparent negative leaks.
 
   void AddRef() {
     baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
     ++mRefCount;
   }
 
   void Release() {
     // The lock is released before `delete this`, because the mutex is a
     // member and must not be held while the object is destroyed.
     {
       baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
       if (--mRefCount > 0) {
         return;
       }
     }
     delete this;
   }
 
  private:
   // Destruction only happens through `Release()` above.
   ~RequestedChunkRefCountedHolder() = default;
 
   // Mutex guarding the following members.
   mutable baseprofiler::detail::BaseProfilerMutex mRequestMutex;
   int mRefCount = 0;
   State mState = State::Unused;
   UniquePtr<ProfileBufferChunk> mRequestedChunk;
 };
   1315 
   1316  // Requested-chunk holder, kept alive when in-session, but may also live
   1317  // longer if a request is in-flight.
   1318  RefPtr<RequestedChunkRefCountedHolder> mRequestedChunkHolder;
   1319 
   1320  // Range start of the next chunk to become current. Starting at 1 because
   1321  // 0 is a reserved index similar to nullptr.
   1322  ProfileBufferIndex mNextChunkRangeStart = 1;
   1323 
   1324  // Index to the first block.
   1325  // Atomic because it may be increased when a Chunk is destroyed, and the
   1326  // callback may be invoked from anywhere, including from inside one of our
 // locked sections, so we cannot protect it with a mutex.
   1328  Atomic<ProfileBufferIndex, MemoryOrdering::ReleaseAcquire> mRangeStart{1};
   1329 
   1330  // Index past the last block. Equals mRangeStart if empty.
   1331  ProfileBufferIndex mRangeEnd = 1;
   1332 
   1333  // Number of blocks that have been pushed into this buffer.
   1334  uint64_t mPushedBlockCount = 0;
   1335 
   1336  // Number of blocks that have been removed from this buffer.
   1337  // Note: Live entries = pushed - cleared.
   1338  // Atomic because it may be updated when a Chunk is destroyed, and the
   1339  // callback may be invoked from anywhere, including from inside one of our
 // locked sections, so we cannot protect it with a mutex.
   1341  Atomic<uint64_t, MemoryOrdering::ReleaseAcquire> mClearedBlockCount{0};
   1342 
   1343  // Number of bytes that could not be put into this buffer.
   1344  uint64_t mFailedPutBytes = 0;
   1345 };
   1346 
   1347 // ----------------------------------------------------------------------------
   1348 // ProfileChunkedBuffer serialization
   1349 
   1350 // A ProfileChunkedBuffer can hide another one!
// This will be used to store marker backtraces; they can be read back into a
   1352 // UniquePtr<ProfileChunkedBuffer>.
   1353 // Format: len (ULEB128) | start | end | buffer (len bytes) | pushed | cleared
   1354 // len==0 marks an out-of-session buffer, or empty buffer.
   1355 template <>
   1356 struct ProfileBufferEntryWriter::Serializer<ProfileChunkedBuffer> {
   1357  static Length Bytes(const ProfileChunkedBuffer& aBuffer) {
   1358    return aBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
   1359      if (!aReader) {
   1360        // Out-of-session, we only need 1 byte to store a length of 0.
   1361        return ULEB128Size<Length>(0);
   1362      }
   1363      ProfileBufferEntryReader reader = aReader->SingleChunkDataAsEntry();
   1364      const ProfileBufferIndex start =
   1365          reader.CurrentBlockIndex().ConvertToProfileBufferIndex();
   1366      const ProfileBufferIndex end =
   1367          reader.NextBlockIndex().ConvertToProfileBufferIndex();
   1368      MOZ_ASSERT(end - start <= std::numeric_limits<Length>::max());
   1369      const Length len = static_cast<Length>(end - start);
   1370      if (len == 0) {
   1371        // In-session but empty, also store a length of 0.
   1372        return ULEB128Size<Length>(0);
   1373      }
   1374      // In-session.
   1375      return static_cast<Length>(ULEB128Size(len) + sizeof(start) + len +
   1376                                 sizeof(aBuffer.mPushedBlockCount) +
   1377                                 sizeof(aBuffer.mClearedBlockCount));
   1378    });
   1379  }
   1380 
   1381  static void Write(ProfileBufferEntryWriter& aEW,
   1382                    const ProfileChunkedBuffer& aBuffer) {
   1383    aBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
   1384      if (!aReader) {
   1385        // Out-of-session, only store a length of 0.
   1386        aEW.WriteULEB128<Length>(0);
   1387        return;
   1388      }
   1389      ProfileBufferEntryReader reader = aReader->SingleChunkDataAsEntry();
   1390      const ProfileBufferIndex start =
   1391          reader.CurrentBlockIndex().ConvertToProfileBufferIndex();
   1392      const ProfileBufferIndex end =
   1393          reader.NextBlockIndex().ConvertToProfileBufferIndex();
   1394      MOZ_ASSERT(end - start <= std::numeric_limits<Length>::max());
   1395      const Length len = static_cast<Length>(end - start);
   1396      MOZ_ASSERT(len <= aEW.RemainingBytes());
   1397      if (len == 0) {
   1398        // In-session but empty, only store a length of 0.
   1399        aEW.WriteULEB128<Length>(0);
   1400        return;
   1401      }
   1402      // In-session.
   1403      // Store buffer length, and start index.
   1404      aEW.WriteULEB128(len);
   1405      aEW.WriteObject(start);
   1406      // Write all the bytes.
   1407      aEW.WriteFromReader(reader, reader.RemainingBytes());
   1408      // And write stats.
   1409      aEW.WriteObject(static_cast<uint64_t>(aBuffer.mPushedBlockCount));
   1410      aEW.WriteObject(static_cast<uint64_t>(aBuffer.mClearedBlockCount));
   1411      // Note: Failed pushes are not important to serialize.
   1412    });
   1413  }
   1414 };
   1415 
   1416 // A serialized ProfileChunkedBuffer can be read into an empty buffer (either
   1417 // out-of-session, or in-session with enough room).
template <>
struct ProfileBufferEntryReader::Deserializer<ProfileChunkedBuffer> {
  // Deserialize (from the format written by the Serializer above) into
  // `aBuffer`, which must be empty: either out-of-session (a single-chunk
  // manager of the right size is installed), or in-session with enough room.
  static void ReadInto(ProfileBufferEntryReader& aER,
                       ProfileChunkedBuffer& aBuffer) {
    // Expect an empty buffer, as we're going to overwrite it.
    MOZ_ASSERT(aBuffer.GetState().mRangeStart == aBuffer.GetState().mRangeEnd);
    // Read the stored buffer length.
    const auto len = aER.ReadULEB128<ProfileChunkedBuffer::Length>();
    if (len == 0) {
      // 0-length means an "uninteresting" buffer (out-of-session, empty, or
      // null), just return now.
      return;
    }
    // We have a non-empty buffer to read.

    // Read the start index; the serialized data occupies [start, start + len).
    const auto start = aER.ReadObject<ProfileBufferIndex>();
    aBuffer.mRangeStart = start;
    // For now, set the end to be the start (the buffer is still empty). It will
    // be updated in `ReserveAndPutRaw()` below.
    aBuffer.mRangeEnd = start;

    if (aBuffer.IsInSession()) {
      // Output buffer is in-session (i.e., it already has a memory buffer
      // attached). Make sure the caller allocated enough space.
      MOZ_RELEASE_ASSERT(aBuffer.BufferLength().value() >= len);
    } else {
      // Output buffer is out-of-session, set a new chunk manager that will
      // provide a single chunk of just the right size.
      aBuffer.SetChunkManager(MakeUnique<ProfileBufferChunkManagerSingle>(len));
      MOZ_ASSERT(aBuffer.BufferLength().value() >= len);
    }

    // Copy bytes into the buffer. The final `0` means no extra block count is
    // added here; the real counts are restored from the stats below.
    aBuffer.ReserveAndPutRaw(
        len,
        [&](Maybe<ProfileBufferEntryWriter>& aEW) {
          MOZ_RELEASE_ASSERT(aEW.isSome());
          aEW->WriteFromReader(aER, len);
        },
        0);
    // Finally copy stats.
    aBuffer.mPushedBlockCount = aER.ReadObject<uint64_t>();
    aBuffer.mClearedBlockCount = aER.ReadObject<uint64_t>();
    // Failed puts are not important to keep.
    aBuffer.mFailedPutBytes = 0;
  }

  // We cannot output a ProfileChunkedBuffer object (not copyable), use
  // `ReadInto()` or `aER.ReadObject<UniquePtr<ProfileChunkedBuffer>>()`
  // instead.
  static ProfileChunkedBuffer Read(ProfileBufferEntryReader& aER) = delete;
};
   1469 
// A ProfileChunkedBuffer is usually referenced through a UniquePtr, for
   1471 // convenience we support (de)serializing that UniquePtr directly.
   1472 // This is compatible with the non-UniquePtr serialization above, with a null
   1473 // pointer being treated like an out-of-session or empty buffer; and any of
   1474 // these would be deserialized into a null pointer.
   1475 template <>
   1476 struct ProfileBufferEntryWriter::Serializer<UniquePtr<ProfileChunkedBuffer>> {
   1477  static Length Bytes(const UniquePtr<ProfileChunkedBuffer>& aBufferUPtr) {
   1478    if (!aBufferUPtr) {
   1479      // Null pointer, treat it like an empty buffer, i.e., write length of 0.
   1480      return ULEB128Size<Length>(0);
   1481    }
   1482    // Otherwise write the pointed-at ProfileChunkedBuffer (which could be
   1483    // out-of-session or empty.)
   1484    return SumBytes(*aBufferUPtr);
   1485  }
   1486 
   1487  static void Write(ProfileBufferEntryWriter& aEW,
   1488                    const UniquePtr<ProfileChunkedBuffer>& aBufferUPtr) {
   1489    if (!aBufferUPtr) {
   1490      // Null pointer, treat it like an empty buffer, i.e., write length of 0.
   1491      aEW.WriteULEB128<Length>(0);
   1492      return;
   1493    }
   1494    // Otherwise write the pointed-at ProfileChunkedBuffer (which could be
   1495    // out-of-session or empty.)
   1496    aEW.WriteObject(*aBufferUPtr);
   1497  }
   1498 };
   1499 
   1500 // Serialization of a raw pointer to ProfileChunkedBuffer.
   1501 // Use Deserializer<UniquePtr<ProfileChunkedBuffer>> to read it back.
   1502 template <>
   1503 struct ProfileBufferEntryWriter::Serializer<ProfileChunkedBuffer*> {
   1504  static Length Bytes(ProfileChunkedBuffer* aBufferUPtr) {
   1505    if (!aBufferUPtr) {
   1506      // Null pointer, treat it like an empty buffer, i.e., write length of 0.
   1507      return ULEB128Size<Length>(0);
   1508    }
   1509    // Otherwise write the pointed-at ProfileChunkedBuffer (which could be
   1510    // out-of-session or empty.)
   1511    return SumBytes(*aBufferUPtr);
   1512  }
   1513 
   1514  static void Write(ProfileBufferEntryWriter& aEW,
   1515                    ProfileChunkedBuffer* aBufferUPtr) {
   1516    if (!aBufferUPtr) {
   1517      // Null pointer, treat it like an empty buffer, i.e., write length of 0.
   1518      aEW.WriteULEB128<Length>(0);
   1519      return;
   1520    }
   1521    // Otherwise write the pointed-at ProfileChunkedBuffer (which could be
   1522    // out-of-session or empty.)
   1523    aEW.WriteObject(*aBufferUPtr);
   1524  }
   1525 };
   1526 
   1527 template <>
   1528 struct ProfileBufferEntryReader::Deserializer<UniquePtr<ProfileChunkedBuffer>> {
   1529  static void ReadInto(ProfileBufferEntryReader& aER,
   1530                       UniquePtr<ProfileChunkedBuffer>& aBuffer) {
   1531    aBuffer = Read(aER);
   1532  }
   1533 
   1534  static UniquePtr<ProfileChunkedBuffer> Read(ProfileBufferEntryReader& aER) {
   1535    UniquePtr<ProfileChunkedBuffer> bufferUPtr;
   1536    // Keep a copy of the reader before reading the length, so we can restart
   1537    // from here below.
   1538    ProfileBufferEntryReader readerBeforeLen = aER;
   1539    // Read the stored buffer length.
   1540    const auto len = aER.ReadULEB128<ProfileChunkedBuffer::Length>();
   1541    if (len == 0) {
   1542      // 0-length means an "uninteresting" buffer, just return nullptr.
   1543      return bufferUPtr;
   1544    }
   1545    // We have a non-empty buffer.
   1546    // allocate an empty ProfileChunkedBuffer without mutex.
   1547    bufferUPtr = MakeUnique<ProfileChunkedBuffer>(
   1548        ProfileChunkedBuffer::ThreadSafety::WithoutMutex);
   1549    // Rewind the reader before the length and deserialize the contents, using
   1550    // the non-UniquePtr Deserializer.
   1551    aER = readerBeforeLen;
   1552    aER.ReadIntoObject(*bufferUPtr);
   1553    return bufferUPtr;
   1554  }
   1555 };
   1556 
   1557 }  // namespace mozilla
   1558 
   1559 #endif  // ProfileChunkedBuffer_h