ProfileBufferChunkManagerWithLocalLimit.h (17636B)
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ProfileBufferChunkManagerWithLocalLimit_h
#define ProfileBufferChunkManagerWithLocalLimit_h

#include "mozilla/BaseProfiler.h"
#include "mozilla/BaseProfilerDetail.h"
#include "mozilla/ProfileBufferChunkManager.h"
#include "mozilla/ProfileBufferControlledChunkManager.h"
#include "mozilla/mozalloc.h"

#include <utility>

namespace mozilla {

// Manages the Chunks for this process in a thread-safe manner, with a maximum
// size per process.
//
// "Unreleased" chunks are not owned here, only "released" chunks can be
// destroyed or recycled when reaching the memory limit, so it is theoretically
// possible to break that limit, if:
// - The user of this class doesn't release their chunks, AND/OR
// - The limit is too small (e.g., smaller than 2 or 3 chunks, which should be
//   the usual number of unreleased chunks in flight).
// In this case, it just means that we will use more memory than allowed,
// potentially risking OOMs. Hopefully this shouldn't happen in real code,
// assuming that the user is doing the right thing and releasing chunks ASAP,
// and that the memory limit is reasonably large.
//
// Locking discipline (visible throughout this class): all chunk/byte-count
// state is guarded by `mMutex`; `mUpdateCallback` is guarded by the separate
// `mUpdateCallbackMutex` and is always invoked AFTER `mMutex` has been
// released, so the callback may itself call back into this manager.
class ProfileBufferChunkManagerWithLocalLimit final
    : public ProfileBufferChunkManager,
      public ProfileBufferControlledChunkManager {
 public:
  using Length = ProfileBufferChunk::Length;

  // MaxTotalBytes: Maximum number of bytes allocated in all local Chunks.
  // ChunkMinBufferBytes: Minimum number of user-available bytes in each Chunk.
  // Note that Chunks use a bit more memory for their header.
  explicit ProfileBufferChunkManagerWithLocalLimit(size_t aMaxTotalBytes,
                                                   Length aChunkMinBufferBytes)
      : mMaxTotalBytes(aMaxTotalBytes),
        mChunkMinBufferBytes(aChunkMinBufferBytes) {}

  ~ProfileBufferChunkManagerWithLocalLimit() {
    // NOTE(review): `mUpdateCallback` is read here without taking
    // `mUpdateCallbackMutex` — presumably safe because destruction implies
    // exclusive access; confirm no concurrent SetUpdateCallback is possible.
    if (mUpdateCallback) {
      // Signal the end of this callback.
      std::move(mUpdateCallback)(Update(nullptr));
    }
  }

  [[nodiscard]] size_t MaxTotalSize() const final {
    // `mMaxTotalBytes` is `const` so there is no need to lock the mutex.
    return mMaxTotalBytes;
  }

  // Total bytes currently allocated for chunks (new minus destroyed).
  // NOTE(review): read without `mMutex`, so the value may be slightly stale
  // when racing with allocation/destruction — presumably an acceptable
  // approximation for memory reporting; confirm with callers.
  [[nodiscard]] size_t TotalSize() const { return mTotalBytes; }

  // Synchronously produce one chunk (recycled or newly created; may be null
  // on allocation failure), then notify the update callback if one is set.
  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetChunk() final {
    AUTO_PROFILER_STATS(Local_GetChunk);

    // Do the state mutation under `mMutex`, inside an immediately-invoked
    // lambda so the lock is released before the update callback runs below.
    ChunkAndUpdate chunkAndUpdate = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      return GetChunk(lock);
    }();

    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback && !chunkAndUpdate.second.IsNotUpdate()) {
      mUpdateCallback(std::move(chunkAndUpdate.second));
    }

    return std::move(chunkAndUpdate.first);
  }

  // Register a receiver to be given a chunk at the next
  // `FulfillChunkRequests()` call. Only one request may be in flight;
  // additional requests are silently dropped until the pending one is served.
  void RequestChunk(std::function<void(UniquePtr<ProfileBufferChunk>)>&&
                        aChunkReceiver) final {
    AUTO_PROFILER_STATS(Local_RequestChunk);
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    if (mChunkReceiver) {
      // We already have a chunk receiver, meaning a request is pending.
      return;
    }
    // Store the chunk receiver. This indicates that a request is pending, and
    // it will be handled in the next `FulfillChunkRequests()` call.
    mChunkReceiver = std::move(aChunkReceiver);
  }

  // Serve the pending `RequestChunk()` receiver, if any, with a chunk (which
  // may be null if allocation failed — the receiver is still invoked).
  void FulfillChunkRequests() final {
    AUTO_PROFILER_STATS(Local_FulfillChunkRequests);
    std::function<void(UniquePtr<ProfileBufferChunk>)> chunkReceiver;
    ChunkAndUpdate chunkAndUpdate = [&]() -> ChunkAndUpdate {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      if (!mChunkReceiver) {
        // No receiver means no pending request, we're done.
        return {};
      }
      // Otherwise there is a request, extract the receiver to call below.
      std::swap(chunkReceiver, mChunkReceiver);
      MOZ_ASSERT(!mChunkReceiver, "mChunkReceiver should have been emptied");
      // And allocate the requested chunk. This may fail, it's fine, we're
      // letting the receiver know about it.
      AUTO_PROFILER_STATS(Local_FulfillChunkRequests_GetChunk);
      return GetChunk(lock);
    }();

    if (chunkReceiver) {
      {
        baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
        if (mUpdateCallback && !chunkAndUpdate.second.IsNotUpdate()) {
          mUpdateCallback(std::move(chunkAndUpdate.second));
        }
      }

      // Invoke callback outside of lock, so that it can use other chunk manager
      // functions if needed.
      // Note that this means there could be a race, where another request
      // happens now and even gets fulfilled before this one is! It should be
      // rare, and shouldn't be a problem anyway, the user will still get their
      // requested chunks, new/recycled chunks look the same so their order
      // doesn't matter.
      std::move(chunkReceiver)(std::move(chunkAndUpdate.first));
    }
  }

  // Take back ownership of a full ("Done") chunk, inserting it into
  // `mReleasedChunks` sorted by done-timestamp, then notify the update
  // callback. A null chunk is a no-op.
  void ReleaseChunk(UniquePtr<ProfileBufferChunk> aChunk) final {
    if (!aChunk) {
      return;
    }

    MOZ_RELEASE_ASSERT(!aChunk->GetNext(), "ReleaseChunk only accepts 1 chunk");
    MOZ_RELEASE_ASSERT(!aChunk->ChunkHeader().mDoneTimeStamp.IsNull(),
                       "Released chunk should have a 'Done' timestamp");

    Update update = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      MOZ_ASSERT(mUser, "Not registered yet");
      // Keep a pointer to the first newly-released chunk, so we can use it to
      // prepare an update (after `aChunk` is moved-from).
      const ProfileBufferChunk* const newlyReleasedChunk = aChunk.get();
      // Transfer the chunk size from the unreleased bucket to the released one.
      mUnreleasedBufferBytes -= aChunk->BufferBytes();
      mReleasedBufferBytes += aChunk->BufferBytes();
      if (!mReleasedChunks) {
        // No other released chunks at the moment, we're starting the list.
        MOZ_ASSERT(mReleasedBufferBytes == aChunk->BufferBytes());
        mReleasedChunks = std::move(aChunk);
      } else {
        // Insert aChunk in mReleasedChunks to keep done-timestamp order.
        const TimeStamp& releasedChunkDoneTimeStamp =
            aChunk->ChunkHeader().mDoneTimeStamp;
        if (releasedChunkDoneTimeStamp <
            mReleasedChunks->ChunkHeader().mDoneTimeStamp) {
          // aChunk is the oldest -> Insert at the beginning.
          aChunk->SetLast(std::move(mReleasedChunks));
          mReleasedChunks = std::move(aChunk);
        } else {
          // Go through the already-released chunk list, and insert aChunk
          // before the first younger released chunk, or at the end.
          ProfileBufferChunk* chunk = mReleasedChunks.get();
          for (;;) {
            ProfileBufferChunk* const nextChunk = chunk->GetNext();
            if (!nextChunk || releasedChunkDoneTimeStamp <
                                  nextChunk->ChunkHeader().mDoneTimeStamp) {
              // Either we're at the last released chunk, or the next released
              // chunk is younger -> Insert right after this released chunk.
              chunk->InsertNext(std::move(aChunk));
              break;
            }
            chunk = nextChunk;
          }
        }
      }

      return Update(mUnreleasedBufferBytes, mReleasedBufferBytes,
                    mReleasedChunks.get(), newlyReleasedChunk);
    }();

    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback && !update.IsNotUpdate()) {
      mUpdateCallback(std::move(update));
    }
  }

  // Install the callback invoked (under `mMutex`, see
  // DiscardOldestReleasedChunk) just before a chunk's data is destroyed or
  // recycled.
  void SetChunkDestroyedCallback(
      std::function<void(const ProfileBufferChunk&)>&& aChunkDestroyedCallback)
      final {
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    MOZ_ASSERT(mUser, "Not registered yet");
    mChunkDestroyedCallback = std::move(aChunkDestroyedCallback);
  }

  // Transfer ownership of all currently-released chunks to the caller, and
  // reset the released-bytes count; the update callback is told that no
  // released chunks remain.
  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetExtantReleasedChunks() final {
    UniquePtr<ProfileBufferChunk> chunks;
    size_t unreleasedBufferBytes = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      MOZ_ASSERT(mUser, "Not registered yet");
      mReleasedBufferBytes = 0;
      chunks = std::move(mReleasedChunks);
      return mUnreleasedBufferBytes;
    }();

    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback) {
      mUpdateCallback(Update(unreleasedBufferBytes, 0, nullptr, nullptr));
    }

    return chunks;
  }

  // Stop accounting for chunks that were handed out but never released (their
  // memory is not owned here, so only the byte counter is cleared).
  void ForgetUnreleasedChunks() final {
    Update update = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      MOZ_ASSERT(mUser, "Not registered yet");
      mUnreleasedBufferBytes = 0;
      return Update(0, mReleasedBufferBytes, mReleasedChunks.get(), nullptr);
    }();
    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback) {
      mUpdateCallback(std::move(update));
    }
  }

  [[nodiscard]] size_t SizeOfExcludingThis(
      MallocSizeOf aMallocSizeOf) const final {
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    return SizeOfExcludingThis(aMallocSizeOf, lock);
  }

  [[nodiscard]] size_t SizeOfIncludingThis(
      MallocSizeOf aMallocSizeOf) const final {
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    MOZ_ASSERT(mUser, "Not registered yet");
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf, lock);
  }

  // Replace the update callback. The previous callback (if any) is first
  // signalled with a final `Update(nullptr)`; the new callback (if non-null)
  // immediately receives a snapshot of the current state.
  void SetUpdateCallback(UpdateCallback&& aUpdateCallback) final {
    {
      baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
      if (mUpdateCallback) {
        // Signal the end of the previous callback.
        std::move(mUpdateCallback)(Update(nullptr));
        mUpdateCallback = nullptr;
      }
    }

    if (aUpdateCallback) {
      // Build the initial snapshot under `mMutex` only, then install and
      // invoke the callback under `mUpdateCallbackMutex` only — the two locks
      // are never held together.
      Update initialUpdate = [&]() {
        baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
        return Update(mUnreleasedBufferBytes, mReleasedBufferBytes,
                      mReleasedChunks.get(), nullptr);
      }();

      baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
      MOZ_ASSERT(!mUpdateCallback, "Only one update callback allowed");
      mUpdateCallback = std::move(aUpdateCallback);
      mUpdateCallback(std::move(initialUpdate));
    }
  }

  // Destroy (or recycle) all released chunks whose done-timestamp is at or
  // before `aDoneTimeStamp`. Relies on `mReleasedChunks` being kept in
  // done-timestamp order by `ReleaseChunk()`, so it can stop at the first
  // younger chunk.
  void DestroyChunksAtOrBefore(TimeStamp aDoneTimeStamp) final {
    MOZ_ASSERT(!aDoneTimeStamp.IsNull());
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    for (;;) {
      if (!mReleasedChunks) {
        // We don't own any released chunks (anymore), we're done.
        break;
      }
      if (mReleasedChunks->ChunkHeader().mDoneTimeStamp > aDoneTimeStamp) {
        // The current chunk is strictly after the given timestamp, we're done.
        break;
      }
      // We've found a chunk at or before the timestamp, discard it.
      DiscardOldestReleasedChunk(lock);
    }
  }

 protected:
  // Expose the released-chunk list to the base class while holding `mMutex`;
  // the matching Unlock function below must be called afterwards.
  const ProfileBufferChunk* PeekExtantReleasedChunksAndLock() final
      MOZ_CAPABILITY_ACQUIRE(mMutex) {
    mMutex.Lock();
    MOZ_ASSERT(mUser, "Not registered yet");
    return mReleasedChunks.get();
  }
  void UnlockAfterPeekExtantReleasedChunks() final
      MOZ_CAPABILITY_RELEASE(mMutex) {
    mMutex.Unlock();
  }

 private:
  // Either stash `chunk` for later reuse (at most two chunks are kept in
  // `mRecycledChunks`) and report 0 bytes deallocated, or let `chunk` be
  // destroyed when it goes out of scope and report its allocation size so the
  // caller can subtract it from `mTotalBytes`.
  size_t MaybeRecycleChunkAndGetDeallocatedSize(
      UniquePtr<ProfileBufferChunk>&& chunk,
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    // Try to recycle big-enough chunks. (All chunks should have the same size,
    // but it's a cheap test and may allow future adjustments based on actual
    // data rate.)
    if (chunk->BufferBytes() >= mChunkMinBufferBytes) {
      // We keep up to two recycled chunks at any time.
      if (!mRecycledChunks) {
        mRecycledChunks = std::move(chunk);
        return 0;
      } else if (!mRecycledChunks->GetNext()) {
        mRecycledChunks->InsertNext(std::move(chunk));
        return 0;
      }
    }
    return moz_malloc_usable_size(chunk.get());
  }

  // Pop the first recycled chunk, if any, resetting it for reuse.
  UniquePtr<ProfileBufferChunk> TakeRecycledChunk(
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    UniquePtr<ProfileBufferChunk> recycled;
    if (mRecycledChunks) {
      recycled = std::exchange(mRecycledChunks, mRecycledChunks->ReleaseNext());
      recycled->MarkRecycled();
    }
    return recycled;
  }

  // Detach the head (oldest, per the sort order kept by `ReleaseChunk()`) of
  // `mReleasedChunks`, inform the chunk-destroyed callback, and either recycle
  // the chunk or let it be destroyed (adjusting `mTotalBytes` accordingly).
  void DiscardOldestReleasedChunk(
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    MOZ_ASSERT(!!mReleasedChunks);
    UniquePtr<ProfileBufferChunk> oldest =
        std::exchange(mReleasedChunks, mReleasedChunks->ReleaseNext());
    mReleasedBufferBytes -= oldest->BufferBytes();
    if (mChunkDestroyedCallback) {
      // Inform the user that we're going to destroy this chunk.
      mChunkDestroyedCallback(*oldest);
    }

    // Note: if the chunk is recycled, the deallocated size is 0 and
    // `mTotalBytes` is unchanged, which matches the memory still being held.
    mTotalBytes -=
        MaybeRecycleChunkAndGetDeallocatedSize(std::move(oldest), aLock);
  }

  using ChunkAndUpdate = std::pair<UniquePtr<ProfileBufferChunk>, Update>;

  // Core allocation routine (caller must hold `mMutex`): evict old released
  // chunks to stay under the limit, then produce a recycled or new chunk
  // (possibly null on allocation failure) together with the corresponding
  // state-update to forward to `mUpdateCallback`.
  [[nodiscard]] ChunkAndUpdate GetChunk(
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    MOZ_ASSERT(mUser, "Not registered yet");
    // After this function, the total memory consumption will be the sum of:
    // - Bytes from released (i.e., full) chunks,
    // - Bytes from unreleased (still in use) chunks,
    // - Bytes from the chunk we want to create/recycle. (Note that we don't
    //   count the extra bytes of chunk header, and of extra allocation ability,
    //   for the new chunk, as it's assumed to be negligible compared to the
    //   total memory limit.)
    // If this total is higher than the local limit, we'll want to destroy
    // the oldest released chunks until we're under the limit; if any, we may
    // recycle one of them to avoid a deallocation followed by an allocation.
    while (mReleasedBufferBytes + mUnreleasedBufferBytes +
                   mChunkMinBufferBytes >=
               mMaxTotalBytes &&
           !!mReleasedChunks) {
      // We have reached the local limit, discard the oldest released chunk.
      DiscardOldestReleasedChunk(aLock);
    }

    // Extract the recycled chunk, if any.
    ChunkAndUpdate chunkAndUpdate{TakeRecycledChunk(aLock), Update()};
    UniquePtr<ProfileBufferChunk>& chunk = chunkAndUpdate.first;

    if (!chunk) {
      // No recycled chunk -> Create a chunk now. (This could still fail.)
      chunk = ProfileBufferChunk::Create(mChunkMinBufferBytes);
      // On failure `chunk` is null and moz_malloc_usable_size adds 0.
      mTotalBytes += moz_malloc_usable_size(chunk.get());
    }

    if (chunk) {
      // We do have a chunk (recycled or new), record its size as "unreleased".
      mUnreleasedBufferBytes += chunk->BufferBytes();

      chunkAndUpdate.second =
          Update(mUnreleasedBufferBytes, mReleasedBufferBytes,
                 mReleasedChunks.get(), nullptr);
    }

    return chunkAndUpdate;
  }

  [[nodiscard]] size_t SizeOfExcludingThis(
      MallocSizeOf aMallocSizeOf,
      const baseprofiler::detail::BaseProfilerAutoLock&) const {
    MOZ_ASSERT(mUser, "Not registered yet");
    size_t size = 0;
    if (mReleasedChunks) {
      size += mReleasedChunks->SizeOfIncludingThis(aMallocSizeOf);
    }
    if (mRecycledChunks) {
      size += mRecycledChunks->SizeOfIncludingThis(aMallocSizeOf);
    }
    // Note: Missing size of std::function external resources (if any).
    return size;
  }

  // Maximum number of bytes that should be used by all unreleased and released
  // chunks. Note that only released chunks can be destroyed here, so it is the
  // responsibility of the user to properly release their chunks when possible.
  const size_t mMaxTotalBytes;

  // Minimum number of bytes that new chunks should be able to store.
  // Used when calling `ProfileBufferChunk::Create()`.
  const Length mChunkMinBufferBytes;

  // Mutex guarding the following members.
  mutable baseprofiler::detail::BaseProfilerMutex mMutex;

  // Number of bytes currently held in chunks that have been given away (through
  // `GetChunk` or `RequestChunk`) and not released yet.
  size_t mUnreleasedBufferBytes = 0;

  // Number of bytes currently held in chunks that have been released and stored
  // in `mReleasedChunks` below.
  size_t mReleasedBufferBytes = 0;

  // Total allocated size (used to subtract it from memory counters).
  size_t mTotalBytes = 0;

  // List of all released chunks. The oldest one should be at the start of the
  // list, and may be destroyed or recycled when the memory limit is reached.
  UniquePtr<ProfileBufferChunk> mReleasedChunks;

  // This may hold chunks that were released then slated for destruction, they
  // will be reused next time an allocation would have been needed.
  UniquePtr<ProfileBufferChunk> mRecycledChunks;

  // Optional callback used to notify the user when a chunk is about to be
  // destroyed or recycled. (The data content is always destroyed, but the chunk
  // container may be reused.)
  std::function<void(const ProfileBufferChunk&)> mChunkDestroyedCallback;

  // Callback set from `RequestChunk()`, until it is serviced in
  // `FulfillChunkRequests()`. There can only be one request in flight.
  std::function<void(UniquePtr<ProfileBufferChunk>)> mChunkReceiver;

  // Separate mutex guarding mUpdateCallback, so that it may be invoked outside
  // of the main buffer `mMutex`.
  mutable baseprofiler::detail::BaseProfilerMutex mUpdateCallbackMutex;

  UpdateCallback mUpdateCallback;
};

}  // namespace mozilla

#endif  // ProfileBufferChunkManagerWithLocalLimit_h