// File: ProfileBufferChunk.h
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ProfileBufferChunk_h
#define ProfileBufferChunk_h

#include "mozilla/MemoryReporting.h"
#include "mozilla/ProfileBufferIndex.h"
#include "mozilla/Span.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"

#if defined(MOZ_MEMORY)
#  include "mozmemory.h"
#endif

#include <algorithm>
#include <limits>
#include <type_traits>

#ifdef DEBUG
#  include <cstdio>
#endif

namespace mozilla {

// Represents a single chunk of memory, with a link to the next chunk (or null).
//
// A chunk is made of an internal header (which contains a public part) followed
// by user-accessible bytes.
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | memory containing user blocks                |
// +---------------+---------+----------------------------------------------+
//                           <---------------BufferBytes()------------------>
// <------------------------------ChunkBytes()------------------------------>
//
// The chunk can reserve "blocks", but doesn't know the internal contents of
// each block, it only knows where the first one starts, and where the last one
// ends (which is where the next one will begin, if not already out of range).
// It is up to the user to add structure to each block so that they can be
// distinguished when later read.
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | [1st block]...[last full block]              |
// +---------------+---------+----------------------------------------------+
//                           ChunkHeader().mOffsetFirstBlock ^            ^
//                                    ChunkHeader().mOffsetPastLastBlock --'
//
// It is possible to attempt to reserve more than the remaining space, in which
// case only what is available is returned. The caller is responsible for using
// another chunk, reserving a block "tail" in it, and using both parts to
// constitute a full block. (This initial tail may be empty in some chunks.)
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | tail][1st block]...[last full block][head... |
// +---------------+---------+----------------------------------------------+
//                           ChunkHeader().mOffsetFirstBlock ^            ^
//                                    ChunkHeader().mOffsetPastLastBlock --'
//
// Each Chunk has an internal state (checked in DEBUG builds) that directs how
// to use it during creation, initialization, use, end of life, recycling, and
// destruction. See `State` below for details.
// In particular:
// - `ReserveInitialBlockAsTail()` must be called before the first `Reserve()`
//   after construction or recycling, even with a size of 0 (no actual tail),
// - `MarkDone()` and `MarkRecycled()` must be called as appropriate.
class ProfileBufferChunk {
 public:
  using Byte = uint8_t;
  using Length = uint32_t;

  using SpanOfBytes = Span<Byte>;

  // Hint about the size of the metadata (public and private headers).
  // `Create()` below takes the minimum *buffer* size, so the minimum total
  // Chunk size is at least `SizeofChunkMetadata() + aMinBufferBytes`.
  [[nodiscard]] static constexpr Length SizeofChunkMetadata() {
    return static_cast<Length>(sizeof(InternalHeader));
  }

  // Allocate space for a chunk with a given minimum size, and construct it.
  // The actual size may be higher, to match the actual space taken in the
  // memory pool.
  [[nodiscard]] static UniquePtr<ProfileBufferChunk> Create(
      Length aMinBufferBytes) {
    // We need at least one byte, to cover the always-present `mBuffer` byte.
    aMinBufferBytes = std::max(aMinBufferBytes, Length(1));
    // Trivial struct with the same alignment as `ProfileBufferChunk`, and size
    // equal to that alignment, because typically the sizeof of an object is
    // a multiple of its alignment.
    struct alignas(alignof(InternalHeader)) ChunkStruct {
      Byte c[alignof(InternalHeader)];
    };
    static_assert(std::is_trivial_v<ChunkStruct>,
                  "ChunkStruct must be trivial to avoid any construction");
    // Allocate an array of that struct, enough to contain the expected
    // `ProfileBufferChunk` (with its header+buffer).
    // The `+ alignof - 1` before the division rounds up, so the allocation is
    // never smaller than header + requested buffer.
    size_t count = (sizeof(InternalHeader) + aMinBufferBytes +
                    (alignof(InternalHeader) - 1)) /
                   alignof(InternalHeader);
#if defined(MOZ_MEMORY)
    // Potentially expand the array to use more of the effective allocation:
    // `malloc_good_size` reports the real bucket size the allocator would use,
    // so the extra bytes become usable buffer space instead of waste.
    count = (malloc_good_size(count * sizeof(ChunkStruct)) +
             (sizeof(ChunkStruct) - 1)) /
            sizeof(ChunkStruct);
#endif
    auto chunkStorage = MakeUnique<ChunkStruct[]>(count);
    MOZ_ASSERT(reinterpret_cast<uintptr_t>(chunkStorage.get()) %
                   alignof(InternalHeader) ==
               0);
    // After the allocation, compute the actual chunk size (including header).
    const size_t chunkBytes = count * sizeof(ChunkStruct);
    MOZ_ASSERT(chunkBytes >= sizeof(ProfileBufferChunk),
               "Not enough space to construct a ProfileBufferChunk");
    MOZ_ASSERT(chunkBytes <=
               static_cast<size_t>(std::numeric_limits<Length>::max()));
    // Compute the size of the user-accessible buffer inside the chunk.
    const Length bufferBytes =
        static_cast<Length>(chunkBytes - sizeof(InternalHeader));
    MOZ_ASSERT(bufferBytes >= aMinBufferBytes,
               "Not enough space for minimum buffer size");
    // Construct the header at the beginning of the allocated array, with the
    // known buffer size. (Placement-new; destruction is handled by the
    // UniquePtr below through ~ProfileBufferChunk.)
    new (chunkStorage.get()) ProfileBufferChunk(bufferBytes);
    // We now have a proper `ProfileBufferChunk` object, create the appropriate
    // UniquePtr for it.
    UniquePtr<ProfileBufferChunk> chunk{
        reinterpret_cast<ProfileBufferChunk*>(chunkStorage.release())};
    MOZ_ASSERT(
        size_t(reinterpret_cast<const char*>(
                   &chunk.get()->BufferSpan()[bufferBytes - 1]) -
               reinterpret_cast<const char*>(chunk.get())) == chunkBytes - 1,
        "Buffer span spills out of chunk allocation");
    return chunk;
  }

#ifdef DEBUG
  // DEBUG-only destructor: verifies the chunk was properly retired
  // (MarkDone/MarkRecycled, or never used) before being destroyed.
  ~ProfileBufferChunk() {
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
               mInternalHeader.mState == InternalHeader::State::Done ||
               mInternalHeader.mState == InternalHeader::State::Recycled);
  }
#endif

  // Must be called with the first block tail (may be empty), which will be
  // skipped if the reader starts with this ProfileBufferChunk.
  // Transitions Created/Recycled -> InUse, and records the start timestamp.
  [[nodiscard]] SpanOfBytes ReserveInitialBlockAsTail(Length aTailSize) {
#ifdef DEBUG
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
               mInternalHeader.mState == InternalHeader::State::Recycled);
    mInternalHeader.mState = InternalHeader::State::InUse;
#endif
    mInternalHeader.mHeader.mOffsetFirstBlock = aTailSize;
    mInternalHeader.mHeader.mOffsetPastLastBlock = aTailSize;
    mInternalHeader.mHeader.mStartTimeStamp = TimeStamp::Now();
    return SpanOfBytes(&mBuffer, aTailSize);
  }

  struct ReserveReturn {
    SpanOfBytes mSpan;
    ProfileBufferBlockIndex mBlockRangeIndex;
  };

  // Reserve a block of up to `aBlockSize` bytes, and return a Span to it, and
  // its starting index. The actual size may be smaller, if the block cannot fit
  // in the remaining space.
  [[nodiscard]] ReserveReturn ReserveBlock(Length aBlockSize) {
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Created);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Recycled);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::InUse);
    MOZ_ASSERT(RangeStart() != 0,
               "Expected valid range start before first Reserve()");
    const Length blockOffset = mInternalHeader.mHeader.mOffsetPastLastBlock;
    Length reservedSize = aBlockSize;
    // Note the `>=`: a block that exactly fills the chunk also marks it Full.
    if (MOZ_UNLIKELY(aBlockSize >= RemainingBytes())) {
      reservedSize = RemainingBytes();
#ifdef DEBUG
      mInternalHeader.mState = InternalHeader::State::Full;
#endif
    }
    mInternalHeader.mHeader.mOffsetPastLastBlock += reservedSize;
    mInternalHeader.mHeader.mBlockCount += 1;
    return {SpanOfBytes(&mBuffer + blockOffset, reservedSize),
            ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                mInternalHeader.mHeader.mRangeStart + blockOffset)};
  }

  // When a chunk will not be used to store more blocks (because it is full, or
  // because the profiler will not add more data), it should be marked "done".
  // Access to its content is still allowed.
  void MarkDone() {
#ifdef DEBUG
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Created);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Recycled);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::InUse ||
               mInternalHeader.mState == InternalHeader::State::Full);
    mInternalHeader.mState = InternalHeader::State::Done;
#endif
    mInternalHeader.mHeader.mDoneTimeStamp = TimeStamp::Now();
  }

  // A "Done" chunk may be recycled, to avoid allocating a new one.
  void MarkRecycled() {
#ifdef DEBUG
    // We also allow Created and already-Recycled chunks to be recycled, this
    // way it's easier to recycle chunks when their state is not easily
    // trackable.
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
               mInternalHeader.mState == InternalHeader::State::Done ||
               mInternalHeader.mState == InternalHeader::State::Recycled);
    mInternalHeader.mState = InternalHeader::State::Recycled;
#endif
    // Reset all header fields, in case this recycled chunk gets read.
    mInternalHeader.mHeader.Reset();
  }

  // Public header, meant to uniquely identify a chunk, it may be shared with
  // other processes to coordinate global memory handling.
  struct Header {
    explicit Header(Length aBufferBytes) : mBufferBytes(aBufferBytes) {}

    // Reset all members to their as-new values (apart from the buffer size,
    // which cannot change), ready for re-use.
    void Reset() {
      mOffsetFirstBlock = 0;
      mOffsetPastLastBlock = 0;
      mStartTimeStamp = TimeStamp{};
      mDoneTimeStamp = TimeStamp{};
      mBlockCount = 0;
      mRangeStart = 0;
      mProcessId = 0;
    }

    // Note: Part of the ordering of members below is to avoid unnecessary
    // padding.

    // Members managed by the ProfileBufferChunk.

    // Offset of the first block (past the initial tail block, which may be 0).
    Length mOffsetFirstBlock = 0;
    // Offset past the last byte of the last reserved block.
    // It may be past mBufferBytes when last block continues in the next
    // ProfileBufferChunk. It may be before mBufferBytes if ProfileBufferChunk
    // is marked "Done" before the end is reached.
    Length mOffsetPastLastBlock = 0;
    // Timestamp when the buffer becomes in-use, ready to record data.
    TimeStamp mStartTimeStamp;
    // Timestamp when the buffer is "Done" (which happens when the last block is
    // written). This will be used to find and discard the oldest
    // ProfileBufferChunk.
    TimeStamp mDoneTimeStamp;
    // Number of bytes in the buffer, set once at construction time.
    const Length mBufferBytes;
    // Number of reserved blocks (including final one even if partial, but
    // excluding initial tail).
    Length mBlockCount = 0;

    // Meta-data set by the user.

    // Index of the first byte of this ProfileBufferChunk, relative to all
    // Chunks for this process. Index 0 is reserved as nullptr-like index,
    // mRangeStart should be set to a non-0 value before the first `Reserve()`.
    ProfileBufferIndex mRangeStart = 0;
    // Process writing to this ProfileBufferChunk.
    int mProcessId = 0;

    // A bit of spare space (necessary here because of the alignment due to
    // other members), may be later repurposed for extra data.
    const int mPADDING = 0;
  };

  [[nodiscard]] const Header& ChunkHeader() const {
    return mInternalHeader.mHeader;
  }

  [[nodiscard]] Length BufferBytes() const {
    return ChunkHeader().mBufferBytes;
  }

  // Total size of the chunk (buffer + header).
  [[nodiscard]] Length ChunkBytes() const {
    return static_cast<Length>(sizeof(InternalHeader)) + BufferBytes();
  }

  // Size of external resources, in this case all the following chunks.
  [[nodiscard]] size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    const ProfileBufferChunk* const next = GetNext();
    return next ? next->SizeOfIncludingThis(aMallocSizeOf) : 0;
  }

  // Size of this chunk and all following ones.
  [[nodiscard]] size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    // Just in case `aMallocSizeOf` falls back on just `sizeof`, make sure we
    // account for at least the actual Chunk requested allocation size.
    return std::max<size_t>(aMallocSizeOf(this), ChunkBytes()) +
           SizeOfExcludingThis(aMallocSizeOf);
  }

  [[nodiscard]] Length RemainingBytes() const {
    return BufferBytes() - OffsetPastLastBlock();
  }

  [[nodiscard]] Length OffsetFirstBlock() const {
    return ChunkHeader().mOffsetFirstBlock;
  }

  [[nodiscard]] Length OffsetPastLastBlock() const {
    return ChunkHeader().mOffsetPastLastBlock;
  }

  [[nodiscard]] Length BlockCount() const { return ChunkHeader().mBlockCount; }

  [[nodiscard]] int ProcessId() const { return ChunkHeader().mProcessId; }

  void SetProcessId(int aProcessId) {
    mInternalHeader.mHeader.mProcessId = aProcessId;
  }

  // Global range index at the start of this Chunk.
  [[nodiscard]] ProfileBufferIndex RangeStart() const {
    return ChunkHeader().mRangeStart;
  }

  void SetRangeStart(ProfileBufferIndex aRangeStart) {
    mInternalHeader.mHeader.mRangeStart = aRangeStart;
  }

  // Get a read-only Span to the buffer. It is up to the caller to decypher the
  // contents, based on known offsets and the internal block structure.
  [[nodiscard]] Span<const Byte> BufferSpan() const {
    return Span<const Byte>(&mBuffer, BufferBytes());
  }

  // Read one byte at `aOffset`; must be within the written range.
  [[nodiscard]] Byte ByteAt(Length aOffset) const {
    MOZ_ASSERT(aOffset < OffsetPastLastBlock());
    return *(&mBuffer + aOffset);
  }

  // Non-owning access to the next chunk in the chain (or nullptr).
  [[nodiscard]] ProfileBufferChunk* GetNext() {
    return mInternalHeader.mNext.get();
  }
  [[nodiscard]] const ProfileBufferChunk* GetNext() const {
    return mInternalHeader.mNext.get();
  }

  // Detach and return ownership of the rest of the chain (may be null).
  [[nodiscard]] UniquePtr<ProfileBufferChunk> ReleaseNext() {
    return std::move(mInternalHeader.mNext);
  }

  // Insert `aChunk` (and its own chain) right after `this`; the previous
  // `next` chain is re-attached at the end of the inserted chain.
  void InsertNext(UniquePtr<ProfileBufferChunk>&& aChunk) {
    if (!aChunk) {
      return;
    }
    aChunk->SetLast(ReleaseNext());
    mInternalHeader.mNext = std::move(aChunk);
  }

  // Find the last chunk in this chain (it may be `this`).
  [[nodiscard]] ProfileBufferChunk* Last() {
    ProfileBufferChunk* chunk = this;
    for (;;) {
      ProfileBufferChunk* next = chunk->GetNext();
      if (!next) {
        return chunk;
      }
      chunk = next;
    }
  }
  [[nodiscard]] const ProfileBufferChunk* Last() const {
    const ProfileBufferChunk* chunk = this;
    for (;;) {
      const ProfileBufferChunk* next = chunk->GetNext();
      if (!next) {
        return chunk;
      }
      chunk = next;
    }
  }

  // Append `aChunk` at the end of this chain (no-op if null).
  void SetLast(UniquePtr<ProfileBufferChunk>&& aChunk) {
    if (!aChunk) {
      return;
    }
    Last()->mInternalHeader.mNext = std::move(aChunk);
  }

  // Join two possibly-null chunk lists.
  [[nodiscard]] static UniquePtr<ProfileBufferChunk> Join(
      UniquePtr<ProfileBufferChunk>&& aFirst,
      UniquePtr<ProfileBufferChunk>&& aLast) {
    if (aFirst) {
      aFirst->SetLast(std::move(aLast));
      return std::move(aFirst);
    }
    return std::move(aLast);
  }

#ifdef DEBUG
  // Debugging helper: print the chunk header and a hex+ascii dump of the
  // written bytes, 16 per row, with '['/']'/'#' marking block boundaries.
  void Dump(std::FILE* aFile = stdout) const {
    fprintf(aFile,
            "Chunk[%p] chunkSize=%u bufferSize=%u state=%s rangeStart=%u "
            "firstBlockOffset=%u offsetPastLastBlock=%u blockCount=%u",
            this, unsigned(ChunkBytes()), unsigned(BufferBytes()),
            mInternalHeader.StateString(), unsigned(RangeStart()),
            unsigned(OffsetFirstBlock()), unsigned(OffsetPastLastBlock()),
            unsigned(BlockCount()));
    const auto len = OffsetPastLastBlock();
    constexpr unsigned columns = 16;
    unsigned char ascii[columns + 1];
    ascii[columns] = '\0';
    // NOTE(review): when `len == 0` the loop never runs, so `ascii[0..15]`
    // stays uninitialized yet is printed by the flush below; and when
    // `len % columns == columns - 1` the final partial row's ascii is never
    // flushed (`len % columns < columns - 1` is false). DEBUG-only; confirm
    // against upstream before changing.
    for (Length i = 0; i < len; ++i) {
      if (i % columns == 0) {
        // Row header: offset in decimal and hex.
        fprintf(aFile, "\n %4u=0x%03x:", unsigned(i), unsigned(i));
        for (unsigned a = 0; a < columns; ++a) {
          ascii[a] = ' ';
        }
      }
      // Separator before each byte marks block boundaries.
      unsigned char sep = ' ';
      if (i == OffsetFirstBlock()) {
        if (i == OffsetPastLastBlock()) {
          sep = '#';
        } else {
          sep = '[';
        }
      } else if (i == OffsetPastLastBlock()) {
        sep = ']';
      }
      unsigned char c = *(&mBuffer + i);
      fprintf(aFile, "%c%02x", sep, c);

      if (i == len - 1) {
        if (i + 1 == OffsetPastLastBlock()) {
          // Special case when last block ends right at the end.
          fprintf(aFile, "]");
        } else {
          fprintf(aFile, " ");
        }
      } else if (i % columns == columns - 1) {
        fprintf(aFile, " ");
      }

      ascii[i % columns] = (c >= ' ' && c <= '~') ? c : '.';

      if (i % columns == columns - 1) {
        // End of a full row: print the ascii rendering.
        fprintf(aFile, " %s", ascii);
      }
    }

    // Flush the (partial) last row's ascii rendering, padded to align.
    if (len % columns < columns - 1) {
      for (Length i = len % columns; i < columns; ++i) {
        fprintf(aFile, " ");
      }
      fprintf(aFile, " %s", ascii);
    }

    fprintf(aFile, "\n");
  }
#endif  // DEBUG

 private:
  // ProfileBufferChunk constructor. Use static `Create()` to allocate and
  // construct a ProfileBufferChunk.
  explicit ProfileBufferChunk(Length aBufferBytes)
      : mInternalHeader(aBufferBytes) {}

  // This internal header starts with the public `Header`, and adds some data
  // only necessary for local handling.
  // This encapsulation is also necessary to perform placement-new in
  // `Create()`.
  struct InternalHeader {
    explicit InternalHeader(Length aBufferBytes) : mHeader(aBufferBytes) {}

    Header mHeader;
    // Owning link to the next chunk in the chain (or null).
    UniquePtr<ProfileBufferChunk> mNext;

#ifdef DEBUG
    enum class State {
      Created,  // Self-set. Just constructed, waiting for initial block tail.
      InUse,    // Ready to accept blocks.
      Full,     // Self-set. Blocks reach the end (or further).
      Done,     // Blocks won't be added anymore.
      Recycled  // Still full of data, but expecting an initial block tail.
    };

    State mState = State::Created;
    // Transition table: (X=unexpected)
    // Method \ State            Created  InUse       Full  Done      Recycled
    // ReserveInitialBlockAsTail InUse    X           X     X         InUse
    // Reserve                   X        InUse/Full  X     X         X
    // MarkDone                  X        Done        Done  X         X
    // MarkRecycled              X        X           X     Recycled  X
    // destructor                ok       X           X     ok        ok

    const char* StateString() const {
      switch (mState) {
        case State::Created:
          return "Created";
        case State::InUse:
          return "InUse";
        case State::Full:
          return "Full";
        case State::Done:
          return "Done";
        case State::Recycled:
          return "Recycled";
        default:
          return "?";
      }
    }
#else   // DEBUG
    const char* StateString() const { return "(non-DEBUG)"; }
#endif
  };

  InternalHeader mInternalHeader;

  // KEEP THIS LAST!
  // First byte of the buffer. Note that ProfileBufferChunk::Create allocates a
  // bigger block, such that `mBuffer` is the first of `mBufferBytes` available
  // bytes.
  // The initialization is not strictly needed, because bytes should only be
  // read after they have been written and `mOffsetPastLastBlock` has been
  // updated. However:
  // - Reviewbot complains that it's not initialized.
  // - It's cheap to initialize one byte.
  // - In the worst case (reading does happen), zero is not a valid entry size
  //   and should get caught in entry readers.
  Byte mBuffer = '\0';
};

}  // namespace mozilla

#endif  // ProfileBufferChunk_h