ProfileBufferEntry.cpp (45467B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ProfileBufferEntry.h"

#include <ostream>
#include <type_traits>

#include "mozilla/Logging.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StackWalk.h"

#include "mozilla/BaseProfiler.h"
#include "mozilla/BaseProfilerMarkers.h"
#include "platform.h"
#include "ProfileBuffer.h"
#include "ProfilerBacktrace.h"

namespace mozilla {
namespace baseprofiler {

////////////////////////////////////////////////////////////////////////
// BEGIN ProfileBufferEntry
//
// Each constructor below copies its payload into the raw `mStorage` byte
// array with memcpy, and each getter copies it back out the same way.
// memcpy (rather than pointer casts) is used so that the type-punning is
// free of strict-aliasing and alignment problems.

ProfileBufferEntry::ProfileBufferEntry()
    : mKind(Kind::INVALID), mStorage{0, 0, 0, 0, 0, 0, 0, 0} {}

// aString must be a static string: only the pointer value is stored in the
// entry, not a copy of the characters.
ProfileBufferEntry::ProfileBufferEntry(Kind aKind, const char* aString)
    : mKind(aKind) {
  memcpy(mStorage, &aString, sizeof(aString));
}

// Stores the first kNumChars characters of `aChars` inline in the entry.
ProfileBufferEntry::ProfileBufferEntry(Kind aKind, char aChars[kNumChars])
    : mKind(aKind) {
  memcpy(mStorage, aChars, kNumChars);
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, void* aPtr) : mKind(aKind) {
  memcpy(mStorage, &aPtr, sizeof(aPtr));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, double aDouble)
    : mKind(aKind) {
  memcpy(mStorage, &aDouble, sizeof(aDouble));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int aInt) : mKind(aKind) {
  memcpy(mStorage, &aInt, sizeof(aInt));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int64_t aInt64)
    : mKind(aKind) {
  memcpy(mStorage, &aInt64, sizeof(aInt64));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, uint64_t aUint64)
    : mKind(aKind) {
  memcpy(mStorage, &aUint64, sizeof(aUint64));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, uint32_t aUint32)
    : mKind(aKind) {
  memcpy(mStorage, &aUint32, sizeof(aUint32));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind,
                                       BaseProfilerThreadId aThreadId)
    : mKind(aKind) {
  // The memcpy-based storage scheme is only valid for trivially copyable
  // payloads that fit in mStorage; check both at compile time.
  static_assert(std::is_trivially_copyable_v<BaseProfilerThreadId>);
  static_assert(sizeof(aThreadId) <= sizeof(mStorage));
  memcpy(mStorage, &aThreadId, sizeof(aThreadId));
}

// Getters: callers are responsible for only calling the getter matching the
// entry's Kind; the storage itself is untyped.

const char* ProfileBufferEntry::GetString() const {
  const char* result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

void* ProfileBufferEntry::GetPtr() const {
  void* result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

double ProfileBufferEntry::GetDouble() const {
  double result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

int ProfileBufferEntry::GetInt() const {
  int result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

int64_t ProfileBufferEntry::GetInt64() const {
  int64_t result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

uint64_t ProfileBufferEntry::GetUint64() const {
  uint64_t result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

BaseProfilerThreadId ProfileBufferEntry::GetThreadId() const {
  BaseProfilerThreadId result;
  static_assert(std::is_trivially_copyable_v<BaseProfilerThreadId>);
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

// Copies the inline characters stored by the (Kind, char[kNumChars])
// constructor into `aOutArray`; the data is not necessarily null-terminated.
void ProfileBufferEntry::CopyCharsInto(char (&aOutArray)[kNumChars]) const {
  memcpy(aOutArray, mStorage, kNumChars);
}

// END ProfileBufferEntry
////////////////////////////////////////////////////////////////////////

// As mentioned in ProfileBufferEntry.h, the JSON format contains many
// arrays whose elements are laid out according to various schemas to help
// de-duplication. This RAII class helps write these arrays by keeping track of
// the last non-null element written and adding the appropriate number of null
// elements when writing new non-null elements. It also automatically opens and
// closes an array element on the given JSON writer.
//
// You grant the AutoArraySchemaWriter exclusive access to the JSONWriter and
// the UniqueJSONStrings objects for the lifetime of AutoArraySchemaWriter. Do
// not access them independently while the AutoArraySchemaWriter is alive.
// If you need to add complex objects, call FreeFormElement(), which will give
// you temporary access to the writer.
// NOTE(review): FreeFormElement() does not appear on the class defined below;
// this last paragraph looks stale — confirm against ProfileBufferEntry.h.
141 // 142 // Example usage: 143 // 144 // // Define the schema of elements in this type of array: [FOO, BAR, BAZ] 145 // enum Schema : uint32_t { 146 // FOO = 0, 147 // BAR = 1, 148 // BAZ = 2 149 // }; 150 // 151 // AutoArraySchemaWriter writer(someJsonWriter, someUniqueStrings); 152 // if (shouldWriteFoo) { 153 // writer.IntElement(FOO, getFoo()); 154 // } 155 // ... etc ... 156 // 157 // The elements need to be added in-order. 158 class MOZ_RAII AutoArraySchemaWriter { 159 public: 160 explicit AutoArraySchemaWriter(SpliceableJSONWriter& aWriter) 161 : mJSONWriter(aWriter), mNextFreeIndex(0) { 162 mJSONWriter.StartArrayElement(); 163 } 164 165 ~AutoArraySchemaWriter() { mJSONWriter.EndArray(); } 166 167 template <typename T> 168 void IntElement(uint32_t aIndex, T aValue) { 169 static_assert(!std::is_same_v<T, uint64_t>, 170 "Narrowing uint64 -> int64 conversion not allowed"); 171 FillUpTo(aIndex); 172 mJSONWriter.IntElement(static_cast<int64_t>(aValue)); 173 } 174 175 void DoubleElement(uint32_t aIndex, double aValue) { 176 FillUpTo(aIndex); 177 mJSONWriter.DoubleElement(aValue); 178 } 179 180 void TimeMsElement(uint32_t aIndex, double aTime_ms) { 181 FillUpTo(aIndex); 182 mJSONWriter.TimeDoubleMsElement(aTime_ms); 183 } 184 185 void BoolElement(uint32_t aIndex, bool aValue) { 186 FillUpTo(aIndex); 187 mJSONWriter.BoolElement(aValue); 188 } 189 190 protected: 191 SpliceableJSONWriter& Writer() { return mJSONWriter; } 192 193 void FillUpTo(uint32_t aIndex) { 194 MOZ_ASSERT(aIndex >= mNextFreeIndex); 195 mJSONWriter.NullElements(aIndex - mNextFreeIndex); 196 mNextFreeIndex = aIndex + 1; 197 } 198 199 private: 200 SpliceableJSONWriter& mJSONWriter; 201 uint32_t mNextFreeIndex; 202 }; 203 204 // Same as AutoArraySchemaWriter, but this can also write strings (output as 205 // indexes into the table of unique strings). 
class MOZ_RAII AutoArraySchemaWithStringsWriter : public AutoArraySchemaWriter {
 public:
  AutoArraySchemaWithStringsWriter(SpliceableJSONWriter& aWriter,
                                   UniqueJSONStrings& aStrings)
      : AutoArraySchemaWriter(aWriter), mStrings(aStrings) {}

  // Writes a string at schema position `aIndex`, output as an index into the
  // unique-strings table rather than as an inline string.
  void StringElement(uint32_t aIndex, const Span<const char>& aValue) {
    FillUpTo(aIndex);
    mStrings.WriteElement(Writer(), aValue);
  }

 private:
  UniqueJSONStrings& mStrings;
};

// Starts a new stack consisting of the single frame `aFrame` (interning the
// frame in the frame table first).
UniqueStacks::StackKey UniqueStacks::BeginStack(const FrameKey& aFrame) {
  return StackKey(GetOrAddFrameIndex(aFrame));
}

// Returns the key for `aStack` extended by `aFrame`; both the prefix stack
// and the new frame are interned in their respective tables.
UniqueStacks::StackKey UniqueStacks::AppendFrame(const StackKey& aStack,
                                                 const FrameKey& aFrame) {
  return StackKey(aStack, GetOrAddStackIndex(aStack),
                  GetOrAddFrameIndex(aFrame));
}

bool UniqueStacks::FrameKey::NormalFrameData::operator==(
    const NormalFrameData& aOther) const {
  return mLocation == aOther.mLocation &&
         mRelevantForJS == aOther.mRelevantForJS &&
         mInnerWindowID == aOther.mInnerWindowID && mLine == aOther.mLine &&
         mColumn == aOther.mColumn && mCategoryPair == aOther.mCategoryPair;
}

UniqueStacks::UniqueStacks()
    : mUniqueStrings(MakeUnique<UniqueJSONStrings>(
          FailureLatchInfallibleSource::Singleton())),
      mFrameTableWriter(FailureLatchInfallibleSource::Singleton()),
      mStackTableWriter(FailureLatchInfallibleSource::Singleton()) {
  mFrameTableWriter.StartBareList();
  mStackTableWriter.StartBareList();
}

// Returns the stack-table index for `aStack`, streaming and registering it
// first if it has not been seen before. New entries get the next free index
// (the current map size).
uint32_t UniqueStacks::GetOrAddStackIndex(const StackKey& aStack) {
  uint32_t count = mStackToIndexMap.count();
  auto entry = mStackToIndexMap.lookupForAdd(aStack);
  if (entry) {
    MOZ_ASSERT(entry->value() < count);
    return entry->value();
  }

  MOZ_RELEASE_ASSERT(mStackToIndexMap.add(entry, aStack, count));
  StreamStack(aStack);
  return count;
}

// Same de-duplication scheme as GetOrAddStackIndex, but for frames.
uint32_t UniqueStacks::GetOrAddFrameIndex(const FrameKey& aFrame) {
  uint32_t count = mFrameToIndexMap.count();
  auto entry = mFrameToIndexMap.lookupForAdd(aFrame);
  if (entry) {
    MOZ_ASSERT(entry->value() < count);
    return entry->value();
  }

  MOZ_RELEASE_ASSERT(mFrameToIndexMap.add(entry, aFrame, count));
  StreamNonJITFrame(aFrame);
  return count;
}

// Closes the accumulated frame-table list and splices it into `aWriter`.
// May only be called once; the internal writer is consumed.
void UniqueStacks::SpliceFrameTableElements(SpliceableJSONWriter& aWriter) {
  mFrameTableWriter.EndBareList();
  aWriter.TakeAndSplice(mFrameTableWriter.TakeChunkedWriteFunc());
}

// Same as SpliceFrameTableElements, but for the stack table.
void UniqueStacks::SpliceStackTableElements(SpliceableJSONWriter& aWriter) {
  mStackTableWriter.EndBareList();
  aWriter.TakeAndSplice(mStackTableWriter.TakeChunkedWriteFunc());
}

// Writes one stack-table row: [prefix-stack-index?, frame-index].
void UniqueStacks::StreamStack(const StackKey& aStack) {
  enum Schema : uint32_t { PREFIX = 0, FRAME = 1 };

  AutoArraySchemaWriter writer(mStackTableWriter);
  if (aStack.mPrefixStackIndex.isSome()) {
    writer.IntElement(PREFIX, *aStack.mPrefixStackIndex);
  }
  writer.IntElement(FRAME, aStack.mFrameIndex);
}

// Writes one frame-table row for a non-JIT frame, following the schema below.
void UniqueStacks::StreamNonJITFrame(const FrameKey& aFrame) {
  using NormalFrameData = FrameKey::NormalFrameData;

  enum Schema : uint32_t {
    LOCATION = 0,
    RELEVANT_FOR_JS = 1,
    INNER_WINDOW_ID = 2,
    IMPLEMENTATION = 3,
    LINE = 4,
    COLUMN = 5,
    CATEGORY = 6,
    SUBCATEGORY = 7
  };

  AutoArraySchemaWithStringsWriter writer(mFrameTableWriter, *mUniqueStrings);

  const NormalFrameData& data = aFrame.mData.as<NormalFrameData>();
  writer.StringElement(LOCATION, data.mLocation);
  writer.BoolElement(RELEVANT_FOR_JS, data.mRelevantForJS);

  // It's okay to convert uint64_t to double here because DOM always creates
  // IDs that are convertible to double.
  writer.DoubleElement(INNER_WINDOW_ID, data.mInnerWindowID);

  if (data.mLine.isSome()) {
    writer.IntElement(LINE, *data.mLine);
  }
  if (data.mColumn.isSome()) {
    writer.IntElement(COLUMN, *data.mColumn);
  }
  if (data.mCategoryPair.isSome()) {
    const ProfilingCategoryPairInfo& info =
        GetProfilingCategoryPairInfo(*data.mCategoryPair);
    writer.IntElement(CATEGORY, uint32_t(info.mCategory));
    writer.IntElement(SUBCATEGORY, info.mSubcategoryIndex);
  }
}

// One decoded sample, ready to be written out by WriteSample().
struct ProfileSample {
  uint32_t mStack;
  double mTime;
  Maybe<double> mResponsiveness;
};

// Writes one sample row: [stack-index, time-ms, event-delay?].
static void WriteSample(SpliceableJSONWriter& aWriter,
                        const ProfileSample& aSample) {
  enum Schema : uint32_t {
    STACK = 0,
    TIME = 1,
    EVENT_DELAY = 2,
  };

  AutoArraySchemaWriter writer(aWriter);

  writer.IntElement(STACK, aSample.mStack);

  writer.TimeMsElement(TIME, aSample.mTime);

  if (aSample.mResponsiveness.isSome()) {
    writer.DoubleElement(EVENT_DELAY, *aSample.mResponsiveness);
  }
}

// Iterates over "legacy" ProfileBufferEntry items in a ProfileChunkedBuffer,
// transparently skipping over "modern" entries that do not fit in a
// ProfileBufferEntry.
class EntryGetter {
 public:
  explicit EntryGetter(ProfileChunkedBuffer::Reader& aReader,
                       uint64_t aInitialReadPos = 0)
      : mBlockIt(
            aReader.At(ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                aInitialReadPos))),
        mBlockItEnd(aReader.end()) {
    if (!ReadLegacyOrEnd()) {
      // Find and read the next non-legacy entry.
      Next();
    }
  }

  bool Has() const { return mBlockIt != mBlockItEnd; }

  const ProfileBufferEntry& Get() const {
    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Get()`");
    return mEntry;
  }

  void Next() {
    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Next()`");
    for (;;) {
      ++mBlockIt;
      if (ReadLegacyOrEnd()) {
        // Either we're at the end, or we could read a legacy entry -> Done.
        break;
      }
      // Otherwise loop around until we hit the end or a legacy entry.
    }
  }

  ProfileBufferBlockIndex CurBlockIndex() const {
    return mBlockIt.CurrentBlockIndex();
  }

  uint64_t CurPos() const {
    return CurBlockIndex().ConvertToProfileBufferIndex();
  }

 private:
  // Try to read the entry at the current `mBlockIt` position.
  // * If we're at the end of the buffer, just return `true`.
  // * If there is a "legacy" entry (containing a real `ProfileBufferEntry`),
  //   read it into `mEntry`, and return `true` as well.
  // * Otherwise the entry contains a "modern" type that cannot be read into
  //   `mEntry`, return `false` (so `EntryGetter` can skip to another entry).
  bool ReadLegacyOrEnd() {
    if (!Has()) {
      return true;
    }
    // Read the entry "kind", which is always at the start of all entries.
    ProfileBufferEntryReader aER = *mBlockIt;
    auto type = static_cast<ProfileBufferEntry::Kind>(
        aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    if (type >= ProfileBufferEntry::Kind::LEGACY_LIMIT) {
      aER.SetRemainingBytes(0);
      return false;
    }
    // Here, we have a legacy item, we need to read it from the start.
    // Because the above `ReadObject` moved the reader, we need to reset it to
    // the start of the entry before reading the whole entry.
    aER = *mBlockIt;
    aER.ReadBytes(&mEntry, aER.RemainingBytes());
    return true;
  }

  ProfileBufferEntry mEntry;
  ProfileChunkedBuffer::BlockIterator mBlockIt;
  const ProfileChunkedBuffer::BlockIterator mBlockItEnd;
};

// The following grammar shows legal sequences of profile buffer entries.
// The sequences beginning with a ThreadId entry are known as "samples".
//
// (
//   ( /* Samples */
//     ThreadId
//     Time
//     ( NativeLeafAddr
//     | Label FrameFlags?
DynamicStringFragment* LineNumber? CategoryPair? 442 // | JitReturnAddr 443 // )+ 444 // Responsiveness? 445 // ) 446 // | MarkerData 447 // | ( /* Counters */ 448 // CounterId 449 // Time 450 // ( 451 // CounterKey 452 // Count 453 // Number? 454 // )* 455 // ) 456 // | CollectionStart 457 // | CollectionEnd 458 // | Pause 459 // | Resume 460 // | ( ProfilerOverheadTime /* Sampling start timestamp */ 461 // ProfilerOverheadDuration /* Lock acquisition */ 462 // ProfilerOverheadDuration /* Expired data cleaning */ 463 // ProfilerOverheadDuration /* Counters */ 464 // ProfilerOverheadDuration /* Threads */ 465 // ) 466 // )* 467 // 468 // The most complicated part is the stack entry sequence that begins with 469 // Label. Here are some examples. 470 // 471 // - ProfilingStack frames without a dynamic string: 472 // 473 // Label("js::RunScript") 474 // CategoryPair(ProfilingCategoryPair::JS) 475 // 476 // Label("XREMain::XRE_main") 477 // LineNumber(4660) 478 // CategoryPair(ProfilingCategoryPair::OTHER) 479 // 480 // Label("ElementRestyler::ComputeStyleChangeFor") 481 // LineNumber(3003) 482 // CategoryPair(ProfilingCategoryPair::CSS) 483 // 484 // - ProfilingStack frames with a dynamic string: 485 // 486 // Label("nsObserverService::NotifyObservers") 487 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME)) 488 // DynamicStringFragment("domwindo") 489 // DynamicStringFragment("wopened") 490 // LineNumber(291) 491 // CategoryPair(ProfilingCategoryPair::OTHER) 492 // 493 // Label("") 494 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME)) 495 // DynamicStringFragment("closeWin") 496 // DynamicStringFragment("dow (chr") 497 // DynamicStringFragment("ome://gl") 498 // DynamicStringFragment("obal/con") 499 // DynamicStringFragment("tent/glo") 500 // DynamicStringFragment("balOverl") 501 // DynamicStringFragment("ay.js:5)") 502 // DynamicStringFragment("") # this string holds the closing '\0' 503 // LineNumber(25) 504 // 
CategoryPair(ProfilingCategoryPair::JS) 505 // 506 // Label("") 507 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME)) 508 // DynamicStringFragment("bound (s") 509 // DynamicStringFragment("elf-host") 510 // DynamicStringFragment("ed:914)") 511 // LineNumber(945) 512 // CategoryPair(ProfilingCategoryPair::JS) 513 // 514 // - A profiling stack frame with an overly long dynamic string: 515 // 516 // Label("") 517 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME)) 518 // DynamicStringFragment("(too lon") 519 // DynamicStringFragment("g)") 520 // LineNumber(100) 521 // CategoryPair(ProfilingCategoryPair::NETWORK) 522 // 523 // - A wasm JIT frame: 524 // 525 // Label("") 526 // FrameFlags(uint64_t(0)) 527 // DynamicStringFragment("wasm-fun") 528 // DynamicStringFragment("ction[87") 529 // DynamicStringFragment("36] (blo") 530 // DynamicStringFragment("b:http:/") 531 // DynamicStringFragment("/webasse") 532 // DynamicStringFragment("mbly.org") 533 // DynamicStringFragment("/3dc5759") 534 // DynamicStringFragment("4-ce58-4") 535 // DynamicStringFragment("626-975b") 536 // DynamicStringFragment("-08ad116") 537 // DynamicStringFragment("30bc1:38") 538 // DynamicStringFragment("29856)") 539 // 540 // - A JS frame in a synchronous sample: 541 // 542 // Label("") 543 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME)) 544 // DynamicStringFragment("u (https") 545 // DynamicStringFragment("://perf-") 546 // DynamicStringFragment("html.io/") 547 // DynamicStringFragment("ac0da204") 548 // DynamicStringFragment("aaa44d75") 549 // DynamicStringFragment("a800.bun") 550 // DynamicStringFragment("dle.js:2") 551 // DynamicStringFragment("5)") 552 553 // Because this is a format entirely internal to the Profiler, any parsing 554 // error indicates a bug in the ProfileBuffer writing or the parser itself, 555 // or possibly flaky hardware. 
// Logs a parse error to stderr, asserts in debug builds, and `continue`s the
// enclosing loop — so this macro may only be used directly inside a loop.
#define ERROR_AND_CONTINUE(msg)                            \
  {                                                        \
    fprintf(stderr, "ProfileBuffer parse error: %s", msg); \
    MOZ_ASSERT(false, msg);                                \
    continue;                                              \
  }

// Streams all samples belonging to `aThreadId` (or all samples when
// `aThreadId` is unspecified, which is only valid for 1-sample buffers) that
// are not older than `aSinceTime`, interning stacks/frames via
// `aUniqueStacks`. Returns the thread id of the last sample processed.
BaseProfilerThreadId ProfileBuffer::StreamSamplesToJSON(
    SpliceableJSONWriter& aWriter, BaseProfilerThreadId aThreadId,
    double aSinceTime, UniqueStacks& aUniqueStacks) const {
  // Scratch buffer used to reassemble dynamic strings split into
  // DynamicStringFragment entries.
  UniquePtr<char[]> dynStrBuf = MakeUnique<char[]>(kMaxFrameKeyLength);

  return mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    BaseProfilerThreadId processedThreadId;

    EntryGetter e(*aReader);

    for (;;) {
      // This block skips entries until we find the start of the next sample.
      // This is useful in three situations.
      //
      // - The circular buffer overwrites old entries, so when we start parsing
      //   we might be in the middle of a sample, and we must skip forward to
      //   the start of the next sample.
      //
      // - We skip samples that don't have an appropriate ThreadId or Time.
      //
      // - We skip range Pause, Resume, CollectionStart, Counter and
      //   CollectionEnd entries between samples.
      while (e.Has()) {
        if (e.Get().IsThreadId()) {
          break;
        }
        e.Next();
      }

      if (!e.Has()) {
        break;
      }

      // Due to the skip_to_next_sample block above, if we have an entry here
      // it must be a ThreadId entry.
      MOZ_ASSERT(e.Get().IsThreadId());

      BaseProfilerThreadId threadId = e.Get().GetThreadId();
      e.Next();

      // Ignore samples that are for the wrong thread.
      if (threadId != aThreadId && aThreadId.IsSpecified()) {
        continue;
      }

      MOZ_ASSERT(
          aThreadId.IsSpecified() || !processedThreadId.IsSpecified(),
          "Unspecified aThreadId should only be used with 1-sample buffer");

      ProfileSample sample;

      if (e.Has() && e.Get().IsTime()) {
        sample.mTime = e.Get().GetDouble();
        e.Next();

        // Ignore samples that are too old.
        if (sample.mTime < aSinceTime) {
          continue;
        }
      } else {
        ERROR_AND_CONTINUE("expected a Time entry");
      }

      // Every stack implicitly starts at a synthetic "(root)" frame.
      UniqueStacks::StackKey stack =
          aUniqueStacks.BeginStack(UniqueStacks::FrameKey("(root)"));

      int numFrames = 0;
      while (e.Has()) {
        if (e.Get().IsNativeLeafAddr()) {
          numFrames++;

          void* pc = e.Get().GetPtr();
          e.Next();

          static const uint32_t BUF_SIZE = 256;
          char buf[BUF_SIZE];

          // Bug 753041: We need a double cast here to tell GCC that we don't
          // want to sign extend 32-bit addresses starting with 0xFXXXXXX.
          unsigned long long pcULL = (unsigned long long)(uintptr_t)pc;
          SprintfLiteral(buf, "0x%llx", pcULL);

          // If the "MOZ_PROFILER_SYMBOLICATE" env-var is set, we add a local
          // symbolication description to the PC address. This is off by
          // default, and mainly intended for local development.
          // (Evaluated once per process thanks to the function-local static.)
          static const bool preSymbolicate = []() {
            const char* symbolicate = getenv("MOZ_PROFILER_SYMBOLICATE");
            return symbolicate && symbolicate[0] != '\0';
          }();
          if (preSymbolicate) {
            MozCodeAddressDetails details;
            if (MozDescribeCodeAddress(pc, &details)) {
              // Replace \0 terminator with space.
              const uint32_t pcLen = strlen(buf);
              buf[pcLen] = ' ';
              // Add description after space. Note: Using a frame number of 0,
              // as using `numFrames` wouldn't help here, and would prevent
              // combining same function calls that happen at different depths.
              // TODO: Remove unsightly "#00: " if too annoying. :-)
              MozFormatCodeAddressDetails(
                  buf + pcLen + 1, BUF_SIZE - (pcLen + 1), 0, pc, &details);
            }
          }

          stack = aUniqueStacks.AppendFrame(stack, UniqueStacks::FrameKey(buf));

        } else if (e.Get().IsLabel()) {
          numFrames++;

          const char* label = e.Get().GetString();
          e.Next();

          using FrameFlags = ProfilingStackFrame::Flags;
          uint32_t frameFlags = 0;
          if (e.Has() && e.Get().IsFrameFlags()) {
            frameFlags = uint32_t(e.Get().GetUint64());
            e.Next();
          }

          bool relevantForJS =
              frameFlags & uint32_t(FrameFlags::RELEVANT_FOR_JS);

          // Copy potential dynamic string fragments into dynStrBuf, so that
          // dynStrBuf will then contain the entire dynamic string.
          size_t i = 0;
          dynStrBuf[0] = '\0';
          while (e.Has()) {
            if (e.Get().IsDynamicStringFragment()) {
              char chars[ProfileBufferEntry::kNumChars];
              e.Get().CopyCharsInto(chars);
              for (char c : chars) {
                if (i < kMaxFrameKeyLength) {
                  dynStrBuf[i] = c;
                  i++;
                }
              }
              e.Next();
            } else {
              break;
            }
          }
          // Overly long strings are truncated; always null-terminate.
          dynStrBuf[kMaxFrameKeyLength - 1] = '\0';
          bool hasDynamicString = (i != 0);

          // Combine the static label and the dynamic string according to the
          // STRING_TEMPLATE_* frame flags.
          std::string frameLabel;
          if (label[0] != '\0' && hasDynamicString) {
            if (frameFlags & uint32_t(FrameFlags::STRING_TEMPLATE_METHOD)) {
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else if (frameFlags &
                       uint32_t(FrameFlags::STRING_TEMPLATE_GETTER)) {
              frameLabel += "get ";
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else if (frameFlags &
                       uint32_t(FrameFlags::STRING_TEMPLATE_SETTER)) {
              frameLabel += "set ";
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else {
              frameLabel += label;
              frameLabel += ' ';
              frameLabel += dynStrBuf.get();
            }
          } else if (hasDynamicString) {
            frameLabel += dynStrBuf.get();
          } else {
            frameLabel += label;
          }

          uint64_t innerWindowID = 0;
          if (e.Has() && e.Get().IsInnerWindowID()) {
            innerWindowID = uint64_t(e.Get().GetUint64());
            e.Next();
          }

          Maybe<unsigned> line;
          if (e.Has() && e.Get().IsLineNumber()) {
            line = Some(unsigned(e.Get().GetInt()));
            e.Next();
          }

          Maybe<unsigned> column;
          if (e.Has() && e.Get().IsColumnNumber()) {
            column = Some(unsigned(e.Get().GetInt()));
            e.Next();
          }

          Maybe<ProfilingCategoryPair> categoryPair;
          if (e.Has() && e.Get().IsCategoryPair()) {
            categoryPair =
                Some(ProfilingCategoryPair(uint32_t(e.Get().GetInt())));
            e.Next();
          }

          stack = aUniqueStacks.AppendFrame(
              stack, UniqueStacks::FrameKey(std::move(frameLabel),
                                            relevantForJS, innerWindowID, line,
                                            column, categoryPair));

        } else {
          break;
        }
      }

      if (numFrames == 0) {
        // It is possible to have empty stacks if native stackwalking is
        // disabled. Skip samples with empty stacks. (See Bug 1497985).
        // Thus, don't use ERROR_AND_CONTINUE, but just continue.
        continue;
      }

      sample.mStack = aUniqueStacks.GetOrAddStackIndex(stack);

      if (e.Has() && e.Get().IsResponsiveness()) {
        sample.mResponsiveness = Some(e.Get().GetDouble());
        e.Next();
      }

      WriteSample(aWriter, sample);

      processedThreadId = threadId;
    }

    return processedThreadId;
  });
}

// Streams all marker entries belonging to `aThreadId`. Modern (non-marker)
// entries are skipped. NOTE(review): `aProcessStartTime` is not referenced in
// the visible body (backtraces use TimeStamp::ProcessCreation()); confirm
// whether the parameter is intentionally unused.
void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
                                        BaseProfilerThreadId aThreadId,
                                        const TimeStamp& aProcessStartTime,
                                        double aSinceTime,
                                        UniqueStacks& aUniqueStacks) const {
  mEntries.ReadEach([&](ProfileBufferEntryReader& aER) {
    auto type = static_cast<ProfileBufferEntry::Kind>(
        aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    if (type == ProfileBufferEntry::Kind::Marker) {
      ::mozilla::base_profiler_markers_detail::DeserializeAfterKindAndStream(
          aER,
          [&](const BaseProfilerThreadId& aMarkerThreadId) {
            return (aMarkerThreadId == aThreadId) ? &aWriter : nullptr;
          },
          [&](ProfileChunkedBuffer& aChunkedBuffer) {
            ProfilerBacktrace backtrace("", &aChunkedBuffer);
            backtrace.StreamJSON(aWriter, TimeStamp::ProcessCreation(),
                                 aUniqueStacks);
          },
          // We don't have Rust markers in the mozglue.
          [&](mozilla::base_profiler_markers_detail::Streaming::
                  DeserializerTag) {
            MOZ_ASSERT_UNREACHABLE("No Rust markers in mozglue.");
          });
    } else {
      // The entry was not a marker, we need to skip to the end.
      aER.SetRemainingBytes(0);
    }
  });
}

// Streams the "profilerOverhead" JSON object, built from
// ProfilerOverheadTime/ProfilerOverheadDuration entry sequences. Only active
// when the MOZ_PROFILER_RECORD_OVERHEADS env-var is set.
void ProfileBuffer::StreamProfilerOverheadToJSON(
    SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
    double aSinceTime) const {
  const char* recordOverheads = getenv("MOZ_PROFILER_RECORD_OVERHEADS");
  if (!recordOverheads || recordOverheads[0] == '\0') {
    // Overheads were not recorded, return early.
    return;
  }

  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader);

    enum Schema : uint32_t {
      TIME = 0,
      LOCKING = 1,
      MARKER_CLEANING = 2,
      COUNTERS = 3,
      THREADS = 4
    };

    aWriter.StartObjectProperty("profilerOverhead");
    aWriter.StartObjectProperty("samples");
    // Stream all sampling overhead data. We skip other entries, because we
    // process them in StreamSamplesToJSON()/etc.
    {
      JSONSchemaWriter schema(aWriter);
      schema.WriteField("time");
      schema.WriteField("locking");
      schema.WriteField("expiredMarkerCleaning");
      schema.WriteField("counters");
      schema.WriteField("threads");
    }

    aWriter.StartArrayProperty("data");
    double firstTime = 0.0;
    double lastTime = 0.0;
    ProfilerStats intervals, overheads, lockings, cleanings, counters, threads;
    while (e.Has()) {
      // valid sequence: ProfilerOverheadTime, ProfilerOverheadDuration * 4
      if (e.Get().IsProfilerOverheadTime()) {
        double time = e.Get().GetDouble();
        if (time >= aSinceTime) {
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime");
          }
          double locking = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration");
          }
          double cleaning = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*2");
          }
          double counter = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*3");
          }
          double thread = e.Get().GetDouble();

          if (firstTime == 0.0) {
            firstTime = time;
          } else {
            // Note that we'll have 1 fewer interval than other numbers
            // (because we need both ends of an interval to know its duration).
            // The final difference should be insignificant over the expected
            // many thousands of iterations.
            intervals.Count(time - lastTime);
          }
          lastTime = time;
          overheads.Count(locking + cleaning + counter + thread);
          lockings.Count(locking);
          cleanings.Count(cleaning);
          counters.Count(counter);
          threads.Count(thread);

          AutoArraySchemaWriter writer(aWriter);
          writer.TimeMsElement(TIME, time);
          writer.DoubleElement(LOCKING, locking);
          writer.DoubleElement(MARKER_CLEANING, cleaning);
          writer.DoubleElement(COUNTERS, counter);
          writer.DoubleElement(THREADS, thread);
        }
      }
      e.Next();
    }
    aWriter.EndArray();   // data
    aWriter.EndObject();  // samples

    // Only output statistics if there is at least one full interval (and
    // therefore at least two samplings.)
    if (intervals.n > 0) {
      aWriter.StartObjectProperty("statistics");
      aWriter.DoubleProperty("profiledDuration", lastTime - firstTime);
      aWriter.IntProperty("samplingCount", overheads.n);
      aWriter.DoubleProperty("overheadDurations", overheads.sum);
      aWriter.DoubleProperty("overheadPercentage",
                             overheads.sum / (lastTime - firstTime));
#define PROFILER_STATS(name, var)                           \
  aWriter.DoubleProperty("mean" name, (var).sum / (var).n); \
  aWriter.DoubleProperty("min" name, (var).min);            \
  aWriter.DoubleProperty("max" name, (var).max);
      PROFILER_STATS("Interval", intervals);
      PROFILER_STATS("Overhead", overheads);
      PROFILER_STATS("Lockings", lockings);
      PROFILER_STATS("Cleaning", cleanings);
      PROFILER_STATS("Counter", counters);
      PROFILER_STATS("Thread", threads);
#undef PROFILER_STATS
      aWriter.EndObject();  // statistics
    }
    aWriter.EndObject();  // profilerOverhead
  });
}

// One decoded counter sample: timestamp, cumulative number, and count.
struct CounterSample {
  double mTime;
  uint64_t mNumber;
  int64_t mCount;
};

using CounterSamples = Vector<CounterSample>;

// HashMap lookup, if not found, a default value is inserted.
// Returns reference to (existing or new) value inside the HashMap.
template <typename HashM, typename Key>
static auto& LookupOrAdd(HashM& aMap, Key&& aKey) {
  auto addPtr = aMap.lookupForAdd(aKey);
  if (!addPtr) {
    MOZ_RELEASE_ASSERT(aMap.add(addPtr, std::forward<Key>(aKey),
                                typename HashM::Entry::ValueType{}));
    MOZ_ASSERT(!!addPtr);
  }
  return addPtr->value();
}

// Streams the "counters" JSON array built from CounterId/Time/Count/Number
// entry sequences.
void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
                                         const TimeStamp& aProcessStartTime,
                                         double aSinceTime) const {
  // Because this is a format entirely internal to the Profiler, any parsing
  // error indicates a bug in the ProfileBuffer writing or the parser itself,
  // or possibly flaky hardware.

  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader);

    // Column indices for the per-sample arrays emitted below.
    enum Schema : uint32_t { TIME = 0, COUNT = 1, NUMBER = 2 };

    // Stream all counters. We skip other entries, because we process them in
    // StreamSamplesToJSON()/etc.
    //
    // Valid sequence in the buffer:
    // CounterID
    // Time
    // ( Count Number? )*
    //
    // And the JSON (example):
    // "counters": {
    //  "name": "malloc",
    //  "category": "Memory",
    //  "description": "Amount of allocated memory",
    //  "samples": {
    //   "schema": {"time": 0, "count": 1, "number": 2},
    //   "data": [
    //    [
    //     16117.033968000002,
    //     2446216,
    //     6801320
    //    ],
    //    [
    //     16118.037638,
    //     2446216,
    //     6801320
    //    ],
    //   ],
    //  },
    // }

    // Build the map of counters and populate it. The key is the opaque
    // CounterId pointer stored in the buffer.
    HashMap<void*, CounterSamples> counters;

    while (e.Has()) {
      // skip all non-Counters, including if we start in the middle of a counter
      if (e.Get().IsCounterId()) {
        void* id = e.Get().GetPtr();
        CounterSamples& data = LookupOrAdd(counters, id);
        e.Next();
        if (!e.Has() || !e.Get().IsTime()) {
          ERROR_AND_CONTINUE("expected a Time entry");
        }
        double time = e.Get().GetDouble();
        e.Next();
        // Only keep samples within the requested time window; earlier ones
        // are skipped (see the else branch below).
        if (time >= aSinceTime) {
          if (!e.Has() || !e.Get().IsCount()) {
            ERROR_AND_CONTINUE("expected a Count entry");
          }
          // NOTE(review): Count is read via GetUint64() and Number below via
          // GetInt64(); both just reinterpret the same 8-byte storage, so the
          // value is unchanged, but swapping the accessors would match the
          // declared int64_t/uint64_t types of mCount/mNumber — confirm.
          int64_t count = e.Get().GetUint64();
          e.Next();
          // The Number entry is optional; default to 0 when absent.
          uint64_t number;
          if (!e.Has() || !e.Get().IsNumber()) {
            number = 0;
          } else {
            number = e.Get().GetInt64();
            e.Next();
          }
          CounterSample sample = {time, number, count};
          MOZ_RELEASE_ASSERT(data.append(sample));
        } else {
          // skip counter sample - only need to skip the initial counter
          // id, then let
          // the loop at the top skip the rest
        }
      } else {
        e.Next();
      }
    }
    // we have a map of counter entries; dump them to JSON
    if (counters.count() == 0) {
      return;
    }

    aWriter.StartArrayProperty("counters");
    for (auto iter = counters.iter(); !iter.done(); iter.next()) {
      CounterSamples& samples = iter.get().value();
      size_t size = samples.length();
      if (size == 0) {
        continue;
      }
      // The CounterId pointer stored in the buffer is the address of the
      // BaseProfilerCount object itself — assumed still alive here; TODO
      // confirm the lifetime guarantee against the writer side.
      const BaseProfilerCount* base_counter =
          static_cast<const BaseProfilerCount*>(iter.get().key());

      aWriter.Start();
      aWriter.StringProperty("name", MakeStringSpan(base_counter->mLabel));
      aWriter.StringProperty("category",
                             MakeStringSpan(base_counter->mCategory));
      aWriter.StringProperty("description",
                             MakeStringSpan(base_counter->mDescription));

      // Only emit the "number" column if at least one sample has a non-zero
      // number, to keep the JSON compact.
      bool hasNumber = false;
      for (size_t i = 0; i < size; i++) {
        if (samples[i].mNumber != 0) {
          hasNumber = true;
          break;
        }
      }

      aWriter.StartObjectProperty("samples");
      {
        // Scoped so the schema writer flushes before the "data" array starts.
        JSONSchemaWriter schema(aWriter);
        schema.WriteField("time");
        schema.WriteField("count");
        if (hasNumber) {
          schema.WriteField("number");
        }
      }

      aWriter.StartArrayProperty("data");
      uint64_t previousNumber = 0;
      int64_t previousCount = 0;
      for (size_t i = 0; i < size; i++) {
        // Encode as deltas, and only encode if different than the last
        // sample
        if (i == 0 || samples[i].mNumber != previousNumber ||
            samples[i].mCount != previousCount) {
          // Times must be monotonic, numbers must not decrease, and the
          // number delta must fit in an int64_t for the cast below.
          MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
          MOZ_ASSERT(samples[i].mNumber >= previousNumber);
          MOZ_ASSERT(samples[i].mNumber - previousNumber <=
                     uint64_t(std::numeric_limits<int64_t>::max()));

          AutoArraySchemaWriter writer(aWriter);
          writer.TimeMsElement(TIME, samples[i].mTime);
          writer.IntElement(COUNT, samples[i].mCount - previousCount);
          if (hasNumber) {
            writer.IntElement(NUMBER, static_cast<int64_t>(samples[i].mNumber -
                                                           previousNumber));
          }
          previousNumber = samples[i].mNumber;
          previousCount = samples[i].mCount;
        }
      }
      aWriter.EndArray();   // data
      aWriter.EndObject();  // samples
      aWriter.End();        // for each counter
    }
    aWriter.EndArray();  // counters
  });
}

#undef ERROR_AND_CONTINUE

// Writes one paused/collecting range object. A missing start or end time
// (range extends beyond the buffer) is emitted as a JSON null.
static void AddPausedRange(SpliceableJSONWriter& aWriter, const char* aReason,
                           const Maybe<double>& aStartTime,
                           const Maybe<double>& aEndTime) {
  aWriter.Start();
  if (aStartTime) {
    aWriter.TimeDoubleMsProperty("startTime", *aStartTime);
  } else {
    aWriter.NullProperty("startTime");
  }
  if (aEndTime) {
    aWriter.TimeDoubleMsProperty("endTime", *aEndTime);
  } else {
    aWriter.NullProperty("endTime");
  }
  aWriter.StringProperty("reason", MakeStringSpan(aReason));
  aWriter.End();
}

// Streams "profiler-paused" and "collecting" ranges found in the buffer.
// NOTE(review): aSinceTime is not referenced in the visible body — all ranges
// appear to be streamed regardless of time; confirm whether filtering was
// intended.
void ProfileBuffer::StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
                                             double aSinceTime) const {
  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader);

    // Open ranges whose matching end entry has not been seen yet.
    Maybe<double> currentPauseStartTime;
    Maybe<double> currentCollectionStartTime;

    while (e.Has()) {
      if (e.Get().IsPause()) {
        currentPauseStartTime = Some(e.Get().GetDouble());
      } else if (e.Get().IsResume()) {
        // A Resume closes the pause range; a missing Pause (fallen off the
        // buffer) yields a null startTime via AddPausedRange.
        AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
                       Some(e.Get().GetDouble()));
        currentPauseStartTime = Nothing();
      } else if (e.Get().IsCollectionStart()) {
        currentCollectionStartTime = Some(e.Get().GetDouble());
      } else if (e.Get().IsCollectionEnd()) {
        AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
                       Some(e.Get().GetDouble()));
        currentCollectionStartTime = Nothing();
      }
      e.Next();
    }

    // Ranges still open at the end of the buffer get a null endTime.
    if (currentPauseStartTime) {
      AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
                     Nothing());
    }
    if (currentCollectionStartTime) {
      AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
                     Nothing());
    }
  });
}

// Re-appends the last sample recorded for aThreadId (located at *aLastSample)
// with a fresh timestamp, so a sleeping thread keeps appearing in profiles.
// Copies the sample into a scratch buffer first, then appends it to this
// buffer. Returns false (and may reset aLastSample) if the old sample has
// fallen off the buffer or does not fit in the scratch buffer.
bool ProfileBuffer::DuplicateLastSample(BaseProfilerThreadId aThreadId,
                                        const TimeStamp& aProcessStartTime,
                                        Maybe<uint64_t>& aLastSample) {
  if (!aLastSample) {
    // No previous sample recorded; nothing to duplicate.
    return false;
  }

  // Scratch buffer backed by reusable worker chunks; no mutex needed since it
  // is only touched from this function.
  ProfileChunkedBuffer tempBuffer(
      ProfileChunkedBuffer::ThreadSafety::WithoutMutex, WorkerChunkManager());

  // Return the chunks to the worker manager on every exit path.
  auto retrieveWorkerChunk = MakeScopeExit(
      [&]() { WorkerChunkManager().Reset(tempBuffer.GetAllChunks()); });

  const bool ok = mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader, *aLastSample);

    if (e.CurPos() != *aLastSample) {
      // The last sample is no longer within the buffer range, so we cannot
      // use it. Reset the stored buffer position to Nothing().
      aLastSample.reset();
      return false;
    }

    // A sample always starts with a ThreadId entry for the owning thread.
    MOZ_RELEASE_ASSERT(e.Has() && e.Get().IsThreadId() &&
                       e.Get().GetThreadId() == aThreadId);

    e.Next();

    // Go through the whole entry and duplicate it, until we find the next
    // one.
    while (e.Has()) {
      switch (e.Get().GetKind()) {
        case ProfileBufferEntry::Kind::Pause:
        case ProfileBufferEntry::Kind::Resume:
        case ProfileBufferEntry::Kind::PauseSampling:
        case ProfileBufferEntry::Kind::ResumeSampling:
        case ProfileBufferEntry::Kind::CollectionStart:
        case ProfileBufferEntry::Kind::CollectionEnd:
        case ProfileBufferEntry::Kind::ThreadId:
          // We're done.
          // Any of the kinds above marks the start of the next buffer entry,
          // so the sample we were duplicating is complete.
          return true;
        case ProfileBufferEntry::Kind::Time:
          // Copy with new time
          AddEntry(
              tempBuffer,
              ProfileBufferEntry::Time(
                  (TimeStamp::Now() - aProcessStartTime).ToMilliseconds()));
          break;
        case ProfileBufferEntry::Kind::Number:
        case ProfileBufferEntry::Kind::Count:
        case ProfileBufferEntry::Kind::Responsiveness:
          // Don't copy anything not part of a thread's stack sample
          break;
        case ProfileBufferEntry::Kind::CounterId:
          // CounterId is normally followed by Time - if so, we'd like
          // to skip it. If we duplicate Time, it won't hurt anything, just
          // waste buffer space (and this can happen if the CounterId has
          // fallen off the end of the buffer, but Time (and Number/Count)
          // are still in the buffer).
          e.Next();
          if (e.Has() && e.Get().GetKind() != ProfileBufferEntry::Kind::Time) {
            // this would only happen if there was an invalid sequence
            // in the buffer. Don't skip it.
            // (continue restarts the loop without the trailing e.Next(), so
            // the unexpected entry is re-examined by the switch.)
            continue;
          }
          // we've skipped Time
          break;
        case ProfileBufferEntry::Kind::ProfilerOverheadTime:
          // ProfilerOverheadTime is normally followed by
          // ProfilerOverheadDuration*4 - if so, we'd like to skip it. Don't
          // duplicate, as we are in the middle of a sampling and will soon
          // capture its own overhead.
          e.Next();
          // A missing Time would only happen if there was an invalid
          // sequence in the buffer. Don't skip unexpected entry.
          // Four identical checks, one per expected ProfilerOverheadDuration
          // entry; `continue` bails out to re-examine an unexpected entry.
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          // we've skipped ProfilerOverheadTime and
          // ProfilerOverheadDuration*4.
          // (The fourth duration is consumed by the loop's trailing e.Next().)
          break;
        default: {
          // Copy anything else we don't know about.
          AddEntry(tempBuffer, e.Get());
          break;
        }
      }
      e.Next();
    }
    // Reached the end of the buffer without seeing the next sample; the
    // copied entries are still a complete usable sample.
    return true;
  });

  if (!ok) {
    return false;
  }

  // If the buffer was big enough, there won't be any cleared blocks.
  if (tempBuffer.GetState().mClearedBlockCount != 0) {
    // No need to try to read stack again as it won't fit. Reset the stored
    // buffer position to Nothing().
    aLastSample.reset();
    return false;
  }

  // Record where the duplicated sample starts, so it can itself be
  // duplicated next time.
  aLastSample = Some(AddThreadIdEntry(aThreadId));

  // Splice the duplicated entries from the scratch buffer into this buffer.
  tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader, "tempBuffer cannot be out-of-session");

    EntryGetter e(*aReader);

    while (e.Has()) {
      AddEntry(e.Get());
      e.Next();
    }
  });

  return true;
}

// Intentionally a no-op: kept only to satisfy callers until the duration
// limit is removed (bug 1632365).
void ProfileBuffer::DiscardSamplesBeforeTime(double aTime) {
  // This function does nothing!
  // The duration limit will be removed from Firefox, see bug 1632365.
  (void)aTime;
}

// END ProfileBuffer
////////////////////////////////////////////////////////////////////////

}  // namespace baseprofiler
}  // namespace mozilla