HeapSnapshot.cpp (53630B)
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "HeapSnapshot.h"

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/gzip_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>

#include "js/Array.h"         // JS::NewArrayObject
#include "js/ColumnNumber.h"  // JS::LimitedColumnNumberOneOrigin, JS::TaggedColumnNumberOneOrigin
#include "js/Debug.h"
#include "js/PropertyAndElement.h"  // JS_DefineProperty
#include "js/TypeDecls.h"
#include "js/UbiNodeBreadthFirst.h"
#include "js/UbiNodeCensus.h"
#include "js/UbiNodeDominatorTree.h"
#include "js/UbiNodeShortestPaths.h"
#include "mozilla/Attributes.h"
#include "mozilla/CycleCollectedJSContext.h"
#include "mozilla/devtools/AutoMemMap.h"
#include "mozilla/devtools/CoreDump.pb.h"
#include "mozilla/devtools/DeserializedNode.h"
#include "mozilla/devtools/DominatorTree.h"
#include "mozilla/devtools/FileDescriptorOutputStream.h"
#include "mozilla/devtools/HeapSnapshotTempFileHelperChild.h"
#include "mozilla/devtools/ZeroCopyNSIOutputStream.h"
#include "mozilla/dom/ChromeUtils.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/dom/HeapSnapshotBinding.h"
#include "mozilla/RangedPtr.h"
#include "mozilla/glean/DevtoolsSharedHeapsnapshotMetrics.h"

#include "jsapi.h"
#include "jsfriendapi.h"
#include "js/GCVector.h"
#include "js/MapAndSet.h"
#include "js/Object.h"                // JS::GetCompartment
#include "nsComponentManagerUtils.h"  // do_CreateInstance
#include "nsCycleCollectionParticipant.h"
#include "nsCRTGlue.h"
#include "nsIFile.h"
#include "nsIOutputStream.h"
#include "nsISupportsImpl.h"
#include "nsNetUtil.h"
#include "nsPrintfCString.h"
#include "prerror.h"
#include "prio.h"
#include "prtypes.h"
#include "SpecialSystemDirectory.h"

namespace mozilla {
namespace devtools {

using namespace JS;
using namespace dom;

using ::google::protobuf::io::ArrayInputStream;
using ::google::protobuf::io::CodedInputStream;
using ::google::protobuf::io::GzipInputStream;
using ::google::protobuf::io::ZeroCopyInputStream;

using JS::ubi::AtomOrTwoByteChars;
using JS::ubi::ShortestPaths;

// Return the Debugger API's malloc-measuring function for the current
// thread's JSContext. Asserts (rather than null-checks) each step because a
// CycleCollectedJSContext must exist on any thread that runs this code.
MallocSizeOf GetCurrentThreadDebuggerMallocSizeOf() {
  auto ccjscx = CycleCollectedJSContext::Get();
  MOZ_ASSERT(ccjscx);
  auto cx = ccjscx->Context();
  MOZ_ASSERT(cx);
  auto mallocSizeOf = JS::dbg::GetDebuggerMallocSizeOf(cx);
  MOZ_ASSERT(mallocSizeOf);
  return mallocSizeOf;
}

/*** Cycle Collection Boilerplate *********************************************/

NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(HeapSnapshot, mParent)

NS_IMPL_CYCLE_COLLECTING_ADDREF(HeapSnapshot)
NS_IMPL_CYCLE_COLLECTING_RELEASE(HeapSnapshot)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(HeapSnapshot)
  NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
  NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END

/* virtual */
JSObject* HeapSnapshot::WrapObject(JSContext* aCx,
                                   JS::Handle<JSObject*> aGivenProto) {
  return HeapSnapshot_Binding::Wrap(aCx, this, aGivenProto);
}

/*** Reading Heap Snapshots ***************************************************/

// Deserialize a HeapSnapshot from the `size`-byte core dump in `buffer`.
// On parse failure, throws NS_ERROR_UNEXPECTED on `rv` and returns nullptr.
/* static */
already_AddRefed<HeapSnapshot> HeapSnapshot::Create(JSContext* cx,
                                                    GlobalObject& global,
                                                    const uint8_t* buffer,
                                                    uint32_t size,
                                                    ErrorResult& rv) {
  RefPtr<HeapSnapshot> snapshot = new HeapSnapshot(cx, global.GetAsSupports());
  if (!snapshot->init(cx, buffer, size)) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return nullptr;
  }
  return snapshot.forget();
}

// Parse exactly `sizeOfMessage` bytes from `stream` into `message`.
// Returns false if the message is malformed or does not consume exactly the
// advertised number of bytes.
template <typename MessageType>
static bool parseMessage(ZeroCopyInputStream& stream, uint32_t sizeOfMessage,
                         MessageType& message) {
  // We need to create a new `CodedInputStream` for each message so that the
  // 64MB limit is applied per-message rather than to the whole stream.
  CodedInputStream codedStream(&stream);

  // The protobuf message nesting that core dumps exhibit is dominated by
  // allocation stacks' frames. In the most deeply nested case, each frame has
  // two messages: a StackFrame message and a StackFrame::Data message. These
  // frames are on top of a small constant of other messages. There are a
  // MAX_STACK_DEPTH number of frames, so we multiply this by 3 to make room for
  // the two messages per frame plus some head room for the constant number of
  // non-dominating messages.
  codedStream.SetRecursionLimit(HeapSnapshot::MAX_STACK_DEPTH * 3);

  auto limit = codedStream.PushLimit(sizeOfMessage);
  if (NS_WARN_IF(!message.ParseFromCodedStream(&codedStream)) ||
      NS_WARN_IF(!codedStream.ConsumedEntireMessage()) ||
      NS_WARN_IF(codedStream.BytesUntilLimit() != 0)) {
    return false;
  }

  codedStream.PopLimit(limit);
  return true;
}

// Matcher used by HeapSnapshot::getOrInternString: either interns a new copy
// of a string from the core dump, or resolves a back-reference (an index into
// the set of strings already interned).
template <typename CharT, typename InternedStringSet>
struct GetOrInternStringMatcher {
  InternedStringSet& internedStrings;

  explicit GetOrInternStringMatcher(InternedStringSet& strings)
      : internedStrings(strings) {}

  // Fresh string: copy it into the interned set and return the copy.
  const CharT* operator()(const std::string* str) {
    MOZ_ASSERT(str);
    // The protobuf `bytes` payload length is in bytes; convert to characters.
    size_t length = str->length() / sizeof(CharT);
    auto tempString = reinterpret_cast<const CharT*>(str->data());

    UniqueFreePtr<CharT[]> owned(NS_xstrndup(tempString, length));
    if (!internedStrings.append(std::move(owned))) return nullptr;

    return internedStrings.back().get();
  }

  // Back-reference: `ref` indexes the strings interned so far, in the order
  // they were first serialized.
  const CharT* operator()(uint64_t ref) {
    if (MOZ_LIKELY(ref < internedStrings.length())) {
      auto& string = internedStrings[ref];
      MOZ_ASSERT(string);
      return string.get();
    }

    return nullptr;
  }
};

template <
    // Either char or char16_t.
    typename CharT,
    // A reference to either `internedOneByteStrings` or
    // `internedTwoByteStrings` if CharT is char or char16_t respectively.
    typename InternedStringSet>
const CharT* HeapSnapshot::getOrInternString(
    InternedStringSet& internedStrings, Maybe<StringOrRef>& maybeStrOrRef) {
  // Incomplete message: has neither a string nor a reference to an already
  // interned string.
  if (MOZ_UNLIKELY(maybeStrOrRef.isNothing())) return nullptr;

  GetOrInternStringMatcher<CharT, InternedStringSet> m(internedStrings);
  return maybeStrOrRef->match(m);
}

// Get a de-duplicated string as a Maybe<StringOrRef> from the given `msg`.
#define GET_STRING_OR_REF_WITH_PROP_NAMES(msg, strPropertyName, \
                                          refPropertyName) \
  (msg.has_##refPropertyName() ? Some(StringOrRef(msg.refPropertyName())) \
   : msg.has_##strPropertyName() ? Some(StringOrRef(&msg.strPropertyName())) \
                                 : Nothing())

#define GET_STRING_OR_REF(msg, property) \
  (msg.has_##property##ref() ? Some(StringOrRef(msg.property##ref())) \
   : msg.has_##property()    ? Some(StringOrRef(&msg.property())) \
                             : Nothing())

// Deserialize one protobuf `node` into `this->nodes`, recording every edge
// target in `edgeReferents` so init() can later verify the graph is closed.
// Returns false on malformed input or OOM.
bool HeapSnapshot::saveNode(const protobuf::Node& node,
                            NodeIdSet& edgeReferents) {
  // NB: de-duplicated string properties must be read back and interned in the
  // same order here as they are written and serialized in
  // `CoreDumpWriter::writeNode` or else indices in references to already
  // serialized strings will be off.

  if (NS_WARN_IF(!node.has_id())) return false;
  NodeId id = node.id();

  // NodeIds are derived from pointers (at most 48 bits) and we rely on them
  // fitting into JS numbers (IEEE 754 doubles, can precisely store 53 bit
  // integers) despite storing them on disk as 64 bit integers.
  if (NS_WARN_IF(!JS::Value::isNumberRepresentable(id))) return false;

  // Should only deserialize each node once.
  if (NS_WARN_IF(nodes.has(id))) return false;

  if (NS_WARN_IF(!JS::ubi::Uint32IsValidCoarseType(node.coarsetype())))
    return false;
  auto coarseType = JS::ubi::Uint32ToCoarseType(node.coarsetype());

  Maybe<StringOrRef> typeNameOrRef =
      GET_STRING_OR_REF_WITH_PROP_NAMES(node, typename_, typenameref);
  auto typeName =
      getOrInternString<char16_t>(internedTwoByteStrings, typeNameOrRef);
  if (NS_WARN_IF(!typeName)) return false;

  if (NS_WARN_IF(!node.has_size())) return false;
  uint64_t size = node.size();

  auto edgesLength = node.edges_size();
  DeserializedNode::EdgeVector edges;
  if (NS_WARN_IF(!edges.reserve(edgesLength))) return false;
  for (decltype(edgesLength) i = 0; i < edgesLength; i++) {
    auto& protoEdge = node.edges(i);

    if (NS_WARN_IF(!protoEdge.has_referent())) return false;
    NodeId referent = protoEdge.referent();

    if (NS_WARN_IF(!edgeReferents.put(referent))) return false;

    // Edge names are optional; a nullptr name means an unnamed edge.
    const char16_t* edgeName = nullptr;
    if (protoEdge.EdgeNameOrRef_case() !=
        protobuf::Edge::EDGENAMEORREF_NOT_SET) {
      Maybe<StringOrRef> edgeNameOrRef = GET_STRING_OR_REF(protoEdge, name);
      edgeName =
          getOrInternString<char16_t>(internedTwoByteStrings, edgeNameOrRef);
      if (NS_WARN_IF(!edgeName)) return false;
    }

    // Safe: we reserved `edgesLength` slots above.
    edges.infallibleAppend(DeserializedEdge(referent, edgeName));
  }

  Maybe<StackFrameId> allocationStack;
  if (node.has_allocationstack()) {
    StackFrameId id = 0;
    if (NS_WARN_IF(!saveStackFrame(node.allocationstack(), id))) return false;
    allocationStack.emplace(id);
  }
  MOZ_ASSERT(allocationStack.isSome() == node.has_allocationstack());

  const char* jsObjectClassName = nullptr;
  if (node.JSObjectClassNameOrRef_case() !=
      protobuf::Node::JSOBJECTCLASSNAMEORREF_NOT_SET) {
    Maybe<StringOrRef> clsNameOrRef =
        GET_STRING_OR_REF(node, jsobjectclassname);
    jsObjectClassName =
        getOrInternString<char>(internedOneByteStrings, clsNameOrRef);
    if (NS_WARN_IF(!jsObjectClassName)) return false;
  }

  const char* scriptFilename = nullptr;
  if (node.ScriptFilenameOrRef_case() !=
      protobuf::Node::SCRIPTFILENAMEORREF_NOT_SET) {
    Maybe<StringOrRef> scriptFilenameOrRef =
        GET_STRING_OR_REF(node, scriptfilename);
    scriptFilename =
        getOrInternString<char>(internedOneByteStrings, scriptFilenameOrRef);
    if (NS_WARN_IF(!scriptFilename)) return false;
  }

  const char16_t* descriptiveTypeName = nullptr;
  if (node.descriptiveTypeNameOrRef_case() !=
      protobuf::Node::DESCRIPTIVETYPENAMEORREF_NOT_SET) {
    Maybe<StringOrRef> descriptiveTypeNameOrRef =
        GET_STRING_OR_REF(node, descriptivetypename);
    descriptiveTypeName = getOrInternString<char16_t>(internedTwoByteStrings,
                                                      descriptiveTypeNameOrRef);
    if (NS_WARN_IF(!descriptiveTypeName)) return false;
  }

  if (NS_WARN_IF(!nodes.putNew(
          id, DeserializedNode(id, coarseType, typeName, size, std::move(edges),
                               allocationStack, jsObjectClassName,
                               scriptFilename, descriptiveTypeName, *this)))) {
    return false;
  };

  return true;
}

// Deserialize a protobuf stack `frame` (and, recursively, its parents) into
// `this->frames`. On success, sets `outFrameId` to the frame's id. A frame is
// either a full Data payload or a back-reference to a previously seen frame.
bool HeapSnapshot::saveStackFrame(const protobuf::StackFrame& frame,
                                  StackFrameId& outFrameId) {
  // NB: de-duplicated string properties must be read in the same order here as
  // they are written in `CoreDumpWriter::getProtobufStackFrame` or else indices
  // in references to already serialized strings will be off.

  if (frame.has_ref()) {
    // We should only get a reference to the previous frame if we have already
    // seen the previous frame.
    if (!frames.has(frame.ref())) return false;

    outFrameId = frame.ref();
    return true;
  }

  // Incomplete message.
  if (!frame.has_data()) return false;

  auto data = frame.data();

  if (!data.has_id()) return false;
  StackFrameId id = data.id();

  // This should be the first and only time we see this frame.
  if (frames.has(id)) return false;

  if (!data.has_line()) return false;
  uint32_t line = data.line();

  if (!data.has_column()) return false;
  JS::TaggedColumnNumberOneOrigin column(
      JS::LimitedColumnNumberOneOrigin(data.column()));

  if (!data.has_issystem()) return false;
  bool isSystem = data.issystem();

  if (!data.has_isselfhosted()) return false;
  bool isSelfHosted = data.isselfhosted();

  Maybe<StringOrRef> sourceOrRef = GET_STRING_OR_REF(data, source);
  auto source =
      getOrInternString<char16_t>(internedTwoByteStrings, sourceOrRef);
  if (!source) return false;

  const char16_t* functionDisplayName = nullptr;
  if (data.FunctionDisplayNameOrRef_case() !=
      protobuf::StackFrame_Data::FUNCTIONDISPLAYNAMEORREF_NOT_SET) {
    Maybe<StringOrRef> nameOrRef = GET_STRING_OR_REF(data, functiondisplayname);
    functionDisplayName =
        getOrInternString<char16_t>(internedTwoByteStrings, nameOrRef);
    if (!functionDisplayName) return false;
  }

  // Recurse into the parent frame chain before registering this frame.
  Maybe<StackFrameId> parent;
  if (data.has_parent()) {
    StackFrameId parentId = 0;
    if (!saveStackFrame(data.parent(), parentId)) return false;
    parent = Some(parentId);
  }

  if (!frames.putNew(id,
                     DeserializedStackFrame(id, parent, line, column, source,
                                            functionDisplayName, isSystem,
                                            isSelfHosted, *this))) {
    return false;
  }

  outFrameId = id;
  return true;
}

#undef GET_STRING_OR_REF_WITH_PROP_NAMES
#undef GET_STRING_OR_REF

// Because protobuf messages aren't self-delimiting, we serialize each message
// preceded by its size in bytes. When deserializing, we read this size and then
// limit reading from the stream to the given byte size.
If we didn't, then the 375 // first message would consume the entire stream. 376 static bool readSizeOfNextMessage(ZeroCopyInputStream& stream, 377 uint32_t* sizep) { 378 MOZ_ASSERT(sizep); 379 CodedInputStream codedStream(&stream); 380 return codedStream.ReadVarint32(sizep) && *sizep > 0; 381 } 382 383 bool HeapSnapshot::init(JSContext* cx, const uint8_t* buffer, uint32_t size) { 384 ArrayInputStream stream(buffer, size); 385 GzipInputStream gzipStream(&stream); 386 uint32_t sizeOfMessage = 0; 387 388 // First is the metadata. 389 390 protobuf::Metadata metadata; 391 if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage))) 392 return false; 393 if (!parseMessage(gzipStream, sizeOfMessage, metadata)) return false; 394 if (metadata.has_timestamp()) timestamp.emplace(metadata.timestamp()); 395 396 // Next is the root node. 397 398 protobuf::Node root; 399 if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage))) 400 return false; 401 if (!parseMessage(gzipStream, sizeOfMessage, root)) return false; 402 403 // Although the id is optional in the protobuf format for future proofing, we 404 // can't currently do anything without it. 405 if (NS_WARN_IF(!root.has_id())) return false; 406 rootId = root.id(); 407 408 // The set of all node ids we've found edges pointing to. 409 NodeIdSet edgeReferents(cx); 410 411 if (NS_WARN_IF(!saveNode(root, edgeReferents))) return false; 412 413 // Finally, the rest of the nodes in the core dump. 414 415 // Test for the end of the stream. The protobuf library gives no way to tell 416 // the difference between an underlying read error and the stream being 417 // done. All we can do is attempt to read the size of the next message and 418 // extrapolate guestimations from the result of that operation. 
419 while (readSizeOfNextMessage(gzipStream, &sizeOfMessage)) { 420 protobuf::Node node; 421 if (!parseMessage(gzipStream, sizeOfMessage, node)) return false; 422 if (NS_WARN_IF(!saveNode(node, edgeReferents))) return false; 423 } 424 425 // Check the set of node ids referred to by edges we found and ensure that we 426 // have the node corresponding to each id. If we don't have all of them, it is 427 // unsafe to perform analyses of this heap snapshot. 428 for (auto iter = edgeReferents.iter(); !iter.done(); iter.next()) { 429 if (NS_WARN_IF(!nodes.has(iter.get()))) return false; 430 } 431 432 return true; 433 } 434 435 /*** Heap Snapshot Analyses ***************************************************/ 436 437 void HeapSnapshot::TakeCensus(JSContext* cx, JS::Handle<JSObject*> options, 438 JS::MutableHandle<JS::Value> rval, 439 ErrorResult& rv) { 440 JS::ubi::Census census(cx); 441 442 JS::ubi::CountTypePtr rootType; 443 if (NS_WARN_IF(!JS::ubi::ParseCensusOptions(cx, census, options, rootType))) { 444 rv.Throw(NS_ERROR_UNEXPECTED); 445 return; 446 } 447 448 JS::ubi::RootedCount rootCount(cx, rootType->makeCount()); 449 if (NS_WARN_IF(!rootCount)) { 450 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 451 return; 452 } 453 454 JS::ubi::CensusHandler handler(census, rootCount, 455 GetCurrentThreadDebuggerMallocSizeOf()); 456 457 { 458 JS::AutoCheckCannotGC nogc; 459 460 JS::ubi::CensusTraversal traversal(cx, handler, nogc); 461 462 if (NS_WARN_IF(!traversal.addStart(getRoot()))) { 463 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 464 return; 465 } 466 467 if (NS_WARN_IF(!traversal.traverse())) { 468 rv.Throw(NS_ERROR_UNEXPECTED); 469 return; 470 } 471 } 472 473 if (NS_WARN_IF(!handler.report(cx, rval))) { 474 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 475 return; 476 } 477 } 478 479 void HeapSnapshot::DescribeNode(JSContext* cx, JS::Handle<JSObject*> breakdown, 480 uint64_t nodeId, 481 JS::MutableHandle<JS::Value> rval, 482 ErrorResult& rv) { 483 MOZ_ASSERT(breakdown); 484 JS::Rooted<JS::Value> 
breakdownVal(cx, JS::ObjectValue(*breakdown)); 485 JS::Rooted<JS::GCVector<JSLinearString*>> seen(cx, cx); 486 JS::ubi::CountTypePtr rootType = 487 JS::ubi::ParseBreakdown(cx, breakdownVal, &seen); 488 if (NS_WARN_IF(!rootType)) { 489 rv.Throw(NS_ERROR_UNEXPECTED); 490 return; 491 } 492 493 JS::ubi::RootedCount rootCount(cx, rootType->makeCount()); 494 if (NS_WARN_IF(!rootCount)) { 495 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 496 return; 497 } 498 499 JS::ubi::Node::Id id(nodeId); 500 Maybe<JS::ubi::Node> node = getNodeById(id); 501 if (NS_WARN_IF(node.isNothing())) { 502 rv.Throw(NS_ERROR_INVALID_ARG); 503 return; 504 } 505 506 MallocSizeOf mallocSizeOf = GetCurrentThreadDebuggerMallocSizeOf(); 507 if (NS_WARN_IF(!rootCount->count(mallocSizeOf, *node))) { 508 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 509 return; 510 } 511 512 if (NS_WARN_IF(!rootCount->report(cx, rval))) { 513 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 514 return; 515 } 516 } 517 518 already_AddRefed<DominatorTree> HeapSnapshot::ComputeDominatorTree( 519 ErrorResult& rv) { 520 Maybe<JS::ubi::DominatorTree> maybeTree; 521 { 522 auto ccjscx = CycleCollectedJSContext::Get(); 523 MOZ_ASSERT(ccjscx); 524 auto cx = ccjscx->Context(); 525 MOZ_ASSERT(cx); 526 JS::AutoCheckCannotGC nogc(cx); 527 maybeTree = JS::ubi::DominatorTree::Create(cx, nogc, getRoot()); 528 } 529 530 if (NS_WARN_IF(maybeTree.isNothing())) { 531 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 532 return nullptr; 533 } 534 535 return MakeAndAddRef<DominatorTree>(std::move(*maybeTree), this, mParent); 536 } 537 538 void HeapSnapshot::ComputeShortestPaths(JSContext* cx, uint64_t start, 539 const Sequence<uint64_t>& targets, 540 uint64_t maxNumPaths, 541 JS::MutableHandle<JSObject*> results, 542 ErrorResult& rv) { 543 // First ensure that our inputs are valid. 
544 545 if (NS_WARN_IF(maxNumPaths == 0)) { 546 rv.Throw(NS_ERROR_INVALID_ARG); 547 return; 548 } 549 550 Maybe<JS::ubi::Node> startNode = getNodeById(start); 551 if (NS_WARN_IF(startNode.isNothing())) { 552 rv.Throw(NS_ERROR_INVALID_ARG); 553 return; 554 } 555 556 if (NS_WARN_IF(targets.Length() == 0)) { 557 rv.Throw(NS_ERROR_INVALID_ARG); 558 return; 559 } 560 561 // Aggregate the targets into a set and make sure that they exist in the heap 562 // snapshot. 563 564 JS::ubi::NodeSet targetsSet; 565 566 for (const auto& target : targets) { 567 Maybe<JS::ubi::Node> targetNode = getNodeById(target); 568 if (NS_WARN_IF(targetNode.isNothing())) { 569 rv.Throw(NS_ERROR_INVALID_ARG); 570 return; 571 } 572 573 if (NS_WARN_IF(!targetsSet.put(*targetNode))) { 574 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 575 return; 576 } 577 } 578 579 // Walk the heap graph and find the shortest paths. 580 581 Maybe<ShortestPaths> maybeShortestPaths; 582 { 583 JS::AutoCheckCannotGC nogc(cx); 584 maybeShortestPaths = ShortestPaths::Create( 585 cx, nogc, maxNumPaths, *startNode, std::move(targetsSet)); 586 } 587 588 if (NS_WARN_IF(maybeShortestPaths.isNothing())) { 589 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 590 return; 591 } 592 593 auto& shortestPaths = *maybeShortestPaths; 594 595 // Convert the results into a Map object mapping target node IDs to arrays of 596 // paths found. 
597 598 JS::Rooted<JSObject*> resultsMap(cx, JS::NewMapObject(cx)); 599 if (NS_WARN_IF(!resultsMap)) { 600 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 601 return; 602 } 603 604 for (auto iter = shortestPaths.targetIter(); !iter.done(); iter.next()) { 605 JS::Rooted<JS::Value> key(cx, JS::NumberValue(iter.get().identifier())); 606 JS::RootedVector<JS::Value> paths(cx); 607 608 bool ok = shortestPaths.forEachPath(iter.get(), [&](JS::ubi::Path& path) { 609 JS::RootedVector<JS::Value> pathValues(cx); 610 611 for (JS::ubi::BackEdge* edge : path) { 612 JS::Rooted<JSObject*> pathPart(cx, JS_NewPlainObject(cx)); 613 if (!pathPart) { 614 return false; 615 } 616 617 JS::Rooted<JS::Value> predecessor( 618 cx, NumberValue(edge->predecessor().identifier())); 619 if (!JS_DefineProperty(cx, pathPart, "predecessor", predecessor, 620 JSPROP_ENUMERATE)) { 621 return false; 622 } 623 624 JS::Rooted<JS::Value> edgeNameVal(cx, NullValue()); 625 if (edge->name()) { 626 JS::Rooted<JSString*> edgeName( 627 cx, JS_AtomizeUCString(cx, edge->name().get())); 628 if (!edgeName) { 629 return false; 630 } 631 edgeNameVal = StringValue(edgeName); 632 } 633 634 if (!JS_DefineProperty(cx, pathPart, "edge", edgeNameVal, 635 JSPROP_ENUMERATE)) { 636 return false; 637 } 638 639 if (!pathValues.append(ObjectValue(*pathPart))) { 640 return false; 641 } 642 } 643 644 JS::Rooted<JSObject*> pathObj(cx, JS::NewArrayObject(cx, pathValues)); 645 return pathObj && paths.append(ObjectValue(*pathObj)); 646 }); 647 648 if (NS_WARN_IF(!ok)) { 649 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 650 return; 651 } 652 653 JS::Rooted<JSObject*> pathsArray(cx, JS::NewArrayObject(cx, paths)); 654 if (NS_WARN_IF(!pathsArray)) { 655 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 656 return; 657 } 658 659 JS::Rooted<JS::Value> pathsVal(cx, ObjectValue(*pathsArray)); 660 if (NS_WARN_IF(!JS::MapSet(cx, resultsMap, key, pathsVal))) { 661 rv.Throw(NS_ERROR_OUT_OF_MEMORY); 662 return; 663 } 664 } 665 666 results.set(resultsMap); 667 } 668 669 /*** Saving Heap 
Snapshots ****************************************************/ 670 671 // If we are only taking a snapshot of the heap affected by the given set of 672 // globals, find the set of compartments the globals are allocated 673 // within. Returns false on OOM failure. 674 static bool PopulateCompartmentsWithGlobals( 675 CompartmentSet& compartments, JS::HandleVector<JSObject*> globals) { 676 unsigned length = globals.length(); 677 for (unsigned i = 0; i < length; i++) { 678 if (!compartments.put(JS::GetCompartment(globals[i]))) return false; 679 } 680 681 return true; 682 } 683 684 // Add the given set of globals as explicit roots in the given roots 685 // list. Returns false on OOM failure. 686 static bool AddGlobalsAsRoots(JS::HandleVector<JSObject*> globals, 687 ubi::RootList& roots) { 688 unsigned length = globals.length(); 689 for (unsigned i = 0; i < length; i++) { 690 if (!roots.addRoot(ubi::Node(globals[i].get()), u"heap snapshot global")) { 691 return false; 692 } 693 } 694 return true; 695 } 696 697 // Choose roots and limits for a traversal, given `boundaries`. Set `roots` to 698 // the set of nodes within the boundaries that are referred to by nodes 699 // outside. If `boundaries` does not include all JS compartments, initialize 700 // `compartments` to the set of included compartments; otherwise, leave 701 // `compartments` uninitialized. (You can use compartments.initialized() to 702 // check.) 703 // 704 // If `boundaries` is incoherent, or we encounter an error while trying to 705 // handle it, or we run out of memory, set `rv` appropriately and return 706 // `false`. 707 // 708 // Return value is a pair of the status and an AutoCheckCannotGC token, 709 // forwarded from ubi::RootList::init(), to ensure that the caller does 710 // not GC while the RootList is live and initialized. 
static std::pair<bool, AutoCheckCannotGC> EstablishBoundaries(
    JSContext* cx, ErrorResult& rv, const HeapSnapshotBoundaries& boundaries,
    ubi::RootList& roots, CompartmentSet& compartments) {
  MOZ_ASSERT(!roots.initialized());
  MOZ_ASSERT(compartments.empty());

  // Exactly one of the three boundary properties (runtime, debugger, globals)
  // may be passed; a second one is an NS_ERROR_INVALID_ARG.
  bool foundBoundaryProperty = false;

  if (boundaries.mRuntime.WasPassed()) {
    foundBoundaryProperty = true;

    if (!boundaries.mRuntime.Value()) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }

    // Whole-runtime snapshot: no compartment filter.
    auto [ok, nogc] = roots.init();
    if (!ok) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return {false, nogc};
    }
  }

  if (boundaries.mDebugger.WasPassed()) {
    if (foundBoundaryProperty) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }
    foundBoundaryProperty = true;

    JSObject* dbgObj = boundaries.mDebugger.Value();
    if (!dbgObj || !dbg::IsDebugger(*dbgObj)) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }

    // Bound the snapshot to the debugger's debuggee globals' compartments.
    JS::RootedVector<JSObject*> globals(cx);
    if (!dbg::GetDebuggeeGlobals(cx, *dbgObj, &globals) ||
        !PopulateCompartmentsWithGlobals(compartments, globals) ||
        !roots.init(compartments).first || !AddGlobalsAsRoots(globals, roots)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return {false, AutoCheckCannotGC(cx)};
    }
  }

  if (boundaries.mGlobals.WasPassed()) {
    if (foundBoundaryProperty) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }
    foundBoundaryProperty = true;

    uint32_t length = boundaries.mGlobals.Value().Length();
    if (length == 0) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }

    // Bound the snapshot to an explicit list of globals' compartments.
    JS::RootedVector<JSObject*> globals(cx);
    for (uint32_t i = 0; i < length; i++) {
      JSObject* global = boundaries.mGlobals.Value().ElementAt(i);
      if (!JS_IsGlobalObject(global)) {
        rv.Throw(NS_ERROR_INVALID_ARG);
        return {false, AutoCheckCannotGC(cx)};
      }
      if (!globals.append(global)) {
        rv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return {false, AutoCheckCannotGC(cx)};
      }
    }

    if (!PopulateCompartmentsWithGlobals(compartments, globals) ||
        !roots.init(compartments).first || !AddGlobalsAsRoots(globals, roots)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return {false, AutoCheckCannotGC(cx)};
    }
  }
  AutoCheckCannotGC nogc(cx);

  if (!foundBoundaryProperty) {
    rv.Throw(NS_ERROR_INVALID_ARG);
    return {false, nogc};
  }

  MOZ_ASSERT(roots.initialized());
  return {true, nogc};
}

// A variant covering all the various two-byte strings that we can get from the
// ubi::Node API.
class TwoByteString
    : public Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName> {
  using Base = Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName>;

  // Matcher that copies the string payload, whichever alternative holds it,
  // into a caller-provided buffer of at most `maxLength` char16_t's.
  struct CopyToBufferMatcher {
    RangedPtr<char16_t> destination;
    size_t maxLength;

    CopyToBufferMatcher(RangedPtr<char16_t> destination, size_t maxLength)
        : destination(destination), maxLength(maxLength) {}

    size_t operator()(JS::ubi::EdgeName& ptr) {
      // A null edge name copies nothing.
      return ptr ? operator()(ptr.get()) : 0;
    }

    size_t operator()(JSAtom* atom) {
      MOZ_ASSERT(atom);
      JS::ubi::AtomOrTwoByteChars s(atom);
      return s.copyToBuffer(destination, maxLength);
    }

    size_t operator()(const char16_t* chars) {
      MOZ_ASSERT(chars);
      JS::ubi::AtomOrTwoByteChars s(chars);
      return s.copyToBuffer(destination, maxLength);
    }
  };

 public:
  template <typename T>
  MOZ_IMPLICIT TwoByteString(T&& rhs) : Base(std::forward<T>(rhs)) {}

  template <typename T>
  TwoByteString& operator=(T&& rhs) {
    MOZ_ASSERT(this != &rhs, "self-move disallowed");
    // Destroy-then-placement-new: rebuilds the variant in place with the new
    // alternative.
    this->~TwoByteString();
    new (this) TwoByteString(std::forward<T>(rhs));
    return *this;
  }

  TwoByteString(const TwoByteString&) = delete;
  TwoByteString& operator=(const TwoByteString&) = delete;

  // Rewrap the inner value of a JS::ubi::AtomOrTwoByteChars as a TwoByteString.
  static TwoByteString from(JS::ubi::AtomOrTwoByteChars&& s) {
    return s.match([](auto* a) { return TwoByteString(a); });
  }

  // Returns true if the given TwoByteString is non-null, false otherwise.
  bool isNonNull() const {
    return match([](auto& t) { return t != nullptr; });
  }

  // Return the length of the string, 0 if it is null.
  size_t length() const {
    return match(
        [](JSAtom* atom) -> size_t {
          MOZ_ASSERT(atom);
          JS::ubi::AtomOrTwoByteChars s(atom);
          return s.length();
        },
        [](const char16_t* chars) -> size_t {
          MOZ_ASSERT(chars);
          return NS_strlen(chars);
        },
        [](const JS::ubi::EdgeName& ptr) -> size_t {
          MOZ_ASSERT(ptr);
          return NS_strlen(ptr.get());
        });
  }

  // Copy the contents of a TwoByteString into the provided buffer. The buffer
  // is NOT null terminated. The number of characters written is returned.
  size_t copyToBuffer(RangedPtr<char16_t> destination, size_t maxLength) {
    CopyToBufferMatcher m(destination, maxLength);
    return match(m);
  }

  struct HashPolicy;
};

// A hashing policy for TwoByteString.
//
// Atoms are pointer hashed and use pointer equality, which means that we
// tolerate some duplication across atoms and the other two types of two-byte
// strings. In practice, we expect the amount of this duplication to be very low
// because each type is generally a different semantic thing in addition to
// having a slightly different representation. For example, the set of edge
// names and the set stack frames' source names naturally tend not to overlap
// very much if at all.
struct TwoByteString::HashPolicy {
  using Lookup = TwoByteString;

  static js::HashNumber hash(const Lookup& l) {
    return l.match(
        [](const JSAtom* atom) {
          return js::DefaultHasher<const JSAtom*>::hash(atom);
        },
        [](const char16_t* chars) {
          MOZ_ASSERT(chars);
          auto length = NS_strlen(chars);
          return HashString(chars, length);
        },
        [](const JS::ubi::EdgeName& ptr) {
          const char16_t* chars = ptr.get();
          MOZ_ASSERT(chars);
          auto length = NS_strlen(chars);
          return HashString(chars, length);
        });
  }

  // Equality matcher: raw char16_t* and EdgeName compare by content, while
  // atoms compare only by pointer identity (see comment above).
  struct EqualityMatcher {
    const TwoByteString& rhs;
    explicit EqualityMatcher(const TwoByteString& rhs) : rhs(rhs) {}

    bool operator()(const JSAtom* atom) {
      return rhs.is<JSAtom*>() && rhs.as<JSAtom*>() == atom;
    }

    bool operator()(const char16_t* chars) {
      MOZ_ASSERT(chars);

      const char16_t* rhsChars = nullptr;
      if (rhs.is<const char16_t*>())
        rhsChars = rhs.as<const char16_t*>();
      else if (rhs.is<JS::ubi::EdgeName>())
        rhsChars = rhs.as<JS::ubi::EdgeName>().get();
      else
        return false;
      MOZ_ASSERT(rhsChars);

      auto length = NS_strlen(chars);
      if (NS_strlen(rhsChars) != length) return false;

      return memcmp(chars, rhsChars, length * sizeof(char16_t)) == 0;
    }

    bool operator()(const JS::ubi::EdgeName& ptr) {
      MOZ_ASSERT(ptr);
      return operator()(ptr.get());
    }
  };

  static bool match(const TwoByteString& k, const Lookup& l) {
    EqualityMatcher eq(l);
    return k.match(eq);
  }

  static void rekey(TwoByteString& k, TwoByteString&& newKey) {
    k = std::move(newKey);
  }
};

// Returns whether `edge` should be included in a heap snapshot of
// `compartments`. The optional `policy` out-param is set to INCLUDE_EDGES
// if we want to include the referent's edges, or EXCLUDE_EDGES if we don't
// want to include them.
static bool ShouldIncludeEdge(JS::CompartmentSet* compartments,
                              const ubi::Node& origin, const ubi::Edge& edge,
                              CoreDumpWriter::EdgePolicy* policy = nullptr) {
  if (policy) {
    *policy = CoreDumpWriter::INCLUDE_EDGES;
  }

  if (!compartments) {
    // We aren't targeting a particular set of compartments, so serialize all
    // the things!
    return true;
  }

  // We are targeting a particular set of compartments. If this node is in our
  // target set, serialize it and all of its edges. If this node is _not_ in our
  // target set, we also serialize under the assumption that it is a shared
  // resource being used by something in our target compartments since we
  // reached it by traversing the heap graph. However, we do not serialize its
  // outgoing edges and we abandon further traversal from this node.
  //
  // If the node does not belong to any compartment, we also serialize its
  // outgoing edges. This case is relevant for Shapes: they don't belong to a
  // specific compartment and contain edges to parent/kids Shapes we want to
  // include. Note that these Shapes may contain pointers into our target
  // compartment (the Shape's getter/setter JSObjects). However, we do not
  // serialize nodes in other compartments that are reachable from these
  // non-compartment nodes.

  JS::Compartment* compartment = edge.referent.compartment();

  if (!compartment || compartments->has(compartment)) {
    return true;
  }

  if (policy) {
    *policy = CoreDumpWriter::EXCLUDE_EDGES;
  }

  return !!origin.compartment();
}
However, we do not 983 // serialize nodes in other compartments that are reachable from these 984 // non-compartment nodes. 985 986 JS::Compartment* compartment = edge.referent.compartment(); 987 988 if (!compartment || compartments->has(compartment)) { 989 return true; 990 } 991 992 if (policy) { 993 *policy = CoreDumpWriter::EXCLUDE_EDGES; 994 } 995 996 return !!origin.compartment(); 997 } 998 999 // A `CoreDumpWriter` that serializes nodes to protobufs and writes them to the 1000 // given `ZeroCopyOutputStream`. 1001 class MOZ_STACK_CLASS StreamWriter : public CoreDumpWriter { 1002 using FrameSet = js::HashSet<uint64_t>; 1003 using TwoByteStringMap = 1004 js::HashMap<TwoByteString, uint64_t, TwoByteString::HashPolicy>; 1005 using OneByteStringMap = js::HashMap<const char*, uint64_t>; 1006 1007 JSContext* cx; 1008 bool wantNames; 1009 // The set of |JS::ubi::StackFrame::identifier()|s that have already been 1010 // serialized and written to the core dump. 1011 FrameSet framesAlreadySerialized; 1012 // The set of two-byte strings that have already been serialized and written 1013 // to the core dump. 1014 TwoByteStringMap twoByteStringsAlreadySerialized; 1015 // The set of one-byte strings that have already been serialized and written 1016 // to the core dump. 1017 OneByteStringMap oneByteStringsAlreadySerialized; 1018 1019 ::google::protobuf::io::ZeroCopyOutputStream& stream; 1020 1021 JS::CompartmentSet* compartments; 1022 1023 bool writeMessage(const ::google::protobuf::MessageLite& message) { 1024 // We have to create a new CodedOutputStream when writing each message so 1025 // that the 64MB size limit used by Coded{Output,Input}Stream to prevent 1026 // integer overflow is enforced per message rather than on the whole stream. 
1027 ::google::protobuf::io::CodedOutputStream codedStream(&stream); 1028 codedStream.WriteVarint32(message.ByteSizeLong()); 1029 message.SerializeWithCachedSizes(&codedStream); 1030 return !codedStream.HadError(); 1031 } 1032 1033 // Attach the full two-byte string or a reference to a two-byte string that 1034 // has already been serialized to a protobuf message. 1035 template <typename SetStringFunction, typename SetRefFunction> 1036 bool attachTwoByteString(TwoByteString& string, SetStringFunction setString, 1037 SetRefFunction setRef) { 1038 auto ptr = twoByteStringsAlreadySerialized.lookupForAdd(string); 1039 if (ptr) { 1040 setRef(ptr->value()); 1041 return true; 1042 } 1043 1044 auto length = string.length(); 1045 auto stringData = MakeUnique<std::string>(length * sizeof(char16_t), '\0'); 1046 if (!stringData) return false; 1047 1048 auto buf = const_cast<char16_t*>( 1049 reinterpret_cast<const char16_t*>(stringData->data())); 1050 string.copyToBuffer(RangedPtr<char16_t>(buf, length), length); 1051 1052 uint64_t ref = twoByteStringsAlreadySerialized.count(); 1053 if (!twoByteStringsAlreadySerialized.add(ptr, std::move(string), ref)) 1054 return false; 1055 1056 setString(stringData.release()); 1057 return true; 1058 } 1059 1060 // Attach the full one-byte string or a reference to a one-byte string that 1061 // has already been serialized to a protobuf message. 
1062 template <typename SetStringFunction, typename SetRefFunction> 1063 bool attachOneByteString(const char* string, SetStringFunction setString, 1064 SetRefFunction setRef) { 1065 auto ptr = oneByteStringsAlreadySerialized.lookupForAdd(string); 1066 if (ptr) { 1067 setRef(ptr->value()); 1068 return true; 1069 } 1070 1071 auto length = strlen(string); 1072 auto stringData = MakeUnique<std::string>(string, length); 1073 if (!stringData) return false; 1074 1075 uint64_t ref = oneByteStringsAlreadySerialized.count(); 1076 if (!oneByteStringsAlreadySerialized.add(ptr, string, ref)) return false; 1077 1078 setString(stringData.release()); 1079 return true; 1080 } 1081 1082 protobuf::StackFrame* getProtobufStackFrame(JS::ubi::StackFrame& frame, 1083 size_t depth = 1) { 1084 // NB: de-duplicated string properties must be written in the same order 1085 // here as they are read in `HeapSnapshot::saveStackFrame` or else indices 1086 // in references to already serialized strings will be off. 1087 1088 MOZ_ASSERT(frame, 1089 "null frames should be represented as the lack of a serialized " 1090 "stack frame"); 1091 1092 auto id = frame.identifier(); 1093 auto protobufStackFrame = MakeUnique<protobuf::StackFrame>(); 1094 if (!protobufStackFrame) return nullptr; 1095 1096 if (framesAlreadySerialized.has(id)) { 1097 protobufStackFrame->set_ref(id); 1098 return protobufStackFrame.release(); 1099 } 1100 1101 auto data = MakeUnique<protobuf::StackFrame_Data>(); 1102 if (!data) return nullptr; 1103 1104 data->set_id(id); 1105 data->set_line(frame.line()); 1106 data->set_column(frame.column().oneOriginValue()); 1107 data->set_issystem(frame.isSystem()); 1108 data->set_isselfhosted(frame.isSelfHosted(cx)); 1109 1110 auto dupeSource = TwoByteString::from(frame.source()); 1111 if (!attachTwoByteString( 1112 dupeSource, 1113 [&](std::string* source) { data->set_allocated_source(source); }, 1114 [&](uint64_t ref) { data->set_sourceref(ref); })) { 1115 return nullptr; 1116 } 1117 1118 auto 
dupeName = TwoByteString::from(frame.functionDisplayName()); 1119 if (dupeName.isNonNull()) { 1120 if (!attachTwoByteString( 1121 dupeName, 1122 [&](std::string* name) { 1123 data->set_allocated_functiondisplayname(name); 1124 }, 1125 [&](uint64_t ref) { data->set_functiondisplaynameref(ref); })) { 1126 return nullptr; 1127 } 1128 } 1129 1130 auto parent = frame.parent(); 1131 if (parent && depth < HeapSnapshot::MAX_STACK_DEPTH) { 1132 auto protobufParent = getProtobufStackFrame(parent, depth + 1); 1133 if (!protobufParent) return nullptr; 1134 data->set_allocated_parent(protobufParent); 1135 } 1136 1137 protobufStackFrame->set_allocated_data(data.release()); 1138 1139 if (!framesAlreadySerialized.put(id)) return nullptr; 1140 1141 return protobufStackFrame.release(); 1142 } 1143 1144 public: 1145 StreamWriter(JSContext* cx, 1146 ::google::protobuf::io::ZeroCopyOutputStream& stream, 1147 bool wantNames, JS::CompartmentSet* compartments) 1148 : cx(cx), 1149 wantNames(wantNames), 1150 framesAlreadySerialized(cx), 1151 twoByteStringsAlreadySerialized(cx), 1152 oneByteStringsAlreadySerialized(cx), 1153 stream(stream), 1154 compartments(compartments) {} 1155 1156 ~StreamWriter() override {} 1157 1158 bool writeMetadata(uint64_t timestamp) final { 1159 protobuf::Metadata metadata; 1160 metadata.set_timestamp(timestamp); 1161 return writeMessage(metadata); 1162 } 1163 1164 bool writeNode(const JS::ubi::Node& ubiNode, EdgePolicy includeEdges) final { 1165 // NB: de-duplicated string properties must be written in the same order 1166 // here as they are read in `HeapSnapshot::saveNode` or else indices in 1167 // references to already serialized strings will be off. 
1168 1169 protobuf::Node protobufNode; 1170 protobufNode.set_id(ubiNode.identifier()); 1171 1172 protobufNode.set_coarsetype( 1173 JS::ubi::CoarseTypeToUint32(ubiNode.coarseType())); 1174 1175 auto typeName = TwoByteString(ubiNode.typeName()); 1176 if (NS_WARN_IF(!attachTwoByteString( 1177 typeName, 1178 [&](std::string* name) { 1179 protobufNode.set_allocated_typename_(name); 1180 }, 1181 [&](uint64_t ref) { protobufNode.set_typenameref(ref); }))) { 1182 return false; 1183 } 1184 1185 mozilla::MallocSizeOf mallocSizeOf = dbg::GetDebuggerMallocSizeOf(cx); 1186 MOZ_ASSERT(mallocSizeOf); 1187 protobufNode.set_size(ubiNode.size(mallocSizeOf)); 1188 1189 if (includeEdges) { 1190 auto edges = ubiNode.edges(cx, wantNames); 1191 if (NS_WARN_IF(!edges)) return false; 1192 1193 for (; !edges->empty(); edges->popFront()) { 1194 ubi::Edge& ubiEdge = edges->front(); 1195 if (!ShouldIncludeEdge(compartments, ubiNode, ubiEdge)) { 1196 continue; 1197 } 1198 1199 protobuf::Edge* protobufEdge = protobufNode.add_edges(); 1200 if (NS_WARN_IF(!protobufEdge)) { 1201 return false; 1202 } 1203 1204 protobufEdge->set_referent(ubiEdge.referent.identifier()); 1205 1206 if (wantNames && ubiEdge.name) { 1207 TwoByteString edgeName(std::move(ubiEdge.name)); 1208 if (NS_WARN_IF(!attachTwoByteString( 1209 edgeName, 1210 [&](std::string* name) { 1211 protobufEdge->set_allocated_name(name); 1212 }, 1213 [&](uint64_t ref) { protobufEdge->set_nameref(ref); }))) { 1214 return false; 1215 } 1216 } 1217 } 1218 } 1219 1220 if (ubiNode.hasAllocationStack()) { 1221 auto ubiStackFrame = ubiNode.allocationStack(); 1222 auto protoStackFrame = getProtobufStackFrame(ubiStackFrame); 1223 if (NS_WARN_IF(!protoStackFrame)) return false; 1224 protobufNode.set_allocated_allocationstack(protoStackFrame); 1225 } 1226 1227 if (auto className = ubiNode.jsObjectClassName()) { 1228 if (NS_WARN_IF(!attachOneByteString( 1229 className, 1230 [&](std::string* name) { 1231 protobufNode.set_allocated_jsobjectclassname(name); 
1232 }, 1233 [&](uint64_t ref) { 1234 protobufNode.set_jsobjectclassnameref(ref); 1235 }))) { 1236 return false; 1237 } 1238 } 1239 1240 if (auto scriptFilename = ubiNode.scriptFilename()) { 1241 if (NS_WARN_IF(!attachOneByteString( 1242 scriptFilename, 1243 [&](std::string* name) { 1244 protobufNode.set_allocated_scriptfilename(name); 1245 }, 1246 [&](uint64_t ref) { 1247 protobufNode.set_scriptfilenameref(ref); 1248 }))) { 1249 return false; 1250 } 1251 } 1252 1253 if (ubiNode.descriptiveTypeName()) { 1254 auto descriptiveTypeName = TwoByteString(ubiNode.descriptiveTypeName()); 1255 if (NS_WARN_IF(!attachTwoByteString( 1256 descriptiveTypeName, 1257 [&](std::string* name) { 1258 protobufNode.set_allocated_descriptivetypename(name); 1259 }, 1260 [&](uint64_t ref) { 1261 protobufNode.set_descriptivetypenameref(ref); 1262 }))) { 1263 return false; 1264 } 1265 } 1266 1267 return writeMessage(protobufNode); 1268 } 1269 }; 1270 1271 // A JS::ubi::BreadthFirst handler that serializes a snapshot of the heap into a 1272 // core dump. 1273 class MOZ_STACK_CLASS HeapSnapshotHandler { 1274 CoreDumpWriter& writer; 1275 JS::CompartmentSet* compartments; 1276 1277 public: 1278 // For telemetry. 1279 uint32_t nodeCount; 1280 uint32_t edgeCount; 1281 1282 HeapSnapshotHandler(CoreDumpWriter& writer, JS::CompartmentSet* compartments) 1283 : writer(writer), 1284 compartments(compartments), 1285 nodeCount(0), 1286 edgeCount(0) {} 1287 1288 // JS::ubi::BreadthFirst handler interface. 1289 1290 class NodeData {}; 1291 typedef JS::ubi::BreadthFirst<HeapSnapshotHandler> Traversal; 1292 bool operator()(Traversal& traversal, JS::ubi::Node origin, 1293 const JS::ubi::Edge& edge, NodeData*, bool first) { 1294 edgeCount++; 1295 1296 // We're only interested in the first time we reach edge.referent, not in 1297 // every edge arriving at that node. "But, don't we want to serialize every 1298 // edge in the heap graph?" you ask. Don't worry! 
This edge is still 1299 // serialized into the core dump. Serializing a node also serializes each of 1300 // its edges, and if we are traversing a given edge, we must have already 1301 // visited and serialized the origin node and its edges. 1302 if (!first) return true; 1303 1304 CoreDumpWriter::EdgePolicy policy; 1305 if (!ShouldIncludeEdge(compartments, origin, edge, &policy)) { 1306 // Because ShouldIncludeEdge considers the |origin| node as well, we don't 1307 // want to consider this node 'visited' until we write it to the core 1308 // dump. 1309 traversal.doNotMarkReferentAsVisited(); 1310 return true; 1311 } 1312 1313 nodeCount++; 1314 1315 if (policy == CoreDumpWriter::EXCLUDE_EDGES) traversal.abandonReferent(); 1316 1317 return writer.writeNode(edge.referent, policy); 1318 } 1319 }; 1320 1321 bool WriteHeapGraph(JSContext* cx, const JS::ubi::Node& node, 1322 CoreDumpWriter& writer, bool wantNames, 1323 JS::CompartmentSet* compartments, 1324 JS::AutoCheckCannotGC& noGC, uint32_t& outNodeCount, 1325 uint32_t& outEdgeCount) { 1326 // Serialize the starting node to the core dump. 1327 1328 if (NS_WARN_IF(!writer.writeNode(node, CoreDumpWriter::INCLUDE_EDGES))) { 1329 return false; 1330 } 1331 1332 // Walk the heap graph starting from the given node and serialize it into the 1333 // core dump. 
1334 1335 HeapSnapshotHandler handler(writer, compartments); 1336 HeapSnapshotHandler::Traversal traversal(cx, handler, noGC); 1337 traversal.wantNames = wantNames; 1338 1339 bool ok = traversal.addStartVisited(node) && traversal.traverse(); 1340 1341 if (ok) { 1342 outNodeCount = handler.nodeCount; 1343 outEdgeCount = handler.edgeCount; 1344 } 1345 1346 return ok; 1347 } 1348 1349 static unsigned long msSinceProcessCreation(const TimeStamp& now) { 1350 auto duration = now - TimeStamp::ProcessCreation(); 1351 return (unsigned long)duration.ToMilliseconds(); 1352 } 1353 1354 /* static */ 1355 already_AddRefed<nsIFile> HeapSnapshot::CreateUniqueCoreDumpFile( 1356 ErrorResult& rv, const TimeStamp& now, nsAString& outFilePath, 1357 nsAString& outSnapshotId) { 1358 MOZ_RELEASE_ASSERT(XRE_IsParentProcess()); 1359 nsCOMPtr<nsIFile> file; 1360 rv = GetSpecialSystemDirectory(OS_TemporaryDirectory, getter_AddRefs(file)); 1361 if (NS_WARN_IF(rv.Failed())) return nullptr; 1362 1363 nsAutoString tempPath; 1364 rv = file->GetPath(tempPath); 1365 if (NS_WARN_IF(rv.Failed())) return nullptr; 1366 1367 auto ms = msSinceProcessCreation(now); 1368 rv = file->AppendNative(nsPrintfCString("%lu.fxsnapshot", ms)); 1369 if (NS_WARN_IF(rv.Failed())) return nullptr; 1370 1371 rv = file->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0666); 1372 if (NS_WARN_IF(rv.Failed())) return nullptr; 1373 1374 rv = file->GetPath(outFilePath); 1375 if (NS_WARN_IF(rv.Failed())) return nullptr; 1376 1377 // The snapshot ID must be computed in the process that created the 1378 // temp file, because TmpD may not be the same in all processes. 1379 outSnapshotId.Assign(Substring( 1380 outFilePath, tempPath.Length() + 1, 1381 outFilePath.Length() - tempPath.Length() - sizeof(".fxsnapshot"))); 1382 1383 return file.forget(); 1384 } 1385 1386 // Deletion policy for cleaning up PHeapSnapshotTempFileHelperChild pointers. 
class DeleteHeapSnapshotTempFileHelperChild {
 public:
  constexpr DeleteHeapSnapshotTempFileHelperChild() {}

  // Tear down the IPDL actor; the failure is only warned about since there is
  // nothing more we can do at destruction time.
  void operator()(PHeapSnapshotTempFileHelperChild* ptr) const {
    (void)NS_WARN_IF(!HeapSnapshotTempFileHelperChild::Send__delete__(ptr));
  }
};

// A UniquePtr alias to automatically manage PHeapSnapshotTempFileHelperChild
// pointers.
using UniqueHeapSnapshotTempFileHelperChild =
    UniquePtr<PHeapSnapshotTempFileHelperChild,
              DeleteHeapSnapshotTempFileHelperChild>;

// Get an nsIOutputStream that we can write the heap snapshot to. In non-e10s
// and in the e10s parent process, open a file directly and create an output
// stream for it. In e10s child processes, we are sandboxed without access to
// the filesystem. Use IPDL to request a file descriptor from the parent
// process.
static already_AddRefed<nsIOutputStream> getCoreDumpOutputStream(
    ErrorResult& rv, TimeStamp& start, nsAString& outFilePath,
    nsAString& outSnapshotId) {
  if (XRE_IsParentProcess()) {
    // Create the file and open the output stream directly.

    nsCOMPtr<nsIFile> file = HeapSnapshot::CreateUniqueCoreDumpFile(
        rv, start, outFilePath, outSnapshotId);
    if (NS_WARN_IF(rv.Failed())) return nullptr;

    nsCOMPtr<nsIOutputStream> outputStream;
    rv = NS_NewLocalFileOutputStream(getter_AddRefs(outputStream), file,
                                     PR_WRONLY, -1, 0);
    if (NS_WARN_IF(rv.Failed())) return nullptr;

    return outputStream.forget();
  }
  // Request a file descriptor from the parent process over IPDL.

  auto cc = ContentChild::GetSingleton();
  if (!cc) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return nullptr;
  }

  UniqueHeapSnapshotTempFileHelperChild helper(
      cc->SendPHeapSnapshotTempFileHelperConstructor());
  if (NS_WARN_IF(!helper)) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return nullptr;
  }

  // Synchronously ask the parent to create and open the temp file.
  OpenHeapSnapshotTempFileResponse response;
  if (!helper->SendOpenHeapSnapshotTempFile(&response)) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return nullptr;
  }
  // An nsresult payload means the parent failed; propagate its error code.
  if (response.type() == OpenHeapSnapshotTempFileResponse::Tnsresult) {
    rv.Throw(response.get_nsresult());
    return nullptr;
  }

  auto opened = response.get_OpenedFile();
  outFilePath = opened.path();
  outSnapshotId = opened.snapshotId();
  nsCOMPtr<nsIOutputStream> outputStream =
      FileDescriptorOutputStream::Create(opened.descriptor());
  if (NS_WARN_IF(!outputStream)) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return nullptr;
  }

  return outputStream.forget();
}

}  // namespace devtools

namespace dom {

using namespace JS;
using namespace devtools;

// Common implementation for SaveHeapSnapshot and SaveHeapSnapshotGetId: write
// a gzipped, protobuf-framed core dump of the heap bounded by `boundaries`,
// reporting the temp file's path and snapshot id through the out-params, and
// record timing/size telemetry on success.
/* static */
void ChromeUtils::SaveHeapSnapshotShared(
    GlobalObject& global, const HeapSnapshotBoundaries& boundaries,
    nsAString& outFilePath, nsAString& outSnapshotId, ErrorResult& rv) {
  auto start = TimeStamp::Now();

  bool wantNames = true;
  CompartmentSet compartments;
  uint32_t nodeCount = 0;
  uint32_t edgeCount = 0;

  nsCOMPtr<nsIOutputStream> outputStream =
      getCoreDumpOutputStream(rv, start, outFilePath, outSnapshotId);
  if (NS_WARN_IF(rv.Failed())) return;

  ZeroCopyNSIOutputStream zeroCopyStream(outputStream);
  ::google::protobuf::io::GzipOutputStream gzipStream(&zeroCopyStream);

  JSContext* cx = global.Context();

  // Inner scope: the writer and root list must be destroyed (flushing the
  // serialization) before we record the elapsed-time telemetry below.
  {
    ubi::RootList rootList(cx, wantNames);
    auto [ok, nogc] =
        EstablishBoundaries(cx, rv, boundaries, rootList, compartments);
    if (!ok) {
      return;
    }

    // An empty compartment set means "no filtering": pass nullptr.
    StreamWriter writer(cx, gzipStream, wantNames,
                        !compartments.empty() ? &compartments : nullptr);

    ubi::Node roots(&rootList);

    // Serialize the initial heap snapshot metadata to the core dump.
    if (!writer.writeMetadata(PR_Now()) ||
        // Serialize the heap graph to the core dump, starting from our list of
        // roots.
        !WriteHeapGraph(cx, roots, writer, wantNames,
                        !compartments.empty() ? &compartments : nullptr, nogc,
                        nodeCount, edgeCount)) {
      // Prefer the stream's own error code when it has one.
      rv.Throw(zeroCopyStream.failed() ? zeroCopyStream.result()
                                       : NS_ERROR_UNEXPECTED);
      return;
    }
  }

  glean::devtools::save_heap_snapshot.AccumulateRawDuration(TimeStamp::Now() -
                                                            start);
  glean::devtools::heap_snapshot_node_count.AccumulateSingleSample(nodeCount);
  glean::devtools::heap_snapshot_edge_count.AccumulateSingleSample(edgeCount);
}

// Return the ubi::Node identifier for `val`, so script can correlate a live
// object with a node in a saved snapshot.
/* static */
uint64_t ChromeUtils::GetObjectNodeId(GlobalObject& global,
                                      JS::Handle<JSObject*> val) {
  JS::Rooted<JSObject*> obj(global.Context(), val);

  JS::ubi::Node node(obj);
  return node.identifier();
}

// Save a heap snapshot and return its file path (snapshot id discarded).
/* static */
void ChromeUtils::SaveHeapSnapshot(GlobalObject& global,
                                   const HeapSnapshotBoundaries& boundaries,
                                   nsAString& outFilePath, ErrorResult& rv) {
  nsAutoString snapshotId;
  SaveHeapSnapshotShared(global, boundaries, outFilePath, snapshotId, rv);
}

// Save a heap snapshot and return its snapshot id (file path discarded).
/* static */
void ChromeUtils::SaveHeapSnapshotGetId(
    GlobalObject& global, const HeapSnapshotBoundaries& boundaries,
    nsAString& outSnapshotId, ErrorResult& rv) {
  nsAutoString filePath;
  SaveHeapSnapshotShared(global, boundaries, filePath, outSnapshotId, rv);
}

// Memory-map the core dump at `filePath` and deserialize it into a
// HeapSnapshot object, recording read-time telemetry on success.
/* static */
already_AddRefed<HeapSnapshot> ChromeUtils::ReadHeapSnapshot(
    GlobalObject& global, const nsAString& filePath, ErrorResult& rv) {
  auto start = TimeStamp::Now();

  nsCOMPtr<nsIFile> snapshotFile;
  rv = NS_NewLocalFile(filePath, getter_AddRefs(snapshotFile));
  if (rv.Failed()) {
    return nullptr;
  }

  AutoMemMap mm;
  rv = mm.init(snapshotFile);
  if (rv.Failed()) return nullptr;

  RefPtr<HeapSnapshot> snapshot = HeapSnapshot::Create(
      global.Context(), global, reinterpret_cast<const uint8_t*>(mm.address()),
      mm.size(), rv);

  if (!rv.Failed())
    glean::devtools::read_heap_snapshot.AccumulateRawDuration(TimeStamp::Now() -
                                                              start);

  return snapshot.forget();
}

}  // namespace dom
}  // namespace mozilla