StartupCache.cpp (31110B)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ 2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */ 3 /* This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #include "prio.h" 8 #include "PLDHashTable.h" 9 #include "mozilla/IOInterposer.h" 10 #include "mozilla/AutoMemMap.h" 11 #include "mozilla/IOBuffers.h" 12 #include "mozilla/MemoryReporting.h" 13 #include "mozilla/MemUtils.h" 14 #include "mozilla/MmapFaultHandler.h" 15 #include "mozilla/ResultExtensions.h" 16 #include "mozilla/scache/StartupCache.h" 17 #include "mozilla/ScopeExit.h" 18 #include "mozilla/Try.h" 19 20 #include "nsClassHashtable.h" 21 #include "nsComponentManagerUtils.h" 22 #include "nsCRT.h" 23 #include "nsDirectoryServiceUtils.h" 24 #include "nsIClassInfo.h" 25 #include "nsIFile.h" 26 #include "nsIObserver.h" 27 #include "nsIOutputStream.h" 28 #include "nsISupports.h" 29 #include "nsITimer.h" 30 #include "mozilla/Omnijar.h" 31 #include "prenv.h" 32 #include "mozilla/glean/StartupcacheMetrics.h" 33 #include "nsThreadUtils.h" 34 #include "nsXULAppAPI.h" 35 #include "nsIProtocolHandler.h" 36 #include "GeckoProfiler.h" 37 #include "nsAppRunner.h" 38 #include "xpcpublic.h" 39 #ifdef MOZ_BACKGROUNDTASKS 40 # include "mozilla/BackgroundTasks.h" 41 #endif 42 43 #if defined(XP_WIN) 44 # include <windows.h> 45 #endif 46 47 #ifdef IS_BIG_ENDIAN 48 # define SC_ENDIAN "big" 49 #else 50 # define SC_ENDIAN "little" 51 #endif 52 53 #if PR_BYTES_PER_WORD == 4 54 # define SC_WORDSIZE "4" 55 #else 56 # define SC_WORDSIZE "8" 57 #endif 58 59 using namespace mozilla::Compression; 60 61 namespace mozilla { 62 namespace scache { 63 64 MOZ_DEFINE_MALLOC_SIZE_OF(StartupCacheMallocSizeOf) 65 66 NS_IMETHODIMP 67 StartupCache::CollectReports(nsIHandleReportCallback* aHandleReport, 68 nsISupports* aData, bool aAnonymize) { 69 MutexAutoLock 
lock(mTableLock); 70 MOZ_COLLECT_REPORT( 71 "explicit/startup-cache/mapping", KIND_NONHEAP, UNITS_BYTES, 72 mCacheData.nonHeapSizeOfExcludingThis(), 73 "Memory used to hold the mapping of the startup cache from file. " 74 "This memory is likely to be swapped out shortly after start-up."); 75 76 MOZ_COLLECT_REPORT("explicit/startup-cache/data", KIND_HEAP, UNITS_BYTES, 77 HeapSizeOfIncludingThis(StartupCacheMallocSizeOf), 78 "Memory used by the startup cache for things other than " 79 "the file mapping."); 80 81 return NS_OK; 82 } 83 84 static const uint8_t MAGIC[] = "startupcache0002"; 85 // This is a heuristic value for how much to reserve for mTable to avoid 86 // rehashing. This is not a hard limit in release builds, but it is in 87 // debug builds as it should be stable. If we exceed this number we should 88 // just increase it. 89 static const size_t STARTUP_CACHE_RESERVE_CAPACITY = 450; 90 91 #ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED 92 // This is a hard limit which we will assert on, to ensure that we don't 93 // have some bug causing runaway cache growth. 94 static const size_t STARTUP_CACHE_MAX_CAPACITY = 5000; 95 #endif 96 97 // Not const because we change it for gtests. 98 static uint8_t STARTUP_CACHE_WRITE_TIMEOUT = 60; 99 100 #define STARTUP_CACHE_NAME "startupCache." SC_WORDSIZE "." 
SC_ENDIAN 101 102 static inline Result<Ok, nsresult> Write(PRFileDesc* fd, const void* data, 103 int32_t len) { 104 if (PR_Write(fd, data, len) != len) { 105 return Err(NS_ERROR_FAILURE); 106 } 107 return Ok(); 108 } 109 110 static inline Result<Ok, nsresult> Seek(PRFileDesc* fd, int32_t offset) { 111 if (PR_Seek(fd, offset, PR_SEEK_SET) == -1) { 112 return Err(NS_ERROR_FAILURE); 113 } 114 return Ok(); 115 } 116 117 static nsresult MapLZ4ErrorToNsresult(size_t aError) { 118 return NS_ERROR_FAILURE; 119 } 120 121 StartupCache* StartupCache::GetSingletonNoInit() { 122 return StartupCache::gStartupCache; 123 } 124 125 StartupCache* StartupCache::GetSingleton() { 126 #ifdef MOZ_BACKGROUNDTASKS 127 if (BackgroundTasks::IsBackgroundTaskMode()) { 128 return nullptr; 129 } 130 #endif 131 132 if (!gStartupCache) { 133 if (!XRE_IsParentProcess()) { 134 return nullptr; 135 } 136 #ifdef MOZ_DISABLE_STARTUPCACHE 137 return nullptr; 138 #else 139 StartupCache::InitSingleton(); 140 #endif 141 } 142 143 return StartupCache::gStartupCache; 144 } 145 146 void StartupCache::DeleteSingleton() { StartupCache::gStartupCache = nullptr; } 147 148 nsresult StartupCache::InitSingleton() { 149 nsresult rv; 150 StartupCache::gStartupCache = new StartupCache(); 151 152 rv = StartupCache::gStartupCache->Init(); 153 if (NS_FAILED(rv)) { 154 StartupCache::gStartupCache = nullptr; 155 } 156 return rv; 157 } 158 159 StaticRefPtr<StartupCache> StartupCache::gStartupCache; 160 bool StartupCache::gShutdownInitiated; 161 bool StartupCache::gIgnoreDiskCache; 162 bool StartupCache::gFoundDiskCacheOnInit; 163 164 NS_IMPL_ISUPPORTS(StartupCache, nsIMemoryReporter) 165 166 StartupCache::StartupCache() 167 : mTableLock("StartupCache::mTableLock"), 168 mDirty(false), 169 mWrittenOnce(false), 170 mCurTableReferenced(false), 171 mRequestedCount(0), 172 mCacheEntriesBaseOffset(0) {} 173 174 StartupCache::~StartupCache() { UnregisterWeakMemoryReporter(this); } 175 176 nsresult StartupCache::Init() { 177 // 
workaround for bug 653936 178 nsCOMPtr<nsIProtocolHandler> jarInitializer( 179 do_GetService(NS_NETWORK_PROTOCOL_CONTRACTID_PREFIX "jar")); 180 181 nsresult rv; 182 183 if (mozilla::RunningGTest()) { 184 STARTUP_CACHE_WRITE_TIMEOUT = 3; 185 } 186 187 // This allows to override the startup cache filename 188 // which is useful from xpcshell, when there is no ProfLDS directory to keep 189 // cache in. 190 char* env = PR_GetEnv("MOZ_STARTUP_CACHE"); 191 if (env && *env) { 192 MOZ_TRY( 193 NS_NewNativeLocalFile(nsDependentCString(env), getter_AddRefs(mFile))); 194 } else { 195 nsCOMPtr<nsIFile> file; 196 rv = NS_GetSpecialDirectory("ProfLDS", getter_AddRefs(file)); 197 if (NS_FAILED(rv)) { 198 // return silently, this will fail in mochitests's xpcshell process. 199 return rv; 200 } 201 202 rv = file->AppendNative("startupCache"_ns); 203 NS_ENSURE_SUCCESS(rv, rv); 204 205 // Try to create the directory if it's not there yet 206 rv = file->Create(nsIFile::DIRECTORY_TYPE, 0777); 207 if (NS_FAILED(rv) && rv != NS_ERROR_FILE_ALREADY_EXISTS) return rv; 208 209 rv = file->AppendNative(nsLiteralCString(STARTUP_CACHE_NAME)); 210 NS_ENSURE_SUCCESS(rv, rv); 211 212 mFile = file.forget(); 213 } 214 215 mObserverService = do_GetService("@mozilla.org/observer-service;1"); 216 217 if (!mObserverService) { 218 NS_WARNING("Could not get observerService."); 219 return NS_ERROR_UNEXPECTED; 220 } 221 222 mListener = new StartupCacheListener(); 223 rv = mObserverService->AddObserver(mListener, NS_XPCOM_SHUTDOWN_OBSERVER_ID, 224 false); 225 NS_ENSURE_SUCCESS(rv, rv); 226 rv = mObserverService->AddObserver(mListener, "startupcache-invalidate", 227 false); 228 NS_ENSURE_SUCCESS(rv, rv); 229 rv = mObserverService->AddObserver(mListener, "intl:app-locales-changed", 230 false); 231 NS_ENSURE_SUCCESS(rv, rv); 232 233 { 234 MutexAutoLock lock(mTableLock); 235 auto result = LoadArchive(); 236 rv = result.isErr() ? 
result.unwrapErr() : NS_OK; 237 } 238 239 gFoundDiskCacheOnInit = rv != NS_ERROR_FILE_NOT_FOUND; 240 241 // Sometimes we don't have a cache yet, that's ok. 242 // If it's corrupted, just remove it and start over. 243 if (gIgnoreDiskCache || (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND)) { 244 NS_WARNING("Failed to load startupcache file correctly, removing!"); 245 InvalidateCache(); 246 } 247 248 RegisterWeakMemoryReporter(this); 249 mDecompressionContext = MakeUnique<LZ4FrameDecompressionContext>(true); 250 251 return NS_OK; 252 } 253 254 void StartupCache::StartPrefetchMemory() { 255 { 256 MonitorAutoLock lock(mPrefetchComplete); 257 mPrefetchInProgress = true; 258 } 259 NS_DispatchBackgroundTask(NewRunnableMethod<uint8_t*, size_t>( 260 "StartupCache::ThreadedPrefetch", this, &StartupCache::ThreadedPrefetch, 261 mCacheData.get<uint8_t>().get(), mCacheData.size())); 262 } 263 264 /** 265 * LoadArchive can only be called from the main thread. 266 */ 267 Result<Ok, nsresult> StartupCache::LoadArchive() { 268 MOZ_ASSERT(NS_IsMainThread(), "Can only load startup cache on main thread"); 269 if (gIgnoreDiskCache) return Err(NS_ERROR_FAILURE); 270 271 MOZ_TRY(mCacheData.init(mFile)); 272 auto size = mCacheData.size(); 273 if (CanPrefetchMemory()) { 274 StartPrefetchMemory(); 275 } 276 277 uint32_t headerSize; 278 if (size < sizeof(MAGIC) + sizeof(headerSize)) { 279 return Err(NS_ERROR_UNEXPECTED); 280 } 281 282 auto data = mCacheData.get<uint8_t>(); 283 auto end = data + size; 284 285 MMAP_FAULT_HANDLER_BEGIN_BUFFER(data.get(), size) 286 287 if (memcmp(MAGIC, data.get(), sizeof(MAGIC))) { 288 return Err(NS_ERROR_UNEXPECTED); 289 } 290 data += sizeof(MAGIC); 291 292 headerSize = LittleEndian::readUint32(data.get()); 293 data += sizeof(headerSize); 294 295 if (headerSize > end - data) { 296 MOZ_ASSERT(false, "StartupCache file is corrupt."); 297 return Err(NS_ERROR_UNEXPECTED); 298 } 299 300 Range<const uint8_t> header(data, data + headerSize); 301 data += headerSize; 
302 303 mCacheEntriesBaseOffset = sizeof(MAGIC) + sizeof(headerSize) + headerSize; 304 { 305 if (!mTable.reserve(STARTUP_CACHE_RESERVE_CAPACITY)) { 306 return Err(NS_ERROR_UNEXPECTED); 307 } 308 auto cleanup = MakeScopeExit([&]() { 309 mTableLock.AssertCurrentThreadOwns(); 310 WaitOnPrefetch(); 311 mTable.clear(); 312 mCacheData.reset(); 313 }); 314 loader::InputBuffer buf(header); 315 316 uint32_t currentOffset = 0; 317 while (!buf.finished()) { 318 uint32_t offset = 0; 319 uint32_t compressedSize = 0; 320 uint32_t uncompressedSize = 0; 321 nsCString key; 322 buf.codeUint32(offset); 323 buf.codeUint32(compressedSize); 324 buf.codeUint32(uncompressedSize); 325 buf.codeString(key); 326 327 if (offset + compressedSize > end - data) { 328 MOZ_ASSERT(false, "StartupCache file is corrupt."); 329 return Err(NS_ERROR_UNEXPECTED); 330 } 331 332 // Make sure offsets match what we'd expect based on script ordering and 333 // size, as a basic sanity check. 334 if (offset != currentOffset) { 335 return Err(NS_ERROR_UNEXPECTED); 336 } 337 currentOffset += compressedSize; 338 339 // We could use mTable.putNew if we knew the file we're loading weren't 340 // corrupt. However, we don't know that, so check if the key already 341 // exists. If it does, we know the file must be corrupt. 
342 decltype(mTable)::AddPtr p = mTable.lookupForAdd(key); 343 if (p) { 344 return Err(NS_ERROR_UNEXPECTED); 345 } 346 347 if (!mTable.add( 348 p, key, 349 StartupCacheEntry(offset, compressedSize, uncompressedSize))) { 350 return Err(NS_ERROR_UNEXPECTED); 351 } 352 } 353 354 if (buf.error()) { 355 return Err(NS_ERROR_UNEXPECTED); 356 } 357 358 cleanup.release(); 359 } 360 361 MMAP_FAULT_HANDLER_CATCH(Err(NS_ERROR_UNEXPECTED)) 362 363 return Ok(); 364 } 365 366 bool StartupCache::HasEntry(const char* id) { 367 AUTO_PROFILER_LABEL("StartupCache::HasEntry", OTHER); 368 369 MOZ_ASSERT(NS_IsMainThread(), "Startup cache only available on main thread"); 370 371 MutexAutoLock lock(mTableLock); 372 return mTable.has(nsDependentCString(id)); 373 } 374 375 nsresult StartupCache::GetBuffer(const char* id, const char** outbuf, 376 uint32_t* length) 377 MOZ_NO_THREAD_SAFETY_ANALYSIS { 378 AUTO_PROFILER_LABEL("StartupCache::GetBuffer", OTHER); 379 380 NS_ASSERTION(NS_IsMainThread(), 381 "Startup cache only available on main thread"); 382 383 auto label = glean::startup_cache::RequestsLabel::eMiss; 384 auto telemetry = MakeScopeExit( 385 [&label] { glean::startup_cache::requests.EnumGet(label).Add(); }); 386 387 MutexAutoLock lock(mTableLock); 388 decltype(mTable)::Ptr p = mTable.lookup(nsDependentCString(id)); 389 if (!p) { 390 return NS_ERROR_NOT_AVAILABLE; 391 } 392 393 auto& value = p->value(); 394 if (value.mData) { 395 label = glean::startup_cache::RequestsLabel::eHitmemory; 396 } else { 397 if (!mCacheData.initialized()) { 398 return NS_ERROR_NOT_AVAILABLE; 399 } 400 // It is impossible for a write to be pending here. This is because 401 // we just checked mCacheData.initialized(), and this is reset before 402 // writing to the cache. It's not re-initialized unless we call 403 // LoadArchive(), either from Init() (which must have already happened) or 404 // InvalidateCache(). InvalidateCache() locks the mutex, so a write can't be 405 // happening. 
406 // Also, WriteToDisk() requires mTableLock, so while it's writing we can't 407 // be here. 408 409 size_t totalRead = 0; 410 size_t totalWritten = 0; 411 Span<const char> compressed = Span( 412 mCacheData.get<char>().get() + mCacheEntriesBaseOffset + value.mOffset, 413 value.mCompressedSize); 414 value.mData = UniqueFreePtr<char[]>(reinterpret_cast<char*>( 415 malloc(sizeof(char) * value.mUncompressedSize))); 416 Span<char> uncompressed = Span(value.mData.get(), value.mUncompressedSize); 417 MMAP_FAULT_HANDLER_BEGIN_BUFFER(uncompressed.Elements(), 418 uncompressed.Length()) 419 bool finished = false; 420 while (!finished) { 421 auto result = mDecompressionContext->Decompress( 422 uncompressed.From(totalWritten), compressed.From(totalRead)); 423 if (NS_WARN_IF(result.isErr())) { 424 value.mData = nullptr; 425 MutexAutoUnlock unlock(mTableLock); 426 InvalidateCache(); 427 return NS_ERROR_FAILURE; 428 } 429 auto decompressionResult = result.unwrap(); 430 totalRead += decompressionResult.mSizeRead; 431 totalWritten += decompressionResult.mSizeWritten; 432 finished = decompressionResult.mFinished; 433 } 434 435 MMAP_FAULT_HANDLER_CATCH(NS_ERROR_FAILURE) 436 437 label = glean::startup_cache::RequestsLabel::eHitdisk; 438 } 439 440 if (!value.mRequested) { 441 value.mRequested = true; 442 value.mRequestedOrder = ++mRequestedCount; 443 MOZ_ASSERT(mRequestedCount <= mTable.count(), 444 "Somehow we requested more StartupCache items than exist."); 445 ResetStartupWriteTimerCheckingReadCount(); 446 } 447 448 // Track that something holds a reference into mTable, so we know to hold 449 // onto it in case the cache is invalidated. 
450 mCurTableReferenced = true; 451 *outbuf = value.mData.get(); 452 *length = value.mUncompressedSize; 453 return NS_OK; 454 } 455 456 nsresult StartupCache::PutBuffer(const char* id, UniqueFreePtr<char[]>&& inbuf, 457 uint32_t len) MOZ_NO_THREAD_SAFETY_ANALYSIS { 458 NS_ASSERTION(NS_IsMainThread(), 459 "Startup cache only available on main thread"); 460 if (StartupCache::gShutdownInitiated) { 461 return NS_ERROR_NOT_AVAILABLE; 462 } 463 464 // Try to gain the table write lock. If the background task to write the 465 // cache is running, this will fail. 466 MutexAutoTryLock lock(mTableLock); 467 if (!lock) { 468 return NS_ERROR_NOT_AVAILABLE; 469 } 470 mTableLock.AssertCurrentThreadOwns(); 471 bool exists = mTable.has(nsDependentCString(id)); 472 if (exists) { 473 NS_WARNING("Existing entry in StartupCache."); 474 // Double-caching is undesirable but not an error. 475 return NS_OK; 476 } 477 478 // putNew returns false on alloc failure - in the very unlikely event we hit 479 // that and aren't going to crash elsewhere, there's no reason we need to 480 // crash here. 481 if (mTable.putNew(nsCString(id), StartupCacheEntry(std::move(inbuf), len, 482 ++mRequestedCount))) { 483 return ResetStartupWriteTimer(); 484 } 485 MOZ_DIAGNOSTIC_ASSERT(mTable.count() < STARTUP_CACHE_MAX_CAPACITY, 486 "Too many StartupCache entries."); 487 return NS_OK; 488 } 489 490 size_t StartupCache::HeapSizeOfIncludingThis( 491 mozilla::MallocSizeOf aMallocSizeOf) const { 492 // This function could measure more members, but they haven't been found by 493 // DMD to be significant. They can be added later if necessary. 
494 495 size_t n = aMallocSizeOf(this); 496 497 n += mTable.shallowSizeOfExcludingThis(aMallocSizeOf); 498 for (auto iter = mTable.iter(); !iter.done(); iter.next()) { 499 if (iter.get().value().mData) { 500 n += aMallocSizeOf(iter.get().value().mData.get()); 501 } 502 n += iter.get().key().SizeOfExcludingThisIfUnshared(aMallocSizeOf); 503 } 504 505 return n; 506 } 507 508 /** 509 * WriteToDisk writes the cache out to disk. Callers of WriteToDisk need to call 510 * WaitOnWriteComplete to make sure there isn't a write 511 * happening on another thread. 512 * We own the mTableLock here. 513 */ 514 Result<Ok, nsresult> StartupCache::WriteToDisk() { 515 if (!mDirty || mWrittenOnce) { 516 return Ok(); 517 } 518 519 if (!mFile) { 520 return Err(NS_ERROR_UNEXPECTED); 521 } 522 523 AutoFDClose raiiFd; 524 MOZ_TRY(mFile->OpenNSPRFileDesc(PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE, 525 0644, getter_Transfers(raiiFd))); 526 const auto fd = raiiFd.get(); 527 528 nsTArray<StartupCacheEntry::KeyValuePair> entries(mTable.count()); 529 for (auto iter = mTable.iter(); !iter.done(); iter.next()) { 530 if (iter.get().value().mRequested) { 531 StartupCacheEntry::KeyValuePair kv(&iter.get().key(), 532 &iter.get().value()); 533 entries.AppendElement(kv); 534 } 535 } 536 537 if (entries.IsEmpty()) { 538 return Ok(); 539 } 540 541 entries.Sort(StartupCacheEntry::Comparator()); 542 loader::OutputBuffer buf; 543 for (auto& e : entries) { 544 auto* key = e.first; 545 auto* value = e.second; 546 auto uncompressedSize = value->mUncompressedSize; 547 // Set the mHeaderOffsetInFile so we can go back and edit the offset. 548 value->mHeaderOffsetInFile = buf.cursor(); 549 // Write a 0 offset/compressed size as a placeholder until we get the real 550 // offset after compressing. 
551 buf.codeUint32(0); 552 buf.codeUint32(0); 553 buf.codeUint32(uncompressedSize); 554 buf.codeString(*key); 555 } 556 557 uint8_t headerSize[4]; 558 LittleEndian::writeUint32(headerSize, buf.cursor()); 559 560 MOZ_TRY(Write(fd, MAGIC, sizeof(MAGIC))); 561 MOZ_TRY(Write(fd, headerSize, sizeof(headerSize))); 562 size_t headerStart = sizeof(MAGIC) + sizeof(headerSize); 563 size_t dataStart = headerStart + buf.cursor(); 564 MOZ_TRY(Seek(fd, dataStart)); 565 566 size_t offset = 0; 567 568 const size_t chunkSize = 1024 * 16; 569 LZ4FrameCompressionContext ctx(6, /* aCompressionLevel */ 570 chunkSize, /* aReadBufLen */ 571 true, /* aChecksum */ 572 true); /* aStableSrc */ 573 size_t writeBufLen = ctx.GetRequiredWriteBufferLength(); 574 auto writeBuffer = MakeUnique<char[]>(writeBufLen); 575 auto writeSpan = Span(writeBuffer.get(), writeBufLen); 576 577 for (auto& e : entries) { 578 auto value = e.second; 579 value->mOffset = offset; 580 Span<const char> result = 581 MOZ_TRY(ctx.BeginCompressing(writeSpan).mapErr(MapLZ4ErrorToNsresult)); 582 MOZ_TRY(Write(fd, result.Elements(), result.Length())); 583 offset += result.Length(); 584 585 for (size_t i = 0; i < value->mUncompressedSize; i += chunkSize) { 586 size_t size = std::min(chunkSize, value->mUncompressedSize - i); 587 char* uncompressed = value->mData.get() + i; 588 result = MOZ_TRY(ctx.ContinueCompressing(Span(uncompressed, size)) 589 .mapErr(MapLZ4ErrorToNsresult)); 590 MOZ_TRY(Write(fd, result.Elements(), result.Length())); 591 offset += result.Length(); 592 } 593 594 result = MOZ_TRY(ctx.EndCompressing().mapErr(MapLZ4ErrorToNsresult)); 595 MOZ_TRY(Write(fd, result.Elements(), result.Length())); 596 offset += result.Length(); 597 value->mCompressedSize = offset - value->mOffset; 598 MOZ_TRY(Seek(fd, dataStart + offset)); 599 } 600 601 for (auto& e : entries) { 602 auto value = e.second; 603 uint8_t* headerEntry = buf.Get() + value->mHeaderOffsetInFile; 604 LittleEndian::writeUint32(headerEntry, value->mOffset); 
605 LittleEndian::writeUint32(headerEntry + sizeof(value->mOffset), 606 value->mCompressedSize); 607 } 608 MOZ_TRY(Seek(fd, headerStart)); 609 MOZ_TRY(Write(fd, buf.Get(), buf.cursor())); 610 611 mDirty = false; 612 mWrittenOnce = true; 613 614 return Ok(); 615 } 616 617 void StartupCache::InvalidateCache(bool memoryOnly) { 618 WaitOnPrefetch(); 619 // Ensure we're not writing using mTable... 620 MutexAutoLock lock(mTableLock); 621 622 mWrittenOnce = false; 623 if (memoryOnly) { 624 // This should only be called in tests. 625 auto writeResult = WriteToDisk(); 626 if (NS_WARN_IF(writeResult.isErr())) { 627 gIgnoreDiskCache = true; 628 return; 629 } 630 } 631 if (mCurTableReferenced) { 632 // There should be no way for this assert to fail other than a user manually 633 // sending startupcache-invalidate messages through the Browser Toolbox. If 634 // something knowingly invalidates the cache, the event can be counted with 635 // mAllowedInvalidationsCount. 636 MOZ_DIAGNOSTIC_ASSERT( 637 xpc::IsInAutomation() || 638 // The allowed invalidations can grow faster than the old tables, so 639 // guard against incorrect unsigned subtraction. 640 mAllowedInvalidationsCount > mOldTables.Length() || 641 // Now perform the real check. 
642 mOldTables.Length() - mAllowedInvalidationsCount < 10, 643 "Startup cache invalidated too many times."); 644 mOldTables.AppendElement(std::move(mTable)); 645 mCurTableReferenced = false; 646 } else { 647 mTable.clear(); 648 } 649 mRequestedCount = 0; 650 if (!memoryOnly) { 651 mCacheData.reset(); 652 nsresult rv = mFile->Remove(false); 653 if (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND) { 654 gIgnoreDiskCache = true; 655 return; 656 } 657 } 658 gIgnoreDiskCache = false; 659 auto result = LoadArchive(); 660 if (NS_WARN_IF(result.isErr())) { 661 gIgnoreDiskCache = true; 662 } 663 } 664 665 void StartupCache::CountAllowedInvalidation() { mAllowedInvalidationsCount++; } 666 667 void StartupCache::MaybeInitShutdownWrite() { 668 if (mTimer) { 669 mTimer->Cancel(); 670 } 671 gShutdownInitiated = true; 672 673 MaybeWriteOffMainThread(); 674 } 675 676 void StartupCache::EnsureShutdownWriteComplete() { 677 MutexAutoLock lock(mTableLock); 678 // If we've already written or there's nothing to write, 679 // we don't need to do anything. This is the common case. 680 if (mWrittenOnce || (mCacheData.initialized() && !ShouldCompactCache())) { 681 return; 682 } 683 // Otherwise, ensure the write happens. The timer should have been cancelled 684 // already in MaybeInitShutdownWrite. 685 686 // We got the lock. Keep the following in sync with 687 // MaybeWriteOffMainThread: 688 WaitOnPrefetch(); 689 mDirty = true; 690 mCacheData.reset(); 691 // Most of this should be redundant given MaybeWriteOffMainThread should 692 // have run before now. 693 694 auto writeResult = WriteToDisk(); 695 (void)NS_WARN_IF(writeResult.isErr()); 696 // We've had the lock, and `WriteToDisk()` sets mWrittenOnce and mDirty 697 // when done, and checks for them when starting, so we don't need to do 698 // anything else. 
699 } 700 701 void StartupCache::IgnoreDiskCache() { 702 gIgnoreDiskCache = true; 703 if (gStartupCache) gStartupCache->InvalidateCache(); 704 } 705 706 bool StartupCache::GetIgnoreDiskCache() { return gIgnoreDiskCache; } 707 708 void StartupCache::WaitOnPrefetch() { 709 // This can't be called from within ThreadedPrefetch() 710 MonitorAutoLock lock(mPrefetchComplete); 711 while (mPrefetchInProgress) { 712 mPrefetchComplete.Wait(); 713 } 714 } 715 716 void StartupCache::ThreadedPrefetch(uint8_t* aStart, size_t aSize) { 717 // Always notify of completion, even if MMAP_FAULT_HANDLER_CATCH() 718 // early-returns. 719 auto notifyPrefetchComplete = MakeScopeExit([&] { 720 MonitorAutoLock lock(mPrefetchComplete); 721 mPrefetchInProgress = false; 722 mPrefetchComplete.NotifyAll(); 723 }); 724 725 // PrefetchMemory does madvise/equivalent, but doesn't access the memory 726 // pointed to by aStart 727 MMAP_FAULT_HANDLER_BEGIN_BUFFER(aStart, aSize) 728 PrefetchMemory(aStart, aSize); 729 MMAP_FAULT_HANDLER_CATCH() 730 } 731 732 // mTableLock must be held 733 bool StartupCache::ShouldCompactCache() { 734 // If we've requested less than 4/5 of the startup cache, then we should 735 // probably compact it down. This can happen quite easily after the first run, 736 // which seems to request quite a few more things than subsequent runs. 737 CheckedInt<uint32_t> threshold = CheckedInt<uint32_t>(mTable.count()) * 4 / 5; 738 MOZ_RELEASE_ASSERT(threshold.isValid(), "Runaway StartupCache size"); 739 return mRequestedCount < threshold.value(); 740 } 741 742 /* 743 * The write-thread is spawned on a timeout(which is reset with every write). 744 * This can avoid a slow shutdown. 
745 */ 746 void StartupCache::WriteTimeout(nsITimer* aTimer, void* aClosure) { 747 /* 748 * It is safe to use the pointer passed in aClosure to reference the 749 * StartupCache object because the timer's lifetime is tightly coupled to 750 * the lifetime of the StartupCache object; this timer is canceled in the 751 * StartupCache destructor, guaranteeing that this function runs if and only 752 * if the StartupCache object is valid. 753 */ 754 StartupCache* startupCacheObj = static_cast<StartupCache*>(aClosure); 755 startupCacheObj->MaybeWriteOffMainThread(); 756 } 757 758 /* 759 * See StartupCache::WriteTimeout above - this is just the non-static body. 760 */ 761 void StartupCache::MaybeWriteOffMainThread() { 762 { 763 MutexAutoLock lock(mTableLock); 764 if (mWrittenOnce || (mCacheData.initialized() && !ShouldCompactCache())) { 765 return; 766 } 767 } 768 // Keep this code in sync with EnsureShutdownWriteComplete. 769 WaitOnPrefetch(); 770 { 771 MutexAutoLock lock(mTableLock); 772 mDirty = true; 773 mCacheData.reset(); 774 } 775 776 RefPtr<StartupCache> self = this; 777 nsCOMPtr<nsIRunnable> runnable = 778 NS_NewRunnableFunction("StartupCache::Write", [self]() mutable { 779 MutexAutoLock lock(self->mTableLock); 780 auto result = self->WriteToDisk(); 781 (void)NS_WARN_IF(result.isErr()); 782 }); 783 NS_DispatchBackgroundTask(runnable.forget(), NS_DISPATCH_EVENT_MAY_BLOCK); 784 } 785 786 // We don't want to refcount StartupCache, so we'll just 787 // hold a ref to this and pass it to observerService instead. 
NS_IMPL_ISUPPORTS(StartupCacheListener, nsIObserver)

// Routes observer-service notifications to the StartupCache singleton:
// xpcom-shutdown, explicit cache invalidation, and locale changes.
nsresult StartupCacheListener::Observe(nsISupports* subject, const char* topic,
                                       const char16_t* data) {
  StartupCache* sc = StartupCache::GetSingleton();
  if (!sc) return NS_OK;

  if (strcmp(topic, NS_XPCOM_SHUTDOWN_OBSERVER_ID) == 0) {
    // Do not leave the thread running past xpcom shutdown
    sc->WaitOnPrefetch();
    StartupCache::gShutdownInitiated = true;
    // Note that we don't do anything special for the background write
    // task; we expect the threadpool to finish running any tasks already
    // posted to it prior to shutdown. FastShutdown will call
    // EnsureShutdownWriteComplete() to ensure any pending writes happen
    // in that case.
  } else if (strcmp(topic, "startupcache-invalidate") == 0) {
    // A data payload of u"memoryOnly" requests an in-memory-only
    // invalidation; anything else also drops the on-disk cache.
    sc->InvalidateCache(data && nsCRT::strcmp(data, u"memoryOnly") == 0);
  } else if (strcmp(topic, "intl:app-locales-changed") == 0) {
    // Live language switching invalidates the startup cache due to the history
    // sidebar retaining localized strings in its internal SQL query. This
    // should be a relatively rare event, but a user could do it an arbitrary
    // number of times.
    sc->CountAllowedInvalidation();
  }
  return NS_OK;
}

// In DEBUG builds, wraps aStream in a StartupCacheDebugOutputStream that
// checks for duplicate object references during serialization; in release
// builds it simply returns aStream itself (addref'd).
nsresult StartupCache::GetDebugObjectOutputStream(
    nsIObjectOutputStream* aStream, nsIObjectOutputStream** aOutStream) {
  NS_ENSURE_ARG_POINTER(aStream);
#ifdef DEBUG
  auto* stream = new StartupCacheDebugOutputStream(aStream, &mWriteObjectMap);
  NS_ADDREF(*aOutStream = stream);
#else
  NS_ADDREF(*aOutStream = aStream);
#endif

  return NS_OK;
}

// Re-arms the one-shot write timer without marking the table dirty — unlike
// ResetStartupWriteTimer() below, which is otherwise identical but sets
// mDirty. Used from the read path (GetBuffer).
nsresult StartupCache::ResetStartupWriteTimerCheckingReadCount() {
  nsresult rv = NS_OK;
  if (!mTimer)
    mTimer = NS_NewTimer();
  else
    rv = mTimer->Cancel();
  NS_ENSURE_SUCCESS(rv, rv);
  // Wait for the specified timeout, then write out the cache.
  mTimer->InitWithNamedFuncCallback(
      StartupCache::WriteTimeout, this, STARTUP_CACHE_WRITE_TIMEOUT * 1000,
      nsITimer::TYPE_ONE_SHOT, "StartupCache::WriteTimeout"_ns);
  return NS_OK;
}

// For test code only
nsresult StartupCache::ResetStartupWriteTimerAndLock() {
  MutexAutoLock lock(mTableLock);
  return ResetStartupWriteTimer();
}

// Marks the cache dirty and (re)starts the one-shot timer that triggers
// an off-main-thread write after STARTUP_CACHE_WRITE_TIMEOUT seconds.
nsresult StartupCache::ResetStartupWriteTimer() {
  mDirty = true;
  nsresult rv = NS_OK;
  if (!mTimer)
    mTimer = NS_NewTimer();
  else
    rv = mTimer->Cancel();
  NS_ENSURE_SUCCESS(rv, rv);
  // Wait for the specified timeout, then write out the cache.
  mTimer->InitWithNamedFuncCallback(
      StartupCache::WriteTimeout, this, STARTUP_CACHE_WRITE_TIMEOUT * 1000,
      nsITimer::TYPE_ONE_SHOT, "StartupCache::WriteTimeout"_ns);
  return NS_OK;
}

// Used only in tests:
bool StartupCache::StartupWriteComplete() {
  // Need to have written to disk and not added new things since;
  MutexAutoLock lock(mTableLock);
  return !mDirty && mWrittenOnce;
}

// StartupCacheDebugOutputStream implementation
#ifdef DEBUG
NS_IMPL_ISUPPORTS(StartupCacheDebugOutputStream, nsIObjectOutputStream,
                  nsIBinaryOutputStream, nsIOutputStream)

// Returns true if aObject may be serialized: singletons may appear any
// number of times, but any other object must not already be present in
// mObjectMap (i.e. must be referenced at most once per serialization).
bool StartupCacheDebugOutputStream::CheckReferences(nsISupports* aObject) {
  nsresult rv;

  nsCOMPtr<nsIClassInfo> classInfo = do_QueryInterface(aObject);
  if (!classInfo) {
    NS_ERROR("aObject must implement nsIClassInfo");
    return false;
  }

  uint32_t flags;
  rv = classInfo->GetFlags(&flags);
  NS_ENSURE_SUCCESS(rv, false);
  if (flags & nsIClassInfo::SINGLETON) return true;

  bool inserted = mObjectMap->EnsureInserted(aObject);
  if (!inserted) {
    NS_ERROR(
        "non-singleton aObject is referenced multiple times in this"
        "serialization, we don't support that.");
  }

  return inserted;
}

// nsIObjectOutputStream implementation
// Each Write* method validates the reference count via CheckReferences(),
// then forwards to the wrapped mBinaryStream.
nsresult StartupCacheDebugOutputStream::WriteObject(nsISupports* aObject,
                                                    bool aIsStrongRef) {
  nsCOMPtr<nsISupports> rootObject(do_QueryInterface(aObject));

  NS_ASSERTION(rootObject.get() == aObject,
               "bad call to WriteObject -- call WriteCompoundObject!");
  bool check = CheckReferences(aObject);
  NS_ENSURE_TRUE(check, NS_ERROR_FAILURE);
  return mBinaryStream->WriteObject(aObject, aIsStrongRef);
}

nsresult StartupCacheDebugOutputStream::WriteSingleRefObject(
    nsISupports* aObject) {
  nsCOMPtr<nsISupports> rootObject(do_QueryInterface(aObject));

  NS_ASSERTION(rootObject.get() == aObject,
               "bad call to WriteSingleRefObject -- call WriteCompoundObject!");
  bool check = CheckReferences(aObject);
  NS_ENSURE_TRUE(check, NS_ERROR_FAILURE);
  return mBinaryStream->WriteSingleRefObject(aObject);
}

nsresult StartupCacheDebugOutputStream::WriteCompoundObject(
    nsISupports* aObject, const nsIID& aIID, bool aIsStrongRef) {
  nsCOMPtr<nsISupports> rootObject(do_QueryInterface(aObject));

  nsCOMPtr<nsISupports> roundtrip;
  rootObject->QueryInterface(aIID, getter_AddRefs(roundtrip));
  NS_ASSERTION(roundtrip.get() == aObject,
               "bad aggregation or multiple inheritance detected by call to "
               "WriteCompoundObject!");

  bool check = CheckReferences(aObject);
  NS_ENSURE_TRUE(check, NS_ERROR_FAILURE);
  return mBinaryStream->WriteCompoundObject(aObject, aIID, aIsStrongRef);
}

// Pure pass-throughs to the wrapped stream; no reference checking needed.
nsresult StartupCacheDebugOutputStream::WriteID(nsID const& aID) {
  return mBinaryStream->WriteID(aID);
}

char* StartupCacheDebugOutputStream::GetBuffer(uint32_t aLength,
                                               uint32_t aAlignMask) {
  return mBinaryStream->GetBuffer(aLength, aAlignMask);
}

void StartupCacheDebugOutputStream::PutBuffer(char* aBuffer,
                                              uint32_t aLength) {
  mBinaryStream->PutBuffer(aBuffer, aLength);
}
#endif  // DEBUG

}  // namespace scache
}  // namespace mozilla