CacheFile.cpp (75741B)
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "CacheFile.h"

#include <algorithm>
#include <utility>

#include "CacheFileChunk.h"
#include "CacheFileInputStream.h"
#include "CacheFileOutputStream.h"
#include "CacheFileUtils.h"
#include "CacheIndex.h"
#include "CacheLog.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/glean/NetwerkCache2Metrics.h"
#include "mozilla/TelemetryHistogramEnums.h"
#include "nsComponentManagerUtils.h"
#include "nsICacheEntry.h"
#include "nsProxyRelease.h"
#include "nsThreadUtils.h"

// When CACHE_CHUNKS is defined we always cache unused chunks in mCacheChunks.
// When it is not defined, we always release the chunks ASAP, i.e. we cache
// unused chunks only when:
//  - CacheFile is memory-only
//  - CacheFile is still waiting for the handle
//  - the chunk is preloaded

// #define CACHE_CHUNKS

namespace mozilla::net {

using CacheFileUtils::CacheFileLock;

// Runnable that delivers CacheFileListener::OnFileReady(aResult, aIsNew) on
// the thread it is dispatched to.  Used when the file-open outcome is decided
// synchronously but the listener must still be notified asynchronously.
class NotifyCacheFileListenerEvent : public Runnable {
 public:
  NotifyCacheFileListenerEvent(CacheFileListener* aCallback, nsresult aResult,
                               bool aIsNew)
      : Runnable("net::NotifyCacheFileListenerEvent"),
        mCallback(aCallback),
        mRV(aResult),
        mIsNew(aIsNew) {
    LOG(
        ("NotifyCacheFileListenerEvent::NotifyCacheFileListenerEvent() "
         "[this=%p]",
         this));
  }

 protected:
  ~NotifyCacheFileListenerEvent() {
    LOG(
        ("NotifyCacheFileListenerEvent::~NotifyCacheFileListenerEvent() "
         "[this=%p]",
         this));
  }

 public:
  NS_IMETHOD Run() override {
    LOG(("NotifyCacheFileListenerEvent::Run() [this=%p]", this));

    mCallback->OnFileReady(mRV, mIsNew);
    return NS_OK;
  }

 protected:
  nsCOMPtr<CacheFileListener> mCallback;  // notified listener, kept alive
  nsresult mRV;                           // result forwarded to OnFileReady()
  bool mIsNew;                            // whether the entry is newly created
};

// Runnable that delivers CacheFileChunkListener::OnChunkAvailable() with a
// strong reference to the chunk, keeping it alive until the listener runs.
class NotifyChunkListenerEvent : public Runnable {
 public:
  NotifyChunkListenerEvent(CacheFileChunkListener* aCallback, nsresult aResult,
                           uint32_t aChunkIdx, CacheFileChunk* aChunk)
      : Runnable("net::NotifyChunkListenerEvent"),
        mCallback(aCallback),
        mRV(aResult),
        mChunkIdx(aChunkIdx),
        mChunk(aChunk) {
    LOG(("NotifyChunkListenerEvent::NotifyChunkListenerEvent() [this=%p]",
         this));
  }

 protected:
  ~NotifyChunkListenerEvent() {
    LOG(("NotifyChunkListenerEvent::~NotifyChunkListenerEvent() [this=%p]",
         this));
  }

 public:
  NS_IMETHOD Run() override {
    LOG(("NotifyChunkListenerEvent::Run() [this=%p]", this));

    mCallback->OnChunkAvailable(mRV, mChunkIdx, mChunk);
    return NS_OK;
  }

 protected:
  nsCOMPtr<CacheFileChunkListener> mCallback;  // notified listener
  nsresult mRV;                                // result for OnChunkAvailable()
  uint32_t mChunkIdx;                          // index of the chunk
  RefPtr<CacheFileChunk> mChunk;               // strong ref to the chunk
};
// CacheFileIOListener adapter handed to CacheFileIOManager::DoomFile().
// It forwards only the OnFileDoomed() notification to the wrapped
// CacheFileListener; every other IO callback is impossible for a doom
// request, so those paths deliberately crash.
class DoomFileHelper : public CacheFileIOListener {
 public:
  NS_DECL_THREADSAFE_ISUPPORTS

  explicit DoomFileHelper(CacheFileListener* aListener)
      : mListener(aListener) {}

  NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnFileOpened should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
                           nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnDataWritten should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
                        nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnDataRead should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override {
    // The listener is optional; dooming without a callback is allowed.
    if (mListener) mListener->OnFileDoomed(aResult);
    return NS_OK;
  }

  NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnEOFSet should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
                           nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnFileRenamed should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

 private:
  virtual ~DoomFileHelper() = default;

  nsCOMPtr<CacheFileListener> mListener;  // may be null (fire-and-forget doom)
};
NS_IMPL_ISUPPORTS(DoomFileHelper, CacheFileIOListener)

// CacheFile implements several listener interfaces; the interface map
// disambiguates nsISupports through CacheFileChunkListener.
NS_IMPL_ADDREF(CacheFile)
NS_IMPL_RELEASE(CacheFile)
NS_INTERFACE_MAP_BEGIN(CacheFile)
  NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
  NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
  NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileMetadataListener)
  NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports,
                                   mozilla::net::CacheFileChunkListener)
NS_INTERFACE_MAP_END

CacheFile::CacheFile() : mLock(new CacheFileLock()) {
  LOG(("CacheFile::CacheFile() [this=%p]", this));
}

CacheFile::~CacheFile() {
  LOG(("CacheFile::~CacheFile() [this=%p]", this));

  // Flush dirty metadata as a last resort; only meaningful for disk-backed
  // entries that reached a valid state and were not killed.
  MutexAutoLock lock(mLock->Lock());
  if (!mMemoryOnly && mReady && !mKill) {
    // mReady flag indicates we have metadata plus in a valid state.
    WriteMetadataIfNeededLocked(true);
  }
}

// Initializes the entry and (for disk entries) kicks off the asynchronous
// file open.  aCallback is notified via OnFileReady() once the outcome is
// known; it must be null for memory-only and createNew entries, whose
// metadata is ready immediately.  On several failure paths the entry
// silently degrades to memory-only mode instead of failing.
nsresult CacheFile::Init(const nsACString& aKey, bool aCreateNew,
                         bool aMemoryOnly, bool aSkipSizeCheck, bool aPriority,
                         bool aPinned, CacheFileListener* aCallback)
    MOZ_NO_THREAD_SAFETY_ANALYSIS {
  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mHandle);

  // Pinning requires a disk file; the two flags are mutually exclusive.
  MOZ_ASSERT(!(aMemoryOnly && aPinned));

  nsresult rv;

  mKey = aKey;
  mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly;
  mSkipSizeCheck = aSkipSizeCheck;
  mPriority = aPriority;
  mPinned = aPinned;

  // Some consumers (at least nsHTTPCompressConv) assume that Read() can read
  // such amount of data that was announced by Available().
  // CacheFileInputStream::Available() uses also preloaded chunks to compute
  // number of available bytes in the input stream, so we have to make sure the
  // preloadChunkCount won't change during CacheFile's lifetime since otherwise
  // we could potentially release some cached chunks that was used to calculate
  // available bytes but would not be available later during call to
  // CacheFileInputStream::Read().
  mPreloadChunkCount = CacheObserver::PreloadChunkCount();

  LOG(
      ("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, "
       "priority=%d, listener=%p]",
       this, mKey.get(), aCreateNew, aMemoryOnly, aPriority, aCallback));

  if (mMemoryOnly) {
    MOZ_ASSERT(!aCallback);

    // Memory-only entries never touch disk: create empty metadata and report
    // ready synchronously.
    mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, false, mKey,
                                      WrapNotNull(mLock));
    mReady = true;
    mDataSize = mMetadata->Offset();
    return NS_OK;
  }
  uint32_t flags;
  if (aCreateNew) {
    MOZ_ASSERT(!aCallback);
    flags = CacheFileIOManager::CREATE_NEW;

    // make sure we can use this entry immediately
    mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey,
                                      WrapNotNull(mLock));
    mReady = true;
    mDataSize = mMetadata->Offset();
  } else {
    flags = CacheFileIOManager::CREATE;
  }

  if (mPriority) {
    flags |= CacheFileIOManager::PRIORITY;
  }

  if (mPinned) {
    flags |= CacheFileIOManager::PINNED;
  }

  mOpeningFile = true;
  mListener = aCallback;
  rv = CacheFileIOManager::OpenFile(mKey, flags, this);
  if (NS_FAILED(rv)) {
    mListener = nullptr;
    mOpeningFile = false;

    if (mPinned) {
      // A pinned entry must not fall back to memory-only; fail instead.
      LOG(
          ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
           "but we want to pin, fail the file opening. [this=%p]",
           this));
      return NS_ERROR_NOT_AVAILABLE;
    }

    if (aCreateNew) {
      NS_WARNING("Forcing memory-only entry since OpenFile failed");
      LOG(
          ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
           "synchronously. We can continue in memory-only mode since "
           "aCreateNew == true. [this=%p]",
           this));

      // Metadata was already created above for createNew entries.
      mMemoryOnly = true;
    } else if (rv == NS_ERROR_NOT_INITIALIZED) {
      NS_WARNING(
          "Forcing memory-only entry since CacheIOManager isn't "
          "initialized.");
      LOG(
          ("CacheFile::Init() - CacheFileIOManager isn't initialized, "
           "initializing entry as memory-only. [this=%p]",
           this));

      mMemoryOnly = true;
      mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey,
                                        WrapNotNull(mLock));
      mReady = true;
      mDataSize = mMetadata->Offset();

      // The caller expects an async OnFileReady(); dispatch it instead of
      // calling back re-entrantly.
      RefPtr<NotifyCacheFileListenerEvent> ev;
      ev = new NotifyCacheFileListenerEvent(aCallback, NS_OK, true);
      rv = NS_DispatchToCurrentThread(ev);
      NS_ENSURE_SUCCESS(rv, rv);
    } else {
      NS_ENSURE_SUCCESS(rv, rv);
    }
  }

  return NS_OK;
}
// Associates a dictionary entry with this file and propagates it to an
// already-open (non-alt-data) output stream.
void CacheFile::SetDictionary(DictionaryCacheEntry* aDict) {
  CacheFileAutoLock lock(this);
  mDict = aDict;
  if (OutputStreamExists(false)) {
    mOutput->SetDictionary(aDict);
  }
}

// Returns the entry key (hashable cache key) under the lock.
void CacheFile::Key(nsACString& aKey) {
  CacheFileAutoLock lock(this);
  aKey = mKey;
}

// Returns whether this entry is pinned (app-protected from eviction).
bool CacheFile::IsPinned() {
  CacheFileAutoLock lock(this);
  return mPinned;
}

// Called when reading a chunk's data from disk has finished.  Cleans up
// discarded chunks, records failures on the file, and notifies any listeners
// waiting for this chunk index.
nsresult CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) {
  CacheFileAutoLock lock(this);

  nsresult rv;

  uint32_t index = aChunk->Index();

  LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32
       ", chunk=%p, idx=%u]",
       this, static_cast<uint32_t>(aResult), aChunk, index));

  if (aChunk->mDiscardedChunk) {
    // We discard only unused chunks, so it must be still unused when reading
    // data finishes.
    // NOTE(review): the refcount of 2 presumably accounts for the caller's
    // reference plus the one in mDiscardedChunks — confirm against
    // CacheFileChunk ownership rules.
    MOZ_ASSERT(aChunk->mRefCnt == 2);
    aChunk->mActiveChunk = false;
    // The chunk's back-reference to this file must be released outside the
    // lock to avoid re-entrancy during destruction.
    ReleaseOutsideLock(
        RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

    DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
    MOZ_ASSERT(removed);
    return NS_OK;
  }

  if (NS_FAILED(aResult)) {
    SetError(aResult);
  }

  if (HaveChunkListeners(index)) {
    rv = NotifyChunkListeners(index, aResult, aChunk);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
// Called when writing a chunk's data to disk has finished.  Updates the
// metadata hash for clean chunks, re-queues dirty chunks for another
// deactivation pass, and caches or releases chunks that became unused.
nsresult CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) {
  // In case the chunk was reused, made dirty and released between calls to
  // CacheFileChunk::Write() and CacheFile::OnChunkWritten(), we must write
  // the chunk to the disk again. When the chunk is unused and is dirty simply
  // addref and release (outside the lock) the chunk which ensures that
  // CacheFile::DeactivateChunk() will be called again.
  RefPtr<CacheFileChunk> deactivateChunkAgain;

  CacheFileAutoLock lock(this);

  nsresult rv;

  LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08" PRIx32
       ", chunk=%p, idx=%u]",
       this, static_cast<uint32_t>(aResult), aChunk, aChunk->Index()));

  // Chunk writes only happen for disk-backed entries with an open handle.
  MOZ_ASSERT(!mMemoryOnly);
  MOZ_ASSERT(!mOpeningFile);
  MOZ_ASSERT(mHandle);

  if (aChunk->mDiscardedChunk) {
    // We discard only unused chunks, so it must be still unused when writing
    // data finishes.
    MOZ_ASSERT(aChunk->mRefCnt == 2);
    aChunk->mActiveChunk = false;
    // Release the chunk's back-reference outside the lock (re-entrancy).
    ReleaseOutsideLock(
        RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

    DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
    MOZ_ASSERT(removed);
    return NS_OK;
  }

  if (NS_FAILED(aResult)) {
    SetError(aResult);
  }

  if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
    // update hash value in metadata
    mMetadata->SetHash(aChunk->Index(), aChunk->Hash());
  }

  // notify listeners if there is any
  if (HaveChunkListeners(aChunk->Index())) {
    // don't release the chunk since there are some listeners queued
    rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk);
    if (NS_SUCCEEDED(rv)) {
      MOZ_ASSERT(aChunk->mRefCnt != 2);
      return NS_OK;
    }
  }

  if (aChunk->mRefCnt != 2) {
    LOG(
        ("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p,"
         " refcnt=%" PRIuPTR "]",
         this, aChunk, aChunk->mRefCnt.get()));

    return NS_OK;
  }

  if (aChunk->IsDirty()) {
    LOG(
        ("CacheFile::OnChunkWritten() - Unused chunk is dirty. We must go "
         "through deactivation again. [this=%p, chunk=%p]",
         this, aChunk));

    // Holding this ref past the lock triggers DeactivateChunk() once more
    // (see the comment at the top of this method).
    deactivateChunkAgain = aChunk;
    return NS_OK;
  }

  bool keepChunk = false;
  if (NS_SUCCEEDED(aResult)) {
    keepChunk = ShouldCacheChunk(aChunk->Index());
    LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
         keepChunk ? "Caching" : "Releasing", this, aChunk));
  } else {
    LOG(
        ("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
         "chunk=%p]",
         this, aChunk));
  }

  RemoveChunkInternal(aChunk, keepChunk);

  WriteMetadataIfNeededLocked();

  return NS_OK;
}
// CacheFile registers as a chunk listener only for the read/write completion
// callbacks above; OnChunkAvailable/OnChunkUpdated must never reach it.
nsresult CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
                                     CacheFileChunk* aChunk) {
  MOZ_CRASH("CacheFile::OnChunkAvailable should not be called!");
  return NS_ERROR_UNEXPECTED;
}

nsresult CacheFile::OnChunkUpdated(CacheFileChunk* aChunk) {
  MOZ_CRASH("CacheFile::OnChunkUpdated should not be called!");
  return NS_ERROR_UNEXPECTED;
}

// Completion of the asynchronous CacheFileIOManager::OpenFile() started in
// Init().  Handles four outcomes: the entry became memory-only meanwhile,
// the open failed (possibly degrading to memory-only), the open succeeded
// for a createNew entry (flush cached chunks), or the open succeeded for an
// existing entry (start reading metadata).  A doom requested while the open
// was in flight is performed by the AutoFailDoomListener on exit.
nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
  // Using an 'auto' class to perform doom or fail the listener
  // outside the CacheFile's lock.
  class AutoFailDoomListener {
   public:
    explicit AutoFailDoomListener(CacheFileHandle* aHandle)
        : mHandle(aHandle), mAlreadyDoomed(false) {}
    ~AutoFailDoomListener() {
      if (!mListener) return;

      if (mHandle) {
        if (mAlreadyDoomed) {
          mListener->OnFileDoomed(mHandle, NS_OK);
        } else {
          CacheFileIOManager::DoomFile(mHandle, mListener);
        }
      } else {
        mListener->OnFileDoomed(nullptr, NS_ERROR_NOT_AVAILABLE);
      }
    }

    CacheFileHandle* mHandle;
    nsCOMPtr<CacheFileIOListener> mListener;
    bool mAlreadyDoomed;
  } autoDoom(aHandle);

  RefPtr<CacheFileMetadata> metadata;
  nsCOMPtr<CacheFileListener> listener;
  bool isNew = false;
  nsresult retval = NS_OK;

  {
    CacheFileAutoLock lock(this);

    MOZ_ASSERT(mOpeningFile);
    MOZ_ASSERT((NS_SUCCEEDED(aResult) && aHandle) ||
               (NS_FAILED(aResult) && !aHandle));
    MOZ_ASSERT((mListener && !mMetadata) ||  // !createNew
               (!mListener && mMetadata));   // createNew
    MOZ_ASSERT(!mMemoryOnly || mMetadata);  // memory-only was set on new entry

    LOG(("CacheFile::OnFileOpened() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
         this, static_cast<uint32_t>(aResult), aHandle));

    mOpeningFile = false;

    if (mDict && OutputStreamExists(false)) {
      mOutput->SetDictionary(mDict);
      // leave mDict set for hash accumulation
    }

    // Take over any doom requested while the open was pending; the helper
    // executes it after the lock is released.
    autoDoom.mListener.swap(mDoomAfterOpenListener);

    if (mMemoryOnly) {
      // We can be here only in case the entry was initilized as createNew and
      // SetMemoryOnly() was called.

      // Just don't store the handle into mHandle and exit
      autoDoom.mAlreadyDoomed = true;
      return NS_OK;
    }

    if (NS_FAILED(aResult)) {
      if (mMetadata) {
        // This entry was initialized as createNew, just switch to memory-only
        // mode.
        NS_WARNING("Forcing memory-only entry since OpenFile failed");
        LOG(
            ("CacheFile::OnFileOpened() - CacheFileIOManager::OpenFile() "
             "failed asynchronously. We can continue in memory-only mode since "
             "aCreateNew == true. [this=%p]",
             this));

        mMemoryOnly = true;
        return NS_OK;
      }

      if (aResult == NS_ERROR_FILE_INVALID_PATH) {
        // CacheFileIOManager doesn't have mCacheDirectory, switch to
        // memory-only mode.
        NS_WARNING(
            "Forcing memory-only entry since CacheFileIOManager doesn't "
            "have mCacheDirectory.");
        LOG(
            ("CacheFile::OnFileOpened() - CacheFileIOManager doesn't have "
             "mCacheDirectory, initializing entry as memory-only. [this=%p]",
             this));

        mMemoryOnly = true;
        mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey,
                                          WrapNotNull(mLock));
        mReady = true;
        mDataSize = mMetadata->Offset();

        isNew = true;
        retval = NS_OK;
      } else {
        // CacheFileIOManager::OpenFile() failed for another reason.
        isNew = false;
        retval = aResult;
      }

      mListener.swap(listener);
    } else {
      mHandle = aHandle;
      if (NS_FAILED(mStatus)) {
        // The entry failed while the open was in flight; doom the fresh
        // handle immediately (no callback needed).
        CacheFileIOManager::DoomFile(mHandle, nullptr);
      }

      if (mMetadata) {
        InitIndexEntry();

        // The entry was initialized as createNew, don't try to read metadata.
        mMetadata->SetHandle(mHandle);

        // Write all cached chunks, otherwise they may stay unwritten.
        for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
          uint32_t idx = iter.Key();
          RefPtr<CacheFileChunk>& chunk = iter.Data();

          LOG(("CacheFile::OnFileOpened() - write [this=%p, idx=%u, chunk=%p]",
               this, idx, chunk.get()));

          mChunks.InsertOrUpdate(idx, RefPtr{chunk});
          chunk->mFile = this;
          chunk->mActiveChunk = true;

          MOZ_ASSERT(chunk->IsReady());

          // This would be cleaner if we had an nsRefPtr constructor that took
          // a RefPtr<Derived>.
          ReleaseOutsideLock(std::move(chunk));

          iter.Remove();
        }

        return NS_OK;
      }
    }
    if (listener) {
      lock.Unlock();
      listener->OnFileReady(retval, isNew);
      return NS_OK;
    }

    MOZ_ASSERT(NS_SUCCEEDED(aResult));
    MOZ_ASSERT(!mMetadata);
    MOZ_ASSERT(mListener);

    // mMetaData is protected by a lock, but ReadMetaData has to be called
    // without the lock. Alternatively we could make a
    // "ReadMetaDataLocked", and temporarily unlock to call OnFileReady
    metadata = mMetadata =
        new CacheFileMetadata(mHandle, mKey, WrapNotNull(mLock));
  }
  metadata->ReadMetadata(this);
  return NS_OK;
}
// Raw data IO callbacks go to the chunk objects, never to CacheFile itself.
nsresult CacheFile::OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
                                  nsresult aResult) {
  MOZ_CRASH("CacheFile::OnDataWritten should not be called!");
  return NS_ERROR_UNEXPECTED;
}

nsresult CacheFile::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
                               nsresult aResult) {
  MOZ_CRASH("CacheFile::OnDataRead should not be called!");
  return NS_ERROR_UNEXPECTED;
}

// Completion of CacheFileMetadata::ReadMetadata() started in OnFileOpened().
// Validates alt-data info (resetting the metadata when it is corrupt),
// preloads the first chunks, and finally reports OnFileReady() to the
// listener outside the lock.
nsresult CacheFile::OnMetadataRead(nsresult aResult) {
  nsCOMPtr<CacheFileListener> listener;
  bool isNew = false;
  {
    CacheFileAutoLock lock(this);
    MOZ_ASSERT(mListener);

    LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", this,
         static_cast<uint32_t>(aResult)));

    if (NS_SUCCEEDED(aResult)) {
      mPinned = mMetadata->Pinned();
      mReady = true;
      mDataSize = mMetadata->Offset();
      if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
        // No data and no elements: treat as a brand new entry.
        isNew = true;
        mMetadata->MarkDirty();
      } else {
        const char* altData =
            mMetadata->GetElement(CacheFileUtils::kAltDataKey);
        if (altData && (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
                           altData, &mAltDataOffset, &mAltDataType)) ||
                       (mAltDataOffset > mDataSize))) {
          // alt-metadata cannot be parsed or alt-data offset is invalid
          mMetadata->InitEmptyMetadata();
          isNew = true;
          mAltDataOffset = -1;
          mAltDataType.Truncate();
          mDataSize = 0;
        } else {
          PreloadChunks(0);
        }
      }

      InitIndexEntry();
    }

    mListener.swap(listener);
  }
  // Notify outside the lock to avoid re-entrancy into CacheFile.
  listener->OnFileReady(aResult, isNew);
  return NS_OK;
}

// Completion of an asynchronous metadata write.  When the entry is fully
// idle (no streams, no chunks) and clean, the NSPR file handle is released.
nsresult CacheFile::OnMetadataWritten(nsresult aResult) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::OnMetadataWritten() [this=%p, rv=0x%08" PRIx32 "]", this,
       static_cast<uint32_t>(aResult)));

  MOZ_ASSERT(mWritingMetadata);
  mWritingMetadata = false;

  MOZ_ASSERT(!mMemoryOnly);
  MOZ_ASSERT(!mOpeningFile);

  if (NS_WARN_IF(NS_FAILED(aResult))) {
    // TODO close streams with an error ???
    SetError(aResult);
  }

  // Still in use — keep the handle open.
  if (mOutput || mInputs.Length() || mChunks.Count()) return NS_OK;

  // Metadata became dirty again while this write was in flight.
  if (IsDirty()) WriteMetadataIfNeededLocked();

  if (!mWritingMetadata) {
    LOG(("CacheFile::OnMetadataWritten() - Releasing file handle [this=%p]",
         this));
    CacheFileIOManager::ReleaseNSPRHandle(mHandle);
  }

  return NS_OK;
}
// Completion of a DoomFile request issued with this CacheFile as the
// listener; forwards the result to the stored CacheFileListener outside the
// lock.
nsresult CacheFile::OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) {
  nsCOMPtr<CacheFileListener> listener;

  {
    CacheFileAutoLock lock(this);

    MOZ_ASSERT(mListener);

    LOG(("CacheFile::OnFileDoomed() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
         this, static_cast<uint32_t>(aResult), aHandle));

    mListener.swap(listener);
  }

  listener->OnFileDoomed(aResult);
  return NS_OK;
}

// EOF/rename completions are handled by the streams/IO manager, never here.
nsresult CacheFile::OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) {
  MOZ_CRASH("CacheFile::OnEOFSet should not be called!");
  return NS_ERROR_UNEXPECTED;
}

nsresult CacheFile::OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) {
  MOZ_CRASH("CacheFile::OnFileRenamed should not be called!");
  return NS_ERROR_UNEXPECTED;
}

// Returns whether this entry was marked for kill; used by IO callbacks to
// bail out early.
bool CacheFile::IsKilled() {
  bool killed = mKill;
  if (killed) {
    LOG(("CacheFile is killed, this=%p", this));
  }

  return killed;
}

// Creates a new input stream over the entry's (non-alternative) data.  Fails
// when the entry is not ready or is in a failed (doomed) state.
nsresult CacheFile::OpenInputStream(nsICacheEntry* aEntryHandle,
                                    nsIInputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  if (!mReady) {
    LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenInputStream() - CacheFile is in a failure state "
         "[this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // Don't allow opening the input stream when this CacheFile is in
    // a failed state. This is the only way to protect consumers correctly
    // from reading a broken entry. When the file is in the failed state,
    // it's also doomed, so reopening the entry won't make any difference -
    // data will still be inaccessible anymore. Note that for just doomed
    // files, we must allow reading the data.
    return mStatus;
  }

  // Once we open input stream we no longer allow preloading of chunks without
  // input stream, i.e. we will no longer keep first few chunks preloaded when
  // the last input stream is closed.
  mPreloadWithoutInputStreams = false;

  CacheFileInputStream* input =
      new CacheFileInputStream(this, aEntryHandle, false);
  LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
       input, this));

  // mInputs holds a raw pointer; the explicit NS_ADDREF keeps the stream
  // alive while it is registered here.
  mInputs.AppendElement(input);
  NS_ADDREF(input);

  mDataAccessed = true;
  *_retval = do_AddRef(input).take();
  return NS_OK;
}
// Creates an input stream over the entry's alternative data.  Fails when the
// entry is not ready, has no alt-data, is in a failed state, or the stored
// alt-data type does not match the requested one.
nsresult CacheFile::OpenAlternativeInputStream(nsICacheEntry* aEntryHandle,
                                               const char* aAltDataType,
                                               nsIInputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  if (NS_WARN_IF(!mReady)) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - CacheFile is not ready "
         "[this=%p]",
         this));
    return NS_ERROR_NOT_AVAILABLE;
  }

  // mAltDataOffset == -1 means no alternative data is stored.
  if (mAltDataOffset == -1) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - Alternative data is not "
         "available [this=%p]",
         this));
    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - CacheFile is in a failure "
         "state [this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // Don't allow opening the input stream when this CacheFile is in
    // a failed state. This is the only way to protect consumers correctly
    // from reading a broken entry. When the file is in the failed state,
    // it's also doomed, so reopening the entry won't make any difference -
    // data will still be inaccessible anymore. Note that for just doomed
    // files, we must allow reading the data.
    return mStatus;
  }

  if (mAltDataType != aAltDataType) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - Alternative data is of a "
         "different type than requested [this=%p, availableType=%s, "
         "requestedType=%s]",
         this, mAltDataType.get(), aAltDataType));
    return NS_ERROR_NOT_AVAILABLE;
  }

  // Once we open input stream we no longer allow preloading of chunks without
  // input stream, i.e. we will no longer keep first few chunks preloaded when
  // the last input stream is closed.
  mPreloadWithoutInputStreams = false;

  // `true` marks this stream as reading the alternative data range.
  CacheFileInputStream* input =
      new CacheFileInputStream(this, aEntryHandle, true);

  LOG(
      ("CacheFile::OpenAlternativeInputStream() - Creating new input stream %p "
       "[this=%p]",
       input, this));

  // mInputs holds a raw pointer; NS_ADDREF keeps the stream alive while
  // registered.
  mInputs.AppendElement(input);
  NS_ADDREF(input);

  mDataAccessed = true;
  *_retval = do_AddRef(input).take();

  return NS_OK;
}
// Creates the (single) output stream for the entry's main data.  Any stored
// alternative data is truncated away first, since writing new main data
// invalidates it.  Fails when not ready, already writing, in a failed state,
// or while an alt-data input stream is open.
nsresult CacheFile::OpenOutputStream(CacheOutputCloseListener* aCloseListener,
                                     nsIOutputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  nsresult rv;

  if (!mReady) {
    LOG(("CacheFile::OpenOutputStream() - CacheFile is not ready [this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  // Only one output stream may exist at a time.
  if (mOutput) {
    LOG(
        ("CacheFile::OpenOutputStream() - We already have output stream %p "
         "[this=%p]",
         mOutput, this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenOutputStream() - CacheFile is in a failure state "
         "[this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // The CacheFile is already doomed. It make no sense to allow to write any
    // data to such entry.
    return mStatus;
  }

  // Fail if there is any input stream opened for alternative data
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    if (mInputs[i]->IsAlternativeData()) {
      return NS_ERROR_NOT_AVAILABLE;
    }
  }

  if (mAltDataOffset != -1) {
    // Remove alt-data
    rv = Truncate(mAltDataOffset);
    if (NS_FAILED(rv)) {
      LOG(
          ("CacheFile::OpenOutputStream() - Truncating alt-data failed "
           "[rv=0x%08" PRIx32 "]",
           static_cast<uint32_t>(rv)));
      return rv;
    }
    SetAltMetadata(nullptr);
    mAltDataOffset = -1;
    mAltDataType.Truncate();
  }

  // Once we open output stream we no longer allow preloading of chunks without
  // input stream. There is no reason to believe that some input stream will be
  // opened soon. Otherwise we would cache unused chunks of all newly created
  // entries until the CacheFile is destroyed.
  mPreloadWithoutInputStreams = false;

  mOutput = new CacheFileOutputStream(this, aCloseListener, false);

  LOG(
      ("CacheFile::OpenOutputStream() - Creating new output stream %p "
       "[this=%p]",
       mOutput, this));

  // Propagate a previously-set dictionary entry to the new stream.
  if (mDict) {
    mOutput->SetDictionary(mDict);
  }

  mDataAccessed = true;
  *_retval = do_AddRef(mOutput).take();
  return NS_OK;
}
// Creates the output stream for alternative data, truncating any previous
// alt-data and recording the new type/offset in the metadata.  Fails under
// the same conditions as OpenOutputStream().
nsresult CacheFile::OpenAlternativeOutputStream(
    CacheOutputCloseListener* aCloseListener, const char* aAltDataType,
    nsIAsyncOutputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  if (!mReady) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - CacheFile is not ready "
         "[this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  // Only one output stream may exist at a time.
  if (mOutput) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - We already have output "
         "stream %p [this=%p]",
         mOutput, this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - CacheFile is in a failure "
         "state [this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // The CacheFile is already doomed. It make no sense to allow to write any
    // data to such entry.
    return mStatus;
  }

  // Fail if there is any input stream opened for alternative data
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    if (mInputs[i]->IsAlternativeData()) {
      return NS_ERROR_NOT_AVAILABLE;
    }
  }

  nsresult rv;

  if (mAltDataOffset != -1) {
    // Truncate old alt-data
    rv = Truncate(mAltDataOffset);
    if (NS_FAILED(rv)) {
      LOG(
          ("CacheFile::OpenAlternativeOutputStream() - Truncating old alt-data "
           "failed [rv=0x%08" PRIx32 "]",
           static_cast<uint32_t>(rv)));
      return rv;
    }
  } else {
    // First alt-data for this entry: it starts right after the main data.
    mAltDataOffset = mDataSize;
  }

  nsAutoCString altMetadata;
  CacheFileUtils::BuildAlternativeDataInfo(aAltDataType, mAltDataOffset,
                                           altMetadata);
  rv = SetAltMetadata(altMetadata.get());
  if (NS_FAILED(rv)) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - Set Metadata for alt-data"
         "failed [rv=0x%08" PRIx32 "]",
         static_cast<uint32_t>(rv)));
    return rv;
  }

  // Once we open output stream we no longer allow preloading of chunks without
  // input stream. There is no reason to believe that some input stream will be
  // opened soon. Otherwise we would cache unused chunks of all newly created
  // entries until the CacheFile is destroyed.
  mPreloadWithoutInputStreams = false;

  // `true` marks this stream as writing the alternative data range.
  mOutput = new CacheFileOutputStream(this, aCloseListener, true);

  LOG(
      ("CacheFile::OpenAlternativeOutputStream() - Creating new output stream "
       "%p [this=%p]",
       mOutput, this));

  mDataAccessed = true;
  mAltDataType = aAltDataType;
  *_retval = do_AddRef(mOutput).take();
  return NS_OK;
}
// Switches this entry to memory-only mode.  Only allowed before any data has
// been accessed; a pending file open is handled later in OnFileOpened(),
// which simply never stores the handle for a memory-only entry.
nsresult CacheFile::SetMemoryOnly() {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetMemoryOnly() mMemoryOnly=%d [this=%p]", mMemoryOnly,
       this));

  if (mMemoryOnly) return NS_OK;

  MOZ_ASSERT(mReady);

  // Release-build fallback for the assertion above.
  if (!mReady) {
    LOG(("CacheFile::SetMemoryOnly() - CacheFile is not ready [this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (mDataAccessed) {
    LOG(("CacheFile::SetMemoryOnly() - Data was already accessed [this=%p]",
         this));
    return NS_ERROR_NOT_AVAILABLE;
  }

  // TODO what to do when this isn't a new entry and has an existing metadata???
  mMemoryOnly = true;
  return NS_OK;
}
1037 mMemoryOnly = true; 1038 return NS_OK; 1039 } 1040 1041 nsresult CacheFile::Doom(CacheFileListener* aCallback) { 1042 LOG(("CacheFile::Doom() [this=%p, listener=%p]", this, aCallback)); 1043 1044 CacheFileAutoLock lock(this); 1045 1046 return DoomLocked(aCallback); 1047 } 1048 1049 nsresult CacheFile::DoomLocked(CacheFileListener* aCallback) { 1050 AssertOwnsLock(); 1051 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile); 1052 1053 LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback)); 1054 1055 nsresult rv = NS_OK; 1056 1057 if (mMemoryOnly) { 1058 return NS_ERROR_FILE_NOT_FOUND; 1059 } 1060 1061 if (mHandle && mHandle->IsDoomed()) { 1062 return NS_ERROR_FILE_NOT_FOUND; 1063 } 1064 1065 nsCOMPtr<CacheFileIOListener> listener; 1066 if (aCallback || !mHandle) { 1067 listener = new DoomFileHelper(aCallback); 1068 } 1069 if (mHandle) { 1070 rv = CacheFileIOManager::DoomFile(mHandle, listener); 1071 } else if (mOpeningFile) { 1072 mDoomAfterOpenListener = listener; 1073 } 1074 1075 return rv; 1076 } 1077 1078 nsresult CacheFile::ThrowMemoryCachedData() { 1079 CacheFileAutoLock lock(this); 1080 1081 LOG(("CacheFile::ThrowMemoryCachedData() [this=%p]", this)); 1082 1083 if (mMemoryOnly) { 1084 // This method should not be called when the CacheFile was initialized as 1085 // memory-only, but it can be called when CacheFile end up as memory-only 1086 // due to e.g. IO failure since CacheEntry doesn't know it. 1087 LOG( 1088 ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the " 1089 "entry is memory-only. [this=%p]", 1090 this)); 1091 1092 return NS_ERROR_NOT_AVAILABLE; 1093 } 1094 1095 if (mOpeningFile) { 1096 // mayhemer, note: we shouldn't get here, since CacheEntry prevents loading 1097 // entries from being purged. 

    LOG(
        ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
         "entry is still opening the file [this=%p]",
         this));

    return NS_ERROR_ABORT;
  }

  // We cannot release all cached chunks since we need to keep preloaded chunks
  // in memory. See initialization of mPreloadChunkCount for explanation.
  CleanUpCachedChunks();

  return NS_OK;
}

// Returns a heap-allocated copy of the metadata element stored under aKey, or
// NS_ERROR_NOT_AVAILABLE when no such element exists.
nsresult CacheFile::GetElement(const char* aKey, char** _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  const char* value;
  value = mMetadata->GetElement(aKey);
  if (!value) return NS_ERROR_NOT_AVAILABLE;

  *_retval = NS_xstrdup(value);
  return NS_OK;
}

// Stores a metadata element and schedules a metadata write. The alt-data key
// is reserved for internal use and rejected here (see SetAltMetadata()).
nsresult CacheFile::SetElement(const char* aKey, const char* aValue) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetElement() this=%p", this));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  if (!strcmp(aKey, CacheFileUtils::kAltDataKey)) {
    NS_ERROR(
        "alt-data element is reserved for internal use and must not be "
        "changed via CacheFile::SetElement()");
    return NS_ERROR_FAILURE;
  }

  PostWriteTimer();
  return mMetadata->SetElement(aKey, aValue);
}

// Lets aVisitor walk the stored metadata elements.
nsresult CacheFile::VisitMetaData(nsICacheEntryMetaDataVisitor* aVisitor) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  MOZ_ASSERT(mReady);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  mMetadata->Visit(aVisitor);
  return NS_OK;
}

// Reports the size of the metadata elements block.
nsresult CacheFile::ElementsSize(uint32_t* _retval) {
  CacheFileAutoLock lock(this);

  if (!mMetadata) return NS_ERROR_NOT_AVAILABLE;

  *_retval = mMetadata->ElementsSize();
  return NS_OK;
}

// Updates the expiration time in the metadata and schedules a metadata write.
nsresult CacheFile::SetExpirationTime(uint32_t aExpirationTime) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetExpirationTime() this=%p, expiration=%u", this,
       aExpirationTime));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();
  mMetadata->SetExpirationTime(aExpirationTime);
  return NS_OK;
}

nsresult CacheFile::GetExpirationTime(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  *_retval = mMetadata->GetExpirationTime();
  return NS_OK;
}

// Updates the frecency in the metadata and, for a live disk entry, also in
// the cache index so the two stay in sync.
nsresult CacheFile::SetFrecency(uint32_t aFrecency) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetFrecency() this=%p, frecency=%u", this, aFrecency));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr,
                                         nullptr, nullptr);
  }

  mMetadata->SetFrecency(aFrecency);
  return NS_OK;
}

nsresult CacheFile::GetFrecency(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
  *_retval = mMetadata->GetFrecency();
  return NS_OK;
}

// Stores the network response start/stop times as metadata elements and
// mirrors clamped 16-bit copies into the cache index (see continuation).
nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime,
                                    uint64_t aOnStopTime) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64
       ", aOnStopTime=%" PRIu64 "",
       this, aOnStartTime, aOnStopTime));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  nsAutoCString onStartTime;
  onStartTime.AppendInt(aOnStartTime);
  nsresult rv =
      mMetadata->SetElement("net-response-time-onstart", onStartTime.get());
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  nsAutoCString
onStopTime;
  onStopTime.AppendInt(aOnStopTime);
  rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get());
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  // The index entry only holds 16-bit times, so clamp both values to
  // kIndexTimeOutOfBound before updating it.
  uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound
                               ? aOnStartTime
                               : kIndexTimeOutOfBound;
  uint16_t onStopTime16 =
      aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound;

  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(
        mHandle, nullptr, nullptr, &onStartTime16, &onStopTime16, nullptr);
  }
  return NS_OK;
}

// Reads back the value stored by SetNetworkTimes(); fails when the element
// is missing.
nsresult CacheFile::GetOnStartTime(uint64_t* _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mMetadata);
  const char* onStartTimeStr =
      mMetadata->GetElement("net-response-time-onstart");
  if (!onStartTimeStr) {
    return NS_ERROR_NOT_AVAILABLE;
  }
  nsresult rv;
  *_retval = nsDependentCString(onStartTimeStr).ToInteger64(&rv);
  MOZ_ASSERT(NS_SUCCEEDED(rv));
  return NS_OK;
}

// Reads back the value stored by SetNetworkTimes(); fails when the element
// is missing.
nsresult CacheFile::GetOnStopTime(uint64_t* _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mMetadata);
  const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
  if (!onStopTimeStr) {
    return NS_ERROR_NOT_AVAILABLE;
  }
  nsresult rv;
  *_retval = nsDependentCString(onStopTimeStr).ToInteger64(&rv);
  MOZ_ASSERT(NS_SUCCEEDED(rv));
  return NS_OK;
}

// Stores the content type both in the metadata ("ctid" element) and, for a
// live disk entry, in the cache index.
nsresult CacheFile::SetContentType(uint8_t aContentType) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetContentType() this=%p, contentType=%u", this,
       aContentType));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  // Save the content type to metadata for case we need to rebuild the index.
  nsAutoCString contentType;
  contentType.AppendInt(aContentType);
  nsresult rv = mMetadata->SetElement("ctid", contentType.get());
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr,
                                         nullptr, &aContentType);
  }
  return NS_OK;
}

// Stores (or clears, when aAltMetadata is null) the reserved alt-data
// metadata element. On failure the alt-data state is reset so the entry stays
// consistent. Caller must already hold the lock.
nsresult CacheFile::SetAltMetadata(const char* aAltMetadata) {
  AssertOwnsLock();
  LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s", this,
       aAltMetadata ? aAltMetadata : ""));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  nsresult rv =
      mMetadata->SetElement(CacheFileUtils::kAltDataKey, aAltMetadata);

  bool hasAltData = !!aAltMetadata;

  if (NS_FAILED(rv)) {
    // Removing element shouldn't fail because it doesn't allocate memory.
    mMetadata->SetElement(CacheFileUtils::kAltDataKey, nullptr);

    mAltDataOffset = -1;
    mAltDataType.Truncate();
    hasAltData = false;
  }

  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &hasAltData, nullptr,
                                         nullptr, nullptr);
  }
  return rv;
}

nsresult CacheFile::GetLastModified(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  *_retval = mMetadata->GetLastModified();
  return NS_OK;
}

nsresult CacheFile::GetLastFetched(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  *_retval = mMetadata->GetLastFetched();
  return NS_OK;
}

nsresult CacheFile::GetFetchCount(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata,
NS_ERROR_UNEXPECTED);
  *_retval = mMetadata->GetFetchCount();
  return NS_OK;
}

// Reports the on-disk size in kB; only available when the file handle exists.
nsresult CacheFile::GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) {
  CacheFileAutoLock lock(this);
  if (!mHandle) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  *aDiskStorageSize = mHandle->FileSizeInK();
  return NS_OK;
}

// Bumps the fetch statistics in the metadata and schedules a metadata write.
nsresult CacheFile::OnFetched() {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::OnFetched() this=%p", this));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  mMetadata->OnFetched();
  return NS_OK;
}

// Queues aObject on mObjsToRelease so its reference is dropped only after the
// lock is exited, avoiding re-entrancy while the lock is held.
void CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject) {
  AssertOwnsLock();

  mObjsToRelease.AppendElement(std::move(aObject));
}

// Returns the chunk at aIndex, creating or loading it as necessary. The lock
// must be held. Behavior depends on the caller type:
//  - WRITER gets the chunk synchronously (creating it, and zero-filling any
//    gap when writing past the current end of data),
//  - READER gets it synchronously only when it is already loaded and ready,
//    otherwise aCallback is queued,
//  - PRELOADER only kicks off an asynchronous read and never returns a chunk.
nsresult CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
                                   CacheFileChunkListener* aCallback,
                                   CacheFileChunk** _retval) {
  AssertOwnsLock();

  LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
       this, aIndex, aCaller, aCallback));

  MOZ_ASSERT(mReady);
  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
  MOZ_ASSERT((aCaller == READER && aCallback) ||
             (aCaller == WRITER && !aCallback) ||
             (aCaller == PRELOADER && !aCallback));

  // Preload chunks from disk when this is disk backed entry and the listener
  // is reader.
  bool preload = !mMemoryOnly && (aCaller == READER);

  nsresult rv;

  RefPtr<CacheFileChunk> chunk;
  if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    // We might get failed chunk between releasing the lock in
    // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
    rv = chunk->GetStatus();
    if (NS_FAILED(rv)) {
      SetError(rv);
      LOG(
          ("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
           "[this=%p]",
           this));
      return rv;
    }

    if (chunk->IsReady() || aCaller == WRITER) {
      chunk.swap(*_retval);
    } else {
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    // Reactivate the chunk: move it from the cached table to the active one.
    mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
    mCachedChunks.Remove(aIndex);
    chunk->mFile = this;
    chunk->mActiveChunk = true;

    MOZ_ASSERT(chunk->IsReady());

    chunk.swap(*_retval);

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  int64_t off = aIndex * static_cast<int64_t>(kChunkSize);

  if (off < mDataSize) {
    // We cannot be here if this is memory only entry since the chunk must exist
    MOZ_ASSERT(!mMemoryOnly);
    if (mMemoryOnly) {
      // If this ever really happen it is better to fail rather than crashing on
      // a null handle.
      LOG(
          ("CacheFile::GetChunkLocked() - Unexpected state! Offset < mDataSize "
           "for memory-only entry. [this=%p, off=%" PRId64
           ", mDataSize=%" PRId64 "]",
           this, off, mDataSize));

      return NS_ERROR_UNEXPECTED;
    }

    chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER);
    mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
    chunk->mActiveChunk = true;

    LOG(
        ("CacheFile::GetChunkLocked() - Reading newly created chunk %p from "
         "the disk [this=%p]",
         chunk.get(), this));

    // Read the chunk from the disk
    rv = chunk->Read(mHandle,
                     std::min(static_cast<uint32_t>(mDataSize - off),
                              static_cast<uint32_t>(kChunkSize)),
                     mMetadata->GetHash(aIndex), this);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      RemoveChunkInternal(chunk, false);
      return rv;
    }

    if (aCaller == WRITER) {
      chunk.swap(*_retval);
    } else if (aCaller != PRELOADER) {
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }
  if (off == mDataSize) {
    if (aCaller == WRITER) {
      // this listener is going to write to the chunk
      chunk = new CacheFileChunk(this, aIndex, true);
      mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
      chunk->mActiveChunk = true;

      LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
           chunk.get(), this));

      chunk->InitNew();
      mMetadata->SetHash(aIndex, chunk->Hash());

      if (HaveChunkListeners(aIndex)) {
        rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
        NS_ENSURE_SUCCESS(rv, rv);
      }

      chunk.swap(*_retval);
      return NS_OK;
    }
  } else {
    if (aCaller == WRITER) {
      // this chunk was requested by writer, but we need to fill the gap first

      // Fill with zero the last chunk if it is incomplete
      if (mDataSize % kChunkSize) {
        rv = PadChunkWithZeroes(mDataSize / kChunkSize);
        NS_ENSURE_SUCCESS(rv, rv);

        MOZ_ASSERT(!(mDataSize % kChunkSize));
      }

      uint32_t startChunk = mDataSize / kChunkSize;

      if (mMemoryOnly) {
        // We need to create all missing CacheFileChunks if this is memory-only
        // entry
        for (uint32_t i = startChunk; i < aIndex; i++) {
          rv = PadChunkWithZeroes(i);
          NS_ENSURE_SUCCESS(rv, rv);
        }
      } else {
        // We don't need to create CacheFileChunk for other empty chunks unless
        // there is some input stream waiting for this chunk.

        if (startChunk != aIndex) {
          // Make sure the file contains zeroes at the end of the file
          rv = CacheFileIOManager::TruncateSeekSetEOF(
              mHandle, startChunk * kChunkSize, aIndex * kChunkSize, nullptr);
          NS_ENSURE_SUCCESS(rv, rv);
        }

        for (uint32_t i = startChunk; i < aIndex; i++) {
          if (HaveChunkListeners(i)) {
            rv = PadChunkWithZeroes(i);
            NS_ENSURE_SUCCESS(rv, rv);
          } else {
            mMetadata->SetHash(i, kEmptyChunkHash);
            mDataSize = (i + 1) * kChunkSize;
          }
        }
      }

      // The gap is filled now; recurse once to create the requested chunk.
      MOZ_ASSERT(mDataSize == off);
      rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
      NS_ENSURE_SUCCESS(rv, rv);

      chunk.swap(*_retval);
      return NS_OK;
    }
  }

  // We can be here only if the caller is reader since writer always create a
  // new chunk above and preloader calls this method to preload only chunks that
  // are not loaded but that do exist.
  MOZ_ASSERT(aCaller == READER, "Unexpected!");

  if (mOutput) {
    // the chunk doesn't exist but mOutput may create it
    QueueChunkListener(aIndex, aCallback);
  } else {
    return NS_ERROR_NOT_AVAILABLE;
  }

  return NS_OK;
}

// Kicks off asynchronous loads of up to mPreloadChunkCount chunks starting at
// aIndex, skipping chunks that are already loaded, cached or beyond EOF.
void CacheFile::PreloadChunks(uint32_t aIndex) {
  AssertOwnsLock();

  uint32_t limit = aIndex + mPreloadChunkCount;

  for (uint32_t i = aIndex; i < limit; ++i) {
    int64_t off = i * static_cast<int64_t>(kChunkSize);

    if (off >= mDataSize) {
      // This chunk is beyond EOF.
      return;
    }

    if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
      // This chunk is already in memory or is being read right now.
      continue;
    }

    LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
         this, i));

    RefPtr<CacheFileChunk> chunk;
    GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
    // We've checked that we don't have this chunk, so no chunk must be
    // returned.
    MOZ_ASSERT(!chunk);
  }
}

// Decides whether an unused chunk should be kept in mCachedChunks. With
// CACHE_CHUNKS defined all chunks are cached; otherwise only chunks that are
// (or may become) preloaded chunks are kept.
bool CacheFile::ShouldCacheChunk(uint32_t aIndex) {
  AssertOwnsLock();

#ifdef CACHE_CHUNKS
  // We cache all chunks.
  return true;
#else

  if (mPreloadChunkCount != 0 && mInputs.Length() == 0 &&
      mPreloadWithoutInputStreams && aIndex < mPreloadChunkCount) {
    // We don't have any input stream yet, but it is likely that some will be
    // opened soon. Keep first mPreloadChunkCount chunks in memory. The
    // condition is here instead of in MustKeepCachedChunk() since these
    // chunks should be preloaded and can be kept in memory as an optimization,
    // but they can be released at any time until they are considered as
    // preloaded chunks for any input stream.
    return true;
  }

  // Cache only chunks that we really need to keep.
  return MustKeepCachedChunk(aIndex);
#endif
}

// Returns true when the chunk at aIndex must not be released: always for
// memory-only entries and while the file is still opening, otherwise when the
// chunk lies in the preload window of any existing input stream.
bool CacheFile::MustKeepCachedChunk(uint32_t aIndex) {
  AssertOwnsLock();

  // We must keep the chunk when this is memory only entry or we don't have
  // a handle yet.
  if (mMemoryOnly || mOpeningFile) {
    return true;
  }

  if (mPreloadChunkCount == 0) {
    // Preloading of chunks is disabled
    return false;
  }

  // Check whether this chunk should be considered as preloaded chunk for any
  // existing input stream.

  // maxPos is the position of the last byte in the given chunk
  int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;

  // minPos is the position of the first byte in a chunk that precedes the given
  // chunk by mPreloadChunkCount chunks
  int64_t minPos;
  if (mPreloadChunkCount >= aIndex) {
    minPos = 0;
  } else {
    minPos = static_cast<int64_t>(aIndex - mPreloadChunkCount) * kChunkSize;
  }

  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    int64_t inputPos = mInputs[i]->GetPosition();
    if (inputPos >= minPos && inputPos <= maxPos) {
      return true;
    }
  }

  return false;
}

// Retires an active chunk once its last outside user is gone (detected via
// the exact mRefCnt == 2 check below). Depending on state the chunk is
// written to disk, moved to mCachedChunks, or released entirely.
nsresult CacheFile::DeactivateChunk(CacheFileChunk* aChunk) {
  nsresult rv;

  // Avoid lock reentrancy by increasing the RefCnt
  RefPtr<CacheFileChunk> chunk = aChunk;

  {
    CacheFileAutoLock lock(this);

    LOG(("CacheFile::DeactivateChunk() [this=%p, chunk=%p, idx=%u]", this,
         aChunk, aChunk->Index()));

    MOZ_ASSERT(mReady);
    MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) ||
               (!mHandle && mMemoryOnly && !mOpeningFile) ||
               (!mHandle && !mMemoryOnly && mOpeningFile));

    if (aChunk->mRefCnt != 2) {
      LOG(
          ("CacheFile::DeactivateChunk() - Chunk is still used [this=%p, "
           "chunk=%p, refcnt=%" PRIuPTR "]",
           this, aChunk, aChunk->mRefCnt.get()));

      // somebody got the reference before the lock was acquired
      return NS_OK;
    }

    if (aChunk->mDiscardedChunk) {
      aChunk->mActiveChunk = false;
      ReleaseOutsideLock(
          RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

      DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
      MOZ_ASSERT(removed);
      return NS_OK;
    }

#ifdef DEBUG
    {
      // We can be here iff the chunk is in the hash table
      RefPtr<CacheFileChunk> chunkCheck;
      mChunks.Get(chunk->Index(), getter_AddRefs(chunkCheck));
      MOZ_ASSERT(chunkCheck == chunk);

      // We also shouldn't have any queued listener for this chunk
      ChunkListeners* listeners;
      mChunkListeners.Get(chunk->Index(), &listeners);
      MOZ_ASSERT(!listeners);
    }
#endif

    if (NS_FAILED(chunk->GetStatus())) {
      SetError(chunk->GetStatus());
    }

    if (NS_FAILED(mStatus)) {
      // Don't write any chunk to disk since this entry will be doomed
      LOG(
          ("CacheFile::DeactivateChunk() - Releasing chunk because of status "
           "[this=%p, chunk=%p, mStatus=0x%08" PRIx32 "]",
           this, chunk.get(), static_cast<uint32_t>(mStatus)));

      RemoveChunkInternal(chunk, false);
      return mStatus;
    }

    if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
      LOG(
          ("CacheFile::DeactivateChunk() - Writing dirty chunk to the disk "
           "[this=%p]",
           this));

      mDataIsDirty = true;

      rv = chunk->Write(mHandle, this);
      if (NS_FAILED(rv)) {
        LOG(
            ("CacheFile::DeactivateChunk() - CacheFileChunk::Write() failed "
             "synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08" PRIx32
             "]",
             this, chunk.get(), static_cast<uint32_t>(rv)));

        RemoveChunkInternal(chunk, false);

        SetError(rv);
        return rv;
      }

      // Chunk will be removed in OnChunkWritten if it is still unused

      // chunk needs to be released under the lock to be able to rely on
      // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
      chunk = nullptr;
      return NS_OK;
    }

    bool keepChunk = ShouldCacheChunk(aChunk->Index());
    LOG(("CacheFile::DeactivateChunk() - %s unused chunk [this=%p, chunk=%p]",
         keepChunk ? "Caching" : "Releasing", this, chunk.get()));

    RemoveChunkInternal(chunk, keepChunk);

    if (!mMemoryOnly) WriteMetadataIfNeededLocked();
  }

  return NS_OK;
}

// Removes aChunk from the active table; when aCacheChunk is true the chunk is
// moved to mCachedChunks instead of being dropped. The back-reference to this
// CacheFile is released outside the lock.
void CacheFile::RemoveChunkInternal(CacheFileChunk* aChunk, bool aCacheChunk) {
  AssertOwnsLock();

  aChunk->mActiveChunk = false;
  ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

  if (aCacheChunk) {
    mCachedChunks.InsertOrUpdate(aChunk->Index(), RefPtr{aChunk});
  }

  mChunks.Remove(aChunk->Index());
}

// Returns true when an output stream of the given kind (alt-data vs. normal
// data) is currently open.
bool CacheFile::OutputStreamExists(bool aAlternativeData) {
  AssertOwnsLock();

  if (!mOutput) {
    return false;
  }

  return mOutput->IsAlternativeData() == aAlternativeData;
}

// Computes how many bytes starting at chunk aIndex are available without any
// further disk read, considering only chunks guaranteed to stay in memory.
int64_t CacheFile::BytesFromChunk(uint32_t aIndex, bool aAlternativeData) {
  AssertOwnsLock();

  int64_t dataSize;

  if (mAltDataOffset != -1) {
    if (aAlternativeData) {
      dataSize = mDataSize;
    } else {
      dataSize = mAltDataOffset;
    }
  } else {
    MOZ_ASSERT(!aAlternativeData);
    dataSize = mDataSize;
  }

  if (!dataSize) {
    return 0;
  }

  // Index of the last existing chunk.
  uint32_t lastChunk = (dataSize - 1) / kChunkSize;
  if (aIndex > lastChunk) {
    return 0;
  }

  // We can use only preloaded chunks for the given stream to calculate
  // available bytes if this is an entry stored on disk, since only those
  // chunks are guaranteed not to be released.
1862 uint32_t maxPreloadedChunk; 1863 if (mMemoryOnly) { 1864 maxPreloadedChunk = lastChunk; 1865 } else { 1866 maxPreloadedChunk = std::min(aIndex + mPreloadChunkCount, lastChunk); 1867 } 1868 1869 uint32_t i; 1870 for (i = aIndex; i <= maxPreloadedChunk; ++i) { 1871 CacheFileChunk* chunk; 1872 1873 chunk = mChunks.GetWeak(i); 1874 if (chunk) { 1875 MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize); 1876 if (chunk->IsReady()) { 1877 continue; 1878 } 1879 1880 // don't search this chunk in cached 1881 break; 1882 } 1883 1884 chunk = mCachedChunks.GetWeak(i); 1885 if (chunk) { 1886 MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize); 1887 continue; 1888 } 1889 1890 break; 1891 } 1892 1893 // theoretic bytes in advance 1894 int64_t advance = int64_t(i - aIndex) * kChunkSize; 1895 // real bytes till the end of the file 1896 int64_t tail = dataSize - (aIndex * kChunkSize); 1897 1898 return std::min(advance, tail); 1899 } 1900 1901 nsresult CacheFile::Truncate(int64_t aOffset) { 1902 AssertOwnsLock(); 1903 1904 LOG(("CacheFile::Truncate() [this=%p, offset=%" PRId64 "]", this, aOffset)); 1905 1906 nsresult rv; 1907 1908 // If we ever need to truncate on non alt-data boundary, we need to handle 1909 // existing input streams. 
1910 MOZ_ASSERT(aOffset == mAltDataOffset, 1911 "Truncating normal data not implemented"); 1912 MOZ_ASSERT(mReady); 1913 MOZ_ASSERT(!mOutput); 1914 1915 uint32_t lastChunk = 0; 1916 if (mDataSize > 0) { 1917 lastChunk = (mDataSize - 1) / kChunkSize; 1918 } 1919 1920 uint32_t newLastChunk = 0; 1921 if (aOffset > 0) { 1922 newLastChunk = (aOffset - 1) / kChunkSize; 1923 } 1924 1925 uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize; 1926 1927 LOG( 1928 ("CacheFileTruncate() - lastChunk=%u, newLastChunk=%u, " 1929 "bytesInNewLastChunk=%u", 1930 lastChunk, newLastChunk, bytesInNewLastChunk)); 1931 1932 // Remove all truncated chunks from mCachedChunks 1933 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) { 1934 uint32_t idx = iter.Key(); 1935 1936 if (idx > newLastChunk) { 1937 // This is unused chunk, simply remove it. 1938 LOG(("CacheFile::Truncate() - removing cached chunk [idx=%u]", idx)); 1939 iter.Remove(); 1940 } 1941 } 1942 1943 // We need to make sure no input stream holds a reference to a chunk we're 1944 // going to discard. In theory, if alt-data begins at chunk boundary, input 1945 // stream for normal data can get the chunk containing only alt-data via 1946 // EnsureCorrectChunk() call. The input stream won't read the data from such 1947 // chunk, but it will keep the reference until the stream is closed and we 1948 // cannot simply discard this chunk. 
1949 int64_t maxInputChunk = -1; 1950 for (uint32_t i = 0; i < mInputs.Length(); ++i) { 1951 int64_t inputChunk = mInputs[i]->GetChunkIdx(); 1952 1953 if (maxInputChunk < inputChunk) { 1954 maxInputChunk = inputChunk; 1955 } 1956 1957 MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset); 1958 } 1959 1960 MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1); 1961 if (maxInputChunk == newLastChunk + 1) { 1962 // Truncating must be done at chunk boundary 1963 MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize); 1964 newLastChunk++; 1965 bytesInNewLastChunk = 0; 1966 LOG( 1967 ("CacheFile::Truncate() - chunk %p is still in use, using " 1968 "newLastChunk=%u and bytesInNewLastChunk=%u", 1969 mChunks.GetWeak(newLastChunk), newLastChunk, bytesInNewLastChunk)); 1970 } 1971 1972 // Discard all truncated chunks in mChunks 1973 for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) { 1974 uint32_t idx = iter.Key(); 1975 1976 if (idx > newLastChunk) { 1977 RefPtr<CacheFileChunk>& chunk = iter.Data(); 1978 LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]", idx, 1979 chunk.get())); 1980 1981 if (HaveChunkListeners(idx)) { 1982 NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk); 1983 } 1984 1985 chunk->mDiscardedChunk = true; 1986 mDiscardedChunks.AppendElement(chunk); 1987 iter.Remove(); 1988 } 1989 } 1990 1991 // Remove hashes of all removed chunks from the metadata 1992 for (uint32_t i = lastChunk; i > newLastChunk; --i) { 1993 mMetadata->RemoveHash(i); 1994 } 1995 1996 // Truncate new last chunk 1997 if (bytesInNewLastChunk == kChunkSize) { 1998 LOG(("CacheFile::Truncate() - not truncating last chunk.")); 1999 } else { 2000 RefPtr<CacheFileChunk> chunk; 2001 if (mChunks.Get(newLastChunk, getter_AddRefs(chunk))) { 2002 LOG(("CacheFile::Truncate() - New last chunk %p got from mChunks.", 2003 chunk.get())); 2004 } else if (mCachedChunks.Get(newLastChunk, getter_AddRefs(chunk))) { 2005 LOG(("CacheFile::Truncate() - New last chunk %p got from 
mCachedChunks.", 2006 chunk.get())); 2007 } else { 2008 // New last chunk isn't loaded but we need to update the hash. 2009 MOZ_ASSERT(!mMemoryOnly); 2010 MOZ_ASSERT(mHandle); 2011 2012 rv = GetChunkLocked(newLastChunk, PRELOADER, nullptr, 2013 getter_AddRefs(chunk)); 2014 if (NS_FAILED(rv)) { 2015 return rv; 2016 } 2017 // We've checked that we don't have this chunk, so no chunk must be 2018 // returned. 2019 MOZ_ASSERT(!chunk); 2020 2021 if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) { 2022 return NS_ERROR_UNEXPECTED; 2023 } 2024 2025 LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.", 2026 chunk.get())); 2027 } 2028 2029 rv = chunk->GetStatus(); 2030 if (NS_FAILED(rv)) { 2031 LOG( 2032 ("CacheFile::Truncate() - New last chunk is failed " 2033 "[status=0x%08" PRIx32 "]", 2034 static_cast<uint32_t>(rv))); 2035 return rv; 2036 } 2037 2038 chunk->Truncate(bytesInNewLastChunk); 2039 2040 // If the chunk is ready set the new hash now. If it's still being loaded 2041 // CacheChunk::Truncate() made the chunk dirty and the hash will be updated 2042 // in OnChunkWritten(). 
2043 if (chunk->IsReady()) { 2044 mMetadata->SetHash(newLastChunk, chunk->Hash()); 2045 } 2046 } 2047 2048 if (mHandle) { 2049 rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, aOffset, aOffset, 2050 nullptr); 2051 if (NS_FAILED(rv)) { 2052 return rv; 2053 } 2054 } 2055 2056 mDataSize = aOffset; 2057 2058 return NS_OK; 2059 } 2060 2061 static uint32_t StatusToTelemetryEnum(nsresult aStatus) { 2062 if (NS_SUCCEEDED(aStatus)) { 2063 return 0; 2064 } 2065 2066 switch (aStatus) { 2067 case NS_BASE_STREAM_CLOSED: 2068 return 0; // Log this as a success 2069 case NS_ERROR_OUT_OF_MEMORY: 2070 return 2; 2071 case NS_ERROR_FILE_NO_DEVICE_SPACE: 2072 return 3; 2073 case NS_ERROR_FILE_CORRUPTED: 2074 return 4; 2075 case NS_ERROR_FILE_NOT_FOUND: 2076 return 5; 2077 case NS_BINDING_ABORTED: 2078 return 6; 2079 default: 2080 return 1; // other error 2081 } 2082 2083 MOZ_ASSERT_UNREACHABLE("We should never get here"); 2084 } 2085 2086 void CacheFile::RemoveInput(CacheFileInputStream* aInput, nsresult aStatus) { 2087 AssertOwnsLock(); 2088 2089 LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08" PRIx32 "]", 2090 this, aInput, static_cast<uint32_t>(aStatus))); 2091 2092 DebugOnly<bool> found{}; 2093 found = mInputs.RemoveElement(aInput); 2094 MOZ_ASSERT(found); 2095 2096 ReleaseOutsideLock( 2097 already_AddRefed<nsIInputStream>(static_cast<nsIInputStream*>(aInput))); 2098 2099 if (!mMemoryOnly) WriteMetadataIfNeededLocked(); 2100 2101 // If the input didn't read all data, there might be left some preloaded 2102 // chunks that won't be used anymore. 
  CleanUpCachedChunks();

  glean::network::cache_v2_input_stream_status.AccumulateSingleSample(
      StatusToTelemetryEnum(aStatus));
}

// Detaches the (single) output stream from this entry. Cancels listeners the
// writer could have satisfied, flushes metadata, propagates a fatal close
// status to the entry (dooming it or truncating alt-data as appropriate) and
// finally notifies the close listener.
void CacheFile::RemoveOutput(CacheFileOutputStream* aOutput, nsresult aStatus) {
  AssertOwnsLock();

  nsresult rv;

  LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]",
       this, aOutput, static_cast<uint32_t>(aStatus)));

  if (mOutput != aOutput) {
    LOG(
        ("CacheFile::RemoveOutput() - This output was already removed, ignoring"
         " call [this=%p]",
         this));
    return;
  }

  // This is to finalize the Hash calculation
  if (mDict) {
    mDict->FinishHash();
    mDict = nullptr;
  }

  mOutput = nullptr;  // XXX should this be after NotifyCloseListener?

  // Cancel all queued chunk and update listeners that cannot be satisfied
  NotifyListenersAboutOutputRemoval();

  if (!mMemoryOnly) WriteMetadataIfNeededLocked();

  // Make sure the CacheFile status is set to a failure when the output stream
  // is closed with a fatal error. This way we propagate correctly and w/o any
  // windows the failure state of this entry to end consumers.
  if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) &&
      aStatus != NS_BASE_STREAM_CLOSED) {
    if (aOutput->IsAlternativeData()) {
      MOZ_ASSERT(mAltDataOffset != -1);
      // If there is no alt-data input stream truncate only alt-data, otherwise
      // doom the entry.
      bool altDataInputExists = false;
      for (uint32_t i = 0; i < mInputs.Length(); ++i) {
        if (mInputs[i]->IsAlternativeData()) {
          altDataInputExists = true;
          break;
        }
      }
      if (altDataInputExists) {
        SetError(aStatus);
      } else {
        rv = Truncate(mAltDataOffset);
        if (NS_FAILED(rv)) {
          LOG(
              ("CacheFile::RemoveOutput() - Truncating alt-data failed "
               "[rv=0x%08" PRIx32 "]",
               static_cast<uint32_t>(rv)));
          SetError(aStatus);
        } else {
          SetAltMetadata(nullptr);
          mAltDataOffset = -1;
          mAltDataType.Truncate();
        }
      }
    } else {
      SetError(aStatus);
    }
  }

  // Notify close listener as the last action
  aOutput->NotifyCloseListener();

  glean::network::cache_v2_output_stream_status.AccumulateSingleSample(
      StatusToTelemetryEnum(aStatus));
}

// Dispatches an asynchronous notification for a single chunk listener, either
// to aTarget or, when none is given, to the current thread.
nsresult CacheFile::NotifyChunkListener(CacheFileChunkListener* aCallback,
                                        nsIEventTarget* aTarget,
                                        nsresult aResult, uint32_t aChunkIdx,
                                        CacheFileChunk* aChunk) {
  LOG(
      ("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
       "rv=0x%08" PRIx32 ", idx=%u, chunk=%p]",
       this, aCallback, aTarget, static_cast<uint32_t>(aResult), aChunkIdx,
       aChunk));

  RefPtr<NotifyChunkListenerEvent> ev;
  ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk);
  if (aTarget) {
    return aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
  }
  return NS_DispatchToCurrentThread(ev);
}

// Queues aCallback to be notified once the chunk at aIndex becomes available.
// Callbacks are later run on the cache I/O thread, with the main thread as a
// fallback when the I/O target cannot be obtained.
void CacheFile::QueueChunkListener(uint32_t aIndex,
                                   CacheFileChunkListener* aCallback) {
  LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]", this,
       aIndex, aCallback));

  AssertOwnsLock();

  MOZ_ASSERT(aCallback);

  ChunkListenerItem* item = new ChunkListenerItem();
  item->mTarget = CacheFileIOManager::IOTarget();
  if (!item->mTarget) {
    LOG(
        ("CacheFile::QueueChunkListener() - Cannot get Cache I/O thread! Using "
         "main thread for callback."));
    item->mTarget = GetMainThreadSerialEventTarget();
  }
  item->mCallback = aCallback;

  mChunkListeners.GetOrInsertNew(aIndex)->mItems.AppendElement(item);
}

// Notifies and removes all queued listeners for the chunk at aIndex. Returns
// the first dispatch failure, if any, while still attempting the rest.
nsresult CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
                                         CacheFileChunk* aChunk) {
  LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08" PRIx32
       ", "
       "chunk=%p]",
       this, aIndex, static_cast<uint32_t>(aResult), aChunk));

  AssertOwnsLock();

  nsresult rv, rv2;

  ChunkListeners* listeners;
  mChunkListeners.Get(aIndex, &listeners);
  MOZ_ASSERT(listeners);

  rv = NS_OK;
  for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
    ChunkListenerItem* item = listeners->mItems[i];
    rv2 = NotifyChunkListener(item->mCallback, item->mTarget, aResult, aIndex,
                              aChunk);
    if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) rv = rv2;
    delete item;
  }

  mChunkListeners.Remove(aIndex);

  return rv;
}

// True when at least one listener is queued for the chunk at aIndex.
bool CacheFile::HaveChunkListeners(uint32_t aIndex) {
  AssertOwnsLock();
  ChunkListeners* listeners;
  mChunkListeners.Get(aIndex, &listeners);
  return !!listeners;
}

// Called when the output stream goes away: fails every queued chunk listener
// whose chunk can no longer be created by the writer.
void CacheFile::NotifyListenersAboutOutputRemoval() {
  LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this));

  AssertOwnsLock();

  // First fail all chunk listeners that wait for non-existent chunk
  for (auto iter = mChunkListeners.Iter(); !iter.Done(); iter.Next()) {
    uint32_t idx = iter.Key();
    auto* listeners = iter.UserData();

    LOG(
        ("CacheFile::NotifyListenersAboutOutputRemoval() - fail "
         "[this=%p, idx=%u]",
         this, idx));

    RefPtr<CacheFileChunk> chunk;
    mChunks.Get(idx, getter_AddRefs(chunk));
    if (chunk) {
      // Skip these listeners because the chunk is being
read. We don't have 2277 // assertion here to check its state because it might be already in READY 2278 // state while CacheFile::OnChunkRead() is waiting on Cache I/O thread for 2279 // a lock so the listeners hasn't been notified yet. In any case, the 2280 // listeners will be notified from CacheFile::OnChunkRead(). 2281 continue; 2282 } 2283 2284 for (uint32_t i = 0; i < listeners->mItems.Length(); i++) { 2285 ChunkListenerItem* item = listeners->mItems[i]; 2286 NotifyChunkListener(item->mCallback, item->mTarget, 2287 NS_ERROR_NOT_AVAILABLE, idx, nullptr); 2288 delete item; 2289 } 2290 2291 iter.Remove(); 2292 } 2293 2294 // Fail all update listeners 2295 for (const auto& entry : mChunks) { 2296 const RefPtr<CacheFileChunk>& chunk = entry.GetData(); 2297 LOG( 2298 ("CacheFile::NotifyListenersAboutOutputRemoval() - fail2 " 2299 "[this=%p, idx=%u]", 2300 this, entry.GetKey())); 2301 2302 if (chunk->IsReady()) { 2303 chunk->NotifyUpdateListeners(); 2304 } 2305 } 2306 } 2307 2308 bool CacheFile::DataSize(int64_t* aSize) { 2309 CacheFileAutoLock lock(this); 2310 2311 if (OutputStreamExists(false)) { 2312 return false; 2313 } 2314 2315 if (mAltDataOffset == -1) { 2316 *aSize = mDataSize; 2317 } else { 2318 *aSize = mAltDataOffset; 2319 } 2320 2321 return true; 2322 } 2323 2324 nsresult CacheFile::GetAltDataSize(int64_t* aSize) { 2325 CacheFileAutoLock lock(this); 2326 if (mOutput) { 2327 return NS_ERROR_IN_PROGRESS; 2328 } 2329 2330 if (mAltDataOffset == -1) { 2331 return NS_ERROR_NOT_AVAILABLE; 2332 } 2333 2334 *aSize = mDataSize - mAltDataOffset; 2335 return NS_OK; 2336 } 2337 2338 nsresult CacheFile::GetAltDataType(nsACString& aType) { 2339 CacheFileAutoLock lock(this); 2340 2341 if (mAltDataOffset == -1) { 2342 return NS_ERROR_NOT_AVAILABLE; 2343 } 2344 2345 aType = mAltDataType; 2346 return NS_OK; 2347 } 2348 2349 bool CacheFile::IsDoomed() { 2350 CacheFileAutoLock lock(this); 2351 2352 if (!mHandle) return false; 2353 2354 return mHandle->IsDoomed(); 2355 } 2356 
// True when any write activity is pending: dirty data/metadata, a metadata
// write in flight (disk-backed only), the file still being opened, an open
// output stream, or chunks in use.
bool CacheFile::IsWriteInProgress() {
  CacheFileAutoLock lock(this);

  bool result = false;

  if (!mMemoryOnly) {
    result =
        mDataIsDirty || (mMetadata && mMetadata->IsDirty()) || mWritingMetadata;
  }

  result = result || mOpeningFile || mOutput || mChunks.Count();

  return result;
}

// Checks whether writing aSize bytes at aOffset would push the entry over the
// cache's per-entry size limit. For alt-data writes the regular data that
// precedes the alt-data counts toward the total. Returns false (no limit)
// when size checking is disabled or aSize is negative.
bool CacheFile::EntryWouldExceedLimit(int64_t aOffset, int64_t aSize,
                                      bool aIsAltData) {
  CacheFileAutoLock lock(this);

  if (mSkipSizeCheck || aSize < 0) {
    return false;
  }

  int64_t totalSize = aOffset + aSize;
  if (aIsAltData) {
    // Alt-data is appended after the regular data, which ends at
    // mAltDataOffset once alt-data exists.
    totalSize += (mAltDataOffset == -1) ? mDataSize : mAltDataOffset;
  }

  return CacheObserver::EntryIsTooBig(totalSize, !mMemoryOnly);
}

// True when either the data or the metadata has unflushed changes.
// NOTE(review): dereferences mMetadata unconditionally — callers appear to
// guarantee metadata exists (see the MOZ_CRASH check in
// WriteMetadataIfNeededLocked); confirm before calling from new code paths.
bool CacheFile::IsDirty() { return mDataIsDirty || mMetadata->IsDirty(); }

// Public entry point: flush metadata if needed. No-op for memory-only files.
void CacheFile::WriteMetadataIfNeeded() {
  LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this));

  CacheFileAutoLock lock(this);

  if (!mMemoryOnly) WriteMetadataIfNeededLocked();
}

// Writes metadata to disk when the entry is dirty and fully quiescent (no
// streams, chunks, pending open, in-flight metadata write, or kill flag).
// Must be called with the lock held; disk-backed files only.
void CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) {
  // When aFireAndForget is set to true, we are called from dtor.
  // |this| must not be referenced after this method returns!

  LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this));

  nsresult rv;

  AssertOwnsLock();
  MOZ_ASSERT(!mMemoryOnly);

  if (!mMetadata) {
    MOZ_CRASH("Must have metadata here");
    return;
  }

  // Never write on top of an already-failed entry.
  if (NS_FAILED(mStatus)) return;

  if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() ||
      mWritingMetadata || mOpeningFile || mKill) {
    return;
  }

  if (!aFireAndForget) {
    // if aFireAndForget is set, we are called from dtor. Write
    // scheduler hard-refers CacheFile otherwise, so we cannot be here.
    CacheFileIOManager::UnscheduleMetadataWrite(this);
  }

  LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]",
       this));

  // Pass no listener in the fire-and-forget (dtor) case so the write cannot
  // call back into a destroyed object.
  rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this);
  if (NS_SUCCEEDED(rv)) {
    mWritingMetadata = true;
    mDataIsDirty = false;
  } else {
    LOG(
        ("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously "
         "failed [this=%p]",
         this));
    // TODO: close streams with error
    SetError(rv);
  }
}

// Schedules a deferred metadata write via the IO manager. No-op for
// memory-only files.
void CacheFile::PostWriteTimer() {
  if (mMemoryOnly) return;
  LOG(("CacheFile::PostWriteTimer() [this=%p]", this));

  CacheFileIOManager::ScheduleMetadataWrite(this);
}

// Drops every cached chunk that MustKeepCachedChunk() does not insist on
// keeping (see the CACHE_CHUNKS policy comment at the top of the file).
void CacheFile::CleanUpCachedChunks() {
  for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
    uint32_t idx = iter.Key();
    const RefPtr<CacheFileChunk>& chunk = iter.Data();

    LOG(("CacheFile::CleanUpCachedChunks() [this=%p, idx=%u, chunk=%p]", this,
         idx, chunk.get()));

    if (MustKeepCachedChunk(idx)) {
      LOG(("CacheFile::CleanUpCachedChunks() - Keeping chunk"));
      continue;
    }

    LOG(("CacheFile::CleanUpCachedChunks() - Removing chunk"));
    iter.Remove();
  }
}

// Fills the tail of the last (possibly empty) chunk with zeroes up to the
// chunk boundary. Must be called with the lock held, and only for the chunk
// that currently contains the end of the data.
nsresult CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx) {
  AssertOwnsLock();

  // This method is used to pad last incomplete chunk with zeroes or create
  // a new chunk full of zeroes
  MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);

  nsresult rv;
  RefPtr<CacheFileChunk> chunk;
  rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(
      ("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
       " [this=%p]",
       aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));

  CacheFileChunkWriteHandle hnd = chunk->GetWriteHandle(kChunkSize);
  if (!hnd.Buf()) {
    // The chunk must be released outside the lock; mark the entry as failed.
    ReleaseOutsideLock(std::move(chunk));
    SetError(NS_ERROR_OUT_OF_MEMORY);
    return NS_ERROR_OUT_OF_MEMORY;
  }

  uint32_t offset = hnd.DataSize();
  memset(hnd.Buf() + offset, 0, kChunkSize - offset);
  hnd.UpdateDataSize(offset, kChunkSize - offset);

  ReleaseOutsideLock(std::move(chunk));

  return NS_OK;
}

// Records the first failure status for this entry and dooms the underlying
// file handle (if any). Later failures do not overwrite the first one. Must
// be called with the lock held.
void CacheFile::SetError(nsresult aStatus) {
  AssertOwnsLock();

  if (NS_SUCCEEDED(mStatus)) {
    mStatus = aStatus;
    if (mHandle) {
      CacheFileIOManager::DoomFile(mHandle, nullptr);
    }
  }
}

// Publishes this entry's metadata-derived attributes (frecency, alt-data
// presence, response timings, content type) to the cache index. Must be
// called with the lock held and a handle present; doomed handles are skipped.
nsresult CacheFile::InitIndexEntry() {
  AssertOwnsLock();
  MOZ_ASSERT(mHandle);

  if (mHandle->IsDoomed()) return NS_OK;

  nsresult rv;

  rv = CacheFileIOManager::InitIndexEntry(
      mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()),
      mMetadata->IsAnonymous(), mPinned);
  NS_ENSURE_SUCCESS(rv, rv);

  uint32_t frecency = mMetadata->GetFrecency();

  bool hasAltData =
      mMetadata->GetElement(CacheFileUtils::kAltDataKey) != nullptr;

  // Parses a stored decimal string into a clamped uint16 index time; missing
  // values map to kIndexTimeNotAvailable.
  static auto toUint16 = [](const char* s) -> uint16_t {
    if (s) {
      nsresult rv;
      uint64_t n64 = nsDependentCString(s).ToInteger64(&rv);
      MOZ_ASSERT(NS_SUCCEEDED(rv));
      return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
    }
    return kIndexTimeNotAvailable;
  };

  const char* onStartTimeStr =
      mMetadata->GetElement("net-response-time-onstart");
  uint16_t onStartTime = toUint16(onStartTimeStr);

  const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
  uint16_t onStopTime = toUint16(onStopTimeStr);

  const char* contentTypeStr = mMetadata->GetElement("ctid");
  uint8_t contentType = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
  if (contentTypeStr) {
    int64_t n64 = nsDependentCString(contentTypeStr).ToInteger64(&rv);
    // Out-of-range or unparsable values fall back to CONTENT_TYPE_UNKNOWN.
    if (NS_FAILED(rv) || n64 < nsICacheEntry::CONTENT_TYPE_UNKNOWN ||
        n64 >= nsICacheEntry::CONTENT_TYPE_LAST) {
      n64 = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
    }
    contentType = n64;
  }

  rv = CacheFileIOManager::UpdateIndexEntry(
      mHandle, &frecency, &hasAltData, &onStartTime, &onStopTime, &contentType);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

// Memory reporter: sums heap memory owned by this CacheFile (key, chunks,
// cached chunks, metadata, streams, listener tables). mHandle is reported by
// CacheFileIOManager instead.
size_t CacheFile::SizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  CacheFileAutoLock lock(const_cast<CacheFile*>(this));

  size_t n = 0;
  n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
  n += mChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
  for (const auto& chunk : mChunks.Values()) {
    n += chunk->SizeOfIncludingThis(mallocSizeOf);
  }
  n += mCachedChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
  for (const auto& chunk : mCachedChunks.Values()) {
    n += chunk->SizeOfIncludingThis(mallocSizeOf);
  }
  // Ignore metadata if it's still being read. It's not safe to access buffers
  // in CacheFileMetadata because they might be reallocated on another thread
  // outside CacheFile's lock.
  if (mMetadata && mReady) {
    n += mMetadata->SizeOfIncludingThis(mallocSizeOf);
  }

  // Input streams are not elsewhere reported.
  n += mInputs.ShallowSizeOfExcludingThis(mallocSizeOf);
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    n += mInputs[i]->SizeOfIncludingThis(mallocSizeOf);
  }

  // Output streams are not elsewhere reported.
  if (mOutput) {
    n += mOutput->SizeOfIncludingThis(mallocSizeOf);
  }

  // The listeners are usually classes reported just above.
  n += mChunkListeners.ShallowSizeOfExcludingThis(mallocSizeOf);
  n += mObjsToRelease.ShallowSizeOfExcludingThis(mallocSizeOf);

  // mHandle reported directly from CacheFileIOManager.

  return n;
}

// Memory reporter: as above but also counts the CacheFile object itself.
size_t CacheFile::SizeOfIncludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
}

}  // namespace mozilla::net