SurfaceCache.cpp (70185B)
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ 2 /* This Source Code Form is subject to the terms of the Mozilla Public 3 * License, v. 2.0. If a copy of the MPL was not distributed with this 4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 5 6 /** 7 * SurfaceCache is a service for caching temporary surfaces in imagelib. 8 */ 9 10 #include "SurfaceCache.h" 11 12 #include <algorithm> 13 #include <utility> 14 15 #include "ISurfaceProvider.h" 16 #include "Image.h" 17 #include "LookupResult.h" 18 #include "ShutdownTracker.h" 19 #include "gfx2DGlue.h" 20 #include "gfxPlatform.h" 21 #include "imgFrame.h" 22 #include "mozilla/AppShutdown.h" 23 #include "mozilla/Assertions.h" 24 #include "mozilla/Attributes.h" 25 #include "mozilla/CheckedInt.h" 26 #include "mozilla/DebugOnly.h" 27 #include "mozilla/Likely.h" 28 #include "mozilla/RefPtr.h" 29 #include "mozilla/StaticMutex.h" 30 #include "mozilla/StaticPrefs_image.h" 31 #include "mozilla/StaticPtr.h" 32 33 #include "nsExpirationTracker.h" 34 #include "nsHashKeys.h" 35 #include "nsIMemoryReporter.h" 36 #include "nsRefPtrHashtable.h" 37 #include "nsSize.h" 38 #include "nsTArray.h" 39 #include "Orientation.h" 40 #include "prsystem.h" 41 42 using std::max; 43 using std::min; 44 45 namespace mozilla { 46 47 using namespace gfx; 48 49 namespace image { 50 51 MOZ_DEFINE_MALLOC_SIZE_OF(SurfaceCacheMallocSizeOf) 52 53 class CachedSurface; 54 class SurfaceCacheImpl; 55 56 /////////////////////////////////////////////////////////////////////////////// 57 // Static Data 58 /////////////////////////////////////////////////////////////////////////////// 59 60 // The single surface cache instance. 61 static StaticRefPtr<SurfaceCacheImpl> sInstance; 62 63 // The mutex protecting the surface cache. 64 static StaticMutex sInstanceMutex MOZ_UNANNOTATED; 65 66 /////////////////////////////////////////////////////////////////////////////// 67 // SurfaceCache Implementation 68 /////////////////////////////////////////////////////////////////////////////// 69 70 /** 71 * Cost models the cost of storing a surface in the cache. Right now, this is 72 * simply an estimate of the size of the surface in bytes, but in the future it 73 * may be worth taking into account the cost of rematerializing the surface as 74 * well. 75 */ 76 typedef size_t Cost; 77 78 static Cost ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel) { 79 MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4); 80 return aSize.width * aSize.height * aBytesPerPixel; 81 } 82 83 /** 84 * Since we want to be able to make eviction decisions based on cost, we need to 85 * be able to look up the CachedSurface which has a certain cost as well as the 86 * cost associated with a certain CachedSurface. To make this possible, in data 87 * structures we actually store a CostEntry, which contains a weak pointer to 88 * its associated surface. 89 * 90 * To make usage of the weak pointer safe, SurfaceCacheImpl always calls 91 * StartTracking after a surface is stored in the cache and StopTracking before 92 * it is removed. 93 */ 94 class CostEntry { 95 public: 96 CostEntry(NotNull<CachedSurface*> aSurface, Cost aCost) 97 : mSurface(aSurface), mCost(aCost) {} 98 99 NotNull<CachedSurface*> Surface() const { return mSurface; } 100 Cost GetCost() const { return mCost; } 101 102 bool operator==(const CostEntry& aOther) const { 103 return mSurface == aOther.mSurface && mCost == aOther.mCost; 104 } 105 106 bool operator<(const CostEntry& aOther) const { 107 return mCost < aOther.mCost || 108 (mCost == aOther.mCost && mSurface < aOther.mSurface); 109 } 110 111 private: 112 NotNull<CachedSurface*> mSurface; 113 Cost mCost; 114 }; 115 116 /** 117 * A CachedSurface associates a surface with a key that uniquely identifies that 118 * surface. 119 */ 120 class CachedSurface { 121 ~CachedSurface() {} 122 123 public: 124 MOZ_DECLARE_REFCOUNTED_TYPENAME(CachedSurface) 125 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface) 126 127 explicit CachedSurface(NotNull<ISurfaceProvider*> aProvider) 128 : mProvider(aProvider), mIsLocked(false) {} 129 130 DrawableSurface GetDrawableSurface() const { 131 if (MOZ_UNLIKELY(IsPlaceholder())) { 132 MOZ_ASSERT_UNREACHABLE("Called GetDrawableSurface() on a placeholder"); 133 return DrawableSurface(); 134 } 135 136 return mProvider->Surface(); 137 } 138 139 DrawableSurface GetDrawableSurfaceEvenIfPlaceholder() const { 140 return mProvider->Surface(); 141 } 142 143 void SetLocked(bool aLocked) { 144 if (IsPlaceholder()) { 145 return; // Can't lock a placeholder. 146 } 147 148 // Update both our state and our provider's state. Some surface providers 149 // are permanently locked; maintaining our own locking state enables us to 150 // respect SetLocked() even when it's meaningless from the provider's 151 // perspective. 152 mIsLocked = aLocked; 153 mProvider->SetLocked(aLocked); 154 } 155 156 bool IsLocked() const { 157 return !IsPlaceholder() && mIsLocked && mProvider->IsLocked(); 158 } 159 160 void SetCannotSubstitute() { 161 mProvider->Availability().SetCannotSubstitute(); 162 } 163 bool CannotSubstitute() const { 164 return mProvider->Availability().CannotSubstitute(); 165 } 166 167 bool IsPlaceholder() const { 168 return mProvider->Availability().IsPlaceholder(); 169 } 170 bool IsDecoded() const { return !IsPlaceholder() && mProvider->IsFinished(); } 171 172 ImageKey GetImageKey() const { return mProvider->GetImageKey(); } 173 const SurfaceKey& GetSurfaceKey() const { return mProvider->GetSurfaceKey(); } 174 nsExpirationState* GetExpirationState() { return &mExpirationState; } 175 176 CostEntry GetCostEntry() { 177 return image::CostEntry(WrapNotNull(this), mProvider->LogicalSizeInBytes()); 178 } 179 180 size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { 181 return aMallocSizeOf(this) + aMallocSizeOf(mProvider.get()); 182 } 183 184 void InvalidateSurface() { mProvider->InvalidateSurface(); } 185 186 // A helper type used by SurfaceCacheImpl::CollectSizeOfSurfaces. 187 struct MOZ_STACK_CLASS SurfaceMemoryReport { 188 SurfaceMemoryReport(nsTArray<SurfaceMemoryCounter>& aCounters, 189 MallocSizeOf aMallocSizeOf) 190 : mCounters(aCounters), mMallocSizeOf(aMallocSizeOf) {} 191 192 void Add(NotNull<CachedSurface*> aCachedSurface, bool aIsFactor2) { 193 if (aCachedSurface->IsPlaceholder()) { 194 return; 195 } 196 197 // Record the memory used by the ISurfaceProvider. This may not have a 198 // straightforward relationship to the size of the surface that 199 // DrawableRef() returns if the surface is generated dynamically. (i.e., 200 // for surfaces with PlaybackType::eAnimated.) 201 aCachedSurface->mProvider->AddSizeOfExcludingThis( 202 mMallocSizeOf, [&](ISurfaceProvider::AddSizeOfCbData& aMetadata) { 203 SurfaceMemoryCounter counter(aCachedSurface->GetSurfaceKey(), 204 aCachedSurface->IsLocked(), 205 aCachedSurface->CannotSubstitute(), 206 aIsFactor2, aMetadata.mFinished); 207 208 counter.Values().SetDecodedHeap(aMetadata.mHeapBytes); 209 counter.Values().SetDecodedNonHeap(aMetadata.mNonHeapBytes); 210 counter.Values().SetDecodedUnknown(aMetadata.mUnknownBytes); 211 counter.Values().SetExternalHandles(aMetadata.mExternalHandles); 212 counter.Values().SetFrameIndex(aMetadata.mIndex); 213 counter.Values().SetExternalId(aMetadata.mExternalId); 214 counter.Values().SetSurfaceTypes(aMetadata.mTypes); 215 216 mCounters.AppendElement(counter); 217 }); 218 } 219 220 private: 221 nsTArray<SurfaceMemoryCounter>& mCounters; 222 MallocSizeOf mMallocSizeOf; 223 }; 224 225 private: 226 nsExpirationState mExpirationState; 227 NotNull<RefPtr<ISurfaceProvider>> mProvider; 228 bool mIsLocked; 229 }; 230 231 static int64_t AreaOfIntSize(const IntSize& aSize) { 232 return static_cast<int64_t>(aSize.width) * static_cast<int64_t>(aSize.height); 233 } 234 235 /** 236 * An ImageSurfaceCache is a per-image surface cache. For correctness we must be 237 * able to remove all surfaces associated with an image when the image is 238 * destroyed or invalidated. Since this will happen frequently, it makes sense 239 * to make it cheap by storing the surfaces for each image separately. 240 * 241 * ImageSurfaceCache also keeps track of whether its associated image is locked 242 * or unlocked. 243 * 244 * The cache may also enter "factor of 2" mode which occurs when the number of 245 * surfaces in the cache exceeds the "image.cache.factor2.threshold-surfaces" 246 * pref plus the number of native sizes of the image. When in "factor of 2" 247 * mode, the cache will strongly favour sizes which are a factor of 2 of the 248 * largest native size. It accomplishes this by suggesting a factor of 2 size 249 * when lookups fail and substituting the nearest factor of 2 surface to the 250 * ideal size as the "best" available (as opposed to substitution but not 251 * found). This allows us to minimize memory consumption and CPU time spent 252 * decoding when a website requires many variants of the same surface. 253 */ 254 class ImageSurfaceCache { 255 ~ImageSurfaceCache() {} 256 257 public: 258 explicit ImageSurfaceCache(const ImageKey aImageKey) 259 : mLocked(false), 260 mFactor2Mode(false), 261 mFactor2Pruned(false), 262 mIsVectorImage(aImageKey->GetType() == imgIContainer::TYPE_VECTOR) {} 263 264 MOZ_DECLARE_REFCOUNTED_TYPENAME(ImageSurfaceCache) 265 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageSurfaceCache) 266 267 typedef nsRefPtrHashtable<nsGenericHashKey<SurfaceKey>, CachedSurface> 268 SurfaceTable; 269 270 auto Values() const { return mSurfaces.Values(); } 271 uint32_t Count() const { return mSurfaces.Count(); } 272 bool IsEmpty() const { return mSurfaces.Count() == 0; } 273 274 size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { 275 size_t bytes = aMallocSizeOf(this) + 276 mSurfaces.ShallowSizeOfExcludingThis(aMallocSizeOf); 277 for (const auto& value : Values()) { 278 bytes += value->ShallowSizeOfIncludingThis(aMallocSizeOf); 279 } 280 return bytes; 281 } 282 283 [[nodiscard]] bool Insert(NotNull<CachedSurface*> aSurface) { 284 MOZ_ASSERT(!mLocked || aSurface->IsPlaceholder() || aSurface->IsLocked(), 285 "Inserting an unlocked surface for a locked image"); 286 const auto& surfaceKey = aSurface->GetSurfaceKey(); 287 if (surfaceKey.Region()) { 288 // We don't allow substitutes for surfaces with regions, so we don't want 289 // to allow factor of 2 mode pruning to release these surfaces. 290 aSurface->SetCannotSubstitute(); 291 } 292 return mSurfaces.InsertOrUpdate(surfaceKey, RefPtr<CachedSurface>{aSurface}, 293 fallible); 294 } 295 296 already_AddRefed<CachedSurface> Remove(NotNull<CachedSurface*> aSurface) { 297 MOZ_ASSERT(mSurfaces.GetWeak(aSurface->GetSurfaceKey()), 298 "Should not be removing a surface we don't have"); 299 300 RefPtr<CachedSurface> surface; 301 mSurfaces.Remove(aSurface->GetSurfaceKey(), getter_AddRefs(surface)); 302 AfterMaybeRemove(); 303 return surface.forget(); 304 } 305 306 already_AddRefed<CachedSurface> Lookup(const SurfaceKey& aSurfaceKey, 307 bool aForAccess) { 308 RefPtr<CachedSurface> surface; 309 mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface)); 310 311 if (aForAccess) { 312 if (surface) { 313 // We don't want to allow factor of 2 mode pruning to release surfaces 314 // for which the callers will accept no substitute. 315 surface->SetCannotSubstitute(); 316 } else if (!mFactor2Mode) { 317 // If no exact match is found, and this is for use rather than internal 318 // accounting (i.e. insert and removal), we know this will trigger a 319 // decode. Make sure we switch now to factor of 2 mode if necessary. 320 MaybeSetFactor2Mode(); 321 } 322 } 323 324 return surface.forget(); 325 } 326 327 /** 328 * @returns A tuple containing the best matching CachedSurface if available, 329 * a MatchType describing how the CachedSurface was selected, and 330 * an IntSize which is the size the caller should choose to decode 331 * at should it attempt to do so. 332 */ 333 std::tuple<already_AddRefed<CachedSurface>, MatchType, IntSize> 334 LookupBestMatch(const SurfaceKey& aIdealKey) { 335 // Try for an exact match first. 336 RefPtr<CachedSurface> exactMatch; 337 mSurfaces.Get(aIdealKey, getter_AddRefs(exactMatch)); 338 if (exactMatch) { 339 if (exactMatch->IsDecoded()) { 340 return std::make_tuple(exactMatch.forget(), MatchType::EXACT, 341 IntSize()); 342 } 343 } else if (aIdealKey.Region()) { 344 // We cannot substitute if we have a region. Allow it to create an exact 345 // match. 346 return std::make_tuple(exactMatch.forget(), MatchType::NOT_FOUND, 347 IntSize()); 348 } else if (!mFactor2Mode) { 349 // If no exact match is found, and we are not in factor of 2 mode, then 350 // we know that we will trigger a decode because at best we will provide 351 // a substitute. Make sure we switch now to factor of 2 mode if necessary. 352 MaybeSetFactor2Mode(); 353 } 354 355 // Try for a best match second, if using compact. 356 IntSize suggestedSize = SuggestedSize(aIdealKey.Size()); 357 if (suggestedSize != aIdealKey.Size()) { 358 if (!exactMatch) { 359 SurfaceKey compactKey = aIdealKey.CloneWithSize(suggestedSize); 360 mSurfaces.Get(compactKey, getter_AddRefs(exactMatch)); 361 if (exactMatch && exactMatch->IsDecoded()) { 362 MOZ_ASSERT(suggestedSize != aIdealKey.Size()); 363 return std::make_tuple(exactMatch.forget(), 364 MatchType::SUBSTITUTE_BECAUSE_BEST, 365 suggestedSize); 366 } 367 } 368 } 369 370 // There's no perfect match, so find the best match we can. 371 RefPtr<CachedSurface> bestMatch; 372 for (const auto& value : Values()) { 373 NotNull<CachedSurface*> current = WrapNotNull(value); 374 const SurfaceKey& currentKey = current->GetSurfaceKey(); 375 376 // We never match a placeholder or a surface with a region. 377 if (current->IsPlaceholder() || currentKey.Region()) { 378 continue; 379 } 380 // Matching the playback type and SVG context is required. 381 if (currentKey.Playback() != aIdealKey.Playback() || 382 currentKey.SVGContext() != aIdealKey.SVGContext()) { 383 continue; 384 } 385 // Matching the flags is required. 386 if (currentKey.Flags() != aIdealKey.Flags()) { 387 continue; 388 } 389 // Anything is better than nothing! (Within the constraints we just 390 // checked, of course.) 391 if (!bestMatch) { 392 bestMatch = current; 393 continue; 394 } 395 396 MOZ_ASSERT(bestMatch, "Should have a current best match"); 397 398 // Always prefer completely decoded surfaces. 399 bool bestMatchIsDecoded = bestMatch->IsDecoded(); 400 if (bestMatchIsDecoded && !current->IsDecoded()) { 401 continue; 402 } 403 if (!bestMatchIsDecoded && current->IsDecoded()) { 404 bestMatch = current; 405 continue; 406 } 407 408 SurfaceKey bestMatchKey = bestMatch->GetSurfaceKey(); 409 if (CompareArea(aIdealKey.Size(), bestMatchKey.Size(), 410 currentKey.Size())) { 411 bestMatch = current; 412 } 413 } 414 415 MatchType matchType; 416 if (bestMatch) { 417 if (!exactMatch) { 418 // No exact match, neither ideal nor factor of 2. 419 MOZ_ASSERT(suggestedSize != bestMatch->GetSurfaceKey().Size(), 420 "No exact match despite the fact the sizes match!"); 421 matchType = MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND; 422 } else if (exactMatch != bestMatch) { 423 // The exact match is still decoding, but we found a substitute. 424 matchType = MatchType::SUBSTITUTE_BECAUSE_PENDING; 425 } else if (aIdealKey.Size() != bestMatch->GetSurfaceKey().Size()) { 426 // The best factor of 2 match is still decoding, but the best we've got. 427 MOZ_ASSERT(suggestedSize != aIdealKey.Size()); 428 MOZ_ASSERT(mFactor2Mode || mIsVectorImage); 429 matchType = MatchType::SUBSTITUTE_BECAUSE_BEST; 430 } else { 431 // The exact match is still decoding, but it's the best we've got. 432 matchType = MatchType::EXACT; 433 } 434 } else { 435 if (exactMatch) { 436 // We found an "exact match"; it must have been a placeholder. 437 MOZ_ASSERT(exactMatch->IsPlaceholder()); 438 matchType = MatchType::PENDING; 439 } else { 440 // We couldn't find an exact match *or* a substitute. 441 matchType = MatchType::NOT_FOUND; 442 } 443 } 444 445 return std::make_tuple(bestMatch.forget(), matchType, suggestedSize); 446 } 447 448 void MaybeSetFactor2Mode() { 449 MOZ_ASSERT(!mFactor2Mode); 450 451 // Typically an image cache will not have too many size-varying surfaces, so 452 // if we exceed the given threshold, we should consider using a subset. 453 int32_t thresholdSurfaces = 454 StaticPrefs::image_cache_factor2_threshold_surfaces(); 455 if (thresholdSurfaces < 0 || 456 mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) { 457 return; 458 } 459 460 // Determine how many native surfaces this image has. If it is zero, and it 461 // is a vector image, then we should impute a single native size. Otherwise, 462 // it may be zero because we don't know yet, or the image has an error, or 463 // it isn't supported. 464 NotNull<CachedSurface*> current = 465 WrapNotNull(mSurfaces.ConstIter().UserData()); 466 Image* image = static_cast<Image*>(current->GetImageKey()); 467 size_t nativeSizes = image->GetNativeSizesLength(); 468 if (mIsVectorImage) { 469 MOZ_ASSERT(nativeSizes == 0); 470 nativeSizes = 1; 471 } else if (nativeSizes == 0) { 472 return; 473 } 474 475 // Increase the threshold by the number of native sizes. This ensures that 476 // we do not prevent decoding of the image at all its native sizes. It does 477 // not guarantee we will provide a surface at that size however (i.e. many 478 // other sized surfaces are requested, in addition to the native sizes). 479 thresholdSurfaces += nativeSizes; 480 if (mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) { 481 return; 482 } 483 484 // We have a valid size, we can change modes. 485 mFactor2Mode = true; 486 } 487 488 template <typename Function> 489 void Prune(Function&& aRemoveCallback) { 490 if (!mFactor2Mode || mFactor2Pruned) { 491 return; 492 } 493 494 // Attempt to discard any surfaces which are not factor of 2 and the best 495 // factor of 2 match exists. 496 bool hasNotFactorSize = false; 497 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) { 498 NotNull<CachedSurface*> current = WrapNotNull(iter.UserData()); 499 const SurfaceKey& currentKey = current->GetSurfaceKey(); 500 const IntSize& currentSize = currentKey.Size(); 501 502 // First we check if someone requested this size and would not accept 503 // an alternatively sized surface. 504 if (current->CannotSubstitute()) { 505 continue; 506 } 507 508 // Next we find the best factor of 2 size for this surface. If this 509 // surface is a factor of 2 size, then we want to keep it. 510 IntSize bestSize = SuggestedSize(currentSize); 511 if (bestSize == currentSize) { 512 continue; 513 } 514 515 // Check the cache for a surface with the same parameters except for the 516 // size which uses the closest factor of 2 size. 517 SurfaceKey compactKey = currentKey.CloneWithSize(bestSize); 518 RefPtr<CachedSurface> compactMatch; 519 mSurfaces.Get(compactKey, getter_AddRefs(compactMatch)); 520 if (compactMatch && compactMatch->IsDecoded()) { 521 aRemoveCallback(current); 522 iter.Remove(); 523 } else { 524 hasNotFactorSize = true; 525 } 526 } 527 528 // We have no surfaces that are not factor of 2 sized, so we can stop 529 // pruning henceforth, because we avoid the insertion of new surfaces that 530 // don't match our sizing set (unless the caller won't accept a 531 // substitution.) 532 if (!hasNotFactorSize) { 533 mFactor2Pruned = true; 534 } 535 536 // We should never leave factor of 2 mode due to pruning in of itself, but 537 // if we discarded surfaces due to the volatile buffers getting released, 538 // it is possible. 539 AfterMaybeRemove(); 540 } 541 542 template <typename Function> 543 bool Invalidate(Function&& aRemoveCallback) { 544 // Remove all non-blob recordings from the cache. Invalidate any blob 545 // recordings. 546 bool found = false; 547 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) { 548 NotNull<CachedSurface*> current = WrapNotNull(iter.UserData()); 549 550 found = true; 551 current->InvalidateSurface(); 552 553 if (current->GetSurfaceKey().Flags() & SurfaceFlags::RECORD_BLOB) { 554 continue; 555 } 556 557 aRemoveCallback(current); 558 iter.Remove(); 559 } 560 561 AfterMaybeRemove(); 562 return found; 563 } 564 565 IntSize SuggestedSize(const IntSize& aSize) const { 566 IntSize suggestedSize = SuggestedSizeInternal(aSize); 567 if (mIsVectorImage) { 568 suggestedSize = SurfaceCache::ClampVectorSize(suggestedSize); 569 } 570 return suggestedSize; 571 } 572 573 IntSize SuggestedSizeInternal(const IntSize& aSize) const { 574 // When not in factor of 2 mode, we can always decode at the given size. 575 if (!mFactor2Mode) { 576 return aSize; 577 } 578 579 // We cannot enter factor of 2 mode unless we have a minimum number of 580 // surfaces, and we should have left it if the cache was emptied. 581 if (MOZ_UNLIKELY(IsEmpty())) { 582 MOZ_ASSERT_UNREACHABLE("Should not be empty and in factor of 2 mode!"); 583 return aSize; 584 } 585 586 // This bit of awkwardness gets the largest native size of the image. 587 NotNull<CachedSurface*> firstSurface = 588 WrapNotNull(mSurfaces.ConstIter().UserData()); 589 Image* image = static_cast<Image*>(firstSurface->GetImageKey()); 590 IntSize factorSize; 591 if (NS_FAILED(image->GetWidth(&factorSize.width)) || 592 NS_FAILED(image->GetHeight(&factorSize.height)) || 593 factorSize.IsEmpty()) { 594 // Valid vector images may have a default size of 0x0. In that case, just 595 // assume a default size of 100x100 and apply the intrinsic ratio if 596 // available. If our guess was too small, don't use factor-of-scaling. 597 MOZ_ASSERT(mIsVectorImage); 598 factorSize = IntSize(100, 100); 599 if (AspectRatio aspectRatio = image->GetIntrinsicRatio()) { 600 factorSize.width = 601 NSToIntRound(aspectRatio.ApplyToFloat(float(factorSize.height))); 602 if (factorSize.IsEmpty()) { 603 return aSize; 604 } 605 } 606 } 607 608 if (mIsVectorImage) { 609 // Ensure the aspect ratio matches the native size before forcing the 610 // caller to accept a factor of 2 size. The difference between the aspect 611 // ratios is: 612 // 613 // delta = nativeWidth/nativeHeight - desiredWidth/desiredHeight 614 // 615 // delta*nativeHeight*desiredHeight = nativeWidth*desiredHeight 616 // - desiredWidth*nativeHeight 617 // 618 // Using the maximum accepted delta as a constant, we can avoid the 619 // floating point division and just compare after some integer ops. 620 int32_t delta = 621 factorSize.width * aSize.height - aSize.width * factorSize.height; 622 int32_t maxDelta = (factorSize.height * aSize.height) >> 4; 623 if (delta > maxDelta || delta < -maxDelta) { 624 return aSize; 625 } 626 627 // If the requested size is bigger than the native size, we actually need 628 // to grow the native size instead of shrinking it. 629 if (factorSize.width < aSize.width) { 630 do { 631 IntSize candidate(factorSize.width * 2, factorSize.height * 2); 632 if (!SurfaceCache::IsLegalSize(candidate)) { 633 break; 634 } 635 636 factorSize = candidate; 637 } while (factorSize.width < aSize.width); 638 639 return factorSize; 640 } 641 642 // Otherwise we can find the best fit as normal. 643 } 644 645 // Start with the native size as the best first guess. 646 IntSize bestSize = factorSize; 647 factorSize.width /= 2; 648 factorSize.height /= 2; 649 650 while (!factorSize.IsEmpty()) { 651 if (!CompareArea(aSize, bestSize, factorSize)) { 652 // This size is not better than the last. Since we proceed from largest 653 // to smallest, we know that the next size will not be better if the 654 // previous size was rejected. Break early. 655 break; 656 } 657 658 // The current factor of 2 size is better than the last selected size. 659 bestSize = factorSize; 660 factorSize.width /= 2; 661 factorSize.height /= 2; 662 } 663 664 return bestSize; 665 } 666 667 bool CompareArea(const IntSize& aIdealSize, const IntSize& aBestSize, 668 const IntSize& aSize) const { 669 // Compare sizes. We use an area-based heuristic here instead of computing a 670 // truly optimal answer, since it seems very unlikely to make a difference 671 // for realistic sizes. 672 int64_t idealArea = AreaOfIntSize(aIdealSize); 673 int64_t currentArea = AreaOfIntSize(aSize); 674 int64_t bestMatchArea = AreaOfIntSize(aBestSize); 675 676 // If the best match is smaller than the ideal size, prefer bigger sizes. 677 if (bestMatchArea < idealArea) { 678 if (currentArea > bestMatchArea) { 679 return true; 680 } 681 return false; 682 } 683 684 // Other, prefer sizes closer to the ideal size, but still not smaller. 685 if (idealArea <= currentArea && currentArea < bestMatchArea) { 686 return true; 687 } 688 689 // This surface isn't an improvement over the current best match. 690 return false; 691 } 692 693 template <typename Function> 694 void CollectSizeOfSurfaces(nsTArray<SurfaceMemoryCounter>& aCounters, 695 MallocSizeOf aMallocSizeOf, 696 Function&& aRemoveCallback) { 697 CachedSurface::SurfaceMemoryReport report(aCounters, aMallocSizeOf); 698 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) { 699 NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData()); 700 701 // We don't need the drawable surface for ourselves, but adding a surface 702 // to the report will trigger this indirectly. If the surface was 703 // discarded by the OS because it was in volatile memory, we should remove 704 // it from the cache immediately rather than include it in the report. 705 DrawableSurface drawableSurface; 706 if (!surface->IsPlaceholder()) { 707 drawableSurface = surface->GetDrawableSurface(); 708 if (!drawableSurface) { 709 aRemoveCallback(surface); 710 iter.Remove(); 711 continue; 712 } 713 } 714 715 const IntSize& size = surface->GetSurfaceKey().Size(); 716 bool factor2Size = false; 717 if (mFactor2Mode) { 718 factor2Size = (size == SuggestedSize(size)); 719 } 720 report.Add(surface, factor2Size); 721 } 722 723 AfterMaybeRemove(); 724 } 725 726 void SetLocked(bool aLocked) { mLocked = aLocked; } 727 bool IsLocked() const { return mLocked; } 728 729 private: 730 void AfterMaybeRemove() { 731 if (IsEmpty() && mFactor2Mode) { 732 // The last surface for this cache was removed. This can happen if the 733 // surface was stored in a volatile buffer and got purged, or the surface 734 // expired from the cache. If the cache itself lingers for some reason 735 // (e.g. in the process of performing a lookup, the cache itself is 736 // locked), then we need to reset the factor of 2 state because it 737 // requires at least one surface present to get the native size 738 // information from the image. 739 mFactor2Mode = mFactor2Pruned = false; 740 } 741 } 742 743 SurfaceTable mSurfaces; 744 745 bool mLocked; 746 747 // True in "factor of 2" mode. 748 bool mFactor2Mode; 749 750 // True if all non-factor of 2 surfaces have been removed from the cache. Note 751 // that this excludes unsubstitutable sizes. 752 bool mFactor2Pruned; 753 754 // True if the surfaces are produced from a vector image. If so, it must match 755 // the aspect ratio when using factor of 2 mode. 756 bool mIsVectorImage; 757 }; 758 759 /** 760 * SurfaceCacheImpl is responsible for determining which surfaces will be cached 761 * and managing the surface cache data structures. Rather than interact with 762 * SurfaceCacheImpl directly, client code interacts with SurfaceCache, which 763 * maintains high-level invariants and encapsulates the details of the surface 764 * cache's implementation. 765 */ 766 class SurfaceCacheImpl final : public nsIMemoryReporter { 767 public: 768 NS_DECL_ISUPPORTS 769 770 SurfaceCacheImpl(uint32_t aSurfaceCacheExpirationTimeMS, 771 uint32_t aSurfaceCacheDiscardFactor, 772 uint32_t aSurfaceCacheSize) 773 : mExpirationTracker(aSurfaceCacheExpirationTimeMS), 774 mMemoryPressureObserver(new MemoryPressureObserver), 775 mDiscardFactor(aSurfaceCacheDiscardFactor), 776 mMaxCost(aSurfaceCacheSize), 777 mAvailableCost(aSurfaceCacheSize), 778 mLockedCost(0), 779 mOverflowCount(0), 780 mAlreadyPresentCount(0), 781 mTableFailureCount(0), 782 mTrackingFailureCount(0) { 783 nsCOMPtr<nsIObserverService> os = services::GetObserverService(); 784 if (os) { 785 os->AddObserver(mMemoryPressureObserver, "memory-pressure", false); 786 } 787 } 788 789 private: 790 virtual ~SurfaceCacheImpl() { 791 nsCOMPtr<nsIObserverService> os = services::GetObserverService(); 792 if (os) { 793 os->RemoveObserver(mMemoryPressureObserver, "memory-pressure"); 794 } 795 796 UnregisterWeakMemoryReporter(this); 797 } 798 799 public: 800 void InitMemoryReporter() { RegisterWeakMemoryReporter(this); } 801 802 InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider, bool aSetAvailable, 803 const StaticMutexAutoLock& aAutoLock) { 804 // If this is a duplicate surface, refuse to replace the original. 805 // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup 806 // twice. We'll make this more efficient in bug 1185137. 807 LookupResult result = 808 Lookup(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock, 809 /* aMarkUsed = */ false); 810 if (MOZ_UNLIKELY(result)) { 811 mAlreadyPresentCount++; 812 return InsertOutcome::FAILURE_ALREADY_PRESENT; 813 } 814 815 if (result.Type() == MatchType::PENDING) { 816 RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), 817 aAutoLock); 818 } 819 820 MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND || 821 result.Type() == MatchType::PENDING, 822 "A LookupResult with no surface should be NOT_FOUND or PENDING"); 823 824 // If this is bigger than we can hold after discarding everything we can, 825 // refuse to cache it. 826 Cost cost = aProvider->LogicalSizeInBytes(); 827 if (MOZ_UNLIKELY(!CanHoldAfterDiscarding(cost))) { 828 mOverflowCount++; 829 return InsertOutcome::FAILURE; 830 } 831 832 // Remove elements in order of cost until we can fit this in the cache. Note 833 // that locked surfaces aren't in mCosts, so we never remove them here. 834 while (cost > mAvailableCost) { 835 MOZ_ASSERT(!mCosts.IsEmpty(), 836 "Removed everything and it still won't fit"); 837 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true, 838 aAutoLock); 839 } 840 841 // Locate the appropriate per-image cache. If there's not an existing cache 842 // for this image, create it. 843 const ImageKey imageKey = aProvider->GetImageKey(); 844 RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey); 845 if (!cache) { 846 cache = new ImageSurfaceCache(imageKey); 847 if (!mImageCaches.InsertOrUpdate(aProvider->GetImageKey(), RefPtr{cache}, 848 fallible)) { 849 mTableFailureCount++; 850 return InsertOutcome::FAILURE; 851 } 852 } 853 854 // If we were asked to mark the cache entry available, do so. 855 if (aSetAvailable) { 856 aProvider->Availability().SetAvailable(); 857 } 858 859 auto surface = MakeNotNull<RefPtr<CachedSurface>>(aProvider); 860 861 // We require that locking succeed if the image is locked and we're not 862 // inserting a placeholder; the caller may need to know this to handle 863 // errors correctly. 864 bool mustLock = cache->IsLocked() && !surface->IsPlaceholder(); 865 if (mustLock) { 866 surface->SetLocked(true); 867 if (!surface->IsLocked()) { 868 return InsertOutcome::FAILURE; 869 } 870 } 871 872 // Insert. 873 MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost"); 874 if (!cache->Insert(surface)) { 875 mTableFailureCount++; 876 if (mustLock) { 877 surface->SetLocked(false); 878 } 879 return InsertOutcome::FAILURE; 880 } 881 882 if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) { 883 MOZ_ASSERT(!mustLock); 884 Remove(surface, /* aStopTracking */ false, aAutoLock); 885 return InsertOutcome::FAILURE; 886 } 887 888 return InsertOutcome::SUCCESS; 889 } 890 891 void Remove(NotNull<CachedSurface*> aSurface, bool aStopTracking, 892 const StaticMutexAutoLock& aAutoLock) { 893 ImageKey imageKey = aSurface->GetImageKey(); 894 895 RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey); 896 MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache"); 897 898 // If the surface was not a placeholder, tell its image that we discarded 899 // it. 900 if (!aSurface->IsPlaceholder()) { 901 static_cast<Image*>(imageKey)->OnSurfaceDiscarded( 902 aSurface->GetSurfaceKey()); 903 } 904 905 // If we failed during StartTracking, we can skip this step. 906 if (aStopTracking) { 907 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock); 908 } 909 910 // Individual surfaces must be freed outside the lock. 911 mCachedSurfacesDiscard.AppendElement(cache->Remove(aSurface)); 912 913 MaybeRemoveEmptyCache(imageKey, cache); 914 } 915 916 bool StartTracking(NotNull<CachedSurface*> aSurface, 917 const StaticMutexAutoLock& aAutoLock) { 918 CostEntry costEntry = aSurface->GetCostEntry(); 919 MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost, 920 "Cost too large and the caller didn't catch it"); 921 922 if (aSurface->IsLocked()) { 923 mLockedCost += costEntry.GetCost(); 924 MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?"); 925 } else { 926 if (NS_WARN_IF(!mCosts.InsertElementSorted(costEntry, fallible))) { 927 mTrackingFailureCount++; 928 return false; 929 } 930 931 // This may fail during XPCOM shutdown, so we need to ensure the object is 932 // tracked before calling RemoveObject in StopTracking. 933 nsresult rv = mExpirationTracker.AddObjectLocked(aSurface, aAutoLock); 934 if (NS_WARN_IF(NS_FAILED(rv))) { 935 DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry); 936 MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface"); 937 mTrackingFailureCount++; 938 return false; 939 } 940 } 941 942 mAvailableCost -= costEntry.GetCost(); 943 return true; 944 } 945 946 void StopTracking(NotNull<CachedSurface*> aSurface, bool aIsTracked, 947 const StaticMutexAutoLock& aAutoLock) { 948 CostEntry costEntry = aSurface->GetCostEntry(); 949 950 if (aSurface->IsLocked()) { 951 MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance"); 952 mLockedCost -= costEntry.GetCost(); 953 // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n). 954 MOZ_ASSERT(!mCosts.Contains(costEntry), 955 "Shouldn't have a cost entry for a locked surface"); 956 } else { 957 if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) { 958 MOZ_ASSERT(aIsTracked, "Expiration-tracking a surface unexpectedly!"); 959 mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock); 960 } else { 961 // Our call to AddObject must have failed in StartTracking; most likely 962 // we're in XPCOM shutdown right now. 963 MOZ_ASSERT(!aIsTracked, "Not expiration-tracking an unlocked surface!"); 964 } 965 966 DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry); 967 MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface"); 968 } 969 970 mAvailableCost += costEntry.GetCost(); 971 MOZ_ASSERT(mAvailableCost <= mMaxCost, 972 "More available cost than we started with"); 973 } 974 975 LookupResult Lookup(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey, 976 const StaticMutexAutoLock& aAutoLock, bool aMarkUsed) { 977 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 978 if (!cache) { 979 // No cached surfaces for this image. 980 return LookupResult(MatchType::NOT_FOUND); 981 } 982 983 RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey, aMarkUsed); 984 if (!surface) { 985 // Lookup in the per-image cache missed. 986 return LookupResult(MatchType::NOT_FOUND); 987 } 988 989 if (surface->IsPlaceholder()) { 990 return LookupResult(MatchType::PENDING); 991 } 992 993 DrawableSurface drawableSurface = surface->GetDrawableSurface(); 994 if (!drawableSurface) { 995 // The surface was released by the operating system. Remove the cache 996 // entry as well. 997 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock); 998 return LookupResult(MatchType::NOT_FOUND); 999 } 1000 1001 if (aMarkUsed && 1002 !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) { 1003 Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock); 1004 return LookupResult(MatchType::NOT_FOUND); 1005 } 1006 1007 MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey, 1008 "Lookup() not returning an exact match?"); 1009 return LookupResult(std::move(drawableSurface), MatchType::EXACT); 1010 } 1011 1012 LookupResult LookupBestMatch(const ImageKey aImageKey, 1013 const SurfaceKey& aSurfaceKey, 1014 const StaticMutexAutoLock& aAutoLock, 1015 bool aMarkUsed) { 1016 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1017 if (!cache) { 1018 // No cached surfaces for this image. 1019 return LookupResult( 1020 MatchType::NOT_FOUND, 1021 SurfaceCache::ClampSize(aImageKey, aSurfaceKey.Size())); 1022 } 1023 1024 // Repeatedly look up the best match, trying again if the resulting surface 1025 // has been freed by the operating system, until we can either lock a 1026 // surface for drawing or there are no matching surfaces left. 1027 // XXX(seth): This is O(N^2), but N is expected to be very small. If we 1028 // encounter a performance problem here we can revisit this. 1029 1030 RefPtr<CachedSurface> surface; 1031 DrawableSurface drawableSurface; 1032 MatchType matchType = MatchType::NOT_FOUND; 1033 IntSize suggestedSize; 1034 while (true) { 1035 std::tie(surface, matchType, suggestedSize) = 1036 cache->LookupBestMatch(aSurfaceKey); 1037 1038 if (!surface) { 1039 return LookupResult( 1040 matchType, suggestedSize); // Lookup in the per-image cache missed. 1041 } 1042 1043 drawableSurface = surface->GetDrawableSurface(); 1044 if (drawableSurface) { 1045 break; 1046 } 1047 1048 // The surface was released by the operating system. Remove the cache 1049 // entry as well. 1050 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock); 1051 } 1052 1053 MOZ_ASSERT_IF(matchType == MatchType::EXACT, 1054 surface->GetSurfaceKey() == aSurfaceKey); 1055 MOZ_ASSERT_IF( 1056 matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND || 1057 matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING, 1058 surface->GetSurfaceKey().Region() == aSurfaceKey.Region() && 1059 surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() && 1060 surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() && 1061 surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags()); 1062 1063 if (matchType == MatchType::EXACT || 1064 matchType == MatchType::SUBSTITUTE_BECAUSE_BEST) { 1065 if (aMarkUsed && 1066 !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) { 1067 Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock); 1068 } 1069 } 1070 1071 return LookupResult(std::move(drawableSurface), matchType, suggestedSize); 1072 } 1073 1074 bool CanHold(const Cost aCost) const { return aCost <= mMaxCost; } 1075 1076 size_t MaximumCapacity() const { return size_t(mMaxCost); } 1077 1078 void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider, 1079 const StaticMutexAutoLock& aAutoLock) { 1080 if (!aProvider->Availability().IsPlaceholder()) { 1081 MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder"); 1082 return; 1083 } 1084 1085 // Reinsert the provider, requesting that Insert() mark it available. This 1086 // may or may not succeed, depending on whether some other decoder has 1087 // beaten us to the punch and inserted a non-placeholder version of this 1088 // surface first, but it's fine either way. 1089 // XXX(seth): This could be implemented more efficiently; we should be able 1090 // to just update our data structures without reinserting. 1091 Insert(aProvider, /* aSetAvailable = */ true, aAutoLock); 1092 } 1093 1094 void LockImage(const ImageKey aImageKey) { 1095 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1096 if (!cache) { 1097 cache = new ImageSurfaceCache(aImageKey); 1098 mImageCaches.InsertOrUpdate(aImageKey, RefPtr{cache}); 1099 } 1100 1101 cache->SetLocked(true); 1102 1103 // We don't relock this image's existing surfaces right away; instead, the 1104 // image should arrange for Lookup() to touch them if they are still useful. 1105 } 1106 1107 void UnlockImage(const ImageKey aImageKey, 1108 const StaticMutexAutoLock& aAutoLock) { 1109 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1110 if (!cache || !cache->IsLocked()) { 1111 return; // Already unlocked. 1112 } 1113 1114 cache->SetLocked(false); 1115 DoUnlockSurfaces(WrapNotNull(cache), /* aStaticOnly = */ false, aAutoLock); 1116 } 1117 1118 void UnlockEntries(const ImageKey aImageKey, 1119 const StaticMutexAutoLock& aAutoLock) { 1120 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1121 if (!cache || !cache->IsLocked()) { 1122 return; // Already unlocked. 1123 } 1124 1125 // (Note that we *don't* unlock the per-image cache here; that's the 1126 // difference between this and UnlockImage.) 1127 DoUnlockSurfaces(WrapNotNull(cache), 1128 /* aStaticOnly = */ 1129 !StaticPrefs::image_mem_animated_discardable_AtStartup(), 1130 aAutoLock); 1131 } 1132 1133 already_AddRefed<ImageSurfaceCache> RemoveImage( 1134 const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock) { 1135 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1136 if (!cache) { 1137 return nullptr; // No cached surfaces for this image, so nothing to do. 1138 } 1139 1140 // Discard all of the cached surfaces for this image. 1141 // XXX(seth): This is O(n^2) since for each item in the cache we are 1142 // removing an element from the costs array. Since n is expected to be 1143 // small, performance should be good, but if usage patterns change we should 1144 // change the data structure used for mCosts. 1145 for (const auto& value : cache->Values()) { 1146 StopTracking(WrapNotNull(value), 1147 /* aIsTracked */ true, aAutoLock); 1148 } 1149 1150 // The per-image cache isn't needed anymore, so remove it as well. 1151 // This implicitly unlocks the image if it was locked. 1152 mImageCaches.Remove(aImageKey); 1153 1154 // Since we did not actually remove any of the surfaces from the cache 1155 // itself, only stopped tracking them, we should free it outside the lock. 1156 return cache.forget(); 1157 } 1158 1159 void PruneImage(const ImageKey aImageKey, 1160 const StaticMutexAutoLock& aAutoLock) { 1161 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1162 if (!cache) { 1163 return; // No cached surfaces for this image, so nothing to do. 1164 } 1165 1166 cache->Prune([this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void { 1167 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock); 1168 // Individual surfaces must be freed outside the lock. 1169 mCachedSurfacesDiscard.AppendElement(aSurface); 1170 }); 1171 1172 MaybeRemoveEmptyCache(aImageKey, cache); 1173 } 1174 1175 bool InvalidateImage(const ImageKey aImageKey, 1176 const StaticMutexAutoLock& aAutoLock) { 1177 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1178 if (!cache) { 1179 return false; // No cached surfaces for this image, so nothing to do. 1180 } 1181 1182 bool rv = cache->Invalidate( 1183 [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void { 1184 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock); 1185 // Individual surfaces must be freed outside the lock. 1186 mCachedSurfacesDiscard.AppendElement(aSurface); 1187 }); 1188 1189 MaybeRemoveEmptyCache(aImageKey, cache); 1190 return rv; 1191 } 1192 1193 void DiscardAll(const StaticMutexAutoLock& aAutoLock) { 1194 // Remove in order of cost because mCosts is an array and the other data 1195 // structures are all hash tables. Note that locked surfaces are not 1196 // removed, since they aren't present in mCosts. 1197 while (!mCosts.IsEmpty()) { 1198 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true, 1199 aAutoLock); 1200 } 1201 } 1202 1203 void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock) { 1204 // Compute our discardable cost. Since locked surfaces aren't discardable, 1205 // we exclude them. 1206 const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost; 1207 MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up"); 1208 1209 // Our target is to raise our available cost by (1 / mDiscardFactor) of our 1210 // discardable cost - in other words, we want to end up with about 1211 // (discardableCost / mDiscardFactor) fewer bytes stored in the surface 1212 // cache after we're done. 1213 const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor); 1214 1215 if (targetCost > mMaxCost - mLockedCost) { 1216 MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard"); 1217 DiscardAll(aAutoLock); 1218 return; 1219 } 1220 1221 // Discard surfaces until we've reduced our cost to our target cost. 1222 while (mAvailableCost < targetCost) { 1223 MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done"); 1224 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true, 1225 aAutoLock); 1226 } 1227 } 1228 1229 void TakeDiscard(nsTArray<RefPtr<CachedSurface>>& aDiscard, 1230 const StaticMutexAutoLock& aAutoLock) { 1231 MOZ_ASSERT(aDiscard.IsEmpty()); 1232 aDiscard = std::move(mCachedSurfacesDiscard); 1233 } 1234 1235 already_AddRefed<CachedSurface> GetSurfaceForResetAnimation( 1236 const ImageKey aImageKey, const SurfaceKey& aSurfaceKey, 1237 const StaticMutexAutoLock& aAutoLock) { 1238 RefPtr<CachedSurface> surface; 1239 1240 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1241 if (!cache) { 1242 // No cached surfaces for this image. 1243 return surface.forget(); 1244 } 1245 1246 surface = cache->Lookup(aSurfaceKey, /* aForAccess = */ false); 1247 return surface.forget(); 1248 } 1249 1250 void LockSurface(NotNull<CachedSurface*> aSurface, 1251 const StaticMutexAutoLock& aAutoLock) { 1252 if (aSurface->IsPlaceholder() || aSurface->IsLocked()) { 1253 return; 1254 } 1255 1256 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock); 1257 1258 // Lock the surface. This can fail. 1259 aSurface->SetLocked(true); 1260 DebugOnly<bool> tracked = StartTracking(aSurface, aAutoLock); 1261 MOZ_ASSERT(tracked); 1262 } 1263 1264 size_t ShallowSizeOfIncludingThis( 1265 MallocSizeOf aMallocSizeOf, const StaticMutexAutoLock& aAutoLock) const { 1266 size_t bytes = 1267 aMallocSizeOf(this) + mCosts.ShallowSizeOfExcludingThis(aMallocSizeOf) + 1268 mImageCaches.ShallowSizeOfExcludingThis(aMallocSizeOf) + 1269 mCachedSurfacesDiscard.ShallowSizeOfExcludingThis(aMallocSizeOf) + 1270 mExpirationTracker.ShallowSizeOfExcludingThis(aMallocSizeOf); 1271 for (const auto& data : mImageCaches.Values()) { 1272 bytes += data->ShallowSizeOfIncludingThis(aMallocSizeOf); 1273 } 1274 return bytes; 1275 } 1276 1277 NS_IMETHOD 1278 CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData, 1279 bool aAnonymize) override { 1280 StaticMutexAutoLock lock(sInstanceMutex); 1281 1282 uint32_t lockedImageCount = 0; 1283 uint32_t totalSurfaceCount = 0; 1284 uint32_t lockedSurfaceCount = 0; 1285 for (const auto& cache : mImageCaches.Values()) { 1286 totalSurfaceCount += cache->Count(); 1287 if (cache->IsLocked()) { 1288 ++lockedImageCount; 1289 } 1290 for (const auto& value : cache->Values()) { 1291 if (value->IsLocked()) { 1292 ++lockedSurfaceCount; 1293 } 1294 } 1295 } 1296 1297 // clang-format off 1298 // We have explicit memory reporting for the surface cache which is more 1299 // accurate than the cost metrics we report here, but these metrics are 1300 // still useful to report, since they control the cache's behavior. 1301 MOZ_COLLECT_REPORT( 1302 "explicit/images/cache/overhead", KIND_HEAP, UNITS_BYTES, 1303 ShallowSizeOfIncludingThis(SurfaceCacheMallocSizeOf, lock), 1304 "Memory used by the surface cache data structures, excluding surface data."); 1305 1306 MOZ_COLLECT_REPORT( 1307 "imagelib-surface-cache-estimated-total", 1308 KIND_OTHER, UNITS_BYTES, (mMaxCost - mAvailableCost), 1309 "Estimated total memory used by the imagelib surface cache."); 1310 1311 MOZ_COLLECT_REPORT( 1312 "imagelib-surface-cache-estimated-locked", 1313 KIND_OTHER, UNITS_BYTES, mLockedCost, 1314 "Estimated memory used by locked surfaces in the imagelib surface cache."); 1315 1316 MOZ_COLLECT_REPORT( 1317 "imagelib-surface-cache-tracked-cost-count", 1318 KIND_OTHER, UNITS_COUNT, mCosts.Length(), 1319 "Total number of surfaces tracked for cost (and expiry) in the imagelib surface cache."); 1320 1321 MOZ_COLLECT_REPORT( 1322 "imagelib-surface-cache-tracked-expiry-count", 1323 KIND_OTHER, UNITS_COUNT, mExpirationTracker.Length(lock), 1324 "Total number of surfaces tracked for expiry (and cost) in the imagelib surface cache."); 1325 1326 MOZ_COLLECT_REPORT( 1327 "imagelib-surface-cache-image-count", 1328 KIND_OTHER, UNITS_COUNT, mImageCaches.Count(), 1329 "Total number of images in the imagelib surface cache."); 1330 1331 MOZ_COLLECT_REPORT( 1332 "imagelib-surface-cache-locked-image-count", 1333 KIND_OTHER, UNITS_COUNT, lockedImageCount, 1334 "Total number of locked images in the imagelib surface cache."); 1335 1336 MOZ_COLLECT_REPORT( 1337 "imagelib-surface-cache-image-surface-count", 1338 KIND_OTHER, UNITS_COUNT, totalSurfaceCount, 1339 "Total number of surfaces in the imagelib surface cache."); 1340 1341 MOZ_COLLECT_REPORT( 1342 "imagelib-surface-cache-locked-surfaces-count", 1343 KIND_OTHER, UNITS_COUNT, lockedSurfaceCount, 1344 "Total number of locked surfaces in the imagelib surface cache."); 1345 1346 MOZ_COLLECT_REPORT( 1347 "imagelib-surface-cache-overflow-count", 1348 KIND_OTHER, UNITS_COUNT, mOverflowCount, 1349 "Count of how many times the surface cache has hit its capacity and been " 1350 "unable to insert a new surface."); 1351 1352 MOZ_COLLECT_REPORT( 1353 "imagelib-surface-cache-tracking-failure-count", 1354 KIND_OTHER, UNITS_COUNT, mTrackingFailureCount, 1355 "Count of how many times the surface cache has failed to begin tracking a " 1356 "given surface."); 1357 1358 MOZ_COLLECT_REPORT( 1359 "imagelib-surface-cache-already-present-count", 1360 KIND_OTHER, UNITS_COUNT, mAlreadyPresentCount, 1361 "Count of how many times the surface cache has failed to insert a surface " 1362 "because it is already present."); 1363 1364 MOZ_COLLECT_REPORT( 1365 "imagelib-surface-cache-table-failure-count", 1366 KIND_OTHER, UNITS_COUNT, mTableFailureCount, 1367 "Count of how many times the surface cache has failed to insert a surface " 1368 "because a hash table could not accept an entry."); 1369 // clang-format on 1370 1371 return NS_OK; 1372 } 1373 1374 void CollectSizeOfSurfaces(const ImageKey aImageKey, 1375 nsTArray<SurfaceMemoryCounter>& aCounters, 1376 MallocSizeOf aMallocSizeOf, 1377 const StaticMutexAutoLock& aAutoLock) { 1378 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1379 if (!cache) { 1380 return; // No surfaces for this image. 1381 } 1382 1383 // Report all surfaces in the per-image cache. 1384 cache->CollectSizeOfSurfaces( 1385 aCounters, aMallocSizeOf, 1386 [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void { 1387 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock); 1388 // Individual surfaces must be freed outside the lock. 1389 mCachedSurfacesDiscard.AppendElement(aSurface); 1390 }); 1391 1392 MaybeRemoveEmptyCache(aImageKey, cache); 1393 } 1394 1395 void ReleaseImageOnMainThread(already_AddRefed<image::Image>&& aImage, 1396 const StaticMutexAutoLock& aAutoLock) { 1397 RefPtr<image::Image> image = aImage; 1398 if (!image) { 1399 return; 1400 } 1401 1402 bool needsDispatch = mReleasingImagesOnMainThread.IsEmpty(); 1403 mReleasingImagesOnMainThread.AppendElement(image); 1404 1405 if (!needsDispatch || 1406 AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownFinal)) { 1407 // Either there is already a ongoing task for ClearReleasingImages() or 1408 // it's too late in shutdown to dispatch. 1409 return; 1410 } 1411 1412 NS_DispatchToMainThread(NS_NewRunnableFunction( 1413 "SurfaceCacheImpl::ReleaseImageOnMainThread", 1414 []() -> void { SurfaceCache::ClearReleasingImages(); })); 1415 } 1416 1417 void TakeReleasingImages(nsTArray<RefPtr<image::Image>>& aImage, 1418 const StaticMutexAutoLock& aAutoLock) { 1419 MOZ_ASSERT(NS_IsMainThread()); 1420 aImage.SwapElements(mReleasingImagesOnMainThread); 1421 } 1422 1423 private: 1424 already_AddRefed<ImageSurfaceCache> GetImageCache(const ImageKey aImageKey) { 1425 RefPtr<ImageSurfaceCache> imageCache; 1426 mImageCaches.Get(aImageKey, getter_AddRefs(imageCache)); 1427 return imageCache.forget(); 1428 } 1429 1430 void MaybeRemoveEmptyCache(const ImageKey aImageKey, 1431 ImageSurfaceCache* aCache) { 1432 // Remove the per-image cache if it's unneeded now. Keep it if the image is 1433 // locked, since the per-image cache is where we store that state. Note that 1434 // we don't push it into mImageCachesDiscard because all of its surfaces 1435 // have been removed, so it is safe to free while holding the lock. 1436 if (aCache->IsEmpty() && !aCache->IsLocked()) { 1437 mImageCaches.Remove(aImageKey); 1438 } 1439 } 1440 1441 // This is similar to CanHold() except that it takes into account the costs of 1442 // locked surfaces. It's used internally in Insert(), but it's not exposed 1443 // publicly because we permit multithreaded access to the surface cache, which 1444 // means that the result would be meaningless: another thread could insert a 1445 // surface or lock an image at any time. 1446 bool CanHoldAfterDiscarding(const Cost aCost) const { 1447 return aCost <= mMaxCost - mLockedCost; 1448 } 1449 1450 bool MarkUsed(NotNull<CachedSurface*> aSurface, 1451 NotNull<ImageSurfaceCache*> aCache, 1452 const StaticMutexAutoLock& aAutoLock) { 1453 if (aCache->IsLocked()) { 1454 LockSurface(aSurface, aAutoLock); 1455 return true; 1456 } 1457 1458 nsresult rv = mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock); 1459 if (NS_WARN_IF(NS_FAILED(rv))) { 1460 // If mark used fails, it is because it failed to reinsert the surface 1461 // after removing it from the tracker. Thus we need to update our 1462 // own accounting but otherwise expect it to be untracked. 1463 StopTracking(aSurface, /* aIsTracked */ false, aAutoLock); 1464 return false; 1465 } 1466 return true; 1467 } 1468 1469 void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache, bool aStaticOnly, 1470 const StaticMutexAutoLock& aAutoLock) { 1471 AutoTArray<NotNull<CachedSurface*>, 8> discard; 1472 1473 // Unlock all the surfaces the per-image cache is holding. 1474 for (const auto& value : aCache->Values()) { 1475 NotNull<CachedSurface*> surface = WrapNotNull(value); 1476 if (surface->IsPlaceholder() || !surface->IsLocked()) { 1477 continue; 1478 } 1479 if (aStaticOnly && 1480 surface->GetSurfaceKey().Playback() != PlaybackType::eStatic) { 1481 continue; 1482 } 1483 StopTracking(surface, /* aIsTracked */ true, aAutoLock); 1484 surface->SetLocked(false); 1485 if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) { 1486 discard.AppendElement(surface); 1487 } 1488 } 1489 1490 // Discard any that we failed to track. 1491 for (auto iter = discard.begin(); iter != discard.end(); ++iter) { 1492 Remove(*iter, /* aStopTracking */ false, aAutoLock); 1493 } 1494 } 1495 1496 void RemoveEntry(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey, 1497 const StaticMutexAutoLock& aAutoLock) { 1498 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey); 1499 if (!cache) { 1500 return; // No cached surfaces for this image. 1501 } 1502 1503 RefPtr<CachedSurface> surface = 1504 cache->Lookup(aSurfaceKey, /* aForAccess = */ false); 1505 if (!surface) { 1506 return; // Lookup in the per-image cache missed. 1507 } 1508 1509 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock); 1510 } 1511 1512 class SurfaceTracker final 1513 : public ExpirationTrackerImpl<CachedSurface, 2, StaticMutex, 1514 StaticMutexAutoLock> { 1515 public: 1516 explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS) 1517 : ExpirationTrackerImpl<CachedSurface, 2, StaticMutex, 1518 StaticMutexAutoLock>( 1519 aSurfaceCacheExpirationTimeMS, "SurfaceTracker"_ns) {} 1520 1521 protected: 1522 void NotifyExpiredLocked(CachedSurface* aSurface, 1523 const StaticMutexAutoLock& aAutoLock) override { 1524 sInstance->Remove(WrapNotNull(aSurface), /* aStopTracking */ true, 1525 aAutoLock); 1526 } 1527 1528 void NotifyHandlerEndLocked(const StaticMutexAutoLock& aAutoLock) override { 1529 sInstance->TakeDiscard(mDiscard, aAutoLock); 1530 } 1531 1532 void NotifyHandlerEnd() override { 1533 nsTArray<RefPtr<CachedSurface>> discard(std::move(mDiscard)); 1534 } 1535 1536 StaticMutex& GetMutex() override { return sInstanceMutex; } 1537 1538 nsTArray<RefPtr<CachedSurface>> mDiscard; 1539 }; 1540 1541 class MemoryPressureObserver final : public nsIObserver { 1542 public: 1543 NS_DECL_ISUPPORTS 1544 1545 NS_IMETHOD Observe(nsISupports*, const char* aTopic, 1546 const char16_t*) override { 1547 nsTArray<RefPtr<CachedSurface>> discard; 1548 { 1549 StaticMutexAutoLock lock(sInstanceMutex); 1550 if (sInstance && strcmp(aTopic, "memory-pressure") == 0) { 1551 sInstance->DiscardForMemoryPressure(lock); 1552 sInstance->TakeDiscard(discard, lock); 1553 } 1554 } 1555 return NS_OK; 1556 } 1557 1558 private: 1559 virtual ~MemoryPressureObserver() {} 1560 }; 1561 1562 nsTArray<CostEntry> mCosts; 1563 nsRefPtrHashtable<nsPtrHashKey<Image>, ImageSurfaceCache> mImageCaches; 1564 nsTArray<RefPtr<CachedSurface>> mCachedSurfacesDiscard; 1565 SurfaceTracker mExpirationTracker; 1566 RefPtr<MemoryPressureObserver> mMemoryPressureObserver; 1567 nsTArray<RefPtr<image::Image>> mReleasingImagesOnMainThread; 1568 const uint32_t mDiscardFactor; 1569 const Cost mMaxCost; 1570 Cost mAvailableCost; 1571 Cost mLockedCost; 1572 size_t mOverflowCount; 1573 size_t mAlreadyPresentCount; 1574 size_t mTableFailureCount; 1575 size_t mTrackingFailureCount; 1576 }; 1577 1578 NS_IMPL_ISUPPORTS(SurfaceCacheImpl, nsIMemoryReporter) 1579 NS_IMPL_ISUPPORTS(SurfaceCacheImpl::MemoryPressureObserver, nsIObserver) 1580 1581 /////////////////////////////////////////////////////////////////////////////// 1582 // Public API 1583 /////////////////////////////////////////////////////////////////////////////// 1584 1585 /* static */ 1586 void SurfaceCache::Initialize() { 1587 // Initialize preferences. 1588 MOZ_ASSERT(NS_IsMainThread()); 1589 MOZ_ASSERT(!sInstance, "Shouldn't initialize more than once"); 1590 1591 // See StaticPrefs for the default values of these preferences. 1592 1593 // Length of time before an unused surface is removed from the cache, in 1594 // milliseconds. 1595 uint32_t surfaceCacheExpirationTimeMS = 1596 StaticPrefs::image_mem_surfacecache_min_expiration_ms_AtStartup(); 1597 1598 // What fraction of the memory used by the surface cache we should discard 1599 // when we get a memory pressure notification. This value is interpreted as 1600 // 1/N, so 1 means to discard everything, 2 means to discard about half of the 1601 // memory we're using, and so forth. We clamp it to avoid division by zero. 1602 uint32_t surfaceCacheDiscardFactor = 1603 max(StaticPrefs::image_mem_surfacecache_discard_factor_AtStartup(), 1u); 1604 1605 // Maximum size of the surface cache, in kilobytes. 1606 uint64_t surfaceCacheMaxSizeKB = 1607 StaticPrefs::image_mem_surfacecache_max_size_kb_AtStartup(); 1608 1609 if (sizeof(uintptr_t) <= 4) { 1610 // Limit surface cache to 1 GB if our address space is 32 bit. 1611 surfaceCacheMaxSizeKB = 1024 * 1024; 1612 } 1613 1614 // A knob determining the actual size of the surface cache. Currently the 1615 // cache is (size of main memory) / (surface cache size factor) KB 1616 // or (surface cache max size) KB, whichever is smaller. The formula 1617 // may change in the future, though. 1618 // For example, a value of 4 would yield a 256MB cache on a 1GB machine. 1619 // The smallest machines we are likely to run this code on have 256MB 1620 // of memory, which would yield a 64MB cache on this setting. 1621 // We clamp this value to avoid division by zero. 1622 uint32_t surfaceCacheSizeFactor = 1623 max(StaticPrefs::image_mem_surfacecache_size_factor_AtStartup(), 1u); 1624 1625 // Compute the size of the surface cache. 1626 uint64_t memorySize = PR_GetPhysicalMemorySize(); 1627 if (memorySize == 0) { 1628 #if !defined(__DragonFly__) 1629 MOZ_ASSERT_UNREACHABLE("PR_GetPhysicalMemorySize not implemented here"); 1630 #endif 1631 memorySize = 256 * 1024 * 1024; // Fall back to 256MB. 1632 } 1633 uint64_t proposedSize = memorySize / surfaceCacheSizeFactor; 1634 uint64_t surfaceCacheSizeBytes = 1635 min(proposedSize, surfaceCacheMaxSizeKB * 1024); 1636 uint32_t finalSurfaceCacheSizeBytes = 1637 min(surfaceCacheSizeBytes, uint64_t(UINT32_MAX)); 1638 1639 // Create the surface cache singleton with the requested settings. Note that 1640 // the size is a limit that the cache may not grow beyond, but we do not 1641 // actually allocate any storage for surfaces at this time. 1642 sInstance = new SurfaceCacheImpl(surfaceCacheExpirationTimeMS, 1643 surfaceCacheDiscardFactor, 1644 finalSurfaceCacheSizeBytes); 1645 sInstance->InitMemoryReporter(); 1646 } 1647 1648 /* static */ 1649 void SurfaceCache::Shutdown() { 1650 RefPtr<SurfaceCacheImpl> cache; 1651 { 1652 StaticMutexAutoLock lock(sInstanceMutex); 1653 MOZ_ASSERT(NS_IsMainThread()); 1654 MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?"); 1655 cache = sInstance.forget(); 1656 } 1657 } 1658 1659 /* static */ 1660 LookupResult SurfaceCache::Lookup(const ImageKey aImageKey, 1661 const SurfaceKey& aSurfaceKey, 1662 bool aMarkUsed) { 1663 nsTArray<RefPtr<CachedSurface>> discard; 1664 LookupResult rv(MatchType::NOT_FOUND); 1665 1666 { 1667 StaticMutexAutoLock lock(sInstanceMutex); 1668 if (!sInstance) { 1669 return rv; 1670 } 1671 1672 rv = sInstance->Lookup(aImageKey, aSurfaceKey, lock, aMarkUsed); 1673 sInstance->TakeDiscard(discard, lock); 1674 } 1675 1676 return rv; 1677 } 1678 1679 /* static */ 1680 LookupResult SurfaceCache::LookupBestMatch(const ImageKey aImageKey, 1681 const SurfaceKey& aSurfaceKey, 1682 bool aMarkUsed) { 1683 nsTArray<RefPtr<CachedSurface>> discard; 1684 LookupResult rv(MatchType::NOT_FOUND); 1685 1686 { 1687 StaticMutexAutoLock lock(sInstanceMutex); 1688 if (!sInstance) { 1689 return rv; 1690 } 1691 1692 rv = sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock, aMarkUsed); 1693 sInstance->TakeDiscard(discard, lock); 1694 } 1695 1696 return rv; 1697 } 1698 1699 /* static */ 1700 InsertOutcome SurfaceCache::Insert(NotNull<ISurfaceProvider*> aProvider) { 1701 nsTArray<RefPtr<CachedSurface>> discard; 1702 InsertOutcome rv(InsertOutcome::FAILURE); 1703 1704 { 1705 StaticMutexAutoLock lock(sInstanceMutex); 1706 if (!sInstance) { 1707 return rv; 1708 } 1709 1710 rv = sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock); 1711 sInstance->TakeDiscard(discard, lock); 1712 } 1713 1714 return rv; 1715 } 1716 1717 /* static */ 1718 bool SurfaceCache::CanHold(const IntSize& aSize, 1719 uint32_t aBytesPerPixel /* = 4 */) { 1720 StaticMutexAutoLock lock(sInstanceMutex); 1721 if (!sInstance) { 1722 return false; 1723 } 1724 1725 Cost cost = ComputeCost(aSize, aBytesPerPixel); 1726 return sInstance->CanHold(cost); 1727 } 1728 1729 /* static */ 1730 bool SurfaceCache::CanHold(size_t aSize) { 1731 StaticMutexAutoLock lock(sInstanceMutex); 1732 if (!sInstance) { 1733 return false; 1734 } 1735 1736 return sInstance->CanHold(aSize); 1737 } 1738 1739 /* static */ 1740 void SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider) { 1741 StaticMutexAutoLock lock(sInstanceMutex); 1742 if (!sInstance) { 1743 return; 1744 } 1745 1746 sInstance->SurfaceAvailable(aProvider, lock); 1747 } 1748 1749 /* static */ 1750 void SurfaceCache::LockImage(const ImageKey aImageKey) { 1751 StaticMutexAutoLock lock(sInstanceMutex); 1752 if (sInstance) { 1753 return sInstance->LockImage(aImageKey); 1754 } 1755 } 1756 1757 /* static */ 1758 void SurfaceCache::UnlockImage(const ImageKey aImageKey) { 1759 StaticMutexAutoLock lock(sInstanceMutex); 1760 if (sInstance) { 1761 return sInstance->UnlockImage(aImageKey, lock); 1762 } 1763 } 1764 1765 /* static */ 1766 void SurfaceCache::UnlockEntries(const ImageKey aImageKey) { 1767 StaticMutexAutoLock lock(sInstanceMutex); 1768 if (sInstance) { 1769 return sInstance->UnlockEntries(aImageKey, lock); 1770 } 1771 } 1772 1773 /* static */ 1774 void SurfaceCache::RemoveImage(const ImageKey aImageKey) { 1775 RefPtr<ImageSurfaceCache> discard; 1776 { 1777 StaticMutexAutoLock lock(sInstanceMutex); 1778 if (sInstance) { 1779 discard = sInstance->RemoveImage(aImageKey, lock); 1780 } 1781 } 1782 } 1783 1784 /* static */ 1785 void SurfaceCache::PruneImage(const ImageKey aImageKey) { 1786 nsTArray<RefPtr<CachedSurface>> discard; 1787 { 1788 StaticMutexAutoLock lock(sInstanceMutex); 1789 if (sInstance) { 1790 sInstance->PruneImage(aImageKey, lock); 1791 sInstance->TakeDiscard(discard, lock); 1792 } 1793 } 1794 } 1795 1796 /* static */ 1797 bool SurfaceCache::InvalidateImage(const ImageKey aImageKey) { 1798 nsTArray<RefPtr<CachedSurface>> discard; 1799 bool rv = false; 1800 { 1801 StaticMutexAutoLock lock(sInstanceMutex); 1802 if (sInstance) { 1803 rv = sInstance->InvalidateImage(aImageKey, lock); 1804 sInstance->TakeDiscard(discard, lock); 1805 } 1806 } 1807 return rv; 1808 } 1809 1810 /* static */ 1811 void SurfaceCache::DiscardAll() { 1812 nsTArray<RefPtr<CachedSurface>> discard; 1813 { 1814 StaticMutexAutoLock lock(sInstanceMutex); 1815 if (sInstance) { 1816 sInstance->DiscardAll(lock); 1817 sInstance->TakeDiscard(discard, lock); 1818 } 1819 } 1820 } 1821 1822 /* static */ 1823 void SurfaceCache::ResetAnimation(const ImageKey aImageKey, 1824 const SurfaceKey& aSurfaceKey) { 1825 RefPtr<CachedSurface> surface; 1826 nsTArray<RefPtr<CachedSurface>> discard; 1827 { 1828 StaticMutexAutoLock lock(sInstanceMutex); 1829 if (!sInstance) { 1830 return; 1831 } 1832 1833 surface = 1834 sInstance->GetSurfaceForResetAnimation(aImageKey, aSurfaceKey, lock); 1835 sInstance->TakeDiscard(discard, lock); 1836 } 1837 1838 // Calling Reset will acquire the AnimationSurfaceProvider::mFramesMutex 1839 // mutex. In other places we acquire the mFramesMutex then call into the 1840 // surface cache (acquiring the surface cache mutex), so that determines a 1841 // lock order which we must obey by calling Reset after releasing the surface 1842 // cache mutex. 1843 if (surface) { 1844 DrawableSurface drawableSurface = 1845 surface->GetDrawableSurfaceEvenIfPlaceholder(); 1846 if (drawableSurface) { 1847 MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey, 1848 "ResetAnimation() not returning an exact match?"); 1849 1850 drawableSurface.Reset(); 1851 } 1852 } 1853 } 1854 1855 /* static */ 1856 void SurfaceCache::CollectSizeOfSurfaces( 1857 const ImageKey aImageKey, nsTArray<SurfaceMemoryCounter>& aCounters, 1858 MallocSizeOf aMallocSizeOf) { 1859 nsTArray<RefPtr<CachedSurface>> discard; 1860 { 1861 StaticMutexAutoLock lock(sInstanceMutex); 1862 if (!sInstance) { 1863 return; 1864 } 1865 1866 sInstance->CollectSizeOfSurfaces(aImageKey, aCounters, aMallocSizeOf, lock); 1867 sInstance->TakeDiscard(discard, lock); 1868 } 1869 } 1870 1871 /* static */ 1872 size_t SurfaceCache::MaximumCapacity() { 1873 StaticMutexAutoLock lock(sInstanceMutex); 1874 if (!sInstance) { 1875 return 0; 1876 } 1877 1878 return sInstance->MaximumCapacity(); 1879 } 1880 1881 /* static */ 1882 bool SurfaceCache::IsLegalSize(const IntSize& aSize) { 1883 // reject over-wide or over-tall images 1884 const int32_t k64KLimit = 0x0000FFFF; 1885 if (MOZ_UNLIKELY(aSize.width > k64KLimit || aSize.height > k64KLimit)) { 1886 NS_WARNING("image too big"); 1887 return false; 1888 } 1889 1890 // protect against invalid sizes 1891 if (MOZ_UNLIKELY(aSize.height <= 0 || aSize.width <= 0)) { 1892 return false; 1893 } 1894 1895 // check to make sure we don't overflow a 32-bit 1896 CheckedInt32 requiredBytes = 1897 CheckedInt32(aSize.width) * CheckedInt32(aSize.height) * 4; 1898 if (MOZ_UNLIKELY(!requiredBytes.isValid())) { 1899 NS_WARNING("width or height too large"); 1900 return false; 1901 } 1902 const int32_t maxSize = StaticPrefs::image_mem_max_legal_imgframe_size_kb(); 1903 if (MOZ_UNLIKELY(maxSize > 0 && requiredBytes.value() / 1024 > maxSize)) { 1904 return false; 1905 } 1906 return true; 1907 } 1908 1909 IntSize SurfaceCache::ClampVectorSize(const IntSize& aSize) { 1910 // If we exceed the maximum, we need to scale the size downwards to fit. 1911 // It shouldn't get here if it is significantly larger because 1912 // VectorImage::UseSurfaceCacheForSize should prevent us from requesting 1913 // a rasterized version of a surface greater than 4x the maximum. 1914 int32_t maxSizeKB = 1915 StaticPrefs::image_cache_max_rasterized_svg_threshold_kb(); 1916 if (maxSizeKB <= 0) { 1917 return aSize; 1918 } 1919 1920 int64_t proposedKB = int64_t(aSize.width) * aSize.height / 256; 1921 if (maxSizeKB >= proposedKB) { 1922 return aSize; 1923 } 1924 1925 double scale = sqrt(double(maxSizeKB) / proposedKB); 1926 return IntSize(int32_t(scale * aSize.width), int32_t(scale * aSize.height)); 1927 } 1928 1929 IntSize SurfaceCache::ClampSize(ImageKey aImageKey, const IntSize& aSize) { 1930 if (aImageKey->GetType() != imgIContainer::TYPE_VECTOR) { 1931 return aSize; 1932 } 1933 1934 return ClampVectorSize(aSize); 1935 } 1936 1937 /* static */ 1938 void SurfaceCache::ReleaseImageOnMainThread( 1939 already_AddRefed<image::Image> aImage, bool aAlwaysProxy) { 1940 if (NS_IsMainThread() && !aAlwaysProxy) { 1941 RefPtr<image::Image> image = std::move(aImage); 1942 return; 1943 } 1944 1945 // Don't try to dispatch the release after shutdown, we'll just leak the 1946 // runnable. 1947 if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownFinal)) { 1948 (void)aImage; 1949 return; 1950 } 1951 1952 StaticMutexAutoLock lock(sInstanceMutex); 1953 if (sInstance) { 1954 sInstance->ReleaseImageOnMainThread(std::move(aImage), lock); 1955 } else { 1956 NS_ReleaseOnMainThread("SurfaceCache::ReleaseImageOnMainThread", 1957 std::move(aImage), /* aAlwaysProxy */ true); 1958 } 1959 } 1960 1961 /* static */ 1962 void SurfaceCache::ClearReleasingImages() { 1963 MOZ_ASSERT(NS_IsMainThread()); 1964 1965 nsTArray<RefPtr<image::Image>> images; 1966 { 1967 StaticMutexAutoLock lock(sInstanceMutex); 1968 if (sInstance) { 1969 sInstance->TakeReleasingImages(images, lock); 1970 } 1971 } 1972 } 1973 1974 } // namespace image 1975 } // namespace mozilla