SurfacePoolCA.mm (18695B)
/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/layers/SurfacePoolCA.h"

#import <CoreVideo/CVPixelBuffer.h>
#include <IOSurface/IOSurfaceTypes.h>

#include <algorithm>
#include <unordered_set>
#include <utility>

#include "mozilla/ProfilerLabels.h"
#include "mozilla/ProfilerMarkers.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPrefs_gfx.h"

#ifdef XP_MACOSX
#  include "GLContextCGL.h"
#else
#  include "GLContextEAGL.h"
#  include <OpenGLES/EAGLIOSurface.h>
#endif

#include "MozFramebuffer.h"
#include "ScopedGLHelpers.h"

namespace mozilla {
namespace layers {

using gfx::IntPoint;
using gfx::IntRect;
using gfx::IntRegion;
using gfx::IntSize;
using gl::GLContext;
#ifdef XP_MACOSX
using gl::GLContextCGL;
#else
using gl::GLContextEAGL;
#endif

// GL_TEXTURE_RECTANGLE_ARB does not exist in OpenGL ES (which is used on iOS).
// Instead GL_TEXTURE_2D supports arbitrary dimensions.
#ifdef XP_MACOSX
static constexpr GLenum kTextureRectTarget = LOCAL_GL_TEXTURE_RECTANGLE_ARB;
#else
static constexpr GLenum kTextureRectTarget = LOCAL_GL_TEXTURE_2D;
#endif

// Number of times CollectPendingSurfaces may observe a pending surface still
// in use by the window server before we give up waiting and drop our
// references to it. (Previously an inline magic number.)
static constexpr uint32_t kMaxPendingSurfaceCheckCount = 30;

/* static */ RefPtr<SurfacePool> SurfacePool::Create(size_t aPoolSizeLimit) {
  return new SurfacePoolCA(aPoolSizeLimit);
}

// SurfacePoolCA::LockedPool

SurfacePoolCA::LockedPool::LockedPool(size_t aPoolSizeLimit)
    : mPoolSizeLimit(aPoolSizeLimit) {}

SurfacePoolCA::LockedPool::~LockedPool() {
  MOZ_RELEASE_ASSERT(
      mWrappers.empty(),
      "Any outstanding wrappers should have kept the surface pool alive");
  MOZ_RELEASE_ASSERT(mInUseEntries.empty(),
                     "Leak! No more surfaces should be in use at this point.");
  // Remove all entries in mPendingEntries and mAvailableEntries.
  MutateEntryStorage("Clear", {}, [&]() {
    mPendingEntries.Clear();
    mAvailableEntries.Clear();
  });
}

// Returns (creating on demand) the per-GLContext wrapper object. There is at
// most one wrapper per GLContext* at any time; see OnWrapperDestroyed.
RefPtr<SurfacePoolCAWrapperForGL> SurfacePoolCA::LockedPool::GetWrapperForGL(
    SurfacePoolCA* aPool, GLContext* aGL) {
  auto& wrapper = mWrappers[aGL];
  if (!wrapper) {
    wrapper = new SurfacePoolCAWrapperForGL(aPool, aGL);
  }
  return wrapper;
}

// Drops all GL state (framebuffers, shared depth buffers) that was created
// for aGL. The IOSurfaces themselves stay in the pool.
void SurfacePoolCA::LockedPool::DestroyGLResourcesForContext(GLContext* aGL) {
  ForEachEntry([&](SurfacePoolEntry& entry) {
    if (entry.mGLResources && entry.mGLResources->mGLContext == aGL) {
      entry.mGLResources = Nothing();
    }
  });
  mDepthBuffers.RemoveElementsBy(
      [&](const DepthBufferEntry& entry) { return entry.mGLContext == aGL; });
}

// Runs aFn (which mutates mInUseEntries / mPendingEntries /
// mAvailableEntries) and emits a profiler marker describing the change.
// aMutationType and aSize are only used for the marker text.
template <typename F>
void SurfacePoolCA::LockedPool::MutateEntryStorage(const char* aMutationType,
                                                   const gfx::IntSize& aSize,
                                                   F aFn) {
  size_t inUseCountBefore = mInUseEntries.size();
  size_t pendingCountBefore = mPendingEntries.Length();
  size_t availableCountBefore = mAvailableEntries.Length();
  TimeStamp before = TimeStamp::Now();

  aFn();

  if (profiler_thread_is_being_profiled_for_markers()) {
    PROFILER_MARKER_TEXT(
        "SurfacePool", GRAPHICS, MarkerTiming::IntervalUntilNowFrom(before),
        nsPrintfCString("%d -> %d in use | %d -> %d waiting for | %d -> %d "
                        "available | %s %dx%d | %dMB total memory",
                        int(inUseCountBefore), int(mInUseEntries.size()),
                        int(pendingCountBefore), int(mPendingEntries.Length()),
                        int(availableCountBefore),
                        int(mAvailableEntries.Length()), aMutationType,
                        aSize.width, aSize.height,
                        int(EstimateTotalMemory() / 1000 / 1000)));
  }
}

// Invokes aFn on every entry in all three storage buckets (in-use, pending,
// available).
template <typename F>
void SurfacePoolCA::LockedPool::ForEachEntry(F aFn) {
  for (auto& iter : mInUseEntries) {
    aFn(iter.second);
  }
  for (auto& entry : mPendingEntries) {
    aFn(entry.mEntry);
  }
  for (auto& entry : mAvailableEntries) {
    aFn(entry);
  }
}

// Rough estimate of the pool's memory footprint in bytes: 4 bytes per pixel
// for every surface, plus each distinct shared depth/stencil buffer counted
// once.
uint64_t SurfacePoolCA::LockedPool::EstimateTotalMemory() {
  std::unordered_set<const gl::DepthAndStencilBuffer*> depthAndStencilBuffers;
  uint64_t memBytes = 0;

  ForEachEntry([&](const SurfacePoolEntry& entry) {
    auto size = entry.mSize;
    // Promote to uint64_t before multiplying so that very large surfaces
    // cannot overflow int arithmetic.
    memBytes += uint64_t(size.width) * 4 * uint64_t(size.height);
    if (entry.mGLResources) {
      const auto& fb = *entry.mGLResources->mFramebuffer;
      if (const auto& buffer = fb.GetDepthAndStencilBuffer()) {
        depthAndStencilBuffers.insert(buffer.get());
      }
    }
  });

  for (const auto& buffer : depthAndStencilBuffers) {
    memBytes += buffer->EstimateMemory();
  }

  return memBytes;
}

// A surface can be recycled for a request if it has the right size and its GL
// resources (if any) belong to the requesting context.
bool SurfacePoolCA::LockedPool::CanRecycleSurfaceForRequest(
    const SurfacePoolEntry& aEntry, const IntSize& aSize, GLContext* aGL) {
  if (aEntry.mSize != aSize) {
    return false;
  }
  if (aEntry.mGLResources) {
    return aEntry.mGLResources->mGLContext == aGL;
  }
  return true;
}

// Returns a surface of aSize for use with aGL, recycling an available one if
// possible and creating a fresh IOSurface otherwise. The returned surface is
// tracked in mInUseEntries until ReturnSurfaceToPool is called.
CFTypeRefPtr<IOSurfaceRef> SurfacePoolCA::LockedPool::ObtainSurfaceFromPool(
    const IntSize& aSize, GLContext* aGL) {
  // Do a linear scan through mAvailableEntries to find an eligible surface,
  // going from oldest to newest. The size of this array is limited, so the
  // linear scan is fast.
  auto iterToRecycle =
      std::find_if(mAvailableEntries.begin(), mAvailableEntries.end(),
                   [&](const SurfacePoolEntry& aEntry) {
                     return CanRecycleSurfaceForRequest(aEntry, aSize, aGL);
                   });
  if (iterToRecycle != mAvailableEntries.end()) {
    CFTypeRefPtr<IOSurfaceRef> surface = iterToRecycle->mIOSurface;
    MOZ_RELEASE_ASSERT(surface.get(), "Available surfaces should be non-null.");
    // Move the entry from mAvailableEntries to mInUseEntries.
    MutateEntryStorage("Recycle", aSize, [&]() {
      mInUseEntries.insert({surface, std::move(*iterToRecycle)});
      mAvailableEntries.RemoveElementAt(iterToRecycle);
    });
    return surface;
  }

  AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(
      "IOSurface creation", GRAPHICS_TileAllocation,
      nsPrintfCString("%dx%d", aSize.width, aSize.height));
  CFTypeRefPtr<IOSurfaceRef> surface =
      CFTypeRefPtr<IOSurfaceRef>::WrapUnderCreateRule(
          IOSurfaceCreate((__bridge CFDictionaryRef) @{
            (__bridge NSString*)kIOSurfaceWidth : @(aSize.width),
            (__bridge NSString*)kIOSurfaceHeight : @(aSize.height),
            (__bridge NSString*)
            kIOSurfacePixelFormat : @(kCVPixelFormatType_32BGRA),
            (__bridge NSString*)kIOSurfaceBytesPerElement : @(4),
          }));
  if (surface) {
    if (StaticPrefs::gfx_color_management_native_srgb()) {
      IOSurfaceSetValue(surface.get(), CFSTR("IOSurfaceColorSpace"),
                        kCGColorSpaceSRGB);
    }
    // Create a new entry in mInUseEntries.
    MutateEntryStorage("Create", aSize, [&]() {
      mInUseEntries.insert({surface, SurfacePoolEntry{aSize, surface, {}}});
    });
  }
  return surface;
}

// Hands a surface back to the pool. If the window server still holds the
// surface (IOSurfaceIsInUse), it is parked in mPendingEntries and re-checked
// by CollectPendingSurfaces; otherwise it becomes immediately available for
// recycling.
void SurfacePoolCA::LockedPool::ReturnSurfaceToPool(
    CFTypeRefPtr<IOSurfaceRef> aSurface) {
  auto inUseEntryIter = mInUseEntries.find(aSurface);
  MOZ_RELEASE_ASSERT(inUseEntryIter != mInUseEntries.end());
  if (IOSurfaceIsInUse(aSurface.get())) {
    // Move the entry from mInUseEntries to mPendingEntries.
    MutateEntryStorage(
        "Start waiting for", inUseEntryIter->second.mSize, [&]() {
          mPendingEntries.AppendElement(PendingSurfaceEntry{
              std::move(inUseEntryIter->second), mCollectionGeneration, 0});
          mInUseEntries.erase(inUseEntryIter);
        });
  } else {
    // Move the entry from mInUseEntries to mAvailableEntries.
    MOZ_RELEASE_ASSERT(inUseEntryIter->second.mIOSurface.get(),
                       "In use surfaces should be non-null.");
    MutateEntryStorage("Retain", inUseEntryIter->second.mSize, [&]() {
      mAvailableEntries.AppendElement(std::move(inUseEntryIter->second));
      mInUseEntries.erase(inUseEntryIter);
    });
  }
}

void SurfacePoolCA::LockedPool::EnforcePoolSizeLimit() {
  // Enforce the pool size limit, removing least-recently-used entries as
  // necessary.
  while (mAvailableEntries.Length() > mPoolSizeLimit) {
    MutateEntryStorage("Evict", mAvailableEntries[0].mSize,
                       [&]() { mAvailableEntries.RemoveElementAt(0); });
  }
}

// Re-checks pending surfaces whose last check happened at or before
// aCheckGenerationsUpTo, moving the ones the window server has released into
// mAvailableEntries. Returns the new collection generation.
uint64_t SurfacePoolCA::LockedPool::CollectPendingSurfaces(
    uint64_t aCheckGenerationsUpTo) {
  mCollectionGeneration++;

  // Loop from back to front, potentially deleting items as we iterate.
  // mPendingEntries is used as a set; the order of its items is not meaningful.
  size_t i = mPendingEntries.Length();
  while (i) {
    i -= 1;
    auto& pendingSurf = mPendingEntries[i];
    if (pendingSurf.mPreviousCheckGeneration > aCheckGenerationsUpTo) {
      continue;
    }
    // Check if the window server is still using the surface. As long as it is
    // doing that, we cannot move the surface to mAvailableSurfaces because
    // anything we draw to it could reach the screen in a place where we don't
    // expect it.
    if (IOSurfaceIsInUse(pendingSurf.mEntry.mIOSurface.get())) {
      // The surface is still in use. Update mPreviousCheckGeneration and
      // mCheckCount.
      pendingSurf.mPreviousCheckGeneration = mCollectionGeneration;
      pendingSurf.mCheckCount++;
      if (pendingSurf.mCheckCount >= kMaxPendingSurfaceCheckCount) {
        // The window server has been holding on to this surface for an
        // unreasonably long time. This is known to happen sometimes, for
        // example in occluded windows or after a GPU switch. In that case,
        // release our references to the surface so that it's Not Our Problem
        // anymore. Remove the entry from mPendingEntries.
        MutateEntryStorage("Eject", pendingSurf.mEntry.mSize,
                           [&]() { mPendingEntries.RemoveElementAt(i); });
      }
    } else {
      // The surface has become unused!
      // Move the entry from mPendingEntries to mAvailableEntries.
      MOZ_RELEASE_ASSERT(pendingSurf.mEntry.mIOSurface.get(),
                         "Pending surfaces should be non-null.");
      MutateEntryStorage(
          "Stop waiting for", pendingSurf.mEntry.mSize, [&]() {
            mAvailableEntries.AppendElement(std::move(pendingSurf.mEntry));
            mPendingEntries.RemoveElementAt(i);
          });
    }
  }
  return mCollectionGeneration;
}

// Called from the wrapper's destructor: tears down GL resources for aGL and
// removes the wrapper bookkeeping entry.
void SurfacePoolCA::LockedPool::OnWrapperDestroyed(
    gl::GLContext* aGL, SurfacePoolCAWrapperForGL* aWrapper) {
  if (aGL) {
    DestroyGLResourcesForContext(aGL);
  }

  auto iter = mWrappers.find(aGL);
  MOZ_RELEASE_ASSERT(iter != mWrappers.end());
  MOZ_RELEASE_ASSERT(iter->second == aWrapper,
                     "Only one SurfacePoolCAWrapperForGL object should "
                     "exist for each GLContext* at any time");
  mWrappers.erase(iter);
}

// Returns a GL framebuffer object (in aGL) that renders into aSurface,
// creating texture + framebuffer on first use and caching them on the entry.
// Returns Nothing() if the context is lost or framebuffer creation fails.
Maybe<GLuint> SurfacePoolCA::LockedPool::GetFramebufferForSurface(
    CFTypeRefPtr<IOSurfaceRef> aSurface, GLContext* aGL,
    bool aNeedsDepthBuffer) {
  MOZ_RELEASE_ASSERT(aGL);

  auto inUseEntryIter = mInUseEntries.find(aSurface);
  MOZ_RELEASE_ASSERT(inUseEntryIter != mInUseEntries.end());

  SurfacePoolEntry& entry = inUseEntryIter->second;
  if (entry.mGLResources) {
    // We have an existing framebuffer.
    MOZ_RELEASE_ASSERT(entry.mGLResources->mGLContext == aGL,
                       "Recycled surface that still had GL resources from a "
                       "different GL context. "
                       "This shouldn't happen.");
    if (!aNeedsDepthBuffer || entry.mGLResources->mFramebuffer->HasDepth()) {
      return Some(entry.mGLResources->mFramebuffer->mFB);
    }
  }

  // No usable existing framebuffer, we need to create one.

  AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(
      "Framebuffer creation", GRAPHICS_TileAllocation,
      nsPrintfCString("%dx%d", entry.mSize.width, entry.mSize.height));

#ifdef XP_MACOSX
  RefPtr<GLContextCGL> cgl = GLContextCGL::Cast(aGL);
  MOZ_RELEASE_ASSERT(cgl, "Unexpected GLContext type");
#else
  RefPtr<GLContextEAGL> eagl = GLContextEAGL::Cast(aGL);
  MOZ_RELEASE_ASSERT(eagl, "Unexpected GLContext type");
#endif

  if (!aGL->MakeCurrent()) {
    // Context may have been destroyed.
    return {};
  }

  GLuint tex = aGL->CreateTexture();
  {
    const gl::ScopedBindTexture bindTex(aGL, tex, kTextureRectTarget);
#ifdef XP_MACOSX
    // Attach the IOSurface to the texture so GL renders directly into it.
    CGLTexImageIOSurface2D(cgl->GetCGLContext(), kTextureRectTarget,
                           LOCAL_GL_RGBA, entry.mSize.width, entry.mSize.height,
                           LOCAL_GL_BGRA, LOCAL_GL_UNSIGNED_INT_8_8_8_8_REV,
                           entry.mIOSurface.get(), 0);
#elif TARGET_OS_SIMULATOR
    // texImageIOSurface is unavailable in simulator.
    MOZ_CRASH("unimplemented");
#else
    [eagl->GetEAGLContext() texImageIOSurface:entry.mIOSurface.get()
                                       target:kTextureRectTarget
                               internalFormat:LOCAL_GL_RGBA
                                        width:entry.mSize.width
                                       height:entry.mSize.height
                                       format:LOCAL_GL_BGRA
                                         type:LOCAL_GL_UNSIGNED_INT_8_8_8_8_REV
                                        plane:0];
#endif
  }

  auto fb =
      CreateFramebufferForTexture(aGL, entry.mSize, tex, aNeedsDepthBuffer);
  if (!fb) {
    // Framebuffer completeness check may have failed.
    return {};
  }

  GLuint fbo = fb->mFB;
  entry.mGLResources = Some(GLResourcesForSurface{aGL, std::move(fb)});
  return Some(fbo);
}

// Looks for a live shared depth/stencil buffer of aSize in aGL. Also prunes
// entries whose weak pointers have died. Returns nullptr if none was found.
RefPtr<gl::DepthAndStencilBuffer>
SurfacePoolCA::LockedPool::GetDepthBufferForSharing(GLContext* aGL,
                                                    const IntSize& aSize) {
  // Clean out entries for which the weak pointer has become null.
  mDepthBuffers.RemoveElementsBy(
      [&](const DepthBufferEntry& entry) { return !entry.mBuffer; });

  for (const auto& entry : mDepthBuffers) {
    if (entry.mGLContext == aGL && entry.mSize == aSize) {
      return entry.mBuffer.get();
    }
  }
  return nullptr;
}

// Creates a framebuffer backed by aTexture, sharing an existing depth buffer
// when one of the right size exists for aGL, otherwise creating (and
// registering) a new one.
UniquePtr<gl::MozFramebuffer>
SurfacePoolCA::LockedPool::CreateFramebufferForTexture(GLContext* aGL,
                                                       const IntSize& aSize,
                                                       GLuint aTexture,
                                                       bool aNeedsDepthBuffer) {
  if (aNeedsDepthBuffer) {
    // Try to find an existing depth buffer of aSize in aGL and create a
    // framebuffer that shares it.
    if (auto buffer = GetDepthBufferForSharing(aGL, aSize)) {
      return gl::MozFramebuffer::CreateForBackingWithSharedDepthAndStencil(
          aSize, 0, kTextureRectTarget, aTexture, buffer);
    }
  }

  // No depth buffer needed or we didn't find one. Create a framebuffer with a
  // new depth buffer and store a weak pointer to the new depth buffer in
  // mDepthBuffers.
  UniquePtr<gl::MozFramebuffer> fb = gl::MozFramebuffer::CreateForBacking(
      aGL, aSize, 0, aNeedsDepthBuffer, kTextureRectTarget, aTexture);
  if (fb && fb->GetDepthAndStencilBuffer()) {
    mDepthBuffers.AppendElement(
        DepthBufferEntry{aGL, aSize, fb->GetDepthAndStencilBuffer().get()});
  }

  return fb;
}

// SurfacePoolHandleCA

SurfacePoolHandleCA::SurfacePoolHandleCA(
    RefPtr<SurfacePoolCAWrapperForGL>&& aPoolWrapper,
    uint64_t aCurrentCollectionGeneration)
    : mPoolWrapper(aPoolWrapper),
      mPreviousFrameCollectionGeneration(
          "SurfacePoolHandleCA::mPreviousFrameCollectionGeneration") {
  auto generation = mPreviousFrameCollectionGeneration.Lock();
  *generation = aCurrentCollectionGeneration;
}

SurfacePoolHandleCA::~SurfacePoolHandleCA() = default;

void SurfacePoolHandleCA::OnBeginFrame() {
  // Collect surfaces that were checked no later than the previous frame's
  // generation, and remember the new generation for the next frame.
  auto generation = mPreviousFrameCollectionGeneration.Lock();
  *generation = mPoolWrapper->mPool->CollectPendingSurfaces(*generation);
}

void SurfacePoolHandleCA::OnEndFrame() {
  mPoolWrapper->mPool->EnforcePoolSizeLimit();
}

CFTypeRefPtr<IOSurfaceRef> SurfacePoolHandleCA::ObtainSurfaceFromPool(
    const IntSize& aSize) {
  return mPoolWrapper->mPool->ObtainSurfaceFromPool(aSize, mPoolWrapper->mGL);
}

void SurfacePoolHandleCA::ReturnSurfaceToPool(
    CFTypeRefPtr<IOSurfaceRef> aSurface) {
  mPoolWrapper->mPool->ReturnSurfaceToPool(aSurface);
}

Maybe<GLuint> SurfacePoolHandleCA::GetFramebufferForSurface(
    CFTypeRefPtr<IOSurfaceRef> aSurface, bool aNeedsDepthBuffer) {
  return mPoolWrapper->mPool->GetFramebufferForSurface(
      aSurface, mPoolWrapper->mGL, aNeedsDepthBuffer);
}

// SurfacePoolCA
//
// The public methods below simply take the mPool lock and forward to the
// corresponding LockedPool method.

SurfacePoolCA::SurfacePoolCA(size_t aPoolSizeLimit)
    : mPool(LockedPool(aPoolSizeLimit), "SurfacePoolCA::mPool") {}

SurfacePoolCA::~SurfacePoolCA() = default;

RefPtr<SurfacePoolHandle> SurfacePoolCA::GetHandleForGL(GLContext* aGL) {
  RefPtr<SurfacePoolCAWrapperForGL> wrapper;
  uint64_t collectionGeneration = 0;
  {
    auto pool = mPool.Lock();
    wrapper = pool->GetWrapperForGL(this, aGL);
    collectionGeneration = pool->mCollectionGeneration;
  }

  // Run the SurfacePoolHandleCA constructor outside of the lock so that the
  // mPool lock and the handle's lock are always ordered the same way.
  return new SurfacePoolHandleCA(std::move(wrapper), collectionGeneration);
}

void SurfacePoolCA::DestroyGLResourcesForContext(GLContext* aGL) {
  auto pool = mPool.Lock();
  pool->DestroyGLResourcesForContext(aGL);
}

CFTypeRefPtr<IOSurfaceRef> SurfacePoolCA::ObtainSurfaceFromPool(
    const IntSize& aSize, GLContext* aGL) {
  auto pool = mPool.Lock();
  return pool->ObtainSurfaceFromPool(aSize, aGL);
}

void SurfacePoolCA::ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface) {
  auto pool = mPool.Lock();
  pool->ReturnSurfaceToPool(aSurface);
}

uint64_t SurfacePoolCA::CollectPendingSurfaces(uint64_t aCheckGenerationsUpTo) {
  auto pool = mPool.Lock();
  return pool->CollectPendingSurfaces(aCheckGenerationsUpTo);
}

void SurfacePoolCA::EnforcePoolSizeLimit() {
  auto pool = mPool.Lock();
  pool->EnforcePoolSizeLimit();
}

Maybe<GLuint> SurfacePoolCA::GetFramebufferForSurface(
    CFTypeRefPtr<IOSurfaceRef> aSurface, GLContext* aGL,
    bool aNeedsDepthBuffer) {
  auto pool = mPool.Lock();
  return pool->GetFramebufferForSurface(aSurface, aGL, aNeedsDepthBuffer);
}

void SurfacePoolCA::OnWrapperDestroyed(gl::GLContext* aGL,
                                       SurfacePoolCAWrapperForGL* aWrapper) {
  auto pool = mPool.Lock();
  return pool->OnWrapperDestroyed(aGL, aWrapper);
}

}  // namespace layers
}  // namespace mozilla