CanvasDrawEventRecorder.cpp (15841B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "CanvasDrawEventRecorder.h"

#include <string.h>

#include "mozilla/dom/WorkerCommon.h"
#include "mozilla/dom/WorkerPrivate.h"
#include "mozilla/dom/WorkerRef.h"
#include "mozilla/dom/WorkerRunnable.h"
#include "mozilla/layers/TextureRecorded.h"
#include "mozilla/layers/SharedSurfacesChild.h"
#include "mozilla/StaticPrefs_gfx.h"
#include "RecordedCanvasEventImpl.h"

namespace mozilla {
namespace layers {

// A freshly created shared-memory region, paired as the local writable
// mapping (kept on this, the writer, side) and the handle that is sent to
// the reader/translator process.
struct ShmemAndHandle {
  ipc::SharedMemoryMapping shmem;
  ipc::MutableSharedMemoryHandle handle;
};

// Creates a shared-memory region of aSize bytes and maps it into this
// process. Returns Nothing() if either creation or mapping fails.
static Maybe<ShmemAndHandle> CreateAndMapShmem(size_t aSize) {
  auto handle = ipc::shared_memory::Create(aSize);
  if (!handle) {
    return Nothing();
  }
  auto mapping = handle.Map();
  if (!mapping) {
    return Nothing();
  }

  return Some(ShmemAndHandle{std::move(mapping), std::move(handle)});
}

CanvasDrawEventRecorder::CanvasDrawEventRecorder(
    dom::ThreadSafeWorkerRef* aWorkerRef)
    : mWorkerRef(aWorkerRef), mIsOnWorker(!!aWorkerRef) {
  // All tunables come from gfx.canvas.remote.* prefs. The buffer size is
  // rounded up to a whole number of pages for shared-memory allocation.
  mDefaultBufferSize = ipc::shared_memory::PageAlignedSize(
      StaticPrefs::gfx_canvas_remote_default_buffer_size());
  mMaxDefaultBuffers = StaticPrefs::gfx_canvas_remote_max_default_buffers();
  mMaxSpinCount = StaticPrefs::gfx_canvas_remote_max_spin_count();
  mDropBufferLimit = StaticPrefs::gfx_canvas_remote_drop_buffer_limit();
  // Countdown used by GetContiguousBuffer to decide when to drop a spare
  // recycled buffer; reset to mDropBufferLimit whenever a drop is not due.
  mDropBufferOnZero = mDropBufferLimit;
}

CanvasDrawEventRecorder::~CanvasDrawEventRecorder() { MOZ_ASSERT(!mWorkerRef); }

// Sets up the cross-process recording channel: the shared header, the first
// two event buffers, and the two cross-process semaphores, then hands the
// read-only ends to the translator via aHelpers->InitTranslator.
// Returns false (leaving the recorder unusable) on any allocation or IPC
// failure.
bool CanvasDrawEventRecorder::Init(TextureType aTextureType,
                                   TextureType aWebglTextureType,
                                   gfx::BackendType aBackendType,
                                   UniquePtr<Helpers> aHelpers) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  mHelpers = std::move(aHelpers);

  MOZ_ASSERT(mTextureType == TextureType::Unknown);
  auto header = CreateAndMapShmem(sizeof(Header));
  if (NS_WARN_IF(header.isNothing())) {
    return false;
  }

  // The header lives in shared memory and carries the writer/reader
  // handshake state and the event/processed counters.
  mHeader = header->shmem.DataAs<Header>();
  mHeader->eventCount = 0;
  mHeader->writerWaitCount = 0;
  mHeader->writerState = State::Processing;
  mHeader->processedCount = 0;
  mHeader->readerState = State::Paused;

  // We always keep at least two buffers. This means that when we
  // have to add a new buffer, there is at least a full buffer that requires
  // translating while the handle is sent over.
  AutoTArray<ipc::ReadOnlySharedMemoryHandle, 2> bufferHandles;
  auto buffer = CreateAndMapShmem(mDefaultBufferSize);
  if (NS_WARN_IF(buffer.isNothing())) {
    return false;
  }
  mCurrentBuffer = CanvasBuffer(std::move(buffer->shmem));
  bufferHandles.AppendElement(std::move(buffer->handle).ToReadOnly());

  buffer = CreateAndMapShmem(mDefaultBufferSize);
  if (NS_WARN_IF(buffer.isNothing())) {
    return false;
  }
  mRecycledBuffers.emplace(std::move(buffer->shmem), 0);
  bufferHandles.AppendElement(std::move(buffer->handle).ToReadOnly());

  // Semaphore the reader signals to wake this (writer) side; we keep the
  // local end and pass a clone to the translator.
  mWriterSemaphore.reset(CrossProcessSemaphore::Create("CanvasRecorder", 0));
  auto writerSem = mWriterSemaphore->CloneHandle();
  mWriterSemaphore->CloseHandle();
  if (!IsHandleValid(writerSem)) {
    return false;
  }

  // Semaphore we signal to wake the reader (translator) side.
  mReaderSemaphore.reset(CrossProcessSemaphore::Create("CanvasTranslator", 0));
  auto readerSem = mReaderSemaphore->CloneHandle();
  mReaderSemaphore->CloseHandle();
  if (!IsHandleValid(readerSem)) {
    return false;
  }

  if (!mHelpers->InitTranslator(aTextureType, aWebglTextureType, aBackendType,
                                std::move(header->handle),
                                std::move(bufferHandles), std::move(readerSem),
                                std::move(writerSem))) {
    return false;
  }

  // Only commit state once everything above has succeeded, so a failed Init
  // leaves mTextureType at Unknown.
  mTextureType = aTextureType;
  mHeaderShmem = std::move(header->shmem);
  return true;
}

// Serializes a single recorded event into the current buffer.
void CanvasDrawEventRecorder::RecordEvent(const gfx::RecordedEvent& aEvent) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);
  aEvent.RecordToStream(*this);
}

// Records a checkpoint event and returns its event count, which can later be
// passed to WaitForCheckpoint to wait until the translator has processed
// everything up to this point. Also prunes external surfaces/images the
// translator has already consumed.
int64_t CanvasDrawEventRecorder::CreateCheckpoint() {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);
  int64_t checkpoint = mHeader->eventCount;
  RecordEvent(RecordedCheckpoint());
  ClearProcessedExternalSurfaces();
  ClearProcessedExternalImages();
  return checkpoint;
}

// Blocks until the translator's processedCount reaches aCheckpoint.
// Spins first (cheap when the reader keeps up), then transitions the writer
// state through AboutToWait -> Waiting and sleeps on mWriterSemaphore.
// Returns false — and marks the writer as Failed — if the reader has closed
// or failed before the checkpoint is reached.
bool CanvasDrawEventRecorder::WaitForCheckpoint(int64_t aCheckpoint) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  uint32_t spinCount = mMaxSpinCount;
  do {
    if (mHeader->processedCount >= aCheckpoint) {
      return true;
    }
  } while (--spinCount != 0);

  // Advertise that we are about to wait, then re-check the counter so we
  // don't sleep if the reader got there in the meantime.
  mHeader->writerState = State::AboutToWait;
  if (mHeader->processedCount >= aCheckpoint) {
    mHeader->writerState = State::Processing;
    return true;
  }

  mHeader->writerWaitCount = aCheckpoint;
  mHeader->writerState = State::Waiting;

  // Wait unless we detect the reading side has closed. The 100ms timeout
  // bounds how long we go between ReaderClosed()/readerState checks.
  while (!mHelpers->ReaderClosed() && mHeader->readerState != State::Failed) {
    if (mWriterSemaphore->Wait(Some(TimeDuration::FromMilliseconds(100)))) {
      MOZ_ASSERT(mHeader->processedCount >= aCheckpoint);
      return true;
    }
  }

  // Either the reader has failed or we're stopping writing for some other
  // reason (e.g. shutdown), so mark us as failed so the reader is aware.
  mHeader->writerState = State::Failed;
  return false;
}

// Writes one of the buffer-management events (RECYCLE_BUFFER, DROP_BUFFER,
// PAUSE_TRANSLATION) directly into the current buffer. Callers must have
// guaranteed a spare byte (see the aSize + 1 rule in GetContiguousBuffer).
void CanvasDrawEventRecorder::WriteInternalEvent(EventType aEventType) {
  MOZ_ASSERT(mCurrentBuffer.SizeRemaining() > 0);

  WriteElement(mCurrentBuffer.Writer(), aEventType);
  IncrementEventCount();
}

// Returns a buffer with at least aSize bytes of contiguous space, switching
// to a recycled buffer or allocating a new shmem as needed. On failure the
// writer state is set to Failed and an invalid (empty) buffer is returned;
// callers must check IsValid().
gfx::ContiguousBuffer& CanvasDrawEventRecorder::GetContiguousBuffer(
    size_t aSize) {
  if (!mCurrentBuffer.IsValid()) {
    // If the current buffer is invalid then we've already failed previously.
    MOZ_ASSERT(mHeader->writerState == State::Failed);
    return mCurrentBuffer;
  }

  // We make sure that our buffer can hold aSize + 1 to ensure we always have
  // room for the end of buffer event.

  // Check if there is enough room in our current buffer.
  if (mCurrentBuffer.SizeRemaining() > aSize) {
    return mCurrentBuffer;
  }

  // NOTE(review): front() is called without an emptiness check — this relies
  // on the invariant (established in Init) that mRecycledBuffers always
  // holds at least one entry; confirm the recycle/drop paths below preserve
  // it when mCurrentBuffer is a non-default-size buffer.
  bool useRecycledBuffer = false;
  if (mRecycledBuffers.front().Capacity() > aSize) {
    // The recycled buffer is big enough, check if it is free.
    if (mRecycledBuffers.front().eventCount <= mHeader->processedCount) {
      useRecycledBuffer = true;
    } else if (mRecycledBuffers.size() >= mMaxDefaultBuffers) {
      // We've hit the max number of buffers, wait for the next one to be free.
      // We wait for (eventCount - 1), as we check and signal in the translator
      // during the play event, before the processedCount has been updated.
      useRecycledBuffer = true;
      if (!WaitForCheckpoint(mRecycledBuffers.front().eventCount - 1)) {
        // The wait failed or we're shutting down, just return an empty buffer.
        mCurrentBuffer = CanvasBuffer();
        return mCurrentBuffer;
      }
    }
  }

  if (useRecycledBuffer) {
    // Only queue default size buffers for recycling.
    if (mCurrentBuffer.Capacity() == mDefaultBufferSize) {
      WriteInternalEvent(RECYCLE_BUFFER);
      mRecycledBuffers.emplace(std::move(mCurrentBuffer.shmem),
                               mHeader->eventCount);
    } else {
      WriteInternalEvent(DROP_BUFFER);
    }

    mCurrentBuffer = CanvasBuffer(std::move(mRecycledBuffers.front().shmem));
    mRecycledBuffers.pop();

    // If we have more than one recycled buffer free a configured number of
    // times in a row then drop one.
    if (mRecycledBuffers.size() > 1 &&
        mRecycledBuffers.front().eventCount < mHeader->processedCount) {
      if (--mDropBufferOnZero == 0) {
        WriteInternalEvent(DROP_BUFFER);
        mCurrentBuffer =
            CanvasBuffer(std::move(mRecycledBuffers.front().shmem));
        mRecycledBuffers.pop();
        mDropBufferOnZero = 1;
      }
    } else {
      mDropBufferOnZero = mDropBufferLimit;
    }

    return mCurrentBuffer;
  }

  // We don't have a buffer free or it is not big enough, so create a new one.
  WriteInternalEvent(PAUSE_TRANSLATION);

  // Only queue default size buffers for recycling.
  if (mCurrentBuffer.Capacity() == mDefaultBufferSize) {
    mRecycledBuffers.emplace(std::move(mCurrentBuffer.shmem),
                             mHeader->eventCount);
  }

  // +1 keeps the spare byte for the end-of-buffer event (see above).
  size_t bufferSize = std::max(mDefaultBufferSize,
                               ipc::shared_memory::PageAlignedSize(aSize + 1));
  auto newBuffer = CreateAndMapShmem(bufferSize);
  if (NS_WARN_IF(newBuffer.isNothing())) {
    mHeader->writerState = State::Failed;
    mCurrentBuffer = CanvasBuffer();
    return mCurrentBuffer;
  }

  // Send the read-only handle to the translator before using the buffer.
  if (!mHelpers->AddBuffer(std::move(newBuffer->handle).ToReadOnly())) {
    mHeader->writerState = State::Failed;
    mCurrentBuffer = CanvasBuffer();
    return mCurrentBuffer;
  }

  mCurrentBuffer = CanvasBuffer(std::move(newBuffer->shmem));
  return mCurrentBuffer;
}

// Releases recycled buffers that the translator has fully processed,
// keeping at least one spare, and prunes processed external surfaces/images.
void CanvasDrawEventRecorder::DropFreeBuffers() {
  while (mRecycledBuffers.size() > 1 &&
         mRecycledBuffers.front().eventCount < mHeader->processedCount) {
    // If we encountered an error, we may have invalidated mCurrentBuffer in
    // GetContiguousBuffer. No need to write the DROP_BUFFER event.
    if (mCurrentBuffer.IsValid()) {
      WriteInternalEvent(DROP_BUFFER);
    }
    mCurrentBuffer = CanvasBuffer(std::move(mRecycledBuffers.front().shmem));
    mRecycledBuffers.pop();
  }

  ClearProcessedExternalSurfaces();
  ClearProcessedExternalImages();
}

// Publishes a newly written event to the shared header and wakes the reader
// if it needs waking.
void CanvasDrawEventRecorder::IncrementEventCount() {
  mHeader->eventCount++;
  CheckAndSignalReader();
}

// Writer side of the reader-wakeup handshake. Loops on the reader's shared
// state: signals the semaphore when the reader is Waiting, asks the helpers
// to restart it when Stopped, and spins through the transient AboutToWait
// state (bailing out if the reader has closed, to avoid hanging).
void CanvasDrawEventRecorder::CheckAndSignalReader() {
  do {
    switch (mHeader->readerState) {
      case State::Processing:
      case State::Paused:
      case State::Failed:
        return;
      case State::AboutToWait:
        // The reader is making a decision about whether to wait. So, we must
        // wait until it has decided to avoid races. Check if the reader is
        // closed to avoid hangs.
        if (mHelpers->ReaderClosed()) {
          return;
        }
        continue;
      case State::Waiting:
        if (mHeader->processedCount < mHeader->eventCount) {
          // We have to use compareExchange here because the reader can change
          // from Waiting to Stopped.
          if (mHeader->readerState.compareExchange(State::Waiting,
                                                   State::Processing)) {
            mReaderSemaphore->Signal();
            return;
          }

          MOZ_ASSERT(mHeader->readerState == State::Stopped);
          continue;
        }
        return;
      case State::Stopped:
        if (mHeader->processedCount < mHeader->eventCount) {
          mHeader->readerState = State::Processing;
          if (!mHelpers->RestartReader()) {
            mHeader->writerState = State::Failed;
          }
        }
        return;
      default:
        MOZ_ASSERT_UNREACHABLE("Invalid waiting state.");
        return;
    }
  } while (true);
}

// Drops the worker reference (under the pending-deletions lock, so the
// deletion-queueing paths see a consistent mWorkerRef) after detaching the
// base class's resources.
void CanvasDrawEventRecorder::DetachResources() {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  DrawEventRecorderPrivate::DetachResources();

  {
    auto lockedPendingDeletions = mPendingDeletions.Lock();
    mWorkerRef = nullptr;
  }
}

// Dispatches ProcessPendingDeletions to the owning thread: the worker (via a
// MainThreadWorkerRunnable, which must be dispatched from the main thread,
// hence the bounce when called elsewhere) or the main thread when there is
// no worker. Takes ownership of aRecorder to keep `this` alive across the
// dispatch. Must be called with mPendingDeletions locked (per the name).
void CanvasDrawEventRecorder::QueueProcessPendingDeletionsLocked(
    RefPtr<CanvasDrawEventRecorder>&& aRecorder) {
  if (!mWorkerRef) {
    MOZ_RELEASE_ASSERT(
        !mIsOnWorker,
        "QueueProcessPendingDeletionsLocked called after worker shutdown!");

    NS_DispatchToMainThread(NS_NewRunnableFunction(
        "CanvasDrawEventRecorder::QueueProcessPendingDeletionsLocked",
        [self = std::move(aRecorder)]() { self->ProcessPendingDeletions(); }));
    return;
  }

  if (!NS_IsMainThread()) {
    // Re-enter via QueueProcessPendingDeletions from the main thread, which
    // re-takes the lock and re-checks for a race with another consumer.
    NS_DispatchToMainThread(NS_NewRunnableFunction(
        "CanvasDrawEventRecorder::QueueProcessPendingDeletionsLocked",
        [self = std::move(aRecorder)]() mutable {
          self->QueueProcessPendingDeletions(std::move(self));
        }));
    return;
  }

  // Runnable that runs ProcessPendingDeletions on the worker thread.
  class ProcessPendingRunnable final : public dom::MainThreadWorkerRunnable {
   public:
    explicit ProcessPendingRunnable(RefPtr<CanvasDrawEventRecorder>&& aRecorder)
        : dom::MainThreadWorkerRunnable("ProcessPendingRunnable"),
          mRecorder(std::move(aRecorder)) {}

    bool WorkerRun(JSContext*, dom::WorkerPrivate*) override {
      RefPtr<CanvasDrawEventRecorder> recorder = std::move(mRecorder);
      recorder->ProcessPendingDeletions();
      return true;
    }

   private:
    RefPtr<CanvasDrawEventRecorder> mRecorder;
  };

  auto task = MakeRefPtr<ProcessPendingRunnable>(std::move(aRecorder));
  if (NS_WARN_IF(!task->Dispatch(mWorkerRef->Private()))) {
    MOZ_CRASH("ProcessPendingRunnable leaked!");
  }
}

// Lock-taking wrapper around QueueProcessPendingDeletionsLocked that bails
// out if another caller already drained the queue.
void CanvasDrawEventRecorder::QueueProcessPendingDeletions(
    RefPtr<CanvasDrawEventRecorder>&& aRecorder) {
  auto lockedPendingDeletions = mPendingDeletions.Lock();
  if (lockedPendingDeletions->empty()) {
    // We raced to handle the deletions, and something got there first.
    return;
  }

  QueueProcessPendingDeletionsLocked(std::move(aRecorder));
}

// Adds a deferred-deletion callback. If called on the owning thread the
// whole queue is run immediately (outside the lock); otherwise an event is
// queued to run it there — but only on the empty -> non-empty transition, so
// at most one event is in flight.
void CanvasDrawEventRecorder::AddPendingDeletion(
    std::function<void()>&& aPendingDeletion) {
  PendingDeletionsVector pendingDeletions;

  {
    auto lockedPendingDeletions = mPendingDeletions.Lock();
    bool wasEmpty = lockedPendingDeletions->empty();
    lockedPendingDeletions->emplace_back(std::move(aPendingDeletion));

    MOZ_RELEASE_ASSERT(!mIsOnWorker || mWorkerRef,
                       "AddPendingDeletion called after worker shutdown!");

    // If we are not on the owning thread, we must queue an event to run the
    // deletions, if we transitioned from empty to non-empty.
    if ((mWorkerRef && !mWorkerRef->Private()->IsOnCurrentThread()) ||
        (!mWorkerRef && !NS_IsMainThread())) {
      if (wasEmpty) {
        RefPtr<CanvasDrawEventRecorder> self(this);
        QueueProcessPendingDeletionsLocked(std::move(self));
      }
      return;
    }

    // Otherwise, we can just run all of them right now.
    pendingDeletions.swap(*lockedPendingDeletions);
  }

  // Run the callbacks after releasing the lock, so they may re-enter us.
  for (const auto& pendingDeletion : pendingDeletions) {
    pendingDeletion();
  }
}

// On the main thread, tries to share the surface via SharedSurfacesChild and
// record it as an external surface (tagged with the current event count so it
// can be released once processed); otherwise falls back to the base-class
// recording path.
void CanvasDrawEventRecorder::StoreSourceSurfaceRecording(
    gfx::SourceSurface* aSurface, const char* aReason) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  if (NS_IsMainThread()) {
    wr::ExternalImageId extId{};
    nsresult rv = layers::SharedSurfacesChild::Share(aSurface, extId);
    if (NS_SUCCEEDED(rv)) {
      StoreExternalSurfaceRecording(aSurface, wr::AsUint64(extId));
      mExternalSurfaces.back().mEventCount = mHeader->eventCount;
      return;
    }
  }

  DrawEventRecorderPrivate::StoreSourceSurfaceRecording(aSurface, aReason);
}

// Records an image backed by a surface descriptor as an external image,
// tagged with the current event count, then prunes any already-processed
// entries.
void CanvasDrawEventRecorder::StoreImageRecording(
    const RefPtr<Image>& aImageOfSurfaceDescriptor, const char* aReasony) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  StoreExternalImageRecording(aImageOfSurfaceDescriptor);
  mExternalImages.back().mEventCount = mHeader->eventCount;

  ClearProcessedExternalImages();
}

// Pops external surfaces whose tagged event count the translator has
// already processed (the deque is ordered by event count).
void CanvasDrawEventRecorder::ClearProcessedExternalSurfaces() {
  while (!mExternalSurfaces.empty()) {
    if (mExternalSurfaces.front().mEventCount > mHeader->processedCount) {
      break;
    }
    mExternalSurfaces.pop_front();
  }
}

// Same as above, for external images.
void CanvasDrawEventRecorder::ClearProcessedExternalImages() {
  while (!mExternalImages.empty()) {
    if (mExternalImages.front().mEventCount > mHeader->processedCount) {
      break;
    }
    mExternalImages.pop_front();
  }
}

}  // namespace layers
}  // namespace mozilla