IpcResourceUpdateQueue.cpp (16025B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "IpcResourceUpdateQueue.h"
#include <string.h>
#include <algorithm>
#include "mozilla/Maybe.h"
#include "mozilla/layers/PTextureChild.h"
#include "mozilla/layers/WebRenderBridgeChild.h"

namespace mozilla {
namespace wr {

using namespace mozilla::layers;

// ShmSegmentsWriter serializes resource payloads into shared memory obtained
// from the WebRenderBridgeChild. Small payloads are packed back-to-back into
// fixed-size refcounted chunks (mSmallAllocs); payloads of at least four
// chunk-sizes each get a dedicated shmem (mLargeAllocs). mCursor is the
// logical write offset across the concatenated small chunks.
ShmSegmentsWriter::ShmSegmentsWriter(layers::WebRenderBridgeChild* aAllocator,
                                     size_t aChunkSize)
    : mShmAllocator(aAllocator), mCursor(0), mChunkSize(aChunkSize) {
  MOZ_ASSERT(mShmAllocator);
}

// Releases any shmems still owned by this writer (see Clear()).
ShmSegmentsWriter::~ShmSegmentsWriter() { Clear(); }

// Move construction transfers ownership of both alloc lists; the source is
// left with mCursor == 0 so it reports IsEmpty() and its Clear() is a no-op
// on the transferred segments.
ShmSegmentsWriter::ShmSegmentsWriter(ShmSegmentsWriter&& aOther) noexcept
    : mSmallAllocs(std::move(aOther.mSmallAllocs)),
      mLargeAllocs(std::move(aOther.mLargeAllocs)),
      mShmAllocator(aOther.mShmAllocator),
      mCursor(aOther.mCursor),
      mChunkSize(aOther.mChunkSize) {
  aOther.mCursor = 0;
}

// Move assignment. The destination must be empty: Clear() releases whatever
// it held, but any queued data would otherwise be silently dropped.
ShmSegmentsWriter& ShmSegmentsWriter::operator=(
    ShmSegmentsWriter&& aOther) noexcept {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  Clear();
  mSmallAllocs = std::move(aOther.mSmallAllocs);
  mLargeAllocs = std::move(aOther.mLargeAllocs);
  mShmAllocator = aOther.mShmAllocator;
  mCursor = aOther.mCursor;
  mChunkSize = aOther.mChunkSize;
  aOther.mCursor = 0;
  return *this;
}

// Copies aBytes into shared memory and returns its location as an
// OffsetRange. Large payloads (>= 4 chunks) go into a dedicated shmem and
// are identified by a non-zero OffsetRange::source() (a 1-based index into
// mLargeAllocs). Smaller payloads are appended at mCursor across the small
// chunks, with source() == 0. On allocation failure the returned range has
// length zero and the writer's state is unchanged.
layers::OffsetRange ShmSegmentsWriter::Write(Range<uint8_t> aBytes) {
  const size_t start = mCursor;
  const size_t length = aBytes.length();

  if (length >= mChunkSize * 4) {
    auto range = AllocLargeChunk(length);
    if (range.length()) {
      // Allocation was successful
      uint8_t* dstPtr = mLargeAllocs.LastElement().get<uint8_t>();
      memcpy(dstPtr, aBytes.begin().get(), length);
    }
    return range;
  }

  int remainingBytesToCopy = length;

  size_t srcCursor = 0;
  size_t dstCursor = mCursor;
  // Remember how many small chunks existed on entry so a failed allocation
  // below can roll back to exactly this state.
  size_t currAllocLen = mSmallAllocs.Length();

  // Copy chunk-tail by chunk-tail: each iteration fills at most the space
  // remaining in the last small chunk, allocating a new chunk when the
  // cursor has moved past the end of the currently allocated space.
  while (remainingBytesToCopy > 0) {
    if (dstCursor >= mSmallAllocs.Length() * mChunkSize) {
      if (!AllocChunk()) {
        // Allocation failed, so roll back to the state at the start of this
        // Write() call and abort.
        while (mSmallAllocs.Length() > currAllocLen) {
          RefCountedShmem shm = mSmallAllocs.PopLastElement();
          RefCountedShm::Dealloc(mShmAllocator, shm);
        }
        MOZ_ASSERT(mSmallAllocs.Length() == currAllocLen);
        return layers::OffsetRange(0, start, 0);
      }
      // Allocation succeeded, so dstCursor should now be pointing to
      // something inside the allocation buffer
      MOZ_ASSERT(dstCursor < (mSmallAllocs.Length() * mChunkSize));
    }

    // Bounds of the last (current) chunk in logical-offset space.
    const size_t dstMaxOffset = mChunkSize * mSmallAllocs.Length();
    const size_t dstBaseOffset = mChunkSize * (mSmallAllocs.Length() - 1);

    MOZ_ASSERT(dstCursor >= dstBaseOffset);
    MOZ_ASSERT(dstCursor <= dstMaxOffset);

    size_t availableRange = dstMaxOffset - dstCursor;
    size_t copyRange = std::min<int>(availableRange, remainingBytesToCopy);

    uint8_t* srcPtr = &aBytes[srcCursor];
    uint8_t* dstPtr = RefCountedShm::GetBytes(mSmallAllocs.LastElement()) +
                      (dstCursor - dstBaseOffset);

    memcpy(dstPtr, srcPtr, copyRange);

    srcCursor += copyRange;
    dstCursor += copyRange;
    remainingBytesToCopy -= copyRange;

    // sanity check
    MOZ_ASSERT(remainingBytesToCopy >= 0);
  }

  // Only advance the public cursor once the whole payload is in place.
  mCursor += length;

  return layers::OffsetRange(0, start, length);
}

// Appends one refcounted small chunk of mChunkSize bytes. Takes a reference
// on the shmem before storing it; returns false (after logging) on failure.
bool ShmSegmentsWriter::AllocChunk() {
  RefCountedShmem shm;
  if (!mShmAllocator->AllocResourceShmem(mChunkSize, shm)) {
    gfxCriticalNote << "ShmSegmentsWriter failed to allocate chunk #"
                    << mSmallAllocs.Length();
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate chunk");
    return false;
  }
  RefCountedShm::AddRef(shm);
  mSmallAllocs.AppendElement(shm);
  return true;
}

// Allocates a dedicated shmem for a large payload. The returned range uses
// the 1-based index of the new shmem as its source() (0 is reserved for the
// small-chunk stream) and starts at offset 0. Returns a zero-length range on
// failure.
layers::OffsetRange ShmSegmentsWriter::AllocLargeChunk(size_t aSize) {
  ipc::Shmem shm;
  if (!mShmAllocator->AllocShmem(aSize, &shm)) {
    gfxCriticalNote
        << "ShmSegmentsWriter failed to allocate large chunk of size " << aSize;
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate large chunk");
    return layers::OffsetRange(0, 0, 0);
  }
  mLargeAllocs.AppendElement(shm);

  return layers::OffsetRange(mLargeAllocs.Length(), 0, aSize);
}

// Hands ownership of all accumulated shmems to the caller (for sending over
// IPC) and resets the writer to empty. The output arrays must start empty.
void ShmSegmentsWriter::Flush(nsTArray<RefCountedShmem>& aSmallAllocs,
                              nsTArray<ipc::Shmem>& aLargeAllocs) {
  MOZ_ASSERT(aSmallAllocs.IsEmpty());
  MOZ_ASSERT(aLargeAllocs.IsEmpty());
  aSmallAllocs = std::move(mSmallAllocs);
  aLargeAllocs = std::move(mLargeAllocs);
  mCursor = 0;
}

// True when nothing has been written since construction/Flush/Clear.
bool ShmSegmentsWriter::IsEmpty() const { return mCursor == 0; }

// Releases all owned shmems (via the allocator, if any) and resets the
// cursor. Safe to call repeatedly.
void ShmSegmentsWriter::Clear() {
  if (mShmAllocator) {
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mSmallAllocs);
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mLargeAllocs);
  }
  mCursor = 0;
}

// ShmSegmentsReader is the receiving-side counterpart of ShmSegmentsWriter:
// it reads OffsetRanges back out of the shmem arrays produced by Flush().
// The constructor validates every shmem up front; on any problem it leaves
// mChunkSize at zero, which makes subsequent Read() calls fail.
ShmSegmentsReader::ShmSegmentsReader(
    const nsTArray<RefCountedShmem>& aSmallShmems,
    const nsTArray<ipc::Shmem>& aLargeShmems)
    : mSmallAllocs(aSmallShmems), mLargeAllocs(aLargeShmems), mChunkSize(0) {
  if (mSmallAllocs.IsEmpty()) {
    return;
  }

  mChunkSize = RefCountedShm::GetSize(mSmallAllocs[0]);

  // Check that all shmems are readable and have the same size. If anything
  // isn't right, set mChunkSize to zero which signifies that the reader is
  // in an invalid state and Read calls will return false;
  for (const auto& shm : mSmallAllocs) {
    if (!RefCountedShm::IsValid(shm) ||
        RefCountedShm::GetSize(shm) != mChunkSize ||
        RefCountedShm::GetBytes(shm) == nullptr) {
      mChunkSize = 0;
      return;
    }
  }

  for (const auto& shm : mLargeAllocs) {
    if (!shm.IsReadable() || shm.get<uint8_t>() == nullptr) {
      mChunkSize = 0;
      return;
    }
  }
}

// Copies a range that lives in a dedicated large shmem (source() != 0) into
// aInto. Returns false if the 1-based source index or the length is out of
// bounds for the corresponding shmem.
bool ShmSegmentsReader::ReadLarge(const layers::OffsetRange& aRange,
                                  wr::Vec<uint8_t>& aInto) {
  // source = zero is for small allocs.
  MOZ_RELEASE_ASSERT(aRange.source() != 0);
  if (aRange.source() > mLargeAllocs.Length()) {
    return false;
  }
  size_t id = aRange.source() - 1;
  const ipc::Shmem& shm = mLargeAllocs[id];
  if (shm.Size<uint8_t>() < aRange.length()) {
    return false;
  }

  uint8_t* srcPtr = shm.get<uint8_t>();
  aInto.PushBytes(Range<uint8_t>(srcPtr, aRange.length()));

  return true;
}

// Copies the bytes described by aRange into aInto. Dispatches to ReadLarge()
// for large allocs; for small allocs it walks the chunk list, since a range
// may straddle chunk boundaries. Returns false if the reader is invalid
// (mChunkSize == 0) or the range exceeds the available data.
bool ShmSegmentsReader::Read(const layers::OffsetRange& aRange,
                             wr::Vec<uint8_t>& aInto) {
  if (aRange.length() == 0) {
    return true;
  }

  if (aRange.source() != 0) {
    return ReadLarge(aRange, aInto);
  }

  if (mChunkSize == 0) {
    return false;
  }

  if (aRange.start() + aRange.length() > mChunkSize * mSmallAllocs.Length()) {
    return false;
  }

  size_t initialLength = aInto.Length();

  size_t srcCursor = aRange.start();
  size_t remainingBytesToCopy = aRange.length();
  while (remainingBytesToCopy > 0) {
    // Map the logical offset to (chunk index, offset within chunk) and copy
    // at most the rest of that chunk per iteration.
    const size_t shm_idx = srcCursor / mChunkSize;
    const size_t ptrOffset = srcCursor % mChunkSize;
    const size_t copyRange =
        std::min(remainingBytesToCopy, mChunkSize - ptrOffset);
    uint8_t* srcPtr =
        RefCountedShm::GetBytes(mSmallAllocs[shm_idx]) + ptrOffset;

    aInto.PushBytes(Range<uint8_t>(srcPtr, copyRange));

    srcCursor += copyRange;
    remainingBytesToCopy -= copyRange;
  }

  return aInto.Length() - initialLength == aRange.length();
}

// Zero-copy variant of ReadLarge(): returns a view into the large shmem, or
// Nothing() on a bad source index or length.
Maybe<Range<uint8_t>> ShmSegmentsReader::GetReadPointerLarge(
    const layers::OffsetRange& aRange) {
  // source = zero is for small allocs.
  MOZ_RELEASE_ASSERT(aRange.source() != 0);
  if (aRange.source() > mLargeAllocs.Length()) {
    return Nothing();
  }
  size_t id = aRange.source() - 1;
  const ipc::Shmem& shm = mLargeAllocs[id];
  if (shm.Size<uint8_t>() < aRange.length()) {
    return Nothing();
  }

  uint8_t* srcPtr = shm.get<uint8_t>();
  return Some(Range<uint8_t>(srcPtr, aRange.length()));
}

// Zero-copy variant of Read(): returns a view into the shmem holding aRange.
// Unlike Read(), a small-alloc range that straddles a chunk boundary cannot
// be returned as one contiguous pointer, so it yields Nothing().
Maybe<Range<uint8_t>> ShmSegmentsReader::GetReadPointer(
    const layers::OffsetRange& aRange) {
  if (aRange.length() == 0) {
    return Some(Range<uint8_t>());
  }

  if (aRange.source() != 0) {
    return GetReadPointerLarge(aRange);
  }

  if (mChunkSize == 0 ||
      aRange.start() + aRange.length() > mChunkSize * mSmallAllocs.Length()) {
    return Nothing();
  }

  size_t srcCursor = aRange.start();
  size_t remainingBytesToCopy = aRange.length();
  const size_t shm_idx = srcCursor / mChunkSize;
  const size_t ptrOffset = srcCursor % mChunkSize;
  // Return nothing if we can't return a pointer to the full range
  if (mChunkSize - ptrOffset < remainingBytesToCopy) {
    return Nothing();
  }
  uint8_t* srcPtr = RefCountedShm::GetBytes(mSmallAllocs[shm_idx]) + ptrOffset;
  return Some(Range<uint8_t>(srcPtr, remainingBytesToCopy));
}

// IpcResourceUpdateQueue batches resource update ops plus their payload
// bytes (via mWriter) until Flush() sends them over IPC.
IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    layers::WebRenderBridgeChild* aAllocator, size_t aChunkSize)
    : mWriter(aAllocator, aChunkSize) {}

IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    IpcResourceUpdateQueue&& aOther) noexcept
    : mWriter(std::move(aOther.mWriter)),
      mUpdates(std::move(aOther.mUpdates)) {}
299 300 IpcResourceUpdateQueue& IpcResourceUpdateQueue::operator=( 301 IpcResourceUpdateQueue&& aOther) noexcept { 302 MOZ_ASSERT(IsEmpty(), "Will forget existing updates!"); 303 mWriter = std::move(aOther.mWriter); 304 mUpdates = std::move(aOther.mUpdates); 305 return *this; 306 } 307 308 void IpcResourceUpdateQueue::ReplaceResources(IpcResourceUpdateQueue&& aOther) { 309 MOZ_ASSERT(IsEmpty(), "Will forget existing updates!"); 310 mWriter = std::move(aOther.mWriter); 311 mUpdates = std::move(aOther.mUpdates); 312 } 313 314 bool IpcResourceUpdateQueue::AddImage(ImageKey key, 315 const ImageDescriptor& aDescriptor, 316 Range<uint8_t> aBytes) { 317 auto bytes = mWriter.Write(aBytes); 318 if (!bytes.length()) { 319 return false; 320 } 321 mUpdates.AppendElement(layers::OpAddImage(aDescriptor, bytes, 0, key)); 322 return true; 323 } 324 325 void IpcResourceUpdateQueue::AddSnapshotImage(SnapshotImageKey aKey) { 326 mUpdates.AppendElement(layers::OpAddSnapshotImage(aKey)); 327 } 328 329 bool IpcResourceUpdateQueue::AddBlobImage(BlobImageKey key, 330 const ImageDescriptor& aDescriptor, 331 Range<uint8_t> aBytes, 332 ImageIntRect aVisibleRect) { 333 MOZ_RELEASE_ASSERT(aDescriptor.width > 0 && aDescriptor.height > 0); 334 auto bytes = mWriter.Write(aBytes); 335 if (!bytes.length()) { 336 return false; 337 } 338 mUpdates.AppendElement( 339 layers::OpAddBlobImage(aDescriptor, bytes, aVisibleRect, 0, key)); 340 return true; 341 } 342 343 void IpcResourceUpdateQueue::AddSharedExternalImage(wr::ExternalImageId aExtId, 344 wr::ImageKey aKey) { 345 mUpdates.AppendElement(layers::OpAddSharedExternalImage(aExtId, aKey)); 346 } 347 348 void IpcResourceUpdateQueue::PushExternalImageForTexture( 349 wr::ExternalImageId aExtId, wr::ImageKey aKey, 350 layers::TextureClient* aTexture, bool aIsUpdate) { 351 MOZ_ASSERT(aTexture); 352 MOZ_ASSERT(aTexture->GetIPDLActor()); 353 MOZ_RELEASE_ASSERT(aTexture->GetIPDLActor()->GetIPCChannel() == 354 mWriter.WrBridge()->GetIPCChannel()); 355 
mUpdates.AppendElement(layers::OpPushExternalImageForTexture( 356 aExtId, aKey, WrapNotNull(aTexture->GetIPDLActor()), aIsUpdate)); 357 } 358 359 bool IpcResourceUpdateQueue::UpdateImageBuffer( 360 ImageKey aKey, const ImageDescriptor& aDescriptor, Range<uint8_t> aBytes) { 361 auto bytes = mWriter.Write(aBytes); 362 if (!bytes.length()) { 363 return false; 364 } 365 mUpdates.AppendElement(layers::OpUpdateImage(aDescriptor, bytes, aKey)); 366 return true; 367 } 368 369 bool IpcResourceUpdateQueue::UpdateBlobImage(BlobImageKey aKey, 370 const ImageDescriptor& aDescriptor, 371 Range<uint8_t> aBytes, 372 ImageIntRect aVisibleRect, 373 ImageIntRect aDirtyRect) { 374 MOZ_ASSERT(aVisibleRect.width > 0 && aVisibleRect.height > 0); 375 376 auto bytes = mWriter.Write(aBytes); 377 if (!bytes.length()) { 378 return false; 379 } 380 mUpdates.AppendElement(layers::OpUpdateBlobImage(aDescriptor, bytes, aKey, 381 aVisibleRect, aDirtyRect)); 382 return true; 383 } 384 385 void IpcResourceUpdateQueue::UpdateSharedExternalImage( 386 wr::ExternalImageId aExtId, wr::ImageKey aKey, ImageIntRect aDirtyRect) { 387 mUpdates.AppendElement( 388 layers::OpUpdateSharedExternalImage(aExtId, aKey, aDirtyRect)); 389 } 390 391 void IpcResourceUpdateQueue::SetBlobImageVisibleArea( 392 wr::BlobImageKey aKey, const ImageIntRect& aArea) { 393 mUpdates.AppendElement(layers::OpSetBlobImageVisibleArea(aArea, aKey)); 394 } 395 396 void IpcResourceUpdateQueue::DeleteImage(ImageKey aKey) { 397 mUpdates.AppendElement(layers::OpDeleteImage(aKey)); 398 } 399 400 void IpcResourceUpdateQueue::DeleteSnapshotImage(SnapshotImageKey aKey) { 401 mUpdates.AppendElement(layers::OpDeleteSnapshotImage(aKey)); 402 } 403 404 void IpcResourceUpdateQueue::DeleteBlobImage(BlobImageKey aKey) { 405 mUpdates.AppendElement(layers::OpDeleteBlobImage(aKey)); 406 } 407 408 bool IpcResourceUpdateQueue::AddRawFont(wr::FontKey aKey, Range<uint8_t> aBytes, 409 uint32_t aIndex) { 410 auto bytes = mWriter.Write(aBytes); 411 if 
(!bytes.length()) { 412 return false; 413 } 414 mUpdates.AppendElement(layers::OpAddRawFont(bytes, aIndex, aKey)); 415 return true; 416 } 417 418 bool IpcResourceUpdateQueue::AddFontDescriptor(wr::FontKey aKey, 419 Range<uint8_t> aBytes, 420 uint32_t aIndex) { 421 auto bytes = mWriter.Write(aBytes); 422 if (!bytes.length()) { 423 return false; 424 } 425 mUpdates.AppendElement(layers::OpAddFontDescriptor(bytes, aIndex, aKey)); 426 return true; 427 } 428 429 void IpcResourceUpdateQueue::DeleteFont(wr::FontKey aKey) { 430 mUpdates.AppendElement(layers::OpDeleteFont(aKey)); 431 } 432 433 void IpcResourceUpdateQueue::AddFontInstance( 434 wr::FontInstanceKey aKey, wr::FontKey aFontKey, float aGlyphSize, 435 const wr::FontInstanceOptions* aOptions, 436 const wr::FontInstancePlatformOptions* aPlatformOptions, 437 Range<const gfx::FontVariation> aVariations) { 438 auto bytes = mWriter.WriteAsBytes(aVariations); 439 mUpdates.AppendElement(layers::OpAddFontInstance( 440 aOptions ? Some(*aOptions) : Nothing(), 441 aPlatformOptions ? 
Some(*aPlatformOptions) : Nothing(), bytes, aKey, 442 aFontKey, aGlyphSize)); 443 } 444 445 void IpcResourceUpdateQueue::DeleteFontInstance(wr::FontInstanceKey aKey) { 446 mUpdates.AppendElement(layers::OpDeleteFontInstance(aKey)); 447 } 448 449 void IpcResourceUpdateQueue::Flush( 450 nsTArray<layers::OpUpdateResource>& aUpdates, 451 nsTArray<layers::RefCountedShmem>& aSmallAllocs, 452 nsTArray<ipc::Shmem>& aLargeAllocs) { 453 aUpdates = std::move(mUpdates); 454 mWriter.Flush(aSmallAllocs, aLargeAllocs); 455 } 456 457 bool IpcResourceUpdateQueue::IsEmpty() const { 458 if (mUpdates.Length() == 0) { 459 MOZ_ASSERT(mWriter.IsEmpty()); 460 return true; 461 } 462 return false; 463 } 464 465 void IpcResourceUpdateQueue::Clear() { 466 mWriter.Clear(); 467 mUpdates.Clear(); 468 } 469 470 // static 471 void IpcResourceUpdateQueue::ReleaseShmems( 472 ipc::IProtocol* aShmAllocator, nsTArray<layers::RefCountedShmem>& aShms) { 473 for (auto& shm : aShms) { 474 if (RefCountedShm::IsValid(shm) && RefCountedShm::Release(shm) == 0) { 475 RefCountedShm::Dealloc(aShmAllocator, shm); 476 } 477 } 478 aShms.Clear(); 479 } 480 481 // static 482 void IpcResourceUpdateQueue::ReleaseShmems(ipc::IProtocol* aShmAllocator, 483 nsTArray<ipc::Shmem>& aShms) { 484 for (auto& shm : aShms) { 485 aShmAllocator->DeallocShmem(shm); 486 } 487 aShms.Clear(); 488 } 489 490 } // namespace wr 491 } // namespace mozilla