tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

SourceBuffer.cpp (22879B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* This Source Code Form is subject to the terms of the Mozilla Public
      3 * License, v. 2.0. If a copy of the MPL was not distributed with this
      4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      5 
      6 #include "SourceBuffer.h"
      7 
      8 #include <algorithm>
      9 #include <cmath>
     10 #include <cstring>
     11 #include "mozilla/Likely.h"
     12 #include "nsIInputStream.h"
     13 #include "MainThreadUtils.h"
     14 #include "SurfaceCache.h"
     15 
     16 using std::max;
     17 using std::min;
     18 
     19 namespace mozilla {
     20 namespace image {
     21 
     22 //////////////////////////////////////////////////////////////////////////////
     23 // SourceBufferIterator implementation.
     24 //////////////////////////////////////////////////////////////////////////////
     25 
SourceBufferIterator::~SourceBufferIterator() {
  // Tell the owning SourceBuffer that this consumer is going away so it can
  // compact its storage once the last iterator is released. mOwner may be
  // null if this iterator was moved from.
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }
}
     31 
     32 SourceBufferIterator& SourceBufferIterator::operator=(
     33    SourceBufferIterator&& aOther) {
     34  if (mOwner) {
     35    mOwner->OnIteratorRelease();
     36  }
     37 
     38  mOwner = std::move(aOther.mOwner);
     39  mState = aOther.mState;
     40  mData = aOther.mData;
     41  mChunkCount = aOther.mChunkCount;
     42  mByteCount = aOther.mByteCount;
     43  mRemainderToRead = aOther.mRemainderToRead;
     44 
     45  return *this;
     46 }
     47 
// Consumes the bytes handed out by the previous advance, then either returns
// the next readable span (READY), arranges for @aConsumer to be resumed when
// more data arrives (via the owning SourceBuffer), or reports COMPLETE.
SourceBufferIterator::State SourceBufferIterator::AdvanceOrScheduleResume(
    size_t aRequestedBytes, IResumable* aConsumer) {
  MOZ_ASSERT(mOwner);

  if (MOZ_UNLIKELY(!HasMore())) {
    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
    return COMPLETE;
  }

  // The range of data [mOffset, mOffset + mNextReadLength) has just been read
  // by the caller (or at least they don't have any interest in it), so consume
  // that data.
  MOZ_ASSERT(mData.mIterating.mNextReadLength <=
             mData.mIterating.mAvailableLength);
  mData.mIterating.mOffset += mData.mIterating.mNextReadLength;
  mData.mIterating.mAvailableLength -= mData.mIterating.mNextReadLength;

  // An iterator can have a limit imposed on it to read only a subset of a
  // source buffer. If it is present, we need to mimic the same behaviour as
  // the owning SourceBuffer. (mRemainderToRead == SIZE_MAX means "no limit".)
  if (MOZ_UNLIKELY(mRemainderToRead != SIZE_MAX)) {
    MOZ_ASSERT(mData.mIterating.mNextReadLength <= mRemainderToRead);
    mRemainderToRead -= mData.mIterating.mNextReadLength;

    // The limit is exhausted: report completion regardless of how much data
    // the underlying SourceBuffer still holds.
    if (MOZ_UNLIKELY(mRemainderToRead == 0)) {
      mData.mIterating.mNextReadLength = 0;
      SetComplete(NS_OK);
      return COMPLETE;
    }

    // Clamp the request so we never hand out bytes past the limit.
    if (MOZ_UNLIKELY(aRequestedBytes > mRemainderToRead)) {
      aRequestedBytes = mRemainderToRead;
    }
  }

  mData.mIterating.mNextReadLength = 0;

  if (MOZ_LIKELY(mState == READY)) {
    // If the caller wants zero bytes of data, that's easy enough; we just
    // configured ourselves for a zero-byte read above!  In theory we could do
    // this even in the START state, but it's not important for performance and
    // breaking the ability of callers to assert that the pointer returned by
    // Data() is non-null doesn't seem worth it.
    if (aRequestedBytes == 0) {
      MOZ_ASSERT(mData.mIterating.mNextReadLength == 0);
      return READY;
    }

    // Try to satisfy the request out of our local buffer. This is potentially
    // much faster than requesting data from our owning SourceBuffer because we
    // don't have to take the lock. Note that if we have anything at all in our
    // local buffer, we use it to satisfy the request; @aRequestedBytes is just
    // the *maximum* number of bytes we can return.
    if (mData.mIterating.mAvailableLength > 0) {
      return AdvanceFromLocalBuffer(aRequestedBytes);
    }
  }

  // Our local buffer is empty, so we'll have to request data from our owning
  // SourceBuffer.
  return mOwner->AdvanceIteratorOrScheduleResume(*this, aRequestedBytes,
                                                 aConsumer);
}
    111 
// Returns true if at most @aBytes remain unread. Delegates to the owning
// SourceBuffer, which knows the total length of the buffered data.
bool SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const {
  MOZ_ASSERT(mOwner);
  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
}
    116 
    117 //////////////////////////////////////////////////////////////////////////////
    118 // SourceBuffer implementation.
    119 //////////////////////////////////////////////////////////////////////////////
    120 
// Out-of-line definitions for the chunk-capacity bounds declared in
// SourceBuffer.h; needed so ODR-uses of these constants link correctly.
const size_t SourceBuffer::MIN_CHUNK_CAPACITY;
const size_t SourceBuffer::MAX_CHUNK_CAPACITY;
    123 
// A SourceBuffer starts out empty: no chunks, no consumers, not yet compacted.
SourceBuffer::SourceBuffer()
    : mMutex("image::SourceBuffer"), mConsumerCount(0), mCompacted(false) {}
    126 
SourceBuffer::~SourceBuffer() {
  // Every iterator holds a reference to its SourceBuffer, so by the time we
  // destruct, all consumers should have been released.
  MOZ_ASSERT(mConsumerCount == 0,
             "SourceBuffer destroyed with active consumers");
}
    131 
    132 nsresult SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk) {
    133  mMutex.AssertCurrentThreadOwns();
    134 
    135  if (MOZ_UNLIKELY(!aChunk)) {
    136    return NS_ERROR_OUT_OF_MEMORY;
    137  }
    138 
    139  if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
    140    return NS_ERROR_OUT_OF_MEMORY;
    141  }
    142 
    143  if (MOZ_UNLIKELY(!mChunks.AppendElement(std::move(*aChunk), fallible))) {
    144    return NS_ERROR_OUT_OF_MEMORY;
    145  }
    146 
    147  return NS_OK;
    148 }
    149 
    150 Maybe<SourceBuffer::Chunk> SourceBuffer::CreateChunk(
    151    size_t aCapacity, size_t aExistingCapacity /* = 0 */,
    152    bool aRoundUp /* = true */) {
    153  if (MOZ_UNLIKELY(aCapacity == 0)) {
    154    MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
    155    return Nothing();
    156  }
    157 
    158  // Round up if requested.
    159  size_t finalCapacity = aRoundUp ? RoundedUpCapacity(aCapacity) : aCapacity;
    160 
    161  // Use the size of the SurfaceCache as an additional heuristic to avoid
    162  // allocating huge buffers. Generally images do not get smaller when decoded,
    163  // so if we could store the source data in the SurfaceCache, we assume that
    164  // there's no way we'll be able to store the decoded version.
    165  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(finalCapacity + aExistingCapacity))) {
    166    NS_WARNING(
    167        "SourceBuffer refused to create chunk too large for SurfaceCache");
    168    return Nothing();
    169  }
    170 
    171  return Some(Chunk(finalCapacity));
    172 }
    173 
// Consolidates our storage into a single chunk (or trims slack from the last
// chunk) once the buffer is complete and no consumers remain. Runs at most
// once per SourceBuffer; failures are non-fatal and simply leave the data
// un-compacted.
nsresult SourceBuffer::Compact() {
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // If we've tried to compact once, don't attempt again.
  if (mCompacted) {
    return NS_OK;
  }

  mCompacted = true;

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have exactly one chunk with no excess capacity, there's nothing to
  // gain from compacting.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // If the last chunk has the maximum capacity, then we know the total size
  // will be quite large and not worth consolidating. We can likely/cheaply
  // trim the last chunk if it is too big however.
  size_t capacity = mChunks.LastElement().Capacity();
  if (capacity == MAX_CHUNK_CAPACITY) {
    size_t lastLength = mChunks.LastElement().Length();
    if (lastLength != capacity) {
      mChunks.LastElement().SetCapacity(lastLength);
    }
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    length += mChunks[i].Length();
  }

  // If our total length is zero (which means ExpectLength() got called, but no
  // data ever actually got written) then just empty our chunk list.
  if (MOZ_UNLIKELY(length == 0)) {
    mChunks.Clear();
    return NS_OK;
  }

  // Grow (or shrink) the first chunk to hold everything; on failure we keep
  // the existing multi-chunk layout, which is still valid.
  Chunk& mergeChunk = mChunks[0];
  if (MOZ_UNLIKELY(!mergeChunk.SetCapacity(length))) {
    NS_WARNING("Failed to reallocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the newly reallocated first chunk.
  for (uint32_t i = 1; i < mChunks.Length(); ++i) {
    size_t offset = mergeChunk.Length();
    MOZ_ASSERT(offset < mergeChunk.Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= mergeChunk.Capacity());

    memcpy(mergeChunk.Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    mergeChunk.AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(mergeChunk.Length() == mergeChunk.Capacity(),
             "Compacted chunk has slack space");

  // Remove the redundant chunks.
  mChunks.RemoveLastElements(mChunks.Length() - 1);
  mChunks.Compact();

  return NS_OK;
}
    252 
    253 /* static */
    254 size_t SourceBuffer::RoundedUpCapacity(size_t aCapacity) {
    255  // Protect against overflow.
    256  if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
    257    return aCapacity;
    258  }
    259 
    260  // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
    261  // size of a page).
    262  size_t roundedCapacity =
    263      (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
    264  MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
    265  MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");
    266 
    267  return roundedCapacity;
    268 }
    269 
    270 size_t SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity) {
    271  mMutex.AssertCurrentThreadOwns();
    272 
    273  // We grow the source buffer using a Fibonacci growth rate. It will be capped
    274  // at MAX_CHUNK_CAPACITY, unless the available data exceeds that.
    275 
    276  size_t length = mChunks.Length();
    277 
    278  if (length == 0 || aMinCapacity > MAX_CHUNK_CAPACITY) {
    279    return aMinCapacity;
    280  }
    281 
    282  if (length == 1) {
    283    return min(max(2 * mChunks[0].Capacity(), aMinCapacity),
    284               MAX_CHUNK_CAPACITY);
    285  }
    286 
    287  return min(
    288      max(mChunks[length - 1].Capacity() + mChunks[length - 2].Capacity(),
    289          aMinCapacity),
    290      MAX_CHUNK_CAPACITY);
    291 }
    292 
    293 void SourceBuffer::AddWaitingConsumer(IResumable* aConsumer) {
    294  mMutex.AssertCurrentThreadOwns();
    295 
    296  MOZ_ASSERT(!mStatus, "Waiting when we're complete?");
    297 
    298  if (aConsumer) {
    299    mWaitingConsumers.AppendElement(aConsumer);
    300  }
    301 }
    302 
    303 void SourceBuffer::ResumeWaitingConsumers() {
    304  mMutex.AssertCurrentThreadOwns();
    305 
    306  if (mWaitingConsumers.Length() == 0) {
    307    return;
    308  }
    309 
    310  for (uint32_t i = 0; i < mWaitingConsumers.Length(); ++i) {
    311    mWaitingConsumers[i]->Resume();
    312  }
    313 
    314  mWaitingConsumers.Clear();
    315 }
    316 
// Pre-allocates an initial chunk sized for @aExpectedLength (clamped to
// MAX_CHUNK_CAPACITY, without page rounding) so subsequent Append() calls
// avoid reallocation. Must be called before any data is appended; duplicate
// or late calls are ignored.
nsresult SourceBuffer::ExpectLength(size_t aExpectedLength) {
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  // Refuse sizes the SurfaceCache could never hold; the decoded image would
  // be at least as large (see CreateChunk's heuristic).
  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(aExpectedLength))) {
    NS_WARNING("SourceBuffer refused to store too large buffer");
    return HandleError(NS_ERROR_INVALID_ARG);
  }

  // Allocate exactly the expected length (no rounding) so a correctly-sized
  // stream fits in one chunk with no slack.
  size_t length = min(aExpectedLength, MAX_CHUNK_CAPACITY);
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(length,
                                                     /* aExistingCapacity */ 0,
                                                     /* aRoundUp */ false))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}
    346 
// Copies @aLength bytes from @aData into the buffer. The copy itself happens
// *outside* the lock: we snapshot the current chunk's state under the lock,
// memcpy without it, then re-take the lock to publish the new lengths and
// wake waiting consumers. This is only safe because there is a single
// producer.
nsresult SourceBuffer::Append(const char* aData, size_t aLength) {
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;
  size_t totalCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    // First write with no ExpectLength(): create an initial chunk big enough
    // for this entire append.
    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity =
        forNextChunk > 0 ? FibonacciCapacityWithMinimum(forNextChunk) : 0;

    // Total existing capacity feeds CreateChunk's SurfaceCache heuristic.
    for (uint32_t i = 0; i < mChunks.Length(); ++i) {
      totalCapacity += mChunks[i].Capacity();
    }
  }

  // Write everything we can fit into the current chunk. (Lock-free: only the
  // producer touches the region past currentChunkLength.)
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");
    nextChunk = CreateChunk(nextChunkCapacity, totalCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(std::move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}
    443 
// Takes ownership of an existing heap buffer instead of copying it. The
// provided realloc/free functions must match the allocator that produced
// @aData, since the Chunk will use them to resize and release the buffer.
nsresult SourceBuffer::AdoptData(char* aData, size_t aLength,
                                void* (*aRealloc)(void*, size_t),
                                void (*aFree)(void*)) {
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");
  MutexAutoLock lock(mMutex);
  return AppendChunk(Some(Chunk(aData, aLength, aRealloc, aFree)));
}
    452 
    453 static nsresult AppendToSourceBuffer(nsIInputStream*, void* aClosure,
    454                                     const char* aFromRawSegment, uint32_t,
    455                                     uint32_t aCount, uint32_t* aWriteCount) {
    456  SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);
    457 
    458  // Copy the source data. Unless we hit OOM, we squelch the return value here,
    459  // because returning an error means that ReadSegments stops reading data, and
    460  // we want to ensure that we read everything we get. If we hit OOM then we
    461  // return a failed status to the caller.
    462  nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
    463  if (rv == NS_ERROR_OUT_OF_MEMORY) {
    464    return rv;
    465  }
    466 
    467  // Report that we wrote everything we got.
    468  *aWriteCount = aCount;
    469 
    470  return NS_OK;
    471 }
    472 
// Reads up to @aCount bytes from @aInputStream into this buffer via
// ReadSegments. A short read indicates either an internal Append failure
// (whose status we surface) or a failing stream.
nsresult SourceBuffer::AppendFromInputStream(nsIInputStream* aInputStream,
                                            uint32_t aCount) {
  uint32_t bytesRead;
  nsresult rv = aInputStream->ReadSegments(AppendToSourceBuffer, this, aCount,
                                           &bytesRead);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (bytesRead == 0) {
    // The loading of the image has been canceled.
    return NS_ERROR_FAILURE;
  }

  if (bytesRead != aCount) {
    // Only some of the given data was read. We may have failed in
    // SourceBuffer::Append but ReadSegments swallowed the error. Otherwise the
    // stream itself failed to yield the data.
    MutexAutoLock lock(mMutex);
    if (mStatus) {
      MOZ_ASSERT(NS_FAILED(*mStatus));
      return *mStatus;
    }

    MOZ_ASSERT_UNREACHABLE("AppendToSourceBuffer should consume everything");
  }

  return rv;
}
    502 
// Marks the buffer as complete with the given status, wakes all waiting
// consumers, and compacts storage if no consumers remain. A success status
// with no data written is converted to failure.
void SourceBuffer::Complete(nsresult aStatus) {
  MutexAutoLock lock(mMutex);

  // When an error occurs internally (e.g. due to an OOM), we save the status.
  // This will indirectly trigger a failure higher up and that will call
  // SourceBuffer::Complete. Since it doesn't necessarily know we are already
  // complete, it is safe to ignore. (Only a redundant NS_IMAGELIB_ERROR_FAILURE
  // after an internal error is tolerated; anything else is a double-Complete.)
  if (mStatus && (MOZ_UNLIKELY(NS_SUCCEEDED(*mStatus) ||
                               aStatus != NS_IMAGELIB_ERROR_FAILURE))) {
    MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
    return;
  }

  if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
    // It's illegal to succeed without writing anything.
    aStatus = NS_ERROR_FAILURE;
  }

  mStatus = Some(aStatus);

  // Resume any waiting consumers now that we're complete.
  ResumeWaitingConsumers();

  // If we still have active consumers, just return; the last one to release
  // its iterator will trigger compaction (see OnIteratorRelease).
  if (mConsumerCount > 0) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}
    534 
    535 bool SourceBuffer::IsComplete() {
    536  MutexAutoLock lock(mMutex);
    537  return bool(mStatus);
    538 }
    539 
    540 size_t SourceBuffer::SizeOfIncludingThisWithComputedFallback(
    541    MallocSizeOf aMallocSizeOf) const {
    542  MutexAutoLock lock(mMutex);
    543 
    544  size_t n = aMallocSizeOf(this);
    545  n += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
    546 
    547  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    548    size_t chunkSize = aMallocSizeOf(mChunks[i].Data());
    549 
    550    if (chunkSize == 0) {
    551      // We're on a platform where moz_malloc_size_of always returns 0.
    552      chunkSize = mChunks[i].Capacity();
    553    }
    554 
    555    n += chunkSize;
    556  }
    557 
    558  return n;
    559 }
    560 
// Creates a new consumer of this buffer reading at most @aReadLength bytes.
// The consumer count keeps Compact() from running while iterators are alive.
SourceBufferIterator SourceBuffer::Iterator(size_t aReadLength) {
  {
    // Scope the lock tightly; the iterator constructor doesn't need it.
    MutexAutoLock lock(mMutex);
    mConsumerCount++;
  }

  return SourceBufferIterator(this, aReadLength);
}
    569 
    570 void SourceBuffer::OnIteratorRelease() {
    571  MutexAutoLock lock(mMutex);
    572 
    573  MOZ_ASSERT(mConsumerCount > 0, "Consumer count doesn't add up");
    574  mConsumerCount--;
    575 
    576  // If we still have active consumers, or we're not complete yet, then return.
    577  if (mConsumerCount > 0 || !mStatus) {
    578    return;
    579  }
    580 
    581  // Attempt to compact our buffer down to a single chunk.
    582  Compact();
    583 }
    584 
// Returns true iff @aIterator has at most @aBytes of data left to read. Only
// answerable once the buffer is complete; before that we conservatively say
// no, since more data could still arrive.
bool SourceBuffer::RemainingBytesIsNoMoreThan(
    const SourceBufferIterator& aIterator, size_t aBytes) const {
  MutexAutoLock lock(mMutex);

  // If we're not complete, we always say no.
  if (!mStatus) {
    return false;
  }

  // If the iterator's at the end, the answer is trivial.
  if (!aIterator.HasMore()) {
    return true;
  }

  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
  size_t iteratorLength = aIterator.mData.mIterating.mAvailableLength;

  // Include the bytes the iterator is currently pointing to in the limit, so
  // that the current chunk doesn't have to be a special case.
  // NOTE(review): this sum could overflow for pathologically large aBytes;
  // presumably callers pass small limits — confirm if that assumption changes.
  size_t bytes = aBytes + iteratorOffset + iteratorLength;

  // Count the length over all of our chunks, starting with the one that the
  // iterator is currently pointing to. (This is O(N), but N is expected to be
  // ~1, so it doesn't seem worth caching the length separately.)
  size_t lengthSoFar = 0;
  for (uint32_t i = iteratorChunk; i < mChunks.Length(); ++i) {
    lengthSoFar += mChunks[i].Length();
    if (lengthSoFar > bytes) {
      return false;
    }
  }

  return true;
}
    620 
// The locked half of iterator advancement: gives @aIterator its next span of
// data (possibly moving it to the next chunk), registers @aConsumer to be
// resumed if no data is available yet, or completes the iterator if the
// buffer has finished.
SourceBufferIterator::State SourceBuffer::AdvanceIteratorOrScheduleResume(
    SourceBufferIterator& aIterator, size_t aRequestedBytes,
    IResumable* aConsumer) {
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aIterator.HasMore(),
             "Advancing a completed iterator and "
             "AdvanceOrScheduleResume didn't catch it");

  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
    // This SourceBuffer is complete due to an error; all reads fail.
    return aIterator.SetComplete(*mStatus);
  }

  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
    // We haven't gotten an initial chunk yet.
    AddWaitingConsumer(aConsumer);
    return aIterator.SetWaiting(!!aConsumer);
  }

  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());

  // iteratorEnd is the position just past everything the iterator has
  // consumed within its current chunk.
  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
                       aIterator.mData.mIterating.mAvailableLength;
  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());

  if (iteratorEnd < currentChunk.Length()) {
    // There's more data in the current chunk.
    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
                              iteratorEnd, currentChunk.Length() - iteratorEnd,
                              aRequestedBytes);
  }

  // A chunk is only "finished" once it's filled to capacity; the producer
  // appends to the last chunk until then.
  if (iteratorEnd == currentChunk.Capacity() &&
      !IsLastChunk(iteratorChunkIdx)) {
    // Advance to the next chunk.
    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
                              nextChunk.Length(), aRequestedBytes);
  }

  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");

  if (mStatus) {
    // There's no more data and this SourceBuffer completed successfully.
    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
    return aIterator.SetComplete(*mStatus);
  }

  // We're not complete, but there's no more data right now. Arrange to wake up
  // the consumer when we get more data.
  AddWaitingConsumer(aConsumer);
  return aIterator.SetWaiting(!!aConsumer);
}
    678 
// Records a fatal error status and drops waiting readers. Returns the error
// so callers can `return HandleError(...)`. Note the waiters are dropped
// without being resumed — acceptable only for the OOM/invalid-arg cases the
// assertion below documents.
nsresult SourceBuffer::HandleError(nsresult aError) {
  MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
  MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY || aError == NS_ERROR_INVALID_ARG,
             "Unexpected error; may want to notify waiting readers, which "
             "HandleError currently doesn't do");

  mMutex.AssertCurrentThreadOwns();

  NS_WARNING("SourceBuffer encountered an unrecoverable error");

  // Record the error.
  mStatus = Some(aError);

  // Drop our references to waiting readers.
  mWaitingConsumers.Clear();

  return *mStatus;
}
    697 
    698 bool SourceBuffer::IsEmpty() {
    699  mMutex.AssertCurrentThreadOwns();
    700  return mChunks.Length() == 0 || mChunks[0].Length() == 0;
    701 }
    702 
    703 bool SourceBuffer::IsLastChunk(uint32_t aChunk) {
    704  mMutex.AssertCurrentThreadOwns();
    705  return aChunk + 1 == mChunks.Length();
    706 }
    707 
    708 }  // namespace image
    709 }  // namespace mozilla