tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

VideoOutput.h (11165B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
      2 /* This Source Code Form is subject to the terms of the Mozilla Public
      3 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
      4 * You can obtain one at http://mozilla.org/MPL/2.0/. */
      5 
      6 #ifndef VideoOutput_h
      7 #define VideoOutput_h
      8 
      9 #include "MediaTrackListener.h"
     10 #include "VideoFrameContainer.h"
     11 
     12 namespace mozilla {
     13 
     14 static bool SetImageToBlackPixel(layers::PlanarYCbCrImage* aImage) {
     15  uint8_t blackPixel[] = {0x10, 0x80, 0x80};
     16 
     17  layers::PlanarYCbCrData data;
     18  data.mYChannel = blackPixel;
     19  data.mCbChannel = blackPixel + 1;
     20  data.mCrChannel = blackPixel + 2;
     21  data.mYStride = data.mCbCrStride = 1;
     22  data.mPictureRect = gfx::IntRect(0, 0, 1, 1);
     23  data.mYUVColorSpace = gfx::YUVColorSpace::BT601;
     24  // This could be made FULL once bug 1568745 is complete. A black pixel being
     25  // 0x00, 0x80, 0x80
     26  data.mColorRange = gfx::ColorRange::LIMITED;
     27 
     28  return NS_SUCCEEDED(aImage->CopyData(data));
     29 }
     30 
// Renders a video track's frames into a VideoFrameContainer. Buffers incoming
// chunks (tagged with monotonically increasing FrameIDs) and forwards them to
// the container, substituting a shared 1x1 black image when the track is
// disabled or a chunk is forced black.
class VideoOutput : public DirectMediaTrackListener {
protected:
 typedef layers::Image Image;
 typedef layers::ImageContainer ImageContainer;
 typedef layers::ImageContainer::FrameID FrameID;
 typedef layers::ImageContainer::ProducerID ProducerID;

 virtual ~VideoOutput() = default;

 // Drops frames whose display interval has fully passed. A frame only ends
 // when the next frame starts, so the newest frame with a timestamp in the
 // past is kept as the current frame. Assumes mFrames is ordered by
 // mTimeStamp (the loop breaks at the first future frame).
 void DropPastFrames() {
   TimeStamp now = TimeStamp::Now();
   size_t nrChunksInPast = 0;
   for (const auto& idChunkPair : mFrames) {
     const VideoChunk& chunk = idChunkPair.second;
     if (chunk.mTimeStamp > now) {
       break;
     }
     ++nrChunksInPast;
   }
   if (nrChunksInPast > 1) {
     // We need to keep one frame that starts in the past, because it only ends
     // when the next frame starts (which also needs to be in the past for it
     // to drop).
     mFrames.RemoveElementsAt(0, nrChunksInPast - 1);
   }
 }

 // Variant of SendFrames() for paths where the caller must already hold
 // mMutex (the realtime data path). SendFrames() itself is also called
 // without the lock from NotifyRemoved()/NotifyEnded(); see the comments
 // there for why that is safe.
 void SendFramesEnsureLocked() {
   mMutex.AssertCurrentThreadOwns();
   SendFrames();
 }

 // Pushes the buffered frames to mVideoFrameContainer and dispatches an
 // invalidation to the main thread. No-op if there is nothing to send, or if
 // the track is disabled and the black frame has already been sent.
 void SendFrames() {
   DropPastFrames();

   if (mFrames.IsEmpty()) {
     return;
   }

   // A disabled track needs only a single black frame in the compositor.
   if (!mEnabled && mDisabledBlackImageSent) {
     return;
   }

   // Collect any new frames produced in this iteration.
   AutoTArray<ImageContainer::NonOwningImage, 16> images;
   PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;

   for (const auto& idChunkPair : mFrames) {
     ImageContainer::FrameID frameId = idChunkPair.first;
     const VideoChunk& chunk = idChunkPair.second;
     const VideoFrame& frame = chunk.mFrame;
     Image* image = frame.GetImage();
     if (frame.GetForceBlack() || !mEnabled) {
       if (!mBlackImage) {
         // Lazily create the shared black image on first use.
         RefPtr<Image> blackImage = mVideoFrameContainer->GetImageContainer()
                                        ->CreatePlanarYCbCrImage();
         if (blackImage) {
           // Sets the image to a single black pixel, which will be scaled to
           // fill the rendered size.
           if (SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage())) {
             mBlackImage = blackImage;
           }
         }
       }
       if (mBlackImage) {
         image = mBlackImage;
       }
     }
     if (!image) {
       // We ignore null images.
       continue;
     }
     ImageContainer::NonOwningImage nonOwningImage(
         image, chunk.mTimeStamp, frameId, mProducerID,
         chunk.mProcessingDuration, chunk.mMediaTime, chunk.mWebrtcCaptureTime,
         chunk.mWebrtcReceiveTime, chunk.mRtpTimestamp);
     images.AppendElement(std::move(nonOwningImage));

     lastPrincipalHandle = chunk.GetPrincipalHandle();

     // When disabled, stop after the first (black) frame and remember that
     // it has been sent, so later calls can return early.
     if (!mEnabled && mBlackImage) {
       MOZ_ASSERT(images.Length() == 1);
       mDisabledBlackImageSent = true;
       break;
     }
   }

   if (images.IsEmpty()) {
     // This could happen if the only images in mFrames are null. We leave the
     // container at the current frame in this case.
     mVideoFrameContainer->ClearFutureFrames();
     return;
   }

   bool principalHandleChanged =
       lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
       lastPrincipalHandle != mVideoFrameContainer->GetLastPrincipalHandle();

   if (principalHandleChanged) {
     // Tell the container from which frame id onward the new principal
     // applies.
     mVideoFrameContainer->UpdatePrincipalHandleForFrameID(
         lastPrincipalHandle, images.LastElement().mFrameID);
   }

   mVideoFrameContainer->SetCurrentFrames(
       mFrames[0].second.mFrame.GetIntrinsicSize(), images);
   // Invalidation has to run on the main thread.
   mMainThread->Dispatch(NewRunnableMethod("VideoFrameContainer::Invalidate",
                                           mVideoFrameContainer,
                                           &VideoFrameContainer::Invalidate));
 }

 // Returns a fresh, strictly increasing frame id. mFrameID is shared across
 // threads, so mMutex must be held.
 FrameID NewFrameID() {
   mMutex.AssertCurrentThreadOwns();
   return ++mFrameID;
 }

public:
 VideoOutput(VideoFrameContainer* aContainer, AbstractThread* aMainThread)
     : mMutex("VideoOutput::mMutex"),
       mVideoFrameContainer(aContainer),
       mMainThread(aMainThread) {}
 // Buffers each incoming chunk with a new frame id and forwards the buffer to
 // the container. Called by whoever produces this track's data (this is a
 // direct listener).
 void NotifyRealtimeTrackData(MediaTrackGraph* aGraph, TrackTime aTrackOffset,
                              const MediaSegment& aMedia) override {
   MOZ_ASSERT(aMedia.GetType() == MediaSegment::VIDEO);
   const VideoSegment& video = static_cast<const VideoSegment&>(aMedia);
   MutexAutoLock lock(mMutex);
   for (VideoSegment::ConstChunkIterator i(video); !i.IsEnded(); i.Next()) {
     if (!mLastFrameTime.IsNull() && i->mTimeStamp < mLastFrameTime) {
       // Time can go backwards if the source is a captured MediaDecoder and
       // it seeks, as the previously buffered frames would stretch into the
       // future. If this happens, we clear the buffered frames and start over.
       mFrames.ClearAndRetainStorage();
     }
     mFrames.AppendElement(std::make_pair(NewFrameID(), *i));
     mLastFrameTime = i->mTimeStamp;
   }

   SendFramesEnsureLocked();
 }
 // Flushes the current frame to the compositor and clears the buffer.
 void NotifyRemoved(MediaTrackGraph* aGraph) override {
   // Doesn't need locking by mMutex, since the direct listener is removed from
   // the track before we get notified.
   if (mFrames.Length() <= 1) {
     // The compositor has already received the last frame.
     mFrames.ClearAndRetainStorage();
     mVideoFrameContainer->ClearFutureFrames();
     return;
   }

   // The compositor has multiple frames. ClearFutureFrames() would only retain
   // the first as that's normally the current one. We however stop doing
   // SetCurrentFrames() once we've received the last frame in a track, so
   // there might be old frames lingering. We'll find the current one and
   // re-send that.
   DropPastFrames();
   mFrames.RemoveLastElements(mFrames.Length() - 1);
   SendFrames();
   mFrames.ClearAndRetainStorage();
 }
 // Re-sends only the final frame of the track, then clears the buffer.
 void NotifyEnded(MediaTrackGraph* aGraph) override {
   // Doesn't need locking by mMutex, since for the track to end, it must have
   // been ended by the source, meaning that the source won't append more data.
   if (mFrames.IsEmpty()) {
     return;
   }

   // Re-send only the last one to the compositor.
   mFrames.RemoveElementsAt(0, mFrames.Length() - 1);
   SendFrames();
   mFrames.ClearAndRetainStorage();
 }
 // Tracks mEnabled and re-sends frames (with fresh frame ids) so the
 // black/real state visible in the compositor matches the new enabled state.
 void NotifyEnabledStateChanged(MediaTrackGraph* aGraph,
                                bool aEnabled) override {
   MutexAutoLock lock(mMutex);
   mEnabled = aEnabled;
   DropPastFrames();
   if (mEnabled) {
     mDisabledBlackImageSent = false;
   }
   if (!mEnabled || mFrames.Length() > 1) {
     // Re-send frames when disabling, as new frames may not arrive. When
     // enabling we keep them black until new frames arrive, or re-send if we
     // already have frames in the future. If we're disabling and there are no
     // frames available yet, we invent one. Unfortunately with a hardcoded
     // size.
     //
     // Since mEnabled will affect whether
     // frames are real, or black, we assign new FrameIDs whenever we re-send
     // frames after an mEnabled change.
     for (auto& idChunkPair : mFrames) {
       idChunkPair.first = NewFrameID();
     }
     if (mFrames.IsEmpty()) {
       VideoSegment v;
       v.AppendFrame(nullptr, gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE,
                     true, TimeStamp::Now());
       mFrames.AppendElement(std::make_pair(NewFrameID(), *v.GetLastChunk()));
     }
     SendFramesEnsureLocked();
   }
 }

 Mutex mMutex MOZ_UNANNOTATED;
 // Timestamp of the most recently appended chunk; used to detect time going
 // backwards (e.g. a captured MediaDecoder seeking).
 TimeStamp mLastFrameTime;
 // Once the frame is forced to black, we initialize mBlackImage for use in any
 // following forced-black frames.
 RefPtr<Image> mBlackImage;
 // True once mBlackImage has been sent due to mEnabled being false.
 bool mDisabledBlackImageSent = false;
 bool mEnabled = true;
 // This array is accessed from both the direct video thread, and the graph
 // thread. Protected by mMutex.
 nsTArray<std::pair<ImageContainer::FrameID, VideoChunk>> mFrames;
 // Accessed from both the direct video thread, and the graph thread. Protected
 // by mMutex.
 FrameID mFrameID = 0;
 const RefPtr<VideoFrameContainer> mVideoFrameContainer;
 const RefPtr<AbstractThread> mMainThread;
 const ProducerID mProducerID = ImageContainer::AllocateProducerID();
};
    250 
    251 /**
    252 * This listener observes the first video frame to arrive with a non-empty size,
    253 * and renders it to its VideoFrameContainer.
    254 */
    255 class FirstFrameVideoOutput : public VideoOutput {
    256 public:
    257  FirstFrameVideoOutput(VideoFrameContainer* aContainer,
    258                        AbstractThread* aMainThread)
    259      : VideoOutput(aContainer, aMainThread) {
    260    MOZ_ASSERT(NS_IsMainThread());
    261  }
    262 
    263  // NB that this overrides VideoOutput::NotifyRealtimeTrackData, so we can
    264  // filter out all frames but the first one with a real size. This allows us to
    265  // later re-use the logic in VideoOutput for rendering that frame.
    266  void NotifyRealtimeTrackData(MediaTrackGraph* aGraph, TrackTime aTrackOffset,
    267                               const MediaSegment& aMedia) override {
    268    MOZ_ASSERT(aMedia.GetType() == MediaSegment::VIDEO);
    269 
    270    if (mInitialSizeFound) {
    271      return;
    272    }
    273 
    274    const VideoSegment& video = static_cast<const VideoSegment&>(aMedia);
    275    for (VideoSegment::ConstChunkIterator c(video); !c.IsEnded(); c.Next()) {
    276      if (c->mFrame.GetIntrinsicSize() != gfx::IntSize(0, 0)) {
    277        mInitialSizeFound = true;
    278 
    279        mMainThread->Dispatch(NS_NewRunnableFunction(
    280            "FirstFrameVideoOutput::FirstFrameRenderedSetter",
    281            [self = RefPtr<FirstFrameVideoOutput>(this)] {
    282              self->mFirstFrameRendered = true;
    283            }));
    284 
    285        // Pick the first frame and run it through the rendering code.
    286        VideoSegment segment;
    287        segment.AppendFrame(*c);
    288        VideoOutput::NotifyRealtimeTrackData(aGraph, aTrackOffset, segment);
    289        return;
    290      }
    291    }
    292  }
    293 
    294  // Main thread only.
    295  Watchable<bool> mFirstFrameRendered = {
    296      false, "FirstFrameVideoOutput::mFirstFrameRendered"};
    297 
    298 private:
    299  // Whether a frame with a concrete size has been received. May only be
    300  // accessed on the MTG's appending thread. (this is a direct listener so we
    301  // get called by whoever is producing this track's data)
    302  bool mInitialSizeFound = false;
    303 };
    304 
    305 }  // namespace mozilla
    306 
    307 #endif  // VideoOutput_h