tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

DecodedStream.cpp (43897B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "DecodedStream.h"
      8 
      9 #include "AudioDecoderInputTrack.h"
     10 #include "MediaData.h"
     11 #include "MediaDecoderStateMachine.h"
     12 #include "MediaQueue.h"
     13 #include "MediaTrackGraph.h"
     14 #include "MediaTrackListener.h"
     15 #include "Tracing.h"
     16 #include "VideoSegment.h"
     17 #include "VideoUtils.h"
     18 #include "mozilla/AbstractThread.h"
     19 #include "mozilla/CheckedInt.h"
     20 #include "mozilla/ProfilerLabels.h"
     21 #include "mozilla/StaticPrefs_dom.h"
     22 #include "mozilla/SyncRunnable.h"
     23 #include "mozilla/gfx/Point.h"
     24 #include "nsProxyRelease.h"
     25 
     26 namespace mozilla {
     27 
     28 using media::NullableTimeUnit;
     29 using media::TimeUnit;
     30 
     31 extern LazyLogModule gMediaDecoderLog;
     32 
        // Media-log helper that prefixes every message with the DecodedStream
        // instance pointer so interleaved instances can be told apart.
     33 #define LOG_DS(type, fmt, ...)    \
     34  MOZ_LOG(gMediaDecoderLog, type, \
     35          ("DecodedStream=%p " fmt, this, ##__VA_ARGS__))
     36 
        // Emits a text profiler marker attributed to the enclosing function.
     37 #define PLAYBACK_PROFILER_MARKER(markerString) \
     38  PROFILER_MARKER_TEXT(FUNCTION_SIGNATURE, MEDIA_PLAYBACK, {}, markerString)
     39 
     40 /*
     41 * A container class to make it easier to pass the playback info all the
     42 * way to DecodedStreamGraphListener from DecodedStream.
     43 */
     44 struct PlaybackInfoInit {
        // Media time playback starts at; seeds DecodedStreamData::mNextAudioTime.
     45  TimeUnit mStartTime;
        // Stream description; HasAudio()/HasVideo() decide which tracks to create.
     46  MediaInfo mInfo;
     47 };
     48 
     49 class DecodedStreamGraphListener;
     50 
        // Observes the video SourceMediaTrack on the graph thread and forwards
        // output-progress and ended notifications to the DecodedStreamGraphListener
        // on the decoder thread.
     51 class SourceVideoTrackListener : public MediaTrackListener {
     52 public:
     53  SourceVideoTrackListener(DecodedStreamGraphListener* aGraphListener,
     54                           SourceMediaTrack* aVideoTrack,
     55                           MediaTrack* aAudioTrack,
     56                           nsISerialEventTarget* aDecoderThread);
     57 
     58  void NotifyOutput(MediaTrackGraph* aGraph,
     59                    TrackTime aCurrentTrackTime) override;
     60  void NotifyEnded(MediaTrackGraph* aGraph) override;
     61 
     62 private:
     63  const RefPtr<DecodedStreamGraphListener> mGraphListener;
     64  const RefPtr<SourceMediaTrack> mVideoTrack;
        // Audio track, if any; while it is live it drives the clock, so video
        // output notifications are suppressed (see NotifyOutput).
     65  const RefPtr<const MediaTrack> mAudioTrack;
     66  const RefPtr<nsISerialEventTarget> mDecoderThread;
        // Graph thread only; guarantees forwarded video times never go backwards.
     67  TrackTime mLastVideoOutputTime = 0;
     68 };
     69 
        // Bridges the audio/video tracks feeding the MediaTrackGraph and the
        // decoder thread: tracks playback progress, resolves the per-track ended
        // promises, and publishes the playback clock via OnOutput().
     70 class DecodedStreamGraphListener {
     71  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
     72 private:
     73  DecodedStreamGraphListener(
     74      nsISerialEventTarget* aDecoderThread, AudioDecoderInputTrack* aAudioTrack,
     75      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
     76      SourceMediaTrack* aVideoTrack,
     77      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
     78      : mDecoderThread(aDecoderThread),
     79        mVideoTrackListener(
     80            aVideoTrack ? MakeRefPtr<SourceVideoTrackListener>(
     81                              this, aVideoTrack, aAudioTrack, aDecoderThread)
     82                        : nullptr),
     83        mAudioEndedHolder(std::move(aAudioEndedHolder)),
     84        mVideoEndedHolder(std::move(aVideoEndedHolder)),
     85        mAudioTrack(aAudioTrack),
     86        mVideoTrack(aVideoTrack) {
     87    MOZ_ASSERT(NS_IsMainThread());
     88    MOZ_ASSERT(mDecoderThread);
     89 
        // A missing track counts as already ended; resolve its promise up front
        // so OnEnded() callers don't wait forever.
     90    if (!mAudioTrack) {
     91      mAudioEnded = true;
     92      mAudioEndedHolder.ResolveIfExists(true, __func__);
     93    }
     94 
     95    if (!mVideoTrackListener) {
     96      mVideoEnded = true;
     97      mVideoEndedHolder.ResolveIfExists(true, __func__);
     98    }
     99  }
    100 
        // Hooks up the audio media-event listeners and the video track listener.
        // Split out of the constructor because the lambdas take a RefPtr to
        // |this|, which must not happen before construction completes.
    101  void RegisterListeners() {
    102    if (mAudioTrack) {
    103      mOnAudioOutput = mAudioTrack->OnOutput().Connect(
    104          mDecoderThread,
    105          [self = RefPtr<DecodedStreamGraphListener>(this)](TrackTime aTime) {
    106            self->NotifyOutput(MediaSegment::AUDIO, aTime);
    107          });
    108      mOnAudioEnd = mAudioTrack->OnEnd().Connect(
    109          mDecoderThread, [self = RefPtr<DecodedStreamGraphListener>(this)]() {
    110            self->NotifyEnded(MediaSegment::AUDIO);
    111          });
    112    }
    113 
    114    if (mVideoTrackListener) {
    115      mVideoTrack->AddListener(mVideoTrackListener);
    116    }
    117  }
    118 
    119 public:
        // Main thread only. Constructs the listener and registers all listeners.
    120  static already_AddRefed<DecodedStreamGraphListener> Create(
    121      nsISerialEventTarget* aDecoderThread, AudioDecoderInputTrack* aAudioTrack,
    122      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
    123      SourceMediaTrack* aVideoTrack,
    124      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder) {
    125    RefPtr<DecodedStreamGraphListener> listener =
    126        new DecodedStreamGraphListener(
    127            aDecoderThread, aAudioTrack, std::move(aAudioEndedHolder),
    128            aVideoTrack, std::move(aVideoEndedHolder));
    129    listener->RegisterListeners();
    130    return listener.forget();
    131  }
    132 
        // Ends both tracks, resolves any still-pending ended promises with false
        // (i.e. "did not play to completion"), and disconnects audio listeners.
    133  void Close() {
    134    AssertOnDecoderThread();
    135    if (mAudioTrack) {
    136      mAudioTrack->Close();
    137    }
    138    if (mVideoTrack) {
    139      mVideoTrack->End();
    140    }
    141    mAudioEndedHolder.ResolveIfExists(false, __func__);
    142    mVideoEndedHolder.ResolveIfExists(false, __func__);
    143    mOnAudioOutput.DisconnectIfExists();
    144    mOnAudioEnd.DisconnectIfExists();
    145  }
    146 
    147  void NotifyOutput(MediaSegment::Type aType, TrackTime aCurrentTrackTime) {
    148    AssertOnDecoderThread();
    149    if (aType == MediaSegment::AUDIO) {
    150      mAudioOutputFrames = aCurrentTrackTime;
    151    } else if (aType == MediaSegment::VIDEO) {
    152      if (aCurrentTrackTime >= mVideoEndTime) {
    153        mVideoTrack->End();
    154      }
    155    } else {
    156      MOZ_CRASH("Unexpected track type");
    157    }
    158 
    159    MOZ_ASSERT_IF(aType == MediaSegment::AUDIO, !mAudioEnded);
    160    MOZ_ASSERT_IF(aType == MediaSegment::VIDEO, !mVideoEnded);
    161    // This situation would happen when playing audio in >1x playback rate,
    162    // because the audio output clock isn't aligned with the graph time and
    163    // would go forward faster. Eg. playback rate=2, when the graph time passes
    164    // 10s, the audio clock time actually already goes forward 20s. After audio
    165    // track ended, video track would trigger the clock, but the video time
    166    // still follows the graph time, which is smaller than the previous audio
    167    // clock time and should be ignored.
    168    if (aCurrentTrackTime <= mLastOutputTime) {
    169      MOZ_ASSERT(aType == MediaSegment::VIDEO);
    170      return;
    171    }
    172    MOZ_ASSERT(aCurrentTrackTime > mLastOutputTime);
    173    mLastOutputTime = aCurrentTrackTime;
    174 
    175    // Only when audio track doesn't exist or has reached the end, video
    176    // track should drive the clock.
    177    MOZ_ASSERT_IF(aType == MediaSegment::VIDEO, mAudioEnded);
    178    const MediaTrack* track = aType == MediaSegment::VIDEO
    179                                  ? static_cast<MediaTrack*>(mVideoTrack)
    180                                  : static_cast<MediaTrack*>(mAudioTrack);
    181    mOnOutput.Notify(track->TrackTimeToMicroseconds(aCurrentTrackTime));
    182  }
    183 
        // Marks the given track as ended and resolves its ended promise with true
        // ("played to completion"). Each track may end at most once.
    184  void NotifyEnded(MediaSegment::Type aType) {
    185    AssertOnDecoderThread();
    186    if (aType == MediaSegment::AUDIO) {
    187      MOZ_ASSERT(!mAudioEnded);
    188      mAudioEnded = true;
    189      mAudioEndedHolder.ResolveIfExists(true, __func__);
    190    } else if (aType == MediaSegment::VIDEO) {
    191      MOZ_ASSERT(!mVideoEnded);
    192      mVideoEnded = true;
    193      mVideoEndedHolder.ResolveIfExists(true, __func__);
    194    } else {
    195      MOZ_CRASH("Unexpected track type");
    196    }
    197  }
    198 
    199  /**
    200   * Tell the graph listener to end the track sourced by the given track after
    201   * it has seen at least aEnd worth of output reported as processed by the
    202   * graph.
    203   *
    204   * A TrackTime of TRACK_TIME_MAX indicates that the track has no end and is
    205   * the default.
    206   *
    207   * This method of ending tracks is needed because the MediaTrackGraph
    208   * processes ended tracks (through SourceMediaTrack::EndTrack) at the
    209   * beginning of an iteration, but waits until the end of the iteration to
    210   * process any ControlMessages. When such a ControlMessage is a listener that
    211   * is to be added to a track that has ended in its very first iteration, the
    212   * track ends before the listener tracking this ending is added. This can lead
    213   * to a MediaStreamTrack ending on main thread (it uses another listener)
    214   * before the listeners to render the track get added, potentially meaning a
    215   * media element doesn't progress before reaching the end although data was
    216   * available.
    217   */
    218  void EndVideoTrackAt(MediaTrack* aTrack, TrackTime aEnd) {
    219    AssertOnDecoderThread();
    220    MOZ_DIAGNOSTIC_ASSERT(aTrack == mVideoTrack);
    221    mVideoEndTime = aEnd;
    222  }
    223 
        // Main thread only. Detaches the video track listener; called when the
        // owning DecodedStreamData is being torn down.
    224  void Forget() {
    225    MOZ_ASSERT(NS_IsMainThread());
    226    if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
    227      mVideoTrack->RemoveListener(mVideoTrackListener);
    228    }
    229    mVideoTrackListener = nullptr;
    230  }
    231 
    232  TrackTime GetAudioFramesPlayed() {
    233    AssertOnDecoderThread();
    234    return mAudioOutputFrames;
    235  }
    236 
    237  MediaEventSource<int64_t>& OnOutput() { return mOnOutput; }
    238 
    239 private:
    240  ~DecodedStreamGraphListener() {
    241    MOZ_ASSERT(mAudioEndedHolder.IsEmpty());
    242    MOZ_ASSERT(mVideoEndedHolder.IsEmpty());
    243  }
    244 
    245  inline void AssertOnDecoderThread() const {
    246    MOZ_ASSERT(mDecoderThread->IsOnCurrentThread());
    247  }
    248 
    249  const RefPtr<nsISerialEventTarget> mDecoderThread;
    250 
    251  // Accessible on any thread, but only notify on the decoder thread.
    252  MediaEventProducer<int64_t> mOnOutput;
    253 
    254  RefPtr<SourceVideoTrackListener> mVideoTrackListener;
    255 
    256  // These can be resolved on the main thread on creation if there is no
    257  // corresponding track, otherwise they are resolved on the decoder thread.
    258  MozPromiseHolder<DecodedStream::EndedPromise> mAudioEndedHolder;
    259  MozPromiseHolder<DecodedStream::EndedPromise> mVideoEndedHolder;
    260 
    261  // Decoder thread only.
    262  TrackTime mAudioOutputFrames = 0;
    263  TrackTime mLastOutputTime = 0;
    264  bool mAudioEnded = false;
    265  bool mVideoEnded = false;
    266 
    267  // Any thread.
    268  const RefPtr<AudioDecoderInputTrack> mAudioTrack;
    269  const RefPtr<SourceMediaTrack> mVideoTrack;
    270  MediaEventListener mOnAudioOutput;
    271  MediaEventListener mOnAudioEnd;
    272  Atomic<TrackTime> mVideoEndTime{TRACK_TIME_MAX};
    273 };
    274 
        // All members are const RefPtrs; the listener is immutable after
        // construction apart from mLastVideoOutputTime (graph thread only).
    275 SourceVideoTrackListener::SourceVideoTrackListener(
    276    DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aVideoTrack,
    277    MediaTrack* aAudioTrack, nsISerialEventTarget* aDecoderThread)
    278    : mGraphListener(aGraphListener),
    279      mVideoTrack(aVideoTrack),
    280      mAudioTrack(aAudioTrack),
    281      mDecoderThread(aDecoderThread) {}
    282 
        // Graph-thread callback. Forwards monotonically increasing video output
        // times to the graph listener on the decoder thread, but only when there
        // is no live audio track (audio otherwise owns the clock).
    283 void SourceVideoTrackListener::NotifyOutput(MediaTrackGraph* aGraph,
    284                                            TrackTime aCurrentTrackTime) {
    285  aGraph->AssertOnGraphThreadOrNotRunning();
    286  if (mAudioTrack && !mAudioTrack->Ended()) {
    287    // Only audio playout drives the clock forward, if present and live.
    288    return;
    289  }
    290  // The graph can iterate without time advancing, but the invariant is that
    291  // time can never go backwards.
    292  if (aCurrentTrackTime <= mLastVideoOutputTime) {
    293    MOZ_ASSERT(aCurrentTrackTime == mLastVideoOutputTime);
    294    return;
    295  }
    296  mLastVideoOutputTime = aCurrentTrackTime;
    297  mDecoderThread->Dispatch(NS_NewRunnableFunction(
    298      "SourceVideoTrackListener::NotifyOutput",
    299      [self = RefPtr<SourceVideoTrackListener>(this), aCurrentTrackTime]() {
    300        self->mGraphListener->NotifyOutput(MediaSegment::VIDEO,
    301                                           aCurrentTrackTime);
    302      }));
    303 }
    304 
        // Graph-thread callback. Relays the video track's end to the graph
        // listener on the decoder thread, which resolves the video ended promise.
    305 void SourceVideoTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
    306  aGraph->AssertOnGraphThreadOrNotRunning();
    307  mDecoderThread->Dispatch(NS_NewRunnableFunction(
    308      "SourceVideoTrackListener::NotifyEnded",
    309      [self = RefPtr<SourceVideoTrackListener>(this)]() {
    310        self->mGraphListener->NotifyEnded(MediaSegment::VIDEO);
    311      }));
    312 }
    313 
    314 /**
    315 * All MediaStream-related data is protected by the decoder's monitor. We have
    316 * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
    317 * inputs for all output tracks created by OutputStreamManager after calls to
    318 * captureStream/UntilEnded. Seeking creates new source tracks, as does
    319 * replaying after the input has ended. In the latter case, the new sources are
    320 * not connected to tracks created by captureStreamUntilEnded.
    321 */
    322 class DecodedStreamData final {
    323 public:
    324  DecodedStreamData(
    325      PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
    326      RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
    327      RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
    328      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
    329      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
    330      float aPlaybackRate, float aVolume, bool aPreservesPitch,
    331      nsISerialEventTarget* aDecoderThread);
    332  ~DecodedStreamData();
        // Playback-clock event forwarded from the graph listener (microseconds).
    333  MediaEventSource<int64_t>& OnOutput();
    334  // This is used to mark track as closed and should be called before Forget().
    335  // Decoder thread only.
    336  void Close();
    337  // After calling this function, the DecodedStreamData would be destroyed.
    338  // Main thread only.
    339  void Forget();
    340  void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);
    341 
        // Appends one video frame (possibly replicated) covering [aStart, aEnd)
        // to aOutput for feeding into the video SourceMediaTrack.
    342  void WriteVideoToSegment(layers::Image* aImage, const TimeUnit& aStart,
    343                           const TimeUnit& aEnd,
    344                           const gfx::IntSize& aIntrinsicSize,
    345                           const TimeStamp& aTimeStamp, VideoSegment* aOutput,
    346                           const PrincipalHandle& aPrincipalHandle,
    347                           double aPlaybackRate);
    348 
    349  /* The following group of fields are protected by the decoder's monitor
    350   * and can be read or written on any thread.
    351   */
    352  // Count of audio frames written to the track
    353  int64_t mAudioFramesWritten;
    354  // Count of video frames written to the track in the track's rate
    355  TrackTime mVideoTrackWritten;
    356  // mNextAudioTime is the end timestamp for the last packet sent to the track.
    357  // Therefore audio packets starting at or after this time need to be copied
    358  // to the output track.
    359  TimeUnit mNextAudioTime;
    360  // mLastVideoStartTime is the start timestamp for the last packet sent to the
    361  // track. Therefore video packets starting after this time need to be copied
    362  // to the output track.
    363  NullableTimeUnit mLastVideoStartTime;
    364  // mLastVideoEndTime is the end timestamp for the last packet sent to the
    365  // track. It is used to adjust durations of chunks sent to the output track
    366  // when there are overlaps in VideoData.
    367  NullableTimeUnit mLastVideoEndTime;
    368  // The timestamp of the last frame, so we can ensure time never goes
    369  // backwards.
    370  TimeStamp mLastVideoTimeStamp;
    371  // The last video image sent to the track. Useful if we need to replicate
    372  // the image.
    373  RefPtr<layers::Image> mLastVideoImage;
    374  gfx::IntSize mLastVideoImageDisplaySize;
    375  bool mHaveSentFinishAudio;
    376  bool mHaveSentFinishVideo;
    377 
        // Source tracks feeding the graph (null when the stream lacks that kind),
        // the captured output tracks, and the ports connecting source to output.
    378  const RefPtr<AudioDecoderInputTrack> mAudioTrack;
    379  const RefPtr<SourceMediaTrack> mVideoTrack;
    380  const RefPtr<ProcessedMediaTrack> mAudioOutputTrack;
    381  const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
    382  const RefPtr<MediaInputPort> mAudioPort;
    383  const RefPtr<MediaInputPort> mVideoPort;
    384  const RefPtr<DecodedStream::EndedPromise> mAudioEndedPromise;
    385  const RefPtr<DecodedStream::EndedPromise> mVideoEndedPromise;
    386  const RefPtr<DecodedStreamGraphListener> mListener;
    387 };
    388 
        // Main thread only. Note the member-initializer order is significant:
        // mAudioTrack/mVideoTrack must be created before the ports and listener
        // that reference them.
    389 DecodedStreamData::DecodedStreamData(
    390    PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
    391    RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
    392    RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
    393    MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
    394    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
    395    float aPlaybackRate, float aVolume, bool aPreservesPitch,
    396    nsISerialEventTarget* aDecoderThread)
    397    : mAudioFramesWritten(0),
    398      mVideoTrackWritten(0),
    399      mNextAudioTime(aInit.mStartTime),
    400      mHaveSentFinishAudio(false),
    401      mHaveSentFinishVideo(false),
    402      mAudioTrack(aInit.mInfo.HasAudio()
    403                      ? AudioDecoderInputTrack::Create(
    404                            aGraph, aDecoderThread, aInit.mInfo.mAudio,
    405                            aPlaybackRate, aVolume, aPreservesPitch)
    406                      : nullptr),
    407      mVideoTrack(aInit.mInfo.HasVideo()
    408                      ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
    409                      : nullptr),
    410      mAudioOutputTrack(std::move(aAudioOutputTrack)),
    411      mVideoOutputTrack(std::move(aVideoOutputTrack)),
        // Ports are only allocated when both the source and the capture-output
        // track of a kind exist.
    412      mAudioPort((mAudioOutputTrack && mAudioTrack)
    413                     ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
    414                     : nullptr),
    415      mVideoPort((mVideoOutputTrack && mVideoTrack)
    416                     ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
    417                     : nullptr),
    418      mAudioEndedPromise(aAudioEndedPromise.Ensure(__func__)),
    419      mVideoEndedPromise(aVideoEndedPromise.Ensure(__func__)),
    420      // DecodedStreamGraphListener will resolve these promises.
    421      mListener(DecodedStreamGraphListener::Create(
    422          aDecoderThread, mAudioTrack, std::move(aAudioEndedPromise),
    423          mVideoTrack, std::move(aVideoEndedPromise))) {
    424  MOZ_ASSERT(NS_IsMainThread());
    425 }
    426 
        // Main thread only. Destroys the graph tracks and ports this instance
        // created; the listener itself is released via the RefPtr members.
    427 DecodedStreamData::~DecodedStreamData() {
    428  MOZ_ASSERT(NS_IsMainThread());
    429  if (mAudioTrack) {
    430    mAudioTrack->Destroy();
    431  }
    432  if (mVideoTrack) {
    433    mVideoTrack->Destroy();
    434  }
    435  if (mAudioPort) {
    436    mAudioPort->Destroy();
    437  }
    438  if (mVideoPort) {
    439    mVideoPort->Destroy();
    440  }
    441 }
    442 
        // Thin forwarders to the graph listener; see the declarations above for
        // threading requirements (Close: decoder thread, Forget: main thread).
    443 MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
    444  return mListener->OnOutput();
    445 }
    446 
    447 void DecodedStreamData::Close() { mListener->Close(); }
    448 
    449 void DecodedStreamData::Forget() { mListener->Forget(); }
    450 
        // Fills aInfo for about:media style debugging. Unset video times are
        // reported as -1 (microseconds) via the valueOr sentinel.
    451 void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
    452  CopyUTF8toUTF16(nsPrintfCString("%p", this), aInfo.mInstance);
    453  aInfo.mAudioFramesWritten = mAudioFramesWritten;
    454  aInfo.mStreamAudioWritten = mListener->GetAudioFramesPlayed();
    455  aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
    456  aInfo.mLastVideoStartTime =
    457      mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
    458          .ToMicroseconds();
    459  aInfo.mLastVideoEndTime =
    460      mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
    461          .ToMicroseconds();
    462  aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
    463  aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
    464 }
    465 
        // Owner-thread object; holds references to the shared audio/video queues
        // (owned by the state machine) and mirrors the output principal.
    466 DecodedStream::DecodedStream(
    467    AbstractThread* aOwnerThread,
    468    nsMainThreadPtrHandle<SharedDummyTrack> aDummyTrack,
    469    CopyableTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
    470    AbstractCanonical<PrincipalHandle>* aCanonicalOutputPrincipal,
    471    double aVolume, double aPlaybackRate, bool aPreservesPitch,
    472    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
    473    : mOwnerThread(aOwnerThread),
    474      mDummyTrack(std::move(aDummyTrack)),
    475 
    476      mWatchManager(this, mOwnerThread),
    477      mPlaying(false, "DecodedStream::mPlaying"),
    478      mPrincipalHandle(aOwnerThread, PRINCIPAL_HANDLE_NONE,
    479                       "DecodedStream::mPrincipalHandle (Mirror)"),
    480      mCanonicalOutputPrincipal(aCanonicalOutputPrincipal),
    481      mOutputTracks(std::move(aOutputTracks)),
    482      mVolume(aVolume),
    483      mPlaybackRate(aPlaybackRate),
    484      mPreservesPitch(aPreservesPitch),
    485      mAudioQueue(aAudioQueue),
    486      mVideoQueue(aVideoQueue) {}
    487 
    488 DecodedStream::~DecodedStream() {
    489  MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
    490 }
    491 
        // Returns the ended promise for the given track kind, or nullptr when the
        // current media has no track of that kind. Requires Start() to have run.
    492 RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
    493  AssertOwnerThread();
    494  MOZ_ASSERT(mStartTime.isSome());
    495 
    496  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
    497    return mAudioEndedPromise;
    498  }
    499  if (aType == TrackInfo::kVideoTrack && mInfo.HasVideo()) {
    500    return mVideoEndedPromise;
    501  }
    502  return nullptr;
    503 }
    504 
        // Begins a playback session: records start state, then synchronously
        // creates the DecodedStreamData (tracks, ports, graph listener) on the
        // main thread before pushing the first data. Always returns NS_OK; a
        // null mData afterwards means playback is intentionally halted until a
        // re-creation (no dummy track, or a destroyed capture track).
    505 nsresult DecodedStream::Start(const TimeUnit& aStartTime,
    506                              const MediaInfo& aInfo) {
    507  AssertOwnerThread();
    508  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
    509 
    510  AUTO_PROFILER_LABEL(FUNCTION_SIGNATURE, MEDIA_PLAYBACK);
    511  if (profiler_thread_is_being_profiled_for_markers()) {
    512    nsPrintfCString markerString("StartTime=%" PRId64,
    513                                 aStartTime.ToMicroseconds());
    514    PLAYBACK_PROFILER_MARKER(markerString);
    515  }
    516  LOG_DS(LogLevel::Debug, "Start() mStartTime=%" PRId64,
    517         aStartTime.ToMicroseconds());
    518 
    519  mStartTime.emplace(aStartTime);
    520  mLastOutputTime = TimeUnit::Zero();
    521  mInfo = aInfo;
    522  mPlaying = true;
    523  mPrincipalHandle.Connect(mCanonicalOutputPrincipal);
    524  mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
        // NOTE(review): mInfo.mAudio.mRate is read even when !HasAudio() —
        // presumably default-initialized in that case; confirm.
    525  mAudibilityMonitor.emplace(
    526      mInfo.mAudio.mRate,
    527      StaticPrefs::dom_media_silence_duration_for_audibility());
    528  ConnectListener();
    529 
        // Main-thread runnable that builds the DecodedStreamData; run
        // synchronously below so mData is available before SendData().
    530  class R : public Runnable {
    531   public:
    532    R(PlaybackInfoInit&& aInit,
    533      nsMainThreadPtrHandle<SharedDummyTrack> aDummyTrack,
    534      nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
    535      MozPromiseHolder<MediaSink::EndedPromise>&& aAudioEndedPromise,
    536      MozPromiseHolder<MediaSink::EndedPromise>&& aVideoEndedPromise,
    537      float aPlaybackRate, float aVolume, bool aPreservesPitch,
    538      nsISerialEventTarget* aDecoderThread)
    539        : Runnable("CreateDecodedStreamData"),
    540          mInit(std::move(aInit)),
    541          mDummyTrack(std::move(aDummyTrack)),
    542          mOutputTracks(std::move(aOutputTracks)),
    543          mAudioEndedPromise(std::move(aAudioEndedPromise)),
    544          mVideoEndedPromise(std::move(aVideoEndedPromise)),
    545          mPlaybackRate(aPlaybackRate),
    546          mVolume(aVolume),
    547          mPreservesPitch(aPreservesPitch),
    548          mDecoderThread(aDecoderThread) {}
    549    NS_IMETHOD Run() override {
    550      MOZ_ASSERT(NS_IsMainThread());
    551      RefPtr<ProcessedMediaTrack> audioOutputTrack;
    552      RefPtr<ProcessedMediaTrack> videoOutputTrack;
    553      for (const auto& track : mOutputTracks) {
    554        if (track->mType == MediaSegment::AUDIO) {
    555          MOZ_DIAGNOSTIC_ASSERT(
    556              !audioOutputTrack,
    557              "We only support capturing to one output track per kind");
    558          audioOutputTrack = track;
    559        } else if (track->mType == MediaSegment::VIDEO) {
    560          MOZ_DIAGNOSTIC_ASSERT(
    561              !videoOutputTrack,
    562              "We only support capturing to one output track per kind");
    563          videoOutputTrack = track;
    564        } else {
    565          MOZ_CRASH("Unknown media type");
    566        }
    567      }
    568      if (!mDummyTrack) {
    569        // No dummy track - no graph. This could be intentional as the owning
    570        // media element needs access to the tracks on main thread to set up
    571        // forwarding of them before playback starts. MDSM will re-create
    572        // DecodedStream once a dummy track is available. This effectively halts
    573        // playback for this DecodedStream.
    574        return NS_OK;
    575      }
    576      if ((audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
    577          (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
    578        // A track has been destroyed and we'll soon get re-created with a
    579        // proper one. This effectively halts playback for this DecodedStream.
    580        return NS_OK;
    581      }
    582      mData = MakeUnique<DecodedStreamData>(
    583          std::move(mInit), mDummyTrack->mTrack->Graph(),
    584          std::move(audioOutputTrack), std::move(videoOutputTrack),
    585          std::move(mAudioEndedPromise), std::move(mVideoEndedPromise),
    586          mPlaybackRate, mVolume, mPreservesPitch, mDecoderThread);
    587      return NS_OK;
    588    }
    589    UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }
    590 
    591   private:
    592    PlaybackInfoInit mInit;
    593    nsMainThreadPtrHandle<SharedDummyTrack> mDummyTrack;
    594    const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
    595    MozPromiseHolder<MediaSink::EndedPromise> mAudioEndedPromise;
    596    MozPromiseHolder<MediaSink::EndedPromise> mVideoEndedPromise;
    597    UniquePtr<DecodedStreamData> mData;
    598    const float mPlaybackRate;
    599    const float mVolume;
    600    const bool mPreservesPitch;
    601    const RefPtr<nsISerialEventTarget> mDecoderThread;
    602  };
    603 
    604  MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
    605  MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
    606  PlaybackInfoInit init{aStartTime, aInfo};
    607  nsCOMPtr<nsIRunnable> r =
    608      new R(std::move(init), mDummyTrack, mOutputTracks.Clone(),
    609            std::move(audioEndedHolder), std::move(videoEndedHolder),
    610            static_cast<float>(mPlaybackRate), static_cast<float>(mVolume),
    611            mPreservesPitch, mOwnerThread);
        // Blocks this (decoder) thread until the main thread has run R.
    612  SyncRunnable::DispatchToThread(GetMainThreadSerialEventTarget(), r);
    613  mData = static_cast<R*>(r.get())->ReleaseData();
    614 
    615  if (mData) {
    616    mAudioEndedPromise = mData->mAudioEndedPromise;
    617    mVideoEndedPromise = mData->mVideoEndedPromise;
    618    mOutputListener = mData->OnOutput().Connect(mOwnerThread, this,
    619                                                &DecodedStream::NotifyOutput);
    620    SendData();
    621  }
    622  return NS_OK;
    623 }
    624 
        // Ends the current playback session and tears down per-session state.
        // Safe to call Start() again afterwards.
    625 void DecodedStream::Stop() {
    626  AssertOwnerThread();
    627  MOZ_ASSERT(mStartTime.isSome(), "playback not started.");
    628 
    629  TRACE("DecodedStream::Stop");
    630  LOG_DS(LogLevel::Debug, "Stop()");
    631 
    632  DisconnectListener();
    633  ResetVideo(mPrincipalHandle);
    634  ResetAudio();
    635  mStartTime.reset();
    636  mAudioEndedPromise = nullptr;
    637  mVideoEndedPromise = nullptr;
    638 
    639  // Clear mData immediately when this playback session ends so we won't
    640  // send data to the wrong track in SendData() in next playback session.
    641  DestroyData(std::move(mData));
    642 
    643  mPrincipalHandle.DisconnectIfConnected();
    644  mWatchManager.Unwatch(mPlaying, &DecodedStream::PlayingChanged);
    645  mAudibilityMonitor.reset();
    646 }
    647 
        // A session is "started" between Start() and Stop().
    648 bool DecodedStream::IsStarted() const {
    649  AssertOwnerThread();
    650  return mStartTime.isSome();
    651 }
    652 
    653 bool DecodedStream::IsPlaying() const {
    654  AssertOwnerThread();
    655  return IsStarted() && mPlaying;
    656 }
    657 
        // Final teardown of owner-thread machinery; no playback may follow.
    658 void DecodedStream::Shutdown() {
    659  AssertOwnerThread();
    660  mPrincipalHandle.DisconnectIfConnected();
    661  mWatchManager.Shutdown();
    662 }
    663 
        // Closes the tracks on this (decoder) thread, then hands the data off to
        // the main thread where Forget() and destruction must happen.
    664 void DecodedStream::DestroyData(UniquePtr<DecodedStreamData>&& aData) {
    665  AssertOwnerThread();
    666 
    667  if (!aData) {
    668    return;
    669  }
    670 
    671  TRACE("DecodedStream::DestroyData");
    672  mOutputListener.Disconnect();
    673 
    674  aData->Close();
        // The lambda owns aData; it is destroyed on the main thread after Forget().
    675  NS_DispatchToMainThread(
    676      NS_NewRunnableFunction("DecodedStream::DestroyData",
    677                             [data = std::move(aData)]() { data->Forget(); }));
    678 }
    679 
        // Updates the watched mPlaying state; PlayingChanged() (watched via
        // mWatchManager) reacts to the transition. No-op before Start().
    680 void DecodedStream::SetPlaying(bool aPlaying) {
    681  AssertOwnerThread();
    682 
    683  // Resume/pause matters only when playback started.
    684  if (mStartTime.isNothing()) {
    685    return;
    686  }
    687 
    688  if (profiler_thread_is_being_profiled_for_markers()) {
    689    nsPrintfCString markerString("Playing=%s", aPlaying ? "true" : "false");
    690    PLAYBACK_PROFILER_MARKER(markerString);
    691  }
    692  LOG_DS(LogLevel::Debug, "playing (%d) -> (%d)", mPlaying.Ref(), aPlaying);
    693  mPlaying = aPlaying;
    694 }
    695 
        // The three setters below share a pattern: record the value, skip work if
        // unchanged, and forward it to the audio input track when one exists.
    696 void DecodedStream::SetVolume(double aVolume) {
    697  AssertOwnerThread();
    698  if (profiler_thread_is_being_profiled_for_markers()) {
    699    nsPrintfCString markerString("Volume=%f", aVolume);
    700    PLAYBACK_PROFILER_MARKER(markerString);
    701  }
    702  if (mVolume == aVolume) {
    703    return;
    704  }
    705  mVolume = aVolume;
    706  if (mData && mData->mAudioTrack) {
    707    mData->mAudioTrack->SetVolume(static_cast<float>(aVolume));
    708  }
    709 }
    710 
    711 void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
    712  AssertOwnerThread();
    713  if (profiler_thread_is_being_profiled_for_markers()) {
    714    nsPrintfCString markerString("PlaybackRate=%f", aPlaybackRate);
    715    PLAYBACK_PROFILER_MARKER(markerString);
    716  }
    717  if (mPlaybackRate == aPlaybackRate) {
    718    return;
    719  }
    720  mPlaybackRate = aPlaybackRate;
    721  if (mData && mData->mAudioTrack) {
    722    mData->mAudioTrack->SetPlaybackRate(static_cast<float>(aPlaybackRate));
    723  }
    724 }
    725 
    726 void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
    727  AssertOwnerThread();
    728  if (profiler_thread_is_being_profiled_for_markers()) {
    729    nsPrintfCString markerString("PreservesPitch=%s",
    730                                 aPreservesPitch ? "true" : "false");
    731    PLAYBACK_PROFILER_MARKER(markerString);
    732  }
    733  if (mPreservesPitch == aPreservesPitch) {
    734    return;
    735  }
    736  mPreservesPitch = aPreservesPitch;
    737  if (mData && mData->mAudioTrack) {
    738    mData->mAudioTrack->SetPreservesPitch(aPreservesPitch);
    739  }
    740 }
    741 
    742 RefPtr<GenericPromise> DecodedStream::SetAudioDevice(
    743    RefPtr<AudioDeviceInfo> aDevice) {
    744  // All audio is captured, so nothing is actually played out, so nothing to do.
    745  return GenericPromise::CreateAndResolve(true, __func__);
    746 }
    747 
    748 double DecodedStream::PlaybackRate() const {
    749  AssertOwnerThread();
    750  return mPlaybackRate;
    751 }
    752 
// Pushes all audio from the queue that has not yet been written to the audio
// track, inserting silence first if there is a gap between what has been
// written so far and the next queued sample. Marks end-of-stream on the
// track once the queue is finished.
void DecodedStream::SendAudio(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  if (mData->mHaveSentFinishAudio) {
    return;
  }

  TRACE("DecodedStream::SendAudio");
  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  AutoTArray<RefPtr<AudioData>, 10> audio;
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);

  // This will happen every time the media sink switches from `AudioSink` to
  // `DecodedStream`. If we don't insert the silence then the A/V will be out
  // of sync.
  RefPtr<AudioData> nextAudio = audio.IsEmpty() ? nullptr : audio[0];
  if (RefPtr<AudioData> silence = CreateSilenceDataIfGapExists(nextAudio)) {
    LOG_DS(LogLevel::Verbose, "Detect a gap in audio, insert silence=%u",
           silence->Frames());
    audio.InsertElementAt(0, silence);
  }

  // Append data which hasn't been sent to audio track before.
  mData->mAudioTrack->AppendData(audio, aPrincipalHandle);
  // Update audibility, advance the next-audio cursor, and count the frames
  // written (used by GetEndTime and gap detection).
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    CheckIsDataAudible(audio[i]);
    mData->mNextAudioTime = audio[i]->GetEndTime();
    mData->mAudioFramesWritten += audio[i]->Frames();
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    mData->mAudioTrack->NotifyEndOfStream();
    mData->mHaveSentFinishAudio = true;
  }
}
    793 
    794 already_AddRefed<AudioData> DecodedStream::CreateSilenceDataIfGapExists(
    795    RefPtr<AudioData>& aNextAudio) {
    796  AssertOwnerThread();
    797  if (!aNextAudio) {
    798    return nullptr;
    799  }
    800  CheckedInt64 audioWrittenOffset =
    801      mData->mAudioFramesWritten +
    802      TimeUnitToFrames(*mStartTime, aNextAudio->mRate);
    803  CheckedInt64 frameOffset =
    804      TimeUnitToFrames(aNextAudio->mTime, aNextAudio->mRate);
    805  if (audioWrittenOffset.value() >= frameOffset.value()) {
    806    return nullptr;
    807  }
    808  // We've written less audio than our frame offset, return a silence data so we
    809  // have enough audio to be at the correct offset for our current frames.
    810  CheckedInt64 missingFrames = frameOffset - audioWrittenOffset;
    811  AlignedAudioBuffer silenceBuffer(missingFrames.value() *
    812                                   aNextAudio->mChannels);
    813  if (!silenceBuffer) {
    814    NS_WARNING("OOM in DecodedStream::CreateSilenceDataIfGapExists");
    815    return nullptr;
    816  }
    817  auto duration = media::TimeUnit(missingFrames.value(), aNextAudio->mRate);
    818  if (!duration.IsValid()) {
    819    NS_WARNING("Int overflow in DecodedStream::CreateSilenceDataIfGapExists");
    820    return nullptr;
    821  }
    822  RefPtr<AudioData> silenceData = new AudioData(
    823      aNextAudio->mOffset, aNextAudio->mTime, std::move(silenceBuffer),
    824      aNextAudio->mChannels, aNextAudio->mRate);
    825  MOZ_DIAGNOSTIC_ASSERT(duration == silenceData->mDuration, "must be equal");
    826  return silenceData.forget();
    827 }
    828 
    829 void DecodedStream::CheckIsDataAudible(const AudioData* aData) {
    830  MOZ_ASSERT(aData);
    831 
    832  mAudibilityMonitor->Process(aData);
    833  bool isAudible = mAudibilityMonitor->RecentlyAudible();
    834 
    835  if (isAudible != mIsAudioDataAudible) {
    836    mIsAudioDataAudible = isAudible;
    837    mAudibleEvent.Notify(mIsAudioDataAudible);
    838  }
    839 }
    840 
// Appends aImage to aOutput as a frame covering [aStart, aEnd), with its
// on-track duration scaled by the playback rate, and records the frame's
// timing in the last-video bookkeeping members.
void DecodedStreamData::WriteVideoToSegment(
    layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
    const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
    VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle,
    double aPlaybackRate) {
  RefPtr<layers::Image> image = aImage;
  aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                       aTimeStamp, media::TimeUnit::Invalid(), aStart);
  // Extend this so we get accurate durations for all frames.
  // Because this track is pushed, we need durations so the graph can track
  // when playout of the track has finished.
  MOZ_ASSERT(aPlaybackRate > 0);
  // Convert the media-time interval to track ticks; a higher playback rate
  // shortens the duration the frame occupies on the track.
  TrackTime start = aStart.ToTicksAtRate(mVideoTrack->mSampleRate);
  TrackTime end = aEnd.ToTicksAtRate(mVideoTrack->mSampleRate);
  aOutput->ExtendLastFrameBy(
      static_cast<TrackTime>((float)(end - start) / aPlaybackRate));

  mLastVideoStartTime = Some(aStart);
  mLastVideoEndTime = Some(aEnd);
  mLastVideoTimeStamp = aTimeStamp;
}
    862 
    863 static bool ZeroDurationAtLastChunk(VideoSegment& aInput) {
    864  // Get the last video frame's start time in VideoSegment aInput.
    865  // If the start time is equal to the duration of aInput, means the last video
    866  // frame's duration is zero.
    867  TrackTime lastVideoStratTime;
    868  aInput.GetLastFrame(&lastVideoStratTime);
    869  return lastVideoStratTime == aInput.GetDuration();
    870 }
    871 
    872 void DecodedStream::ResetAudio() {
    873  AssertOwnerThread();
    874 
    875  if (!mData) {
    876    return;
    877  }
    878 
    879  if (!mInfo.HasAudio()) {
    880    return;
    881  }
    882 
    883  TRACE("DecodedStream::ResetAudio");
    884  mData->mAudioTrack->ClearFutureData();
    885  if (const RefPtr<AudioData>& v = mAudioQueue.PeekFront()) {
    886    mData->mNextAudioTime = v->mTime;
    887    mData->mHaveSentFinishAudio = false;
    888  }
    889 }
    890 
// Discards video that was pushed to the track but not yet rendered, signals
// direct consumers to drop buffered future frames, and rewinds the
// last-video bookkeeping so the current frame can be displayed again on
// resume.
void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mData) {
    return;
  }

  if (!mInfo.HasVideo()) {
    return;
  }

  TRACE("DecodedStream::ResetVideo");
  TrackTime cleared = mData->mVideoTrack->ClearFutureData();
  mData->mVideoTrackWritten -= cleared;
  // If we had already ended the track but just removed frames, re-open it so
  // the cleared frames can be re-sent.
  if (mData->mHaveSentFinishVideo && cleared > 0) {
    mData->mHaveSentFinishVideo = false;
    mData->mListener->EndVideoTrackAt(mData->mVideoTrack, TRACK_TIME_MAX);
  }

  VideoSegment resetter;
  TimeStamp currentTime;
  TimeUnit currentPosition = GetPosition(&currentTime);

  // Giving direct consumers a frame (really *any* frame, so in this case:
  // nullptr) at an earlier time than the previous, will signal to that consumer
  // to discard any frames ahead in time of the new frame. To be honest, this is
  // an ugly hack because the direct listeners of the MediaTrackGraph do not
  // have an API that supports clearing the future frames. ImageContainer and
  // VideoFrameContainer do though, and we will need to move to a similar API
  // for video tracks as part of bug 1493618.
  resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                       aPrincipalHandle, false, currentTime);
  mData->mVideoTrack->AppendData(&resetter);

  // Consumer buffers have been reset. We now set the next time to the start
  // time of the current frame, so that it can be displayed again on resuming.
  if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
    // One microsecond earlier than the frame's time so SendVideo re-sends it.
    mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(v->mTime);
  } else {
    // There was no current frame in the queue. We set the next time to the
    // current time, so we at least don't resume starting in the future.
    mData->mLastVideoStartTime =
        Some(currentPosition - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(currentPosition);
  }

  mData->mLastVideoTimeStamp = currentTime;
}
    940 
// Pushes all video frames from the queue that have not yet been written to
// the video track, padding gaps with the previously sent image, and — once
// the queue is finished — appends a final minimum-duration frame (black if
// no frame was ever received) before ending the track.
void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mInfo.HasVideo()) {
    return;
  }

  if (mData->mHaveSentFinishVideo) {
    return;
  }

  TRACE("DecodedStream::SendVideo");
  VideoSegment output;
  AutoTArray<RefPtr<VideoData>, 10> video;

  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(
      mData->mLastVideoStartTime.valueOr(mStartTime.ref()), &video);

  TimeStamp currentTime;
  TimeUnit currentPosition = GetPosition(&currentTime);

  if (mData->mLastVideoTimeStamp.IsNull()) {
    mData->mLastVideoTimeStamp = currentTime;
  }

  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i];
    TimeUnit lastStart = mData->mLastVideoStartTime.valueOr(
        mStartTime.ref() - TimeUnit::FromMicroseconds(1));
    TimeUnit lastEnd = mData->mLastVideoEndTime.valueOr(mStartTime.ref());

    if (lastEnd < v->mTime) {
      // Write last video frame to catch up. mLastVideoImage can be null here
      // which is fine, it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      mData->WriteVideoToSegment(mData->mLastVideoImage, lastEnd, v->mTime,
                                 mData->mLastVideoImageDisplaySize, t, &output,
                                 aPrincipalHandle, mPlaybackRate);
      lastEnd = v->mTime;
    }

    if (lastStart < v->mTime) {
      // This frame starts after the last frame's start. Note that this could be
      // before the last frame's end time for some videos. This only matters for
      // the track's lifetime in the MTG, as rendering is based on timestamps,
      // aka frame start times.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      // Ensure the frame occupies at least one track tick so it isn't lost.
      TimeUnit end = std::max(
          v->GetEndTime(),
          lastEnd + TimeUnit::FromMicroseconds(
                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
      mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
                                 &output, aPrincipalHandle, mPlaybackRate);
    }
  }

  // Check the output is not empty.
  bool compensateEOS = false;
  bool forceBlack = false;
  if (output.GetLastFrame()) {
    compensateEOS = ZeroDurationAtLastChunk(output);
  }

  if (output.GetDuration() > 0) {
    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (!mData->mLastVideoImage) {
      // We have video, but the video queue finished before we received any
      // frame. We insert a black frame to progress any consuming
      // HTMLMediaElement. This mirrors the behavior of VideoSink.

      // Force a frame - can be null
      compensateEOS = true;
      // Force frame to be black
      forceBlack = true;
      // Override the frame's size (will be 0x0 otherwise)
      mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
      LOG_DS(LogLevel::Debug, "No mLastVideoImage");
    }
    if (compensateEOS) {
      VideoSegment endSegment;
      auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
      mData->WriteVideoToSegment(
          mData->mLastVideoImage, start, start,
          mData->mLastVideoImageDisplaySize,
          currentTime + (start - currentPosition).ToTimeDuration(), &endSegment,
          aPrincipalHandle, mPlaybackRate);
      // ForwardedInputTrack drops zero duration frames, even at the end of
      // the track.  Give the frame a minimum duration so that it is not
      // dropped.
      endSegment.ExtendLastFrameBy(1);
      LOG_DS(LogLevel::Debug,
             "compensateEOS: start %s, duration %" PRId64
             ", mPlaybackRate %lf, sample rate %" PRId32,
             start.ToString().get(), endSegment.GetDuration(), mPlaybackRate,
             mData->mVideoTrack->mSampleRate);
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (forceBlack) {
        endSegment.ReplaceWithDisabled();
      }
      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
    }
    mData->mListener->EndVideoTrackAt(mData->mVideoTrack,
                                      mData->mVideoTrackWritten);
    mData->mHaveSentFinishVideo = true;
  }
}
   1065 
   1066 void DecodedStream::SendData() {
   1067  AssertOwnerThread();
   1068 
   1069  // Not yet created on the main thread. MDSM will try again later.
   1070  if (!mData) {
   1071    return;
   1072  }
   1073 
   1074  if (!mPlaying) {
   1075    return;
   1076  }
   1077 
   1078  LOG_DS(LogLevel::Verbose, "SendData()");
   1079  SendAudio(mPrincipalHandle);
   1080  SendVideo(mPrincipalHandle);
   1081 }
   1082 
   1083 TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
   1084  AssertOwnerThread();
   1085  TRACE("DecodedStream::GetEndTime");
   1086  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
   1087    auto t = mStartTime.ref() +
   1088             media::TimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
   1089    if (t.IsValid()) {
   1090      return t;
   1091    }
   1092  } else if (aType == TrackInfo::kVideoTrack && mData) {
   1093    return mData->mLastVideoEndTime.valueOr(mStartTime.ref());
   1094  }
   1095  return TimeUnit::Zero();
   1096 }
   1097 
   1098 TimeUnit DecodedStream::GetPosition(TimeStamp* aTimeStamp) {
   1099  AssertOwnerThread();
   1100  TRACE("DecodedStream::GetPosition");
   1101  // This is only called after MDSM starts playback. So mStartTime is
   1102  // guaranteed to be something.
   1103  MOZ_ASSERT(mStartTime.isSome());
   1104  if (aTimeStamp) {
   1105    *aTimeStamp = TimeStamp::Now();
   1106  }
   1107  return mStartTime.ref() + mLastOutputTime;
   1108 }
   1109 
   1110 void DecodedStream::NotifyOutput(int64_t aTime) {
   1111  AssertOwnerThread();
   1112  TimeUnit time = TimeUnit::FromMicroseconds(aTime);
   1113  if (time == mLastOutputTime) {
   1114    return;
   1115  }
   1116  MOZ_ASSERT(mLastOutputTime < time);
   1117  mLastOutputTime = time;
   1118  auto currentTime = GetPosition();
   1119 
   1120  if (profiler_thread_is_being_profiled_for_markers()) {
   1121    nsPrintfCString markerString("OutputTime=%" PRId64,
   1122                                 currentTime.ToMicroseconds());
   1123    PLAYBACK_PROFILER_MARKER(markerString);
   1124  }
   1125  LOG_DS(LogLevel::Verbose, "time is now %" PRId64,
   1126         currentTime.ToMicroseconds());
   1127 
   1128  // Remove audio samples that have been played by MTG from the queue.
   1129  RefPtr<AudioData> a = mAudioQueue.PeekFront();
   1130  for (; a && a->GetEndTime() <= currentTime;) {
   1131    LOG_DS(LogLevel::Debug, "Dropping audio [%" PRId64 ",%" PRId64 "]",
   1132           a->mTime.ToMicroseconds(), a->GetEndTime().ToMicroseconds());
   1133    RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
   1134    a = mAudioQueue.PeekFront();
   1135  }
   1136 }
   1137 
   1138 void DecodedStream::PlayingChanged() {
   1139  AssertOwnerThread();
   1140  TRACE("DecodedStream::PlayingChanged");
   1141 
   1142  if (!mPlaying) {
   1143    // On seek or pause we discard future frames.
   1144    ResetVideo(mPrincipalHandle);
   1145    ResetAudio();
   1146  }
   1147 }
   1148 
// Wires SendData to run on the owner thread whenever either media queue
// receives data or finishes, and whenever the playing state changes.
void DecodedStream::ConnectListener() {
  AssertOwnerThread();

  mAudioPushListener = mAudioQueue.PushEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mVideoPushListener = mVideoQueue.PushEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mVideoFinishListener = mVideoQueue.FinishEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mWatchManager.Watch(mPlaying, &DecodedStream::SendData);
}
   1162 
// Undoes ConnectListener: detaches SendData from the media-queue events and
// the mPlaying watch.
void DecodedStream::DisconnectListener() {
  AssertOwnerThread();

  mAudioPushListener.Disconnect();
  mVideoPushListener.Disconnect();
  mAudioFinishListener.Disconnect();
  mVideoFinishListener.Disconnect();
  mWatchManager.Unwatch(mPlaying, &DecodedStream::SendData);
}
   1172 
   1173 void DecodedStream::GetDebugInfo(dom::MediaSinkDebugInfo& aInfo) {
   1174  AssertOwnerThread();
   1175  int64_t startTime = mStartTime.isSome() ? mStartTime->ToMicroseconds() : -1;
   1176  aInfo.mDecodedStream.mInstance =
   1177      NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
   1178  aInfo.mDecodedStream.mStartTime = startTime;
   1179  aInfo.mDecodedStream.mLastOutputTime = mLastOutputTime.ToMicroseconds();
   1180  aInfo.mDecodedStream.mPlaying = mPlaying.Ref();
   1181  auto lastAudio = mAudioQueue.PeekBack();
   1182  aInfo.mDecodedStream.mLastAudio =
   1183      lastAudio ? lastAudio->GetEndTime().ToMicroseconds() : -1;
   1184  aInfo.mDecodedStream.mAudioQueueFinished = mAudioQueue.IsFinished();
   1185  aInfo.mDecodedStream.mAudioQueueSize =
   1186      AssertedCast<int>(mAudioQueue.GetSize());
   1187  if (mData) {
   1188    mData->GetDebugInfo(aInfo.mDecodedStream.mData);
   1189  }
   1190 }
   1191 
   1192 #undef LOG_DS
   1193 
   1194 }  // namespace mozilla