tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MediaDecoderStateMachine.cpp (176874B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "MediaDecoderStateMachine.h"
      8 
      9 #include <stdint.h>
     10 
     11 #include <algorithm>
     12 #include <utility>
     13 
     14 #include "AudioSegment.h"
     15 #include "DOMMediaStream.h"
     16 #include "ImageContainer.h"
     17 #include "MediaDecoder.h"
     18 #include "MediaShutdownManager.h"
     19 #include "MediaTimer.h"
     20 #include "MediaTrackGraph.h"
     21 #include "PerformanceRecorder.h"
     22 #include "ReaderProxy.h"
     23 #include "TimeUnits.h"
     24 #include "VideoSegment.h"
     25 #include "VideoUtils.h"
     26 #include "mediasink/AudioSink.h"
     27 #include "mediasink/AudioSinkWrapper.h"
     28 #include "mediasink/DecodedStream.h"
     29 #include "mediasink/VideoSink.h"
     30 #include "mozilla/Logging.h"
     31 #include "mozilla/ProfilerLabels.h"
     32 #include "mozilla/ProfilerMarkerTypes.h"
     33 #include "mozilla/ProfilerMarkers.h"
     34 #include "mozilla/SharedThreadPool.h"
     35 #include "mozilla/Sprintf.h"
     36 #include "mozilla/StaticPrefs_media.h"
     37 #include "mozilla/TaskQueue.h"
     38 #include "nsIMemoryReporter.h"
     39 #include "nsPrintfCString.h"
     40 #include "nsTArray.h"
     41 
     42 namespace mozilla {
     43 
     44 using namespace mozilla::media;
     45 
     46 #define NS_DispatchToMainThread(...) \
     47  CompileError_UseAbstractThreadDispatchInstead
     48 
     49 // avoid redefined macro in unified build
     50 #undef FMT
     51 #undef LOG
     52 #undef LOGV
     53 #undef LOGW
     54 #undef LOGE
     55 #undef SFMT
     56 #undef SLOG
     57 #undef SLOGW
     58 #undef SLOGE
     59 
     60 #define FMT(x, ...) "Decoder=%p " x, mDecoderID, ##__VA_ARGS__
     61 #define LOG(x, ...)                                                         \
     62  DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, "Decoder=%p " x, mDecoderID, \
     63            ##__VA_ARGS__)
     64 #define LOGV(x, ...)                                                          \
     65  DDMOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, "Decoder=%p " x, mDecoderID, \
     66            ##__VA_ARGS__)
     67 #define LOGW(x, ...) NS_WARNING(nsPrintfCString(FMT(x, ##__VA_ARGS__)).get())
     68 #define LOGE(x, ...)                                                   \
     69  NS_DebugBreak(NS_DEBUG_WARNING,                                      \
     70                nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, \
     71                __FILE__, __LINE__)
     72 
     73 // Used by StateObject and its sub-classes
     74 #define SFMT(x, ...)                                                     \
     75  "Decoder=%p state=%s " x, mMaster->mDecoderID, ToStateStr(GetState()), \
     76      ##__VA_ARGS__
     77 #define SLOG(x, ...)                                                     \
     78  DDMOZ_LOGEX(mMaster, gMediaDecoderLog, LogLevel::Debug, "state=%s " x, \
     79              ToStateStr(GetState()), ##__VA_ARGS__)
     80 #define SLOGW(x, ...) NS_WARNING(nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get())
     81 #define SLOGE(x, ...)                                                   \
     82  NS_DebugBreak(NS_DEBUG_WARNING,                                       \
     83                nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get(), nullptr, \
     84                __FILE__, __LINE__)
     85 
// Certain constants get stored as member variables and then adjusted by various
// scale factors on a per-decoder basis. We want to make sure to avoid using
// these constants directly, so we put them in a namespace.
namespace detail {

// Resume a suspended video decoder to the current playback position plus this
// time premium for compensating the seeking delay.
static constexpr auto RESUME_VIDEO_PREMIUM = TimeUnit::FromMicroseconds(125000);

// Raw microsecond count backing AMPLE_AUDIO_THRESHOLD below; kept as a plain
// integer so it can also be compared against LOW_BUFFER_THRESHOLD_USECS in
// the static_assert further down.
static const int64_t AMPLE_AUDIO_USECS = 2000000;

// If more than this much decoded audio is queued, we'll hold off
// decoding more audio.
static constexpr auto AMPLE_AUDIO_THRESHOLD =
    TimeUnit::FromMicroseconds(AMPLE_AUDIO_USECS);

}  // namespace detail

// If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
// we're not "prerolling video", we'll skip the video up to the next keyframe
// which is at or after the current playback position.
static const uint32_t LOW_VIDEO_FRAMES = 2;

// Arbitrary "frame duration" when playing only audio.
static const uint32_t AUDIO_DURATION_USECS = 40000;

namespace detail {

// If we have less than this much buffered data available, we'll consider
// ourselves to be running low on buffered data. We determine how much
// buffered data we have remaining using the reader's GetBuffered()
// implementation.
static const int64_t LOW_BUFFER_THRESHOLD_USECS = 5000000;

// TimeUnit form of LOW_BUFFER_THRESHOLD_USECS for call sites that work in
// media time rather than raw microseconds.
static constexpr auto LOW_BUFFER_THRESHOLD =
    TimeUnit::FromMicroseconds(LOW_BUFFER_THRESHOLD_USECS);

// LOW_BUFFER_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS,
// otherwise the skip-to-keyframe logic can activate when we're running low on
// data.
static_assert(LOW_BUFFER_THRESHOLD_USECS > AMPLE_AUDIO_USECS,
              "LOW_BUFFER_THRESHOLD_USECS is too small");

}  // namespace detail

// Amount of excess data to add in to the "should we buffer" calculation.
static constexpr auto EXHAUSTED_DATA_MARGIN =
    TimeUnit::FromMicroseconds(100000);

// NOTE(review): presumably a lower bound on the decoded-video queue size kept
// ahead of playback — the use sites are outside this excerpt; confirm there.
static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3;
    136 
    137 template <typename Type, typename Function>
    138 static void DiscardFramesFromTail(MediaQueue<Type>& aQueue,
    139                                  const Function&& aTest) {
    140  while (aQueue.GetSize()) {
    141    if (aTest(aQueue.PeekBack()->mTime.ToMicroseconds())) {
    142      RefPtr<Type> releaseMe = aQueue.PopBack();
    143      continue;
    144    }
    145    break;
    146  }
    147 }
    148 
    149 // Delay, in milliseconds, that tabs needs to be in background before video
    150 // decoding is suspended.
    151 static TimeDuration SuspendBackgroundVideoDelay() {
    152  return TimeDuration::FromMilliseconds(
    153      StaticPrefs::media_suspend_background_video_delay_ms());
    154 }
    155 
// Base class for all of MediaDecoderStateMachine's state objects. Each state
// overrides the Handle*() event handlers it can legally receive; the default
// implementations deliberately crash (see Crash()) so protocol violations are
// caught early. State transitions happen through SetState<>(), which destroys
// the current state object.
class MediaDecoderStateMachine::StateObject {
 public:
  virtual ~StateObject() = default;
  virtual void Exit() {}  // Exit action.
  virtual void Step() {}  // Perform a 'cycle' of this state object.
  virtual State GetState() const = 0;

  // Event handlers for various events.
  // A state must override every handler for events it expects; the defaults
  // below crash with the event's call site recorded.
  virtual void HandleAudioCaptured() {}
  virtual void HandleAudioDecoded(AudioData* aAudio) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleVideoDecoded(VideoData* aVideo) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleAudioWaited(MediaData::Type aType) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleVideoWaited(MediaData::Type aType) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleWaitingForAudio() { Crash("Unexpected event!", __func__); }
  virtual void HandleAudioCanceled() { Crash("Unexpected event!", __func__); }
  virtual void HandleEndOfAudio() { Crash("Unexpected event!", __func__); }
  virtual void HandleWaitingForVideo() { Crash("Unexpected event!", __func__); }
  virtual void HandleVideoCanceled() { Crash("Unexpected event!", __func__); }
  virtual void HandleEndOfVideo() { Crash("Unexpected event!", __func__); }

  virtual RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget);

  virtual RefPtr<ShutdownPromise> HandleShutdown();

  virtual void HandleVideoSuspendTimeout() = 0;

  virtual void HandleResumeVideoDecoding(const TimeUnit& aTarget);

  virtual void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) {}

  virtual void GetDebugInfo(
      dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) {}

  virtual void HandleLoopingChanged() {}

 private:
  // Deduces the return type of S::Enter() without invoking it; only used in
  // unevaluated (decltype) contexts, so no definition is provided.
  template <class S, typename R, typename... As>
  auto ReturnTypeHelper(R (S::*)(As...)) -> R;

  // Records the reason, current state, and call site in the crash
  // annotation, then aborts.
  void Crash(const char* aReason, const char* aSite) {
    char buf[1024];
    SprintfLiteral(buf, "%s state=%s callsite=%s", aReason,
                   ToStateStr(GetState()), aSite);
    MOZ_ReportAssertionFailure(buf, __FILE__, __LINE__);
    MOZ_CRASH();
  }

 protected:
  // Whether an event's effects should be visible to the outside world or
  // suppressed; the exact semantics are defined by the consuming states
  // (see SetSeekingState() below).
  enum class EventVisibility : int8_t { Observable, Suppressed };

  using Master = MediaDecoderStateMachine;
  explicit StateObject(Master* aPtr) : mMaster(aPtr) {}
  // Convenience accessors forwarding to the owning state machine.
  TaskQueue* OwnerThread() const { return mMaster->mTaskQueue; }
  ReaderProxy* Reader() const { return mMaster->mReader; }
  const MediaInfo& Info() const { return mMaster->Info(); }
  MediaQueue<AudioData>& AudioQueue() const { return mMaster->mAudioQueue; }
  MediaQueue<VideoData>& VideoQueue() const { return mMaster->mVideoQueue; }

  // Invokes S::Enter() with the arguments unpacked from |aTuple|.
  template <class S, typename... Args, size_t... Indexes>
  auto CallEnterMemberFunction(S* aS, std::tuple<Args...>& aTuple,
                               std::index_sequence<Indexes...>)
      -> decltype(ReturnTypeHelper(&S::Enter)) {
    AUTO_PROFILER_LABEL("StateObject::CallEnterMemberFunction", MEDIA_PLAYBACK);
    return aS->Enter(std::move(std::get<Indexes>(aTuple))...);
  }

  // Note this function will delete the current state object.
  // Don't access members to avoid UAF after this call.
  template <class S, typename... Ts>
  auto SetState(Ts&&... aArgs) -> decltype(ReturnTypeHelper(&S::Enter)) {
    // |aArgs| must be passed by reference to avoid passing MOZ_NON_PARAM class
    // SeekJob by value.  See bug 1287006 and bug 1338374.  But we still *must*
    // copy the parameters, because |Exit()| can modify them.  See bug 1312321.
    // So we 1) pass the parameters by reference, but then 2) immediately copy
    // them into a Tuple to be safe against modification, and finally 3) move
    // the elements of the Tuple into the final function call.
    auto copiedArgs = std::make_tuple(std::forward<Ts>(aArgs)...);

    // Copy mMaster which will reset to null.
    auto* master = mMaster;

    auto* s = new S(master);

    // It's possible to seek again during seeking, otherwise the new state
    // should always be different from the original one.
    MOZ_ASSERT(GetState() != s->GetState() ||
               GetState() == DECODER_STATE_SEEKING_ACCURATE ||
               GetState() == DECODER_STATE_SEEKING_FROMDORMANT ||
               GetState() == DECODER_STATE_SEEKING_NEXTFRAMESEEKING ||
               GetState() == DECODER_STATE_SEEKING_VIDEOONLY);

    SLOG("change state to: %s", ToStateStr(s->GetState()));
    PROFILER_MARKER_TEXT("MDSM::StateChange", MEDIA_PLAYBACK, {},
                         nsPrintfCString("%s", ToStateStr(s->GetState())));

    Exit();

    // Delete the old state asynchronously to avoid UAF if the caller tries to
    // access its members after SetState() returns.
    master->OwnerThread()->DispatchDirectTask(
        NS_NewRunnableFunction("MDSM::StateObject::DeleteOldState",
                               [toDelete = std::move(master->mStateObj)]() {}));
    // Also reset mMaster to catch potential UAF.
    mMaster = nullptr;

    master->mStateObj.reset(s);
    return CallEnterMemberFunction(s, copiedArgs,
                                   std::index_sequence_for<Ts...>{});
  }

  RefPtr<MediaDecoder::SeekPromise> SetSeekingState(
      SeekJob&& aSeekJob, EventVisibility aVisibility);

  void SetDecodingState();

  // Take a raw pointer in order not to change the life cycle of MDSM.
  // It is guaranteed to be valid by MDSM.
  Master* mMaster;
};
    284 
    285 /**
    286 * Purpose: decode metadata like duration and dimensions of the media resource.
    287 *
    288 * Transition to other states when decoding metadata is done:
    289 *   SHUTDOWN if failing to decode metadata.
    290 *   DECODING_FIRSTFRAME otherwise.
    291 */
    292 class MediaDecoderStateMachine::DecodeMetadataState
    293    : public MediaDecoderStateMachine::StateObject {
    294 public:
    295  explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) {}
    296 
    297  void Enter() {
    298    MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
    299    MOZ_ASSERT(!mMetadataRequest.Exists());
    300    SLOG("Dispatching AsyncReadMetadata");
    301 
    302    // We disconnect mMetadataRequest in Exit() so it is fine to capture
    303    // a raw pointer here.
    304    Reader()
    305        ->ReadMetadata()
    306        ->Then(
    307            OwnerThread(), __func__,
    308            [this](MetadataHolder&& aMetadata) {
    309              OnMetadataRead(std::move(aMetadata));
    310            },
    311            [this](const MediaResult& aError) { OnMetadataNotRead(aError); })
    312        ->Track(mMetadataRequest);
    313  }
    314 
    315  void Exit() override { mMetadataRequest.DisconnectIfExists(); }
    316 
    317  State GetState() const override { return DECODER_STATE_DECODING_METADATA; }
    318 
    319  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
    320      const SeekTarget& aTarget) override {
    321    MOZ_DIAGNOSTIC_CRASH("Can't seek while decoding metadata.");
    322    return MediaDecoder::SeekPromise::CreateAndReject(true, __func__);
    323  }
    324 
    325  void HandleVideoSuspendTimeout() override {
    326    // Do nothing since no decoders are created yet.
    327  }
    328 
    329  void HandleResumeVideoDecoding(const TimeUnit&) override {
    330    // We never suspend video decoding in this state.
    331    MOZ_ASSERT(false, "Shouldn't have suspended video decoding.");
    332  }
    333 
    334 private:
    335  void OnMetadataRead(MetadataHolder&& aMetadata);
    336 
    337  void OnMetadataNotRead(const MediaResult& aError) {
    338    AUTO_PROFILER_LABEL("DecodeMetadataState::OnMetadataNotRead",
    339                        MEDIA_PLAYBACK);
    340 
    341    mMetadataRequest.Complete();
    342    SLOGE("Decode metadata failed, shutting down decoder");
    343    mMaster->DecodeError(aError);
    344  }
    345 
    346  MozPromiseRequestHolder<MediaFormatReader::MetadataPromise> mMetadataRequest;
    347 };
    348 
    349 /**
    350 * Purpose: release decoder resources to save memory and hardware resources.
    351 *
    352 * Transition to:
    353 *   SEEKING if any seek request or play state changes to PLAYING.
    354 */
    355 class MediaDecoderStateMachine::DormantState
    356    : public MediaDecoderStateMachine::StateObject {
    357 public:
    358  explicit DormantState(Master* aPtr) : StateObject(aPtr) {}
    359 
    360  void Enter() {
    361    if (mMaster->IsPlaying()) {
    362      mMaster->StopPlayback();
    363    }
    364 
    365    // Calculate the position to seek to when exiting dormant.
    366    auto t = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock()
    367                                              : mMaster->GetMediaTime();
    368    mMaster->AdjustByLooping(t);
    369    mPendingSeek.mTarget.emplace(t, SeekTarget::Accurate);
    370    // SeekJob asserts |mTarget.IsValid() == !mPromise.IsEmpty()| so we
    371    // need to create the promise even it is not used at all.
    372    // The promise may be used when coming out of DormantState into
    373    // SeekingState.
    374    RefPtr<MediaDecoder::SeekPromise> x =
    375        mPendingSeek.mPromise.Ensure(__func__);
    376 
    377    // Reset the decoding state to ensure that any queued video frames are
    378    // released and don't consume video memory.
    379    mMaster->ResetDecode();
    380 
    381    // No need to call StopMediaSink() here.
    382    // We will do it during seeking when exiting dormant.
    383 
    384    // Ignore WAIT_FOR_DATA since we won't decode in dormant.
    385    mMaster->mAudioWaitRequest.DisconnectIfExists();
    386    mMaster->mVideoWaitRequest.DisconnectIfExists();
    387 
    388    MaybeReleaseResources();
    389  }
    390 
    391  void Exit() override {
    392    // mPendingSeek is either moved when exiting dormant or
    393    // should be rejected here before transition to SHUTDOWN.
    394    mPendingSeek.RejectIfExists(__func__);
    395  }
    396 
    397  State GetState() const override { return DECODER_STATE_DORMANT; }
    398 
    399  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
    400      const SeekTarget& aTarget) override;
    401 
    402  void HandleVideoSuspendTimeout() override {
    403    // Do nothing since we've released decoders in Enter().
    404  }
    405 
    406  void HandleResumeVideoDecoding(const TimeUnit&) override {
    407    // Do nothing since we won't resume decoding until exiting dormant.
    408  }
    409 
    410  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override;
    411 
    412  void HandleAudioDecoded(AudioData*) override { MaybeReleaseResources(); }
    413  void HandleVideoDecoded(VideoData*) override { MaybeReleaseResources(); }
    414  void HandleWaitingForAudio() override { MaybeReleaseResources(); }
    415  void HandleWaitingForVideo() override { MaybeReleaseResources(); }
    416  void HandleAudioCanceled() override { MaybeReleaseResources(); }
    417  void HandleVideoCanceled() override { MaybeReleaseResources(); }
    418  void HandleEndOfAudio() override { MaybeReleaseResources(); }
    419  void HandleEndOfVideo() override { MaybeReleaseResources(); }
    420 
    421 private:
    422  void MaybeReleaseResources() {
    423    if (!mMaster->mAudioDataRequest.Exists() &&
    424        !mMaster->mVideoDataRequest.Exists()) {
    425      // Release decoders only when they are idle. Otherwise it might cause
    426      // decode error later when resetting decoders during seeking.
    427      mMaster->mReader->ReleaseResources();
    428    }
    429  }
    430 
    431  SeekJob mPendingSeek;
    432 };
    433 
    434 /**
    435 * Purpose: decode the 1st audio and video frames to fire the 'loadeddata'
    436 * event.
    437 *
    438 * Transition to:
    439 *   SHUTDOWN if any decode error.
    440 *   SEEKING if any seek request.
    441 *   DECODING/LOOPING_DECODING when the 'loadeddata' event is fired.
    442 */
    443 class MediaDecoderStateMachine::DecodingFirstFrameState
    444    : public MediaDecoderStateMachine::StateObject {
    445 public:
    446  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) {}
    447 
    448  void Enter();
    449 
    450  void Exit() override {
    451    // mPendingSeek is either moved in MaybeFinishDecodeFirstFrame()
    452    // or should be rejected here before transition to SHUTDOWN.
    453    mPendingSeek.RejectIfExists(__func__);
    454  }
    455 
    456  State GetState() const override { return DECODER_STATE_DECODING_FIRSTFRAME; }
    457 
    458  void HandleAudioDecoded(AudioData* aAudio) override {
    459    mMaster->PushAudio(aAudio);
    460    MaybeFinishDecodeFirstFrame();
    461  }
    462 
    463  void HandleVideoDecoded(VideoData* aVideo) override {
    464    mMaster->PushVideo(aVideo);
    465    MaybeFinishDecodeFirstFrame();
    466  }
    467 
    468  void HandleWaitingForAudio() override {
    469    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
    470  }
    471 
    472  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }
    473 
    474  void HandleEndOfAudio() override {
    475    AudioQueue().Finish();
    476    MaybeFinishDecodeFirstFrame();
    477  }
    478 
    479  void HandleWaitingForVideo() override {
    480    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
    481  }
    482 
    483  void HandleVideoCanceled() override {
    484    mMaster->RequestVideoData(media::TimeUnit());
    485  }
    486 
    487  void HandleEndOfVideo() override {
    488    VideoQueue().Finish();
    489    MaybeFinishDecodeFirstFrame();
    490  }
    491 
    492  void HandleAudioWaited(MediaData::Type aType) override {
    493    mMaster->RequestAudioData();
    494  }
    495 
    496  void HandleVideoWaited(MediaData::Type aType) override {
    497    mMaster->RequestVideoData(media::TimeUnit());
    498  }
    499 
    500  void HandleVideoSuspendTimeout() override {
    501    // Do nothing for we need to decode the 1st video frame to get the
    502    // dimensions.
    503  }
    504 
    505  void HandleResumeVideoDecoding(const TimeUnit&) override {
    506    // We never suspend video decoding in this state.
    507    MOZ_ASSERT(false, "Shouldn't have suspended video decoding.");
    508  }
    509 
    510  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
    511      const SeekTarget& aTarget) override {
    512    if (mMaster->mIsMSE) {
    513      return StateObject::HandleSeek(aTarget);
    514    }
    515    // Delay seek request until decoding first frames for non-MSE media.
    516    SLOG("Not Enough Data to seek at this stage, queuing seek");
    517    mPendingSeek.RejectIfExists(__func__);
    518    mPendingSeek.mTarget.emplace(aTarget);
    519    return mPendingSeek.mPromise.Ensure(__func__);
    520  }
    521 
    522 private:
    523  // Notify FirstFrameLoaded if having decoded first frames and
    524  // transition to SEEKING if there is any pending seek, or DECODING otherwise.
    525  void MaybeFinishDecodeFirstFrame();
    526 
    527  SeekJob mPendingSeek;
    528 };
    529 
    530 /**
    531 * Purpose: decode audio/video data for playback.
    532 *
    533 * Transition to:
    534 *   DORMANT if playback is paused for a while.
    535 *   SEEKING if any seek request.
    536 *   SHUTDOWN if any decode error.
    537 *   BUFFERING if playback can't continue due to lack of decoded data.
    538 *   COMPLETED when having decoded all audio/video data.
    539 *   LOOPING_DECODING when media start seamless looping
    540 */
    541 class MediaDecoderStateMachine::DecodingState
    542    : public MediaDecoderStateMachine::StateObject {
    543 public:
    544  explicit DecodingState(Master* aPtr)
    545      : StateObject(aPtr), mDormantTimer(OwnerThread()) {}
    546 
    547  void Enter();
    548 
    549  void Exit() override {
    550    if (!mDecodeStartTime.IsNull()) {
    551      TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
    552      SLOG("Exiting DECODING, decoded for %.3lfs", decodeDuration.ToSeconds());
    553    }
    554    mDormantTimer.Reset();
    555    mOnAudioPopped.DisconnectIfExists();
    556    mOnVideoPopped.DisconnectIfExists();
    557  }
    558 
    559  void Step() override;
    560 
    561  State GetState() const override { return DECODER_STATE_DECODING; }
    562 
    563  void HandleAudioDecoded(AudioData* aAudio) override {
    564    mMaster->PushAudio(aAudio);
    565    DispatchDecodeTasksIfNeeded();
    566    MaybeStopPrerolling();
    567  }
    568 
    569  void HandleVideoDecoded(VideoData* aVideo) override {
    570    // We only do this check when we're not looping, which can be known by
    571    // checking the queue's offset.
    572    const auto currentTime = mMaster->GetMediaTime();
    573    if (aVideo->GetEndTime() < currentTime &&
    574        VideoQueue().GetOffset() == media::TimeUnit::Zero()) {
    575      if (!mVideoFirstLateTime) {
    576        mVideoFirstLateTime = Some(TimeStamp::Now());
    577      }
    578      PROFILER_MARKER("Video falling behind", MEDIA_PLAYBACK, {},
    579                      VideoFallingBehindMarker, aVideo->mTime.ToMicroseconds(),
    580                      currentTime.ToMicroseconds());
    581      SLOG("video %" PRId64 " starts being late (current=%" PRId64 ")",
    582           aVideo->mTime.ToMicroseconds(), currentTime.ToMicroseconds());
    583    } else {
    584      mVideoFirstLateTime.reset();
    585    }
    586    mMaster->PushVideo(aVideo);
    587    DispatchDecodeTasksIfNeeded();
    588    MaybeStopPrerolling();
    589  }
    590 
    591  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }
    592 
    593  void HandleVideoCanceled() override {
    594    mMaster->RequestVideoData(mMaster->GetMediaTime(),
    595                              ShouldRequestNextKeyFrame());
    596  }
    597 
    598  void HandleEndOfAudio() override;
    599  void HandleEndOfVideo() override;
    600 
    601  void HandleWaitingForAudio() override {
    602    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
    603    MaybeStopPrerolling();
    604  }
    605 
    606  void HandleWaitingForVideo() override {
    607    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
    608    MaybeStopPrerolling();
    609  }
    610 
    611  void HandleAudioWaited(MediaData::Type aType) override {
    612    mMaster->RequestAudioData();
    613  }
    614 
    615  void HandleVideoWaited(MediaData::Type aType) override {
    616    mMaster->RequestVideoData(mMaster->GetMediaTime(),
    617                              ShouldRequestNextKeyFrame());
    618  }
    619 
    620  void HandleAudioCaptured() override {
    621    MaybeStopPrerolling();
    622    // MediaSink is changed. Schedule Step() to check if we can start playback.
    623    mMaster->ScheduleStateMachine();
    624  }
    625 
    626  void HandleVideoSuspendTimeout() override {
    627    // No video, so nothing to suspend.
    628    if (!mMaster->HasVideo()) {
    629      return;
    630    }
    631 
    632    PROFILER_MARKER_UNTYPED("MDSM::EnterVideoSuspend", MEDIA_PLAYBACK);
    633    mMaster->mVideoDecodeSuspended = true;
    634    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend);
    635    Reader()->SetVideoBlankDecode(true);
    636  }
    637 
    638  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override {
    639    // Schedule Step() to check if we can start or stop playback.
    640    mMaster->ScheduleStateMachine();
    641    if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
    642      // Try to dispatch decoding tasks for mMinimizePreroll might be reset.
    643      DispatchDecodeTasksIfNeeded();
    644    }
    645 
    646    if (aPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
    647      StartDormantTimer();
    648      mVideoFirstLateTime.reset();
    649    } else {
    650      mDormantTimer.Reset();
    651    }
    652  }
    653 
    654  void GetDebugInfo(
    655      dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) override {
    656    aInfo.mIsPrerolling = mIsPrerolling;
    657  }
    658 
    659  void HandleLoopingChanged() override { SetDecodingState(); }
    660 
    661 protected:
    662  virtual void EnsureAudioDecodeTaskQueued();
    663  virtual void EnsureVideoDecodeTaskQueued();
    664 
    665  virtual bool ShouldStopPrerolling() const {
    666    return mIsPrerolling &&
    667           (DonePrerollingAudio() ||
    668            IsWaitingData(MediaData::Type::AUDIO_DATA)) &&
    669           (DonePrerollingVideo() ||
    670            IsWaitingData(MediaData::Type::VIDEO_DATA));
    671  }
    672 
    673  virtual bool IsWaitingData(MediaData::Type aType) const {
    674    if (aType == MediaData::Type::AUDIO_DATA) {
    675      return mMaster->IsWaitingAudioData();
    676    }
    677    MOZ_ASSERT(aType == MediaData::Type::VIDEO_DATA);
    678    return mMaster->IsWaitingVideoData();
    679  }
    680 
    681  void MaybeStopPrerolling() {
    682    if (ShouldStopPrerolling()) {
    683      mIsPrerolling = false;
    684      // Check if we can start playback.
    685      mMaster->ScheduleStateMachine();
    686    }
    687  }
    688 
    689  bool ShouldRequestNextKeyFrame() const {
    690    if (!mVideoFirstLateTime) {
    691      return false;
    692    }
    693    const double elapsedTimeMs =
    694        (TimeStamp::Now() - *mVideoFirstLateTime).ToMilliseconds();
    695    const bool rv = elapsedTimeMs >=
    696                    StaticPrefs::media_decoder_skip_when_video_too_slow_ms();
    697    if (rv) {
    698      PROFILER_MARKER_UNTYPED("Skipping to next keyframe", MEDIA_PLAYBACK);
    699      SLOG(
    700          "video has been late behind media time for %f ms, should skip to "
    701          "next key frame",
    702          elapsedTimeMs);
    703    }
    704    return rv;
    705  }
    706 
    707  virtual bool IsBufferingAllowed() const { return true; }
    708 
    709 private:
    710  void DispatchDecodeTasksIfNeeded();
    711  void MaybeStartBuffering();
    712 
    713  // At the start of decoding we want to "preroll" the decode until we've
    714  // got a few frames decoded before we consider whether decode is falling
    715  // behind. Otherwise our "we're falling behind" logic will trigger
    716  // unnecessarily if we start playing as soon as the first sample is
    717  // decoded. These two fields store how many video frames and audio
    718  // samples we must consume before are considered to be finished prerolling.
    719  TimeUnit AudioPrerollThreshold() const {
    720    return (mMaster->mAmpleAudioThreshold / 2)
    721        .MultDouble(mMaster->mPlaybackRate);
    722  }
    723 
    724  uint32_t VideoPrerollFrames() const {
    725    uint32_t preroll = static_cast<uint32_t>(
    726        mMaster->GetAmpleVideoFrames() / 2. * mMaster->mPlaybackRate + 1);
    727    // Keep it under maximal queue size.
    728    mMaster->mReader->GetMaxVideoQueueSize().apply(
    729        [&preroll](const uint32_t& x) { preroll = std::min(preroll, x); });
    730    return preroll;
    731  }
    732 
    733  bool DonePrerollingAudio() const {
    734    return !mMaster->IsAudioDecoding() ||
    735           mMaster->GetDecodedAudioDuration() >= AudioPrerollThreshold();
    736  }
    737 
    738  bool DonePrerollingVideo() const {
    739    return !mMaster->IsVideoDecoding() ||
    740           static_cast<uint32_t>(mMaster->VideoQueue().GetSize()) >=
    741               VideoPrerollFrames();
    742  }
    743 
    744  void StartDormantTimer() {
    745    if (!mMaster->mMediaSeekable) {
    746      // Don't enter dormant if the media is not seekable because we need to
    747      // seek when exiting dormant.
    748      return;
    749    }
    750 
    751    auto timeout = StaticPrefs::media_dormant_on_pause_timeout_ms();
    752    if (timeout < 0) {
    753      // Disabled when timeout is negative.
    754      return;
    755    }
    756 
    757    if (timeout == 0) {
    758      // Enter dormant immediately without scheduling a timer.
    759      SetState<DormantState>();
    760      return;
    761    }
    762 
    763    if (mMaster->mMinimizePreroll) {
    764      SetState<DormantState>();
    765      return;
    766    }
    767 
    768    TimeStamp target =
    769        TimeStamp::Now() + TimeDuration::FromMilliseconds(timeout);
    770 
    771    mDormantTimer.Ensure(
    772        target,
    773        [this]() {
    774          AUTO_PROFILER_LABEL("DecodingState::StartDormantTimer:SetDormant",
    775                              MEDIA_PLAYBACK);
    776          mDormantTimer.CompleteRequest();
    777          SetState<DormantState>();
    778        },
    779        [this]() { mDormantTimer.CompleteRequest(); });
    780  }
    781 
    782  // Time at which we started decoding.
    783  TimeStamp mDecodeStartTime;
    784 
    785  // When we start decoding (either for the first time, or after a pause)
    786  // we may be low on decoded data. We don't want our "low data" logic to
    787  // kick in and decide that we're low on decoded data because the download
    788  // can't keep up with the decode, and cause us to pause playback. So we
    789  // have a "preroll" stage, where we ignore the results of our "low data"
    790  // logic during the first few frames of our decode. This occurs during
    791  // playback.
    792  bool mIsPrerolling = true;
    793 
    794  // Fired when playback is paused for a while to enter dormant.
    795  DelayedScheduler<TimeStamp> mDormantTimer;
    796 
    797  MediaEventListener mOnAudioPopped;
    798  MediaEventListener mOnVideoPopped;
    799 
  // If video has been later than the media time, this will record when the
  // video started being late. It will be reset once video catches up with the
  // media time.
    803  Maybe<TimeStamp> mVideoFirstLateTime;
    804 };
    805 
    806 /**
    807 * Purpose: decode audio data for playback when media is in seamless
* looping; we will adjust media time to make sample times monotonically
* increasing. All its methods run on its owner thread (MDSM thread).
    810 *
    811 * Transition to:
    812 *   DORMANT if playback is paused for a while.
    813 *   SEEKING if any seek request.
    814 *   SHUTDOWN if any decode error.
    815 *   BUFFERING if playback can't continue due to lack of decoded data.
    816 *   COMPLETED when the media resource is closed and no data is available
    817 *             anymore.
    818 *   DECODING when media stops seamless looping.
    819 */
    820 class MediaDecoderStateMachine::LoopingDecodingState
    821    : public MediaDecoderStateMachine::DecodingState {
    822 public:
  // Entering seamless-looping playback. Records which tracks already reached
  // EOS, tries to pin down each track's decoded duration (needed to offset
  // sample timestamps on every loop), and re-bases the queue offsets if we
  // have looped before.
  explicit LoopingDecodingState(Master* aPtr)
      : DecodingState(aPtr),
        mIsReachingAudioEOS(!mMaster->IsAudioDecoding()),
        mIsReachingVideoEOS(!mMaster->IsVideoDecoding()),
        mAudioEndedBeforeEnteringStateWithoutDuration(false),
        mVideoEndedBeforeEnteringStateWithoutDuration(false) {
    MOZ_ASSERT(mMaster->mLooping);
    SLOG(
        "LoopingDecodingState ctor, mIsReachingAudioEOS=%d, "
        "mIsReachingVideoEOS=%d",
        mIsReachingAudioEOS, mIsReachingVideoEOS);
    // If the track has reached EOS and we already have its last data, then we
    // can know its duration. But if playback starts from EOS (due to seeking),
    // the decoded end time would be zero because none of data gets decoded yet.
    if (mIsReachingAudioEOS) {
      if (mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA) &&
          !mMaster->mAudioTrackDecodedDuration) {
        mMaster->mAudioTrackDecodedDuration.emplace(
            mMaster->mDecodedAudioEndTime);
        SLOG("determine mAudioTrackDecodedDuration");
      } else {
        // NOTE(review): this flag is also set when the duration was already
        // known on entry (the && above fails on either condition) — presumably
        // harmless, but confirm it is intended.
        mAudioEndedBeforeEnteringStateWithoutDuration = true;
        SLOG("still don't know mAudioTrackDecodedDuration");
      }
    }

    if (mIsReachingVideoEOS) {
      if (mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA) &&
          !mMaster->mVideoTrackDecodedDuration) {
        mMaster->mVideoTrackDecodedDuration.emplace(
            mMaster->mDecodedVideoEndTime);
        SLOG("determine mVideoTrackDecodedDuration");
      } else {
        mVideoEndedBeforeEnteringStateWithoutDuration = true;
        SLOG("still don't know mVideoTrackDecodedDuration");
      }
    }

    // We might be able to determine the duration already, let's check.
    if (mIsReachingAudioEOS || mIsReachingVideoEOS) {
      (void)DetermineOriginalDecodedDurationIfNeeded();
    }

    // If we've looped at least once before, then we need to update queue offset
    // correctly to make the media data time and the clock time consistent.
    // Otherwise, it would cause a/v desync.
    if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) {
      if (mIsReachingAudioEOS && mMaster->HasAudio()) {
        AudioQueue().SetOffset(AudioQueue().GetOffset() +
                               mMaster->mOriginalDecodedDuration);
      }
      if (mIsReachingVideoEOS && mMaster->HasVideo()) {
        VideoQueue().SetOffset(VideoQueue().GetOffset() +
                               mMaster->mOriginalDecodedDuration);
      }
    }
  }
    880 
    881  void Enter() {
    882    if (mMaster->HasAudio() && mIsReachingAudioEOS) {
    883      SLOG("audio has ended, request the data again.");
    884      RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack);
    885    }
    886    if (mMaster->HasVideo() && mIsReachingVideoEOS) {
    887      SLOG("video has ended, request the data again.");
    888      RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack);
    889    }
    890    DecodingState::Enter();
    891  }
    892 
  // Leaving looping playback: drop samples that belong to a future loop
  // iteration, finish queues whose last frame was decoded, then disconnect all
  // outstanding requests. The teardown order below is significant.
  void Exit() override {
    MOZ_DIAGNOSTIC_ASSERT(mMaster->OnTaskQueue());
    SLOG("Leaving looping state, offset [a=%" PRId64 ",v=%" PRId64
         "], endtime [a=%" PRId64 ",v=%" PRId64 "], track duration [a=%" PRId64
         ",v=%" PRId64 "], waiting=%s",
         AudioQueue().GetOffset().ToMicroseconds(),
         VideoQueue().GetOffset().ToMicroseconds(),
         mMaster->mDecodedAudioEndTime.ToMicroseconds(),
         mMaster->mDecodedVideoEndTime.ToMicroseconds(),
         mMaster->mAudioTrackDecodedDuration
             ? mMaster->mAudioTrackDecodedDuration->ToMicroseconds()
             : 0,
         mMaster->mVideoTrackDecodedDuration
             ? mMaster->mVideoTrackDecodedDuration->ToMicroseconds()
             : 0,
         mDataWaitingTimestampAdjustment
             ? MediaData::EnumValueToString(
                   mDataWaitingTimestampAdjustment->mType)
             : "none");
    // Samples pushed for the next loop round must not play once looping stops.
    if (ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA)) {
      DiscardLoopedData(MediaData::Type::AUDIO_DATA);
    }
    if (ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA)) {
      DiscardLoopedData(MediaData::Type::VIDEO_DATA);
    }

    if (mMaster->HasAudio() && HasDecodedLastAudioFrame()) {
      SLOG("Mark audio queue as finished");
      mMaster->mAudioDataRequest.DisconnectIfExists();
      mMaster->mAudioWaitRequest.DisconnectIfExists();
      AudioQueue().Finish();
    }
    if (mMaster->HasVideo() && HasDecodedLastVideoFrame()) {
      SLOG("Mark video queue as finished");
      mMaster->mVideoDataRequest.DisconnectIfExists();
      mMaster->mVideoWaitRequest.DisconnectIfExists();
      VideoQueue().Finish();
    }

    // Clear waiting data should be done after marking queue as finished.
    mDataWaitingTimestampAdjustment = nullptr;

    // Disconnect this state's own requests (distinct from the master's
    // requests handled above) before handing off to the base class.
    mAudioDataRequest.DisconnectIfExists();
    mVideoDataRequest.DisconnectIfExists();
    mAudioSeekRequest.DisconnectIfExists();
    mVideoSeekRequest.DisconnectIfExists();
    DecodingState::Exit();
  }
    941 
  ~LoopingDecodingState() {
    // Exit() must have disconnected every outstanding request before this
    // state object is destroyed.
    MOZ_DIAGNOSTIC_ASSERT(!mAudioDataRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mVideoDataRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mAudioSeekRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
  }
    948 
    949  State GetState() const override { return DECODER_STATE_LOOPING_DECODING; }
    950 
    951  void HandleAudioDecoded(AudioData* aAudio) override {
    952    // TODO : check if we need to update mOriginalDecodedDuration
    953 
    954    // After pushing data to the queue, timestamp might be adjusted.
    955    DecodingState::HandleAudioDecoded(aAudio);
    956    mMaster->mDecodedAudioEndTime =
    957        std::max(aAudio->GetEndTime(), mMaster->mDecodedAudioEndTime);
    958    SLOG("audio sample after time-adjustment [%" PRId64 ",%" PRId64 "]",
    959         aAudio->mTime.ToMicroseconds(), aAudio->GetEndTime().ToMicroseconds());
    960  }
    961 
    962  void HandleVideoDecoded(VideoData* aVideo) override {
    963    // TODO : check if we need to update mOriginalDecodedDuration
    964 
    965    // Here sample still keeps its original timestamp.
    966 
    967    // This indicates there is a shorter audio track, and it's the first time in
    968    // the looping (audio ends but video is playing) so that we haven't been
    969    // able to determine the decoded duration. Therefore, we fill the gap
    970    // between two tracks before video ends. Afterward, this adjustment will be
    971    // done in `HandleEndOfAudio()`.
    972    if (mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero() &&
    973        mMaster->mAudioTrackDecodedDuration &&
    974        aVideo->GetEndTime() > *mMaster->mAudioTrackDecodedDuration) {
    975      media::TimeUnit gap;
    976      // First time we fill gap between the video frame to the last audio.
    977      if (auto prevVideo = VideoQueue().PeekBack();
    978          prevVideo &&
    979          prevVideo->GetEndTime() < *mMaster->mAudioTrackDecodedDuration) {
    980        gap =
    981            aVideo->GetEndTime().ToBase(*mMaster->mAudioTrackDecodedDuration) -
    982            *mMaster->mAudioTrackDecodedDuration;
    983      }
    984      // Then fill the gap for all following videos.
    985      else {
    986        gap = aVideo->mDuration.ToBase(*mMaster->mAudioTrackDecodedDuration);
    987      }
    988      SLOG("Longer video %" PRId64 "%s (audio-durtaion=%" PRId64
    989           "%s), insert silence to fill the gap %" PRId64 "%s",
    990           aVideo->GetEndTime().ToMicroseconds(),
    991           aVideo->GetEndTime().ToString().get(),
    992           mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
    993           mMaster->mAudioTrackDecodedDuration->ToString().get(),
    994           gap.ToMicroseconds(), gap.ToString().get());
    995      PushFakeAudioDataIfNeeded(gap);
    996    }
    997 
    998    // After pushing data to the queue, timestamp might be adjusted.
    999    DecodingState::HandleVideoDecoded(aVideo);
   1000    mMaster->mDecodedVideoEndTime =
   1001        std::max(aVideo->GetEndTime(), mMaster->mDecodedVideoEndTime);
   1002    SLOG("video sample after time-adjustment [%" PRId64 ",%" PRId64 "]",
   1003         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
   1004  }
   1005 
   1006  void HandleEndOfAudio() override {
   1007    mIsReachingAudioEOS = true;
   1008    if (!mMaster->mAudioTrackDecodedDuration &&
   1009        mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA)) {
   1010      mMaster->mAudioTrackDecodedDuration.emplace(
   1011          mMaster->mDecodedAudioEndTime);
   1012    }
   1013    if (DetermineOriginalDecodedDurationIfNeeded()) {
   1014      AudioQueue().SetOffset(AudioQueue().GetOffset() +
   1015                             mMaster->mOriginalDecodedDuration);
   1016    }
   1017 
   1018    // This indicates that the audio track is shorter than the video track, so
   1019    // we need to add some silence to fill the gap.
   1020    if (mMaster->mAudioTrackDecodedDuration &&
   1021        mMaster->mOriginalDecodedDuration >
   1022            *mMaster->mAudioTrackDecodedDuration) {
   1023      MOZ_ASSERT(mMaster->HasVideo());
   1024      MOZ_ASSERT(mMaster->mVideoTrackDecodedDuration);
   1025      MOZ_ASSERT(mMaster->mOriginalDecodedDuration ==
   1026                 *mMaster->mVideoTrackDecodedDuration);
   1027      auto gap = mMaster->mOriginalDecodedDuration.ToBase(
   1028                     *mMaster->mAudioTrackDecodedDuration) -
   1029                 *mMaster->mAudioTrackDecodedDuration;
   1030      SLOG(
   1031          "Audio track is shorter than the original decoded duration "
   1032          "(a=%" PRId64 "%s, t=%" PRId64
   1033          "%s), insert silence to fill the gap %" PRId64 "%s",
   1034          mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
   1035          mMaster->mAudioTrackDecodedDuration->ToString().get(),
   1036          mMaster->mOriginalDecodedDuration.ToMicroseconds(),
   1037          mMaster->mOriginalDecodedDuration.ToString().get(),
   1038          gap.ToMicroseconds(), gap.ToString().get());
   1039      PushFakeAudioDataIfNeeded(gap);
   1040    }
   1041 
   1042    SLOG(
   1043        "received audio EOS when seamless looping, starts seeking, "
   1044        "audioLoopingOffset=[%" PRId64 "], mAudioTrackDecodedDuration=[%" PRId64
   1045        "]",
   1046        AudioQueue().GetOffset().ToMicroseconds(),
   1047        mMaster->mAudioTrackDecodedDuration->ToMicroseconds());
   1048    if (!IsRequestingDataFromStartPosition(MediaData::Type::AUDIO_DATA)) {
   1049      RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack);
   1050    }
   1051    ProcessSamplesWaitingAdjustmentIfAny();
   1052  }
   1053 
   1054  void HandleEndOfVideo() override {
   1055    mIsReachingVideoEOS = true;
   1056    if (!mMaster->mVideoTrackDecodedDuration &&
   1057        mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA)) {
   1058      mMaster->mVideoTrackDecodedDuration.emplace(
   1059          mMaster->mDecodedVideoEndTime);
   1060    }
   1061    if (DetermineOriginalDecodedDurationIfNeeded()) {
   1062      VideoQueue().SetOffset(VideoQueue().GetOffset() +
   1063                             mMaster->mOriginalDecodedDuration);
   1064    }
   1065 
   1066    SLOG(
   1067        "received video EOS when seamless looping, starts seeking, "
   1068        "videoLoopingOffset=[%" PRId64 "], mVideoTrackDecodedDuration=[%" PRId64
   1069        "]",
   1070        VideoQueue().GetOffset().ToMicroseconds(),
   1071        mMaster->mVideoTrackDecodedDuration->ToMicroseconds());
   1072    if (!IsRequestingDataFromStartPosition(MediaData::Type::VIDEO_DATA)) {
   1073      RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack);
   1074    }
   1075    ProcessSamplesWaitingAdjustmentIfAny();
   1076  }
   1077 
   1078 private:
  // Seeks one track's demuxer back to time zero so the next loop iteration
  // can decode from the start. If the reader is already busy with a seek, the
  // request is parked in mPendingSeekingType and replayed from the resolve
  // callback. On resolve, the first sample of the track is requested
  // immediately.
  void RequestDataFromStartPosition(TrackInfo::TrackType aType) {
    MOZ_DIAGNOSTIC_ASSERT(aType == TrackInfo::TrackType::kAudioTrack ||
                          aType == TrackInfo::TrackType::kVideoTrack);

    const bool isAudio = aType == TrackInfo::TrackType::kAudioTrack;
    MOZ_ASSERT_IF(isAudio, mMaster->HasAudio());
    MOZ_ASSERT_IF(!isAudio, mMaster->HasVideo());

    // The reader can only run one seek at a time; defer until it finishes.
    if (IsReaderSeeking()) {
      MOZ_ASSERT(!mPendingSeekingType);
      mPendingSeekingType = Some(aType);
      SLOG("Delay %s seeking until the reader finishes current seeking",
           isAudio ? "audio" : "video");
      return;
    }

    auto& seekRequest = isAudio ? mAudioSeekRequest : mVideoSeekRequest;
    // Drop any buffered-but-undelivered data for this track before seeking.
    Reader()->ResetDecode(aType);
    Reader()
        ->Seek(SeekTarget(media::TimeUnit::Zero(), SeekTarget::Type::Accurate,
                          isAudio ? SeekTarget::Track::AudioOnly
                                  : SeekTarget::Track::VideoOnly))
        ->Then(
            OwnerThread(), __func__,
            [this, isAudio, master = RefPtr{mMaster}]() mutable -> void {
              AUTO_PROFILER_LABEL(
                  nsPrintfCString(
                      "LoopingDecodingState::RequestDataFromStartPosition(%s)::"
                      "SeekResolved",
                      isAudio ? "audio" : "video")
                      .get(),
                  MEDIA_PLAYBACK);
              // Exit() disconnects seekRequest, so resolving outside this
              // state should be impossible.
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
                return;
              }
              if (isAudio) {
                mAudioSeekRequest.Complete();
              } else {
                mVideoSeekRequest.Complete();
              }
              SLOG(
                  "seeking completed, start to request first %s sample "
                  "(queued=%zu, decoder-queued=%zu)",
                  isAudio ? "audio" : "video",
                  isAudio ? AudioQueue().GetSize() : VideoQueue().GetSize(),
                  isAudio ? Reader()->SizeOfAudioQueueInFrames()
                          : Reader()->SizeOfVideoQueueInFrames());
              if (isAudio) {
                RequestAudioDataFromReaderAfterEOS();
              } else {
                RequestVideoDataFromReaderAfterEOS();
              }
              // Replay a seek for the other track that was deferred while
              // this one was in flight.
              if (mPendingSeekingType) {
                auto seekingType = *mPendingSeekingType;
                mPendingSeekingType.reset();
                SLOG("Perform pending %s seeking", TrackTypeToStr(seekingType));
                RequestDataFromStartPosition(seekingType);
              }
            },
            [this, isAudio, master = RefPtr{mMaster}](
                const SeekRejectValue& aReject) mutable -> void {
              AUTO_PROFILER_LABEL(
                  nsPrintfCString("LoopingDecodingState::"
                                  "RequestDataFromStartPosition(%s)::"
                                  "SeekRejected",
                                  isAudio ? "audio" : "video")
                      .get(),
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
                return;
              }
              if (isAudio) {
                mAudioSeekRequest.Complete();
              } else {
                mVideoSeekRequest.Complete();
              }
              HandleError(aReject.mError, isAudio);
            })
        ->Track(seekRequest);
  }
   1165 
  // Requests the first audio sample after the looping seek-to-start resolved.
  // A successful sample either goes through the normal decoded path or is
  // parked until the loop timestamp offset is known.
  void RequestAudioDataFromReaderAfterEOS() {
    MOZ_ASSERT(mMaster->HasAudio());
    Reader()
        ->RequestAudioData()
        ->Then(
            OwnerThread(), __func__,
            [this, master = RefPtr{mMaster}](const RefPtr<AudioData>& aAudio) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestAudioDataFromReader::"
                  "RequestDataResolved",
                  MEDIA_PLAYBACK);
              // Exit() disconnects mAudioDataRequest, so resolving outside
              // this state should be impossible.
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
                return;
              }
              // New data arrived: audio is no longer at EOS.
              mIsReachingAudioEOS = false;
              mAudioDataRequest.Complete();
              SLOG(
                  "got audio decoded sample "
                  "[%" PRId64 ",%" PRId64 "]",
                  aAudio->mTime.ToMicroseconds(),
                  aAudio->GetEndTime().ToMicroseconds());
              if (ShouldPutDataOnWaiting(MediaData::Type::AUDIO_DATA)) {
                SLOG(
                    "decoded audio sample needs to wait for timestamp "
                    "adjustment after EOS");
                PutDataOnWaiting(aAudio);
                return;
              }
              HandleAudioDecoded(aAudio);
              ProcessSamplesWaitingAdjustmentIfAny();
            },
            [this, master = RefPtr{mMaster}](const MediaResult& aError) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestAudioDataFromReader::"
                  "RequestDataRejected",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
                return;
              }
              mAudioDataRequest.Complete();
              HandleError(aError, true /* isAudio */);
            })
        ->Track(mAudioDataRequest);
  }
   1218 
  // Requests the first video frame after the looping seek-to-start resolved.
  // Mirrors RequestAudioDataFromReaderAfterEOS(), and additionally bypasses
  // the skip-to-next-keyframe check so the first looped frame is not dropped.
  void RequestVideoDataFromReaderAfterEOS() {
    MOZ_ASSERT(mMaster->HasVideo());
    Reader()
        ->RequestVideoData(media::TimeUnit(),
                           false /* aRequestNextVideoKeyFrame */)
        ->Then(
            OwnerThread(), __func__,
            [this, master = RefPtr{mMaster}](const RefPtr<VideoData>& aVideo) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestVideoDataFromReaderAfterEOS()::"
                  "RequestDataResolved",
                  MEDIA_PLAYBACK);
              // Exit() disconnects mVideoDataRequest, so resolving outside
              // this state should be impossible.
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
                return;
              }
              // New data arrived: video is no longer at EOS.
              mIsReachingVideoEOS = false;
              mVideoDataRequest.Complete();
              SLOG(
                  "got video decoded sample "
                  "[%" PRId64 ",%" PRId64 "]",
                  aVideo->mTime.ToMicroseconds(),
                  aVideo->GetEndTime().ToMicroseconds());
              if (ShouldPutDataOnWaiting(MediaData::Type::VIDEO_DATA)) {
                SLOG(
                    "decoded video sample needs to wait for timestamp "
                    "adjustment after EOS");
                PutDataOnWaiting(aVideo);
                return;
              }
              mMaster->mBypassingSkipToNextKeyFrameCheck = true;
              HandleVideoDecoded(aVideo);
              ProcessSamplesWaitingAdjustmentIfAny();
            },
            [this, master = RefPtr{mMaster}](const MediaResult& aError) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestVideoDataFromReaderAfterEOS()::"
                  "RequestDataRejected",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
                return;
              }
              mVideoDataRequest.Complete();
              HandleError(aError, false /* isAudio */);
            })
        ->Track(mVideoDataRequest);
  }
   1273 
   1274  void HandleError(const MediaResult& aError, bool aIsAudio);
   1275 
   1276  bool ShouldRequestData(MediaData::Type aType) const {
   1277    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
   1278                          aType == MediaData::Type::VIDEO_DATA);
   1279 
   1280    if (aType == MediaData::Type::AUDIO_DATA &&
   1281        (mAudioSeekRequest.Exists() || mAudioDataRequest.Exists() ||
   1282         IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) ||
   1283         mMaster->IsWaitingAudioData())) {
   1284      return false;
   1285    }
   1286    if (aType == MediaData::Type::VIDEO_DATA &&
   1287        (mVideoSeekRequest.Exists() || mVideoDataRequest.Exists() ||
   1288         IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) ||
   1289         mMaster->IsWaitingVideoData())) {
   1290      return false;
   1291    }
   1292    return true;
   1293  }
   1294 
   1295  void HandleAudioCanceled() override {
   1296    if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
   1297      mMaster->RequestAudioData();
   1298    }
   1299  }
   1300 
   1301  void HandleAudioWaited(MediaData::Type aType) override {
   1302    if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
   1303      mMaster->RequestAudioData();
   1304    }
   1305  }
   1306 
   1307  void HandleVideoCanceled() override {
   1308    if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
   1309      mMaster->RequestVideoData(mMaster->GetMediaTime(),
   1310                                ShouldRequestNextKeyFrame());
   1311    };
   1312  }
   1313 
   1314  void HandleVideoWaited(MediaData::Type aType) override {
   1315    if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
   1316      mMaster->RequestVideoData(mMaster->GetMediaTime(),
   1317                                ShouldRequestNextKeyFrame());
   1318    };
   1319  }
   1320 
   1321  void EnsureAudioDecodeTaskQueued() override {
   1322    if (!ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
   1323      return;
   1324    }
   1325    DecodingState::EnsureAudioDecodeTaskQueued();
   1326  }
   1327 
   1328  void EnsureVideoDecodeTaskQueued() override {
   1329    if (!ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
   1330      return;
   1331    }
   1332    DecodingState::EnsureVideoDecodeTaskQueued();
   1333  }
   1334 
  // Tries to resolve mOriginalDecodedDuration — the full decoded length of
  // one loop iteration, used as the per-loop timestamp offset. Returns true
  // once the duration is known (possibly from an earlier call), false while
  // we still have to wait for a track to end.
  bool DetermineOriginalDecodedDurationIfNeeded() {
    // Duration would only need to be set once, unless we get more data which is
    // larger than the duration. That can happen on MSE (reopen stream).
    if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) {
      return true;
    }

    // Single track situations
    if (mMaster->HasAudio() && !mMaster->HasVideo() &&
        mMaster->mAudioTrackDecodedDuration) {
      mMaster->mOriginalDecodedDuration = *mMaster->mAudioTrackDecodedDuration;
      SLOG("audio only, duration=%" PRId64,
           mMaster->mOriginalDecodedDuration.ToMicroseconds());
      return true;
    }
    if (mMaster->HasVideo() && !mMaster->HasAudio() &&
        mMaster->mVideoTrackDecodedDuration) {
      mMaster->mOriginalDecodedDuration = *mMaster->mVideoTrackDecodedDuration;
      SLOG("video only, duration=%" PRId64,
           mMaster->mOriginalDecodedDuration.ToMicroseconds());
      return true;
    }
    // Two tracks situation
    if (mMaster->HasAudio() && mMaster->HasVideo()) {
      // Both tracks have ended so that we can check which track is longer.
      if (mMaster->mAudioTrackDecodedDuration &&
          mMaster->mVideoTrackDecodedDuration) {
        mMaster->mOriginalDecodedDuration =
            std::max(*mMaster->mVideoTrackDecodedDuration,
                     *mMaster->mAudioTrackDecodedDuration);
        SLOG("Both tracks ended, original duration=%" PRId64 " (a=%" PRId64
             ", v=%" PRId64 ")",
             mMaster->mOriginalDecodedDuration.ToMicroseconds(),
             mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
             mMaster->mVideoTrackDecodedDuration->ToMicroseconds());
        return true;
      }
      // When entering the state, video has ended but audio hasn't, which means
      // audio is longer.
      if (mMaster->mAudioTrackDecodedDuration &&
          mVideoEndedBeforeEnteringStateWithoutDuration) {
        mMaster->mOriginalDecodedDuration =
            *mMaster->mAudioTrackDecodedDuration;
        mVideoEndedBeforeEnteringStateWithoutDuration = false;
        SLOG("audio is longer, duration=%" PRId64,
             mMaster->mOriginalDecodedDuration.ToMicroseconds());
        return true;
      }
      // When entering the state, audio has ended but video hasn't, which means
      // video is longer.
      if (mMaster->mVideoTrackDecodedDuration &&
          mAudioEndedBeforeEnteringStateWithoutDuration) {
        mMaster->mOriginalDecodedDuration =
            *mMaster->mVideoTrackDecodedDuration;
        mAudioEndedBeforeEnteringStateWithoutDuration = false;
        SLOG("video is longer, duration=%" PRId64,
             mMaster->mOriginalDecodedDuration.ToMicroseconds());
        return true;
      }
      SLOG("Still waiting for another track ends...");
      MOZ_ASSERT(!mMaster->mAudioTrackDecodedDuration ||
                 !mMaster->mVideoTrackDecodedDuration);
    }
    SLOG("can't determine the original decoded duration yet");
    MOZ_ASSERT(mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero());
    return false;
  }
   1402 
  // Replays the single sample (at most one, per PutDataOnWaiting) that was
  // parked until the looping timestamp offset became known. Bootstraps the
  // queue offset first if this is the very first loop iteration.
  void ProcessSamplesWaitingAdjustmentIfAny() {
    if (!mDataWaitingTimestampAdjustment) {
      return;
    }

    // Clear the member before re-dispatching so the Handle*Decoded path does
    // not see the sample as still waiting.
    RefPtr<MediaData> data = mDataWaitingTimestampAdjustment;
    mDataWaitingTimestampAdjustment = nullptr;
    const bool isAudio = data->mType == MediaData::Type::AUDIO_DATA;
    SLOG("process %s sample waiting for timestamp adjustment",
         isAudio ? "audio" : "video");
    if (isAudio) {
      // Waiting sample is for next round of looping, so the queue offset
      // shouldn't be zero. This happens when the track has reached EOS before
      // entering the state (and looping never happens before). Same for below
      // video case.
      if (AudioQueue().GetOffset() == media::TimeUnit::Zero()) {
        AudioQueue().SetOffset(mMaster->mOriginalDecodedDuration);
      }
      HandleAudioDecoded(data->As<AudioData>());
    } else {
      MOZ_DIAGNOSTIC_ASSERT(data->mType == MediaData::Type::VIDEO_DATA);
      if (VideoQueue().GetOffset() == media::TimeUnit::Zero()) {
        VideoQueue().SetOffset(mMaster->mOriginalDecodedDuration);
      }
      HandleVideoDecoded(data->As<VideoData>());
    }
  }
   1430 
   1431  bool IsDataWaitingForTimestampAdjustment(MediaData::Type aType) const {
   1432    return mDataWaitingTimestampAdjustment &&
   1433           mDataWaitingTimestampAdjustment->mType == aType;
   1434  }
   1435 
   1436  bool ShouldPutDataOnWaiting(MediaData::Type aType) const {
   1437    // If another track is already waiting, this track shouldn't be waiting.
   1438    // This case only happens when both tracks reached EOS before entering the
   1439    // looping decoding state, so we don't know the decoded duration yet (used
   1440    // to adjust timestamp) But this is fine, because both tracks will start
   1441    // from 0 so we don't need to adjust them now.
   1442    if (mDataWaitingTimestampAdjustment &&
   1443        !IsDataWaitingForTimestampAdjustment(aType)) {
   1444      return false;
   1445    }
   1446 
   1447    // Only have one track, no need to wait.
   1448    if ((aType == MediaData::Type::AUDIO_DATA && !mMaster->HasVideo()) ||
   1449        (aType == MediaData::Type::VIDEO_DATA && !mMaster->HasAudio())) {
   1450      return false;
   1451    }
   1452 
   1453    // We don't know the duration yet, so we can't calculate the looping offset.
   1454    return mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero();
   1455  }
   1456 
   1457  void PutDataOnWaiting(MediaData* aData) {
   1458    MOZ_ASSERT(!mDataWaitingTimestampAdjustment);
   1459    mDataWaitingTimestampAdjustment = aData;
   1460    SLOG("put %s [%" PRId64 ",%" PRId64 "] on waiting",
   1461         MediaData::EnumValueToString(aData->mType),
   1462         aData->mTime.ToMicroseconds(), aData->GetEndTime().ToMicroseconds());
   1463    MaybeStopPrerolling();
   1464  }
   1465 
   1466  bool ShouldDiscardLoopedData(MediaData::Type aType) const {
   1467    if (!mMaster->mMediaSink->IsStarted()) {
   1468      return false;
   1469    }
   1470 
   1471    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
   1472                          aType == MediaData::Type::VIDEO_DATA);
   1473    const bool isAudio = aType == MediaData::Type::AUDIO_DATA;
   1474    if (isAudio && !mMaster->HasAudio()) {
   1475      return false;
   1476    }
   1477    if (!isAudio && !mMaster->HasVideo()) {
   1478      return false;
   1479    }
   1480 
   1481    /**
   1482     * If media cancels looping, we should check whether there is media data
   1483     * whose time is later than EOS. If so, we should discard them because we
   1484     * won't have a chance to play them.
   1485     *
   1486     *    playback                     last decoded
   1487     *    position          EOS        data time
   1488     *   ----|---------------|------------|---------> (Increasing timeline)
   1489     *    mCurrent         looping      mMaster's
   1490     *    ClockTime        offset      mDecodedXXXEndTime
   1491     *
   1492     */
   1493    const auto offset =
   1494        isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset();
   1495    const auto endTime =
   1496        isAudio ? mMaster->mDecodedAudioEndTime : mMaster->mDecodedVideoEndTime;
   1497    const auto clockTime = mMaster->GetClock();
   1498    return (offset != media::TimeUnit::Zero() && clockTime < offset &&
   1499            offset < endTime);
   1500  }
   1501 
   1502  void DiscardLoopedData(MediaData::Type aType) {
   1503    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
   1504                          aType == MediaData::Type::VIDEO_DATA);
   1505    const bool isAudio = aType == MediaData::Type::AUDIO_DATA;
   1506    const auto offset =
   1507        isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset();
   1508    if (offset == media::TimeUnit::Zero()) {
   1509      return;
   1510    }
   1511 
   1512    SLOG("Discard %s frames after the time=%" PRId64,
   1513         isAudio ? "audio" : "video", offset.ToMicroseconds());
   1514    if (isAudio) {
   1515      DiscardFramesFromTail(AudioQueue(), [&](int64_t aSampleTime) {
   1516        return aSampleTime > offset.ToMicroseconds();
   1517      });
   1518    } else {
   1519      DiscardFramesFromTail(VideoQueue(), [&](int64_t aSampleTime) {
   1520        return aSampleTime > offset.ToMicroseconds();
   1521      });
   1522    }
   1523  }
   1524 
  // Fills the audio track with aDuration worth of silent samples, pushed
  // through the normal decoded-audio path so queue offsets and end times stay
  // consistent. Used when the audio track is shorter than the video track and
  // the gap must be padded. No-op (with a warning) when the duration doesn't
  // amount to at least one frame or the audio config is unusable.
  void PushFakeAudioDataIfNeeded(const media::TimeUnit& aDuration) {
    MOZ_ASSERT(Info().HasAudio());

    const auto& audioInfo = Info().mAudio;
    // Convert the wall-clock duration into a frame count at the track rate;
    // CheckedInt64 guards against overflow in the conversion.
    CheckedInt64 frames = aDuration.ToTicksAtRate(audioInfo.mRate);
    if (!frames.isValid() || !audioInfo.mChannels || !audioInfo.mRate) {
      NS_WARNING("Can't create fake audio, invalid frames/channel/rate?");
      return;
    }

    if (!frames.value()) {
      NS_WARNING(nsPrintfCString("Duration (%s) too short, no frame needed",
                                 aDuration.ToString().get())
                     .get());
      return;
    }

    // If we can get the last sample, use its frame. Otherwise, use common 1024.
    int64_t typicalPacketFrameCount = 1024;
    if (RefPtr<AudioData> audio = AudioQueue().PeekBack()) {
      typicalPacketFrameCount = audio->Frames();
    }

    media::TimeUnit totalDuration = TimeUnit::Zero(audioInfo.mRate);
    // Generate fake audio in a smaller size of audio chunk.
    while (frames.value()) {
      int64_t packetFrameCount =
          std::min(frames.value(), typicalPacketFrameCount);
      frames -= packetFrameCount;
      // AlignedAudioBuffer value-initializes its storage, so the packet is
      // silence; a null buffer signals allocation failure.
      AlignedAudioBuffer samples(packetFrameCount * audioInfo.mChannels);
      if (!samples) {
        NS_WARNING("Can't create audio buffer, OOM?");
        return;
      }
      // `mDecodedAudioEndTime` is adjusted time, and we want unadjusted time
      // otherwise the time would be adjusted twice when pushing sample into the
      // media queue.
      media::TimeUnit startTime = mMaster->mDecodedAudioEndTime;
      if (AudioQueue().GetOffset() != media::TimeUnit::Zero()) {
        startTime -= AudioQueue().GetOffset();
      }
      RefPtr<AudioData> data(new AudioData(0, startTime, std::move(samples),
                                           audioInfo.mChannels,
                                           audioInfo.mRate));
      SLOG("Created fake audio data (duration=%s, frame-left=%" PRId64 ")",
           data->mDuration.ToString().get(), frames.value());
      totalDuration += data->mDuration;
      // HandleAudioDecoded() pushes the packet and (presumably) advances
      // mDecodedAudioEndTime, so the next iteration's startTime follows on
      // from this packet — TODO confirm against DecodingState.
      HandleAudioDecoded(data);
    }
    SLOG("Pushed fake silence audio data in total duration=%" PRId64 "%s",
         totalDuration.ToMicroseconds(), totalDuration.ToString().get());
  }
   1577 
   1578  bool HasDecodedLastAudioFrame() const {
   1579    // when we're going to leave looping state and have got EOS before, we
   1580    // should mark audio queue as ended because we have got all data we need.
   1581    return mAudioDataRequest.Exists() || mAudioSeekRequest.Exists() ||
   1582           ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA) ||
   1583           IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) ||
   1584           mIsReachingAudioEOS;
   1585  }
   1586 
   1587  bool HasDecodedLastVideoFrame() const {
   1588    // when we're going to leave looping state and have got EOS before, we
   1589    // should mark video queue as ended because we have got all data we need.
   1590    return mVideoDataRequest.Exists() || mVideoSeekRequest.Exists() ||
   1591           ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA) ||
   1592           IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) ||
   1593           mIsReachingVideoEOS;
   1594  }
   1595 
   1596  bool ShouldStopPrerolling() const override {
   1597    // These checks is used to handle the media queue aren't opened correctly
   1598    // because they've been close before entering the looping state. Therefore,
   1599    // we need to preroll data in order to let new data to reopen the queue
   1600    // automatically. Otherwise, playback can't start successfully.
   1601    bool isWaitingForNewData = false;
   1602    if (mMaster->HasAudio()) {
   1603      isWaitingForNewData |= (mIsReachingAudioEOS && AudioQueue().IsFinished());
   1604    }
   1605    if (mMaster->HasVideo()) {
   1606      isWaitingForNewData |= (mIsReachingVideoEOS && VideoQueue().IsFinished());
   1607    }
   1608    return !isWaitingForNewData && DecodingState::ShouldStopPrerolling();
   1609  }
   1610 
   1611  bool IsReaderSeeking() const {
   1612    return mAudioSeekRequest.Exists() || mVideoSeekRequest.Exists();
   1613  }
   1614 
   1615  bool IsWaitingData(MediaData::Type aType) const override {
   1616    if (aType == MediaData::Type::AUDIO_DATA) {
   1617      return mMaster->IsWaitingAudioData() ||
   1618             IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA);
   1619    }
   1620    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::VIDEO_DATA);
   1621    return mMaster->IsWaitingVideoData() ||
   1622           IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA);
   1623  }
   1624 
   1625  bool IsRequestingDataFromStartPosition(MediaData::Type aType) const {
   1626    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
   1627                          aType == MediaData::Type::VIDEO_DATA);
   1628    if (aType == MediaData::Type::AUDIO_DATA) {
   1629      return mAudioSeekRequest.Exists() || mAudioDataRequest.Exists();
   1630    }
   1631    return mVideoSeekRequest.Exists() || mVideoDataRequest.Exists();
   1632  }
   1633 
   1634  bool IsBufferingAllowed() const override {
   1635    return !mIsReachingAudioEOS && !mIsReachingVideoEOS;
   1636  }
   1637 
   1638  bool mIsReachingAudioEOS;
   1639  bool mIsReachingVideoEOS;
   1640 
  /**
   * If we have two tracks of different lengths, then when one track ends
   * first, we can't adjust new data from that track while the longer track
   * hasn't ended yet. The adjusted timestamp needs to be based off the longer
   * track's last data's timestamp, because otherwise it would cause a
   * deviation and eventually a/v unsync. Those samples need to be stored and
   * we will adjust their timestamps later.
   *
   * The following graph explains the situation in detail.
   * o : decoded data with timestamp adjusted or no adjustment (not looping yet)
   * x : decoded data without timestamp adjustment.
   * - : stop decoding and nothing happens
   * EOS : the track reaches the end. We now know the offset of the track.
   *
   * Timeline ----------------------------------->
   * Track1 :  o EOS x  -  -  o
   * Track2 :  o  o  o EOS o  o
   *
   * Before reaching track2's EOS, we can't adjust samples from track1 because
   * track2 might have a longer duration than track1. The sample x would be
   * stored in `mDataWaitingTimestampAdjustment` and we would also stop
   * decoding for track1.
   *
   * After reaching track2's EOS, we know the other track's offset as well, and
   * the larger one is used for `mOriginalDecodedDuration`. Once that duration
   * has been determined, we will no longer need to put samples on waiting
   * because we already know how to adjust timestamps.
   */
   1669  RefPtr<MediaData> mDataWaitingTimestampAdjustment;
   1670 
   1671  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mAudioSeekRequest;
   1672  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mVideoSeekRequest;
   1673  MozPromiseRequestHolder<AudioDataPromise> mAudioDataRequest;
   1674  MozPromiseRequestHolder<VideoDataPromise> mVideoDataRequest;
   1675 
   1676  // The media format reader only allows seeking a track at a time, if we're
   1677  // already in seeking, then delay the new seek until the current one finishes.
   1678  Maybe<TrackInfo::TrackType> mPendingSeekingType;
   1679 
  // These are used to track a special case where playback starts from the EOS
  // position via seeking. So even though EOS has been reached, none of the
  // data has been decoded yet. They will be reset when
  // `mOriginalDecodedDuration` is determined.
   1684  bool mAudioEndedBeforeEnteringStateWithoutDuration;
   1685  bool mVideoEndedBeforeEnteringStateWithoutDuration;
   1686 };
   1687 
   1688 /**
   1689 * Purpose: seek to a particular new playback position.
   1690 *
   1691 * Transition to:
   1692 *   SEEKING if any new seek request.
   1693 *   SHUTDOWN if seek failed.
   1694 *   COMPLETED if the new playback position is the end of the media resource.
   1695 *   NextFrameSeekingState if completing a NextFrameSeekingFromDormantState.
   1696 *   DECODING/LOOPING_DECODING otherwise.
   1697 */
// Abstract base for all seeking states. Subclasses implement DoSeek() (how
// the demuxer/reader seek is driven) and CalculateNewCurrentTime(); Enter()
// owns the SeekJob/promise plumbing common to all of them.
class MediaDecoderStateMachine::SeekingState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit SeekingState(Master* aPtr)
      : StateObject(aPtr), mVisibility(static_cast<EventVisibility>(0)) {}

  // Takes ownership of aSeekJob and returns the promise callers resolve on
  // seek completion (resolved via SeekCompleted(), rejected on Exit()).
  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility) {
    mSeekJob = std::move(aSeekJob);
    mVisibility = aVisibility;

    // Suppressed visibility comes from two cases: (1) leaving dormant state,
    // and (2) resuming suspended video decoder. We want both cases to be
    // transparent to the user. So we only notify the change when the seek
    // request is from the user.
    if (mVisibility == EventVisibility::Observable) {
      // Don't stop playback for a video-only seek since we want to keep
      // playing audio, and we don't need to stop playback while leaving
      // dormant because playback should have been stopped already.
      mMaster->StopPlayback();
      mMaster->UpdatePlaybackPositionInternal(mSeekJob.mTarget->GetTime());
      mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::SeekStarted);
      mMaster->mOnNextFrameStatus.Notify(
          MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
    }

    // Ensure the promise before kicking off DoSeek(): the subclass's seek may
    // complete (and resolve the promise) synchronously.
    RefPtr<MediaDecoder::SeekPromise> p = mSeekJob.mPromise.Ensure(__func__);

    DoSeek();

    return p;
  }

  virtual void Exit() override = 0;

  State GetState() const override = 0;

  void HandleAudioDecoded(AudioData* aAudio) override = 0;
  void HandleVideoDecoded(VideoData* aVideo) override = 0;
  void HandleAudioWaited(MediaData::Type aType) override = 0;
  void HandleVideoWaited(MediaData::Type aType) override = 0;

  void HandleVideoSuspendTimeout() override {
    // Do nothing since we want a valid video frame to show when seek is done.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // Do nothing. We will resume video decoding in the decoding state.
  }

  // We specially handle next frame seeks by ignoring them if we're already
  // seeking.
  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override {
    if (aTarget.IsNextFrame()) {
      // We ignore next frame seeks if we already have a seek pending
      SLOG("Already SEEKING, ignoring seekToNextFrame");
      MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
      return MediaDecoder::SeekPromise::CreateAndReject(
          /* aRejectValue = */ true, __func__);
    }

    return StateObject::HandleSeek(aTarget);
  }

 protected:
  SeekJob mSeekJob;
  EventVisibility mVisibility;

  virtual void DoSeek() = 0;
  // Transition to the next state (defined by the subclass) when seek is
  // completed.
  virtual void GoToNextState() { SetDecodingState(); }
  void SeekCompleted();
  virtual TimeUnit CalculateNewCurrentTime() const = 0;
};
   1774 
   1775 class MediaDecoderStateMachine::AccurateSeekingState
   1776    : public MediaDecoderStateMachine::SeekingState {
   1777 public:
   1778  explicit AccurateSeekingState(Master* aPtr) : SeekingState(aPtr) {}
   1779 
   1780  State GetState() const override { return DECODER_STATE_SEEKING_ACCURATE; }
   1781 
   1782  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
   1783                                          EventVisibility aVisibility) {
   1784    MOZ_ASSERT(aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast());
   1785    mCurrentTimeBeforeSeek = mMaster->GetMediaTime();
   1786    return SeekingState::Enter(std::move(aSeekJob), aVisibility);
   1787  }
   1788 
   1789  void Exit() override {
   1790    // Disconnect MediaDecoder.
   1791    mSeekJob.RejectIfExists(__func__);
   1792 
   1793    // Disconnect ReaderProxy.
   1794    mSeekRequest.DisconnectIfExists();
   1795 
   1796    mWaitRequest.DisconnectIfExists();
   1797  }
   1798 
   1799  void HandleAudioDecoded(AudioData* aAudio) override {
   1800    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
   1801               "Seek shouldn't be finished");
   1802    MOZ_ASSERT(aAudio);
   1803 
   1804    AdjustFastSeekIfNeeded(aAudio);
   1805 
   1806    if (mSeekJob.mTarget->IsFast()) {
   1807      // Non-precise seek; we can stop the seek at the first sample.
   1808      mMaster->PushAudio(aAudio);
   1809      mDoneAudioSeeking = true;
   1810    } else {
   1811      nsresult rv = DropAudioUpToSeekTarget(aAudio);
   1812      if (NS_FAILED(rv)) {
   1813        mMaster->DecodeError(rv);
   1814        return;
   1815      }
   1816    }
   1817 
   1818    if (!mDoneAudioSeeking) {
   1819      RequestAudioData();
   1820      return;
   1821    }
   1822    MaybeFinishSeek();
   1823  }
   1824 
   1825  void HandleVideoDecoded(VideoData* aVideo) override {
   1826    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
   1827               "Seek shouldn't be finished");
   1828    MOZ_ASSERT(aVideo);
   1829 
   1830    AdjustFastSeekIfNeeded(aVideo);
   1831 
   1832    if (mSeekJob.mTarget->IsFast()) {
   1833      // Non-precise seek. We can stop the seek at the first sample.
   1834      mMaster->PushVideo(aVideo);
   1835      mDoneVideoSeeking = true;
   1836    } else {
   1837      nsresult rv = DropVideoUpToSeekTarget(aVideo);
   1838      if (NS_FAILED(rv)) {
   1839        mMaster->DecodeError(rv);
   1840        return;
   1841      }
   1842    }
   1843 
   1844    if (!mDoneVideoSeeking) {
   1845      RequestVideoData();
   1846      return;
   1847    }
   1848    MaybeFinishSeek();
   1849  }
   1850 
   1851  void HandleWaitingForAudio() override {
   1852    MOZ_ASSERT(!mDoneAudioSeeking);
   1853    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
   1854  }
   1855 
   1856  void HandleAudioCanceled() override {
   1857    MOZ_ASSERT(!mDoneAudioSeeking);
   1858    RequestAudioData();
   1859  }
   1860 
   1861  void HandleEndOfAudio() override {
   1862    HandleEndOfAudioInternal();
   1863    MaybeFinishSeek();
   1864  }
   1865 
   1866  void HandleWaitingForVideo() override {
   1867    MOZ_ASSERT(!mDoneVideoSeeking);
   1868    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
   1869  }
   1870 
   1871  void HandleVideoCanceled() override {
   1872    MOZ_ASSERT(!mDoneVideoSeeking);
   1873    RequestVideoData();
   1874  }
   1875 
   1876  void HandleEndOfVideo() override {
   1877    HandleEndOfVideoInternal();
   1878    MaybeFinishSeek();
   1879  }
   1880 
   1881  void HandleAudioWaited(MediaData::Type aType) override {
   1882    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
   1883               "Seek shouldn't be finished");
   1884 
   1885    RequestAudioData();
   1886  }
   1887 
   1888  void HandleVideoWaited(MediaData::Type aType) override {
   1889    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
   1890               "Seek shouldn't be finished");
   1891 
   1892    RequestVideoData();
   1893  }
   1894 
   1895  void DoSeek() override {
   1896    mDoneAudioSeeking = !Info().HasAudio();
   1897    mDoneVideoSeeking = !Info().HasVideo();
   1898 
   1899    // Resetting decode should be called after stopping media sink, which can
   1900    // ensure that we have an empty media queue before seeking the demuxer.
   1901    mMaster->StopMediaSink();
   1902    mMaster->ResetDecode();
   1903 
   1904    DemuxerSeek();
   1905  }
   1906 
   1907  TimeUnit CalculateNewCurrentTime() const override {
   1908    const auto seekTime = mSeekJob.mTarget->GetTime();
   1909 
   1910    // For the accurate seek, we always set the newCurrentTime = seekTime so
   1911    // that the updated HTMLMediaElement.currentTime will always be the seek
   1912    // target; we rely on the MediaSink to handles the gap between the
   1913    // newCurrentTime and the real decoded samples' start time.
   1914    if (mSeekJob.mTarget->IsAccurate()) {
   1915      return seekTime;
   1916    }
   1917 
   1918    // For the fast seek, we update the newCurrentTime with the decoded audio
   1919    // and video samples, set it to be the one which is closet to the seekTime.
   1920    if (mSeekJob.mTarget->IsFast()) {
   1921      RefPtr<AudioData> audio = AudioQueue().PeekFront();
   1922      RefPtr<VideoData> video = VideoQueue().PeekFront();
   1923 
   1924      // A situation that both audio and video approaches the end.
   1925      if (!audio && !video) {
   1926        return seekTime;
   1927      }
   1928 
   1929      const int64_t audioStart =
   1930          audio ? audio->mTime.ToMicroseconds() : INT64_MAX;
   1931      const int64_t videoStart =
   1932          video ? video->mTime.ToMicroseconds() : INT64_MAX;
   1933      const int64_t audioGap = std::abs(audioStart - seekTime.ToMicroseconds());
   1934      const int64_t videoGap = std::abs(videoStart - seekTime.ToMicroseconds());
   1935      return TimeUnit::FromMicroseconds(audioGap <= videoGap ? audioStart
   1936                                                             : videoStart);
   1937    }
   1938 
   1939    MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types.");
   1940    return TimeUnit::Zero();
   1941  }
   1942 
   1943 protected:
   1944  void DemuxerSeek() {
   1945    // Request the demuxer to perform seek.
   1946    Reader()
   1947        ->Seek(mSeekJob.mTarget.ref())
   1948        ->Then(
   1949            OwnerThread(), __func__,
   1950            [this](const media::TimeUnit& aUnit) { OnSeekResolved(aUnit); },
   1951            [this](const SeekRejectValue& aReject) { OnSeekRejected(aReject); })
   1952        ->Track(mSeekRequest);
   1953  }
   1954 
   1955  void OnSeekResolved(media::TimeUnit) {
   1956    AUTO_PROFILER_LABEL("AccurateSeekingState::OnSeekResolved", MEDIA_PLAYBACK);
   1957    mSeekRequest.Complete();
   1958 
   1959    // We must decode the first samples of active streams, so we can determine
   1960    // the new stream time. So dispatch tasks to do that.
   1961    if (!mDoneVideoSeeking) {
   1962      RequestVideoData();
   1963    }
   1964    if (!mDoneAudioSeeking) {
   1965      RequestAudioData();
   1966    }
   1967  }
   1968 
   1969  void OnSeekRejected(const SeekRejectValue& aReject) {
   1970    AUTO_PROFILER_LABEL("AccurateSeekingState::OnSeekRejected", MEDIA_PLAYBACK);
   1971    mSeekRequest.Complete();
   1972 
   1973    if (aReject.mError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
   1974      SLOG("OnSeekRejected reason=WAITING_FOR_DATA type=%s",
   1975           MediaData::EnumValueToString(aReject.mType));
   1976      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA,
   1977                    !mMaster->IsRequestingAudioData());
   1978      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA,
   1979                    !mMaster->IsRequestingVideoData());
   1980      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA,
   1981                    !mMaster->IsWaitingAudioData());
   1982      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA,
   1983                    !mMaster->IsWaitingVideoData());
   1984 
   1985      // Fire 'waiting' to notify the player that we are waiting for data.
   1986      mMaster->mOnNextFrameStatus.Notify(
   1987          MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
   1988 
   1989      Reader()
   1990          ->WaitForData(aReject.mType)
   1991          ->Then(
   1992              OwnerThread(), __func__,
   1993              [this](MediaData::Type aType) {
   1994                AUTO_PROFILER_LABEL(
   1995                    "AccurateSeekingState::OnSeekRejected:WaitDataResolved",
   1996                    MEDIA_PLAYBACK);
   1997                SLOG("OnSeekRejected wait promise resolved");
   1998                mWaitRequest.Complete();
   1999                DemuxerSeek();
   2000              },
   2001              [this](const WaitForDataRejectValue& aRejection) {
   2002                AUTO_PROFILER_LABEL(
   2003                    "AccurateSeekingState::OnSeekRejected:WaitDataRejected",
   2004                    MEDIA_PLAYBACK);
   2005                SLOG("OnSeekRejected wait promise rejected");
   2006                mWaitRequest.Complete();
   2007                mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
   2008              })
   2009          ->Track(mWaitRequest);
   2010      return;
   2011    }
   2012 
   2013    if (aReject.mError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
   2014      if (!mDoneAudioSeeking) {
   2015        HandleEndOfAudioInternal();
   2016      }
   2017      if (!mDoneVideoSeeking) {
   2018        HandleEndOfVideoInternal();
   2019      }
   2020      MaybeFinishSeek();
   2021      return;
   2022    }
   2023 
   2024    MOZ_ASSERT(NS_FAILED(aReject.mError),
   2025               "Cancels should also disconnect mSeekRequest");
   2026    mMaster->DecodeError(aReject.mError);
   2027  }
   2028 
   2029  void RequestAudioData() {
   2030    MOZ_ASSERT(!mDoneAudioSeeking);
   2031    mMaster->RequestAudioData();
   2032  }
   2033 
   2034  virtual void RequestVideoData() {
   2035    MOZ_ASSERT(!mDoneVideoSeeking);
   2036    mMaster->RequestVideoData(media::TimeUnit());
   2037  }
   2038 
   2039  void AdjustFastSeekIfNeeded(MediaData* aSample) {
   2040    if (mSeekJob.mTarget->IsFast() &&
   2041        mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek &&
   2042        aSample->mTime < mCurrentTimeBeforeSeek) {
   2043      // We are doing a fastSeek, but we ended up *before* the previous
   2044      // playback position. This is surprising UX, so switch to an accurate
   2045      // seek and decode to the seek target. This is not conformant to the
   2046      // spec, fastSeek should always be fast, but until we get the time to
   2047      // change all Readers to seek to the keyframe after the currentTime
   2048      // in this case, we'll just decode forward. Bug 1026330.
   2049      mSeekJob.mTarget->SetType(SeekTarget::Accurate);
   2050    }
   2051  }
   2052 
   2053  nsresult DropAudioUpToSeekTarget(AudioData* aAudio) {
   2054    MOZ_ASSERT(aAudio && mSeekJob.mTarget->IsAccurate());
   2055 
   2056    if (mSeekJob.mTarget->GetTime() >= aAudio->GetEndTime()) {
   2057      // Our seek target lies after the frames in this AudioData. Don't
   2058      // push it onto the audio queue, and keep decoding forwards.
   2059      return NS_OK;
   2060    }
   2061 
   2062    if (aAudio->mTime > mSeekJob.mTarget->GetTime()) {
   2063      // The seek target doesn't lie in the audio block just after the last
   2064      // audio frames we've seen which were before the seek target. This
   2065      // could have been the first audio data we've seen after seek, i.e. the
   2066      // seek terminated after the seek target in the audio stream. Just
   2067      // abort the audio decode-to-target, the state machine will play
   2068      // silence to cover the gap. Typically this happens in poorly muxed
   2069      // files.
   2070      SLOGW("Audio not synced after seek, maybe a poorly muxed file?");
   2071      mMaster->PushAudio(aAudio);
   2072      mDoneAudioSeeking = true;
   2073      return NS_OK;
   2074    }
   2075 
   2076    bool ok = aAudio->SetTrimWindow(
   2077        {mSeekJob.mTarget->GetTime().ToBase(aAudio->mTime),
   2078         aAudio->GetEndTime()});
   2079    if (!ok) {
   2080      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
   2081    }
   2082 
   2083    MOZ_ASSERT(AudioQueue().GetSize() == 0,
   2084               "Should be the 1st sample after seeking");
   2085    mMaster->PushAudio(aAudio);
   2086    mDoneAudioSeeking = true;
   2087 
   2088    return NS_OK;
   2089  }
   2090 
   2091  nsresult DropVideoUpToSeekTarget(VideoData* aVideo) {
   2092    MOZ_ASSERT(aVideo);
   2093    SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]",
   2094         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
   2095    const auto target = GetSeekTarget();
   2096 
   2097    // If the frame end time is less than the seek target, we won't want
   2098    // to display this frame after the seek, so discard it.
   2099    if (target >= aVideo->GetEndTime()) {
   2100      SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64
   2101           "] target=%" PRId64,
   2102           aVideo->mTime.ToMicroseconds(),
   2103           aVideo->GetEndTime().ToMicroseconds(), target.ToMicroseconds());
   2104      PROFILER_MARKER_UNTYPED("MDSM::DropVideoUpToSeekTarget", MEDIA_PLAYBACK);
   2105      mFirstVideoFrameAfterSeek = aVideo;
   2106    } else {
   2107      if (target >= aVideo->mTime && aVideo->GetEndTime() >= target) {
   2108        // The seek target lies inside this frame's time slice. Adjust the
   2109        // frame's start time to match the seek target.
   2110        aVideo->UpdateTimestamp(target);
   2111      }
   2112      mFirstVideoFrameAfterSeek = nullptr;
   2113 
   2114      SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64
   2115           "] containing target=%" PRId64,
   2116           aVideo->mTime.ToMicroseconds(),
   2117           aVideo->GetEndTime().ToMicroseconds(), target.ToMicroseconds());
   2118 
   2119      MOZ_ASSERT(VideoQueue().GetSize() == 0,
   2120                 "Should be the 1st sample after seeking");
   2121      mMaster->PushVideo(aVideo);
   2122      mDoneVideoSeeking = true;
   2123    }
   2124 
   2125    return NS_OK;
   2126  }
   2127 
   2128  void HandleEndOfAudioInternal() {
   2129    MOZ_ASSERT(!mDoneAudioSeeking);
   2130    AudioQueue().Finish();
   2131    mDoneAudioSeeking = true;
   2132  }
   2133 
 // Video hit end-of-stream while we were still seeking: salvage the last
 // decoded frame (if any) so something can be displayed, then finish the
 // video queue and consider the video side of the seek complete.
 void HandleEndOfVideoInternal() {
   MOZ_ASSERT(!mDoneVideoSeeking);
   if (mFirstVideoFrameAfterSeek) {
     // Hit the end of stream. Move mFirstVideoFrameAfterSeek into
     // mSeekedVideoData so we have something to display after seeking.
     mMaster->PushVideo(mFirstVideoFrameAfterSeek);
   }
   VideoQueue().Finish();
   mDoneVideoSeeking = true;
 }
   2144 
   2145  void MaybeFinishSeek() {
   2146    if (mDoneAudioSeeking && mDoneVideoSeeking) {
   2147      SeekCompleted();
   2148    }
   2149  }
   2150 
   2151  /*
   2152   * Track the current seek promise made by the reader.
   2153   */
   2154  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mSeekRequest;
   2155 
   2156  /*
   2157   * Internal state.
   2158   */
   2159  media::TimeUnit mCurrentTimeBeforeSeek;
   2160  bool mDoneAudioSeeking = false;
   2161  bool mDoneVideoSeeking = false;
   2162  MozPromiseRequestHolder<WaitForDataPromise> mWaitRequest;
   2163 
   2164  // This temporarily stores the first frame we decode after we seek.
   2165  // This is so that if we hit end of stream while we're decoding to reach
   2166  // the seek target, we will still have a frame that we can display as the
   2167  // last frame in the media.
   2168  RefPtr<VideoData> mFirstVideoFrameAfterSeek;
   2169 
   2170 private:
   2171  virtual media::TimeUnit GetSeekTarget() const {
   2172    return mSeekJob.mTarget->GetTime();
   2173  }
   2174 };
   2175 
   2176 /*
   2177 * Remove samples from the queue until aCompare() returns false.
   2178 * aCompare A function object with the signature bool(int64_t) which returns
   2179 *          true for samples that should be removed.
   2180 */
   2181 template <typename Type, typename Function>
   2182 static void DiscardFrames(MediaQueue<Type>& aQueue, const Function& aCompare) {
   2183  while (aQueue.GetSize() > 0) {
   2184    if (aCompare(aQueue.PeekFront()->mTime.ToMicroseconds())) {
   2185      RefPtr<Type> releaseMe = aQueue.PopFront();
   2186      continue;
   2187    }
   2188    break;
   2189  }
   2190 }
   2191 
/**
* Purpose: implement seekToNextFrame() — advance playback to the first video
* frame whose start time is strictly after the current position, without
* resetting the decoders (frames already in the queue may satisfy the seek).
*/
class MediaDecoderStateMachine::NextFrameSeekingState
   : public MediaDecoderStateMachine::SeekingState {
public:
 explicit NextFrameSeekingState(Master* aPtr) : SeekingState(aPtr) {}

 State GetState() const override {
   return DECODER_STATE_SEEKING_NEXTFRAMESEEKING;
 }

 RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                         EventVisibility aVisibility) {
   MOZ_ASSERT(aSeekJob.mTarget->IsNextFrame());
   // Snapshot the position and duration now; DoSeek() and the async task
   // consult them later.
   mCurrentTime = mMaster->GetMediaTime();
   mDuration = mMaster->Duration();
   return SeekingState::Enter(std::move(aSeekJob), aVisibility);
 }

 void Exit() override {
   // Disconnect my async seek operation.
   if (mAsyncSeekTask) {
     mAsyncSeekTask->Cancel();
   }

   // Disconnect MediaDecoder.
   mSeekJob.RejectIfExists(__func__);
 }

 void HandleAudioDecoded(AudioData* aAudio) override {
   // Audio is not part of a next-frame seek; just queue it for playback.
   mMaster->PushAudio(aAudio);
 }

 void HandleVideoDecoded(VideoData* aVideo) override {
   MOZ_ASSERT(aVideo);
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   MOZ_ASSERT(NeedMoreVideo());

   if (aVideo->mTime > mCurrentTime) {
     // First frame past the current time — this is the "next frame".
     mMaster->PushVideo(aVideo);
     FinishSeek();
   } else {
     // Still at or before the current time; keep decoding.
     RequestVideoData();
   }
 }

 void HandleWaitingForAudio() override {
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   // We don't care about audio decode errors in this state which will be
   // handled by other states after seeking.
 }

 void HandleAudioCanceled() override {
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   // We don't care about audio decode errors in this state which will be
   // handled by other states after seeking.
 }

 void HandleEndOfAudio() override {
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   // We don't care about audio decode errors in this state which will be
   // handled by other states after seeking.
 }

 void HandleWaitingForVideo() override {
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   MOZ_ASSERT(NeedMoreVideo());
   mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
 }

 void HandleVideoCanceled() override {
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   MOZ_ASSERT(NeedMoreVideo());
   RequestVideoData();
 }

 void HandleEndOfVideo() override {
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   MOZ_ASSERT(NeedMoreVideo());
   // No more frames: the seek ends at the end of the media (see
   // UpdateSeekTargetTime()).
   VideoQueue().Finish();
   FinishSeek();
 }

 void HandleAudioWaited(MediaData::Type aType) override {
   // We don't care about audio in this state.
 }

 void HandleVideoWaited(MediaData::Type aType) override {
   MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
   MOZ_ASSERT(NeedMoreVideo());
   RequestVideoData();
 }

 TimeUnit CalculateNewCurrentTime() const override {
   // The HTMLMediaElement.currentTime should be updated to the seek target
   // which has been updated to the next frame's time.
   return mSeekJob.mTarget->GetTime();
 }

 void DoSeek() override {
   mMaster->StopMediaSink();

   // Drop everything at or before the current position; whatever remains at
   // the front of the queue (if anything) is the next frame.
   auto currentTime = mCurrentTime;
   DiscardFrames(VideoQueue(), [currentTime](int64_t aSampleTime) {
     return aSampleTime <= currentTime.ToMicroseconds();
   });

   // If there is a pending video request, finish the seeking if we don't need
   // more data, or wait for HandleVideoDecoded() to finish seeking.
   if (mMaster->IsRequestingVideoData()) {
     if (!NeedMoreVideo()) {
       FinishSeek();
     }
     return;
   }

   // Otherwise, we need to do the seek operation asynchronously for a special
   // case: for a video that has no data at all, the 1st seekToNextFrame()
   // operation reaches the end of the media. If we did the seek operation
   // synchronously, we would immediately resolve the SeekPromise in mSeekJob
   // and then switch to the CompletedState which dispatches an "ended" event.
   // However, the ThenValue of the SeekPromise has not yet been
   // set, so the promise resolving is postponed and then the JS developer
   // receives the "ended" event before the seek promise is resolved. An
   // asynchronous seek operation helps to solve this issue since while the
   // seek is actually performed, the ThenValue of SeekPromise has already been
   // set so that it won't be postponed.
   RefPtr<Runnable> r = mAsyncSeekTask = new AysncNextFrameSeekTask(this);
   nsresult rv = OwnerThread()->Dispatch(r.forget());
   MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   (void)rv;
 }

private:
 void DoSeekInternal() {
   // We don't need to discard frames to the mCurrentTime here because we have
   // done it at DoSeek() and any video data received in between either
   // finishes the seek operation or be discarded, see HandleVideoDecoded().

   if (!NeedMoreVideo()) {
     FinishSeek();
   } else if (!mMaster->IsTrackingVideoData()) {
     RequestVideoData();
   }
 }

 // NOTE: "Aysnc" is a long-standing misspelling of "Async"; the name is kept
 // because it also appears in the runnable name and profiler label strings.
 class AysncNextFrameSeekTask : public Runnable {
  public:
   explicit AysncNextFrameSeekTask(NextFrameSeekingState* aStateObject)
       : Runnable(
             "MediaDecoderStateMachine::NextFrameSeekingState::"
             "AysncNextFrameSeekTask"),
         mStateObj(aStateObject) {}

   // Called from the owning state's Exit() so a late-running task is a no-op.
   void Cancel() { mStateObj = nullptr; }

   NS_IMETHOD Run() override {
     if (mStateObj) {
       AUTO_PROFILER_LABEL("AysncNextFrameSeekTask::Run", MEDIA_PLAYBACK);
       mStateObj->DoSeekInternal();
     }
     return NS_OK;
   }

  private:
   // Raw pointer is safe: the state cancels the task before it goes away.
   NextFrameSeekingState* mStateObj;
 };

 void RequestVideoData() { mMaster->RequestVideoData(media::TimeUnit()); }

 bool NeedMoreVideo() const {
   // Need to request video when we have none and video queue is not finished.
   return VideoQueue().GetSize() == 0 && !VideoQueue().IsFinished();
 }

 // Update the seek target's time before resolving this seek task, the updated
 // time will be used in the MDSM::SeekCompleted() to update the MDSM's
 // position.
 void UpdateSeekTargetTime() {
   RefPtr<VideoData> data = VideoQueue().PeekFront();
   if (data) {
     mSeekJob.mTarget->SetTime(data->mTime);
   } else {
     MOZ_ASSERT(VideoQueue().AtEndOfStream());
     mSeekJob.mTarget->SetTime(mDuration);
   }
 }

 void FinishSeek() {
   MOZ_ASSERT(!NeedMoreVideo());
   UpdateSeekTargetTime();
   // Drop audio that now lies before the (updated) seek target.
   auto time = mSeekJob.mTarget->GetTime().ToMicroseconds();
   DiscardFrames(AudioQueue(),
                 [time](int64_t aSampleTime) { return aSampleTime < time; });
   SeekCompleted();
 }

 /*
  * Internal state.
  */
 // Media time and duration snapshotted in Enter().
 TimeUnit mCurrentTime;
 TimeUnit mDuration;
 RefPtr<AysncNextFrameSeekTask> mAsyncSeekTask;
};
   2394 
/**
* Purpose: leave dormant via a suppressed accurate seek to the pending
* position first, then run the user's next-frame seek; see
* DormantState::HandleSeek().
*/
class MediaDecoderStateMachine::NextFrameSeekingFromDormantState
   : public MediaDecoderStateMachine::AccurateSeekingState {
public:
 explicit NextFrameSeekingFromDormantState(Master* aPtr)
     : AccurateSeekingState(aPtr) {}

 State GetState() const override { return DECODER_STATE_SEEKING_FROMDORMANT; }

 // aCurrentSeekJob: the accurate seek that restores the pre-dormant position.
 // aFutureSeekJob: the next-frame seek to run once the first one completes;
 // its promise is what we hand back to the caller.
 RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aCurrentSeekJob,
                                         SeekJob&& aFutureSeekJob) {
   mFutureSeekJob = std::move(aFutureSeekJob);

   AccurateSeekingState::Enter(std::move(aCurrentSeekJob),
                               EventVisibility::Suppressed);

   // Once seekToNextFrame() is called, we assume the user is likely to keep
   // calling seekToNextFrame() repeatedly, and so, we should prevent the MDSM
   // from getting into Dormant state.
   mMaster->mMinimizePreroll = false;

   return mFutureSeekJob.mPromise.Ensure(__func__);
 }

 void Exit() override {
   mFutureSeekJob.RejectIfExists(__func__);
   AccurateSeekingState::Exit();
 }

private:
 SeekJob mFutureSeekJob;

 // We don't want to transition to DecodingState once this seek completes,
 // instead, we transition to NextFrameSeekingState.
 void GoToNextState() override {
   SetState<NextFrameSeekingState>(std::move(mFutureSeekJob),
                                   EventVisibility::Observable);
 }
};
   2433 
/**
* Purpose: seek only the video track, leaving the audio decoder untouched
* (audio data that arrives meanwhile is queued as-is). Used e.g. when
* resuming video decoding after suspension; see HandleResumeVideoDecoding().
*/
class MediaDecoderStateMachine::VideoOnlySeekingState
   : public MediaDecoderStateMachine::AccurateSeekingState {
public:
 explicit VideoOnlySeekingState(Master* aPtr) : AccurateSeekingState(aPtr) {}

 State GetState() const override { return DECODER_STATE_SEEKING_VIDEOONLY; }

 RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                         EventVisibility aVisibility) {
   MOZ_ASSERT(aSeekJob.mTarget->IsVideoOnly());
   MOZ_ASSERT(aVisibility == EventVisibility::Suppressed);

   RefPtr<MediaDecoder::SeekPromise> p =
       AccurateSeekingState::Enter(std::move(aSeekJob), aVisibility);

   // Dispatch a mozvideoonlyseekbegin event so the UI can react to the
   // start of the video-only seek.
   mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::VideoOnlySeekBegin);

   return p;
 }

 void Exit() override {
   // We are completing or discarding this video-only seek operation now,
   // dispatch an event so that the UI can change in response to the end
   // of video-only seek.
   mMaster->mOnPlaybackEvent.Notify(
       MediaPlaybackEvent::VideoOnlySeekCompleted);

   AccurateSeekingState::Exit();
 }

 void HandleAudioDecoded(AudioData* aAudio) override {
   MOZ_ASSERT(mDoneAudioSeeking && !mDoneVideoSeeking,
              "Seek shouldn't be finished");
   MOZ_ASSERT(aAudio);

   // Video-only seek doesn't reset audio decoder. There might be pending audio
   // requests when AccurateSeekTask::Seek() begins. We will just store the
   // data without checking |mDiscontinuity| or calling
   // DropAudioUpToSeekTarget().
   mMaster->PushAudio(aAudio);
 }

 void HandleWaitingForAudio() override {}

 void HandleAudioCanceled() override {}

 void HandleEndOfAudio() override {}

 void HandleAudioWaited(MediaData::Type aType) override {
   MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
              "Seek shouldn't be finished");

   // Ignore pending requests from video-only seek.
 }

 void DoSeek() override {
   // TODO: keep decoding audio.
   mDoneAudioSeeking = true;
   mDoneVideoSeeking = !Info().HasVideo();

   // Preserve the queue's looping offset across the reset.
   const auto offset = VideoQueue().GetOffset();
   mMaster->ResetDecode(TrackInfo::kVideoTrack);

   // Entering video-only state and we've looped at least once before, so we
   // need to set offset in order to let new video frames catch up with the
   // clock time.
   if (offset != media::TimeUnit::Zero()) {
     VideoQueue().SetOffset(offset);
   }

   DemuxerSeek();
 }

protected:
 // Allow skip-to-next-key-frame to kick in if we fall behind the current
 // playback position so decoding has a better chance to catch up.
 void RequestVideoData() override {
   MOZ_ASSERT(!mDoneVideoSeeking);

   // Use the sink's clock while playing; otherwise fall back to media time.
   auto clock = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock()
                                                 : mMaster->GetMediaTime();
   mMaster->AdjustByLooping(clock);
   const auto& nextKeyFrameTime = GetNextKeyFrameTime();

   auto threshold = clock;

   if (nextKeyFrameTime.IsValid() &&
       clock >= (nextKeyFrameTime - sSkipToNextKeyFrameThreshold)) {
     threshold = nextKeyFrameTime;
   }

   mMaster->RequestVideoData(threshold);
 }

private:
 // Trigger skip to next key frame if the current playback position is very
 // close the next key frame's time.
 static constexpr TimeUnit sSkipToNextKeyFrameThreshold =
     TimeUnit::FromMicroseconds(5000);

 // If the media is playing, drop video until catch up playback position.
 media::TimeUnit GetSeekTarget() const override {
   auto target = mMaster->mMediaSink->IsStarted()
                     ? mMaster->GetClock()
                     : mSeekJob.mTarget->GetTime();
   mMaster->AdjustByLooping(target);
   return target;
 }

 media::TimeUnit GetNextKeyFrameTime() const {
   // We only call this method in RequestVideoData() and we only request video
   // data if we haven't done video seeking.
   MOZ_DIAGNOSTIC_ASSERT(!mDoneVideoSeeking);
   MOZ_DIAGNOSTIC_ASSERT(mMaster->VideoQueue().GetSize() == 0);

   if (mFirstVideoFrameAfterSeek) {
     return mFirstVideoFrameAfterSeek->NextKeyFrameTime();
   }

   return TimeUnit::Invalid();
 }
};
   2558 
// Out-of-class definition of the static constexpr member, required by
// pre-C++17 ODR rules when the member is odr-used (redundant but harmless
// under C++17, where constexpr static members are implicitly inline).
constexpr TimeUnit MediaDecoderStateMachine::VideoOnlySeekingState::
   sSkipToNextKeyFrameThreshold;
   2561 
   2562 RefPtr<MediaDecoder::SeekPromise>
   2563 MediaDecoderStateMachine::DormantState::HandleSeek(const SeekTarget& aTarget) {
   2564  if (aTarget.IsNextFrame()) {
   2565    // NextFrameSeekingState doesn't reset the decoder unlike
   2566    // AccurateSeekingState. So we first must come out of dormant by seeking to
   2567    // mPendingSeek and continue later with the NextFrameSeek
   2568    SLOG("Changed state to SEEKING (to %" PRId64 ")",
   2569         aTarget.GetTime().ToMicroseconds());
   2570    SeekJob seekJob;
   2571    seekJob.mTarget = Some(aTarget);
   2572    return StateObject::SetState<NextFrameSeekingFromDormantState>(
   2573        std::move(mPendingSeek), std::move(seekJob));
   2574  }
   2575 
   2576  return StateObject::HandleSeek(aTarget);
   2577 }
   2578 
   2579 /**
   2580 * Purpose: stop playback until enough data is decoded to continue playback.
   2581 *
   2582 * Transition to:
   2583 *   SEEKING if any seek request.
   2584 *   SHUTDOWN if any decode error.
   2585 *   COMPLETED when having decoded all audio/video data.
   2586 *   DECODING/LOOPING_DECODING when having decoded enough data to continue
   2587 * playback.
   2588 */
class MediaDecoderStateMachine::BufferingState
   : public MediaDecoderStateMachine::StateObject {
public:
 explicit BufferingState(Master* aPtr) : StateObject(aPtr) {}

 // Stop playback, kick off decode requests for whichever tracks are short
 // of data, and schedule a periodic Step() to re-evaluate buffering.
 void Enter() {
   if (mMaster->IsPlaying()) {
     mMaster->StopPlayback();
   }

   mBufferingStart = TimeStamp::Now();
   // Playback is now stopped, so there is no need to connect to the queues'
   // PopFrontEvent()s, but frames may have been recently popped before the
   // transition from DECODING.
   if (mMaster->IsAudioDecoding() && !mMaster->HaveEnoughDecodedAudio() &&
       !mMaster->IsTrackingAudioData()) {
     mMaster->RequestAudioData();
   }
   if (mMaster->IsVideoDecoding() && !mMaster->HaveEnoughDecodedVideo() &&
       !mMaster->IsTrackingVideoData()) {
     mMaster->RequestVideoData(TimeUnit());
   }

   // Re-check buffering progress roughly once per second.
   mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S));
   mMaster->mOnNextFrameStatus.Notify(
       MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING);
 }

 // Defined out of line; decides when to leave BUFFERING.
 void Step() override;

 State GetState() const override { return DECODER_STATE_BUFFERING; }

 void HandleAudioDecoded(AudioData* aAudio) override {
   mMaster->PushAudio(aAudio);
   if (!mMaster->HaveEnoughDecodedAudio()) {
     mMaster->RequestAudioData();
   }
   // This might be the sample we need to exit buffering.
   // Schedule Step() to check it.
   mMaster->ScheduleStateMachine();
 }

 void HandleVideoDecoded(VideoData* aVideo) override {
   mMaster->PushVideo(aVideo);
   if (!mMaster->HaveEnoughDecodedVideo()) {
     mMaster->RequestVideoData(media::TimeUnit());
   }
   // This might be the sample we need to exit buffering.
   // Schedule Step() to check it.
   mMaster->ScheduleStateMachine();
 }

 void HandleAudioCanceled() override { mMaster->RequestAudioData(); }

 void HandleVideoCanceled() override {
   mMaster->RequestVideoData(media::TimeUnit());
 }

 void HandleWaitingForAudio() override {
   mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
 }

 void HandleWaitingForVideo() override {
   mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
 }

 void HandleAudioWaited(MediaData::Type aType) override {
   mMaster->RequestAudioData();
 }

 void HandleVideoWaited(MediaData::Type aType) override {
   mMaster->RequestVideoData(media::TimeUnit());
 }

 // Defined out of line.
 void HandleEndOfAudio() override;
 void HandleEndOfVideo() override;

 void HandleVideoSuspendTimeout() override {
   // No video, so nothing to suspend.
   if (!mMaster->HasVideo()) {
     return;
   }

   mMaster->mVideoDecodeSuspended = true;
   mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend);
   Reader()->SetVideoBlankDecode(true);
 }

private:
 // When we entered BUFFERING; consulted by Step() (defined out of line).
 TimeStamp mBufferingStart;

 // The maximum number of seconds we spend buffering when we are short on
 // unbuffered data.
 const uint32_t mBufferingWait = 15;
};
   2684 
   2685 /**
   2686 * Purpose: play all the decoded data and fire the 'ended' event.
   2687 *
   2688 * Transition to:
   2689 *   SEEKING if any seek request.
   2690 *   LOOPING_DECODING if MDSM enable looping.
   2691 */
class MediaDecoderStateMachine::CompletedState
   : public MediaDecoderStateMachine::StateObject {
public:
 explicit CompletedState(Master* aPtr) : StateObject(aPtr) {}

 void Enter() {
   // On Android, the life cycle of graphic buffer is equal to Android's codec,
   // we couldn't release it if we still need to render the frame.
#ifndef MOZ_WIDGET_ANDROID
   if (!mMaster->mLooping) {
     // We've decoded all samples.
     // We don't need decoders anymore if not looping.
     Reader()->ReleaseResources();
   }
#endif
   // A "next frame" is still available if either enabled track has not yet
   // finished playing out its decoded data.
   bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted) &&
                       (!mMaster->HasVideo() || !mMaster->mVideoCompleted);

   mMaster->mOnNextFrameStatus.Notify(
       hasNextFrame ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
                    : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);

   Step();
 }

 void Exit() override { mSentPlaybackEndedEvent = false; }

 void Step() override {
   if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
       mMaster->IsPlaying()) {
     mMaster->StopPlayback();
   }

   // Play the remaining media. We want to run AdvanceFrame() at least
   // once to ensure the current playback position is advanced to the
   // end of the media, and so that we update the readyState.
   if ((mMaster->HasVideo() && !mMaster->mVideoCompleted) ||
       (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
     // Start playback if necessary to play the remaining media.
     mMaster->MaybeStartPlayback();
     mMaster->UpdatePlaybackPositionPeriodically();
     MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
                "Must have timer scheduled");
     return;
   }

   // StopPlayback in order to reset the IsPlaying() state so audio
   // is restarted correctly.
   mMaster->StopPlayback();

   if (!mSentPlaybackEndedEvent) {
     auto clockTime =
         std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime());
     // Correct the time over the end once looping was turned on.
     mMaster->AdjustByLooping(clockTime);
     if (mMaster->mDuration.Ref()->IsInfinite()) {
       // The advertised duration was infinite; now that playback has reached
       // the end we know the actual (finite) duration.
       mMaster->mDuration = Some(clockTime);
       DDLOGEX(mMaster, DDLogCategory::Property, "duration_us",
               mMaster->mDuration.Ref()->ToMicroseconds());
     }
     mMaster->UpdatePlaybackPosition(clockTime);

     // Ensure readyState is updated before firing the 'ended' event.
     mMaster->mOnNextFrameStatus.Notify(
         MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);

     mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::PlaybackEnded);

     mSentPlaybackEndedEvent = true;

     // MediaSink::GetEndTime() must be called before stopping playback.
     mMaster->StopMediaSink();
   }
 }

 State GetState() const override { return DECODER_STATE_COMPLETED; }

 void HandleLoopingChanged() override {
   // Looping was enabled after completion: go back to decoding so playback
   // can wrap around.
   if (mMaster->mLooping) {
     SetDecodingState();
   }
 }

 void HandleAudioCaptured() override {
   // MediaSink is changed. Schedule Step() to check if we can start playback.
   mMaster->ScheduleStateMachine();
 }

 void HandleVideoSuspendTimeout() override {
   // Do nothing since no decoding is going on.
 }

 void HandleResumeVideoDecoding(const TimeUnit&) override {
   // Resume the video decoder and seek to the last video frame.
   // This triggers a video-only seek which won't update the playback position.
   auto target = mMaster->mDecodedVideoEndTime;
   mMaster->AdjustByLooping(target);
   StateObject::HandleResumeVideoDecoding(target);
 }

 void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override {
   if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
     // Schedule Step() to check if we can start playback.
     mMaster->ScheduleStateMachine();
   }
 }

private:
 // Ensures the 'ended' notification fires at most once per completion.
 bool mSentPlaybackEndedEvent = false;
};
   2803 
   2804 /**
   2805 * Purpose: release all resources allocated by MDSM.
   2806 *
   2807 * Transition to:
   2808 *   None since this is the final state.
   2809 *
   2810 * Transition from:
   2811 *   Any states other than SHUTDOWN.
   2812 */
class MediaDecoderStateMachine::ShutdownState
   : public MediaDecoderStateMachine::StateObject {
public:
 explicit ShutdownState(Master* aPtr) : StateObject(aPtr) {}

 // Defined out of line; performs the actual teardown and returns the promise
 // callers wait on.
 RefPtr<ShutdownPromise> Enter();

 // Terminal state: every transition or request below is a logic error and
 // crashes diagnostic builds.
 void Exit() override {
   MOZ_DIAGNOSTIC_CRASH("Shouldn't escape the SHUTDOWN state.");
 }

 State GetState() const override { return DECODER_STATE_SHUTDOWN; }

 RefPtr<MediaDecoder::SeekPromise> HandleSeek(
     const SeekTarget& aTarget) override {
   MOZ_DIAGNOSTIC_CRASH("Can't seek in shutdown state.");
   // In release builds, reject the seek instead of crashing.
   return MediaDecoder::SeekPromise::CreateAndReject(true, __func__);
 }

 RefPtr<ShutdownPromise> HandleShutdown() override {
   MOZ_DIAGNOSTIC_CRASH("Already shutting down.");
   return nullptr;
 }

 void HandleVideoSuspendTimeout() override {
   MOZ_DIAGNOSTIC_CRASH("Already shutting down.");
 }

 void HandleResumeVideoDecoding(const TimeUnit&) override {
   MOZ_DIAGNOSTIC_CRASH("Already shutting down.");
 }
};
   2845 
   2846 RefPtr<MediaDecoder::SeekPromise>
   2847 MediaDecoderStateMachine::StateObject::HandleSeek(const SeekTarget& aTarget) {
   2848  SLOG("Changed state to SEEKING (to %" PRId64 ")",
   2849       aTarget.GetTime().ToMicroseconds());
   2850  SeekJob seekJob;
   2851  seekJob.mTarget = Some(aTarget);
   2852  return SetSeekingState(std::move(seekJob), EventVisibility::Observable);
   2853 }
   2854 
RefPtr<ShutdownPromise>
MediaDecoderStateMachine::StateObject::HandleShutdown() {
 // Default shutdown handling: unconditionally enter the terminal state.
 return SetState<ShutdownState>();
}
   2859 
// Re-enable the (blank-decode) video decoder and catch it up to aTarget via a
// suppressed video-only seek.
void MediaDecoderStateMachine::StateObject::HandleResumeVideoDecoding(
   const TimeUnit& aTarget) {
 MOZ_ASSERT(mMaster->mVideoDecodeSuspended);

 mMaster->mVideoDecodeSuspended = false;
 mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::ExitVideoSuspend);
 Reader()->SetVideoBlankDecode(false);

 // Start video-only seek to the current time.
 SeekJob seekJob;

 // We use fastseek to optimize the resuming time.
 // FastSeek is only used for video-only media since we don't need to worry
 // about A/V sync.
 // Don't use fastSeek if we want to seek to the end because it might seek to a
 // keyframe before the last frame (if the last frame itself is not a keyframe)
 // and we always want to present the final frame to the user when seeking to
 // the end.
 const auto type = mMaster->HasAudio() || aTarget == mMaster->Duration()
                       ? SeekTarget::Type::Accurate
                       : SeekTarget::Type::PrevSyncPoint;

 seekJob.mTarget.emplace(aTarget, type, SeekTarget::Track::VideoOnly);
 SLOG("video-only seek target=%" PRId64 ", current time=%" PRId64,
      aTarget.ToMicroseconds(), mMaster->GetMediaTime().ToMicroseconds());

 SetSeekingState(std::move(seekJob), EventVisibility::Suppressed);
}
   2888 
   2889 RefPtr<MediaDecoder::SeekPromise>
   2890 MediaDecoderStateMachine::StateObject::SetSeekingState(
   2891    SeekJob&& aSeekJob, EventVisibility aVisibility) {
   2892  if (aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast()) {
   2893    if (aSeekJob.mTarget->IsVideoOnly()) {
   2894      return SetState<VideoOnlySeekingState>(std::move(aSeekJob), aVisibility);
   2895    }
   2896    return SetState<AccurateSeekingState>(std::move(aSeekJob), aVisibility);
   2897  }
   2898 
   2899  if (aSeekJob.mTarget->IsNextFrame()) {
   2900    return SetState<NextFrameSeekingState>(std::move(aSeekJob), aVisibility);
   2901  }
   2902 
   2903  MOZ_ASSERT_UNREACHABLE("Unknown SeekTarget::Type.");
   2904  return nullptr;
   2905 }
   2906 
   2907 void MediaDecoderStateMachine::StateObject::SetDecodingState() {
   2908  if (mMaster->IsInSeamlessLooping()) {
   2909    SetState<LoopingDecodingState>();
   2910    return;
   2911  }
   2912  SetState<DecodingState>();
   2913 }
   2914 
// Metadata arrived from the reader: record seekability and duration, notify
// listeners, decide whether seamless looping is allowed, then move on to
// decoding the first frame.
void MediaDecoderStateMachine::DecodeMetadataState::OnMetadataRead(
   MetadataHolder&& aMetadata) {
 mMetadataRequest.Complete();

 AUTO_PROFILER_LABEL("DecodeMetadataState::OnMetadataRead", MEDIA_PLAYBACK);
 mMaster->mInfo.emplace(*aMetadata.mInfo);
 mMaster->mMediaSeekable = Info().mMediaSeekable;
 mMaster->mMediaSeekableOnlyInBufferedRanges =
     Info().mMediaSeekableOnlyInBufferedRanges;

 // Duration resolution order: explicit metadata duration, then one derived
 // from the unadjusted end time minus the start time, then infinity.
 if (Info().mMetadataDuration.isSome()) {
   mMaster->mDuration = Info().mMetadataDuration;
 } else if (Info().mUnadjustedMetadataEndTime.isSome()) {
   const TimeUnit unadjusted = Info().mUnadjustedMetadataEndTime.ref();
   const TimeUnit adjustment = Info().mStartTime;
   SLOG("No metadata duration, calculate one. unadjusted=%" PRId64
        ", adjustment=%" PRId64,
        unadjusted.ToMicroseconds(), adjustment.ToMicroseconds());
   mMaster->mInfo->mMetadataDuration.emplace(unadjusted - adjustment);
   mMaster->mDuration = Info().mMetadataDuration;
 }

 // If we don't know the duration by this point, we assume infinity, per spec.
 if (mMaster->mDuration.Ref().isNothing()) {
   mMaster->mDuration = Some(TimeUnit::FromInfinity());
 }

 DDLOGEX(mMaster, DDLogCategory::Property, "duration_us",
         mMaster->mDuration.Ref()->ToMicroseconds());

 if (mMaster->HasVideo()) {
   SLOG("Video decode HWAccel=%d videoQueueSize=%d",
        Reader()->VideoIsHardwareAccelerated(),
        mMaster->GetAmpleVideoFrames());
 }

 MOZ_ASSERT(mMaster->mDuration.Ref().isSome());
 SLOG("OnMetadataRead, duration=%" PRId64,
      mMaster->mDuration.Ref()->ToMicroseconds());

 mMaster->mMetadataLoadedEvent.Notify(std::move(aMetadata.mInfo),
                                      std::move(aMetadata.mTags),
                                      MediaDecoderEventVisibility::Observable);

 // Check whether the media satisfies the requirement of seamless looping.
 // TODO : after we ensure video seamless looping is stable enough, then we can
 // remove this to make the condition always true.
 mMaster->mSeamlessLoopingAllowed = StaticPrefs::media_seamless_looping();
 if (mMaster->HasVideo()) {
   // Video uses its own (more conservative) pref.
   mMaster->mSeamlessLoopingAllowed =
       StaticPrefs::media_seamless_looping_video();
 }

 SetState<DecodingFirstFrameState>();
}
   2970 
   2971 void MediaDecoderStateMachine::DormantState::HandlePlayStateChanged(
   2972    MediaDecoder::PlayState aPlayState) {
   2973  if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
   2974    // Exit dormant when the user wants to play.
   2975    MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
   2976    SetSeekingState(std::move(mPendingSeek), EventVisibility::Suppressed);
   2977  }
   2978 }
   2979 
   2980 void MediaDecoderStateMachine::DecodingFirstFrameState::Enter() {
   2981  // Transition to DECODING if we've decoded first frames.
   2982  if (mMaster->mSentFirstFrameLoadedEvent) {
   2983    SetDecodingState();
   2984    return;
   2985  }
   2986 
   2987  MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
   2988 
   2989  // Dispatch tasks to decode first frames.
   2990  if (mMaster->HasAudio()) {
   2991    mMaster->RequestAudioData();
   2992  }
   2993  if (mMaster->HasVideo()) {
   2994    mMaster->RequestVideoData(media::TimeUnit());
   2995  }
   2996 }
   2997 
   2998 void MediaDecoderStateMachine::DecodingFirstFrameState::
   2999    MaybeFinishDecodeFirstFrame() {
   3000  MOZ_ASSERT(!mMaster->mSentFirstFrameLoadedEvent);
   3001 
   3002  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
   3003      (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
   3004    return;
   3005  }
   3006 
   3007  mMaster->FinishDecodeFirstFrame();
   3008  if (mPendingSeek.Exists()) {
   3009    SetSeekingState(std::move(mPendingSeek), EventVisibility::Observable);
   3010  } else {
   3011    SetDecodingState();
   3012  }
   3013 }
   3014 
// Entry point of the DECODING state. Handles suspended-video resume/timeout,
// detects already-finished playback, wires the queue-pop listeners that keep
// the decode pipeline fed, and schedules the state machine loop.
void MediaDecoderStateMachine::DecodingState::Enter() {
  MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);

  // Video decoding was suspended but we are back in normal mode: resume the
  // video decoder before doing anything else.
  if (mMaster->mVideoDecodeSuspended &&
      mMaster->mVideoDecodeMode == VideoDecodeMode::Normal) {
    StateObject::HandleResumeVideoDecoding(mMaster->GetMediaTime());
    return;
  }

  if (mMaster->mVideoDecodeMode == VideoDecodeMode::Suspend &&
      !mMaster->mVideoDecodeSuspendTimer.IsScheduled() &&
      !mMaster->mVideoDecodeSuspended) {
    // If the VideoDecodeMode is Suspend and the timer is not scheduled, it
    // means the timer has timed out and we should suspend video decoding now
    // if necessary.
    HandleVideoSuspendTimeout();
  }

  // If we're in the normal decoding mode and the decoding has finished, then we
  // should go to `completed` state because we don't need to decode anything
  // later. However, if we're in the seamless decoding mode, we will restart
  // decoding ASAP so we can still stay in `decoding` state.
  if (!mMaster->IsVideoDecoding() && !mMaster->IsAudioDecoding() &&
      !mMaster->IsInSeamlessLooping()) {
    SetState<CompletedState>();
    return;
  }

  // Whenever a sample is popped from a queue, top the queue back up if it has
  // fallen below its "enough data" threshold.
  mOnAudioPopped =
      AudioQueue().PopFrontEvent().Connect(OwnerThread(), [this]() {
        AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnAudioPopped",
                            MEDIA_PLAYBACK);
        if (mMaster->IsAudioDecoding() && !mMaster->HaveEnoughDecodedAudio()) {
          EnsureAudioDecodeTaskQueued();
        }
      });
  mOnVideoPopped =
      VideoQueue().PopFrontEvent().Connect(OwnerThread(), [this]() {
        AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnVideoPopped",
                            MEDIA_PLAYBACK);
        if (mMaster->IsVideoDecoding() && !mMaster->HaveEnoughDecodedVideo()) {
          EnsureVideoDecodeTaskQueued();
        }
      });

  mMaster->mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE);

  mDecodeStartTime = TimeStamp::Now();

  MaybeStopPrerolling();

  // Ensure that we've got tasks enqueued to decode data if we need to.
  DispatchDecodeTasksIfNeeded();

  mMaster->ScheduleStateMachine();

  // Will enter dormant when playback is paused for a while.
  if (mMaster->mPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
    StartDormantTimer();
  }
}
   3076 
   3077 void MediaDecoderStateMachine::DecodingState::Step() {
   3078  if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
   3079      mMaster->IsPlaying()) {
   3080    // We're playing, but the element/decoder is in paused state. Stop
   3081    // playing!
   3082    mMaster->StopPlayback();
   3083  }
   3084 
   3085  // Start playback if necessary so that the clock can be properly queried.
   3086  if (!mIsPrerolling) {
   3087    mMaster->MaybeStartPlayback();
   3088  }
   3089 
   3090  mMaster->UpdatePlaybackPositionPeriodically();
   3091  MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
   3092             "Must have timer scheduled");
   3093  if (IsBufferingAllowed()) {
   3094    MaybeStartBuffering();
   3095  }
   3096 }
   3097 
   3098 void MediaDecoderStateMachine::DecodingState::HandleEndOfAudio() {
   3099  AudioQueue().Finish();
   3100  if (!mMaster->IsVideoDecoding()) {
   3101    SetState<CompletedState>();
   3102  } else {
   3103    MaybeStopPrerolling();
   3104  }
   3105 }
   3106 
   3107 void MediaDecoderStateMachine::DecodingState::HandleEndOfVideo() {
   3108  VideoQueue().Finish();
   3109  if (!mMaster->IsAudioDecoding()) {
   3110    SetState<CompletedState>();
   3111  } else {
   3112    MaybeStopPrerolling();
   3113  }
   3114 }
   3115 
   3116 void MediaDecoderStateMachine::DecodingState::DispatchDecodeTasksIfNeeded() {
   3117  if (mMaster->IsAudioDecoding() && !mMaster->mMinimizePreroll &&
   3118      !mMaster->HaveEnoughDecodedAudio()) {
   3119    EnsureAudioDecodeTaskQueued();
   3120  }
   3121 
   3122  if (mMaster->IsVideoDecoding() && !mMaster->mMinimizePreroll &&
   3123      !mMaster->HaveEnoughDecodedVideo()) {
   3124    EnsureVideoDecodeTaskQueued();
   3125  }
   3126 }
   3127 
   3128 void MediaDecoderStateMachine::DecodingState::EnsureAudioDecodeTaskQueued() {
   3129  if (!mMaster->IsAudioDecoding() || mMaster->IsTrackingAudioData()) {
   3130    return;
   3131  }
   3132  mMaster->RequestAudioData();
   3133 }
   3134 
   3135 void MediaDecoderStateMachine::DecodingState::EnsureVideoDecodeTaskQueued() {
   3136  if (!mMaster->IsVideoDecoding() || mMaster->IsTrackingVideoData()) {
   3137    return;
   3138  }
   3139  mMaster->RequestVideoData(mMaster->GetMediaTime(),
   3140                            ShouldRequestNextKeyFrame());
   3141 }
   3142 
   3143 void MediaDecoderStateMachine::DecodingState::MaybeStartBuffering() {
   3144  // Buffering makes senses only after decoding first frames.
   3145  MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
   3146 
   3147  // Don't enter buffering when MediaDecoder is not playing.
   3148  if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
   3149    return;
   3150  }
   3151 
   3152  // Don't enter buffering while prerolling so that the decoder has a chance to
   3153  // enqueue some decoded data before we give up and start buffering.
   3154  if (!mMaster->IsPlaying()) {
   3155    return;
   3156  }
   3157 
   3158  // Note we could have a wait promise pending when playing non-MSE EME.
   3159  if (mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData()) {
   3160    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
   3161                         "OutOfDecodedAudio");
   3162    SLOG("Enter buffering due to out of decoded audio");
   3163    SetState<BufferingState>();
   3164    return;
   3165  }
   3166  if (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData()) {
   3167    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
   3168                         "OutOfDecodedVideo");
   3169    SLOG("Enter buffering due to out of decoded video");
   3170    SetState<BufferingState>();
   3171    return;
   3172  }
   3173 
   3174  if (Reader()->UseBufferingHeuristics() && mMaster->HasLowDecodedData() &&
   3175      mMaster->HasLowBufferedData() && !mMaster->mCanPlayThrough) {
   3176    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
   3177                         "BufferingHeuristics");
   3178    SLOG("Enter buffering due to buffering heruistics");
   3179    SetState<BufferingState>();
   3180  }
   3181 }
   3182 
// Handles a decode error that occurred while seamless-looping. Waiting-for-data
// and end-of-stream are tolerated (possibly falling back to CompletedState);
// any other error is fatal and reported to the decoder.
void MediaDecoderStateMachine::LoopingDecodingState::HandleError(
    const MediaResult& aError, bool aIsAudio) {
  SLOG("%s looping failed, aError=%s", aIsAudio ? "audio" : "video",
       aError.ErrorName().get());
  switch (aError.Code()) {
    case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
      // Register the wait for the affected track, then fall through to the
      // EOS handling below to check whether both tracks have ended.
      if (aIsAudio) {
        HandleWaitingForAudio();
      } else {
        HandleWaitingForVideo();
      }
      [[fallthrough]];
    case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
      // This could happen after either the resource has been closed, or the
      // data hasn't been appended in MSE, so that we won't be able to get any
      // sample and need to fall back to normal looping.
      if (mIsReachingAudioEOS && mIsReachingVideoEOS) {
        SetState<CompletedState>();
      }
      break;
    default:
      // Unrecoverable decode error.
      mMaster->DecodeError(aError);
      break;
  }
}
   3208 
// Finalizes a seek: detects seek-to-end, resolves the seek promise, fires the
// first-frame-loaded event if still pending, updates the playback position for
// user-visible seeks, triggers a redraw, and moves to the next state.
// NOTE: statement order here is load-bearing (promise resolution must precede
// FinishDecodeFirstFrame so 'seeked' fires before 'loadeddata').
void MediaDecoderStateMachine::SeekingState::SeekCompleted() {
  const auto newCurrentTime = CalculateNewCurrentTime();

  if ((newCurrentTime == mMaster->Duration() ||
       newCurrentTime.EqualsAtLowestResolution(
           mMaster->Duration().ToBase(USECS_PER_S))) &&
      !mMaster->IsLiveStream()) {
    SLOG("Seek completed, seeked to end: %s", newCurrentTime.ToString().get());
    // will transition to COMPLETED immediately. Note we don't do
    // this when playing a live stream, since the end of media will advance
    // once we download more data!
    AudioQueue().Finish();
    VideoQueue().Finish();

    // We won't start MediaSink when paused. m{Audio,Video}Completed will
    // remain false and 'playbackEnded' won't be notified. Therefore we
    // need to set these flags explicitly when seeking to the end.
    mMaster->mAudioCompleted = true;
    mMaster->mVideoCompleted = true;

    // There might still be a pending audio request when doing video-only or
    // next-frame seek. Discard it so we won't break the invariants of the
    // COMPLETED state by adding audio samples to a finished queue.
    mMaster->mAudioDataRequest.DisconnectIfExists();
  }

  // We want to resolve the seek request prior finishing the first frame
  // to ensure that the seeked event is fired prior loadeded.
  // Note: SeekJob.Resolve() resets SeekJob.mTarget. Don't use mSeekJob anymore
  //       hereafter.
  mSeekJob.Resolve(__func__);

  // Notify FirstFrameLoaded now if we haven't since we've decoded some data
  // for readyState to transition to HAVE_CURRENT_DATA and fire 'loadeddata'.
  if (!mMaster->mSentFirstFrameLoadedEvent) {
    mMaster->FinishDecodeFirstFrame();
  }

  // Ensure timestamps are up to date.
  // Suppressed visibility comes from two cases: (1) leaving dormant state,
  // and (2) resuming suspended video decoder. We want both cases to be
  // transparent to the user. So we only notify the change when the seek
  // request is from the user.
  if (mVisibility == EventVisibility::Observable) {
    // Don't update playback position for video-only seek.
    // Otherwise we might have |newCurrentTime > mMediaSink->GetPosition()|
    // and fail the assertion in GetClock() since we didn't stop MediaSink.
    mMaster->UpdatePlaybackPositionInternal(newCurrentTime);
  }

  // Try to decode another frame to detect if we're at the end...
  SLOG("Seek completed, mCurrentPosition=%" PRId64,
       mMaster->mCurrentPosition.Ref().ToMicroseconds());

  // A frame is already queued: repaint it at the new position immediately.
  if (mMaster->VideoQueue().PeekFront()) {
    mMaster->mMediaSink->Redraw(Info().mVideo);
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::Invalidate);
  }

  GoToNextState();
}
   3270 
// One iteration of the BUFFERING state. Decides whether we can leave
// buffering, either via heuristics (play-through / timeout / enough buffered
// data) or, without heuristics, by waiting until neither queue is empty.
void MediaDecoderStateMachine::BufferingState::Step() {
  TimeStamp now = TimeStamp::Now();
  MOZ_ASSERT(!mBufferingStart.IsNull(), "Must know buffering start time.");

  if (Reader()->UseBufferingHeuristics()) {
    if (mMaster->IsWaitingAudioData() || mMaster->IsWaitingVideoData()) {
      // Can't exit buffering when we are still waiting for data.
      // Note we don't schedule next loop for we will do that when the wait
      // promise is resolved.
      return;
    }
    // With buffering heuristics, we exit buffering state when we:
    // 1. can play through or
    // 2. time out (specified by mBufferingWait) or
    // 3. have enough buffered data.
    TimeDuration elapsed = now - mBufferingStart;
    // Scale the timeout by the playback rate: faster playback consumes the
    // buffer sooner, so it is allowed to buffer proportionally longer.
    TimeDuration timeout =
        TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate);
    bool stopBuffering =
        mMaster->mCanPlayThrough || elapsed >= timeout ||
        !mMaster->HasLowBufferedData(TimeUnit::FromSeconds(mBufferingWait));
    if (!stopBuffering) {
      SLOG("Buffering: wait %ds, timeout in %.3lfs", mBufferingWait,
           mBufferingWait - elapsed.ToSeconds());
      // Re-check once per second until one of the exit conditions holds.
      mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S));
      return;
    }
  } else if (mMaster->OutOfDecodedAudio() || mMaster->OutOfDecodedVideo()) {
    // No heuristics: stay buffering while either queue is empty. A request
    // must be in flight (or an error notified) for each empty queue.
    MOZ_ASSERT(!mMaster->OutOfDecodedAudio() ||
               mMaster->IsTrackingAudioData() ||
               mMaster->HasNotifiedPlaybackError());
    MOZ_ASSERT(!mMaster->OutOfDecodedVideo() ||
               mMaster->IsTrackingVideoData() ||
               mMaster->HasNotifiedPlaybackError());
    SLOG(
        "In buffering mode, waiting to be notified: outOfAudio: %d, "
        "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s",
        mMaster->OutOfDecodedAudio(), mMaster->AudioRequestStatus(),
        mMaster->OutOfDecodedVideo(), mMaster->VideoRequestStatus());
    return;
  }

  // Buffering is over: record how long it took and resume decoding.
  SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
  mMaster->mTotalBufferingDuration += (now - mBufferingStart);
  SetDecodingState();
}
   3317 
   3318 void MediaDecoderStateMachine::BufferingState::HandleEndOfAudio() {
   3319  AudioQueue().Finish();
   3320  if (!mMaster->IsVideoDecoding()) {
   3321    SetState<CompletedState>();
   3322  } else {
   3323    // Check if we can exit buffering.
   3324    mMaster->ScheduleStateMachine();
   3325  }
   3326 }
   3327 
   3328 void MediaDecoderStateMachine::BufferingState::HandleEndOfVideo() {
   3329  VideoQueue().Finish();
   3330  if (!mMaster->IsAudioDecoding()) {
   3331    SetState<CompletedState>();
   3332  } else {
   3333    // Check if we can exit buffering.
   3334    mMaster->ScheduleStateMachine();
   3335  }
   3336 }
   3337 
// Tears down the state machine: cancels timers and in-flight requests, stops
// and shuts down the media sink, disconnects all listeners/mirrors/canonicals,
// and finally shuts down the reader. The teardown order matters — see the
// inline comments. Returns a promise that chains into FinishShutdown.
RefPtr<ShutdownPromise> MediaDecoderStateMachine::ShutdownState::Enter() {
  auto* master = mMaster;

  master->mDelayedScheduler.Reset();

  // Shutdown happens while decode timer is active, we need to disconnect and
  // dispose of the timer.
  master->CancelSuspendTimer();

  if (master->IsPlaying()) {
    master->StopPlayback();
  }

  // Drop in-flight decode/wait requests so their callbacks can't fire into a
  // dying object.
  master->mAudioDataRequest.DisconnectIfExists();
  master->mVideoDataRequest.DisconnectIfExists();
  master->mAudioWaitRequest.DisconnectIfExists();
  master->mVideoWaitRequest.DisconnectIfExists();

  // Resetting decode should be called after stopping media sink, which can
  // ensure that we have an empty media queue before seeking the demuxer.
  master->StopMediaSink();
  master->ResetDecode();
  master->mMediaSink->Shutdown();

  // Prevent dangling pointers by disconnecting the listeners.
  master->mAudioQueueListener.Disconnect();
  master->mVideoQueueListener.Disconnect();
  master->mMetadataManager.Disconnect();
  master->mOnMediaNotSeekable.Disconnect();
  master->mAudibleListener.DisconnectIfExists();

  // Disconnect canonicals and mirrors before shutting down our task queue.
  master->mStreamName.DisconnectIfConnected();
  master->mSinkDevice.DisconnectIfConnected();
  master->mOutputCaptureState.DisconnectIfConnected();
  master->mOutputDummyTrack.DisconnectIfConnected();
  master->mOutputTracks.DisconnectIfConnected();
  master->mOutputPrincipal.DisconnectIfConnected();

  master->mDuration.DisconnectAll();
  master->mCurrentPosition.DisconnectAll();
  master->mIsAudioDataAudible.DisconnectAll();

  // Shut down the watch manager to stop further notifications.
  master->mWatchManager.Shutdown();

  // FinishShutdown runs regardless of whether the reader shutdown resolves
  // or rejects.
  return Reader()->Shutdown()->Then(OwnerThread(), __func__, master,
                                    &MediaDecoderStateMachine::FinishShutdown,
                                    &MediaDecoderStateMachine::FinishShutdown);
}
   3388 
// Helper macros that expand to the member-initializer form expected by
// Watchable/Mirror/Canonical, attaching a debug name to each. They are
// #undef'd right after the constructor.
#define INIT_WATCHABLE(name, val) name(val, "MediaDecoderStateMachine::" #name)
#define INIT_MIRROR(name, val) \
  name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Mirror)")
#define INIT_CANONICAL(name, val) \
  name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Canonical)")

// Constructor runs on the main thread; most members are merely initialized
// here — the real wiring happens later in InitializationTask on the task
// queue.
MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
                                                   MediaFormatReader* aReader)
    : MediaDecoderStateMachineBase(aDecoder, aReader),
      mWatchManager(this, mTaskQueue),
      mDispatchedStateMachine(false),
      mDelayedScheduler(mTaskQueue, true /*aFuzzy*/),
      mCurrentFrameID(0),
      mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
      mVideoDecodeSuspended(false),
      mVideoDecodeSuspendTimer(mTaskQueue),
      mVideoDecodeMode(VideoDecodeMode::Normal),
      mIsMSE(aDecoder->IsMSE()),
      mShouldResistFingerprinting(aDecoder->ShouldResistFingerprinting()),
      mSeamlessLoopingAllowed(false),
      mTotalBufferingDuration(TimeDuration::Zero()),
      INIT_MIRROR(mStreamName, nsAutoString()),
      INIT_MIRROR(mSinkDevice, nullptr),
      INIT_MIRROR(mOutputCaptureState, MediaDecoder::OutputCaptureState::None),
      INIT_MIRROR(mOutputDummyTrack, nullptr),
      INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
      INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
      INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
      mShuttingDown(false),
      mInitialized(false) {
  MOZ_COUNT_CTOR(MediaDecoderStateMachine);
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

  DDLINKCHILD("reader", aReader);
}

#undef INIT_WATCHABLE
#undef INIT_MIRROR
#undef INIT_CANONICAL

// Destructor also runs on the main thread.
MediaDecoderStateMachine::~MediaDecoderStateMachine() {
  MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread.");
  MOZ_COUNT_DTOR(MediaDecoderStateMachine);
}
   3433 
// Runs on the task queue after construction. Registers the watchers that
// react to mirrored-state changes, creates the media sink, and enters the
// initial DecodeMetadataState.
void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::InitializationTask",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  MediaDecoderStateMachineBase::InitializationTask(aDecoder);

  // Initialize watchers.
  mWatchManager.Watch(mStreamName,
                      &MediaDecoderStateMachine::StreamNameChanged);
  // The three capture-related mirrors all funnel into the same handler.
  mWatchManager.Watch(mOutputCaptureState,
                      &MediaDecoderStateMachine::UpdateOutputCaptured);
  mWatchManager.Watch(mOutputDummyTrack,
                      &MediaDecoderStateMachine::UpdateOutputCaptured);
  mWatchManager.Watch(mOutputTracks,
                      &MediaDecoderStateMachine::UpdateOutputCaptured);
  mWatchManager.Watch(mOutputPrincipal,
                      &MediaDecoderStateMachine::OutputPrincipalChanged);

  // The sink must exist before the first state is entered.
  mMediaSink = CreateMediaSink();
  mInitialized = true;

  MOZ_ASSERT(!mStateObj);
  auto* s = new DecodeMetadataState(this);
  mStateObj.reset(s);
  s->Enter();
}
   3461 
// Listener for the sink's AudibleEvent (connected in CreateAudioSink);
// records the latest audible flag in the canonical mIsAudioDataAudible.
void MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible) {
  mIsAudioDataAudible = aAudible;
}
   3465 
// Creates the audio end of the sink chain. When output is being captured,
// audio goes to a DecodedStream feeding the capture tracks; otherwise a
// lazily-constructed AudioSink (wrapped in AudioSinkWrapper) plays to the
// audio device. In both cases the sink's AudibleEvent is hooked up to
// AudioAudibleChanged.
MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
  if (mOutputCaptureState != MediaDecoder::OutputCaptureState::None) {
    // Capturing: only the plain "Capture" mode uses the dummy track; the
    // other capture state passes nullptr.
    DecodedStream* stream = new DecodedStream(
        OwnerThread(),
        mOutputCaptureState == MediaDecoder::OutputCaptureState::Capture
            ? mOutputDummyTrack.Ref()
            : nullptr,
        mOutputTracks, CanonicalOutputPrincipal(), mVolume, mPlaybackRate,
        mPreservesPitch, mAudioQueue, mVideoQueue);
    mAudibleListener.DisconnectIfExists();
    mAudibleListener = stream->AudibleEvent().Connect(
        OwnerThread(), this, &MediaDecoderStateMachine::AudioAudibleChanged);
    return stream;
  }

  // The creator lambda is invoked later by AudioSinkWrapper on the task
  // queue. `s` holds a strong reference keeping this state machine alive for
  // as long as the wrapper keeps the lambda; `this` is captured for member
  // access.
  auto audioSinkCreator = [s = RefPtr<MediaDecoderStateMachine>(this), this]() {
    MOZ_ASSERT(OnTaskQueue());
    UniquePtr<AudioSink> audioSink{new AudioSink(
        mTaskQueue, mAudioQueue, Info().mAudio, mShouldResistFingerprinting)};
    mAudibleListener.DisconnectIfExists();
    mAudibleListener = audioSink->AudibleEvent().Connect(
        mTaskQueue, this, &MediaDecoderStateMachine::AudioAudibleChanged);
    return audioSink;
  };
  return new AudioSinkWrapper(
      mTaskQueue, mAudioQueue, std::move(audioSinkCreator), mVolume,
      mPlaybackRate, mPreservesPitch, mSinkDevice.Ref());
}
   3494 
   3495 already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
   3496  MOZ_ASSERT(OnTaskQueue());
   3497  RefPtr<MediaSink> audioSink = CreateAudioSink();
   3498  RefPtr<MediaSink> mediaSink = new VideoSink(
   3499      mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer, *mFrameStats,
   3500      StaticPrefs::media_video_queue_send_to_compositor_size());
   3501  if (mSecondaryVideoContainer.Ref()) {
   3502    mediaSink->SetSecondaryVideoContainer(mSecondaryVideoContainer.Ref());
   3503  }
   3504  return mediaSink.forget();
   3505 }
   3506 
   3507 TimeUnit MediaDecoderStateMachine::GetDecodedAudioDuration() const {
   3508  MOZ_ASSERT(OnTaskQueue());
   3509  if (mMediaSink->IsStarted()) {
   3510    return mMediaSink->UnplayedDuration(TrackInfo::kAudioTrack) +
   3511           TimeUnit::FromMicroseconds(AudioQueue().Duration());
   3512  }
   3513  // MediaSink not started. All audio samples are in the queue.
   3514  return TimeUnit::FromMicroseconds(AudioQueue().Duration());
   3515 }
   3516 
   3517 bool MediaDecoderStateMachine::HaveEnoughDecodedAudio() const {
   3518  MOZ_ASSERT(OnTaskQueue());
   3519  auto ampleAudio = mAmpleAudioThreshold.MultDouble(mPlaybackRate);
   3520  return AudioQueue().GetSize() > 0 && GetDecodedAudioDuration() >= ampleAudio;
   3521 }
   3522 
   3523 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo() const {
   3524  MOZ_ASSERT(OnTaskQueue());
   3525  return static_cast<double>(VideoQueue().GetSize()) >=
   3526             GetAmpleVideoFrames() * mPlaybackRate + 1 &&
   3527         IsVideoDataEnoughComparedWithAudio();
   3528 }
   3529 
   3530 bool MediaDecoderStateMachine::IsVideoDataEnoughComparedWithAudio() const {
   3531  // HW decoding is usually fast enough and we don't need to worry about its
   3532  // speed.
   3533  // TODO : we can consider whether we need to enable this on other HW decoding
   3534  // except VAAPI. When enabling VAAPI on Linux, ffmpeg is not able to store too
   3535  // many frames because it has a limitation of amount of stored video frames.
   3536  // See bug1716638 and 1718309.
   3537  if (mReader->VideoIsHardwareAccelerated()) {
   3538    return true;
   3539  }
   3540  // In extreme situations (e.g. 4k+ video without hardware acceleration), the
   3541  // video decoding will be much slower than audio. So for 4K+ video, we want to
   3542  // consider audio decoding speed as well in order to reduce frame drops. This
   3543  // check tries to keep the decoded video buffered as much as audio.
   3544  if (HasAudio() && Info().mVideo.mImage.width >= 3840 &&
   3545      Info().mVideo.mImage.height >= 2160) {
   3546    return VideoQueue().Duration() >= AudioQueue().Duration();
   3547  }
   3548  // For non-4k video, the video decoding is usually really fast so we won't
   3549  // need to consider audio decoding speed to store extra frames.
   3550  return true;
   3551 }
   3552 
   3553 void MediaDecoderStateMachine::PushAudio(AudioData* aSample) {
   3554  MOZ_ASSERT(OnTaskQueue());
   3555  MOZ_ASSERT(aSample);
   3556  AudioQueue().Push(aSample);
   3557  PROFILER_MARKER("MDSM::PushAudio", MEDIA_PLAYBACK, {}, MediaSampleMarker,
   3558                  aSample->mTime.ToMicroseconds(),
   3559                  aSample->GetEndTime().ToMicroseconds(),
   3560                  AudioQueue().GetSize());
   3561 }
   3562 
   3563 void MediaDecoderStateMachine::PushVideo(VideoData* aSample) {
   3564  MOZ_ASSERT(OnTaskQueue());
   3565  MOZ_ASSERT(aSample);
   3566  aSample->mFrameID = ++mCurrentFrameID;
   3567  VideoQueue().Push(aSample);
   3568  PROFILER_MARKER("MDSM::PushVideo", MEDIA_PLAYBACK, {}, MediaSampleMarker,
   3569                  aSample->mTime.ToMicroseconds(),
   3570                  aSample->GetEndTime().ToMicroseconds(),
   3571                  VideoQueue().GetSize());
   3572 }
   3573 
   3574 void MediaDecoderStateMachine::OnAudioPopped(const RefPtr<AudioData>& aSample) {
   3575  MOZ_ASSERT(OnTaskQueue());
   3576  mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset);
   3577 }
   3578 
   3579 void MediaDecoderStateMachine::OnVideoPopped(const RefPtr<VideoData>& aSample) {
   3580  MOZ_ASSERT(OnTaskQueue());
   3581  mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset);
   3582 }
   3583 
   3584 bool MediaDecoderStateMachine::IsAudioDecoding() {
   3585  MOZ_ASSERT(OnTaskQueue());
   3586  return HasAudio() && !AudioQueue().IsFinished();
   3587 }
   3588 
   3589 bool MediaDecoderStateMachine::IsVideoDecoding() {
   3590  MOZ_ASSERT(OnTaskQueue());
   3591  return HasVideo() && !VideoQueue().IsFinished();
   3592 }
   3593 
// The media sink owns the playing/paused state; this merely delegates.
bool MediaDecoderStateMachine::IsPlaying() const {
  MOZ_ASSERT(OnTaskQueue());
  return mMediaSink->IsPlaying();
}
   3598 
// Invoked via mOnMediaNotSeekable (connected in Init) when the reader reports
// the resource cannot be seeked; clears the seekable flag.
void MediaDecoderStateMachine::SetMediaNotSeekable() { mMediaSeekable = false; }
   3600 
// Main-thread initialization: runs the base-class init, connects the decoder's
// canonical values to our mirrors, and hooks up the queue-pop and
// not-seekable listeners. Returns an error if base initialization fails.
nsresult MediaDecoderStateMachine::Init(MediaDecoder* aDecoder) {
  MOZ_ASSERT(NS_IsMainThread());

  nsresult rv = MediaDecoderStateMachineBase::Init(aDecoder);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  // Connect mirrors.
  aDecoder->CanonicalStreamName().ConnectMirror(&mStreamName);
  aDecoder->CanonicalSinkDevice().ConnectMirror(&mSinkDevice);
  aDecoder->CanonicalOutputCaptureState().ConnectMirror(&mOutputCaptureState);
  aDecoder->CanonicalOutputDummyTrack().ConnectMirror(&mOutputDummyTrack);
  aDecoder->CanonicalOutputTracks().ConnectMirror(&mOutputTracks);
  aDecoder->CanonicalOutputPrincipal().ConnectMirror(&mOutputPrincipal);

  // Queue-pop listeners keep mPlaybackOffset up to date as samples are
  // consumed; the reader tells us when the media turns out to be unseekable.
  mAudioQueueListener = AudioQueue().PopFrontEvent().Connect(
      mTaskQueue, this, &MediaDecoderStateMachine::OnAudioPopped);
  mVideoQueueListener = VideoQueue().PopFrontEvent().Connect(
      mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);
  mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
      OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);

  return NS_OK;
}
   3626 
   3627 void MediaDecoderStateMachine::StopPlayback() {
   3628  MOZ_ASSERT(OnTaskQueue());
   3629  LOG("StopPlayback()");
   3630 
   3631  if (IsPlaying()) {
   3632    mOnPlaybackEvent.Notify(MediaPlaybackEvent{
   3633        MediaPlaybackEvent::PlaybackStopped, mPlaybackOffset});
   3634    mMediaSink->SetPlaying(false);
   3635    MOZ_ASSERT(!IsPlaying());
   3636  }
   3637 }
   3638 
// Starts the media sink if all preconditions hold (first frame loaded, not
// already playing, sink not suspended, decoder in PLAYING state) and fires
// the PlaybackStarted event. Safe to call repeatedly; it no-ops otherwise.
void MediaDecoderStateMachine::MaybeStartPlayback() {
  MOZ_ASSERT(OnTaskQueue());
  // Should try to start playback only after decoding first frames.
  if (!mSentFirstFrameLoadedEvent) {
    LOG("MaybeStartPlayback: Not starting playback before loading first frame");
    return;
  }

  if (IsPlaying()) {
    // Logging this case is really spammy - don't do it.
    return;
  }

  if (mIsMediaSinkSuspended) {
    LOG("MaybeStartPlayback: Not starting playback when sink is suspended");
    return;
  }

  if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
    LOG("MaybeStartPlayback: Not starting playback [mPlayState=%d]",
        mPlayState.Ref());
    return;
  }

  LOG("MaybeStartPlayback() starting playback");
  // Start the sink, then flip it to playing if starting didn't already do so.
  StartMediaSink();

  if (!IsPlaying()) {
    mMediaSink->SetPlaying(true);
    MOZ_ASSERT(IsPlaying());
  }

  mOnPlaybackEvent.Notify(
      MediaPlaybackEvent{MediaPlaybackEvent::PlaybackStarted, mPlaybackOffset});
}
   3674 
// Set the canonical playback position (task queue only). If the clock has
// run past the currently known duration, the duration is extended so the
// position can never exceed it.
void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(
    const TimeUnit& aTime) {
  MOZ_ASSERT(OnTaskQueue());
  LOGV("UpdatePlaybackPositionInternal(%" PRId64 ")", aTime.ToMicroseconds());

  // Ensure the position has a precision that matches other TimeUnit such as
  // buffering ranges and duration.
  mCurrentPosition = aTime.ToBase(1000000);
  NS_ASSERTION(mCurrentPosition.Ref() >= TimeUnit::Zero(),
               "CurrentTime should be positive!");
  // NOTE: mDuration is assumed to hold a value here (callers run after
  // metadata is known); grow it if playback progressed past its end.
  if (mDuration.Ref().ref() < mCurrentPosition.Ref()) {
    mDuration = Some(mCurrentPosition.Ref());
    DDLOG(DDLogCategory::Property, "duration_us",
          mDuration.Ref()->ToMicroseconds());
  }
}
   3691 
   3692 void MediaDecoderStateMachine::UpdatePlaybackPosition(const TimeUnit& aTime) {
   3693  MOZ_ASSERT(OnTaskQueue());
   3694  UpdatePlaybackPositionInternal(aTime);
   3695 
   3696  bool fragmentEnded =
   3697      mFragmentEndTime.IsValid() && GetMediaTime() >= mFragmentEndTime;
   3698  mMetadataManager.DispatchMetadataIfNeeded(aTime);
   3699 
   3700  if (fragmentEnded) {
   3701    StopPlayback();
   3702  }
   3703 }
   3704 
   3705 /* static */ const char* MediaDecoderStateMachine::ToStateStr(State aState) {
   3706  switch (aState) {
   3707    case DECODER_STATE_DECODING_METADATA:
   3708      return "DECODING_METADATA";
   3709    case DECODER_STATE_DORMANT:
   3710      return "DORMANT";
   3711    case DECODER_STATE_DECODING_FIRSTFRAME:
   3712      return "DECODING_FIRSTFRAME";
   3713    case DECODER_STATE_DECODING:
   3714      return "DECODING";
   3715    case DECODER_STATE_SEEKING_ACCURATE:
   3716      return "SEEKING_ACCURATE";
   3717    case DECODER_STATE_SEEKING_FROMDORMANT:
   3718      return "SEEKING_FROMDORMANT";
   3719    case DECODER_STATE_SEEKING_NEXTFRAMESEEKING:
   3720      return "DECODER_STATE_SEEKING_NEXTFRAMESEEKING";
   3721    case DECODER_STATE_SEEKING_VIDEOONLY:
   3722      return "SEEKING_VIDEOONLY";
   3723    case DECODER_STATE_BUFFERING:
   3724      return "BUFFERING";
   3725    case DECODER_STATE_COMPLETED:
   3726      return "COMPLETED";
   3727    case DECODER_STATE_SHUTDOWN:
   3728      return "SHUTDOWN";
   3729    case DECODER_STATE_LOOPING_DECODING:
   3730      return "LOOPING_DECODING";
   3731    default:
   3732      MOZ_ASSERT_UNREACHABLE("Invalid state.");
   3733  }
   3734  return "UNKNOWN";
   3735 }
   3736 
   3737 const char* MediaDecoderStateMachine::ToStateStr() {
   3738  MOZ_ASSERT(OnTaskQueue());
   3739  return ToStateStr(mStateObj->GetState());
   3740 }
   3741 
// Mirror-change handler: push the new volume (mirrored from the main
// thread) down to the media sink.
void MediaDecoderStateMachine::VolumeChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::VolumeChanged",
                      MEDIA_PLAYBACK);
  PROFILER_MARKER_TEXT("MDSM::VolumeChanged", MEDIA_PLAYBACK, {},
                       nsPrintfCString("%f", mVolume.Ref()));
  MOZ_ASSERT(OnTaskQueue());
  mMediaSink->SetVolume(mVolume);
}
   3750 
// Begin tearing down the state machine. Records that shutdown has started
// and delegates the actual teardown to the current state object; the
// returned promise resolves when shutdown completes.
RefPtr<ShutdownPromise> MediaDecoderStateMachine::Shutdown() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::Shutdown", MEDIA_PLAYBACK);
  PROFILER_MARKER_UNTYPED("MDSM::Shutdown", MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  // Set the flag before delegating so code that checks it sees shutdown
  // in progress.
  mShuttingDown = true;
  return mStateObj->HandleShutdown();
}
   3758 
// Mirror-change handler for the play state (play/pause/ended). Adjusts
// suspend-timer and preroll bookkeeping, then lets the current state
// object react to the new play state.
void MediaDecoderStateMachine::PlayStateChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::PlayStateChanged",
                      MEDIA_PLAYBACK);
  PROFILER_MARKER_TEXT(
      "MDSM::PlayStateChanged", MEDIA_PLAYBACK, {},
      nsPrintfCString("%s", MediaDecoder::EnumValueToString(mPlayState.Ref())));
  MOZ_ASSERT(OnTaskQueue());

  // Leaving the PLAYING state cancels any pending video-suspend timer.
  if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
    CancelSuspendTimer();
  } else if (mMinimizePreroll) {
    // Once we start playing, we don't want to minimize our prerolling, as we
    // assume the user is likely to want to keep playing in future. This needs
    // to happen before we invoke StartDecoding().
    mMinimizePreroll = false;
  }

  mStateObj->HandlePlayStateChanged(mPlayState);
}
   3778 
   3779 void MediaDecoderStateMachine::SetVideoDecodeMode(VideoDecodeMode aMode) {
   3780  MOZ_ASSERT(NS_IsMainThread());
   3781  nsCOMPtr<nsIRunnable> r = NewRunnableMethod<VideoDecodeMode>(
   3782      "MediaDecoderStateMachine::SetVideoDecodeModeInternal", this,
   3783      &MediaDecoderStateMachine::SetVideoDecodeModeInternal, aMode);
   3784  OwnerThread()->DispatchStateChange(r.forget());
   3785 }
   3786 
// Apply a video decode mode change on the task queue. Switching to Suspend
// arms a delayed timer (so brief background trips don't suspend decoding);
// switching to Normal cancels the timer and, if decoding was actually
// suspended, asks the state object to resume video decoding.
void MediaDecoderStateMachine::SetVideoDecodeModeInternal(
    VideoDecodeMode aMode) {
  MOZ_ASSERT(OnTaskQueue());

  LOG("SetVideoDecodeModeInternal(), VideoDecodeMode=(%s->%s), "
      "mVideoDecodeSuspended=%c",
      mVideoDecodeMode == VideoDecodeMode::Normal ? "Normal" : "Suspend",
      aMode == VideoDecodeMode::Normal ? "Normal" : "Suspend",
      mVideoDecodeSuspended ? 'T' : 'F');

  // Should not suspend decoding if we don't turn on the pref.
  if (!StaticPrefs::media_suspend_background_video_enabled() &&
      aMode == VideoDecodeMode::Suspend) {
    LOG("SetVideoDecodeModeInternal(), early return because preference off and "
        "set to Suspend");
    return;
  }

  if (aMode == mVideoDecodeMode) {
    LOG("SetVideoDecodeModeInternal(), early return because the mode does not "
        "change");
    return;
  }

  // Set new video decode mode.
  mVideoDecodeMode = aMode;

  // Start timer to trigger suspended video decoding.
  if (mVideoDecodeMode == VideoDecodeMode::Suspend) {
    TimeStamp target = TimeStamp::Now() + SuspendBackgroundVideoDelay();

    RefPtr<MediaDecoderStateMachine> self = this;
    mVideoDecodeSuspendTimer.Ensure(
        target, [=]() { self->OnSuspendTimerResolved(); },
        []() { MOZ_DIAGNOSTIC_CRASH("SetVideoDecodeModeInternal reject"); });
    mOnPlaybackEvent.Notify(MediaPlaybackEvent::StartVideoSuspendTimer);
    return;
  }

  // Resuming from suspended decoding

  // If suspend timer exists, destroy it.
  CancelSuspendTimer();

  if (mVideoDecodeSuspended) {
    // Resume from the sink clock when playing, otherwise from the media
    // time, corrected for seamless looping. RESUME_VIDEO_PREMIUM is added
    // to the target — presumably to aim slightly ahead so decoded frames
    // are not already stale on arrival (TODO confirm against its
    // definition in the detail namespace).
    auto target = mMediaSink->IsStarted() ? GetClock() : GetMediaTime();
    AdjustByLooping(target);
    mStateObj->HandleResumeVideoDecoding(target + detail::RESUME_VIDEO_PREMIUM);
  }
}
   3837 
// Mirror-change handler: when the buffered ranges grow past the currently
// known duration, extend the duration estimate so playable data is not
// reported as beyond the media's end.
void MediaDecoderStateMachine::BufferedRangeUpdated() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::BufferedRangeUpdated",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  // While playing an unseekable stream of unknown duration, mDuration
  // is updated as we play. But if data is being downloaded
  // faster than played, mDuration won't reflect the end of playable data
  // since we haven't played the frame at the end of buffered data. So update
  // mDuration here as new data is downloaded to prevent such a lag.
  if (mBuffered.Ref().IsInvalid()) {
    return;
  }

  bool exists;
  media::TimeUnit end{mBuffered.Ref().GetEnd(&exists)};
  if (!exists) {
    return;
  }

  // Use estimated duration from buffer ranges when mDuration is unknown or
  // the estimated duration is larger.
  if ((mDuration.Ref().isNothing() || mDuration.Ref()->IsInfinite() ||
       end > mDuration.Ref().ref()) &&
      end.IsPositiveOrZero()) {
    nsPrintfCString msg{
        "duration:%" PRId64 "->%" PRId64,
        mDuration.Ref().isNothing() ? 0 : mDuration.Ref()->ToMicroseconds(),
        end.ToMicroseconds()};
    PROFILER_MARKER_TEXT("MDSM::BufferedRangeUpdated", MEDIA_PLAYBACK, {}, msg);
    LOG("%s", msg.get());
    mDuration = Some(end);
    DDLOG(DDLogCategory::Property, "duration_us",
          mDuration.Ref()->ToMicroseconds());
  }
}
   3874 
// Entry point for seek requests. Rejects seeks on non-seekable media and
// next-frame seeks without a video track, then delegates to the current
// state object, which owns the actual seeking logic.
RefPtr<MediaDecoder::SeekPromise> MediaDecoderStateMachine::Seek(
    const SeekTarget& aTarget) {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::Seek", MEDIA_PLAYBACK);
  PROFILER_MARKER_UNTYPED("MDSM::Seek", MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  // We need to be able to seek in some way
  if (!mMediaSeekable && !mMediaSeekableOnlyInBufferedRanges) {
    LOGW("Seek() should not be called on a non-seekable media");
    return MediaDecoder::SeekPromise::CreateAndReject(/* aRejectValue = */ true,
                                                      __func__);
  }

  // NextFrame seeks advance video frame-by-frame, which is meaningless
  // without a video track.
  if (aTarget.IsNextFrame() && !HasVideo()) {
    LOGW("Ignore a NextFrameSeekTask on a media file without video track.");
    return MediaDecoder::SeekPromise::CreateAndReject(/* aRejectValue = */ true,
                                                      __func__);
  }

  MOZ_ASSERT(mDuration.Ref().isSome(), "We should have got duration already");

  return mStateObj->HandleSeek(aTarget);
}
   3898 
   3899 void MediaDecoderStateMachine::StopMediaSink() {
   3900  MOZ_ASSERT(OnTaskQueue());
   3901  if (mMediaSink->IsStarted()) {
   3902    LOG("Stop MediaSink");
   3903    mMediaSink->Stop();
   3904    mMediaSinkAudioEndedPromise.DisconnectIfExists();
   3905    mMediaSinkVideoEndedPromise.DisconnectIfExists();
   3906  }
   3907 }
   3908 
// Ask the reader for the next decoded audio sample. On resolve, the sample
// is handed to the current state object; on reject, the error code decides
// whether to wait for data, retry, finish the stream, or raise a decode
// error. mAudioDataRequest tracks the in-flight request so it can be
// disconnected on reset.
void MediaDecoderStateMachine::RequestAudioData() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RequestAudioData",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(IsAudioDecoding());
  MOZ_ASSERT(!IsRequestingAudioData());
  MOZ_ASSERT(!IsWaitingAudioData());
  LOGV("Queueing audio task - queued=%zu, decoder-queued=%zu",
       AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());

  PerformanceRecorder<PlaybackStage> perfRecorder(MediaStage::RequestData);
  // Keep |self| alive for the duration of the async request.
  RefPtr<MediaDecoderStateMachine> self = this;
  mReader->RequestAudioData()
      ->Then(
          OwnerThread(), __func__,
          [this, self, perfRecorder(std::move(perfRecorder))](
              const RefPtr<AudioData>& aAudio) mutable {
            perfRecorder.Record();
            AUTO_PROFILER_LABEL(
                "MediaDecoderStateMachine::RequestAudioData:Resolved",
                MEDIA_PLAYBACK);
            MOZ_ASSERT(aAudio);
            mAudioDataRequest.Complete();
            // audio->GetEndTime() is not always mono-increasing in chained
            // ogg.
            mDecodedAudioEndTime =
                std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
            LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]",
                 aAudio->mTime.ToMicroseconds(),
                 aAudio->GetEndTime().ToMicroseconds());
            mStateObj->HandleAudioDecoded(aAudio);
          },
          [this, self](const MediaResult& aError) {
            AUTO_PROFILER_LABEL(
                "MediaDecoderStateMachine::RequestAudioData:Rejected",
                MEDIA_PLAYBACK);
            LOGV("OnAudioNotDecoded ErrorName=%s Message=%s",
                 aError.ErrorName().get(), aError.Message().get());
            mAudioDataRequest.Complete();
            // Route recoverable rejections to the state object; anything
            // else is a fatal decode error.
            switch (aError.Code()) {
              case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
                mStateObj->HandleWaitingForAudio();
                break;
              case NS_ERROR_DOM_MEDIA_CANCELED:
                mStateObj->HandleAudioCanceled();
                break;
              case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
                mStateObj->HandleEndOfAudio();
                break;
              default:
                DecodeError(aError);
            }
          })
      ->Track(mAudioDataRequest);
}
   3964 
   3965 void MediaDecoderStateMachine::RequestVideoData(
   3966    const media::TimeUnit& aCurrentTime, bool aRequestNextKeyFrame) {
   3967  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RequestVideoData",
   3968                      MEDIA_PLAYBACK);
   3969  MOZ_ASSERT(OnTaskQueue());
   3970  MOZ_ASSERT(IsVideoDecoding());
   3971  MOZ_ASSERT(!IsRequestingVideoData());
   3972  MOZ_ASSERT(!IsWaitingVideoData());
   3973  LOGV(
   3974      "Queueing video task - queued=%zu, decoder-queued=%zo"
   3975      ", stime=%" PRId64 ", by-pass-skip=%d",
   3976      VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(),
   3977      aCurrentTime.ToMicroseconds(), mBypassingSkipToNextKeyFrameCheck);
   3978 
   3979  PerformanceRecorder<PlaybackStage> perfRecorder(MediaStage::RequestData,
   3980                                                  Info().mVideo.mImage.height);
   3981  RefPtr<MediaDecoderStateMachine> self = this;
   3982  mReader
   3983      ->RequestVideoData(
   3984          mBypassingSkipToNextKeyFrameCheck ? media::TimeUnit() : aCurrentTime,
   3985          mBypassingSkipToNextKeyFrameCheck ? false : aRequestNextKeyFrame)
   3986      ->Then(
   3987          OwnerThread(), __func__,
   3988          [this, self, perfRecorder(std::move(perfRecorder))](
   3989              const RefPtr<VideoData>& aVideo) mutable {
   3990            perfRecorder.Record();
   3991            AUTO_PROFILER_LABEL(
   3992                "MediaDecoderStateMachine::RequestVideoData:Resolved",
   3993                MEDIA_PLAYBACK);
   3994            MOZ_ASSERT(aVideo);
   3995            mVideoDataRequest.Complete();
   3996            // Handle abnormal or negative timestamps.
   3997            mDecodedVideoEndTime =
   3998                std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
   3999            LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]",
   4000                 aVideo->mTime.ToMicroseconds(),
   4001                 aVideo->GetEndTime().ToMicroseconds());
   4002            mStateObj->HandleVideoDecoded(aVideo);
   4003          },
   4004          [this, self](const MediaResult& aError) {
   4005            AUTO_PROFILER_LABEL(
   4006                "MediaDecoderStateMachine::RequestVideoData:Rejected",
   4007                MEDIA_PLAYBACK);
   4008            LOGV("OnVideoNotDecoded ErrorName=%s Message=%s",
   4009                 aError.ErrorName().get(), aError.Message().get());
   4010            mVideoDataRequest.Complete();
   4011            switch (aError.Code()) {
   4012              case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
   4013                mStateObj->HandleWaitingForVideo();
   4014                break;
   4015              case NS_ERROR_DOM_MEDIA_CANCELED:
   4016                mStateObj->HandleVideoCanceled();
   4017                break;
   4018              case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
   4019                mStateObj->HandleEndOfVideo();
   4020                break;
   4021              default:
   4022                DecodeError(aError);
   4023            }
   4024          })
   4025      ->Track(mVideoDataRequest);
   4026 }
   4027 
// Ask the reader to notify us when more data of the given type becomes
// available (used after a WAITING_FOR_DATA rejection). Resolution is routed
// to the state object; rejection is surfaced as a decode error.
void MediaDecoderStateMachine::WaitForData(MediaData::Type aType) {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
             aType == MediaData::Type::VIDEO_DATA);
  LOG("%s: %s", __func__, MediaData::EnumValueToString(aType));
  // Keep |self| alive until the wait completes or is disconnected.
  RefPtr<MediaDecoderStateMachine> self = this;
  if (aType == MediaData::Type::AUDIO_DATA) {
    mReader->WaitForData(MediaData::Type::AUDIO_DATA)
        ->Then(
            OwnerThread(), __func__,
            [self](MediaData::Type aType) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:AudioResolved",
                  MEDIA_PLAYBACK);
              self->mAudioWaitRequest.Complete();
              MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA);
              self->mStateObj->HandleAudioWaited(aType);
            },
            [self](const WaitForDataRejectValue& aRejection) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:AudioRejected",
                  MEDIA_PLAYBACK);
              self->mAudioWaitRequest.Complete();
              self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
            })
        ->Track(mAudioWaitRequest);
  } else {
    mReader->WaitForData(MediaData::Type::VIDEO_DATA)
        ->Then(
            OwnerThread(), __func__,
            [self, this](MediaData::Type aType) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:VideoResolved",
                  MEDIA_PLAYBACK);
              self->mVideoWaitRequest.Complete();
              MOZ_ASSERT(aType == MediaData::Type::VIDEO_DATA);
              LOG("WaitForData::VideoResolved");
              self->mStateObj->HandleVideoWaited(aType);
            },
            [self, this](const WaitForDataRejectValue& aRejection) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:VideoRejected",
                  MEDIA_PLAYBACK);
              self->mVideoWaitRequest.Complete();
              LOG("WaitForData::VideoRejected");
              self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
            })
        ->Track(mVideoWaitRequest);
  }
}
   4078 
// Start the media sink at the current media time (no-op if already
// started), subscribe to its per-track end-of-playback promises, and record
// the initial byte offset used for playback-progress reporting.
// Returns the sink's Start() result.
nsresult MediaDecoderStateMachine::StartMediaSink() {
  MOZ_ASSERT(OnTaskQueue());

  if (mMediaSink->IsStarted()) {
    return NS_OK;
  }

  mAudioCompleted = false;
  const auto startTime = GetMediaTime();
  LOG("StartMediaSink, mediaTime=%" PRId64, startTime.ToMicroseconds());
  nsresult rv = mMediaSink->Start(startTime, Info());
  // Re-apply the stream name now that a (possibly new) sink is running.
  StreamNameChanged();

  // A null promise means the sink has no such track to play.
  auto videoPromise = mMediaSink->OnEnded(TrackInfo::kVideoTrack);
  auto audioPromise = mMediaSink->OnEnded(TrackInfo::kAudioTrack);

  if (audioPromise) {
    audioPromise
        ->Then(OwnerThread(), __func__, this,
               &MediaDecoderStateMachine::OnMediaSinkAudioComplete,
               &MediaDecoderStateMachine::OnMediaSinkAudioError)
        ->Track(mMediaSinkAudioEndedPromise);
  }
  if (videoPromise) {
    videoPromise
        ->Then(OwnerThread(), __func__, this,
               &MediaDecoderStateMachine::OnMediaSinkVideoComplete,
               &MediaDecoderStateMachine::OnMediaSinkVideoError)
        ->Track(mMediaSinkVideoEndedPromise);
  }
  // Remember the initial offset when playback starts. This will be used
  // to calculate the rate at which bytes are consumed as playback moves on.
  RefPtr<MediaData> sample = mAudioQueue.PeekFront();
  mPlaybackOffset = sample ? sample->mOffset : 0;
  sample = mVideoQueue.PeekFront();
  if (sample && sample->mOffset > mPlaybackOffset) {
    mPlaybackOffset = sample->mOffset;
  }
  return rv;
}
   4119 
   4120 bool MediaDecoderStateMachine::HasLowDecodedAudio() {
   4121  MOZ_ASSERT(OnTaskQueue());
   4122  return IsAudioDecoding() &&
   4123         GetDecodedAudioDuration() <
   4124             EXHAUSTED_DATA_MARGIN.MultDouble(mPlaybackRate);
   4125 }
   4126 
   4127 bool MediaDecoderStateMachine::HasLowDecodedVideo() {
   4128  MOZ_ASSERT(OnTaskQueue());
   4129  return IsVideoDecoding() &&
   4130         VideoQueue().GetSize() <
   4131             static_cast<size_t>(floorl(LOW_VIDEO_FRAMES * mPlaybackRate));
   4132 }
   4133 
   4134 bool MediaDecoderStateMachine::HasLowDecodedData() {
   4135  MOZ_ASSERT(OnTaskQueue());
   4136  MOZ_ASSERT(mReader->UseBufferingHeuristics());
   4137  return HasLowDecodedAudio() || HasLowDecodedVideo();
   4138 }
   4139 
   4140 bool MediaDecoderStateMachine::OutOfDecodedAudio() {
   4141  MOZ_ASSERT(OnTaskQueue());
   4142  return IsAudioDecoding() && !AudioQueue().IsFinished() &&
   4143         AudioQueue().GetSize() == 0 &&
   4144         !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
   4145 }
   4146 
   4147 bool MediaDecoderStateMachine::HasLowBufferedData() {
   4148  MOZ_ASSERT(OnTaskQueue());
   4149  return HasLowBufferedData(detail::LOW_BUFFER_THRESHOLD);
   4150 }
   4151 
   4152 bool MediaDecoderStateMachine::HasLowBufferedData(const TimeUnit& aThreshold) {
   4153  MOZ_ASSERT(OnTaskQueue());
   4154 
   4155  // If we don't have a duration, mBuffered is probably not going to have
   4156  // a useful buffered range. Return false here so that we don't get stuck in
   4157  // buffering mode for live streams.
   4158  if (Duration().IsInfinite()) {
   4159    return false;
   4160  }
   4161 
   4162  if (mBuffered.Ref().IsInvalid()) {
   4163    return false;
   4164  }
   4165 
   4166  // We are never low in decoded data when we don't have audio/video or have
   4167  // decoded all audio/video samples.
   4168  TimeUnit endOfDecodedVideo = (HasVideo() && !VideoQueue().IsFinished())
   4169                                   ? mDecodedVideoEndTime
   4170                                   : TimeUnit::FromNegativeInfinity();
   4171  TimeUnit endOfDecodedAudio = (HasAudio() && !AudioQueue().IsFinished())
   4172                                   ? mDecodedAudioEndTime
   4173                                   : TimeUnit::FromNegativeInfinity();
   4174 
   4175  auto endOfDecodedData = std::max(endOfDecodedVideo, endOfDecodedAudio);
   4176  if (Duration() < endOfDecodedData) {
   4177    // Our duration is not up to date. No point buffering.
   4178    return false;
   4179  }
   4180 
   4181  if (endOfDecodedData.IsInfinite()) {
   4182    // Have decoded all samples. No point buffering.
   4183    return false;
   4184  }
   4185 
   4186  auto start = endOfDecodedData;
   4187  auto end = std::min(GetMediaTime() + aThreshold, Duration());
   4188  if (start >= end) {
   4189    // Duration of decoded samples is greater than our threshold.
   4190    return false;
   4191  }
   4192  media::TimeInterval interval(start, end);
   4193  return !mBuffered.Ref().Contains(interval);
   4194 }
   4195 
   4196 void MediaDecoderStateMachine::EnqueueFirstFrameLoadedEvent() {
   4197  MOZ_ASSERT(OnTaskQueue());
   4198  // Track value of mSentFirstFrameLoadedEvent from before updating it
   4199  bool firstFrameBeenLoaded = mSentFirstFrameLoadedEvent;
   4200  mSentFirstFrameLoadedEvent = true;
   4201  MediaDecoderEventVisibility visibility =
   4202      firstFrameBeenLoaded ? MediaDecoderEventVisibility::Suppressed
   4203                           : MediaDecoderEventVisibility::Observable;
   4204  mFirstFrameLoadedEvent.Notify(UniquePtr<MediaInfo>(new MediaInfo(Info())),
   4205                                visibility);
   4206 }
   4207 
// Finalize first-frame decoding: push the first video frame to the
// compositor, refresh metadata from the reader, and fire the
// first-frame-loaded event.
void MediaDecoderStateMachine::FinishDecodeFirstFrame() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(!mSentFirstFrameLoadedEvent);
  LOG("FinishDecodeFirstFrame");

  mMediaSink->Redraw(Info().mVideo);
  // Apply the reader's compositor queue size, if it specifies one.
  mReader->GetSendToCompositorSize().apply([self = RefPtr{this}](uint32_t x) {
    self->mMediaSink->SetVideoQueueSendToCompositorSize(x);
  });

  LOG("Media duration %" PRId64 ", mediaSeekable=%d",
      Duration().ToMicroseconds(), mMediaSeekable);

  // Get potentially updated metadata
  mReader->ReadUpdatedMetadata(mInfo.ptr());

  EnqueueFirstFrameLoadedEvent();
}
   4226 
// Final step of shutdown: shut down the state machine's own task queue.
// The returned promise resolves when the queue has drained.
RefPtr<ShutdownPromise> MediaDecoderStateMachine::FinishShutdown() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::FinishShutdown",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  LOG("Shutting down state machine task queue");
  return OwnerThread()->BeginShutdown();
}
   4234 
// Run one step of the state machine. Clears the pending-dispatch
// bookkeeping first so the state object's Step() may schedule another run.
void MediaDecoderStateMachine::RunStateMachine() {
  MOZ_ASSERT(OnTaskQueue());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RunStateMachine",
                      MEDIA_PLAYBACK);
  mDelayedScheduler.Reset();  // Must happen on state machine task queue.
  mDispatchedStateMachine = false;
  mStateObj->Step();
}
   4243 
// Flush decoded data and cancel in-flight decode/wait requests for the
// given tracks, then reset the reader's decoders for those tracks.
void MediaDecoderStateMachine::ResetDecode(const TrackSet& aTracks) {
  MOZ_ASSERT(OnTaskQueue());
  LOG("MediaDecoderStateMachine::Reset");

  // Assert that aTracks specifies to reset the video track because we
  // don't currently support resetting just the audio track.
  MOZ_ASSERT(aTracks.contains(TrackInfo::kVideoTrack));

  if (aTracks.contains(TrackInfo::kVideoTrack)) {
    mDecodedVideoEndTime = TimeUnit::Zero();
    mVideoCompleted = false;
    VideoQueue().Reset();
    // Disconnect so stale resolutions can't arrive after the reset.
    mVideoDataRequest.DisconnectIfExists();
    mVideoWaitRequest.DisconnectIfExists();
  }

  if (aTracks.contains(TrackInfo::kAudioTrack)) {
    mDecodedAudioEndTime = TimeUnit::Zero();
    mAudioCompleted = false;
    AudioQueue().Reset();
    // Disconnect so stale resolutions can't arrive after the reset.
    mAudioDataRequest.DisconnectIfExists();
    mAudioWaitRequest.DisconnectIfExists();
  }

  mReader->ResetDecode(aTracks);
}
   4270 
// Return the sink's current playback position (and optionally the
// TimeStamp it corresponds to). Asserts the clock never goes backwards —
// as a non-fatal NS_ASSERTION on Windows, where this is known to trip.
media::TimeUnit MediaDecoderStateMachine::GetClock(
    TimeStamp* aTimeStamp) const {
  MOZ_ASSERT(OnTaskQueue());
  auto clockTime = mMediaSink->GetPosition(aTimeStamp);
  // This fails on Windows some times, see 1765563
#if defined(XP_WIN)
  NS_ASSERTION(GetMediaTime() <= clockTime, "Clock should go forwards.");
#else
  MOZ_ASSERT(GetMediaTime() <= clockTime, "Clock should go forwards.");
#endif
  return clockTime;
}
   4283 
// Periodic tick while playing: advance the playback position from the sink
// clock (clamped to the decoded end times and corrected for looping),
// reschedule the next tick, and emit a PlaybackProgressed event.
void MediaDecoderStateMachine::UpdatePlaybackPositionPeriodically() {
  MOZ_ASSERT(OnTaskQueue());

  if (!IsPlaying()) {
    return;
  }

  // Cap the current time to the larger of the audio and video end time.
  // This ensures that if we're running off the system clock, we don't
  // advance the clock to after the media end time.
  if (VideoEndTime() > TimeUnit::Zero() || AudioEndTime() > TimeUnit::Zero()) {
    auto clockTime = GetClock();
    // Once looping was turned on, the time is probably larger than the duration
    // of the media track, so the time over the end should be corrected.
    AdjustByLooping(clockTime);
    bool loopback = clockTime < GetMediaTime() && mLooping;
    if (loopback && mBypassingSkipToNextKeyFrameCheck) {
      LOG("media has looped back, no longer bypassing skip-to-next-key-frame");
      mBypassingSkipToNextKeyFrameCheck = false;
    }

    // Skip frames up to the frame at the playback position, and figure out
    // the time remaining until it's time to display the next frame and drop
    // the current frame.
    NS_ASSERTION(clockTime >= TimeUnit::Zero(),
                 "Should have positive clock time.");

    // These will be non -1 if we've displayed a video frame, or played an audio
    // frame.
    auto maxEndTime = std::max(VideoEndTime(), AudioEndTime());
    auto t = std::min(clockTime, maxEndTime);
    // FIXME: Bug 1091422 - chained ogg files hit this assertion.
    // MOZ_ASSERT(t >= GetMediaTime());
    // Only move the position forward — except on loopback, where it must
    // jump back to the start of the loop.
    if (loopback || t > GetMediaTime()) {
      UpdatePlaybackPosition(t);
    }
  }
  // Note we have to update playback position before releasing the monitor.
  // Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside
  // the monitor and get a staled value from GetCurrentTimeUs() which hits the
  // assertion in GetClock().

  // Schedule the next tick; the interval shrinks as the rate increases so
  // position updates keep pace with playback.
  int64_t delay = std::max<int64_t>(
      1, static_cast<int64_t>(AUDIO_DURATION_USECS / mPlaybackRate));
  ScheduleStateMachineIn(TimeUnit::FromMicroseconds(delay));

  // Notify the listener as we progress in the playback offset. Note it would
  // be too intensive to send notifications for each popped audio/video sample.
  // It is good enough to send 'PlaybackProgressed' events every 40us (defined
  // by AUDIO_DURATION_USECS), and we ensure 'PlaybackProgressed' events are
  // always sent after 'PlaybackStarted' and before 'PlaybackStopped'.
  mOnPlaybackEvent.Notify(MediaPlaybackEvent{
      MediaPlaybackEvent::PlaybackProgressed, mPlaybackOffset});
}
   4338 
// Dispatch a RunStateMachine task to the task queue. Requests are
// coalesced: while one dispatch is outstanding, further calls are no-ops.
void MediaDecoderStateMachine::ScheduleStateMachine() {
  MOZ_ASSERT(OnTaskQueue());
  if (mDispatchedStateMachine) {
    return;
  }
  mDispatchedStateMachine = true;

  nsresult rv = OwnerThread()->Dispatch(
      NewRunnableMethod("MediaDecoderStateMachine::RunStateMachine", this,
                        &MediaDecoderStateMachine::RunStateMachine));
  // Dispatch to our own live task queue is not expected to fail.
  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
  (void)rv;
}
   4352 
// Schedule a RunStateMachine step aTime from now via the delayed
// scheduler. An already-pending immediate dispatch takes precedence and
// makes this a no-op.
void MediaDecoderStateMachine::ScheduleStateMachineIn(const TimeUnit& aTime) {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::ScheduleStateMachineIn",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());  // mDelayedScheduler.Ensure() may Disconnect()
                              // the promise, which must happen on the state
                              // machine task queue.
  MOZ_ASSERT(aTime > TimeUnit::Zero());
  if (mDispatchedStateMachine) {
    return;
  }

  TimeStamp target = TimeStamp::Now() + aTime.ToTimeDuration();

  // It is OK to capture 'this' without causing UAF because the callback
  // always happens before shutdown.
  RefPtr<MediaDecoderStateMachine> self = this;
  mDelayedScheduler.Ensure(
      target,
      [self]() {
        self->mDelayedScheduler.CompleteRequest();
        self->RunStateMachine();
      },
      []() { MOZ_DIAGNOSTIC_CRASH("ScheduleStateMachineIn reject"); });
}
   4377 
   4378 bool MediaDecoderStateMachine::IsStateMachineScheduled() const {
   4379  MOZ_ASSERT(OnTaskQueue());
   4380  return mDispatchedStateMachine || mDelayedScheduler.IsScheduled();
   4381 }
   4382 
   4383 void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate) {
   4384  MOZ_ASSERT(OnTaskQueue());
   4385  MOZ_ASSERT(aPlaybackRate != 0, "Should be handled by MediaDecoder::Pause()");
   4386  PROFILER_MARKER_TEXT("MDSM::SetPlaybackRate", MEDIA_PLAYBACK, {},
   4387                       nsPrintfCString("PlaybackRate:%f", aPlaybackRate));
   4388  mPlaybackRate = aPlaybackRate;
   4389  mMediaSink->SetPlaybackRate(mPlaybackRate);
   4390 
   4391  // Schedule next cycle to check if we can stop prerolling.
   4392  ScheduleStateMachine();
   4393 }
   4394 
// Mirror-change handler: push the updated preserves-pitch flag down to the
// media sink so time-stretching keeps (or stops keeping) the original pitch.
void MediaDecoderStateMachine::PreservesPitchChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::PreservesPitchChanged",
                      MEDIA_PLAYBACK);
  PROFILER_MARKER_TEXT(
      "MDSM::PreservesPitchChanged", MEDIA_PLAYBACK, {},
      nsPrintfCString("PreservesPitch:%d", mPreservesPitch.Ref()));
  MOZ_ASSERT(OnTaskQueue());
  mMediaSink->SetPreservesPitch(mPreservesPitch);
}
   4404 
// Mirror-change handler for the element's 'loop' attribute. Only the current
// state object reacts, and only when seamless looping is allowed; otherwise
// looping is implemented by a regular seek back to the start elsewhere.
void MediaDecoderStateMachine::LoopingChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::LoopingChanged",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  LOGV("LoopingChanged, looping=%d", mLooping.Ref());
  PROFILER_MARKER_TEXT("MDSM::LoopingChanged", MEDIA_PLAYBACK, {},
                       mLooping ? "true"_ns : "false"_ns);
  if (mSeamlessLoopingAllowed) {
    mStateObj->HandleLoopingChanged();
  }
}
   4416 
// Mirror-change handler: propagate the updated stream name to the media sink.
void MediaDecoderStateMachine::StreamNameChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::StreamNameChanged",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  mMediaSink->SetStreamName(mStreamName);
}
   4424 
// Rebuild the media sink when the output-capture state changes (e.g. when the
// element's output starts/stops being captured into a MediaStream). Tears
// down the current sink, creates a matching replacement, and restores the
// previous playing state.
void MediaDecoderStateMachine::UpdateOutputCaptured() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::UpdateOutputCaptured",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT_IF(
      mOutputCaptureState == MediaDecoder::OutputCaptureState::Capture,
      mOutputDummyTrack.Ref());

  // Reset these flags so they are consistent with the status of the sink.
  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
  // to reset these flags when switching MediaSinks.
  mAudioCompleted = false;
  mVideoCompleted = false;

  // Don't create a new media sink if we're still suspending media sink.
  if (!mIsMediaSinkSuspended) {
    // Remember whether we were playing so the new sink can be restarted in
    // the same state.
    const bool wasPlaying = IsPlaying();
    // Stop and shut down the existing sink.
    StopMediaSink();
    mMediaSink->Shutdown();

    // Create a new sink according to whether output is captured.
    mMediaSink = CreateMediaSink();
    if (wasPlaying) {
      DebugOnly<nsresult> rv = StartMediaSink();
      MOZ_ASSERT(NS_SUCCEEDED(rv));
    }
  }

  // Don't buffer as much when audio is captured because we don't need to worry
  // about high latency audio devices.
  mAmpleAudioThreshold =
      mOutputCaptureState != MediaDecoder::OutputCaptureState::None
          ? detail::AMPLE_AUDIO_THRESHOLD / 2
          : detail::AMPLE_AUDIO_THRESHOLD;

  // Let the current state object react to the sink swap.
  mStateObj->HandleAudioCaptured();
}
   4463 
// Mirror-change handler: republish the output principal through the canonical
// so observers on other threads see the new value.
void MediaDecoderStateMachine::OutputPrincipalChanged() {
  MOZ_ASSERT(OnTaskQueue());
  mCanonicalOutputPrincipal = mOutputPrincipal;
}
   4468 
// Main-thread entry point: hop to the state-machine task queue and run
// SetSink() there, returning the promise for the device switch.
RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
    const RefPtr<AudioDeviceInfo>& aSink) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aSink);

  return InvokeAsync(OwnerThread(), this, __func__,
                     &MediaDecoderStateMachine::SetSink, aSink);
}
   4477 
   4478 RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
   4479    RefPtr<AudioDeviceInfo> aDevice) {
   4480  MOZ_ASSERT(OnTaskQueue());
   4481  if (mIsMediaSinkSuspended) {
   4482    // Don't create a new media sink when suspended.
   4483    return GenericPromise::CreateAndResolve(true, __func__);
   4484  }
   4485 
   4486  return mMediaSink->SetAudioDevice(std::move(aDevice));
   4487 }
   4488 
   4489 void MediaDecoderStateMachine::InvokeSuspendMediaSink() {
   4490  MOZ_ASSERT(NS_IsMainThread());
   4491 
   4492  nsresult rv = OwnerThread()->Dispatch(
   4493      NewRunnableMethod("MediaDecoderStateMachine::SuspendMediaSink", this,
   4494                        &MediaDecoderStateMachine::SuspendMediaSink));
   4495  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   4496  (void)rv;
   4497 }
   4498 
// Stop and shut down the media sink without destroying decoded data, marking
// the sink as suspended so other paths (SetSink, UpdateOutputCaptured) avoid
// creating a replacement until ResumeMediaSink() runs. Idempotent.
void MediaDecoderStateMachine::SuspendMediaSink() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::SuspendMediaSink",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  if (mIsMediaSinkSuspended) {
    return;
  }
  LOG("SuspendMediaSink");
  mIsMediaSinkSuspended = true;
  StopMediaSink();
  mMediaSink->Shutdown();
}
   4511 
   4512 void MediaDecoderStateMachine::InvokeResumeMediaSink() {
   4513  MOZ_ASSERT(NS_IsMainThread());
   4514 
   4515  nsresult rv = OwnerThread()->Dispatch(
   4516      NewRunnableMethod("MediaDecoderStateMachine::ResumeMediaSink", this,
   4517                        &MediaDecoderStateMachine::ResumeMediaSink));
   4518  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   4519  (void)rv;
   4520 }
   4521 
// Undo SuspendMediaSink(): clear the suspended flag and, if the previous sink
// was shut down, build a fresh one and restart playback if appropriate.
// Idempotent.
void MediaDecoderStateMachine::ResumeMediaSink() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::ResumeMediaSink",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  if (!mIsMediaSinkSuspended) {
    return;
  }
  LOG("ResumeMediaSink");
  mIsMediaSinkSuspended = false;
  if (!mMediaSink->IsStarted()) {
    mMediaSink = CreateMediaSink();
    MaybeStartPlayback();
  }
}
   4536 
// Mirror-change handler: hand the new secondary video container (used e.g.
// for picture-in-picture) to the sink, then notify listeners it is installed.
void MediaDecoderStateMachine::UpdateSecondaryVideoContainer() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::UpdateSecondaryVideoContainer",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  MOZ_DIAGNOSTIC_ASSERT(mMediaSink);
  mMediaSink->SetSecondaryVideoContainer(mSecondaryVideoContainer.Ref());
  mOnSecondaryVideoContainerInstalled.Notify(mSecondaryVideoContainer.Ref());
}
   4545 
   4546 TimeUnit MediaDecoderStateMachine::AudioEndTime() const {
   4547  MOZ_ASSERT(OnTaskQueue());
   4548  if (mMediaSink->IsStarted()) {
   4549    return mMediaSink->GetEndTime(TrackInfo::kAudioTrack);
   4550  }
   4551  return GetMediaTime();
   4552 }
   4553 
   4554 TimeUnit MediaDecoderStateMachine::VideoEndTime() const {
   4555  MOZ_ASSERT(OnTaskQueue());
   4556  if (mMediaSink->IsStarted()) {
   4557    return mMediaSink->GetEndTime(TrackInfo::kVideoTrack);
   4558  }
   4559  return GetMediaTime();
   4560 }
   4561 
// Resolution handler for the sink's video-ended promise: record completion
// and poke the state machine so it can notice playback may have ended.
void MediaDecoderStateMachine::OnMediaSinkVideoComplete() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasVideo());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkVideoComplete",
                      MEDIA_PLAYBACK);
  LOG("[%s]", __func__);

  mMediaSinkVideoEndedPromise.Complete();
  mVideoCompleted = true;
  ScheduleStateMachine();
}
   4573 
// Rejection handler for the sink's video-ended promise. With an audio track
// present we keep playing sound-only; otherwise the whole playback fails.
void MediaDecoderStateMachine::OnMediaSinkVideoError() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasVideo());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkVideoError",
                      MEDIA_PLAYBACK);
  LOGE("[%s]", __func__);

  mMediaSinkVideoEndedPromise.Complete();
  mVideoCompleted = true;
  if (HasAudio()) {
    // Best effort: let audio-only playback continue.
    return;
  }
  DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
}
   4588 
// Resolution handler for the sink's audio-ended promise: record completion,
// poke the state machine, and tell Decoder Doctor the audio sink is healthy.
void MediaDecoderStateMachine::OnMediaSinkAudioComplete() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasAudio());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkAudioComplete",
                      MEDIA_PLAYBACK);
  LOG("[%s]", __func__);

  mMediaSinkAudioEndedPromise.Complete();
  mAudioCompleted = true;
  // To notify PlaybackEnded as soon as possible.
  ScheduleStateMachine();

  // Report OK to Decoder Doctor (to know if issue may have been resolved).
  mOnDecoderDoctorEvent.Notify(
      DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, NS_OK});
}
   4605 
// Rejection handler for the sink's audio-ended promise. The failure code is
// forwarded to Decoder Doctor; playback continues video-only when possible,
// otherwise the error is surfaced to the decoder/element.
void MediaDecoderStateMachine::OnMediaSinkAudioError(nsresult aResult) {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasAudio());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkAudioError",
                      MEDIA_PLAYBACK);
  LOGE("[%s]", __func__);

  mMediaSinkAudioEndedPromise.Complete();
  mAudioCompleted = true;

  // Result should never be NS_OK in this *error* handler. Report to Dec-Doc.
  MOZ_ASSERT(NS_FAILED(aResult));
  mOnDecoderDoctorEvent.Notify(
      DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, aResult});

  // Make the best effort to continue playback when there is video.
  if (HasVideo()) {
    return;
  }

  // Otherwise notify media decoder/element about this error for it makes
  // no sense to play an audio-only file without sound output.
  DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
}
   4630 
   4631 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const {
   4632  MOZ_ASSERT(OnTaskQueue());
   4633  if (mReader->VideoIsHardwareAccelerated()) {
   4634    // HW decoding should be fast so queue size can be as small as possible
   4635    // to lower frame latency.
   4636    uint32_t hw = std::max<uint32_t>(
   4637        StaticPrefs::media_video_queue_hw_accel_size(), MIN_VIDEO_QUEUE_SIZE);
   4638    mReader->GetMinVideoQueueSize().apply(
   4639        [&hw](const uint32_t& x) { hw = std::max(hw, x); });
   4640    return hw;
   4641  } else {
   4642    // SW decoding is slower and queuing more frames in advance reduces the
   4643    // chances of dropping late frames.
   4644    uint32_t sw = std::max<uint32_t>(
   4645        StaticPrefs::media_video_queue_default_size(), MIN_VIDEO_QUEUE_SIZE);
   4646    mReader->GetMaxVideoQueueSize().apply(
   4647        [&sw](const uint32_t& x) { sw = std::min(sw, x); });
   4648    return sw;
   4649  }
   4650 }
   4651 
// Snapshot the state machine's internals into |aInfo| for about:media /
// debugging. Must run on the task queue; -1 marks values that are not
// currently available (unknown duration, sink not started).
void MediaDecoderStateMachine::GetDebugInfo(
    dom::MediaDecoderStateMachineDebugInfo& aInfo) {
  MOZ_ASSERT(OnTaskQueue());
  aInfo.mDuration =
      mDuration.Ref() ? mDuration.Ref().ref().ToMicroseconds() : -1;
  aInfo.mMediaTime = GetMediaTime().ToMicroseconds();
  aInfo.mClock = mMediaSink->IsStarted() ? GetClock().ToMicroseconds() : -1;
  aInfo.mPlayState = int32_t(mPlayState.Ref());
  aInfo.mSentFirstFrameLoadedEvent = mSentFirstFrameLoadedEvent;
  aInfo.mIsPlaying = IsPlaying();
  CopyUTF8toUTF16(MakeStringSpan(AudioRequestStatus()),
                  aInfo.mAudioRequestStatus);
  CopyUTF8toUTF16(MakeStringSpan(VideoRequestStatus()),
                  aInfo.mVideoRequestStatus);
  aInfo.mDecodedAudioEndTime = mDecodedAudioEndTime.ToMicroseconds();
  aInfo.mDecodedVideoEndTime = mDecodedVideoEndTime.ToMicroseconds();
  aInfo.mAudioCompleted = mAudioCompleted;
  aInfo.mVideoCompleted = mVideoCompleted;
  // Delegate state- and sink-specific details to their owners.
  mStateObj->GetDebugInfo(aInfo.mStateObj);
  mMediaSink->GetDebugInfo(aInfo.mMediaSink);
  aInfo.mTotalBufferingTimeMs = mTotalBufferingDuration.ToMilliseconds();
}
   4674 
// Asynchronously fill |aInfo| on the task queue and resolve the returned
// promise when done. Rejects immediately when shutting down.
// NOTE(review): |aInfo| is captured by reference into the dispatched runnable,
// so the caller must keep it alive until the promise settles — presumably
// guaranteed by the caller awaiting the promise; confirm at call sites.
RefPtr<GenericPromise> MediaDecoderStateMachine::RequestDebugInfo(
    dom::MediaDecoderStateMachineDebugInfo& aInfo) {
  if (mShuttingDown) {
    return GenericPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }

  RefPtr<GenericPromise::Private> p = new GenericPromise::Private(__func__);
  RefPtr<MediaDecoderStateMachine> self = this;
  nsresult rv = OwnerThread()->Dispatch(
      NS_NewRunnableFunction("MediaDecoderStateMachine::RequestDebugInfo",
                             [self, p, &aInfo]() {
                               self->GetDebugInfo(aInfo);
                               p->Resolve(true, __func__);
                             }),
      AbstractThread::TailDispatch);
  MOZ_ASSERT(NS_SUCCEEDED(rv));
  (void)rv;
  return p;
}
   4694 
   4695 class VideoQueueMemoryFunctor : public nsDequeFunctor<VideoData> {
   4696 public:
   4697  VideoQueueMemoryFunctor() : mSize(0) {}
   4698 
   4699  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
   4700 
   4701  virtual void operator()(VideoData* aObject) override {
   4702    mSize += aObject->SizeOfIncludingThis(MallocSizeOf);
   4703  }
   4704 
   4705  size_t mSize;
   4706 };
   4707 
   4708 class AudioQueueMemoryFunctor : public nsDequeFunctor<AudioData> {
   4709 public:
   4710  AudioQueueMemoryFunctor() : mSize(0) {}
   4711 
   4712  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
   4713 
   4714  virtual void operator()(AudioData* aObject) override {
   4715    mSize += aObject->SizeOfIncludingThis(MallocSizeOf);
   4716  }
   4717 
   4718  size_t mSize;
   4719 };
   4720 
// Total heap bytes held by the decoded-video queue (memory reporting).
size_t MediaDecoderStateMachine::SizeOfVideoQueue() const {
  VideoQueueMemoryFunctor functor;
  mVideoQueue.LockedForEach(functor);
  return functor.mSize;
}
   4726 
// Total heap bytes held by the decoded-audio queue (memory reporting).
size_t MediaDecoderStateMachine::SizeOfAudioQueue() const {
  AudioQueueMemoryFunctor functor;
  mAudioQueue.LockedForEach(functor);
  return functor.mSize;
}
   4732 
   4733 const char* MediaDecoderStateMachine::AudioRequestStatus() const {
   4734  MOZ_ASSERT(OnTaskQueue());
   4735  if (IsRequestingAudioData()) {
   4736    MOZ_DIAGNOSTIC_ASSERT(!IsWaitingAudioData());
   4737    return "pending";
   4738  }
   4739 
   4740  if (IsWaitingAudioData()) {
   4741    return "waiting";
   4742  }
   4743  return "idle";
   4744 }
   4745 
   4746 const char* MediaDecoderStateMachine::VideoRequestStatus() const {
   4747  MOZ_ASSERT(OnTaskQueue());
   4748  if (IsRequestingVideoData()) {
   4749    MOZ_DIAGNOSTIC_ASSERT(!IsWaitingVideoData());
   4750    return "pending";
   4751  }
   4752 
   4753  if (IsWaitingVideoData()) {
   4754    return "waiting";
   4755  }
   4756  return "idle";
   4757 }
   4758 
// Fired when the video-decode-suspend timer elapses: complete the timer
// request and let the current state decide whether to suspend video decoding.
// NOTE(review): unlike its siblings this lacks MOZ_ASSERT(OnTaskQueue());
// presumably the timer only ever fires on the task queue — confirm.
void MediaDecoderStateMachine::OnSuspendTimerResolved() {
  LOG("OnSuspendTimerResolved");
  mVideoDecodeSuspendTimer.CompleteRequest();
  mStateObj->HandleVideoSuspendTimeout();
}
   4764 
// Cancel a pending video-decode-suspend timer (if any), notifying listeners
// only when a timer was actually scheduled. Reset() is safe to call
// unconditionally.
void MediaDecoderStateMachine::CancelSuspendTimer() {
  LOG("CancelSuspendTimer: State: %s, Timer.IsScheduled: %c",
      ToStateStr(mStateObj->GetState()),
      mVideoDecodeSuspendTimer.IsScheduled() ? 'T' : 'F');
  MOZ_ASSERT(OnTaskQueue());
  if (mVideoDecodeSuspendTimer.IsScheduled()) {
    mOnPlaybackEvent.Notify(MediaPlaybackEvent::CancelVideoSuspendTimer);
  }
  mVideoDecodeSuspendTimer.Reset();
}
   4775 
// Map a monotonically-growing clock time (which keeps increasing across
// seamless-loop iterations) back into the media's [0, duration] range,
// mutating |aTime| in place. No-op until at least one loop has been decoded
// (mOriginalDecodedDuration is still zero).
void MediaDecoderStateMachine::AdjustByLooping(media::TimeUnit& aTime) const {
  MOZ_ASSERT(OnTaskQueue());

  // No need to adjust time.
  if (mOriginalDecodedDuration == media::TimeUnit::Zero()) {
    return;
  }

  // There are situations where we need to perform subtraction instead of modulo
  // to accurately adjust the clock. When we are not in a state of seamless
  // looping, it is usually necessary to normalize the clock time within the
  // range of [0, duration]. However, if the current clock time is greater than
  // the duration (i.e., duration+1) and not in looping, we should not adjust it
  // to 1 as we are not looping back to the starting position. Instead, we
  // should leave the clock time unchanged and trim it later to match the
  // maximum duration time.
  if (mStateObj->GetState() != DECODER_STATE_LOOPING_DECODING) {
    // Use the smaller offset rather than the larger one, as the larger offset
    // indicates the next round of looping. For example, if the duration is X
    // and the playback is currently in the third round of looping, both
    // queues will have an offset of 3X. However, if the audio decoding is
    // faster and the fourth round of data has already been added to the audio
    // queue, the audio offset will become 4X. Since playback is still in the
    // third round, we should use the smaller offset of 3X to adjust the time.
    TimeUnit offset = TimeUnit::FromInfinity();
    if (HasAudio()) {
      offset = std::min(AudioQueue().GetOffset(), offset);
    }
    if (HasVideo()) {
      offset = std::min(VideoQueue().GetOffset(), offset);
    }
    // Subtract the completed-loop offset; if the clock hasn't passed the
    // offset yet, fall through to the modulo below.
    if (aTime > offset) {
      aTime -= offset;
      return;
    }
  }

  // When seamless looping happens at least once, it doesn't matter if we're
  // looping or not.
  aTime = aTime % mOriginalDecodedDuration;
}
   4817 
   4818 bool MediaDecoderStateMachine::IsInSeamlessLooping() const {
   4819  return mLooping && mSeamlessLoopingAllowed;
   4820 }
   4821 
   4822 bool MediaDecoderStateMachine::HasLastDecodedData(MediaData::Type aType) {
   4823  MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
   4824                        aType == MediaData::Type::VIDEO_DATA);
   4825  if (aType == MediaData::Type::AUDIO_DATA) {
   4826    return mDecodedAudioEndTime != TimeUnit::Zero();
   4827  }
   4828  return mDecodedVideoEndTime != TimeUnit::Zero();
   4829 }
   4830 
// Whether this (internal) state machine can work with the given CDM proxy.
// WMF-based CDM proxies are only usable with the external state machine, so
// they are rejected here when MOZ_WMF_CDM builds are in effect.
nsresult MediaDecoderStateMachine::IsCDMProxySupported(CDMProxy* aProxy) {
#ifdef MOZ_WMF_CDM
  MOZ_ASSERT(aProxy);
  // This proxy only works with the external state machine.
  if (aProxy->AsWMFCDMProxy()) {
    return NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR;
  }
#endif
  return NS_OK;
}
   4841 
// Install the CDM proxy on the reader, flagging encrypted custom-ident mode
// first when the proxy arrives before initialization, then delegate to the
// base-class implementation.
RefPtr<SetCDMPromise> MediaDecoderStateMachine::SetCDMProxy(CDMProxy* aProxy) {
  // Playback hasn't started yet.
  if (!mInitialized) {
    mReader->SetEncryptedCustomIdent();
  }
  return MediaDecoderStateMachineBase::SetCDMProxy(aProxy);
}
   4849 
   4850 }  // namespace mozilla
   4851 
   4852 // avoid redefined macro in unified build
   4853 #undef LOG
   4854 #undef LOGV
   4855 #undef LOGW
   4856 #undef LOGE
   4857 #undef SLOGW
   4858 #undef SLOGE
   4859 #undef NS_DispatchToMainThread