tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

AudioDestinationNode.cpp (21319B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "AudioDestinationNode.h"
      8 
      9 #include "AlignmentUtils.h"
     10 #include "AudibilityMonitor.h"
     11 #include "AudioContext.h"
     12 #include "AudioNodeEngine.h"
     13 #include "AudioNodeTrack.h"
     14 #include "CubebUtils.h"
     15 #include "MediaTrackGraph.h"
     16 #include "Tracing.h"
     17 #include "mozilla/StaticPrefs_dom.h"
     18 #include "mozilla/dom/AudioDestinationNodeBinding.h"
     19 #include "mozilla/dom/BaseAudioContextBinding.h"
     20 #include "mozilla/dom/OfflineAudioCompletionEvent.h"
     21 #include "mozilla/dom/Promise.h"
     22 #include "mozilla/dom/ScriptSettings.h"
     23 #include "mozilla/dom/WakeLock.h"
     24 #include "mozilla/dom/power/PowerManagerService.h"
     25 #include "nsContentUtils.h"
     26 #include "nsGlobalWindowInner.h"
     27 
     28 extern mozilla::LazyLogModule gAudioChannelLog;
     29 
     30 #define AUDIO_CHANNEL_LOG(msg, ...) \
     31  MOZ_LOG(gAudioChannelLog, LogLevel::Debug, (msg, ##__VA_ARGS__))
     32 
     33 namespace mozilla::dom {
     34 
     35 namespace {
     36 class OnCompleteTask final : public Runnable {
     37 public:
     38  OnCompleteTask(AudioContext* aAudioContext, AudioBuffer* aRenderedBuffer)
     39      : Runnable("dom::OfflineDestinationNodeEngine::OnCompleteTask"),
     40        mAudioContext(aAudioContext),
     41        mRenderedBuffer(aRenderedBuffer) {}
     42 
     43  NS_IMETHOD Run() override {
     44    OfflineAudioCompletionEventInit param;
     45    param.mRenderedBuffer = mRenderedBuffer;
     46 
     47    RefPtr<OfflineAudioCompletionEvent> event =
     48        OfflineAudioCompletionEvent::Constructor(mAudioContext, u"complete"_ns,
     49                                                 param);
     50    mAudioContext->DispatchTrustedEvent(event);
     51 
     52    return NS_OK;
     53  }
     54 
     55 private:
     56  RefPtr<AudioContext> mAudioContext;
     57  RefPtr<AudioBuffer> mRenderedBuffer;
     58 };
     59 }  // anonymous namespace
     60 
// Engine for the destination node of an OfflineAudioContext: instead of
// sending audio to an output device, it accumulates every rendered block
// into a ThreadSharedFloatArrayBufferList until mLength frames have been
// captured, then the buffered result is surfaced via CreateAudioBuffer().
class OfflineDestinationNodeEngine final : public AudioNodeEngine {
 public:
  explicit OfflineDestinationNodeEngine(AudioDestinationNode* aNode)
      : AudioNodeEngine(aNode),
        mWriteIndex(0),
        mNumberOfChannels(aNode->ChannelCount()),
        mLength(aNode->Length()),
        mSampleRate(aNode->Context()->SampleRate()),
        mBufferAllocated(false) {}

  // Runs on the rendering thread. Records aInput (scaled by aInput.mVolume)
  // into mBuffer at mWriteIndex and sets *aFinished once mLength frames
  // have been recorded.
  void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override {
    TRACE("OfflineDestinationNodeEngine::ProcessBlock");
    // Do this just for the sake of political correctness; this output
    // will not go anywhere.
    *aOutput = aInput;

    // The output buffer is allocated lazily, on the rendering thread, when
    // non-null input is received.
    if (!mBufferAllocated && !aInput.IsNull()) {
      // These allocations might fail if content provides a huge number of
      // channels or size, but it's OK since we'll deal with the failure
      // gracefully.
      mBuffer = ThreadSharedFloatArrayBufferList::Create(mNumberOfChannels,
                                                         mLength, fallible);
      if (mBuffer && mWriteIndex) {
        // Zero leading for any null chunks that were skipped.
        for (uint32_t i = 0; i < mNumberOfChannels; ++i) {
          float* channelData = mBuffer->GetDataForWrite(i);
          PodZero(channelData, mWriteIndex);
        }
      }

      // Set even when allocation failed, so we don't retry on every block.
      mBufferAllocated = true;
    }

    // Skip copying if there is no buffer.
    uint32_t outputChannelCount = mBuffer ? mNumberOfChannels : 0;

    // Record our input buffer
    MOZ_ASSERT(mWriteIndex < mLength, "How did this happen?");
    // The final block of the render may be shorter than WEBAUDIO_BLOCK_SIZE.
    const uint32_t duration =
        std::min(WEBAUDIO_BLOCK_SIZE, mLength - mWriteIndex);
    const uint32_t inputChannelCount = aInput.ChannelCount();
    for (uint32_t i = 0; i < outputChannelCount; ++i) {
      float* outputData = mBuffer->GetDataForWrite(i) + mWriteIndex;
      if (aInput.IsNull() || i >= inputChannelCount) {
        // Null input, or a channel the input doesn't provide: record silence.
        PodZero(outputData, duration);
      } else {
        const float* inputBuffer =
            static_cast<const float*>(aInput.mChannelData[i]);
        if (duration == WEBAUDIO_BLOCK_SIZE && IS_ALIGNED16(inputBuffer)) {
          // Use the optimized version of the copy with scale operation
          AudioBlockCopyChannelWithScale(inputBuffer, aInput.mVolume,
                                         outputData);
        } else {
          if (aInput.mVolume == 1.0f) {
            // Unit volume: a plain copy suffices.
            PodCopy(outputData, inputBuffer, duration);
          } else {
            // Scalar fallback: apply the block volume sample by sample.
            for (uint32_t j = 0; j < duration; ++j) {
              outputData[j] = aInput.mVolume * inputBuffer[j];
            }
          }
        }
      }
    }
    mWriteIndex += duration;

    if (mWriteIndex >= mLength) {
      NS_ASSERTION(mWriteIndex == mLength, "Overshot length");
      // Go to finished state. When the graph's current time eventually reaches
      // the end of the track, then the main thread will be notified and we'll
      // shut down the AudioContext.
      *aFinished = true;
    }
  }

  bool IsActive() const override {
    // Keep processing to track the track's time, which is used for all
    // timelines associated with the same AudioContext.
    return true;
  }

  // Wraps the recorded samples in an AudioBuffer for the completion event.
  // Transfers ownership of mBuffer; returns nullptr on failure.
  already_AddRefed<AudioBuffer> CreateAudioBuffer(AudioContext* aContext) {
    MOZ_ASSERT(NS_IsMainThread());
    // Create the input buffer
    ErrorResult rv;
    RefPtr<AudioBuffer> renderedBuffer =
        AudioBuffer::Create(aContext->GetOwnerWindow(), mNumberOfChannels,
                            mLength, mSampleRate, mBuffer.forget(), rv);
    if (rv.Failed()) {
      rv.SuppressException();
      return nullptr;
    }

    return renderedBuffer.forget();
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mBuffer) {
      amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
    }
    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  // The input to the destination node is recorded in mBuffer.
  // When this buffer fills up with mLength frames, the buffered input is sent
  // to the main thread in order to dispatch OfflineAudioCompletionEvent.
  RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  // An index representing the next offset in mBuffer to be written to.
  uint32_t mWriteIndex;
  // Channel count requested for the offline render.
  uint32_t mNumberOfChannels;
  // How many frames the OfflineAudioContext intends to produce.
  uint32_t mLength;
  // Context sample rate, forwarded to the rendered AudioBuffer.
  float mSampleRate;
  // True once an allocation of mBuffer has been attempted (even if it failed).
  bool mBufferAllocated;
};
    185 
// Engine for the realtime destination node: passes its input through to the
// device output (scaled by the mute/unmute volume) and monitors audibility,
// notifying AudioDestinationNode on the main thread whenever the audible
// state flips.
class DestinationNodeEngine final : public AudioNodeEngine {
 public:
  explicit DestinationNodeEngine(AudioDestinationNode* aNode)
      : AudioNodeEngine(aNode),
        mSampleRate(CubebUtils::PreferredSampleRate(
            aNode->Context()->ShouldResistFingerprinting())),
        mVolume(1.0f),
        mAudibilityMonitor(
            mSampleRate,
            StaticPrefs::dom_media_silence_duration_for_audibility()),
        mSuspended(false),
        mIsAudible(false) {
    MOZ_ASSERT(aNode);
  }

  // Runs on the rendering thread. Output = input * mVolume; audibility
  // tracking is skipped entirely while suspended.
  void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override {
    TRACE("DestinationNodeEngine::ProcessBlock");
    *aOutput = aInput;
    aOutput->mVolume *= mVolume;

    if (mSuspended) {
      return;
    }

    mAudibilityMonitor.Process(aInput);
    // Audible only if the signal itself was recently audible AND the output
    // is not muted down to zero.
    bool isAudible =
        mAudibilityMonitor.RecentlyAudible() && aOutput->mVolume > 0.0;
    if (isAudible != mIsAudible) {
      mIsAudible = isAudible;
      // Capture a strong track ref; the node is resolved back on the main
      // thread via the engine, where it is safe to touch DOM objects.
      RefPtr<AudioNodeTrack> track = aTrack;
      auto r = [track, isAudible]() -> void {
        MOZ_ASSERT(NS_IsMainThread());
        RefPtr<AudioNode> node = track->Engine()->NodeMainThread();
        if (node) {
          RefPtr<AudioDestinationNode> destinationNode =
              static_cast<AudioDestinationNode*>(node.get());
          destinationNode->NotifyDataAudibleStateChanged(isAudible);
        }
      };

      aTrack->Graph()->DispatchToMainThreadStableState(NS_NewRunnableFunction(
          "dom::WebAudioAudibleStateChangedRunnable", r));
    }
  }

  bool IsActive() const override {
    // Keep processing to track the track's time, which is used for all
    // timelines associated with the same AudioContext.  If there are no other
    // engines for the AudioContext, then this could return false to suspend
    // the track, but the track is blocked anyway through
    // AudioDestinationNode::SetIsOnlyNodeForContext().
    return true;
  }

  // VOLUME parameter: set from Mute()/Unmute() on the main thread.
  void SetDoubleParameter(uint32_t aIndex, double aParam) override {
    if (aIndex == VOLUME) {
      mVolume = static_cast<float>(aParam);
    }
  }

  // SUSPENDED parameter: suspending also resets the audible flag so that a
  // later resume re-reports audibility from scratch.
  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
    if (aIndex == SUSPENDED) {
      mSuspended = !!aParam;
      if (mSuspended) {
        mIsAudible = false;
      }
    }
  }

  enum Parameters {
    VOLUME,
    SUSPENDED,
  };

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  // Preferred device rate, fed to the audibility monitor.
  int mSampleRate;
  // Main-thread-controlled mute volume (0.0 muted, 1.0 unmuted).
  float mVolume;
  AudibilityMonitor mAudibilityMonitor;
  bool mSuspended;
  // Last audible state reported to the main thread.
  bool mIsAudible;
};
    273 
// XPCOM / cycle-collection boilerplate: the destination node participates in
// cycle collection through its channel agent and the offline-rendering
// promise, and exposes nsIAudioChannelAgentCallback in addition to what
// AudioNode already implements.
NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioDestinationNode, AudioNode,
                                   mAudioChannelAgent, mOfflineRenderingPromise)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioDestinationNode)
  NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)

// Flags shared by the realtime and offline destination tracks (see the
// constructor and Track()).
const AudioNodeTrack::Flags kTrackFlags =
    AudioNodeTrack::NEED_MAIN_THREAD_CURRENT_TIME |
    AudioNodeTrack::NEED_MAIN_THREAD_ENDED | AudioNodeTrack::EXTERNAL_OUTPUT;
    287 
// Constructs the destination node for either a realtime or an offline
// context. For realtime contexts the audio graph/track is created eagerly
// here; offline contexts defer that to Track().
// @param aNumberOfChannels initial channel count (mode is always Explicit).
// @param aLength frames to produce; only meaningful for offline contexts.
AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                           bool aIsOffline,
                                           uint32_t aNumberOfChannels,
                                           uint32_t aLength)
    : AudioNode(aContext, aNumberOfChannels, ChannelCountMode::Explicit,
                ChannelInterpretation::Speakers),
      mFramesToProduce(aLength),
      mIsOffline(aIsOffline) {
  if (aIsOffline) {
    // The track is created on demand to avoid creating a graph thread that
    // may not be used.
    return;
  }

  // GetParentObject can return nullptr here. This will end up creating another
  // MediaTrackGraph
  MediaTrackGraph* graph = MediaTrackGraph::GetInstance(
      MediaTrackGraph::AUDIO_THREAD_DRIVER, aContext->GetOwnerWindow(),
      aContext->SampleRate(), MediaTrackGraph::DEFAULT_OUTPUT_DEVICE);
  // NOTE(review): ownership of |engine| presumably passes to the created
  // track — confirm against AudioNodeTrack::Create.
  AudioNodeEngine* engine = new DestinationNodeEngine(this);

  mTrack = AudioNodeTrack::Create(aContext, engine, kTrackFlags, graph);
  mTrack->AddMainThreadListener(this);
  // null key is fine: only one output per mTrack
  mTrack->AddAudioOutput(nullptr, nullptr);
}
    314 
    315 void AudioDestinationNode::Init() {
    316  // The reason we don't do that in ctor is because we have to keep AudioContext
    317  // holding a strong reference to the destination node first. If we don't do
    318  // that, initializing the agent would cause an unexpected destroy of the
    319  // destination node when destroying the local weak reference inside
    320  // `InitWithWeakCallback()`.
    321  if (!mIsOffline) {
    322    CreateAndStartAudioChannelAgent();
    323  }
    324 }
    325 
// Tears down audio-channel bookkeeping and releases the wake lock.
// Both helpers are no-ops when the corresponding object does not exist.
void AudioDestinationNode::Close() {
  DestroyAudioChannelAgentIfExists();
  ReleaseAudioWakeLockIfExists();
}
    330 
    331 void AudioDestinationNode::CreateAndStartAudioChannelAgent() {
    332  MOZ_ASSERT(!mIsOffline);
    333  MOZ_ASSERT(!mAudioChannelAgent);
    334 
    335  AudioChannelAgent* agent = new AudioChannelAgent();
    336  nsresult rv = agent->InitWithWeakCallback(GetOwnerWindow(), this);
    337  if (NS_WARN_IF(NS_FAILED(rv))) {
    338    AUDIO_CHANNEL_LOG("Failed to init audio channel agent");
    339    return;
    340  }
    341 
    342  AudibleState state =
    343      IsAudible() ? AudibleState::eAudible : AudibleState::eNotAudible;
    344  rv = agent->NotifyStartedPlaying(state);
    345  if (NS_WARN_IF(NS_FAILED(rv))) {
    346    AUDIO_CHANNEL_LOG("Failed to start audio channel agent");
    347    return;
    348  }
    349 
    350  mAudioChannelAgent = agent;
    351  mAudioChannelAgent->PullInitialUpdate();
    352 }
    353 
// By destruction time Close()/DestroyMediaTrack() must already have dropped
// the agent, the wake lock and the capture port; assert that here.
AudioDestinationNode::~AudioDestinationNode() {
  MOZ_ASSERT(!mAudioChannelAgent);
  MOZ_ASSERT(!mWakeLock);
  MOZ_ASSERT(!mCaptureTrackPort);
}
    359 
    360 size_t AudioDestinationNode::SizeOfExcludingThis(
    361    MallocSizeOf aMallocSizeOf) const {
    362  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
    363  // Might be useful in the future:
    364  // - mAudioChannelAgent
    365  return amount;
    366 }
    367 
    368 size_t AudioDestinationNode::SizeOfIncludingThis(
    369    MallocSizeOf aMallocSizeOf) const {
    370  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
    371 }
    372 
// Returns the destination track, creating it lazily for offline contexts.
// May return nullptr if this node has been unlinked from its context.
AudioNodeTrack* AudioDestinationNode::Track() {
  if (mTrack) {
    return mTrack;
  }

  AudioContext* context = Context();
  if (!context) {  // This node has been unlinked.
    return nullptr;
  }

  MOZ_ASSERT(mIsOffline, "Realtime tracks are created in constructor");

  // GetParentObject can return nullptr here when the document has been
  // unlinked.
  MediaTrackGraph* graph =
      MediaTrackGraph::CreateNonRealtimeInstance(context->SampleRate());
  // NOTE(review): ownership of |engine| presumably passes to the created
  // track — confirm against AudioNodeTrack::Create.
  AudioNodeEngine* engine = new OfflineDestinationNodeEngine(this);

  mTrack = AudioNodeTrack::Create(context, engine, kTrackFlags, graph);
  mTrack->AddMainThreadListener(this);

  return mTrack;
}
    396 
    397 void AudioDestinationNode::DestroyAudioChannelAgentIfExists() {
    398  if (mAudioChannelAgent) {
    399    mAudioChannelAgent->NotifyStoppedPlaying();
    400    mAudioChannelAgent = nullptr;
    401    if (IsCapturingAudio()) {
    402      StopAudioCapturingTrack();
    403    }
    404  }
    405 }
    406 
// Shuts down agent/wake-lock state, then destroys the underlying track.
// Close() runs unconditionally; the track teardown only if a track exists.
void AudioDestinationNode::DestroyMediaTrack() {
  Close();
  if (!mTrack) {
    return;
  }

  Context()->ShutdownWorklet();

  mTrack->RemoveMainThreadListener(this);
  AudioNode::DestroyMediaTrack();
}
    418 
    419 void AudioDestinationNode::NotifyMainThreadTrackEnded() {
    420  MOZ_ASSERT(NS_IsMainThread());
    421  MOZ_ASSERT(mTrack->IsEnded());
    422 
    423  if (mIsOffline) {
    424    AbstractThread::MainThread()->Dispatch(NewRunnableMethod(
    425        "dom::AudioDestinationNode::FireOfflineCompletionEvent", this,
    426        &AudioDestinationNode::FireOfflineCompletionEvent));
    427  }
    428 }
    429 
// Completes an offline render: closes the context, wraps the recorded
// samples in an AudioBuffer, resolves startRendering()'s promise, fires the
// "complete" event, and finally drops the self-reference taken in
// StartRendering(). The ordering here is deliberate.
void AudioDestinationNode::FireOfflineCompletionEvent() {
  AudioContext* context = Context();
  context->OfflineClose();

  OfflineDestinationNodeEngine* engine =
      static_cast<OfflineDestinationNodeEngine*>(Track()->Engine());
  RefPtr<AudioBuffer> renderedBuffer = engine->CreateAudioBuffer(context);
  if (!renderedBuffer) {
    // Buffer creation failed; leave the promise unresolved and keep the ref.
    return;
  }
  ResolvePromise(renderedBuffer);

  context->Dispatch(do_AddRef(new OnCompleteTask(context, renderedBuffer)));

  context->OnStateChanged(nullptr, AudioContextState::Closed);

  // Allow this node (and the context) to be collected now that rendering is
  // done.
  mOfflineRenderingRef.Drop(this);
}
    448 
// Resolves the promise returned by OfflineAudioContext.startRendering()
// with the fully rendered buffer. Main thread, offline contexts only.
void AudioDestinationNode::ResolvePromise(AudioBuffer* aRenderedBuffer) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(mIsOffline);
  mOfflineRenderingPromise->MaybeResolve(aRenderedBuffer);
}
    454 
    455 uint32_t AudioDestinationNode::MaxChannelCount() const {
    456  return Context()->MaxChannelCount();
    457 }
    458 
    459 void AudioDestinationNode::SetChannelCount(uint32_t aChannelCount,
    460                                           ErrorResult& aRv) {
    461  if (aChannelCount > MaxChannelCount()) {
    462    aRv.ThrowIndexSizeError(
    463        nsPrintfCString("%u is larger than maxChannelCount", aChannelCount));
    464    return;
    465  }
    466 
    467  if (aChannelCount == ChannelCount()) {
    468    return;
    469  }
    470 
    471  AudioNode::SetChannelCount(aChannelCount, aRv);
    472 }
    473 
    474 void AudioDestinationNode::Mute() {
    475  MOZ_ASSERT(Context() && !Context()->IsOffline());
    476  SendDoubleParameterToTrack(DestinationNodeEngine::VOLUME, 0.0f);
    477 }
    478 
    479 void AudioDestinationNode::Unmute() {
    480  MOZ_ASSERT(Context() && !Context()->IsOffline());
    481  SendDoubleParameterToTrack(DestinationNodeEngine::VOLUME, 1.0f);
    482 }
    483 
    484 void AudioDestinationNode::Suspend() {
    485  SendInt32ParameterToTrack(DestinationNodeEngine::SUSPENDED, 1);
    486 }
    487 
    488 void AudioDestinationNode::Resume() {
    489  SendInt32ParameterToTrack(DestinationNodeEngine::SUSPENDED, 0);
    490 }
    491 
// A running/suspended/closed transition can change the audible verdict
// (see IsAudible()), so recompute it.
void AudioDestinationNode::NotifyAudioContextStateChanged() {
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::ePauseStateChanged);
}
    495 
// Drops the self-reference taken in StartRendering() when an offline
// context is shut down before (or after) rendering completes.
void AudioDestinationNode::OfflineShutdown() {
  MOZ_ASSERT(Context() && Context()->IsOffline(),
             "Should only be called on a valid OfflineAudioContext");

  mOfflineRenderingRef.Drop(this);
}
    502 
    503 JSObject* AudioDestinationNode::WrapObject(JSContext* aCx,
    504                                           JS::Handle<JSObject*> aGivenProto) {
    505  return AudioDestinationNode_Binding::Wrap(aCx, this, aGivenProto);
    506 }
    507 
// Kicks off an offline render. Takes a self-reference that keeps this node
// alive until FireOfflineCompletionEvent()/OfflineShutdown() drops it.
// @param aPromise the promise returned by startRendering(), resolved with
//        the rendered buffer in ResolvePromise().
void AudioDestinationNode::StartRendering(Promise* aPromise) {
  mOfflineRenderingPromise = aPromise;
  mOfflineRenderingRef.Take(this);
  Track()->Graph()->StartNonRealtimeProcessing(mFramesToProduce);
}
    513 
    514 NS_IMETHODIMP
    515 AudioDestinationNode::WindowVolumeChanged(float aVolume, bool aMuted) {
    516  MOZ_ASSERT(mAudioChannelAgent);
    517  if (!mTrack) {
    518    return NS_OK;
    519  }
    520 
    521  AUDIO_CHANNEL_LOG(
    522      "AudioDestinationNode %p WindowVolumeChanged, "
    523      "aVolume = %f, aMuted = %s\n",
    524      this, aVolume, aMuted ? "true" : "false");
    525 
    526  mAudioChannelVolume = aMuted ? 0.0f : aVolume;
    527  mTrack->SetAudioOutputVolume(nullptr, mAudioChannelVolume);
    528  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::eVolumeChanged);
    529  return NS_OK;
    530 }
    531 
    532 NS_IMETHODIMP
    533 AudioDestinationNode::WindowSuspendChanged(nsSuspendedTypes aSuspend) {
    534  MOZ_ASSERT(mAudioChannelAgent);
    535  if (!mTrack) {
    536    return NS_OK;
    537  }
    538 
    539  const bool shouldDisable = aSuspend == nsISuspendedTypes::SUSPENDED_BLOCK;
    540  if (mAudioChannelDisabled == shouldDisable) {
    541    return NS_OK;
    542  }
    543  mAudioChannelDisabled = shouldDisable;
    544 
    545  AUDIO_CHANNEL_LOG(
    546      "AudioDestinationNode %p WindowSuspendChanged, shouldDisable = %d\n",
    547      this, mAudioChannelDisabled);
    548 
    549  DisabledTrackMode disabledMode = mAudioChannelDisabled
    550                                       ? DisabledTrackMode::SILENCE_BLACK
    551                                       : DisabledTrackMode::ENABLED;
    552  mTrack->SetDisabledTrackMode(disabledMode);
    553  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::ePauseStateChanged);
    554  return NS_OK;
    555 }
    556 
    557 NS_IMETHODIMP
    558 AudioDestinationNode::WindowAudioCaptureChanged(bool aCapture) {
    559  MOZ_ASSERT(mAudioChannelAgent);
    560  if (!mTrack) {
    561    return NS_OK;
    562  }
    563 
    564  if (!GetOwnerWindow()) {
    565    return NS_OK;
    566  }
    567 
    568  if (aCapture == IsCapturingAudio()) {
    569    return NS_OK;
    570  }
    571 
    572  if (aCapture) {
    573    StartAudioCapturingTrack();
    574  } else {
    575    StopAudioCapturingTrack();
    576  }
    577 
    578  return NS_OK;
    579 }
    580 
    581 bool AudioDestinationNode::IsCapturingAudio() const {
    582  return mCaptureTrackPort != nullptr;
    583 }
    584 
// Connects mTrack to the window's capture track so tab audio capture sees
// this context's output.
// NOTE(review): the owner window is dereferenced without a null check —
// callers (WindowAudioCaptureChanged) appear to guard on GetOwnerWindow();
// confirm no other caller exists.
void AudioDestinationNode::StartAudioCapturingTrack() {
  MOZ_ASSERT(!IsCapturingAudio());
  nsGlobalWindowInner* window = Context()->GetOwnerWindow();
  uint64_t id = window->WindowID();
  mCaptureTrackPort = mTrack->Graph()->ConnectToCaptureTrack(id, mTrack);
}
    591 
// Disconnects from the window capture track and releases the port.
void AudioDestinationNode::StopAudioCapturingTrack() {
  MOZ_ASSERT(IsCapturingAudio());
  mCaptureTrackPort->Destroy();
  mCaptureTrackPort = nullptr;
}
    597 
    598 void AudioDestinationNode::CreateAudioWakeLockIfNeeded() {
    599  if (!mWakeLock && IsAudible()) {
    600    RefPtr<power::PowerManagerService> pmService =
    601        power::PowerManagerService::GetInstance();
    602    NS_ENSURE_TRUE_VOID(pmService);
    603 
    604    ErrorResult rv;
    605    mWakeLock =
    606        pmService->NewWakeLock(u"audio-playing"_ns, GetOwnerWindow(), rv);
    607  }
    608 }
    609 
    610 void AudioDestinationNode::ReleaseAudioWakeLockIfExists() {
    611  if (mWakeLock) {
    612    IgnoredErrorResult rv;
    613    mWakeLock->Unlock(rv);
    614    mWakeLock = nullptr;
    615  }
    616 }
    617 
// Called on the main thread (dispatched by DestinationNodeEngine) when the
// rendered signal's audibility flips; folds the new state into the final
// audible verdict.
void AudioDestinationNode::NotifyDataAudibleStateChanged(bool aAudible) {
  MOZ_ASSERT(!mIsOffline);

  AUDIO_CHANNEL_LOG(
      "AudioDestinationNode %p NotifyDataAudibleStateChanged, audible=%d", this,
      aAudible);

  mIsDataAudible = aAudible;
  UpdateFinalAudibleStateIfNeeded(AudibleChangedReasons::eDataAudibleChanged);
}
    628 
// Recomputes the combined audible state (IsAudible()) and, if it changed,
// reports it to the audio channel agent and acquires/releases the audio
// wake lock accordingly.
// @param aReason why the state may have changed (volume, data, pause...).
void AudioDestinationNode::UpdateFinalAudibleStateIfNeeded(
    AudibleChangedReasons aReason) {
  // The audio context has been closed and we've destroyed the agent.
  if (!mAudioChannelAgent) {
    return;
  }
  const bool newAudibleState = IsAudible();
  if (mFinalAudibleState == newAudibleState) {
    // No transition; avoid redundant agent notifications.
    return;
  }
  AUDIO_CHANNEL_LOG("AudioDestinationNode %p Final audible state=%d", this,
                    newAudibleState);
  mFinalAudibleState = newAudibleState;
  AudibleState state =
      mFinalAudibleState ? AudibleState::eAudible : AudibleState::eNotAudible;
  mAudioChannelAgent->NotifyStartedAudible(state, aReason);
  // Keep the device awake only while audibly playing.
  if (mFinalAudibleState) {
    CreateAudioWakeLockIfNeeded();
  } else {
    ReleaseAudioWakeLockIfExists();
  }
}
    651 
bool AudioDestinationNode::IsAudible() const {
  // The destination node will be regarded as audible if all of the following
  // conditions are true.
  // (1) data audible state : both audio input and output are audible
  // (2) window audible state : the tab isn't muted by the tab sound indicator
  // (3) audio context state : the audio context should be running
  return Context()->State() == AudioContextState::Running && mIsDataAudible &&
         mAudioChannelVolume != 0.0;
}
    661 
    662 }  // namespace mozilla::dom