tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

AudioContext.cpp (45154B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "AudioContext.h"
      8 
      9 #include "AudioBuffer.h"
     10 #include "AudioBufferSourceNode.h"
     11 #include "AudioChannelService.h"
     12 #include "AudioDestinationNode.h"
     13 #include "AudioListener.h"
     14 #include "AudioNodeTrack.h"
     15 #include "AudioStream.h"
     16 #include "AudioWorkletImpl.h"
     17 #include "AutoplayPolicy.h"
     18 #include "BiquadFilterNode.h"
     19 #include "ChannelMergerNode.h"
     20 #include "ChannelSplitterNode.h"
     21 #include "ConstantSourceNode.h"
     22 #include "ConvolverNode.h"
     23 #include "DelayNode.h"
     24 #include "DynamicsCompressorNode.h"
     25 #include "GainNode.h"
     26 #include "IIRFilterNode.h"
     27 #include "MediaElementAudioSourceNode.h"
     28 #include "MediaStreamAudioDestinationNode.h"
     29 #include "MediaStreamAudioSourceNode.h"
     30 #include "MediaStreamTrackAudioSourceNode.h"
     31 #include "MediaTrackGraph.h"
     32 #include "OscillatorNode.h"
     33 #include "PannerNode.h"
     34 #include "PeriodicWave.h"
     35 #include "ScriptProcessorNode.h"
     36 #include "StereoPannerNode.h"
     37 #include "Tracing.h"
     38 #include "WaveShaperNode.h"
     39 #include "blink/PeriodicWave.h"
     40 #include "js/ArrayBuffer.h"  // JS::StealArrayBufferContents
     41 #include "mozilla/ErrorResult.h"
     42 #include "mozilla/OwningNonNull.h"
     43 #include "mozilla/Preferences.h"
     44 #include "mozilla/RefPtr.h"
     45 #include "mozilla/StaticPrefs_media.h"
     46 #include "mozilla/dom/AnalyserNode.h"
     47 #include "mozilla/dom/AnalyserNodeBinding.h"
     48 #include "mozilla/dom/AudioBufferSourceNodeBinding.h"
     49 #include "mozilla/dom/AudioContextBinding.h"
     50 #include "mozilla/dom/AudioWorklet.h"
     51 #include "mozilla/dom/BaseAudioContextBinding.h"
     52 #include "mozilla/dom/BiquadFilterNodeBinding.h"
     53 #include "mozilla/dom/BrowsingContext.h"
     54 #include "mozilla/dom/CanonicalBrowsingContext.h"
     55 #include "mozilla/dom/ChannelMergerNodeBinding.h"
     56 #include "mozilla/dom/ChannelSplitterNodeBinding.h"
     57 #include "mozilla/dom/ContentChild.h"
     58 #include "mozilla/dom/ConvolverNodeBinding.h"
     59 #include "mozilla/dom/DelayNodeBinding.h"
     60 #include "mozilla/dom/DynamicsCompressorNodeBinding.h"
     61 #include "mozilla/dom/GainNodeBinding.h"
     62 #include "mozilla/dom/HTMLMediaElement.h"
     63 #include "mozilla/dom/IIRFilterNodeBinding.h"
     64 #include "mozilla/dom/MediaElementAudioSourceNodeBinding.h"
     65 #include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
     66 #include "mozilla/dom/MediaStreamTrackAudioSourceNodeBinding.h"
     67 #include "mozilla/dom/OfflineAudioContextBinding.h"
     68 #include "mozilla/dom/OscillatorNodeBinding.h"
     69 #include "mozilla/dom/PannerNodeBinding.h"
     70 #include "mozilla/dom/Performance.h"
     71 #include "mozilla/dom/PeriodicWaveBinding.h"
     72 #include "mozilla/dom/Promise.h"
     73 #include "mozilla/dom/StereoPannerNodeBinding.h"
     74 #include "mozilla/dom/WaveShaperNodeBinding.h"
     75 #include "nsContentUtils.h"
     76 #include "nsGlobalWindowInner.h"
     77 #include "nsIScriptError.h"
     78 #include "nsNetCID.h"
     79 #include "nsNetUtil.h"
     80 #include "nsPIDOMWindow.h"
     81 #include "nsPrintfCString.h"
     82 #include "nsRFPService.h"
     83 
// Log module shared with other autoplay-policy code; defined elsewhere.
extern mozilla::LazyLogModule gAutoplayPermissionLog;

// Debug-level logging for autoplay-policy decisions made in this file.
#define AUTOPLAY_LOG(msg, ...) \
 MOZ_LOG(gAutoplayPermissionLog, LogLevel::Debug, (msg, ##__VA_ARGS__))
     88 
     89 namespace mozilla::dom {
     90 
// Monotonically increasing id handed to each AudioContext.  0 is a special
// value that MediaTracks use to denote they are not part of an AudioContext.
static dom::AudioContext::AudioContextId gAudioContextId = 1;
     94 
NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)

// Unlink: drop the strong references that can form cycles through this object.
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
 // The destination node and AudioContext form a cycle and so the destination
 // track will be destroyed.  mWorklet must be shut down before the track
 // is destroyed.  Do this before clearing mWorklet.
 tmp->ShutdownWorklet();
 NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
 NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
 NS_IMPL_CYCLE_COLLECTION_UNLINK(mWorklet)
 NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
 NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises)
 // mActiveNodes is only unlinked when tracks are suspended or the context
 // never started -- presumably because otherwise those nodes may still be
 // rendering audio; confirm against AudioNode lifetime rules.
 if (tmp->mTracksAreSuspended || !tmp->mIsStarted) {
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
 }
 // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
 // explicitly. mAllNodes is an array of weak pointers, ignore it here.
 // mBasicWaveFormCache cannot participate in cycles, ignore it here.

 // Remove weak reference on the global window as the context is not usable
 // without mDestination.
 tmp->DisconnectFromWindow();
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)

// Traverse: report the same edges to the cycle collector (must mirror Unlink).
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
                                                 DOMEventTargetHelper)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mWorklet)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises)
 // Same condition as in Unlink above so the collector sees a consistent graph.
 if (tmp->mTracksAreSuspended || !tmp->mIsStarted) {
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
 }
 // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
 // explicitly. mAllNodes is an array of weak pointers, ignore it here.
 // mBasicWaveFormCache cannot participate in cycles, ignore it here.
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

// Standard XPCOM refcounting, delegating to DOMEventTargetHelper.
NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper)

// QueryInterface: AudioContext additionally implements nsIMemoryReporter
// (registered in the WebIDL constructors below).
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioContext)
 NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
    140 
    141 static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate,
    142                                          bool aShouldResistFingerprinting) {
    143  if (aIsOffline || aSampleRate != 0.0) {
    144    return aSampleRate;
    145  } else {
    146    return static_cast<float>(
    147        CubebUtils::PreferredSampleRate(aShouldResistFingerprinting));
    148  }
    149 }
    150 
// Shared constructor for realtime and offline contexts.  aLength is only
// meaningful offline (number of frames to render); realtime callers pass 0.
AudioContext::AudioContext(nsPIDOMWindowInner* aWindow, bool aIsOffline,
                          uint32_t aNumberOfChannels, uint32_t aLength,
                          float aSampleRate)
   : DOMEventTargetHelper(aWindow),
     mId(gAudioContextId++),
     mSampleRate(GetSampleRateForAudioContext(
         aIsOffline, aSampleRate,
         aWindow->AsGlobal()->ShouldResistFingerprinting(
             RFPTarget::AudioSampleRate))),
     mAudioContextState(AudioContextState::Suspended),
     mNumberOfChannels(aNumberOfChannels),
     mRTPCallerType(aWindow->AsGlobal()->GetRTPCallerType()),
     mShouldResistFingerprinting(
         aWindow->AsGlobal()->ShouldResistFingerprinting(
             RFPTarget::AudioContext)),
     mIsOffline(aIsOffline),
     mIsStarted(!aIsOffline),
     mIsShutDown(false),
     mIsDisconnecting(false),
     mCloseCalled(false),
     // Realtime contexts start with suspended tracks until an
     // AudioCallbackDriver is running.
     mTracksAreSuspended(!aIsOffline),
     mWasAllowedToStart(true),
     mSuspendedByContent(false),
     mSuspendedByChrome(nsGlobalWindowInner::Cast(aWindow)->IsSuspended()) {
 // The window decides whether this context must start muted.
 bool mute = aWindow->AddAudioContext(this);

 // Note: AudioDestinationNode needs an AudioContext that must already be
 // bound to the window.
 const bool allowedToStart = media::AutoplayPolicy::IsAllowedToPlay(*this);
 mDestination =
     new AudioDestinationNode(this, aIsOffline, aNumberOfChannels, aLength);
 mDestination->Init();
 // If an AudioContext is not allowed to start, we would postpone its state
 // transition from `suspended` to `running` until sites explicitly call
 // AudioContext.resume() or AudioScheduledSourceNode.start().
 if (!allowedToStart) {
   MOZ_ASSERT(!mIsOffline);
   AUTOPLAY_LOG("AudioContext %p is not allowed to start", this);
   ReportBlocked();
 } else if (!mIsOffline) {
   ResumeInternal();
 }

 // The context can't be muted until it has a destination.
 if (mute) {
   Mute();
 }

 FFTBlock::MainThreadInit();
}
    203 
    204 void AudioContext::StartBlockedAudioContextIfAllowed() {
    205  MOZ_ASSERT(NS_IsMainThread());
    206  // Only try to start AudioContext when AudioContext was not allowed to start.
    207  if (mWasAllowedToStart) {
    208    return;
    209  }
    210 
    211  const bool isAllowedToPlay = media::AutoplayPolicy::IsAllowedToPlay(*this);
    212  AUTOPLAY_LOG("Trying to start AudioContext %p, IsAllowedToPlay=%d", this,
    213               isAllowedToPlay);
    214 
    215  // Only start the AudioContext if this resume() call was initiated by content,
    216  // not if it was a result of the AudioContext starting after having been
    217  // blocked because of the auto-play policy.
    218  if (isAllowedToPlay && !mSuspendedByContent) {
    219    ResumeInternal();
    220  } else {
    221    ReportBlocked();
    222  }
    223 }
    224 
    225 void AudioContext::DisconnectFromWindow() {
    226  MaybeClearPageAwakeRequest();
    227  if (nsGlobalWindowInner* window = GetOwnerWindow()) {
    228    window->RemoveAudioContext(this);
    229  }
    230 }
    231 
// Destructor: unhook from the window and the memory reporter registry.  The
// page-awake request must already have been revoked by this point.
AudioContext::~AudioContext() {
 DisconnectFromWindow();
 UnregisterWeakMemoryReporter(this);
 MOZ_ASSERT(!mSetPageAwakeRequest, "forgot to revoke for page awake?");
}
    237 
    238 JSObject* AudioContext::WrapObject(JSContext* aCx,
    239                                   JS::Handle<JSObject*> aGivenProto) {
    240  if (mIsOffline) {
    241    return OfflineAudioContext_Binding::Wrap(aCx, this, aGivenProto);
    242  }
    243  return AudioContext_Binding::Wrap(aCx, this, aGivenProto);
    244 }
    245 
    246 static bool CheckFullyActive(nsPIDOMWindowInner* aWindow, ErrorResult& aRv) {
    247  if (!aWindow->IsFullyActive()) {
    248    aRv.ThrowInvalidStateError("The document is not fully active.");
    249    return false;
    250  }
    251  return true;
    252 }
    253 
/* static */
// WebIDL constructor for a realtime AudioContext:
//   new AudioContext({ sampleRate });
// Throws InvalidStateError for non-fully-active documents and
// NotSupportedError for out-of-range sample rates.
already_AddRefed<AudioContext> AudioContext::Constructor(
   const GlobalObject& aGlobal, const AudioContextOptions& aOptions,
   ErrorResult& aRv) {
 nsCOMPtr<nsPIDOMWindowInner> window =
     do_QueryInterface(aGlobal.GetAsSupports());
 if (!window) {
   aRv.Throw(NS_ERROR_FAILURE);
   return nullptr;
 }
 /**
  * If the current settings object’s responsible document is NOT fully
  * active, throw an InvalidStateError and abort these steps.
  */
 if (!CheckFullyActive(window, aRv)) {
   return nullptr;
 }

 // Validate an explicitly requested sample rate against the supported range.
 if (aOptions.mSampleRate.WasPassed() &&
     (aOptions.mSampleRate.Value() < WebAudioUtils::MinSampleRate ||
      aOptions.mSampleRate.Value() > WebAudioUtils::MaxSampleRate)) {
   aRv.ThrowNotSupportedError(nsPrintfCString(
       "Sample rate %g is not in the range [%u, %u]",
       aOptions.mSampleRate.Value(), WebAudioUtils::MinSampleRate,
       WebAudioUtils::MaxSampleRate));
   return nullptr;
 }
 // 0 is resolved later to the device-preferred rate (see
 // GetSampleRateForAudioContext).
 float sampleRate = aOptions.mSampleRate.WasPassed()
                        ? aOptions.mSampleRate.Value()
                        : MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE;

 WEB_AUDIO_API_LOG("AudioContext sampleRate={}", sampleRate);
 // Realtime contexts: stereo output, no fixed render length.
 RefPtr<AudioContext> object =
     new AudioContext(window, false, 2, 0, sampleRate);

 RegisterWeakMemoryReporter(object);

 return object.forget();
}
    293 
/* static */
// WebIDL constructor for OfflineAudioContext taking an options dictionary;
// unpacks the fields and defers to the parameterized overload below.
already_AddRefed<AudioContext> AudioContext::Constructor(
   const GlobalObject& aGlobal, const OfflineAudioContextOptions& aOptions,
   ErrorResult& aRv) {
 return Constructor(aGlobal, aOptions.mNumberOfChannels, aOptions.mLength,
                    aOptions.mSampleRate, aRv);
}
    301 
/* static */
// WebIDL constructor for OfflineAudioContext(numberOfChannels, length,
// sampleRate).  Validates each argument in order, throwing NotSupportedError
// on the first invalid one.
already_AddRefed<AudioContext> AudioContext::Constructor(
   const GlobalObject& aGlobal, uint32_t aNumberOfChannels, uint32_t aLength,
   float aSampleRate, ErrorResult& aRv) {
 nsCOMPtr<nsPIDOMWindowInner> window =
     do_QueryInterface(aGlobal.GetAsSupports());
 if (!window) {
   aRv.Throw(NS_ERROR_FAILURE);
   return nullptr;
 }
 /**
  * If the current settings object’s responsible document is NOT fully
  * active, throw an InvalidStateError and abort these steps.
  */
 if (!CheckFullyActive(window, aRv)) {
   return nullptr;
 }

 WEB_AUDIO_API_LOG(
     "OfflineAudioContext numberOfChannels={} length={} sampleRate={}",
     aNumberOfChannels, aLength, aSampleRate);
 // Channel count must be in [1, MaxChannelCount].
 if (aNumberOfChannels == 0 ||
     aNumberOfChannels > WebAudioUtils::MaxChannelCount) {
   aRv.ThrowNotSupportedError(
       nsPrintfCString("%u is not a valid channel count", aNumberOfChannels));
   return nullptr;
 }

 // Length is the number of frames to render; zero-length renders are invalid.
 if (aLength == 0) {
   aRv.ThrowNotSupportedError("Length must be nonzero");
   return nullptr;
 }

 if (aSampleRate < WebAudioUtils::MinSampleRate ||
     aSampleRate > WebAudioUtils::MaxSampleRate) {
   // The DOM binding protects us against infinity and NaN
   aRv.ThrowNotSupportedError(nsPrintfCString(
       "Sample rate %g is not in the range [%u, %u]", aSampleRate,
       WebAudioUtils::MinSampleRate, WebAudioUtils::MaxSampleRate));
   return nullptr;
 }

 RefPtr<AudioContext> object =
     new AudioContext(window, true, aNumberOfChannels, aLength, aSampleRate);

 RegisterWeakMemoryReporter(object);

 return object.forget();
}
    351 
    352 already_AddRefed<AudioBufferSourceNode> AudioContext::CreateBufferSource() {
    353  return AudioBufferSourceNode::Create(nullptr, *this,
    354                                       AudioBufferSourceOptions());
    355 }
    356 
    357 already_AddRefed<ConstantSourceNode> AudioContext::CreateConstantSource() {
    358  RefPtr<ConstantSourceNode> constantSourceNode = new ConstantSourceNode(this);
    359  return constantSourceNode.forget();
    360 }
    361 
    362 already_AddRefed<AudioBuffer> AudioContext::CreateBuffer(
    363    uint32_t aNumberOfChannels, uint32_t aLength, float aSampleRate,
    364    ErrorResult& aRv) {
    365  if (!aNumberOfChannels) {
    366    aRv.ThrowNotSupportedError("Number of channels must be nonzero");
    367    return nullptr;
    368  }
    369 
    370  return AudioBuffer::Create(GetOwnerWindow(), aNumberOfChannels, aLength,
    371                             aSampleRate, aRv);
    372 }
    373 
    374 namespace {
    375 
    376 bool IsValidBufferSize(uint32_t aBufferSize) {
    377  switch (aBufferSize) {
    378    case 0:  // let the implementation choose the buffer size
    379    case 256:
    380    case 512:
    381    case 1024:
    382    case 2048:
    383    case 4096:
    384    case 8192:
    385    case 16384:
    386      return true;
    387    default:
    388      return false;
    389  }
    390 }
    391 
    392 }  // namespace
    393 
    394 already_AddRefed<MediaStreamAudioDestinationNode>
    395 AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) {
    396  return MediaStreamAudioDestinationNode::Create(*this, AudioNodeOptions(),
    397                                                 aRv);
    398 }
    399 
// createScriptProcessor(): legacy main-thread audio processing node.
// Validates, in order: at least one non-zero channel count, each channel
// count within MaxChannelCount, and a valid buffer size (0 or a power of two
// in [256, 16384]).  Throws IndexSizeError on the first violation.
already_AddRefed<ScriptProcessorNode> AudioContext::CreateScriptProcessor(
   uint32_t aBufferSize, uint32_t aNumberOfInputChannels,
   uint32_t aNumberOfOutputChannels, ErrorResult& aRv) {
 if (aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) {
   aRv.ThrowIndexSizeError(
       "At least one of numberOfInputChannels and numberOfOutputChannels must "
       "be nonzero");
   return nullptr;
 }

 if (aNumberOfInputChannels > WebAudioUtils::MaxChannelCount) {
   aRv.ThrowIndexSizeError(nsPrintfCString(
       "%u is not a valid number of input channels", aNumberOfInputChannels));
   return nullptr;
 }

 if (aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount) {
   aRv.ThrowIndexSizeError(
       nsPrintfCString("%u is not a valid number of output channels",
                       aNumberOfOutputChannels));
   return nullptr;
 }

 if (!IsValidBufferSize(aBufferSize)) {
   aRv.ThrowIndexSizeError(
       nsPrintfCString("%u is not a valid bufferSize", aBufferSize));
   return nullptr;
 }

 RefPtr<ScriptProcessorNode> scriptProcessor = new ScriptProcessorNode(
     this, aBufferSize, aNumberOfInputChannels, aNumberOfOutputChannels);
 return scriptProcessor.forget();
}
    433 
    434 already_AddRefed<AnalyserNode> AudioContext::CreateAnalyser(ErrorResult& aRv) {
    435  return AnalyserNode::Create(*this, AnalyserOptions(), aRv);
    436 }
    437 
    438 already_AddRefed<StereoPannerNode> AudioContext::CreateStereoPanner(
    439    ErrorResult& aRv) {
    440  return StereoPannerNode::Create(*this, StereoPannerOptions(), aRv);
    441 }
    442 
    443 already_AddRefed<MediaElementAudioSourceNode>
    444 AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
    445                                       ErrorResult& aRv) {
    446  MediaElementAudioSourceOptions options;
    447  options.mMediaElement = aMediaElement;
    448 
    449  return MediaElementAudioSourceNode::Create(*this, options, aRv);
    450 }
    451 
    452 already_AddRefed<MediaStreamAudioSourceNode>
    453 AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
    454                                      ErrorResult& aRv) {
    455  MediaStreamAudioSourceOptions options;
    456  options.mMediaStream = aMediaStream;
    457 
    458  return MediaStreamAudioSourceNode::Create(*this, options, aRv);
    459 }
    460 
    461 already_AddRefed<MediaStreamTrackAudioSourceNode>
    462 AudioContext::CreateMediaStreamTrackSource(MediaStreamTrack& aMediaStreamTrack,
    463                                           ErrorResult& aRv) {
    464  MediaStreamTrackAudioSourceOptions options;
    465  options.mMediaStreamTrack = aMediaStreamTrack;
    466 
    467  return MediaStreamTrackAudioSourceNode::Create(*this, options, aRv);
    468 }
    469 
    470 already_AddRefed<GainNode> AudioContext::CreateGain(ErrorResult& aRv) {
    471  return GainNode::Create(*this, GainOptions(), aRv);
    472 }
    473 
    474 already_AddRefed<WaveShaperNode> AudioContext::CreateWaveShaper(
    475    ErrorResult& aRv) {
    476  return WaveShaperNode::Create(*this, WaveShaperOptions(), aRv);
    477 }
    478 
    479 already_AddRefed<DelayNode> AudioContext::CreateDelay(double aMaxDelayTime,
    480                                                      ErrorResult& aRv) {
    481  DelayOptions options;
    482  options.mMaxDelayTime = aMaxDelayTime;
    483  return DelayNode::Create(*this, options, aRv);
    484 }
    485 
    486 already_AddRefed<PannerNode> AudioContext::CreatePanner(ErrorResult& aRv) {
    487  return PannerNode::Create(*this, PannerOptions(), aRv);
    488 }
    489 
    490 already_AddRefed<ConvolverNode> AudioContext::CreateConvolver(
    491    ErrorResult& aRv) {
    492  return ConvolverNode::Create(nullptr, *this, ConvolverOptions(), aRv);
    493 }
    494 
    495 already_AddRefed<ChannelSplitterNode> AudioContext::CreateChannelSplitter(
    496    uint32_t aNumberOfOutputs, ErrorResult& aRv) {
    497  ChannelSplitterOptions options;
    498  options.mNumberOfOutputs = aNumberOfOutputs;
    499  return ChannelSplitterNode::Create(*this, options, aRv);
    500 }
    501 
    502 already_AddRefed<ChannelMergerNode> AudioContext::CreateChannelMerger(
    503    uint32_t aNumberOfInputs, ErrorResult& aRv) {
    504  ChannelMergerOptions options;
    505  options.mNumberOfInputs = aNumberOfInputs;
    506  return ChannelMergerNode::Create(*this, options, aRv);
    507 }
    508 
    509 already_AddRefed<DynamicsCompressorNode> AudioContext::CreateDynamicsCompressor(
    510    ErrorResult& aRv) {
    511  return DynamicsCompressorNode::Create(*this, DynamicsCompressorOptions(),
    512                                        aRv);
    513 }
    514 
    515 already_AddRefed<BiquadFilterNode> AudioContext::CreateBiquadFilter(
    516    ErrorResult& aRv) {
    517  return BiquadFilterNode::Create(*this, BiquadFilterOptions(), aRv);
    518 }
    519 
    520 already_AddRefed<IIRFilterNode> AudioContext::CreateIIRFilter(
    521    const Sequence<double>& aFeedforward, const Sequence<double>& aFeedback,
    522    mozilla::ErrorResult& aRv) {
    523  IIRFilterOptions options;
    524  options.mFeedforward = aFeedforward;
    525  options.mFeedback = aFeedback;
    526  return IIRFilterNode::Create(*this, options, aRv);
    527 }
    528 
    529 already_AddRefed<OscillatorNode> AudioContext::CreateOscillator(
    530    ErrorResult& aRv) {
    531  return OscillatorNode::Create(*this, OscillatorOptions(), aRv);
    532 }
    533 
    534 already_AddRefed<PeriodicWave> AudioContext::CreatePeriodicWave(
    535    const Sequence<float>& aRealData, const Sequence<float>& aImagData,
    536    const PeriodicWaveConstraints& aConstraints, ErrorResult& aRv) {
    537  RefPtr<PeriodicWave> periodicWave = new PeriodicWave(
    538      this, aRealData.Elements(), aRealData.Length(), aImagData.Elements(),
    539      aImagData.Length(), aConstraints.mDisableNormalization, aRv);
    540  if (aRv.Failed()) {
    541    return nullptr;
    542  }
    543  return periodicWave.forget();
    544 }
    545 
    546 AudioListener* AudioContext::Listener() {
    547  if (!mListener) {
    548    mListener = new AudioListener(this);
    549  }
    550  return mListener;
    551 }
    552 
// outputLatency attribute (seconds).  Returns 0 after shutdown; with
// fingerprinting resistance on, returns a fixed per-platform value instead of
// the real graph latency.
double AudioContext::OutputLatency() {
 if (mIsShutDown) {
   return 0.0;
 }
 // When reduceFingerprinting is enabled, return a latency figure that is
 // fixed, but plausible for the platform.
 double latency_s = 0.0;
 if (mShouldResistFingerprinting) {
#ifdef XP_MACOSX
   // 512 frames at the context rate -- a typical CoreAudio buffer size.
   latency_s = 512. / mSampleRate;
#elif MOZ_WIDGET_ANDROID
   latency_s = 0.020;
#elif XP_WIN
   latency_s = 0.04;
#else  // Catchall for other OSes, including Linux.
   latency_s = 0.025;
#endif
 } else {
   // Real latency as measured by the media track graph.
   return Graph()->AudioOutputLatency();
 }
 return latency_s;
}
    575 
// getOutputTimestamp(): fill aTimeStamp with the context time currently being
// heard (currentTime minus output latency) and the corresponding
// Performance.now() time.  Both fields are zero when there is no destination
// or no Performance object.
void AudioContext::GetOutputTimestamp(AudioTimestamp& aTimeStamp) {
 if (!Destination()) {
   aTimeStamp.mContextTime.Construct(0.0);
   aTimeStamp.mPerformanceTime.Construct(0.0);
   return;
 }

 // The currentTime currently being output is the currentTime minus the audio
 // output latency. The resolution of CurrentTime() is already reduced.
 aTimeStamp.mContextTime.Construct(
     std::max(0.0, CurrentTime() - OutputLatency()));
 nsGlobalWindowInner* win = GetOwnerWindow();
 Performance* perf = win ? win->GetPerformance() : nullptr;
 if (perf) {
   // perf->Now() already has reduced resolution here, no need to do it again.
   // OutputLatency() is in seconds; perf->Now() is in milliseconds.
   aTimeStamp.mPerformanceTime.Construct(
       std::max(0., perf->Now() - (OutputLatency() * 1000.)));
 } else {
   aTimeStamp.mPerformanceTime.Construct(0.0);
 }
}
    597 
    598 AudioWorklet* AudioContext::GetAudioWorklet(ErrorResult& aRv) {
    599  if (!mWorklet) {
    600    mWorklet = AudioWorkletImpl::CreateWorklet(this, aRv);
    601  }
    602 
    603  return mWorklet;
    604 }
    605 bool AudioContext::IsRunning() const {
    606  return mAudioContextState == AudioContextState::Running;
    607 }
    608 
// Create a Promise in this context's relevant global.  If the document is not
// fully active the promise is returned already rejected (per spec) rather
// than throwing.
already_AddRefed<Promise> AudioContext::CreatePromise(ErrorResult& aRv) {
 // Get the relevant global for the promise from the wrapper cache because
 // DOMEventTargetHelper::GetOwnerWindow() returns null if the document is
 // unloaded.
 // We know the wrapper exists because it is being used for |this| from JS.
 // See https://github.com/heycam/webidl/issues/932 for why the relevant
 // global is used instead of the current global.
 nsCOMPtr<nsIGlobalObject> global = xpc::NativeGlobal(GetWrapper());
 RefPtr<Promise> promise = Promise::Create(global, aRv);
 if (aRv.Failed()) {
   return nullptr;
 }
 /**
  * If this's relevant global object's associated Document is not fully
  * active then return a promise rejected with "InvalidStateError"
  * DOMException.
  */
 // NOTE(review): window is dereferenced without a null check -- presumably an
 // AudioContext's relevant global is always a window; confirm QI cannot fail.
 nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(global);
 if (!window->IsFullyActive()) {
   promise->MaybeRejectWithInvalidStateError(
       "The document is not fully active.");
 }
 return promise.forget();
}
    633 
// decodeAudioData(): asynchronously decode compressed audio in aBuffer into
// an AudioBuffer.  Detaches (steals) the input ArrayBuffer's contents, sniffs
// the container type, and hands the bytes to the async decoder.  The result
// is delivered via the returned promise and the optional callbacks.
already_AddRefed<Promise> AudioContext::DecodeAudioData(
   const ArrayBuffer& aBuffer,
   const Optional<OwningNonNull<DecodeSuccessCallback>>& aSuccessCallback,
   const Optional<OwningNonNull<DecodeErrorCallback>>& aFailureCallback,
   ErrorResult& aRv) {
 AutoJSAPI jsapi;
 jsapi.Init();
 JSContext* cx = jsapi.cx();

 // CheckedUnwrapStatic is OK, since we know we have an ArrayBuffer.
 JS::Rooted<JSObject*> obj(cx, js::CheckedUnwrapStatic(aBuffer.Obj()));
 if (!obj) {
   aRv.ThrowSecurityError("Can't get audio data from cross-origin object");
   return nullptr;
 }

 // An already-rejected promise (document not fully active) is returned as-is.
 RefPtr<Promise> promise = CreatePromise(aRv);
 if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
   return promise.forget();
 }

 // Enter the buffer's realm before manipulating it.
 JSAutoRealm ar(cx, obj);

 // Detach the array buffer
 size_t length = JS::GetArrayBufferByteLength(obj);
 uint8_t* data = static_cast<uint8_t*>(JS::StealArrayBufferContents(cx, obj));
 if (!data) {
   JS_ClearPendingException(cx);

   // Throw if the buffer is detached
   aRv.ThrowTypeError("Buffer argument can't be a detached buffer");
   return nullptr;
 }

 // Sniff the content of the media.
 // Failed type sniffing will be handled by AsyncDecodeWebAudio.
 nsAutoCString contentType;
 NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length, contentType);

 RefPtr<DecodeErrorCallback> failureCallback;
 RefPtr<DecodeSuccessCallback> successCallback;
 if (aFailureCallback.WasPassed()) {
   failureCallback = &aFailureCallback.Value();
 }
 if (aSuccessCallback.WasPassed()) {
   successCallback = &aSuccessCallback.Value();
 }
 // The job takes ownership of |data| (stolen above).
 UniquePtr<WebAudioDecodeJob> job(
     new WebAudioDecodeJob(this, promise, successCallback, failureCallback));
 AsyncDecodeWebAudio(contentType.get(), data, length, *job);
 // Transfer the ownership to mDecodeJobs
 mDecodeJobs.AppendElement(std::move(job));

 return promise.forget();
}
    689 
    690 void AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob) {
    691  // Since UniquePtr doesn't provide an operator== which allows you to compare
    692  // against raw pointers, we need to iterate manually.
    693  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
    694    if (mDecodeJobs[i].get() == aDecodeJob) {
    695      mDecodeJobs.RemoveElementAt(i);
    696      break;
    697    }
    698  }
    699 }
    700 
    701 void AudioContext::RegisterActiveNode(AudioNode* aNode) {
    702  if (!mCloseCalled) {
    703    mActiveNodes.Insert(aNode);
    704  }
    705 }
    706 
// Stop tracking aNode as active; counterpart of RegisterActiveNode.
void AudioContext::UnregisterActiveNode(AudioNode* aNode) {
 mActiveNodes.Remove(aNode);
}
    710 
    711 uint32_t AudioContext::MaxChannelCount() const {
    712  if (mShouldResistFingerprinting) {
    713    return 2;
    714  }
    715  return std::min<uint32_t>(
    716      WebAudioUtils::MaxChannelCount,
    717      mIsOffline ? mNumberOfChannels : CubebUtils::MaxNumberOfChannels());
    718 }
    719 
    720 uint32_t AudioContext::ActiveNodeCount() const { return mActiveNodes.Count(); }
    721 
    722 MediaTrackGraph* AudioContext::Graph() const {
    723  return Destination()->Track()->Graph();
    724 }
    725 
    726 AudioNodeTrack* AudioContext::DestinationTrack() const {
    727  if (Destination()) {
    728    return Destination()->Track();
    729  }
    730  return nullptr;
    731 }
    732 
    733 void AudioContext::ShutdownWorklet() {
    734  if (mWorklet) {
    735    mWorklet->Impl()->NotifyWorkletFinished();
    736  }
    737 }
    738 
// currentTime attribute (seconds), with fingerprinting-resistant precision
// reduction applied when the reduction would actually change the value.
double AudioContext::CurrentTime() {
 mozilla::MediaTrack* track = Destination()->Track();

 double rawTime = track->TrackTimeToSeconds(track->GetCurrentTime());

 // CurrentTime increments in intervals of 128/sampleRate. If the Timer
 // Precision Reduction is smaller than this interval, the jittered time
 // can always be reversed to the raw step of the interval. In that case
 // we can simply return the un-reduced time; and avoid breaking tests.
 // We have to convert each variable into a common magnitude, we choose ms.
 // (TimerResolution is in microseconds, hence the /1000.0.)
 if ((128 / mSampleRate) * 1000.0 >
     nsRFPService::TimerResolution(mRTPCallerType) / 1000.0) {
   return rawTime;
 }

 // The value of a MediaTrack's CurrentTime will always advance forward; it
 // will never reset (even if one rewinds a video.) Therefore we can use a
 // single Random Seed initialized at the same time as the object.
 return nsRFPService::ReduceTimePrecisionAsSecs(
     rawTime, GetRandomTimelineSeed(), mRTPCallerType);
}
    760 
    761 nsISerialEventTarget* AudioContext::GetMainThread() const {
    762  if (nsIGlobalObject* global = GetOwnerGlobal()) {
    763    return global->SerialEventTarget();
    764  }
    765  return GetCurrentSerialEventTarget();
    766 }
    767 
// Called when the context is being detached from its global.  Marks us as
// disconnecting first so OnWindowDestroy() skips touching promises, drops
// any page-awake request, runs the shutdown path, then detaches.
void AudioContext::DisconnectFromOwner() {
  mIsDisconnecting = true;
  MaybeClearPageAwakeRequest();
  OnWindowDestroy();
  DOMEventTargetHelper::DisconnectFromOwner();
}
    774 
// Tears the context down when its window goes away.  Closes the graph side,
// rejects outstanding promises (unless the global itself is being torn
// down), shuts down the worklet, and force-shuts-down the MediaTrackGraph.
void AudioContext::OnWindowDestroy() {
  mIsShutDown = true;

  // No promise and no statechange event: nobody is left to observe them.
  CloseInternal(nullptr, AudioContextOperationFlags::None);

  // We don't want to touch promises if the global is going away soon.
  if (!mIsDisconnecting) {
    for (auto p : mPromiseGripArray) {
      p->MaybeRejectWithInvalidStateError("Navigated away from page");
    }

    mPromiseGripArray.Clear();

    for (const auto& p : mPendingResumePromises) {
      p->MaybeRejectWithInvalidStateError("Navigated away from page");
    }
    mPendingResumePromises.Clear();
  }

  // On process shutdown, the MTG thread shuts down before the destination
  // track is destroyed, but AudioWorklet needs to release objects on the MTG
  // thread.  AudioContext::Shutdown() is invoked on processing the
  // PBrowser::Destroy() message before xpcom shutdown begins.
  ShutdownWorklet();

  if (mDestination) {
    // We can destroy the MediaTrackGraph at this point.
    // Although there may be other clients using the graph, this graph is used
    // only for clients in the same window and this window is going away.
    // This will also interrupt any worklet script still running on the graph
    // thread.
    Graph()->ForceShutDown();
    // AudioDestinationNodes on rendering offline contexts have a
    // self-reference which needs removal.
    if (mIsOffline) {
      mDestination->OfflineShutdown();
    }
  }
}
    814 
/* This runnable allows to fire the "statechange" event */
class OnStateChangeTask final : public Runnable {
 public:
  explicit OnStateChangeTask(AudioContext* aAudioContext)
      : Runnable("dom::OnStateChangeTask"), mAudioContext(aAudioContext) {}

  // Dispatches a trusted, non-bubbling, non-cancelable "statechange" event
  // at the AudioContext.  Fails if the owning window or its document is
  // already gone.
  NS_IMETHODIMP
  Run() override {
    nsGlobalWindowInner* win = mAudioContext->GetOwnerWindow();
    if (!win) {
      return NS_ERROR_FAILURE;
    }

    Document* doc = win->GetExtantDoc();
    if (!doc) {
      return NS_ERROR_FAILURE;
    }

    return nsContentUtils::DispatchTrustedEvent(
        doc, mAudioContext, u"statechange"_ns, CanBubble::eNo, Cancelable::eNo);
  }

 private:
  // Strong reference keeps the context alive until the event has fired.
  RefPtr<AudioContext> mAudioContext;
};
    840 
// Dispatches aRunnable to the main thread, but only while our global is
// still alive; otherwise the runnable is deliberately dropped.
void AudioContext::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) {
  MOZ_ASSERT(NS_IsMainThread());
  // It can happen that this runnable took a long time to reach the main thread,
  // and the global is not valid anymore.
  if (GetParentObject()) {
    AbstractThread::MainThread()->Dispatch(std::move(aRunnable));
  } else {
    // Adopt the already_AddRefed reference and release it without running,
    // so the runnable is not leaked.
    RefPtr<nsIRunnable> runnable(aRunnable);
    runnable = nullptr;
  }
}
    852 
// Main-thread callback fired when the graph reports the outcome of a
// suspend/resume/close operation (or when offline rendering starts).
// aPromise, if non-null, is an opaque cookie identifying the promise from
// the originating DOM call; it is only dereferenced while it is still held
// in mPromiseGripArray.
void AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState) {
  MOZ_ASSERT(NS_IsMainThread());

  // "closed" is a terminal state; any transition out of it is a bug.
  if (mAudioContextState == AudioContextState::Closed) {
    fprintf(stderr,
            "Invalid transition: mAudioContextState: %d -> aNewState %d\n",
            static_cast<int>(mAudioContextState), static_cast<int>(aNewState));
    MOZ_ASSERT(false);
  }

  if (aPromise) {
    Promise* promise = reinterpret_cast<Promise*>(aPromise);
    // It is possible for the promise to have been removed from
    // mPromiseGripArray if the cycle collector has severed our connections. DO
    // NOT dereference the promise pointer in that case since it may point to
    // already freed memory.
    if (mPromiseGripArray.Contains(promise)) {
      promise->MaybeResolveWithUndefined();
      DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
      MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
    }
  }

  // Resolve all pending promises once the audio context has been allowed to
  // start.
  if (aNewState == AudioContextState::Running) {
    for (const auto& p : mPendingResumePromises) {
      p->MaybeResolveWithUndefined();
    }
    mPendingResumePromises.Clear();
  }

  // Only fire "statechange" when the state actually changed.
  if (mAudioContextState != aNewState) {
    RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
    Dispatch(task.forget());
  }

  mAudioContextState = aNewState;
  Destination()->NotifyAudioContextStateChanged();
  MaybeUpdatePageAwakeRequest();
}
    894 
    895 BrowsingContext* AudioContext::GetTopLevelBrowsingContext() {
    896  nsGlobalWindowInner* window = GetOwnerWindow();
    897  if (!window) {
    898    return nullptr;
    899  }
    900  BrowsingContext* bc = window->GetBrowsingContext();
    901  if (!bc || bc->IsDiscarded()) {
    902    return nullptr;
    903  }
    904  return bc->Top();
    905 }
    906 
    907 void AudioContext::MaybeUpdatePageAwakeRequest() {
    908  // No need to keep page awake for offline context.
    909  if (IsOffline()) {
    910    return;
    911  }
    912 
    913  if (mIsShutDown) {
    914    return;
    915  }
    916 
    917  if (IsRunning() && !mSetPageAwakeRequest) {
    918    SetPageAwakeRequest(true);
    919  } else if (!IsRunning() && mSetPageAwakeRequest) {
    920    SetPageAwakeRequest(false);
    921  }
    922 }
    923 
// Records the desired awake state and forwards it to the top-level
// browsing context: over IPC when running in a content process, directly on
// the canonical browsing context otherwise.  Note mSetPageAwakeRequest is
// updated even if the browsing context is already gone.
void AudioContext::SetPageAwakeRequest(bool aShouldSet) {
  mSetPageAwakeRequest = aShouldSet;
  BrowsingContext* bc = GetTopLevelBrowsingContext();
  if (!bc) {
    return;
  }
  if (XRE_IsContentProcess()) {
    ContentChild* contentChild = ContentChild::GetSingleton();
    (void)contentChild->SendAddOrRemovePageAwakeRequest(bc, aShouldSet);
    return;
  }
  if (aShouldSet) {
    bc->Canonical()->AddPageAwakeRequest();
  } else {
    bc->Canonical()->RemovePageAwakeRequest();
  }
}
    941 
// Drops the page-awake request if we are currently holding one.
void AudioContext::MaybeClearPageAwakeRequest() {
  if (mSetPageAwakeRequest) {
    SetPageAwakeRequest(false);
  }
}
    947 
    948 nsTArray<RefPtr<mozilla::MediaTrack>> AudioContext::GetAllTracks() const {
    949  nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
    950  for (AudioNode* node : mAllNodes) {
    951    mozilla::MediaTrack* t = node->GetTrack();
    952    if (t) {
    953      tracks.AppendElement(t);
    954    }
    955    // Add the tracks of AudioParam.
    956    const nsTArray<RefPtr<AudioParam>>& audioParams = node->GetAudioParams();
    957    if (!audioParams.IsEmpty()) {
    958      for (auto& param : audioParams) {
    959        t = param->GetTrack();
    960        if (t && !tracks.Contains(t)) {
    961          tracks.AppendElement(t);
    962        }
    963      }
    964    }
    965  }
    966  return tracks;
    967 }
    968 
// Implementation of AudioContext.suspend().  Rejects for offline contexts
// and after close(); otherwise records the content-initiated suspension and
// asks the graph to suspend our tracks.  The returned promise is resolved
// from OnStateChanged() once the graph acknowledges the operation.
already_AddRefed<Promise> AudioContext::Suspend(ErrorResult& aRv) {
  TRACE("AudioContext::Suspend");
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }
  if (mIsOffline) {
    // XXXbz This is not reachable, since we don't implement this
    // method on OfflineAudioContext at all!
    promise->MaybeRejectWithNotSupportedError(
        "Can't suspend OfflineAudioContext yet");
    return promise.forget();
  }

  if (mCloseCalled) {
    promise->MaybeRejectWithInvalidStateError(
        "Can't suspend if the control thread state is \"closed\"");
    return promise.forget();
  }

  // Keep the promise alive until OnStateChanged() resolves it.
  mSuspendedByContent = true;
  mPromiseGripArray.AppendElement(promise);
  SuspendInternal(promise, AudioContextOperationFlags::SendStateChange);
  return promise.forget();
}
    994 
// Chrome-initiated suspension, tracked separately from content calls to
// suspend() so the two cannot cancel each other.  A statechange
// notification is only requested when the "dom.audiocontext.testing" pref
// is set.
void AudioContext::SuspendFromChrome() {
  if (mIsOffline || mIsShutDown) {
    return;
  }
  MOZ_ASSERT(!mSuspendedByChrome);
  mSuspendedByChrome = true;
  SuspendInternal(nullptr, Preferences::GetBool("dom.audiocontext.testing")
                               ? AudioContextOperationFlags::SendStateChange
                               : AudioContextOperationFlags::None);
}
   1005 
// Shared implementation for content- and chrome-initiated suspension.
// aPromise is an opaque cookie handed back to OnStateChanged() when the
// graph acknowledges the operation; the statechange path only runs when
// aFlags asks for it.
void AudioContext::SuspendInternal(void* aPromise,
                                   AudioContextOperationFlags aFlags) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(!mIsOffline);
  Destination()->Suspend();

  nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
  // If mTracksAreSuspended is true then we already suspended all our tracks,
  // so don't suspend them again (since suspend(); suspend(); resume(); should
  // cancel both suspends). But we still need to do ApplyAudioContextOperation
  // to ensure our new promise is resolved.
  if (!mTracksAreSuspended) {
    mTracksAreSuspended = true;
    tracks = GetAllTracks();
  }
  auto promise = Graph()->ApplyAudioContextOperation(
      DestinationTrack(), std::move(tracks), AudioContextOperation::Suspend);
  if ((aFlags & AudioContextOperationFlags::SendStateChange)) {
    promise->Then(
        GetMainThread(), "AudioContext::OnStateChanged",
        [self = RefPtr<AudioContext>(this),
         aPromise](AudioContextState aNewState) {
          self->OnStateChanged(aPromise, aNewState);
        },
        [] { MOZ_CRASH("Unexpected rejection"); });
  }
}
   1033 
// Undoes SuspendFromChrome().  Tracks are only actually resumed when
// content was previously allowed to start the context.
void AudioContext::ResumeFromChrome() {
  if (mIsOffline || mIsShutDown) {
    return;
  }
  MOZ_ASSERT(mSuspendedByChrome);
  mSuspendedByChrome = false;
  if (!mWasAllowedToStart) {
    return;
  }
  ResumeInternal();
}
   1045 
// Implementation of AudioContext.resume().  The promise is queued in
// mPendingResumePromises and is only resolved once the context actually
// reaches "running" (see OnStateChanged).  If autoplay policy blocks us,
// the blockage is reported instead and the promise stays pending.
already_AddRefed<Promise> AudioContext::Resume(ErrorResult& aRv) {
  TRACE("AudioContext::Resume");
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }

  if (mIsOffline) {
    promise->MaybeRejectWithNotSupportedError(
        "Can't resume OfflineAudioContext");
    return promise.forget();
  }

  if (mCloseCalled) {
    promise->MaybeRejectWithInvalidStateError(
        "Can't resume if the control thread state is \"closed\"");
    return promise.forget();
  }

  mSuspendedByContent = false;
  mPendingResumePromises.AppendElement(promise);

  const bool isAllowedToPlay = media::AutoplayPolicy::IsAllowedToPlay(*this);
  AUTOPLAY_LOG("Trying to resume AudioContext %p, IsAllowedToPlay=%d", this,
               isAllowedToPlay);
  if (isAllowedToPlay) {
    ResumeInternal();
  } else {
    ReportBlocked();
  }

  return promise.forget();
}
   1079 
// Actually resumes the graph tracks.  Does nothing while any suspension
// reason (chrome, content, or a pending close) is still in effect.
void AudioContext::ResumeInternal() {
  MOZ_ASSERT(!mIsOffline);
  AUTOPLAY_LOG("Allow to resume AudioContext %p", this);
  mWasAllowedToStart = true;

  if (mSuspendedByChrome || mSuspendedByContent || mCloseCalled) {
    MOZ_ASSERT(mTracksAreSuspended);
    return;
  }

  Destination()->Resume();

  nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
  // If mTracksAreSuspended is false then we already resumed all our tracks,
  // so don't resume them again (since suspend(); resume(); resume(); should
  // be OK). But we still need to do ApplyAudioContextOperation
  // to ensure our new promise is resolved.
  if (mTracksAreSuspended) {
    mTracksAreSuspended = false;
    tracks = GetAllTracks();
  }
  // Check for statechange even when resumed from chrome because content may
  // have called Resume() before chrome resumed the window.
  Graph()
      ->ApplyAudioContextOperation(DestinationTrack(), std::move(tracks),
                                   AudioContextOperation::Resume)
      ->Then(
          GetMainThread(), "AudioContext::OnStateChanged",
          [self = RefPtr<AudioContext>(this)](AudioContextState aNewState) {
            self->OnStateChanged(nullptr, aNewState);
          },
          [] {});  // Promise may be rejected after graph shutdown.
}
   1113 
// Logs an autoplay-blocked warning to the console and, when the
// corresponding pref is enabled, dispatches a trusted "blocked" event at
// this context from the main thread.
void AudioContext::ReportBlocked() {
  ReportToConsole(nsIScriptError::warningFlag,
                  "BlockAutoplayWebAudioStartError");
  mWasAllowedToStart = false;

  if (!StaticPrefs::media_autoplay_block_event_enabled()) {
    return;
  }

  RefPtr<nsIRunnable> r = NS_NewRunnableFunction(
      "AudioContext::AutoplayBlocked", [self = RefPtr{this}]() {
        // The window or document may have gone away by the time we run.
        nsGlobalWindowInner* win = self->GetOwnerWindow();
        if (!win) {
          return;
        }

        Document* doc = win->GetExtantDoc();
        if (!doc) {
          return;
        }

        AUTOPLAY_LOG("Dispatch `blocked` event for AudioContext %p",
                     self.get());
        nsContentUtils::DispatchTrustedEvent(doc, self, u"blocked"_ns,
                                             CanBubble::eNo, Cancelable::eNo);
      });
  Dispatch(r.forget());
}
   1142 
// Implementation of AudioContext.close().  Rejects for offline contexts and
// for a second close(); otherwise the returned promise is kept in
// mPromiseGripArray and resolved from OnStateChanged() once the graph has
// applied the close operation.
already_AddRefed<Promise> AudioContext::Close(ErrorResult& aRv) {
  TRACE("AudioContext::Close");
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }

  if (mIsOffline) {
    // XXXbz This is not reachable, since we don't implement this
    // method on OfflineAudioContext at all!
    promise->MaybeRejectWithNotSupportedError(
        "Can't close OfflineAudioContext yet");
    return promise.forget();
  }

  if (mCloseCalled) {
    promise->MaybeRejectWithInvalidStateError(
        "Can't close an AudioContext twice");
    return promise.forget();
  }

  mPromiseGripArray.AppendElement(promise);

  CloseInternal(promise, AudioContextOperationFlags::SendStateChange);

  return promise.forget();
}
   1170 
// Close path for offline contexts: no promise to resolve and no
// statechange handling is requested.
void AudioContext::OfflineClose() {
  CloseInternal(nullptr, AudioContextOperationFlags::None);
}
   1174 
// Shared close path used by close(), OfflineClose() and OnWindowDestroy().
// aPromise is the opaque cookie forwarded to OnStateChanged() when aFlags
// requests statechange handling.  Safe to call multiple times; sets
// mCloseCalled and releases the active-node references in all cases.
void AudioContext::CloseInternal(void* aPromise,
                                 AudioContextOperationFlags aFlags) {
  // This can be called when freeing a document, and the tracks are dead at
  // this point, so we need extra null-checks.
  AudioNodeTrack* ds = DestinationTrack();
  if (ds && !mIsOffline) {
    Destination()->Close();

    nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
    // If mTracksAreSuspended or mCloseCalled are true then we already suspended
    // all our tracks, so don't suspend them again. But we still need to do
    // ApplyAudioContextOperation to ensure our new promise is resolved.
    if (!mTracksAreSuspended && !mCloseCalled) {
      tracks = GetAllTracks();
    }
    auto promise = Graph()->ApplyAudioContextOperation(
        ds, std::move(tracks), AudioContextOperation::Close);
    if ((aFlags & AudioContextOperationFlags::SendStateChange)) {
      promise->Then(
          GetMainThread(), "AudioContext::OnStateChanged",
          [self = RefPtr<AudioContext>(this),
           aPromise](AudioContextState aNewState) {
            self->OnStateChanged(aPromise, aNewState);
          },
          [] {});  // Promise may be rejected after graph shutdown.
    }
  }
  mCloseCalled = true;
  // Release references to active nodes.
  // Active AudioNodes don't unregister in destructors, at which point the
  // Node is already unregistered.
  mActiveNodes.Clear();
}
   1208 
// Adds a newly created node to the set of all nodes belonging to this
// context.  A node must not be registered twice.
void AudioContext::RegisterNode(AudioNode* aNode) {
  MOZ_ASSERT(!mAllNodes.Contains(aNode));
  mAllNodes.Insert(aNode);
}
   1213 
// Removes a node from the set of all nodes; it must have been registered
// via RegisterNode().
void AudioContext::UnregisterNode(AudioNode* aNode) {
  MOZ_ASSERT(mAllNodes.Contains(aNode));
  mAllNodes.Remove(aNode);
}
   1218 
// OfflineAudioContext.startRendering().  Rendering may only be started
// once; the promise is handed to the destination node, which is responsible
// for settling it.  The context is reported as "running" immediately.
already_AddRefed<Promise> AudioContext::StartRendering(ErrorResult& aRv) {
  MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext");
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }
  // A second call throws rather than returning a rejected promise.
  if (mIsStarted) {
    aRv.ThrowInvalidStateError("Rendering already started");
    return nullptr;
  }

  mIsStarted = true;
  mDestination->StartRendering(promise);

  OnStateChanged(nullptr, AudioContextState::Running);

  return promise.forget();
}
   1237 
// OfflineAudioContext.length, delegated to the destination node.  Only
// valid for offline contexts.
unsigned long AudioContext::Length() {
  MOZ_ASSERT(mIsOffline);
  return mDestination->Length();
}
   1242 
   1243 void AudioContext::Mute() const {
   1244  MOZ_ASSERT(!mIsOffline);
   1245  if (mDestination) {
   1246    mDestination->Mute();
   1247  }
   1248 }
   1249 
   1250 void AudioContext::Unmute() const {
   1251  MOZ_ASSERT(!mIsOffline);
   1252  if (mDestination) {
   1253    mDestination->Unmute();
   1254  }
   1255 }
   1256 
// Records the AudioParamDescriptorMap for a registered worklet processor
// name.  Each name may only be recorded once; the map is moved in, and a
// fallible-insertion failure (OOM) is deliberately ignored.
void AudioContext::SetParamMapForWorkletName(
    const nsAString& aName, AudioParamDescriptorMap* aParamMap) {
  MOZ_ASSERT(!mWorkletParamDescriptors.Contains(aName));
  (void)mWorkletParamDescriptors.InsertOrUpdate(aName, std::move(*aParamMap),
                                                fallible);
}
   1263 
   1264 size_t AudioContext::SizeOfIncludingThis(
   1265    mozilla::MallocSizeOf aMallocSizeOf) const {
   1266  // AudioNodes are tracked separately because we do not want the AudioContext
   1267  // to track all of the AudioNodes it creates, so we wouldn't be able to
   1268  // traverse them from here.
   1269 
   1270  size_t amount = aMallocSizeOf(this);
   1271  if (mListener) {
   1272    amount += mListener->SizeOfIncludingThis(aMallocSizeOf);
   1273  }
   1274  amount += mDecodeJobs.ShallowSizeOfExcludingThis(aMallocSizeOf);
   1275  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
   1276    amount += mDecodeJobs[i]->SizeOfIncludingThis(aMallocSizeOf);
   1277  }
   1278  amount += mActiveNodes.ShallowSizeOfExcludingThis(aMallocSizeOf);
   1279  return amount;
   1280 }
   1281 
// nsIMemoryReporter callback: reports the DOM-side memory of every node in
// this context (one path per node type), then the context object itself.
NS_IMETHODIMP
AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) {
  const nsLiteralCString nodeDescription(
      "Memory used by AudioNode DOM objects (Web Audio).");
  for (AudioNode* node : mAllNodes) {
    int64_t amount = node->SizeOfIncludingThis(MallocSizeOf);
    nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes",
                                node->NodeType());
    aHandleReport->Callback(""_ns, domNodePath, KIND_HEAP, UNITS_BYTES, amount,
                            nodeDescription, aData);
  }

  int64_t amount = SizeOfIncludingThis(MallocSizeOf);
  MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
                     amount,
                     "Memory used by AudioContext objects (Web Audio).");

  return NS_OK;
}
   1302 
   1303 BasicWaveFormCache* AudioContext::GetBasicWaveFormCache() {
   1304  MOZ_ASSERT(NS_IsMainThread());
   1305  if (!mBasicWaveFormCache) {
   1306    mBasicWaveFormCache = new BasicWaveFormCache(SampleRate());
   1307  }
   1308  return mBasicWaveFormCache;
   1309 }
   1310 
   1311 void AudioContext::ReportToConsole(uint32_t aErrorFlags,
   1312                                   const char* aMsg) const {
   1313  MOZ_ASSERT(aMsg);
   1314  Document* doc = GetOwnerWindow() ? GetOwnerWindow()->GetExtantDoc() : nullptr;
   1315  nsContentUtils::ReportToConsole(aErrorFlags, "Media"_ns, doc,
   1316                                  nsContentUtils::eDOM_PROPERTIES, aMsg);
   1317 }
   1318 
// Caches the basic oscillator waveforms (sawtooth/square/triangle) computed
// for a single sample rate, so each is built at most once.
BasicWaveFormCache::BasicWaveFormCache(uint32_t aSampleRate)
    : mSampleRate(aSampleRate) {
  MOZ_ASSERT(NS_IsMainThread());
}
BasicWaveFormCache::~BasicWaveFormCache() = default;
   1324 
   1325 WebCore::PeriodicWave* BasicWaveFormCache::GetBasicWaveForm(
   1326    OscillatorType aType) {
   1327  MOZ_ASSERT(!NS_IsMainThread());
   1328  if (aType == OscillatorType::Sawtooth) {
   1329    if (!mSawtooth) {
   1330      mSawtooth = WebCore::PeriodicWave::createSawtooth(mSampleRate);
   1331    }
   1332    return mSawtooth;
   1333  }
   1334  if (aType == OscillatorType::Square) {
   1335    if (!mSquare) {
   1336      mSquare = WebCore::PeriodicWave::createSquare(mSampleRate);
   1337    }
   1338    return mSquare;
   1339  }
   1340  if (aType == OscillatorType::Triangle) {
   1341    if (!mTriangle) {
   1342      mTriangle = WebCore::PeriodicWave::createTriangle(mSampleRate);
   1343    }
   1344    return mTriangle;
   1345  }
   1346  MOZ_ASSERT(false, "Not reached");
   1347  return nullptr;
   1348 }
   1349 
   1350 }  // namespace mozilla::dom