tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

AudioContext.h (16969B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef AudioContext_h_
      8 #define AudioContext_h_
      9 
     10 #include "AudioParamDescriptorMap.h"
     11 #include "MediaBufferDecoder.h"
     12 #include "X11UndefineNone.h"
     13 #include "js/TypeDecls.h"
     14 #include "mozilla/DOMEventTargetHelper.h"
     15 #include "mozilla/MemoryReporting.h"
     16 #include "mozilla/RelativeTimeline.h"
     17 #include "mozilla/TypedEnumBits.h"
     18 #include "mozilla/UniquePtr.h"
     19 #include "mozilla/dom/AudioContextBinding.h"
     20 #include "mozilla/dom/OfflineAudioContextBinding.h"
     21 #include "mozilla/dom/TypedArray.h"
     22 #include "nsCOMPtr.h"
     23 #include "nsCycleCollectionParticipant.h"
     24 #include "nsHashKeys.h"
     25 #include "nsIMemoryReporter.h"
     26 #include "nsTHashMap.h"
     27 #include "nsTHashSet.h"
     28 
     29 namespace WebCore {
     30 class PeriodicWave;
     31 }  // namespace WebCore
     32 
     33 class nsPIDOMWindowInner;
     34 
     35 namespace mozilla {
     36 
     37 class DOMMediaStream;
     38 class ErrorResult;
     39 class MediaTrack;
     40 class MediaTrackGraph;
     41 class AudioNodeTrack;
     42 
     43 namespace dom {
     44 
     45 enum class AudioContextState : uint8_t;
     46 class AnalyserNode;
     47 class AudioBuffer;
     48 class AudioBufferSourceNode;
     49 class AudioDestinationNode;
     50 class AudioListener;
     51 class AudioNode;
     52 class AudioWorklet;
     53 class BiquadFilterNode;
     54 class BrowsingContext;
     55 class ChannelMergerNode;
     56 class ChannelSplitterNode;
     57 class ConstantSourceNode;
     58 class ConvolverNode;
     59 class DelayNode;
     60 class DynamicsCompressorNode;
     61 class GainNode;
     62 class GlobalObject;
     63 class HTMLMediaElement;
     64 class IIRFilterNode;
     65 class MediaElementAudioSourceNode;
     66 class MediaStreamAudioDestinationNode;
     67 class MediaStreamAudioSourceNode;
     68 class MediaStreamTrack;
     69 class MediaStreamTrackAudioSourceNode;
     70 class OscillatorNode;
     71 class PannerNode;
     72 class ScriptProcessorNode;
     73 class StereoPannerNode;
     74 class WaveShaperNode;
     75 class PeriodicWave;
     76 struct PeriodicWaveConstraints;
     77 class Promise;
     78 enum class OscillatorType : uint8_t;
     79 
     80 // This is addrefed by the OscillatorNodeEngine on the main thread
     81 // and then used from the MTG thread.
     82 // It can be released either from the graph thread or the main thread.
     83 class BasicWaveFormCache {
     84 public:
     85  explicit BasicWaveFormCache(uint32_t aSampleRate);
     86  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BasicWaveFormCache)
     87  WebCore::PeriodicWave* GetBasicWaveForm(OscillatorType aType);  // per the class comment, callable from the MTG thread
     88 
     89 private:
     90  ~BasicWaveFormCache();
     91  RefPtr<WebCore::PeriodicWave> mSawtooth;  // cached wave per OscillatorType — presumably built on demand; confirm in the .cpp
     92  RefPtr<WebCore::PeriodicWave> mSquare;
     93  RefPtr<WebCore::PeriodicWave> mTriangle;
     94  uint32_t mSampleRate;  // sample rate supplied at construction
     95 };
     96 
     97 /* This runnable allows the MTG to notify the main thread when audio is actually
     98 * flowing */
     99 class StateChangeTask final : public Runnable {
    100 public:
    101  /* This constructor should be used when this event is sent from the main
    102   * thread. */
    103  StateChangeTask(AudioContext* aAudioContext, void* aPromise,
    104                  AudioContextState aNewState);
    105 
    106  /* This constructor should be used when this event is sent from the audio
    107   * thread. */
    108  StateChangeTask(AudioNodeTrack* aTrack, void* aPromise,
    109                  AudioContextState aNewState);
    110 
    111  NS_IMETHOD Run() override;
    112 
    113 private:
    114  RefPtr<AudioContext> mAudioContext;
    115  void* mPromise;  // opaque promise id — see the comment above AudioContext::Suspend/Resume/Close
    116  RefPtr<AudioNodeTrack> mAudioNodeTrack;
    117  AudioContextState mNewState;
    118 };
    119 
    120 enum class AudioContextOperation : uint8_t { Suspend, Resume, Close };
    121 static const char* const kAudioContextOptionsStrings[] = {"Suspend", "Resume",
    122                                                          "Close"};  // NOTE(review): `static` in a header duplicates this array per TU; `inline constexpr` would share one definition
    123 // When suspending or resuming an AudioContext, some operations have to notify
    124 // the main thread, so that the Promise is resolved, the state is modified, and
    125 // the statechange event is sent. Some other operations don't go back to the
    126 // main thread, for example when the AudioContext is paused by something that is
    127 // not caused by the page itself: opening a debugger, breaking on a breakpoint,
    128 // reloading a document.
    129 enum class AudioContextOperationFlags { None, SendStateChange };
    130 MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(AudioContextOperationFlags);
    131 
    132 struct AudioContextOptions;
    133 
    134 class AudioContext final : public DOMEventTargetHelper,
    135                           public nsIMemoryReporter,
    136                           public RelativeTimeline {
    137  AudioContext(nsPIDOMWindowInner* aParentWindow, bool aIsOffline,
    138               uint32_t aNumberOfChannels = 0, uint32_t aLength = 0,
    139               float aSampleRate = 0.0f);
    140  ~AudioContext();
    141 
    142 public:
    143  typedef uint64_t AudioContextId;
    144 
    145  NS_DECL_ISUPPORTS_INHERITED
    146  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioContext, DOMEventTargetHelper)
    147  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
    148 
    149  nsIGlobalObject* GetParentObject() const { return GetOwnerGlobal(); }
    150 
    151  nsISerialEventTarget* GetMainThread() const;
    152 
    153  void DisconnectFromOwner() override;
    154 
    155  void OnWindowDestroy();  // idempotent
    156 
    157  JSObject* WrapObject(JSContext* aCx,
    158                       JS::Handle<JSObject*> aGivenProto) override;
    159 
    160  using DOMEventTargetHelper::DispatchTrustedEvent;
    161 
    162  // Constructor for regular AudioContext
    163  static already_AddRefed<AudioContext> Constructor(
    164      const GlobalObject& aGlobal, const AudioContextOptions& aOptions,
    165      ErrorResult& aRv);
    166 
    167  // Constructor for offline AudioContext with options object
    168  static already_AddRefed<AudioContext> Constructor(
    169      const GlobalObject& aGlobal, const OfflineAudioContextOptions& aOptions,
    170      ErrorResult& aRv);
    171 
    172  // Constructor for offline AudioContext
    173  static already_AddRefed<AudioContext> Constructor(const GlobalObject& aGlobal,
    174                                                    uint32_t aNumberOfChannels,
    175                                                    uint32_t aLength,
    176                                                    float aSampleRate,
    177                                                    ErrorResult& aRv);
    178 
    179  // AudioContext methods
    180 
    181  AudioDestinationNode* Destination() const { return mDestination; }
    182 
    183  float SampleRate() const { return mSampleRate; }
    184 
    185  bool ShouldSuspendNewTrack() const {
    186    return mTracksAreSuspended || mCloseCalled;
    187  }
    188  double CurrentTime();
    189 
    190  AudioListener* Listener();
    191 
    192  AudioContextState State() const { return mAudioContextState; }
    193 
    194  double BaseLatency() const {
    195    // Gecko does not do any buffering between rendering the audio and sending
    196    // it to the audio subsystem.
    197    return 0.0;
    198  }
    199 
    200  double OutputLatency();
    201 
    202  void GetOutputTimestamp(AudioTimestamp& aTimeStamp);
    203 
    204  AudioWorklet* GetAudioWorklet(ErrorResult& aRv);
    205 
    206  bool IsRunning() const;
    207 
    208  // Called when an AudioScheduledSourceNode started or the source node starts,
    209  // this method might resume the AudioContext if it was not allowed to start.
    210  void StartBlockedAudioContextIfAllowed();
    211 
    212  // Those three methods return a promise to content, that is resolved when an
    213  // (possibly long) operation is completed on the MTG (and possibly other)
    214  // thread(s). To avoid having to match the calls and asynchronous result when
    215  // the operation is completed, we keep a reference to the promises on the main
    216  // thread, and then send the promises pointers down the MTG thread, as a void*
    217  // (to make it very clear that the pointer is to merely be treated as an ID).
    218  // When back on the main thread, we can resolve or reject the promise, by
    219  // casting it back to a `Promise*` while asserting we're back on the main
    220  // thread and removing the reference we added.
    221  already_AddRefed<Promise> Suspend(ErrorResult& aRv);
    222  already_AddRefed<Promise> Resume(ErrorResult& aRv);
    223  already_AddRefed<Promise> Close(ErrorResult& aRv);
    224  IMPL_EVENT_HANDLER(statechange)
    225 
    226  // These two functions are similar to Suspend() and Resume(), the difference
    227  // is they are designed for calling from the chrome side, not the content
    228  // side, e.g. from the inner window, so we don't need to return a promise.
    229  void SuspendFromChrome();
    230  void ResumeFromChrome();
    231  // Called on completion of offline rendering:
    232  void OfflineClose();
    233 
    234  already_AddRefed<AudioBufferSourceNode> CreateBufferSource();
    235 
    236  already_AddRefed<ConstantSourceNode> CreateConstantSource();
    237 
    238  already_AddRefed<AudioBuffer> CreateBuffer(uint32_t aNumberOfChannels,
    239                                             uint32_t aLength,
    240                                             float aSampleRate,
    241                                             ErrorResult& aRv);
    242 
    243  already_AddRefed<MediaStreamAudioDestinationNode>
    244  CreateMediaStreamDestination(ErrorResult& aRv);
    245 
    246  already_AddRefed<ScriptProcessorNode> CreateScriptProcessor(
    247      uint32_t aBufferSize, uint32_t aNumberOfInputChannels,
    248      uint32_t aNumberOfOutputChannels, ErrorResult& aRv);
    249 
    250  already_AddRefed<StereoPannerNode> CreateStereoPanner(ErrorResult& aRv);
    251 
    252  already_AddRefed<AnalyserNode> CreateAnalyser(ErrorResult& aRv);
    253 
    254  already_AddRefed<GainNode> CreateGain(ErrorResult& aRv);
    255 
    256  already_AddRefed<WaveShaperNode> CreateWaveShaper(ErrorResult& aRv);
    257 
    258  already_AddRefed<MediaElementAudioSourceNode> CreateMediaElementSource(
    259      HTMLMediaElement& aMediaElement, ErrorResult& aRv);
    260  already_AddRefed<MediaStreamAudioSourceNode> CreateMediaStreamSource(
    261      DOMMediaStream& aMediaStream, ErrorResult& aRv);
    262  already_AddRefed<MediaStreamTrackAudioSourceNode>
    263  CreateMediaStreamTrackSource(MediaStreamTrack& aMediaStreamTrack,
    264                               ErrorResult& aRv);
    265 
    266  already_AddRefed<DelayNode> CreateDelay(double aMaxDelayTime,
    267                                          ErrorResult& aRv);
    268 
    269  already_AddRefed<PannerNode> CreatePanner(ErrorResult& aRv);
    270 
    271  already_AddRefed<ConvolverNode> CreateConvolver(ErrorResult& aRv);
    272 
    273  already_AddRefed<ChannelSplitterNode> CreateChannelSplitter(
    274      uint32_t aNumberOfOutputs, ErrorResult& aRv);
    275 
    276  already_AddRefed<ChannelMergerNode> CreateChannelMerger(
    277      uint32_t aNumberOfInputs, ErrorResult& aRv);
    278 
    279  already_AddRefed<DynamicsCompressorNode> CreateDynamicsCompressor(
    280      ErrorResult& aRv);
    281 
    282  already_AddRefed<BiquadFilterNode> CreateBiquadFilter(ErrorResult& aRv);
    283 
    284  already_AddRefed<IIRFilterNode> CreateIIRFilter(
    285      const Sequence<double>& aFeedforward, const Sequence<double>& aFeedback,
    286      mozilla::ErrorResult& aRv);
    287 
    288  already_AddRefed<OscillatorNode> CreateOscillator(ErrorResult& aRv);
    289 
    290  already_AddRefed<PeriodicWave> CreatePeriodicWave(
    291      const Sequence<float>& aRealData, const Sequence<float>& aImagData,
    292      const PeriodicWaveConstraints& aConstraints, ErrorResult& aRv);
    293 
    294  already_AddRefed<Promise> DecodeAudioData(
    295      const ArrayBuffer& aBuffer,
    296      const Optional<OwningNonNull<DecodeSuccessCallback>>& aSuccessCallback,
    297      const Optional<OwningNonNull<DecodeErrorCallback>>& aFailureCallback,
    298      ErrorResult& aRv);
    299 
    300  // OfflineAudioContext methods
    301  already_AddRefed<Promise> StartRendering(ErrorResult& aRv);
    302  IMPL_EVENT_HANDLER(complete)
    303  unsigned long Length();
    304 
    305  bool IsOffline() const { return mIsOffline; }
    306 
    307  bool ShouldResistFingerprinting() const {
    308    return mShouldResistFingerprinting;
    309  }
    310 
    311  MediaTrackGraph* Graph() const;
    312  AudioNodeTrack* DestinationTrack() const;
    313 
    314  // Nodes register here if they will produce sound even if they have silent
    315  // or no input connections.  The AudioContext will keep registered nodes
    316  // alive until the context is collected.  This takes care of "playing"
    317  // references and "tail-time" references.
    318  void RegisterActiveNode(AudioNode* aNode);
    319  // Nodes unregister when they have finished producing sound for the
    320  // foreseeable future.
    321  // Do NOT call UnregisterActiveNode from an AudioNode destructor.
    322  // If the destructor is called, then the Node has already been unregistered.
    323  // The destructor may be called during hashtable enumeration, during which
    324  // unregistering would not be safe.
    325  void UnregisterActiveNode(AudioNode* aNode);
    326 
    327  uint32_t MaxChannelCount() const;
    328 
    329  uint32_t ActiveNodeCount() const;
    330 
    331  void Mute() const;
    332  void Unmute() const;
    333 
    334  void RegisterNode(AudioNode* aNode);
    335  void UnregisterNode(AudioNode* aNode);
    336 
    337  void OnStateChanged(void* aPromise, AudioContextState aNewState);
    338 
    339  BasicWaveFormCache* GetBasicWaveFormCache();
    340 
    341  void ShutdownWorklet();
    342  // Steals from |aParamMap|
    343  void SetParamMapForWorkletName(const nsAString& aName,
    344                                 AudioParamDescriptorMap* aParamMap);
    345  const AudioParamDescriptorMap* GetParamMapForWorkletName(
    346      const nsAString& aName) {
    347    return mWorkletParamDescriptors.Lookup(aName).DataPtrOrNull();
    348  }
    349 
    350  void Dispatch(already_AddRefed<nsIRunnable>&& aRunnable);
    351 
    352 private:
    353  void DisconnectFromWindow();
    354  already_AddRefed<Promise> CreatePromise(ErrorResult& aRv);
    355  void RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob);
    356  void ShutdownDecoder();
    357 
    358  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
    359  NS_DECL_NSIMEMORYREPORTER
    360 
    361  friend struct ::mozilla::WebAudioDecodeJob;
    362 
    363  nsTArray<RefPtr<mozilla::MediaTrack>> GetAllTracks() const;
    364 
    365  void ResumeInternal();
    366  void SuspendInternal(void* aPromise, AudioContextOperationFlags aFlags);
    367  void CloseInternal(void* aPromise, AudioContextOperationFlags aFlags);
    368 
    369  // Will report error message to console and dispatch testing event if needed
    370  // when AudioContext is blocked by autoplay policy.
    371  void ReportBlocked();
    372 
    373  void ReportToConsole(uint32_t aErrorFlags, const char* aMsg) const;
    374 
    375  // If the pref `dom.suspend_inactive.enabled` is enabled, the dom window will
    376  // be suspended when the window becomes inactive. In order to keep audio
    377  // context running still, we will ask pages to keep awake in that situation.
    378  void MaybeUpdatePageAwakeRequest();
    379  void MaybeClearPageAwakeRequest();
    380  void SetPageAwakeRequest(bool aShouldSet);
    381 
    382  BrowsingContext* GetTopLevelBrowsingContext();
    383 
    384 private:
    385  // Each AudioContext has an id, that is passed down the MediaTracks that
    386  // back the AudioNodes, so we can easily compute the set of all the
    387  // MediaTracks for a given context, on the MediaTrackGraph side.
    388  const AudioContextId mId;
    389  // Note that it's important for mSampleRate to be initialized before
    390  // mDestination, as mDestination's constructor needs to access it!
    391  const float mSampleRate;
    392  AudioContextState mAudioContextState;
    393  RefPtr<AudioDestinationNode> mDestination;
    394  RefPtr<AudioListener> mListener;
    395  RefPtr<AudioWorklet> mWorklet;
    396  nsTArray<UniquePtr<WebAudioDecodeJob>> mDecodeJobs;
    397  // This array is used to keep the suspend/close promises alive until
    398  // they are resolved, so we can safely pass them across threads.
    399  nsTArray<RefPtr<Promise>> mPromiseGripArray;
    400  // This array is used to only keep the resume promises alive until they are
    401  // resolved, so we can safely pass them across threads. If the audio context
    402  // is not allowed to play, the promise would be pending in this array and only
    403  // be resolved once the audio context has been allowed and the user calls
    404  // resume() again.
    404  nsTArray<RefPtr<Promise>> mPendingResumePromises;
    405  // See RegisterActiveNode.  These will keep the AudioContext alive while it
    406  // is rendering and the window remains alive.
    407  nsTHashSet<RefPtr<AudioNode>> mActiveNodes;
    408  // Raw (non-owning) references to all AudioNodes for this AudioContext.
    409  nsTHashSet<AudioNode*> mAllNodes;
    410  nsTHashMap<nsStringHashKey, AudioParamDescriptorMap> mWorkletParamDescriptors;
    411  // Cache to avoid recomputing basic waveforms all the time.
    412  RefPtr<BasicWaveFormCache> mBasicWaveFormCache;
    413  // Number of channels passed in the OfflineAudioContext ctor.
    414  uint32_t mNumberOfChannels;
    415  const RTPCallerType mRTPCallerType;
    416  const bool mShouldResistFingerprinting;
    417  const bool mIsOffline;
    418  // true iff realtime or startRendering() has been called.
    419  bool mIsStarted;
    420  bool mIsShutDown;
    421  bool mIsDisconnecting;
    422  // Close has been called; reject suspend and resume calls.
    423  bool mCloseCalled;
    424  // Whether the MediaTracks are suspended, due to one or more of
    425  // !mWasAllowedToStart, mSuspendedByContent, or mSuspendedByChrome.
    426  // false if offline.
    427  bool mTracksAreSuspended;
    428  // This flag stores the value of previous status of `allowed-to-start`.
    429  // true if offline.
    430  bool mWasAllowedToStart;
    431  // Whether this AudioContext is suspended because the page called suspend().
    432  // Unused if offline.
    433  bool mSuspendedByContent;
    434  // Whether this AudioContext is suspended because the Window is suspended.
    435  // Unused if offline.
    436  bool mSuspendedByChrome;
    437 
    438  // Whether we have set the page awake request when a non-offline audio context
    439  // is running. That keeps the audio context able to continue running
    440  // even if the window is inactive.
    441  bool mSetPageAwakeRequest = false;
    442 };
    443 
    444 static const dom::AudioContext::AudioContextId NO_AUDIO_CONTEXT = 0;  // sentinel id meaning "no AudioContext" — presumably real ids are nonzero; confirm where mId is assigned
    445 
    446 }  // namespace dom
    447 }  // namespace mozilla
    448 
    449 inline nsISupports* ToSupports(mozilla::dom::AudioContext* p) {  // disambiguate the nsISupports conversion via the cycle-collection upcast helper
    450  return NS_CYCLE_COLLECTION_CLASSNAME(mozilla::dom::AudioContext)::Upcast(p);
    451 }
    452 
    453 #endif