tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

TestAudioInputProcessing.cpp (31577B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
      5 * You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "AudioGenerator.h"
      8 #include "MediaEngineWebRTCAudio.h"
      9 #include "MediaTrackGraphImpl.h"
     10 #include "PrincipalHandle.h"
     11 #include "gmock/gmock.h"
     12 #include "gtest/gtest.h"
     13 #include "libwebrtcglue/SystemTime.h"
     14 #include "libwebrtcglue/WebrtcEnvironmentWrapper.h"
     15 #include "mozilla/NullPrincipal.h"
     16 #include "nsContentUtils.h"
     17 #include "nsTArray.h"
     18 
     19 using namespace mozilla;
     20 using testing::NiceMock;
     21 using testing::Return;
     22 
     23 template <class T>
     24 AudioChunk CreateAudioChunk(uint32_t aFrames, uint32_t aChannels,
     25                            AudioSampleFormat aSampleFormat);
     26 
// Minimal MediaTrackGraphImpl subclass that lets these tests drive
// graph-thread-only code (AudioInputProcessing, AudioProcessingTrack) directly
// from the test's main thread, without ever starting a real audio driver.
class MockGraph : public MediaTrackGraphImpl {
 public:
  explicit MockGraph(TrackRate aRate)
      : MediaTrackGraphImpl(0, aRate, nullptr, AbstractThread::MainThread()) {
    // Pretend every thread is the graph thread so thread-assertions in the
    // code under test pass when called from the test thread.
    ON_CALL(*this, OnGraphThread).WillByDefault(Return(true));
  }

  // Second phase of construction. Puts the graph into a state where control
  // messages run synchronously and the driver never starts.
  void Init(uint32_t aChannels) {
    MediaTrackGraphImpl::Init(OFFLINE_THREAD_DRIVER, DIRECT_DRIVER, aChannels);

    MonitorAutoLock lock(mMonitor);
    // We don't need a graph driver.  Advance to
    // LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION so that the driver never
    // starts.  Graph control messages run as in shutdown, synchronously.
    // This permits the main thread part of track initialization through
    // AudioProcessingTrack::Create().
    mLifecycleState = LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION;
#ifdef DEBUG
    mCanRunMessagesSynchronously = true;
#endif
    // Remove this graph's driver since it holds a ref. We are still kept
    // alive by the self-ref. Destroy() must be called to break that cycle if
    // no tracks are created and destroyed.
    mDriver = nullptr;
  }

  // Test hook: set the output device the graph reports for AEC reference.
  void ForceOutputDeviceForAEC(CubebUtils::AudioDeviceID aID) {
    mOutputDeviceForAEC = aID;
  }

  // Test hook: set what the graph considers the default output device.
  void ForceDefaultOutputDevice(CubebUtils::AudioDeviceID aID) {
    mDefaultOutputDeviceID = aID;
  }

  // No-op in tests; device defaults are controlled via the Force* hooks above.
  void UpdateEnumeratorDefaultDeviceTracking() override {}

  MOCK_CONST_METHOD0(OnGraphThread, bool());

 protected:
  // Protected: instances are ref-counted and destroyed via Release().
  ~MockGraph() = default;
};
     68 
// AudioInputProcessing will put extra frames as pre-buffering data to avoid
// glitches in non pass-through mode. The main goal of the test is to check how
// many frames are left in the AudioInputProcessing's mSegment in various
// situations after input data has been processed.
TEST(TestAudioInputProcessing, Buffering)
{
  const TrackRate rate = 8000;  // So packet size is 80
  const uint32_t channels = 1;
  auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
  graph->Init(channels);
  RefPtr track = AudioProcessingTrack::Create(graph);

  auto aip = MakeRefPtr<AudioInputProcessing>(channels);
  auto envWrapper = WebrtcEnvironmentWrapper::Create(
      mozilla::dom::RTCStatsTimestampMaker::Create());
  aip->SetEnvironmentWrapper(track, std::move(envWrapper));

  // Per-iteration advance; deliberately not a multiple of the 80-frame packet
  // size nor the 128-frame audio block size.
  const size_t frames = 72;

  AudioGenerator<AudioDataValue> generator(channels, rate);
  GraphTime processedTime;
  GraphTime nextTime;
  AudioSegment output;
  MediaEnginePrefs settings;
  settings.mChannels = channels;
  // pref "media.getusermedia.agc2_forced" defaults to true.
  // mAgc would need to be set to something other than kAdaptiveAnalog
  // for mobile, as asserted in AudioInputProcessing::ConfigForPrefs,
  // if gain_controller1 were used.
  settings.mAgc2Forced = true;

  // Toggle pass-through mode without starting
  {
    EXPECT_EQ(aip->IsPassThrough(graph), true);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 0);

    settings.mAgcOn = true;
    aip->ApplySettings(graph, nullptr, settings);
    EXPECT_EQ(aip->IsPassThrough(graph), false);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 0);

    settings.mAgcOn = false;
    aip->ApplySettings(graph, nullptr, settings);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
  }

  {
    // Need (nextTime - processedTime) = 128 - 0 = 128 frames this round.
    // aip has not started and is not set to processing mode yet, so output
    // will be filled with silence data directly.
    processedTime = 0;
    nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);

    AudioSegment input;
    generator.Generate(input, nextTime - processedTime);

    aip->Process(track, processedTime, nextTime, &input, &output);
    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    EXPECT_EQ(output.GetDuration(), nextTime);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
  }

  // Set aip to processing/non-pass-through mode
  settings.mAgcOn = true;
  aip->ApplySettings(graph, nullptr, settings);
  {
    // Need (nextTime - processedTime) = 256 - 128 = 128 frames this round.
    // aip has not started yet, so output will be filled with silence data
    // directly.
    processedTime = nextTime;
    nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(2 * frames);

    AudioSegment input;
    generator.Generate(input, nextTime - processedTime);

    aip->Process(track, processedTime, nextTime, &input, &output);
    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    EXPECT_EQ(output.GetDuration(), nextTime);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
  }

  // aip has been set to processing mode and is started.
  aip->Start(graph);
  {
    // Need (nextTime - processedTime) = 256 - 256 = 0 frames this round.
    // Process() will return early on 0 frames of input.
    // Pre-buffering is not triggered.
    processedTime = nextTime;
    nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(3 * frames);

    AudioSegment input;
    generator.Generate(input, nextTime - processedTime);

    aip->Process(track, processedTime, nextTime, &input, &output);
    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    EXPECT_EQ(output.GetDuration(), nextTime);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
  }

  {
    // Need (nextTime - processedTime) = 384 - 256 = 128 frames this round.
    // On receipt of these first frames, aip will insert 80 frames
    // into its internal buffer as pre-buffering.
    // Process() will take 128 frames from input, packetize and process
    // these frames into floor(128/80) = 1 80-frame packet (48 frames left in
    // the packetizer), insert packets into aip's internal buffer, then move
    // 128 frames from the internal buffer to output, leaving
    // 80 + 80 - 128 = 32 frames in aip's internal buffer.
    processedTime = nextTime;
    nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(4 * frames);

    AudioSegment input;
    generator.Generate(input, nextTime - processedTime);

    aip->Process(track, processedTime, nextTime, &input, &output);
    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    EXPECT_EQ(output.GetDuration(), nextTime);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
  }

  {
    // Need (nextTime - processedTime) = 384 - 384 = 0 frames this round.
    processedTime = nextTime;
    nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(5 * frames);

    AudioSegment input;
    generator.Generate(input, nextTime - processedTime);

    aip->Process(track, processedTime, nextTime, &input, &output);
    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    EXPECT_EQ(output.GetDuration(), nextTime);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
  }

  {
    // Need (nextTime - processedTime) = 512 - 384 = 128 frames this round.
    // Process() will take 128 frames from input, packetize and process
    // these frames into floor((128 + 48)/80) = 2 80-frame packets (16 frames
    // left in the packetizer), insert packets into aip's internal buffer, then
    // move 128 frames from the internal buffer to output, leaving
    // 32 + 2*80 - 128 = 64 frames in aip's internal buffer.
    processedTime = nextTime;
    nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(6 * frames);

    AudioSegment input;
    generator.Generate(input, nextTime - processedTime);

    aip->Process(track, processedTime, nextTime, &input, &output);
    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    EXPECT_EQ(output.GetDuration(), nextTime);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 64);
  }

  // Set aip to pass-through mode
  settings.mAgcOn = false;
  aip->ApplySettings(graph, nullptr, settings);
  {
    // Need (nextTime - processedTime) = 512 - 512 = 0 frames this round.
    // No buffering in pass-through mode
    processedTime = nextTime;
    nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(7 * frames);

    AudioSegment input;
    generator.Generate(input, nextTime - processedTime);

    aip->Process(track, processedTime, nextTime, &input, &output);
    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    EXPECT_EQ(output.GetDuration(), processedTime);
    EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
  }

  aip->Stop(graph);
  track->Destroy();
}
    243 
// Verifies that AudioInputProcessing preserves each input chunk's
// PrincipalHandle through Process(), both in audio-processing mode (where the
// data goes through the packetizer and is delayed by pre-buffering) and in
// pass-through mode.
TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
{
  const TrackRate rate = 48000;  // so # of output frames from packetizer is 480
  const uint32_t channels = 2;
  auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
  graph->Init(channels);
  RefPtr track = AudioProcessingTrack::Create(graph);

  auto aip = MakeRefPtr<AudioInputProcessing>(channels);
  auto envWrapper = WebrtcEnvironmentWrapper::Create(
      mozilla::dom::RTCStatsTimestampMaker::Create());
  aip->SetEnvironmentWrapper(track, std::move(envWrapper));

  AudioGenerator<AudioDataValue> generator(channels, rate);

  // Two distinct principals plus PRINCIPAL_HANDLE_NONE give three different
  // values to track through the pipeline.
  RefPtr<nsIPrincipal> dummy_principal =
      NullPrincipal::CreateWithoutOriginAttributes();
  const PrincipalHandle principal1 = MakePrincipalHandle(dummy_principal.get());
  const PrincipalHandle principal2 =
      MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());

  // Total 4800 frames. It's easier to test with frames of multiples of 480.
  nsTArray<std::pair<TrackTime, PrincipalHandle>> framesWithPrincipal = {
      {100, principal1},
      {200, PRINCIPAL_HANDLE_NONE},
      {300, principal2},
      {400, principal1},
      {440, PRINCIPAL_HANDLE_NONE},
      // 3 packet-size above.
      {480, principal1},
      {480, principal2},
      {480, PRINCIPAL_HANDLE_NONE},
      // 3 packet-size above.
      {500, principal2},
      {490, principal1},
      {600, principal1},
      {330, principal1}
      // 4 packet-size above.
  };

  // Generate 4800 frames of data with different principals.
  AudioSegment input;
  {
    for (const auto& [duration, principal] : framesWithPrincipal) {
      AudioSegment data;
      generator.Generate(data, duration);
      // Tag every chunk of this span with its designated principal.
      for (AudioSegment::ChunkIterator it(data); !it.IsEnded(); it.Next()) {
        it->mPrincipalHandle = principal;
      }

      input.AppendFrom(&data);
    }
  }

  // Checks that every span of `data` carries the principal assigned to the
  // corresponding span of the generated input.
  auto verifyPrincipals = [&](const AudioSegment& data) {
    TrackTime start = 0;
    for (const auto& [duration, principal] : framesWithPrincipal) {
      const TrackTime end = start + duration;

      AudioSegment slice;
      slice.AppendSlice(data, start, end);
      start = end;

      for (AudioSegment::ChunkIterator it(slice); !it.IsEnded(); it.Next()) {
        EXPECT_EQ(it->mPrincipalHandle, principal);
      }
    }
  };

  // Check the principals in audio-processing mode.
  MediaEnginePrefs settings;
  settings.mChannels = channels;
  settings.mAgcOn = true;
  settings.mAgc2Forced = true;
  aip->ApplySettings(graph, nullptr, settings);
  EXPECT_EQ(aip->IsPassThrough(graph), false);
  aip->Start(graph);
  {
    AudioSegment output;
    {
      AudioSegment data;
      aip->Process(track, 0, 4800, &input, &data);
      EXPECT_EQ(input.GetDuration(), 4800);
      EXPECT_EQ(data.GetDuration(), 4800);

      // Extract another 480 frames to account for delay from pre-buffering.
      EXPECT_EQ(aip->NumBufferedFrames(graph), 480);
      AudioSegment dummy;
      dummy.AppendNullData(480);
      aip->Process(track, 0, 480, &dummy, &data);
      EXPECT_EQ(dummy.GetDuration(), 480);
      EXPECT_EQ(data.GetDuration(), 480 + 4800);

      // Ignore the pre-buffering silence.
      output.AppendSlice(data, 480, 480 + 4800);
    }

    verifyPrincipals(output);
  }

  // Check the principals in pass-through mode.
  settings.mAgcOn = false;
  aip->ApplySettings(graph, nullptr, settings);
  EXPECT_EQ(aip->IsPassThrough(graph), true);
  {
    AudioSegment output;
    aip->Process(track, 0, 4800, &input, &output);
    EXPECT_EQ(input.GetDuration(), 4800);
    EXPECT_EQ(output.GetDuration(), 4800);

    verifyPrincipals(output);
  }

  aip->Stop(graph);
  track->Destroy();
}
    360 
    361 TEST(TestAudioInputProcessing, Downmixing)
    362 {
    363  const TrackRate rate = 44100;
    364  const uint32_t channels = 4;
    365  auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
    366  graph->Init(channels);
    367  RefPtr track = AudioProcessingTrack::Create(graph);
    368 
    369  auto aip = MakeRefPtr<AudioInputProcessing>(channels);
    370  auto envWrapper = WebrtcEnvironmentWrapper::Create(
    371      mozilla::dom::RTCStatsTimestampMaker::Create());
    372  aip->SetEnvironmentWrapper(track, std::move(envWrapper));
    373 
    374  const size_t frames = 44100;
    375 
    376  AudioGenerator<AudioDataValue> generator(channels, rate);
    377  GraphTime processedTime;
    378  GraphTime nextTime;
    379 
    380  MediaEnginePrefs settings;
    381  settings.mChannels = channels;
    382  settings.mAgcOn = true;
    383  settings.mAgc2Forced = true;
    384  aip->ApplySettings(graph, nullptr, settings);
    385  EXPECT_EQ(aip->IsPassThrough(graph), false);
    386  aip->Start(graph);
    387 
    388  processedTime = 0;
    389  nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
    390 
    391  {
    392    AudioSegment input;
    393    AudioSegment output;
    394    generator.Generate(input, nextTime - processedTime);
    395 
    396    // Intentionally reduce the amplitude of the generated sine wave so there's
    397    // no chance the max amplitude reaches 1.0, but not enough so that 4
    398    // channels summed together won't clip.
    399    input.ApplyVolume(0.9);
    400 
    401    // Process is going to see that it has 4 channels of input, and is going to
    402    // downmix to mono, scaling the input by 1/4 in the process.
    403    // We can't compare the input and output signal because the sine is going to
    404    // be mangledui
    405    aip->Process(track, processedTime, nextTime, &input, &output);
    406    EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    407    EXPECT_EQ(output.GetDuration(), nextTime);
    408    EXPECT_EQ(output.MaxChannelCount(), 1u);
    409 
    410    // Verify that it doesn't clip: the input signal has likely been mangled by
    411    // the various processing passes, but at least it shouldn't clip. We know we
    412    // always have floating point audio here, regardless of the sample-type used
    413    // by Gecko.
    414    for (AudioSegment::ChunkIterator iterOutput(output); !iterOutput.IsEnded();
    415         iterOutput.Next()) {
    416      const float* const output = iterOutput->ChannelData<float>()[0];
    417      for (uint32_t i = 0; i < iterOutput->GetDuration(); i++) {
    418        // Very conservative here, it's likely that the AGC lowers the volume a
    419        // lot.
    420        EXPECT_LE(std::abs(output[i]), 0.95);
    421      }
    422    }
    423  }
    424 
    425  // Now, repeat the test in pass-through mode, checking we get the unmodified
    426  // 4 channels.
    427  settings.mAgcOn = false;
    428  aip->ApplySettings(graph, nullptr, settings);
    429  EXPECT_EQ(aip->IsPassThrough(graph), true);
    430 
    431  AudioSegment input, output;
    432  processedTime = nextTime;
    433  nextTime += MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
    434  generator.Generate(input, nextTime - processedTime);
    435 
    436  aip->Process(track, processedTime, nextTime, &input, &output);
    437  EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
    438  EXPECT_EQ(output.GetDuration(), nextTime - processedTime);
    439  // This time, no downmix: 4 channels of input, 4 channels of output
    440  EXPECT_EQ(output.MaxChannelCount(), 4u);
    441 
    442  nsTArray<AudioDataValue> inputLinearized, outputLinearized;
    443  input.WriteToInterleavedBuffer(inputLinearized, input.MaxChannelCount());
    444  output.WriteToInterleavedBuffer(outputLinearized, output.MaxChannelCount());
    445 
    446  // The data should be passed through, and exactly equal.
    447  for (uint32_t i = 0; i < frames * channels; i++) {
    448    EXPECT_EQ(inputLinearized[i], outputLinearized[i]);
    449  }
    450 
    451  aip->Stop(graph);
    452  track->Destroy();
    453 }
    454 
    455 TEST(TestAudioInputProcessing, DisabledPlatformProcessing)
    456 {
    457  const TrackRate rate = 44100;
    458  const uint32_t channels = 1;
    459  auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
    460  graph->Init(channels);
    461 
    462  auto aip = MakeRefPtr<AudioInputProcessing>(channels);
    463 
    464  MediaEnginePrefs settings;
    465  settings.mUsePlatformProcessing = false;
    466  settings.mAecOn = true;
    467  aip->ApplySettings(graph, nullptr, settings);
    468  aip->Start(graph);
    469 
    470  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
    471            CUBEB_INPUT_PROCESSING_PARAM_NONE);
    472 
    473  aip->Stop(graph);
    474  graph->Destroy();
    475 }
    476 
    477 TEST(TestAudioInputProcessing, EnabledPlatformProcessing)
    478 {
    479  const TrackRate rate = 44100;
    480  const uint32_t channels = 1;
    481  auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
    482  graph->Init(channels);
    483 
    484  auto aip = MakeRefPtr<AudioInputProcessing>(channels);
    485 
    486  MediaEnginePrefs settings;
    487  settings.mUsePlatformProcessing = true;
    488  settings.mAecOn = true;
    489  aip->ApplySettings(graph, nullptr, settings);
    490  aip->Start(graph);
    491 
    492  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
    493            CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
    494 
    495  aip->Stop(graph);
    496  graph->Destroy();
    497 }
    498 
    499 namespace webrtc {
    500 bool operator==(const AudioProcessing::Config& aLhs,
    501                const AudioProcessing::Config& aRhs) {
    502  return aLhs.echo_canceller.enabled == aRhs.echo_canceller.enabled &&
    503         (aLhs.gain_controller1.enabled == aRhs.gain_controller1.enabled ||
    504          aLhs.gain_controller2.enabled == aRhs.gain_controller2.enabled) &&
    505         aLhs.noise_suppression.enabled == aRhs.noise_suppression.enabled;
    506 }
    507 
    508 static std::ostream& operator<<(
    509    std::ostream& aStream, const webrtc::AudioProcessing::Config& aConfig) {
    510  aStream << "webrtc::AudioProcessing::Config[";
    511  bool hadPrior = false;
    512  if (aConfig.echo_canceller.enabled) {
    513    aStream << "AEC";
    514    hadPrior = true;
    515  }
    516  if (aConfig.gain_controller1.enabled || aConfig.gain_controller2.enabled) {
    517    if (hadPrior) {
    518      aStream << ", ";
    519    }
    520    aStream << "AGC";
    521  }
    522  if (aConfig.noise_suppression.enabled) {
    523    if (hadPrior) {
    524      aStream << ", ";
    525    }
    526    aStream << "NS";
    527  }
    528  aStream << "]";
    529  return aStream;
    530 }
    531 }  // namespace webrtc
    532 
// Walks the state machine that keeps the platform (cubeb) input processing
// params in sync with the software (APM) config: successful application,
// out-of-order responses, errors, partial platform support, concurrent
// requests from another track, and the generation reset after an input track
// change. The integer passed alongside each Notify* call is the request
// generation used to match requests with their results.
TEST(TestAudioInputProcessing, PlatformProcessing)
{
  const TrackRate rate = 44100;
  const uint32_t channels = 1;
  auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
  graph->Init(channels);

  auto aip = MakeRefPtr<AudioInputProcessing>(channels);

  MediaEnginePrefs settings;
  settings.mUsePlatformProcessing = true;
  settings.mAecOn = true;
  aip->ApplySettings(graph, nullptr, settings);
  aip->Start(graph);

  // Reference configs used in expectations below.
  webrtc::AudioProcessing::Config echoOnlyConfig;
  echoOnlyConfig.echo_canceller.enabled = true;
  webrtc::AudioProcessing::Config noiseOnlyConfig;
  noiseOnlyConfig.noise_suppression.enabled = true;
  webrtc::AudioProcessing::Config echoNoiseConfig = echoOnlyConfig;
  echoNoiseConfig.noise_suppression.enabled = true;

  // Config is applied, and platform processing requested.
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // No other constraint requests present.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 1, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // Platform processing params successfully applied.
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 1, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
  // Turns off the equivalent APM config.
  EXPECT_EQ(aip->AppliedConfig(graph), webrtc::AudioProcessing::Config());
  EXPECT_TRUE(aip->IsPassThrough(graph));

  // Request for a response that comes back out-of-order later.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 2, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);

  // Simulate an error after a driver switch.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 3, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
  // Requesting the same config that is already applied; does nothing.
  EXPECT_EQ(aip->AppliedConfig(graph), webrtc::AudioProcessing::Config());
  EXPECT_TRUE(aip->IsPassThrough(graph));
  // Error notification.
  aip->NotifySetRequestedInputProcessingParamsResult(graph, 3,
                                                     Err(CUBEB_ERROR));
  // The APM config is turned back on, and platform processing is requested to
  // be turned off.
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_NONE);
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // The request for turning platform processing off.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 4, CUBEB_INPUT_PROCESSING_PARAM_NONE);
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // Pretend there was a response for an old request.
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 2, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
  // It does nothing since we are requesting NONE now.
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_NONE);
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // Turn it off as requested.
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 4, CUBEB_INPUT_PROCESSING_PARAM_NONE);
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_NONE);
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // Test partial support for the requested params.
  settings.mNoiseOn = true;
  aip->ApplySettings(graph, nullptr, settings);
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
                CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  // The request doesn't change anything.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 5,
      CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
          CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  // Only noise suppression was supported in the platform.
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 5, CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  // In the APM only echo cancellation is applied.
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // Test error for partial support.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 6,
      CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
          CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  aip->NotifySetRequestedInputProcessingParamsResult(graph, 6,
                                                     Err(CUBEB_ERROR));
  // The full config is applied in the APM, and NONE is requested.
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_NONE);
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // Enable platform processing again.
  aip->ApplySettings(graph, nullptr, settings);
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
                CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  // Request.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 7,
      CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
          CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  // It succeeded.
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 7,
      static_cast<cubeb_input_processing_params>(
          CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
          CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION));
  // No config is applied in the APM, and the full set is requested.
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
                CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), webrtc::AudioProcessing::Config());
  EXPECT_TRUE(aip->IsPassThrough(graph));

  // Simulate that another concurrent request was made, i.e. two tracks are
  // using the same device with different processing params, where the
  // intersection of processing params is NONE.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 8, CUBEB_INPUT_PROCESSING_PARAM_NONE);
  // The full config is applied in the APM.
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  // The result succeeds, leading to no change since sw processing is already
  // applied.
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 8, CUBEB_INPUT_PROCESSING_PARAM_NONE);
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
                CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  // The other concurrent request goes away.
  aip->NotifySetRequestedInputProcessingParams(
      graph, 9,
      CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
          CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  // The full config is still applied in the APM.
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  // The result succeeds, leading to no change since sw processing is already
  // applied.
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 9,
      static_cast<cubeb_input_processing_params>(
          CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
          CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION));
  EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
            CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
                CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
  EXPECT_EQ(aip->AppliedConfig(graph), webrtc::AudioProcessing::Config());
  EXPECT_TRUE(aip->IsPassThrough(graph));

  // Changing input track resets the processing params generation. The applied
  // config (AEC, NS) is adapted to the subset applied in the platform (AEC).
  aip->Disconnect(graph);
  aip->NotifySetRequestedInputProcessingParams(
      graph, 1, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
  EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));
  aip->NotifySetRequestedInputProcessingParamsResult(
      graph, 1, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
  EXPECT_EQ(aip->AppliedConfig(graph), noiseOnlyConfig);
  EXPECT_FALSE(aip->IsPassThrough(graph));

  aip->Stop(graph);
  graph->Destroy();
}
    735 
    736 TEST(TestAudioInputProcessing, PlatformProcessingSetSinkId)
    737 {
    738  const TrackRate rate = 44100;
    739  const uint32_t channels = 1;
    740  auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
    741  graph->ForceDefaultOutputDevice(CubebUtils::AudioDeviceID(1));
    742  graph->ForceOutputDeviceForAEC(CubebUtils::AudioDeviceID(2));
    743  graph->Init(channels);
    744  ASSERT_EQ(graph->PrimaryOutputDeviceID(), nullptr);
    745 
    746  RefPtr track = AudioProcessingTrack::Create(graph);
    747  auto aip = MakeRefPtr<AudioInputProcessing>(channels);
    748  aip->SetEnvironmentWrapper(track, WebrtcEnvironmentWrapper::Create(
    749                                        dom::RTCStatsTimestampMaker::Create()));
    750  track->ConnectDeviceInput(nullptr, aip, PRINCIPAL_HANDLE_NONE);
    751 
    752  MediaEnginePrefs settings;
    753  settings.mUsePlatformProcessing = true;
    754  settings.mAecOn = true;
    755  settings.mAgcOn = true;
    756  settings.mAgc2Forced = true;
    757  settings.mNoiseOn = true;
    758  settings.mChannels = channels;
    759  aip->ApplySettings(graph, nullptr, settings);
    760  aip->Start(graph);
    761 
    762  webrtc::AudioProcessing::Config echoOnlyConfig;
    763  echoOnlyConfig.echo_canceller.enabled = true;
    764  webrtc::AudioProcessing::Config allConfig;
    765  allConfig.echo_canceller.enabled = allConfig.noise_suppression.enabled =
    766      allConfig.gain_controller2.enabled = true;
    767 
    768  constexpr cubeb_input_processing_params PROCESSING_PARAM_ALL =
    769      CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
    770      CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL |
    771      CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION;
    772 
    773  // Config is applied, and platform processing requested.
    774  EXPECT_EQ(aip->RequestedInputProcessingParams(graph), PROCESSING_PARAM_ALL);
    775  EXPECT_EQ(aip->AppliedConfig(graph), allConfig);
    776  EXPECT_FALSE(aip->IsPassThrough(graph));
    777 
    778  // No other constraint requests present.
    779  aip->NotifySetRequestedInputProcessingParams(graph, 1, PROCESSING_PARAM_ALL);
    780  EXPECT_EQ(aip->AppliedConfig(graph), allConfig);
    781  EXPECT_FALSE(aip->IsPassThrough(graph));
    782 
    783  // Platform processing params successfully applied.
    784  aip->NotifySetRequestedInputProcessingParamsResult(graph, 1,
    785                                                     PROCESSING_PARAM_ALL);
    786  // Because setSinkId is used, AEC (only) is still applied.
    787  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
    788  EXPECT_FALSE(aip->IsPassThrough(graph));
    789 
    790  // Changing to primary device for AEC should enable passthrough.
    791  const GraphTime frames = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(100);
    792  AudioGenerator<AudioDataValue> generator(channels, rate);
    793 
    794  graph->ForceOutputDeviceForAEC(CubebUtils::AudioDeviceID(1));
    795  GraphTime processedTime = 0, nextTime = 0;
    796  {
    797    AudioSegment input, output;
    798    processedTime = nextTime;
    799    nextTime += frames;
    800    generator.Generate(input, frames);
    801    aip->Process(track, processedTime, nextTime, &input, &output);
    802    for (AudioSegment::ConstChunkIterator it(output); !it.IsEnded();
    803         it.Next()) {
    804      aip->ProcessOutputData(track, *it);
    805    }
    806  }
    807  EXPECT_EQ(aip->RequestedInputProcessingParams(graph), PROCESSING_PARAM_ALL);
    808  EXPECT_EQ(aip->AppliedConfig(graph), webrtc::AudioProcessing::Config());
    809  EXPECT_TRUE(aip->IsPassThrough(graph));
    810 
    811  // Changing to non-primary device for AEC should turn on AEC again.
    812  graph->ForceOutputDeviceForAEC(CubebUtils::AudioDeviceID(2));
    813  {
    814    AudioSegment input, output;
    815    processedTime = nextTime;
    816    nextTime += frames;
    817    generator.Generate(input, frames);
    818    aip->Process(track, processedTime, nextTime, &input, &output);
    819    for (AudioSegment::ConstChunkIterator it(output); !it.IsEnded();
    820         it.Next()) {
    821      aip->ProcessOutputData(track, *it);
    822    }
    823  }
    824  EXPECT_EQ(aip->RequestedInputProcessingParams(graph), PROCESSING_PARAM_ALL);
    825  EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
    826  EXPECT_FALSE(aip->IsPassThrough(graph));
    827 
    828  aip->Stop(graph);
    829  track->DisconnectDeviceInput();
    830  track->Destroy();
    831  graph->Destroy();
    832 }