tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

TestAudioSegment.cpp (12566B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
      5 * You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include <iostream>
      8 
      9 #include "AudioGenerator.h"
     10 #include "AudioSegment.h"
     11 #include "gtest/gtest.h"
     12 
     13 using namespace mozilla;
     14 
     15 namespace audio_segment {
     16 
     17 /* Helper function to give us the maximum and minimum value that don't clip,
     18 * for a given sample format (integer or floating-point). */
     19 template <typename T>
     20 T GetLowValue();
     21 
     22 template <typename T>
     23 T GetHighValue();
     24 
     25 template <typename T>
     26 T GetSilentValue();
     27 
     28 template <>
     29 float GetLowValue<float>() {
     30  return -1.0;
     31 }
     32 
     33 template <>
     34 int16_t GetLowValue<short>() {
     35  return INT16_MIN;
     36 }
     37 
     38 template <>
     39 float GetHighValue<float>() {
     40  return 1.0;
     41 }
     42 
     43 template <>
     44 int16_t GetHighValue<short>() {
     45  return INT16_MAX;
     46 }
     47 
     48 template <>
     49 float GetSilentValue() {
     50  return 0.0;
     51 }
     52 
     53 template <>
     54 int16_t GetSilentValue() {
     55  return 0;
     56 }
     57 
     58 // Get an array of planar audio buffers that has the inverse of the index of the
     59 // channel (1-indexed) as samples.
     60 template <typename T>
     61 const T* const* GetPlanarChannelArray(size_t aChannels, size_t aSize) {
     62  T** channels = new T*[aChannels];
     63  for (size_t c = 0; c < aChannels; c++) {
     64    channels[c] = new T[aSize];
     65    for (size_t i = 0; i < aSize; i++) {
     66      channels[c][i] = ConvertAudioSample<T>(1.f / static_cast<float>(c + 1));
     67    }
     68  }
     69  return channels;
     70 }
     71 
     72 template <typename T>
     73 void DeletePlanarChannelsArray(const T* const* aArrays, size_t aChannels) {
     74  for (size_t channel = 0; channel < aChannels; channel++) {
     75    delete[] aArrays[channel];
     76  }
     77  delete[] aArrays;
     78 }
     79 
     80 template <typename T>
     81 T** GetPlanarArray(size_t aChannels, size_t aSize) {
     82  T** channels = new T*[aChannels];
     83  for (size_t c = 0; c < aChannels; c++) {
     84    channels[c] = new T[aSize];
     85    for (size_t i = 0; i < aSize; i++) {
     86      channels[c][i] = 0.0f;
     87    }
     88  }
     89  return channels;
     90 }
     91 
     92 template <typename T>
     93 void DeletePlanarArray(T** aArrays, size_t aChannels) {
     94  for (size_t channel = 0; channel < aChannels; channel++) {
     95    delete[] aArrays[channel];
     96  }
     97  delete[] aArrays;
     98 }
     99 
    100 // Get an array of audio samples that have the inverse of the index of the
    101 // channel (1-indexed) as samples.
    102 template <typename T>
    103 const T* GetInterleavedChannelArray(size_t aChannels, size_t aSize) {
    104  size_t sampleCount = aChannels * aSize;
    105  T* samples = new T[sampleCount];
    106  for (size_t i = 0; i < sampleCount; i++) {
    107    uint32_t channel = (i % aChannels) + 1;
    108    samples[i] = ConvertAudioSample<T>(1.f / static_cast<float>(channel));
    109  }
    110  return samples;
    111 }
    112 
    113 template <typename T>
    114 void DeleteInterleavedChannelArray(const T* aArray) {
    115  delete[] aArray;
    116 }
    117 
    118 bool FuzzyEqual(float aLhs, float aRhs) { return std::abs(aLhs - aRhs) < 0.01; }
    119 
    120 template <typename SrcT, typename DstT>
    121 void TestInterleaveAndConvert() {
    122  size_t arraySize = 1024;
    123  size_t maxChannels = 8;  // 7.1
    124  for (uint32_t channels = 1; channels < maxChannels; channels++) {
    125    const SrcT* const* src = GetPlanarChannelArray<SrcT>(channels, arraySize);
    126    DstT* dst = new DstT[channels * arraySize];
    127 
    128    InterleaveAndConvertBuffer(src, arraySize, 1.0, channels, dst);
    129 
    130    uint32_t channelIndex = 0;
    131    for (size_t i = 0; i < arraySize * channels; i++) {
    132      ASSERT_TRUE(
    133          FuzzyEqual(dst[i], ConvertAudioSample<DstT>(
    134                                 1.f / static_cast<float>(channelIndex + 1))));
    135      channelIndex++;
    136      channelIndex %= channels;
    137    }
    138 
    139    DeletePlanarChannelsArray(src, channels);
    140    delete[] dst;
    141  }
    142 }
    143 
    144 template <typename SrcT, typename DstT>
    145 void TestDeinterleaveAndConvert() {
    146  size_t arraySize = 1024;
    147  size_t maxChannels = 8;  // 7.1
    148  for (uint32_t channels = 1; channels < maxChannels; channels++) {
    149    const SrcT* src = GetInterleavedChannelArray<SrcT>(channels, arraySize);
    150    DstT** dst = GetPlanarArray<DstT>(channels, arraySize);
    151 
    152    DeinterleaveAndConvertBuffer(src, arraySize, channels, dst);
    153 
    154    for (size_t channel = 0; channel < channels; channel++) {
    155      for (size_t i = 0; i < arraySize; i++) {
    156        ASSERT_TRUE(FuzzyEqual(
    157            dst[channel][i],
    158            ConvertAudioSample<DstT>(1.f / static_cast<float>(channel + 1))));
    159      }
    160    }
    161 
    162    DeleteInterleavedChannelArray(src);
    163    DeletePlanarArray(dst, channels);
    164  }
    165 }
    166 
    167 uint8_t gSilence[4096] = {0};
    168 
    169 template <typename T>
    170 T* SilentChannel() {
    171  return reinterpret_cast<T*>(gSilence);
    172 }
    173 
    174 template <typename T>
    175 void TestUpmixStereo() {
    176  size_t arraySize = 1024;
    177  nsTArray<T*> channels;
    178  nsTArray<const T*> channelsptr;
    179 
    180  channels.SetLength(1);
    181  channelsptr.SetLength(1);
    182 
    183  channels[0] = new T[arraySize];
    184 
    185  for (size_t i = 0; i < arraySize; i++) {
    186    channels[0][i] = GetHighValue<T>();
    187  }
    188  channelsptr[0] = channels[0];
    189 
    190  AudioChannelsUpMix(&channelsptr, 2, SilentChannel<T>());
    191 
    192  for (size_t channel = 0; channel < 2; channel++) {
    193    for (size_t i = 0; i < arraySize; i++) {
    194      ASSERT_TRUE(channelsptr[channel][i] == GetHighValue<T>());
    195    }
    196  }
    197  delete[] channels[0];
    198 }
    199 
    200 template <typename T>
    201 void TestDownmixStereo() {
    202  const size_t arraySize = 1024;
    203  nsTArray<const T*> inputptr;
    204  nsTArray<T*> input;
    205  T** output;
    206 
    207  output = new T*[1];
    208  output[0] = new T[arraySize];
    209 
    210  input.SetLength(2);
    211  inputptr.SetLength(2);
    212 
    213  for (size_t channel = 0; channel < input.Length(); channel++) {
    214    input[channel] = new T[arraySize];
    215    for (size_t i = 0; i < arraySize; i++) {
    216      input[channel][i] = channel == 0 ? GetLowValue<T>() : GetHighValue<T>();
    217    }
    218    inputptr[channel] = input[channel];
    219  }
    220 
    221  AudioChannelsDownMix<T, T>(inputptr, Span(output, 1), arraySize);
    222 
    223  for (size_t i = 0; i < arraySize; i++) {
    224    ASSERT_TRUE(output[0][i] == GetSilentValue<T>());
    225    ASSERT_TRUE(output[0][i] == GetSilentValue<T>());
    226  }
    227 
    228  delete[] output[0];
    229  delete[] output;
    230 }
    231 
// Exercise the interleave/deinterleave conversion helpers for every
// combination of float and int16_t source/destination formats, then the
// up- and down-mix helpers for both sample formats.
TEST(AudioSegment, Test)
{
  TestInterleaveAndConvert<float, float>();
  TestInterleaveAndConvert<float, int16_t>();
  TestInterleaveAndConvert<int16_t, float>();
  TestInterleaveAndConvert<int16_t, int16_t>();
  TestDeinterleaveAndConvert<float, float>();
  TestDeinterleaveAndConvert<float, int16_t>();
  TestDeinterleaveAndConvert<int16_t, float>();
  TestDeinterleaveAndConvert<int16_t, int16_t>();
  TestUpmixStereo<float>();
  TestUpmixStereo<int16_t>();
  TestDownmixStereo<float>();
  TestDownmixStereo<int16_t>();
}
    247 
// Fill aChunk with aDuration frames of Channels channels, every sample set to
// the loudest non-clipping value for T. The chunk's mChannelData pointers
// alias the per-channel storage owned by the SharedChannelArrayBuffer that
// mBuffer ends up holding.
template <class T, uint32_t Channels>
void fillChunk(AudioChunk* aChunk, int aDuration) {
  static_assert(Channels != 0, "Filling 0 channels is a no-op");

  aChunk->mDuration = aDuration;

  AutoTArray<nsTArray<T>, Channels> buffer;
  buffer.SetLength(Channels);
  aChunk->mChannelData.ClearAndRetainStorage();
  aChunk->mChannelData.SetCapacity(Channels);
  for (nsTArray<T>& channel : buffer) {
    T* ch = channel.AppendElements(aDuration);
    for (int i = 0; i < aDuration; ++i) {
      ch[i] = GetHighValue<T>();
    }
    // Keep a raw pointer into this channel's heap allocation.
    aChunk->mChannelData.AppendElement(ch);
  }

  // NOTE(review): the mChannelData pointers captured above rely on the move
  // transferring each channel's heap buffer rather than copying its elements
  // — confirm against nsTArray's move semantics before reordering anything.
  aChunk->mBuffer = new mozilla::SharedChannelArrayBuffer<T>(std::move(buffer));
  aChunk->mBufferFormat = AudioSampleTypeToFormat<T>::Format;
}
    269 
    270 TEST(AudioSegment, FlushAfter_ZeroDuration)
    271 {
    272  AudioChunk c;
    273  fillChunk<float, 2>(&c, 10);
    274 
    275  AudioSegment s;
    276  s.AppendAndConsumeChunk(std::move(c));
    277  s.FlushAfter(0);
    278  EXPECT_EQ(s.GetDuration(), 0);
    279 }
    280 
    281 TEST(AudioSegment, FlushAfter_SmallerDuration)
    282 {
    283  // It was crashing when the first chunk was silence (null) and FlushAfter
    284  // was called for a duration, smaller or equal to the duration of the
    285  // first chunk.
    286  TrackTime duration = 10;
    287  TrackTime smaller_duration = 8;
    288  AudioChunk c1;
    289  c1.SetNull(duration);
    290  AudioChunk c2;
    291  fillChunk<float, 2>(&c2, duration);
    292 
    293  AudioSegment s;
    294  s.AppendAndConsumeChunk(std::move(c1));
    295  s.AppendAndConsumeChunk(std::move(c2));
    296  s.FlushAfter(smaller_duration);
    297  EXPECT_EQ(s.GetDuration(), smaller_duration) << "Check new duration";
    298 
    299  TrackTime chunkByChunkDuration = 0;
    300  for (AudioSegment::ChunkIterator iter(s); !iter.IsEnded(); iter.Next()) {
    301    chunkByChunkDuration += iter->GetDuration();
    302  }
    303  EXPECT_EQ(s.GetDuration(), chunkByChunkDuration)
    304      << "Confirm duration chunk by chunk";
    305 }
    306 
// MaxChannelCount() memoizes the channel count of real (non-null) chunks so
// that a segment that becomes silent or is cleared keeps reporting the count
// of the audio it recently carried.
TEST(AudioSegment, MemoizedOutputChannelCount)
{
  AudioSegment s;
  EXPECT_EQ(s.MaxChannelCount(), 0U) << "0 channels on init";

  s.AppendNullData(1);
  EXPECT_EQ(s.MaxChannelCount(), 0U) << "Null data has 0 channels";

  s.Clear();
  EXPECT_EQ(s.MaxChannelCount(), 0U) << "Still 0 after clearing";

  AudioChunk c1;
  fillChunk<float, 1>(&c1, 1);
  s.AppendAndConsumeChunk(std::move(c1));
  EXPECT_EQ(s.MaxChannelCount(), 1U) << "A single chunk's channel count";

  AudioChunk c2;
  fillChunk<float, 2>(&c2, 1);
  s.AppendAndConsumeChunk(std::move(c2));
  EXPECT_EQ(s.MaxChannelCount(), 2U) << "The max of two chunks' channel count";

  // Consuming both chunks leaves only null data; the count must survive.
  s.ForgetUpTo(2);
  EXPECT_EQ(s.MaxChannelCount(), 2U) << "Memoized value with null chunks";

  s.Clear();
  EXPECT_EQ(s.MaxChannelCount(), 2U) << "Still memoized after clearing";

  // A new real chunk replaces the memoized value even with a lower count.
  AudioChunk c3;
  fillChunk<float, 1>(&c3, 1);
  s.AppendAndConsumeChunk(std::move(c3));
  EXPECT_EQ(s.MaxChannelCount(), 1U) << "Real chunk trumps memoized value";

  s.Clear();
  EXPECT_EQ(s.MaxChannelCount(), 1U) << "Memoized value was updated";
}
    342 
    343 TEST(AudioSegment, AppendAndConsumeChunk)
    344 {
    345  AudioChunk c;
    346  fillChunk<float, 2>(&c, 10);
    347  AudioChunk temp(c);
    348  EXPECT_TRUE(c.mBuffer->IsShared());
    349 
    350  AudioSegment s;
    351  s.AppendAndConsumeChunk(std::move(temp));
    352  EXPECT_FALSE(s.IsEmpty());
    353  EXPECT_TRUE(c.mBuffer->IsShared());
    354 
    355  s.Clear();
    356  EXPECT_FALSE(c.mBuffer->IsShared());
    357 }
    358 
    359 TEST(AudioSegment, AppendAndConsumeEmptyChunk)
    360 {
    361  AudioChunk c;
    362  AudioSegment s;
    363  s.AppendAndConsumeChunk(std::move(c));
    364  EXPECT_TRUE(s.IsEmpty());
    365 }
    366 
    367 TEST(AudioSegment, AppendAndConsumeNonEmptyZeroDurationChunk)
    368 {
    369  AudioChunk c;
    370  fillChunk<float, 2>(&c, 0);
    371  AudioChunk temp(c);
    372  EXPECT_TRUE(c.mBuffer->IsShared());
    373 
    374  AudioSegment s;
    375  s.AppendAndConsumeChunk(std::move(temp));
    376  EXPECT_TRUE(s.IsEmpty());
    377  EXPECT_FALSE(c.mBuffer->IsShared());
    378 }
    379 
    380 TEST(AudioSegment, CombineChunksInAppendAndConsumeChunk)
    381 {
    382  AudioChunk source;
    383  fillChunk<float, 2>(&source, 10);
    384 
    385  auto checkChunks = [&](const AudioSegment& aSegement,
    386                         const nsTArray<TrackTime>& aDurations) {
    387    size_t i = 0;
    388    for (AudioSegment::ConstChunkIterator iter(aSegement); !iter.IsEnded();
    389         iter.Next()) {
    390      EXPECT_EQ(iter->GetDuration(), aDurations[i++]);
    391    }
    392    EXPECT_EQ(i, aDurations.Length());
    393  };
    394 
    395  // The chunks can be merged if their duration are adjacent.
    396  {
    397    AudioChunk c1(source);
    398    c1.SliceTo(2, 5);
    399 
    400    AudioChunk c2(source);
    401    c2.SliceTo(5, 9);
    402 
    403    AudioSegment s;
    404    s.AppendAndConsumeChunk(std::move(c1));
    405    EXPECT_EQ(s.GetDuration(), 3);
    406 
    407    s.AppendAndConsumeChunk(std::move(c2));
    408    EXPECT_EQ(s.GetDuration(), 7);
    409 
    410    checkChunks(s, {7});
    411  }
    412  // Otherwise, they cannot be merged.
    413  {
    414    // If durations of chunks are overlapped, they cannot be merged.
    415    AudioChunk c1(source);
    416    c1.SliceTo(2, 5);
    417 
    418    AudioChunk c2(source);
    419    c2.SliceTo(4, 9);
    420 
    421    AudioSegment s;
    422    s.AppendAndConsumeChunk(std::move(c1));
    423    EXPECT_EQ(s.GetDuration(), 3);
    424 
    425    s.AppendAndConsumeChunk(std::move(c2));
    426    EXPECT_EQ(s.GetDuration(), 8);
    427 
    428    checkChunks(s, {3, 5});
    429  }
    430  {
    431    // If durations of chunks are discontinuous, they cannot be merged.
    432    AudioChunk c1(source);
    433    c1.SliceTo(2, 4);
    434 
    435    AudioChunk c2(source);
    436    c2.SliceTo(5, 9);
    437 
    438    AudioSegment s;
    439    s.AppendAndConsumeChunk(std::move(c1));
    440    EXPECT_EQ(s.GetDuration(), 2);
    441 
    442    s.AppendAndConsumeChunk(std::move(c2));
    443    EXPECT_EQ(s.GetDuration(), 6);
    444 
    445    checkChunks(s, {2, 4});
    446  }
    447 }
    448 
    449 TEST(AudioSegment, ConvertFromAndToInterleaved)
    450 {
    451  const uint32_t channels = 2;
    452  const uint32_t rate = 44100;
    453  AudioGenerator<AudioDataValue> generator(channels, rate);
    454 
    455  const size_t frames = 10;
    456  const size_t bufferSize = frames * channels;
    457  nsTArray<AudioDataValue> buffer(bufferSize);
    458  buffer.AppendElements(bufferSize);
    459 
    460  generator.GenerateInterleaved(buffer.Elements(), frames);
    461 
    462  AudioSegment data;
    463  data.AppendFromInterleavedBuffer(buffer.Elements(), frames, channels,
    464                                   PRINCIPAL_HANDLE_NONE);
    465 
    466  nsTArray<AudioDataValue> interleaved;
    467  size_t sampleCount = data.WriteToInterleavedBuffer(interleaved, channels);
    468 
    469  EXPECT_EQ(sampleCount, bufferSize);
    470  EXPECT_EQ(interleaved, buffer);
    471 }
    472 
    473 }  // namespace audio_segment