tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

TestVideoFrameConverter.cpp (29564B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
      5 * You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include <array>
      8 #include <iterator>
      9 #include <thread>
     10 
     11 #include "MediaEventSource.h"
     12 #include "VideoFrameConverter.h"
     13 #include "VideoUtils.h"
     14 #include "YUVBufferGenerator.h"
     15 #include "gmock/gmock-matchers.h"
     16 #include "gtest/gtest.h"
     17 #include "mozilla/gtest/WaitFor.h"
     18 #include "rtc_base/ref_counted_object.h"
     19 
     20 using namespace mozilla;
     21 using testing::Not;
     22 
     23 class VideoFrameConverterTest;
     24 
// Test-only sink that registers itself on a webrtc video track source and
// republishes each delivered frame through a MediaEventProducer, so test code
// can await converted frames with WaitFor/TakeN.
class FrameListener : public webrtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(FrameListener)

 // Subscribes to aSource with default (empty) VideoSinkWants.
 explicit FrameListener(webrtc::VideoTrackSourceInterface* aSource)
     : mSource(aSource) {
   mSource->AddOrUpdateSink(this, {});
 }

 // webrtc::VideoSinkInterface override. Forwards the frame together with the
 // time of arrival so tests can measure conversion latency.
 void OnFrame(const webrtc::VideoFrame& aVideoFrame) override {
   mVideoFrameConvertedEvent.Notify(aVideoFrame, TimeStamp::Now());
 }

 // webrtc::VideoSinkInterface override. A discarded frame is surfaced as a
 // frame with a null buffer so waiting tests are still notified.
 void OnDiscardedFrame() override {
   webrtc::VideoFrame frame(nullptr, webrtc::VideoRotation::kVideoRotation_0,
                            0);
   mVideoFrameConvertedEvent.Notify(frame, TimeStamp::Now());
 }

 // Re-registers this sink with new wants (e.g. resolution alignment).
 void SetWants(const webrtc::VideoSinkWants& aWants) {
   mSource->AddOrUpdateSink(this, aWants);
 }

 MediaEventSource<webrtc::VideoFrame, TimeStamp>& VideoFrameConvertedEvent() {
   return mVideoFrameConvertedEvent;
 }

private:
 // Unsubscribe on destruction; refcounting keeps us alive while registered.
 ~FrameListener() { mSource->RemoveSink(this); }

 const RefPtr<webrtc::VideoTrackSourceInterface> mSource;
 MediaEventProducer<webrtc::VideoFrame, TimeStamp> mVideoFrameConvertedEvent;
};
     58 
// White-box converter for tests: instantiates VideoFrameConverterImpl with
// frame dropping disabled, runs on the main thread's serial event target, and
// promotes selected protected internals to public for direct poking.
class DebugVideoFrameConverter
   : public webrtc::RefCountedObject<
         VideoFrameConverterImpl<FrameDroppingPolicy::Disabled>> {
public:
 explicit DebugVideoFrameConverter(
     const dom::RTCStatsTimestampMaker& aTimestampMaker)
     : webrtc::RefCountedObject<VideoFrameConverterImpl>(
           do_AddRef(GetMainThreadSerialEventTarget()), aTimestampMaker,
           /* aLockScaling= */ false) {}

 // Expose protected members so tests can inspect/drive converter internals.
 using VideoFrameConverterImpl::mLastFrameQueuedForProcessing;
 using VideoFrameConverterImpl::ProcessVideoFrame;
 using VideoFrameConverterImpl::QueueForProcessing;
 using VideoFrameConverterImpl::RegisterListener;
};
     74 
// Fixture wiring a DebugVideoFrameConverter to a FrameListener so each test
// can queue chunks and await the converted webrtc::VideoFrames.
class VideoFrameConverterTest : public ::testing::Test {
protected:
 const dom::RTCStatsTimestampMaker mTimestampMaker;
 RefPtr<DebugVideoFrameConverter> mConverter;
 RefPtr<FrameListener> mListener;

 VideoFrameConverterTest()
     : mTimestampMaker(dom::RTCStatsTimestampMaker::Create()),
       mConverter(MakeAndAddRef<DebugVideoFrameConverter>(mTimestampMaker)),
       mListener(MakeAndAddRef<FrameListener>(mConverter)) {
   mConverter->RegisterListener();
   mConverter->SetTrackingId({TrackingId::Source::Camera, 0});
 }

 // Shut the converter down after each test so no timers/tasks outlive it.
 void TearDown() override { mConverter->Shutdown(); }

 // Returns a promise resolved with the next aN converted frames and the
 // wall-clock time each one arrived at the listener.
 RefPtr<TakeNPromise<webrtc::VideoFrame, TimeStamp>> TakeNConvertedFrames(
     size_t aN) {
   return TakeN(mListener->VideoFrameConvertedEvent(), aN);
 }
};
     96 
     97 static bool IsPlane(const uint8_t* aData, int aWidth, int aHeight, int aStride,
     98                    uint8_t aValue) {
     99  for (int i = 0; i < aHeight; ++i) {
    100    for (int j = 0; j < aWidth; ++j) {
    101      if (aData[i * aStride + j] != aValue) {
    102        return false;
    103      }
    104    }
    105  }
    106  return true;
    107 }
    108 
// gmock matcher: true when the frame is entirely black in I420 terms, i.e.
// the luma (Y) plane is all 0x00 and both chroma (U/V) planes are all 0x80
// (neutral chroma).
MATCHER(IsFrameBlack,
       std::string(nsPrintfCString("%s all black pixels",
                                   negation ? "doesn't have" : "has")
                       .get())) {
 static_assert(
     std::is_same_v<webrtc::VideoFrame, std::decay_t<decltype(arg)>>);
 // Convert to I420; RefPtr takes its own reference off the raw pointer while
 // the temporary scoped_refptr from ToI420() is still alive.
 RefPtr<webrtc::I420BufferInterface> buffer =
     arg.video_frame_buffer()->ToI420().get();
 return IsPlane(buffer->DataY(), buffer->width(), buffer->height(),
                buffer->StrideY(), 0x00) &&
        IsPlane(buffer->DataU(), buffer->ChromaWidth(), buffer->ChromaHeight(),
                buffer->StrideU(), 0x80) &&
        IsPlane(buffer->DataV(), buffer->ChromaWidth(), buffer->ChromaHeight(),
                buffer->StrideV(), 0x80);
}
    124 
// Rounds both durations to whole milliseconds, then returns how many whole
// multiples of aDenom fit into aArg and the leftover milliseconds.
// NOTE(review): assumes aDenom rounds to a non-zero millisecond count; a
// sub-half-millisecond denominator would make this divide by zero — confirm
// callers never pass one.
static std::tuple</*multiples*/ int64_t, /*remainder*/ int64_t>
CalcMultiplesInMillis(TimeDuration aArg, TimeDuration aDenom) {
 int64_t denom = llround(aDenom.ToMilliseconds());
 int64_t arg = llround(aArg.ToMilliseconds());
 const auto multiples = arg / denom;
 const auto remainder = arg % denom;
 return {multiples, remainder};
}
    133 
// gmock matcher: the TimeDuration arg is a non-negative whole-millisecond
// multiple of aDenom (zero multiples allowed).
MATCHER_P(
   IsDurationInMillisMultipleOf, aDenom,
   std::string(
       nsPrintfCString("%s a multiple of %sms", negation ? "isn't" : "is",
                       testing::PrintToString(aDenom.ToMilliseconds()).data())
           .get())) {
 using T = std::decay_t<decltype(arg)>;
 using U = std::decay_t<decltype(aDenom)>;
 static_assert(std::is_same_v<T, TimeDuration>);
 static_assert(std::is_same_v<U, TimeDuration>);
 auto [multiples, remainder] = CalcMultiplesInMillis(arg, aDenom);
 return multiples >= 0 && remainder == 0;
}
    147 
// gmock matcher: like IsDurationInMillisMultipleOf, but the arg must be at
// least one whole multiple of aDenom (zero is rejected).
MATCHER_P(
   IsDurationInMillisPositiveMultipleOf, aDenom,
   std::string(
       nsPrintfCString("%s a positive non-zero multiple of %sms",
                       negation ? "isn't" : "is",
                       testing::PrintToString(aDenom.ToMilliseconds()).data())
           .get())) {
 using T = std::decay_t<decltype(arg)>;
 using U = std::decay_t<decltype(aDenom)>;
 static_assert(std::is_same_v<T, TimeDuration>);
 static_assert(std::is_same_v<U, TimeDuration>);
 auto [multiples, remainder] = CalcMultiplesInMillis(arg, aDenom);
 return multiples > 0 && remainder == 0;
}
    162 
// Builds a VideoChunk of the given dimensions with generated (non-black)
// I420 content, stamped at aTime with zero duration.
VideoChunk GenerateChunk(int32_t aWidth, int32_t aHeight, TimeStamp aTime) {
 YUVBufferGenerator generator;
 generator.Init(gfx::IntSize(aWidth, aHeight));
 VideoFrame f(generator.GenerateI420Image(), gfx::IntSize(aWidth, aHeight));
 VideoChunk c;
 c.mFrame.TakeFrom(&f);
 c.mTimeStamp = aTime;
 c.mDuration = 0;
 return c;
}
    173 
// A single chunk queued on an active converter comes out at its original
// resolution, non-black, after a non-zero amount of wall-clock time.
TEST_F(VideoFrameConverterTest, BasicConversion) {
 auto framesPromise = TakeNConvertedFrames(1);
 TimeStamp now = TimeStamp::Now();
 VideoChunk chunk = GenerateChunk(640, 480, now);
 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(chunk, false);
 auto frames = WaitFor(framesPromise).unwrap();
 ASSERT_EQ(frames.size(), 1U);
 const auto& [frame, conversionTime] = frames[0];
 EXPECT_EQ(frame.width(), 640);
 EXPECT_EQ(frame.height(), 480);
 EXPECT_THAT(frame, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime - now, TimeDuration::FromMilliseconds(0));
}
    188 
// A chunk timestamped 100ms in the future must not be delivered before that
// time: the converter paces output to the frame's timestamp.
TEST_F(VideoFrameConverterTest, BasicPacing) {
 auto framesPromise = TakeNConvertedFrames(1);
 TimeStamp now = TimeStamp::Now();
 TimeStamp future = now + TimeDuration::FromMilliseconds(100);
 VideoChunk chunk = GenerateChunk(640, 480, future);
 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(chunk, false);
 auto frames = WaitFor(framesPromise).unwrap();
 // Delivery happened at or after the frame's future timestamp.
 EXPECT_GT(TimeStamp::Now() - now, future - now);
 ASSERT_EQ(frames.size(), 1U);
 const auto& [frame, conversionTime] = frames[0];
 EXPECT_EQ(frame.width(), 640);
 EXPECT_EQ(frame.height(), 480);
 EXPECT_THAT(frame, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime - now, future - now);
}
    205 
// Two future-timestamped chunks are each paced to their own timestamp and
// delivered in order.
TEST_F(VideoFrameConverterTest, MultiPacing) {
 auto framesPromise = TakeNConvertedFrames(2);
 TimeStamp now = TimeStamp::Now();
 TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
 TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
 VideoChunk chunk = GenerateChunk(640, 480, future1);
 mConverter->SetActive(true);
 // Long idle interval so duplication doesn't interfere with pacing here.
 mConverter->SetIdleFrameDuplicationInterval(TimeDuration::FromSeconds(1));
 mConverter->QueueVideoChunk(chunk, false);
 chunk = GenerateChunk(640, 480, future2);
 mConverter->QueueVideoChunk(chunk, false);
 auto frames = WaitFor(framesPromise).unwrap();
 EXPECT_GT(TimeStamp::Now(), future2);
 ASSERT_EQ(frames.size(), 2U);
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime0 - now, future1 - now);

 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 640);
 EXPECT_EQ(frame1.height(), 480);
 EXPECT_THAT(frame1, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime1, future2);
 // Frames arrive in timestamp order.
 EXPECT_GT(conversionTime1 - now, conversionTime0 - now);
}
    233 
// With no new input, the converter re-emits the last frame once per idle
// duplication interval, re-using the same underlying buffer.
TEST_F(VideoFrameConverterTest, Duplication) {
 auto framesPromise = TakeNConvertedFrames(2);
 TimeStamp now = TimeStamp::Now();
 TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
 TimeDuration duplicationInterval = TimeDuration::FromMilliseconds(20);
 VideoChunk chunk = GenerateChunk(640, 480, future1);
 mConverter->SetActive(true);
 mConverter->SetIdleFrameDuplicationInterval(duplicationInterval);
 mConverter->QueueVideoChunk(chunk, false);
 auto frames = WaitFor(framesPromise).unwrap();
 EXPECT_GT(TimeStamp::Now() - now, future1 + duplicationInterval - now);
 ASSERT_EQ(frames.size(), 2U);
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime0, future1);

 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 640);
 EXPECT_EQ(frame1.height(), 480);
 EXPECT_THAT(frame1, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime1 - now, future1 + duplicationInterval - now);
 // Timestamp gap is a whole multiple of the interval (scheduling may make it
 // more than one interval, never a fraction).
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame1.timestamp_us() -
                                            frame0.timestamp_us()),
             IsDurationInMillisPositiveMultipleOf(duplicationInterval));

 // Check that we re-used the old buffer.
 EXPECT_EQ(frame0.video_frame_buffer(), frame1.video_frame_buffer());
}
    264 
// Changing the idle duplication interval on the fly takes effect: frames 1-2
// are spaced by the first interval, frames 3-4 by the second, and all
// duplicates share the first frame's buffer.
TEST_F(VideoFrameConverterTest, MutableDuplication) {
 auto framesPromise = TakeNConvertedFrames(1);
 TimeStamp now = TimeStamp::Now();
 TimeStamp future1 = now + TimeDuration::FromMilliseconds(20);
 TimeDuration noDuplicationPeriod = TimeDuration::FromMilliseconds(100);
 TimeDuration duplicationInterval1 = TimeDuration::FromMilliseconds(50);
 TimeDuration duplicationInterval2 = TimeDuration::FromMilliseconds(10);
 VideoChunk chunk = GenerateChunk(640, 480, future1);
 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(chunk, false);
 // Spin the main-thread event loop through a period where no duplication
 // interval is set, so no duplicates can be produced yet.
 while (TimeStamp::Now() < future1 + noDuplicationPeriod) {
   if (!NS_ProcessNextEvent(nullptr, false)) {
     std::this_thread::sleep_for(std::chrono::milliseconds(1));
   }
 }
 auto frames = WaitFor(framesPromise).unwrap();
 // Two duplicates at the first interval, then two more at the second.
 mConverter->SetIdleFrameDuplicationInterval(duplicationInterval1);
 auto frames1 = WaitFor(TakeNConvertedFrames(2)).unwrap();
 mConverter->SetIdleFrameDuplicationInterval(duplicationInterval2);
 auto frames2 = WaitFor(TakeNConvertedFrames(2)).unwrap();
 frames.insert(frames.end(), frames1.begin(), frames1.end());
 frames.insert(frames.end(), frames2.begin(), frames2.end());

 EXPECT_GT(TimeStamp::Now() - now, noDuplicationPeriod + duplicationInterval1 +
                                       duplicationInterval2 * 2);
 ASSERT_EQ(frames.size(), 5U);
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime0 - now, future1 - now);

 // First duplicate: spaced from frame0 by the no-duplication period plus a
 // whole (possibly zero-extra) multiple of interval 1.
 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 640);
 EXPECT_EQ(frame1.height(), 480);
 EXPECT_THAT(frame1, Not(IsFrameBlack()));
 EXPECT_EQ(frame0.video_frame_buffer(), frame1.video_frame_buffer());
 EXPECT_GT(conversionTime1 - now, future1 - now + noDuplicationPeriod);
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame1.timestamp_us() -
                                            frame0.timestamp_us()) -
                 noDuplicationPeriod,
             IsDurationInMillisMultipleOf(duplicationInterval1));

 const auto& [frame2, conversionTime2] = frames[2];
 EXPECT_EQ(frame2.width(), 640);
 EXPECT_EQ(frame2.height(), 480);
 EXPECT_THAT(frame2, Not(IsFrameBlack()));
 EXPECT_EQ(frame0.video_frame_buffer(), frame2.video_frame_buffer());
 EXPECT_GT(conversionTime2 - now, noDuplicationPeriod + duplicationInterval1);
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame2.timestamp_us() -
                                            frame1.timestamp_us()),
             IsDurationInMillisPositiveMultipleOf(duplicationInterval1));

 // Frames 3 and 4 follow at the (shorter) second interval.
 const auto& [frame3, conversionTime3] = frames[3];
 EXPECT_EQ(frame3.width(), 640);
 EXPECT_EQ(frame3.height(), 480);
 EXPECT_THAT(frame3, Not(IsFrameBlack()));
 EXPECT_EQ(frame0.video_frame_buffer(), frame3.video_frame_buffer());
 EXPECT_GT(conversionTime3 - now,
           noDuplicationPeriod + duplicationInterval1 + duplicationInterval2);
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame3.timestamp_us() -
                                            frame2.timestamp_us()),
             IsDurationInMillisPositiveMultipleOf(duplicationInterval2));

 const auto& [frame4, conversionTime4] = frames[4];
 EXPECT_EQ(frame4.width(), 640);
 EXPECT_EQ(frame4.height(), 480);
 EXPECT_THAT(frame4, Not(IsFrameBlack()));
 EXPECT_EQ(frame0.video_frame_buffer(), frame4.video_frame_buffer());
 EXPECT_GT(conversionTime4 - now, noDuplicationPeriod + duplicationInterval1 +
                                      duplicationInterval2 * 2);
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame4.timestamp_us() -
                                            frame3.timestamp_us()),
             IsDurationInMillisPositiveMultipleOf(duplicationInterval2));
}
    340 
// Queuing a frame with an earlier timestamp after a later one drops the
// not-yet-due later frame: only the 640x480 frame is delivered.
TEST_F(VideoFrameConverterTest, DropsOld) {
 auto framesPromise = TakeNConvertedFrames(1);
 TimeStamp now = TimeStamp::Now();
 TimeStamp future1 = now + TimeDuration::FromMilliseconds(1000);
 TimeStamp future2 = now + TimeDuration::FromMilliseconds(100);
 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
 mConverter->QueueVideoChunk(GenerateChunk(640, 480, future2), false);
 auto frames = WaitFor(framesPromise).unwrap();
 EXPECT_GT(TimeStamp::Now(), future2);
 ASSERT_EQ(frames.size(), 1U);
 const auto& [frame, conversionTime] = frames[0];
 EXPECT_EQ(frame.width(), 640);
 EXPECT_EQ(frame.height(), 480);
 EXPECT_THAT(frame, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime - now, future2 - now);
}
    358 
// We check that the disabling code was triggered by sending multiple,
// different, frames to the converter within one second. While black, it shall
// treat all frames identical and issue one black frame per second.
// This version disables before queuing a frame. A frame will have to be
// invented.
TEST_F(VideoFrameConverterTest, BlackOnDisableCreated) {
 auto framesPromise = TakeNConvertedFrames(2);
 TimeStamp now = TimeStamp::Now();
 TimeStamp future1 = now + TimeDuration::FromMilliseconds(10);
 TimeStamp future2 = now + TimeDuration::FromMilliseconds(20);
 TimeStamp future3 = now + TimeDuration::FromMilliseconds(40);
 TimeDuration duplicationInterval = TimeDuration::FromMilliseconds(10);
 mConverter->SetActive(true);
 mConverter->SetIdleFrameDuplicationInterval(duplicationInterval);
 mConverter->SetTrackEnabled(false);
 // Three distinct 800x600 frames; all must be replaced by invented black
 // 640x480 frames since the track is disabled.
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future3), false);
 auto frames = WaitFor(framesPromise).unwrap();
 EXPECT_GT(TimeStamp::Now() - now, duplicationInterval);
 ASSERT_EQ(frames.size(), 2U);
 // The first frame was created instantly by SetTrackEnabled().
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, IsFrameBlack());
 EXPECT_GT(conversionTime0 - now, TimeDuration::FromSeconds(0));
 // The second frame was created by the same-frame timer. (We check multiples
 // because timing and scheduling can make it slower than requested)
 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 640);
 EXPECT_EQ(frame1.height(), 480);
 EXPECT_THAT(frame1, IsFrameBlack());
 EXPECT_GT(conversionTime1 - now, duplicationInterval);
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame1.timestamp_us() -
                                            frame0.timestamp_us()),
             IsDurationInMillisPositiveMultipleOf(duplicationInterval));
}
    397 
// We check that the disabling code was triggered by sending multiple,
// different, frames to the converter within a duplicationInterval. While black,
// it shall treat all frames identical and issue one black frame per
// duplicationInterval. This version queues a frame before disabling.
TEST_F(VideoFrameConverterTest, BlackOnDisableDuplicated) {
 TimeStamp now = TimeStamp::Now();
 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, now), false);
 const auto [frame0, conversionTime0] =
     WaitFor(TakeNConvertedFrames(1)).unwrap()[0];

 // The first frame was queued.
 EXPECT_EQ(frame0.width(), 800);
 EXPECT_EQ(frame0.height(), 600);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));

 TimeStamp then = TimeStamp::Now();
 TimeStamp future1 = then + TimeDuration::FromMilliseconds(20);
 TimeStamp future2 = then + TimeDuration::FromMilliseconds(40);
 TimeDuration duplicationInterval = TimeDuration::FromMilliseconds(100);

 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);

 const auto framesPromise = TakeNConvertedFrames(2);
 mConverter->SetTrackEnabled(false);
 mConverter->SetIdleFrameDuplicationInterval(duplicationInterval);

 auto frames = WaitFor(framesPromise).unwrap();
 ASSERT_EQ(frames.size(), 2U);
 // The second frame was duplicated by SetTrackEnabled. Since the track was
 // disabled after real content, it keeps the 800x600 size but is blacked.
 const auto& [frame1, conversionTime1] = frames[0];
 EXPECT_EQ(frame1.width(), 800);
 EXPECT_EQ(frame1.height(), 600);
 EXPECT_THAT(frame1, IsFrameBlack());
 EXPECT_GT(conversionTime1 - now, TimeDuration::Zero());
 // The third frame was created by the same-frame timer.
 const auto& [frame2, conversionTime2] = frames[1];
 EXPECT_EQ(frame2.width(), 800);
 EXPECT_EQ(frame2.height(), 600);
 EXPECT_THAT(frame2, IsFrameBlack());
 EXPECT_GT(conversionTime2 - now, duplicationInterval);
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame2.timestamp_us() -
                                            frame1.timestamp_us()),
             IsDurationInMillisPositiveMultipleOf(duplicationInterval));
}
    444 
// A null-image chunk with a timestamp earlier than queued future frames acts
// as a reset: pending future frames are cleared, and only frames queued after
// the reset are delivered.
TEST_F(VideoFrameConverterTest, ClearFutureFramesOnJumpingBack) {
 TimeStamp start = TimeStamp::Now();
 TimeStamp future1 = start + TimeDuration::FromMilliseconds(10);

 auto framesPromise = TakeNConvertedFrames(1);
 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
 auto frames = WaitFor(framesPromise).unwrap();

 // We are now at t=10ms+. Queue a future frame and jump back in time to
 // signal a reset.

 framesPromise = TakeNConvertedFrames(1);
 TimeStamp step1 = TimeStamp::Now();
 ASSERT_GT(step1 - start, future1 - start);
 TimeStamp future2 = step1 + TimeDuration::FromMilliseconds(20);
 TimeStamp future3 = step1 + TimeDuration::FromMilliseconds(10);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
 // A chunk with a null image and an earlier timestamp signals the reset.
 VideoChunk nullChunk;
 nullChunk.mFrame = VideoFrame(nullptr, gfx::IntSize(800, 600));
 nullChunk.mTimeStamp = step1;
 mConverter->QueueVideoChunk(nullChunk, false);

 // We queue one more chunk after the reset so we don't have to wait for the
 // same-frame timer. It has a different time and resolution so we can
 // differentiate them.
 mConverter->QueueVideoChunk(GenerateChunk(320, 240, future3), false);

 {
   auto newFrames = WaitFor(framesPromise).unwrap();
   frames.insert(frames.end(), std::make_move_iterator(newFrames.begin()),
                 std::make_move_iterator(newFrames.end()));
 }
 TimeStamp step2 = TimeStamp::Now();
 EXPECT_GT(step2 - start, future3 - start);
 // Only the pre-reset 640x480 frame and the post-reset 320x240 frame remain;
 // the 800x600 future frame was cleared by the reset.
 ASSERT_EQ(frames.size(), 2U);
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime0 - start, future1 - start);
 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 320);
 EXPECT_EQ(frame1.height(), 240);
 EXPECT_THAT(frame1, Not(IsFrameBlack()));
 EXPECT_GT(conversionTime1 - start, future3 - start);
}
    492 
// We check that no frame is converted while inactive, and that on
// activating the most recently queued frame gets converted.
TEST_F(VideoFrameConverterTest, NoConversionsWhileInactive) {
 auto framesPromise = TakeNConvertedFrames(1);
 TimeStamp now = TimeStamp::Now();
 TimeStamp future1 = now + TimeDuration::FromMilliseconds(10);
 TimeStamp future2 = now + TimeDuration::FromMilliseconds(20);
 TimeDuration activeDelay = TimeDuration::FromMilliseconds(100);
 mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);

 // SetActive needs to follow the same async path as the frames to be in sync.
 auto q = TaskQueue::Create(GetMediaThreadPool(MediaThreadType::WEBRTC_WORKER),
                            "VideoFrameConverterTest");
 auto timer = MakeRefPtr<MediaTimer<TimeStamp>>(false);
 timer->WaitUntil(now + activeDelay, __func__)
     ->Then(q, __func__,
            [converter = mConverter] { converter->SetActive(true); });

 auto frames = WaitFor(framesPromise).unwrap();
 ASSERT_EQ(frames.size(), 1U);
 // Only the most recently queued (800x600) frame comes out, timestamped no
 // earlier than the activation time.
 const auto& [frame, conversionTime] = frames[0];
 (void)conversionTime;
 EXPECT_EQ(frame.width(), 800);
 EXPECT_EQ(frame.height(), 600);
 EXPECT_GT(frame.timestamp_us(), dom::RTCStatsTimestamp::FromMozTime(
                                     mTimestampMaker, now + activeDelay)
                                     .ToRealtime()
                                     .us());
 EXPECT_THAT(frame, Not(IsFrameBlack()));
}
    524 
// Input chunk timestamps survive conversion: each output frame's
// timestamp_us matches its chunk's TimeStamp translated to realtime (within
// 1us of rounding).
TEST_F(VideoFrameConverterTest, TimestampPropagation) {
 auto framesPromise = TakeNConvertedFrames(2);
 TimeStamp now = TimeStamp::Now();
 TimeDuration d1 = TimeDuration::FromMilliseconds(1);
 TimeDuration d2 = TimeDuration::FromMilliseconds(29);

 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d1), false);
 mConverter->QueueVideoChunk(GenerateChunk(800, 600, now + d2), false);

 auto frames = WaitFor(framesPromise).unwrap();
 ASSERT_EQ(frames.size(), 2U);
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));
 EXPECT_NEAR(frame0.timestamp_us(),
             dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d1)
                 .ToRealtime()
                 .us(),
             1);
 EXPECT_GE(conversionTime0 - now, d1);

 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 800);
 EXPECT_EQ(frame1.height(), 600);
 EXPECT_THAT(frame1, Not(IsFrameBlack()));
 EXPECT_NEAR(frame1.timestamp_us(),
             dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d2)
                 .ToRealtime()
                 .us(),
             1);
 EXPECT_GE(conversionTime1 - now, d2);
}
    559 
// A frame that reaches processing with a timestamp older than the last frame
// queued for processing is ignored; the old content is instead duplicated on
// the idle timer.
TEST_F(VideoFrameConverterTest, IgnoreOldFrames) {
 // Do this in a task on the converter's TaskQueue, so it can call into
 // QueueForProcessing directly.
 TimeStamp now = TimeStamp::Now();
 TimeDuration d1 = TimeDuration::FromMilliseconds(10);
 TimeDuration duplicationInterval = TimeDuration::FromMilliseconds(50);
 TimeDuration d2 = d1 * 2;
 TimeDuration d3 = d2 - TimeDuration::FromMilliseconds(1);

 auto framesPromise = TakeNConvertedFrames(1);
 mConverter->SetActive(true);
 mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d1), false);
 auto frames = WaitFor(framesPromise).unwrap();

 framesPromise = TakeNConvertedFrames(2);

 mConverter->SetIdleFrameDuplicationInterval(duplicationInterval);
 (void)WaitFor(InvokeAsync(mConverter->mTarget, __func__, [&] {
   // Time is now ~t1. This processes an extra frame similar to what
   // `SetActive(false); SetActive(true);` (using t=now()) would do.
   mConverter->mLastFrameQueuedForProcessing.mTime = now + d2;
   mConverter->ProcessVideoFrame(mConverter->mLastFrameQueuedForProcessing);

   // This queues a new chunk with an earlier timestamp than the extra frame
   // above. But it gets processed after the extra frame, so time will appear
   // to go backwards. This simulates a frame from the pacer being in flight
   // when we flip SetActive() above, for time t' < t. This frame is expected
   // to get ignored.
   mConverter->QueueForProcessing(
       GenerateChunk(800, 600, now + d3).mFrame.GetImage(), now + d3,
       gfx::IntSize(800, 600), false);
   return GenericPromise::CreateAndResolve(true, __func__);
 }));

 {
   auto newFrames = WaitFor(framesPromise).unwrap();
   frames.insert(frames.end(), std::make_move_iterator(newFrames.begin()),
                 std::make_move_iterator(newFrames.end()));
 }

 // Realtime epoch for comparing output timestamps against t0 offsets.
 auto t0 = dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now)
               .ToRealtime()
               .us();
 ASSERT_EQ(frames.size(), 3U);
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));
 EXPECT_NEAR(frame0.timestamp_us() - t0,
             static_cast<int64_t>(d1.ToMicroseconds()), 1);

 // The extra re-processed frame, stamped t=d2. Still 640x480: the 800x600
 // chunk stamped d3 < d2 was ignored.
 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 640);
 EXPECT_EQ(frame1.height(), 480);
 EXPECT_THAT(frame1, Not(IsFrameBlack()));
 EXPECT_NEAR(frame1.timestamp_us() - t0,
             static_cast<int64_t>(d2.ToMicroseconds()), 1);
 EXPECT_GE(conversionTime1 - now, d1);

 // The idle-timer duplicate, one duplication interval after d2.
 const auto& [frame2, conversionTime2] = frames[2];
 EXPECT_EQ(frame2.width(), 640);
 EXPECT_EQ(frame2.height(), 480);
 EXPECT_THAT(frame2, Not(IsFrameBlack()));
 EXPECT_NEAR(frame2.timestamp_us() - t0,
             static_cast<int64_t>((d2 + duplicationInterval).ToMicroseconds()),
             1);
 EXPECT_GE(conversionTime2 - now, d2 + duplicationInterval);
}
    628 
// The same-frame (idle duplication) timer must not fire for a frame when a
// newer real frame is already queued: output order must be t1, t2, t2+5ms —
// never t1, t1+5ms, t2.
TEST_F(VideoFrameConverterTest, SameFrameTimerRacingWithPacing) {
 TimeStamp now = TimeStamp::Now();
 TimeDuration d1 = TimeDuration::FromMilliseconds(10);
 TimeDuration duplicationInterval = TimeDuration::FromMilliseconds(5);
 // d2 lands just inside d1's duplication window to provoke the race.
 TimeDuration d2 =
     d1 + duplicationInterval - TimeDuration::FromMilliseconds(1);

 auto framesPromise = TakeNConvertedFrames(3);
 mConverter->SetActive(true);
 mConverter->SetIdleFrameDuplicationInterval(duplicationInterval);
 mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d1), false);
 mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d2), false);
 auto frames = WaitFor(framesPromise).unwrap();

 // The expected order here (in timestamps) is t1, t2, t2+5ms.
 //
 // If the same-frame timer doesn't check what is queued we could end up with
 // t1, t1+5ms, t2.

 auto t0 = dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now)
               .ToRealtime()
               .us();
 ASSERT_EQ(frames.size(), 3U);
 const auto& [frame0, conversionTime0] = frames[0];
 EXPECT_EQ(frame0.width(), 640);
 EXPECT_EQ(frame0.height(), 480);
 EXPECT_THAT(frame0, Not(IsFrameBlack()));
 EXPECT_NEAR(frame0.timestamp_us() - t0,
             static_cast<int64_t>(d1.ToMicroseconds()), 1);
 EXPECT_GE(conversionTime0 - now, d1);

 const auto& [frame1, conversionTime1] = frames[1];
 EXPECT_EQ(frame1.width(), 640);
 EXPECT_EQ(frame1.height(), 480);
 EXPECT_THAT(frame1, Not(IsFrameBlack()));
 EXPECT_NEAR(frame1.timestamp_us() - t0,
             static_cast<int64_t>(d2.ToMicroseconds()), 1);
 EXPECT_GE(conversionTime1 - now, d2);

 // The third frame is a duplicate of frame1, a whole number of intervals on.
 const auto& [frame2, conversionTime2] = frames[2];
 EXPECT_EQ(frame2.width(), 640);
 EXPECT_EQ(frame2.height(), 480);
 EXPECT_THAT(frame2, Not(IsFrameBlack()));
 EXPECT_THAT(TimeDuration::FromMicroseconds(frame2.timestamp_us() -
                                            frame1.timestamp_us()),
             IsDurationInMillisPositiveMultipleOf(duplicationInterval));
 EXPECT_GE(conversionTime2 - now, d2 + duplicationInterval);
}
    677 
// For each requested VideoSinkWants::resolution_alignment, the converted
// frame's width and height must be exact multiples of that alignment.
TEST_F(VideoFrameConverterTest, SinkWantsResolutionAlignment) {
 const std::array<int, 5> alignments{2, 16, 39, 400, 1000};
 const int width = 640;
 const int height = 480;

 TimeStamp now = TimeStamp::Now();
 // Strictly increasing timestamps so no chunk is treated as "old".
 TimeDuration interval = TimeDuration::FromMilliseconds(1);
 mConverter->SetActive(true);
 webrtc::VideoSinkWants wants;
 for (uint32_t i = 0; i < alignments.size(); ++i) {
   const TimeStamp t = now + interval * (i + 1);
   // Test that requesting specific alignment always results in the expected
   // number of layers and valid alignment.
   wants.resolution_alignment = alignments[i];
   mListener->SetWants(wants);
   auto framesPromise = TakeNConvertedFrames(1);
   mConverter->QueueVideoChunk(GenerateChunk(width, height, t), false);
   const auto [frame, time] = WaitFor(framesPromise).unwrap()[0];

   EXPECT_EQ(frame.width() % alignments[i], 0)
       << " for width " << frame.width() << " and alignment " << alignments[i];
   EXPECT_EQ(frame.height() % alignments[i], 0)
       << " for height " << frame.height() << " and alignment "
       << alignments[i];
 }
}