tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit c2490c82adc5963d21bb06d1b383ad924bae3cbc
parent c6fba600415077858ee301b2c53a799e70f44c4d
Author: Michael Froman <mfroman@mozilla.com>
Date:   Wed,  8 Oct 2025 17:11:13 -0500

Bug 1993083 - Vendor libwebrtc from 912a0d864d

Upstream commit: https://webrtc.googlesource.com/src/+/912a0d864df7fa23399228a5a6ff358cfcc4d860
    Change HaltonFrameSampler to use VideoFrame, add support for NV12.

    This CL changes `GetSampleValuesForFrame()` to accept a `VideoFrame`
    instance instead of a `scoped_refptr<I420BufferInterface>` and also adds
    native support for NV12 buffers without the need for conversion to I420.

    Bug: webrtc:358039777
    Change-Id: Ie488c21cabbda171d3adc2f7b329be9d4e4a1008
    Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/398100
    Reviewed-by: Fanny Linderborg <linderborg@webrtc.org>
    Commit-Queue: Fanny Linderborg <linderborg@webrtc.org>
    Commit-Queue: Erik Språng <sprang@webrtc.org>
    Cr-Commit-Position: refs/heads/main@{#45036}

Diffstat:
Mthird_party/libwebrtc/README.mozilla.last-vendor | 4++--
Mthird_party/libwebrtc/video/corruption_detection/BUILD.gn | 2++
Mthird_party/libwebrtc/video/corruption_detection/frame_instrumentation_evaluation.cc | 17++---------------
Mthird_party/libwebrtc/video/corruption_detection/frame_instrumentation_generator.cc | 17++---------------
Mthird_party/libwebrtc/video/corruption_detection/frame_pair_corruption_score.cc | 24+++++++++++++-----------
Mthird_party/libwebrtc/video/corruption_detection/halton_frame_sampler.cc | 125++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------
Mthird_party/libwebrtc/video/corruption_detection/halton_frame_sampler.h | 17+++++++++++++----
Mthird_party/libwebrtc/video/corruption_detection/halton_frame_sampler_unittest.cc | 137++++++++++++++++++++++++++++++++++++++-----------------------------------------
8 files changed, 185 insertions(+), 158 deletions(-)

diff --git a/third_party/libwebrtc/README.mozilla.last-vendor b/third_party/libwebrtc/README.mozilla.last-vendor @@ -1,4 +1,4 @@ # ./mach python dom/media/webrtc/third_party_build/vendor-libwebrtc.py --from-local /home/mfroman/mozilla/elm/.moz-fast-forward/moz-libwebrtc --commit mozpatches libwebrtc -libwebrtc updated from /home/mfroman/mozilla/elm/.moz-fast-forward/moz-libwebrtc commit mozpatches on 2025-10-08T22:09:59.866508+00:00. +libwebrtc updated from /home/mfroman/mozilla/elm/.moz-fast-forward/moz-libwebrtc commit mozpatches on 2025-10-08T22:11:03.587841+00:00. # base of lastest vendoring -5d4d8d70de +912a0d864d diff --git a/third_party/libwebrtc/video/corruption_detection/BUILD.gn b/third_party/libwebrtc/video/corruption_detection/BUILD.gn @@ -214,6 +214,8 @@ if (rtc_include_tests) { sources = [ "halton_frame_sampler_unittest.cc" ] deps = [ ":halton_frame_sampler", + ":video_frame_sampler", + "../../api:array_view", "../../api:scoped_refptr", "../../api/video:video_frame", "../../test:test_support", diff --git a/third_party/libwebrtc/video/corruption_detection/frame_instrumentation_evaluation.cc b/third_party/libwebrtc/video/corruption_detection/frame_instrumentation_evaluation.cc @@ -15,9 +15,7 @@ #include <vector> #include "api/array_view.h" -#include "api/scoped_refptr.h" #include "api/video/video_frame.h" -#include "api/video/video_frame_buffer.h" #include "common_video/frame_instrumentation_data.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -51,16 +49,6 @@ std::optional<double> GetCorruptionScore(const FrameInstrumentationData& data, return std::nullopt; } - scoped_refptr<I420BufferInterface> frame_buffer_as_i420 = - frame.video_frame_buffer()->ToI420(); - if (!frame_buffer_as_i420) { - RTC_LOG(LS_ERROR) << "Failed to convert " - << VideoFrameBufferTypeToString( - frame.video_frame_buffer()->type()) - << " image to I420"; - return std::nullopt; - } - HaltonFrameSampler frame_sampler; 
frame_sampler.SetCurrentIndex(data.sequence_index); std::vector<HaltonFrameSampler::Coordinates> sample_coordinates = @@ -70,9 +58,8 @@ std::optional<double> GetCorruptionScore(const FrameInstrumentationData& data, return std::nullopt; } - std::vector<FilteredSample> samples = - GetSampleValuesForFrame(frame_buffer_as_i420, sample_coordinates, - frame.width(), frame.height(), data.std_dev); + std::vector<FilteredSample> samples = GetSampleValuesForFrame( + frame, sample_coordinates, frame.width(), frame.height(), data.std_dev); if (samples.empty()) { RTC_LOG(LS_ERROR) << "Failed to get sample values for frame"; return std::nullopt; diff --git a/third_party/libwebrtc/video/corruption_detection/frame_instrumentation_generator.cc b/third_party/libwebrtc/video/corruption_detection/frame_instrumentation_generator.cc @@ -19,12 +19,10 @@ #include <vector> #include "absl/algorithm/container.h" -#include "api/scoped_refptr.h" #include "api/video/corruption_detection_filter_settings.h" #include "api/video/encoded_image.h" #include "api/video/video_codec_type.h" #include "api/video/video_frame.h" -#include "api/video/video_frame_buffer.h" #include "api/video/video_frame_type.h" #include "api/video_codecs/video_codec.h" #include "common_video/frame_instrumentation_data.h" @@ -184,16 +182,6 @@ FrameInstrumentationGenerator::OnEncodedImage( return std::nullopt; } - scoped_refptr<I420BufferInterface> captured_frame_buffer_as_i420 = - captured_frame->video_frame_buffer()->ToI420(); - if (!captured_frame_buffer_as_i420) { - RTC_LOG(LS_ERROR) << "Failed to convert " - << VideoFrameBufferTypeToString( - captured_frame->video_frame_buffer()->type()) - << " image to I420."; - return std::nullopt; - } - FrameInstrumentationData data = { .sequence_index = sequence_index, .communicate_upper_bits = communicate_upper_bits, @@ -201,9 +189,8 @@ FrameInstrumentationGenerator::OnEncodedImage( .luma_error_threshold = filter_settings->luma_error_threshold, .chroma_error_threshold = 
filter_settings->chroma_error_threshold}; std::vector<FilteredSample> samples = GetSampleValuesForFrame( - captured_frame_buffer_as_i420, sample_coordinates, - encoded_image._encodedWidth, encoded_image._encodedHeight, - filter_settings->std_dev); + *captured_frame, sample_coordinates, encoded_image._encodedWidth, + encoded_image._encodedHeight, filter_settings->std_dev); data.sample_values.reserve(samples.size()); absl::c_transform(samples, std::back_inserter(data.sample_values), [](const FilteredSample& sample) { return sample.value; }); diff --git a/third_party/libwebrtc/video/corruption_detection/frame_pair_corruption_score.cc b/third_party/libwebrtc/video/corruption_detection/frame_pair_corruption_score.cc @@ -16,7 +16,7 @@ #include "absl/strings/string_view.h" #include "api/scoped_refptr.h" #include "api/video/corruption_detection_filter_settings.h" -#include "api/video/i420_buffer.h" +#include "api/video/video_frame.h" #include "api/video/video_frame_buffer.h" #include "rtc_base/checks.h" #include "video/corruption_detection/generic_mapping_functions.h" @@ -73,22 +73,24 @@ double FramePairCorruptionScorer::CalculateScore( halton_frame_sampler_.GetSampleCoordinatesForFrame(num_samples); RTC_DCHECK_EQ(halton_samples.size(), num_samples); - scoped_refptr<I420Buffer> reference_i420_buffer = - GetAsI420Buffer(reference_buffer.ToI420()); - scoped_refptr<I420Buffer> test_i420_buffer = - GetAsI420Buffer(test_buffer.ToI420()); + VideoFrame reference_frame = + VideoFrame::Builder() + .set_video_frame_buffer(reference_buffer.ToI420()) + .build(); + VideoFrame test_frame = VideoFrame::Builder() + .set_video_frame_buffer(test_buffer.ToI420()) + .build(); CorruptionDetectionFilterSettings filter_settings = GetCorruptionFilterSettings(qp, codec_type_); const std::vector<FilteredSample> filtered_reference_sample_values = - GetSampleValuesForFrame( - reference_i420_buffer, halton_samples, test_i420_buffer->width(), - test_i420_buffer->height(), filter_settings.std_dev); + 
GetSampleValuesForFrame(reference_frame, halton_samples, + test_frame.width(), test_frame.height(), + filter_settings.std_dev); const std::vector<FilteredSample> filtered_test_sample_values = - GetSampleValuesForFrame( - test_i420_buffer, halton_samples, test_i420_buffer->width(), - test_i420_buffer->height(), filter_settings.std_dev); + GetSampleValuesForFrame(test_frame, halton_samples, test_frame.width(), + test_frame.height(), filter_settings.std_dev); RTC_CHECK_EQ(filtered_reference_sample_values.size(), filtered_test_sample_values.size()); diff --git a/third_party/libwebrtc/video/corruption_detection/halton_frame_sampler.cc b/third_party/libwebrtc/video/corruption_detection/halton_frame_sampler.cc @@ -13,15 +13,19 @@ #include <algorithm> #include <cmath> #include <cstdint> +#include <memory> #include <vector> #include "api/scoped_refptr.h" #include "api/video/i420_buffer.h" +#include "api/video/nv12_buffer.h" +#include "api/video/video_frame.h" #include "api/video/video_frame_buffer.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_minmax.h" #include "video/corruption_detection/halton_sequence.h" +#include "video/corruption_detection/video_frame_sampler.h" namespace webrtc { namespace { @@ -98,18 +102,15 @@ void HaltonFrameSampler::SetCurrentIndex(int index) { } // Apply Gaussian filtering to the data. 
-double GetFilteredElement(int width, - int height, - int stride, - const uint8_t* data, +double GetFilteredElement(const VideoFrameSampler& frame_sampler, + VideoFrameSampler::ChannelType channel, int row, int column, double std_dev) { RTC_CHECK_GE(row, 0); - RTC_CHECK_LT(row, height); + RTC_CHECK_LT(row, frame_sampler.height(channel)); RTC_CHECK_GE(column, 0); - RTC_CHECK_LT(column, width); - RTC_CHECK_GE(stride, width); + RTC_CHECK_LT(column, frame_sampler.width(channel)); RTC_CHECK_GT(std_dev, 0.0) << "Standard deviation = 0 yields improper Gaussian weights."; @@ -127,13 +128,15 @@ double GetFilteredElement(int width, double element_sum = 0.0; double total_weight = 0.0; for (int r = std::max(row - max_distance, 0); - r < std::min(row + max_distance + 1, height); ++r) { + r < std::min(row + max_distance + 1, frame_sampler.height(channel)); + ++r) { for (int c = std::max(column - max_distance, 0); - c < std::min(column + max_distance + 1, width); ++c) { + c < std::min(column + max_distance + 1, frame_sampler.width(channel)); + ++c) { double weight = std::exp(-1.0 * (std::pow(row - r, 2) + std::pow(column - c, 2)) / (2.0 * std::pow(std_dev, 2))); - element_sum += data[r * stride + c] * weight; + element_sum += frame_sampler.GetSampleValue(channel, c, r) * weight; total_weight += weight; } } @@ -143,13 +146,13 @@ double GetFilteredElement(int width, } std::vector<FilteredSample> GetSampleValuesForFrame( - const scoped_refptr<I420BufferInterface> i420_frame_buffer, + const VideoFrame& frame, std::vector<HaltonFrameSampler::Coordinates> sample_coordinates, int scaled_width, int scaled_height, double std_dev_gaussian_blur) { // Validate input. 
- if (i420_frame_buffer == nullptr) { + if (frame.video_frame_buffer() == nullptr) { RTC_LOG(LS_WARNING) << "The framebuffer must not be nullptr"; return {}; } @@ -178,20 +181,51 @@ std::vector<FilteredSample> GetSampleValuesForFrame( << std_dev_gaussian_blur << ".\n"; return {}; } - if (scaled_width > i420_frame_buffer->width() || - scaled_height > i420_frame_buffer->height()) { + if (scaled_width > frame.width() || scaled_height > frame.height()) { RTC_LOG(LS_WARNING) << "Upscaling causes corruption. Therefore, only down-scaling is " "permissible."; return {}; } - // Scale the frame to the desired resolution: - // 1. Create a new buffer with the desired resolution. - // 2. Scale the old buffer to the size of the new buffer. - scoped_refptr<I420Buffer> scaled_i420_buffer = - I420Buffer::Create(scaled_width, scaled_height); - scaled_i420_buffer->ScaleFrom(*i420_frame_buffer); + VideoFrame scaled_frame = frame; + std::unique_ptr<VideoFrameSampler> frame_sampler; + if (scaled_width == frame.width() && scaled_height == frame.height()) { + frame_sampler = VideoFrameSampler::Create(frame); + } else { + // Scale the frame to the desired resolution: + // 1. Create a new buffer with the desired resolution. + // 2. Scale the old buffer to the size of the new buffer. + if (frame.video_frame_buffer()->type() == VideoFrameBuffer::Type::kNV12) { + scoped_refptr<NV12Buffer> scaled_buffer = + NV12Buffer::Create(scaled_width, scaled_height); + // Set crop width/height to full width/height so this is only a scaling + // operation, no cropping happening. 
+ scaled_buffer->CropAndScaleFrom( + *frame.video_frame_buffer()->GetNV12(), /*offset_x=*/0, + /*offset_y=*/0, /*crop_width=*/frame.width(), + /*crop_height=*/frame.height()); + scaled_frame.set_video_frame_buffer(scaled_buffer); + } else { + scoped_refptr<I420Buffer> scaled_buffer = + I420Buffer::Create(scaled_width, scaled_height); + scoped_refptr<I420BufferInterface> buffer = + frame.video_frame_buffer()->ToI420(); + if (buffer == nullptr) { + RTC_LOG(LS_WARNING) << "Unable to convert frame to I420 format."; + return {}; + } + scaled_buffer->ScaleFrom(*buffer); + scaled_frame.set_video_frame_buffer(scaled_buffer); + } + frame_sampler = VideoFrameSampler::Create(scaled_frame); + } + if (frame_sampler == nullptr) { + RTC_LOG(LS_WARNING) << "Unable to create frame sampler for buffer type " + << VideoFrameBufferTypeToString( + frame.video_frame_buffer()->type()); + return {}; + } // Treat the planes as if they would have the following 2-dimensional layout: // +------+---+ @@ -204,9 +238,14 @@ std::vector<FilteredSample> GetSampleValuesForFrame( // as if they were taken from the above layout. We then need to translate the // coordinates back to the corresponding plane's corresponding 2D coordinates. // Then we find the filtered value that corresponds to those coordinates. + RTC_DCHECK_EQ(frame_sampler->width(VideoFrameSampler::ChannelType::U), + frame_sampler->width(VideoFrameSampler::ChannelType::V)) + << "Chroma channels are expected to be equal in resolution."; int width_merged_planes = - scaled_i420_buffer->width() + scaled_i420_buffer->ChromaWidth(); - int height_merged_planes = scaled_i420_buffer->height(); + frame_sampler->width(VideoFrameSampler::ChannelType::Y) + + frame_sampler->width(VideoFrameSampler::ChannelType::U); + int height_merged_planes = + frame_sampler->height(VideoFrameSampler::ChannelType::Y); // Fetch the sample value for all of the requested coordinates. 
std::vector<FilteredSample> filtered_samples; filtered_samples.reserve(sample_coordinates.size()); @@ -218,31 +257,28 @@ std::vector<FilteredSample> GetSampleValuesForFrame( // Map to plane coordinates and fetch the value. double value_for_coordinate; - if (column < scaled_i420_buffer->width()) { + if (column < frame_sampler->width(VideoFrameSampler::ChannelType::Y)) { // Y plane. - value_for_coordinate = GetFilteredElement( - scaled_i420_buffer->width(), scaled_i420_buffer->height(), - scaled_i420_buffer->StrideY(), scaled_i420_buffer->DataY(), row, - column, std_dev_gaussian_blur); + value_for_coordinate = + GetFilteredElement(*frame_sampler, VideoFrameSampler::ChannelType::Y, + row, column, std_dev_gaussian_blur); filtered_samples.push_back( {.value = value_for_coordinate, .plane = ImagePlane::kLuma}); - } else if (row < scaled_i420_buffer->ChromaHeight()) { + } else if (row < frame_sampler->height(VideoFrameSampler::ChannelType::U)) { // U plane. - column -= scaled_i420_buffer->width(); - value_for_coordinate = GetFilteredElement( - scaled_i420_buffer->ChromaWidth(), scaled_i420_buffer->ChromaHeight(), - scaled_i420_buffer->StrideU(), scaled_i420_buffer->DataU(), row, - column, std_dev_gaussian_blur); + column -= frame_sampler->width(VideoFrameSampler::ChannelType::Y); + value_for_coordinate = + GetFilteredElement(*frame_sampler, VideoFrameSampler::ChannelType::U, + row, column, std_dev_gaussian_blur); filtered_samples.push_back( {.value = value_for_coordinate, .plane = ImagePlane::kChroma}); } else { // V plane. 
- column -= scaled_i420_buffer->width(); - row -= scaled_i420_buffer->ChromaHeight(); - value_for_coordinate = GetFilteredElement( - scaled_i420_buffer->ChromaWidth(), scaled_i420_buffer->ChromaHeight(), - scaled_i420_buffer->StrideV(), scaled_i420_buffer->DataV(), row, - column, std_dev_gaussian_blur); + column -= frame_sampler->width(VideoFrameSampler::ChannelType::Y); + row -= frame_sampler->height(VideoFrameSampler::ChannelType::U); + value_for_coordinate = + GetFilteredElement(*frame_sampler, VideoFrameSampler::ChannelType::V, + row, column, std_dev_gaussian_blur); filtered_samples.push_back( {.value = value_for_coordinate, .plane = ImagePlane::kChroma}); } @@ -250,4 +286,15 @@ std::vector<FilteredSample> GetSampleValuesForFrame( return filtered_samples; } +[[deprecated]] std::vector<FilteredSample> GetSampleValuesForFrame( + scoped_refptr<I420BufferInterface> i420_frame_buffer, + std::vector<HaltonFrameSampler::Coordinates> sample_coordinates, + int scaled_width, + int scaled_height, + double std_dev_gaussian_blur) { + return GetSampleValuesForFrame( + VideoFrame::Builder().set_video_frame_buffer(i420_frame_buffer).build(), + sample_coordinates, scaled_width, scaled_height, std_dev_gaussian_blur); +} + } // namespace webrtc diff --git a/third_party/libwebrtc/video/corruption_detection/halton_frame_sampler.h b/third_party/libwebrtc/video/corruption_detection/halton_frame_sampler.h @@ -16,8 +16,10 @@ #include <vector> #include "api/scoped_refptr.h" +#include "api/video/video_frame.h" #include "api/video/video_frame_buffer.h" #include "video/corruption_detection/halton_sequence.h" +#include "video/corruption_detection/video_frame_sampler.h" namespace webrtc { @@ -67,6 +69,15 @@ class HaltonFrameSampler { // 3. Apply the Gaussian filtering given by `std_dev_gaussian_blur`. // 4. Fetch the values at the scaled coordinates in the filtered frame. 
std::vector<FilteredSample> GetSampleValuesForFrame( + const VideoFrame& frame, + std::vector<HaltonFrameSampler::Coordinates> sample_coordinates, + int scaled_width, + int scaled_height, + double std_dev_gaussian_blur); + +// For backwards compatiblity only. +// TODO(bugs.webrtc.org/398100): Remove when downstream usage is gone. +[[deprecated]] std::vector<FilteredSample> GetSampleValuesForFrame( scoped_refptr<I420BufferInterface> i420_frame_buffer, std::vector<HaltonFrameSampler::Coordinates> sample_coordinates, int scaled_width, @@ -74,10 +85,8 @@ std::vector<FilteredSample> GetSampleValuesForFrame( double std_dev_gaussian_blur); // Returns the blurred value. The minimum half-kernel size is 3 pixels. -double GetFilteredElement(int width, - int height, - int stride, - const uint8_t* data, +double GetFilteredElement(const VideoFrameSampler& frame_sampler, + VideoFrameSampler::ChannelType channel, int row, int column, double std_dev); diff --git a/third_party/libwebrtc/video/corruption_detection/halton_frame_sampler_unittest.cc b/third_party/libwebrtc/video/corruption_detection/halton_frame_sampler_unittest.cc @@ -11,12 +11,17 @@ #include "video/corruption_detection/halton_frame_sampler.h" #include <cstdint> +#include <cstring> +#include <memory> #include <vector> +#include "api/array_view.h" #include "api/scoped_refptr.h" #include "api/video/i420_buffer.h" +#include "api/video/video_frame.h" #include "test/gmock.h" #include "test/gtest.h" +#include "video/corruption_detection/video_frame_sampler.h" namespace webrtc { namespace { @@ -39,17 +44,12 @@ const double kDefaultStdDevGaussianBlur = 0.02; #if GTEST_HAS_DEATH_TEST // Defaults for blurring tests. 
-const int kDefaultWidth = 4; -const int kDefaultHeight = 4; -const int kDefaultStride = 4; -const uint8_t kDefaultData[kDefaultWidth * kDefaultHeight] = { - 20, 196, 250, 115, 139, 39, 99, 197, 21, 166, 254, 28, 227, 54, 64, 46}; const int kDefaultRow = 3; const int kDefaultColumn = 2; const double kDefaultStdDev = 1.12; #endif // GTEST_HAS_DEATH_TEST -scoped_refptr<I420Buffer> MakeDefaultI420FrameBuffer() { +VideoFrame MakeDefaultFrame() { // Create an I420 frame of size 4x4. const int kDefaultLumaWidth = 4; const int kDefaultLumaHeight = 4; @@ -59,10 +59,20 @@ scoped_refptr<I420Buffer> MakeDefaultI420FrameBuffer() { const uint8_t kDefaultUContent[4] = {156, 203, 36, 128}; const uint8_t kDefaultVContent[4] = {112, 2, 0, 24}; - return I420Buffer::Copy(kDefaultLumaWidth, kDefaultLumaHeight, - kDefaultYContent, kDefaultLumaWidth, kDefaultUContent, - kDefaultChromaWidth, kDefaultVContent, - kDefaultChromaWidth); + return VideoFrame::Builder() + .set_video_frame_buffer(I420Buffer::Copy( + kDefaultLumaWidth, kDefaultLumaHeight, kDefaultYContent, + kDefaultLumaWidth, kDefaultUContent, kDefaultChromaWidth, + kDefaultVContent, kDefaultChromaWidth)) + .build(); +} + +VideoFrame MakeFrame(int width, + int height, + webrtc::ArrayView<const uint8_t> data) { + scoped_refptr<I420Buffer> buffer = I420Buffer::Create(width, height); + memcpy(buffer->MutableDataY(), data.data(), width * height); + return VideoFrame::Builder().set_video_frame_buffer(buffer).build(); } std::vector<Coordinates> MakeDefaultSampleCoordinates() { @@ -76,7 +86,6 @@ std::vector<Coordinates> MakeDefaultSampleCoordinates() { TEST(GaussianFilteringTest, ShouldReturnFilteredValueWhenInputIsValid) { const int kWidth = 8; const int kHeight = 8; - const int kStride = 8; const uint8_t kData[kWidth * kHeight] = { 219, 38, 75, 13, 77, 22, 108, 5, // 199, 105, 237, 3, 194, 63, 200, 95, // @@ -92,50 +101,51 @@ TEST(GaussianFilteringTest, ShouldReturnFilteredValueWhenInputIsValid) { // Resulting in a filter size of 3 
pixels. const double kStdDev = 1; - EXPECT_THAT(GetFilteredElement(kWidth, kHeight, kStride, kData, kRow, kColumn, - kStdDev), + std::unique_ptr<VideoFrameSampler> sampler = + VideoFrameSampler::Create(MakeFrame(kWidth, kHeight, kData)); + EXPECT_THAT(GetFilteredElement(*sampler, VideoFrameSampler::ChannelType::Y, + kRow, kColumn, kStdDev), DoubleEq(126.45897447350468)); } #if GTEST_HAS_DEATH_TEST +std::unique_ptr<VideoFrameSampler> MakeDefaultSampler() { + return VideoFrameSampler::Create(MakeDefaultFrame()); +} + TEST(GaussianFilteringTest, ShouldCrashWhenRowIsNegative) { - EXPECT_DEATH( - GetFilteredElement(kDefaultWidth, kDefaultHeight, kDefaultStride, - kDefaultData, -1, kDefaultColumn, kDefaultStdDev), - _); + EXPECT_DEATH(GetFilteredElement(*MakeDefaultSampler(), + VideoFrameSampler::ChannelType::Y, -1, + kDefaultColumn, kDefaultStdDev), + _); } TEST(GaussianFilteringTest, ShouldCrashWhenRowIsOutOfRange) { - EXPECT_DEATH( - GetFilteredElement(kDefaultWidth, 4, kDefaultStride, kDefaultData, 4, - kDefaultColumn, kDefaultStdDev), - _); + EXPECT_DEATH(GetFilteredElement(*MakeDefaultSampler(), + VideoFrameSampler::ChannelType::Y, 4, + kDefaultColumn, kDefaultStdDev), + _); } TEST(GaussianFilteringTest, ShouldCrashWhenColumnIsNegative) { - EXPECT_DEATH( - GetFilteredElement(kDefaultWidth, kDefaultHeight, kDefaultStride, - kDefaultData, kDefaultRow, -1, kDefaultStdDev), - _); -} - -TEST(GaussianFilteringTest, ShouldCrashWhenColumnIsOutOfRange) { - EXPECT_DEATH(GetFilteredElement(4, kDefaultHeight, kDefaultStride, - kDefaultData, kDefaultRow, 4, kDefaultStdDev), + EXPECT_DEATH(GetFilteredElement(*MakeDefaultSampler(), + VideoFrameSampler::ChannelType::Y, + kDefaultRow, -1, kDefaultStdDev), _); } -TEST(GaussianFilteringTest, ShouldCrashWhenStrideIsSmallerThanWidth) { - EXPECT_DEATH(GetFilteredElement(4, kDefaultHeight, 3, kDefaultData, - kDefaultRow, kDefaultColumn, kDefaultStdDev), +TEST(GaussianFilteringTest, ShouldCrashWhenColumnIsOutOfRange) { + 
EXPECT_DEATH(GetFilteredElement(*MakeDefaultSampler(), + VideoFrameSampler::ChannelType::Y, + kDefaultRow, 4, kDefaultStdDev), _); } TEST(GaussianFilteringTest, ShouldCrashWhenStdDevIsNegative) { - EXPECT_DEATH( - GetFilteredElement(kDefaultWidth, kDefaultHeight, kDefaultStride, - kDefaultData, kDefaultRow, kDefaultColumn, -1.0), - _); + EXPECT_DEATH(GetFilteredElement(*MakeDefaultSampler(), + VideoFrameSampler::ChannelType::Y, + kDefaultRow, kDefaultColumn, -1.0), + _); } TEST(GaussianFilteringTest, RoundingErrorsShouldNotHappen) { @@ -144,8 +154,10 @@ TEST(GaussianFilteringTest, RoundingErrorsShouldNotHappen) { constexpr int kHeight = 128; constexpr double kStdDev = 40; const std::vector<uint8_t> data(kWidth * kHeight, 255); + std::unique_ptr<VideoFrameSampler> sampler = + VideoFrameSampler::Create(MakeFrame(kWidth, kHeight, data)); - EXPECT_THAT(GetFilteredElement(kWidth, kHeight, kHeight, data.data(), + EXPECT_THAT(GetFilteredElement(*sampler, VideoFrameSampler::ChannelType::Y, kWidth / 2, kHeight / 2, kStdDev), 255); } @@ -166,38 +178,22 @@ TEST(HaltonFrameSamplerTest, FrameIsNotSampledWhenTimestampsAreEqual) { #endif // GTEST_HAS_DEATH_TEST TEST(HaltonFrameSamplerGaussianFilteringTest, - ShouldReturnEmptyListGivenInvalidInputNoFrameBuffer) { - const std::vector<Coordinates> kDefaultSampleCoordinates = - MakeDefaultSampleCoordinates(); - - EXPECT_THAT(GetSampleValuesForFrame(nullptr, kDefaultSampleCoordinates, - kDefaultScaledWidth, kDefaultScaledHeight, - kDefaultStdDevGaussianBlur), - IsEmpty()); -} - -TEST(HaltonFrameSamplerGaussianFilteringTest, ShouldReturnEmptyListGivenInvalidInputNoCoordinates) { - const scoped_refptr<I420Buffer> kDefaultI420Buffer = - MakeDefaultI420FrameBuffer(); - EXPECT_THAT( - GetSampleValuesForFrame(kDefaultI420Buffer, {}, kDefaultScaledWidth, + GetSampleValuesForFrame(MakeDefaultFrame(), {}, kDefaultScaledWidth, kDefaultScaledHeight, kDefaultStdDevGaussianBlur), IsEmpty()); } TEST(HaltonFrameSamplerGaussianFilteringTest, 
ShouldReturnEmptyListGivenInvalidInputOutOfRangeCoordinates) { - const scoped_refptr<I420Buffer> kDefaultI420Buffer = - MakeDefaultI420FrameBuffer(); const std::vector<Coordinates> kSampleCoordinates = { {.row = 0.2, .column = 0.7}, {.row = 0.5, .column = 1.0}, {.row = 0.3, .column = 0.7}, {.row = 0.8, .column = 0.4}}; - EXPECT_THAT(GetSampleValuesForFrame(kDefaultI420Buffer, kSampleCoordinates, + EXPECT_THAT(GetSampleValuesForFrame(MakeDefaultFrame(), kSampleCoordinates, kDefaultScaledWidth, kDefaultScaledHeight, kDefaultStdDevGaussianBlur), IsEmpty()); @@ -205,49 +201,40 @@ TEST(HaltonFrameSamplerGaussianFilteringTest, TEST(HaltonFrameSamplerGaussianFilteringTest, ShouldReturnEmptyListGivenInvalidInputWidthZero) { - const scoped_refptr<I420Buffer> kDefaultI420Buffer = - MakeDefaultI420FrameBuffer(); const std::vector<Coordinates> kDefaultSampleCoordinates = MakeDefaultSampleCoordinates(); EXPECT_THAT( - GetSampleValuesForFrame(kDefaultI420Buffer, kDefaultSampleCoordinates, 0, + GetSampleValuesForFrame(MakeDefaultFrame(), kDefaultSampleCoordinates, 0, kDefaultScaledHeight, kDefaultStdDevGaussianBlur), IsEmpty()); } TEST(HaltonFrameSamplerGaussianFilteringTest, ShouldReturnEmptyListGivenInvalidInputHeightZero) { - const scoped_refptr<I420Buffer> kDefaultI420Buffer = - MakeDefaultI420FrameBuffer(); const std::vector<Coordinates> kDefaultSampleCoordinates = MakeDefaultSampleCoordinates(); EXPECT_THAT(GetSampleValuesForFrame( - kDefaultI420Buffer, kDefaultSampleCoordinates, + MakeDefaultFrame(), kDefaultSampleCoordinates, kDefaultScaledWidth, 0, kDefaultStdDevGaussianBlur), IsEmpty()); } TEST(HaltonFrameSamplerGaussianFilteringTest, ShouldReturnEmptyListGivenInvalidInputStdDevNegative) { - const scoped_refptr<I420Buffer> kDefaultI420Buffer = - MakeDefaultI420FrameBuffer(); const std::vector<Coordinates> kDefaultSampleCoordinates = MakeDefaultSampleCoordinates(); EXPECT_THAT( - GetSampleValuesForFrame(kDefaultI420Buffer, kDefaultSampleCoordinates, + 
GetSampleValuesForFrame(MakeDefaultFrame(), kDefaultSampleCoordinates, kDefaultScaledWidth, kDefaultScaledHeight, -1.0), IsEmpty()); } TEST(HaltonFrameSamplerGaussianFilteringTest, ShouldReturnEmptyListWhenUpscaling) { - const scoped_refptr<I420Buffer> kDefaultI420Buffer = - MakeDefaultI420FrameBuffer(); - - EXPECT_THAT(GetSampleValuesForFrame(kDefaultI420Buffer, + EXPECT_THAT(GetSampleValuesForFrame(MakeDefaultFrame(), MakeDefaultSampleCoordinates(), /*scaled_width=*/8, /*scaled_height=*/8, kDefaultStdDevGaussianBlur), @@ -267,6 +254,8 @@ TEST(HaltonFrameSamplerGaussianFilteringTest, const scoped_refptr<I420Buffer> kI420Buffer = I420Buffer::Copy(kLumaWidth, kLumaHeight, kYContent, kLumaWidth, kUContent, kChromaWidth, kVContent, kChromaWidth); + VideoFrame frame = + VideoFrame::Builder().set_video_frame_buffer(kI420Buffer).build(); // Coordinates in all planes. const std::vector<Coordinates> kSampleCoordinates = { @@ -283,7 +272,7 @@ TEST(HaltonFrameSamplerGaussianFilteringTest, const double kStdDevGaussianBlur = 0.02; EXPECT_THAT( - GetSampleValuesForFrame(kI420Buffer, kSampleCoordinates, kScaledWidth, + GetSampleValuesForFrame(frame, kSampleCoordinates, kScaledWidth, kScaledHeight, kStdDevGaussianBlur), ElementsAre(AllOf(Field(&FilteredSample::value, DoubleEq(156.0)), Field(&FilteredSample::plane, ImagePlane::kChroma)), @@ -308,6 +297,8 @@ TEST(HaltonFrameSamplerGaussianFilteringTest, const scoped_refptr<I420Buffer> kI420Buffer = I420Buffer::Copy(kLumaWidth, kLumaHeight, kYContent, kLumaWidth, kUContent, kChromaWidth, kVContent, kChromaWidth); + VideoFrame frame = + VideoFrame::Builder().set_video_frame_buffer(kI420Buffer).build(); // Coordinates in all planes. 
const std::vector<Coordinates> kSampleCoordinates = { @@ -324,7 +315,7 @@ TEST(HaltonFrameSamplerGaussianFilteringTest, const double kStdDevGaussianBlur = 0.02; EXPECT_THAT( - GetSampleValuesForFrame(kI420Buffer, kSampleCoordinates, kScaledWidth, + GetSampleValuesForFrame(frame, kSampleCoordinates, kScaledWidth, kScaledHeight, kStdDevGaussianBlur), ElementsAre(AllOf(Field(&FilteredSample::value, DoubleEq(131.0)), Field(&FilteredSample::plane, ImagePlane::kChroma)), @@ -362,6 +353,8 @@ TEST(HaltonFrameSamplerGaussianFilteringTest, const scoped_refptr<I420Buffer> kI420Buffer = I420Buffer::Copy(kLumaWidth, kLumaHeight, kYContent, kLumaWidth, kUContent, kChromaWidth, kVContent, kChromaWidth); + VideoFrame frame = + VideoFrame::Builder().set_video_frame_buffer(kI420Buffer).build(); // Coordinates in all (YUV) planes. const std::vector<Coordinates> kSampleCoordinates = { @@ -378,7 +371,7 @@ TEST(HaltonFrameSamplerGaussianFilteringTest, const double kStdDevGaussianBlur = 1; EXPECT_THAT( - GetSampleValuesForFrame(kI420Buffer, kSampleCoordinates, kScaledWidth, + GetSampleValuesForFrame(frame, kSampleCoordinates, kScaledWidth, kScaledHeight, kStdDevGaussianBlur), ElementsAre( AllOf(Field(&FilteredSample::value, DoubleEq(114.6804322931639)),