tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

h264_encoder_impl.cc (31409B)


      1 /*
      2 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
      3 *
      4 *  Use of this source code is governed by a BSD-style license
      5 *  that can be found in the LICENSE file in the root of the source
      6 *  tree. An additional intellectual property rights grant can be found
      7 *  in the file PATENTS.  All contributing project authors may
      8 *  be found in the AUTHORS file in the root of the source tree.
      9 *
     10 */
     11 
     12 // Everything declared/defined in this header is only required when WebRTC is
     13 // build with H264 support, please do not move anything out of the
     14 // #ifdef unless needed and tested.
     15 #ifdef WEBRTC_USE_H264
     16 
     17 #include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
     18 
     19 #include <algorithm>
     20 #include <cstddef>
     21 #include <cstdint>
     22 #include <cstring>
     23 #include <limits>
     24 #include <optional>
     25 #include <string>
     26 #include <vector>
     27 
     28 #include "api/environment/environment.h"
     29 #include "api/scoped_refptr.h"
     30 #include "api/units/data_rate.h"
     31 #include "api/video/encoded_image.h"
     32 #include "api/video/i420_buffer.h"
     33 #include "api/video/video_bitrate_allocation.h"
     34 #include "api/video/video_bitrate_allocator.h"
     35 #include "api/video/video_codec_constants.h"
     36 #include "api/video/video_codec_type.h"
     37 #include "api/video/video_frame.h"
     38 #include "api/video/video_frame_buffer.h"
     39 #include "api/video/video_frame_type.h"
     40 #include "api/video_codecs/scalability_mode.h"
     41 #include "api/video_codecs/video_codec.h"
     42 #include "api/video_codecs/video_encoder.h"
     43 #include "common_video/libyuv/include/webrtc_libyuv.h"
     44 #include "modules/video_coding/codecs/h264/include/h264.h"
     45 #include "modules/video_coding/codecs/h264/include/h264_globals.h"
     46 #include "modules/video_coding/codecs/interface/common_constants.h"
     47 #include "modules/video_coding/include/video_codec_interface.h"
     48 #include "modules/video_coding/include/video_error_codes.h"
     49 #include "modules/video_coding/svc/create_scalability_structure.h"
     50 #include "modules/video_coding/svc/scalable_video_controller.h"
     51 #include "modules/video_coding/utility/simulcast_rate_allocator.h"
     52 #include "modules/video_coding/utility/simulcast_utility.h"
     53 #include "rtc_base/checks.h"
     54 #include "rtc_base/logging.h"
     55 #include "system_wrappers/include/metrics.h"
     56 #include "third_party/libyuv/include/libyuv/scale.h"
     57 #include "third_party/openh264/src/codec/api/wels/codec_api.h"
     58 #include "third_party/openh264/src/codec/api/wels/codec_app_def.h"
     59 #include "third_party/openh264/src/codec/api/wels/codec_def.h"
     60 #include "third_party/openh264/src/codec/api/wels/codec_ver.h"
     61 
     62 namespace webrtc {
     63 
     64 namespace {
     65 
// Enables WELS_LOG_DETAIL tracing from the OpenH264 encoder when true;
// otherwise the library's default trace level (WELS_LOG_DEFAULT) is used.
const bool kOpenH264EncoderDetailedLogging = false;

// QP scaling thresholds.
const int kLowH264QpThreshold = 24;
const int kHighH264QpThreshold = 37;

// Used by histograms. Values of entries should not be changed.
enum H264EncoderImplEvent {
 kH264EncoderEventInit = 0,
 kH264EncoderEventError = 1,
 kH264EncoderEventMax = 16,
};
     78 
     79 int NumberOfThreads(std::optional<int> encoder_thread_limit,
     80                    int width,
     81                    int height,
     82                    int number_of_cores) {
     83  // TODO(hbos): In Chromium, multiple threads do not work with sandbox on Mac,
     84  // see crbug.com/583348. Until further investigated, only use one thread.
     85  // While this limitation is gone, this changes the bitstream format (see
     86  // bugs.webrtc.org/14368) so still guarded by field trial to allow for
     87  // experimentation using th experimental
     88  // WebRTC-VideoEncoderSettings/encoder_thread_limit trial.
     89  if (encoder_thread_limit.has_value()) {
     90    int limit = encoder_thread_limit.value();
     91    RTC_DCHECK_GE(limit, 1);
     92    if (width * height >= 1920 * 1080 && number_of_cores > 8) {
     93      return std::min(limit, 8);  // 8 threads for 1080p on high perf machines.
     94    } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
     95      return std::min(limit, 3);  // 3 threads for 1080p.
     96    } else if (width * height > 640 * 480 && number_of_cores >= 3) {
     97      return std::min(limit, 2);  // 2 threads for qHD/HD.
     98    } else {
     99      return 1;  // 1 thread for VGA or less.
    100    }
    101  }
    102  // TODO(sprang): Also check sSliceArgument.uiSliceNum on GetEncoderParams(),
    103  //               before enabling multithreading here.
    104  return 1;
    105 }
    106 
    107 VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
    108  switch (type) {
    109    case videoFrameTypeIDR:
    110      return VideoFrameType::kVideoFrameKey;
    111    case videoFrameTypeSkip:
    112    case videoFrameTypeI:
    113    case videoFrameTypeP:
    114    case videoFrameTypeIPMixed:
    115      return VideoFrameType::kVideoFrameDelta;
    116    case videoFrameTypeInvalid:
    117      break;
    118  }
    119  RTC_DCHECK_NOTREACHED() << "Unexpected/invalid frame type: " << type;
    120  return VideoFrameType::kEmptyFrame;
    121 }
    122 
    123 std::optional<ScalabilityMode> ScalabilityModeFromTemporalLayers(
    124    int num_temporal_layers) {
    125  switch (num_temporal_layers) {
    126    case 0:
    127      break;
    128    case 1:
    129      return ScalabilityMode::kL1T1;
    130    case 2:
    131      return ScalabilityMode::kL1T2;
    132    case 3:
    133      return ScalabilityMode::kL1T3;
    134    default:
    135      RTC_DCHECK_NOTREACHED();
    136  }
    137  return std::nullopt;
    138 }
    139 
    140 }  // namespace
    141 
// Helper method used by H264EncoderImpl::Encode.
// Copies the encoded bytes from `info` to `encoded_image`. The
// `encoded_image->_buffer` may be deleted and reallocated if a bigger buffer is
// required.
//
// After OpenH264 encoding, the encoded bytes are stored in `info` spread out
// over a number of layers and "NAL units". Each NAL unit is a fragment starting
// with the four-byte start code {0,0,0,1}. All of this data (including the
// start codes) is copied to the `encoded_image->_buffer`.
static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
 // First pass: calculate minimum buffer size required to hold encoded data,
 // summing the byte length of every NAL unit across all layers.
 size_t required_capacity = 0;
 size_t fragments_count = 0;
 for (int layer = 0; layer < info->iLayerNum; ++layer) {
   const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
   for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) {
     RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0);
     // Ensure `required_capacity` will not overflow.
     RTC_CHECK_LE(layerInfo.pNalLengthInByte[nal],
                  std::numeric_limits<size_t>::max() - required_capacity);
     required_capacity += layerInfo.pNalLengthInByte[nal];
   }
 }
 // Single allocation large enough for every layer's bitstream.
 auto buffer = EncodedImageBuffer::Create(required_capacity);
 encoded_image->SetEncodedData(buffer);

 // Second pass: iterate layers and NAL units, note each NAL unit as a
 // fragment and copy the data to `encoded_image->_buffer`.
 const uint8_t start_code[4] = {0, 0, 0, 1};
 size_t frag = 0;
 encoded_image->set_size(0);
 for (int layer = 0; layer < info->iLayerNum; ++layer) {
   const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
   // Iterate NAL units making up this layer, noting fragments.
   size_t layer_len = 0;
   for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++frag) {
     // Because the sum of all layer lengths, `required_capacity`, fits in a
     // `size_t`, we know that any indices in-between will not overflow.
     // Every NAL is expected to begin with the 4-byte start code (DCHECKed).
     RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
     RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
     RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
     RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
     RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
     layer_len += layerInfo.pNalLengthInByte[nal];
   }
   // Copy the entire layer's data (including start codes).
   memcpy(buffer->data() + encoded_image->size(), layerInfo.pBsBuf, layer_len);
   encoded_image->set_size(encoded_image->size() + layer_len);
 }
}
    192 
// Constructs an uninitialized encoder; real setup happens in InitEncode().
// PSNR calculation is gated on the WebRTC-Video-CalculatePsnr field trial.
H264EncoderImpl::H264EncoderImpl(const Environment& env,
                                H264EncoderSettings settings)
   : env_(env),
     packetization_mode_(settings.packetization_mode),
     max_payload_size_(0),
     number_of_cores_(0),
     encoded_image_callback_(nullptr),
     has_reported_init_(false),
     has_reported_error_(false),
     calculate_psnr_(
         env.field_trials().IsEnabled("WebRTC-Video-CalculatePsnr")) {
 // Reserve one slot per possible simulcast stream up front (downscaled
 // buffers exclude the base stream, hence kMaxSimulcastStreams - 1).
 downscaled_buffers_.reserve(kMaxSimulcastStreams - 1);
 encoded_images_.reserve(kMaxSimulcastStreams);
 encoders_.reserve(kMaxSimulcastStreams);
 configurations_.reserve(kMaxSimulcastStreams);
 tl0sync_limit_.reserve(kMaxSimulcastStreams);
 svc_controllers_.reserve(kMaxSimulcastStreams);
}
    211 
H264EncoderImpl::~H264EncoderImpl() {
 // Tear down all OpenH264 encoder instances and per-stream state.
 Release();
}
    215 
// Validates `inst`, releases any previous session, then creates and
// initializes one OpenH264 encoder per simulcast stream. Internal arrays are
// stored in reverse simulcast order: configurations_[i] corresponds to
// simulcast stream `number_of_streams - 1 - i`. Returns
// WEBRTC_VIDEO_CODEC_ERR_PARAMETER for invalid settings,
// WEBRTC_VIDEO_CODEC_ERROR if encoder creation/initialization fails, and
// WEBRTC_VIDEO_CODEC_OK on success.
int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
                                   const VideoEncoder::Settings& settings) {
 ReportInit();
 if (!inst || inst->codecType != kVideoCodecH264) {
   ReportError();
   return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
 }
 if (inst->maxFramerate == 0) {
   ReportError();
   return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
 }
 if (inst->width < 1 || inst->height < 1) {
   ReportError();
   return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
 }

 // Re-initialization: drop encoders from any previous InitEncode() call.
 int32_t release_ret = Release();
 if (release_ret != WEBRTC_VIDEO_CODEC_OK) {
   ReportError();
   return release_ret;
 }

 int number_of_streams = SimulcastUtility::NumberOfSimulcastStreams(*inst);
 bool doing_simulcast = (number_of_streams > 1);

 if (doing_simulcast &&
     !SimulcastUtility::ValidSimulcastParameters(*inst, number_of_streams)) {
   return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
 }
 downscaled_buffers_.resize(number_of_streams - 1);
 encoded_images_.resize(number_of_streams);
 encoders_.resize(number_of_streams);
 pictures_.resize(number_of_streams);
 svc_controllers_.resize(number_of_streams);
 scalability_modes_.resize(number_of_streams);
 configurations_.resize(number_of_streams);
 tl0sync_limit_.resize(number_of_streams);

 max_payload_size_ = settings.max_payload_size;
 number_of_cores_ = settings.number_of_cores;
 encoder_thread_limit_ = settings.encoder_thread_limit;
 codec_ = *inst;

 // Code expects simulcastStream resolutions to be correct, make sure they are
 // filled even when there are no simulcast layers.
 if (codec_.numberOfSimulcastStreams == 0) {
   codec_.simulcastStream[0].width = codec_.width;
   codec_.simulcastStream[0].height = codec_.height;
 }

 // `i` walks the internal arrays, `idx` walks simulcast streams in reverse.
 for (int i = 0, idx = number_of_streams - 1; i < number_of_streams;
      ++i, --idx) {
   ISVCEncoder* openh264_encoder;
   // Create encoder.
   if (WelsCreateSVCEncoder(&openh264_encoder) != 0) {
     // Failed to create encoder.
     RTC_LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
     RTC_DCHECK(!openh264_encoder);
     Release();
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
   RTC_DCHECK(openh264_encoder);
   if (kOpenH264EncoderDetailedLogging) {
     int trace_level = WELS_LOG_DETAIL;
     openh264_encoder->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
   }
   // else WELS_LOG_DEFAULT is used by default.

   // Store h264 encoder.
   encoders_[i] = openh264_encoder;

   // Set internal settings from codec_settings
   configurations_[i].simulcast_idx = idx;
   configurations_[i].sending = false;
   configurations_[i].width = codec_.simulcastStream[idx].width;
   configurations_[i].height = codec_.simulcastStream[idx].height;
   configurations_[i].max_frame_rate = static_cast<float>(codec_.maxFramerate);
   configurations_[i].frame_dropping_on = codec_.GetFrameDropEnabled();
   configurations_[i].key_frame_interval = codec_.H264()->keyFrameInterval;
   // Temporal layer count may come from the codec config or the per-stream
   // simulcast config; take the larger of the two.
   configurations_[i].num_temporal_layers =
       std::max(codec_.H264()->numberOfTemporalLayers,
                codec_.simulcastStream[idx].numberOfTemporalLayers);

   // Create downscaled image buffers.
   if (i > 0) {
     // I420 layout: luma stride = width, chroma strides = width / 2.
     downscaled_buffers_[i - 1] = I420Buffer::Create(
         configurations_[i].width, configurations_[i].height,
         configurations_[i].width, configurations_[i].width / 2,
         configurations_[i].width / 2);
   }

   // Codec_settings uses kbits/second; encoder uses bits/second.
   configurations_[i].max_bps = codec_.maxBitrate * 1000;
   configurations_[i].target_bps = codec_.startBitrate * 1000;

   // Create encoder parameters based on the layer configuration.
   SEncParamExt encoder_params = CreateEncoderParams(i);

   // Initialize.
   if (openh264_encoder->InitializeExt(&encoder_params) != 0) {
     RTC_LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
     Release();
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
   // TODO(pbos): Base init params on these values before submitting.
   int video_format = EVideoFormatType::videoFormatI420;
   openh264_encoder->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);

   // Initialize encoded image. Default buffer size: size of unencoded data.

   const size_t new_capacity =
       CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
                      codec_.simulcastStream[idx].height);
   encoded_images_[i].SetEncodedData(EncodedImageBuffer::Create(new_capacity));
   encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
   encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
   encoded_images_[i].set_size(0);

   // Temporal-layer sync bookkeeping plus an optional L1Tx SVC controller
   // (created only when the layer count maps to a scalability mode).
   tl0sync_limit_[i] = configurations_[i].num_temporal_layers;
   scalability_modes_[i] = ScalabilityModeFromTemporalLayers(
       configurations_[i].num_temporal_layers);
   if (scalability_modes_[i].has_value()) {
     svc_controllers_[i] = CreateScalabilityStructure(*scalability_modes_[i]);
     if (svc_controllers_[i] == nullptr) {
       RTC_LOG(LS_ERROR) << "Failed to create scalability structure";
       Release();
       ReportError();
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
   }
 }

 // Apply the initial rate allocation derived from the codec settings.
 SimulcastRateAllocator init_allocator(env_, codec_);
 VideoBitrateAllocation allocation =
     init_allocator.Allocate(VideoBitrateAllocationParameters(
         DataRate::KilobitsPerSec(codec_.startBitrate), codec_.maxFramerate));
 SetRates(RateControlParameters(allocation, codec_.maxFramerate));
 return WEBRTC_VIDEO_CODEC_OK;
}
    357 
    358 int32_t H264EncoderImpl::Release() {
    359  while (!encoders_.empty()) {
    360    ISVCEncoder* openh264_encoder = encoders_.back();
    361    if (openh264_encoder) {
    362      RTC_CHECK_EQ(0, openh264_encoder->Uninitialize());
    363      WelsDestroySVCEncoder(openh264_encoder);
    364    }
    365    encoders_.pop_back();
    366  }
    367  downscaled_buffers_.clear();
    368  configurations_.clear();
    369  encoded_images_.clear();
    370  pictures_.clear();
    371  tl0sync_limit_.clear();
    372  svc_controllers_.clear();
    373  scalability_modes_.clear();
    374  return WEBRTC_VIDEO_CODEC_OK;
    375 }
    376 
// Stores the callback that Encode() uses to deliver encoded frames.
int32_t H264EncoderImpl::RegisterEncodeCompleteCallback(
   EncodedImageCallback* callback) {
 encoded_image_callback_ = callback;
 return WEBRTC_VIDEO_CODEC_OK;
}
    382 
    383 void H264EncoderImpl::SetRates(const RateControlParameters& parameters) {
    384  if (encoders_.empty()) {
    385    RTC_LOG(LS_WARNING) << "SetRates() while uninitialized.";
    386    return;
    387  }
    388 
    389  if (parameters.framerate_fps < 1.0) {
    390    RTC_LOG(LS_WARNING) << "Invalid frame rate: " << parameters.framerate_fps;
    391    return;
    392  }
    393 
    394  if (parameters.bitrate.get_sum_bps() == 0) {
    395    // Encoder paused, turn off all encoding.
    396    for (size_t i = 0; i < configurations_.size(); ++i) {
    397      configurations_[i].SetStreamState(false);
    398    }
    399    return;
    400  }
    401 
    402  codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps);
    403 
    404  size_t stream_idx = encoders_.size() - 1;
    405  for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
    406    // Update layer config.
    407    configurations_[i].target_bps =
    408        parameters.bitrate.GetSpatialLayerSum(stream_idx);
    409    configurations_[i].max_frame_rate = parameters.framerate_fps;
    410 
    411    if (configurations_[i].target_bps) {
    412      configurations_[i].SetStreamState(true);
    413 
    414      // Update h264 encoder.
    415      SBitrateInfo target_bitrate;
    416      memset(&target_bitrate, 0, sizeof(SBitrateInfo));
    417      target_bitrate.iLayer = SPATIAL_LAYER_ALL,
    418      target_bitrate.iBitrate = configurations_[i].target_bps;
    419      encoders_[i]->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
    420      encoders_[i]->SetOption(ENCODER_OPTION_FRAME_RATE,
    421                              &configurations_[i].max_frame_rate);
    422    } else {
    423      configurations_[i].SetStreamState(false);
    424    }
    425  }
    426 }
    427 
// Encodes `input_frame` on every active stream and delivers each result via
// `encoded_image_callback_`. Layer 0 encodes the input at full resolution;
// each subsequent layer is produced by downscaling the previous layer's
// picture with libyuv. `frame_types` (if provided) may request key frames or
// frame skips per stream. Returns WEBRTC_VIDEO_CODEC_OK unless the encoder
// is uninitialized, conversion to I420 fails, or OpenH264 reports an error.
int32_t H264EncoderImpl::Encode(
   const VideoFrame& input_frame,
   const std::vector<VideoFrameType>* frame_types) {
 if (encoders_.empty()) {
   ReportError();
   return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
 }
 if (!encoded_image_callback_) {
   RTC_LOG(LS_WARNING)
       << "InitEncode() has been called, but a callback function "
          "has not been set with RegisterEncodeCompleteCallback()";
   ReportError();
   return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
 }

 // OpenH264 consumes I420, so convert whatever buffer type arrived.
 scoped_refptr<I420BufferInterface> frame_buffer =
     input_frame.video_frame_buffer()->ToI420();
 if (!frame_buffer) {
   RTC_LOG(LS_ERROR) << "Failed to convert "
                     << VideoFrameBufferTypeToString(
                            input_frame.video_frame_buffer()->type())
                     << " image to I420. Can't encode frame.";
   return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
 }
 RTC_CHECK(frame_buffer->type() == VideoFrameBuffer::Type::kI420 ||
           frame_buffer->type() == VideoFrameBuffer::Type::kI420A);

 bool is_keyframe_needed = false;
 for (size_t i = 0; i < configurations_.size(); ++i) {
   if (configurations_[i].key_frame_request && configurations_[i].sending) {
     // This is legacy behavior, generating a keyframe on all layers
     // when generating one for a layer that became active for the first time
     // or after being disabled.
     is_keyframe_needed = true;
     break;
   }
 }

 // configurations_[0] holds the layer encoding the input at full size.
 RTC_DCHECK_EQ(configurations_[0].width, frame_buffer->width());
 RTC_DCHECK_EQ(configurations_[0].height, frame_buffer->height());

#ifdef WEBRTC_ENCODER_PSNR_STATS
 bool calculate_psnr =
     calculate_psnr_ && psnr_frame_sampler_.ShouldBeSampled(input_frame);
#endif

 // Encode image for each layer.
 for (size_t i = 0; i < encoders_.size(); ++i) {
   // EncodeFrame input.
   pictures_[i] = {};
   pictures_[i].iPicWidth = configurations_[i].width;
   pictures_[i].iPicHeight = configurations_[i].height;
   pictures_[i].iColorFormat = EVideoFormatType::videoFormatI420;
   pictures_[i].uiTimeStamp = input_frame.ntp_time_ms();
#ifdef WEBRTC_ENCODER_PSNR_STATS
   pictures_[i].bPsnrY = calculate_psnr;
   pictures_[i].bPsnrU = calculate_psnr;
   pictures_[i].bPsnrV = calculate_psnr;
#endif
   // Downscale images on second and ongoing layers.
   if (i == 0) {
     // Layer 0 points straight at the (converted) input planes.
     pictures_[i].iStride[0] = frame_buffer->StrideY();
     pictures_[i].iStride[1] = frame_buffer->StrideU();
     pictures_[i].iStride[2] = frame_buffer->StrideV();
     pictures_[i].pData[0] = const_cast<uint8_t*>(frame_buffer->DataY());
     pictures_[i].pData[1] = const_cast<uint8_t*>(frame_buffer->DataU());
     pictures_[i].pData[2] = const_cast<uint8_t*>(frame_buffer->DataV());
   } else {
     pictures_[i].iStride[0] = downscaled_buffers_[i - 1]->StrideY();
     pictures_[i].iStride[1] = downscaled_buffers_[i - 1]->StrideU();
     pictures_[i].iStride[2] = downscaled_buffers_[i - 1]->StrideV();
     pictures_[i].pData[0] =
         const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataY());
     pictures_[i].pData[1] =
         const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataU());
     pictures_[i].pData[2] =
         const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataV());
     // Scale the image down a number of times by downsampling factor.
     libyuv::I420Scale(pictures_[i - 1].pData[0], pictures_[i - 1].iStride[0],
                       pictures_[i - 1].pData[1], pictures_[i - 1].iStride[1],
                       pictures_[i - 1].pData[2], pictures_[i - 1].iStride[2],
                       configurations_[i - 1].width,
                       configurations_[i - 1].height, pictures_[i].pData[0],
                       pictures_[i].iStride[0], pictures_[i].pData[1],
                       pictures_[i].iStride[1], pictures_[i].pData[2],
                       pictures_[i].iStride[2], configurations_[i].width,
                       configurations_[i].height, libyuv::kFilterBox);
   }

   if (!configurations_[i].sending) {
     continue;
   }
   // NOTE(review): the skip check below indexes `frame_types` by encoder
   // index `i`, while the key-frame check further down uses
   // `simulcast_idx` — confirm this asymmetry is intentional.
   if (frame_types != nullptr && i < frame_types->size()) {
     // Skip frame?
     if ((*frame_types)[i] == VideoFrameType::kEmptyFrame) {
       continue;
     }
   }
   // Send a key frame either when this layer is configured to require one
   // or we have explicitly been asked to.
   const size_t simulcast_idx =
       static_cast<size_t>(configurations_[i].simulcast_idx);
   bool send_key_frame =
       is_keyframe_needed ||
       (frame_types && simulcast_idx < frame_types->size() &&
        (*frame_types)[simulcast_idx] == VideoFrameType::kVideoFrameKey);
   if (send_key_frame) {
     // API doc says ForceIntraFrame(false) does nothing, but calling this
     // function forces a key frame regardless of the `bIDR` argument's value.
     // (If every frame is a key frame we get lag/delays.)
     encoders_[i]->ForceIntraFrame(true);
     configurations_[i].key_frame_request = false;
   }
   // EncodeFrame output.
   SFrameBSInfo info;
   memset(&info, 0, sizeof(SFrameBSInfo));

   std::vector<ScalableVideoController::LayerFrameConfig> layer_frames;
   if (svc_controllers_[i]) {
     layer_frames = svc_controllers_[i]->NextFrameConfig(send_key_frame);
     RTC_CHECK_EQ(layer_frames.size(), 1);
   }

   // Encode!
   int enc_ret = encoders_[i]->EncodeFrame(&pictures_[i], &info);
   if (enc_ret != 0) {
     RTC_LOG(LS_ERROR)
         << "OpenH264 frame encoding failed, EncodeFrame returned " << enc_ret
         << ".";
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }

   encoded_images_[i]._encodedWidth = configurations_[i].width;
   encoded_images_[i]._encodedHeight = configurations_[i].height;
   encoded_images_[i].SetRtpTimestamp(input_frame.rtp_timestamp());
   encoded_images_[i].SetColorSpace(input_frame.color_space());
   encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
   encoded_images_[i].SetSimulcastIndex(configurations_[i].simulcast_idx);

   // Split encoded image up into fragments. This also updates
   // `encoded_image_`.
   RtpFragmentize(&encoded_images_[i], &info);

   // Encoder can skip frames to save bandwidth in which case
   // `encoded_images_[i]._length` == 0.
   if (encoded_images_[i].size() > 0) {
     // Parse QP.
     h264_bitstream_parser_.ParseBitstream(encoded_images_[i]);
     encoded_images_[i].qp_ =
         h264_bitstream_parser_.GetLastSliceQp().value_or(-1);
#ifdef WEBRTC_ENCODER_PSNR_STATS
     if (calculate_psnr) {
       encoded_images_[i].set_psnr(EncodedImage::Psnr({
           .y = info.sLayerInfo[info.iLayerNum - 1].rPsnr[0],
           .u = info.sLayerInfo[info.iLayerNum - 1].rPsnr[1],
           .v = info.sLayerInfo[info.iLayerNum - 1].rPsnr[2],
       }));
     } else {
       encoded_images_[i].set_psnr(std::nullopt);
     }
#endif

     // Deliver encoded image.
     CodecSpecificInfo codec_specific;
     codec_specific.codecType = kVideoCodecH264;
     codec_specific.codecSpecific.H264.packetization_mode =
         packetization_mode_;
     codec_specific.codecSpecific.H264.temporal_idx = kNoTemporalIdx;
     codec_specific.codecSpecific.H264.idr_frame =
         info.eFrameType == videoFrameTypeIDR;
     codec_specific.codecSpecific.H264.base_layer_sync = false;
     if (configurations_[i].num_temporal_layers > 1) {
       const uint8_t tid = info.sLayerInfo[0].uiTemporalId;
       codec_specific.codecSpecific.H264.temporal_idx = tid;
       codec_specific.codecSpecific.H264.base_layer_sync =
           tid > 0 && tid < tl0sync_limit_[i];
       if (svc_controllers_[i]) {
         if (encoded_images_[i]._frameType == VideoFrameType::kVideoFrameKey) {
           // Reset the ScalableVideoController on key frame
           // to reset the expected dependency structure.
           layer_frames =
               svc_controllers_[i]->NextFrameConfig(/* restart= */ true);
           RTC_CHECK_EQ(layer_frames.size(), 1);
           RTC_DCHECK_EQ(layer_frames[0].TemporalId(), 0);
           RTC_DCHECK_EQ(layer_frames[0].IsKeyframe(), true);
         }

         // If the encoder's temporal id disagrees with the controller's
         // expectation, drop this frame rather than mislabel it.
         if (layer_frames[0].TemporalId() != tid) {
           RTC_LOG(LS_WARNING)
               << "Encoder produced a frame with temporal id " << tid
               << ", expected " << layer_frames[0].TemporalId() << ".";
           continue;
         }
         encoded_images_[i].SetTemporalIndex(tid);
       }
       // Track the lowest temporal id seen since the last TL0 frame so
       // base_layer_sync is only set once per sync point.
       if (codec_specific.codecSpecific.H264.base_layer_sync) {
         tl0sync_limit_[i] = tid;
       }
       if (tid == 0) {
         tl0sync_limit_[i] = configurations_[i].num_temporal_layers;
       }
     }
     if (svc_controllers_[i]) {
       codec_specific.generic_frame_info =
           svc_controllers_[i]->OnEncodeDone(layer_frames[0]);
       if (encoded_images_[i]._frameType == VideoFrameType::kVideoFrameKey &&
           codec_specific.generic_frame_info.has_value()) {
         codec_specific.template_structure =
             svc_controllers_[i]->DependencyStructure();
       }
       codec_specific.scalability_mode = scalability_modes_[i];
     }
     encoded_image_callback_->OnEncodedImage(encoded_images_[i],
                                             &codec_specific);
   }
 }
 return WEBRTC_VIDEO_CODEC_OK;
}
    647 
    648 // Initialization parameters.
    649 // There are two ways to initialize. There is SEncParamBase (cleared with
    650 // memset(&p, 0, sizeof(SEncParamBase)) used in Initialize, and SEncParamExt
    651 // which is a superset of SEncParamBase (cleared with GetDefaultParams) used
    652 // in InitializeExt.
    653 SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const {
    654  SEncParamExt encoder_params;
    655  encoders_[i]->GetDefaultParams(&encoder_params);
    656  if (codec_.mode == VideoCodecMode::kRealtimeVideo) {
    657    encoder_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
    658  } else if (codec_.mode == VideoCodecMode::kScreensharing) {
    659    encoder_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
    660  } else {
    661    RTC_DCHECK_NOTREACHED();
    662  }
    663  encoder_params.iPicWidth = configurations_[i].width;
    664  encoder_params.iPicHeight = configurations_[i].height;
    665  encoder_params.iTargetBitrate = configurations_[i].target_bps;
    666  // Keep unspecified. WebRTC's max codec bitrate is not the same setting
    667  // as OpenH264's iMaxBitrate. More details in https://crbug.com/webrtc/11543
    668  encoder_params.iMaxBitrate = UNSPECIFIED_BIT_RATE;
    669  // Rate Control mode
    670  encoder_params.iRCMode = RC_BITRATE_MODE;
    671  encoder_params.fMaxFrameRate = configurations_[i].max_frame_rate;
    672 
    673  // The following parameters are extension parameters (they're in SEncParamExt,
    674  // not in SEncParamBase).
    675  encoder_params.bEnableFrameSkip = configurations_[i].frame_dropping_on;
    676  // `uiIntraPeriod`    - multiple of GOP size
    677  // `keyFrameInterval` - number of frames
    678  encoder_params.uiIntraPeriod = configurations_[i].key_frame_interval;
    679  // Reuse SPS id if possible. This helps to avoid reset of chromium HW decoder
    680  // on each key-frame.
    681  // Note that WebRTC resets encoder on resolution change which makes all
    682  // EParameterSetStrategy modes except INCREASING_ID (default) essentially
    683  // equivalent to CONSTANT_ID.
    684  encoder_params.eSpsPpsIdStrategy = SPS_LISTING;
    685  encoder_params.uiMaxNalSize = 0;
    686  // Threading model: use auto.
    687  //  0: auto (dynamic imp. internal encoder)
    688  //  1: single thread (default value)
    689  // >1: number of threads
    690  encoder_params.iMultipleThreadIdc =
    691      NumberOfThreads(encoder_thread_limit_, encoder_params.iPicWidth,
    692                      encoder_params.iPicHeight, number_of_cores_);
    693  // The base spatial layer 0 is the only one we use.
    694  encoder_params.sSpatialLayers[0].iVideoWidth = encoder_params.iPicWidth;
    695  encoder_params.sSpatialLayers[0].iVideoHeight = encoder_params.iPicHeight;
    696  encoder_params.sSpatialLayers[0].fFrameRate = encoder_params.fMaxFrameRate;
    697  encoder_params.sSpatialLayers[0].iSpatialBitrate =
    698      encoder_params.iTargetBitrate;
    699  encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
    700      encoder_params.iMaxBitrate;
    701  encoder_params.iTemporalLayerNum = configurations_[i].num_temporal_layers;
    702  if (encoder_params.iTemporalLayerNum > 1) {
    703    // iNumRefFrame specifies total number of reference buffers to allocate.
    704    // For N temporal layers we need at least (N - 1) buffers to store last
    705    // encoded frames of all reference temporal layers.
    706    // Note that there is no API in OpenH264 encoder to specify exact set of
    707    // references to be used to prediction of a given frame. Encoder can
    708    // theoretically use all available reference buffers.
    709    encoder_params.iNumRefFrame = encoder_params.iTemporalLayerNum - 1;
    710  }
    711  RTC_LOG(LS_INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
    712                   << OPENH264_MINOR;
    713  switch (packetization_mode_) {
    714    case H264PacketizationMode::SingleNalUnit:
    715      // Limit the size of the packets produced.
    716      encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
    717      encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
    718          SM_SIZELIMITED_SLICE;
    719      encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint =
    720          static_cast<unsigned int>(max_payload_size_);
    721      RTC_LOG(LS_INFO) << "Encoder is configured with NALU constraint: "
    722                       << max_payload_size_ << " bytes";
    723      break;
    724    case H264PacketizationMode::NonInterleaved:
    725      // When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto
    726      // design it with cpu core number.
    727      // TODO(sprang): Set to 0 when we understand why the rate controller borks
    728      //               when uiSliceNum > 1.
    729      encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
    730      encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
    731          SM_FIXEDSLCNUM_SLICE;
    732      break;
    733  }
    734  return encoder_params;
    735 }
    736 
    737 void H264EncoderImpl::ReportInit() {
    738  if (has_reported_init_)
    739    return;
    740  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
    741                            kH264EncoderEventInit, kH264EncoderEventMax);
    742  has_reported_init_ = true;
    743 }
    744 
    745 void H264EncoderImpl::ReportError() {
    746  if (has_reported_error_)
    747    return;
    748  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
    749                            kH264EncoderEventError, kH264EncoderEventMax);
    750  has_reported_error_ = true;
    751 }
    752 
    753 VideoEncoder::EncoderInfo H264EncoderImpl::GetEncoderInfo() const {
    754  EncoderInfo info;
    755  info.supports_native_handle = false;
    756  info.implementation_name = "OpenH264";
    757  info.scaling_settings =
    758      VideoEncoder::ScalingSettings(kLowH264QpThreshold, kHighH264QpThreshold);
    759  if (!configurations_.empty()) {
    760    info.mapped_resolution = VideoEncoder::Resolution(
    761        configurations_.back().width, configurations_.back().height);
    762  }
    763  info.is_hardware_accelerated = false;
    764  info.supports_simulcast = true;
    765  info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420};
    766  return info;
    767 }
    768 
    769 void H264EncoderImpl::LayerConfig::SetStreamState(bool send_stream) {
    770  if (send_stream && !sending) {
    771    // Need a key frame if we have not sent this stream before.
    772    key_frame_request = true;
    773  }
    774  sending = send_stream;
    775 }
    776 
    777 }  // namespace webrtc
    778 
    779 #endif  // WEBRTC_USE_H264