tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

h264_decoder_impl.cc (26775B)


      1 /*
      2 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
      3 *
      4 *  Use of this source code is governed by a BSD-style license
      5 *  that can be found in the LICENSE file in the root of the source
      6 *  tree. An additional intellectual property rights grant can be found
      7 *  in the file PATENTS.  All contributing project authors may
      8 *  be found in the AUTHORS file in the root of the source tree.
      9 *
     10 */
     11 
     12 // Everything declared/defined in this file is only required when WebRTC is
     13 // built with H264 support, please do not move anything out of the
     14 // #ifdef unless needed and tested.
     15 #ifdef WEBRTC_USE_H264
     16 
     17 #include "modules/video_coding/codecs/h264/h264_decoder_impl.h"
     18 
     19 extern "C" {
     20 #include <libavcodec/avcodec.h>
     21 #include <libavformat/avformat.h>
     22 #include <libavutil/imgutils.h>
     23 }  // extern "C"
     24 
     25 #include <algorithm>
     26 #include <array>
     27 #include <cstddef>
     28 #include <cstdint>
     29 #include <limits>
     30 #include <memory>
     31 #include <optional>
     32 
     33 #include "api/scoped_refptr.h"
     34 #include "api/video/color_space.h"
     35 #include "api/video/encoded_image.h"
     36 #include "api/video/i010_buffer.h"
     37 #include "api/video/i210_buffer.h"
     38 #include "api/video/i410_buffer.h"
     39 #include "api/video/i420_buffer.h"
     40 #include "api/video/i422_buffer.h"
     41 #include "api/video/i444_buffer.h"
     42 #include "api/video/render_resolution.h"
     43 #include "api/video/video_codec_type.h"
     44 #include "api/video/video_frame.h"
     45 #include "api/video/video_frame_buffer.h"
     46 #include "api/video/video_rotation.h"
     47 #include "api/video_codecs/video_decoder.h"
     48 #include "common_video/include/video_frame_buffer.h"
     49 #include "modules/video_coding/codecs/h264/h264_color_space.h"
     50 #include "modules/video_coding/include/video_error_codes.h"
     51 #include "rtc_base/checks.h"
     52 #include "rtc_base/logging.h"
     53 #include "system_wrappers/include/metrics.h"
     54 
     55 namespace webrtc {
     56 
     57 namespace {
     58 
     59 constexpr std::array<AVPixelFormat, 9> kPixelFormatsSupported = {
     60    AV_PIX_FMT_YUV420P,     AV_PIX_FMT_YUV422P,     AV_PIX_FMT_YUV444P,
     61    AV_PIX_FMT_YUVJ420P,    AV_PIX_FMT_YUVJ422P,    AV_PIX_FMT_YUVJ444P,
     62    AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV444P10LE};
     63 const size_t kYPlaneIndex = 0;
     64 const size_t kUPlaneIndex = 1;
     65 const size_t kVPlaneIndex = 2;
     66 
     67 // Used by histograms. Values of entries should not be changed.
     68 enum H264DecoderImplEvent {
     69  kH264DecoderEventInit = 0,
     70  kH264DecoderEventError = 1,
     71  kH264DecoderEventMax = 16,
     72 };
     73 
     74 struct ScopedPtrAVFreePacket {
     75  void operator()(AVPacket* packet) { av_packet_free(&packet); }
     76 };
     77 typedef std::unique_ptr<AVPacket, ScopedPtrAVFreePacket> ScopedAVPacket;
     78 
     79 ScopedAVPacket MakeScopedAVPacket() {
     80  ScopedAVPacket packet(av_packet_alloc());
     81  return packet;
     82 }
     83 
     84 }  // namespace
     85 
     86 int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
     87                                  AVFrame* av_frame,
     88                                  int flags) {
     89  // Set in `Configure`.
     90  H264DecoderImpl* decoder = static_cast<H264DecoderImpl*>(context->opaque);
     91  // DCHECK values set in `Configure`.
     92  RTC_DCHECK(decoder);
     93  // Necessary capability to be allowed to provide our own buffers.
     94  RTC_DCHECK(context->codec->capabilities | AV_CODEC_CAP_DR1);
     95 
     96  auto pixelFormatSupported = std::find_if(
     97      kPixelFormatsSupported.begin(), kPixelFormatsSupported.end(),
     98      [context](AVPixelFormat format) { return context->pix_fmt == format; });
     99 
    100  if (pixelFormatSupported == kPixelFormatsSupported.end()) {
    101    RTC_LOG(LS_ERROR) << "Unsupported pixel format: " << context->pix_fmt;
    102    decoder->ReportError();
    103    return -1;
    104  }
    105 
    106  // `av_frame->width` and `av_frame->height` are set by FFmpeg. These are the
    107  // actual image's dimensions and may be different from `context->width` and
    108  // `context->coded_width` due to reordering.
    109  int width = av_frame->width;
    110  int height = av_frame->height;
    111  // See `lowres`, if used the decoder scales the image by 1/2^(lowres). This
    112  // has implications on which resolutions are valid, but we don't use it.
    113  RTC_CHECK_EQ(context->lowres, 0);
    114  // Adjust the `width` and `height` to values acceptable by the decoder.
    115  // Without this, FFmpeg may overflow the buffer. If modified, `width` and/or
    116  // `height` are larger than the actual image and the image has to be cropped
    117  // (top-left corner) after decoding to avoid visible borders to the right and
    118  // bottom of the actual image.
    119  avcodec_align_dimensions(context, &width, &height);
    120 
    121  RTC_CHECK_GE(width, 0);
    122  RTC_CHECK_GE(height, 0);
    123  int ret = av_image_check_size(static_cast<unsigned int>(width),
    124                                static_cast<unsigned int>(height), 0, nullptr);
    125  if (ret < 0) {
    126    RTC_LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
    127    decoder->ReportError();
    128    return ret;
    129  }
    130 
    131  // The video frame is stored in `frame_buffer`. `av_frame` is FFmpeg's version
    132  // of a video frame and will be set up to reference `frame_buffer`'s data.
    133 
    134  // FFmpeg expects the initial allocation to be zero-initialized according to
    135  // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
    136  // TODO(https://crbug.com/390941): Delete that feature from the video pool,
    137  // instead add an explicit call to InitializeData here.
    138  scoped_refptr<PlanarYuvBuffer> frame_buffer;
    139  scoped_refptr<I444Buffer> i444_buffer;
    140  scoped_refptr<I420Buffer> i420_buffer;
    141  scoped_refptr<I422Buffer> i422_buffer;
    142  scoped_refptr<I010Buffer> i010_buffer;
    143  scoped_refptr<I210Buffer> i210_buffer;
    144  scoped_refptr<I410Buffer> i410_buffer;
    145  int bytes_per_pixel = 1;
    146  switch (context->pix_fmt) {
    147    case AV_PIX_FMT_YUV420P:
    148    case AV_PIX_FMT_YUVJ420P:
    149      i420_buffer =
    150          decoder->ffmpeg_buffer_pool_.CreateI420Buffer(width, height);
    151      // Set `av_frame` members as required by FFmpeg.
    152      av_frame->data[kYPlaneIndex] = i420_buffer->MutableDataY();
    153      av_frame->linesize[kYPlaneIndex] = i420_buffer->StrideY();
    154      av_frame->data[kUPlaneIndex] = i420_buffer->MutableDataU();
    155      av_frame->linesize[kUPlaneIndex] = i420_buffer->StrideU();
    156      av_frame->data[kVPlaneIndex] = i420_buffer->MutableDataV();
    157      av_frame->linesize[kVPlaneIndex] = i420_buffer->StrideV();
    158      RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
    159      frame_buffer = i420_buffer;
    160      break;
    161    case AV_PIX_FMT_YUV444P:
    162    case AV_PIX_FMT_YUVJ444P:
    163      i444_buffer =
    164          decoder->ffmpeg_buffer_pool_.CreateI444Buffer(width, height);
    165      // Set `av_frame` members as required by FFmpeg.
    166      av_frame->data[kYPlaneIndex] = i444_buffer->MutableDataY();
    167      av_frame->linesize[kYPlaneIndex] = i444_buffer->StrideY();
    168      av_frame->data[kUPlaneIndex] = i444_buffer->MutableDataU();
    169      av_frame->linesize[kUPlaneIndex] = i444_buffer->StrideU();
    170      av_frame->data[kVPlaneIndex] = i444_buffer->MutableDataV();
    171      av_frame->linesize[kVPlaneIndex] = i444_buffer->StrideV();
    172      frame_buffer = i444_buffer;
    173      break;
    174    case AV_PIX_FMT_YUV422P:
    175    case AV_PIX_FMT_YUVJ422P:
    176      i422_buffer =
    177          decoder->ffmpeg_buffer_pool_.CreateI422Buffer(width, height);
    178      // Set `av_frame` members as required by FFmpeg.
    179      av_frame->data[kYPlaneIndex] = i422_buffer->MutableDataY();
    180      av_frame->linesize[kYPlaneIndex] = i422_buffer->StrideY();
    181      av_frame->data[kUPlaneIndex] = i422_buffer->MutableDataU();
    182      av_frame->linesize[kUPlaneIndex] = i422_buffer->StrideU();
    183      av_frame->data[kVPlaneIndex] = i422_buffer->MutableDataV();
    184      av_frame->linesize[kVPlaneIndex] = i422_buffer->StrideV();
    185      frame_buffer = i422_buffer;
    186      break;
    187    case AV_PIX_FMT_YUV420P10LE:
    188      i010_buffer =
    189          decoder->ffmpeg_buffer_pool_.CreateI010Buffer(width, height);
    190      // Set `av_frame` members as required by FFmpeg.
    191      av_frame->data[kYPlaneIndex] =
    192          reinterpret_cast<uint8_t*>(i010_buffer->MutableDataY());
    193      av_frame->linesize[kYPlaneIndex] = i010_buffer->StrideY() * 2;
    194      av_frame->data[kUPlaneIndex] =
    195          reinterpret_cast<uint8_t*>(i010_buffer->MutableDataU());
    196      av_frame->linesize[kUPlaneIndex] = i010_buffer->StrideU() * 2;
    197      av_frame->data[kVPlaneIndex] =
    198          reinterpret_cast<uint8_t*>(i010_buffer->MutableDataV());
    199      av_frame->linesize[kVPlaneIndex] = i010_buffer->StrideV() * 2;
    200      frame_buffer = i010_buffer;
    201      bytes_per_pixel = 2;
    202      break;
    203    case AV_PIX_FMT_YUV422P10LE:
    204      i210_buffer =
    205          decoder->ffmpeg_buffer_pool_.CreateI210Buffer(width, height);
    206      // Set `av_frame` members as required by FFmpeg.
    207      av_frame->data[kYPlaneIndex] =
    208          reinterpret_cast<uint8_t*>(i210_buffer->MutableDataY());
    209      av_frame->linesize[kYPlaneIndex] = i210_buffer->StrideY() * 2;
    210      av_frame->data[kUPlaneIndex] =
    211          reinterpret_cast<uint8_t*>(i210_buffer->MutableDataU());
    212      av_frame->linesize[kUPlaneIndex] = i210_buffer->StrideU() * 2;
    213      av_frame->data[kVPlaneIndex] =
    214          reinterpret_cast<uint8_t*>(i210_buffer->MutableDataV());
    215      av_frame->linesize[kVPlaneIndex] = i210_buffer->StrideV() * 2;
    216      frame_buffer = i210_buffer;
    217      bytes_per_pixel = 2;
    218      break;
    219    case AV_PIX_FMT_YUV444P10LE:
    220      i410_buffer =
    221          decoder->ffmpeg_buffer_pool_.CreateI410Buffer(width, height);
    222      // Set `av_frame` members as required by FFmpeg.
    223      av_frame->data[kYPlaneIndex] =
    224          reinterpret_cast<uint8_t*>(i410_buffer->MutableDataY());
    225      av_frame->linesize[kYPlaneIndex] = i410_buffer->StrideY() * 2;
    226      av_frame->data[kUPlaneIndex] =
    227          reinterpret_cast<uint8_t*>(i410_buffer->MutableDataU());
    228      av_frame->linesize[kUPlaneIndex] = i410_buffer->StrideU() * 2;
    229      av_frame->data[kVPlaneIndex] =
    230          reinterpret_cast<uint8_t*>(i410_buffer->MutableDataV());
    231      av_frame->linesize[kVPlaneIndex] = i410_buffer->StrideV() * 2;
    232      frame_buffer = i410_buffer;
    233      bytes_per_pixel = 2;
    234      break;
    235    default:
    236      RTC_LOG(LS_ERROR) << "Unsupported buffer type " << context->pix_fmt
    237                        << ". Check supported supported pixel formats!";
    238      decoder->ReportError();
    239      return -1;
    240  }
    241 
    242  int y_size = width * height * bytes_per_pixel;
    243  int uv_size = frame_buffer->ChromaWidth() * frame_buffer->ChromaHeight() *
    244                bytes_per_pixel;
    245  // DCHECK that we have a continuous buffer as is required.
    246  RTC_DCHECK_EQ(av_frame->data[kUPlaneIndex],
    247                av_frame->data[kYPlaneIndex] + y_size);
    248  RTC_DCHECK_EQ(av_frame->data[kVPlaneIndex],
    249                av_frame->data[kUPlaneIndex] + uv_size);
    250  int total_size = y_size + 2 * uv_size;
    251 
    252  av_frame->format = context->pix_fmt;
    253 
    254  // Create a VideoFrame object, to keep a reference to the buffer.
    255  // TODO(nisse): The VideoFrame's timestamp and rotation info is not used.
    256  // Refactor to do not use a VideoFrame object at all.
    257  av_frame->buf[0] = av_buffer_create(
    258      av_frame->data[kYPlaneIndex], total_size, AVFreeBuffer2,
    259      static_cast<void*>(
    260          std::make_unique<VideoFrame>(VideoFrame::Builder()
    261                                           .set_video_frame_buffer(frame_buffer)
    262                                           .set_rotation(kVideoRotation_0)
    263                                           .set_timestamp_us(0)
    264                                           .build())
    265              .release()),
    266      0);
    267  RTC_CHECK(av_frame->buf[0]);
    268  return 0;
    269 }
    270 
    271 void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
    272  // The buffer pool recycles the buffer used by `video_frame` when there are no
    273  // more references to it. `video_frame` is a thin buffer holder and is not
    274  // recycled.
    275  VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
    276  delete video_frame;
    277 }
    278 
// Constructs an unconfigured decoder. The `true` passed to the buffer pool
// presumably enables zero-initialization of new buffers, which FFmpeg expects
// for the initial allocation (see the crbug.com/390941 note in AVGetBuffer2).
// No callback is registered yet; Configure() must be called before Decode().
H264DecoderImpl::H264DecoderImpl()
    : ffmpeg_buffer_pool_(true),
      decoded_image_callback_(nullptr),
      has_reported_init_(false),
      has_reported_error_(false) {}
    284 
// Releases the FFmpeg codec context and scratch frame (if any).
H264DecoderImpl::~H264DecoderImpl() {
  Release();
}
    288 
    289 bool H264DecoderImpl::Configure(const Settings& settings) {
    290  ReportInit();
    291  if (settings.codec_type() != kVideoCodecH264) {
    292    ReportError();
    293    return false;
    294  }
    295 
    296  // Release necessary in case of re-initializing.
    297  int32_t ret = Release();
    298  if (ret != WEBRTC_VIDEO_CODEC_OK) {
    299    ReportError();
    300    return false;
    301  }
    302  RTC_DCHECK(!av_context_);
    303 
    304  // Initialize AVCodecContext.
    305  av_context_.reset(avcodec_alloc_context3(nullptr));
    306 
    307  av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
    308  av_context_->codec_id = AV_CODEC_ID_H264;
    309  const RenderResolution& resolution = settings.max_render_resolution();
    310  if (resolution.Valid()) {
    311    av_context_->coded_width = resolution.Width();
    312    av_context_->coded_height = resolution.Height();
    313  }
    314  av_context_->extradata = nullptr;
    315  av_context_->extradata_size = 0;
    316 
    317  // If this is ever increased, look at `av_context_->thread_safe_callbacks` and
    318  // make it possible to disable the thread checker in the frame buffer pool.
    319  av_context_->thread_count = 1;
    320  av_context_->thread_type = FF_THREAD_SLICE;
    321 
    322  // Function used by FFmpeg to get buffers to store decoded frames in.
    323  av_context_->get_buffer2 = AVGetBuffer2;
    324  // `get_buffer2` is called with the context, there `opaque` can be used to get
    325  // a pointer `this`.
    326  av_context_->opaque = this;
    327 
    328  const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
    329  if (!codec) {
    330    // This is an indication that FFmpeg has not been initialized or it has not
    331    // been compiled/initialized with the correct set of codecs.
    332    RTC_LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
    333    Release();
    334    ReportError();
    335    return false;
    336  }
    337  int res = avcodec_open2(av_context_.get(), codec, nullptr);
    338  if (res < 0) {
    339    RTC_LOG(LS_ERROR) << "avcodec_open2 error: " << res;
    340    Release();
    341    ReportError();
    342    return false;
    343  }
    344 
    345  av_frame_.reset(av_frame_alloc());
    346 
    347  if (std::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
    348    if (!ffmpeg_buffer_pool_.Resize(*buffer_pool_size)) {
    349      return false;
    350    }
    351  }
    352  return true;
    353 }
    354 
// Frees the codec context (closing the codec) and the scratch frame. Safe to
// call when already released or never configured; always succeeds.
int32_t H264DecoderImpl::Release() {
  av_context_.reset();
  av_frame_.reset();
  return WEBRTC_VIDEO_CODEC_OK;
}
    360 
// Stores the sink that receives decoded frames from Decode(). Must be set
// (non-null) before Decode() is called; the pointer is not owned.
int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  decoded_image_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
    366 
// Decodes one encoded image: sends `input_image` to FFmpeg as a single
// packet, receives one decoded frame (backed by pool memory via
// AVGetBuffer2), validates that FFmpeg's possibly-cropped plane pointers stay
// inside the allocated buffer, wraps the cropped planes in a new frame buffer
// and delivers the result through `decoded_image_callback_`. Returns a
// WEBRTC_VIDEO_CODEC_* status code.
int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
                               bool /*missing_frames*/,
                               int64_t /*render_time_ms*/) {
  if (!IsInitialized()) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!decoded_image_callback_) {
    RTC_LOG(LS_WARNING)
        << "Configure() has been called, but a callback function "
           "has not been set with RegisterDecodeCompleteCallback()";
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!input_image.data() || !input_image.size()) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  // Wrap the encoded payload in an AVPacket. The packet does not own the
  // data, so `input_image` must outlive the avcodec_send_packet call.
  ScopedAVPacket packet = MakeScopedAVPacket();
  if (!packet) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  // packet.data has a non-const type, but isn't modified by
  // avcodec_send_packet.
  packet->data = const_cast<uint8_t*>(input_image.data());
  // AVPacket::size is an int; reject payloads that would overflow it.
  if (input_image.size() >
      static_cast<size_t>(std::numeric_limits<int>::max())) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  packet->size = static_cast<int>(input_image.size());

  int result = avcodec_send_packet(av_context_.get(), packet.get());

  if (result < 0) {
    RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // Exactly one frame is expected per packet here; a failed receive (e.g.
  // EAGAIN) is treated as a decode error.
  result = avcodec_receive_frame(av_context_.get(), av_frame_.get());
  if (result < 0) {
    RTC_LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
  h264_bitstream_parser_.ParseBitstream(input_image);
  std::optional<int> qp = h264_bitstream_parser_.GetLastSliceQp();

  // Obtain the `video_frame` containing the decoded image. It was attached as
  // the AVBuffer opaque in AVGetBuffer2.
  VideoFrame* input_frame =
      static_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
  RTC_DCHECK(input_frame);
  scoped_refptr<VideoFrameBuffer> frame_buffer =
      input_frame->video_frame_buffer();

  // Instantiate Planar YUV buffer according to video frame buffer type
  const PlanarYuvBuffer* planar_yuv_buffer = nullptr;
  const PlanarYuv8Buffer* planar_yuv8_buffer = nullptr;
  const PlanarYuv16BBuffer* planar_yuv16_buffer = nullptr;
  VideoFrameBuffer::Type video_frame_buffer_type = frame_buffer->type();
  switch (video_frame_buffer_type) {
    case VideoFrameBuffer::Type::kI420:
      planar_yuv_buffer = frame_buffer->GetI420();
      planar_yuv8_buffer =
          reinterpret_cast<const PlanarYuv8Buffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI444:
      planar_yuv_buffer = frame_buffer->GetI444();
      planar_yuv8_buffer =
          reinterpret_cast<const PlanarYuv8Buffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI422:
      planar_yuv_buffer = frame_buffer->GetI422();
      planar_yuv8_buffer =
          reinterpret_cast<const PlanarYuv8Buffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI010:
      planar_yuv_buffer = frame_buffer->GetI010();
      planar_yuv16_buffer =
          reinterpret_cast<const PlanarYuv16BBuffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI210:
      planar_yuv_buffer = frame_buffer->GetI210();
      planar_yuv16_buffer =
          reinterpret_cast<const PlanarYuv16BBuffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI410:
      planar_yuv_buffer = frame_buffer->GetI410();
      planar_yuv16_buffer =
          reinterpret_cast<const PlanarYuv16BBuffer*>(planar_yuv_buffer);
      break;
    default:
      // If this code is changed to allow other video frame buffer type,
      // make sure that the code below which wraps I420/I422/I444 buffer and
      // code which converts to NV12 is changed
      // to work with new video frame buffer type

      RTC_LOG(LS_ERROR) << "frame_buffer type: "
                        << static_cast<int32_t>(video_frame_buffer_type)
                        << " is not supported!";
      ReportError();
      return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // When needed, FFmpeg applies cropping by moving plane pointers and adjusting
  // frame width/height. Ensure that cropped buffers lie within the allocated
  // memory.
  RTC_DCHECK_LE(av_frame_->width, planar_yuv_buffer->width());
  RTC_DCHECK_LE(av_frame_->height, planar_yuv_buffer->height());
  switch (video_frame_buffer_type) {
    case VideoFrameBuffer::Type::kI420:
    case VideoFrameBuffer::Type::kI444:
    case VideoFrameBuffer::Type::kI422: {
      // 8-bit formats: plane pointers are byte pointers, strides in bytes.
      RTC_DCHECK_GE(av_frame_->data[kYPlaneIndex], planar_yuv8_buffer->DataY());
      RTC_DCHECK_LE(
          av_frame_->data[kYPlaneIndex] +
              av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
          planar_yuv8_buffer->DataY() +
              planar_yuv8_buffer->StrideY() * planar_yuv8_buffer->height());
      RTC_DCHECK_GE(av_frame_->data[kUPlaneIndex], planar_yuv8_buffer->DataU());
      RTC_DCHECK_LE(
          av_frame_->data[kUPlaneIndex] +
              av_frame_->linesize[kUPlaneIndex] *
                  planar_yuv8_buffer->ChromaHeight(),
          planar_yuv8_buffer->DataU() + planar_yuv8_buffer->StrideU() *
                                            planar_yuv8_buffer->ChromaHeight());
      RTC_DCHECK_GE(av_frame_->data[kVPlaneIndex], planar_yuv8_buffer->DataV());
      RTC_DCHECK_LE(
          av_frame_->data[kVPlaneIndex] +
              av_frame_->linesize[kVPlaneIndex] *
                  planar_yuv8_buffer->ChromaHeight(),
          planar_yuv8_buffer->DataV() + planar_yuv8_buffer->StrideV() *
                                            planar_yuv8_buffer->ChromaHeight());
      break;
    }
    case VideoFrameBuffer::Type::kI010:
    case VideoFrameBuffer::Type::kI210:
    case VideoFrameBuffer::Type::kI410: {
      // 10-bit formats: buffer strides are in 16-bit samples, hence the `* 2`
      // when computing byte extents.
      RTC_DCHECK_GE(
          av_frame_->data[kYPlaneIndex],
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()));
      RTC_DCHECK_LE(
          av_frame_->data[kYPlaneIndex] +
              av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()) +
              planar_yuv16_buffer->StrideY() * 2 *
                  planar_yuv16_buffer->height());
      RTC_DCHECK_GE(
          av_frame_->data[kUPlaneIndex],
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()));
      RTC_DCHECK_LE(
          av_frame_->data[kUPlaneIndex] +
              av_frame_->linesize[kUPlaneIndex] *
                  planar_yuv16_buffer->ChromaHeight(),
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()) +
              planar_yuv16_buffer->StrideU() * 2 *
                  planar_yuv16_buffer->ChromaHeight());
      RTC_DCHECK_GE(
          av_frame_->data[kVPlaneIndex],
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()));
      RTC_DCHECK_LE(
          av_frame_->data[kVPlaneIndex] +
              av_frame_->linesize[kVPlaneIndex] *
                  planar_yuv16_buffer->ChromaHeight(),
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()) +
              planar_yuv16_buffer->StrideV() * 2 *
                  planar_yuv16_buffer->ChromaHeight());
      break;
    }
    default:
      RTC_LOG(LS_ERROR) << "frame_buffer type: "
                        << static_cast<int32_t>(video_frame_buffer_type)
                        << " is not supported!";
      ReportError();
      return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // Wrap the (possibly cropped) planes in a non-owning buffer; the capturing
  // lambda keeps the underlying pooled buffer alive until the wrapper dies.
  scoped_refptr<VideoFrameBuffer> cropped_buffer;
  switch (video_frame_buffer_type) {
    case VideoFrameBuffer::Type::kI420:
      cropped_buffer = WrapI420Buffer(
          av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
          av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
          av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
          av_frame_->linesize[kVPlaneIndex],
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI444:
      cropped_buffer = WrapI444Buffer(
          av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
          av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
          av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
          av_frame_->linesize[kVPlaneIndex],
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI422:
      cropped_buffer = WrapI422Buffer(
          av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
          av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
          av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
          av_frame_->linesize[kVPlaneIndex],
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI010:
      cropped_buffer = WrapI010Buffer(
          av_frame_->width, av_frame_->height,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
          av_frame_->linesize[kYPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
          av_frame_->linesize[kUPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
          av_frame_->linesize[kVPlaneIndex] / 2,
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI210:
      cropped_buffer = WrapI210Buffer(
          av_frame_->width, av_frame_->height,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
          av_frame_->linesize[kYPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
          av_frame_->linesize[kUPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
          av_frame_->linesize[kVPlaneIndex] / 2,
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI410:
      cropped_buffer = WrapI410Buffer(
          av_frame_->width, av_frame_->height,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
          av_frame_->linesize[kYPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
          av_frame_->linesize[kUPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
          av_frame_->linesize[kVPlaneIndex] / 2,
          // To keep reference alive.
          [frame_buffer] {});
      break;
    default:
      RTC_LOG(LS_ERROR) << "frame_buffer type: "
                        << static_cast<int32_t>(video_frame_buffer_type)
                        << " is not supported!";
      ReportError();
      return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // Pass on color space from input frame if explicitly specified.
  const ColorSpace& color_space =
      input_image.ColorSpace() ? *input_image.ColorSpace()
                               : ExtractH264ColorSpace(av_context_.get());

  VideoFrame decoded_frame = VideoFrame::Builder()
                                 .set_video_frame_buffer(cropped_buffer)
                                 .set_rtp_timestamp(input_image.RtpTimestamp())
                                 .set_color_space(color_space)
                                 .build();

  // Return decoded frame.
  // TODO(nisse): Timestamp and rotation are all zero here. Change decoder
  // interface to pass a VideoFrameBuffer instead of a VideoFrame?
  decoded_image_callback_->Decoded(decoded_frame, std::nullopt, qp);

  // Stop referencing it, possibly freeing `input_frame`.
  av_frame_unref(av_frame_.get());
  input_frame = nullptr;

  return WEBRTC_VIDEO_CODEC_OK;
}
    644 
    645 const char* H264DecoderImpl::ImplementationName() const {
    646  return "FFmpeg";
    647 }
    648 
    649 bool H264DecoderImpl::IsInitialized() const {
    650  return av_context_ != nullptr;
    651 }
    652 
    653 void H264DecoderImpl::ReportInit() {
    654  if (has_reported_init_)
    655    return;
    656  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
    657                            kH264DecoderEventInit, kH264DecoderEventMax);
    658  has_reported_init_ = true;
    659 }
    660 
    661 void H264DecoderImpl::ReportError() {
    662  if (has_reported_error_)
    663    return;
    664  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
    665                            kH264DecoderEventError, kH264DecoderEventMax);
    666  has_reported_error_ = true;
    667 }
    668 
    669 }  // namespace webrtc
    670 
    671 #endif  // WEBRTC_USE_H264