tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

decode.c (74668B)


      1 /*
      2 * generic decoding-related code
      3 *
      4 * This file is part of FFmpeg.
      5 *
      6 * FFmpeg is free software; you can redistribute it and/or
      7 * modify it under the terms of the GNU Lesser General Public
      8 * License as published by the Free Software Foundation; either
      9 * version 2.1 of the License, or (at your option) any later version.
     10 *
     11 * FFmpeg is distributed in the hope that it will be useful,
     12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14 * Lesser General Public License for more details.
     15 *
     16 * You should have received a copy of the GNU Lesser General Public
     17 * License along with FFmpeg; if not, write to the Free Software
     18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
     19 */
     20 
     21 #include <stdint.h>
     22 #include <stdbool.h>
     23 #include <string.h>
     24 
     25 #include "config.h"
     26 
     27 #if CONFIG_ICONV
     28 # include <iconv.h>
     29 #endif
     30 
     31 #include "libavutil/avassert.h"
     32 #include "libavutil/channel_layout.h"
     33 #include "libavutil/common.h"
     34 #include "libavutil/emms.h"
     35 #include "libavutil/frame.h"
     36 #include "libavutil/hwcontext.h"
     37 #include "libavutil/imgutils.h"
     38 #include "libavutil/internal.h"
     39 #include "libavutil/mastering_display_metadata.h"
     40 #include "libavutil/mem.h"
     41 #include "libavutil/stereo3d.h"
     42 
     43 #include "avcodec.h"
     44 #include "avcodec_internal.h"
     45 #include "bytestream.h"
     46 #include "bsf.h"
     47 #include "codec_desc.h"
     48 #include "codec_internal.h"
     49 #include "decode.h"
     50 #include "hwaccel_internal.h"
     51 #include "hwconfig.h"
     52 #include "internal.h"
     53 #include "lcevcdec.h"
     54 #include "packet_internal.h"
     55 #include "progressframe.h"
     56 #include "libavutil/refstruct.h"
     57 #include "thread.h"
     58 #include "threadprogress.h"
     59 
/**
 * Private decoding state wrapped around the public AVCodecInternal.
 * AVCodecInternal must remain the first member so the two can be cast
 * into each other (see decode_ctx()).
 */
typedef struct DecodeContext {
    AVCodecInternal avci;

    /**
     * This is set to AV_FRAME_FLAG_KEY for decoders of intra-only formats
     * (those whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set)
     * to set the flag generically.
     */
    int intra_only_flag;

    /**
     * This is set to AV_PICTURE_TYPE_I for intra only video decoders
     * and to AV_PICTURE_TYPE_NONE for other decoders. It is used to set
     * the AVFrame's pict_type before the decoder receives it.
     */
    enum AVPictureType initial_pict_type;

    /* to prevent infinite loop on errors when draining */
    int nb_draining_errors;

    /**
     * The caller has submitted a NULL packet on input.
     */
    int draining_started;

    int64_t pts_correction_num_faulty_pts; ///< Number of incorrect PTS values so far
    int64_t pts_correction_num_faulty_dts; ///< Number of incorrect DTS values so far
    int64_t pts_correction_last_pts;       ///< PTS of the last frame
    int64_t pts_correction_last_dts;       ///< DTS of the last frame

    /**
     * Bitmask indicating for which side data types we prefer user-supplied
     * (global or attached to packets) side data over bytestream.
     */
    uint64_t side_data_pref_mask;

    FFLCEVCContext *lcevc;      ///< shared LCEVC enhancement decoder state
    int lcevc_frame;            ///< nonzero when LCEVC processing applies to frames
    int width;                  ///< base (pre-enhancement) width
    int height;                 ///< base (pre-enhancement) height
} DecodeContext;
    101 
    102 static DecodeContext *decode_ctx(AVCodecInternal *avci)
    103 {
    104    return (DecodeContext *)avci;
    105 }
    106 
/**
 * Apply AV_PKT_DATA_PARAM_CHANGE side data attached to a packet to the
 * codec context. Supported changes are the sample rate and the video
 * dimensions.
 *
 * Note the deliberate error policy: failures are logged, but only
 * propagated to the caller when AV_EF_EXPLODE is set in err_recognition;
 * otherwise 0 is returned and decoding continues with the old parameters.
 *
 * @return 0 on success or on a tolerated error, negative AVERROR otherwise
 */
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
{
    int ret;
    size_t size;
    const uint8_t *data;
    uint32_t flags;
    int64_t val;

    data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
    if (!data)
        return 0; // no parameter change requested for this packet

    if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
        av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
               "changes, but PARAM_CHANGE side data was sent to it.\n");
        ret = AVERROR(EINVAL);
        goto fail2;
    }

    if (size < 4)
        goto fail;

    // leading 32-bit little-endian flags word selects which fields follow
    flags = bytestream_get_le32(&data);
    size -= 4;

    if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
        if (size < 4)
            goto fail;
        val = bytestream_get_le32(&data);
        // value is an unsigned 32-bit field; must be positive and fit in int
        if (val <= 0 || val > INT_MAX) {
            av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
            ret = AVERROR_INVALIDDATA;
            goto fail2;
        }
        avctx->sample_rate = val;
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
        if (size < 8)
            goto fail;
        avctx->width  = bytestream_get_le32(&data);
        avctx->height = bytestream_get_le32(&data);
        size -= 8;
        // validates the new dimensions and updates coded_width/height too
        ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
        if (ret < 0)
            goto fail2;
    }

    return 0;
fail:
    av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
    ret = AVERROR_INVALIDDATA;
fail2:
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
    }
    return 0;
}
    167 
    168 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
    169 {
    170    int ret = 0;
    171 
    172    av_packet_unref(avci->last_pkt_props);
    173    if (pkt) {
    174        ret = av_packet_copy_props(avci->last_pkt_props, pkt);
    175 #if FF_API_FRAME_PKT
    176        if (!ret)
    177            avci->last_pkt_props->stream_index = pkt->size; // Needed for ff_decode_frame_props().
    178 #endif
    179    }
    180    return ret;
    181 }
    182 
    183 static int decode_bsfs_init(AVCodecContext *avctx)
    184 {
    185    AVCodecInternal *avci = avctx->internal;
    186    const FFCodec *const codec = ffcodec(avctx->codec);
    187    int ret;
    188 
    189    if (avci->bsf)
    190        return 0;
    191 
    192    ret = av_bsf_list_parse_str(codec->bsfs, &avci->bsf);
    193    if (ret < 0) {
    194        av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", codec->bsfs, av_err2str(ret));
    195        if (ret != AVERROR(ENOMEM))
    196            ret = AVERROR_BUG;
    197        goto fail;
    198    }
    199 
    200    /* We do not currently have an API for passing the input timebase into decoders,
    201     * but no filters used here should actually need it.
    202     * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
    203    avci->bsf->time_base_in = (AVRational){ 1, 90000 };
    204    ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
    205    if (ret < 0)
    206        goto fail;
    207 
    208    ret = av_bsf_init(avci->bsf);
    209    if (ret < 0)
    210        goto fail;
    211 
    212    return 0;
    213 fail:
    214    av_bsf_free(&avci->bsf);
    215    return ret;
    216 }
    217 
    218 #if !HAVE_THREADS
    219 #define ff_thread_get_packet(avctx, pkt) (AVERROR_BUG)
    220 #define ff_thread_receive_frame(avctx, frame) (AVERROR_BUG)
    221 #endif
    222 
    223 static int decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
    224 {
    225    AVCodecInternal *avci = avctx->internal;
    226    int ret;
    227 
    228    ret = av_bsf_receive_packet(avci->bsf, pkt);
    229    if (ret < 0)
    230        return ret;
    231 
    232    if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
    233        ret = extract_packet_props(avctx->internal, pkt);
    234        if (ret < 0)
    235            goto finish;
    236    }
    237 
    238    ret = apply_param_change(avctx, pkt);
    239    if (ret < 0)
    240        goto finish;
    241 
    242    return 0;
    243 finish:
    244    av_packet_unref(pkt);
    245    return ret;
    246 }
    247 
/**
 * Fetch the next packet for the decoder to consume.
 *
 * Worker threads pull from the frame-threading context; the main context
 * pulls from the bsf chain, feeding it from the user-submitted buffer_pkt
 * when the chain runs dry.
 *
 * @return 0 on success, AVERROR(EAGAIN) if more input is needed,
 *         AVERROR_EOF once draining has finished
 */
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeContext     *dc = decode_ctx(avci);

    if (avci->draining)
        return AVERROR_EOF;

    /* If we are a worker thread, get the next packet from the threading
     * context. Otherwise we are the main (user-facing) context, so we get the
     * next packet from the input filterchain.
     */
    if (avctx->internal->is_frame_mt)
        return ff_thread_get_packet(avctx, pkt);

    while (1) {
        int ret = decode_get_packet(avctx, pkt);
        // bsf needs input: feed it the buffered user packet (or a flush/NULL
        // packet once draining started), then retry
        if (ret == AVERROR(EAGAIN) &&
            (!AVPACKET_IS_EMPTY(avci->buffer_pkt) || dc->draining_started)) {
            ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
            if (ret >= 0)
                continue;

            // sending failed; drop the buffered packet and report the error
            av_packet_unref(avci->buffer_pkt);
        }

        if (ret == AVERROR_EOF)
            avci->draining = 1;
        return ret;
    }
}
    279 
    280 /**
    281 * Attempt to guess proper monotonic timestamps for decoded video frames
    282 * which might have incorrect times. Input timestamps may wrap around, in
    283 * which case the output will as well.
    284 *
    285 * @param pts the pts field of the decoded AVPacket, as passed through
    286 * AVFrame.pts
    287 * @param dts the dts field of the decoded AVPacket
    288 * @return one of the input values, may be AV_NOPTS_VALUE
    289 */
    290 static int64_t guess_correct_pts(DecodeContext *dc,
    291                                 int64_t reordered_pts, int64_t dts)
    292 {
    293    int64_t pts = AV_NOPTS_VALUE;
    294 
    295    if (dts != AV_NOPTS_VALUE) {
    296        dc->pts_correction_num_faulty_dts += dts <= dc->pts_correction_last_dts;
    297        dc->pts_correction_last_dts = dts;
    298    } else if (reordered_pts != AV_NOPTS_VALUE)
    299        dc->pts_correction_last_dts = reordered_pts;
    300 
    301    if (reordered_pts != AV_NOPTS_VALUE) {
    302        dc->pts_correction_num_faulty_pts += reordered_pts <= dc->pts_correction_last_pts;
    303        dc->pts_correction_last_pts = reordered_pts;
    304    } else if(dts != AV_NOPTS_VALUE)
    305        dc->pts_correction_last_pts = dts;
    306 
    307    if ((dc->pts_correction_num_faulty_pts<=dc->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
    308       && reordered_pts != AV_NOPTS_VALUE)
    309        pts = reordered_pts;
    310    else
    311        pts = dts;
    312 
    313    return pts;
    314 }
    315 
/**
 * Apply AV_FRAME_DATA_SKIP_SAMPLES side data to a decoded audio frame:
 * trim leading samples (accumulated in avci->skip_samples) and trailing
 * padding, adjusting timestamps/duration accordingly.
 *
 * With AV_CODEC_FLAG2_SKIP_MANUAL the trimming is left to the caller and
 * the skip information is exported as frame side data instead.
 *
 * @param discarded_samples incremented by the number of samples dropped
 * @return 0 on success, AVERROR(EAGAIN) if the whole frame was consumed
 *         by skipping/discarding and must not be returned to the caller
 */
static int discard_samples(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
{
    AVCodecInternal *avci = avctx->internal;
    AVFrameSideData *side;
    uint32_t discard_padding = 0;
    uint8_t skip_reason = 0;
    uint8_t discard_reason = 0;

    // side data layout: u32le skip, u32le discard, u8 skip reason, u8 discard reason
    side = av_frame_get_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES);
    if (side && side->size >= 10) {
        avci->skip_samples = AV_RL32(side->data);
        avci->skip_samples = FFMAX(0, avci->skip_samples);
        discard_padding = AV_RL32(side->data + 4);
        av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
               avci->skip_samples, (int)discard_padding);
        skip_reason = AV_RL8(side->data + 8);
        discard_reason = AV_RL8(side->data + 9);
    }

    if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
        // manual mode: re-export pending skip info on the frame and do not trim
        if (!side && (avci->skip_samples || discard_padding))
            side = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
        if (side && (avci->skip_samples || discard_padding)) {
            AV_WL32(side->data, avci->skip_samples);
            AV_WL32(side->data + 4, discard_padding);
            AV_WL8(side->data + 8, skip_reason);
            AV_WL8(side->data + 9, discard_reason);
            avci->skip_samples = 0;
        }
        return 0;
    }
    av_frame_remove_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES);

    if ((frame->flags & AV_FRAME_FLAG_DISCARD)) {
        // whole frame flagged for discard; it still consumes skip budget
        avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
        *discarded_samples += frame->nb_samples;
        return AVERROR(EAGAIN);
    }

    if (avci->skip_samples > 0) {
        if (frame->nb_samples <= avci->skip_samples){
            // frame fully skipped
            *discarded_samples += frame->nb_samples;
            avci->skip_samples -= frame->nb_samples;
            av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
                   avci->skip_samples);
            return AVERROR(EAGAIN);
        } else {
            // partial skip: shift remaining samples to the front in-place
            av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
                            frame->nb_samples - avci->skip_samples, avctx->ch_layout.nb_channels, frame->format);
            if (avctx->pkt_timebase.num && avctx->sample_rate) {
                // advance timestamps by the duration of the skipped samples
                int64_t diff_ts = av_rescale_q(avci->skip_samples,
                                               (AVRational){1, avctx->sample_rate},
                                               avctx->pkt_timebase);
                if (frame->pts != AV_NOPTS_VALUE)
                    frame->pts += diff_ts;
                if (frame->pkt_dts != AV_NOPTS_VALUE)
                    frame->pkt_dts += diff_ts;
                if (frame->duration >= diff_ts)
                    frame->duration -= diff_ts;
            } else
                av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");

            av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
                   avci->skip_samples, frame->nb_samples);
            *discarded_samples += avci->skip_samples;
            frame->nb_samples -= avci->skip_samples;
            avci->skip_samples = 0;
        }
    }

    if (discard_padding > 0 && discard_padding <= frame->nb_samples) {
        if (discard_padding == frame->nb_samples) {
            // entire frame is padding
            *discarded_samples += frame->nb_samples;
            return AVERROR(EAGAIN);
        } else {
            // trailing trim: shorten the frame and recompute its duration
            if (avctx->pkt_timebase.num && avctx->sample_rate) {
                int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
                                               (AVRational){1, avctx->sample_rate},
                                               avctx->pkt_timebase);
                frame->duration = diff_ts;
            } else
                av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");

            av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
                   (int)discard_padding, frame->nb_samples);
            frame->nb_samples -= discard_padding;
        }
    }

    return 0;
}
    407 
    408 /*
    409 * The core of the receive_frame_wrapper for the decoders implementing
    410 * the simple API. Certain decoders might consume partial packets without
    411 * returning any output, so this function needs to be called in a loop until it
    412 * returns EAGAIN.
    413 **/
    414 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
    415 {
    416    AVCodecInternal   *avci = avctx->internal;
    417    DecodeContext     *dc = decode_ctx(avci);
    418    AVPacket     *const pkt = avci->in_pkt;
    419    const FFCodec *const codec = ffcodec(avctx->codec);
    420    int got_frame, consumed;
    421    int ret;
    422 
    423    if (!pkt->data && !avci->draining) {
    424        av_packet_unref(pkt);
    425        ret = ff_decode_get_packet(avctx, pkt);
    426        if (ret < 0 && ret != AVERROR_EOF)
    427            return ret;
    428    }
    429 
    430    // Some codecs (at least wma lossless) will crash when feeding drain packets
    431    // after EOF was signaled.
    432    if (avci->draining_done)
    433        return AVERROR_EOF;
    434 
    435    if (!pkt->data &&
    436        !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
    437        return AVERROR_EOF;
    438 
    439    got_frame = 0;
    440 
    441    frame->pict_type = dc->initial_pict_type;
    442    frame->flags    |= dc->intra_only_flag;
    443    consumed = codec->cb.decode(avctx, frame, &got_frame, pkt);
    444 
    445    if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
    446        frame->pkt_dts = pkt->dts;
    447    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
    448 #if FF_API_FRAME_PKT
    449 FF_DISABLE_DEPRECATION_WARNINGS
    450        if(!avctx->has_b_frames)
    451            frame->pkt_pos = pkt->pos;
    452 FF_ENABLE_DEPRECATION_WARNINGS
    453 #endif
    454    }
    455    emms_c();
    456 
    457    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
    458        ret = (!got_frame || frame->flags & AV_FRAME_FLAG_DISCARD)
    459                          ? AVERROR(EAGAIN)
    460                          : 0;
    461    } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
    462        ret =  !got_frame ? AVERROR(EAGAIN)
    463                          : discard_samples(avctx, frame, discarded_samples);
    464    } else
    465        av_assert0(0);
    466 
    467    if (ret == AVERROR(EAGAIN))
    468        av_frame_unref(frame);
    469 
    470    // FF_CODEC_CB_TYPE_DECODE decoders must not return AVERROR EAGAIN
    471    // code later will add AVERROR(EAGAIN) to a pointer
    472    av_assert0(consumed != AVERROR(EAGAIN));
    473    if (consumed < 0)
    474        ret = consumed;
    475    if (consumed >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
    476        consumed = pkt->size;
    477 
    478    if (!ret)
    479        av_assert0(frame->buf[0]);
    480    if (ret == AVERROR(EAGAIN))
    481        ret = 0;
    482 
    483    /* do not stop draining when got_frame != 0 or ret < 0 */
    484    if (avci->draining && !got_frame) {
    485        if (ret < 0) {
    486            /* prevent infinite loop if a decoder wrongly always return error on draining */
    487            /* reasonable nb_errors_max = maximum b frames + thread count */
    488            int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
    489                                avctx->thread_count : 1);
    490 
    491            if (decode_ctx(avci)->nb_draining_errors++ >= nb_errors_max) {
    492                av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
    493                       "Stop draining and force EOF.\n");
    494                avci->draining_done = 1;
    495                ret = AVERROR_BUG;
    496            }
    497        } else {
    498            avci->draining_done = 1;
    499        }
    500    }
    501 
    502    if (consumed >= pkt->size || ret < 0) {
    503        av_packet_unref(pkt);
    504    } else {
    505        pkt->data                += consumed;
    506        pkt->size                -= consumed;
    507        pkt->pts                  = AV_NOPTS_VALUE;
    508        pkt->dts                  = AV_NOPTS_VALUE;
    509        if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
    510 #if FF_API_FRAME_PKT
    511            // See extract_packet_props() comment.
    512            avci->last_pkt_props->stream_index = avci->last_pkt_props->stream_index - consumed;
    513 #endif
    514            avci->last_pkt_props->pts = AV_NOPTS_VALUE;
    515            avci->last_pkt_props->dts = AV_NOPTS_VALUE;
    516        }
    517    }
    518 
    519    return ret;
    520 }
    521 
#if CONFIG_LCMS2
/**
 * Derive color primaries and transfer characteristics from an embedded ICC
 * profile (AV_FRAME_DATA_ICC_PROFILE) using lcms2, when the caller opted in
 * via AV_CODEC_FLAG2_ICC_PROFILES.
 *
 * Only overwrites frame fields when the profile yields a definite value.
 *
 * @return 0 on success or when nothing applies, negative AVERROR on failure
 */
static int detect_colorspace(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    enum AVColorTransferCharacteristic trc;
    AVColorPrimariesDesc coeffs;
    enum AVColorPrimaries prim;
    cmsHPROFILE profile;
    AVFrameSideData *sd;
    int ret;
    if (!(avctx->flags2 & AV_CODEC_FLAG2_ICC_PROFILES))
        return 0;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE);
    if (!sd || !sd->size)
        return 0;

    // lazily initialize the shared lcms context
    if (!avci->icc.avctx) {
        ret = ff_icc_context_init(&avci->icc, avctx);
        if (ret < 0)
            return ret;
    }

    profile = cmsOpenProfileFromMemTHR(avci->icc.ctx, sd->data, sd->size);
    if (!profile)
        return AVERROR_INVALIDDATA;

    ret = ff_icc_profile_sanitize(&avci->icc, profile);
    if (!ret)
        ret = ff_icc_profile_read_primaries(&avci->icc, profile, &coeffs);
    if (!ret)
        ret = ff_icc_profile_detect_transfer(&avci->icc, profile, &trc);
    cmsCloseProfile(profile);
    if (ret < 0)
        return ret;

    prim = av_csp_primaries_id_from_desc(&coeffs);
    if (prim != AVCOL_PRI_UNSPECIFIED)
        frame->color_primaries = prim;
    if (trc != AVCOL_TRC_UNSPECIFIED)
        frame->color_trc = trc;
    return 0;
}
#else /* !CONFIG_LCMS2 */
/* no-op fallback when lcms2 support is not compiled in */
static int detect_colorspace(av_unused AVCodecContext *c, av_unused AVFrame *f)
{
    return 0;
}
#endif
    571 
    572 static int fill_frame_props(const AVCodecContext *avctx, AVFrame *frame)
    573 {
    574    int ret;
    575 
    576    if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
    577        frame->color_primaries = avctx->color_primaries;
    578    if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
    579        frame->color_trc = avctx->color_trc;
    580    if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
    581        frame->colorspace = avctx->colorspace;
    582    if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
    583        frame->color_range = avctx->color_range;
    584    if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
    585        frame->chroma_location = avctx->chroma_sample_location;
    586 
    587    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
    588            if (!frame->sample_aspect_ratio.num)  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
    589            if (frame->format == AV_PIX_FMT_NONE) frame->format              = avctx->pix_fmt;
    590    } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
    591        if (frame->format == AV_SAMPLE_FMT_NONE)
    592            frame->format = avctx->sample_fmt;
    593        if (!frame->ch_layout.nb_channels) {
    594            ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
    595            if (ret < 0)
    596                return ret;
    597        }
    598        if (!frame->sample_rate)
    599            frame->sample_rate = avctx->sample_rate;
    600    }
    601 
    602    return 0;
    603 }
    604 
    605 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
    606 {
    607    int ret;
    608    int64_t discarded_samples = 0;
    609 
    610    while (!frame->buf[0]) {
    611        if (discarded_samples > avctx->max_samples)
    612            return AVERROR(EAGAIN);
    613        ret = decode_simple_internal(avctx, frame, &discarded_samples);
    614        if (ret < 0)
    615            return ret;
    616    }
    617 
    618    return 0;
    619 }
    620 
/**
 * Obtain one decoded frame from the codec, dispatching to either the
 * receive_frame callback or the simple decode API, and applying audio
 * sample skipping/discarding and the discard flag.
 *
 * @return 0 on success, AVERROR(EAGAIN)/AVERROR_EOF/other negative AVERROR
 */
int ff_decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeContext     *dc = decode_ctx(avci);
    const FFCodec *const codec = ffcodec(avctx->codec);
    int ret;

    av_assert0(!frame->buf[0]);

    if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
        // loop: frames fully consumed by sample skipping or flagged DISCARD
        // are dropped and the codec queried again
        while (1) {
            frame->pict_type = dc->initial_pict_type;
            frame->flags    |= dc->intra_only_flag;
            ret = codec->cb.receive_frame(avctx, frame);
            emms_c();
            if (!ret) {
                if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
                    int64_t discarded_samples = 0;
                    ret = discard_samples(avctx, frame, &discarded_samples);
                }
                if (ret == AVERROR(EAGAIN) || (frame->flags & AV_FRAME_FLAG_DISCARD)) {
                    av_frame_unref(frame);
                    continue;
                }
            }
            break;
        }
    } else
        ret = decode_simple_receive_frame(avctx, frame);

    if (ret == AVERROR_EOF)
        avci->draining_done = 1;

    return ret;
}
    656 
/**
 * Receive a frame (threaded or not) and finish it for delivery: ICC-based
 * colorspace detection, property fallbacks from the context, deprecated
 * compatibility fields, best-effort timestamp, and per-frame post-processing.
 *
 * @return 0 on success, negative AVERROR otherwise (frame is unreffed on
 *         post-receive failures)
 */
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeContext     *dc = decode_ctx(avci);
    int ret, ok;

    if (avctx->active_thread_type & FF_THREAD_FRAME)
        ret = ff_thread_receive_frame(avctx, frame);
    else
        ret = ff_decode_receive_frame_internal(avctx, frame);

    /* preserve ret */
    ok = detect_colorspace(avctx, frame);
    if (ok < 0) {
        av_frame_unref(frame);
        return ok;
    }

    if (!ret) {
        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            // some decoders leave dimensions unset; fall back to the context
            if (!frame->width)
                frame->width = avctx->width;
            if (!frame->height)
                frame->height = avctx->height;
        }

        ret = fill_frame_props(avctx, frame);
        if (ret < 0) {
            av_frame_unref(frame);
            return ret;
        }

#if FF_API_FRAME_KEY
FF_DISABLE_DEPRECATION_WARNINGS
        frame->key_frame = !!(frame->flags & AV_FRAME_FLAG_KEY);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        frame->interlaced_frame = !!(frame->flags & AV_FRAME_FLAG_INTERLACED);
        frame->top_field_first =  !!(frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->best_effort_timestamp = guess_correct_pts(dc,
                                                         frame->pts,
                                                         frame->pkt_dts);

        /* the only case where decode data is not set should be decoders
         * that do not call ff_get_buffer() */
        av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
                   !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));

        if (frame->private_ref) {
            FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;

            // e.g. hwaccel output transfer hooks registered at buffer setup
            if (fdd->post_process) {
                ret = fdd->post_process(avctx, frame);
                if (ret < 0) {
                    av_frame_unref(frame);
                    return ret;
                }
            }
        }
    }

    /* free the per-frame decode data */
    av_buffer_unref(&frame->private_ref);

    return ret;
}
    727 
/**
 * Public API: submit a packet to the decoder (NULL/empty packet starts
 * draining). The packet is buffered in avci->buffer_pkt; one frame may be
 * eagerly decoded into avci->buffer_frame so the packet buffer frees up.
 *
 * @return 0 on success, AVERROR(EAGAIN) if the previous packet was not yet
 *         consumed, AVERROR_EOF after draining started, other negative
 *         AVERROR on failure
 */
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeContext     *dc = decode_ctx(avci);
    int ret;

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    if (dc->draining_started)
        return AVERROR_EOF;

    // a non-NULL data pointer with zero size is malformed input
    if (avpkt && !avpkt->size && avpkt->data)
        return AVERROR(EINVAL);

    if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
        if (!AVPACKET_IS_EMPTY(avci->buffer_pkt))
            return AVERROR(EAGAIN);
        ret = av_packet_ref(avci->buffer_pkt, avpkt);
        if (ret < 0)
            return ret;
    } else
        dc->draining_started = 1;

    // eagerly pull one frame; EAGAIN/EOF here are not errors for send
    if (!avci->buffer_frame->buf[0] && !dc->draining_started) {
        ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    }

    return 0;
}
    760 
    761 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
    762 {
    763    /* make sure we are noisy about decoders returning invalid cropping data */
    764    if (frame->crop_left >= INT_MAX - frame->crop_right        ||
    765        frame->crop_top  >= INT_MAX - frame->crop_bottom       ||
    766        (frame->crop_left + frame->crop_right) >= frame->width ||
    767        (frame->crop_top + frame->crop_bottom) >= frame->height) {
    768        av_log(avctx, AV_LOG_WARNING,
    769               "Invalid cropping information set by a decoder: "
    770               "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
    771               "(frame size %dx%d). This is a bug, please report it\n",
    772               frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
    773               frame->width, frame->height);
    774        frame->crop_left   = 0;
    775        frame->crop_right  = 0;
    776        frame->crop_top    = 0;
    777        frame->crop_bottom = 0;
    778        return 0;
    779    }
    780 
    781    if (!avctx->apply_cropping)
    782        return 0;
    783 
    784    return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
    785                                          AV_FRAME_CROP_UNALIGNED : 0);
    786 }
    787 
    788 // make sure frames returned to the caller are valid
    789 static int frame_validate(AVCodecContext *avctx, AVFrame *frame)
    790 {
    791    if (!frame->buf[0] || frame->format < 0)
    792        goto fail;
    793 
    794    switch (avctx->codec_type) {
    795    case AVMEDIA_TYPE_VIDEO:
    796        if (frame->width <= 0 || frame->height <= 0)
    797            goto fail;
    798        break;
    799    case AVMEDIA_TYPE_AUDIO:
    800        if (!av_channel_layout_check(&frame->ch_layout) ||
    801            frame->sample_rate <= 0)
    802            goto fail;
    803 
    804        break;
    805    default: av_assert0(0);
    806    }
    807 
    808    return 0;
    809 fail:
    810    av_log(avctx, AV_LOG_ERROR, "An invalid frame was output by a decoder. "
    811           "This is a bug, please report it.\n");
    812    return AVERROR_BUG;
    813 }
    814 
/**
 * Return the next decoded frame to the caller.
 *
 * Either hands out a frame buffered by a previous send-packet call, or pulls
 * a fresh one from the decoder; the frame is then validated and (for video)
 * cropping is applied before it is returned.
 *
 * @return 0 on success, a negative AVERROR code on failure; on failure the
 *         frame is unreferenced before returning.
 */
int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    // Prefer a frame already buffered internally; otherwise decode a new one.
    if (avci->buffer_frame->buf[0]) {
        av_frame_move_ref(frame, avci->buffer_frame);
    } else {
        ret = decode_receive_frame_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    ret = frame_validate(avctx, frame);
    if (ret < 0)
        goto fail;

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = apply_cropping(avctx, frame);
        if (ret < 0)
            goto fail;
    }

    avctx->frame_num++;

#if FF_API_DROPCHANGED
    // Deprecated AV_CODEC_FLAG_DROPCHANGED support: remember the parameters
    // of the first returned frame and drop any later frame whose parameters
    // differ from them.
    if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {

        if (avctx->frame_num == 1) {
            // First frame: record the reference parameters.
            avci->initial_format = frame->format;
            switch(avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                avci->initial_width  = frame->width;
                avci->initial_height = frame->height;
                break;
            case AVMEDIA_TYPE_AUDIO:
                // Fall back to the context sample rate if the frame has none.
                avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
                                                                 avctx->sample_rate;
                ret = av_channel_layout_copy(&avci->initial_ch_layout, &frame->ch_layout);
                if (ret < 0)
                    goto fail;
                break;
            }
        }

        if (avctx->frame_num > 1) {
            int changed = avci->initial_format != frame->format;

            switch(avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                changed |= avci->initial_width  != frame->width ||
                           avci->initial_height != frame->height;
                break;
            case AVMEDIA_TYPE_AUDIO:
                changed |= avci->initial_sample_rate    != frame->sample_rate ||
                           avci->initial_sample_rate    != avctx->sample_rate ||
                           av_channel_layout_compare(&avci->initial_ch_layout, &frame->ch_layout);
                break;
            }

            if (changed) {
                avci->changed_frames_dropped++;
                av_log(avctx, AV_LOG_INFO, "dropped changed frame #%"PRId64" pts %"PRId64
                                            " drop count: %d \n",
                                            avctx->frame_num, frame->pts,
                                            avci->changed_frames_dropped);
                ret = AVERROR_INPUT_CHANGED;
                goto fail;
            }
        }
    }
#endif
    return 0;
fail:
    av_frame_unref(frame);
    return ret;
}
    895 
/* Reset an AVSubtitle to pristine defaults before decoding into it. */
static void get_subtitle_defaults(AVSubtitle *sub)
{
    // Zero everything, then mark the timestamp as "unset" (zero would be a
    // valid pts, so it cannot serve as the default).
    memset(sub, 0, sizeof(*sub));
    sub->pts = AV_NOPTS_VALUE;
}
    901 
#define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
/**
 * Convert a subtitle packet's payload from avctx->sub_charenc to UTF-8.
 *
 * If recoding is not requested (mode != PRE_DECODER) or the packet is empty,
 * *outpkt simply aliases inpkt and nothing is copied. Otherwise buf_pkt
 * receives the recoded payload and *outpkt points at it; the caller owns
 * buf_pkt and must unref it when done.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int recode_subtitle(AVCodecContext *avctx, const AVPacket **outpkt,
                           const AVPacket *inpkt, AVPacket *buf_pkt)
{
#if CONFIG_ICONV
    iconv_t cd = (iconv_t)-1;
    int ret = 0;
    char *inb, *outb;
    size_t inl, outl;
#endif

    if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0) {
        *outpkt = inpkt;
        return 0;
    }

#if CONFIG_ICONV
    inb = inpkt->data;
    inl = inpkt->size;

    // The output buffer is allocated as inl * UTF8_MAX_BYTES below; make
    // sure that size (plus padding) still fits in an int (AVPacket sizes
    // are int).
    if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
        return AVERROR(ERANGE);
    }

    // NOTE(review): iconv_open() failure is asserted away rather than
    // handled — presumably sub_charenc is validated when the codec is
    // opened; confirm against the open path.
    cd = iconv_open("UTF-8", avctx->sub_charenc);
    av_assert0(cd != (iconv_t)-1);

    // Worst case: every input byte expands to UTF8_MAX_BYTES output bytes.
    ret = av_new_packet(buf_pkt, inl * UTF8_MAX_BYTES);
    if (ret < 0)
        goto end;
    ret = av_packet_copy_props(buf_pkt, inpkt);
    if (ret < 0)
        goto end;
    outb = buf_pkt->data;
    outl = buf_pkt->size;

    // The second iconv() call flushes conversion state. The conversion must
    // consume all input (inl == 0) and must write at least one byte
    // (outl < buf_pkt->size), otherwise it is treated as a failure.
    if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
        iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
        outl >= buf_pkt->size || inl != 0) {
        ret = FFMIN(AVERROR(errno), -1);
        av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
               "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
        goto end;
    }
    // Shrink to the converted length and zero the unused tail.
    buf_pkt->size -= outl;
    memset(buf_pkt->data + buf_pkt->size, 0, outl);
    *outpkt = buf_pkt;

    ret = 0;
end:
    if (ret < 0)
        av_packet_unref(buf_pkt);
    if (cd != (iconv_t)-1)
        iconv_close(cd);
    return ret;
#else
    av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
    return AVERROR(EINVAL);
#endif
}
    963 
    964 static int utf8_check(const uint8_t *str)
    965 {
    966    const uint8_t *byte;
    967    uint32_t codepoint, min;
    968 
    969    while (*str) {
    970        byte = str;
    971        GET_UTF8(codepoint, *(byte++), return 0;);
    972        min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
    973              1 << (5 * (byte - str) - 4);
    974        if (codepoint < min || codepoint >= 0x110000 ||
    975            codepoint == 0xFFFE /* BOM */ ||
    976            codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
    977            return 0;
    978        str = byte;
    979    }
    980    return 1;
    981 }
    982 
/**
 * Decode a subtitle packet into an AVSubtitle.
 *
 * Optionally recodes the packet text to UTF-8 first (sub_charenc), rescales
 * the pts to AV_TIME_BASE_Q, derives a display duration from the packet
 * duration when the decoder did not set one, and validates decoded ASS text
 * as UTF-8.
 *
 * @return 0 or a positive value on success, a negative AVERROR on failure;
 *         *got_sub_ptr is nonzero iff a subtitle was produced.
 */
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
                             int *got_sub_ptr, const AVPacket *avpkt)
{
    int ret = 0;

    if (!avpkt->data && avpkt->size) {
        av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
        return AVERROR(EINVAL);
    }
    if (!avctx->codec)
        return AVERROR(EINVAL);
    if (ffcodec(avctx->codec)->cb_type != FF_CODEC_CB_TYPE_DECODE_SUB) {
        av_log(avctx, AV_LOG_ERROR, "Codec not subtitle decoder\n");
        return AVERROR(EINVAL);
    }

    *got_sub_ptr = 0;
    get_subtitle_defaults(sub);

    // Empty packets are only passed on to codecs with delay (flushing).
    if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
        AVCodecInternal *avci = avctx->internal;
        const AVPacket *pkt;

        // After this, pkt aliases either avpkt (no recoding) or
        // avci->buffer_pkt (recoded to UTF-8).
        ret = recode_subtitle(avctx, &pkt, avpkt, avci->buffer_pkt);
        if (ret < 0)
            return ret;

        if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
            sub->pts = av_rescale_q(avpkt->pts,
                                    avctx->pkt_timebase, AV_TIME_BASE_Q);
        ret = ffcodec(avctx->codec)->cb.decode_sub(avctx, sub, got_sub_ptr, pkt);
        if (pkt == avci->buffer_pkt) // did we recode?
            av_packet_unref(avci->buffer_pkt);
        if (ret < 0) {
            *got_sub_ptr = 0;
            avsubtitle_free(sub);
            return ret;
        }
        av_assert1(!sub->num_rects || *got_sub_ptr);

        // Fall back to the packet duration when the decoder left the
        // display duration unset.
        if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
            avctx->pkt_timebase.num) {
            AVRational ms = { 1, 1000 };
            sub->end_display_time = av_rescale_q(avpkt->duration,
                                                 avctx->pkt_timebase, ms);
        }

        if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
            sub->format = 0;
        else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
            sub->format = 1;

        // Reject decoded ASS text that is not valid UTF-8 — usually a sign
        // of a missing or wrong -sub_charenc setting.
        for (unsigned i = 0; i < sub->num_rects; i++) {
            if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
                sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid UTF-8 in decoded subtitles text; "
                       "maybe missing -sub_charenc option\n");
                avsubtitle_free(sub);
                *got_sub_ptr = 0;
                return AVERROR_INVALIDDATA;
            }
        }

        if (*got_sub_ptr)
            avctx->frame_num++;
    }

    return ret;
}
   1053 
   1054 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
   1055                                              const enum AVPixelFormat *fmt)
   1056 {
   1057    const AVPixFmtDescriptor *desc;
   1058    const AVCodecHWConfig *config;
   1059    int i, n;
   1060 
   1061    // If a device was supplied when the codec was opened, assume that the
   1062    // user wants to use it.
   1063    if (avctx->hw_device_ctx && ffcodec(avctx->codec)->hw_configs) {
   1064        AVHWDeviceContext *device_ctx =
   1065            (AVHWDeviceContext*)avctx->hw_device_ctx->data;
   1066        for (i = 0;; i++) {
   1067            config = &ffcodec(avctx->codec)->hw_configs[i]->public;
   1068            if (!config)
   1069                break;
   1070            if (!(config->methods &
   1071                  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
   1072                continue;
   1073            if (device_ctx->type != config->device_type)
   1074                continue;
   1075            for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
   1076                if (config->pix_fmt == fmt[n])
   1077                    return fmt[n];
   1078            }
   1079        }
   1080    }
   1081    // No device or other setup, so we have to choose from things which
   1082    // don't any other external information.
   1083 
   1084    // If the last element of the list is a software format, choose it
   1085    // (this should be best software format if any exist).
   1086    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
   1087    desc = av_pix_fmt_desc_get(fmt[n - 1]);
   1088    if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
   1089        return fmt[n - 1];
   1090 
   1091    // Finally, traverse the list in order and choose the first entry
   1092    // with no external dependencies (if there is no hardware configuration
   1093    // information available then this just picks the first entry).
   1094    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
   1095        for (i = 0;; i++) {
   1096            config = avcodec_get_hw_config(avctx->codec, i);
   1097            if (!config)
   1098                break;
   1099            if (config->pix_fmt == fmt[n])
   1100                break;
   1101        }
   1102        if (!config) {
   1103            // No specific config available, so the decoder must be able
   1104            // to handle this format without any additional setup.
   1105            return fmt[n];
   1106        }
   1107        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
   1108            // Usable with only internal setup.
   1109            return fmt[n];
   1110        }
   1111    }
   1112 
   1113    // Nothing is usable, give up.
   1114    return AV_PIX_FMT_NONE;
   1115 }
   1116 
/**
 * Ensure avctx->hw_frames_ctx is set up for hwaccel decoding, deriving it
 * from avctx->hw_device_ctx when the user did not supply one.
 *
 * @param dev_type device type the active hwaccel requires
 * @return 0 on success, a negative AVERROR code otherwise
 */
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
                                enum AVHWDeviceType dev_type)
{
    AVHWDeviceContext *device_ctx;
    AVHWFramesContext *frames_ctx;
    int ret;

    if (!avctx->hwaccel)
        return AVERROR(ENOSYS);

    // A user-supplied frames context takes precedence; nothing to do.
    if (avctx->hw_frames_ctx)
        return 0;
    if (!avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
                "required for hardware accelerated decoding.\n");
        return AVERROR(EINVAL);
    }

    device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
    if (device_ctx->type != dev_type) {
        av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
               "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
               av_hwdevice_get_type_name(device_ctx->type));
        return AVERROR(EINVAL);
    }

    ret = avcodec_get_hw_frames_parameters(avctx,
                                           avctx->hw_device_ctx,
                                           avctx->hwaccel->pix_fmt,
                                           &avctx->hw_frames_ctx);
    if (ret < 0)
        return ret;

    frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;


    if (frames_ctx->initial_pool_size) {
        // We guarantee 4 base work surfaces. The function above guarantees 1
        // (the absolute minimum), so add the missing count.
        frames_ctx->initial_pool_size += 3;
    }

    ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
    if (ret < 0) {
        av_buffer_unref(&avctx->hw_frames_ctx);
        return ret;
    }

    return 0;
}
   1167 
/**
 * Create an (uninitialised) hw frames context with the parameters the
 * hwaccel matching hw_pix_fmt requires.
 *
 * On success *out_frames_ref owns the new frames context; on failure any
 * hwaccel private data allocated here is freed again.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
                                     AVBufferRef *device_ref,
                                     enum AVPixelFormat hw_pix_fmt,
                                     AVBufferRef **out_frames_ref)
{
    AVBufferRef *frames_ref = NULL;
    const AVCodecHWConfigInternal *hw_config;
    const FFHWAccel *hwa;
    int i, ret;
    // Tracks whether hwaccel_priv_data was allocated by this call, so the
    // failure path frees only what it created and never pre-existing state.
    bool clean_priv_data = false;

    // Find the hw config whose pixel format matches the requested one;
    // the hw_configs array is NULL-terminated.
    for (i = 0;; i++) {
        hw_config = ffcodec(avctx->codec)->hw_configs[i];
        if (!hw_config)
            return AVERROR(ENOENT);
        if (hw_config->public.pix_fmt == hw_pix_fmt)
            break;
    }

    hwa = hw_config->hwaccel;
    if (!hwa || !hwa->frame_params)
        return AVERROR(ENOENT);

    frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return AVERROR(ENOMEM);

    // frame_params() may need the hwaccel's private context, so allocate it
    // here if the hwaccel has not been initialised yet.
    if (!avctx->internal->hwaccel_priv_data) {
        avctx->internal->hwaccel_priv_data =
            av_mallocz(hwa->priv_data_size);
        if (!avctx->internal->hwaccel_priv_data) {
            av_buffer_unref(&frames_ref);
            return AVERROR(ENOMEM);
        }
        clean_priv_data = true;
    }

    ret = hwa->frame_params(avctx, frames_ref);
    if (ret >= 0) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;

        if (frames_ctx->initial_pool_size) {
            // If the user has requested that extra output surfaces be
            // available then add them here.
            if (avctx->extra_hw_frames > 0)
                frames_ctx->initial_pool_size += avctx->extra_hw_frames;

            // If frame threading is enabled then an extra surface per thread
            // is also required.
            if (avctx->active_thread_type & FF_THREAD_FRAME)
                frames_ctx->initial_pool_size += avctx->thread_count;
        }

        *out_frames_ref = frames_ref;
    } else {
        if (clean_priv_data)
            av_freep(&avctx->internal->hwaccel_priv_data);
        av_buffer_unref(&frames_ref);
    }
    return ret;
}
   1229 
   1230 static int hwaccel_init(AVCodecContext *avctx,
   1231                        const FFHWAccel *hwaccel)
   1232 {
   1233    int err;
   1234 
   1235    if (hwaccel->p.capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
   1236        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
   1237        av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
   1238               hwaccel->p.name);
   1239        return AVERROR_PATCHWELCOME;
   1240    }
   1241 
   1242    if (!avctx->internal->hwaccel_priv_data && hwaccel->priv_data_size) {
   1243        avctx->internal->hwaccel_priv_data =
   1244            av_mallocz(hwaccel->priv_data_size);
   1245        if (!avctx->internal->hwaccel_priv_data)
   1246            return AVERROR(ENOMEM);
   1247    }
   1248 
   1249    avctx->hwaccel = &hwaccel->p;
   1250    if (hwaccel->init) {
   1251        err = hwaccel->init(avctx);
   1252        if (err < 0) {
   1253            av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
   1254                   "hwaccel initialisation returned error.\n",
   1255                   av_get_pix_fmt_name(hwaccel->p.pix_fmt));
   1256            av_freep(&avctx->internal->hwaccel_priv_data);
   1257            avctx->hwaccel = NULL;
   1258            return err;
   1259        }
   1260    }
   1261 
   1262    return 0;
   1263 }
   1264 
/* Tear down the active hwaccel: run its uninit callback (if any) before
 * freeing its private data, then drop the frames context reference.
 * The order matters — the callback may still use the private data. */
void ff_hwaccel_uninit(AVCodecContext *avctx)
{
    if (FF_HW_HAS_CB(avctx, uninit))
        FF_HW_SIMPLE_CALL(avctx, uninit);

    av_freep(&avctx->internal->hwaccel_priv_data);

    avctx->hwaccel = NULL;

    av_buffer_unref(&avctx->hw_frames_ctx);
}
   1276 
/**
 * Negotiate the output pixel format with the caller's get_format() callback.
 *
 * Repeatedly offers the decoder's format list to get_format(); validates the
 * user's choice, performs any hwaccel setup it requires, and on failure
 * removes the choice from the list and retries.
 *
 * @return the negotiated pixel format, or AV_PIX_FMT_NONE on failure
 *         (note AV_PIX_FMT_NONE == -1, hence the `ret < 0` check below)
 */
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    const AVPixFmtDescriptor *desc;
    enum AVPixelFormat *choices;
    enum AVPixelFormat ret, user_choice;
    const AVCodecHWConfigInternal *hw_config;
    const AVCodecHWConfig *config;
    int i, n, err;

    // Find end of list.
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
    // Must contain at least one entry.
    av_assert0(n >= 1);
    // If a software format is available, it must be the last entry.
    desc = av_pix_fmt_desc_get(fmt[n - 1]);
    if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
        // No software format is available.
    } else {
        avctx->sw_pix_fmt = fmt[n - 1];
    }

    // Work on a mutable copy so failed choices can be removed on retry.
    choices = av_memdup(fmt, (n + 1) * sizeof(*choices));
    if (!choices)
        return AV_PIX_FMT_NONE;

    for (;;) {
        // Remove the previous hwaccel, if there was one.
        ff_hwaccel_uninit(avctx);

        user_choice = avctx->get_format(avctx, choices);
        if (user_choice == AV_PIX_FMT_NONE) {
            // Explicitly chose nothing, give up.
            ret = AV_PIX_FMT_NONE;
            break;
        }

        desc = av_pix_fmt_desc_get(user_choice);
        if (!desc) {
            av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
                   "get_format() callback.\n");
            ret = AV_PIX_FMT_NONE;
            break;
        }
        av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
               desc->name);

        // The choice must come from the offered list.
        for (i = 0; i < n; i++) {
            if (choices[i] == user_choice)
                break;
        }
        if (i == n) {
            av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
                   "%s not in possible list.\n", desc->name);
            ret = AV_PIX_FMT_NONE;
            break;
        }

        // Look up the hw config (if any) matching the chosen format.
        if (ffcodec(avctx->codec)->hw_configs) {
            for (i = 0;; i++) {
                hw_config = ffcodec(avctx->codec)->hw_configs[i];
                if (!hw_config)
                    break;
                if (hw_config->public.pix_fmt == user_choice)
                    break;
            }
        } else {
            hw_config = NULL;
        }

        if (!hw_config) {
            // No config available, so no extra setup required.
            ret = user_choice;
            break;
        }
        config = &hw_config->public;

        // Verify that the caller supplied whatever setup the config's
        // method requires (frames context, device context, or nothing).
        if (config->methods &
            AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
            avctx->hw_frames_ctx) {
            const AVHWFramesContext *frames_ctx =
                (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            if (frames_ctx->format != user_choice) {
                av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                       "does not match the format of the provided frames "
                       "context.\n", desc->name);
                goto try_again;
            }
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
                   avctx->hw_device_ctx) {
            const AVHWDeviceContext *device_ctx =
                (AVHWDeviceContext*)avctx->hw_device_ctx->data;
            if (device_ctx->type != config->device_type) {
                av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                       "does not match the type of the provided device "
                       "context.\n", desc->name);
                goto try_again;
            }
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
            // Internal-only setup, no additional configuration.
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
            // Some ad-hoc configuration we can't see and can't check.
        } else {
            av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                   "missing configuration.\n", desc->name);
            goto try_again;
        }
        if (hw_config->hwaccel) {
            av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel %s "
                   "initialisation.\n", desc->name, hw_config->hwaccel->p.name);
            err = hwaccel_init(avctx, hw_config->hwaccel);
            if (err < 0)
                goto try_again;
        }
        ret = user_choice;
        break;

    try_again:
        // Remove the failed choice from the list and offer the rest again.
        av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
               "get_format() without it.\n", desc->name);
        for (i = 0; i < n; i++) {
            if (choices[i] == user_choice)
                break;
        }
        for (; i + 1 < n; i++)
            choices[i] = choices[i + 1];
        --n;
    }

    if (ret < 0)
        ff_hwaccel_uninit(avctx);

    av_freep(&choices);
    return ret;
}
   1414 
   1415 static const AVPacketSideData*
   1416 packet_side_data_get(const AVPacketSideData *sd, int nb_sd,
   1417                     enum AVPacketSideDataType type)
   1418 {
   1419    for (int i = 0; i < nb_sd; i++)
   1420        if (sd[i].type == type)
   1421            return &sd[i];
   1422 
   1423    return NULL;
   1424 }
   1425 
/**
 * Look up a side-data entry of the given type in the codec context's
 * coded_side_data array.
 *
 * @return the matching entry, or NULL if absent
 */
const AVPacketSideData *ff_get_coded_side_data(const AVCodecContext *avctx,
                                               enum AVPacketSideDataType type)
{
    return packet_side_data_get(avctx->coded_side_data, avctx->nb_coded_side_data, type);
}
   1431 
   1432 static int side_data_stereo3d_merge(AVFrameSideData *sd_frame,
   1433                                    const AVPacketSideData *sd_pkt)
   1434 {
   1435    const AVStereo3D *src;
   1436    AVStereo3D       *dst;
   1437    int ret;
   1438 
   1439    ret = av_buffer_make_writable(&sd_frame->buf);
   1440    if (ret < 0)
   1441        return ret;
   1442    sd_frame->data = sd_frame->buf->data;
   1443 
   1444    dst = (      AVStereo3D*)sd_frame->data;
   1445    src = (const AVStereo3D*)sd_pkt->data;
   1446 
   1447    if (dst->type == AV_STEREO3D_UNSPEC)
   1448        dst->type = src->type;
   1449 
   1450    if (dst->view == AV_STEREO3D_VIEW_UNSPEC)
   1451        dst->view = src->view;
   1452 
   1453    if (dst->primary_eye == AV_PRIMARY_EYE_NONE)
   1454        dst->primary_eye = src->primary_eye;
   1455 
   1456    if (!dst->baseline)
   1457        dst->baseline = src->baseline;
   1458 
   1459    if (!dst->horizontal_disparity_adjustment.num)
   1460        dst->horizontal_disparity_adjustment = src->horizontal_disparity_adjustment;
   1461 
   1462    if (!dst->horizontal_field_of_view.num)
   1463        dst->horizontal_field_of_view = src->horizontal_field_of_view;
   1464 
   1465    return 0;
   1466 }
   1467 
   1468 static int side_data_map(AVFrame *dst,
   1469                         const AVPacketSideData *sd_src, int nb_sd_src,
   1470                         const SideDataMap *map)
   1471 
   1472 {
   1473    for (int i = 0; map[i].packet < AV_PKT_DATA_NB; i++) {
   1474        const enum AVPacketSideDataType type_pkt   = map[i].packet;
   1475        const enum AVFrameSideDataType  type_frame = map[i].frame;
   1476        const AVPacketSideData *sd_pkt;
   1477        AVFrameSideData *sd_frame;
   1478 
   1479        sd_pkt = packet_side_data_get(sd_src, nb_sd_src, type_pkt);
   1480        if (!sd_pkt)
   1481            continue;
   1482 
   1483        sd_frame = av_frame_get_side_data(dst, type_frame);
   1484        if (sd_frame) {
   1485            if (type_frame == AV_FRAME_DATA_STEREO3D) {
   1486                int ret = side_data_stereo3d_merge(sd_frame, sd_pkt);
   1487                if (ret < 0)
   1488                    return ret;
   1489            }
   1490 
   1491            continue;
   1492        }
   1493 
   1494        sd_frame = av_frame_new_side_data(dst, type_frame, sd_pkt->size);
   1495        if (!sd_frame)
   1496            return AVERROR(ENOMEM);
   1497 
   1498        memcpy(sd_frame->data, sd_pkt->data, sd_pkt->size);
   1499    }
   1500 
   1501    return 0;
   1502 }
   1503 
   1504 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
   1505 {
   1506    size_t size;
   1507    const uint8_t *side_metadata;
   1508 
   1509    AVDictionary **frame_md = &frame->metadata;
   1510 
   1511    side_metadata = av_packet_get_side_data(avpkt,
   1512                                            AV_PKT_DATA_STRINGS_METADATA, &size);
   1513    return av_packet_unpack_dictionary(side_metadata, size, frame_md);
   1514 }
   1515 
/**
 * Copy packet-level properties (timing, side data, metadata, opaque) onto
 * the frame.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
                                   AVFrame *frame, const AVPacket *pkt)
{
    // Packet side-data types mapped 1:1 onto frame side data, in addition
    // to the shared ff_sd_global_map table. Terminated by AV_PKT_DATA_NB.
    static const SideDataMap sd[] = {
        { AV_PKT_DATA_A53_CC,                     AV_FRAME_DATA_A53_CC },
        { AV_PKT_DATA_AFD,                        AV_FRAME_DATA_AFD },
        { AV_PKT_DATA_DYNAMIC_HDR10_PLUS,         AV_FRAME_DATA_DYNAMIC_HDR_PLUS },
        { AV_PKT_DATA_S12M_TIMECODE,              AV_FRAME_DATA_S12M_TIMECODE },
        { AV_PKT_DATA_SKIP_SAMPLES,               AV_FRAME_DATA_SKIP_SAMPLES },
        { AV_PKT_DATA_LCEVC,                      AV_FRAME_DATA_LCEVC },
        { AV_PKT_DATA_NB }
    };

    int ret = 0;

    frame->pts          = pkt->pts;
    frame->duration     = pkt->duration;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pos      = pkt->pos;
    frame->pkt_size     = pkt->size;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    ret = side_data_map(frame, pkt->side_data, pkt->side_data_elems, ff_sd_global_map);
    if (ret < 0)
        return ret;

    ret = side_data_map(frame, pkt->side_data, pkt->side_data_elems, sd);
    if (ret < 0)
        return ret;

    // Best effort: a failure to unpack string metadata is not propagated.
    add_metadata_from_side_data(pkt, frame);

    if (pkt->flags & AV_PKT_FLAG_DISCARD) {
        frame->flags |= AV_FRAME_FLAG_DISCARD;
    }

    // Pass the caller's opaque pointer/ref through to the output frame
    // when requested.
    if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
        int ret = av_buffer_replace(&frame->opaque_ref, pkt->opaque_ref);
        if (ret < 0)
            return ret;
        frame->opaque = pkt->opaque;
    }

    return 0;
}
   1563 
/**
 * Set frame properties from the codec context and the last packet fed to
 * the decoder (unless the codec sets its own frame props).
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
{
    int ret;

    // Context-level coded side data applies to every output frame.
    ret = side_data_map(frame, avctx->coded_side_data, avctx->nb_coded_side_data,
                        ff_sd_global_map);
    if (ret < 0)
        return ret;

    if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
        const AVPacket *pkt = avctx->internal->last_pkt_props;

        ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
        if (ret < 0)
            return ret;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
        // NOTE(review): last_pkt_props appears to store the original packet
        // size in stream_index (pkt->size itself is rewritten elsewhere) —
        // confirm against the code filling last_pkt_props.
        frame->pkt_size = pkt->stream_index;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    }

    ret = fill_frame_props(avctx, frame);
    if (ret < 0)
        return ret;

    switch (avctx->codec->type) {
    case AVMEDIA_TYPE_VIDEO:
        // Drop an invalid sample aspect ratio instead of propagating it.
        if (frame->width && frame->height &&
            av_image_check_sar(frame->width, frame->height,
                               frame->sample_aspect_ratio) < 0) {
            av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
                   frame->sample_aspect_ratio.num,
                   frame->sample_aspect_ratio.den);
            frame->sample_aspect_ratio = (AVRational){ 0, 1 };
        }
        break;
    }
    return 0;
}
   1604 
   1605 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
   1606 {
   1607    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
   1608        int i;
   1609        int num_planes = av_pix_fmt_count_planes(frame->format);
   1610        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
   1611        int flags = desc ? desc->flags : 0;
   1612        if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
   1613            num_planes = 2;
   1614        for (i = 0; i < num_planes; i++) {
   1615            av_assert0(frame->data[i]);
   1616        }
   1617        // For formats without data like hwaccel allow unused pointers to be non-NULL.
   1618        for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
   1619            if (frame->data[i])
   1620                av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
   1621            frame->data[i] = NULL;
   1622        }
   1623    }
   1624 }
   1625 
   1626 static void decode_data_free(void *opaque, uint8_t *data)
   1627 {
   1628    FrameDecodeData *fdd = (FrameDecodeData*)data;
   1629 
   1630    if (fdd->post_process_opaque_free)
   1631        fdd->post_process_opaque_free(fdd->post_process_opaque);
   1632 
   1633    if (fdd->hwaccel_priv_free)
   1634        fdd->hwaccel_priv_free(fdd->hwaccel_priv);
   1635 
   1636    av_freep(&fdd);
   1637 }
   1638 
   1639 int ff_attach_decode_data(AVFrame *frame)
   1640 {
   1641    AVBufferRef *fdd_buf;
   1642    FrameDecodeData *fdd;
   1643 
   1644    av_assert1(!frame->private_ref);
   1645    av_buffer_unref(&frame->private_ref);
   1646 
   1647    fdd = av_mallocz(sizeof(*fdd));
   1648    if (!fdd)
   1649        return AVERROR(ENOMEM);
   1650 
   1651    fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
   1652                               NULL, AV_BUFFER_FLAG_READONLY);
   1653    if (!fdd_buf) {
   1654        av_freep(&fdd);
   1655        return AVERROR(ENOMEM);
   1656    }
   1657 
   1658    frame->private_ref = fdd_buf;
   1659 
   1660    return 0;
   1661 }
   1662 
   1663 static void update_frame_props(AVCodecContext *avctx, AVFrame *frame)
   1664 {
   1665    AVCodecInternal    *avci = avctx->internal;
   1666    DecodeContext        *dc = decode_ctx(avci);
   1667 
   1668    dc->lcevc_frame = dc->lcevc && avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
   1669                      av_frame_get_side_data(frame, AV_FRAME_DATA_LCEVC);
   1670 
   1671    if (dc->lcevc_frame) {
   1672        dc->width     = frame->width;
   1673        dc->height    = frame->height;
   1674        frame->width  = frame->width  * 2 / FFMAX(frame->sample_aspect_ratio.den, 1);
   1675        frame->height = frame->height * 2 / FFMAX(frame->sample_aspect_ratio.num, 1);
   1676    }
   1677 }
   1678 
   1679 static void attach_post_process_data(AVCodecContext *avctx, AVFrame *frame)
   1680 {
   1681    AVCodecInternal    *avci = avctx->internal;
   1682    DecodeContext        *dc = decode_ctx(avci);
   1683 
   1684    if (dc->lcevc_frame) {
   1685        FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
   1686 
   1687        fdd->post_process_opaque = av_refstruct_ref(dc->lcevc);
   1688        fdd->post_process_opaque_free = ff_lcevc_unref;
   1689        fdd->post_process = ff_lcevc_process;
   1690 
   1691        frame->width  = dc->width;
   1692        frame->height = dc->height;
   1693    }
   1694    dc->lcevc_frame = 0;
   1695 }
   1696 
/**
 * Allocate a buffer for a decoded frame: validates frame/context
 * parameters, fills frame properties, then allocates through the
 * hwaccel's alloc_frame callback or avctx->get_buffer2().
 * On failure the frame is unreferenced and an error is logged.
 */
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
    int override_dimensions = 1;
    int ret;

    av_assert0(av_codec_is_decoder(avctx->codec));

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* reject dimensions that would overflow the aligned width,
         * exceed max_pixels, or an unset pixel format */
        if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
            (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
            av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
            ret = AVERROR(EINVAL);
            goto fail;
        }

        /* no caller-provided size: use (lowres-scaled) coded dimensions
         * and remember to restore the display size at the end */
        if (frame->width <= 0 || frame->height <= 0) {
            frame->width  = FFMAX(avctx->width,  AV_CEIL_RSHIFT(avctx->coded_width,  avctx->lowres));
            frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
            override_dimensions = 0;
        }

        if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
            av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
            ret = AVERROR(EINVAL);
            goto fail;
        }
    } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* enforce the user-settable cap on samples per frame */
        if (frame->nb_samples * (int64_t)avctx->ch_layout.nb_channels > avctx->max_samples) {
            av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
            ret = AVERROR(EINVAL);
            goto fail;
        }
    }
    ret = ff_decode_frame_props(avctx, frame);
    if (ret < 0)
        goto fail;

    if (hwaccel) {
        /* the hwaccel allocates the frame itself; skip get_buffer2() and
         * the software-path bookkeeping below */
        if (hwaccel->alloc_frame) {
            ret = hwaccel->alloc_frame(avctx, frame);
            goto end;
        }
    } else {
        avctx->sw_pix_fmt = avctx->pix_fmt;
        update_frame_props(avctx, frame);
    }

    ret = avctx->get_buffer2(avctx, frame, flags);
    if (ret < 0)
        goto fail;

    validate_avframe_allocation(avctx, frame);

    ret = ff_attach_decode_data(frame);
    if (ret < 0)
        goto fail;

    attach_post_process_data(avctx, frame);

end:
    /* restore display dimensions unless the caller supplied its own or
     * the codec exports cropping information itself */
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
        !(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
        frame->width  = avctx->width;
        frame->height = avctx->height;
    }

fail:
    /* note: the hwaccel path falls through here too, so its errors are
     * reported and cleaned up the same way */
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        av_frame_unref(frame);
    }

    return ret;
}
   1772 
/* Reuse the frame's existing buffer when possible, otherwise allocate a
 * fresh one. If the frame is not writable (and READONLY use was not
 * requested), the old contents are copied into the new buffer. */
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    AVFrame *tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

    // make sure the discard flag does not persist
    frame->flags &= ~AV_FRAME_FLAG_DISCARD;

    /* size or format changed: the old buffer cannot be reused */
    if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
        av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
               frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
        av_frame_unref(frame);
    }

    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

    av_frame_side_data_free(&frame->side_data, &frame->nb_side_data);

    /* writable (or read-only use): keep the buffer, refresh props only */
    if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
        return ff_decode_frame_props(avctx, frame);

    /* not writable: move the old frame aside, allocate a new buffer and
     * copy the contents over */
    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }

    /* NOTE(review): av_frame_copy()'s return value is ignored; the
     * size/format were matched above so it should not fail, but confirm
     * this is intentional. */
    av_frame_copy(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
   1814 
   1815 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
   1816 {
   1817    int ret = reget_buffer_internal(avctx, frame, flags);
   1818    if (ret < 0)
   1819        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
   1820    return ret;
   1821 }
   1822 
/* Pool entry pairing a frame with its threading-progress state. */
typedef struct ProgressInternal {
    ThreadProgress progress; /* progress reporting/waiting state */
    struct AVFrame *f;       /* the frame this progress refers to */
} ProgressInternal;
   1827 
   1828 static void check_progress_consistency(const ProgressFrame *f)
   1829 {
   1830    av_assert1(!!f->f == !!f->progress);
   1831    av_assert1(!f->progress || f->progress->f == f->f);
   1832 }
   1833 
   1834 int ff_progress_frame_alloc(AVCodecContext *avctx, ProgressFrame *f)
   1835 {
   1836    AVRefStructPool *pool = avctx->internal->progress_frame_pool;
   1837 
   1838    av_assert1(!f->f && !f->progress);
   1839 
   1840    f->progress = av_refstruct_pool_get(pool);
   1841    if (!f->progress)
   1842        return AVERROR(ENOMEM);
   1843 
   1844    f->f = f->progress->f;
   1845    return 0;
   1846 }
   1847 
   1848 int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
   1849 {
   1850    int ret;
   1851 
   1852    check_progress_consistency(f);
   1853    if (!f->f) {
   1854        ret = ff_progress_frame_alloc(avctx, f);
   1855        if (ret < 0)
   1856            return ret;
   1857    }
   1858 
   1859    ret = ff_thread_get_buffer(avctx, f->progress->f, flags);
   1860    if (ret < 0) {
   1861        f->f = NULL;
   1862        av_refstruct_unref(&f->progress);
   1863        return ret;
   1864    }
   1865    return 0;
   1866 }
   1867 
   1868 void ff_progress_frame_ref(ProgressFrame *dst, const ProgressFrame *src)
   1869 {
   1870    av_assert1(src->progress && src->f && src->f == src->progress->f);
   1871    av_assert1(!dst->f && !dst->progress);
   1872    dst->f = src->f;
   1873    dst->progress = av_refstruct_ref(src->progress);
   1874 }
   1875 
   1876 void ff_progress_frame_unref(ProgressFrame *f)
   1877 {
   1878    check_progress_consistency(f);
   1879    f->f = NULL;
   1880    av_refstruct_unref(&f->progress);
   1881 }
   1882 
   1883 void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
   1884 {
   1885    if (dst == src)
   1886        return;
   1887    ff_progress_frame_unref(dst);
   1888    check_progress_consistency(src);
   1889    if (src->f)
   1890        ff_progress_frame_ref(dst, src);
   1891 }
   1892 
/* Report decoding progress n on f's frame to waiting threads. */
void ff_progress_frame_report(ProgressFrame *f, int n)
{
    ff_thread_progress_report(&f->progress->progress, n);
}
   1897 
/* Wait until progress n has been reported on f's frame. */
void ff_progress_frame_await(const ProgressFrame *f, int n)
{
    ff_thread_progress_await(&f->progress->progress, n);
}
   1902 
#if !HAVE_THREADS
/* Stub for builds without threading support: frame threading can never
 * be active, so there is nothing to synchronize. */
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
{
    return FF_THREAD_NO_FRAME_THREADING;
}
#endif /* !HAVE_THREADS */
   1909 
   1910 static av_cold int progress_frame_pool_init_cb(AVRefStructOpaque opaque, void *obj)
   1911 {
   1912    const AVCodecContext *avctx = opaque.nc;
   1913    ProgressInternal *progress = obj;
   1914    int ret;
   1915 
   1916    ret = ff_thread_progress_init(&progress->progress, avctx->active_thread_type & FF_THREAD_FRAME);
   1917    if (ret < 0)
   1918        return ret;
   1919 
   1920    progress->f = av_frame_alloc();
   1921    if (!progress->f)
   1922        return AVERROR(ENOMEM);
   1923 
   1924    return 0;
   1925 }
   1926 
   1927 static void progress_frame_pool_reset_cb(AVRefStructOpaque unused, void *obj)
   1928 {
   1929    ProgressInternal *progress = obj;
   1930 
   1931    ff_thread_progress_reset(&progress->progress);
   1932    av_frame_unref(progress->f);
   1933 }
   1934 
   1935 static av_cold void progress_frame_pool_free_entry_cb(AVRefStructOpaque opaque, void *obj)
   1936 {
   1937    ProgressInternal *progress = obj;
   1938 
   1939    ff_thread_progress_destroy(&progress->progress);
   1940    av_frame_free(&progress->f);
   1941 }
   1942 
/* One-time decoder setup performed before the codec's init() callback:
 * validates lowres and subtitle-charenc options, resets the timestamp
 * correction state, builds the packet-side-data preference mask and
 * allocates the internal packets, progress-frame pool, bitstream
 * filters and LCEVC context. */
int ff_decode_preinit(AVCodecContext *avctx)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeContext     *dc = decode_ctx(avci);
    int ret = 0;

    dc->initial_pict_type = AV_PICTURE_TYPE_NONE;
    if (avctx->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY) {
        /* intra-only codecs: every frame starts out flagged as a keyframe */
        dc->intra_only_flag = AV_FRAME_FLAG_KEY;
        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
            dc->initial_pict_type = AV_PICTURE_TYPE_I;
    }

    /* if the decoder init function was already called previously,
     * free the already allocated subtitle_header before overwriting it */
    av_freep(&avctx->subtitle_header);

    if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
               avctx->codec->max_lowres);
        avctx->lowres = avctx->codec->max_lowres;
    }
    if (avctx->sub_charenc) {
        if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
                   "supported with subtitles codecs\n");
            return AVERROR(EINVAL);
        } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
            av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
                   "subtitles character encoding will be ignored\n",
                   avctx->codec_descriptor->name);
            avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING;
        } else {
            /* input character encoding is set for a text based subtitle
             * codec at this point */
            if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC)
                avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER;

            if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) {
#if CONFIG_ICONV
                /* probe the conversion now so a bad encoding name fails
                 * at open time rather than mid-stream */
                iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
                if (cd == (iconv_t)-1) {
                    ret = AVERROR(errno);
                    av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
                           "with input character encoding \"%s\"\n", avctx->sub_charenc);
                    return ret;
                }
                iconv_close(cd);
#else
                av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
                       "conversion needs a libavcodec built with iconv support "
                       "for this codec\n");
                return AVERROR(ENOSYS);
#endif
            }
        }
    }

    dc->pts_correction_num_faulty_pts =
    dc->pts_correction_num_faulty_dts = 0;
    dc->pts_correction_last_pts =
    dc->pts_correction_last_dts = INT64_MIN;

    if (   !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY
        && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO)
        av_log(avctx, AV_LOG_WARNING,
               "gray decoding requested but not enabled at configuration time\n");
    if (avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) {
        avctx->export_side_data |= AV_CODEC_EXPORT_DATA_MVS;
    }

    /* build the bitmask of frame side data types for which packet/global
     * side data takes precedence (consulted by side_data_pref()) */
    if (avctx->nb_side_data_prefer_packet == 1 &&
        avctx->side_data_prefer_packet[0] == -1)
        dc->side_data_pref_mask = ~0ULL;
    else {
        for (unsigned i = 0; i < avctx->nb_side_data_prefer_packet; i++) {
            int val = avctx->side_data_prefer_packet[i];

            if (val < 0 || val >= AV_PKT_DATA_NB) {
                av_log(avctx, AV_LOG_ERROR, "Invalid side data type: %d\n", val);
                return AVERROR(EINVAL);
            }

            /* translate the packet side data type to its frame-side-data
             * counterpart via the global map */
            for (unsigned j = 0; ff_sd_global_map[j].packet < AV_PKT_DATA_NB; j++) {
                if (ff_sd_global_map[j].packet == val) {
                    val = ff_sd_global_map[j].frame;

                    // this code will need to be changed when we have more than
                    // 64 frame side data types
                    if (val >= 64) {
                        av_log(avctx, AV_LOG_ERROR, "Side data type too big\n");
                        return AVERROR_BUG;
                    }

                    dc->side_data_pref_mask |= 1ULL << val;
                }
            }
        }
    }

    avci->in_pkt         = av_packet_alloc();
    avci->last_pkt_props = av_packet_alloc();
    if (!avci->in_pkt || !avci->last_pkt_props)
        return AVERROR(ENOMEM);

    if (ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_USES_PROGRESSFRAMES) {
        avci->progress_frame_pool =
            av_refstruct_pool_alloc_ext(sizeof(ProgressInternal),
                                        AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR,
                                        avctx, progress_frame_pool_init_cb,
                                        progress_frame_pool_reset_cb,
                                        progress_frame_pool_free_entry_cb, NULL);
        if (!avci->progress_frame_pool)
            return AVERROR(ENOMEM);
    }
    ret = decode_bsfs_init(avctx);
    if (ret < 0)
        return ret;

    /* LCEVC is applied internally unless the caller asked for the
     * enhancement data to be exported instead; allocation failure is
     * fatal only with AV_EF_EXPLODE */
    if (!(avctx->export_side_data & AV_CODEC_EXPORT_DATA_ENHANCEMENTS)) {
        ret = ff_lcevc_alloc(&dc->lcevc);
        if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
            return ret;
    }

#if FF_API_DROPCHANGED
    if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED)
        av_log(avctx, AV_LOG_WARNING, "The dropchanged flag is deprecated.\n");
#endif

    return 0;
}
   2075 
   2076 /**
   2077 * Check side data preference and clear existing side data from frame
   2078 * if needed.
   2079 *
   2080 * @retval 0 side data of this type can be added to frame
   2081 * @retval 1 side data of this type should not be added to frame
   2082 */
   2083 static int side_data_pref(const AVCodecContext *avctx, AVFrameSideData ***sd,
   2084                          int *nb_sd, enum AVFrameSideDataType type)
   2085 {
   2086    DecodeContext *dc = decode_ctx(avctx->internal);
   2087 
   2088    // Note: could be skipped for `type` without corresponding packet sd
   2089    if (av_frame_side_data_get(*sd, *nb_sd, type)) {
   2090        if (dc->side_data_pref_mask & (1ULL << type))
   2091            return 1;
   2092        av_frame_side_data_remove(sd, nb_sd, type);
   2093    }
   2094 
   2095    return 0;
   2096 }
   2097 
   2098 
   2099 int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame,
   2100                           enum AVFrameSideDataType type, size_t size,
   2101                           AVFrameSideData **psd)
   2102 {
   2103    AVFrameSideData *sd;
   2104 
   2105    if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data, type)) {
   2106        if (psd)
   2107            *psd = NULL;
   2108        return 0;
   2109    }
   2110 
   2111    sd = av_frame_new_side_data(frame, type, size);
   2112    if (psd)
   2113        *psd = sd;
   2114 
   2115    return sd ? 0 : AVERROR(ENOMEM);
   2116 }
   2117 
   2118 int ff_frame_new_side_data_from_buf_ext(const AVCodecContext *avctx,
   2119                                        AVFrameSideData ***sd, int *nb_sd,
   2120                                        enum AVFrameSideDataType type,
   2121                                        AVBufferRef **buf)
   2122 {
   2123    int ret = 0;
   2124 
   2125    if (side_data_pref(avctx, sd, nb_sd, type))
   2126        goto finish;
   2127 
   2128    if (!av_frame_side_data_add(sd, nb_sd, type, buf, 0))
   2129        ret = AVERROR(ENOMEM);
   2130 
   2131 finish:
   2132    av_buffer_unref(buf);
   2133 
   2134    return ret;
   2135 }
   2136 
   2137 int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx,
   2138                                    AVFrame *frame, enum AVFrameSideDataType type,
   2139                                    AVBufferRef **buf)
   2140 {
   2141    return ff_frame_new_side_data_from_buf_ext(avctx,
   2142                                               &frame->side_data, &frame->nb_side_data,
   2143                                               type, buf);
   2144 }
   2145 
/* Allocate mastering display metadata and attach it to the side data
 * list. *mdm is set to NULL (with 0 returned) when existing side data of
 * that type is preferred. */
int ff_decode_mastering_display_new_ext(const AVCodecContext *avctx,
                                        AVFrameSideData ***sd, int *nb_sd,
                                        struct AVMasteringDisplayMetadata **mdm)
{
    AVBufferRef *buf;
    size_t size;

    if (side_data_pref(avctx, sd, nb_sd, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA)) {
        *mdm = NULL;
        return 0;
    }

    *mdm = av_mastering_display_metadata_alloc_size(&size);
    if (!*mdm)
        return AVERROR(ENOMEM);

    /* wrap the allocation in a buffer; the buffer takes ownership */
    buf = av_buffer_create((uint8_t *)*mdm, size, NULL, NULL, 0);
    if (!buf) {
        av_freep(mdm);
        return AVERROR(ENOMEM);
    }

    /* on success the list owns buf; on failure free it (and *mdm with it) */
    if (!av_frame_side_data_add(sd, nb_sd, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
                                &buf, 0)) {
        *mdm = NULL;
        av_buffer_unref(&buf);
        return AVERROR(ENOMEM);
    }

    return 0;
}
   2177 
   2178 int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame,
   2179                                    AVMasteringDisplayMetadata **mdm)
   2180 {
   2181    if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data,
   2182                       AV_FRAME_DATA_MASTERING_DISPLAY_METADATA)) {
   2183        *mdm = NULL;
   2184        return 0;
   2185    }
   2186 
   2187    *mdm = av_mastering_display_metadata_create_side_data(frame);
   2188    return *mdm ? 0 : AVERROR(ENOMEM);
   2189 }
   2190 
/* Allocate content light level metadata and attach it to the side data
 * list. *clm is set to NULL (with 0 returned) when existing side data of
 * that type is preferred. */
int ff_decode_content_light_new_ext(const AVCodecContext *avctx,
                                    AVFrameSideData ***sd, int *nb_sd,
                                    AVContentLightMetadata **clm)
{
    AVBufferRef *buf;
    size_t size;

    if (side_data_pref(avctx, sd, nb_sd, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL)) {
        *clm = NULL;
        return 0;
    }

    *clm = av_content_light_metadata_alloc(&size);
    if (!*clm)
        return AVERROR(ENOMEM);

    /* wrap the allocation in a buffer; the buffer takes ownership */
    buf = av_buffer_create((uint8_t *)*clm, size, NULL, NULL, 0);
    if (!buf) {
        av_freep(clm);
        return AVERROR(ENOMEM);
    }

    /* on success the list owns buf; on failure free it (and *clm with it) */
    if (!av_frame_side_data_add(sd, nb_sd, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
                                &buf, 0)) {
        *clm = NULL;
        av_buffer_unref(&buf);
        return AVERROR(ENOMEM);
    }

    return 0;
}
   2222 
   2223 int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame,
   2224                                AVContentLightMetadata **clm)
   2225 {
   2226    if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data,
   2227                       AV_FRAME_DATA_CONTENT_LIGHT_LEVEL)) {
   2228        *clm = NULL;
   2229        return 0;
   2230    }
   2231 
   2232    *clm = av_content_light_metadata_create_side_data(frame);
   2233    return *clm ? 0 : AVERROR(ENOMEM);
   2234 }
   2235 
   2236 int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
   2237 {
   2238    size_t size;
   2239    const void *pal = av_packet_get_side_data(src, AV_PKT_DATA_PALETTE, &size);
   2240 
   2241    if (pal && size == AVPALETTE_SIZE) {
   2242        memcpy(dst, pal, AVPALETTE_SIZE);
   2243        return 1;
   2244    } else if (pal) {
   2245        av_log(logctx, AV_LOG_ERROR,
   2246               "Palette size %"SIZE_SPECIFIER" is wrong\n", size);
   2247    }
   2248    return 0;
   2249 }
   2250 
   2251 int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
   2252 {
   2253    const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
   2254 
   2255    if (!hwaccel || !hwaccel->frame_priv_data_size)
   2256        return 0;
   2257 
   2258    av_assert0(!*hwaccel_picture_private);
   2259 
   2260    if (hwaccel->free_frame_priv) {
   2261        AVHWFramesContext *frames_ctx;
   2262 
   2263        if (!avctx->hw_frames_ctx)
   2264            return AVERROR(EINVAL);
   2265 
   2266        frames_ctx = (AVHWFramesContext *) avctx->hw_frames_ctx->data;
   2267        *hwaccel_picture_private = av_refstruct_alloc_ext(hwaccel->frame_priv_data_size, 0,
   2268                                                          frames_ctx->device_ctx,
   2269                                                          hwaccel->free_frame_priv);
   2270    } else {
   2271        *hwaccel_picture_private = av_refstruct_allocz(hwaccel->frame_priv_data_size);
   2272    }
   2273 
   2274    if (!*hwaccel_picture_private)
   2275        return AVERROR(ENOMEM);
   2276 
   2277    return 0;
   2278 }
   2279 
   2280 void ff_decode_flush_buffers(AVCodecContext *avctx)
   2281 {
   2282    AVCodecInternal *avci = avctx->internal;
   2283    DecodeContext     *dc = decode_ctx(avci);
   2284 
   2285    av_packet_unref(avci->last_pkt_props);
   2286    av_packet_unref(avci->in_pkt);
   2287 
   2288    dc->pts_correction_last_pts =
   2289    dc->pts_correction_last_dts = INT64_MIN;
   2290 
   2291    if (avci->bsf)
   2292        av_bsf_flush(avci->bsf);
   2293 
   2294    dc->nb_draining_errors = 0;
   2295    dc->draining_started   = 0;
   2296 }
   2297 
/* Allocate the decoder's AVCodecInternal; the actual allocation is a
 * DecodeContext, accessed elsewhere via decode_ctx(). */
AVCodecInternal *ff_decode_internal_alloc(void)
{
    return av_mallocz(sizeof(DecodeContext));
}
   2302 
/* Sync decoder-internal state from src to dst (currently only the
 * LCEVC context reference). */
void ff_decode_internal_sync(AVCodecContext *dst, const AVCodecContext *src)
{
    const DecodeContext *src_dc = decode_ctx(src->internal);
    DecodeContext *dst_dc = decode_ctx(dst->internal);

    av_refstruct_replace(&dst_dc->lcevc, src_dc->lcevc);
}
   2310 
   2311 void ff_decode_internal_uninit(AVCodecContext *avctx)
   2312 {
   2313    AVCodecInternal *avci = avctx->internal;
   2314    DecodeContext *dc = decode_ctx(avci);
   2315 
   2316    av_refstruct_unref(&dc->lcevc);
   2317 }