pass2_strategy.c (180457B)
1 /* 2 * Copyright (c) 2019, Alliance for Open Media. All rights reserved. 3 * 4 * This source code is subject to the terms of the BSD 2 Clause License and 5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License 6 * was not distributed with this source code in the LICENSE file, you can 7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open 8 * Media Patent License 1.0 was not distributed with this source code in the 9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 10 */ 11 12 /*!\defgroup gf_group_algo Golden Frame Group 13 * \ingroup high_level_algo 14 * Algorithms regarding determining the length of GF groups and defining GF 15 * group structures. 16 * @{ 17 */ 18 /*! @} - end defgroup gf_group_algo */ 19 20 #include <assert.h> 21 #include <limits.h> 22 #include <stdint.h> 23 24 #include "aom_dsp/aom_dsp_common.h" 25 #include "aom_mem/aom_mem.h" 26 #include "config/aom_config.h" 27 #include "config/aom_scale_rtcd.h" 28 29 #include "aom/aom_codec.h" 30 #include "aom/aom_encoder.h" 31 32 #include "av1/common/av1_common_int.h" 33 34 #include "av1/encoder/encoder.h" 35 #include "av1/encoder/firstpass.h" 36 #include "av1/encoder/gop_structure.h" 37 #include "av1/encoder/pass2_strategy.h" 38 #include "av1/encoder/ratectrl.h" 39 #include "av1/encoder/rc_utils.h" 40 #include "av1/encoder/temporal_filter.h" 41 #if CONFIG_THREE_PASS 42 #include "av1/encoder/thirdpass.h" 43 #endif 44 #include "av1/encoder/tpl_model.h" 45 #include "av1/encoder/encode_strategy.h" 46 47 #define DEFAULT_KF_BOOST 2300 48 #define DEFAULT_GF_BOOST 2000 49 #define GROUP_ADAPTIVE_MAXQ 1 50 51 static void init_gf_stats(GF_GROUP_STATS *gf_stats); 52 #if CONFIG_THREE_PASS 53 static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params, 54 int is_final_pass); 55 #endif 56 57 // Calculate an active area of the image that discounts formatting 58 // bars and partially discounts other 0 energy areas. 
#define MIN_ACTIVE_AREA 0.5
#define MAX_ACTIVE_AREA 1.0
static double calculate_active_area(const FRAME_INFO *frame_info,
                                    const FIRSTPASS_STATS *this_frame) {
  // Start from the full frame and subtract the first-pass intra-skip
  // proportion at half weight, plus the inactive (e.g. letterbox) rows at
  // double weight relative to the frame's total MB rows.
  const double active_pct =
      1.0 -
      ((this_frame->intra_skip_pct / 2) +
       ((this_frame->inactive_zone_rows * 2) / (double)frame_info->mb_rows));
  return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
}

// Calculate a modified Error used in distributing bits between easier and
// harder frames.
#define ACT_AREA_CORRECTION 0.5
static double calculate_modified_err_new(const FRAME_INFO *frame_info,
                                         const FIRSTPASS_STATS *total_stats,
                                         const FIRSTPASS_STATS *this_stats,
                                         int vbrbias, double modified_error_min,
                                         double modified_error_max) {
  if (total_stats == NULL) {
    return 0;
  }
  // Average first-pass weight and weighted average coded error per frame
  // across the whole clip.
  const double av_weight = total_stats->weight / total_stats->count;
  const double av_err =
      (total_stats->coded_error * av_weight) / total_stats->count;
  // Raise this frame's (weighted) error relative to the clip average to a
  // power derived from the vbrbias setting, shifting bits towards or away
  // from harder frames.
  double modified_error =
      av_err * pow(this_stats->coded_error * this_stats->weight /
                       DOUBLE_DIVIDE_CHECK(av_err),
                   vbrbias / 100.0);

  // Correction for active area. Frames with a reduced active area
  // (eg due to formatting bars) have a higher error per mb for the
  // remaining active MBs. The correction here assumes that coding
  // 0.5N blocks of complexity 2X is a little easier than coding N
  // blocks of complexity X.
  modified_error *=
      pow(calculate_active_area(frame_info, this_stats), ACT_AREA_CORRECTION);

  return fclamp(modified_error, modified_error_min, modified_error_max);
}

// Convenience wrapper around calculate_modified_err_new() that pulls the
// bias and clamp limits from the encoder configuration / two-pass context.
static double calculate_modified_err(const FRAME_INFO *frame_info,
                                     const TWO_PASS *twopass,
                                     const AV1EncoderConfig *oxcf,
                                     const FIRSTPASS_STATS *this_frame) {
  const FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;
  return calculate_modified_err_new(
      frame_info, total_stats, this_frame, oxcf->rc_cfg.vbrbias,
      twopass->modified_error_min, twopass->modified_error_max);
}

// Resets the first pass file to the given position using a relative seek from
// the current position.
static void reset_fpf_position(TWO_PASS_FRAME *p_frame,
                               const FIRSTPASS_STATS *position) {
  p_frame->stats_in = position;
}

// Copy the stats entry at the current read position into *fps and advance
// the read position. Returns EOF once the end of the buffer is reached,
// otherwise 1.
static int input_stats(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                       FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;

  *fps = *p_frame->stats_in;
  ++p_frame->stats_in;
  return 1;
}

// Variant of input_stats() used with look-ahead processing: instead of
// advancing the read pointer, the consumed entry is dropped by shifting the
// remaining entries down one slot and shrinking the buffer end.
static int input_stats_lap(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
                           FIRSTPASS_STATS *fps) {
  if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;

  *fps = *p_frame->stats_in;
  /* Move old stats[0] out to accommodate for next frame stats */
  memmove(p->frame_stats_arr[0], p->frame_stats_arr[1],
          (p->stats_buf_ctx->stats_in_end - p_frame->stats_in - 1) *
              sizeof(FIRSTPASS_STATS));
  p->stats_buf_ctx->stats_in_end--;
  return 1;
}

// Read frame stats at an offset from the current position.
static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p,
                                               const TWO_PASS_FRAME *p_frame,
                                               int offset) {
  // Returns NULL when the offset lands outside [stats_in_start,
  // stats_in_end); otherwise a pointer into the stats buffer (not a copy).
  if ((offset >= 0 &&
       p_frame->stats_in + offset >= p->stats_buf_ctx->stats_in_end) ||
      (offset < 0 &&
       p_frame->stats_in + offset < p->stats_buf_ctx->stats_in_start)) {
    return NULL;
  }

  return &p_frame->stats_in[offset];
}

// This function returns the maximum target rate per frame.
static int frame_max_bits(const RATE_CONTROL *rc,
                          const AV1EncoderConfig *oxcf) {
  // vbrmax_section is a percentage of the average frame bandwidth; the
  // result is clamped to [0, max_frame_bandwidth].
  int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
                      (int64_t)oxcf->rc_cfg.vbrmax_section) /
                     100;
  if (max_bits < 0)
    max_bits = 0;
  else if (max_bits > rc->max_frame_bandwidth)
    max_bits = rc->max_frame_bandwidth;

  return (int)max_bits;
}

// Power-term lookup indexed by (qindex >> 5); interpolated linearly below.
static const double q_pow_term[(QINDEX_RANGE >> 5) + 1] = { 0.65, 0.70, 0.75,
                                                            0.80, 0.85, 0.90,
                                                            0.95, 0.95, 0.95 };
#define ERR_DIVISOR 96.0
static double calc_correction_factor(double err_per_mb, int q) {
  const double error_term = err_per_mb / ERR_DIVISOR;
  const int index = q >> 5;
  // Adjustment to power term based on qindex
  const double power_term =
      q_pow_term[index] +
      (((q_pow_term[index + 1] - q_pow_term[index]) * (q % 32)) / 32.0);
  assert(error_term >= 0.0);
  return fclamp(pow(error_term, power_term), 0.05, 5.0);
}

// Based on history adjust expectations of bits per macroblock.
static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;

  // Based on recent history adjust expectations of bits per macroblock.
  // Damping is stronger (larger damp_fac) when the tolerance is loose;
  // adj_limit bounds how far the factor may move in one update.
  double damp_fac = AOMMAX(5.0, rate_err_tol / 10.0);
  double rate_err_factor = 1.0;
  const double adj_limit = AOMMAX(0.2, (double)(100 - rate_err_tol) / 200.0);
  const double min_fac = 1.0 - adj_limit;
  const double max_fac = 1.0 + adj_limit;

#if CONFIG_THREE_PASS
  // In three-pass mode, seed bpm_factor from the recorded second-pass frame
  // info (actual vs allocated bits), but only move it further away from 1.0,
  // never back towards it.
  if (cpi->third_pass_ctx && cpi->third_pass_ctx->frame_info_count > 0) {
    int64_t actual_bits = 0;
    int64_t target_bits = 0;
    double factor = 0.0;
    int count = 0;
    for (int i = 0; i < cpi->third_pass_ctx->frame_info_count; i++) {
      actual_bits += cpi->third_pass_ctx->frame_info[i].actual_bits;
      target_bits += cpi->third_pass_ctx->frame_info[i].bits_allocated;
      factor += cpi->third_pass_ctx->frame_info[i].bpm_factor;
      count++;
    }

    if (count == 0) {
      factor = 1.0;
    } else {
      factor /= (double)count;
    }

    factor *= (double)actual_bits / DOUBLE_DIVIDE_CHECK((double)target_bits);

    if ((twopass->bpm_factor <= 1 && factor < twopass->bpm_factor) ||
        (twopass->bpm_factor >= 1 && factor > twopass->bpm_factor)) {
      twopass->bpm_factor = fclamp(factor, min_fac, max_fac);
    }
  }
#endif  // CONFIG_THREE_PASS

  // Snapshot the rate-control state used below; may be overridden by the
  // FPMT simulation block.
  int err_estimate = p_rc->rate_error_estimate;
  int64_t bits_left = twopass->bits_left;
  int64_t total_actual_bits = p_rc->total_actual_bits;
  int64_t bits_off_target = p_rc->vbr_bits_off_target;
  double rolling_arf_group_actual_bits =
      (double)twopass->rolling_arf_group_actual_bits;
  double rolling_arf_group_target_bits =
      (double)twopass->rolling_arf_group_target_bits;

#if CONFIG_FPMT_TEST
  // When simulating frame-parallel encode, substitute the "temp" copies of
  // the rate-control state for frames at parallel level > 0.
  const int is_parallel_frame =
      cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 ? 1 : 0;
  const int simulate_parallel_frame =
      cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE
          ? is_parallel_frame
          : 0;
  total_actual_bits = simulate_parallel_frame ? p_rc->temp_total_actual_bits
                                              : p_rc->total_actual_bits;
  bits_off_target = simulate_parallel_frame ? p_rc->temp_vbr_bits_off_target
                                            : p_rc->vbr_bits_off_target;
  bits_left =
      simulate_parallel_frame ? p_rc->temp_bits_left : twopass->bits_left;
  rolling_arf_group_target_bits =
      (double)(simulate_parallel_frame
                   ? p_rc->temp_rolling_arf_group_target_bits
                   : twopass->rolling_arf_group_target_bits);
  rolling_arf_group_actual_bits =
      (double)(simulate_parallel_frame
                   ? p_rc->temp_rolling_arf_group_actual_bits
                   : twopass->rolling_arf_group_actual_bits);
  err_estimate = simulate_parallel_frame ? p_rc->temp_rate_error_estimate
                                         : p_rc->rate_error_estimate;
#endif

  if (p_rc->bits_off_target && total_actual_bits > 0) {
    if (cpi->ppi->lap_enabled) {
      rate_err_factor = rolling_arf_group_actual_bits /
                        DOUBLE_DIVIDE_CHECK(rolling_arf_group_target_bits);
    } else {
      rate_err_factor = 1.0 - ((double)(bits_off_target) /
                               AOMMAX(total_actual_bits, bits_left));
    }

    // Adjustment is damped if this is 1 pass with look ahead processing
    // (as there are only ever a few frames of data) and for all but the first
    // GOP in normal two pass.
    if ((twopass->bpm_factor != 1.0) || cpi->ppi->lap_enabled) {
      rate_err_factor = 1.0 + ((rate_err_factor - 1.0) / damp_fac);
    }
    rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor));
  }

  // Is the rate control trending in the right direction. Only make
  // an adjustment if things are getting worse.
  if ((rate_err_factor < 1.0 && err_estimate >= 0) ||
      (rate_err_factor > 1.0 && err_estimate <= 0)) {
    twopass->bpm_factor *= rate_err_factor;
    if (rate_err_tol >= 100) {
      twopass->bpm_factor =
          AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
    } else {
      twopass->bpm_factor = AOMMAX(0.1, AOMMIN(10.0, twopass->bpm_factor));
    }
  }
}

// Numerator of the bits-per-mb model: grows linearly with the rate error
// tolerance over the range [25, 100].
static int qbpm_enumerator(int rate_err_tol) {
  return 1200000 + ((300000 * AOMMIN(75, AOMMAX(rate_err_tol - 25, 0))) / 75);
}

// Similar to find_qindex_by_rate() function in ratectrl.c, but includes
// calculation of a correction_factor. Binary search for the lowest qindex
// in [best_qindex, worst_qindex] whose modelled bits/mb does not exceed
// desired_bits_per_mb.
static int find_qindex_by_rate_with_correction(
    uint64_t desired_bits_per_mb, aom_bit_depth_t bit_depth,
    double error_per_mb, double group_weight_factor, int rate_err_tol,
    int best_qindex, int worst_qindex) {
  assert(best_qindex <= worst_qindex);
  int low = best_qindex;
  int high = worst_qindex;

  while (low < high) {
    const int mid = (low + high) >> 1;
    const double mid_factor = calc_correction_factor(error_per_mb, mid);
    const double q = av1_convert_qindex_to_q(mid, bit_depth);
    const int enumerator = qbpm_enumerator(rate_err_tol);
    const uint64_t mid_bits_per_mb =
        (uint64_t)((enumerator * mid_factor * group_weight_factor) / q);

    if (mid_bits_per_mb > desired_bits_per_mb) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  return low;
}

/*!\brief Choose a target maximum Q for a group of frames
 *
 * \ingroup rate_control
 *
 * This function is used to estimate a suitable maximum Q for a
 * group of frames. Initially it is called to get a crude estimate
 * for the whole clip. It is then called for each ARF/GF group to get
 * a revised estimate for that group.
 *
 * \param[in]    cpi                 Top-level encoder structure
 * \param[in]    av_frame_err        The average per frame coded error score
 *                                   for frames making up this section/group.
 * \param[in]    inactive_zone       Used to mask off / ignore part of the
 *                                   frame. The most common use case is where
 *                                   a wide format video (e.g. 16:9) is
 *                                   letter-boxed into a more square format.
 *                                   Here we want to ignore the bands at the
 *                                   top and bottom.
 * \param[in]    av_target_bandwidth The target bits per frame
 *
 * \return The maximum Q for frames in the group.
 */
static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err,
                                     double inactive_zone,
                                     int av_target_bandwidth) {
  const RATE_CONTROL *const rc = &cpi->rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
  inactive_zone = fclamp(inactive_zone, 0.0, 0.9999);

  if (av_target_bandwidth <= 0) {
    return rc->worst_quality;  // Highest value allowed
  } else {
    const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                            ? cpi->initial_mbs
                            : cpi->common.mi_params.MBs;
    const int active_mbs = AOMMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
    const double av_err_per_mb = av_frame_err / (1.0 - inactive_zone);
    const uint64_t target_norm_bits_per_mb =
        ((uint64_t)av_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
    int rate_err_tol = AOMMIN(rc_cfg->under_shoot_pct, rc_cfg->over_shoot_pct);

    // Update bpm correction factor based on previous GOP rate error.
    twopass_update_bpm_factor(cpi, rate_err_tol);

    // Try and pick a max Q that will be high enough to encode the
    // content at the given rate.
    int q = find_qindex_by_rate_with_correction(
        target_norm_bits_per_mb, cpi->common.seq_params->bit_depth,
        av_err_per_mb, cpi->ppi->twopass.bpm_factor, rate_err_tol,
        rc->best_quality, rc->worst_quality);

    // Restriction on active max q for constrained quality mode.
    if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level);
    return q;
  }
}

#define INTRA_PART 0.005
#define DEFAULT_DECAY_LIMIT 0.75
#define LOW_SR_DIFF_TRHESH 0.01
#define NCOUNT_FRAME_II_THRESH 5.0
#define LOW_CODED_ERR_PER_MB 0.01

/* This function considers how the quality of prediction may be deteriorating
 * with distance. It compares the coded error for the last frame and the
 * second reference frame (usually two frames old) and also applies a factor
 * based on the extent of INTRA coding.
 *
 * The decay factor is then used to reduce the contribution of frames further
 * from the alt-ref or golden frame, to the bitframe boost calculation for that
 * alt-ref or golden frame.
 */
static double get_sr_decay_rate(const FIRSTPASS_STATS *frame) {
  double sr_diff = (frame->sr_coded_error - frame->coded_error);
  double sr_decay = 1.0;
  double modified_pct_inter;
  double modified_pcnt_intra;

  modified_pct_inter = frame->pcnt_inter;
  // Discount neutral (ambiguous/flat) blocks from the inter percentage when
  // the frame is not trivially easy and the intra/coded error ratio is low.
  if ((frame->coded_error > LOW_CODED_ERR_PER_MB) &&
      ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
       (double)NCOUNT_FRAME_II_THRESH)) {
    modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral;
  }
  modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);

  if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
    double sr_diff_part = ((sr_diff * 0.25) / frame->intra_error);
    sr_decay = 1.0 - sr_diff_part - (INTRA_PART * modified_pcnt_intra);
  }
  return AOMMAX(sr_decay, DEFAULT_DECAY_LIMIT);
}

// This function gives an estimate of how badly we believe the prediction
// quality is decaying from frame to frame.
static double get_zero_motion_factor(const FIRSTPASS_STATS *frame) {
  // Fraction of blocks that are inter coded but have no motion, capped by
  // the second-ref decay rate.
  const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
  double sr_decay = get_sr_decay_rate(frame);
  return AOMMIN(sr_decay, zero_motion_pct);
}

#define DEFAULT_ZM_FACTOR 0.5
// Per-frame prediction decay rate: the second-ref decay rate partially
// offset by the proportion of zero-motion blocks.
static double get_prediction_decay_rate(const FIRSTPASS_STATS *frame_stats) {
  const double sr_decay_rate = get_sr_decay_rate(frame_stats);
  double zero_motion_factor =
      DEFAULT_ZM_FACTOR * (frame_stats->pcnt_inter - frame_stats->pcnt_motion);

  // Clamp value to range 0.0 to 1.0
  // This should happen anyway if input values are sensibly clamped but checked
  // here just in case.
  if (zero_motion_factor > 1.0)
    zero_motion_factor = 1.0;
  else if (zero_motion_factor < 0.0)
    zero_motion_factor = 0.0;

  return AOMMAX(zero_motion_factor,
                (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
}

// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
static int detect_transition_to_still(const FIRSTPASS_INFO *firstpass_info,
                                      int next_stats_index,
                                      const int min_gf_interval,
                                      const int frame_interval,
                                      const int still_interval,
                                      const double loop_decay_rate,
                                      const double last_decay_rate) {
  // Break clause to detect very still sections after motion
  // For example a static image after a fade or other transition
  // instead of a clean scene cut.
  if (frame_interval > min_gf_interval && loop_decay_rate >= 0.999 &&
      last_decay_rate < 0.9) {
    int stats_left =
        av1_firstpass_info_future_count(firstpass_info, next_stats_index);
    if (stats_left >= still_interval) {
      int j;
      // Look ahead a few frames to see if static condition persists...
      for (j = 0; j < still_interval; ++j) {
        const FIRSTPASS_STATS *stats =
            av1_firstpass_info_peek(firstpass_info, next_stats_index + j);
        if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
      }
      // Only if it does do we signal a transition to still.
      return j == still_interval;
    }
  }
  return 0;
}

// This function detects a flash through the high relative pcnt_second_ref
// score in the frame following a flash frame. The offset passed in should
// reflect this.
static int detect_flash(const TWO_PASS *twopass,
                        const TWO_PASS_FRAME *twopass_frame, const int offset) {
  const FIRSTPASS_STATS *const next_frame =
      read_frame_stats(twopass, twopass_frame, offset);

  // What we are looking for here is a situation where there is a
  // brief break in prediction (such as a flash) but subsequent frames
  // are reasonably well predicted by an earlier (pre flash) frame.
  // The recovery after a flash is indicated by a high pcnt_second_ref
  // compared to pcnt_inter.
  return next_frame != NULL &&
         next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
         next_frame->pcnt_second_ref >= 0.5;
}

// Update the motion related elements to the GF arf boost calculation.
static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
                                          GF_GROUP_STATS *gf_stats, double f_w,
                                          double f_h) {
  const double pct = stats->pcnt_motion;

  // Accumulate Motion In/Out of frame stats.
  gf_stats->this_frame_mv_in_out = stats->mv_in_out_count * pct;
  gf_stats->mv_in_out_accumulator += gf_stats->this_frame_mv_in_out;
  gf_stats->abs_mv_in_out_accumulator += fabs(gf_stats->this_frame_mv_in_out);

  // Accumulate a measure of how uniform (or conversely how random) the motion
  // field is (a ratio of abs(mv) / mv).
  if (pct > 0.05) {
    const double mvr_ratio =
        fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
    const double mvc_ratio =
        fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));

    gf_stats->mv_ratio_accumulator +=
        pct *
        (mvr_ratio < stats->mvr_abs * f_h ? mvr_ratio : stats->mvr_abs * f_h);
    gf_stats->mv_ratio_accumulator +=
        pct *
        (mvc_ratio < stats->mvc_abs * f_w ? mvc_ratio : stats->mvc_abs * f_w);
  }
}

// Add this frame's contribution to the GF group error / inactive-area totals.
static void accumulate_this_frame_stats(const FIRSTPASS_STATS *stats,
                                        const double mod_frame_err,
                                        GF_GROUP_STATS *gf_stats) {
  gf_stats->gf_group_err += mod_frame_err;
#if GROUP_ADAPTIVE_MAXQ
  gf_stats->gf_group_raw_error += stats->coded_error;
#endif
  gf_stats->gf_group_skip_pct += stats->intra_skip_pct;
  gf_stats->gf_group_inactive_zone_rows += stats->inactive_zone_rows;
}

// Fold the next frame's stats into the running GF group accumulators,
// including the chained prediction decay (skipped across detected flashes).
static void accumulate_next_frame_stats(const FIRSTPASS_STATS *stats,
                                        const int flash_detected,
                                        const int frames_since_key,
                                        const int cur_idx,
                                        GF_GROUP_STATS *gf_stats, int f_w,
                                        int f_h) {
  accumulate_frame_motion_stats(stats, gf_stats, f_w, f_h);
  // sum up the metric values of current gf group
  gf_stats->avg_sr_coded_error += stats->sr_coded_error;
  gf_stats->avg_pcnt_second_ref += stats->pcnt_second_ref;
  gf_stats->avg_new_mv_count += stats->new_mv_count;
  gf_stats->avg_wavelet_energy += stats->frame_avg_wavelet_energy;
  if (fabs(stats->raw_error_stdev) > 0.000001) {
    gf_stats->non_zero_stdev_count++;
    gf_stats->avg_raw_err_stdev += stats->raw_error_stdev;
  }

  // Accumulate the effect of prediction quality decay
  if (!flash_detected) {
    gf_stats->last_loop_decay_rate = gf_stats->loop_decay_rate;
    gf_stats->loop_decay_rate = get_prediction_decay_rate(stats);

    gf_stats->decay_accumulator =
        gf_stats->decay_accumulator * gf_stats->loop_decay_rate;

    // Monitor for static sections.
    if ((frames_since_key + cur_idx - 1) > 1) {
      gf_stats->zero_motion_accumulator = AOMMIN(
          gf_stats->zero_motion_accumulator, get_zero_motion_factor(stats));
    }
  }
}

// Convert the accumulated sums in gf_stats into per-frame averages.
static void average_gf_stats(const int total_frame, GF_GROUP_STATS *gf_stats) {
  if (total_frame) {
    gf_stats->avg_sr_coded_error /= total_frame;
    gf_stats->avg_pcnt_second_ref /= total_frame;
    gf_stats->avg_new_mv_count /= total_frame;
    gf_stats->avg_wavelet_energy /= total_frame;
  }

  if (gf_stats->non_zero_stdev_count)
    gf_stats->avg_raw_err_stdev /= gf_stats->non_zero_stdev_count;
}

#define BOOST_FACTOR 12.5
static double baseline_err_per_mb(const FRAME_INFO *frame_info) {
  unsigned int screen_area = frame_info->frame_height * frame_info->frame_width;

  // Use a different error per mb factor for calculating boost for
  // different formats.
  if (screen_area <= 640 * 360) {
    return 500.0;
  } else {
    return 1000.0;
  }
}

// Compute the boost contribution of a single frame, based on its intra/coded
// error ratio, the recent inter-frame Q, and net motion in/out of the frame.
static double calc_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
                               const FRAME_INFO *frame_info,
                               const FIRSTPASS_STATS *this_frame,
                               double this_frame_mv_in_out, double max_boost) {
  double frame_boost;
  const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
                                            frame_info->bit_depth);
  const double boost_q_correction = AOMMIN((0.5 + (lq * 0.015)), 1.5);
  const double active_area = calculate_active_area(frame_info, this_frame);

  // Underlying boost factor is based on inter error ratio.
  frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
                       this_frame->intra_error * active_area) /
                DOUBLE_DIVIDE_CHECK(this_frame->coded_error);
  frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction;

  // Increase boost for frames where new data coming into frame (e.g. zoom out).
  // Slightly reduce boost if there is a net balance of motion out of the frame
  // (zoom in). The range for this_frame_mv_in_out is -1.0 to +1.0.
  if (this_frame_mv_in_out > 0.0)
    frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
  // In the extreme case the boost is halved.
  else
    frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);

  return AOMMIN(frame_boost, max_boost * boost_q_correction);
}

// Key frame variant of calc_frame_boost(): tracks a running second-ref error
// accumulator so the boost reflects how fast coded error grows over time.
static double calc_kf_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
                                  const FRAME_INFO *frame_info,
                                  const FIRSTPASS_STATS *this_frame,
                                  double *sr_accumulator, double max_boost) {
  double frame_boost;
  const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
                                            frame_info->bit_depth);
  const double boost_q_correction = AOMMIN((0.50 + (lq * 0.015)), 2.00);
  const double active_area = calculate_active_area(frame_info, this_frame);

  // Underlying boost factor is based on inter error ratio.
  frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
                       this_frame->intra_error * active_area) /
                DOUBLE_DIVIDE_CHECK(
                    (this_frame->coded_error + *sr_accumulator) * active_area);

  // Update the accumulator for second ref error difference.
  // This is intended to give an indication of how much the coded error is
  // increasing over time.
  *sr_accumulator += (this_frame->sr_coded_error - this_frame->coded_error);
  *sr_accumulator = AOMMAX(0.0, *sr_accumulator);

  // Q correction and scaling
  // The 40.0 value here is an experimentally derived baseline minimum.
  // This value is in line with the minimum per frame boost in the alt_ref
  // boost calculation.
  frame_boost = ((frame_boost + 40.0) * boost_q_correction);

  return AOMMIN(frame_boost, max_boost * boost_q_correction);
}

// Project a boost computed over fewer stats than requested up to the full
// frames_to_project horizon using the gfu boost projection factors.
static int get_projected_gfu_boost(const PRIMARY_RATE_CONTROL *p_rc,
                                   int gfu_boost, int frames_to_project,
                                   int num_stats_used_for_gfu_boost) {
  /*
   * If frames_to_project is equal to num_stats_used_for_gfu_boost,
   * it means that gfu_boost was calculated over frames_to_project to
   * begin with(ie; all stats required were available), hence return
   * the original boost.
   */
  if (num_stats_used_for_gfu_boost >= frames_to_project) return gfu_boost;

  double min_boost_factor = sqrt(p_rc->baseline_gf_interval);
  // Get the current tpl factor (number of frames = frames_to_project).
  double tpl_factor = av1_get_gfu_boost_projection_factor(
      min_boost_factor, MAX_GFUBOOST_FACTOR, frames_to_project);
  // Get the tpl factor when number of frames = num_stats_used_for_prior_boost.
  double tpl_factor_num_stats = av1_get_gfu_boost_projection_factor(
      min_boost_factor, MAX_GFUBOOST_FACTOR, num_stats_used_for_gfu_boost);
  int projected_gfu_boost =
      (int)rint((tpl_factor * gfu_boost) / tpl_factor_num_stats);
  return projected_gfu_boost;
}

#define GF_MAX_BOOST 90.0
#define GF_MIN_BOOST 50
#define MIN_DECAY_FACTOR 0.01
// Compute the ARF boost by accumulating decayed per-frame boosts forward
// over f_frames and backward over b_frames from the proposed ARF position.
int av1_calc_arf_boost(const TWO_PASS *twopass,
                       const TWO_PASS_FRAME *twopass_frame,
                       const PRIMARY_RATE_CONTROL *p_rc, FRAME_INFO *frame_info,
                       int offset, int f_frames, int b_frames,
                       int *num_fpstats_used, int *num_fpstats_required,
                       int project_gfu_boost) {
  int i;
  GF_GROUP_STATS gf_stats;
  init_gf_stats(&gf_stats);
  double boost_score = (double)NORMAL_BOOST;
  int arf_boost;
  int flash_detected = 0;
  if (num_fpstats_used) *num_fpstats_used = 0;

  // Search forward from the proposed arf/next gf position.
  for (i = 0; i < f_frames; ++i) {
    const FIRSTPASS_STATS *this_frame =
        read_frame_stats(twopass, twopass_frame, i + offset);
    if (this_frame == NULL) break;

    // Update the motion related elements to the boost calculation.
    accumulate_frame_motion_stats(this_frame, &gf_stats,
                                  frame_info->frame_width,
                                  frame_info->frame_height);

    // We want to discount the flash frame itself and the recovery
    // frame that follows as both will have poor scores.
    flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
                     detect_flash(twopass, twopass_frame, i + offset + 1);

    // Accumulate the effect of prediction quality decay.
    if (!flash_detected) {
      gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
      gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
                                       ? MIN_DECAY_FACTOR
                                       : gf_stats.decay_accumulator;
    }

    boost_score +=
        gf_stats.decay_accumulator *
        calc_frame_boost(p_rc, frame_info, this_frame,
                         gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
    if (num_fpstats_used) (*num_fpstats_used)++;
  }

  arf_boost = (int)boost_score;

  // Reset for backward looking loop.
  boost_score = 0.0;
  init_gf_stats(&gf_stats);
  // Search backward towards last gf position.
  for (i = -1; i >= -b_frames; --i) {
    const FIRSTPASS_STATS *this_frame =
        read_frame_stats(twopass, twopass_frame, i + offset);
    if (this_frame == NULL) break;

    // Update the motion related elements to the boost calculation.
    accumulate_frame_motion_stats(this_frame, &gf_stats,
                                  frame_info->frame_width,
                                  frame_info->frame_height);

    // We want to discount the flash frame itself and the recovery
    // frame that follows as both will have poor scores.
    flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
                     detect_flash(twopass, twopass_frame, i + offset + 1);

    // Cumulative effect of prediction quality decay.
    if (!flash_detected) {
      gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
      gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
                                       ? MIN_DECAY_FACTOR
                                       : gf_stats.decay_accumulator;
    }

    boost_score +=
        gf_stats.decay_accumulator *
        calc_frame_boost(p_rc, frame_info, this_frame,
                         gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
    if (num_fpstats_used) (*num_fpstats_used)++;
  }
  arf_boost += (int)boost_score;

  if (project_gfu_boost) {
    assert(num_fpstats_required != NULL);
    assert(num_fpstats_used != NULL);
    *num_fpstats_required = f_frames + b_frames;
    arf_boost = get_projected_gfu_boost(p_rc, arf_boost, *num_fpstats_required,
                                        *num_fpstats_used);
  }

  // Enforce a minimum boost proportional to the number of frames examined.
  if (arf_boost < ((b_frames + f_frames) * GF_MIN_BOOST))
    arf_boost = ((b_frames + f_frames) * GF_MIN_BOOST);

  return arf_boost;
}

// Calculate a section intra ratio used in setting max loop filter.
static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin,
                                         const FIRSTPASS_STATS *end,
                                         int section_length) {
  const FIRSTPASS_STATS *s = begin;
  double intra_error = 0.0;
  double coded_error = 0.0;
  int i = 0;

  while (s < end && i < section_length) {
    intra_error += s->intra_error;
    coded_error += s->coded_error;
    ++s;
    ++i;
  }

  return (int)(intra_error / DOUBLE_DIVIDE_CHECK(coded_error));
}

/*!\brief Calculates the bit target for this GF/ARF group
 *
 * \ingroup rate_control
 *
 * Calculates the total bits to allocate in this GF/ARF group.
 *
 * \param[in]    cpi           Top-level encoder structure
 * \param[in]    gf_group_err  Cumulative coded error score for the
 *                             frames making up this group.
795 * 796 * \return The target total number of bits for this GF/ARF group. 797 */ 798 static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi, 799 double gf_group_err) { 800 const RATE_CONTROL *const rc = &cpi->rc; 801 const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; 802 const TWO_PASS *const twopass = &cpi->ppi->twopass; 803 const int max_bits = frame_max_bits(rc, &cpi->oxcf); 804 int64_t total_group_bits; 805 806 // Calculate the bits to be allocated to the group as a whole. 807 if ((twopass->kf_group_bits > 0) && (twopass->kf_group_error_left > 0)) { 808 total_group_bits = (int64_t)(twopass->kf_group_bits * 809 (gf_group_err / twopass->kf_group_error_left)); 810 } else { 811 total_group_bits = 0; 812 } 813 814 // Clamp odd edge cases. 815 total_group_bits = (total_group_bits < 0) ? 0 816 : (total_group_bits > twopass->kf_group_bits) 817 ? twopass->kf_group_bits 818 : total_group_bits; 819 820 // Clip based on user supplied data rate variability limit. 821 if (total_group_bits > (int64_t)max_bits * p_rc->baseline_gf_interval) 822 total_group_bits = (int64_t)max_bits * p_rc->baseline_gf_interval; 823 824 return total_group_bits; 825 } 826 827 // Calculate the number of bits to assign to boosted frames in a group. 828 static int calculate_boost_bits(int frame_count, int boost, 829 int64_t total_group_bits) { 830 int allocation_chunks; 831 832 // return 0 for invalid inputs (could arise e.g. through rounding errors) 833 if (!boost || (total_group_bits <= 0)) return 0; 834 835 if (frame_count <= 0) return (int)(AOMMIN(total_group_bits, INT_MAX)); 836 837 allocation_chunks = (frame_count * 100) + boost; 838 839 // Prevent overflow. 840 if (boost > 1023) { 841 int divisor = boost >> 10; 842 boost /= divisor; 843 allocation_chunks /= divisor; 844 } 845 846 // Calculate the number of extra bits for use in the boosted frame or frames. 
  return AOMMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
                0);
}

// Calculate the boost factor based on the number of bits assigned, i.e. the
// inverse of calculate_boost_bits().
// NOTE(review): divides by (total_group_bits - bits); assumes bits <
// total_group_bits — confirm callers guarantee this (they pass a level-capped
// bits value).
static int calculate_boost_factor(int frame_count, int bits,
                                  int64_t total_group_bits) {
  return (int)(100.0 * frame_count * bits / (total_group_bits - bits));
}

// Reduce the number of bits assigned to keyframe or arf if necessary, to
// prevent bitrate spikes that may break level constraints.
// frame_type: 0: keyframe; 1: arf.
// Returns the (possibly reduced) bits_assigned; also updates p_rc->kf_boost
// or p_rc->gfu_boost to stay consistent with the capped allocation.
static int adjust_boost_bits_for_target_level(const AV1_COMP *const cpi,
                                              RATE_CONTROL *const rc,
                                              int bits_assigned,
                                              int64_t group_bits,
                                              int frame_type) {
  const AV1_COMMON *const cm = &cpi->common;
  const SequenceHeader *const seq_params = cm->seq_params;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const int temporal_layer_id = cm->temporal_layer_id;
  const int spatial_layer_id = cm->spatial_layer_id;
  // Check every operating point this frame belongs to against its target
  // level's bitrate limit.
  for (int index = 0; index < seq_params->operating_points_cnt_minus_1 + 1;
       ++index) {
    if (!is_in_operating_point(seq_params->operating_point_idc[index],
                               temporal_layer_id, spatial_layer_id)) {
      continue;
    }

    const AV1_LEVEL target_level =
        cpi->ppi->level_params.target_seq_level_idx[index];
    if (target_level >= SEQ_LEVELS) continue;

    assert(is_valid_seq_level_idx(target_level));

    const double level_bitrate_limit = av1_get_max_bitrate_for_level(
        target_level, seq_params->tier[0], seq_params->profile);
    const int target_bits_per_frame =
        (int)(level_bitrate_limit / cpi->framerate);
    if (frame_type == 0) {
      // Maximum bits for keyframe is 8 times the target_bits_per_frame.
      const int level_enforced_max_kf_bits = target_bits_per_frame * 8;
      if (bits_assigned > level_enforced_max_kf_bits) {
        const int frames = rc->frames_to_key - 1;
        // Recompute the boost from the capped allocation, then re-derive the
        // allocation so boost and bits stay in sync.
        p_rc->kf_boost = calculate_boost_factor(
            frames, level_enforced_max_kf_bits, group_bits);
        bits_assigned =
            calculate_boost_bits(frames, p_rc->kf_boost, group_bits);
      }
    } else if (frame_type == 1) {
      // Maximum bits for arf is 4 times the target_bits_per_frame.
      const int level_enforced_max_arf_bits = target_bits_per_frame * 4;
      if (bits_assigned > level_enforced_max_arf_bits) {
        p_rc->gfu_boost =
            calculate_boost_factor(p_rc->baseline_gf_interval,
                                   level_enforced_max_arf_bits, group_bits);
        bits_assigned = calculate_boost_bits(p_rc->baseline_gf_interval,
                                             p_rc->gfu_boost, group_bits);
      }
    } else {
      assert(0);
    }
  }

  return bits_assigned;
}

// Allocate bits to each frame in a GF / ARF group
static void allocate_gf_group_bits(GF_GROUP *gf_group,
                                   PRIMARY_RATE_CONTROL *const p_rc,
                                   RATE_CONTROL *const rc,
                                   int64_t gf_group_bits, int gf_arf_bits,
                                   int key_frame, int use_arf) {
  // Per-layer share of the ARF extra bits; indexed by layer depth.
  static const double layer_fraction[MAX_ARF_LAYERS + 1] = { 1.0, 0.70, 0.55,
                                                             0.60, 0.60, 1.0,
                                                             1.0 };
  int64_t total_group_bits = gf_group_bits;
  int base_frame_bits;
  const int gf_group_size = gf_group->size;
  int layer_frames[MAX_ARF_LAYERS + 1] = { 0 };

  // For key frames the frame target rate is already set and it
  // is also the golden frame.
  // === [frame_index == 0] ===
  int frame_index = !!key_frame;

  // Subtract the extra bits set aside for ARF frames from the Group Total
  if (use_arf) total_group_bits -= gf_arf_bits;

  int num_frames =
      AOMMAX(1, p_rc->baseline_gf_interval - (rc->frames_since_key == 0));
  base_frame_bits = (int)(total_group_bits / num_frames);

  // Check the number of frames in each layer in case we have a
  // non standard group length.
944 int max_arf_layer = gf_group->max_layer_depth - 1; 945 for (int idx = frame_index; idx < gf_group_size; ++idx) { 946 if ((gf_group->update_type[idx] == ARF_UPDATE) || 947 (gf_group->update_type[idx] == INTNL_ARF_UPDATE)) { 948 layer_frames[gf_group->layer_depth[idx]]++; 949 } 950 } 951 952 // Allocate extra bits to each ARF layer 953 int i; 954 int layer_extra_bits[MAX_ARF_LAYERS + 1] = { 0 }; 955 assert(max_arf_layer <= MAX_ARF_LAYERS); 956 for (i = 1; i <= max_arf_layer; ++i) { 957 double fraction = (i == max_arf_layer) ? 1.0 : layer_fraction[i]; 958 layer_extra_bits[i] = 959 (int)((gf_arf_bits * fraction) / AOMMAX(1, layer_frames[i])); 960 gf_arf_bits -= (int)(gf_arf_bits * fraction); 961 } 962 963 // Now combine ARF layer and baseline bits to give total bits for each frame. 964 int arf_extra_bits; 965 for (int idx = frame_index; idx < gf_group_size; ++idx) { 966 switch (gf_group->update_type[idx]) { 967 case ARF_UPDATE: 968 case INTNL_ARF_UPDATE: 969 arf_extra_bits = layer_extra_bits[gf_group->layer_depth[idx]]; 970 gf_group->bit_allocation[idx] = 971 (base_frame_bits > INT_MAX - arf_extra_bits) 972 ? INT_MAX 973 : (base_frame_bits + arf_extra_bits); 974 break; 975 case INTNL_OVERLAY_UPDATE: 976 case OVERLAY_UPDATE: gf_group->bit_allocation[idx] = 0; break; 977 default: gf_group->bit_allocation[idx] = base_frame_bits; break; 978 } 979 } 980 981 // Set the frame following the current GOP to 0 bit allocation. For ARF 982 // groups, this next frame will be overlay frame, which is the first frame 983 // in the next GOP. For GF group, next GOP will overwrite the rate allocation. 984 // Setting this frame to use 0 bit (of out the current GOP budget) will 985 // simplify logics in reference frame management. 986 if (gf_group_size < MAX_STATIC_GF_GROUP_LENGTH) 987 gf_group->bit_allocation[gf_group_size] = 0; 988 } 989 990 // Returns true if KF group and GF group both are almost completely static. 
static inline int is_almost_static(double gf_zero_motion, int kf_zero_motion,
                                   int is_lap_enabled) {
  if (is_lap_enabled) {
    /*
     * when LAP enabled kf_zero_motion is not reliable, so use strict
     * constraint on gf_zero_motion.
     */
    return (gf_zero_motion >= 0.999);
  } else {
    return (gf_zero_motion >= 0.995) &&
           (kf_zero_motion >= STATIC_KF_GROUP_THRESH);
  }
}

#define ARF_ABS_ZOOM_THRESH 4.4
// Decide whether the GF group should be cut at frame_index (relative to the
// group start cur_start). Returns 1 to cut, 0 to keep extending the group.
static inline int detect_gf_cut(AV1_COMP *cpi, int frame_index, int cur_start,
                                int flash_detected, int active_max_gf_interval,
                                int active_min_gf_interval,
                                GF_GROUP_STATS *gf_stats) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  AV1_COMMON *const cm = &cpi->common;
  // Motion breakout threshold for loop below depends on image size.
  const double mv_ratio_accumulator_thresh = (cm->height + cm->width) / 4.0;

  if (!flash_detected) {
    // Break clause to detect very still sections after motion. For example,
    // a static image after a fade or other transition.

    // TODO(angiebird): This is a temporary change, we will avoid using
    // twopass_frame.stats_in in the follow-up CL
    int index = (int)(cpi->twopass_frame.stats_in -
                      twopass->stats_buf_ctx->stats_in_start);
    if (detect_transition_to_still(&twopass->firstpass_info, index,
                                   rc->min_gf_interval, frame_index - cur_start,
                                   5, gf_stats->loop_decay_rate,
                                   gf_stats->last_loop_decay_rate)) {
      return 1;
    }
  }

  // Some conditions to breakout after min interval.
  if (frame_index - cur_start >= active_min_gf_interval &&
      // If possible don't break very close to a kf
      (rc->frames_to_key - frame_index >= rc->min_gf_interval) &&
      // Only cut at odd offsets so the group length stays even.
      ((frame_index - cur_start) & 0x01) && !flash_detected &&
      (gf_stats->mv_ratio_accumulator > mv_ratio_accumulator_thresh ||
       gf_stats->abs_mv_in_out_accumulator > ARF_ABS_ZOOM_THRESH)) {
    return 1;
  }

  // If almost totally static, we will not use the max GF length later,
  // so we can continue for more frames.
  if (((frame_index - cur_start) >= active_max_gf_interval + 1) &&
      !is_almost_static(gf_stats->zero_motion_accumulator,
                        twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled)) {
    return 1;
  }
  return 0;
}

// Returns 1 if a shorter GF interval is estimated to perform better, based on
// GF boost and/or TPL statistics (method selected by
// sf.tpl_sf.gop_length_decision_method).
static int is_shorter_gf_interval_better(
    AV1_COMP *cpi, const EncodeFrameParams *frame_params) {
  const RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  int gop_length_decision_method = cpi->sf.tpl_sf.gop_length_decision_method;
  int shorten_gf_interval;

  av1_tpl_preload_rc_estimate(cpi, frame_params);

  if (gop_length_decision_method == 2) {
    // GF group length is decided based on GF boost and tpl stats of ARFs from
    // base layer, (base+1) layer.
    shorten_gf_interval =
        (p_rc->gfu_boost <
         p_rc->num_stats_used_for_gfu_boost * GF_MIN_BOOST * 1.4) &&
        !av1_tpl_setup_stats(cpi, 3, frame_params);
  } else {
    int do_complete_tpl = 1;
    GF_GROUP *const gf_group = &cpi->ppi->gf_group;
    int is_temporal_filter_enabled =
        (rc->frames_since_key > 0 && gf_group->arf_index > -1);

    if (gop_length_decision_method == 1) {
      // Check if tpl stats of ARFs from base layer, (base+1) layer,
      // (base+2) layer can decide the GF group length.
1077 int gop_length_eval = av1_tpl_setup_stats(cpi, 2, frame_params); 1078 1079 if (gop_length_eval != 2) { 1080 do_complete_tpl = 0; 1081 shorten_gf_interval = !gop_length_eval; 1082 } 1083 } 1084 1085 if (do_complete_tpl) { 1086 // Decide GF group length based on complete tpl stats. 1087 shorten_gf_interval = !av1_tpl_setup_stats(cpi, 1, frame_params); 1088 // Tpl stats is reused when the ARF is temporally filtered and GF 1089 // interval is not shortened. 1090 if (is_temporal_filter_enabled && !shorten_gf_interval) { 1091 cpi->skip_tpl_setup_stats = 1; 1092 #if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS 1093 assert(cpi->gf_frame_index == 0); 1094 av1_vbr_rc_update_q_index_list(&cpi->vbr_rc_info, &cpi->ppi->tpl_data, 1095 gf_group, 1096 cpi->common.seq_params->bit_depth); 1097 #endif // CONFIG_BITRATE_ACCURACY 1098 } 1099 } 1100 } 1101 return shorten_gf_interval; 1102 } 1103 1104 #define MIN_SHRINK_LEN 6 // the minimum length of gf if we are shrinking 1105 #define SMOOTH_FILT_LEN 7 1106 #define HALF_FILT_LEN (SMOOTH_FILT_LEN / 2) 1107 #define WINDOW_SIZE 7 1108 #define HALF_WIN (WINDOW_SIZE / 2) 1109 1110 // Smooth filter intra_error and coded_error in firstpass stats. 1111 // If stats[i].is_flash==1, the ith element should not be used in the filtering. 
1112 static void smooth_filter_stats(const FIRSTPASS_STATS *stats, int start_idx, 1113 int last_idx, double *filt_intra_err, 1114 double *filt_coded_err) { 1115 // A 7-tap gaussian smooth filter 1116 static const double smooth_filt[SMOOTH_FILT_LEN] = { 0.006, 0.061, 0.242, 1117 0.383, 0.242, 0.061, 1118 0.006 }; 1119 int i, j; 1120 for (i = start_idx; i <= last_idx; i++) { 1121 double total_wt = 0; 1122 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) { 1123 int idx = clamp(i + j, start_idx, last_idx); 1124 if (stats[idx].is_flash) continue; 1125 1126 filt_intra_err[i] += 1127 smooth_filt[j + HALF_FILT_LEN] * stats[idx].intra_error; 1128 total_wt += smooth_filt[j + HALF_FILT_LEN]; 1129 } 1130 if (total_wt > 0.01) { 1131 filt_intra_err[i] /= total_wt; 1132 } else { 1133 filt_intra_err[i] = stats[i].intra_error; 1134 } 1135 } 1136 for (i = start_idx; i <= last_idx; i++) { 1137 double total_wt = 0; 1138 for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) { 1139 int idx = clamp(i + j, start_idx, last_idx); 1140 // Coded error involves idx and idx - 1. 1141 if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue; 1142 1143 filt_coded_err[i] += 1144 smooth_filt[j + HALF_FILT_LEN] * stats[idx].coded_error; 1145 total_wt += smooth_filt[j + HALF_FILT_LEN]; 1146 } 1147 if (total_wt > 0.01) { 1148 filt_coded_err[i] /= total_wt; 1149 } else { 1150 filt_coded_err[i] = stats[i].coded_error; 1151 } 1152 } 1153 } 1154 1155 // Calculate gradient 1156 static void get_gradient(const double *values, int start, int last, 1157 double *grad) { 1158 if (start == last) { 1159 grad[start] = 0; 1160 return; 1161 } 1162 for (int i = start; i <= last; i++) { 1163 int prev = AOMMAX(i - 1, start); 1164 int next = AOMMIN(i + 1, last); 1165 grad[i] = (values[next] - values[prev]) / (next - prev); 1166 } 1167 } 1168 1169 static int find_next_scenecut(const FIRSTPASS_STATS *const stats_start, 1170 int first, int last) { 1171 // Identify unstable areas caused by scenecuts. 
1172 // Find the max and 2nd max coded error, and the average of the rest frames. 1173 // If there is only one frame that yields a huge coded error, it is likely a 1174 // scenecut. 1175 double this_ratio, max_prev_ratio, max_next_ratio, max_prev_coded, 1176 max_next_coded; 1177 1178 if (last - first == 0) return -1; 1179 1180 for (int i = first; i <= last; i++) { 1181 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash)) 1182 continue; 1183 double temp_intra = AOMMAX(stats_start[i].intra_error, 0.01); 1184 this_ratio = stats_start[i].coded_error / temp_intra; 1185 // find the avg ratio in the preceding neighborhood 1186 max_prev_ratio = 0; 1187 max_prev_coded = 0; 1188 for (int j = AOMMAX(first, i - HALF_WIN); j < i; j++) { 1189 if (stats_start[j].is_flash || (j > 0 && stats_start[j - 1].is_flash)) 1190 continue; 1191 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01); 1192 double temp_ratio = stats_start[j].coded_error / temp_intra; 1193 if (temp_ratio > max_prev_ratio) { 1194 max_prev_ratio = temp_ratio; 1195 } 1196 if (stats_start[j].coded_error > max_prev_coded) { 1197 max_prev_coded = stats_start[j].coded_error; 1198 } 1199 } 1200 // find the avg ratio in the following neighborhood 1201 max_next_ratio = 0; 1202 max_next_coded = 0; 1203 for (int j = i + 1; j <= AOMMIN(i + HALF_WIN, last); j++) { 1204 if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash)) 1205 continue; 1206 temp_intra = AOMMAX(stats_start[j].intra_error, 0.01); 1207 double temp_ratio = stats_start[j].coded_error / temp_intra; 1208 if (temp_ratio > max_next_ratio) { 1209 max_next_ratio = temp_ratio; 1210 } 1211 if (stats_start[j].coded_error > max_next_coded) { 1212 max_next_coded = stats_start[j].coded_error; 1213 } 1214 } 1215 1216 if (max_prev_ratio < 0.001 && max_next_ratio < 0.001) { 1217 // the ratios are very small, only check a small fixed threshold 1218 if (this_ratio < 0.02) continue; 1219 } else { 1220 // check if this frame has a larger ratio than 
the neighborhood 1221 double max_sr = stats_start[i].sr_coded_error; 1222 if (i < last) max_sr = AOMMAX(max_sr, stats_start[i + 1].sr_coded_error); 1223 double max_sr_fr_ratio = 1224 max_sr / AOMMAX(stats_start[i].coded_error, 0.01); 1225 1226 if (max_sr_fr_ratio > 1.2) continue; 1227 if (this_ratio < 2 * AOMMAX(max_prev_ratio, max_next_ratio) && 1228 stats_start[i].coded_error < 1229 2 * AOMMAX(max_prev_coded, max_next_coded)) { 1230 continue; 1231 } 1232 } 1233 return i; 1234 } 1235 return -1; 1236 } 1237 1238 // Remove the region with index next_region. 1239 // parameter merge: 0: merge with previous; 1: merge with next; 2: 1240 // merge with both, take type from previous if possible 1241 // After removing, next_region will be the index of the next region. 1242 static void remove_region(int merge, REGIONS *regions, int *num_regions, 1243 int *next_region) { 1244 int k = *next_region; 1245 assert(k < *num_regions); 1246 if (*num_regions == 1) { 1247 *num_regions = 0; 1248 return; 1249 } 1250 if (k == 0) { 1251 merge = 1; 1252 } else if (k == *num_regions - 1) { 1253 merge = 0; 1254 } 1255 int num_merge = (merge == 2) ? 2 : 1; 1256 switch (merge) { 1257 case 0: 1258 regions[k - 1].last = regions[k].last; 1259 *next_region = k; 1260 break; 1261 case 1: 1262 regions[k + 1].start = regions[k].start; 1263 *next_region = k + 1; 1264 break; 1265 case 2: 1266 regions[k - 1].last = regions[k + 1].last; 1267 *next_region = k; 1268 break; 1269 default: assert(0); 1270 } 1271 *num_regions -= num_merge; 1272 for (k = *next_region - (merge == 1); k < *num_regions; k++) { 1273 regions[k] = regions[k + num_merge]; 1274 } 1275 } 1276 1277 // Insert a region in the cur_region_idx. The start and last should both be in 1278 // the current region. After insertion, the cur_region_idx will point to the 1279 // last region that was splitted from the original region. 
1280 static void insert_region(int start, int last, REGION_TYPES type, 1281 REGIONS *regions, int *num_regions, 1282 int *cur_region_idx) { 1283 int k = *cur_region_idx; 1284 REGION_TYPES this_region_type = regions[k].type; 1285 int this_region_last = regions[k].last; 1286 int num_add = (start != regions[k].start) + (last != regions[k].last); 1287 // move the following regions further to the back 1288 for (int r = *num_regions - 1; r > k; r--) { 1289 regions[r + num_add] = regions[r]; 1290 } 1291 *num_regions += num_add; 1292 if (start > regions[k].start) { 1293 regions[k].last = start - 1; 1294 k++; 1295 regions[k].start = start; 1296 } 1297 regions[k].type = type; 1298 if (last < this_region_last) { 1299 regions[k].last = last; 1300 k++; 1301 regions[k].start = last + 1; 1302 regions[k].last = this_region_last; 1303 regions[k].type = this_region_type; 1304 } else { 1305 regions[k].last = this_region_last; 1306 } 1307 *cur_region_idx = k; 1308 } 1309 1310 // Get the average of stats inside a region. 
1311 static void analyze_region(const FIRSTPASS_STATS *stats, int k, 1312 REGIONS *regions) { 1313 int i; 1314 regions[k].avg_cor_coeff = 0; 1315 regions[k].avg_sr_fr_ratio = 0; 1316 regions[k].avg_intra_err = 0; 1317 regions[k].avg_coded_err = 0; 1318 1319 int check_first_sr = (k != 0); 1320 1321 for (i = regions[k].start; i <= regions[k].last; i++) { 1322 if (i > regions[k].start || check_first_sr) { 1323 double num_frames = 1324 (double)(regions[k].last - regions[k].start + check_first_sr); 1325 double max_coded_error = 1326 AOMMAX(stats[i].coded_error, stats[i - 1].coded_error); 1327 double this_ratio = 1328 stats[i].sr_coded_error / AOMMAX(max_coded_error, 0.001); 1329 regions[k].avg_sr_fr_ratio += this_ratio / num_frames; 1330 } 1331 1332 regions[k].avg_intra_err += 1333 stats[i].intra_error / (double)(regions[k].last - regions[k].start + 1); 1334 regions[k].avg_coded_err += 1335 stats[i].coded_error / (double)(regions[k].last - regions[k].start + 1); 1336 1337 regions[k].avg_cor_coeff += 1338 AOMMAX(stats[i].cor_coeff, 0.001) / 1339 (double)(regions[k].last - regions[k].start + 1); 1340 regions[k].avg_noise_var += 1341 AOMMAX(stats[i].noise_var, 0.001) / 1342 (double)(regions[k].last - regions[k].start + 1); 1343 } 1344 } 1345 1346 // Calculate the regions stats of every region. 
static void get_region_stats(const FIRSTPASS_STATS *stats, REGIONS *regions,
                             int num_regions) {
  for (int k = 0; k < num_regions; k++) {
    analyze_region(stats, k, regions);
  }
}

// Find tentative stable regions
static int find_stable_regions(const FIRSTPASS_STATS *stats,
                               const double *grad_coded, int this_start,
                               int this_last, REGIONS *regions) {
  int i, j, k = 0;
  regions[k].start = this_start;
  for (i = this_start; i <= this_last; i++) {
    // Check mean and variance of stats in a window
    double mean_intra = 0.001, var_intra = 0.001;
    double mean_coded = 0.001, var_coded = 0.001;
    int count = 0;
    for (j = -HALF_WIN; j <= HALF_WIN; j++) {
      int idx = clamp(i + j, this_start, this_last);
      if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
      mean_intra += stats[idx].intra_error;
      var_intra += stats[idx].intra_error * stats[idx].intra_error;
      mean_coded += stats[idx].coded_error;
      var_coded += stats[idx].coded_error * stats[idx].coded_error;
      count++;
    }

    REGION_TYPES cur_type;
    if (count > 0) {
      mean_intra /= (double)count;
      var_intra /= (double)count;
      mean_coded /= (double)count;
      var_coded /= (double)count;
      // A frame is "stable" when intra and coded errors have a low
      // relative second moment, the coded error changes slowly (small
      // gradient), and coded error is small relative to intra error.
      // Thresholds are empirical.
      int is_intra_stable = (var_intra / (mean_intra * mean_intra) < 1.03);
      int is_coded_stable = (var_coded / (mean_coded * mean_coded) < 1.04 &&
                             fabs(grad_coded[i]) / mean_coded < 0.05) ||
                            mean_coded / mean_intra < 0.05;
      int is_coded_small = mean_coded < 0.5 * mean_intra;
      cur_type = (is_intra_stable && is_coded_stable && is_coded_small)
                     ? STABLE_REGION
                     : HIGH_VAR_REGION;
    } else {
      cur_type = HIGH_VAR_REGION;
    }

    // mark a new region if type changes
    if (i == regions[k].start) {
      // first frame in the region
      regions[k].type = cur_type;
    } else if (cur_type != regions[k].type) {
      // Append a new region
      regions[k].last = i - 1;
      regions[k + 1].start = i;
      regions[k + 1].type = cur_type;
      k++;
    }
  }
  regions[k].last = this_last;
  return k + 1;
}

// Clean up regions that should be removed or merged.
static void cleanup_regions(REGIONS *regions, int *num_regions) {
  int k = 0;
  while (k < *num_regions) {
    // Merge same-typed neighbors (except scenecuts) and drop empty regions.
    if ((k > 0 && regions[k - 1].type == regions[k].type &&
         regions[k].type != SCENECUT_REGION) ||
        regions[k].last < regions[k].start) {
      remove_region(0, regions, num_regions, &k);
    } else {
      k++;
    }
  }
}

// Remove regions that are of type and shorter than length.
// Merge it with its neighboring regions.
static void remove_short_regions(REGIONS *regions, int *num_regions,
                                 REGION_TYPES type, int length) {
  int k = 0;
  while (k < *num_regions && (*num_regions) > 1) {
    if ((regions[k].last - regions[k].start + 1 < length &&
         regions[k].type == type)) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
    } else {
      k++;
    }
  }
  cleanup_regions(regions, num_regions);
}

// Refine the boundaries between stable and unstable (high variance) regions,
// then merge away regions that no longer look distinct from their neighbors.
static void adjust_unstable_region_bounds(const FIRSTPASS_STATS *stats,
                                          REGIONS *regions, int *num_regions) {
  int i, j, k;
  // Remove regions that are too short. Likely noise.
  remove_short_regions(regions, num_regions, STABLE_REGION, HALF_WIN);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);

  get_region_stats(stats, regions, *num_regions);

  // Adjust region boundaries. The thresholds are empirically obtained, but
  // overall the performance is not very sensitive to small changes to them.
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type == STABLE_REGION) continue;
    if (k > 0) {
      // Adjust previous boundary.
      // First find the average intra/coded error in the previous
      // neighborhood.
      double avg_intra_err = 0;
      const int starti = AOMMAX(regions[k - 1].last - WINDOW_SIZE + 1,
                                regions[k - 1].start + 1);
      const int lasti = regions[k - 1].last;
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // NOTE(review): count_grad is initialized but never decremented in
        // this loop, so its >= 0 check is always true — looks vestigial;
        // confirm against upstream history.
        int count_coded = 0, count_grad = 0;
        for (j = lasti + 1; j <= regions[k].last; j++) {
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small = stats[j].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the previous stable region
            regions[k - 1].last = j;
            regions[k].start = j + 1;
          } else {
            break;
          }
        }
      }
    }  // if k > 0
    if (k < *num_regions - 1) {
      // Adjust next boundary.
      // First find the average intra/coded error in the next neighborhood.
      double avg_intra_err = 0;
      const int starti = regions[k + 1].start;
      const int lasti = AOMMIN(regions[k + 1].last - 1,
                               regions[k + 1].start + WINDOW_SIZE - 1);
      int counti = 0;
      for (i = starti; i <= lasti; i++) {
        avg_intra_err += stats[i].intra_error;
        counti++;
      }
      if (counti > 0) {
        avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
        // At the boundary, coded error is large, but still the frame is stable
        int count_coded = 1, count_grad = 1;
        for (j = starti - 1; j >= regions[k].start; j--) {
          const int intra_close =
              fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
          const int coded_small =
              stats[j + 1].coded_error / avg_intra_err < 0.1;
          const int coeff_close = stats[j].cor_coeff > 0.995;
          if (!coeff_close || !coded_small) count_coded--;
          if (intra_close && count_coded >= 0 && count_grad >= 0) {
            // this frame probably belongs to the next stable region
            regions[k + 1].start = j;
            regions[k].last = j - 1;
          } else {
            break;
          }
        }
      }
    }  // if k < *num_regions - 1
  }  // end of loop over all regions

  cleanup_regions(regions, num_regions);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
  get_region_stats(stats, regions, *num_regions);

  // If a stable regions has higher error than neighboring high var regions,
  // or if the stable region has a lower average correlation,
  // then it should be merged with them
  k = 0;
  while (k < *num_regions && (*num_regions) > 1) {
    if (regions[k].type == STABLE_REGION &&
        (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
        ((k > 0 &&  // previous regions
          (regions[k].avg_coded_err > regions[k - 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k - 1].avg_cor_coeff * 0.999)) &&
         (k < *num_regions - 1 &&  // next region
          (regions[k].avg_coded_err > regions[k + 1].avg_coded_err * 1.01 ||
           regions[k].avg_cor_coeff < regions[k + 1].avg_cor_coeff * 0.999)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      analyze_region(stats, k - 1, regions);
    } else if (regions[k].type == HIGH_VAR_REGION &&
               (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
               ((k > 0 &&  // previous regions
                 (regions[k].avg_coded_err <
                      regions[k - 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k - 1].avg_cor_coeff * 1.001)) &&
                (k < *num_regions - 1 &&  // next region
                 (regions[k].avg_coded_err <
                      regions[k + 1].avg_coded_err * 0.99 ||
                  regions[k].avg_cor_coeff >
                      regions[k + 1].avg_cor_coeff * 1.001)))) {
      // merge current region with the previous and next regions
      remove_region(2, regions, num_regions, &k);
      analyze_region(stats, k - 1, regions);
    } else {
      k++;
    }
  }

  remove_short_regions(regions, num_regions, STABLE_REGION, WINDOW_SIZE);
  remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
}

// Identify blending regions.
static void find_blending_regions(const FIRSTPASS_STATS *stats,
                                  REGIONS *regions, int *num_regions) {
  int i, k = 0;
  // Blending regions will have large content change, therefore will have a
  // large consistent change in intra error.
  int count_stable = 0;
  while (k < *num_regions) {
    if (regions[k].type == STABLE_REGION) {
      k++;
      count_stable++;
      continue;
    }
    int dir = 0;
    int start = 0, last;
    for (i = regions[k].start; i <= regions[k].last; i++) {
      // First mark the regions that have a consistent large change of intra
      // error.
      if (k == 0 && i == regions[k].start) continue;
      if (stats[i].is_flash || (i > 0 && stats[i - 1].is_flash)) continue;
      double grad = stats[i].intra_error - stats[i - 1].intra_error;
      int large_change = fabs(grad) / AOMMAX(stats[i].intra_error, 0.01) > 0.05;
      int this_dir = 0;
      if (large_change) {
        this_dir = (grad > 0) ? 1 : -1;
      }
      // the current trend continues
      if (dir == this_dir) continue;
      if (dir != 0) {
        // Mark the end of a new large change group and add it
        last = i - 1;
        insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
      }
      dir = this_dir;
      if (k == 0 && i == regions[k].start + 1) {
        start = i - 1;
      } else {
        start = i;
      }
    }
    // Close out a trend that runs to the end of the region.
    if (dir != 0) {
      last = regions[k].last;
      insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
    }
    k++;
  }

  // If the blending region has very low correlation, mark it as high variance
  // since we probably cannot benefit from it anyways.
  get_region_stats(stats, regions, *num_regions);
  for (k = 0; k < *num_regions; k++) {
    if (regions[k].type != BLENDING_REGION) continue;
    if (regions[k].last == regions[k].start || regions[k].avg_cor_coeff < 0.6 ||
        count_stable == 0)
      regions[k].type = HIGH_VAR_REGION;
  }
  get_region_stats(stats, regions, *num_regions);

  // It is possible for blending to result in a "dip" in intra error (first
  // decrease then increase). Therefore we need to find the dip and combine the
  // two regions.
  k = 1;
  while (k < *num_regions) {
    if (k < *num_regions - 1 && regions[k].type == HIGH_VAR_REGION) {
      // Check if this short high variance regions is actually in the middle of
      // a blending region.
      if (regions[k - 1].type == BLENDING_REGION &&
          regions[k + 1].type == BLENDING_REGION &&
          regions[k].last - regions[k].start < 3) {
        // Direction of intra-error change at the end of each neighbor.
        int prev_dir = (stats[regions[k - 1].last].intra_error -
                        stats[regions[k - 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        int next_dir = (stats[regions[k + 1].last].intra_error -
                        stats[regions[k + 1].last - 1].intra_error) > 0
                           ? 1
                           : -1;
        if (prev_dir < 0 && next_dir > 0) {
          // This is possibly a mid region of blending. Check the ratios
          double ratio_thres = AOMMIN(regions[k - 1].avg_sr_fr_ratio,
                                      regions[k + 1].avg_sr_fr_ratio) *
                               0.95;
          if (regions[k].avg_sr_fr_ratio > ratio_thres) {
            regions[k].type = BLENDING_REGION;
            remove_region(2, regions, num_regions, &k);
            analyze_region(stats, k - 1, regions);
            continue;
          }
        }
      }
    }
    // Check if we have a pair of consecutive blending regions.
    if (regions[k - 1].type == BLENDING_REGION &&
        regions[k].type == BLENDING_REGION) {
      int prev_dir = (stats[regions[k - 1].last].intra_error -
                      stats[regions[k - 1].last - 1].intra_error) > 0
                         ? 1
                         : -1;
      int next_dir = (stats[regions[k].last].intra_error -
                      stats[regions[k].last - 1].intra_error) > 0
                         ? 1
                         : -1;

      // if both are too short, no need to check
      int total_length = regions[k].last - regions[k - 1].start + 1;
      if (total_length < 4) {
        regions[k - 1].type = HIGH_VAR_REGION;
        k++;
        continue;
      }

      int to_merge = 0;
      if (prev_dir < 0 && next_dir > 0) {
        // In this case we check the last frame in the previous region.
        double prev_length =
            (double)(regions[k - 1].last - regions[k - 1].start + 1);
        double last_ratio, ratio_thres;
        if (prev_length < 2.01) {
          // if the previous region is very short
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          ratio_thres = regions[k].avg_sr_fr_ratio * 0.95;
        } else {
          double max_coded_error =
              AOMMAX(stats[regions[k - 1].last].coded_error,
                     stats[regions[k - 1].last - 1].coded_error);
          last_ratio = stats[regions[k - 1].last].sr_coded_error /
                       AOMMAX(max_coded_error, 0.001);
          // Average sr/fr ratio of the previous region excluding its last
          // frame.
          double prev_ratio =
              (regions[k - 1].avg_sr_fr_ratio * prev_length - last_ratio) /
              (prev_length - 1.0);
          ratio_thres = AOMMIN(prev_ratio, regions[k].avg_sr_fr_ratio) * 0.95;
        }
        if (last_ratio > ratio_thres) {
          to_merge = 1;
        }
      }

      if (to_merge) {
        remove_region(0, regions, num_regions, &k);
        analyze_region(stats, k - 1, regions);
        continue;
      } else {
        // These are possibly two separate blending regions. Mark the boundary
        // frame as HIGH_VAR_REGION to separate the two.
        int prev_k = k - 1;
        insert_region(regions[prev_k].last, regions[prev_k].last,
                      HIGH_VAR_REGION, regions, num_regions, &prev_k);
        analyze_region(stats, prev_k, regions);
        k = prev_k + 1;
        analyze_region(stats, k, regions);
      }
    }
    k++;
  }
  cleanup_regions(regions, num_regions);
}

// Clean up decision for blendings. Remove blending regions that are too short.
// Also if a very short high var region is between a blending and a stable
// region, just merge it with one of them.
// Remove blending regions that are too short to be useful, and short
// high-variance regions squeezed between blending/stable neighbors, merging
// each removed region into whichever neighbor it resembles more.
static void cleanup_blendings(REGIONS *regions, int *num_regions) {
  int k = 0;
  while (k < (*num_regions) && (*num_regions) > 1) {
    // A blending region shorter than 5 frames is considered too short.
    int is_short_blending = regions[k].type == BLENDING_REGION &&
                            regions[k].last - regions[k].start + 1 < 5;
    // Same length threshold for a short high-variance region.
    int is_short_hv = regions[k].type == HIGH_VAR_REGION &&
                      regions[k].last - regions[k].start + 1 < 5;
    int has_stable_neighbor =
        ((k > 0 && regions[k - 1].type == STABLE_REGION) ||
         (k < *num_regions - 1 && regions[k + 1].type == STABLE_REGION));
    int has_blend_neighbor =
        ((k > 0 && regions[k - 1].type == BLENDING_REGION) ||
         (k < *num_regions - 1 && regions[k + 1].type == BLENDING_REGION));
    int total_neighbors = (k > 0) + (k < *num_regions - 1);

    // Drop a short blending region unconditionally; drop a short high-var
    // region only when every existing neighbor is stable or blending.
    if (is_short_blending ||
        (is_short_hv &&
         has_stable_neighbor + has_blend_neighbor >= total_neighbors)) {
      // Remove this region. Try to determine whether to combine it with the
      // previous or next region by comparing average correlation
      // coefficients; a missing neighbor gets the worst-case diff of 1.
      int merge;
      double prev_diff =
          (k > 0)
              ? fabs(regions[k].avg_cor_coeff - regions[k - 1].avg_cor_coeff)
              : 1;
      double next_diff =
          (k < *num_regions - 1)
              ? fabs(regions[k].avg_cor_coeff - regions[k + 1].avg_cor_coeff)
              : 1;
      // merge == 0 means to merge with previous, 1 means to merge with next
      merge = prev_diff > next_diff;
      remove_region(merge, regions, num_regions, &k);
    } else {
      k++;
    }
  }
  cleanup_regions(regions, num_regions);
}

// Free the scratch buffers allocated by identify_regions(). Also called on
// the allocation-failure path, where some pointers may be NULL — aom_free()
// is presumably NULL-safe (NOTE(review): confirm against aom_mem docs).
static void free_firstpass_stats_buffers(REGIONS *temp_regions,
                                         double *filt_intra_err,
                                         double *filt_coded_err,
                                         double *grad_coded) {
  aom_free(temp_regions);
  aom_free(filt_intra_err);
  aom_free(filt_coded_err);
  aom_free(grad_coded);
}

// Identify stable and unstable regions from first pass stats.
// stats_start points to the first frame to analyze.
// |offset| is the offset from the current frame to the frame stats_start is
// pointing to.
// Returns 0 on success, -1 on memory allocation failure.
static int identify_regions(const FIRSTPASS_STATS *const stats_start,
                            int total_frames, int offset, REGIONS *regions,
                            int *total_regions) {
  int k;
  if (total_frames <= 1) return 0;

  // store the initial decisions
  REGIONS *temp_regions =
      (REGIONS *)aom_malloc(total_frames * sizeof(temp_regions[0]));
  // buffers for filtered stats
  double *filt_intra_err =
      (double *)aom_calloc(total_frames, sizeof(*filt_intra_err));
  double *filt_coded_err =
      (double *)aom_calloc(total_frames, sizeof(*filt_coded_err));
  double *grad_coded = (double *)aom_calloc(total_frames, sizeof(*grad_coded));
  if (!(temp_regions && filt_intra_err && filt_coded_err && grad_coded)) {
    // Release whatever was allocated before the failure and bail out.
    free_firstpass_stats_buffers(temp_regions, filt_intra_err, filt_coded_err,
                                 grad_coded);
    return -1;
  }
  av1_zero_array(temp_regions, total_frames);

  int cur_region = 0, this_start = 0, this_last;

  // Process the stats one scenecut-delimited chunk at a time: classify the
  // frames in [this_start, this_last], append the resulting regions to
  // |regions|, then append a one-frame SCENECUT_REGION and continue after it.
  int next_scenecut = -1;
  do {
    // first get the obvious scenecuts
    next_scenecut =
        find_next_scenecut(stats_start, this_start, total_frames - 1);
    this_last = (next_scenecut >= 0) ? (next_scenecut - 1) : total_frames - 1;

    // low-pass filter the needed stats
    smooth_filter_stats(stats_start, this_start, this_last, filt_intra_err,
                        filt_coded_err);
    get_gradient(filt_coded_err, this_start, this_last, grad_coded);

    // find tentative stable regions and unstable regions
    int num_regions = find_stable_regions(stats_start, grad_coded, this_start,
                                          this_last, temp_regions);

    adjust_unstable_region_bounds(stats_start, temp_regions, &num_regions);

    get_region_stats(stats_start, temp_regions, num_regions);

    // Try to identify blending regions in the unstable regions
    find_blending_regions(stats_start, temp_regions, &num_regions);
    cleanup_blendings(temp_regions, &num_regions);

    // The flash points should all be considered high variance points
    k = 0;
    while (k < num_regions) {
      if (temp_regions[k].type != STABLE_REGION) {
        k++;
        continue;
      }
      int start = temp_regions[k].start;
      int last = temp_regions[k].last;
      // Carve one-frame HIGH_VAR_REGIONs out of stable regions wherever a
      // flash frame was detected in the first pass.
      for (int i = start; i <= last; i++) {
        if (stats_start[i].is_flash) {
          insert_region(i, i, HIGH_VAR_REGION, temp_regions, &num_regions, &k);
        }
      }
      k++;
    }
    cleanup_regions(temp_regions, &num_regions);

    // copy the regions in the scenecut group
    for (k = 0; k < num_regions; k++) {
      // Drop an empty (last < start) trailing region instead of copying it.
      if (temp_regions[k].last < temp_regions[k].start &&
          k == num_regions - 1) {
        num_regions--;
        break;
      }
      regions[k + cur_region] = temp_regions[k];
    }
    cur_region += num_regions;

    // add the scenecut region
    if (next_scenecut > -1) {
      // add the scenecut region, and find the next scenecut
      regions[cur_region].type = SCENECUT_REGION;
      regions[cur_region].start = next_scenecut;
      regions[cur_region].last = next_scenecut;
      cur_region++;
      this_start = next_scenecut + 1;
    }
  } while (next_scenecut >= 0);

  *total_regions = cur_region;
  get_region_stats(stats_start, regions, *total_regions);

  for (k = 0; k < *total_regions; k++) {
    // If scenecuts are very minor, mark them as high variance.
    // Heuristic: a noise-discounted correlation above 0.8 means the "cut"
    // still predicts well from the previous frame, so demote it.
    if (regions[k].type != SCENECUT_REGION ||
        regions[k].avg_cor_coeff *
                (1 - stats_start[regions[k].start].noise_var /
                         regions[k].avg_intra_err) <
            0.8) {
      continue;
    }
    regions[k].type = HIGH_VAR_REGION;
  }
  cleanup_regions(regions, total_regions);
  // Recompute per-region averages after the demotions above.
  get_region_stats(stats_start, regions, *total_regions);

  // Translate region indices from stats_start-relative to current-frame
  // relative coordinates (see |offset| in the header comment).
  for (k = 0; k < *total_regions; k++) {
    regions[k].start += offset;
    regions[k].last += offset;
  }

  free_firstpass_stats_buffers(temp_regions, filt_intra_err, filt_coded_err,
                               grad_coded);
  return 0;
}

// Return the index of the region containing |frame_idx|, or -1 if no region
// covers that frame.
static int find_regions_index(const REGIONS *regions, int num_regions,
                              int frame_idx) {
  for (int k = 0; k < num_regions; k++) {
    if (regions[k].start <= frame_idx && regions[k].last >= frame_idx) {
      return k;
    }
  }
  return -1;
}

/*!\brief Determine the length of future GF groups.
 *
 * \ingroup gf_group_algo
 * This function decides the gf group length of future frames in batch
 *
 * \param[in]    cpi              Top-level encoder structure
 * \param[in]    max_gop_length   Maximum length of the GF group
 * \param[in]    max_intervals    Maximum number of intervals to decide
 *
 * \remark Nothing is returned. Instead, cpi->ppi->rc.gf_intervals is
 * changed to store the decided GF group lengths.
1917 */ 1918 static void calculate_gf_length(AV1_COMP *cpi, int max_gop_length, 1919 int max_intervals) { 1920 RATE_CONTROL *const rc = &cpi->rc; 1921 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; 1922 TWO_PASS *const twopass = &cpi->ppi->twopass; 1923 FIRSTPASS_STATS next_frame; 1924 const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in; 1925 const FIRSTPASS_STATS *const stats = start_pos - (rc->frames_since_key == 0); 1926 1927 const int f_w = cpi->common.width; 1928 const int f_h = cpi->common.height; 1929 int i; 1930 1931 int flash_detected; 1932 1933 av1_zero(next_frame); 1934 1935 if (has_no_stats_stage(cpi)) { 1936 for (i = 0; i < MAX_NUM_GF_INTERVALS; i++) { 1937 p_rc->gf_intervals[i] = AOMMIN(rc->max_gf_interval, max_gop_length); 1938 } 1939 p_rc->cur_gf_index = 0; 1940 rc->intervals_till_gf_calculate_due = MAX_NUM_GF_INTERVALS; 1941 return; 1942 } 1943 1944 // TODO(urvang): Try logic to vary min and max interval based on q. 1945 const int active_min_gf_interval = rc->min_gf_interval; 1946 const int active_max_gf_interval = 1947 AOMMIN(rc->max_gf_interval, max_gop_length); 1948 const int min_shrink_int = AOMMAX(MIN_SHRINK_LEN, active_min_gf_interval); 1949 1950 i = (rc->frames_since_key == 0); 1951 max_intervals = cpi->ppi->lap_enabled ? 1 : max_intervals; 1952 int count_cuts = 1; 1953 // If cpi->gf_state.arf_gf_boost_lst is 0, we are starting with a KF or GF. 
1954 int cur_start = -1 + !cpi->ppi->gf_state.arf_gf_boost_lst, cur_last; 1955 int cut_pos[MAX_NUM_GF_INTERVALS + 1] = { -1 }; 1956 int cut_here; 1957 GF_GROUP_STATS gf_stats; 1958 init_gf_stats(&gf_stats); 1959 while (count_cuts < max_intervals + 1) { 1960 // reaches next key frame, break here 1961 if (i >= rc->frames_to_key) { 1962 cut_here = 2; 1963 } else if (i - cur_start >= rc->static_scene_max_gf_interval) { 1964 // reached maximum len, but nothing special yet (almost static) 1965 // let's look at the next interval 1966 cut_here = 1; 1967 } else if (EOF == input_stats(twopass, &cpi->twopass_frame, &next_frame)) { 1968 // reaches last frame, break 1969 cut_here = 2; 1970 } else { 1971 // Test for the case where there is a brief flash but the prediction 1972 // quality back to an earlier frame is then restored. 1973 flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0); 1974 // TODO(bohanli): remove redundant accumulations here, or unify 1975 // this and the ones in define_gf_group 1976 accumulate_next_frame_stats(&next_frame, flash_detected, 1977 rc->frames_since_key, i, &gf_stats, f_w, f_h); 1978 1979 cut_here = detect_gf_cut(cpi, i, cur_start, flash_detected, 1980 active_max_gf_interval, active_min_gf_interval, 1981 &gf_stats); 1982 } 1983 if (cut_here) { 1984 cur_last = i - 1; // the current last frame in the gf group 1985 int ori_last = cur_last; 1986 // The region frame idx does not start from the same frame as cur_start 1987 // and cur_last. Need to offset them. 1988 int offset = rc->frames_since_key - p_rc->regions_offset; 1989 REGIONS *regions = p_rc->regions; 1990 int num_regions = p_rc->num_regions; 1991 1992 int scenecut_idx = -1; 1993 // only try shrinking if interval smaller than active_max_gf_interval 1994 if (cur_last - cur_start <= active_max_gf_interval && 1995 cur_last > cur_start) { 1996 // find the region indices of where the first and last frame belong. 
1997 int k_start = 1998 find_regions_index(regions, num_regions, cur_start + offset); 1999 int k_last = 2000 find_regions_index(regions, num_regions, cur_last + offset); 2001 if (cur_start + offset == 0) k_start = 0; 2002 2003 // See if we have a scenecut in between 2004 for (int r = k_start + 1; r <= k_last; r++) { 2005 if (regions[r].type == SCENECUT_REGION && 2006 regions[r].last - offset - cur_start > active_min_gf_interval) { 2007 scenecut_idx = r; 2008 break; 2009 } 2010 } 2011 2012 // if the found scenecut is very close to the end, ignore it. 2013 if (regions[num_regions - 1].last - regions[scenecut_idx].last < 4) { 2014 scenecut_idx = -1; 2015 } 2016 2017 if (scenecut_idx != -1) { 2018 // If we have a scenecut, then stop at it. 2019 // TODO(bohanli): add logic here to stop before the scenecut and for 2020 // the next gop start from the scenecut with GF 2021 int is_minor_sc = 2022 (regions[scenecut_idx].avg_cor_coeff * 2023 (1 - stats[regions[scenecut_idx].start - offset].noise_var / 2024 regions[scenecut_idx].avg_intra_err) > 2025 0.6); 2026 cur_last = regions[scenecut_idx].last - offset - !is_minor_sc; 2027 } else { 2028 int is_last_analysed = (k_last == num_regions - 1) && 2029 (cur_last + offset == regions[k_last].last); 2030 int not_enough_regions = 2031 k_last - k_start <= 2032 1 + (regions[k_start].type == SCENECUT_REGION); 2033 // if we are very close to the end, then do not shrink since it may 2034 // introduce intervals that are too short 2035 if (!(is_last_analysed && not_enough_regions)) { 2036 const double arf_length_factor = 0.1; 2037 double best_score = 0; 2038 int best_j = -1; 2039 const int first_frame = regions[0].start - offset; 2040 const int last_frame = regions[num_regions - 1].last - offset; 2041 // score of how much the arf helps the whole GOP 2042 double base_score = 0.0; 2043 // Accumulate base_score in 2044 for (int j = cur_start + 1; j < cur_start + min_shrink_int; j++) { 2045 if (stats + j >= twopass->stats_buf_ctx->stats_in_end) 
break; 2046 base_score = (base_score + 1.0) * stats[j].cor_coeff; 2047 } 2048 int met_blending = 0; // Whether we have met blending areas before 2049 int last_blending = 0; // Whether the previous frame if blending 2050 for (int j = cur_start + min_shrink_int; j <= cur_last; j++) { 2051 if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break; 2052 base_score = (base_score + 1.0) * stats[j].cor_coeff; 2053 int this_reg = 2054 find_regions_index(regions, num_regions, j + offset); 2055 if (this_reg < 0) continue; 2056 // A GOP should include at most 1 blending region. 2057 if (regions[this_reg].type == BLENDING_REGION) { 2058 last_blending = 1; 2059 if (met_blending) { 2060 break; 2061 } else { 2062 base_score = 0; 2063 continue; 2064 } 2065 } else { 2066 if (last_blending) met_blending = 1; 2067 last_blending = 0; 2068 } 2069 2070 // Add the factor of how good the neighborhood is for this 2071 // candidate arf. 2072 double this_score = arf_length_factor * base_score; 2073 double temp_accu_coeff = 1.0; 2074 // following frames 2075 int count_f = 0; 2076 for (int n = j + 1; n <= j + 3 && n <= last_frame; n++) { 2077 if (stats + n >= twopass->stats_buf_ctx->stats_in_end) break; 2078 temp_accu_coeff *= stats[n].cor_coeff; 2079 this_score += 2080 temp_accu_coeff * 2081 sqrt(AOMMAX(0.5, 2082 1 - stats[n].noise_var / 2083 AOMMAX(stats[n].intra_error, 0.001))); 2084 count_f++; 2085 } 2086 // preceding frames 2087 temp_accu_coeff = 1.0; 2088 for (int n = j; n > j - 3 * 2 + count_f && n > first_frame; n--) { 2089 if (stats + n < twopass->stats_buf_ctx->stats_in_start) break; 2090 temp_accu_coeff *= stats[n].cor_coeff; 2091 this_score += 2092 temp_accu_coeff * 2093 sqrt(AOMMAX(0.5, 2094 1 - stats[n].noise_var / 2095 AOMMAX(stats[n].intra_error, 0.001))); 2096 } 2097 2098 if (this_score > best_score) { 2099 best_score = this_score; 2100 best_j = j; 2101 } 2102 } 2103 2104 // For blending areas, move one more frame in case we missed the 2105 // first blending frame. 
2106 int best_reg = 2107 find_regions_index(regions, num_regions, best_j + offset); 2108 if (best_reg < num_regions - 1 && best_reg > 0) { 2109 if (regions[best_reg - 1].type == BLENDING_REGION && 2110 regions[best_reg + 1].type == BLENDING_REGION) { 2111 if (best_j + offset == regions[best_reg].start && 2112 best_j + offset < regions[best_reg].last) { 2113 best_j += 1; 2114 } else if (best_j + offset == regions[best_reg].last && 2115 best_j + offset > regions[best_reg].start) { 2116 best_j -= 1; 2117 } 2118 } 2119 } 2120 2121 if (cur_last - best_j < 2) best_j = cur_last; 2122 if (best_j > 0 && best_score > 0.1) cur_last = best_j; 2123 // if cannot find anything, just cut at the original place. 2124 } 2125 } 2126 } 2127 cut_pos[count_cuts] = cur_last; 2128 count_cuts++; 2129 2130 // reset pointers to the shrunken location 2131 cpi->twopass_frame.stats_in = start_pos + cur_last; 2132 cur_start = cur_last; 2133 int cur_region_idx = 2134 find_regions_index(regions, num_regions, cur_start + 1 + offset); 2135 if (cur_region_idx >= 0) 2136 if (regions[cur_region_idx].type == SCENECUT_REGION) cur_start++; 2137 2138 i = cur_last; 2139 2140 if (cut_here > 1 && cur_last == ori_last) break; 2141 2142 // reset accumulators 2143 init_gf_stats(&gf_stats); 2144 } 2145 ++i; 2146 } 2147 2148 // save intervals 2149 rc->intervals_till_gf_calculate_due = count_cuts - 1; 2150 for (int n = 1; n < count_cuts; n++) { 2151 p_rc->gf_intervals[n - 1] = cut_pos[n] - cut_pos[n - 1]; 2152 } 2153 p_rc->cur_gf_index = 0; 2154 cpi->twopass_frame.stats_in = start_pos; 2155 } 2156 2157 static void correct_frames_to_key(AV1_COMP *cpi) { 2158 int lookahead_size = 2159 av1_lookahead_depth(cpi->ppi->lookahead, cpi->compressor_stage); 2160 if (lookahead_size < 2161 av1_lookahead_pop_sz(cpi->ppi->lookahead, cpi->compressor_stage)) { 2162 assert( 2163 IMPLIES(cpi->oxcf.pass != AOM_RC_ONE_PASS && cpi->ppi->frames_left > 0, 2164 lookahead_size == cpi->ppi->frames_left)); 2165 cpi->rc.frames_to_key = 
AOMMIN(cpi->rc.frames_to_key, lookahead_size); 2166 } else if (cpi->ppi->frames_left > 0) { 2167 // Correct frames to key based on limit 2168 cpi->rc.frames_to_key = 2169 AOMMIN(cpi->rc.frames_to_key, cpi->ppi->frames_left); 2170 } 2171 } 2172 2173 /*!\brief Define a GF group in one pass mode when no look ahead stats are 2174 * available. 2175 * 2176 * \ingroup gf_group_algo 2177 * This function defines the structure of a GF group, along with various 2178 * parameters regarding bit-allocation and quality setup in the special 2179 * case of one pass encoding where no lookahead stats are avialable. 2180 * 2181 * \param[in] cpi Top-level encoder structure 2182 * 2183 * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed. 2184 */ 2185 static void define_gf_group_pass0(AV1_COMP *cpi) { 2186 RATE_CONTROL *const rc = &cpi->rc; 2187 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; 2188 GF_GROUP *const gf_group = &cpi->ppi->gf_group; 2189 const AV1EncoderConfig *const oxcf = &cpi->oxcf; 2190 const GFConfig *const gf_cfg = &oxcf->gf_cfg; 2191 int target; 2192 2193 if (oxcf->q_cfg.aq_mode == CYCLIC_REFRESH_AQ) { 2194 av1_cyclic_refresh_set_golden_update(cpi); 2195 } else { 2196 p_rc->baseline_gf_interval = p_rc->gf_intervals[p_rc->cur_gf_index]; 2197 rc->intervals_till_gf_calculate_due--; 2198 p_rc->cur_gf_index++; 2199 } 2200 2201 // correct frames_to_key when lookahead queue is flushing 2202 correct_frames_to_key(cpi); 2203 2204 if (p_rc->baseline_gf_interval > rc->frames_to_key) 2205 p_rc->baseline_gf_interval = rc->frames_to_key; 2206 2207 p_rc->gfu_boost = DEFAULT_GF_BOOST; 2208 p_rc->constrained_gf_group = 2209 (p_rc->baseline_gf_interval >= rc->frames_to_key) ? 1 : 0; 2210 2211 gf_group->max_layer_depth_allowed = oxcf->gf_cfg.gf_max_pyr_height; 2212 2213 // Rare case when the look-ahead is less than the target GOP length, can't 2214 // generate ARF frame. 
2215 if (p_rc->baseline_gf_interval > gf_cfg->lag_in_frames || 2216 !is_altref_enabled(gf_cfg->lag_in_frames, gf_cfg->enable_auto_arf) || 2217 p_rc->baseline_gf_interval < rc->min_gf_interval) 2218 gf_group->max_layer_depth_allowed = 0; 2219 2220 // Set up the structure of this Group-Of-Pictures (same as GF_GROUP) 2221 av1_gop_setup_structure(cpi); 2222 2223 // Allocate bits to each of the frames in the GF group. 2224 // TODO(sarahparker) Extend this to work with pyramid structure. 2225 for (int cur_index = 0; cur_index < gf_group->size; ++cur_index) { 2226 const FRAME_UPDATE_TYPE cur_update_type = gf_group->update_type[cur_index]; 2227 if (oxcf->rc_cfg.mode == AOM_CBR) { 2228 if (cur_update_type == KF_UPDATE) { 2229 target = av1_calc_iframe_target_size_one_pass_cbr(cpi); 2230 } else { 2231 target = av1_calc_pframe_target_size_one_pass_cbr(cpi, cur_update_type); 2232 } 2233 } else { 2234 if (cur_update_type == KF_UPDATE) { 2235 target = av1_calc_iframe_target_size_one_pass_vbr(cpi); 2236 } else { 2237 target = av1_calc_pframe_target_size_one_pass_vbr(cpi, cur_update_type); 2238 } 2239 } 2240 gf_group->bit_allocation[cur_index] = target; 2241 } 2242 } 2243 2244 static inline void set_baseline_gf_interval(PRIMARY_RATE_CONTROL *p_rc, 2245 int arf_position) { 2246 p_rc->baseline_gf_interval = arf_position; 2247 } 2248 2249 // initialize GF_GROUP_STATS 2250 static void init_gf_stats(GF_GROUP_STATS *gf_stats) { 2251 gf_stats->gf_group_err = 0.0; 2252 gf_stats->gf_group_raw_error = 0.0; 2253 gf_stats->gf_group_skip_pct = 0.0; 2254 gf_stats->gf_group_inactive_zone_rows = 0.0; 2255 2256 gf_stats->mv_ratio_accumulator = 0.0; 2257 gf_stats->decay_accumulator = 1.0; 2258 gf_stats->zero_motion_accumulator = 1.0; 2259 gf_stats->loop_decay_rate = 1.0; 2260 gf_stats->last_loop_decay_rate = 1.0; 2261 gf_stats->this_frame_mv_in_out = 0.0; 2262 gf_stats->mv_in_out_accumulator = 0.0; 2263 gf_stats->abs_mv_in_out_accumulator = 0.0; 2264 2265 gf_stats->avg_sr_coded_error = 0.0; 2266 
gf_stats->avg_pcnt_second_ref = 0.0; 2267 gf_stats->avg_new_mv_count = 0.0; 2268 gf_stats->avg_wavelet_energy = 0.0; 2269 gf_stats->avg_raw_err_stdev = 0.0; 2270 gf_stats->non_zero_stdev_count = 0; 2271 } 2272 2273 static void accumulate_gop_stats(AV1_COMP *cpi, int is_intra_only, int f_w, 2274 int f_h, FIRSTPASS_STATS *next_frame, 2275 const FIRSTPASS_STATS *start_pos, 2276 GF_GROUP_STATS *gf_stats, int *idx) { 2277 int i, flash_detected; 2278 TWO_PASS *const twopass = &cpi->ppi->twopass; 2279 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; 2280 RATE_CONTROL *const rc = &cpi->rc; 2281 FRAME_INFO *frame_info = &cpi->frame_info; 2282 const AV1EncoderConfig *const oxcf = &cpi->oxcf; 2283 2284 init_gf_stats(gf_stats); 2285 av1_zero(*next_frame); 2286 2287 // If this is a key frame or the overlay from a previous arf then 2288 // the error score / cost of this frame has already been accounted for. 2289 i = is_intra_only; 2290 // get the determined gf group length from p_rc->gf_intervals 2291 while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) { 2292 // read in the next frame 2293 if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break; 2294 // Accumulate error score of frames in this gf group. 2295 double mod_frame_err = 2296 calculate_modified_err(frame_info, twopass, oxcf, next_frame); 2297 // accumulate stats for this frame 2298 accumulate_this_frame_stats(next_frame, mod_frame_err, gf_stats); 2299 ++i; 2300 } 2301 2302 reset_fpf_position(&cpi->twopass_frame, start_pos); 2303 2304 i = is_intra_only; 2305 input_stats(twopass, &cpi->twopass_frame, next_frame); 2306 while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) { 2307 // read in the next frame 2308 if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break; 2309 2310 // Test for the case where there is a brief flash but the prediction 2311 // quality back to an earlier frame is then restored. 
2312 flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0); 2313 2314 // accumulate stats for next frame 2315 accumulate_next_frame_stats(next_frame, flash_detected, 2316 rc->frames_since_key, i, gf_stats, f_w, f_h); 2317 2318 ++i; 2319 } 2320 2321 i = p_rc->gf_intervals[p_rc->cur_gf_index]; 2322 average_gf_stats(i, gf_stats); 2323 2324 *idx = i; 2325 } 2326 2327 static void update_gop_length(RATE_CONTROL *rc, PRIMARY_RATE_CONTROL *p_rc, 2328 int idx, int is_final_pass) { 2329 if (is_final_pass) { 2330 rc->intervals_till_gf_calculate_due--; 2331 p_rc->cur_gf_index++; 2332 } 2333 2334 // Was the group length constrained by the requirement for a new KF? 2335 p_rc->constrained_gf_group = (idx >= rc->frames_to_key) ? 1 : 0; 2336 2337 set_baseline_gf_interval(p_rc, idx); 2338 rc->frames_till_gf_update_due = p_rc->baseline_gf_interval; 2339 } 2340 2341 // #define FIXED_ARF_BITS 2342 #ifdef FIXED_ARF_BITS 2343 #define ARF_BITS_FRACTION 0.75 2344 #endif 2345 /*!\brief Distributes bits to frames in a group 2346 * 2347 *\ingroup rate_control 2348 * 2349 * This function decides on the allocation of bits between the different 2350 * frames and types of frame in a GF/ARF group. 2351 * 2352 * \param[in] cpi Top - level encoder instance structure 2353 * \param[in] rc Rate control data 2354 * \param[in] gf_group GF/ARF group data structure 2355 * \param[in] is_key_frame Indicates if the first frame in the group is 2356 * also a key frame. 2357 * \param[in] use_arf Are ARF frames enabled or is this a GF only 2358 * uni-directional group. 2359 * \param[in] gf_group_bits Bits available to be allocated. 2360 * 2361 * \remark No return but updates the rate control and group data structures 2362 * to reflect the allocation of bits. 
 */
void av1_gop_bit_allocation(const AV1_COMP *cpi, RATE_CONTROL *const rc,
                            GF_GROUP *gf_group, int is_key_frame, int use_arf,
                            int64_t gf_group_bits) {
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  // Calculate the extra bits to be used for boosted frame(s)
#ifdef FIXED_ARF_BITS
  int gf_arf_bits = (int)(ARF_BITS_FRACTION * gf_group_bits);
#else
  int gf_arf_bits = calculate_boost_bits(
      p_rc->baseline_gf_interval - (rc->frames_since_key == 0), p_rc->gfu_boost,
      gf_group_bits);
#endif

  gf_arf_bits = adjust_boost_bits_for_target_level(cpi, rc, gf_arf_bits,
                                                   gf_group_bits, 1);

  // Allocate bits to each of the frames in the GF group.
  allocate_gf_group_bits(gf_group, p_rc, rc, gf_group_bits, gf_arf_bits,
                         is_key_frame, use_arf);
}
#undef ARF_BITS_FRACTION

#define MAX_GF_BOOST 5400
#define REDUCE_GF_LENGTH_THRESH 4
#define REDUCE_GF_LENGTH_TO_KEY_THRESH 9
#define REDUCE_GF_LENGTH_BY 1
// Compute the GF/ARF boost for the group of length |i|, derive the group bit
// budget and (on the final pass) update the rate-control bookkeeping.
// Resets the stats read position back to |start_pos| before returning.
static void set_gop_bits_boost(AV1_COMP *cpi, int i, int is_intra_only,
                               int is_final_pass, int use_alt_ref,
                               int alt_offset, const FIRSTPASS_STATS *start_pos,
                               GF_GROUP_STATS *gf_stats) {
  // Should we use the alternate reference frame.
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  FRAME_INFO *frame_info = &cpi->frame_info;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;

  // Group length excluding the leading intra frame, if any.
  int ext_len = i - is_intra_only;
  if (use_alt_ref) {
    // Frames available beyond this group, capped by the next key frame.
    const int forward_frames = (rc->frames_to_key - i >= ext_len)
                                   ? ext_len
                                   : AOMMAX(0, rc->frames_to_key - i);

    // Calculate the boost for alt ref.
    p_rc->gfu_boost = av1_calc_arf_boost(
        twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset,
        forward_frames, ext_len, &p_rc->num_stats_used_for_gfu_boost,
        &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled);
  } else {
    // No ARF: compute a capped GF boost from the group's own frames.
    reset_fpf_position(&cpi->twopass_frame, start_pos);
    p_rc->gfu_boost = AOMMIN(
        MAX_GF_BOOST,
        av1_calc_arf_boost(
            twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset, ext_len,
            0, &p_rc->num_stats_used_for_gfu_boost,
            &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled));
  }

#define LAST_ALR_BOOST_FACTOR 0.2f
  p_rc->arf_boost_factor = 1.0;
  if (use_alt_ref && !is_lossless_requested(rc_cfg)) {
    // Reduce the boost of altref in the last gf group
    if (rc->frames_to_key - ext_len == REDUCE_GF_LENGTH_BY ||
        rc->frames_to_key - ext_len == 0) {
      p_rc->arf_boost_factor = LAST_ALR_BOOST_FACTOR;
    }
  }

  // Reset the file position.
  reset_fpf_position(&cpi->twopass_frame, start_pos);
  if (cpi->ppi->lap_enabled) {
    // Since we don't have enough stats to know the actual error of the
    // gf group, we assume error of each frame to be equal to 1 and set
    // the error of the group as baseline_gf_interval.
    gf_stats->gf_group_err = p_rc->baseline_gf_interval;
  }
  // Calculate the bits to be allocated to the gf/arf group as a whole
  p_rc->gf_group_bits =
      calculate_total_gf_group_bits(cpi, gf_stats->gf_group_err);

#if GROUP_ADAPTIVE_MAXQ
  // Calculate an estimate of the maxq needed for the group.
  // We are more aggressive about correcting for sections
  // where there could be significant overshoot than for easier
  // sections where we do not wish to risk creating an overshoot
  // of the allocated bit budget.
  if ((rc_cfg->mode != AOM_Q) && (p_rc->baseline_gf_interval > 1) &&
      is_final_pass) {
    const int vbr_group_bits_per_frame =
        (int)(p_rc->gf_group_bits / p_rc->baseline_gf_interval);
    const double group_av_err =
        gf_stats->gf_group_raw_error / p_rc->baseline_gf_interval;
    const double group_av_skip_pct =
        gf_stats->gf_group_skip_pct / p_rc->baseline_gf_interval;
    const double group_av_inactive_zone =
        ((gf_stats->gf_group_inactive_zone_rows * 2) /
         (p_rc->baseline_gf_interval * (double)cm->mi_params.mb_rows));

    int tmp_q;
    tmp_q = get_twopass_worst_quality(
        cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
        vbr_group_bits_per_frame);
    // Never drop active_worst_quality to less than half its previous value.
    rc->active_worst_quality = AOMMAX(tmp_q, rc->active_worst_quality >> 1);
  }
#endif

  // Adjust KF group bits and error remaining.
  if (is_final_pass) twopass->kf_group_error_left -= gf_stats->gf_group_err;

  // Reset the file position.
  reset_fpf_position(&cpi->twopass_frame, start_pos);

  // Calculate a section intra ratio used in setting max loop filter.
  if (rc->frames_since_key != 0) {
    twopass->section_intra_rating = calculate_section_intra_ratio(
        start_pos, twopass->stats_buf_ctx->stats_in_end,
        p_rc->baseline_gf_interval);
  }

  av1_gop_bit_allocation(cpi, rc, gf_group, rc->frames_since_key == 0,
                         use_alt_ref, p_rc->gf_group_bits);

  // TODO(jingning): Generalize this condition.
  if (is_final_pass) {
    cpi->ppi->gf_state.arf_gf_boost_lst = use_alt_ref;

    // Reset rolling actual and target bits counters for ARF groups.
    twopass->rolling_arf_group_target_bits = 1;
    twopass->rolling_arf_group_actual_bits = 1;
  }
#if CONFIG_BITRATE_ACCURACY
  if (is_final_pass) {
    av1_vbr_rc_set_gop_bit_budget(&cpi->vbr_rc_info,
                                  p_rc->baseline_gf_interval);
  }
#endif
}

/*!\brief Define a GF group.
 *
 * \ingroup gf_group_algo
 * This function defines the structure of a GF group, along with various
 * parameters regarding bit-allocation and quality setup.
 *
 * \param[in]    cpi             Top-level encoder structure
 * \param[in]    frame_params    Structure with frame parameters
 * \param[in]    is_final_pass   Whether this is the final pass for the
 *                               GF group, or a trial (non-zero)
 *
 * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed.
 */
static void define_gf_group(AV1_COMP *cpi, EncodeFrameParams *frame_params,
                            int is_final_pass) {
  AV1_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  GF_GROUP *gf_group = &cpi->ppi->gf_group;
  const GFConfig *const gf_cfg = &oxcf->gf_cfg;
  const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
  const int f_w = cm->width;
  const int f_h = cm->height;
  int i;
  // frames_since_key == 0 means the current frame is a key frame.
  const int is_intra_only = rc->frames_since_key == 0;

  cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1);

  // Reset the GF group data structures unless this is a key
  // frame in which case it will already have been done.
  if (!is_intra_only) {
    av1_zero(cpi->ppi->gf_group);
    cpi->gf_frame_index = 0;
  }

  // Without first-pass stats, fall back to the simple pass-0 GOP builder.
  if (has_no_stats_stage(cpi)) {
    define_gf_group_pass0(cpi);
    return;
  }

#if CONFIG_THREE_PASS
  if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
    int ret = define_gf_group_pass3(cpi, frame_params, is_final_pass);
    if (ret == 0) return;

    // The third-pass data conflicted with the current config; drop it and
    // fall back to the regular second-pass strategy below.
    av1_free_thirdpass_ctx(cpi->third_pass_ctx);
    cpi->third_pass_ctx = NULL;
  }
#endif  // CONFIG_THREE_PASS

  // correct frames_to_key when lookahead queue is emptying
  if (cpi->ppi->lap_enabled) {
    correct_frames_to_key(cpi);
  }

  GF_GROUP_STATS gf_stats;
  accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame, start_pos,
                       &gf_stats, &i);

  const int can_disable_arf = !gf_cfg->gf_min_pyr_height;

  // If this is a key frame or the overlay from a previous arf then
  // the error score / cost of this frame has already been accounted for.
  const int active_min_gf_interval = rc->min_gf_interval;

  // Disable internal ARFs for "still" gf groups.
  // zero_motion_accumulator: minimum percentage of (0,0) motion;
  // avg_sr_coded_error: average of the SSE per pixel of each frame;
  // avg_raw_err_stdev: average of the standard deviation of (0,0)
  // motion error per block of each frame.
  const int can_disable_internal_arfs = gf_cfg->gf_min_pyr_height <= 1;
  if (can_disable_internal_arfs &&
      gf_stats.zero_motion_accumulator > MIN_ZERO_MOTION &&
      gf_stats.avg_sr_coded_error < MAX_SR_CODED_ERROR &&
      gf_stats.avg_raw_err_stdev < MAX_RAW_ERR_VAR) {
    cpi->ppi->internal_altref_allowed = 0;
  }

  // Decide whether this GF group gets an alt-ref frame at all; without one
  // the group is coded flat (max_layer_depth_allowed == 0).
  int use_alt_ref;
  if (can_disable_arf) {
    use_alt_ref =
        !is_almost_static(gf_stats.zero_motion_accumulator,
                          twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled) &&
        p_rc->use_arf_in_this_kf_group && (i < gf_cfg->lag_in_frames) &&
        (i >= MIN_GF_INTERVAL);
  } else {
    use_alt_ref = p_rc->use_arf_in_this_kf_group &&
                  (i < gf_cfg->lag_in_frames) && (i > 2);
  }
  if (use_alt_ref) {
    gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;
  } else {
    gf_group->max_layer_depth_allowed = 0;
  }

  int alt_offset = 0;
  // The length reduction strategy is tweaked for certain cases, and doesn't
  // work well for certain other cases.
  const int allow_gf_length_reduction =
      ((rc_cfg->mode == AOM_Q && rc_cfg->cq_level <= 128) ||
       !cpi->ppi->internal_altref_allowed) &&
      !is_lossless_requested(rc_cfg);

  if (allow_gf_length_reduction && use_alt_ref) {
    // adjust length of this gf group if one of the following condition met
    // 1: only one overlay frame left and this gf is too long
    // 2: next gf group is too short to have arf compared to the current gf

    // maximum length of next gf group
    const int next_gf_len = rc->frames_to_key - i;
    const int single_overlay_left =
        next_gf_len == 0 && i > REDUCE_GF_LENGTH_THRESH;
    // the next gf is probably going to have a ARF but it will be shorter than
    // this gf
    const int unbalanced_gf =
        i > REDUCE_GF_LENGTH_TO_KEY_THRESH &&
        next_gf_len + 1 < REDUCE_GF_LENGTH_TO_KEY_THRESH &&
        next_gf_len + 1 >= rc->min_gf_interval;

    if (single_overlay_left || unbalanced_gf) {
      const int roll_back = REDUCE_GF_LENGTH_BY;
      // Reduce length only if active_min_gf_interval will be respected later.
      if (i - roll_back >= active_min_gf_interval + 1) {
        alt_offset = -roll_back;
        i -= roll_back;
        if (is_final_pass) rc->intervals_till_gf_calculate_due = 0;
        p_rc->gf_intervals[p_rc->cur_gf_index] -= roll_back;
        // Re-accumulate the GOP stats over the shortened interval.
        reset_fpf_position(&cpi->twopass_frame, start_pos);
        accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame,
                             start_pos, &gf_stats, &i);
      }
    }
  }

  update_gop_length(rc, p_rc, i, is_final_pass);

  // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
  av1_gop_setup_structure(cpi);

  set_gop_bits_boost(cpi, i, is_intra_only, is_final_pass, use_alt_ref,
                     alt_offset, start_pos, &gf_stats);

  frame_params->frame_type =
      rc->frames_since_key == 0 ? KEY_FRAME : INTER_FRAME;
  // ARF / internal-ARF frames are coded ahead of display order and not shown.
  frame_params->show_frame =
      !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
        gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
}

#if CONFIG_THREE_PASS
/*!\brief Define a GF group for the third pass.
 *
 * \ingroup gf_group_algo
 * This function defines the structure of a GF group for the third pass, along
 * with various parameters regarding bit-allocation and quality setup based on
 * the two-pass bitstream.
 * Much of the function still uses the strategies used for the second pass and
 * relies on first pass statistics. It is expected that over time these portions
 * would be replaced with strategies specific to the third pass.
 *
 * \param[in]    cpi             Top-level encoder structure
 * \param[in]    frame_params    Structure with frame parameters
 * \param[in]    is_final_pass   Whether this is the final pass for the
 *                               GF group, or a trial (non-zero)
 *
 * \return 0: Success;
 *         -1: There are conflicts between the bitstream and current config
 * The values in cpi->ppi->gf_group are also changed.
2677 */ 2678 static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params, 2679 int is_final_pass) { 2680 if (!cpi->third_pass_ctx) return -1; 2681 AV1_COMMON *const cm = &cpi->common; 2682 RATE_CONTROL *const rc = &cpi->rc; 2683 PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc; 2684 const AV1EncoderConfig *const oxcf = &cpi->oxcf; 2685 FIRSTPASS_STATS next_frame; 2686 const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in; 2687 GF_GROUP *gf_group = &cpi->ppi->gf_group; 2688 const GFConfig *const gf_cfg = &oxcf->gf_cfg; 2689 const int f_w = cm->width; 2690 const int f_h = cm->height; 2691 int i; 2692 const int is_intra_only = rc->frames_since_key == 0; 2693 2694 cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1); 2695 2696 // Reset the GF group data structures unless this is a key 2697 // frame in which case it will already have been done. 2698 if (!is_intra_only) { 2699 av1_zero(cpi->ppi->gf_group); 2700 cpi->gf_frame_index = 0; 2701 } 2702 2703 GF_GROUP_STATS gf_stats; 2704 accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame, start_pos, 2705 &gf_stats, &i); 2706 2707 const int can_disable_arf = !gf_cfg->gf_min_pyr_height; 2708 2709 // TODO(any): set cpi->ppi->internal_altref_allowed accordingly; 2710 2711 int use_alt_ref = av1_check_use_arf(cpi->third_pass_ctx); 2712 if (use_alt_ref == 0 && !can_disable_arf) return -1; 2713 if (use_alt_ref) { 2714 gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height; 2715 } else { 2716 gf_group->max_layer_depth_allowed = 0; 2717 } 2718 2719 update_gop_length(rc, p_rc, i, is_final_pass); 2720 2721 // Set up the structure of this Group-Of-Pictures (same as GF_GROUP) 2722 av1_gop_setup_structure(cpi); 2723 2724 set_gop_bits_boost(cpi, i, is_intra_only, is_final_pass, use_alt_ref, 0, 2725 start_pos, &gf_stats); 2726 2727 frame_params->frame_type = cpi->third_pass_ctx->frame_info[0].frame_type; 2728 frame_params->show_frame = 
cpi->third_pass_ctx->frame_info[0].is_show_frame; 2729 return 0; 2730 } 2731 #endif // CONFIG_THREE_PASS 2732 2733 // Minimum % intra coding observed in first pass (1.0 = 100%) 2734 #define MIN_INTRA_LEVEL 0.25 2735 // Minimum ratio between the % of intra coding and inter coding in the first 2736 // pass after discounting neutral blocks (discounting neutral blocks in this 2737 // way helps catch scene cuts in clips with very flat areas or letter box 2738 // format clips with image padding. 2739 #define INTRA_VS_INTER_THRESH 2.0 2740 // Hard threshold where the first pass chooses intra for almost all blocks. 2741 // In such a case even if the frame is not a scene cut coding a key frame 2742 // may be a good option. 2743 #define VERY_LOW_INTER_THRESH 0.05 2744 // Maximum threshold for the relative ratio of intra error score vs best 2745 // inter error score. 2746 #define KF_II_ERR_THRESHOLD 1.9 2747 // In real scene cuts there is almost always a sharp change in the intra 2748 // or inter error score. 2749 #define ERR_CHANGE_THRESHOLD 0.4 2750 // For real scene cuts we expect an improvment in the intra inter error 2751 // ratio in the next frame. 2752 #define II_IMPROVEMENT_THRESHOLD 3.5 2753 #define KF_II_MAX 128.0 2754 // Intra / Inter threshold very low 2755 #define VERY_LOW_II 1.5 2756 // Clean slide transitions we expect a sharp single frame spike in error. 2757 #define ERROR_SPIKE 5.0 2758 2759 // Slide show transition detection. 2760 // Tests for case where there is very low error either side of the current frame 2761 // but much higher just for this frame. This can help detect key frames in 2762 // slide shows even where the slides are pictures of different sizes. 2763 // Also requires that intra and inter errors are very similar to help eliminate 2764 // harmful false positives. 2765 // It will not help if the transition is a fade or other multi-frame effect. 
static int slide_transition(const FIRSTPASS_STATS *this_frame,
                            const FIRSTPASS_STATS *last_frame,
                            const FIRSTPASS_STATS *next_frame) {
  return (this_frame->intra_error < (this_frame->coded_error * VERY_LOW_II)) &&
         (this_frame->coded_error > (last_frame->coded_error * ERROR_SPIKE)) &&
         (this_frame->coded_error > (next_frame->coded_error * ERROR_SPIKE));
}

// Threshold for use of the lagging second reference frame. High second ref
// usage may point to a transient event like a flash or occlusion rather than
// a real scene cut.
// We adapt the threshold based on number of frames in this key-frame group so
// far.
static double get_second_ref_usage_thresh(int frame_count_so_far) {
  const int adapt_upto = 32;
  const double min_second_ref_usage_thresh = 0.085;
  const double second_ref_usage_thresh_max_delta = 0.035;
  if (frame_count_so_far >= adapt_upto) {
    return min_second_ref_usage_thresh + second_ref_usage_thresh_max_delta;
  }
  // Ramp the threshold linearly from the minimum towards the maximum as the
  // group grows.
  return min_second_ref_usage_thresh +
         ((double)frame_count_so_far / (adapt_upto - 1)) *
             second_ref_usage_thresh_max_delta;
}

// Test whether the frame at this_stats_index is a viable key frame candidate,
// based on the first-pass stats of the frame and its immediate neighbors.
// Returns 1 if the frame is a viable key frame, 0 otherwise.
static int test_candidate_kf(const FIRSTPASS_INFO *firstpass_info,
                             int this_stats_index, int frame_count_so_far,
                             enum aom_rc_mode rc_mode, int scenecut_mode,
                             int num_mbs) {
  const FIRSTPASS_STATS *last_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index - 1);
  const FIRSTPASS_STATS *this_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index);
  const FIRSTPASS_STATS *next_stats =
      av1_firstpass_info_peek(firstpass_info, this_stats_index + 1);
  if (last_stats == NULL || this_stats == NULL || next_stats == NULL) {
    return 0;
  }

  int is_viable_kf = 0;
  double pcnt_intra = 1.0 - this_stats->pcnt_inter;
  double modified_pcnt_inter =
      this_stats->pcnt_inter - this_stats->pcnt_neutral;
  const double second_ref_usage_thresh =
      get_second_ref_usage_thresh(frame_count_so_far);
  int frames_to_test_after_candidate_key = SCENE_CUT_KEY_TEST_INTERVAL;
  int count_for_tolerable_prediction = 3;

  // We do "-1" because the candidate key is not counted.
  int stats_after_this_stats =
      av1_firstpass_info_future_count(firstpass_info, this_stats_index) - 1;

  if (scenecut_mode == ENABLE_SCENECUT_MODE_1) {
    if (stats_after_this_stats < 3) {
      return 0;
    } else {
      frames_to_test_after_candidate_key = 3;
      count_for_tolerable_prediction = 1;
    }
  }
  // Make sure we have enough stats after the candidate key.
  frames_to_test_after_candidate_key =
      AOMMIN(frames_to_test_after_candidate_key, stats_after_this_stats);

  // Does the frame satisfy the primary criteria of a key frame?
  // See above for an explanation of the test criteria.
  // If so, then examine how well it predicts subsequent frames.
  if (IMPLIES(rc_mode == AOM_Q, frame_count_so_far >= 3) &&
      (this_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      (next_stats->pcnt_second_ref < second_ref_usage_thresh) &&
      ((this_stats->pcnt_inter < VERY_LOW_INTER_THRESH) ||
       slide_transition(this_stats, last_stats, next_stats) ||
       ((pcnt_intra > MIN_INTRA_LEVEL) &&
        (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
        ((this_stats->intra_error /
          DOUBLE_DIVIDE_CHECK(this_stats->coded_error)) <
         KF_II_ERR_THRESHOLD) &&
        ((fabs(last_stats->coded_error - this_stats->coded_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->coded_error) >
          ERR_CHANGE_THRESHOLD) ||
         (fabs(last_stats->intra_error - this_stats->intra_error) /
              DOUBLE_DIVIDE_CHECK(this_stats->intra_error) >
          ERR_CHANGE_THRESHOLD) ||
         ((next_stats->intra_error /
           DOUBLE_DIVIDE_CHECK(next_stats->coded_error)) >
          II_IMPROVEMENT_THRESHOLD))))) {
    int i;
    double boost_score = 0.0;
    double old_boost_score = 0.0;
    double decay_accumulator = 1.0;

    // Examine how well the key frame predicts subsequent frames.
    for (i = 1; i <= frames_to_test_after_candidate_key; ++i) {
      // Get the next frame details
      const FIRSTPASS_STATS *local_next_frame =
          av1_firstpass_info_peek(firstpass_info, this_stats_index + i);
      double next_iiratio =
          (BOOST_FACTOR * local_next_frame->intra_error /
           DOUBLE_DIVIDE_CHECK(local_next_frame->coded_error));

      if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;

      // Cumulative effect of decay in prediction quality.
      if (local_next_frame->pcnt_inter > 0.85)
        decay_accumulator *= local_next_frame->pcnt_inter;
      else
        decay_accumulator *= (0.85 + local_next_frame->pcnt_inter) / 2.0;

      // Keep a running total.
      boost_score += (decay_accumulator * next_iiratio);

      // Test various breakout clauses.
      // TODO(any): Test of intra error should be normalized to an MB.
      if ((local_next_frame->pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
          (((local_next_frame->pcnt_inter - local_next_frame->pcnt_neutral) <
            0.20) &&
           (next_iiratio < 3.0)) ||
          ((boost_score - old_boost_score) < 3.0) ||
          (local_next_frame->intra_error < (200.0 / (double)num_mbs))) {
        break;
      }

      old_boost_score = boost_score;
    }

    // If there is tolerable prediction for at least the next 3 frames then
    // break out else discard this potential key frame and move on
    if (boost_score > 30.0 && (i > count_for_tolerable_prediction)) {
      is_viable_kf = 1;
    } else {
      is_viable_kf = 0;
    }
  }
  return is_viable_kf;
}

#define FRAMES_TO_CHECK_DECAY 8
#define KF_MIN_FRAME_BOOST 80.0
#define KF_MAX_FRAME_BOOST 128.0
#define MIN_KF_BOOST 600  // Minimum boost for non-static KF interval
#define MAX_KF_BOOST 3200
#define MIN_STATIC_KF_BOOST 5400  // Minimum boost for static KF interval

// Returns the number of frames to the next application-forced key frame
// pending in the lookahead, or -1 if there is none (per the callers'
// comparisons against -1 below).
static int detect_app_forced_key(AV1_COMP *cpi) {
  int num_frames_to_app_forced_key = is_forced_keyframe_pending(
      cpi->ppi->lookahead, cpi->ppi->lookahead->max_sz, cpi->compressor_stage);
  return num_frames_to_app_forced_key;
}

// Project the key-frame boost forward when not all of the KF group's stats
// were available at the time the boost was computed.
static int get_projected_kf_boost(AV1_COMP *cpi) {
  /*
   * If num_stats_used_for_kf_boost >= frames_to_key, then
   * all stats needed for prior boost calculation are available.
   * Hence projecting the prior boost is not needed in these cases.
   */
  if (cpi->ppi->p_rc.num_stats_used_for_kf_boost >= cpi->rc.frames_to_key)
    return cpi->ppi->p_rc.kf_boost;

  // Get the current tpl factor (number of frames = frames_to_key).
  double tpl_factor = av1_get_kf_boost_projection_factor(cpi->rc.frames_to_key);
  // Get the tpl factor when number of frames = num_stats_used_for_kf_boost.
  double tpl_factor_num_stats = av1_get_kf_boost_projection_factor(
      cpi->ppi->p_rc.num_stats_used_for_kf_boost);
  // Scale the previously computed boost by the ratio of the two factors.
  int projected_kf_boost =
      (int)rint((tpl_factor * cpi->ppi->p_rc.kf_boost) / tpl_factor_num_stats);
  return projected_kf_boost;
}

/*!\brief Determine the location of the next key frame
 *
 * \ingroup gf_group_algo
 * This function decides the placement of the next key frame when a
 * scenecut is detected or the maximum key frame distance is reached.
 *
 * \param[in]    cpi                              Top-level encoder structure
 * \param[in]    firstpass_info                   struct for firstpass info
 * \param[in]    num_frames_to_detect_scenecut    Maximum lookahead frames.
 * \param[in]    search_start_idx                 the start index for searching
 *                                                key frame. Set it to one if we
 *                                                already know the current frame
 *                                                is key frame. Otherwise, set
 *                                                it to zero.
 *
 * \return       Number of frames to the next key including the current frame.
 */
static int define_kf_interval(AV1_COMP *cpi,
                              const FIRSTPASS_INFO *firstpass_info,
                              int num_frames_to_detect_scenecut,
                              int search_start_idx) {
  const TWO_PASS *const twopass = &cpi->ppi->twopass;
  const RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
  double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
  double decay_accumulator = 1.0;
  int i = 0, j;
  int frames_to_key = search_start_idx;
  int frames_since_key = rc->frames_since_key + 1;
  int scenecut_detected = 0;

  // -1 if no application-forced key frame is pending.
  int num_frames_to_next_key = detect_app_forced_key(cpi);

  // With no scenecut search window, fall back to the forced key frame
  // distance (if any) or the current frames_to_key.
  if (num_frames_to_detect_scenecut == 0) {
    if (num_frames_to_next_key != -1)
      return num_frames_to_next_key;
    else
      return rc->frames_to_key;
  }

  // Never search beyond a pending forced key frame.
  if (num_frames_to_next_key != -1)
    num_frames_to_detect_scenecut =
        AOMMIN(num_frames_to_detect_scenecut, num_frames_to_next_key);

  // Initialize the decay rates for the recent frames to check
  for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0;

  i = 0;
  const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
                          ? cpi->initial_mbs
                          : cpi->common.mi_params.MBs;
  const int future_stats_count =
      av1_firstpass_info_future_count(firstpass_info, 0);
  while (frames_to_key < future_stats_count &&
         frames_to_key < num_frames_to_detect_scenecut) {
    // Provided that we are not at the end of the file...
    if ((cpi->ppi->p_rc.enable_scenecut_detection > 0) && kf_cfg->auto_key &&
        frames_to_key + 1 < future_stats_count) {
      double loop_decay_rate;

      // Check for a scene cut.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = test_candidate_kf(
            &twopass->firstpass_info, frames_to_key, frames_since_key,
            oxcf->rc_cfg.mode, cpi->ppi->p_rc.enable_scenecut_detection,
            num_mbs);
        if (scenecut_detected) {
          break;
        }
      }

      // How fast is the prediction quality decaying?
      const FIRSTPASS_STATS *next_stats =
          av1_firstpass_info_peek(firstpass_info, frames_to_key + 1);
      loop_decay_rate = get_prediction_decay_rate(next_stats);

      // We want to know something about the recent past... rather than
      // as used elsewhere where we are concerned with decay in prediction
      // quality since the last GF or KF.
      recent_loop_decay[i % FRAMES_TO_CHECK_DECAY] = loop_decay_rate;
      decay_accumulator = 1.0;
      for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j)
        decay_accumulator *= recent_loop_decay[j];

      // Special check for transition or high motion followed by a
      // static scene.
      if (frames_since_key >= kf_cfg->key_freq_min) {
        scenecut_detected = detect_transition_to_still(
            firstpass_info, frames_to_key + 1, rc->min_gf_interval, i,
            kf_cfg->key_freq_max - i, loop_decay_rate, decay_accumulator);
        if (scenecut_detected) {
          // In the case of transition followed by a static scene, the key frame
          // could be a good predictor for the following frames, therefore we
          // do not use an arf.
          p_rc->use_arf_in_this_kf_group = 0;
          break;
        }
      }

      // Step on to the next frame.
      ++frames_to_key;
      ++frames_since_key;

      // If we don't have a real key frame within the next two
      // key_freq_max intervals then break out of the loop.
      if (frames_to_key >= 2 * kf_cfg->key_freq_max) {
        break;
      }
    } else {
      ++frames_to_key;
      ++frames_since_key;
    }
    ++i;
  }
  // NOTE(review): when lap_enabled and no scenecut was found, this may return
  // -1 (no forced key pending) — callers check for that value.
  if (cpi->ppi->lap_enabled && !scenecut_detected)
    frames_to_key = num_frames_to_next_key;

  return frames_to_key;
}

// Average coded error over (up to) the first frames_to_key frames of the KF
// group, scanning the first-pass stats from start_position.
static double get_kf_group_avg_error(TWO_PASS *twopass,
                                     TWO_PASS_FRAME *twopass_frame,
                                     const FIRSTPASS_STATS *first_frame,
                                     const FIRSTPASS_STATS *start_position,
                                     int frames_to_key) {
  FIRSTPASS_STATS cur_frame = *first_frame;
  int num_frames, i;
  double kf_group_avg_error = 0.0;

  reset_fpf_position(twopass_frame, start_position);

  for (i = 0; i < frames_to_key; ++i) {
    kf_group_avg_error += cur_frame.coded_error;
    if (EOF == input_stats(twopass, twopass_frame, &cur_frame)) break;
  }
  num_frames = i + 1;
  num_frames = AOMMIN(num_frames, frames_to_key);
  kf_group_avg_error = kf_group_avg_error / num_frames;

  return (kf_group_avg_error);
}

// Work out the total number of bits to allocate to the whole KF group.
static int64_t get_kf_group_bits(AV1_COMP *cpi, double kf_group_err,
                                 double kf_group_avg_error) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  int64_t kf_group_bits;
  if (cpi->ppi->lap_enabled) {
    // Single-pass LAP: allocate based on average per-frame bandwidth.
    kf_group_bits = (int64_t)rc->frames_to_key * rc->avg_frame_bandwidth;
    if (cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap) {
      double vbr_corpus_complexity_lap =
          cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap / 10.0;
      /* Get the average corpus complexity of the frame */
      kf_group_bits = (int64_t)(kf_group_bits * (kf_group_avg_error /
                                                 vbr_corpus_complexity_lap));
    }
  } else {
    // Two-pass: allocate in proportion to the group's share of the remaining
    // modified error.
    kf_group_bits = (int64_t)(twopass->bits_left *
                              (kf_group_err / twopass->modified_error_left));
  }

  return kf_group_bits;
}

// Accumulate first-pass stats over the KF group (minus the key frame itself)
// and average them into *avg_frame_stat. Returns the number of frames read;
// the averaging step is skipped when fewer than 2 frames were available.
static int calc_avg_stats(AV1_COMP *cpi, FIRSTPASS_STATS *avg_frame_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS cur_frame;
  av1_zero(cur_frame);
  int num_frames = 0;
  // Accumulate total stat using available number of stats.
  for (num_frames = 0; num_frames < (rc->frames_to_key - 1); ++num_frames) {
    if (EOF == input_stats(twopass, &cpi->twopass_frame, &cur_frame)) break;
    av1_accumulate_stats(avg_frame_stat, &cur_frame);
  }

  if (num_frames < 2) {
    return num_frames;
  }
  // Average the total stat
  avg_frame_stat->weight = avg_frame_stat->weight / num_frames;
  avg_frame_stat->intra_error = avg_frame_stat->intra_error / num_frames;
  avg_frame_stat->frame_avg_wavelet_energy =
      avg_frame_stat->frame_avg_wavelet_energy / num_frames;
  avg_frame_stat->coded_error = avg_frame_stat->coded_error / num_frames;
  avg_frame_stat->sr_coded_error = avg_frame_stat->sr_coded_error / num_frames;
  avg_frame_stat->pcnt_inter = avg_frame_stat->pcnt_inter / num_frames;
  avg_frame_stat->pcnt_motion = avg_frame_stat->pcnt_motion / num_frames;
  avg_frame_stat->pcnt_second_ref =
      avg_frame_stat->pcnt_second_ref / num_frames;
  avg_frame_stat->pcnt_neutral = avg_frame_stat->pcnt_neutral / num_frames;
  avg_frame_stat->intra_skip_pct = avg_frame_stat->intra_skip_pct / num_frames;
  avg_frame_stat->inactive_zone_rows =
      avg_frame_stat->inactive_zone_rows / num_frames;
  avg_frame_stat->inactive_zone_cols =
      avg_frame_stat->inactive_zone_cols / num_frames;
  avg_frame_stat->MVr = avg_frame_stat->MVr / num_frames;
  avg_frame_stat->mvr_abs = avg_frame_stat->mvr_abs / num_frames;
  avg_frame_stat->MVc = avg_frame_stat->MVc / num_frames;
  avg_frame_stat->mvc_abs = avg_frame_stat->mvc_abs / num_frames;
  avg_frame_stat->MVrv = avg_frame_stat->MVrv / num_frames;
  avg_frame_stat->MVcv = avg_frame_stat->MVcv / num_frames;
  avg_frame_stat->mv_in_out_count =
      avg_frame_stat->mv_in_out_count / num_frames;
  avg_frame_stat->new_mv_count = avg_frame_stat->new_mv_count / num_frames;
  avg_frame_stat->count = avg_frame_stat->count / num_frames;
  avg_frame_stat->duration = avg_frame_stat->duration / num_frames;

  return num_frames;
}

// Scan the KF group's first-pass stats (or, when use_avg_stat is set, a single
// averaged stat) and accumulate the key frame boost score.
static double get_kf_boost_score(AV1_COMP *cpi, double kf_raw_err,
                                 double *zero_motion_accumulator,
                                 double *sr_accumulator, int use_avg_stat) {
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  FIRSTPASS_STATS frame_stat;
  av1_zero(frame_stat);
  int i = 0, num_stat_used = 0;
  double boost_score = 0.0;
  const double kf_max_boost =
      cpi->oxcf.rc_cfg.mode == AOM_Q
          ? fclamp(rc->frames_to_key * 2.0, KF_MIN_FRAME_BOOST,
                   KF_MAX_FRAME_BOOST)
          : KF_MAX_FRAME_BOOST;

  // Calculate the average using available number of stats.
  if (use_avg_stat) num_stat_used = calc_avg_stats(cpi, &frame_stat);

  for (i = num_stat_used; i < (rc->frames_to_key - 1); ++i) {
    if (!use_avg_stat &&
        EOF == input_stats(twopass, &cpi->twopass_frame, &frame_stat))
      break;

    // Monitor for static sections.
    // For the first frame in kf group, the second ref indicator is invalid.
    if (i > 0) {
      *zero_motion_accumulator =
          AOMMIN(*zero_motion_accumulator, get_zero_motion_factor(&frame_stat));
    } else {
      *zero_motion_accumulator = frame_stat.pcnt_inter - frame_stat.pcnt_motion;
    }

    // Not all frames in the group are necessarily used in calculating boost.
    if ((*sr_accumulator < (kf_raw_err * 1.50)) &&
        (i <= rc->max_gf_interval * 2)) {
      double frame_boost;
      double zm_factor;

      // Factor 0.75-1.25 based on how much of frame is static.
      zm_factor = (0.75 + (*zero_motion_accumulator / 2.0));

      if (i < 2) *sr_accumulator = 0.0;
      frame_boost =
          calc_kf_frame_boost(&cpi->ppi->p_rc, frame_info, &frame_stat,
                              sr_accumulator, kf_max_boost);
      boost_score += frame_boost * zm_factor;
    }
  }
  return boost_score;
}

/*!\brief Interval(in seconds) to clip key-frame distance to in LAP.
 */
#define MAX_KF_BITS_INTERVAL_SINGLE_PASS 5

/*!\brief Determine the next key frame group
 *
 * \ingroup gf_group_algo
 * This function decides the placement of the next key frame, and
 * calculates the bit allocation of the KF group and the keyframe itself.
 *
 * \param[in]    cpi              Top-level encoder structure
 * \param[in]    this_frame       Pointer to first pass stats
 */
static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
  const FIRSTPASS_STATS first_frame = *this_frame;
  FIRSTPASS_STATS next_frame;
  const FIRSTPASS_INFO *firstpass_info = &twopass->firstpass_info;
  av1_zero(next_frame);

  rc->frames_since_key = 0;
  // Use arfs if possible.
  p_rc->use_arf_in_this_kf_group = is_altref_enabled(
      oxcf->gf_cfg.lag_in_frames, oxcf->gf_cfg.enable_auto_arf);

  // Reset the GF group data structures.
  av1_zero(*gf_group);
  cpi->gf_frame_index = 0;

  // KF is always a GF so clear frames till next gf counter.
  rc->frames_till_gf_update_due = 0;

  // Without first-pass stats, place the key frame from the forced-key /
  // key_freq_max configuration alone and use the default boost.
  if (has_no_stats_stage(cpi)) {
    int num_frames_to_app_forced_key = detect_app_forced_key(cpi);
    p_rc->this_key_frame_forced =
        current_frame->frame_number != 0 && rc->frames_to_key == 0;
    if (num_frames_to_app_forced_key != -1)
      rc->frames_to_key = num_frames_to_app_forced_key;
    else
      rc->frames_to_key = AOMMAX(1, kf_cfg->key_freq_max);
    correct_frames_to_key(cpi);
    p_rc->kf_boost = DEFAULT_KF_BOOST;
    gf_group->update_type[0] = KF_UPDATE;
    return;
  }
  int i;
  const FIRSTPASS_STATS *const start_position = cpi->twopass_frame.stats_in;
  int kf_bits = 0;
  double zero_motion_accumulator = 1.0;
  double boost_score = 0.0;
  double kf_raw_err = 0.0;
  double kf_mod_err = 0.0;
  double sr_accumulator = 0.0;
  double kf_group_avg_error = 0.0;
  int frames_to_key, frames_to_key_clipped = INT_MAX;
  int64_t kf_group_bits_clipped = INT64_MAX;

  // Is this a forced key frame by interval.
  p_rc->this_key_frame_forced = p_rc->next_key_frame_forced;

  twopass->kf_group_bits = 0;        // Total bits available to kf group
  twopass->kf_group_error_left = 0;  // Group modified error score.

  kf_raw_err = this_frame->intra_error;
  kf_mod_err = calculate_modified_err(frame_info, twopass, oxcf, this_frame);

  // We assume the current frame is a key frame and we are looking for the next
  // key frame. Therefore search_start_idx = 1
  frames_to_key = define_kf_interval(cpi, firstpass_info, kf_cfg->key_freq_max,
                                     /*search_start_idx=*/1);

  if (frames_to_key != -1) {
    rc->frames_to_key = AOMMIN(kf_cfg->key_freq_max, frames_to_key);
  } else {
    rc->frames_to_key = kf_cfg->key_freq_max;
  }

  if (cpi->ppi->lap_enabled) correct_frames_to_key(cpi);

  // If there is a max kf interval set by the user we must obey it.
  // We already breakout of the loop above at 2x max.
  // This code centers the extra kf if the actual natural interval
  // is between 1x and 2x.
  if (kf_cfg->auto_key && rc->frames_to_key > kf_cfg->key_freq_max) {
    FIRSTPASS_STATS tmp_frame = first_frame;

    rc->frames_to_key /= 2;

    // Reset to the start of the group.
    reset_fpf_position(&cpi->twopass_frame, start_position);
    // Rescan to get the correct error data for the forced kf group.
    for (i = 0; i < rc->frames_to_key; ++i) {
      if (EOF == input_stats(twopass, &cpi->twopass_frame, &tmp_frame)) break;
    }
    p_rc->next_key_frame_forced = 1;
  } else if ((cpi->twopass_frame.stats_in ==
                  twopass->stats_buf_ctx->stats_in_end &&
              is_stat_consumption_stage_twopass(cpi)) ||
             rc->frames_to_key >= kf_cfg->key_freq_max) {
    p_rc->next_key_frame_forced = 1;
  } else {
    p_rc->next_key_frame_forced = 0;
  }

  double kf_group_err = 0;
  for (i = 0; i < rc->frames_to_key; ++i) {
    const FIRSTPASS_STATS *this_stats =
        av1_firstpass_info_peek(&twopass->firstpass_info, i);
    if (this_stats != NULL) {
      // Accumulate kf group error.
      kf_group_err += calculate_modified_err_new(
          frame_info, &firstpass_info->total_stats, this_stats,
          oxcf->rc_cfg.vbrbias, twopass->modified_error_min,
          twopass->modified_error_max);
      ++p_rc->num_stats_used_for_kf_boost;
    }
  }

  // Calculate the number of bits that should be assigned to the kf group.
  if ((twopass->bits_left > 0 && twopass->modified_error_left > 0.0) ||
      (cpi->ppi->lap_enabled && oxcf->rc_cfg.mode != AOM_Q)) {
    // Maximum number of bits for a single normal frame (not key frame).
    const int max_bits = frame_max_bits(rc, oxcf);

    // Maximum number of bits allocated to the key frame group.
    int64_t max_grp_bits;

    if (oxcf->rc_cfg.vbr_corpus_complexity_lap) {
      kf_group_avg_error =
          get_kf_group_avg_error(twopass, &cpi->twopass_frame, &first_frame,
                                 start_position, rc->frames_to_key);
    }

    // Default allocation based on bits left and relative
    // complexity of the section.
    twopass->kf_group_bits =
        get_kf_group_bits(cpi, kf_group_err, kf_group_avg_error);
    // Clip based on maximum per frame rate defined by the user.
    max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
    if (twopass->kf_group_bits > max_grp_bits)
      twopass->kf_group_bits = max_grp_bits;
  } else {
    twopass->kf_group_bits = 0;
  }
  twopass->kf_group_bits = AOMMAX(0, twopass->kf_group_bits);

  if (cpi->ppi->lap_enabled) {
    // In the case of single pass based on LAP, frames to key may have an
    // inaccurate value, and hence should be clipped to an appropriate
    // interval.
    frames_to_key_clipped =
        (int)(MAX_KF_BITS_INTERVAL_SINGLE_PASS * cpi->framerate);

    // This variable calculates the bits allocated to kf_group with a clipped
    // frames_to_key.
    if (rc->frames_to_key > frames_to_key_clipped) {
      kf_group_bits_clipped =
          (int64_t)((double)twopass->kf_group_bits * frames_to_key_clipped /
                    rc->frames_to_key);
    }
  }

  // Reset the first pass file position.
  reset_fpf_position(&cpi->twopass_frame, start_position);

  // Scan through the kf group collating various stats used to determine
  // how many bits to spend on it.
  boost_score = get_kf_boost_score(cpi, kf_raw_err, &zero_motion_accumulator,
                                   &sr_accumulator, 0);
  reset_fpf_position(&cpi->twopass_frame, start_position);
  // Store the zero motion percentage
  twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);

  // Calculate a section intra ratio used in setting max loop filter.
  twopass->section_intra_rating = calculate_section_intra_ratio(
      start_position, twopass->stats_buf_ctx->stats_in_end, rc->frames_to_key);

  p_rc->kf_boost = (int)boost_score;

  if (cpi->ppi->lap_enabled) {
    if (oxcf->rc_cfg.mode == AOM_Q) {
      p_rc->kf_boost = get_projected_kf_boost(cpi);
    } else {
      // TODO(any): Explore using average frame stats for AOM_Q as well.
      boost_score = get_kf_boost_score(
          cpi, kf_raw_err, &zero_motion_accumulator, &sr_accumulator, 1);
      reset_fpf_position(&cpi->twopass_frame, start_position);
      p_rc->kf_boost += (int)boost_score;
    }
  }

  // Special case for static / slide show content but don't apply
  // if the kf group is very short.
  if ((zero_motion_accumulator > STATIC_KF_GROUP_FLOAT_THRESH) &&
      (rc->frames_to_key > 8)) {
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_STATIC_KF_BOOST);
  } else {
    // Apply various clamps for min and max boost
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, (rc->frames_to_key * 3));
    p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_KF_BOOST);
#ifdef STRICT_RC
    p_rc->kf_boost = AOMMIN(p_rc->kf_boost, MAX_KF_BOOST);
#endif
  }

  // Work out how many bits to allocate for the key frame itself.
  // In case of LAP enabled for VBR, if the frames_to_key value is
  // very high, we calculate the bits based on a clipped value of
  // frames_to_key.
  kf_bits = calculate_boost_bits(
      AOMMIN(rc->frames_to_key, frames_to_key_clipped) - 1, p_rc->kf_boost,
      AOMMIN(twopass->kf_group_bits, kf_group_bits_clipped));
  kf_bits = adjust_boost_bits_for_target_level(cpi, rc, kf_bits,
                                               twopass->kf_group_bits, 0);

  twopass->kf_group_bits -= kf_bits;

  // Save the bits to spend on the key frame.
  gf_group->bit_allocation[0] = kf_bits;
  gf_group->update_type[0] = KF_UPDATE;

  // Note the total error score of the kf group minus the key frame itself.
  if (cpi->ppi->lap_enabled)
    // As we don't have enough stats to know the actual error of the group,
    // we assume the complexity of each frame to be equal to 1, and set the
    // error as the number of frames in the group(minus the keyframe).
    twopass->kf_group_error_left = (double)(rc->frames_to_key - 1);
  else
    twopass->kf_group_error_left = kf_group_err - kf_mod_err;

  // Adjust the count of total modified error left.
  // The count of bits left is adjusted elsewhere based on real coded frame
  // sizes.
  twopass->modified_error_left -= kf_group_err;
}

#define ARF_STATS_OUTPUT 0
#if ARF_STATS_OUTPUT
unsigned int arf_count = 0;
#endif

// Per-frame target bandwidth for the remaining section of the clip: average
// frame bandwidth under LAP, otherwise bits_left spread over the frames left.
static int get_section_target_bandwidth(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  int64_t section_target_bandwidth;
  const int frames_left = (int)(twopass->stats_buf_ctx->total_stats->count -
                                current_frame->frame_number);
  if (cpi->ppi->lap_enabled)
    section_target_bandwidth = rc->avg_frame_bandwidth;
  else {
    // NOTE(review): assumes frames_left > 0 here — presumably guaranteed by
    // the two-pass call sites; confirm against callers.
    section_target_bandwidth = twopass->bits_left / frames_left;
    section_target_bandwidth = AOMMIN(section_target_bandwidth, INT_MAX);
  }
  return (int)section_target_bandwidth;
}

static inline void set_twopass_params_based_on_fp_stats(
    AV1_COMP *cpi, const FIRSTPASS_STATS *this_frame_ptr) {
  if (this_frame_ptr == NULL) return;

  TWO_PASS_FRAME *twopass_frame = &cpi->twopass_frame;
  // The multiplication by 256 reverses a scaling factor of (>> 8)
  // applied when combining MB error values for the frame.
  twopass_frame->mb_av_energy = log1p(this_frame_ptr->intra_error);

  const FIRSTPASS_STATS *const total_stats =
      cpi->ppi->twopass.stats_buf_ctx->total_stats;
  // Only trust the wavelet energy stat when the first pass produced a
  // valid value for it.
  if (is_fp_wavelet_energy_invalid(total_stats) == 0) {
    twopass_frame->frame_avg_haar_energy =
        log1p(this_frame_ptr->frame_avg_wavelet_energy);
  }

  // Set the frame content type flag.
  if (this_frame_ptr->intra_skip_pct >= FC_ANIMATION_THRESH)
    twopass_frame->fr_content_type = FC_GRAPHICS_ANIMATION;
  else
    twopass_frame->fr_content_type = FC_NORMAL;
}

// Copies the next first-pass stats packet into *this_frame (advancing the
// stats read pointer) and derives per-frame two-pass parameters from it.
// On the very first frame of the clip (frame 0, gf index 0, non-AOM_Q mode)
// it also seeds the rate-control state with an initial worst-quality
// estimate for the whole section.
static void process_first_pass_stats(AV1_COMP *cpi,
                                     FIRSTPASS_STATS *this_frame) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;

  if (cpi->oxcf.rc_cfg.mode != AOM_Q && current_frame->frame_number == 0 &&
      cpi->gf_frame_index == 0 && total_stats &&
      twopass->stats_buf_ctx->total_left_stats) {
    if (cpi->ppi->lap_enabled) {
      /*
       * Accumulate total_stats using available limited number of stats,
       * and assign it to total_left_stats.
       */
      *twopass->stats_buf_ctx->total_left_stats = *total_stats;
    }
    // Special case code for first frame.
    const int section_target_bandwidth = get_section_target_bandwidth(cpi);
    const double section_length =
        twopass->stats_buf_ctx->total_left_stats->count;
    const double section_error =
        twopass->stats_buf_ctx->total_left_stats->coded_error / section_length;
    const double section_intra_skip =
        twopass->stats_buf_ctx->total_left_stats->intra_skip_pct /
        section_length;
    const double section_inactive_zone =
        (twopass->stats_buf_ctx->total_left_stats->inactive_zone_rows * 2) /
        ((double)cm->mi_params.mb_rows * section_length);
    const int tmp_q = get_twopass_worst_quality(
        cpi, section_error, section_intra_skip + section_inactive_zone,
        section_target_bandwidth);

    // Seed all the quality trackers from the initial estimate.
    rc->active_worst_quality = tmp_q;
    rc->ni_av_qi = tmp_q;
    p_rc->last_q[INTER_FRAME] = tmp_q;
    p_rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->seq_params->bit_depth);
    p_rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
    // Key frames start halfway between the estimate and the best allowed q.
    p_rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.rc_cfg.best_allowed_q) / 2;
    p_rc->avg_frame_qindex[KEY_FRAME] = p_rc->last_q[KEY_FRAME];
  }

  if (cpi->twopass_frame.stats_in < twopass->stats_buf_ctx->stats_in_end) {
    *this_frame = *cpi->twopass_frame.stats_in;
    ++cpi->twopass_frame.stats_in;
  }
  set_twopass_params_based_on_fp_stats(cpi, this_frame);
}

// Records the current GF group frame's bit allocation as the base frame
// target; in the no-stats (one-pass) stages it also applies the target
// immediately via av1_rc_set_frame_target().
void av1_setup_target_rate(AV1_COMP *cpi) {
  RATE_CONTROL *const rc = &cpi->rc;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;

  int target_rate = gf_group->bit_allocation[cpi->gf_frame_index];

  if (has_no_stats_stage(cpi)) {
    av1_rc_set_frame_target(cpi, target_rate, cpi->common.width,
                            cpi->common.height);
  }

  rc->base_frame_target = target_rate;
}

// Flags frames that look like flashes: a frame is marked when its successor
// predicts distinctly better from the second (older) reference than from it
// (pcnt_second_ref > pcnt_inter and pcnt_second_ref >= 0.5).
static void mark_flashes(FIRSTPASS_STATS *first_stats,
                         FIRSTPASS_STATS *last_stats) {
  FIRSTPASS_STATS *this_stats = first_stats, *next_stats;
  while (this_stats < last_stats - 1) {
    next_stats =
        this_stats + 1;
    if (next_stats->pcnt_second_ref > next_stats->pcnt_inter &&
        next_stats->pcnt_second_ref >= 0.5) {
      this_stats->is_flash = 1;
    } else {
      this_stats->is_flash = 0;
    }
    this_stats = next_stats;
  }
  // We always treat the last one as none flash.
  if (last_stats - 1 >= first_stats) {
    (last_stats - 1)->is_flash = 0;
  }
}

// Smooth-out the noise variance so it is more stable
// Returns 0 on success, -1 on memory allocation failure.
// TODO(bohanli): Use a better low-pass filter than averaging
static int smooth_filter_noise(FIRSTPASS_STATS *first_stats,
                               FIRSTPASS_STATS *last_stats) {
  int len = (int)(last_stats - first_stats);
  double *smooth_noise = aom_malloc(len * sizeof(*smooth_noise));
  if (!smooth_noise) return -1;

  // Box filter of width 2 * HALF_FILT_LEN + 1, clamped at the sequence
  // edges; flash frames are excluded from the average.
  for (int i = 0; i < len; i++) {
    double total_noise = 0;
    double total_wt = 0;
    for (int j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
      int idx = clamp(i + j, 0, len - 1);
      if (first_stats[idx].is_flash) continue;

      total_noise += first_stats[idx].noise_var;
      total_wt += 1.0;
    }
    if (total_wt > 0.01) {
      total_noise /= total_wt;
    } else {
      // Every neighbor was a flash; keep the original value.
      total_noise = first_stats[i].noise_var;
    }
    smooth_noise[i] = total_noise;
  }

  // Write back in a second pass so the filter reads unmodified inputs.
  for (int i = 0; i < len; i++) {
    first_stats[i].noise_var = smooth_noise[i];
  }

  aom_free(smooth_noise);
  return 0;
}

// Estimate the noise variance of each frame from the first pass stats
static void estimate_noise(FIRSTPASS_STATS *first_stats,
                           FIRSTPASS_STATS *last_stats,
                           struct aom_internal_error_info *error_info) {
  FIRSTPASS_STATS *this_stats, *next_stats;
  double C1, C2, C3, noise;
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    this_stats->noise_var = 0.0;
    // flashes tend to have high correlation of innovations, so ignore them.
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;

    // Correlation-style products over a three-frame window; negative or
    // zero products mean the estimate is unusable for this frame.
    C1 = (this_stats - 1)->intra_error *
         (this_stats->intra_error - this_stats->coded_error);
    C2 = (this_stats - 2)->intra_error *
         ((this_stats - 1)->intra_error - (this_stats - 1)->coded_error);
    C3 = (this_stats - 2)->intra_error *
         (this_stats->intra_error - this_stats->sr_coded_error);
    if (C1 <= 0 || C2 <= 0 || C3 <= 0) continue;
    C1 = sqrt(C1);
    C2 = sqrt(C2);
    C3 = sqrt(C3);

    noise = (this_stats - 1)->intra_error - C1 * C2 / C3;
    noise = AOMMAX(noise, 0.01);
    this_stats->noise_var = noise;
  }

  // Copy noise from the neighbor if the noise value is not trustworthy
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash)
      continue;
    if (this_stats->noise_var < 1.0) {
      int found = 0;
      // TODO(bohanli): consider expanding to two directions at the same time
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      // Nothing usable ahead; search backwards instead.
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // copy the noise if this is a flash
  for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
    if (this_stats->is_flash || (this_stats - 1)->is_flash ||
        (this_stats - 2)->is_flash) {
      int found = 0;
      for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        found = 1;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
      if (found) continue;
      for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
           next_stats--) {
        if (next_stats->is_flash || (next_stats - 1)->is_flash ||
            (next_stats - 2)->is_flash)
          continue;
        this_stats->noise_var = next_stats->noise_var;
        break;
      }
    }
  }

  // if we are at the first 2 frames, copy the noise
  for (this_stats = first_stats;
       this_stats < first_stats + 2 && (first_stats + 2) < last_stats;
       this_stats++) {
    this_stats->noise_var = (first_stats + 2)->noise_var;
  }

  if (smooth_filter_noise(first_stats, last_stats) == -1) {
    aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
                       "Error allocating buffers in smooth_filter_noise()");
  }
}

// Estimate correlation coefficient of each frame with its previous frame.
static void estimate_coeff(FIRSTPASS_STATS *first_stats,
                           FIRSTPASS_STATS *last_stats) {
  FIRSTPASS_STATS *this_stats;
  for (this_stats = first_stats + 1; this_stats < last_stats; this_stats++) {
    // Denominators are floored at 0.001 to avoid division by zero or
    // negative values after subtracting the noise variance.
    const double C =
        sqrt(AOMMAX((this_stats - 1)->intra_error *
                        (this_stats->intra_error - this_stats->coded_error),
                    0.001));
    const double cor_coeff =
        C /
        AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var, 0.001);

    this_stats->cor_coeff =
        cor_coeff *
        sqrt(AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var,
                    0.001) /
             AOMMAX(this_stats->intra_error - this_stats->noise_var, 0.001));
    // clip correlation coefficient.
    this_stats->cor_coeff = fclamp(this_stats->cor_coeff, 0.0, 1.0);
  }
  // The first frame has no predecessor; define it as perfectly correlated.
  first_stats->cor_coeff = 1.0;
}

// Per-frame entry point for the two-pass (and LAP) rate-control strategy:
// decides frame type, defines KF and GF groups when needed, and sets the
// frame's target rate.
void av1_get_second_pass_params(AV1_COMP *cpi,
                                EncodeFrameParams *const frame_params,
                                unsigned int frame_flags) {
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  GF_GROUP *const gf_group = &cpi->ppi->gf_group;
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;

  // Ducky-encode RC-lookahead mode: the GOP structure is already decided,
  // so only the frame type / show flag need to be set here.
  if (cpi->use_ducky_encode &&
      cpi->ducky_encode_info.frame_info.gop_mode == DUCKY_ENCODE_GOP_MODE_RCL) {
    frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
    frame_params->show_frame =
        !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
          gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
    if (cpi->gf_frame_index == 0) {
      av1_tf_info_reset(&cpi->ppi->tf_info);
      av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
    }
    return;
  }

  const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
  int update_total_stats = 0;

  if (is_stat_consumption_stage(cpi) && !cpi->twopass_frame.stats_in) return;

  // Check forced key frames.
  const int frames_to_next_forced_key = detect_app_forced_key(cpi);
  if (frames_to_next_forced_key == 0) {
    rc->frames_to_key = 0;
    frame_flags &= FRAMEFLAGS_KEY;
  } else if (frames_to_next_forced_key > 0 &&
             frames_to_next_forced_key < rc->frames_to_key) {
    rc->frames_to_key = frames_to_next_forced_key;
  }

  assert(cpi->twopass_frame.stats_in != NULL);
  const int update_type = gf_group->update_type[cpi->gf_frame_index];
  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];

  // Mid-group frame (not a key frame): just set the target rate.
  if (cpi->gf_frame_index < gf_group->size && !(frame_flags & FRAMEFLAGS_KEY)) {
    assert(cpi->gf_frame_index < gf_group->size);

    av1_setup_target_rate(cpi);

    // If this is an arf frame then we dont want to read the stats file or
    // advance the input pointer as we already have what we need.
    if (update_type == ARF_UPDATE || update_type == INTNL_ARF_UPDATE) {
      const FIRSTPASS_STATS *const this_frame_ptr =
          read_frame_stats(twopass, &cpi->twopass_frame,
                           gf_group->arf_src_offset[cpi->gf_frame_index]);
      set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
      return;
    }
  }

  if (oxcf->rc_cfg.mode == AOM_Q)
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;

  // At a GF group boundary under LAP, re-check for scene cuts so the
  // keyframe interval can be shortened if one is found.
  if (cpi->gf_frame_index == gf_group->size) {
    if (cpi->ppi->lap_enabled && cpi->ppi->p_rc.enable_scenecut_detection) {
      const int num_frames_to_detect_scenecut = MAX_GF_LENGTH_LAP + 1;
      const int frames_to_key = define_kf_interval(
          cpi, &twopass->firstpass_info, num_frames_to_detect_scenecut,
          /*search_start_idx=*/0);
      if (frames_to_key != -1)
        rc->frames_to_key = AOMMIN(rc->frames_to_key, frames_to_key);
    }
  }

  FIRSTPASS_STATS this_frame;
  av1_zero(this_frame);
  // call above fn
  if (is_stat_consumption_stage(cpi)) {
    if (cpi->gf_frame_index < gf_group->size || rc->frames_to_key == 0) {
      process_first_pass_stats(cpi, &this_frame);
      update_total_stats = 1;
    }
  } else {
    rc->active_worst_quality = oxcf->rc_cfg.cq_level;
  }

  // Keyframe and section processing.
  // Keep a copy of this frame's stats: find_next_key_frame() consumes
  // stats while scanning ahead, so the copy is restored afterwards.
  FIRSTPASS_STATS this_frame_copy;
  this_frame_copy = this_frame;
  if (rc->frames_to_key <= 0) {
    assert(rc->frames_to_key == 0);
    // Define next KF group and assign bits to it.
    frame_params->frame_type = KEY_FRAME;
    find_next_key_frame(cpi, &this_frame);
    this_frame = this_frame_copy;
  }

  if (rc->frames_to_fwd_kf <= 0)
    rc->frames_to_fwd_kf = oxcf->kf_cfg.fwd_kf_dist;

  // Define a new GF/ARF group. (Should always enter here for key frames).
  if (cpi->gf_frame_index == gf_group->size) {
    av1_tf_info_reset(&cpi->ppi->tf_info);
#if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
    vbr_rc_reset_gop_data(&cpi->vbr_rc_info);
#endif  // CONFIG_BITRATE_ACCURACY
    int max_gop_length =
        (oxcf->gf_cfg.lag_in_frames >= 32)
            ? AOMMIN(MAX_GF_INTERVAL, oxcf->gf_cfg.lag_in_frames -
                                          oxcf->algo_cfg.arnr_max_frames / 2)
            : MAX_GF_LENGTH_LAP;

    // Handle forward key frame when enabled.
    if (oxcf->kf_cfg.fwd_kf_dist > 0)
      max_gop_length = AOMMIN(rc->frames_to_fwd_kf + 1, max_gop_length);

    // Use the provided gop size in low delay setting
    if (oxcf->gf_cfg.lag_in_frames == 0) max_gop_length = rc->max_gf_interval;

    // Limit the max gop length for the last gop in 1 pass setting.
    max_gop_length = AOMMIN(max_gop_length, rc->frames_to_key);

    // Identify regions if needed.
    // TODO(bohanli): identify regions for all stats available.
    if (rc->frames_since_key == 0 || rc->frames_since_key == 1 ||
        (p_rc->frames_till_regions_update - rc->frames_since_key <
             rc->frames_to_key &&
         p_rc->frames_till_regions_update - rc->frames_since_key <
             max_gop_length + 1)) {
      // how many frames we can analyze from this frame
      int rest_frames =
          AOMMIN(rc->frames_to_key, MAX_FIRSTPASS_ANALYSIS_FRAMES);
      rest_frames =
          AOMMIN(rest_frames, (int)(twopass->stats_buf_ctx->stats_in_end -
                                    cpi->twopass_frame.stats_in +
                                    (rc->frames_since_key == 0)));
      p_rc->frames_till_regions_update = rest_frames;

      int ret;
      if (cpi->ppi->lap_enabled) {
        // Under LAP the flash/noise/correlation stats must be refreshed
        // over the currently buffered window before identifying regions.
        mark_flashes(twopass->stats_buf_ctx->stats_in_start,
                     twopass->stats_buf_ctx->stats_in_end);
        estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                       twopass->stats_buf_ctx->stats_in_end, cpi->common.error);
        estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                       twopass->stats_buf_ctx->stats_in_end);
        ret = identify_regions(cpi->twopass_frame.stats_in, rest_frames,
                               (rc->frames_since_key == 0), p_rc->regions,
                               &p_rc->num_regions);
      } else {
        ret = identify_regions(
            cpi->twopass_frame.stats_in - (rc->frames_since_key == 0),
            rest_frames, 0, p_rc->regions, &p_rc->num_regions);
      }
      if (ret == -1) {
        aom_internal_error(cpi->common.error, AOM_CODEC_MEM_ERROR,
                           "Error allocating buffers in identify_regions");
      }
    }

    int cur_region_idx =
        find_regions_index(p_rc->regions, p_rc->num_regions,
                           rc->frames_since_key - p_rc->regions_offset);
    if ((cur_region_idx >= 0 &&
         p_rc->regions[cur_region_idx].type == SCENECUT_REGION) ||
        rc->frames_since_key == 0) {
      // If we start from a scenecut, then the last GOP's arf boost is not
      // needed for this GOP.
      cpi->ppi->gf_state.arf_gf_boost_lst = 0;
    }

    int need_gf_len = 1;
#if CONFIG_THREE_PASS
    if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
      // set up bitstream to read
      if (!cpi->third_pass_ctx->input_file_name && oxcf->two_pass_output) {
        cpi->third_pass_ctx->input_file_name = oxcf->two_pass_output;
      }
      av1_open_second_pass_log(cpi, 1);
      THIRD_PASS_GOP_INFO *gop_info = &cpi->third_pass_ctx->gop_info;
      // Read in GOP information from the second pass file.
      av1_read_second_pass_gop_info(cpi->second_pass_log_stream, gop_info,
                                    cpi->common.error);
#if CONFIG_BITRATE_ACCURACY
      TPL_INFO *tpl_info;
      AOM_CHECK_MEM_ERROR(cpi->common.error, tpl_info,
                          aom_malloc(sizeof(*tpl_info)));
      av1_read_tpl_info(tpl_info, cpi->second_pass_log_stream,
                        cpi->common.error);
      aom_free(tpl_info);
#if CONFIG_THREE_PASS
      // TODO(angiebird): Put this part into a func
      cpi->vbr_rc_info.cur_gop_idx++;
#endif  // CONFIG_THREE_PASS
#endif  // CONFIG_BITRATE_ACCURACY
      // Read in third_pass_info from the bitstream.
      av1_set_gop_third_pass(cpi->third_pass_ctx);
      // Read in per-frame info from second-pass encoding
      av1_read_second_pass_per_frame_info(
          cpi->second_pass_log_stream, cpi->third_pass_ctx->frame_info,
          gop_info->num_frames, cpi->common.error);

      // The GF length comes straight from the second-pass log, so no local
      // GF length calculation is needed.
      p_rc->cur_gf_index = 0;
      p_rc->gf_intervals[0] = cpi->third_pass_ctx->gop_info.gf_length;
      need_gf_len = 0;
    }
#endif  // CONFIG_THREE_PASS

    if (need_gf_len) {
      // If we cannot obtain GF group length from second_pass_file
      // TODO(jingning): Resolve the redundant calls here.
      if (rc->intervals_till_gf_calculate_due == 0 || 1) {
        calculate_gf_length(cpi, max_gop_length, MAX_NUM_GF_INTERVALS);
      }

      if (max_gop_length > 16 && oxcf->algo_cfg.enable_tpl_model &&
          oxcf->gf_cfg.lag_in_frames >= 32 &&
          cpi->sf.tpl_sf.gop_length_decision_method != 3) {
        int this_idx = rc->frames_since_key +
                       p_rc->gf_intervals[p_rc->cur_gf_index] -
                       p_rc->regions_offset - 1;
        int this_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx);
        int next_region =
            find_regions_index(p_rc->regions, p_rc->num_regions, this_idx + 1);
        // TODO(angiebird): Figure out why this_region and next_region are -1 in
        // unit test like AltRefFramePresenceTestLarge (aomedia:3134)
        int is_last_scenecut =
            p_rc->gf_intervals[p_rc->cur_gf_index] >= rc->frames_to_key ||
            (this_region != -1 &&
             p_rc->regions[this_region].type == SCENECUT_REGION) ||
            (next_region != -1 &&
             p_rc->regions[next_region].type == SCENECUT_REGION);

        int ori_gf_int = p_rc->gf_intervals[p_rc->cur_gf_index];

        if (p_rc->gf_intervals[p_rc->cur_gf_index] > 16 &&
            rc->min_gf_interval <= 16) {
          // The calculate_gf_length function is previously used with
          // max_gop_length = 32 with look-ahead gf intervals.
          define_gf_group(cpi, frame_params, 0);
          av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
          this_frame = this_frame_copy;

          if (is_shorter_gf_interval_better(cpi, frame_params)) {
            // A shorter gf interval is better.
            // TODO(jingning): Remove redundant computations here.
            max_gop_length = 16;
            calculate_gf_length(cpi, max_gop_length, 1);
            if (is_last_scenecut &&
                (ori_gf_int - p_rc->gf_intervals[p_rc->cur_gf_index] < 4)) {
              p_rc->gf_intervals[p_rc->cur_gf_index] = ori_gf_int;
            }
          }
        }
      }
    }

    define_gf_group(cpi, frame_params, 0);

    if (gf_group->update_type[cpi->gf_frame_index] != ARF_UPDATE &&
        rc->frames_since_key > 0)
      process_first_pass_stats(cpi, &this_frame);

    // Final-pass GF group definition.
    define_gf_group(cpi, frame_params, 1);

#if CONFIG_THREE_PASS
    // write gop info if needed for third pass. Per-frame info is written after
    // each frame is encoded.
    av1_write_second_pass_gop_info(cpi);
#endif  // CONFIG_THREE_PASS

    av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);

    rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
    assert(cpi->gf_frame_index == 0);
#if ARF_STATS_OUTPUT
    {
      FILE *fpfile;
      fpfile = fopen("arf.stt", "a");
      ++arf_count;
      fprintf(fpfile, "%10d %10d %10d %10d %10d\n",
              cpi->common.current_frame.frame_number,
              rc->frames_till_gf_update_due, cpi->ppi->p_rc.kf_boost, arf_count,
              p_rc->gfu_boost);

      fclose(fpfile);
    }
#endif
  }
  assert(cpi->gf_frame_index < gf_group->size);

  if (gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
      gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE) {
    reset_fpf_position(&cpi->twopass_frame, start_pos);

    const FIRSTPASS_STATS *const this_frame_ptr =
        read_frame_stats(twopass, &cpi->twopass_frame,
                         gf_group->arf_src_offset[cpi->gf_frame_index]);
    set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
  } else {
    // Back up this frame's stats for updating total stats during post encode.
    cpi->twopass_frame.this_frame = update_total_stats ?
                                        start_pos : NULL;
  }

  frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
  av1_setup_target_rate(cpi);
}

// One-time initialization for the second (stats consumption) pass: derives
// the frame rate and total bit budget from the accumulated first-pass
// stats, precomputes flash/noise/correlation stats, and resets the
// rate-control tracking state.
void av1_init_second_pass(AV1_COMP *cpi) {
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  FRAME_INFO *const frame_info = &cpi->frame_info;
  double frame_rate;
  FIRSTPASS_STATS *stats;

  if (!twopass->stats_buf_ctx->stats_in_end) return;

  mark_flashes(twopass->stats_buf_ctx->stats_in_start,
               twopass->stats_buf_ctx->stats_in_end);
  estimate_noise(twopass->stats_buf_ctx->stats_in_start,
                 twopass->stats_buf_ctx->stats_in_end, cpi->common.error);
  estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
                 twopass->stats_buf_ctx->stats_in_end);

  stats = twopass->stats_buf_ctx->total_stats;

  // NOTE(review): this reads the packet at stats_in_end; presumably that
  // slot holds the accumulated totals written by the first pass — confirm
  // against the stats buffer layout.
  *stats = *twopass->stats_buf_ctx->stats_in_end;
  *twopass->stats_buf_ctx->total_left_stats = *stats;

  frame_rate = 10000000.0 * stats->count / stats->duration;
  // Each frame can have a different duration, as the frame rate in the source
  // isn't guaranteed to be constant. The frame rate prior to the first frame
  // encoded in the second pass is a guess. However, the sum duration is not.
  // It is calculated based on the actual durations of all frames from the
  // first pass.
  av1_new_framerate(cpi, frame_rate);
  twopass->bits_left =
      (int64_t)(stats->duration * oxcf->rc_cfg.target_bandwidth / 10000000.0);

#if CONFIG_BITRATE_ACCURACY
  av1_vbr_rc_init(&cpi->vbr_rc_info, twopass->bits_left,
                  (int)round(stats->count));
#endif

#if CONFIG_RATECTRL_LOG
  rc_log_init(&cpi->rc_log);
#endif

  // This variable monitors how far behind the second ref update is lagging.
  twopass->sr_update_lag = 1;

  // Scan the first pass file and calculate a modified total error based upon
  // the bias/power function used to allocate bits.
  {
    const double avg_error =
        stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
    const FIRSTPASS_STATS *s = cpi->twopass_frame.stats_in;
    double modified_error_total = 0.0;
    twopass->modified_error_min =
        (avg_error * oxcf->rc_cfg.vbrmin_section) / 100;
    twopass->modified_error_max =
        (avg_error * oxcf->rc_cfg.vbrmax_section) / 100;
    while (s < twopass->stats_buf_ctx->stats_in_end) {
      modified_error_total +=
          calculate_modified_err(frame_info, twopass, oxcf, s);
      ++s;
    }
    twopass->modified_error_left = modified_error_total;
  }

  // Reset the vbr bits off target counters
  cpi->ppi->p_rc.vbr_bits_off_target = 0;
  cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;

  cpi->ppi->p_rc.rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Initialize bits per macro_block estimate correction factor.
  twopass->bpm_factor = 1.0;
  // Initialize actual and target bits counters for ARF groups so that
  // at the start we have a neutral bpm adjustment.
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}

// Initialization for single-pass encoding with look-ahead processing (LAP):
// same rate-control state reset as av1_init_second_pass(), but with no
// total bit budget or modified-error bookkeeping (there is no full
// first-pass file to derive them from).
void av1_init_single_pass_lap(AV1_COMP *cpi) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;

  if (!twopass->stats_buf_ctx->stats_in_end) return;

  // This variable monitors how far behind the second ref update is lagging.
  twopass->sr_update_lag = 1;

  twopass->bits_left = 0;
  twopass->modified_error_min = 0.0;
  twopass->modified_error_max = 0.0;
  twopass->modified_error_left = 0.0;

  // Reset the vbr bits off target counters
  cpi->ppi->p_rc.vbr_bits_off_target = 0;
  cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;

  cpi->ppi->p_rc.rate_error_estimate = 0;

  // Static sequence monitor variables.
  twopass->kf_zeromotion_pct = 100;
  twopass->last_kfgroup_zeromotion_pct = 100;

  // Initialize bits per macro_block estimate correction factor.
  twopass->bpm_factor = 1.0;
  // Initialize actual and target bits counters for ARF groups so that
  // at the start we have a neutral bpm adjustment.
  twopass->rolling_arf_group_target_bits = 1;
  twopass->rolling_arf_group_actual_bits = 1;
}

#define MINQ_ADJ_LIMIT 48
#define MINQ_ADJ_LIMIT_CQ 20
#define HIGH_UNDERSHOOT_RATIO 2
// Post-encode bookkeeping for two-pass rate control: updates the stats
// read pointer, VBR off-target accounting, and min/max q extension state.
void av1_twopass_postencode_update(AV1_COMP *cpi) {
  TWO_PASS *const twopass = &cpi->ppi->twopass;
  RATE_CONTROL *const rc = &cpi->rc;
  PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
  const RateControlCfg *const rc_cfg = &cpi->oxcf.rc_cfg;

  // Increment the stats_in pointer.
4152 if (is_stat_consumption_stage(cpi) && 4153 !(cpi->use_ducky_encode && cpi->ducky_encode_info.frame_info.gop_mode == 4154 DUCKY_ENCODE_GOP_MODE_RCL) && 4155 (cpi->gf_frame_index < cpi->ppi->gf_group.size || 4156 rc->frames_to_key == 0)) { 4157 const int update_type = cpi->ppi->gf_group.update_type[cpi->gf_frame_index]; 4158 if (update_type != ARF_UPDATE && update_type != INTNL_ARF_UPDATE) { 4159 FIRSTPASS_STATS this_frame; 4160 assert(cpi->twopass_frame.stats_in > 4161 twopass->stats_buf_ctx->stats_in_start); 4162 --cpi->twopass_frame.stats_in; 4163 if (cpi->ppi->lap_enabled) { 4164 input_stats_lap(twopass, &cpi->twopass_frame, &this_frame); 4165 } else { 4166 input_stats(twopass, &cpi->twopass_frame, &this_frame); 4167 } 4168 } else if (cpi->ppi->lap_enabled) { 4169 cpi->twopass_frame.stats_in = twopass->stats_buf_ctx->stats_in_start; 4170 } 4171 } 4172 4173 // VBR correction is done through rc->vbr_bits_off_target. Based on the 4174 // sign of this value, a limited % adjustment is made to the target rate 4175 // of subsequent frames, to try and push it back towards 0. This method 4176 // is designed to prevent extreme behaviour at the end of a clip 4177 // or group of frames. 4178 p_rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size; 4179 twopass->bits_left = AOMMAX(twopass->bits_left - rc->base_frame_target, 0); 4180 4181 if (cpi->do_update_vbr_bits_off_target_fast) { 4182 // Subtract current frame's fast_extra_bits. 4183 p_rc->vbr_bits_off_target_fast -= rc->frame_level_fast_extra_bits; 4184 rc->frame_level_fast_extra_bits = 0; 4185 } 4186 4187 // Target vs actual bits for this arf group. 4188 if (twopass->rolling_arf_group_target_bits > 4189 INT_MAX - rc->base_frame_target) { 4190 twopass->rolling_arf_group_target_bits = INT_MAX; 4191 } else { 4192 twopass->rolling_arf_group_target_bits += rc->base_frame_target; 4193 } 4194 twopass->rolling_arf_group_actual_bits += rc->projected_frame_size; 4195 4196 // Calculate the pct rc error. 
4197 if (p_rc->total_actual_bits) { 4198 p_rc->rate_error_estimate = 4199 (int)((p_rc->vbr_bits_off_target * 100) / p_rc->total_actual_bits); 4200 p_rc->rate_error_estimate = clamp(p_rc->rate_error_estimate, -100, 100); 4201 } else { 4202 p_rc->rate_error_estimate = 0; 4203 } 4204 4205 #if CONFIG_FPMT_TEST 4206 /* The variables temp_vbr_bits_off_target, temp_bits_left, 4207 * temp_rolling_arf_group_target_bits, temp_rolling_arf_group_actual_bits 4208 * temp_rate_error_estimate are introduced for quality simulation purpose, 4209 * it retains the value previous to the parallel encode frames. The 4210 * variables are updated based on the update flag. 4211 * 4212 * If there exist show_existing_frames between parallel frames, then to 4213 * retain the temp state do not update it. */ 4214 const int simulate_parallel_frame = 4215 cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE; 4216 int show_existing_between_parallel_frames = 4217 (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] == 4218 INTNL_OVERLAY_UPDATE && 4219 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2); 4220 4221 if (cpi->do_frame_data_update && !show_existing_between_parallel_frames && 4222 simulate_parallel_frame) { 4223 cpi->ppi->p_rc.temp_vbr_bits_off_target = p_rc->vbr_bits_off_target; 4224 cpi->ppi->p_rc.temp_bits_left = twopass->bits_left; 4225 cpi->ppi->p_rc.temp_rolling_arf_group_target_bits = 4226 twopass->rolling_arf_group_target_bits; 4227 cpi->ppi->p_rc.temp_rolling_arf_group_actual_bits = 4228 twopass->rolling_arf_group_actual_bits; 4229 cpi->ppi->p_rc.temp_rate_error_estimate = p_rc->rate_error_estimate; 4230 } 4231 #endif 4232 // Update the active best quality pyramid. 
4233 if (!rc->is_src_frame_alt_ref) { 4234 const int pyramid_level = 4235 cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index]; 4236 int i; 4237 for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i) { 4238 p_rc->active_best_quality[i] = cpi->common.quant_params.base_qindex; 4239 #if CONFIG_TUNE_VMAF 4240 if (cpi->vmaf_info.original_qindex != -1 && 4241 (cpi->oxcf.tune_cfg.tuning >= AOM_TUNE_VMAF_WITH_PREPROCESSING && 4242 cpi->oxcf.tune_cfg.tuning <= AOM_TUNE_VMAF_NEG_MAX_GAIN)) { 4243 p_rc->active_best_quality[i] = cpi->vmaf_info.original_qindex; 4244 } 4245 #endif 4246 } 4247 } 4248 4249 #if 0 4250 { 4251 AV1_COMMON *cm = &cpi->common; 4252 FILE *fpfile; 4253 fpfile = fopen("details.stt", "a"); 4254 fprintf(fpfile, 4255 "%10d %10d %10d %10" PRId64 " %10" PRId64 4256 " %10d %10d %10d %10.4lf %10.4lf %10.4lf %10.4lf\n", 4257 cm->current_frame.frame_number, rc->base_frame_target, 4258 rc->projected_frame_size, rc->total_actual_bits, 4259 rc->vbr_bits_off_target, p_rc->rate_error_estimate, 4260 twopass->rolling_arf_group_target_bits, 4261 twopass->rolling_arf_group_actual_bits, 4262 (double)twopass->rolling_arf_group_actual_bits / 4263 (double)twopass->rolling_arf_group_target_bits, 4264 twopass->bpm_factor, 4265 av1_convert_qindex_to_q(cpi->common.quant_params.base_qindex, 4266 cm->seq_params->bit_depth), 4267 av1_convert_qindex_to_q(rc->active_worst_quality, 4268 cm->seq_params->bit_depth)); 4269 fclose(fpfile); 4270 } 4271 #endif 4272 4273 if (cpi->common.current_frame.frame_type != KEY_FRAME) { 4274 twopass->kf_group_bits -= rc->base_frame_target; 4275 twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct; 4276 } 4277 twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0); 4278 4279 // If the rate control is drifting consider adjustment to min or maxq. 
4280 if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref && 4281 (p_rc->rolling_target_bits > 0)) { 4282 int minq_adj_limit; 4283 int maxq_adj_limit; 4284 minq_adj_limit = 4285 (rc_cfg->mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT); 4286 maxq_adj_limit = rc->worst_quality - rc->active_worst_quality; 4287 4288 // Undershoot 4289 if ((rc_cfg->under_shoot_pct < 100) && 4290 (p_rc->rolling_actual_bits < p_rc->rolling_target_bits)) { 4291 int pct_error = 4292 ((p_rc->rolling_target_bits - p_rc->rolling_actual_bits) * 100) / 4293 p_rc->rolling_target_bits; 4294 4295 if ((pct_error >= rc_cfg->under_shoot_pct) && 4296 (p_rc->rate_error_estimate > 0)) { 4297 twopass->extend_minq += 1; 4298 } 4299 twopass->extend_maxq -= 1; 4300 // Overshoot 4301 } else if ((rc_cfg->over_shoot_pct < 100) && 4302 (p_rc->rolling_actual_bits > p_rc->rolling_target_bits)) { 4303 int pct_error = 4304 ((p_rc->rolling_actual_bits - p_rc->rolling_target_bits) * 100) / 4305 p_rc->rolling_target_bits; 4306 4307 pct_error = clamp(pct_error, 0, 100); 4308 if ((pct_error >= rc_cfg->over_shoot_pct) && 4309 (p_rc->rate_error_estimate < 0)) { 4310 twopass->extend_maxq += 1; 4311 } 4312 twopass->extend_minq -= 1; 4313 } else { 4314 // Adjustment for extreme local overshoot. 4315 // Only applies when normal adjustment above is not used (e.g. 4316 // when threshold is set to 100). 4317 if (rc->projected_frame_size > (2 * rc->base_frame_target) && 4318 rc->projected_frame_size > (2 * rc->avg_frame_bandwidth)) 4319 ++twopass->extend_maxq; 4320 // Unwind extreme overshoot adjustment. 4321 else if (p_rc->rolling_target_bits > p_rc->rolling_actual_bits) 4322 --twopass->extend_maxq; 4323 } 4324 twopass->extend_minq = 4325 clamp(twopass->extend_minq, -minq_adj_limit, minq_adj_limit); 4326 twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit); 4327 4328 // If there is a big and unexpected undershoot then feed the extra 4329 bits back in quickly.
One situation where this may happen is if a 4330 // frame is unexpectedly almost perfectly predicted by the ARF or GF 4331 // but not very well predicted by the previous frame. 4332 if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) { 4333 int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO; 4334 if (rc->projected_frame_size < fast_extra_thresh) { 4335 p_rc->vbr_bits_off_target_fast += 4336 fast_extra_thresh - rc->projected_frame_size; 4337 p_rc->vbr_bits_off_target_fast = 4338 AOMMIN(p_rc->vbr_bits_off_target_fast, 4339 (4 * (int64_t)rc->avg_frame_bandwidth)); 4340 } 4341 } 4342 4343 #if CONFIG_FPMT_TEST 4344 if (cpi->do_frame_data_update && !show_existing_between_parallel_frames && 4345 simulate_parallel_frame) { 4346 cpi->ppi->p_rc.temp_vbr_bits_off_target_fast = 4347 p_rc->vbr_bits_off_target_fast; 4348 cpi->ppi->p_rc.temp_extend_minq = twopass->extend_minq; 4349 cpi->ppi->p_rc.temp_extend_maxq = twopass->extend_maxq; 4350 } 4351 #endif 4352 } 4353 4354 // Update the frame probabilities obtained from parallel encode frames 4355 FrameProbInfo *const frame_probs = &cpi->ppi->frame_probs; 4356 #if CONFIG_FPMT_TEST 4357 /* The variable temp_active_best_quality is introduced only for quality 4358 * simulation purpose, it retains the value previous to the parallel 4359 * encode frames. The variable is updated based on the update flag. 4360 * 4361 * If there exist show_existing_frames between parallel frames, then to 4362 * retain the temp state do not update it.
*/ 4363 if (cpi->do_frame_data_update && !show_existing_between_parallel_frames && 4364 simulate_parallel_frame) { 4365 int i; 4366 const int pyramid_level = 4367 cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index]; 4368 if (!rc->is_src_frame_alt_ref) { 4369 for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i) 4370 cpi->ppi->p_rc.temp_active_best_quality[i] = 4371 p_rc->active_best_quality[i]; 4372 } 4373 } 4374 4375 // Update the frame probabilities obtained from parallel encode frames 4376 FrameProbInfo *const temp_frame_probs_simulation = 4377 simulate_parallel_frame ? &cpi->ppi->temp_frame_probs_simulation 4378 : frame_probs; 4379 FrameProbInfo *const temp_frame_probs = 4380 simulate_parallel_frame ? &cpi->ppi->temp_frame_probs : NULL; 4381 #endif 4382 int i, j, loop; 4383 // Sequentially do average on temp_frame_probs_simulation which holds 4384 // probabilities of last frame before parallel encode 4385 for (loop = 0; loop <= cpi->num_frame_recode; loop++) { 4386 // Sequentially update tx_type_probs 4387 if (cpi->do_update_frame_probs_txtype[loop] && 4388 (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)) { 4389 const FRAME_UPDATE_TYPE update_type = 4390 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index); 4391 for (i = 0; i < TX_SIZES_ALL; i++) { 4392 int left = 1024; 4393 4394 for (j = TX_TYPES - 1; j >= 0; j--) { 4395 const int new_prob = 4396 cpi->frame_new_probs[loop].tx_type_probs[update_type][i][j]; 4397 #if CONFIG_FPMT_TEST 4398 int prob = 4399 (temp_frame_probs_simulation->tx_type_probs[update_type][i][j] + 4400 new_prob) >> 4401 1; 4402 left -= prob; 4403 if (j == 0) prob += left; 4404 temp_frame_probs_simulation->tx_type_probs[update_type][i][j] = prob; 4405 #else 4406 int prob = 4407 (frame_probs->tx_type_probs[update_type][i][j] + new_prob) >> 1; 4408 left -= prob; 4409 if (j == 0) prob += left; 4410 frame_probs->tx_type_probs[update_type][i][j] = prob; 4411 #endif 4412 } 4413 } 4414 } 4415 4416 // Sequentially update 
obmc_probs 4417 if (cpi->do_update_frame_probs_obmc[loop] && 4418 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) { 4419 const FRAME_UPDATE_TYPE update_type = 4420 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index); 4421 4422 for (i = 0; i < BLOCK_SIZES_ALL; i++) { 4423 const int new_prob = 4424 cpi->frame_new_probs[loop].obmc_probs[update_type][i]; 4425 #if CONFIG_FPMT_TEST 4426 temp_frame_probs_simulation->obmc_probs[update_type][i] = 4427 (temp_frame_probs_simulation->obmc_probs[update_type][i] + 4428 new_prob) >> 4429 1; 4430 #else 4431 frame_probs->obmc_probs[update_type][i] = 4432 (frame_probs->obmc_probs[update_type][i] + new_prob) >> 1; 4433 #endif 4434 } 4435 } 4436 4437 // Sequentially update warped_probs 4438 if (cpi->do_update_frame_probs_warp[loop] && 4439 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) { 4440 const FRAME_UPDATE_TYPE update_type = 4441 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index); 4442 const int new_prob = cpi->frame_new_probs[loop].warped_probs[update_type]; 4443 #if CONFIG_FPMT_TEST 4444 temp_frame_probs_simulation->warped_probs[update_type] = 4445 (temp_frame_probs_simulation->warped_probs[update_type] + new_prob) >> 4446 1; 4447 #else 4448 frame_probs->warped_probs[update_type] = 4449 (frame_probs->warped_probs[update_type] + new_prob) >> 1; 4450 #endif 4451 } 4452 4453 // Sequentially update switchable_interp_probs 4454 if (cpi->do_update_frame_probs_interpfilter[loop] && 4455 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) { 4456 const FRAME_UPDATE_TYPE update_type = 4457 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index); 4458 4459 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { 4460 int left = 1536; 4461 4462 for (j = SWITCHABLE_FILTERS - 1; j >= 0; j--) { 4463 const int new_prob = cpi->frame_new_probs[loop] 4464 .switchable_interp_probs[update_type][i][j]; 4465 #if CONFIG_FPMT_TEST 4466 int prob = (temp_frame_probs_simulation 
4467 ->switchable_interp_probs[update_type][i][j] + 4468 new_prob) >> 4469 1; 4470 left -= prob; 4471 if (j == 0) prob += left; 4472 4473 temp_frame_probs_simulation 4474 ->switchable_interp_probs[update_type][i][j] = prob; 4475 #else 4476 int prob = (frame_probs->switchable_interp_probs[update_type][i][j] + 4477 new_prob) >> 4478 1; 4479 left -= prob; 4480 if (j == 0) prob += left; 4481 frame_probs->switchable_interp_probs[update_type][i][j] = prob; 4482 #endif 4483 } 4484 } 4485 } 4486 } 4487 4488 #if CONFIG_FPMT_TEST 4489 // Copying temp_frame_probs_simulation to temp_frame_probs based on 4490 // the flag 4491 if (cpi->do_frame_data_update && 4492 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 && 4493 simulate_parallel_frame) { 4494 for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES; 4495 update_type_idx++) { 4496 for (i = 0; i < BLOCK_SIZES_ALL; i++) { 4497 temp_frame_probs->obmc_probs[update_type_idx][i] = 4498 temp_frame_probs_simulation->obmc_probs[update_type_idx][i]; 4499 } 4500 temp_frame_probs->warped_probs[update_type_idx] = 4501 temp_frame_probs_simulation->warped_probs[update_type_idx]; 4502 for (i = 0; i < TX_SIZES_ALL; i++) { 4503 for (j = 0; j < TX_TYPES; j++) { 4504 temp_frame_probs->tx_type_probs[update_type_idx][i][j] = 4505 temp_frame_probs_simulation->tx_type_probs[update_type_idx][i][j]; 4506 } 4507 } 4508 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { 4509 for (j = 0; j < SWITCHABLE_FILTERS; j++) { 4510 temp_frame_probs->switchable_interp_probs[update_type_idx][i][j] = 4511 temp_frame_probs_simulation 4512 ->switchable_interp_probs[update_type_idx][i][j]; 4513 } 4514 } 4515 } 4516 } 4517 #endif 4518 // Update framerate obtained from parallel encode frames 4519 if (cpi->common.show_frame && 4520 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) 4521 cpi->framerate = cpi->new_framerate; 4522 #if CONFIG_FPMT_TEST 4523 // SIMULATION PURPOSE 4524 int show_existing_between_parallel_frames_cndn 
= 4525 (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] == 4526 INTNL_OVERLAY_UPDATE && 4527 cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2); 4528 if (cpi->common.show_frame && !show_existing_between_parallel_frames_cndn && 4529 cpi->do_frame_data_update && simulate_parallel_frame) 4530 cpi->temp_framerate = cpi->framerate; 4531 #endif 4532 }