analog_agc.cc (38521B)
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

/*
 *
 * Using a feedback system, determines an appropriate analog volume level
 * given an input signal and current volume level. Targets a conservative
 * signal level and is intended for use with a digital AGC to apply
 * additional gain.
 *
 */

#include "modules/audio_processing/agc/legacy/analog_agc.h"

#include <cstdint>
#include <cstdlib>
#include <cstring>

#include "common_audio/signal_processing/dot_product_with_scale.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "common_audio/signal_processing/include/spl_inl.h"
#include "modules/audio_processing/agc/legacy/digital_agc.h"
#include "modules/audio_processing/agc/legacy/gain_control.h"
#include "rtc_base/checks.h"

namespace webrtc {

namespace {

// Error codes stored in LegacyAgc::lastError by the public entry points.
#define AGC_UNSPECIFIED_ERROR 18000
#define AGC_UNINITIALIZED_ERROR 18002
#define AGC_NULL_POINTER_ERROR 18003
#define AGC_BAD_PARAMETER_ERROR 18004

/* Slopes of the volume-weighting curves, in Q13. Indexed by the curve index
 * produced by WebRtcAgc_ExpCurve(); used together with kOffset1 in
 * WebRtcAgc_ProcessAnalog() to compute the volume-raise weighting factor. */
const int16_t kSlope1[8] = {21793, 12517, 7189, 4129, 2372, 1362, 472, 78};

/* Offsets of the first weighting curve set, in Q14 (pairs with kSlope1). */
const int16_t kOffset1[8] = {25395, 23911, 22206, 20737,
                             19612, 18805, 17951, 17367};

/* Slopes of the second (slower) weighting curve set, in Q13 (pairs with
 * kOffset2). */
const int16_t kSlope2[8] = {2063, 1731, 1452, 1218, 1021, 857, 597, 337};

/* Offsets of the second weighting curve set, in Q14 (pairs with kSlope2). */
const int16_t kOffset2[8] = {18432, 18379, 18290, 18177,
                             18052, 17920, 17670, 17286};

/* Time (ms) during which upward adaptation is blocked after a mute event. */
const int16_t kMuteGuardTimeMs = 8000;
/* Magic value stored in LegacyAgc::initFlag once Init has succeeded. */
const int16_t kInitCheck = 42;
/* Number of envelope subframes per 10 ms frame. */
const size_t kNumSubframes = 10;
/* Default settings if config is not used */
#define AGC_DEFAULT_TARGET_LEVEL 3
#define AGC_DEFAULT_COMP_GAIN 9
/* This is the target level for the analog part in ENV scale. To convert to RMS
 * scale you
 * have to add OFFSET_ENV_TO_RMS.
 */
#define ANALOG_TARGET_LEVEL 11
#define ANALOG_TARGET_LEVEL_2 5  // ANALOG_TARGET_LEVEL / 2
/* Offset between RMS scale (analog part) and ENV scale (digital part). This
 * value actually
 * varies with the FIXED_ANALOG_TARGET_LEVEL, hence we should in the future
 * replace it with
 * a table.
 */
#define OFFSET_ENV_TO_RMS 9
/* The reference input level at which the digital part gives an output of
 * targetLevelDbfs
 * (desired level) if we have no compression gain. This level should be set high
 * enough not
 * to compress the peaks due to the dynamics.
 */
#define DIGITAL_REF_AT_0_COMP_GAIN 4
/* Speed of reference level decrease.
 */
#define DIFF_REF_TO_ANALOG 5

/* Size of analog gain table */
#define GAIN_TBL_LEN 32
/* Matlab code:
 * fprintf(1, '\t%i, %i, %i, %i,\n', round(10.^(linspace(0,10,32)/20) * 2^12));
 */
/* Q12 */
const uint16_t kGainTableAnalog[GAIN_TBL_LEN] = {
    4096, 4251, 4412, 4579, 4752, 4932,  5118,  5312,  5513,  5722,  5938,
    6163, 6396, 6638, 6889, 7150, 7420,  7701,  7992,  8295,  8609,  8934,
    9273, 9623, 9987, 10365, 10758, 11165, 11587, 12025, 12480, 12953};

/* Gain/Suppression tables for virtual Mic (in Q10) */
const uint16_t kGainTableVirtualMic[128] = {
    1052,  1081,  1110,  1141,  1172,  1204,  1237,  1271,  1305,  1341,  1378,
    1416,  1454,  1494,  1535,  1577,  1620,  1664,  1710,  1757,  1805,  1854,
    1905,  1957,  2010,  2065,  2122,  2180,  2239,  2301,  2364,  2428,  2495,
    2563,  2633,  2705,  2779,  2855,  2933,  3013,  3096,  3180,  3267,  3357,
    3449,  3543,  3640,  3739,  3842,  3947,  4055,  4166,  4280,  4397,  4517,
    4640,  4767,  4898,  5032,  5169,  5311,  5456,  5605,  5758,  5916,  6078,
    6244,  6415,  6590,  6770,  6956,  7146,  7341,  7542,  7748,  7960,  8178,
    8402,  8631,  8867,  9110,  9359,  9615,  9878,  10148, 10426, 10711, 11004,
    11305, 11614, 11932, 12258, 12593, 12938, 13292, 13655, 14029, 14412, 14807,
    15212, 15628, 16055, 16494, 16945, 17409, 17885, 18374, 18877, 19393, 19923,
    20468, 21028, 21603, 22194, 22801, 23425, 24065, 24724, 25400, 26095, 26808,
    27541, 28295, 29069, 29864, 30681, 31520, 32382};
const uint16_t kSuppressionTableVirtualMic[128] = {
    1024, 1006, 988, 970, 952, 935, 918, 902, 886, 870, 854, 839, 824, 809, 794,
    780,  766,  752, 739, 726, 713, 700, 687, 675, 663, 651, 639, 628, 616, 605,
    594,  584,  573, 563, 553, 543, 533, 524, 514, 505, 496, 487, 478, 470, 461,
    453,  445,  437, 429, 421, 414, 406, 399, 392, 385, 378, 371, 364, 358, 351,
    345,  339,  333, 327, 321, 315, 309, 304, 298, 293, 288, 283, 278, 273, 268,
    263,  258,  254, 249, 244, 240, 236, 232, 227, 223, 219, 215, 211, 208, 204,
    200,  197,  193, 190, 186, 183, 180, 176, 173, 170, 167, 164, 161, 158, 155,
    153,  150,  147, 145, 142, 139, 137, 134, 132, 130, 127, 125, 123, 121, 118,
    116,  114,  112, 110, 108, 106, 104, 102};

/* Table for target energy levels. Values in Q(-7)
 * Matlab code
 * targetLevelTable = fprintf('%d,\t%d,\t%d,\t%d,\n',
 * round((32767*10.^(-(0:63)'/20)).^2*16/2^7) */

const int32_t kTargetLevelTable[64] = {
    134209536, 106606424, 84680493, 67264106, 53429779, 42440782, 33711911,
    26778323,  21270778,  16895980, 13420954, 10660642, 8468049,  6726411,
    5342978,   4244078,   3371191,  2677832,  2127078,  1689598,  1342095,
    1066064,   846805,    672641,   534298,   424408,   337119,   267783,
    212708,    168960,    134210,   106606,   84680,    67264,    53430,
    42441,     33712,     26778,    21271,    16896,    13421,    10661,
    8468,      6726,      5343,     4244,     3371,     2678,     2127,
    1690,      1342,      1066,     847,      673,      534,      424,
    337,       268,       213,      169,      134,      107,      85,
    67};

}  // namespace

/* Feeds one 10 ms near-end (microphone) frame into the AGC state.
 *
 * Applies a slowly-varying digital gain (stepping one entry per frame through
 * kGainTableAnalog) when the requested micVol exceeds the analog maximum,
 * then computes the per-subframe peak envelope and the energies of 16-sample
 * blocks into the double-buffered env / Rxx16w32_array queues, and finally
 * runs the microphone VAD on the low band.
 *
 * `state`     - LegacyAgc instance (cast from void*).
 * `in_mic`    - per-band sample pointers; samples are modified in place when
 *               digital gain is applied.
 * `num_bands` - number of bands in `in_mic`.
 * `samples`   - must be 80 at fs=8000, otherwise 160.
 * Returns 0 on success, -1 on a frame-size mismatch.
 */
int WebRtcAgc_AddMic(void* state,
                     int16_t* const* in_mic,
                     size_t num_bands,
                     size_t samples) {
  int32_t nrg, max_nrg, sample, tmp32;
  int32_t* ptr;
  uint16_t targetGainIdx, gain;
  size_t i;
  int16_t n, L, tmp16, tmp_speech[16];
  LegacyAgc* stt;
  stt = reinterpret_cast<LegacyAgc*>(state);

  if (stt->fs == 8000) {
    L = 8; /* 8 samples per 1 ms subframe */
    if (samples != 80) {
      return -1;
    }
  } else {
    L = 16; /* 16 samples per 1 ms subframe */
    if (samples != 160) {
      return -1;
    }
  }

  /* apply slowly varying digital gain */
  if (stt->micVol > stt->maxAnalog) {
    /* `maxLevel` is strictly >= `micVol`, so this condition should be
     * satisfied here, ensuring there is no divide-by-zero. */
    RTC_DCHECK_GT(stt->maxLevel, stt->maxAnalog);

    /* Q1 */
    tmp16 = (int16_t)(stt->micVol - stt->maxAnalog);
    tmp32 = (GAIN_TBL_LEN - 1) * tmp16;
    tmp16 = (int16_t)(stt->maxLevel - stt->maxAnalog);
    targetGainIdx = tmp32 / tmp16;
    RTC_DCHECK_LT(targetGainIdx, GAIN_TBL_LEN);

    /* Increment through the table towards the target gain.
     * If micVol drops below maxAnalog, we allow the gain
     * to be dropped immediately. */
    if (stt->gainTableIdx < targetGainIdx) {
      stt->gainTableIdx++;
    } else if (stt->gainTableIdx > targetGainIdx) {
      stt->gainTableIdx--;
    }

    /* Q12 */
    gain = kGainTableAnalog[stt->gainTableIdx];

    /* Apply the gain to all bands, saturating to the int16 range. */
    for (i = 0; i < samples; i++) {
      size_t j;
      for (j = 0; j < num_bands; ++j) {
        sample = (in_mic[j][i] * gain) >> 12;
        if (sample > 32767) {
          in_mic[j][i] = 32767;
        } else if (sample < -32768) {
          in_mic[j][i] = -32768;
        } else {
          in_mic[j][i] = (int16_t)sample;
        }
      }
    }
  } else {
    stt->gainTableIdx = 0;
  }

  /* compute envelope */
  /* The env/Rxx16w32 buffers are double-buffered: slot 1 is used when a frame
   * is already queued (inQueue > 0), slot 0 otherwise. */
  if (stt->inQueue > 0) {
    ptr = stt->env[1];
  } else {
    ptr = stt->env[0];
  }

  for (i = 0; i < kNumSubframes; i++) {
    /* iterate over samples: peak of the squared low-band samples */
    max_nrg = 0;
    for (n = 0; n < L; n++) {
      nrg = in_mic[0][i * L + n] * in_mic[0][i * L + n];
      if (nrg > max_nrg) {
        max_nrg = nrg;
      }
    }
    ptr[i] = max_nrg;
  }

  /* compute energy */
  if (stt->inQueue > 0) {
    ptr = stt->Rxx16w32_array[1];
  } else {
    ptr = stt->Rxx16w32_array[0];
  }

  for (i = 0; i < kNumSubframes / 2; i++) {
    if (stt->fs == 16000) {
      /* Downsample 32 samples to 16 so the energy window is constant. */
      WebRtcSpl_DownsampleBy2(&in_mic[0][i * 32], 32, tmp_speech,
                              stt->filterState);
    } else {
      memcpy(tmp_speech, &in_mic[0][i * 16], 16 * sizeof(int16_t));
    }
    /* Compute energy in blocks of 16 samples */
    ptr[i] = WebRtcSpl_DotProductWithScale(tmp_speech, tmp_speech, 16, 4);
  }

  /* update queue information */
  if (stt->inQueue == 0) {
    stt->inQueue = 1;
  } else {
    stt->inQueue = 2;
  }

  /* call VAD (use low band only) */
  WebRtcAgc_ProcessVad(&stt->vadMic, in_mic[0], samples);

  return 0;
}

/* Feeds one far-end frame into the digital AGC after validating the frame
 * size against the configured sample rate. Returns 0 or an error code. */
int WebRtcAgc_AddFarend(void* state, const int16_t* in_far, size_t samples) {
  LegacyAgc* stt = reinterpret_cast<LegacyAgc*>(state);

  int err = WebRtcAgc_GetAddFarendError(state, samples);

  if (err != 0)
    return err;
  return WebRtcAgc_AddFarendToDigital(&stt->digitalAgc, in_far, samples);
}

/* Validates state and frame size for AddFarend: 80 samples at 8 kHz,
 * 160 samples at 16/32/48 kHz. Returns 0 if valid, -1 otherwise. */
int WebRtcAgc_GetAddFarendError(void* state, size_t samples) {
  LegacyAgc* stt;
  stt = reinterpret_cast<LegacyAgc*>(state);

  if (stt == nullptr)
    return -1;

  if (stt->fs == 8000) {
    if (samples != 80)
      return -1;
  } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) {
    if (samples != 160)
      return -1;
  } else {
    return -1;
  }

  return 0;
}

/* Emulates an adjustable microphone level in software for devices without a
 * usable analog volume control. Classifies the frame as low-level or not
 * (energy + zero-crossing heuristics), scales the samples by a gain picked
 * from kGainTableVirtualMic / kSuppressionTableVirtualMic based on the
 * current virtual level, and then forwards the frame to WebRtcAgc_AddMic().
 *
 * `micLevelIn`  - externally reported level; a change from the stored
 *                 reference resets the virtual level to the midpoint (127).
 * `micLevelOut` - receives the resulting virtual level (descaled).
 * Returns 0 on success, -1 if AddMic fails.
 */
int WebRtcAgc_VirtualMic(void* agcInst,
                         int16_t* const* in_near,
                         size_t num_bands,
                         size_t samples,
                         int32_t micLevelIn,
                         int32_t* micLevelOut) {
  int32_t tmpFlt, micLevelTmp, gainIdx;
  uint16_t gain;
  size_t ii, j;
  LegacyAgc* stt;

  uint32_t nrg;
  size_t sampleCntr;
  uint32_t frameNrg = 0;
  uint32_t frameNrgLimit = 5500;
  int16_t numZeroCrossing = 0;
  const int16_t kZeroCrossingLowLim = 15;
  const int16_t kZeroCrossingHighLim = 20;

  stt = reinterpret_cast<LegacyAgc*>(agcInst);

  /*
   * Before applying gain decide if this is a low-level signal.
   * The idea is that digital AGC will not adapt to low-level
   * signals.
   */
  if (stt->fs != 8000) {
    /* Twice as many samples per frame, so double the energy limit. */
    frameNrgLimit = frameNrgLimit << 1;
  }

  frameNrg = (uint32_t)(in_near[0][0] * in_near[0][0]);
  for (sampleCntr = 1; sampleCntr < samples; sampleCntr++) {
    // increment frame energy if it is less than the limit
    // the correct value of the energy is not important
    if (frameNrg < frameNrgLimit) {
      nrg = (uint32_t)(in_near[0][sampleCntr] * in_near[0][sampleCntr]);
      frameNrg += nrg;
    }

    // Count the zero crossings
    numZeroCrossing +=
        ((in_near[0][sampleCntr] ^ in_near[0][sampleCntr - 1]) < 0);
  }

  /* Heuristic classification: very low energy or very few zero crossings
   * => low-level; moderate zero-crossing rate => normal; otherwise fall
   * back on energy and high zero-crossing thresholds. */
  if ((frameNrg < 500) || (numZeroCrossing <= 5)) {
    stt->lowLevelSignal = 1;
  } else if (numZeroCrossing <= kZeroCrossingLowLim) {
    stt->lowLevelSignal = 0;
  } else if (frameNrg <= frameNrgLimit) {
    stt->lowLevelSignal = 1;
  } else if (numZeroCrossing >= kZeroCrossingHighLim) {
    stt->lowLevelSignal = 1;
  } else {
    stt->lowLevelSignal = 0;
  }

  micLevelTmp = micLevelIn << stt->scale;
  /* Set desired level */
  gainIdx = stt->micVol;
  if (stt->micVol > stt->maxAnalog) {
    gainIdx = stt->maxAnalog;
  }
  if (micLevelTmp != stt->micRef) {
    /* Something has happened with the physical level, restart. */
    stt->micRef = micLevelTmp;
    stt->micVol = 127;
    *micLevelOut = 127;
    stt->micGainIdx = 127;
    gainIdx = 127;
  }
  /* Pre-process the signal to emulate the microphone level. */
  /* Take one step at a time in the gain table.
   */
  /* gainIdx in [128, 255] selects amplification, [0, 127] attenuation. */
  if (gainIdx > 127) {
    gain = kGainTableVirtualMic[gainIdx - 128];
  } else {
    gain = kSuppressionTableVirtualMic[127 - gainIdx];
  }
  for (ii = 0; ii < samples; ii++) {
    tmpFlt = (in_near[0][ii] * gain) >> 10;
    if (tmpFlt > 32767) {
      /* Clipped: saturate and back off the virtual level one step. */
      tmpFlt = 32767;
      gainIdx--;
      if (gainIdx >= 127) {
        gain = kGainTableVirtualMic[gainIdx - 127];
      } else {
        gain = kSuppressionTableVirtualMic[127 - gainIdx];
      }
    }
    if (tmpFlt < -32768) {
      tmpFlt = -32768;
      gainIdx--;
      if (gainIdx >= 127) {
        gain = kGainTableVirtualMic[gainIdx - 127];
      } else {
        gain = kSuppressionTableVirtualMic[127 - gainIdx];
      }
    }
    in_near[0][ii] = (int16_t)tmpFlt;
    /* Higher bands get the same gain, but never adjust the level. */
    for (j = 1; j < num_bands; ++j) {
      tmpFlt = (in_near[j][ii] * gain) >> 10;
      if (tmpFlt > 32767) {
        tmpFlt = 32767;
      }
      if (tmpFlt < -32768) {
        tmpFlt = -32768;
      }
      in_near[j][ii] = (int16_t)tmpFlt;
    }
  }
  /* Set the level we (finally) used */
  stt->micGainIdx = gainIdx;
  // *micLevelOut = stt->micGainIdx;
  *micLevelOut = stt->micGainIdx >> stt->scale;
  /* Add to Mic as if it was the output from a true microphone */
  if (WebRtcAgc_AddMic(agcInst, in_near, num_bands, samples) != 0) {
    return -1;
  }
  return 0;
}

/* Recomputes the analog-adaptation target and limit levels from the current
 * compression gain and AGC mode. Must be called whenever compressionGaindB,
 * targetLevelDbfs or agcMode changes (see WebRtcAgc_set_config). */
void WebRtcAgc_UpdateAgcThresholds(LegacyAgc* stt) {
  int16_t tmp16;

  /* Set analog target level in envelope dBOv scale */
  tmp16 = (DIFF_REF_TO_ANALOG * stt->compressionGaindB) + ANALOG_TARGET_LEVEL_2;
  tmp16 = WebRtcSpl_DivW32W16ResW16((int32_t)tmp16, ANALOG_TARGET_LEVEL);
  stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN + tmp16;
  if (stt->analogTarget < DIGITAL_REF_AT_0_COMP_GAIN) {
    stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN;
  }
  if (stt->agcMode == kAgcModeFixedDigital) {
    /* Adjust for different parameter interpretation in FixedDigital mode */
    stt->analogTarget = stt->compressionGaindB;
  }
  /* Since the offset between RMS and ENV is not constant, we should make this
   * into a
   * table, but for now, we'll stick with a constant, tuned for the chosen
   * analog
   * target level.
   */
  stt->targetIdx = ANALOG_TARGET_LEVEL + OFFSET_ENV_TO_RMS;
  /* Analog adaptation limits */
  /* analogTargetLevel = round((32767*10^(-targetIdx/20))^2*16/2^7) */
  stt->analogTargetLevel =
      kRxxBufferLen * kTargetLevelTable[stt->targetIdx]; /* ex. -20 dBov */
  stt->startUpperLimit =
      kRxxBufferLen * kTargetLevelTable[stt->targetIdx - 1]; /* -19 dBov */
  stt->startLowerLimit =
      kRxxBufferLen * kTargetLevelTable[stt->targetIdx + 1]; /* -21 dBov */
  stt->upperPrimaryLimit =
      kRxxBufferLen * kTargetLevelTable[stt->targetIdx - 2]; /* -18 dBov */
  stt->lowerPrimaryLimit =
      kRxxBufferLen * kTargetLevelTable[stt->targetIdx + 2]; /* -22 dBov */
  stt->upperSecondaryLimit =
      kRxxBufferLen * kTargetLevelTable[stt->targetIdx - 5]; /* -15 dBov */
  stt->lowerSecondaryLimit =
      kRxxBufferLen * kTargetLevelTable[stt->targetIdx + 5]; /* -25 dBov */
  stt->upperLimit = stt->startUpperLimit;
  stt->lowerLimit = stt->startLowerLimit;
}

/* Detects saturation by accumulating large per-subframe envelope values
 * (env[i] in Q(-7); values above 875 after >>20 count) into a leaky sum.
 * Sets *saturated = 1 and resets the sum once it exceeds 25000; otherwise
 * the sum decays by ~0.99 each call. */
void WebRtcAgc_SaturationCtrl(LegacyAgc* stt,
                              uint8_t* saturated,
                              int32_t* env) {
  int16_t i, tmpW16;

  /* Check if the signal is saturated */
  for (i = 0; i < 10; i++) {
    tmpW16 = (int16_t)(env[i] >> 20);
    if (tmpW16 > 875) {
      stt->envSum += tmpW16;
    }
  }

  if (stt->envSum > 25000) {
    *saturated = 1;
    stt->envSum = 0;
  }

  /* stt->envSum *= 0.99; */
  stt->envSum = (int16_t)((stt->envSum * 32440) >> 15);
}

/* Detects a (near-)zero input signal. After ~500 ms of near-silence the mic
 * level is raised by ~10% (bounded by zeroCtrlMax and only below the 50%
 * midpoint) and the mute-guard timer is armed to block subsequent upward
 * adaptation. Also ticks down the running mute-guard timer. */
void WebRtcAgc_ZeroCtrl(LegacyAgc* stt, int32_t* inMicLevel, int32_t* env) {
  int16_t i;
  int64_t tmp = 0;
  int32_t midVal;

  /* Is the input signal zero? */
  for (i = 0; i < 10; i++) {
    tmp += env[i];
  }

  /* Each block is allowed to have a few non-zero
   * samples.
   */
  if (tmp < 500) {
    stt->msZero += 10;
  } else {
    stt->msZero = 0;
  }

  if (stt->muteGuardMs > 0) {
    stt->muteGuardMs -= 10;
  }

  if (stt->msZero > 500) {
    stt->msZero = 0;

    /* Increase microphone level only if it's less than 50% */
    midVal = (stt->maxAnalog + stt->minLevel + 1) / 2;
    if (*inMicLevel < midVal) {
      /* *inMicLevel *= 1.1; */
      *inMicLevel = (1126 * *inMicLevel) >> 10;
      /* Reduces risk of a muted mic repeatedly triggering excessive levels due
       * to zero signal detection. */
      *inMicLevel = WEBRTC_SPL_MIN(*inMicLevel, stt->zeroCtrlMax);
      stt->micVol = *inMicLevel;
    }

    stt->activeSpeech = 0;
    stt->Rxx16_LPw32Max = 0;

    /* The AGC has a tendency (due to problems with the VAD parameters), to
     * vastly increase the volume after a muting event. This timer prevents
     * upwards adaptation for a short period. */
    stt->muteGuardMs = kMuteGuardTimeMs;
  }
}

void WebRtcAgc_SpeakerInactiveCtrl(LegacyAgc* stt) {
  /* Check if the near end speaker is inactive.
   * If that is the case the VAD threshold is
   * increased since the VAD speech model gets
   * more sensitive to any sound after a long
   * silence.
   */

  int32_t tmp32;
  int16_t vadThresh;

  if (stt->vadMic.stdLongTerm < 2500) {
    /* Long-term std very low: assume inactivity, use the high threshold. */
    stt->vadThreshold = 1500;
  } else {
    vadThresh = kNormalVadThreshold;
    if (stt->vadMic.stdLongTerm < 4500) {
      /* Scale between min and max threshold */
      vadThresh += (4500 - stt->vadMic.stdLongTerm) / 2;
    }

    /* stt->vadThreshold = (31 * stt->vadThreshold + vadThresh) / 32; */
    tmp32 = vadThresh + 31 * stt->vadThreshold;
    stt->vadThreshold = (int16_t)(tmp32 >> 5);
  }
}

/* Maps a normalized volume (Q14, 0..16384) to one of 8 exponential-curve
 * segments via a fixed threshold tree; `*index` receives 0..7. The index
 * selects entries from kSlope1/kOffset1 or kSlope2/kOffset2. */
void WebRtcAgc_ExpCurve(int16_t volume, int16_t* index) {
  // volume in Q14
  // index in [0-7]
  /* 8 different curves */
  if (volume > 5243) {
    if (volume > 7864) {
      if (volume > 12124) {
        *index = 7;
      } else {
        *index = 6;
      }
    } else {
      if (volume > 6554) {
        *index = 5;
      } else {
        *index = 4;
      }
    }
  } else {
    if (volume > 2621) {
      if (volume > 3932) {
        *index = 3;
      } else {
        *index = 2;
      }
    } else {
      if (volume > 1311) {
        *index = 1;
      } else {
        *index = 0;
      }
    }
  }
}

/* Core analog-volume adaptation. Validates the reported level, handles
 * manual level changes, runs saturation/zero/speaker-inactivity control,
 * then — when the VAD indicates speech — steers the long-term level
 * estimate (Rxx160_LPw32) into the [lowerLimit, upperLimit] window by
 * nudging micVol up or down with curve-weighted steps.
 *
 * `inMicLevel`         - current device volume (pre-scale).
 * `outMicLevel`        - receives the recommended volume (descaled).
 * `vadLogRatio`        - mic VAD log-likelihood; compared to vadThreshold.
 * `echo`               - 1 blocks any volume increase this frame.
 * `saturationWarning`  - set to 1 when saturation forces the level below
 *                        minOutput.
 * Returns 0, or -1 if inMicLevel is outside [minLevel, maxAnalog].
 */
int32_t WebRtcAgc_ProcessAnalog(void* state,
                                int32_t inMicLevel,
                                int32_t* outMicLevel,
                                int16_t vadLogRatio,
                                int16_t echo,
                                uint8_t* saturationWarning) {
  uint32_t tmpU32;
  int32_t Rxx16w32, tmp32;
  int32_t inMicLevelTmp, lastMicVol;
  int16_t i;
  uint8_t saturated = 0;
  LegacyAgc* stt;

  stt = reinterpret_cast<LegacyAgc*>(state);
  inMicLevelTmp = inMicLevel << stt->scale;

  if (inMicLevelTmp > stt->maxAnalog) {
    return -1;
  } else if (inMicLevelTmp < stt->minLevel) {
    return -1;
  }

  if (stt->firstCall == 0) {
    int32_t tmpVol;
    stt->firstCall = 1;
    /* ~10% of the range above minLevel (51/512). */
    tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9;
    tmpVol = (stt->minLevel + tmp32);

    /* If the mic level is very low at start, increase it! */
    if ((inMicLevelTmp < tmpVol) && (stt->agcMode == kAgcModeAdaptiveAnalog)) {
      inMicLevelTmp = tmpVol;
    }
    stt->micVol = inMicLevelTmp;
  }

  /* Set the mic level to the previous output value if there is digital input
   * gain */
  if ((inMicLevelTmp == stt->maxAnalog) && (stt->micVol > stt->maxAnalog)) {
    inMicLevelTmp = stt->micVol;
  }

  /* If the mic level was manually changed to a very low value raise it! */
  if ((inMicLevelTmp != stt->micVol) && (inMicLevelTmp < stt->minOutput)) {
    tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9;
    inMicLevelTmp = (stt->minLevel + tmp32);
    stt->micVol = inMicLevelTmp;
  }

  if (inMicLevelTmp != stt->micVol) {
    if (inMicLevel == stt->lastInMicLevel) {
      // We requested a volume adjustment, but it didn't occur. This is
      // probably due to a coarse quantization of the volume slider.
      // Restore the requested value to prevent getting stuck.
      inMicLevelTmp = stt->micVol;
    } else {
      // As long as the value changed, update to match.
      stt->micVol = inMicLevelTmp;
    }
  }

  if (inMicLevelTmp > stt->maxLevel) {
    // Always allow the user to raise the volume above the maxLevel.
    stt->maxLevel = inMicLevelTmp;
  }

  // Store last value here, after we've taken care of manual updates etc.
  stt->lastInMicLevel = inMicLevel;
  lastMicVol = stt->micVol;

  /* Checks if the signal is saturated. Also a check if individual samples
   * are larger than 12000 is done. If they are the counter for increasing
   * the volume level is set to -100ms
   */
  WebRtcAgc_SaturationCtrl(stt, &saturated, stt->env[0]);

  /* The AGC is always allowed to lower the level if the signal is saturated */
  if (saturated == 1) {
    /* Lower the recording level
     * Rxx160_LP is adjusted down because it is so slow it could
     * cause the AGC to make wrong decisions. */
    /* stt->Rxx160_LPw32 *= 0.875; */
    stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 8) * 7;

    stt->zeroCtrlMax = stt->micVol;

    /* stt->micVol *= 0.903; */
    tmp32 = inMicLevelTmp - stt->minLevel;
    tmpU32 = WEBRTC_SPL_UMUL(29591, (uint32_t)(tmp32));
    stt->micVol = (tmpU32 >> 15) + stt->minLevel;
    if (stt->micVol > lastMicVol - 2) {
      stt->micVol = lastMicVol - 2;
    }
    inMicLevelTmp = stt->micVol;

    if (stt->micVol < stt->minOutput) {
      *saturationWarning = 1;
    }

    /* Reset counter for decrease of volume level to avoid
     * decreasing too much. The saturation control can still
     * lower the level if needed. */
    stt->msTooHigh = -100;

    /* Enable the control mechanism to ensure that our measure,
     * Rxx160_LP, is in the correct range. This must be done since
     * the measure is very slow. */
    stt->activeSpeech = 0;
    stt->Rxx16_LPw32Max = 0;

    /* Reset to initial values */
    stt->msecSpeechInnerChange = kMsecSpeechInner;
    stt->msecSpeechOuterChange = kMsecSpeechOuter;
    stt->changeToSlowMode = 0;

    stt->muteGuardMs = 0;

    stt->upperLimit = stt->startUpperLimit;
    stt->lowerLimit = stt->startLowerLimit;
  }

  /* Check if the input speech is zero. If so the mic volume
   * is increased. On some computers the input is zero up as high
   * level as 17% */
  WebRtcAgc_ZeroCtrl(stt, &inMicLevelTmp, stt->env[0]);

  /* Check if the near end speaker is inactive.
   * If that is the case the VAD threshold is
   * increased since the VAD speech model gets
   * more sensitive to any sound after a long
   * silence.
   */
  WebRtcAgc_SpeakerInactiveCtrl(stt);

  for (i = 0; i < 5; i++) {
    /* Computed on blocks of 16 samples */

    Rxx16w32 = stt->Rxx16w32_array[0][i];

    /* Rxx160w32 in Q(-7) */
    /* Sliding-window sum over the last kRxxBufferLen blocks. */
    tmp32 = (Rxx16w32 - stt->Rxx16_vectorw32[stt->Rxx16pos]) >> 3;
    stt->Rxx160w32 = stt->Rxx160w32 + tmp32;
    stt->Rxx16_vectorw32[stt->Rxx16pos] = Rxx16w32;

    /* Circular buffer */
    stt->Rxx16pos++;
    if (stt->Rxx16pos == kRxxBufferLen) {
      stt->Rxx16pos = 0;
    }

    /* Rxx16_LPw32 in Q(-4) */
    tmp32 = (Rxx16w32 - stt->Rxx16_LPw32) >> kAlphaShortTerm;
    stt->Rxx16_LPw32 = (stt->Rxx16_LPw32) + tmp32;

    if (vadLogRatio > stt->vadThreshold) {
      /* Speech detected! */

      /* Check if Rxx160_LP is in the correct range. If
       * it is too high/low then we set it to the maximum of
       * Rxx16_LPw32 during the first 200ms of speech.
       */
      if (stt->activeSpeech < 250) {
        stt->activeSpeech += 2;

        if (stt->Rxx16_LPw32 > stt->Rxx16_LPw32Max) {
          stt->Rxx16_LPw32Max = stt->Rxx16_LPw32;
        }
      } else if (stt->activeSpeech == 250) {
        stt->activeSpeech += 2;
        tmp32 = stt->Rxx16_LPw32Max >> 3;
        stt->Rxx160_LPw32 = tmp32 * kRxxBufferLen;
      }

      tmp32 = (stt->Rxx160w32 - stt->Rxx160_LPw32) >> kAlphaLongTerm;
      stt->Rxx160_LPw32 = stt->Rxx160_LPw32 + tmp32;

      if (stt->Rxx160_LPw32 > stt->upperSecondaryLimit) {
        /* Far above target: fast decrease path. */
        stt->msTooHigh += 2;
        stt->msTooLow = 0;
        stt->changeToSlowMode = 0;

        if (stt->msTooHigh > stt->msecSpeechOuterChange) {
          stt->msTooHigh = 0;

          /* Lower the recording level */
          /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
          tmp32 = stt->Rxx160_LPw32 >> 6;
          stt->Rxx160_LPw32 = tmp32 * 53;

          /* Reduce the max gain to avoid excessive oscillation
           * (but never drop below the maximum analog level).
           */
          stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16;
          stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog);

          stt->zeroCtrlMax = stt->micVol;

          /* 0.95 in Q15 */
          tmp32 = inMicLevelTmp - stt->minLevel;
          tmpU32 = WEBRTC_SPL_UMUL(31130, (uint32_t)(tmp32));
          stt->micVol = (tmpU32 >> 15) + stt->minLevel;
          if (stt->micVol > lastMicVol - 1) {
            stt->micVol = lastMicVol - 1;
          }
          inMicLevelTmp = stt->micVol;

          /* Enable the control mechanism to ensure that our measure,
           * Rxx160_LP, is in the correct range.
           */
          stt->activeSpeech = 0;
          stt->Rxx16_LPw32Max = 0;
        }
      } else if (stt->Rxx160_LPw32 > stt->upperLimit) {
        /* Moderately above target: slower decrease path. */
        stt->msTooHigh += 2;
        stt->msTooLow = 0;
        stt->changeToSlowMode = 0;

        if (stt->msTooHigh > stt->msecSpeechInnerChange) {
          /* Lower the recording level */
          stt->msTooHigh = 0;
          /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
          stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 53;

          /* Reduce the max gain to avoid excessive oscillation
           * (but never drop below the maximum analog level).
           */
          stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16;
          stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog);

          stt->zeroCtrlMax = stt->micVol;

          /* 0.965 in Q15 */
          tmp32 = inMicLevelTmp - stt->minLevel;
          tmpU32 =
              WEBRTC_SPL_UMUL(31621, (uint32_t)(inMicLevelTmp - stt->minLevel));
          stt->micVol = (tmpU32 >> 15) + stt->minLevel;
          if (stt->micVol > lastMicVol - 1) {
            stt->micVol = lastMicVol - 1;
          }
          inMicLevelTmp = stt->micVol;
        }
      } else if (stt->Rxx160_LPw32 < stt->lowerSecondaryLimit) {
        /* Far below target: fast increase path. */
        stt->msTooHigh = 0;
        stt->changeToSlowMode = 0;
        stt->msTooLow += 2;

        if (stt->msTooLow > stt->msecSpeechOuterChange) {
          /* Raise the recording level */
          int16_t index, weightFIX;
          int16_t volNormFIX = 16384;  // =1 in Q14.

          stt->msTooLow = 0;

          /* Normalize the volume level */
          tmp32 = (inMicLevelTmp - stt->minLevel) << 14;
          if (stt->maxInit != stt->minLevel) {
            volNormFIX = tmp32 / (stt->maxInit - stt->minLevel);
          }

          /* Find correct curve */
          WebRtcAgc_ExpCurve(volNormFIX, &index);

          /* Compute weighting factor for the volume increase, 32^(-2*X)/2+1.05
           */
          weightFIX =
              kOffset1[index] - (int16_t)((kSlope1[index] * volNormFIX) >> 13);

          /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
          stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67;

          tmp32 = inMicLevelTmp - stt->minLevel;
          tmpU32 =
              ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel));
          stt->micVol = (tmpU32 >> 14) + stt->minLevel;
          if (stt->micVol < lastMicVol + 2) {
            stt->micVol = lastMicVol + 2;
          }

          inMicLevelTmp = stt->micVol;
        }
      } else if (stt->Rxx160_LPw32 < stt->lowerLimit) {
        /* Moderately below target: slower increase path. */
        stt->msTooHigh = 0;
        stt->changeToSlowMode = 0;
        stt->msTooLow += 2;

        if (stt->msTooLow > stt->msecSpeechInnerChange) {
          /* Raise the recording level */
          int16_t index, weightFIX;
          int16_t volNormFIX = 16384;  // =1 in Q14.

          stt->msTooLow = 0;

          /* Normalize the volume level */
          tmp32 = (inMicLevelTmp - stt->minLevel) << 14;
          if (stt->maxInit != stt->minLevel) {
            volNormFIX = tmp32 / (stt->maxInit - stt->minLevel);
          }

          /* Find correct curve */
          WebRtcAgc_ExpCurve(volNormFIX, &index);

          /* Compute weighting factor for the volume increase, (3.^(-2.*X))/8+1
           */
          weightFIX =
              kOffset2[index] - (int16_t)((kSlope2[index] * volNormFIX) >> 13);

          /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
          stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67;

          tmp32 = inMicLevelTmp - stt->minLevel;
          tmpU32 =
              ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel));
          stt->micVol = (tmpU32 >> 14) + stt->minLevel;
          if (stt->micVol < lastMicVol + 1) {
            stt->micVol = lastMicVol + 1;
          }

          inMicLevelTmp = stt->micVol;
        }
      } else {
        /* The signal is inside the desired range which is:
         * lowerLimit < Rxx160_LP/640 < upperLimit
         */
        if (stt->changeToSlowMode > 4000) {
          /* After 4 s in range, slow down adaptation and tighten limits. */
          stt->msecSpeechInnerChange = 1000;
          stt->msecSpeechOuterChange = 500;
          stt->upperLimit = stt->upperPrimaryLimit;
          stt->lowerLimit = stt->lowerPrimaryLimit;
        } else {
          stt->changeToSlowMode += 2;  // in milliseconds
        }
        stt->msTooLow = 0;
        stt->msTooHigh = 0;

        stt->micVol = inMicLevelTmp;
      }
    }
  }

  /* Ensure gain is not increased in presence of echo or after a mute event
   * (but allow the zeroCtrl() increase on the frame of a mute detection.
   */
  if (echo == 1 ||
      (stt->muteGuardMs > 0 && stt->muteGuardMs < kMuteGuardTimeMs)) {
    if (stt->micVol > lastMicVol) {
      stt->micVol = lastMicVol;
    }
  }

  /* limit the gain */
  if (stt->micVol > stt->maxLevel) {
    stt->micVol = stt->maxLevel;
  } else if (stt->micVol < stt->minOutput) {
    stt->micVol = stt->minOutput;
  }

  /* Never report more than the analog maximum; descale for the caller. */
  *outMicLevel = WEBRTC_SPL_MIN(stt->micVol, stt->maxAnalog) >> stt->scale;

  return 0;
}

/* Analyzes one near-end frame: validates frame size, computes the digital
 * gains for the frame, and — unless the mode is fixed-digital, or the mode
 * is adaptive-digital with a low-level signal — runs the analog adaptation
 * to produce a recommended mic level. Also drains the env/energy queue
 * filled by AddMic. Returns 0 on success, -1 on any failure. */
int WebRtcAgc_Analyze(void* agcInst,
                      const int16_t* const* in_near,
                      size_t num_bands,
                      size_t samples,
                      int32_t inMicLevel,
                      int32_t* outMicLevel,
                      int16_t echo,
                      uint8_t* saturationWarning,
                      int32_t gains[11]) {
  LegacyAgc* stt = reinterpret_cast<LegacyAgc*>(agcInst);

  if (stt == nullptr) {
    return -1;
  }

  if (stt->fs == 8000) {
    if (samples != 80) {
      return -1;
    }
  } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) {
    if (samples != 160) {
      return -1;
    }
  } else {
    return -1;
  }

  *saturationWarning = 0;
  // TODO(minyue): PUT IN RANGE CHECKING FOR INPUT LEVELS
  *outMicLevel = inMicLevel;

  int32_t error =
      WebRtcAgc_ComputeDigitalGains(&stt->digitalAgc, in_near, num_bands,
                                    stt->fs, stt->lowLevelSignal, gains);
  if (error == -1) {
    return -1;
  }

  if (stt->agcMode < kAgcModeFixedDigital &&
      (stt->lowLevelSignal == 0 || stt->agcMode != kAgcModeAdaptiveDigital)) {
    if (WebRtcAgc_ProcessAnalog(agcInst, inMicLevel, outMicLevel,
                                stt->vadMic.logRatio, echo,
                                saturationWarning) == -1) {
      return -1;
    }
  }

  /* update queue */
  /* Shift the second queued frame's envelope/energy into slot 0. */
  if (stt->inQueue > 1) {
    memcpy(stt->env[0], stt->env[1], 10 * sizeof(int32_t));
    memcpy(stt->Rxx16w32_array[0], stt->Rxx16w32_array[1], 5 * sizeof(int32_t));
  }

  if (stt->inQueue > 0) {
    stt->inQueue--;
  }

  return 0;
}

/* Applies the gains previously computed by WebRtcAgc_Analyze() to a frame. */
int WebRtcAgc_Process(const void* agcInst,
                      const int32_t gains[11],
                      const int16_t* const* in_near,
                      size_t num_bands,
                      int16_t* const* out) {
  const LegacyAgc* stt = (const LegacyAgc*)agcInst;
  return WebRtcAgc_ApplyDigitalGains(gains, num_bands, stt->fs, in_near, out);
}

/* Applies a new configuration (limiter flag, compression gain, target level),
 * recomputes the analog thresholds and the digital gain table, and stores the
 * accepted config. Returns 0, or -1 with lastError set on bad input or an
 * uninitialized instance. */
int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig agcConfig) {
  LegacyAgc* stt;
  stt = reinterpret_cast<LegacyAgc*>(agcInst);

  if (stt == nullptr) {
    return -1;
  }

  if (stt->initFlag != kInitCheck) {
    stt->lastError = AGC_UNINITIALIZED_ERROR;
    return -1;
  }

  if (agcConfig.limiterEnable != kAgcFalse &&
      agcConfig.limiterEnable != kAgcTrue) {
    stt->lastError = AGC_BAD_PARAMETER_ERROR;
    return -1;
  }
  stt->limiterEnable = agcConfig.limiterEnable;
  stt->compressionGaindB = agcConfig.compressionGaindB;
  if ((agcConfig.targetLevelDbfs < 0) || (agcConfig.targetLevelDbfs > 31)) {
    stt->lastError = AGC_BAD_PARAMETER_ERROR;
    return -1;
  }
  stt->targetLevelDbfs = agcConfig.targetLevelDbfs;

  if (stt->agcMode == kAgcModeFixedDigital) {
    /* Adjust for different parameter interpretation in FixedDigital mode */
    stt->compressionGaindB += agcConfig.targetLevelDbfs;
  }

  /* Update threshold levels for analog adaptation */
  WebRtcAgc_UpdateAgcThresholds(stt);

  /* Recalculate gain table */
  if (WebRtcAgc_CalculateGainTable(
          &(stt->digitalAgc.gainTable[0]), stt->compressionGaindB,
          stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget) == -1) {
    return -1;
  }
  /* Store the config in a WebRtcAgcConfig */
  stt->usedConfig.compressionGaindB = agcConfig.compressionGaindB;
  stt->usedConfig.limiterEnable = agcConfig.limiterEnable;
  stt->usedConfig.targetLevelDbfs = agcConfig.targetLevelDbfs;

  return 0;
}

/* Copies the last accepted configuration into `config`. Returns 0, or -1
 * (with lastError set) on null pointer or uninitialized instance. */
int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config) {
  LegacyAgc* stt;
  stt = reinterpret_cast<LegacyAgc*>(agcInst);

  if (stt == nullptr) {
    return -1;
  }

  if (config == nullptr) {
    stt->lastError = AGC_NULL_POINTER_ERROR;
    return -1;
  }

  if (stt->initFlag != kInitCheck) {
    stt->lastError = AGC_UNINITIALIZED_ERROR;
    return -1;
  }

  config->limiterEnable = stt->usedConfig.limiterEnable;
  config->targetLevelDbfs = stt->usedConfig.targetLevelDbfs;
  config->compressionGaindB = stt->usedConfig.compressionGaindB;

  return 0;
}

/* Allocates an uninitialized LegacyAgc instance; WebRtcAgc_Init() must be
 * called before use.
 * NOTE(review): the malloc result is not checked; the immediate member
 * writes would dereference null on allocation failure — confirm whether
 * callers guarantee this cannot happen. */
void* WebRtcAgc_Create() {
  LegacyAgc* stt = static_cast<LegacyAgc*>(malloc(sizeof(LegacyAgc)));

  stt->initFlag = 0;
  stt->lastError = 0;

  return stt;
}

/* Releases an instance created by WebRtcAgc_Create(). */
void WebRtcAgc_Free(void* state) {
  LegacyAgc* stt;

  stt = reinterpret_cast<LegacyAgc*>(state);
  free(stt);
}

/* minLevel  - Minimum volume level
 * maxLevel  - Maximum volume level
 */
int WebRtcAgc_Init(void* agcInst,
                   int32_t minLevel,
                   int32_t maxLevel,
                   int16_t agcMode,
                   uint32_t fs) {
  int32_t max_add, tmp32;
  int16_t i;
  int tmpNorm;
  LegacyAgc* stt;

  /* typecast state pointer */
  stt = reinterpret_cast<LegacyAgc*>(agcInst);

  if (WebRtcAgc_InitDigital(&stt->digitalAgc, agcMode) != 0) {
    stt->lastError = AGC_UNINITIALIZED_ERROR;
    return -1;
  }

  /* Analog AGC variables */
  stt->envSum = 0;

  /* mode     = 0 - Only saturation protection
   *            1 - Analog Automatic Gain Control [-targetLevelDbfs (default -3
   * dBOv)]
   *            2 - Digital Automatic Gain Control [-targetLevelDbfs (default -3
   * dBOv)]
   *            3 - Fixed Digital Gain [compressionGaindB (default 8 dB)]
   */
  if (agcMode < kAgcModeUnchanged || agcMode > kAgcModeFixedDigital) {
    return -1;
  }
  stt->agcMode = agcMode;
  stt->fs = fs;

  /* initialize input VAD */
  WebRtcAgc_InitVad(&stt->vadMic);

  /* If the
volume range is smaller than 0-256 then 1140 * the levels are shifted up to Q8-domain */ 1141 tmpNorm = WebRtcSpl_NormU32((uint32_t)maxLevel); 1142 stt->scale = tmpNorm - 23; 1143 if (stt->scale < 0) { 1144 stt->scale = 0; 1145 } 1146 // TODO(bjornv): Investigate if we really need to scale up a small range now 1147 // when we have 1148 // a guard against zero-increments. For now, we do not support scale up (scale 1149 // = 0). 1150 stt->scale = 0; 1151 maxLevel <<= stt->scale; 1152 minLevel <<= stt->scale; 1153 1154 /* Make minLevel and maxLevel static in AdaptiveDigital */ 1155 if (stt->agcMode == kAgcModeAdaptiveDigital) { 1156 minLevel = 0; 1157 maxLevel = 255; 1158 stt->scale = 0; 1159 } 1160 /* The maximum supplemental volume range is based on a vague idea 1161 * of how much lower the gain will be than the real analog gain. */ 1162 max_add = (maxLevel - minLevel) / 4; 1163 1164 /* Minimum/maximum volume level that can be set */ 1165 stt->minLevel = minLevel; 1166 stt->maxAnalog = maxLevel; 1167 stt->maxLevel = maxLevel + max_add; 1168 stt->maxInit = stt->maxLevel; 1169 1170 stt->zeroCtrlMax = stt->maxAnalog; 1171 stt->lastInMicLevel = 0; 1172 1173 /* Initialize micVol parameter */ 1174 stt->micVol = stt->maxAnalog; 1175 if (stt->agcMode == kAgcModeAdaptiveDigital) { 1176 stt->micVol = 127; /* Mid-point of mic level */ 1177 } 1178 stt->micRef = stt->micVol; 1179 stt->micGainIdx = 127; 1180 1181 /* Minimum output volume is 4% higher than the available lowest volume level 1182 */ 1183 tmp32 = ((stt->maxLevel - stt->minLevel) * 10) >> 8; 1184 stt->minOutput = (stt->minLevel + tmp32); 1185 1186 stt->msTooLow = 0; 1187 stt->msTooHigh = 0; 1188 stt->changeToSlowMode = 0; 1189 stt->firstCall = 0; 1190 stt->msZero = 0; 1191 stt->muteGuardMs = 0; 1192 stt->gainTableIdx = 0; 1193 1194 stt->msecSpeechInnerChange = kMsecSpeechInner; 1195 stt->msecSpeechOuterChange = kMsecSpeechOuter; 1196 1197 stt->activeSpeech = 0; 1198 stt->Rxx16_LPw32Max = 0; 1199 1200 stt->vadThreshold 
= kNormalVadThreshold; 1201 stt->inActive = 0; 1202 1203 for (i = 0; i < kRxxBufferLen; i++) { 1204 stt->Rxx16_vectorw32[i] = (int32_t)1000; /* -54dBm0 */ 1205 } 1206 stt->Rxx160w32 = 125 * kRxxBufferLen; /* (stt->Rxx16_vectorw32[0]>>3) = 125 */ 1207 1208 stt->Rxx16pos = 0; 1209 stt->Rxx16_LPw32 = (int32_t)16284; /* Q(-4) */ 1210 1211 for (i = 0; i < 5; i++) { 1212 stt->Rxx16w32_array[0][i] = 0; 1213 } 1214 for (i = 0; i < 10; i++) { 1215 stt->env[0][i] = 0; 1216 stt->env[1][i] = 0; 1217 } 1218 stt->inQueue = 0; 1219 1220 WebRtcSpl_MemSetW32(stt->filterState, 0, 8); 1221 1222 stt->initFlag = kInitCheck; 1223 // Default config settings. 1224 stt->defaultConfig.limiterEnable = kAgcTrue; 1225 stt->defaultConfig.targetLevelDbfs = AGC_DEFAULT_TARGET_LEVEL; 1226 stt->defaultConfig.compressionGaindB = AGC_DEFAULT_COMP_GAIN; 1227 1228 if (WebRtcAgc_set_config(stt, stt->defaultConfig) == -1) { 1229 stt->lastError = AGC_UNSPECIFIED_ERROR; 1230 return -1; 1231 } 1232 stt->Rxx160_LPw32 = stt->analogTargetLevel; // Initialize rms value 1233 1234 stt->lowLevelSignal = 0; 1235 1236 /* Only positive values are allowed that are not too large */ 1237 if ((minLevel >= maxLevel) || (maxLevel & 0xFC000000)) { 1238 return -1; 1239 } else { 1240 return 0; 1241 } 1242 } 1243 1244 } // namespace webrtc