1 /* | |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 | |
11 /* | |
12 * The core AEC algorithm, which is presented with time-aligned signals. | |
13 */ | |
14 | |
15 #include "webrtc/modules/audio_processing/aec/aec_core.h" | |
16 | |
17 #ifdef WEBRTC_AEC_DEBUG_DUMP | |
18 #include <stdio.h> | |
19 #endif | |
20 | |
21 #include <assert.h> | |
22 #include <math.h> | |
23 #include <stddef.h> // size_t | |
24 #include <stdlib.h> | |
25 #include <string.h> | |
26 | |
27 #include "webrtc/common_audio/ring_buffer.h" | |
28 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" | |
29 #include "webrtc/modules/audio_processing/aec/aec_common.h" | |
30 #include "webrtc/modules/audio_processing/aec/aec_core_internal.h" | |
31 #include "webrtc/modules/audio_processing/aec/aec_rdft.h" | |
32 #include "webrtc/modules/audio_processing/logging/aec_logging.h" | |
33 #include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h" | |
34 #include "webrtc/system_wrappers/include/cpu_features_wrapper.h" | |
35 #include "webrtc/typedefs.h" | |
36 | |
37 // Buffer size (samples) | |
38 static const size_t kBufSizePartitions = 250; // 1 second of audio in 16 kHz. | |
39 | |
40 // Metrics | |
41 static const int subCountLen = 4; | |
42 static const int countLen = 50; | |
43 static const int kDelayMetricsAggregationWindow = 1250; // 5 seconds at 16 kHz. | |
44 | |
45 // Quantities to control H band scaling for SWB input | |
46 static const float cnScaleHband = | |
47 (float)0.4; // scale for comfort noise in H band | |
48 // Initial bin for averaging nlp gain in low band | |
49 static const int freqAvgIc = PART_LEN / 2; | |
50 | |
51 // Matlab code to produce table: | |
52 // win = sqrt(hanning(63)); win = [0 ; win(1:32)]; | |
53 // fprintf(1, '\t%.14f, %.14f, %.14f,\n', win); | |
54 ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65] = { | |
55 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f, | |
56 0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f, | |
57 0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f, | |
58 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f, | |
59 0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f, | |
60 0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f, | |
61 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f, | |
62 0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f, | |
63 0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f, | |
64 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f, | |
65 0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f, | |
66 0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f, | |
67 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f, | |
68 0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f, | |
69 0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f, | |
70 0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f, | |
71 1.00000000000000f}; | |
72 | |
73 // Matlab code to produce table: | |
74 // weightCurve = [0 ; 0.3 * sqrt(linspace(0,1,64))' + 0.1]; | |
75 // fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', weightCurve); | |
76 ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65] = { | |
77 0.0000f, 0.1000f, 0.1378f, 0.1535f, 0.1655f, 0.1756f, 0.1845f, 0.1926f, | |
78 0.2000f, 0.2069f, 0.2134f, 0.2195f, 0.2254f, 0.2309f, 0.2363f, 0.2414f, | |
79 0.2464f, 0.2512f, 0.2558f, 0.2604f, 0.2648f, 0.2690f, 0.2732f, 0.2773f, | |
80 0.2813f, 0.2852f, 0.2890f, 0.2927f, 0.2964f, 0.3000f, 0.3035f, 0.3070f, | |
81 0.3104f, 0.3138f, 0.3171f, 0.3204f, 0.3236f, 0.3268f, 0.3299f, 0.3330f, | |
82 0.3360f, 0.3390f, 0.3420f, 0.3449f, 0.3478f, 0.3507f, 0.3535f, 0.3563f, | |
83 0.3591f, 0.3619f, 0.3646f, 0.3673f, 0.3699f, 0.3726f, 0.3752f, 0.3777f, | |
84 0.3803f, 0.3828f, 0.3854f, 0.3878f, 0.3903f, 0.3928f, 0.3952f, 0.3976f, | |
85 0.4000f}; | |
86 | |
87 // Matlab code to produce table: | |
88 // overDriveCurve = [sqrt(linspace(0,1,65))' + 1]; | |
89 // fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', overDriveCurve); | |
90 ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65] = { | |
91 1.0000f, 1.1250f, 1.1768f, 1.2165f, 1.2500f, 1.2795f, 1.3062f, 1.3307f, | |
92 1.3536f, 1.3750f, 1.3953f, 1.4146f, 1.4330f, 1.4507f, 1.4677f, 1.4841f, | |
93 1.5000f, 1.5154f, 1.5303f, 1.5449f, 1.5590f, 1.5728f, 1.5863f, 1.5995f, | |
94 1.6124f, 1.6250f, 1.6374f, 1.6495f, 1.6614f, 1.6731f, 1.6847f, 1.6960f, | |
95 1.7071f, 1.7181f, 1.7289f, 1.7395f, 1.7500f, 1.7603f, 1.7706f, 1.7806f, | |
96 1.7906f, 1.8004f, 1.8101f, 1.8197f, 1.8292f, 1.8385f, 1.8478f, 1.8570f, | |
97 1.8660f, 1.8750f, 1.8839f, 1.8927f, 1.9014f, 1.9100f, 1.9186f, 1.9270f, | |
98 1.9354f, 1.9437f, 1.9520f, 1.9601f, 1.9682f, 1.9763f, 1.9843f, 1.9922f, | |
99 2.0000f}; | |
100 | |
101 // Delay Agnostic AEC parameters, still under development and may change. | |
102 static const float kDelayQualityThresholdMax = 0.07f; | |
103 static const float kDelayQualityThresholdMin = 0.01f; | |
104 static const int kInitialShiftOffset = 5; | |
105 #if !defined(WEBRTC_ANDROID) | |
106 static const int kDelayCorrectionStart = 1500; // 10 ms chunks | |
107 #endif | |
108 | |
109 // Target suppression levels for nlp modes. | |
110 // log{0.001, 0.00001, 0.00000001} | |
111 static const float kTargetSupp[3] = {-6.9f, -11.5f, -18.4f}; | |
112 | |
113 // Two sets of parameters, one for the extended filter mode. | |
114 static const float kExtendedMinOverDrive[3] = {3.0f, 6.0f, 15.0f}; | |
115 static const float kNormalMinOverDrive[3] = {1.0f, 2.0f, 5.0f}; | |
116 const float WebRtcAec_kExtendedSmoothingCoefficients[2][2] = {{0.9f, 0.1f}, | |
117 {0.92f, 0.08f}}; | |
118 const float WebRtcAec_kNormalSmoothingCoefficients[2][2] = {{0.9f, 0.1f}, | |
119 {0.93f, 0.07f}}; | |
120 | |
121 // Number of partitions forming the NLP's "preferred" bands. | |
122 enum { kPrefBandSize = 24 }; | |
123 | |
124 #ifdef WEBRTC_AEC_DEBUG_DUMP | |
125 extern int webrtc_aec_instance_count; | |
126 #endif | |
127 | |
128 WebRtcAecFilterFar WebRtcAec_FilterFar; | |
129 WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal; | |
130 WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation; | |
131 WebRtcAecOverdriveAndSuppress WebRtcAec_OverdriveAndSuppress; | |
132 WebRtcAecComfortNoise WebRtcAec_ComfortNoise; | |
133 WebRtcAecSubBandCoherence WebRtcAec_SubbandCoherence; | |
134 WebRtcAecStoreAsComplex WebRtcAec_StoreAsComplex; | |
135 WebRtcAecPartitionDelay WebRtcAec_PartitionDelay; | |
136 WebRtcAecWindowData WebRtcAec_WindowData; | |
137 | |
138 __inline static float MulRe(float aRe, float aIm, float bRe, float bIm) { | |
139 return aRe * bRe - aIm * bIm; | |
140 } | |
141 | |
142 __inline static float MulIm(float aRe, float aIm, float bRe, float bIm) { | |
143 return aRe * bIm + aIm * bRe; | |
144 } | |
145 | |
146 static int CmpFloat(const void* a, const void* b) { | |
147 const float* da = (const float*)a; | |
148 const float* db = (const float*)b; | |
149 | |
150 return (*da > *db) - (*da < *db); | |
151 } | |
152 | |
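// Computes the frequency-domain echo estimate for one block: for every
// filter partition, the stored far-end spectrum in |x_fft_buf| (a circular
// buffer read from |x_fft_buf_block_pos|) is multiplied with the matching
// filter partition in |h_fft_buf| and accumulated into |y_fft|. This is the
// standard partitioned-block (MDF) frequency-domain filtering step.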
153 static void FilterFar(int num_partitions, | |
154 int x_fft_buf_block_pos, | |
155 float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], | |
156 float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1], | |
157 float y_fft[2][PART_LEN1]) { | |
158 int i; | |
159 for (i = 0; i < num_partitions; i++) { | |
160 int j; | |
161 int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; | |
162 int pos = i * PART_LEN1; | |
163 // Check for wrap | |
164 if (i + x_fft_buf_block_pos >= num_partitions) { | |
165 xPos -= num_partitions * (PART_LEN1); | |
166 } | |
167 | |
168 for (j = 0; j < PART_LEN1; j++) { | |
169 y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], | |
170 h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); | |
171 y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], | |
172 h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); | |
173 } | |
174 } | |
175 } | |
176 | |
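// Normalizes the error spectrum for the NLMS-style filter update. Roughly,
// per bin k:
//   ef[k] = mu * clamp(ef[k] / (x_pow[k] + 1e-10), error_threshold)
// i.e. the error is divided by the regularized far-end power, its magnitude
// is limited to |error_threshold|, and it is scaled by the step size |mu|.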
177 static void ScaleErrorSignal(int extended_filter_enabled, | |
178 float normal_mu, | |
179 float normal_error_threshold, | |
180 float x_pow[PART_LEN1], | |
181 float ef[2][PART_LEN1]) { | |
182 const float mu = extended_filter_enabled ? kExtendedMu : normal_mu; | |
183 const float error_threshold = extended_filter_enabled | |
184 ? kExtendedErrorThreshold | |
185 : normal_error_threshold; | |
186 int i; | |
187 float abs_ef; | |
188 for (i = 0; i < (PART_LEN1); i++) { | |
189 ef[0][i] /= (x_pow[i] + 1e-10f); | |
190 ef[1][i] /= (x_pow[i] + 1e-10f); | |
191 abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]); | |
192 | |
193 if (abs_ef > error_threshold) { | |
194 abs_ef = error_threshold / (abs_ef + 1e-10f); | |
195 ef[0][i] *= abs_ef; | |
196 ef[1][i] *= abs_ef; | |
197 } | |
198 | |
199 // Stepsize factor | |
200 ef[0][i] *= mu; | |
201 ef[1][i] *= mu; | |
202 } | |
203 } | |
204 | |
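// Adapts the partitioned filter. The per-bin gradient conj(X) * E is formed
// for each partition (note the negated imaginary part of |x_fft_buf|) and
// then constrained: inverse FFT, zeroing of the last PART_LEN time-domain
// samples, scaling by 2 / PART_LEN2 and a forward FFT, before being added to
// |h_fft_buf|. This is the usual constrained frequency-domain LMS update.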
205 static void FilterAdaptation( | |
206 int num_partitions, | |
207 int x_fft_buf_block_pos, | |
208 float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], | |
209 float e_fft[2][PART_LEN1], | |
210 float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { | |
211 int i, j; | |
212 float fft[PART_LEN2]; | |
213 for (i = 0; i < num_partitions; i++) { | |
214 int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1); | |
215 int pos; | |
216 // Check for wrap | |
217 if (i + x_fft_buf_block_pos >= num_partitions) { | |
218 xPos -= num_partitions * PART_LEN1; | |
219 } | |
220 | |
221 pos = i * PART_LEN1; | |
222 | |
223 for (j = 0; j < PART_LEN; j++) { | |
224 fft[2 * j] = MulRe(x_fft_buf[0][xPos + j], -x_fft_buf[1][xPos + j], | |
225 e_fft[0][j], e_fft[1][j]); | |
226 fft[2 * j + 1] = MulIm(x_fft_buf[0][xPos + j], -x_fft_buf[1][xPos + j], | |
227 e_fft[0][j], e_fft[1][j]); | |
228 } | |
229 fft[1] = | |
230 MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN], | |
231 e_fft[0][PART_LEN], e_fft[1][PART_LEN]); | |
232 | |
233 aec_rdft_inverse_128(fft); | |
234 memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); | |
235 | |
236 // fft scaling | |
237 { | |
238 float scale = 2.0f / PART_LEN2; | |
239 for (j = 0; j < PART_LEN; j++) { | |
240 fft[j] *= scale; | |
241 } | |
242 } | |
243 aec_rdft_forward_128(fft); | |
244 | |
245 h_fft_buf[0][pos] += fft[0]; | |
246 h_fft_buf[0][pos + PART_LEN] += fft[1]; | |
247 | |
248 for (j = 1; j < PART_LEN; j++) { | |
249 h_fft_buf[0][pos + j] += fft[2 * j]; | |
250 h_fft_buf[1][pos + j] += fft[2 * j + 1]; | |
251 } | |
252 } | |
253 } | |
254 | |
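// Applies the suppression gain |hNl| to the error spectrum. Bins whose gain
// exceeds the full-band gain |hNlFb| are pulled towards it using
// WebRtcAec_weightCurve, and every bin is raised to the power
// overDriveSm * WebRtcAec_overDriveCurve[i], so suppression gets stronger
// with frequency and with the overdrive level. The imaginary part is negated
// to compensate for the Ooura FFT sign convention (see comment below).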
255 static void OverdriveAndSuppress(AecCore* aec, | |
256 float hNl[PART_LEN1], | |
257 const float hNlFb, | |
258 float efw[2][PART_LEN1]) { | |
259 int i; | |
260 for (i = 0; i < PART_LEN1; i++) { | |
261 // Weight subbands | |
262 if (hNl[i] > hNlFb) { | |
263 hNl[i] = WebRtcAec_weightCurve[i] * hNlFb + | |
264 (1 - WebRtcAec_weightCurve[i]) * hNl[i]; | |
265 } | |
266 hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]); | |
267 | |
268 // Suppress error signal | |
269 efw[0][i] *= hNl[i]; | |
270 efw[1][i] *= hNl[i]; | |
271 | |
272 // Ooura fft returns incorrect sign on imaginary component. It matters here | |
273 // because we are making an additive change with comfort noise. | |
274 efw[1][i] *= -1; | |
275 } | |
276 } | |
277 | |
278 static int PartitionDelay(const AecCore* aec) { | |
279 // Measures the energy in each filter partition and returns the partition with | |
280 // the highest energy. | |
281 // TODO(bjornv): Spread computational cost by computing one partition per | |
282 // block? | |
283 float wfEnMax = 0; | |
284 int i; | |
285 int delay = 0; | |
286 | |
287 for (i = 0; i < aec->num_partitions; i++) { | |
288 int j; | |
289 int pos = i * PART_LEN1; | |
290 float wfEn = 0; | |
291 for (j = 0; j < PART_LEN1; j++) { | |
292 wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] + | |
293 aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j]; | |
294 } | |
295 | |
296 if (wfEn > wfEnMax) { | |
297 wfEnMax = wfEn; | |
298 delay = i; | |
299 } | |
300 } | |
301 return delay; | |
302 } | |
303 | |
304 // Threshold to protect against the ill-effects of a zero far-end. | |
305 const float WebRtcAec_kMinFarendPSD = 15; | |
306 | |
307 // Updates the following smoothed Power Spectral Densities (PSD): | |
308 // - sd : near-end | |
309 // - se : residual echo | |
310 // - sx : far-end | |
311 // - sde : cross-PSD of near-end and residual echo | |
312 // - sxd : cross-PSD of near-end and far-end | |
313 // | |
314 // In addition to updating the PSDs, also the filter diverge state is | |
315 // determined. | |
316 static void SmoothedPSD(AecCore* aec, | |
317 float efw[2][PART_LEN1], | |
318 float dfw[2][PART_LEN1], | |
319 float xfw[2][PART_LEN1], | |
320 int* extreme_filter_divergence) { | |
321 // Power estimate smoothing coefficients. | |
322 const float* ptrGCoh = | |
323 aec->extended_filter_enabled | |
324 ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1] | |
325 : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1]; | |
326 int i; | |
327 float sdSum = 0, seSum = 0; | |
328 | |
329 for (i = 0; i < PART_LEN1; i++) { | |
330 aec->sd[i] = ptrGCoh[0] * aec->sd[i] + | |
331 ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]); | |
332 aec->se[i] = ptrGCoh[0] * aec->se[i] + | |
333 ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]); | |
334 // We threshold here to protect against the ill-effects of a zero farend. | |
335 // The threshold is not arbitrarily chosen, but balances protection and | |
336 // adverse interaction with the algorithm's tuning. | |
337 // TODO(bjornv): investigate further why this is so sensitive. | |
338 aec->sx[i] = ptrGCoh[0] * aec->sx[i] + | |
339 ptrGCoh[1] * WEBRTC_SPL_MAX( | |
340 xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], | |
341 WebRtcAec_kMinFarendPSD); | |
342 | |
343 aec->sde[i][0] = | |
344 ptrGCoh[0] * aec->sde[i][0] + | |
345 ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]); | |
346 aec->sde[i][1] = | |
347 ptrGCoh[0] * aec->sde[i][1] + | |
348 ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]); | |
349 | |
350 aec->sxd[i][0] = | |
351 ptrGCoh[0] * aec->sxd[i][0] + | |
352 ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]); | |
353 aec->sxd[i][1] = | |
354 ptrGCoh[0] * aec->sxd[i][1] + | |
355 ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]); | |
356 | |
357 sdSum += aec->sd[i]; | |
358 seSum += aec->se[i]; | |
359 } | |
360 | |
361 // Divergent filter safeguard update. | |
362 aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum; | |
363 | |
364 // Signal extreme filter divergence if the error is significantly larger | |
365 // than the nearend (13 dB). | |
366 *extreme_filter_divergence = (seSum > (19.95f * sdSum)); | |
367 } | |
368 | |
369 // Window time domain data to be used by the fft. | |
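// Note that WebRtcAec_sqrtHanning[i]^2 + WebRtcAec_sqrtHanning[PART_LEN - i]^2
// equals 1, so windowing here and again at overlap-add time (see the output
// loop in EchoSuppression()) reconstructs the signal.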
370 __inline static void WindowData(float* x_windowed, const float* x) { | |
371 int i; | |
372 for (i = 0; i < PART_LEN; i++) { | |
373 x_windowed[i] = x[i] * WebRtcAec_sqrtHanning[i]; | |
374 x_windowed[PART_LEN + i] = | |
375 x[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i]; | |
376 } | |
377 } | |
378 | |
379 // Puts fft output data into a complex valued array. | |
380 __inline static void StoreAsComplex(const float* data, | |
381 float data_complex[2][PART_LEN1]) { | |
382 int i; | |
383 data_complex[0][0] = data[0]; | |
384 data_complex[1][0] = 0; | |
385 for (i = 1; i < PART_LEN; i++) { | |
386 data_complex[0][i] = data[2 * i]; | |
387 data_complex[1][i] = data[2 * i + 1]; | |
388 } | |
389 data_complex[0][PART_LEN] = data[1]; | |
390 data_complex[1][PART_LEN] = 0; | |
391 } | |
392 | |
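// Computes magnitude-squared coherences per bin from the smoothed PSDs, e.g.
//   cohde[k] = |sde[k]|^2 / (sd[k] * se[k] + 1e-10).
// Roughly, |cohde| close to 1 means the error still resembles the near-end
// (little echo removed, or near-end activity), while |cohxd| close to 1 means
// the near-end is dominated by the far-end, i.e. echo. These values drive the
// NLP gain selection in EchoSuppression().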
393 static void SubbandCoherence(AecCore* aec, | |
394 float efw[2][PART_LEN1], | |
395 float dfw[2][PART_LEN1], | |
396 float xfw[2][PART_LEN1], | |
397 float* fft, | |
398 float* cohde, | |
399 float* cohxd, | |
400 int* extreme_filter_divergence) { | |
401 int i; | |
402 | |
403 SmoothedPSD(aec, efw, dfw, xfw, extreme_filter_divergence); | |
404 | |
405 // Subband coherence | |
406 for (i = 0; i < PART_LEN1; i++) { | |
407 cohde[i] = | |
408 (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) / | |
409 (aec->sd[i] * aec->se[i] + 1e-10f); | |
410 cohxd[i] = | |
411 (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) / | |
412 (aec->sx[i] * aec->sd[i] + 1e-10f); | |
413 } | |
414 } | |
415 | |
416 static void GetHighbandGain(const float* lambda, float* nlpGainHband) { | |
417 int i; | |
418 | |
419 *nlpGainHband = (float)0.0; | |
420 for (i = freqAvgIc; i < PART_LEN1 - 1; i++) { | |
421 *nlpGainHband += lambda[i]; | |
422 } | |
423 *nlpGainHband /= (float)(PART_LEN1 - 1 - freqAvgIc); | |
424 } | |
425 | |
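// Adds comfort noise to the suppressed error spectrum. Each bin gets noise
// with magnitude sqrt(noisePow[i]), random phase and weight
// sqrt(1 - lambda[i]^2), which roughly keeps the output noise floor at the
// estimated background level regardless of how much suppression |lambda|
// applies. For the H band a single averaged noise level and NLP weight are
// used instead of per-bin values.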
426 static void ComfortNoise(AecCore* aec, | |
427 float efw[2][PART_LEN1], | |
428 float comfortNoiseHband[2][PART_LEN1], | |
429 const float* noisePow, | |
430 const float* lambda) { | |
431 int i, num; | |
432 float rand[PART_LEN]; | |
433 float noise, noiseAvg, tmp, tmpAvg; | |
434 int16_t randW16[PART_LEN]; | |
435 float u[2][PART_LEN1]; | |
436 | |
437 const float pi2 = 6.28318530717959f; | |
438 | |
439 // Generate a uniform random array on [0 1] | |
440 WebRtcSpl_RandUArray(randW16, PART_LEN, &aec->seed); | |
441 for (i = 0; i < PART_LEN; i++) { | |
442 rand[i] = ((float)randW16[i]) / 32768; | |
443 } | |
444 | |
445 // Reject LF noise | |
446 u[0][0] = 0; | |
447 u[1][0] = 0; | |
448 for (i = 1; i < PART_LEN1; i++) { | |
449 tmp = pi2 * rand[i - 1]; | |
450 | |
451 noise = sqrtf(noisePow[i]); | |
452 u[0][i] = noise * cosf(tmp); | |
453 u[1][i] = -noise * sinf(tmp); | |
454 } | |
455 u[1][PART_LEN] = 0; | |
456 | |
457 for (i = 0; i < PART_LEN1; i++) { | |
458 // This is the proper weighting to match the background noise power | |
459 tmp = sqrtf(WEBRTC_SPL_MAX(1 - lambda[i] * lambda[i], 0)); | |
460 // tmp = 1 - lambda[i]; | |
461 efw[0][i] += tmp * u[0][i]; | |
462 efw[1][i] += tmp * u[1][i]; | |
463 } | |
464 | |
465 // For H band comfort noise | |
466 // TODO: don't compute noise and "tmp" twice. Use the previous results. | |
467 noiseAvg = 0.0; | |
468 tmpAvg = 0.0; | |
469 num = 0; | |
470 if (aec->num_bands > 1) { | |
471 // average noise scale | |
472 // average over second half of freq spectrum (i.e., 4->8khz) | |
473 // TODO: we shouldn't need num. We know how many elements we're summing. | |
474 for (i = PART_LEN1 >> 1; i < PART_LEN1; i++) { | |
475 num++; | |
476 noiseAvg += sqrtf(noisePow[i]); | |
477 } | |
478 noiseAvg /= (float)num; | |
479 | |
480 // average nlp scale | |
481 // average over second half of freq spectrum (i.e., 4->8khz) | |
482 // TODO: we shouldn't need num. We know how many elements we're summing. | |
483 num = 0; | |
484 for (i = PART_LEN1 >> 1; i < PART_LEN1; i++) { | |
485 num++; | |
486 tmpAvg += sqrtf(WEBRTC_SPL_MAX(1 - lambda[i] * lambda[i], 0)); | |
487 } | |
488 tmpAvg /= (float)num; | |
489 | |
490 // Use average noise for H band | |
491 // TODO: we should probably have a new random vector here. | |
492 // Reject LF noise | |
493 u[0][0] = 0; | |
494 u[1][0] = 0; | |
495 for (i = 1; i < PART_LEN1; i++) { | |
496 tmp = pi2 * rand[i - 1]; | |
497 | |
498 // Use average noise for H band | |
499 u[0][i] = noiseAvg * (float)cos(tmp); | |
500 u[1][i] = -noiseAvg * (float)sin(tmp); | |
501 } | |
502 u[1][PART_LEN] = 0; | |
503 | |
504 for (i = 0; i < PART_LEN1; i++) { | |
505 // Use average NLP weight for H band | |
506 comfortNoiseHband[0][i] = tmpAvg * u[0][i]; | |
507 comfortNoiseHband[1][i] = tmpAvg * u[1][i]; | |
508 } | |
509 } else { | |
510 memset(comfortNoiseHband, 0, | |
511 2 * PART_LEN1 * sizeof(comfortNoiseHband[0][0])); | |
512 } | |
513 } | |
514 | |
515 static void InitLevel(PowerLevel* level) { | |
516 const float kBigFloat = 1E17f; | |
517 | |
518 level->averagelevel = 0; | |
519 level->framelevel = 0; | |
520 level->minlevel = kBigFloat; | |
521 level->frsum = 0; | |
522 level->sfrsum = 0; | |
523 level->frcounter = 0; | |
524 level->sfrcounter = 0; | |
525 } | |
526 | |
527 static void InitStats(Stats* stats) { | |
528 stats->instant = kOffsetLevel; | |
529 stats->average = kOffsetLevel; | |
530 stats->max = kOffsetLevel; | |
531 stats->min = kOffsetLevel * (-1); | |
532 stats->sum = 0; | |
533 stats->hisum = 0; | |
534 stats->himean = kOffsetLevel; | |
535 stats->counter = 0; | |
536 stats->hicounter = 0; | |
537 } | |
538 | |
539 static void InitMetrics(AecCore* self) { | |
540 self->stateCounter = 0; | |
541 InitLevel(&self->farlevel); | |
542 InitLevel(&self->nearlevel); | |
543 InitLevel(&self->linoutlevel); | |
544 InitLevel(&self->nlpoutlevel); | |
545 | |
546 InitStats(&self->erl); | |
547 InitStats(&self->erle); | |
548 InitStats(&self->aNlp); | |
549 InitStats(&self->rerl); | |
550 } | |
551 | |
552 static float CalculatePower(const float* in, size_t num_samples) { | |
553 size_t k; | |
554 float energy = 0.0f; | |
555 | |
556 for (k = 0; k < num_samples; ++k) { | |
557 energy += in[k] * in[k]; | |
558 } | |
559 return energy / num_samples; | |
560 } | |
561 | |
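// Tracks audio levels with two stages of averaging: |subCountLen| block
// energies form |framelevel|, |countLen| frame levels form |averagelevel|,
// and |minlevel| follows the smallest frame level seen, drifting up by 0.1%
// per frame so it can recover after quiet periods. At the 16 kHz processing
// rate this corresponds to frames of roughly 16 ms and averages over roughly
// 0.8 s.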
562 static void UpdateLevel(PowerLevel* level, float energy) { | |
563 level->sfrsum += energy; | |
564 level->sfrcounter++; | |
565 | |
566 if (level->sfrcounter > subCountLen) { | |
567 level->framelevel = level->sfrsum / (subCountLen * PART_LEN); | |
568 level->sfrsum = 0; | |
569 level->sfrcounter = 0; | |
570 if (level->framelevel > 0) { | |
571 if (level->framelevel < level->minlevel) { | |
572 level->minlevel = level->framelevel; // New minimum. | |
573 } else { | |
574 level->minlevel *= (1 + 0.001f); // Small increase. | |
575 } | |
576 } | |
577 level->frcounter++; | |
578 level->frsum += level->framelevel; | |
579 if (level->frcounter > countLen) { | |
580 level->averagelevel = level->frsum / countLen; | |
581 level->frsum = 0; | |
582 level->frcounter = 0; | |
583 } | |
584 } | |
585 } | |
586 | |
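// Updates the echo metrics reported to the user:
//   ERL   = 10*log10(far / near)           (echo return loss of the room),
//   A_NLP = 10*log10(near / linear out)    (attenuation of the linear filter),
//   ERLE  = 10*log10(echo / suppressed)    (enhancement incl. the NLP),
// computed once per |countLen| frames, only during active far-end segments,
// and with the tracked minimum (noise) levels subtracted as a noise floor.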
587 static void UpdateMetrics(AecCore* aec) { | |
588 float dtmp, dtmp2; | |
589 | |
590 const float actThresholdNoisy = 8.0f; | |
591 const float actThresholdClean = 40.0f; | |
592 const float safety = 0.99995f; | |
593 | |
594 // To make noisePower consistent with the legacy code, a factor of | |
595 // 2.0f / PART_LEN2 is applied to noisyPower, since the legacy code uses | |
596 // the energy of a frame as the audio levels, while the new code uses a | |
597 // per-sample energy (i.e., power). | |
598 const float noisyPower = 300000.0f * 2.0f / PART_LEN2; | |
599 | |
600 float actThreshold; | |
601 float echo, suppressedEcho; | |
602 | |
603 if (aec->echoState) { // Check if echo is likely present | |
604 aec->stateCounter++; | |
605 } | |
606 | |
607 if (aec->farlevel.frcounter == 0) { | |
608 if (aec->farlevel.minlevel < noisyPower) { | |
609 actThreshold = actThresholdClean; | |
610 } else { | |
611 actThreshold = actThresholdNoisy; | |
612 } | |
613 | |
614 if ((aec->stateCounter > (0.5f * countLen * subCountLen)) && | |
615 (aec->farlevel.sfrcounter == 0) | |
616 | |
617 // Estimate in active far-end segments only | |
618 && (aec->farlevel.averagelevel > | |
619 (actThreshold * aec->farlevel.minlevel))) { | |
620 // Subtract noise power | |
621 echo = aec->nearlevel.averagelevel - safety * aec->nearlevel.minlevel; | |
622 | |
623 // ERL | |
624 dtmp = 10 * (float)log10(aec->farlevel.averagelevel / | |
625 aec->nearlevel.averagelevel + | |
626 1e-10f); | |
627 dtmp2 = 10 * (float)log10(aec->farlevel.averagelevel / echo + 1e-10f); | |
628 | |
629 aec->erl.instant = dtmp; | |
630 if (dtmp > aec->erl.max) { | |
631 aec->erl.max = dtmp; | |
632 } | |
633 | |
634 if (dtmp < aec->erl.min) { | |
635 aec->erl.min = dtmp; | |
636 } | |
637 | |
638 aec->erl.counter++; | |
639 aec->erl.sum += dtmp; | |
640 aec->erl.average = aec->erl.sum / aec->erl.counter; | |
641 | |
642 // Upper mean | |
643 if (dtmp > aec->erl.average) { | |
644 aec->erl.hicounter++; | |
645 aec->erl.hisum += dtmp; | |
646 aec->erl.himean = aec->erl.hisum / aec->erl.hicounter; | |
647 } | |
648 | |
649 // A_NLP | |
650 dtmp = 10 * (float)log10(aec->nearlevel.averagelevel / | |
651 aec->linoutlevel.averagelevel + 1e-10f); | |
652 | |
653 // subtract noise power | |
654 suppressedEcho = aec->linoutlevel.averagelevel - | |
655 safety * aec->linoutlevel.minlevel; | |
656 | |
657 dtmp2 = 10 * (float)log10(echo / suppressedEcho + 1e-10f); | |
658 | |
659 aec->aNlp.instant = dtmp2; | |
660 if (dtmp > aec->aNlp.max) { | |
661 aec->aNlp.max = dtmp; | |
662 } | |
663 | |
664 if (dtmp < aec->aNlp.min) { | |
665 aec->aNlp.min = dtmp; | |
666 } | |
667 | |
668 aec->aNlp.counter++; | |
669 aec->aNlp.sum += dtmp; | |
670 aec->aNlp.average = aec->aNlp.sum / aec->aNlp.counter; | |
671 | |
672 // Upper mean | |
673 if (dtmp > aec->aNlp.average) { | |
674 aec->aNlp.hicounter++; | |
675 aec->aNlp.hisum += dtmp; | |
676 aec->aNlp.himean = aec->aNlp.hisum / aec->aNlp.hicounter; | |
677 } | |
678 | |
679 // ERLE | |
680 | |
681 // subtract noise power | |
682 suppressedEcho = 2 * (aec->nlpoutlevel.averagelevel - | |
683 safety * aec->nlpoutlevel.minlevel); | |
684 | |
685 dtmp = 10 * (float)log10(aec->nearlevel.averagelevel / | |
686 (2 * aec->nlpoutlevel.averagelevel) + | |
687 1e-10f); | |
688 dtmp2 = 10 * (float)log10(echo / suppressedEcho + 1e-10f); | |
689 | |
690 dtmp = dtmp2; | |
691 aec->erle.instant = dtmp; | |
692 if (dtmp > aec->erle.max) { | |
693 aec->erle.max = dtmp; | |
694 } | |
695 | |
696 if (dtmp < aec->erle.min) { | |
697 aec->erle.min = dtmp; | |
698 } | |
699 | |
700 aec->erle.counter++; | |
701 aec->erle.sum += dtmp; | |
702 aec->erle.average = aec->erle.sum / aec->erle.counter; | |
703 | |
704 // Upper mean | |
705 if (dtmp > aec->erle.average) { | |
706 aec->erle.hicounter++; | |
707 aec->erle.hisum += dtmp; | |
708 aec->erle.himean = aec->erle.hisum / aec->erle.hicounter; | |
709 } | |
710 } | |
711 | |
712 aec->stateCounter = 0; | |
713 } | |
714 } | |
715 | |
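// Aggregates the delay histogram into reportable metrics: the median delay
// (histogram count-down), a spread measure (L1 norm around the median) and
// the fraction of estimates outside [0, filter length]. Block indices are
// converted to milliseconds with kMsPerBlock = PART_LEN / (mult * 8), i.e.
// 4 ms per block for 16 kHz and higher sample rates.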
716 static void UpdateDelayMetrics(AecCore* self) { | |
717 int i = 0; | |
718 int delay_values = 0; | |
719 int median = 0; | |
720 int lookahead = WebRtc_lookahead(self->delay_estimator); | |
721 const int kMsPerBlock = PART_LEN / (self->mult * 8); | |
722 int64_t l1_norm = 0; | |
723 | |
724 if (self->num_delay_values == 0) { | |
725 // We have no new delay value data. Even though -1 is a valid |median| in | |
726 // the sense that we allow negative values, it will practically never be | |
727 // used since multiples of |kMsPerBlock| will always be returned. | |
728 // We therefore use -1 to indicate in the logs that the delay estimator was | |
729 // not able to estimate the delay. | |
730 self->delay_median = -1; | |
731 self->delay_std = -1; | |
732 self->fraction_poor_delays = -1; | |
733 return; | |
734 } | |
735 | |
736 // Start value for median count down. | |
737 delay_values = self->num_delay_values >> 1; | |
738 // Get median of delay values since last update. | |
739 for (i = 0; i < kHistorySizeBlocks; i++) { | |
740 delay_values -= self->delay_histogram[i]; | |
741 if (delay_values < 0) { | |
742 median = i; | |
743 break; | |
744 } | |
745 } | |
746 // Account for lookahead. | |
747 self->delay_median = (median - lookahead) * kMsPerBlock; | |
748 | |
749 // Calculate the L1 norm, with median value as central moment. | |
750 for (i = 0; i < kHistorySizeBlocks; i++) { | |
751 l1_norm += abs(i - median) * self->delay_histogram[i]; | |
752 } | |
753 self->delay_std = | |
754 (int)((l1_norm + self->num_delay_values / 2) / self->num_delay_values) * | |
755 kMsPerBlock; | |
756 | |
757 // Determine fraction of delays that are out of bounds, that is, either | |
758 // negative (anti-causal system) or larger than the AEC filter length. | |
759 { | |
760 int num_delays_out_of_bounds = self->num_delay_values; | |
761 const int histogram_length = | |
762 sizeof(self->delay_histogram) / sizeof(self->delay_histogram[0]); | |
763 for (i = lookahead; i < lookahead + self->num_partitions; ++i) { | |
764 if (i < histogram_length) | |
765 num_delays_out_of_bounds -= self->delay_histogram[i]; | |
766 } | |
767 self->fraction_poor_delays = | |
768 (float)num_delays_out_of_bounds / self->num_delay_values; | |
769 } | |
770 | |
771 // Reset histogram. | |
772 memset(self->delay_histogram, 0, sizeof(self->delay_histogram)); | |
773 self->num_delay_values = 0; | |
774 | |
775 return; | |
776 } | |
777 | |
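// Helpers for converting between the packed in-place format used by
// aec_rdft_forward_128/aec_rdft_inverse_128 (DC in element 0, the real
// Nyquist bin in element 1, then interleaved re/im pairs) and the
// [2][PART_LEN1] split real/imaginary layout used elsewhere in this file.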
778 static void ScaledInverseFft(float freq_data[2][PART_LEN1], | |
779 float time_data[PART_LEN2], | |
780 float scale, | |
781 int conjugate) { | |
782 int i; | |
783 const float normalization = scale / ((float)PART_LEN2); | |
784 const float sign = (conjugate ? -1 : 1); | |
785 time_data[0] = freq_data[0][0] * normalization; | |
786 time_data[1] = freq_data[0][PART_LEN] * normalization; | |
787 for (i = 1; i < PART_LEN; i++) { | |
788 time_data[2 * i] = freq_data[0][i] * normalization; | |
789 time_data[2 * i + 1] = sign * freq_data[1][i] * normalization; | |
790 } | |
791 aec_rdft_inverse_128(time_data); | |
792 } | |
793 | |
794 static void Fft(float time_data[PART_LEN2], float freq_data[2][PART_LEN1]) { | |
795 int i; | |
796 aec_rdft_forward_128(time_data); | |
797 | |
798 // Reorder fft output data. | |
799 freq_data[1][0] = 0; | |
800 freq_data[1][PART_LEN] = 0; | |
801 freq_data[0][0] = time_data[0]; | |
802 freq_data[0][PART_LEN] = time_data[1]; | |
803 for (i = 1; i < PART_LEN; i++) { | |
804 freq_data[0][i] = time_data[2 * i]; | |
805 freq_data[1][i] = time_data[2 * i + 1]; | |
806 } | |
807 } | |
808 | |
809 static int SignalBasedDelayCorrection(AecCore* self) { | |
810 int delay_correction = 0; | |
811 int last_delay = -2; | |
812 assert(self != NULL); | |
813 #if !defined(WEBRTC_ANDROID) | |
814 // On desktops, turn on correction after |kDelayCorrectionStart| frames. This | |
815 // is to let the delay estimation get a chance to converge. Also, if the | |
816 // playout audio volume is low (or even muted) the delay estimation can return | |
817 // a very large delay, which will break the AEC if it is applied. | |
818 if (self->frame_count < kDelayCorrectionStart) { | |
819 return 0; | |
820 } | |
821 #endif | |
822 | |
823 // 1. Check for non-negative delay estimate. Note that the estimates we get | |
824 // from the delay estimation are not compensated for lookahead. Hence, a | |
825 // negative |last_delay| is an invalid one. | |
826 // 2. Verify that there is a delay change. In addition, only allow a change | |
827 // if the delay is outside a certain region taking the AEC filter length | |
828 // into account. | |
829 // TODO(bjornv): Investigate if we can remove the non-zero delay change check. | |
830 // 3. Only allow delay correction if the delay estimation quality exceeds | |
831 // |delay_quality_threshold|. | |
832 // 4. Finally, verify that the proposed |delay_correction| is feasible by | |
833 // comparing with the size of the far-end buffer. | |
834 last_delay = WebRtc_last_delay(self->delay_estimator); | |
835 if ((last_delay >= 0) && (last_delay != self->previous_delay) && | |
836 (WebRtc_last_delay_quality(self->delay_estimator) > | |
837 self->delay_quality_threshold)) { | |
838 int delay = last_delay - WebRtc_lookahead(self->delay_estimator); | |
839 // Allow for a slack in the actual delay, defined by a |lower_bound| and an | |
840 // |upper_bound|. The adaptive echo cancellation filter is currently | |
841 // |num_partitions| (of 64 samples) long. If the delay estimate is negative | |
842 // or at least 3/4 of the filter length we open up for correction. | |
843 const int lower_bound = 0; | |
844 const int upper_bound = self->num_partitions * 3 / 4; | |
845 const int do_correction = delay <= lower_bound || delay > upper_bound; | |
846 if (do_correction == 1) { | |
847 int available_read = (int)WebRtc_available_read(self->far_time_buf); | |
848 // With |shift_offset| we gradually rely on the delay estimates. For | |
849 // positive delays we reduce the correction by |shift_offset| to lower the | |
850 // risk of pushing the AEC into a non causal state. For negative delays | |
851 // we rely on the values up to a rounding error, hence compensate by 1 | |
852 // element to make sure to push the delay into the causal region. | |
853 delay_correction = -delay; | |
854 delay_correction += delay > self->shift_offset ? self->shift_offset : 1; | |
855 self->shift_offset--; | |
856 self->shift_offset = (self->shift_offset <= 1 ? 1 : self->shift_offset); | |
857 if (delay_correction > available_read - self->mult - 1) { | |
858 // There is not enough data in the buffer to perform this shift. Hence, | |
859 // we do not rely on the delay estimate and do nothing. | |
860 delay_correction = 0; | |
861 } else { | |
862 self->previous_delay = last_delay; | |
863 ++self->delay_correction_count; | |
864 } | |
865 } | |
866 } | |
867 // Update the |delay_quality_threshold| once we have our first delay | |
868 // correction. | |
869 if (self->delay_correction_count > 0) { | |
870 float delay_quality = WebRtc_last_delay_quality(self->delay_estimator); | |
871 delay_quality = | |
872 (delay_quality > kDelayQualityThresholdMax ? kDelayQualityThresholdMax | |
873 : delay_quality); | |
874 self->delay_quality_threshold = | |
875 (delay_quality > self->delay_quality_threshold | |
876 ? delay_quality | |
877 : self->delay_quality_threshold); | |
878 } | |
879 return delay_correction; | |
880 } | |
881 | |
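// Linear echo subtraction for one block: buffers the newest far-end spectrum
// |x_fft| in the circular buffer |x_fft_buf|, optionally resets a badly
// diverged filter, forms the echo estimate with WebRtcAec_FilterFar,
// subtracts it from the near-end |y| in the time domain, transforms the
// error, normalizes it with WebRtcAec_ScaleErrorSignal and adapts the filter
// with WebRtcAec_FilterAdaptation.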
882 static void EchoSubtraction(AecCore* aec, | |
883 int num_partitions, | |
884 int extended_filter_enabled, | |
885 float normal_mu, | |
886 float normal_error_threshold, | |
887 float* x_fft, | |
888 int* x_fft_buf_block_pos, | |
889 float x_fft_buf[2] | |
890 [kExtendedNumPartitions * PART_LEN1], | |
891 float* const y, | |
892 float x_pow[PART_LEN1], | |
893 float h_fft_buf[2] | |
894 [kExtendedNumPartitions * PART_LEN1], | |
895 float echo_subtractor_output[PART_LEN]) { | |
896 float s_fft[2][PART_LEN1]; | |
897 float e_extended[PART_LEN2]; | |
898 float s_extended[PART_LEN2]; | |
899 float* s; | |
900 float e[PART_LEN]; | |
901 float e_fft[2][PART_LEN1]; | |
902 int i; | |
903 | |
904 // Update the x_fft_buf block position. | |
905 (*x_fft_buf_block_pos)--; | |
906 if ((*x_fft_buf_block_pos) == -1) { | |
907 *x_fft_buf_block_pos = num_partitions - 1; | |
908 } | |
909 | |
910 // Buffer x_fft. | |
911 memcpy(x_fft_buf[0] + (*x_fft_buf_block_pos) * PART_LEN1, x_fft, | |
912 sizeof(float) * PART_LEN1); | |
913 memcpy(x_fft_buf[1] + (*x_fft_buf_block_pos) * PART_LEN1, &x_fft[PART_LEN1], | |
914 sizeof(float) * PART_LEN1); | |
915 | |
916 memset(s_fft, 0, sizeof(s_fft)); | |
917 | |
918 // Conditionally reset the echo subtraction filter if the filter has diverged | |
919 // significantly. | |
920 if (!aec->extended_filter_enabled && aec->extreme_filter_divergence) { | |
921 memset(aec->wfBuf, 0, sizeof(aec->wfBuf)); | |
922 aec->extreme_filter_divergence = 0; | |
923 } | |
924 | |
925 // Produce echo estimate s_fft. | |
926 WebRtcAec_FilterFar(num_partitions, *x_fft_buf_block_pos, x_fft_buf, | |
927 h_fft_buf, s_fft); | |
928 | |
929 // Compute the time-domain echo estimate s. | |
930 ScaledInverseFft(s_fft, s_extended, 2.0f, 0); | |
931 s = &s_extended[PART_LEN]; | |
932 | |
933 // Compute the time-domain echo prediction error. | |
934 for (i = 0; i < PART_LEN; ++i) { | |
935 e[i] = y[i] - s[i]; | |
936 } | |
937 | |
938 // Compute the frequency domain echo prediction error. | |
939 memset(e_extended, 0, sizeof(float) * PART_LEN); | |
940 memcpy(e_extended + PART_LEN, e, sizeof(float) * PART_LEN); | |
941 Fft(e_extended, e_fft); | |
942 | |
943 RTC_AEC_DEBUG_RAW_WRITE(aec->e_fft_file, &e_fft[0][0], | |
944 sizeof(e_fft[0][0]) * PART_LEN1 * 2); | |
945 | |
946 // Scale error signal inversely with far power. | |
947 WebRtcAec_ScaleErrorSignal(extended_filter_enabled, normal_mu, | |
948 normal_error_threshold, x_pow, e_fft); | |
949 WebRtcAec_FilterAdaptation(num_partitions, *x_fft_buf_block_pos, x_fft_buf, | |
950 e_fft, h_fft_buf); | |
951 memcpy(echo_subtractor_output, e, sizeof(float) * PART_LEN); | |
952 } | |
953 | |
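// Non-linear processing (NLP) for one block: windowed spectra of the
// near-end, the echo-subtractor output and a delayed far-end partition
// (chosen via WebRtcAec_PartitionDelay) are turned into subband coherences,
// from which a per-bin suppression gain |hNl| and an overdrive factor are
// derived. The gain is applied together with comfort noise, and the result
// is brought back to the time domain by overlap-add. The upper band(s) are
// scaled with a single averaged gain plus comfort noise.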
954 static void EchoSuppression(AecCore* aec, | |
955 float farend[PART_LEN2], | |
956 float* echo_subtractor_output, | |
957 float* output, | |
958 float* const* outputH) { | |
959 float efw[2][PART_LEN1]; | |
960 float xfw[2][PART_LEN1]; | |
961 float dfw[2][PART_LEN1]; | |
962 float comfortNoiseHband[2][PART_LEN1]; | |
963 float fft[PART_LEN2]; | |
964 float nlpGainHband; | |
965 int i; | |
966 size_t j; | |
967 | |
968 // Coherence and non-linear filter | |
969 float cohde[PART_LEN1], cohxd[PART_LEN1]; | |
970 float hNlDeAvg, hNlXdAvg; | |
971 float hNl[PART_LEN1]; | |
972 float hNlPref[kPrefBandSize]; | |
973 float hNlFb = 0, hNlFbLow = 0; | |
974 const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f; | |
975 const int prefBandSize = kPrefBandSize / aec->mult; | |
976 const int minPrefBand = 4 / aec->mult; | |
977 // Power estimate smoothing coefficients. | |
978 const float* min_overdrive = aec->extended_filter_enabled | |
979 ? kExtendedMinOverDrive | |
980 : kNormalMinOverDrive; | |
981 | |
982 // Filter energy | |
983 const int delayEstInterval = 10 * aec->mult; | |
984 | |
985 float* xfw_ptr = NULL; | |
986 | |
987 // Update eBuf with echo subtractor output. | |
988 memcpy(aec->eBuf + PART_LEN, echo_subtractor_output, | |
989 sizeof(float) * PART_LEN); | |
990 | |
991 // Analysis filter banks for the echo suppressor. | |
992 // Windowed near-end ffts. | |
993 WindowData(fft, aec->dBuf); | |
994 aec_rdft_forward_128(fft); | |
995 StoreAsComplex(fft, dfw); | |
996 | |
997 // Windowed echo suppressor output ffts. | |
998 WindowData(fft, aec->eBuf); | |
999 aec_rdft_forward_128(fft); | |
1000 StoreAsComplex(fft, efw); | |
1001 | |
1002 // NLP | |
1003 | |
1004 // Convert far-end partition to the frequency domain with windowing. | |
1005 WindowData(fft, farend); | |
1006 Fft(fft, xfw); | |
1007 xfw_ptr = &xfw[0][0]; | |
1008 | |
1009 // Buffer far. | |
1010 memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1); | |
1011 | |
1012 aec->delayEstCtr++; | |
1013 if (aec->delayEstCtr == delayEstInterval) { | |
1014 aec->delayEstCtr = 0; | |
1015 aec->delayIdx = WebRtcAec_PartitionDelay(aec); | |
1016 } | |
1017 | |
1018 // Use delayed far. | |
1019 memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1, | |
1020 sizeof(xfw[0][0]) * 2 * PART_LEN1); | |
1021 | |
1022 WebRtcAec_SubbandCoherence(aec, efw, dfw, xfw, fft, cohde, cohxd, | |
1023 &aec->extreme_filter_divergence); | |
1024 | |
1025 // Select the microphone signal as output if the filter is deemed to have | |
1026 // diverged. | |
1027 if (aec->divergeState) { | |
1028 memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1); | |
1029 } | |
1030 | |
1031 hNlXdAvg = 0; | |
1032 for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) { | |
1033 hNlXdAvg += cohxd[i]; | |
1034 } | |
1035 hNlXdAvg /= prefBandSize; | |
1036 hNlXdAvg = 1 - hNlXdAvg; | |
1037 | |
1038 hNlDeAvg = 0; | |
1039 for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) { | |
1040 hNlDeAvg += cohde[i]; | |
1041 } | |
1042 hNlDeAvg /= prefBandSize; | |
1043 | |
1044 if (hNlXdAvg < 0.75f && hNlXdAvg < aec->hNlXdAvgMin) { | |
1045 aec->hNlXdAvgMin = hNlXdAvg; | |
1046 } | |
1047 | |
1048 if (hNlDeAvg > 0.98f && hNlXdAvg > 0.9f) { | |
1049 aec->stNearState = 1; | |
1050 } else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) { | |
1051 aec->stNearState = 0; | |
1052 } | |
1053 | |
1054 if (aec->hNlXdAvgMin == 1) { | |
1055 aec->echoState = 0; | |
1056 aec->overDrive = min_overdrive[aec->nlp_mode]; | |
1057 | |
1058 if (aec->stNearState == 1) { | |
1059 memcpy(hNl, cohde, sizeof(hNl)); | |
1060 hNlFb = hNlDeAvg; | |
1061 hNlFbLow = hNlDeAvg; | |
1062 } else { | |
1063 for (i = 0; i < PART_LEN1; i++) { | |
1064 hNl[i] = 1 - cohxd[i]; | |
1065 } | |
1066 hNlFb = hNlXdAvg; | |
1067 hNlFbLow = hNlXdAvg; | |
1068 } | |
1069 } else { | |
1070 if (aec->stNearState == 1) { | |
1071 aec->echoState = 0; | |
1072 memcpy(hNl, cohde, sizeof(hNl)); | |
1073 hNlFb = hNlDeAvg; | |
1074 hNlFbLow = hNlDeAvg; | |
1075 } else { | |
1076 aec->echoState = 1; | |
1077 for (i = 0; i < PART_LEN1; i++) { | |
1078 hNl[i] = WEBRTC_SPL_MIN(cohde[i], 1 - cohxd[i]); | |
1079 } | |
1080 | |
1081 // Select an order statistic from the preferred bands. | |
1082 // TODO: Using quicksort now, but a selection algorithm may be preferred. | |
1083 memcpy(hNlPref, &hNl[minPrefBand], sizeof(float) * prefBandSize); | |
1084 qsort(hNlPref, prefBandSize, sizeof(float), CmpFloat); | |
1085 hNlFb = hNlPref[(int)floor(prefBandQuant * (prefBandSize - 1))]; | |
1086 hNlFbLow = hNlPref[(int)floor(prefBandQuantLow * (prefBandSize - 1))]; | |
1087 } | |
1088 } | |
1089 | |
1090 // Track the local filter minimum to determine suppression overdrive. | |
1091 if (hNlFbLow < 0.6f && hNlFbLow < aec->hNlFbLocalMin) { | |
1092 aec->hNlFbLocalMin = hNlFbLow; | |
1093 aec->hNlFbMin = hNlFbLow; | |
1094 aec->hNlNewMin = 1; | |
1095 aec->hNlMinCtr = 0; | |
1096 } | |
1097 aec->hNlFbLocalMin = | |
1098 WEBRTC_SPL_MIN(aec->hNlFbLocalMin + 0.0008f / aec->mult, 1); | |
1099 aec->hNlXdAvgMin = WEBRTC_SPL_MIN(aec->hNlXdAvgMin + 0.0006f / aec->mult, 1); | |
1100 | |
1101 if (aec->hNlNewMin == 1) { | |
1102 aec->hNlMinCtr++; | |
1103 } | |
1104 if (aec->hNlMinCtr == 2) { | |
1105 aec->hNlNewMin = 0; | |
1106 aec->hNlMinCtr = 0; | |
1107 aec->overDrive = | |
1108 WEBRTC_SPL_MAX(kTargetSupp[aec->nlp_mode] / | |
1109 ((float)log(aec->hNlFbMin + 1e-10f) + 1e-10f), | |
1110 min_overdrive[aec->nlp_mode]); | |
1111 } | |
1112 | |
1113 // Smooth the overdrive. | |
1114 if (aec->overDrive < aec->overDriveSm) { | |
1115 aec->overDriveSm = 0.99f * aec->overDriveSm + 0.01f * aec->overDrive; | |
1116 } else { | |
1117 aec->overDriveSm = 0.9f * aec->overDriveSm + 0.1f * aec->overDrive; | |
1118 } | |
1119 | |
1120 WebRtcAec_OverdriveAndSuppress(aec, hNl, hNlFb, efw); | |
1121 | |
1122 // Add comfort noise. | |
1123 WebRtcAec_ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl); | |
1124 | |
1125 // Inverse error fft. | |
1126 ScaledInverseFft(efw, fft, 2.0f, 1); | |
1127 | |
1128 // TODO(bjornv): Investigate how to take the windowing below into account if | |
1129 // needed. | |
1130 if (aec->metricsMode == 1) { | |
1131 // Note that we have a scaling by two in the time domain |eBuf|. | |
1132 // In addition the time domain signal is windowed before transformation, | |
1133 // losing half the energy on the average. We take care of the first | |
1134 // scaling only in UpdateMetrics(). | |
1135 UpdateLevel(&aec->nlpoutlevel, CalculatePower(fft, PART_LEN2)); | |
1136 } | |
1137 | |
1138 // Overlap and add to obtain output. | |
1139 for (i = 0; i < PART_LEN; i++) { | |
1140 output[i] = (fft[i] * WebRtcAec_sqrtHanning[i] + | |
1141 aec->outBuf[i] * WebRtcAec_sqrtHanning[PART_LEN - i]); | |
1142 | |
1143 // Saturate output to keep it in the allowed range. | |
1144 output[i] = | |
1145 WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, output[i], WEBRTC_SPL_WORD16_MIN); | |
1146 } | |
1147 memcpy(aec->outBuf, &fft[PART_LEN], PART_LEN * sizeof(aec->outBuf[0])); | |
1148 | |
1149 // For H band | |
1150 if (aec->num_bands > 1) { | |
1151 // H band gain | |
1152 // average nlp over low band: average over second half of freq spectrum | |
1153 // (4->8khz) | |
1154 GetHighbandGain(hNl, &nlpGainHband); | |
1155 | |
1156 // Inverse comfort_noise | |
1157 ScaledInverseFft(comfortNoiseHband, fft, 2.0f, 0); | |
1158 | |
1159 // compute gain factor | |
1160 for (j = 0; j < aec->num_bands - 1; ++j) { | |
1161 for (i = 0; i < PART_LEN; i++) { | |
1162 outputH[j][i] = aec->dBufH[j][i] * nlpGainHband; | |
1163 } | |
1164 } | |
1165 | |
1166 // Add some comfort noise where Hband is attenuated. | |
1167 for (i = 0; i < PART_LEN; i++) { | |
1168 outputH[0][i] += cnScaleHband * fft[i]; | |
1169 } | |
1170 | |
1171 // Saturate output to keep it in the allowed range. | |
1172 for (j = 0; j < aec->num_bands - 1; ++j) { | |
1173 for (i = 0; i < PART_LEN; i++) { | |
1174 outputH[j][i] = WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, outputH[j][i], | |
1175 WEBRTC_SPL_WORD16_MIN); | |
1176 } | |
1177 } | |
1178 } | |
1179 | |
1180 // Copy the current block to the old position. | |
1181 memcpy(aec->dBuf, aec->dBuf + PART_LEN, sizeof(float) * PART_LEN); | |
1182 memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN); | |
1183 | |
1184 // Copy the current block to the old position for H band | |
1185 for (j = 0; j < aec->num_bands - 1; ++j) { | |
1186 memcpy(aec->dBufH[j], aec->dBufH[j] + PART_LEN, sizeof(float) * PART_LEN); | |
1187 } | |
1188 | |
1189 memmove(aec->xfwBuf + PART_LEN1, aec->xfwBuf, | |
1190 sizeof(aec->xfwBuf) - sizeof(complex_t) * PART_LEN1); | |
1191 } | |
1192 | |
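// Processes one 64-sample block at the core rate: reads buffered near- and
// far-end blocks, updates level and power estimates and the noise floor used
// for comfort noise, feeds the delay estimator used for logging, runs linear
// echo subtraction followed by echo suppression, and writes the result to the
// output buffers.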
1193 static void ProcessBlock(AecCore* aec) { | |
1194 size_t i; | |
1195 | |
1196 float fft[PART_LEN2]; | |
1197 float x_fft[2][PART_LEN1]; | |
1198 float df[2][PART_LEN1]; | |
1199 float far_spectrum = 0.0f; | |
1200 float near_spectrum = 0.0f; | |
1201 float abs_far_spectrum[PART_LEN1]; | |
1202 float abs_near_spectrum[PART_LEN1]; | |
1203 | |
1204 const float gPow[2] = {0.9f, 0.1f}; | |
1205 | |
1206 // Noise estimate constants. | |
1207 const int noiseInitBlocks = 500 * aec->mult; | |
1208 const float step = 0.1f; | |
1209 const float ramp = 1.0002f; | |
1210 const float gInitNoise[2] = {0.999f, 0.001f}; | |
1211 | |
1212 float nearend[PART_LEN]; | |
1213 float* nearend_ptr = NULL; | |
1214 float farend[PART_LEN2]; | |
1215 float* farend_ptr = NULL; | |
1216 float echo_subtractor_output[PART_LEN]; | |
1217 float output[PART_LEN]; | |
1218 float outputH[NUM_HIGH_BANDS_MAX][PART_LEN]; | |
1219 float* outputH_ptr[NUM_HIGH_BANDS_MAX]; | |
1220 float* x_fft_ptr = NULL; | |
1221 | |
1222 for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) { | |
1223 outputH_ptr[i] = outputH[i]; | |
1224 } | |
1225 | |
1226 // Concatenate old and new nearend blocks. | |
1227 for (i = 0; i < aec->num_bands - 1; ++i) { | |
1228 WebRtc_ReadBuffer(aec->nearFrBufH[i], (void**)&nearend_ptr, nearend, | |
1229 PART_LEN); | |
1230 memcpy(aec->dBufH[i] + PART_LEN, nearend_ptr, sizeof(nearend)); | |
1231 } | |
1232 WebRtc_ReadBuffer(aec->nearFrBuf, (void**)&nearend_ptr, nearend, PART_LEN); | |
1233 memcpy(aec->dBuf + PART_LEN, nearend_ptr, sizeof(nearend)); | |
1234 | |
1235 // We should always have at least one element stored in |far_buf|. | |
1236 assert(WebRtc_available_read(aec->far_time_buf) > 0); | |
1237 WebRtc_ReadBuffer(aec->far_time_buf, (void**)&farend_ptr, farend, 1); | |
1238 | |
1239 #ifdef WEBRTC_AEC_DEBUG_DUMP | |
1240 { | |
1241 // TODO(minyue): |farend_ptr| starts from buffered samples. This will be | |
1242 // modified when |aec->far_time_buf| is revised. | |
1243 RTC_AEC_DEBUG_WAV_WRITE(aec->farFile, &farend_ptr[PART_LEN], PART_LEN); | |
1244 | |
1245 RTC_AEC_DEBUG_WAV_WRITE(aec->nearFile, nearend_ptr, PART_LEN); | |
1246 } | |
1247 #endif | |
1248 | |
1249 if (aec->metricsMode == 1) { | |
1250 // Update power levels | |
1251 UpdateLevel(&aec->farlevel, | |
1252 CalculatePower(&farend_ptr[PART_LEN], PART_LEN)); | |
1253 UpdateLevel(&aec->nearlevel, CalculatePower(nearend_ptr, PART_LEN)); | |
1254 } | |
1255 | |
1256 // Convert far-end signal to the frequency domain. | |
1257 memcpy(fft, farend_ptr, sizeof(float) * PART_LEN2); | |
1258 Fft(fft, x_fft); | |
1259 x_fft_ptr = &x_fft[0][0]; | |
1260 | |
1261 // Near fft | |
1262 memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2); | |
1263 Fft(fft, df); | |
1264 | |
1265 // Power smoothing | |
1266 for (i = 0; i < PART_LEN1; i++) { | |
1267 far_spectrum = (x_fft_ptr[i] * x_fft_ptr[i]) + | |
1268 (x_fft_ptr[PART_LEN1 + i] * x_fft_ptr[PART_LEN1 + i]); | |
1269 aec->xPow[i] = | |
1270 gPow[0] * aec->xPow[i] + gPow[1] * aec->num_partitions * far_spectrum; | |
1271 // Calculate absolute spectra | |
1272 abs_far_spectrum[i] = sqrtf(far_spectrum); | |
1273 | |
1274 near_spectrum = df[0][i] * df[0][i] + df[1][i] * df[1][i]; | |
1275 aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum; | |
1276 // Calculate absolute spectra | |
1277 abs_near_spectrum[i] = sqrtf(near_spectrum); | |
1278 } | |
1279 | |
1280 // Estimate noise power. Wait until dPow is more stable. | |
1281 if (aec->noiseEstCtr > 50) { | |
1282 for (i = 0; i < PART_LEN1; i++) { | |
1283 if (aec->dPow[i] < aec->dMinPow[i]) { | |
1284 aec->dMinPow[i] = | |
1285 (aec->dPow[i] + step * (aec->dMinPow[i] - aec->dPow[i])) * ramp; | |
1286 } else { | |
1287 aec->dMinPow[i] *= ramp; | |
1288 } | |
1289 } | |
1290 } | |
1291 | |
1292 // Smooth increasing noise power from zero at the start, | |
1293 // to avoid a sudden burst of comfort noise. | |
1294 if (aec->noiseEstCtr < noiseInitBlocks) { | |
1295 aec->noiseEstCtr++; | |
1296 for (i = 0; i < PART_LEN1; i++) { | |
1297 if (aec->dMinPow[i] > aec->dInitMinPow[i]) { | |
1298 aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] + | |
1299 gInitNoise[1] * aec->dMinPow[i]; | |
1300 } else { | |
1301 aec->dInitMinPow[i] = aec->dMinPow[i]; | |
1302 } | |
1303 } | |
1304 aec->noisePow = aec->dInitMinPow; | |
1305 } else { | |
1306 aec->noisePow = aec->dMinPow; | |
1307 } | |
1308 | |
1309 // Block wise delay estimation used for logging | |
1310 if (aec->delay_logging_enabled) { | |
1311 if (WebRtc_AddFarSpectrumFloat(aec->delay_estimator_farend, | |
1312 abs_far_spectrum, PART_LEN1) == 0) { | |
1313 int delay_estimate = WebRtc_DelayEstimatorProcessFloat( | |
1314 aec->delay_estimator, abs_near_spectrum, PART_LEN1); | |
1315 if (delay_estimate >= 0) { | |
1316 // Update delay estimate buffer. | |
1317 aec->delay_histogram[delay_estimate]++; | |
1318 aec->num_delay_values++; | |
1319 } | |
1320 if (aec->delay_metrics_delivered == 1 && | |
1321 aec->num_delay_values >= kDelayMetricsAggregationWindow) { | |
1322 UpdateDelayMetrics(aec); | |
1323 } | |
1324 } | |
1325 } | |
1326 | |
1327 // Perform echo subtraction. | |
1328 EchoSubtraction(aec, aec->num_partitions, aec->extended_filter_enabled, | |
1329 aec->normal_mu, aec->normal_error_threshold, &x_fft[0][0], | |
1330 &aec->xfBufBlockPos, aec->xfBuf, nearend_ptr, aec->xPow, | |
1331 aec->wfBuf, echo_subtractor_output); | |
1332 | |
1333 RTC_AEC_DEBUG_WAV_WRITE(aec->outLinearFile, echo_subtractor_output, PART_LEN); | |
1334 | |
1335 if (aec->metricsMode == 1) { | |
1336 UpdateLevel(&aec->linoutlevel, | |
1337 CalculatePower(echo_subtractor_output, PART_LEN)); | |
1338 } | |
1339 | |
1340 // Perform echo suppression. | |
1341 EchoSuppression(aec, farend_ptr, echo_subtractor_output, output, outputH_ptr); | |
1342 | |
1343 if (aec->metricsMode == 1) { | |
1344 UpdateMetrics(aec); | |
1345 } | |
1346 | |
1347 // Store the output block. | |
1348 WebRtc_WriteBuffer(aec->outFrBuf, output, PART_LEN); | |
1349 // For high bands | |
1350 for (i = 0; i < aec->num_bands - 1; ++i) { | |
1351 WebRtc_WriteBuffer(aec->outFrBufH[i], outputH[i], PART_LEN); | |
1352 } | |
1353 | |
1354 RTC_AEC_DEBUG_WAV_WRITE(aec->outFile, output, PART_LEN); | |
1355 } | |
1356 | |
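// Rough usage sketch (simplified; the real call sequence is driven through
// the module API on top of the entry points declared in aec_core.h):
//
//   AecCore* aec = WebRtcAec_CreateAec();
//   if (aec != NULL && WebRtcAec_InitAec(aec, 16000) == 0) {
//     // ... buffer far-end audio and process near-end frames ...
//   }
//   WebRtcAec_FreeAec(aec);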
1357 AecCore* WebRtcAec_CreateAec() { | |
1358 int i; | |
1359 AecCore* aec = malloc(sizeof(AecCore)); | |
1360 if (!aec) { | |
1361 return NULL; | |
1362 } | |
1363 | |
1364 aec->nearFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float)); | |
1365 if (!aec->nearFrBuf) { | |
1366 WebRtcAec_FreeAec(aec); | |
1367 return NULL; | |
1368 } | |
1369 | |
1370 aec->outFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float)); | |
1371 if (!aec->outFrBuf) { | |
1372 WebRtcAec_FreeAec(aec); | |
1373 return NULL; | |
1374 } | |
1375 | |
1376 for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) { | |
1377 aec->nearFrBufH[i] = | |
1378 WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float)); | |
1379 if (!aec->nearFrBufH[i]) { | |
1380 WebRtcAec_FreeAec(aec); | |
1381 return NULL; | |
1382 } | |
1383 aec->outFrBufH[i] = | |
1384 WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float)); | |
1385 if (!aec->outFrBufH[i]) { | |
1386 WebRtcAec_FreeAec(aec); | |
1387 return NULL; | |
1388 } | |
1389 } | |
1390 | |
1391 // Create far-end buffers. | |
1392 // For bit exactness with legacy code, each element in |far_time_buf| is | |
1393 // supposed to contain |PART_LEN2| samples with an overlap of |PART_LEN| | |
1394 // samples from the last frame. | |
1395 // TODO(minyue): reduce |far_time_buf| to non-overlapped |PART_LEN| samples. | |
1396 aec->far_time_buf = | |
1397 WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * PART_LEN2); | |
1398 if (!aec->far_time_buf) { | |
1399 WebRtcAec_FreeAec(aec); | |
1400 return NULL; | |
1401 } | |
1402 | |
1403 #ifdef WEBRTC_AEC_DEBUG_DUMP | |
1404 aec->instance_index = webrtc_aec_instance_count; | |
1405 | |
1406 aec->farFile = aec->nearFile = aec->outFile = aec->outLinearFile = NULL; | |
1407 aec->debug_dump_count = 0; | |
1408 #endif | |
1409 aec->delay_estimator_farend = | |
1410 WebRtc_CreateDelayEstimatorFarend(PART_LEN1, kHistorySizeBlocks); | |
1411 if (aec->delay_estimator_farend == NULL) { | |
1412 WebRtcAec_FreeAec(aec); | |
1413 return NULL; | |
1414 } | |
1415 // We create the delay_estimator with the same amount of maximum lookahead as | |
1416 // the delay history size (kHistorySizeBlocks) for symmetry reasons. | |
1417 aec->delay_estimator = WebRtc_CreateDelayEstimator( | |
1418 aec->delay_estimator_farend, kHistorySizeBlocks); | |
1419 if (aec->delay_estimator == NULL) { | |
1420 WebRtcAec_FreeAec(aec); | |
1421 return NULL; | |
1422 } | |
1423 #ifdef WEBRTC_ANDROID | |
1424 aec->delay_agnostic_enabled = 1; // DA-AEC enabled by default. | |
1425 // DA-AEC assumes the system is causal from the beginning and will self adjust | |
1426 // the lookahead when shifting is required. | |
1427 WebRtc_set_lookahead(aec->delay_estimator, 0); | |
1428 #else | |
1429 aec->delay_agnostic_enabled = 0; | |
1430 WebRtc_set_lookahead(aec->delay_estimator, kLookaheadBlocks); | |
1431 #endif | |
1432 aec->extended_filter_enabled = 0; | |
1433 aec->next_generation_aec_enabled = 0; | |
1434 | |
1435 // Assembly optimization | |
1436 WebRtcAec_FilterFar = FilterFar; | |
1437 WebRtcAec_ScaleErrorSignal = ScaleErrorSignal; | |
1438 WebRtcAec_FilterAdaptation = FilterAdaptation; | |
1439 WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppress; | |
1440 WebRtcAec_ComfortNoise = ComfortNoise; | |
1441 WebRtcAec_SubbandCoherence = SubbandCoherence; | |
1442 WebRtcAec_StoreAsComplex = StoreAsComplex; | |
1443 WebRtcAec_PartitionDelay = PartitionDelay; | |
1444 WebRtcAec_WindowData = WindowData; | |
1445 | |
1446 #if defined(WEBRTC_ARCH_X86_FAMILY) | |
1447 if (WebRtc_GetCPUInfo(kSSE2)) { | |
1448 WebRtcAec_InitAec_SSE2(); | |
1449 } | |
1450 #endif | |
1451 | |
1452 #if defined(MIPS_FPU_LE) | |
1453 WebRtcAec_InitAec_mips(); | |
1454 #endif | |
1455 | |
1456 #if defined(WEBRTC_HAS_NEON) | |
1457 WebRtcAec_InitAec_neon(); | |
1458 #elif defined(WEBRTC_DETECT_NEON) | |
1459 if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) { | |
1460 WebRtcAec_InitAec_neon(); | |
1461 } | |
1462 #endif | |
1463 | |
1464 aec_rdft_init(); | |
1465 | |
1466 return aec; | |
1467 } | |
1468 | |
1469 void WebRtcAec_FreeAec(AecCore* aec) { | |
1470 int i; | |
1471 if (aec == NULL) { | |
1472 return; | |
1473 } | |
1474 | |
1475 WebRtc_FreeBuffer(aec->nearFrBuf); | |
1476 WebRtc_FreeBuffer(aec->outFrBuf); | |
1477 | |
1478 for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) { | |
1479 WebRtc_FreeBuffer(aec->nearFrBufH[i]); | |
1480 WebRtc_FreeBuffer(aec->outFrBufH[i]); | |
1481 } | |
1482 | |
1483 WebRtc_FreeBuffer(aec->far_time_buf); | |
1484 | |
1485 RTC_AEC_DEBUG_WAV_CLOSE(aec->farFile); | |
1486 RTC_AEC_DEBUG_WAV_CLOSE(aec->nearFile); | |
1487 RTC_AEC_DEBUG_WAV_CLOSE(aec->outFile); | |
1488 RTC_AEC_DEBUG_WAV_CLOSE(aec->outLinearFile); | |
1489 RTC_AEC_DEBUG_RAW_CLOSE(aec->e_fft_file); | |
1490 | |
1491 WebRtc_FreeDelayEstimator(aec->delay_estimator); | |
1492 WebRtc_FreeDelayEstimatorFarend(aec->delay_estimator_farend); | |
1493 | |
1494 free(aec); | |
1495 } | |

int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
  int i;

  aec->sampFreq = sampFreq;

  if (sampFreq == 8000) {
    aec->normal_mu = 0.6f;
    aec->normal_error_threshold = 2e-6f;
    aec->num_bands = 1;
  } else {
    aec->normal_mu = 0.5f;
    aec->normal_error_threshold = 1.5e-6f;
    aec->num_bands = (size_t)(sampFreq / 16000);
  }
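  // For reference: sampFreq of 8000 or 16000 gives one band, 32000 gives two
  // and 48000 gives three (the lower band plus any H bands).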

  WebRtc_InitBuffer(aec->nearFrBuf);
  WebRtc_InitBuffer(aec->outFrBuf);
  for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
    WebRtc_InitBuffer(aec->nearFrBufH[i]);
    WebRtc_InitBuffer(aec->outFrBufH[i]);
  }

  // Initialize far-end buffers.
  WebRtc_InitBuffer(aec->far_time_buf);

#ifdef WEBRTC_AEC_DEBUG_DUMP
  {
    int process_rate = sampFreq > 16000 ? 16000 : sampFreq;
    RTC_AEC_DEBUG_WAV_REOPEN("aec_far", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->farFile);
    RTC_AEC_DEBUG_WAV_REOPEN("aec_near", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->nearFile);
    RTC_AEC_DEBUG_WAV_REOPEN("aec_out", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->outFile);
    RTC_AEC_DEBUG_WAV_REOPEN("aec_out_linear", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->outLinearFile);
  }

  RTC_AEC_DEBUG_RAW_OPEN("aec_e_fft", aec->debug_dump_count, &aec->e_fft_file);

  ++aec->debug_dump_count;
#endif
  aec->system_delay = 0;

  if (WebRtc_InitDelayEstimatorFarend(aec->delay_estimator_farend) != 0) {
    return -1;
  }
  if (WebRtc_InitDelayEstimator(aec->delay_estimator) != 0) {
    return -1;
  }
  aec->delay_logging_enabled = 0;
  aec->delay_metrics_delivered = 0;
  memset(aec->delay_histogram, 0, sizeof(aec->delay_histogram));
  aec->num_delay_values = 0;
  aec->delay_median = -1;
  aec->delay_std = -1;
  aec->fraction_poor_delays = -1.0f;

  aec->signal_delay_correction = 0;
  aec->previous_delay = -2;  // (-2): Uninitialized.
  aec->delay_correction_count = 0;
  aec->shift_offset = kInitialShiftOffset;
  aec->delay_quality_threshold = kDelayQualityThresholdMin;

  aec->num_partitions = kNormalNumPartitions;

  // Update the delay estimator with the filter length. We use half of
  // |num_partitions| to take the echo path into account. In practice we
  // assume the echo has a duration of at most half |num_partitions|, which is
  // not strictly true, but serves as a crude measure.
  WebRtc_set_allowed_offset(aec->delay_estimator, aec->num_partitions / 2);
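  // (The allowed offset is given in partitions; note that it is recomputed in
  // WebRtcAec_enable_extended_filter() when the longer filter is selected.)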
  // TODO(bjornv): The enable is currently hard-coded. Once we've established
  // that AECM has no performance regression, robust_validation will be enabled
  // all the time and the APIs to turn it on/off will be removed. Hence, remove
  // this line then.
  WebRtc_enable_robust_validation(aec->delay_estimator, 1);
  aec->frame_count = 0;

  // Default target suppression mode.
  aec->nlp_mode = 1;

  // Sampling frequency multiplier w.r.t. 8 kHz.
  // In case of multiple bands we process the lower band at 16 kHz, hence the
  // multiplier is always 2.
  if (aec->num_bands > 1) {
    aec->mult = 2;
  } else {
    aec->mult = (short)aec->sampFreq / 8000;
  }
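  // For reference: 8 kHz input gives mult == 1 and 16 kHz gives mult == 2;
  // for 32 kHz and 48 kHz input num_bands > 1, so mult is 2 as well.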

  aec->farBufWritePos = 0;
  aec->farBufReadPos = 0;

  aec->inSamples = 0;
  aec->outSamples = 0;
  aec->knownDelay = 0;

  // Initialize buffers
  memset(aec->dBuf, 0, sizeof(aec->dBuf));
  memset(aec->eBuf, 0, sizeof(aec->eBuf));
  // For H bands
  for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
    memset(aec->dBufH[i], 0, sizeof(aec->dBufH[i]));
  }

  memset(aec->xPow, 0, sizeof(aec->xPow));
  memset(aec->dPow, 0, sizeof(aec->dPow));
  memset(aec->dInitMinPow, 0, sizeof(aec->dInitMinPow));
  aec->noisePow = aec->dInitMinPow;
  aec->noiseEstCtr = 0;

  // Initial comfort noise power
  for (i = 0; i < PART_LEN1; i++) {
    aec->dMinPow[i] = 1.0e6f;
  }

  // Holds the last block written to
  aec->xfBufBlockPos = 0;
  // TODO: Investigate the need for these initializations. Deleting them
  // doesn't change the output at all and yields a 0.4% overall speedup.
  memset(aec->xfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
  memset(aec->wfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
  memset(aec->sde, 0, sizeof(complex_t) * PART_LEN1);
  memset(aec->sxd, 0, sizeof(complex_t) * PART_LEN1);
  memset(aec->xfwBuf, 0,
         sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
  memset(aec->se, 0, sizeof(float) * PART_LEN1);

  // To prevent numerical instability in the first block.
  for (i = 0; i < PART_LEN1; i++) {
    aec->sd[i] = 1;
  }
  for (i = 0; i < PART_LEN1; i++) {
    aec->sx[i] = 1;
  }
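  // Starting |sd| and |sx| at one rather than zero keeps these power spectra
  // strictly positive, so the coherence estimates derived from them in the
  // first block do not divide by zero.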

  memset(aec->hNs, 0, sizeof(aec->hNs));
  memset(aec->outBuf, 0, sizeof(float) * PART_LEN);

  aec->hNlFbMin = 1;
  aec->hNlFbLocalMin = 1;
  aec->hNlXdAvgMin = 1;
  aec->hNlNewMin = 0;
  aec->hNlMinCtr = 0;
  aec->overDrive = 2;
  aec->overDriveSm = 2;
  aec->delayIdx = 0;
  aec->stNearState = 0;
  aec->echoState = 0;
  aec->divergeState = 0;

  aec->seed = 777;
  aec->delayEstCtr = 0;

  aec->extreme_filter_divergence = 0;

  // Metrics disabled by default
  aec->metricsMode = 0;
  InitMetrics(aec);

  return 0;
}

// For bit exactness with legacy code, |farend| is supposed to contain
// |PART_LEN2| samples with an overlap of |PART_LEN| samples from the last
// frame.
// TODO(minyue): reduce |farend| to non-overlapped |PART_LEN| samples.
void WebRtcAec_BufferFarendPartition(AecCore* aec, const float* farend) {
  // Check if the buffer is full, and in that case flush the oldest data.
  if (WebRtc_available_write(aec->far_time_buf) < 1) {
    WebRtcAec_MoveFarReadPtr(aec, 1);
  }

  WebRtc_WriteBuffer(aec->far_time_buf, farend, 1);
}
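// Each element of |far_time_buf| therefore holds one such |PART_LEN2|-sample
// partition; when the buffer is full the oldest partition is dropped via
// WebRtcAec_MoveFarReadPtr(), which also adjusts |system_delay| accordingly.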

int WebRtcAec_MoveFarReadPtr(AecCore* aec, int elements) {
  int elements_moved = WebRtc_MoveReadPtr(aec->far_time_buf, elements);
  aec->system_delay -= elements_moved * PART_LEN;
  return elements_moved;
}
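// For example, a call with elements == -2 rewinds the far-end read pointer by
// up to two partitions and increases |system_delay| by PART_LEN samples for
// each element actually rewound; the (possibly clamped) number of moved
// elements is returned.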

void WebRtcAec_ProcessFrames(AecCore* aec,
                             const float* const* nearend,
                             size_t num_bands,
                             size_t num_samples,
                             int knownDelay,
                             float* const* out) {
  size_t i, j;
  int out_elements = 0;

  aec->frame_count++;
  // For each frame the process is as follows:
  // 1) If the system_delay indicates that it is too small for processing a
  //    frame we stuff the buffer with enough data for 10 ms.
  // 2 a) Adjust the buffer to the system delay, by moving the read pointer.
  //   b) Apply signal based delay correction, if we have detected poor AEC
  //      performance.
  // 3) TODO(bjornv): Investigate if we need to add this:
  //    If we can't move the read pointer due to buffer size limitations we
  //    flush/stuff the buffer.
  // 4) Process as many partitions as possible.
  // 5) Update the |system_delay| with respect to a full frame of FRAME_LEN
  //    samples. Even though we will have data left to process (we work with
  //    partitions) we consider updating a whole frame, since that's the
  //    amount of data we input and output in audio_processing.
  // 6) Update the outputs.

  // The AEC has two different delay estimation algorithms built in. The
  // first relies on delay input values from the user and the amount of
  // shifted buffer elements is controlled by |knownDelay|. This delay will
  // give a guess on how much we need to shift far-end buffers to align with
  // the near-end signal. The other delay estimation algorithm uses the
  // far- and near-end signals to find the offset between them. This one
  // (called "signal delay") is then used to fine tune the alignment, or
  // simply compensate for errors in the system-based one.
  // Note that the two algorithms operate independently. Currently, we only
  // allow one algorithm to be turned on.
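  // In the loop below, |delay_agnostic_enabled| selects between them: the
  // 2 a) branch applies the reported (system based) delay correction and the
  // 2 b) branch the signal based one.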

  assert(aec->num_bands == num_bands);

  for (j = 0; j < num_samples; j += FRAME_LEN) {
    // TODO(bjornv): Change the near-end buffer handling to be the same as for
    // far-end, that is, with a near_pre_buf.
    // Buffer the near-end frame.
    WebRtc_WriteBuffer(aec->nearFrBuf, &nearend[0][j], FRAME_LEN);
    // For H band
    for (i = 1; i < num_bands; ++i) {
      WebRtc_WriteBuffer(aec->nearFrBufH[i - 1], &nearend[i][j], FRAME_LEN);
    }

    // 1) At most we process |aec->mult|+1 partitions in 10 ms. Make sure we
    //    have enough far-end data for that by stuffing the buffer if the
    //    |system_delay| indicates otherwise.
    if (aec->system_delay < FRAME_LEN) {
      // We don't have enough data so we rewind 10 ms.
      WebRtcAec_MoveFarReadPtr(aec, -(aec->mult + 1));
    }

    if (!aec->delay_agnostic_enabled) {
      // 2 a) Compensate for a possible change in the system delay.

      // TODO(bjornv): Investigate how we should round the delay difference;
      // right now we know that incoming |knownDelay| is underestimated when
      // it's less than |aec->knownDelay|. We therefore round (-32) in that
      // direction. In the other direction, we don't have this situation, but
      // might flush one partition too little. This can cause non-causality,
      // which should be investigated. Maybe allow for a non-symmetric
      // rounding, like -16.
      int move_elements = (aec->knownDelay - knownDelay - 32) / PART_LEN;
      int moved_elements = WebRtc_MoveReadPtr(aec->far_time_buf, move_elements);
      aec->knownDelay -= moved_elements * PART_LEN;
    } else {
      // 2 b) Apply signal based delay correction.
      int move_elements = SignalBasedDelayCorrection(aec);
      int moved_elements = WebRtc_MoveReadPtr(aec->far_time_buf, move_elements);
      int far_near_buffer_diff =
          WebRtc_available_read(aec->far_time_buf) -
          WebRtc_available_read(aec->nearFrBuf) / PART_LEN;
      WebRtc_SoftResetDelayEstimator(aec->delay_estimator, moved_elements);
      WebRtc_SoftResetDelayEstimatorFarend(aec->delay_estimator_farend,
                                           moved_elements);
      aec->signal_delay_correction += moved_elements;
      // If we rely on reported system delay values only, a buffer underrun
      // here can never occur since we've taken care of that in 1) above.
      // Here, we apply signal based delay correction and can therefore end up
      // with buffer underruns since the delay estimation can be wrong. We
      // therefore stuff the buffer with enough elements if needed.
      if (far_near_buffer_diff < 0) {
        WebRtcAec_MoveFarReadPtr(aec, far_near_buffer_diff);
      }
    }

    // 4) Process as many blocks as possible.
    while (WebRtc_available_read(aec->nearFrBuf) >= PART_LEN) {
      ProcessBlock(aec);
    }

    // 5) Update system delay with respect to the entire frame.
    aec->system_delay -= FRAME_LEN;

    // 6) Update output frame.
    // Stuff the out buffer if we have less than a frame to output.
    // This should only happen for the first frame.
    out_elements = (int)WebRtc_available_read(aec->outFrBuf);
    if (out_elements < FRAME_LEN) {
      WebRtc_MoveReadPtr(aec->outFrBuf, out_elements - FRAME_LEN);
      for (i = 0; i < num_bands - 1; ++i) {
        WebRtc_MoveReadPtr(aec->outFrBufH[i], out_elements - FRAME_LEN);
      }
    }
    // Obtain an output frame.
    WebRtc_ReadBuffer(aec->outFrBuf, NULL, &out[0][j], FRAME_LEN);
    // For H bands.
    for (i = 1; i < num_bands; ++i) {
      WebRtc_ReadBuffer(aec->outFrBufH[i - 1], NULL, &out[i][j], FRAME_LEN);
    }
  }
}

int WebRtcAec_GetDelayMetricsCore(AecCore* self,
                                  int* median,
                                  int* std,
                                  float* fraction_poor_delays) {
  assert(self != NULL);
  assert(median != NULL);
  assert(std != NULL);

  if (self->delay_logging_enabled == 0) {
    // Logging disabled.
    return -1;
  }

  if (self->delay_metrics_delivered == 0) {
    UpdateDelayMetrics(self);
    self->delay_metrics_delivered = 1;
  }
  *median = self->delay_median;
  *std = self->delay_std;
  *fraction_poor_delays = self->fraction_poor_delays;

  return 0;
}
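// A minimal usage sketch (illustrative only; it assumes delay logging has
// been enabled, e.g. via WebRtcAec_SetConfigCore() below or implicitly by
// delay agnostic mode, since the call returns -1 otherwise):
//
//   int median = 0;
//   int std = 0;
//   float fraction_poor = 0.0f;
//   if (WebRtcAec_GetDelayMetricsCore(aec, &median, &std,
//                                     &fraction_poor) == 0) {
//     // The metrics are computed on demand if not already delivered.
//   }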

int WebRtcAec_echo_state(AecCore* self) {
  return self->echoState;
}

void WebRtcAec_GetEchoStats(AecCore* self,
                            Stats* erl,
                            Stats* erle,
                            Stats* a_nlp) {
  assert(erl != NULL);
  assert(erle != NULL);
  assert(a_nlp != NULL);
  *erl = self->erl;
  *erle = self->erle;
  *a_nlp = self->aNlp;
}

void WebRtcAec_SetConfigCore(AecCore* self,
                             int nlp_mode,
                             int metrics_mode,
                             int delay_logging) {
  assert(nlp_mode >= 0 && nlp_mode < 3);
  self->nlp_mode = nlp_mode;
  self->metricsMode = metrics_mode;
  if (self->metricsMode) {
    InitMetrics(self);
  }
  // Turn on delay logging if it is either set explicitly or if delay agnostic
  // AEC is enabled (which requires delay estimates).
  self->delay_logging_enabled = delay_logging || self->delay_agnostic_enabled;
  if (self->delay_logging_enabled) {
    memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
  }
}
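// An illustrative call (values are examples only; the assert above requires
// nlp_mode to be 0, 1 or 2):
//
//   WebRtcAec_SetConfigCore(aec, 1 /* default nlp mode */,
//                           1 /* metrics on */, 1 /* delay logging on */);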

void WebRtcAec_enable_delay_agnostic(AecCore* self, int enable) {
  self->delay_agnostic_enabled = enable;
}

int WebRtcAec_delay_agnostic_enabled(AecCore* self) {
  return self->delay_agnostic_enabled;
}

void WebRtcAec_enable_next_generation_aec(AecCore* self, int enable) {
  self->next_generation_aec_enabled = (enable != 0);
}

int WebRtcAec_next_generation_aec_enabled(AecCore* self) {
  assert(self->next_generation_aec_enabled == 0 ||
         self->next_generation_aec_enabled == 1);
  return self->next_generation_aec_enabled;
}

void WebRtcAec_enable_extended_filter(AecCore* self, int enable) {
  self->extended_filter_enabled = enable;
  self->num_partitions = enable ? kExtendedNumPartitions : kNormalNumPartitions;
  // Update the delay estimator with the filter length. See WebRtcAec_InitAec()
  // for details.
  WebRtc_set_allowed_offset(self->delay_estimator, self->num_partitions / 2);
}
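// Note that enabling the extended filter lengthens the adaptive filter from
// kNormalNumPartitions to kExtendedNumPartitions partitions (PART_LEN samples
// each), which is intended to cover longer echo tails at the cost of extra
// computation.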

int WebRtcAec_extended_filter_enabled(AecCore* self) {
  return self->extended_filter_enabled;
}

int WebRtcAec_system_delay(AecCore* self) {
  return self->system_delay;
}

void WebRtcAec_SetSystemDelay(AecCore* self, int delay) {
  assert(delay >= 0);
  self->system_delay = delay;
}