/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

/*
 * The core AEC algorithm, SSE2 version of speed-critical functions.
 */

#include <emmintrin.h>
#include <math.h>
#include <string.h>  // memset

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"

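// The helpers below compute the real and imaginary parts of the complex
// product (aRe + i*aIm) * (bRe + i*bIm), i.e. aRe*bRe - aIm*bIm and
// aRe*bIm + aIm*bRe.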
__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bRe - aIm * bIm;
}

__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bIm + aIm * bRe;
}

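// Accumulates the echo estimate: for each filter partition, the stored
// far-end spectrum block is multiplied (complex-valued, four bins at a time)
// with the corresponding filter partition and added into y_fft.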
static void FilterFarSSE2(int num_partitions,
                          int x_fft_buf_block_pos,
                          float x_fft_buf[2]
                                         [kExtendedNumPartitions * PART_LEN1],
                          float h_fft_buf[2]
                                         [kExtendedNumPartitions * PART_LEN1],
                          float y_fft[2][PART_LEN1]) {
  int i;
  for (i = 0; i < num_partitions; i++) {
    int j;
    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + x_fft_buf_block_pos >= num_partitions) {
      xPos -= num_partitions * (PART_LEN1);
    }

    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
      const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
      const __m128 h_fft_buf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
      const __m128 h_fft_buf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
      const __m128 y_fft_re = _mm_loadu_ps(&y_fft[0][j]);
      const __m128 y_fft_im = _mm_loadu_ps(&y_fft[1][j]);
      const __m128 a = _mm_mul_ps(x_fft_buf_re, h_fft_buf_re);
      const __m128 b = _mm_mul_ps(x_fft_buf_im, h_fft_buf_im);
      const __m128 c = _mm_mul_ps(x_fft_buf_re, h_fft_buf_im);
      const __m128 d = _mm_mul_ps(x_fft_buf_im, h_fft_buf_re);
      const __m128 e = _mm_sub_ps(a, b);
      const __m128 f = _mm_add_ps(c, d);
      const __m128 g = _mm_add_ps(y_fft_re, e);
      const __m128 h = _mm_add_ps(y_fft_im, f);
      _mm_storeu_ps(&y_fft[0][j], g);
      _mm_storeu_ps(&y_fft[1][j], h);
    }
    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
      y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
    }
  }
}
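
// Illustrative scalar sketch of the loop above (it mirrors what the vector
// path computes per frequency bin; this helper is not referenced by the
// module and exists only to document the math).
static void FilterFarScalarSketch(int num_partitions,
                                  int x_fft_buf_block_pos,
                                  float x_fft_buf[2]
                                                 [kExtendedNumPartitions *
                                                  PART_LEN1],
                                  float h_fft_buf[2]
                                                 [kExtendedNumPartitions *
                                                  PART_LEN1],
                                  float y_fft[2][PART_LEN1]) {
  int i, j;
  for (i = 0; i < num_partitions; i++) {
    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
    const int pos = i * PART_LEN1;
    // The far-end buffer is circular; wrap the read position.
    if (i + x_fft_buf_block_pos >= num_partitions) {
      xPos -= num_partitions * PART_LEN1;
    }
    for (j = 0; j < PART_LEN1; j++) {
      // y += x * h (complex multiply-accumulate per bin).
      y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
      y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
    }
  }
}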
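// Normalizes the error spectrum by the far-end power (the NLMS step), limits
// its magnitude to the error threshold, and applies the step size mu.
// Extended-filter mode substitutes its own mu and threshold constants.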
static void ScaleErrorSignalSSE2(int extended_filter_enabled,
                                 float normal_mu,
                                 float normal_error_threshold,
                                 float x_pow[PART_LEN1],
                                 float ef[2][PART_LEN1]) {
  const __m128 k1e_10f = _mm_set1_ps(1e-10f);
  const __m128 kMu = extended_filter_enabled ? _mm_set1_ps(kExtendedMu)
                                             : _mm_set1_ps(normal_mu);
  const __m128 kThresh = extended_filter_enabled
                             ? _mm_set1_ps(kExtendedErrorThreshold)
                             : _mm_set1_ps(normal_error_threshold);

  int i;
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 x_pow_local = _mm_loadu_ps(&x_pow[i]);
    const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
    const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);

    const __m128 xPowPlus = _mm_add_ps(x_pow_local, k1e_10f);
    __m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus);
    __m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus);
    const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re);
    const __m128 ef_im2 = _mm_mul_ps(ef_im, ef_im);
    const __m128 ef_sum2 = _mm_add_ps(ef_re2, ef_im2);
    const __m128 absEf = _mm_sqrt_ps(ef_sum2);
    const __m128 bigger = _mm_cmpgt_ps(absEf, kThresh);
    __m128 absEfPlus = _mm_add_ps(absEf, k1e_10f);
    const __m128 absEfInv = _mm_div_ps(kThresh, absEfPlus);
    __m128 ef_re_if = _mm_mul_ps(ef_re, absEfInv);
    __m128 ef_im_if = _mm_mul_ps(ef_im, absEfInv);
    ef_re_if = _mm_and_ps(bigger, ef_re_if);
    ef_im_if = _mm_and_ps(bigger, ef_im_if);
    ef_re = _mm_andnot_ps(bigger, ef_re);
    ef_im = _mm_andnot_ps(bigger, ef_im);
    ef_re = _mm_or_ps(ef_re, ef_re_if);
    ef_im = _mm_or_ps(ef_im, ef_im_if);
    ef_re = _mm_mul_ps(ef_re, kMu);
    ef_im = _mm_mul_ps(ef_im, kMu);

    _mm_storeu_ps(&ef[0][i], ef_re);
    _mm_storeu_ps(&ef[1][i], ef_im);
  }
  // scalar code for the remaining items.
  {
    const float mu = extended_filter_enabled ? kExtendedMu : normal_mu;
    const float error_threshold = extended_filter_enabled
                                      ? kExtendedErrorThreshold
                                      : normal_error_threshold;
    for (; i < (PART_LEN1); i++) {
      float abs_ef;
      ef[0][i] /= (x_pow[i] + 1e-10f);
      ef[1][i] /= (x_pow[i] + 1e-10f);
      abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);

      if (abs_ef > error_threshold) {
        abs_ef = error_threshold / (abs_ef + 1e-10f);
        ef[0][i] *= abs_ef;
        ef[1][i] *= abs_ef;
      }

      // Stepsize factor
      ef[0][i] *= mu;
      ef[1][i] *= mu;
    }
  }
}
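
// Note on the branchless pattern above: _mm_cmpgt_ps() yields an all-ones
// mask in each lane where |ef| exceeds the threshold, so
//   result = (mask & clamped) | (~mask & unclamped)
// selects the limited value in those lanes and the untouched value elsewhere,
// avoiding a per-lane branch.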
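// Adapts the filter partitions: correlates the far-end spectrum with the
// scaled error spectrum (a conjugate multiply), constrains the update in the
// time domain, and accumulates it into h_fft_buf.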
static void FilterAdaptationSSE2(
    int num_partitions,
    int x_fft_buf_block_pos,
    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
    float e_fft[2][PART_LEN1],
    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
  float fft[PART_LEN2];
  int i, j;
  for (i = 0; i < num_partitions; i++) {
    int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1);
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + x_fft_buf_block_pos >= num_partitions) {
      xPos -= num_partitions * PART_LEN1;
    }

    // Process the whole array...
    for (j = 0; j < PART_LEN; j += 4) {
      // Load x_fft_buf and e_fft.
      const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
      const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
      const __m128 e_fft_re = _mm_loadu_ps(&e_fft[0][j]);
      const __m128 e_fft_im = _mm_loadu_ps(&e_fft[1][j]);
      // Calculate the product of conjugate(x_fft_buf) by e_fft.
      //   re(conjugate(a) * b) = aRe * bRe + aIm * bIm
      //   im(conjugate(a) * b) = aRe * bIm - aIm * bRe
      const __m128 a = _mm_mul_ps(x_fft_buf_re, e_fft_re);
      const __m128 b = _mm_mul_ps(x_fft_buf_im, e_fft_im);
      const __m128 c = _mm_mul_ps(x_fft_buf_re, e_fft_im);
      const __m128 d = _mm_mul_ps(x_fft_buf_im, e_fft_re);
      const __m128 e = _mm_add_ps(a, b);
      const __m128 f = _mm_sub_ps(c, d);
      // Interleave real and imaginary parts.
      const __m128 g = _mm_unpacklo_ps(e, f);
      const __m128 h = _mm_unpackhi_ps(e, f);
      // Store
      _mm_storeu_ps(&fft[2 * j + 0], g);
      _mm_storeu_ps(&fft[2 * j + 4], h);
    }
    // ... and fixup the first imaginary entry.
    fft[1] =
        MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN],
              e_fft[0][PART_LEN], e_fft[1][PART_LEN]);

    aec_rdft_inverse_128(fft);
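    // Zeroing the second half of the time-domain update below constrains each
    // partition's impulse response to PART_LEN samples (the gradient
    // constraint of partitioned-block frequency-domain adaptation).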
    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);

    // fft scaling
    {
      float scale = 2.0f / PART_LEN2;
      const __m128 scale_ps = _mm_load_ps1(&scale);
      for (j = 0; j < PART_LEN; j += 4) {
        const __m128 fft_ps = _mm_loadu_ps(&fft[j]);
        const __m128 fft_scale = _mm_mul_ps(fft_ps, scale_ps);
        _mm_storeu_ps(&fft[j], fft_scale);
      }
    }
    aec_rdft_forward_128(fft);

    {
      float wt1 = h_fft_buf[1][pos];
      h_fft_buf[0][pos + PART_LEN] += fft[1];
      for (j = 0; j < PART_LEN; j += 4) {
        __m128 wtBuf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
        __m128 wtBuf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
        const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]);
        const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]);
        const __m128 fft_re =
            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 fft_im =
            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(3, 1, 3, 1));
        wtBuf_re = _mm_add_ps(wtBuf_re, fft_re);
        wtBuf_im = _mm_add_ps(wtBuf_im, fft_im);
        _mm_storeu_ps(&h_fft_buf[0][pos + j], wtBuf_re);
        _mm_storeu_ps(&h_fft_buf[1][pos + j], wtBuf_im);
      }
      h_fft_buf[1][pos] = wt1;
    }
  }
}

static __m128 mm_pow_ps(__m128 a, __m128 b) {
  // a^b = exp2(b * log2(a))
  // exp2(x) and log2(x) are calculated using polynomial approximations.
  __m128 log2_a, b_log2_a, a_exp_b;

  // Calculate log2(x), x = a.
  {
    // To calculate log2(x), we decompose x like this:
    //   x = y * 2^n
    //   n is an integer
    //   y is in the [1.0, 2.0) range
    //
    //   log2(x) = log2(y) + n
    // n can be evaluated by playing with the float representation.
    // log2(y) in a small range can be approximated; this code uses an order
    // five polynomial approximation. The coefficients have been estimated
    // with the Remez algorithm, and the resulting polynomial has a maximum
    // relative error of 0.00086%.

    // Compute n.
    // This is done by masking the exponent, shifting it into the top bit of
    // the mantissa, putting eight into the biased exponent (to shift/
    // compensate for the fact that the exponent has been shifted into the
    // top/fractional part) and finally getting rid of the implicit leading
    // one from the mantissa by subtracting it out.
    static const ALIGN16_BEG int float_exponent_mask[4] ALIGN16_END = {
        0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
    static const ALIGN16_BEG int eight_biased_exponent[4] ALIGN16_END = {
        0x43800000, 0x43800000, 0x43800000, 0x43800000};
    static const ALIGN16_BEG int implicit_leading_one[4] ALIGN16_END = {
        0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000};
    static const int shift_exponent_into_top_mantissa = 8;
    const __m128 two_n = _mm_and_ps(a, *((__m128*)float_exponent_mask));
    const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32(
        _mm_castps_si128(two_n), shift_exponent_into_top_mantissa));
    const __m128 n_0 = _mm_or_ps(n_1, *((__m128*)eight_biased_exponent));
    const __m128 n = _mm_sub_ps(n_0, *((__m128*)implicit_leading_one));

    // Compute y.
    static const ALIGN16_BEG int mantissa_mask[4] ALIGN16_END = {
        0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF};
    static const ALIGN16_BEG int zero_biased_exponent_is_one[4] ALIGN16_END = {
        0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000};
    const __m128 mantissa = _mm_and_ps(a, *((__m128*)mantissa_mask));
    const __m128 y =
        _mm_or_ps(mantissa, *((__m128*)zero_biased_exponent_is_one));

    // Approximate log2(y) ~= (y - 1) * pol5(y).
    //   pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
    static const ALIGN16_BEG float ALIGN16_END C5[4] = {
        -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f};
    static const ALIGN16_BEG float ALIGN16_END C4[4] = {
        3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f};
    static const ALIGN16_BEG float ALIGN16_END C3[4] = {
        -1.2315303f, -1.2315303f, -1.2315303f, -1.2315303f};
    static const ALIGN16_BEG float ALIGN16_END C2[4] = {2.5988452f, 2.5988452f,
                                                        2.5988452f, 2.5988452f};
    static const ALIGN16_BEG float ALIGN16_END C1[4] = {
        -3.3241990f, -3.3241990f, -3.3241990f, -3.3241990f};
    static const ALIGN16_BEG float ALIGN16_END C0[4] = {3.1157899f, 3.1157899f,
                                                        3.1157899f, 3.1157899f};
    const __m128 pol5_y_0 = _mm_mul_ps(y, *((__m128*)C5));
    const __m128 pol5_y_1 = _mm_add_ps(pol5_y_0, *((__m128*)C4));
    const __m128 pol5_y_2 = _mm_mul_ps(pol5_y_1, y);
    const __m128 pol5_y_3 = _mm_add_ps(pol5_y_2, *((__m128*)C3));
    const __m128 pol5_y_4 = _mm_mul_ps(pol5_y_3, y);
    const __m128 pol5_y_5 = _mm_add_ps(pol5_y_4, *((__m128*)C2));
    const __m128 pol5_y_6 = _mm_mul_ps(pol5_y_5, y);
    const __m128 pol5_y_7 = _mm_add_ps(pol5_y_6, *((__m128*)C1));
    const __m128 pol5_y_8 = _mm_mul_ps(pol5_y_7, y);
    const __m128 pol5_y = _mm_add_ps(pol5_y_8, *((__m128*)C0));
    const __m128 y_minus_one =
        _mm_sub_ps(y, *((__m128*)zero_biased_exponent_is_one));
    const __m128 log2_y = _mm_mul_ps(y_minus_one, pol5_y);

    // Combine parts.
    log2_a = _mm_add_ps(n, log2_y);
  }

  // b * log2(a)
  b_log2_a = _mm_mul_ps(b, log2_a);

  // Calculate exp2(x), x = b * log2(a).
  {
    // To calculate 2^x, we decompose x like this:
    //   x = n + y
    //   n is an integer, the value of x - 0.5 rounded down, therefore
    //   y is in the [0.5, 1.5) range
    //
    //   2^x = 2^n * 2^y
    // 2^n can be evaluated by playing with the float representation.
    // 2^y in a small range can be approximated; this code uses an order two
    // polynomial approximation. The coefficients have been estimated with
    // the Remez algorithm, and the resulting polynomial has a maximum
    // relative error of 0.17%.

    // To avoid over/underflow, we reduce the range of input to (-127, 129].
    static const ALIGN16_BEG float max_input[4] ALIGN16_END = {129.f, 129.f,
                                                               129.f, 129.f};
    static const ALIGN16_BEG float min_input[4] ALIGN16_END = {
        -126.99999f, -126.99999f, -126.99999f, -126.99999f};
    const __m128 x_min = _mm_min_ps(b_log2_a, *((__m128*)max_input));
    const __m128 x_max = _mm_max_ps(x_min, *((__m128*)min_input));
    // Compute n.
    static const ALIGN16_BEG float half[4] ALIGN16_END = {0.5f, 0.5f, 0.5f,
                                                          0.5f};
    const __m128 x_minus_half = _mm_sub_ps(x_max, *((__m128*)half));
    const __m128i x_minus_half_floor = _mm_cvtps_epi32(x_minus_half);
    // Compute 2^n.
    static const ALIGN16_BEG int float_exponent_bias[4] ALIGN16_END = {
        127, 127, 127, 127};
    static const int float_exponent_shift = 23;
    const __m128i two_n_exponent =
        _mm_add_epi32(x_minus_half_floor, *((__m128i*)float_exponent_bias));
    const __m128 two_n =
        _mm_castsi128_ps(_mm_slli_epi32(two_n_exponent, float_exponent_shift));
    // Compute y.
    const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor));
    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
    static const ALIGN16_BEG float C2[4] ALIGN16_END = {
        3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f};
    static const ALIGN16_BEG float C1[4] ALIGN16_END = {
        6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f};
    static const ALIGN16_BEG float C0[4] ALIGN16_END = {1.0017247f, 1.0017247f,
                                                        1.0017247f, 1.0017247f};
    const __m128 exp2_y_0 = _mm_mul_ps(y, *((__m128*)C2));
    const __m128 exp2_y_1 = _mm_add_ps(exp2_y_0, *((__m128*)C1));
    const __m128 exp2_y_2 = _mm_mul_ps(exp2_y_1, y);
    const __m128 exp2_y = _mm_add_ps(exp2_y_2, *((__m128*)C0));

    // Combine parts.
    a_exp_b = _mm_mul_ps(exp2_y, two_n);
  }
  return a_exp_b;
}
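
// For reference: the scalar operation approximated by mm_pow_ps() is simply
// exp2f(b * log2f(a)); the vector version trades a little accuracy (see the
// error bounds quoted above) for speed. An illustrative sketch, not
// referenced by this file:
static float ScalarPowSketch(float a, float b) {
  // Assumes a >= 0, as in the caller below (hNl and the curves are
  // non-negative).
  return exp2f(b * log2f(a));
}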

static void OverdriveAndSuppressSSE2(AecCore* aec,
                                     float hNl[PART_LEN1],
                                     const float hNlFb,
                                     float efw[2][PART_LEN1]) {
  int i;
  const __m128 vec_hNlFb = _mm_set1_ps(hNlFb);
  const __m128 vec_one = _mm_set1_ps(1.0f);
  const __m128 vec_minus_one = _mm_set1_ps(-1.0f);
  const __m128 vec_overDriveSm = _mm_set1_ps(aec->overDriveSm);
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    // Weight subbands
    __m128 vec_hNl = _mm_loadu_ps(&hNl[i]);
    const __m128 vec_weightCurve = _mm_loadu_ps(&WebRtcAec_weightCurve[i]);
    const __m128 bigger = _mm_cmpgt_ps(vec_hNl, vec_hNlFb);
    const __m128 vec_weightCurve_hNlFb = _mm_mul_ps(vec_weightCurve, vec_hNlFb);
    const __m128 vec_one_weightCurve = _mm_sub_ps(vec_one, vec_weightCurve);
    const __m128 vec_one_weightCurve_hNl =
        _mm_mul_ps(vec_one_weightCurve, vec_hNl);
    const __m128 vec_if0 = _mm_andnot_ps(bigger, vec_hNl);
    const __m128 vec_if1 = _mm_and_ps(
        bigger, _mm_add_ps(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl));
    vec_hNl = _mm_or_ps(vec_if0, vec_if1);

    {
      const __m128 vec_overDriveCurve =
          _mm_loadu_ps(&WebRtcAec_overDriveCurve[i]);
      const __m128 vec_overDriveSm_overDriveCurve =
          _mm_mul_ps(vec_overDriveSm, vec_overDriveCurve);
      vec_hNl = mm_pow_ps(vec_hNl, vec_overDriveSm_overDriveCurve);
      _mm_storeu_ps(&hNl[i], vec_hNl);
    }

    // Suppress error signal
    {
      __m128 vec_efw_re = _mm_loadu_ps(&efw[0][i]);
      __m128 vec_efw_im = _mm_loadu_ps(&efw[1][i]);
      vec_efw_re = _mm_mul_ps(vec_efw_re, vec_hNl);
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_hNl);

      // Ooura fft returns incorrect sign on imaginary component. It matters
      // here because we are making an additive change with comfort noise.
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_minus_one);
      _mm_storeu_ps(&efw[0][i], vec_efw_re);
      _mm_storeu_ps(&efw[1][i], vec_efw_im);
    }
  }
  // scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    // Weight subbands
    if (hNl[i] > hNlFb) {
      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
    }
    hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);

    // Suppress error signal
    efw[0][i] *= hNl[i];
    efw[1][i] *= hNl[i];

    // Ooura fft returns incorrect sign on imaginary component. It matters
    // here because we are making an additive change with comfort noise.
    efw[1][i] *= -1;
  }
}

__inline static void _mm_add_ps_4x1(__m128 sum, float* dst) {
  // A+B C+D
  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(0, 0, 3, 2)));
  // A+B+C+D A+B+C+D
  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(dst, sum);
}
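
// Usage sketch: a horizontal sum of one __m128 into a scalar, e.g.
//   __m128 v = _mm_set_ps(4.f, 3.f, 2.f, 1.f);  // lanes {1, 2, 3, 4}
//   float s;
//   _mm_add_ps_4x1(v, &s);                      // s == 10.f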

static int PartitionDelaySSE2(const AecCore* aec) {
  // Measures the energy in each filter partition and returns the partition
  // with the highest energy.
  // TODO(bjornv): Spread computational cost by computing one partition per
  // block?
  float wfEnMax = 0;
  int i;
  int delay = 0;

  for (i = 0; i < aec->num_partitions; i++) {
    int j;
    int pos = i * PART_LEN1;
    float wfEn = 0;
    __m128 vec_wfEn = _mm_set1_ps(0.0f);
    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 vec_wfBuf0 = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
      const __m128 vec_wfBuf1 = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf0, vec_wfBuf0));
      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf1, vec_wfBuf1));
    }
    _mm_add_ps_4x1(vec_wfEn, &wfEn);

    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
              aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
    }

    if (wfEn > wfEnMax) {
      wfEnMax = wfEn;
      delay = i;
    }
  }
  return delay;
}

// Updates the following smoothed Power Spectral Densities (PSD):
//  - sd  : near-end
//  - se  : residual echo
//  - sx  : far-end
//  - sde : cross-PSD of near-end and residual echo
//  - sxd : cross-PSD of near-end and far-end
//
// In addition to updating the PSDs, the filter divergence state is
// determined, and actions are taken based on it.
static void SmoothedPSD(AecCore* aec,
                        float efw[2][PART_LEN1],
                        float dfw[2][PART_LEN1],
                        float xfw[2][PART_LEN1],
                        int* extreme_filter_divergence) {
  // Power estimate smoothing coefficients.
  const float* ptrGCoh =
      aec->extended_filter_enabled
          ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
          : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1];
  int i;
  float sdSum = 0, seSum = 0;
  const __m128 vec_15 = _mm_set1_ps(WebRtcAec_kMinFarendPSD);
  const __m128 vec_GCoh0 = _mm_set1_ps(ptrGCoh[0]);
  const __m128 vec_GCoh1 = _mm_set1_ps(ptrGCoh[1]);
  __m128 vec_sdSum = _mm_set1_ps(0.0f);
  __m128 vec_seSum = _mm_set1_ps(0.0f);

  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 vec_dfw0 = _mm_loadu_ps(&dfw[0][i]);
    const __m128 vec_dfw1 = _mm_loadu_ps(&dfw[1][i]);
    const __m128 vec_efw0 = _mm_loadu_ps(&efw[0][i]);
    const __m128 vec_efw1 = _mm_loadu_ps(&efw[1][i]);
    const __m128 vec_xfw0 = _mm_loadu_ps(&xfw[0][i]);
    const __m128 vec_xfw1 = _mm_loadu_ps(&xfw[1][i]);
    __m128 vec_sd = _mm_mul_ps(_mm_loadu_ps(&aec->sd[i]), vec_GCoh0);
    __m128 vec_se = _mm_mul_ps(_mm_loadu_ps(&aec->se[i]), vec_GCoh0);
    __m128 vec_sx = _mm_mul_ps(_mm_loadu_ps(&aec->sx[i]), vec_GCoh0);
    __m128 vec_dfw_sumsq = _mm_mul_ps(vec_dfw0, vec_dfw0);
    __m128 vec_efw_sumsq = _mm_mul_ps(vec_efw0, vec_efw0);
    __m128 vec_xfw_sumsq = _mm_mul_ps(vec_xfw0, vec_xfw0);
    vec_dfw_sumsq = _mm_add_ps(vec_dfw_sumsq, _mm_mul_ps(vec_dfw1, vec_dfw1));
    vec_efw_sumsq = _mm_add_ps(vec_efw_sumsq, _mm_mul_ps(vec_efw1, vec_efw1));
    vec_xfw_sumsq = _mm_add_ps(vec_xfw_sumsq, _mm_mul_ps(vec_xfw1, vec_xfw1));
    vec_xfw_sumsq = _mm_max_ps(vec_xfw_sumsq, vec_15);
    vec_sd = _mm_add_ps(vec_sd, _mm_mul_ps(vec_dfw_sumsq, vec_GCoh1));
    vec_se = _mm_add_ps(vec_se, _mm_mul_ps(vec_efw_sumsq, vec_GCoh1));
    vec_sx = _mm_add_ps(vec_sx, _mm_mul_ps(vec_xfw_sumsq, vec_GCoh1));
    _mm_storeu_ps(&aec->sd[i], vec_sd);
    _mm_storeu_ps(&aec->se[i], vec_se);
    _mm_storeu_ps(&aec->sx[i], vec_sx);

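    // aec->sde and aec->sxd store interleaved {re, im} pairs; the shuffles
    // below deinterleave four pairs into a real vector (vec_a) and an
    // imaginary vector (vec_b), update both, and re-interleave on store.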
    {
      const __m128 vec_3210 = _mm_loadu_ps(&aec->sde[i][0]);
      const __m128 vec_7654 = _mm_loadu_ps(&aec->sde[i + 2][0]);
      __m128 vec_a =
          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(2, 0, 2, 0));
      __m128 vec_b =
          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_dfwefw0011 = _mm_mul_ps(vec_dfw0, vec_efw0);
      __m128 vec_dfwefw0110 = _mm_mul_ps(vec_dfw0, vec_efw1);
      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
      vec_dfwefw0011 =
          _mm_add_ps(vec_dfwefw0011, _mm_mul_ps(vec_dfw1, vec_efw1));
      vec_dfwefw0110 =
          _mm_sub_ps(vec_dfwefw0110, _mm_mul_ps(vec_dfw1, vec_efw0));
      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwefw0011, vec_GCoh1));
      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwefw0110, vec_GCoh1));
      _mm_storeu_ps(&aec->sde[i][0], _mm_unpacklo_ps(vec_a, vec_b));
      _mm_storeu_ps(&aec->sde[i + 2][0], _mm_unpackhi_ps(vec_a, vec_b));
    }

    {
      const __m128 vec_3210 = _mm_loadu_ps(&aec->sxd[i][0]);
      const __m128 vec_7654 = _mm_loadu_ps(&aec->sxd[i + 2][0]);
      __m128 vec_a =
          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(2, 0, 2, 0));
      __m128 vec_b =
          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_dfwxfw0011 = _mm_mul_ps(vec_dfw0, vec_xfw0);
      __m128 vec_dfwxfw0110 = _mm_mul_ps(vec_dfw0, vec_xfw1);
      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
      vec_dfwxfw0011 =
          _mm_add_ps(vec_dfwxfw0011, _mm_mul_ps(vec_dfw1, vec_xfw1));
      vec_dfwxfw0110 =
          _mm_sub_ps(vec_dfwxfw0110, _mm_mul_ps(vec_dfw1, vec_xfw0));
      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwxfw0011, vec_GCoh1));
      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwxfw0110, vec_GCoh1));
      _mm_storeu_ps(&aec->sxd[i][0], _mm_unpacklo_ps(vec_a, vec_b));
      _mm_storeu_ps(&aec->sxd[i + 2][0], _mm_unpackhi_ps(vec_a, vec_b));
    }

    vec_sdSum = _mm_add_ps(vec_sdSum, vec_sd);
    vec_seSum = _mm_add_ps(vec_seSum, vec_se);
  }

  _mm_add_ps_4x1(vec_sdSum, &sdSum);
  _mm_add_ps_4x1(vec_seSum, &seSum);

  for (; i < PART_LEN1; i++) {
    aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
                 ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
    aec->se[i] = ptrGCoh[0] * aec->se[i] +
                 ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
    // We threshold here to protect against the ill-effects of a zero farend.
    // The threshold is not arbitrarily chosen, but balances protection and
    // adverse interaction with the algorithm's tuning.
    // TODO(bjornv): investigate further why this is so sensitive.
    aec->sx[i] = ptrGCoh[0] * aec->sx[i] +
                 ptrGCoh[1] * WEBRTC_SPL_MAX(
                     xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
                     WebRtcAec_kMinFarendPSD);

    aec->sde[i][0] =
        ptrGCoh[0] * aec->sde[i][0] +
        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
    aec->sde[i][1] =
        ptrGCoh[0] * aec->sde[i][1] +
        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

    aec->sxd[i][0] =
        ptrGCoh[0] * aec->sxd[i][0] +
        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
    aec->sxd[i][1] =
        ptrGCoh[0] * aec->sxd[i][1] +
        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);

    sdSum += aec->sd[i];
    seSum += aec->se[i];
  }

  // Divergent filter safeguard update.
  aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;

  // Signal extreme filter divergence if the error is significantly larger
  // than the nearend (13 dB).
  *extreme_filter_divergence = (seSum > (19.95f * sdSum));
}

// Window time domain data to be used by the fft.
static void WindowDataSSE2(float* x_windowed, const float* x) {
  int i;
  for (i = 0; i < PART_LEN; i += 4) {
    const __m128 vec_Buf1 = _mm_loadu_ps(&x[i]);
    const __m128 vec_Buf2 = _mm_loadu_ps(&x[PART_LEN + i]);
    const __m128 vec_sqrtHanning = _mm_load_ps(&WebRtcAec_sqrtHanning[i]);
    // A B C D
    __m128 vec_sqrtHanning_rev =
        _mm_loadu_ps(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]);
    // D C B A
    vec_sqrtHanning_rev = _mm_shuffle_ps(
        vec_sqrtHanning_rev, vec_sqrtHanning_rev, _MM_SHUFFLE(0, 1, 2, 3));
    _mm_storeu_ps(&x_windowed[i], _mm_mul_ps(vec_Buf1, vec_sqrtHanning));
    _mm_storeu_ps(&x_windowed[PART_LEN + i],
                  _mm_mul_ps(vec_Buf2, vec_sqrtHanning_rev));
  }
}
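
// Scalar equivalent of the loop above, for reference: the first half of the
// block is weighted by the square-root Hanning window and the second half by
// the same window reversed, i.e. for i in [0, PART_LEN):
//   x_windowed[i]            = x[i] * WebRtcAec_sqrtHanning[i];
//   x_windowed[PART_LEN + i] = x[PART_LEN + i] *
//                              WebRtcAec_sqrtHanning[PART_LEN - i];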

// Puts fft output data into a complex valued array.
static void StoreAsComplexSSE2(const float* data,
                               float data_complex[2][PART_LEN1]) {
  int i;
  for (i = 0; i < PART_LEN; i += 4) {
    const __m128 vec_fft0 = _mm_loadu_ps(&data[2 * i]);
    const __m128 vec_fft4 = _mm_loadu_ps(&data[2 * i + 4]);
    const __m128 vec_a =
        _mm_shuffle_ps(vec_fft0, vec_fft4, _MM_SHUFFLE(2, 0, 2, 0));
    const __m128 vec_b =
        _mm_shuffle_ps(vec_fft0, vec_fft4, _MM_SHUFFLE(3, 1, 3, 1));
    _mm_storeu_ps(&data_complex[0][i], vec_a);
    _mm_storeu_ps(&data_complex[1][i], vec_b);
  }
  // fix beginning/end values
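  // (The rdft packs the two purely real bins into the first complex pair:
  // data[0] holds the DC bin and data[1] the Nyquist bin, which is why
  // data[1] ends up in data_complex[0][PART_LEN].)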
  data_complex[1][0] = 0;
  data_complex[1][PART_LEN] = 0;
  data_complex[0][0] = data[0];
  data_complex[0][PART_LEN] = data[1];
}

static void SubbandCoherenceSSE2(AecCore* aec,
                                 float efw[2][PART_LEN1],
                                 float dfw[2][PART_LEN1],
                                 float xfw[2][PART_LEN1],
                                 float* fft,
                                 float* cohde,
                                 float* cohxd,
                                 int* extreme_filter_divergence) {
  int i;

  SmoothedPSD(aec, efw, dfw, xfw, extreme_filter_divergence);

  {
    const __m128 vec_1eminus10 = _mm_set1_ps(1e-10f);

    // Subband coherence
    for (i = 0; i + 3 < PART_LEN1; i += 4) {
      const __m128 vec_sd = _mm_loadu_ps(&aec->sd[i]);
      const __m128 vec_se = _mm_loadu_ps(&aec->se[i]);
      const __m128 vec_sx = _mm_loadu_ps(&aec->sx[i]);
      const __m128 vec_sdse =
          _mm_add_ps(vec_1eminus10, _mm_mul_ps(vec_sd, vec_se));
      const __m128 vec_sdsx =
          _mm_add_ps(vec_1eminus10, _mm_mul_ps(vec_sd, vec_sx));
      const __m128 vec_sde_3210 = _mm_loadu_ps(&aec->sde[i][0]);
      const __m128 vec_sde_7654 = _mm_loadu_ps(&aec->sde[i + 2][0]);
      const __m128 vec_sxd_3210 = _mm_loadu_ps(&aec->sxd[i][0]);
      const __m128 vec_sxd_7654 = _mm_loadu_ps(&aec->sxd[i + 2][0]);
      const __m128 vec_sde_0 =
          _mm_shuffle_ps(vec_sde_3210, vec_sde_7654, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vec_sde_1 =
          _mm_shuffle_ps(vec_sde_3210, vec_sde_7654, _MM_SHUFFLE(3, 1, 3, 1));
      const __m128 vec_sxd_0 =
          _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vec_sxd_1 =
          _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654, _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_cohde = _mm_mul_ps(vec_sde_0, vec_sde_0);
      __m128 vec_cohxd = _mm_mul_ps(vec_sxd_0, vec_sxd_0);
      vec_cohde = _mm_add_ps(vec_cohde, _mm_mul_ps(vec_sde_1, vec_sde_1));
      vec_cohde = _mm_div_ps(vec_cohde, vec_sdse);
      vec_cohxd = _mm_add_ps(vec_cohxd, _mm_mul_ps(vec_sxd_1, vec_sxd_1));
      vec_cohxd = _mm_div_ps(vec_cohxd, vec_sdsx);
      _mm_storeu_ps(&cohde[i], vec_cohde);
      _mm_storeu_ps(&cohxd[i], vec_cohxd);
    }

    // scalar code for the remaining items.
    for (; i < PART_LEN1; i++) {
      cohde[i] =
          (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
          (aec->sd[i] * aec->se[i] + 1e-10f);
      cohxd[i] =
          (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
          (aec->sx[i] * aec->sd[i] + 1e-10f);
    }
  }
}

void WebRtcAec_InitAec_SSE2(void) {
  WebRtcAec_FilterFar = FilterFarSSE2;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
  WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
  WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
  WebRtcAec_SubbandCoherence = SubbandCoherenceSSE2;
  WebRtcAec_StoreAsComplex = StoreAsComplexSSE2;
  WebRtcAec_PartitionDelay = PartitionDelaySSE2;
  WebRtcAec_WindowData = WindowDataSSE2;
}
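
// Call-site sketch (an assumption about the surrounding module, for
// orientation only): WebRtcAec_InitAec() in aec_core.c first installs the
// generic C implementations of these function pointers and then, when SSE2
// support is detected at run time, calls WebRtcAec_InitAec_SSE2() to replace
// them with the versions above.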