| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 16 matching lines...) |
| 27 enum { kFloatExponentShift = 23 }; | 27 enum { kFloatExponentShift = 23 }; |
| 28 | 28 |
| 29 __inline static float MulRe(float aRe, float aIm, float bRe, float bIm) { | 29 __inline static float MulRe(float aRe, float aIm, float bRe, float bIm) { |
| 30 return aRe * bRe - aIm * bIm; | 30 return aRe * bRe - aIm * bIm; |
| 31 } | 31 } |
| 32 | 32 |
| 33 __inline static float MulIm(float aRe, float aIm, float bRe, float bIm) { | 33 __inline static float MulIm(float aRe, float aIm, float bRe, float bIm) { |
| 34 return aRe * bIm + aIm * bRe; | 34 return aRe * bIm + aIm * bRe; |
| 35 } | 35 } |
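MulRe() and MulIm() together form one complex multiply, (aRe + i*aIm)(bRe + i*bIm). A quick standalone check of the identities (a hypothetical test, not part of this CL; both expected values are exactly representable in float, so == comparison is safe):

    #include <assert.h>
    int main(void) {
      /* (1 + 2i) * (3 + 4i) = -5 + 10i. */
      assert(MulRe(1.0f, 2.0f, 3.0f, 4.0f) == -5.0f);
      assert(MulIm(1.0f, 2.0f, 3.0f, 4.0f) == 10.0f);
      return 0;
    }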
| 36 | 36 |
| 37 static void FilterFarNEON(int num_partitions, | 37 static void FilterFarNEON( |
| 38 int xfBufBlockPos, | 38 int num_partitions, |
| 39 float xfBuf[2][kExtendedNumPartitions * PART_LEN1], | 39 int x_fft_buf_block_pos, |
| 40 float wfBuf[2][kExtendedNumPartitions * PART_LEN1], | 40 const float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], |
| 41 float yf[2][PART_LEN1]) { | 41 const float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1], |
| | 42 float y_fft[2][PART_LEN1]) { |
| 42 int i; | 43 int i; |
| 43 const int num_partitions_local = num_partitions; | 44 const int num_partitions_local = num_partitions; |
| 44 for (i = 0; i < num_partitions_local; i++) { | 45 for (i = 0; i < num_partitions_local; i++) { |
| 45 int j; | 46 int j; |
| 46 int xPos = (i + xfBufBlockPos) * PART_LEN1; | 47 int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; |
| 47 int pos = i * PART_LEN1; | 48 int pos = i * PART_LEN1; |
| 48 // Check for wrap | 49 // Check for wrap |
| 49 if (i + xfBufBlockPos >= num_partitions_local) { | 50 if (i + x_fft_buf_block_pos >= num_partitions_local) { |
| 50 xPos -= num_partitions_local * PART_LEN1; | 51 xPos -= num_partitions_local * PART_LEN1; |
| 51 } | 52 } |
| 52 | 53 |
| 53 // vectorized code (four at once) | 54 // vectorized code (four at once) |
| 54 for (j = 0; j + 3 < PART_LEN1; j += 4) { | 55 for (j = 0; j + 3 < PART_LEN1; j += 4) { |
| 55 const float32x4_t xfBuf_re = vld1q_f32(&xfBuf[0][xPos + j]); | 56 const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]); |
| 56 const float32x4_t xfBuf_im = vld1q_f32(&xfBuf[1][xPos + j]); | 57 const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]); |
| 57 const float32x4_t wfBuf_re = vld1q_f32(&wfBuf[0][pos + j]); | 58 const float32x4_t h_fft_buf_re = vld1q_f32(&h_fft_buf[0][pos + j]); |
| 58 const float32x4_t wfBuf_im = vld1q_f32(&wfBuf[1][pos + j]); | 59 const float32x4_t h_fft_buf_im = vld1q_f32(&h_fft_buf[1][pos + j]); |
| 59 const float32x4_t yf_re = vld1q_f32(&yf[0][j]); | 60 const float32x4_t y_fft_re = vld1q_f32(&y_fft[0][j]); |
| 60 const float32x4_t yf_im = vld1q_f32(&yf[1][j]); | 61 const float32x4_t y_fft_im = vld1q_f32(&y_fft[1][j]); |
| 61 const float32x4_t a = vmulq_f32(xfBuf_re, wfBuf_re); | 62 const float32x4_t a = vmulq_f32(x_fft_buf_re, h_fft_buf_re); |
| 62 const float32x4_t e = vmlsq_f32(a, xfBuf_im, wfBuf_im); | 63 const float32x4_t e = vmlsq_f32(a, x_fft_buf_im, h_fft_buf_im); |
| 63 const float32x4_t c = vmulq_f32(xfBuf_re, wfBuf_im); | 64 const float32x4_t c = vmulq_f32(x_fft_buf_re, h_fft_buf_im); |
| 64 const float32x4_t f = vmlaq_f32(c, xfBuf_im, wfBuf_re); | 65 const float32x4_t f = vmlaq_f32(c, x_fft_buf_im, h_fft_buf_re); |
| 65 const float32x4_t g = vaddq_f32(yf_re, e); | 66 const float32x4_t g = vaddq_f32(y_fft_re, e); |
| 66 const float32x4_t h = vaddq_f32(yf_im, f); | 67 const float32x4_t h = vaddq_f32(y_fft_im, f); |
| 67 vst1q_f32(&yf[0][j], g); | 68 vst1q_f32(&y_fft[0][j], g); |
| 68 vst1q_f32(&yf[1][j], h); | 69 vst1q_f32(&y_fft[1][j], h); |
| 69 } | 70 } |
| 70 // scalar code for the remaining items. | 71 // scalar code for the remaining items. |
| 71 for (; j < PART_LEN1; j++) { | 72 for (; j < PART_LEN1; j++) { |
| 72 yf[0][j] += MulRe(xfBuf[0][xPos + j], | 73 y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], |
| 73 xfBuf[1][xPos + j], | 74 x_fft_buf[1][xPos + j], |
| 74 wfBuf[0][pos + j], | 75 h_fft_buf[0][pos + j], |
| 75 wfBuf[1][pos + j]); | 76 h_fft_buf[1][pos + j]); |
| 76 yf[1][j] += MulIm(xfBuf[0][xPos + j], | 77 y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], |
| 77 xfBuf[1][xPos + j], | 78 x_fft_buf[1][xPos + j], |
| 78 wfBuf[0][pos + j], | 79 h_fft_buf[0][pos + j], |
| 79 wfBuf[1][pos + j]); | 80 h_fft_buf[1][pos + j]); |
| 80 } | 81 } |
| 81 } | 82 } |
| 82 } | 83 } |
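The vector loop computes, for each partition i, y_fft += x_fft * h_fft as one complex multiply-accumulate per frequency bin; the scalar remainder loop gives the reference semantics (vmla/vmls rounding may differ slightly from separate multiply and add). For validation, a plain-C version of the whole routine might look like this (a sketch under the NEW-side signature; FilterFarScalar is a hypothetical name):

    static void FilterFarScalar(
        int num_partitions,
        int x_fft_buf_block_pos,
        const float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
        const float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
        float y_fft[2][PART_LEN1]) {
      int i;
      for (i = 0; i < num_partitions; i++) {
        int j;
        int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
        const int pos = i * PART_LEN1;
        /* Wrap xPos around the circular far-end FFT buffer. */
        if (i + x_fft_buf_block_pos >= num_partitions) {
          xPos -= num_partitions * PART_LEN1;
        }
        for (j = 0; j < PART_LEN1; j++) {
          /* y += x * h: accumulate the filtered far-end spectrum. */
          y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
                               h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
          y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
                               h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
        }
      }
    }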
| 83 | 84 |
| 84 // ARM64's arm_neon.h has already defined vdivq_f32 vsqrtq_f32. | 85 // ARM64's arm_neon.h has already defined vdivq_f32 vsqrtq_f32. |
| 85 #if !defined (WEBRTC_ARCH_ARM64) | 86 #if !defined (WEBRTC_ARCH_ARM64) |
| 86 static float32x4_t vdivq_f32(float32x4_t a, float32x4_t b) { | 87 static float32x4_t vdivq_f32(float32x4_t a, float32x4_t b) { |
| 87 int i; | 88 int i; |
| 88 float32x4_t x = vrecpeq_f32(b); | 89 float32x4_t x = vrecpeq_f32(b); |
| 89 // from arm documentation | 90 // from arm documentation |
| (...skipping 32 matching lines...) |
| 122 x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x); | 123 x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x); |
| 123 } | 124 } |
| 124 // sqrt(s) = s * 1/sqrt(s) | 125 // sqrt(s) = s * 1/sqrt(s) |
| 125 return vmulq_f32(s, x); | 126 return vmulq_f32(s, x); |
| 126 } | 127 } |
| 127 #endif // WEBRTC_ARCH_ARM64 | 128 #endif // WEBRTC_ARCH_ARM64 |
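On ARMv7, vdivq_f32 and vsqrtq_f32 have no single-instruction form, so they are emulated with reciprocal estimates refined by Newton-Raphson, as the tail of the skipped block shows. A self-contained sketch of the square-root pattern (assuming strictly positive inputs, which callers guarantee via a 1e-10f bias; s == 0 would yield NaN from 0 * inf):

    static float32x4_t Sqrtf32x4Sketch(float32x4_t s) {
      int i;
      /* ~8-bit initial estimate of 1/sqrt(s). */
      float32x4_t x = vrsqrteq_f32(s);
      /* vrsqrtsq_f32(x*x, s) returns (3 - s*x*x)/2, so each pass is one
       * Newton-Raphson step toward 1/sqrt(s); two passes reach roughly
       * full float precision. */
      for (i = 0; i < 2; i++) {
        x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x);
      }
      /* sqrt(s) = s * (1/sqrt(s)). */
      return vmulq_f32(s, x);
    }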
| 128 | 129 |
| 129 static void ScaleErrorSignalNEON(int extended_filter_enabled, | 130 static void ScaleErrorSignalNEON(int extended_filter_enabled, |
| 130 float normal_mu, | 131 float normal_mu, |
| 131 float normal_error_threshold, | 132 float normal_error_threshold, |
| 132 float *xPow, | 133 float xPow[PART_LEN1], |
| 133 float ef[2][PART_LEN1]) { | 134 float ef[2][PART_LEN1]) { |
| 134 const float mu = extended_filter_enabled ? kExtendedMu : normal_mu; | 135 const float mu = extended_filter_enabled ? kExtendedMu : normal_mu; |
| 135 const float error_threshold = extended_filter_enabled ? | 136 const float error_threshold = extended_filter_enabled ? |
| 136 kExtendedErrorThreshold : normal_error_threshold; | 137 kExtendedErrorThreshold : normal_error_threshold; |
| 137 const float32x4_t k1e_10f = vdupq_n_f32(1e-10f); | 138 const float32x4_t k1e_10f = vdupq_n_f32(1e-10f); |
| 138 const float32x4_t kMu = vmovq_n_f32(mu); | 139 const float32x4_t kMu = vmovq_n_f32(mu); |
| 139 const float32x4_t kThresh = vmovq_n_f32(error_threshold); | 140 const float32x4_t kThresh = vmovq_n_f32(error_threshold); |
| 140 int i; | 141 int i; |
| 141 // vectorized code (four at once) | 142 // vectorized code (four at once) |
| 142 for (i = 0; i + 3 < PART_LEN1; i += 4) { | 143 for (i = 0; i + 3 < PART_LEN1; i += 4) { |
| (...skipping 36 matching lines...) |
| 179 ef[0][i] *= abs_ef; | 180 ef[0][i] *= abs_ef; |
| 180 ef[1][i] *= abs_ef; | 181 ef[1][i] *= abs_ef; |
| 181 } | 182 } |
| 182 | 183 |
| 183 // Stepsize factor | 184 // Stepsize factor |
| 184 ef[0][i] *= mu; | 185 ef[0][i] *= mu; |
| 185 ef[1][i] *= mu; | 186 ef[1][i] *= mu; |
| 186 } | 187 } |
| 187 } | 188 } |
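Both the skipped vector body and the scalar remainder implement the same per-bin update: normalize the error spectrum by the far-end power, clamp its magnitude to error_threshold, then scale by the step size mu. A hedged scalar sketch of that math (hypothetical name; sqrtf comes from <math.h>, and the 1e-10f bias mirrors the k1e_10f constant above):

    static void ScaleErrorSignalScalar(float mu,
                                       float error_threshold,
                                       const float x_pow[PART_LEN1],
                                       float ef[2][PART_LEN1]) {
      int i;
      for (i = 0; i < PART_LEN1; i++) {
        float abs_ef;
        /* Normalized LMS: divide the error by the far-end power estimate. */
        ef[0][i] /= (x_pow[i] + 1e-10f);
        ef[1][i] /= (x_pow[i] + 1e-10f);
        /* Clamp the error magnitude to keep the adaptation stable. */
        abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
        if (abs_ef > error_threshold) {
          const float scale = error_threshold / abs_ef;
          ef[0][i] *= scale;
          ef[1][i] *= scale;
        }
        /* Stepsize factor. */
        ef[0][i] *= mu;
        ef[1][i] *= mu;
      }
    }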
| 188 | 189 |
| 189 static void FilterAdaptationNEON(AecCore* aec, | 190 static void FilterAdaptationNEON( |
| 190 float* fft, | 191 int num_partitions, |
| 191 float ef[2][PART_LEN1]) { | 192 int x_fft_buf_block_pos, |
| | 193 const float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], |
| | 194 const float e_fft[2][PART_LEN1], |
| | 195 float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { |
| | 196 float fft[PART_LEN2]; |
| 192 int i; | 197 int i; |
| 193 const int num_partitions = aec->num_partitions; | |
| 194 for (i = 0; i < num_partitions; i++) { | 198 for (i = 0; i < num_partitions; i++) { |
| 195 int xPos = (i + aec->xfBufBlockPos) * PART_LEN1; | 199 int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; |
| 196 int pos = i * PART_LEN1; | 200 int pos = i * PART_LEN1; |
| 197 int j; | 201 int j; |
| 198 // Check for wrap | 202 // Check for wrap |
| 199 if (i + aec->xfBufBlockPos >= num_partitions) { | 203 if (i + x_fft_buf_block_pos >= num_partitions) { |
| 200 xPos -= num_partitions * PART_LEN1; | 204 xPos -= num_partitions * PART_LEN1; |
| 201 } | 205 } |
| 202 | 206 |
| 203 // Process the whole array... | 207 // Process the whole array... |
| 204 for (j = 0; j < PART_LEN; j += 4) { | 208 for (j = 0; j < PART_LEN; j += 4) { |
| 205 // Load xfBuf and ef. | 209 // Load x_fft_buf and e_fft. |
| 206 const float32x4_t xfBuf_re = vld1q_f32(&aec->xfBuf[0][xPos + j]); | 210 const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]); |
| 207 const float32x4_t xfBuf_im = vld1q_f32(&aec->xfBuf[1][xPos + j]); | 211 const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]); |
| 208 const float32x4_t ef_re = vld1q_f32(&ef[0][j]); | 212 const float32x4_t e_fft_re = vld1q_f32(&e_fft[0][j]); |
| 209 const float32x4_t ef_im = vld1q_f32(&ef[1][j]); | 213 const float32x4_t e_fft_im = vld1q_f32(&e_fft[1][j]); |
| 210 // Calculate the product of conjugate(xfBuf) by ef. | 214 // Calculate the product of conjugate(x_fft_buf) by e_fft. |
| 211 // re(conjugate(a) * b) = aRe * bRe + aIm * bIm | 215 // re(conjugate(a) * b) = aRe * bRe + aIm * bIm |
| 212 // im(conjugate(a) * b)= aRe * bIm - aIm * bRe | 216 // im(conjugate(a) * b)= aRe * bIm - aIm * bRe |
| 213 const float32x4_t a = vmulq_f32(xfBuf_re, ef_re); | 217 const float32x4_t a = vmulq_f32(x_fft_buf_re, e_fft_re); |
| 214 const float32x4_t e = vmlaq_f32(a, xfBuf_im, ef_im); | 218 const float32x4_t e = vmlaq_f32(a, x_fft_buf_im, e_fft_im); |
| 215 const float32x4_t c = vmulq_f32(xfBuf_re, ef_im); | 219 const float32x4_t c = vmulq_f32(x_fft_buf_re, e_fft_im); |
| 216 const float32x4_t f = vmlsq_f32(c, xfBuf_im, ef_re); | 220 const float32x4_t f = vmlsq_f32(c, x_fft_buf_im, e_fft_re); |
| 217 // Interleave real and imaginary parts. | 221 // Interleave real and imaginary parts. |
| 218 const float32x4x2_t g_n_h = vzipq_f32(e, f); | 222 const float32x4x2_t g_n_h = vzipq_f32(e, f); |
| 219 // Store | 223 // Store |
| 220 vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]); | 224 vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]); |
| 221 vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]); | 225 vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]); |
| 222 } | 226 } |
| 223 // ... and fixup the first imaginary entry. | 227 // ... and fixup the first imaginary entry. |
| 224 fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN], | 228 fft[1] = MulRe(x_fft_buf[0][xPos + PART_LEN], |
| 225 -aec->xfBuf[1][xPos + PART_LEN], | 229 -x_fft_buf[1][xPos + PART_LEN], |
| 226 ef[0][PART_LEN], | 230 e_fft[0][PART_LEN], |
| 227 ef[1][PART_LEN]); | 231 e_fft[1][PART_LEN]); |
| 228 | 232 |
| 229 aec_rdft_inverse_128(fft); | 233 aec_rdft_inverse_128(fft); |
| 230 memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); | 234 memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); |
| 231 | 235 |
| 232 // fft scaling | 236 // fft scaling |
| 233 { | 237 { |
| 234 const float scale = 2.0f / PART_LEN2; | 238 const float scale = 2.0f / PART_LEN2; |
| 235 const float32x4_t scale_ps = vmovq_n_f32(scale); | 239 const float32x4_t scale_ps = vmovq_n_f32(scale); |
| 236 for (j = 0; j < PART_LEN; j += 4) { | 240 for (j = 0; j < PART_LEN; j += 4) { |
| 237 const float32x4_t fft_ps = vld1q_f32(&fft[j]); | 241 const float32x4_t fft_ps = vld1q_f32(&fft[j]); |
| 238 const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps); | 242 const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps); |
| 239 vst1q_f32(&fft[j], fft_scale); | 243 vst1q_f32(&fft[j], fft_scale); |
| 240 } | 244 } |
| 241 } | 245 } |
| 242 aec_rdft_forward_128(fft); | 246 aec_rdft_forward_128(fft); |
| 243 | 247 |
| 244 { | 248 { |
| 245 const float wt1 = aec->wfBuf[1][pos]; | 249 const float wt1 = h_fft_buf[1][pos]; |
| 246 aec->wfBuf[0][pos + PART_LEN] += fft[1]; | 250 h_fft_buf[0][pos + PART_LEN] += fft[1]; |
| 247 for (j = 0; j < PART_LEN; j += 4) { | 251 for (j = 0; j < PART_LEN; j += 4) { |
| 248 float32x4_t wtBuf_re = vld1q_f32(&aec->wfBuf[0][pos + j]); | 252 float32x4_t wtBuf_re = vld1q_f32(&h_fft_buf[0][pos + j]); |
| 249 float32x4_t wtBuf_im = vld1q_f32(&aec->wfBuf[1][pos + j]); | 253 float32x4_t wtBuf_im = vld1q_f32(&h_fft_buf[1][pos + j]); |
| 250 const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]); | 254 const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]); |
| 251 const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]); | 255 const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]); |
| 252 const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4); | 256 const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4); |
| 253 wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]); | 257 wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]); |
| 254 wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]); | 258 wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]); |
| 255 | 259 |
| 256 vst1q_f32(&aec->wfBuf[0][pos + j], wtBuf_re); | 260 vst1q_f32(&h_fft_buf[0][pos + j], wtBuf_re); |
| 257 vst1q_f32(&aec->wfBuf[1][pos + j], wtBuf_im); | 261 vst1q_f32(&h_fft_buf[1][pos + j], wtBuf_im); |
| 258 } | 262 } |
| 259 aec->wfBuf[1][pos] = wt1; | 263 h_fft_buf[1][pos] = wt1; |
| 260 } | 264 } |
| 261 } | 265 } |
| 262 } | 266 } |
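The adaptation correlates the far-end spectrum with the error via a conjugate product, per the identities in the comments at new lines 215-216; the fft[1] fixup is needed because the rdft packing appears to store the real Nyquist-bin value in slot 1 rather than as a separate complex entry. The scalar form of the product, mirroring MulRe/MulIm above (hypothetical helper names):

    /* re(conj(a) * b) and im(conj(a) * b) for a = aRe + i*aIm, b = bRe + i*bIm. */
    static float ConjMulRe(float aRe, float aIm, float bRe, float bIm) {
      return aRe * bRe + aIm * bIm;
    }
    static float ConjMulIm(float aRe, float aIm, float bRe, float bIm) {
      return aRe * bIm - aIm * bRe;
    }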
| 263 | 267 |
| 264 static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) { | 268 static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) { |
| 265 // a^b = exp2(b * log2(a)) | 269 // a^b = exp2(b * log2(a)) |
| 266 // exp2(x) and log2(x) are calculated using polynomial approximations. | 270 // exp2(x) and log2(x) are calculated using polynomial approximations. |
| 267 float32x4_t log2_a, b_log2_a, a_exp_b; | 271 float32x4_t log2_a, b_log2_a, a_exp_b; |
| 268 | 272 |
| 269 // Calculate log2(x), x = a. | 273 // Calculate log2(x), x = a. |
| (...skipping 464 matching lines...) |
| 734 } | 738 } |
| 735 } | 739 } |
| 736 | 740 |
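vpowq_f32 above evaluates a^b as exp2(b * log2(a)), with polynomial approximations for exp2 and log2 so everything stays in NEON registers. The scalar identity it vectorizes (hypothetical helper; the real function avoids libm and only needs to handle the positive inputs the suppressor feeds it):

    #include <math.h>
    static float PowViaExp2Log2(float a, float b) {
      /* a^b = exp2(b * log2(a)), valid for a > 0. */
      return exp2f(b * log2f(a));
    }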
| 737 void WebRtcAec_InitAec_neon(void) { | 741 void WebRtcAec_InitAec_neon(void) { |
| 738 WebRtcAec_FilterFar = FilterFarNEON; | 742 WebRtcAec_FilterFar = FilterFarNEON; |
| 739 WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON; | 743 WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON; |
| 740 WebRtcAec_FilterAdaptation = FilterAdaptationNEON; | 744 WebRtcAec_FilterAdaptation = FilterAdaptationNEON; |
| 741 WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON; | 745 WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON; |
| 742 WebRtcAec_SubbandCoherence = SubbandCoherenceNEON; | 746 WebRtcAec_SubbandCoherence = SubbandCoherenceNEON; |
| 743 } | 747 } |
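WebRtcAec_InitAec_neon() rebinds the AEC's kernel function pointers to the NEON versions. A hedged sketch of how the generic init path presumably selects it at runtime (macro and CPU-feature helper names assumed from this era's cpu_features_wrapper.h, not confirmed by this CL):

    #if defined(WEBRTC_DETECT_NEON)
      /* Runtime detection build: check the CPU before switching kernels. */
      if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) {
        WebRtcAec_InitAec_neon();
      }
    #elif defined(WEBRTC_ARCH_ARM_NEON)
      /* NEON guaranteed at compile time. */
      WebRtcAec_InitAec_neon();
    #endif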