OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 16 matching lines...) Expand all Loading... |
// Bit position of the exponent field in an IEEE 754 binary32 (float) value;
// presumably used by float-bit manipulation helpers elsewhere in this file
// (e.g. fast log2/exp2 approximations) — the users are outside this view.
enum { kFloatExponentShift = 23 };
28 | 28 |
// Real part of the complex product (aRe + i*aIm) * (bRe + i*bIm).
__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
  const float real_terms = aRe * bRe;
  const float imag_terms = aIm * bIm;
  return real_terms - imag_terms;
}
32 | 32 |
// Imaginary part of the complex product (aRe + i*aIm) * (bRe + i*bIm).
__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
  const float cross1 = aRe * bIm;
  const float cross2 = aIm * bRe;
  return cross1 + cross2;
}
36 | 36 |
37 static void FilterFarNEON(int num_partitions, | 37 static void FilterFarNEON( |
38 int xfBufBlockPos, | 38 int num_partitions, |
39 float xfBuf[2][kExtendedNumPartitions * PART_LEN1], | 39 int x_fft_buf_block_pos, |
40 float wfBuf[2][kExtendedNumPartitions * PART_LEN1], | 40 float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], |
41 float yf[2][PART_LEN1]) { | 41 float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1], |
| 42 float y_fft[2][PART_LEN1]) { |
42 int i; | 43 int i; |
43 for (i = 0; i < num_partitions; i++) { | 44 for (i = 0; i < num_partitions; i++) { |
44 int j; | 45 int j; |
45 int xPos = (i + xfBufBlockPos) * PART_LEN1; | 46 int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; |
46 int pos = i * PART_LEN1; | 47 int pos = i * PART_LEN1; |
47 // Check for wrap | 48 // Check for wrap |
48 if (i + xfBufBlockPos >= num_partitions) { | 49 if (i + x_fft_buf_block_pos >= num_partitions) { |
49 xPos -= num_partitions * PART_LEN1; | 50 xPos -= num_partitions * PART_LEN1; |
50 } | 51 } |
51 | 52 |
52 // vectorized code (four at once) | 53 // vectorized code (four at once) |
53 for (j = 0; j + 3 < PART_LEN1; j += 4) { | 54 for (j = 0; j + 3 < PART_LEN1; j += 4) { |
54 const float32x4_t xfBuf_re = vld1q_f32(&xfBuf[0][xPos + j]); | 55 const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]); |
55 const float32x4_t xfBuf_im = vld1q_f32(&xfBuf[1][xPos + j]); | 56 const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]); |
56 const float32x4_t wfBuf_re = vld1q_f32(&wfBuf[0][pos + j]); | 57 const float32x4_t h_fft_buf_re = vld1q_f32(&h_fft_buf[0][pos + j]); |
57 const float32x4_t wfBuf_im = vld1q_f32(&wfBuf[1][pos + j]); | 58 const float32x4_t h_fft_buf_im = vld1q_f32(&h_fft_buf[1][pos + j]); |
58 const float32x4_t yf_re = vld1q_f32(&yf[0][j]); | 59 const float32x4_t y_fft_re = vld1q_f32(&y_fft[0][j]); |
59 const float32x4_t yf_im = vld1q_f32(&yf[1][j]); | 60 const float32x4_t y_fft_im = vld1q_f32(&y_fft[1][j]); |
60 const float32x4_t a = vmulq_f32(xfBuf_re, wfBuf_re); | 61 const float32x4_t a = vmulq_f32(x_fft_buf_re, h_fft_buf_re); |
61 const float32x4_t e = vmlsq_f32(a, xfBuf_im, wfBuf_im); | 62 const float32x4_t e = vmlsq_f32(a, x_fft_buf_im, h_fft_buf_im); |
62 const float32x4_t c = vmulq_f32(xfBuf_re, wfBuf_im); | 63 const float32x4_t c = vmulq_f32(x_fft_buf_re, h_fft_buf_im); |
63 const float32x4_t f = vmlaq_f32(c, xfBuf_im, wfBuf_re); | 64 const float32x4_t f = vmlaq_f32(c, x_fft_buf_im, h_fft_buf_re); |
64 const float32x4_t g = vaddq_f32(yf_re, e); | 65 const float32x4_t g = vaddq_f32(y_fft_re, e); |
65 const float32x4_t h = vaddq_f32(yf_im, f); | 66 const float32x4_t h = vaddq_f32(y_fft_im, f); |
66 vst1q_f32(&yf[0][j], g); | 67 vst1q_f32(&y_fft[0][j], g); |
67 vst1q_f32(&yf[1][j], h); | 68 vst1q_f32(&y_fft[1][j], h); |
68 } | 69 } |
69 // scalar code for the remaining items. | 70 // scalar code for the remaining items. |
70 for (; j < PART_LEN1; j++) { | 71 for (; j < PART_LEN1; j++) { |
71 yf[0][j] += MulRe(xfBuf[0][xPos + j], | 72 y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], |
72 xfBuf[1][xPos + j], | 73 x_fft_buf[1][xPos + j], |
73 wfBuf[0][pos + j], | 74 h_fft_buf[0][pos + j], |
74 wfBuf[1][pos + j]); | 75 h_fft_buf[1][pos + j]); |
75 yf[1][j] += MulIm(xfBuf[0][xPos + j], | 76 y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], |
76 xfBuf[1][xPos + j], | 77 x_fft_buf[1][xPos + j], |
77 wfBuf[0][pos + j], | 78 h_fft_buf[0][pos + j], |
78 wfBuf[1][pos + j]); | 79 h_fft_buf[1][pos + j]); |
79 } | 80 } |
80 } | 81 } |
81 } | 82 } |
82 | 83 |
83 // ARM64's arm_neon.h has already defined vdivq_f32 vsqrtq_f32. | 84 // ARM64's arm_neon.h has already defined vdivq_f32 vsqrtq_f32. |
84 #if !defined (WEBRTC_ARCH_ARM64) | 85 #if !defined (WEBRTC_ARCH_ARM64) |
85 static float32x4_t vdivq_f32(float32x4_t a, float32x4_t b) { | 86 static float32x4_t vdivq_f32(float32x4_t a, float32x4_t b) { |
86 int i; | 87 int i; |
87 float32x4_t x = vrecpeq_f32(b); | 88 float32x4_t x = vrecpeq_f32(b); |
88 // from arm documentation | 89 // from arm documentation |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
121 x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x); | 122 x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x); |
122 } | 123 } |
123 // sqrt(s) = s * 1/sqrt(s) | 124 // sqrt(s) = s * 1/sqrt(s) |
124 return vmulq_f32(s, x);; | 125 return vmulq_f32(s, x);; |
125 } | 126 } |
126 #endif // WEBRTC_ARCH_ARM64 | 127 #endif // WEBRTC_ARCH_ARM64 |
127 | 128 |
128 static void ScaleErrorSignalNEON(int extended_filter_enabled, | 129 static void ScaleErrorSignalNEON(int extended_filter_enabled, |
129 float normal_mu, | 130 float normal_mu, |
130 float normal_error_threshold, | 131 float normal_error_threshold, |
131 float *x_pow, | 132 float x_pow[PART_LEN1], |
132 float ef[2][PART_LEN1]) { | 133 float ef[2][PART_LEN1]) { |
133 const float mu = extended_filter_enabled ? kExtendedMu : normal_mu; | 134 const float mu = extended_filter_enabled ? kExtendedMu : normal_mu; |
134 const float error_threshold = extended_filter_enabled ? | 135 const float error_threshold = extended_filter_enabled ? |
135 kExtendedErrorThreshold : normal_error_threshold; | 136 kExtendedErrorThreshold : normal_error_threshold; |
136 const float32x4_t k1e_10f = vdupq_n_f32(1e-10f); | 137 const float32x4_t k1e_10f = vdupq_n_f32(1e-10f); |
137 const float32x4_t kMu = vmovq_n_f32(mu); | 138 const float32x4_t kMu = vmovq_n_f32(mu); |
138 const float32x4_t kThresh = vmovq_n_f32(error_threshold); | 139 const float32x4_t kThresh = vmovq_n_f32(error_threshold); |
139 int i; | 140 int i; |
140 // vectorized code (four at once) | 141 // vectorized code (four at once) |
141 for (i = 0; i + 3 < PART_LEN1; i += 4) { | 142 for (i = 0; i + 3 < PART_LEN1; i += 4) { |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
178 ef[0][i] *= abs_ef; | 179 ef[0][i] *= abs_ef; |
179 ef[1][i] *= abs_ef; | 180 ef[1][i] *= abs_ef; |
180 } | 181 } |
181 | 182 |
182 // Stepsize factor | 183 // Stepsize factor |
183 ef[0][i] *= mu; | 184 ef[0][i] *= mu; |
184 ef[1][i] *= mu; | 185 ef[1][i] *= mu; |
185 } | 186 } |
186 } | 187 } |
187 | 188 |
188 static void FilterAdaptationNEON(AecCore* aec, | 189 static void FilterAdaptationNEON( |
189 float* fft, | 190 int num_partitions, |
190 float ef[2][PART_LEN1]) { | 191 int x_fft_buf_block_pos, |
| 192 float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], |
| 193 float e_fft[2][PART_LEN1], |
| 194 float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { |
| 195 float fft[PART_LEN2]; |
191 int i; | 196 int i; |
192 const int num_partitions = aec->num_partitions; | |
193 for (i = 0; i < num_partitions; i++) { | 197 for (i = 0; i < num_partitions; i++) { |
194 int xPos = (i + aec->xfBufBlockPos) * PART_LEN1; | 198 int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; |
195 int pos = i * PART_LEN1; | 199 int pos = i * PART_LEN1; |
196 int j; | 200 int j; |
197 // Check for wrap | 201 // Check for wrap |
198 if (i + aec->xfBufBlockPos >= num_partitions) { | 202 if (i + x_fft_buf_block_pos >= num_partitions) { |
199 xPos -= num_partitions * PART_LEN1; | 203 xPos -= num_partitions * PART_LEN1; |
200 } | 204 } |
201 | 205 |
202 // Process the whole array... | 206 // Process the whole array... |
203 for (j = 0; j < PART_LEN; j += 4) { | 207 for (j = 0; j < PART_LEN; j += 4) { |
204 // Load xfBuf and ef. | 208 // Load x_fft_buf and e_fft. |
205 const float32x4_t xfBuf_re = vld1q_f32(&aec->xfBuf[0][xPos + j]); | 209 const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]); |
206 const float32x4_t xfBuf_im = vld1q_f32(&aec->xfBuf[1][xPos + j]); | 210 const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]); |
207 const float32x4_t ef_re = vld1q_f32(&ef[0][j]); | 211 const float32x4_t e_fft_re = vld1q_f32(&e_fft[0][j]); |
208 const float32x4_t ef_im = vld1q_f32(&ef[1][j]); | 212 const float32x4_t e_fft_im = vld1q_f32(&e_fft[1][j]); |
209 // Calculate the product of conjugate(xfBuf) by ef. | 213 // Calculate the product of conjugate(x_fft_buf) by e_fft. |
210 // re(conjugate(a) * b) = aRe * bRe + aIm * bIm | 214 // re(conjugate(a) * b) = aRe * bRe + aIm * bIm |
211 // im(conjugate(a) * b)= aRe * bIm - aIm * bRe | 215 // im(conjugate(a) * b)= aRe * bIm - aIm * bRe |
212 const float32x4_t a = vmulq_f32(xfBuf_re, ef_re); | 216 const float32x4_t a = vmulq_f32(x_fft_buf_re, e_fft_re); |
213 const float32x4_t e = vmlaq_f32(a, xfBuf_im, ef_im); | 217 const float32x4_t e = vmlaq_f32(a, x_fft_buf_im, e_fft_im); |
214 const float32x4_t c = vmulq_f32(xfBuf_re, ef_im); | 218 const float32x4_t c = vmulq_f32(x_fft_buf_re, e_fft_im); |
215 const float32x4_t f = vmlsq_f32(c, xfBuf_im, ef_re); | 219 const float32x4_t f = vmlsq_f32(c, x_fft_buf_im, e_fft_re); |
216 // Interleave real and imaginary parts. | 220 // Interleave real and imaginary parts. |
217 const float32x4x2_t g_n_h = vzipq_f32(e, f); | 221 const float32x4x2_t g_n_h = vzipq_f32(e, f); |
218 // Store | 222 // Store |
219 vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]); | 223 vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]); |
220 vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]); | 224 vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]); |
221 } | 225 } |
222 // ... and fixup the first imaginary entry. | 226 // ... and fixup the first imaginary entry. |
223 fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN], | 227 fft[1] = MulRe(x_fft_buf[0][xPos + PART_LEN], |
224 -aec->xfBuf[1][xPos + PART_LEN], | 228 -x_fft_buf[1][xPos + PART_LEN], |
225 ef[0][PART_LEN], | 229 e_fft[0][PART_LEN], |
226 ef[1][PART_LEN]); | 230 e_fft[1][PART_LEN]); |
227 | 231 |
228 aec_rdft_inverse_128(fft); | 232 aec_rdft_inverse_128(fft); |
229 memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); | 233 memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); |
230 | 234 |
231 // fft scaling | 235 // fft scaling |
232 { | 236 { |
233 const float scale = 2.0f / PART_LEN2; | 237 const float scale = 2.0f / PART_LEN2; |
234 const float32x4_t scale_ps = vmovq_n_f32(scale); | 238 const float32x4_t scale_ps = vmovq_n_f32(scale); |
235 for (j = 0; j < PART_LEN; j += 4) { | 239 for (j = 0; j < PART_LEN; j += 4) { |
236 const float32x4_t fft_ps = vld1q_f32(&fft[j]); | 240 const float32x4_t fft_ps = vld1q_f32(&fft[j]); |
237 const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps); | 241 const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps); |
238 vst1q_f32(&fft[j], fft_scale); | 242 vst1q_f32(&fft[j], fft_scale); |
239 } | 243 } |
240 } | 244 } |
241 aec_rdft_forward_128(fft); | 245 aec_rdft_forward_128(fft); |
242 | 246 |
243 { | 247 { |
244 const float wt1 = aec->wfBuf[1][pos]; | 248 const float wt1 = h_fft_buf[1][pos]; |
245 aec->wfBuf[0][pos + PART_LEN] += fft[1]; | 249 h_fft_buf[0][pos + PART_LEN] += fft[1]; |
246 for (j = 0; j < PART_LEN; j += 4) { | 250 for (j = 0; j < PART_LEN; j += 4) { |
247 float32x4_t wtBuf_re = vld1q_f32(&aec->wfBuf[0][pos + j]); | 251 float32x4_t wtBuf_re = vld1q_f32(&h_fft_buf[0][pos + j]); |
248 float32x4_t wtBuf_im = vld1q_f32(&aec->wfBuf[1][pos + j]); | 252 float32x4_t wtBuf_im = vld1q_f32(&h_fft_buf[1][pos + j]); |
249 const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]); | 253 const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]); |
250 const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]); | 254 const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]); |
251 const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4); | 255 const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4); |
252 wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]); | 256 wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]); |
253 wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]); | 257 wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]); |
254 | 258 |
255 vst1q_f32(&aec->wfBuf[0][pos + j], wtBuf_re); | 259 vst1q_f32(&h_fft_buf[0][pos + j], wtBuf_re); |
256 vst1q_f32(&aec->wfBuf[1][pos + j], wtBuf_im); | 260 vst1q_f32(&h_fft_buf[1][pos + j], wtBuf_im); |
257 } | 261 } |
258 aec->wfBuf[1][pos] = wt1; | 262 h_fft_buf[1][pos] = wt1; |
259 } | 263 } |
260 } | 264 } |
261 } | 265 } |
262 | 266 |
263 static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) { | 267 static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) { |
264 // a^b = exp2(b * log2(a)) | 268 // a^b = exp2(b * log2(a)) |
265 // exp2(x) and log2(x) are calculated using polynomial approximations. | 269 // exp2(x) and log2(x) are calculated using polynomial approximations. |
266 float32x4_t log2_a, b_log2_a, a_exp_b; | 270 float32x4_t log2_a, b_log2_a, a_exp_b; |
267 | 271 |
268 // Calculate log2(x), x = a. | 272 // Calculate log2(x), x = a. |
(...skipping 464 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
733 } | 737 } |
734 } | 738 } |
735 | 739 |
// Installs the NEON-optimized implementations into the AEC core's function
// pointers, replacing the generic C versions. Called once during AEC init
// on platforms where NEON is available — presumably dispatched by the
// platform-detection code in aec_core (outside this view).
void WebRtcAec_InitAec_neon(void) {
  WebRtcAec_FilterFar = FilterFarNEON;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON;
  WebRtcAec_FilterAdaptation = FilterAdaptationNEON;
  WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON;
  WebRtcAec_SubbandCoherence = SubbandCoherenceNEON;
}
OLD | NEW |