OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
122 x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x); | 122 x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x); |
123 } | 123 } |
124 // sqrt(s) = s * 1/sqrt(s) | 124 // sqrt(s) = s * 1/sqrt(s) |
125 return vmulq_f32(s, x);; | 125 return vmulq_f32(s, x); |
126 } | 126 } |
127 #endif // WEBRTC_ARCH_ARM64 | 127 #endif // WEBRTC_ARCH_ARM64 |
128 | 128 |
129 static void ScaleErrorSignalNEON(int extended_filter_enabled, | 129 static void ScaleErrorSignalNEON(int extended_filter_enabled, |
130 float normal_mu, | 130 float normal_mu, |
131 float normal_error_threshold, | 131 float normal_error_threshold, |
132 float *xPow, | 132 float xPow[PART_LEN1], |
133 float ef[2][PART_LEN1]) { | 133 float ef[2][PART_LEN1]) { |
134 const float mu = extended_filter_enabled ? kExtendedMu : normal_mu; | 134 const float mu = extended_filter_enabled ? kExtendedMu : normal_mu; |
135 const float error_threshold = extended_filter_enabled ? | 135 const float error_threshold = extended_filter_enabled ? |
136 kExtendedErrorThreshold : normal_error_threshold; | 136 kExtendedErrorThreshold : normal_error_threshold; |
137 const float32x4_t k1e_10f = vdupq_n_f32(1e-10f); | 137 const float32x4_t k1e_10f = vdupq_n_f32(1e-10f); |
138 const float32x4_t kMu = vmovq_n_f32(mu); | 138 const float32x4_t kMu = vmovq_n_f32(mu); |
139 const float32x4_t kThresh = vmovq_n_f32(error_threshold); | 139 const float32x4_t kThresh = vmovq_n_f32(error_threshold); |
140 int i; | 140 int i; |
141 // vectorized code (four at once) | 141 // vectorized code (four at once) |
142 for (i = 0; i + 3 < PART_LEN1; i += 4) { | 142 for (i = 0; i + 3 < PART_LEN1; i += 4) { |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
179 ef[0][i] *= abs_ef; | 179 ef[0][i] *= abs_ef; |
180 ef[1][i] *= abs_ef; | 180 ef[1][i] *= abs_ef; |
181 } | 181 } |
182 | 182 |
183 // Stepsize factor | 183 // Stepsize factor |
184 ef[0][i] *= mu; | 184 ef[0][i] *= mu; |
185 ef[1][i] *= mu; | 185 ef[1][i] *= mu; |
186 } | 186 } |
187 } | 187 } |
188 | 188 |
189 static void FilterAdaptationNEON(AecCore* aec, | 189 static void FilterAdaptationNEON( |
190 float* fft, | 190 int num_partitions, |
191 float ef[2][PART_LEN1]) { | 191 int xfBufBlockPos, |
| 192 float xfBuf[2][kExtendedNumPartitions * PART_LEN1], |
| 193 float ef[2][PART_LEN1], |
| 194 float wfBuf[2][kExtendedNumPartitions * PART_LEN1]) { |
| 195 float fft[PART_LEN2]; |
192 int i; | 196 int i; |
193 const int num_partitions = aec->num_partitions; | 197 const int num_partitions = num_partitions; |
194 for (i = 0; i < num_partitions; i++) { | 198 for (i = 0; i < num_partitions; i++) { |
195 int xPos = (i + aec->xfBufBlockPos) * PART_LEN1; | 199 int xPos = (i + xfBufBlockPos) * PART_LEN1; |
196 int pos = i * PART_LEN1; | 200 int pos = i * PART_LEN1; |
197 int j; | 201 int j; |
198 // Check for wrap | 202 // Check for wrap |
199 if (i + aec->xfBufBlockPos >= num_partitions) { | 203 if (i + xfBufBlockPos >= num_partitions) { |
200 xPos -= num_partitions * PART_LEN1; | 204 xPos -= num_partitions * PART_LEN1; |
201 } | 205 } |
202 | 206 |
203 // Process the whole array... | 207 // Process the whole array... |
204 for (j = 0; j < PART_LEN; j += 4) { | 208 for (j = 0; j < PART_LEN; j += 4) { |
205 // Load xfBuf and ef. | 209 // Load xfBuf and ef. |
206 const float32x4_t xfBuf_re = vld1q_f32(&aec->xfBuf[0][xPos + j]); | 210 const float32x4_t xfBuf_re = vld1q_f32(&xfBuf[0][xPos + j]); |
207 const float32x4_t xfBuf_im = vld1q_f32(&aec->xfBuf[1][xPos + j]); | 211 const float32x4_t xfBuf_im = vld1q_f32(&xfBuf[1][xPos + j]); |
208 const float32x4_t ef_re = vld1q_f32(&ef[0][j]); | 212 const float32x4_t ef_re = vld1q_f32(&ef[0][j]); |
209 const float32x4_t ef_im = vld1q_f32(&ef[1][j]); | 213 const float32x4_t ef_im = vld1q_f32(&ef[1][j]); |
210 // Calculate the product of conjugate(xfBuf) by ef. | 214 // Calculate the product of conjugate(xfBuf) by ef. |
211 // re(conjugate(a) * b) = aRe * bRe + aIm * bIm | 215 // re(conjugate(a) * b) = aRe * bRe + aIm * bIm |
212 // im(conjugate(a) * b)= aRe * bIm - aIm * bRe | 216 // im(conjugate(a) * b)= aRe * bIm - aIm * bRe |
213 const float32x4_t a = vmulq_f32(xfBuf_re, ef_re); | 217 const float32x4_t a = vmulq_f32(xfBuf_re, ef_re); |
214 const float32x4_t e = vmlaq_f32(a, xfBuf_im, ef_im); | 218 const float32x4_t e = vmlaq_f32(a, xfBuf_im, ef_im); |
215 const float32x4_t c = vmulq_f32(xfBuf_re, ef_im); | 219 const float32x4_t c = vmulq_f32(xfBuf_re, ef_im); |
216 const float32x4_t f = vmlsq_f32(c, xfBuf_im, ef_re); | 220 const float32x4_t f = vmlsq_f32(c, xfBuf_im, ef_re); |
217 // Interleave real and imaginary parts. | 221 // Interleave real and imaginary parts. |
218 const float32x4x2_t g_n_h = vzipq_f32(e, f); | 222 const float32x4x2_t g_n_h = vzipq_f32(e, f); |
219 // Store | 223 // Store |
220 vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]); | 224 vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]); |
221 vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]); | 225 vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]); |
222 } | 226 } |
223 // ... and fixup the first imaginary entry. | 227 // ... and fixup the first imaginary entry. |
224 fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN], | 228 fft[1] = MulRe(xfBuf[0][xPos + PART_LEN], |
225 -aec->xfBuf[1][xPos + PART_LEN], | 229 -xfBuf[1][xPos + PART_LEN], |
226 ef[0][PART_LEN], | 230 ef[0][PART_LEN], |
227 ef[1][PART_LEN]); | 231 ef[1][PART_LEN]); |
228 | 232 |
229 aec_rdft_inverse_128(fft); | 233 aec_rdft_inverse_128(fft); |
230 memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); | 234 memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); |
231 | 235 |
232 // fft scaling | 236 // fft scaling |
233 { | 237 { |
234 const float scale = 2.0f / PART_LEN2; | 238 const float scale = 2.0f / PART_LEN2; |
235 const float32x4_t scale_ps = vmovq_n_f32(scale); | 239 const float32x4_t scale_ps = vmovq_n_f32(scale); |
236 for (j = 0; j < PART_LEN; j += 4) { | 240 for (j = 0; j < PART_LEN; j += 4) { |
237 const float32x4_t fft_ps = vld1q_f32(&fft[j]); | 241 const float32x4_t fft_ps = vld1q_f32(&fft[j]); |
238 const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps); | 242 const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps); |
239 vst1q_f32(&fft[j], fft_scale); | 243 vst1q_f32(&fft[j], fft_scale); |
240 } | 244 } |
241 } | 245 } |
242 aec_rdft_forward_128(fft); | 246 aec_rdft_forward_128(fft); |
243 | 247 |
244 { | 248 { |
245 const float wt1 = aec->wfBuf[1][pos]; | 249 const float wt1 = wfBuf[1][pos]; |
246 aec->wfBuf[0][pos + PART_LEN] += fft[1]; | 250 wfBuf[0][pos + PART_LEN] += fft[1]; |
247 for (j = 0; j < PART_LEN; j += 4) { | 251 for (j = 0; j < PART_LEN; j += 4) { |
248 float32x4_t wtBuf_re = vld1q_f32(&aec->wfBuf[0][pos + j]); | 252 float32x4_t wtBuf_re = vld1q_f32(&wfBuf[0][pos + j]); |
249 float32x4_t wtBuf_im = vld1q_f32(&aec->wfBuf[1][pos + j]); | 253 float32x4_t wtBuf_im = vld1q_f32(&wfBuf[1][pos + j]); |
250 const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]); | 254 const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]); |
251 const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]); | 255 const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]); |
252 const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4); | 256 const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4); |
253 wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]); | 257 wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]); |
254 wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]); | 258 wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]); |
255 | 259 |
256 vst1q_f32(&aec->wfBuf[0][pos + j], wtBuf_re); | 260 vst1q_f32(&wfBuf[0][pos + j], wtBuf_re); |
257 vst1q_f32(&aec->wfBuf[1][pos + j], wtBuf_im); | 261 vst1q_f32(&wfBuf[1][pos + j], wtBuf_im); |
258 } | 262 } |
259 aec->wfBuf[1][pos] = wt1; | 263 wfBuf[1][pos] = wt1; |
260 } | 264 } |
261 } | 265 } |
262 } | 266 } |
263 | 267 |
264 static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) { | 268 static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) { |
265 // a^b = exp2(b * log2(a)) | 269 // a^b = exp2(b * log2(a)) |
266 // exp2(x) and log2(x) are calculated using polynomial approximations. | 270 // exp2(x) and log2(x) are calculated using polynomial approximations. |
267 float32x4_t log2_a, b_log2_a, a_exp_b; | 271 float32x4_t log2_a, b_log2_a, a_exp_b; |
268 | 272 |
269 // Calculate log2(x), x = a. | 273 // Calculate log2(x), x = a. |
(...skipping 464 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
734 } | 738 } |
735 } | 739 } |
736 | 740 |
737 void WebRtcAec_InitAec_neon(void) { | 741 void WebRtcAec_InitAec_neon(void) { |
738 WebRtcAec_FilterFar = FilterFarNEON; | 742 WebRtcAec_FilterFar = FilterFarNEON; |
739 WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON; | 743 WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON; |
740 WebRtcAec_FilterAdaptation = FilterAdaptationNEON; | 744 WebRtcAec_FilterAdaptation = FilterAdaptationNEON; |
741 WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON; | 745 WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON; |
742 WebRtcAec_SubbandCoherence = SubbandCoherenceNEON; | 746 WebRtcAec_SubbandCoherence = SubbandCoherenceNEON; |
743 } | 747 } |
OLD | NEW |