/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_processing/aec/aec_rdft.h"

#include <emmintrin.h>

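// SSE2 versions of the kernels behind the AEC's 128-point real FFT. The
// function pointers overridden here (cft1st_128, cftmdl_128, rftfsub_128,
// rftbsub_128) and the twiddle tables referenced below (rdft_w, rdft_wk1r/i,
// rdft_wk2r/i, rdft_wk3r/i, cftmdl_wk1r) come from aec_rdft.h;
// aec_rdft_init_sse2() at the bottom installs the SSE2 kernels.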
static const ALIGN16_BEG float ALIGN16_END
    k_swap_sign[4] = {-1.f, 1.f, -1.f, 1.f};

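// Complex-arithmetic trick used throughout this file: _MM_SHUFFLE(2, 3, 0, 1)
// swaps the real and imaginary parts of each packed complex value, and
// multiplying the swapped pair by k_swap_sign turns (re, im) into (-im, re),
// i.e. a multiplication by i.
//
// cft1st_128_SSE2 below is the first radix-4 butterfly pass of the complex
// FFT. Roughly, for each group of four complex values z0..z3 stored at
// a[j..j + 7] (two such groups, at j and j + 8, are processed per loop
// iteration), with twiddles w1, w2, w3 taken from the rdft_wk* tables:
//   z0' = (z0 + z1) + (z2 + z3)
//   z1' = w1 * ((z0 - z1) + i * (z2 - z3))
//   z2' = w2 * ((z0 + z1) - (z2 + z3))
//   z3' = w3 * ((z0 - z1) - i * (z2 - z3))
// The packed rdft_wk* tables already carry the sign pattern needed so that
// two multiplies and an add amount to a complex multiplication.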
static void cft1st_128_SSE2(float* a) {
  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
  int j, k2;

  for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
    __m128 a00v = _mm_loadu_ps(&a[j + 0]);
    __m128 a04v = _mm_loadu_ps(&a[j + 4]);
    __m128 a08v = _mm_loadu_ps(&a[j + 8]);
    __m128 a12v = _mm_loadu_ps(&a[j + 12]);
    __m128 a01v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 a23v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 a45v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 a67v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(3, 2, 3, 2));

    const __m128 wk1rv = _mm_load_ps(&rdft_wk1r[k2]);
    const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2]);
    const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2]);
    const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2]);
    const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2]);
    const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2]);
    __m128 x0v = _mm_add_ps(a01v, a23v);
    const __m128 x1v = _mm_sub_ps(a01v, a23v);
    const __m128 x2v = _mm_add_ps(a45v, a67v);
    const __m128 x3v = _mm_sub_ps(a45v, a67v);
    __m128 x0w;
    a01v = _mm_add_ps(x0v, x2v);
    x0v = _mm_sub_ps(x0v, x2v);
    x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
    {
      const __m128 a45_0v = _mm_mul_ps(wk2rv, x0v);
      const __m128 a45_1v = _mm_mul_ps(wk2iv, x0w);
      a45v = _mm_add_ps(a45_0v, a45_1v);
    }
    {
      __m128 a23_0v, a23_1v;
      const __m128 x3w = _mm_shuffle_ps(x3v, x3v, _MM_SHUFFLE(2, 3, 0, 1));
      const __m128 x3s = _mm_mul_ps(mm_swap_sign, x3w);
      x0v = _mm_add_ps(x1v, x3s);
      x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
      a23_0v = _mm_mul_ps(wk1rv, x0v);
      a23_1v = _mm_mul_ps(wk1iv, x0w);
      a23v = _mm_add_ps(a23_0v, a23_1v);

      x0v = _mm_sub_ps(x1v, x3s);
      x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
    }
    {
      const __m128 a67_0v = _mm_mul_ps(wk3rv, x0v);
      const __m128 a67_1v = _mm_mul_ps(wk3iv, x0w);
      a67v = _mm_add_ps(a67_0v, a67_1v);
    }

    a00v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(1, 0, 1, 0));
    a04v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(1, 0, 1, 0));
    a08v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(3, 2, 3, 2));
    a12v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(3, 2, 3, 2));
    _mm_storeu_ps(&a[j + 0], a00v);
    _mm_storeu_ps(&a[j + 4], a04v);
    _mm_storeu_ps(&a[j + 8], a08v);
    _mm_storeu_ps(&a[j + 12], a12v);
  }
}

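// Middle-stage radix-4 butterflies: the same combine-and-twiddle pattern as
// cft1st_128_SSE2, but across complex values spaced 8 floats (4 complex
// values) apart. The first loop works on a[0..63] with near-trivial twiddles
// (only the packed cftmdl_wk1r factor is needed); the second loop works on
// a[64..127] using the k2 == 4 entries of the rdft_wk* tables.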
static void cftmdl_128_SSE2(float* a) {
  const int l = 8;
  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
  int j0;

  __m128 wk1rv = _mm_load_ps(cftmdl_wk1r);
  for (j0 = 0; j0 < l; j0 += 2) {
    const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 + 0]);
    const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]);
    const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]);
    const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]);
    const __m128 a_00_32 = _mm_shuffle_ps(_mm_castsi128_ps(a_00),
                                          _mm_castsi128_ps(a_32),
                                          _MM_SHUFFLE(1, 0, 1, 0));
    const __m128 a_08_40 = _mm_shuffle_ps(_mm_castsi128_ps(a_08),
                                          _mm_castsi128_ps(a_40),
                                          _MM_SHUFFLE(1, 0, 1, 0));
    __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40);
    const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40);

    const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]);
    const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]);
    const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]);
    const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]);
    const __m128 a_16_48 = _mm_shuffle_ps(_mm_castsi128_ps(a_16),
                                          _mm_castsi128_ps(a_48),
                                          _MM_SHUFFLE(1, 0, 1, 0));
    const __m128 a_24_56 = _mm_shuffle_ps(_mm_castsi128_ps(a_24),
                                          _mm_castsi128_ps(a_56),
                                          _MM_SHUFFLE(1, 0, 1, 0));
    const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56);
    const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56);

    const __m128 xx0 = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
    const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);

    const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32(
        _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1)));
    const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1);
    const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
    const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped);

    const __m128 yy0 =
        _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(2, 2, 2, 2));
    const __m128 yy1 =
        _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(3, 3, 3, 3));
    const __m128 yy2 = _mm_mul_ps(mm_swap_sign, yy1);
    const __m128 yy3 = _mm_add_ps(yy0, yy2);
    const __m128 yy4 = _mm_mul_ps(wk1rv, yy3);

    _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx0));
    _mm_storel_epi64(
        (__m128i*)&a[j0 + 32],
        _mm_shuffle_epi32(_mm_castps_si128(xx0), _MM_SHUFFLE(3, 2, 3, 2)));

    _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx1));
    _mm_storel_epi64(
        (__m128i*)&a[j0 + 48],
        _mm_shuffle_epi32(_mm_castps_si128(xx1), _MM_SHUFFLE(2, 3, 2, 3)));
    a[j0 + 48] = -a[j0 + 48];

    _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(x1_x3_add));
    _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(x1_x3_sub));

    _mm_storel_epi64((__m128i*)&a[j0 + 40], _mm_castps_si128(yy4));
    _mm_storel_epi64(
        (__m128i*)&a[j0 + 56],
        _mm_shuffle_epi32(_mm_castps_si128(yy4), _MM_SHUFFLE(2, 3, 2, 3)));
  }

  {
    int k = 64;
    int k1 = 2;
    int k2 = 2 * k1;
    const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2 + 0]);
    const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2 + 0]);
    const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2 + 0]);
    const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2 + 0]);
    const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2 + 0]);
    wk1rv = _mm_load_ps(&rdft_wk1r[k2 + 0]);
    for (j0 = k; j0 < l + k; j0 += 2) {
      const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 + 0]);
      const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]);
      const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]);
      const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]);
      const __m128 a_00_32 = _mm_shuffle_ps(_mm_castsi128_ps(a_00),
                                            _mm_castsi128_ps(a_32),
                                            _MM_SHUFFLE(1, 0, 1, 0));
      const __m128 a_08_40 = _mm_shuffle_ps(_mm_castsi128_ps(a_08),
                                            _mm_castsi128_ps(a_40),
                                            _MM_SHUFFLE(1, 0, 1, 0));
      __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40);
      const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40);

      const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]);
      const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]);
      const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]);
      const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]);
      const __m128 a_16_48 = _mm_shuffle_ps(_mm_castsi128_ps(a_16),
                                            _mm_castsi128_ps(a_48),
                                            _MM_SHUFFLE(1, 0, 1, 0));
      const __m128 a_24_56 = _mm_shuffle_ps(_mm_castsi128_ps(a_24),
                                            _mm_castsi128_ps(a_56),
                                            _MM_SHUFFLE(1, 0, 1, 0));
      const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56);
      const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56);

      const __m128 xx = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
      const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
      const __m128 xx2 = _mm_mul_ps(xx1, wk2rv);
      const __m128 xx3 =
          _mm_mul_ps(wk2iv,
                     _mm_castsi128_ps(_mm_shuffle_epi32(
                         _mm_castps_si128(xx1), _MM_SHUFFLE(2, 3, 0, 1))));
      const __m128 xx4 = _mm_add_ps(xx2, xx3);

      const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32(
          _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1)));
      const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1);
      const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
      const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped);

      const __m128 xx10 = _mm_mul_ps(x1_x3_add, wk1rv);
      const __m128 xx11 = _mm_mul_ps(
          wk1iv,
          _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_add),
                                             _MM_SHUFFLE(2, 3, 0, 1))));
      const __m128 xx12 = _mm_add_ps(xx10, xx11);

      const __m128 xx20 = _mm_mul_ps(x1_x3_sub, wk3rv);
      const __m128 xx21 = _mm_mul_ps(
          wk3iv,
          _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_sub),
                                             _MM_SHUFFLE(2, 3, 0, 1))));
      const __m128 xx22 = _mm_add_ps(xx20, xx21);

      _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx));
      _mm_storel_epi64(
          (__m128i*)&a[j0 + 32],
          _mm_shuffle_epi32(_mm_castps_si128(xx), _MM_SHUFFLE(3, 2, 3, 2)));

      _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx4));
      _mm_storel_epi64(
          (__m128i*)&a[j0 + 48],
          _mm_shuffle_epi32(_mm_castps_si128(xx4), _MM_SHUFFLE(3, 2, 3, 2)));

      _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(xx12));
      _mm_storel_epi64(
          (__m128i*)&a[j0 + 40],
          _mm_shuffle_epi32(_mm_castps_si128(xx12), _MM_SHUFFLE(3, 2, 3, 2)));

      _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(xx22));
      _mm_storel_epi64(
          (__m128i*)&a[j0 + 56],
          _mm_shuffle_epi32(_mm_castps_si128(xx22), _MM_SHUFFLE(3, 2, 3, 2)));
    }
  }
}

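// Ooura-style real-FFT post-processing: rftfsub_128_SSE2 folds the length-64
// complex FFT held in a[0..127] into the spectrum of the 128-point real input
// (rftbsub_128_SSE2 below is the matching step for the inverse transform).
// c = rdft_w + 32 points at the weight table used for the folding.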
static void rftfsub_128_SSE2(float* a) {
  const float* c = rdft_w + 32;
  int j1, j2, k1, k2;
  float wkr, wki, xr, xi, yr, yi;

  static const ALIGN16_BEG float ALIGN16_END
      k_half[4] = {0.5f, 0.5f, 0.5f, 0.5f};
  const __m128 mm_half = _mm_load_ps(k_half);

  // Vectorized code (four at once).
  // Note: the commented numbers are indexes for the first iteration of the
  // loop.
  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
    // Load 'wk'.
    const __m128 c_j1 = _mm_loadu_ps(&c[j1]);       // 1, 2, 3, 4,
    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);  // 28, 29, 30, 31,
    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);  // 28, 29, 30, 31,
    const __m128 wkr_ =
        _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3));  // 31, 30, 29, 28,
    const __m128 wki_ = c_j1;  // 1, 2, 3, 4,
    // Load and shuffle 'a'.
    const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]);    // 2, 3, 4, 5,
    const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]);    // 6, 7, 8, 9,
    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
    const __m128 a_j2_p0 = _mm_shuffle_ps(
        a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0));  // 2, 4, 6, 8,
    const __m128 a_j2_p1 = _mm_shuffle_ps(
        a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1));  // 3, 5, 7, 9,
    const __m128 a_k2_p0 = _mm_shuffle_ps(
        a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2));  // 126, 124, 122, 120,
    const __m128 a_k2_p1 = _mm_shuffle_ps(
        a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3));  // 127, 125, 123, 121,
    // Calculate 'x'.
    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
    // 2-126, 4-124, 6-122, 8-120,
    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
    // 3-127, 5-125, 7-123, 9-121,
    // Calculate product into 'y'.
    // yr = wkr * xr - wki * xi;
    // yi = wkr * xi + wki * xr;
    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
    const __m128 b_ = _mm_mul_ps(wki_, xi_);
    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
    const __m128 d_ = _mm_mul_ps(wki_, xr_);
    const __m128 yr_ = _mm_sub_ps(a_, b_);  // 2-126, 4-124, 6-122, 8-120,
    const __m128 yi_ = _mm_add_ps(c_, d_);  // 3-127, 5-125, 7-123, 9-121,
    // Update 'a'.
    // a[j2 + 0] -= yr;
    // a[j2 + 1] -= yi;
    // a[k2 + 0] += yr;
    // a[k2 + 1] -= yi;
    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  // 2, 4, 6, 8,
    const __m128 a_j2_p1n = _mm_sub_ps(a_j2_p1, yi_);  // 3, 5, 7, 9,
    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
    const __m128 a_k2_p1n = _mm_sub_ps(a_k2_p1, yi_);  // 127, 125, 123, 121,
    // Shuffle in the right order and store.
    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
    // 2, 3, 4, 5,
    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
    // 6, 7, 8, 9,
    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
    // 122, 123, 120, 121,
    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
    // 126, 127, 124, 125,
    const __m128 a_k2_0n = _mm_shuffle_ps(
        a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2));  // 120, 121, 122, 123,
    const __m128 a_k2_4n = _mm_shuffle_ps(
        a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2));  // 124, 125, 126, 127,
    _mm_storeu_ps(&a[0 + j2], a_j2_0n);
    _mm_storeu_ps(&a[4 + j2], a_j2_4n);
    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
  }
  // Scalar code for the remaining items.
  for (; j2 < 64; j1 += 1, j2 += 2) {
    k2 = 128 - j2;
    k1 = 32 - j1;
    wkr = 0.5f - c[k1];
    wki = c[j1];
    xr = a[j2 + 0] - a[k2 + 0];
    xi = a[j2 + 1] + a[k2 + 1];
    yr = wkr * xr - wki * xi;
    yi = wkr * xi + wki * xr;
    a[j2 + 0] -= yr;
    a[j2 + 1] -= yi;
    a[k2 + 0] += yr;
    a[k2 + 1] -= yi;
  }
}

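// Inverse-transform counterpart of rftfsub_128_SSE2. The weights and data
// layout are identical; only the signs in the complex product and in the
// updates of a[] differ, plus the sign flips of a[1] and a[65] required by
// the inverse transform.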
static void rftbsub_128_SSE2(float* a) {
  const float* c = rdft_w + 32;
  int j1, j2, k1, k2;
  float wkr, wki, xr, xi, yr, yi;

  static const ALIGN16_BEG float ALIGN16_END
      k_half[4] = {0.5f, 0.5f, 0.5f, 0.5f};
  const __m128 mm_half = _mm_load_ps(k_half);

  a[1] = -a[1];
  // Vectorized code (four at once).
  // Note: the commented numbers are indexes for the first iteration of the
  // loop.
  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
    // Load 'wk'.
    const __m128 c_j1 = _mm_loadu_ps(&c[j1]);       // 1, 2, 3, 4,
    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);  // 28, 29, 30, 31,
    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);  // 28, 29, 30, 31,
    const __m128 wkr_ =
        _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3));  // 31, 30, 29, 28,
    const __m128 wki_ = c_j1;  // 1, 2, 3, 4,
    // Load and shuffle 'a'.
    const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]);    // 2, 3, 4, 5,
    const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]);    // 6, 7, 8, 9,
    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
    const __m128 a_j2_p0 = _mm_shuffle_ps(
        a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0));  // 2, 4, 6, 8,
    const __m128 a_j2_p1 = _mm_shuffle_ps(
        a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1));  // 3, 5, 7, 9,
    const __m128 a_k2_p0 = _mm_shuffle_ps(
        a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2));  // 126, 124, 122, 120,
    const __m128 a_k2_p1 = _mm_shuffle_ps(
        a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3));  // 127, 125, 123, 121,
    // Calculate 'x'.
    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
    // 2-126, 4-124, 6-122, 8-120,
    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
    // 3-127, 5-125, 7-123, 9-121,
    // Calculate product into 'y'.
    // yr = wkr * xr + wki * xi;
    // yi = wkr * xi - wki * xr;
    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
    const __m128 b_ = _mm_mul_ps(wki_, xi_);
    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
    const __m128 d_ = _mm_mul_ps(wki_, xr_);
    const __m128 yr_ = _mm_add_ps(a_, b_);  // 2-126, 4-124, 6-122, 8-120,
    const __m128 yi_ = _mm_sub_ps(c_, d_);  // 3-127, 5-125, 7-123, 9-121,
    // Update 'a'.
    // a[j2 + 0] = a[j2 + 0] - yr;
    // a[j2 + 1] = yi - a[j2 + 1];
    // a[k2 + 0] = yr + a[k2 + 0];
    // a[k2 + 1] = yi - a[k2 + 1];
    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  // 2, 4, 6, 8,
    const __m128 a_j2_p1n = _mm_sub_ps(yi_, a_j2_p1);  // 3, 5, 7, 9,
    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
    const __m128 a_k2_p1n = _mm_sub_ps(yi_, a_k2_p1);  // 127, 125, 123, 121,
    // Shuffle in the right order and store.
    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
    // 2, 3, 4, 5,
    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
    // 6, 7, 8, 9,
    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
    // 122, 123, 120, 121,
    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
    // 126, 127, 124, 125,
    const __m128 a_k2_0n = _mm_shuffle_ps(
        a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2));  // 120, 121, 122, 123,
    const __m128 a_k2_4n = _mm_shuffle_ps(
        a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2));  // 124, 125, 126, 127,
    _mm_storeu_ps(&a[0 + j2], a_j2_0n);
    _mm_storeu_ps(&a[4 + j2], a_j2_4n);
    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
  }
  // Scalar code for the remaining items.
  for (; j2 < 64; j1 += 1, j2 += 2) {
    k2 = 128 - j2;
    k1 = 32 - j1;
    wkr = 0.5f - c[k1];
    wki = c[j1];
    xr = a[j2 + 0] - a[k2 + 0];
    xi = a[j2 + 1] + a[k2 + 1];
    yr = wkr * xr + wki * xi;
    yi = wkr * xi - wki * xr;
    a[j2 + 0] = a[j2 + 0] - yr;
    a[j2 + 1] = yi - a[j2 + 1];
    a[k2 + 0] = yr + a[k2 + 0];
    a[k2 + 1] = yi - a[k2 + 1];
  }
  a[65] = -a[65];
}

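// Installs the SSE2 kernels into the function pointers used by the generic
// rdft code.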
void aec_rdft_init_sse2(void) {
  cft1st_128 = cft1st_128_SSE2;
  cftmdl_128 = cftmdl_128_SSE2;
  rftfsub_128 = rftfsub_128_SSE2;
  rftbsub_128 = rftbsub_128_SSE2;
}
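// Usage sketch (hypothetical caller, for illustration only): the generic init
// code is expected to select these kernels behind a runtime CPU-capability
// check, roughly along the lines of
//
//   #if defined(WEBRTC_ARCH_X86_FAMILY)
//     if (WebRtc_GetCPUInfo(kSSE2) > 0) {
//       aec_rdft_init_sse2();
//     }
//   #endif
//
// The exact guard and the CPU-detection helper live outside this file.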