/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/video_processing/content_analysis.h"

#include <emmintrin.h>
#include <math.h>

namespace webrtc {

int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
  uint32_t num_pixels = 0;  // counter for # of pixels
  const uint8_t* imgBufO = orig_frame_ + border_ * width_ + border_;
  const uint8_t* imgBufP = prev_frame_ + border_ * width_ + border_;

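  // Round the active width down to a multiple of 16 so the loop below can
  // always consume a full 16-pixel SSE2 vector; any leftover pixels at the
  // right edge are simply not sampled.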
  const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;

  __m128i sad_64 = _mm_setzero_si128();
  __m128i sum_64 = _mm_setzero_si128();
  __m128i sqsum_64 = _mm_setzero_si128();
  const __m128i z = _mm_setzero_si128();
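  // sad_64 accumulates sum(|o - p|), sum_64 accumulates sum(o), and
  // sqsum_64 accumulates sum(o * o), each as two 64 bit partial sums.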

  for (uint16_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
    __m128i sqsum_32 = _mm_setzero_si128();

    const uint8_t* lineO = imgBufO;
    const uint8_t* lineP = imgBufP;

    // Work on 16 pixels at a time. For HD content with a width of 1920
    // this loop will run ~120 times (depending on border). The maximum
    // for abs(o - p) and for sum(o) per pixel is 255. _mm_sad_epu8
    // produces two 64 bit results which are then accumulated; there is
    // no chance of rollover for these two accumulators.
    // o * o has a maximum of 255 * 255 = 65025. This would roll over a
    // 16 bit accumulator, since 120 * 65025 > 65535, but it fits in a
    // 32 bit accumulator.
    for (uint16_t j = 0; j < width_end - border_; j += 16) {
      const __m128i o = _mm_loadu_si128((__m128i*)(lineO));
      const __m128i p = _mm_loadu_si128((__m128i*)(lineP));

      lineO += 16;
      lineP += 16;

      // Absolute pixel difference between frames: _mm_sad_epu8 sums the
      // absolute byte differences of each 8-byte half into two 64 bit
      // lanes.
      sad_64 = _mm_add_epi64(sad_64, _mm_sad_epu8(o, p));

      // Sum of all pixels in the frame, computed as a SAD against zero.
      sum_64 = _mm_add_epi64(sum_64, _mm_sad_epu8(o, z));

      // Squared sum of all pixels in the frame: widen the bytes to
      // 16 bits, then _mm_madd_epi16(x, x) squares each 16 bit lane and
      // adds adjacent pairs of products into 32 bit lanes.
      const __m128i olo = _mm_unpacklo_epi8(o, z);
      const __m128i ohi = _mm_unpackhi_epi8(o, z);

      const __m128i sqsum_32_lo = _mm_madd_epi16(olo, olo);
      const __m128i sqsum_32_hi = _mm_madd_epi16(ohi, ohi);

      sqsum_32 = _mm_add_epi32(sqsum_32, sqsum_32_lo);
      sqsum_32 = _mm_add_epi32(sqsum_32, sqsum_32_hi);
    }

    // Add to the 64 bit running sum so as not to roll over.
    sqsum_64 =
        _mm_add_epi64(sqsum_64, _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32, z),
                                              _mm_unpacklo_epi32(sqsum_32, z)));

    imgBufO += width_ * skip_num_;
    imgBufP += width_ * skip_num_;
    num_pixels += (width_end - border_);
  }

  __m128i sad_final_128;
  __m128i sum_final_128;
  __m128i sqsum_final_128;

  // Bring sums out of vector registers and into integer register
  // domain, summing them along the way.
  _mm_store_si128(&sad_final_128, sad_64);
  _mm_store_si128(&sum_final_128, sum_64);
  _mm_store_si128(&sqsum_final_128, sqsum_64);

  uint64_t* sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
  uint64_t* sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
  uint64_t* sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);

  const uint32_t pixelSum = sum_final_64[0] + sum_final_64[1];
  const uint64_t pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
  const uint32_t tempDiffSum = sad_final_64[0] + sad_final_64[1];

  // Default.
  motion_magnitude_ = 0.0f;

  if (tempDiffSum == 0)
    return VPM_OK;

  // Normalize over all pixels.
  const float tempDiffAvg =
      static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
  const float pixelSumAvg =
      static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
  const float pixelSqSumAvg =
      static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
  float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);

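  // contrast is the luma variance E[o^2] - (E[o])^2, so its square root
  // is the standard deviation: the motion magnitude below is the mean
  // absolute frame difference normalized by pixel contrast.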
  if (contrast > 0.0) {
    contrast = sqrt(contrast);
    motion_magnitude_ = tempDiffAvg / contrast;
  }

  return VPM_OK;
}

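// For reference, a scalar sketch of the metric computed above (an
// illustration, not part of the original file):
//
//   uint64_t sad = 0, sum = 0, sqsum = 0;
//   for (each sampled pixel o in orig_frame_ and p in prev_frame_) {
//     sad += abs(o - p);
//     sum += o;
//     sqsum += o * o;
//   }
//   mean = sum / n; var = sqsum / n - mean * mean;
//   motion_magnitude_ = (sad / n) / sqrt(var);  // When var > 0.
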
int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
  const uint8_t* imgBuf = orig_frame_ + border_ * width_;
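  // Same rounding-down-to-a-multiple-of-16 trick as in
  // TemporalDiffMetric_SSE2() above.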
  const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;

  __m128i se_32 = _mm_setzero_si128();
  __m128i sev_32 = _mm_setzero_si128();
  __m128i seh_32 = _mm_setzero_si128();
  __m128i msa_32 = _mm_setzero_si128();
  const __m128i z = _mm_setzero_si128();

  // Error is accumulated as a 32 bit value. Looking at HD content with a
  // height of 1080 lines, or about 67 macro blocks: if the 16 bit row
  // value is maxed out at 65535 for every row, 65535 * 1080 = 70777800,
  // which will not roll over a 32 bit accumulator.
  // skip_num_ is also used to reduce the number of rows sampled.
  for (int32_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
    __m128i se_16 = _mm_setzero_si128();
    __m128i sev_16 = _mm_setzero_si128();
    __m128i seh_16 = _mm_setzero_si128();
    __m128i msa_16 = _mm_setzero_si128();

    // Row error is accumulated as a 16 bit value. There are 8
    // accumulators. The max value of a 16 bit number is 65535. Looking
    // at HD content, 1080p has a width of 1920, or 120 macro blocks,
    // and one macro block is processed at a time. The absolute max
    // error at a point would be abs(4 * 0 - (255 + 255 + 255 + 255)),
    // which equals 1020, and 120 * 1020 = 122400. The probability of
    // hitting this is quite low on well behaved content. A specially
    // crafted image could roll over. border_ could also be adjusted to
    // concentrate on just the center of the images for an HD capture
    // in order to reduce the possibility of rollover.
    const uint8_t* lineTop = imgBuf - width_ + border_;
    const uint8_t* lineCen = imgBuf + border_;
    const uint8_t* lineBot = imgBuf + width_ + border_;

    for (int32_t j = 0; j < width_end - border_; j += 16) {
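      // t and b are the rows above and below the center row c, while l
      // and r are the center row shifted by one pixel via unaligned
      // loads, giving the 4-neighbor cross around each center pixel.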
      const __m128i t = _mm_loadu_si128((__m128i*)(lineTop));
      const __m128i l = _mm_loadu_si128((__m128i*)(lineCen - 1));
      const __m128i c = _mm_loadu_si128((__m128i*)(lineCen));
      const __m128i r = _mm_loadu_si128((__m128i*)(lineCen + 1));
      const __m128i b = _mm_loadu_si128((__m128i*)(lineBot));

      lineTop += 16;
      lineCen += 16;
      lineBot += 16;

      // Center pixels unpacked to 16 bits.
      __m128i clo = _mm_unpacklo_epi8(c, z);
      __m128i chi = _mm_unpackhi_epi8(c, z);

      // Left and right pixels unpacked and added together.
      const __m128i lrlo =
          _mm_add_epi16(_mm_unpacklo_epi8(l, z), _mm_unpacklo_epi8(r, z));
      const __m128i lrhi =
          _mm_add_epi16(_mm_unpackhi_epi8(l, z), _mm_unpackhi_epi8(r, z));

      // Top and bottom pixels unpacked and added together.
      const __m128i tblo =
          _mm_add_epi16(_mm_unpacklo_epi8(t, z), _mm_unpacklo_epi8(b, z));
      const __m128i tbhi =
          _mm_add_epi16(_mm_unpackhi_epi8(t, z), _mm_unpackhi_epi8(b, z));

      // Running sum of all pixels.
      msa_16 = _mm_add_epi16(msa_16, _mm_add_epi16(chi, clo));

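      // Shift the center up to 2 * c and form the 1-D prediction errors
      // 2 * c - (t + b) (vertical) and 2 * c - (l + r) (horizontal).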
      clo = _mm_slli_epi16(clo, 1);
      chi = _mm_slli_epi16(chi, 1);
      const __m128i sevtlo = _mm_subs_epi16(clo, tblo);
      const __m128i sevthi = _mm_subs_epi16(chi, tbhi);
      const __m128i sehtlo = _mm_subs_epi16(clo, lrlo);
      const __m128i sehthi = _mm_subs_epi16(chi, lrhi);

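      // Shift again to 4 * c and form the 2-D prediction error
      // 4 * c - (l + r + t + b).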
      clo = _mm_slli_epi16(clo, 1);
      chi = _mm_slli_epi16(chi, 1);
      const __m128i setlo = _mm_subs_epi16(clo, _mm_add_epi16(lrlo, tblo));
      const __m128i sethi = _mm_subs_epi16(chi, _mm_add_epi16(lrhi, tbhi));

      // Add absolute values to the 16 bit running sums. |x| is computed
      // as max(x, 0 - x) using a saturating subtract.
      se_16 =
          _mm_add_epi16(se_16, _mm_max_epi16(setlo, _mm_subs_epi16(z, setlo)));
      se_16 =
          _mm_add_epi16(se_16, _mm_max_epi16(sethi, _mm_subs_epi16(z, sethi)));
      sev_16 = _mm_add_epi16(sev_16,
                             _mm_max_epi16(sevtlo, _mm_subs_epi16(z, sevtlo)));
      sev_16 = _mm_add_epi16(sev_16,
                             _mm_max_epi16(sevthi, _mm_subs_epi16(z, sevthi)));
      seh_16 = _mm_add_epi16(seh_16,
                             _mm_max_epi16(sehtlo, _mm_subs_epi16(z, sehtlo)));
      seh_16 = _mm_add_epi16(seh_16,
                             _mm_max_epi16(sehthi, _mm_subs_epi16(z, sehthi)));
    }

    // Add to the 32 bit running sums so as not to roll over.
    se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16, z),
                                               _mm_unpacklo_epi16(se_16, z)));
    sev_32 =
        _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16, z),
                                            _mm_unpacklo_epi16(sev_16, z)));
    seh_32 =
        _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16, z),
                                            _mm_unpacklo_epi16(seh_16, z)));
    msa_32 =
        _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16, z),
                                            _mm_unpacklo_epi16(msa_16, z)));

    imgBuf += width_ * skip_num_;
  }

  __m128i se_128;
  __m128i sev_128;
  __m128i seh_128;
  __m128i msa_128;

  // Bring sums out of vector registers and into integer register
  // domain, summing them along the way.
  _mm_store_si128(&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32, z),
                                         _mm_unpacklo_epi32(se_32, z)));
  _mm_store_si128(&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32, z),
                                          _mm_unpacklo_epi32(sev_32, z)));
  _mm_store_si128(&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32, z),
                                          _mm_unpacklo_epi32(seh_32, z)));
  _mm_store_si128(&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32, z),
                                          _mm_unpacklo_epi32(msa_32, z)));

  uint64_t* se_64 = reinterpret_cast<uint64_t*>(&se_128);
  uint64_t* sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
  uint64_t* seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
  uint64_t* msa_64 = reinterpret_cast<uint64_t*>(&msa_128);

  const uint32_t spatialErrSum = se_64[0] + se_64[1];
  const uint32_t spatialErrVSum = sev_64[0] + sev_64[1];
  const uint32_t spatialErrHSum = seh_64[0] + seh_64[1];
  const uint32_t pixelMSA = msa_64[0] + msa_64[1];

  // Normalize over all pixels.
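  // The 2-D error was computed against 4 * c and the 1-D errors against
  // 2 * c, so scale them back down before dividing by the pixel sum.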
  const float spatialErr = static_cast<float>(spatialErrSum >> 2);
  const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
  const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
  const float norm = static_cast<float>(pixelMSA);

  // 2X2:
  spatial_pred_err_ = spatialErr / norm;

  // 1X2:
  spatial_pred_err_h_ = spatialErrH / norm;

  // 2X1:
  spatial_pred_err_v_ = spatialErrV / norm;

  return VPM_OK;
}

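// For reference, a scalar sketch of the metrics computed above (an
// illustration, not part of the original file):
//
//   for (each sampled interior pixel c with neighbors l, r, t, b) {
//     se  += abs(4 * c - (l + r + t + b));  // 2X2 predictor error.
//     seh += abs(2 * c - (l + r));          // 1X2 (horizontal) error.
//     sev += abs(2 * c - (t + b));          // 2X1 (vertical) error.
//     msa += c;
//   }
//   spatial_pred_err_   = (se / 4.0f) / msa;
//   spatial_pred_err_h_ = (seh / 2.0f) / msa;
//   spatial_pred_err_v_ = (sev / 2.0f) / msa;
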
}  // namespace webrtc