Index: webrtc/modules/video_processing/content_analysis_sse2.cc
diff --git a/webrtc/modules/video_processing/content_analysis_sse2.cc b/webrtc/modules/video_processing/content_analysis_sse2.cc
index 0e1805a44a16e263d565691b23f015176e41e229..7a60a89b454981a175aceb179506427799286412 100644
--- a/webrtc/modules/video_processing/content_analysis_sse2.cc
+++ b/webrtc/modules/video_processing/content_analysis_sse2.cc
@@ -16,22 +16,22 @@
 namespace webrtc {
 
 int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
-  uint32_t num_pixels = 0; // counter for # of pixels
-  const uint8_t* imgBufO = orig_frame_ + border_*width_ + border_;
-  const uint8_t* imgBufP = prev_frame_ + border_*width_ + border_;
+  uint32_t num_pixels = 0;  // counter for # of pixels
+  const uint8_t* imgBufO = orig_frame_ + border_ * width_ + border_;
+  const uint8_t* imgBufP = prev_frame_ + border_ * width_ + border_;
 
-  const int32_t width_end = ((width_ - 2*border_) & -16) + border_;
+  const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
 
-  __m128i sad_64   = _mm_setzero_si128();
-  __m128i sum_64   = _mm_setzero_si128();
+  __m128i sad_64 = _mm_setzero_si128();
+  __m128i sum_64 = _mm_setzero_si128();
   __m128i sqsum_64 = _mm_setzero_si128();
-  const __m128i z  = _mm_setzero_si128();
+  const __m128i z = _mm_setzero_si128();
 
-  for (uint16_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
-    __m128i sqsum_32  = _mm_setzero_si128();
+  for (uint16_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
+    __m128i sqsum_32 = _mm_setzero_si128();
 
-    const uint8_t *lineO = imgBufO;
-    const uint8_t *lineP = imgBufP;
+    const uint8_t* lineO = imgBufO;
+    const uint8_t* lineP = imgBufP;
 
     // Work on 16 pixels at a time.  For HD content with a width of 1920
     // this loop will run ~67 times (depending on border).  Maximum for
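
// Note (not part of the patch): a scalar model of what each 16-pixel
// iteration of the loop above accumulates.  The function and parameter
// names here are illustrative, not from the file.
#include <cstdint>
#include <cstdlib>

inline void AccumulateBlock(const uint8_t* orig, const uint8_t* prev,
                            uint64_t* sad, uint64_t* sum, uint64_t* sqsum) {
  for (int k = 0; k < 16; ++k) {
    *sad += std::abs(orig[k] - prev[k]);  // abs difference between frames
    *sum += orig[k];                      // sum of all pixels
    *sqsum += orig[k] * orig[k];          // squared sum of all pixels
  }
}
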
@@ -49,14 +49,14 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
       lineP += 16;
 
       // Abs pixel difference between frames.
-      sad_64 = _mm_add_epi64 (sad_64, _mm_sad_epu8(o, p));
+      sad_64 = _mm_add_epi64(sad_64, _mm_sad_epu8(o, p));
 
       // sum of all pixels in frame
-      sum_64 = _mm_add_epi64 (sum_64, _mm_sad_epu8(o, z));
+      sum_64 = _mm_add_epi64(sum_64, _mm_sad_epu8(o, z));
 
       // Squared sum of all pixels in frame.
-      const __m128i olo = _mm_unpacklo_epi8(o,z);
-      const __m128i ohi = _mm_unpackhi_epi8(o,z);
+      const __m128i olo = _mm_unpacklo_epi8(o, z);
+      const __m128i ohi = _mm_unpackhi_epi8(o, z);
 
       const __m128i sqsum_32_lo = _mm_madd_epi16(olo, olo);
       const __m128i sqsum_32_hi = _mm_madd_epi16(ohi, ohi);
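
// Note (not part of the patch): a self-contained check of the two
// intrinsics the hunk above leans on.  _mm_sad_epu8(o, z) with z == 0
// yields the plain byte sum as two partial sums in the low and high
// 64-bit lanes, and _mm_madd_epi16(x, x) squares eight 16-bit lanes and
// adds adjacent pairs into four 32-bit lanes.
#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t px[16];
  for (int k = 0; k < 16; ++k) px[k] = static_cast<uint8_t>(k + 1);

  const __m128i z = _mm_setzero_si128();
  const __m128i o = _mm_loadu_si128(reinterpret_cast<const __m128i*>(px));

  // Byte sum via SAD against zero: |px[k] - 0| summed per 8-byte half.
  uint64_t halves[2];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(halves), _mm_sad_epu8(o, z));
  std::printf("sum = %llu (expect 136)\n",
              static_cast<unsigned long long>(halves[0] + halves[1]));

  // Squared sum: zero-extend bytes to 16 bits, then madd each half with
  // itself and reduce the four 32-bit lanes.
  const __m128i lo = _mm_unpacklo_epi8(o, z);
  const __m128i hi = _mm_unpackhi_epi8(o, z);
  uint32_t lanes[4];
  _mm_storeu_si128(
      reinterpret_cast<__m128i*>(lanes),
      _mm_add_epi32(_mm_madd_epi16(lo, lo), _mm_madd_epi16(hi, hi)));
  std::printf("sqsum = %u (expect 1496)\n",
              lanes[0] + lanes[1] + lanes[2] + lanes[3]);
  return 0;
}
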
@@ -66,9 +66,9 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
     }
 
     // Add to 64 bit running sum as to not roll over.
-    sqsum_64 = _mm_add_epi64(sqsum_64,
-                             _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32,z),
-                                           _mm_unpacklo_epi32(sqsum_32,z)));
+    sqsum_64 =
+        _mm_add_epi64(sqsum_64, _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32, z),
+                                              _mm_unpacklo_epi32(sqsum_32, z)));
 
     imgBufO += width_ * skip_num_;
     imgBufP += width_ * skip_num_;
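
// Note (not part of the patch): the 32 -> 64 bit widening step above in
// isolation.  Interleaving four 32-bit lanes with zero zero-extends them
// into two pairs of 64-bit lanes, so the running sum can no longer wrap.
// The helper name is ours.
#include <emmintrin.h>

inline __m128i AddWidened32To64(__m128i acc, __m128i v) {
  const __m128i z = _mm_setzero_si128();
  return _mm_add_epi64(acc, _mm_add_epi64(_mm_unpackhi_epi32(v, z),
                                          _mm_unpacklo_epi32(v, z)));
}
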
@@ -81,13 +81,13 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
 
   // Bring sums out of vector registers and into integer register
   // domain, summing them along the way.
-  _mm_store_si128 (&sad_final_128, sad_64);
-  _mm_store_si128 (&sum_final_128, sum_64);
-  _mm_store_si128 (&sqsum_final_128, sqsum_64);
+  _mm_store_si128(&sad_final_128, sad_64);
+  _mm_store_si128(&sum_final_128, sum_64);
+  _mm_store_si128(&sqsum_final_128, sqsum_64);
 
-  uint64_t *sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
-  uint64_t *sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
-  uint64_t *sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
+  uint64_t* sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
+  uint64_t* sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
+  uint64_t* sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
 
   const uint32_t pixelSum = sum_final_64[0] + sum_final_64[1];
   const uint64_t pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
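
// Note (not part of the patch): the reduction pattern above as a helper.
// Storing the vector to memory and summing the two halves is the SSE2-era
// way to take a horizontal 64-bit sum; a __m128i local is suitably aligned
// for _mm_store_si128.  The helper name is ours.
#include <emmintrin.h>
#include <cstdint>

inline uint64_t HorizontalSum64(__m128i v) {
  __m128i tmp;
  _mm_store_si128(&tmp, v);
  const uint64_t* halves = reinterpret_cast<const uint64_t*>(&tmp);
  return halves[0] + halves[1];
}
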
@@ -96,27 +96,31 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
   // Default.
   motion_magnitude_ = 0.0f;
 
-  if (tempDiffSum == 0) return VPM_OK;
+  if (tempDiffSum == 0)
+    return VPM_OK;
 
   // Normalize over all pixels.
-  const float tempDiffAvg = (float)tempDiffSum / (float)(num_pixels);
-  const float pixelSumAvg = (float)pixelSum / (float)(num_pixels);
-  const float pixelSqSumAvg = (float)pixelSqSum / (float)(num_pixels);
+  const float tempDiffAvg =
+      static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
+  const float pixelSumAvg =
+      static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
+  const float pixelSqSumAvg =
+      static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
   float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
 
   if (contrast > 0.0) {
     contrast = sqrt(contrast);
-    motion_magnitude_ = tempDiffAvg/contrast;
+    motion_magnitude_ = tempDiffAvg / contrast;
   }
 
   return VPM_OK;
 }
 
 int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
-  const uint8_t* imgBuf = orig_frame_ + border_*width_;
+  const uint8_t* imgBuf = orig_frame_ + border_ * width_;
   const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
 
-  __m128i se_32  = _mm_setzero_si128();
+  __m128i se_32 = _mm_setzero_si128();
   __m128i sev_32 = _mm_setzero_si128();
   __m128i seh_32 = _mm_setzero_si128();
   __m128i msa_32 = _mm_setzero_si128();
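
// Note (not part of the patch): the scalar tail of TemporalDiffMetric_SSE2
// in formula form.  contrast is the standard deviation of the current
// frame, sqrt(E[x^2] - E[x]^2), and the metric is the mean absolute frame
// difference normalized by it.  The function name is ours.
#include <cmath>
#include <cstdint>

inline float MotionMagnitude(uint64_t sad, uint32_t sum, uint64_t sqsum,
                             uint32_t num_pixels) {
  const float mean_diff = static_cast<float>(sad) / num_pixels;
  const float mean = static_cast<float>(sum) / num_pixels;
  const float mean_sq = static_cast<float>(sqsum) / num_pixels;
  const float variance = mean_sq - mean * mean;
  return variance > 0.0f ? mean_diff / std::sqrt(variance) : 0.0f;
}
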
@@ -127,8 +131,8 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
   // value is maxed out at 65529 for every row, 65529*1080 = 70777800, which
   // will not roll over a 32 bit accumulator.
   // skip_num_ is also used to reduce the number of rows
-  for (int32_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
-    __m128i se_16  = _mm_setzero_si128();
+  for (int32_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
+    __m128i se_16 = _mm_setzero_si128();
     __m128i sev_16 = _mm_setzero_si128();
     __m128i seh_16 = _mm_setzero_si128();
     __m128i msa_16 = _mm_setzero_si128();
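
// Note (not part of the patch): checking the rollover arithmetic quoted in
// the comment above.  65529 * 1080 is actually 70771320; the stated
// 70777800 corresponds to 65535 per row.  Either bound is far below 2^31,
// so the conclusion that a 32 bit accumulator cannot roll over holds.
static_assert(65535ULL * 1080ULL == 70777800ULL,
              "70777800 in the comment is 65535 * 1080");
static_assert(65535ULL * 1080ULL < (1ULL << 31),
              "well within a 32 bit accumulator");
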
@@ -143,9 +147,9 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
     // border_ could also be adjusted to concentrate on just the center of
     // the images for an HD capture in order to reduce the possiblity of
     // rollover.
-    const uint8_t *lineTop = imgBuf - width_ + border_;
-    const uint8_t *lineCen = imgBuf + border_;
-    const uint8_t *lineBot = imgBuf + width_ + border_;
+    const uint8_t* lineTop = imgBuf - width_ + border_;
+    const uint8_t* lineCen = imgBuf + border_;
+    const uint8_t* lineBot = imgBuf + width_ + border_;
 
     for (int32_t j = 0; j < width_end - border_; j += 16) {
       const __m128i t = _mm_loadu_si128((__m128i*)(lineTop));
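
// Note (not part of the patch): lineTop/lineCen/lineBot start border_
// bytes into a row, so 16-byte alignment is not guaranteed.  That is why
// the loop uses _mm_loadu_si128 (unaligned load) instead of
// _mm_load_si128, which would fault on a misaligned address.
#include <emmintrin.h>
#include <cstdint>

inline __m128i LoadPixels(const uint8_t* p) {  // helper name is ours
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(p));
}
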
@@ -159,20 +163,20 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
       lineBot += 16;
 
       // center pixel unpacked
-      __m128i clo = _mm_unpacklo_epi8(c,z);
-      __m128i chi = _mm_unpackhi_epi8(c,z);
+      __m128i clo = _mm_unpacklo_epi8(c, z);
+      __m128i chi = _mm_unpackhi_epi8(c, z);
 
       // left right pixels unpacked and added together
-      const __m128i lrlo = _mm_add_epi16(_mm_unpacklo_epi8(l,z),
-                                         _mm_unpacklo_epi8(r,z));
-      const __m128i lrhi = _mm_add_epi16(_mm_unpackhi_epi8(l,z),
-                                         _mm_unpackhi_epi8(r,z));
+      const __m128i lrlo =
+          _mm_add_epi16(_mm_unpacklo_epi8(l, z), _mm_unpacklo_epi8(r, z));
+      const __m128i lrhi =
+          _mm_add_epi16(_mm_unpackhi_epi8(l, z), _mm_unpackhi_epi8(r, z));
 
       // top & bottom pixels unpacked and added together
-      const __m128i tblo = _mm_add_epi16(_mm_unpacklo_epi8(t,z),
-                                         _mm_unpacklo_epi8(b,z));
-      const __m128i tbhi = _mm_add_epi16(_mm_unpackhi_epi8(t,z),
-                                         _mm_unpackhi_epi8(b,z));
+      const __m128i tblo =
+          _mm_add_epi16(_mm_unpacklo_epi8(t, z), _mm_unpacklo_epi8(b, z));
+      const __m128i tbhi =
+          _mm_add_epi16(_mm_unpackhi_epi8(t, z), _mm_unpackhi_epi8(b, z));
 
       // running sum of all pixels
       msa_16 = _mm_add_epi16(msa_16, _mm_add_epi16(chi, clo));
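
// Note (not part of the patch): a scalar model of where the lr/tb sums
// above are headed.  In the next hunk (and in unchanged lines between the
// two) the center values are doubled before the subtractions, so the three
// per-pixel error terms reduce to the expressions below, consistent with
// the >> 1 and >> 2 normalization at the end of the function.  Names are
// ours.
#include <cstdint>
#include <cstdlib>

inline void SpatialErrorAt(const uint8_t* img, int stride, int x,
                           int* se, int* sev, int* seh) {
  const int c = img[x];
  const int lr = img[x - 1] + img[x + 1];            // left + right
  const int tb = img[x - stride] + img[x + stride];  // top + bottom
  *se = std::abs(4 * c - lr - tb);  // 2X2: all four neighbors
  *sev = std::abs(2 * c - tb);      // 2X1: vertical neighbors
  *seh = std::abs(2 * c - lr);      // 1X2: horizontal neighbors
}
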
@@ -190,29 +194,32 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
       const __m128i sethi = _mm_subs_epi16(chi, _mm_add_epi16(lrhi, tbhi));
 
       // Add to 16 bit running sum
-      se_16  = _mm_add_epi16(se_16, _mm_max_epi16(setlo,
-                                                  _mm_subs_epi16(z, setlo)));
-      se_16  = _mm_add_epi16(se_16, _mm_max_epi16(sethi,
-                                                  _mm_subs_epi16(z, sethi)));
-      sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevtlo,
-                                                   _mm_subs_epi16(z, sevtlo)));
-      sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevthi,
-                                                   _mm_subs_epi16(z, sevthi)));
-      seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehtlo,
-                                                   _mm_subs_epi16(z, sehtlo)));
-      seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehthi,
-                                                   _mm_subs_epi16(z, sehthi)));
+      se_16 =
+          _mm_add_epi16(se_16, _mm_max_epi16(setlo, _mm_subs_epi16(z, setlo)));
+      se_16 =
+          _mm_add_epi16(se_16, _mm_max_epi16(sethi, _mm_subs_epi16(z, sethi)));
+      sev_16 = _mm_add_epi16(sev_16,
+                             _mm_max_epi16(sevtlo, _mm_subs_epi16(z, sevtlo)));
+      sev_16 = _mm_add_epi16(sev_16,
+                             _mm_max_epi16(sevthi, _mm_subs_epi16(z, sevthi)));
+      seh_16 = _mm_add_epi16(seh_16,
+                             _mm_max_epi16(sehtlo, _mm_subs_epi16(z, sehtlo)));
+      seh_16 = _mm_add_epi16(seh_16,
+                             _mm_max_epi16(sehthi, _mm_subs_epi16(z, sehthi)));
     }
 
     // Add to 32 bit running sum as to not roll over.
-    se_32  = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16,z),
-                                                _mm_unpacklo_epi16(se_16,z)));
-    sev_32 = _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16,z),
-                                                 _mm_unpacklo_epi16(sev_16,z)));
-    seh_32 = _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16,z),
-                                                 _mm_unpacklo_epi16(seh_16,z)));
-    msa_32 = _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16,z),
-                                                 _mm_unpacklo_epi16(msa_16,z)));
+    se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16, z),
+                                               _mm_unpacklo_epi16(se_16, z)));
+    sev_32 =
+        _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16, z),
+                                            _mm_unpacklo_epi16(sev_16, z)));
+    seh_32 =
+        _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16, z),
+                                            _mm_unpacklo_epi16(seh_16, z)));
+    msa_32 =
+        _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16, z),
+                                            _mm_unpacklo_epi16(msa_16, z)));
 
     imgBuf += width_ * skip_num_;
   }
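
// Note (not part of the patch): the absolute-value idiom used above.  SSE2
// has no _mm_abs_epi16 (that arrived with SSSE3), so the code takes
// max(x, saturating(0 - x)); the saturating subtract keeps -32768 from
// wrapping back to itself and clamps it to 32767 instead.
#include <emmintrin.h>

inline __m128i Abs16(__m128i x) {  // helper name is ours
  const __m128i z = _mm_setzero_si128();
  return _mm_max_epi16(x, _mm_subs_epi16(z, x));
}
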
@@ -224,30 +231,30 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
 
   // Bring sums out of vector registers and into integer register
   // domain, summing them along the way.
-  _mm_store_si128 (&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32,z),
-                                          _mm_unpacklo_epi32(se_32,z)));
-  _mm_store_si128 (&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32,z),
-                                           _mm_unpacklo_epi32(sev_32,z)));
-  _mm_store_si128 (&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32,z),
-                                           _mm_unpacklo_epi32(seh_32,z)));
-  _mm_store_si128 (&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32,z),
-                                           _mm_unpacklo_epi32(msa_32,z)));
-
-  uint64_t *se_64 = reinterpret_cast<uint64_t*>(&se_128);
-  uint64_t *sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
-  uint64_t *seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
-  uint64_t *msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
-
-  const uint32_t spatialErrSum  = se_64[0] + se_64[1];
+  _mm_store_si128(&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32, z),
+                                         _mm_unpacklo_epi32(se_32, z)));
+  _mm_store_si128(&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32, z),
+                                          _mm_unpacklo_epi32(sev_32, z)));
+  _mm_store_si128(&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32, z),
+                                          _mm_unpacklo_epi32(seh_32, z)));
+  _mm_store_si128(&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32, z),
+                                          _mm_unpacklo_epi32(msa_32, z)));
+
+  uint64_t* se_64 = reinterpret_cast<uint64_t*>(&se_128);
+  uint64_t* sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
+  uint64_t* seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
+  uint64_t* msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
+
+  const uint32_t spatialErrSum = se_64[0] + se_64[1];
   const uint32_t spatialErrVSum = sev_64[0] + sev_64[1];
   const uint32_t spatialErrHSum = seh_64[0] + seh_64[1];
   const uint32_t pixelMSA = msa_64[0] + msa_64[1];
 
   // Normalize over all pixels.
-  const float spatialErr = (float)(spatialErrSum >> 2);
-  const float spatialErrH = (float)(spatialErrHSum >> 1);
-  const float spatialErrV = (float)(spatialErrVSum >> 1);
-  const float norm = (float)pixelMSA;
+  const float spatialErr = static_cast<float>(spatialErrSum >> 2);
+  const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
+  const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
+  const float norm = static_cast<float>(pixelMSA);
 
   // 2X2:
   spatial_pred_err_ = spatialErr / norm;
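
// Note (not part of the patch): the normalization above in one place.  The
// shifts undo the predictor weights (the 2X2 term counts the center four
// times, the 1-D terms twice), and dividing by the total pixel sum makes
// the metrics independent of brightness and frame size.  Name is ours.
#include <cstdint>

inline float NormalizedError(uint32_t err_sum, unsigned weight_shift,
                             uint32_t pixel_sum) {
  return static_cast<float>(err_sum >> weight_shift) /
         static_cast<float>(pixel_sum);
}
// e.g. spatial_pred_err_   == NormalizedError(spatialErrSum, 2, pixelMSA)
//      spatial_pred_err_v_ == NormalizedError(spatialErrVSum, 1, pixelMSA)
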
@@ -258,7 +265,7 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
   // 2X1:
   spatial_pred_err_v_ = spatialErrV / norm;
 
-   return VPM_OK;
+  return VPM_OK;
 }
 
 }  // namespace webrtc