Index: webrtc/modules/audio_processing/audio_buffer_unittest.cc
diff --git a/webrtc/modules/audio_processing/audio_buffer_unittest.cc b/webrtc/modules/audio_processing/audio_buffer_unittest.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8e4517f2c4b096f5e7fe2b9a7a7b1cd14824421a
--- /dev/null
+++ b/webrtc/modules/audio_processing/audio_buffer_unittest.cc
@@ -0,0 +1,144 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/audio_buffer.h" |
+#include "testing/gtest/include/gtest/gtest.h" |
+#include "testing/gmock/include/gmock/gmock.h" |
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+TEST(AudioBufferHelpersTest, DownmixInterleavedToMonoImpl) {
+  {
+    const int kNumMultichannelFrames = 4;
+    const int kNumChannels = 1;
+    const float interleaved[kNumChannels * kNumMultichannelFrames] = {
+        1.f, 2.f, -1.f, -3.f};
+    float deinterleaved[kNumMultichannelFrames];
+
+    DownmixInterleavedToMonoImpl<float, float>(
+        interleaved, deinterleaved, kNumMultichannelFrames, kNumChannels);
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(interleaved));
+  }
+  {
+    const int kNumMultichannelFrames = 2;
+    const int kNumChannels = 2;
+    const float interleaved[kNumChannels * kNumMultichannelFrames] = {
+        1.f, 2.f, -1.f, -3.f};
+    float deinterleaved[kNumMultichannelFrames];
+
+    DownmixInterleavedToMonoImpl<float, float>(
+        interleaved, deinterleaved, kNumMultichannelFrames, kNumChannels);
+    const float expected[kNumMultichannelFrames] = {1.5f, -2.f};
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(expected));
+  }
+  {
+    const int kNumMultichannelFrames = 2;
+    const int kNumChannels = 3;
+    const float interleaved[kNumChannels * kNumMultichannelFrames] = {
+        1.f, 2.f, 3.f, -1.f, -3.f, 7.f};
+    float deinterleaved[kNumMultichannelFrames];
+
+    DownmixInterleavedToMonoImpl<float, float>(
+        interleaved, deinterleaved, kNumMultichannelFrames, kNumChannels);
+    const float expected[kNumMultichannelFrames] = {2.f, 1.f};
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(expected));
+  }
+  {
+    const int kNumMultichannelFrames = 2;
+    const int kNumChannels = 2;
+    const int16_t interleaved[kNumChannels * kNumMultichannelFrames] = {
+        10, 20, -10, -30};
+    int16_t deinterleaved[kNumMultichannelFrames];
+
+    DownmixInterleavedToMonoImpl<int16_t, int32_t>(
+        interleaved, deinterleaved, kNumMultichannelFrames, kNumChannels);
+    const int16_t expected[kNumMultichannelFrames] = {15, -20};
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(expected));
+  }
+  {
+    const int kNumMultichannelFrames = 3;
+    const int kNumChannels = 3;
+    const int16_t interleaved[kNumChannels * kNumMultichannelFrames] = {
+        30000, 30000, 24001, -5, -10, -20, -30000, -30999, -30000};
+    int16_t deinterleaved[kNumMultichannelFrames];
+
+    DownmixInterleavedToMonoImpl<int16_t, int32_t>(
+        interleaved, deinterleaved, kNumMultichannelFrames, kNumChannels);
+    const int16_t expected[kNumMultichannelFrames] = {28000, -11, -30333};
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(expected));
+  }
+}
+
+TEST(AudioBufferHelpersTest, DownmixToMonoTest) {
+  {
+    const int kNumMultichannelFrames = 4;
+    const int kNumChannels = 1;
+    const float input_data[kNumChannels][kNumMultichannelFrames] = {
+        {1.f, 2.f, -1.f, -3.f}};
+    const float* input[kNumChannels];
+    for (int i = 0; i < kNumChannels; ++i) {
+      input[i] = input_data[i];
+    }
+
+    float downmixed[kNumMultichannelFrames];
+
+    DownmixToMono<float, float>(kNumMultichannelFrames, downmixed, input,
+                                kNumChannels);
+
+    EXPECT_THAT(downmixed, ElementsAreArray(input_data[0]));
+  }
+  {
+    const int kNumMultichannelFrames = 3;
+    const int kNumChannels = 2;
+    const float input_data[kNumChannels][kNumMultichannelFrames] = {
+        {1.f, 2.f, -1.f}, {3.f, 0.f, 1.f}};
+    const float* input[kNumChannels];
+    for (int i = 0; i < kNumChannels; ++i) {
+      input[i] = input_data[i];
+    }
+
+    float downmixed[kNumMultichannelFrames];
+    const float expected[kNumMultichannelFrames] = {2.f, 1.f, 0.f};
+
+    DownmixToMono<float, float>(kNumMultichannelFrames, downmixed, input,
+                                kNumChannels);
+
+    EXPECT_THAT(downmixed, ElementsAreArray(expected));
+  }
+  {
+    const int kNumMultichannelFrames = 3;
+    const int kNumChannels = 3;
+    const int16_t input_data[kNumChannels][kNumMultichannelFrames] = {
+        {30000, -5, -30000}, {30000, -10, -30999}, {24001, -20, -30000}};
+    const int16_t* input[kNumChannels];
+    for (int i = 0; i < kNumChannels; ++i) {
+      input[i] = input_data[i];
+    }
+
+    int16_t downmixed[kNumMultichannelFrames];
+    const int16_t expected[kNumMultichannelFrames] = {28000, -11, -30333};
+
+    DownmixToMono<int16_t, int32_t>(kNumMultichannelFrames, downmixed, input,
+                                    kNumChannels);
+
+    EXPECT_THAT(downmixed, ElementsAreArray(expected));
+  }
+}
+
+}  // namespace
+}  // namespace webrtc
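
For reference, the expectations in this test pin down the downmix behavior: each output frame is the average of that frame's channels, accumulated in the wider intermediate type (the second template argument) so that sums such as 30000 + 30000 + 24001 cannot overflow int16_t, with the integer division truncating toward zero (hence -35 / 3 == -11 rather than -12). The sketch below is illustrative only; it is inferred from the call sites and expected values above, mirrors their parameter order, and is not the actual WebRTC implementation of these helpers.

// Illustrative sketch (assumed behavior, not the WebRTC source): average the
// channels of each frame into a mono output, accumulating in Intermediate.
template <typename T, typename Intermediate>
void DownmixInterleavedToMonoImpl(const T* interleaved,
                                  T* deinterleaved,
                                  int num_multichannel_frames,
                                  int num_channels) {
  for (int i = 0; i < num_multichannel_frames; ++i) {
    Intermediate value = interleaved[i * num_channels];
    for (int j = 1; j < num_channels; ++j) {
      value += interleaved[i * num_channels + j];
    }
    // For the int16_t/int32_t instantiation this division truncates toward
    // zero, matching the {28000, -11, -30333} expectations in the test.
    deinterleaved[i] = static_cast<T>(value / num_channels);
  }
}

// Same averaging over one-pointer-per-channel input, with the parameter
// order used by the DownmixToMonoTest call sites above.
template <typename T, typename Intermediate>
void DownmixToMono(int num_multichannel_frames,
                   T* downmixed,
                   const T* const* input_channels,
                   int num_channels) {
  for (int i = 0; i < num_multichannel_frames; ++i) {
    Intermediate value = input_channels[0][i];
    for (int j = 1; j < num_channels; ++j) {
      value += input_channels[j][i];
    }
    downmixed[i] = static_cast<T>(value / num_channels);
  }
}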