OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
| 11 #include "webrtc/modules/audio_mixer/frame_combiner.h" |
| 12 |
| 13 #include <numeric> |
| 14 #include <sstream> |
| 15 #include <string> |
| 16 |
| 17 #include "webrtc/base/checks.h" |
| 18 #include "webrtc/test/gtest.h" |
| 19 |
| 20 namespace webrtc { |
| 21 |
| 22 namespace { |
// Builds the human-readable parameter description that the tests feed
// to SCOPED_TRACE, e.g. "Sample rate: 8000 Number of channels: 1
// Number of sources: 0".
std::string ProduceDebugText(int sample_rate_hz,
                             int number_of_channels,
                             int number_of_sources) {
  std::string description = "Sample rate: " + std::to_string(sample_rate_hz);
  description += " Number of channels: " + std::to_string(number_of_channels);
  description += " Number of sources: " + std::to_string(number_of_sources);
  return description;
}
| 32 |
// File-scope frames shared by every test below: frame1/frame2 serve as
// mixer inputs (re-initialized via SetUpFrames() before use) and
// audio_frame_for_mixing receives Combine()'s output.
// NOTE(review): this is mutable state shared across TEST cases; tests
// must fully re-initialize what they read rather than rely on fixtures.
AudioFrame frame1;
AudioFrame frame2;
AudioFrame audio_frame_for_mixing;
| 36 |
| 37 void SetUpFrames(int sample_rate_hz, int number_of_channels) { |
| 38 for (auto* frame : {&frame1, &frame2}) { |
| 39 frame->UpdateFrame(-1, 0, nullptr, |
| 40 rtc::CheckedDivExact(sample_rate_hz, 100), |
| 41 sample_rate_hz, AudioFrame::kNormalSpeech, |
| 42 AudioFrame::kVadActive, number_of_channels); |
| 43 } |
| 44 } |
| 45 } // namespace |
| 46 |
| 47 TEST(FrameCombiner, BasicApiCallsLimiter) { |
| 48 FrameCombiner combiner(true); |
| 49 for (const int rate : {8000, 16000, 32000, 48000}) { |
| 50 for (const int number_of_channels : {1, 2}) { |
| 51 const std::vector<AudioFrame*> all_frames = {&frame1, &frame2}; |
| 52 SetUpFrames(rate, number_of_channels); |
| 53 |
| 54 for (const int number_of_frames : {0, 1, 2}) { |
| 55 SCOPED_TRACE( |
| 56 ProduceDebugText(rate, number_of_channels, number_of_frames)); |
| 57 const std::vector<AudioFrame*> frames_to_combine( |
| 58 all_frames.begin(), all_frames.begin() + number_of_frames); |
| 59 combiner.Combine(frames_to_combine, number_of_channels, rate, |
| 60 &audio_frame_for_mixing); |
| 61 } |
| 62 } |
| 63 } |
| 64 } |
| 65 |
| 66 // No APM limiter means no AudioProcessing::NativeRate restriction |
| 67 // on rate. The rate has to be divisible by 100 since we use |
| 68 // 10 ms frames, though. |
| 69 TEST(FrameCombiner, BasicApiCallsNoLimiter) { |
| 70 FrameCombiner combiner(false); |
| 71 for (const int rate : {8000, 10000, 11000, 32000, 44100}) { |
| 72 for (const int number_of_channels : {1, 2}) { |
| 73 const std::vector<AudioFrame*> all_frames = {&frame1, &frame2}; |
| 74 SetUpFrames(rate, number_of_channels); |
| 75 |
| 76 for (const int number_of_frames : {0, 1, 2}) { |
| 77 SCOPED_TRACE( |
| 78 ProduceDebugText(rate, number_of_channels, number_of_frames)); |
| 79 const std::vector<AudioFrame*> frames_to_combine( |
| 80 all_frames.begin(), all_frames.begin() + number_of_frames); |
| 81 combiner.Combine(frames_to_combine, number_of_channels, rate, |
| 82 &audio_frame_for_mixing); |
| 83 } |
| 84 } |
| 85 } |
| 86 } |
| 87 |
| 88 TEST(FrameCombiner, CombiningZeroFramesShouldProduceSilence) { |
| 89 FrameCombiner combiner(false); |
| 90 for (const int rate : {8000, 10000, 11000, 32000, 44100}) { |
| 91 for (const int number_of_channels : {1, 2}) { |
| 92 SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 0)); |
| 93 |
| 94 const std::vector<AudioFrame*> frames_to_combine; |
| 95 combiner.Combine(frames_to_combine, number_of_channels, rate, |
| 96 &audio_frame_for_mixing); |
| 97 |
| 98 const std::vector<int16_t> mixed_data( |
| 99 audio_frame_for_mixing.data_, |
| 100 audio_frame_for_mixing.data_ + number_of_channels * rate / 100); |
| 101 |
| 102 const std::vector<int16_t> expected(number_of_channels * rate / 100, 0); |
| 103 EXPECT_EQ(mixed_data, expected); |
| 104 } |
| 105 } |
| 106 } |
| 107 |
| 108 TEST(FrameCombiner, CombiningOneFrameShouldNotChangeFrame) { |
| 109 FrameCombiner combiner(false); |
| 110 for (const int rate : {8000, 10000, 11000, 32000, 44100}) { |
| 111 for (const int number_of_channels : {1, 2}) { |
| 112 SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 1)); |
| 113 |
| 114 SetUpFrames(rate, number_of_channels); |
| 115 std::iota(frame1.data_, frame1.data_ + number_of_channels * rate / 100, |
| 116 0); |
| 117 const std::vector<AudioFrame*> frames_to_combine = {&frame1}; |
| 118 combiner.Combine(frames_to_combine, number_of_channels, rate, |
| 119 &audio_frame_for_mixing); |
| 120 |
| 121 const std::vector<int16_t> mixed_data( |
| 122 audio_frame_for_mixing.data_, |
| 123 audio_frame_for_mixing.data_ + number_of_channels * rate / 100); |
| 124 |
| 125 std::vector<int16_t> expected(number_of_channels * rate / 100); |
| 126 std::iota(expected.begin(), expected.end(), 0); |
| 127 EXPECT_EQ(mixed_data, expected); |
| 128 } |
| 129 } |
| 130 } |
| 131 |
| 132 } // namespace webrtc |
OLD | NEW |