OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 35 matching lines...)
46 } | 46 } |
47 | 47 |
48 AudioFrame frame_for_mixing; | 48 AudioFrame frame_for_mixing; |
49 | 49 |
50 } // namespace | 50 } // namespace |
51 | 51 |
52 class MockMixerAudioSource : public AudioMixer::Source { | 52 class MockMixerAudioSource : public AudioMixer::Source { |
53 public: | 53 public: |
54 MockMixerAudioSource() | 54 MockMixerAudioSource() |
55 : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) { | 55 : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) { |
56 ON_CALL(*this, GetAudioFrameWithInfo(_)) | 56 ON_CALL(*this, GetAudioFrameWithInfo(_, _)) |
57 .WillByDefault( | 57 .WillByDefault( |
58 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithInfo)); | 58 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithInfo)); |
59 } | 59 } |
60 | 60 |
61 MOCK_METHOD1(GetAudioFrameWithInfo, AudioFrameWithInfo(int sample_rate_hz)); | 61 MOCK_METHOD2(GetAudioFrameWithInfo, |
| 62 AudioFrameInfo(int sample_rate_hz, AudioFrame* audio_frame)); |
62 | 63 |
63 MOCK_METHOD0(ssrc, int()); | 64 MOCK_METHOD0(Ssrc, int()); |
64 | 65 |
65 AudioFrame* fake_frame() { return &fake_frame_; } | 66 AudioFrame* fake_frame() { return &fake_frame_; } |
66 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } | 67 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } |
67 void set_fake_info(const AudioFrameInfo audio_frame_info) { | 68 void set_fake_info(const AudioFrameInfo audio_frame_info) { |
68 fake_audio_frame_info_ = audio_frame_info; | 69 fake_audio_frame_info_ = audio_frame_info; |
69 } | 70 } |
70 | 71 |
71 private: | 72 private: |
72 AudioFrame fake_frame_, fake_output_frame_; | 73 AudioFrame fake_frame_; |
73 AudioFrameInfo fake_audio_frame_info_; | 74 AudioFrameInfo fake_audio_frame_info_; |
74 AudioFrameWithInfo FakeAudioFrameWithInfo(int sample_rate_hz) { | 75 AudioFrameInfo FakeAudioFrameWithInfo(int sample_rate_hz, |
75 fake_output_frame_.CopyFrom(fake_frame_); | 76 AudioFrame* audio_frame) { |
76 return { | 77 audio_frame->CopyFrom(fake_frame_); |
77 &fake_output_frame_, // audio_frame_pointer | 78 return fake_info(); // audio_frame_info |
78 fake_info(), // audio_frame_info | |
79 }; | |
80 } | 79 } |
81 }; | 80 }; |
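The NEW column reflects the updated AudioMixer::Source contract: GetAudioFrameWithInfo now fills a caller-provided AudioFrame through an out-parameter and returns only an AudioFrameInfo, instead of returning an AudioFrameWithInfo struct. A minimal sketch of how a caller might consume the new signature; the wrapper name PullFrame is a hypothetical illustration, not code from this CL:

    // Illustrative only: uses the AudioMixer::Source and AudioFrame types that
    // appear elsewhere in this file. The wrapper itself is hypothetical.
    AudioMixer::Source::AudioFrameInfo PullFrame(AudioMixer::Source* source,
                                                 int sample_rate_hz,
                                                 AudioFrame* frame) {
      // The source writes its audio into |frame| and reports how the caller
      // should treat it (e.g. kNormal or kMuted) via the return value.
      return source->GetAudioFrameWithInfo(sample_rate_hz, frame);
    }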
82 | 81 |
83 // Creates participants from |frames| and |frame_info| and adds them | 82 // Creates participants from |frames| and |frame_info| and adds them |
84 // to the mixer. Compares mixed status with |expected_status| | 83 // to the mixer. Compares mixed status with |expected_status| |
85 void MixAndCompare( | 84 void MixAndCompare( |
86 const std::vector<AudioFrame>& frames, | 85 const std::vector<AudioFrame>& frames, |
87 const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info, | 86 const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info, |
88 const std::vector<bool>& expected_status) { | 87 const std::vector<bool>& expected_status) { |
89 int num_audio_sources = frames.size(); | 88 int num_audio_sources = frames.size(); |
90 RTC_DCHECK(frames.size() == frame_info.size()); | 89 RTC_DCHECK(frames.size() == frame_info.size()); |
91 RTC_DCHECK(frame_info.size() == expected_status.size()); | 90 RTC_DCHECK(frame_info.size() == expected_status.size()); |
92 | 91 |
93 const auto mixer = AudioMixerImpl::Create(); | 92 const auto mixer = AudioMixerImpl::Create(); |
94 std::vector<MockMixerAudioSource> participants(num_audio_sources); | 93 std::vector<MockMixerAudioSource> participants(num_audio_sources); |
95 | 94 |
96 for (int i = 0; i < num_audio_sources; i++) { | 95 for (int i = 0; i < num_audio_sources; i++) { |
97 participants[i].fake_frame()->CopyFrom(frames[i]); | 96 participants[i].fake_frame()->CopyFrom(frames[i]); |
98 participants[i].set_fake_info(frame_info[i]); | 97 participants[i].set_fake_info(frame_info[i]); |
99 } | 98 } |
100 | 99 |
101 for (int i = 0; i < num_audio_sources; i++) { | 100 for (int i = 0; i < num_audio_sources; i++) { |
102 EXPECT_TRUE(mixer->AddSource(&participants[i])); | 101 EXPECT_TRUE(mixer->AddSource(&participants[i])); |
103 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz)) | 102 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
104 .Times(Exactly(1)); | 103 .Times(Exactly(1)); |
105 } | 104 } |
106 | 105 |
107 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 106 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
108 | 107 |
109 for (int i = 0; i < num_audio_sources; i++) { | 108 for (int i = 0; i < num_audio_sources; i++) { |
110 EXPECT_EQ(expected_status[i], | 109 EXPECT_EQ(expected_status[i], |
111 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) | 110 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
112 << "Mixed status of AudioSource #" << i << " wrong."; | 111 << "Mixed status of AudioSource #" << i << " wrong."; |
113 } | 112 } |
114 } | 113 } |
115 | 114 |
116 TEST(AudioMixer, LargestEnergyVadActiveMixed) { | 115 TEST(AudioMixer, LargestEnergyVadActiveMixed) { |
117 constexpr int kAudioSources = | 116 constexpr int kAudioSources = |
118 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 3; | 117 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 3; |
119 | 118 |
120 const auto mixer = AudioMixerImpl::Create(); | 119 const auto mixer = AudioMixerImpl::Create(); |
121 | 120 |
122 MockMixerAudioSource participants[kAudioSources]; | 121 MockMixerAudioSource participants[kAudioSources]; |
123 | 122 |
124 for (int i = 0; i < kAudioSources; ++i) { | 123 for (int i = 0; i < kAudioSources; ++i) { |
125 ResetFrame(participants[i].fake_frame()); | 124 ResetFrame(participants[i].fake_frame()); |
126 | 125 |
127 // We set the 80th sample value since the first 80 samples may be | 126 // We set the 80th sample value since the first 80 samples may be |
128 // modified by a ramped-in window. | 127 // modified by a ramped-in window. |
129 participants[i].fake_frame()->data_[80] = i; | 128 participants[i].fake_frame()->data_[80] = i; |
130 | 129 |
131 EXPECT_TRUE(mixer->AddSource(&participants[i])); | 130 EXPECT_TRUE(mixer->AddSource(&participants[i])); |
132 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_)).Times(Exactly(1)); | 131 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, _)).Times(Exactly(1)); |
133 } | 132 } |
134 | 133 |
135 // The last participant gives an audio frame with passive VAD, although it | 134 // The last participant gives an audio frame with passive VAD, although it |
136 // has the largest energy. | 135 // has the largest energy. |
137 participants[kAudioSources - 1].fake_frame()->vad_activity_ = | 136 participants[kAudioSources - 1].fake_frame()->vad_activity_ = |
138 AudioFrame::kVadPassive; | 137 AudioFrame::kVadPassive; |
139 | 138 |
140 AudioFrame audio_frame; | 139 AudioFrame audio_frame; |
141 mixer->Mix(kDefaultSampleRateHz, | 140 mixer->Mix(kDefaultSampleRateHz, |
142 1, // number of channels | 141 1, // number of channels |
(...skipping 21 matching lines...)
164 | 163 |
165 ResetFrame(participant.fake_frame()); | 164 ResetFrame(participant.fake_frame()); |
166 const int n_samples = participant.fake_frame()->samples_per_channel_; | 165 const int n_samples = participant.fake_frame()->samples_per_channel_; |
167 | 166 |
168 // Modify the frame so that it's not zero. | 167 // Modify the frame so that it's not zero. |
169 for (int j = 0; j < n_samples; j++) { | 168 for (int j = 0; j < n_samples; j++) { |
170 participant.fake_frame()->data_[j] = j; | 169 participant.fake_frame()->data_[j] = j; |
171 } | 170 } |
172 | 171 |
173 EXPECT_TRUE(mixer->AddSource(&participant)); | 172 EXPECT_TRUE(mixer->AddSource(&participant)); |
174 EXPECT_CALL(participant, GetAudioFrameWithInfo(_)).Times(Exactly(2)); | 173 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2)); |
175 | 174 |
176 AudioFrame audio_frame; | 175 AudioFrame audio_frame; |
177 // Two mix iterations to compare after the ramp-up step. | 176 // Two mix iterations to compare after the ramp-up step. |
178 for (int i = 0; i < 2; i++) { | 177 for (int i = 0; i < 2; i++) { |
179 mixer->Mix(kDefaultSampleRateHz, | 178 mixer->Mix(kDefaultSampleRateHz, |
180 1, // number of channels | 179 1, // number of channels |
181 &audio_frame); | 180 &audio_frame); |
182 } | 181 } |
183 | 182 |
184 EXPECT_EQ( | 183 EXPECT_EQ( |
185 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); | 184 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); |
186 } | 185 } |
187 | 186 |
188 TEST(AudioMixer, ParticipantSampleRate) { | 187 TEST(AudioMixer, ParticipantSampleRate) { |
189 const auto mixer = AudioMixerImpl::Create(); | 188 const auto mixer = AudioMixerImpl::Create(); |
190 | 189 |
191 MockMixerAudioSource participant; | 190 MockMixerAudioSource participant; |
192 ResetFrame(participant.fake_frame()); | 191 ResetFrame(participant.fake_frame()); |
193 | 192 |
194 EXPECT_TRUE(mixer->AddSource(&participant)); | 193 EXPECT_TRUE(mixer->AddSource(&participant)); |
195 for (auto frequency : {8000, 16000, 32000, 48000}) { | 194 for (auto frequency : {8000, 16000, 32000, 48000}) { |
196 EXPECT_CALL(participant, GetAudioFrameWithInfo(frequency)) | 195 EXPECT_CALL(participant, GetAudioFrameWithInfo(frequency, _)) |
197 .Times(Exactly(1)); | 196 .Times(Exactly(1)); |
198 participant.fake_frame()->sample_rate_hz_ = frequency; | 197 participant.fake_frame()->sample_rate_hz_ = frequency; |
199 participant.fake_frame()->samples_per_channel_ = frequency / 100; | 198 participant.fake_frame()->samples_per_channel_ = frequency / 100; |
200 mixer->Mix(frequency, 1, &frame_for_mixing); | 199 mixer->Mix(frequency, 1, &frame_for_mixing); |
201 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); | 200 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); |
202 } | 201 } |
203 } | 202 } |
204 | 203 |
205 TEST(AudioMixer, ParticipantNumberOfChannels) { | 204 TEST(AudioMixer, ParticipantNumberOfChannels) { |
206 const auto mixer = AudioMixerImpl::Create(); | 205 const auto mixer = AudioMixerImpl::Create(); |
207 | 206 |
208 MockMixerAudioSource participant; | 207 MockMixerAudioSource participant; |
209 ResetFrame(participant.fake_frame()); | 208 ResetFrame(participant.fake_frame()); |
210 | 209 |
211 EXPECT_TRUE(mixer->AddSource(&participant)); | 210 EXPECT_TRUE(mixer->AddSource(&participant)); |
212 for (size_t number_of_channels : {1, 2}) { | 211 for (size_t number_of_channels : {1, 2}) { |
213 EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz)) | 212 EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
214 .Times(Exactly(1)); | 213 .Times(Exactly(1)); |
215 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); | 214 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); |
216 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); | 215 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); |
217 } | 216 } |
218 } | 217 } |
219 | 218 |
220 // The maximal number of participants is mixed in one iteration; then | 219 // The maximal number of participants is mixed in one iteration; then |
221 // another participant with higher energy is added. | 220 // another participant with higher energy is added. |
222 TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { | 221 TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { |
223 constexpr int kAudioSources = | 222 constexpr int kAudioSources = |
224 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; | 223 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
225 | 224 |
226 const auto mixer = AudioMixerImpl::Create(); | 225 const auto mixer = AudioMixerImpl::Create(); |
227 MockMixerAudioSource participants[kAudioSources]; | 226 MockMixerAudioSource participants[kAudioSources]; |
228 | 227 |
229 for (int i = 0; i < kAudioSources; i++) { | 228 for (int i = 0; i < kAudioSources; i++) { |
230 ResetFrame(participants[i].fake_frame()); | 229 ResetFrame(participants[i].fake_frame()); |
231 // Set the participant audio energy to increase with the index | 230 // Set the participant audio energy to increase with the index |
232 // |i|. | 231 // |i|. |
233 participants[i].fake_frame()->data_[0] = 100 * i; | 232 participants[i].fake_frame()->data_[0] = 100 * i; |
234 } | 233 } |
235 | 234 |
236 // Add all participants but the loudest for mixing. | 235 // Add all participants but the loudest for mixing. |
237 for (int i = 0; i < kAudioSources - 1; i++) { | 236 for (int i = 0; i < kAudioSources - 1; i++) { |
238 EXPECT_TRUE(mixer->AddSource(&participants[i])); | 237 EXPECT_TRUE(mixer->AddSource(&participants[i])); |
239 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz)) | 238 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
240 .Times(Exactly(1)); | 239 .Times(Exactly(1)); |
241 } | 240 } |
242 | 241 |
243 // First mixer iteration | 242 // First mixer iteration |
244 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 243 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
245 | 244 |
246 // All participants but the loudest should have been mixed. | 245 // All participants but the loudest should have been mixed. |
247 for (int i = 0; i < kAudioSources - 1; i++) { | 246 for (int i = 0; i < kAudioSources - 1; i++) { |
248 EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) | 247 EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
249 << "Mixed status of AudioSource #" << i << " wrong."; | 248 << "Mixed status of AudioSource #" << i << " wrong."; |
250 } | 249 } |
251 | 250 |
252 // Add new participant with higher energy. | 251 // Add new participant with higher energy. |
253 EXPECT_TRUE(mixer->AddSource(&participants[kAudioSources - 1])); | 252 EXPECT_TRUE(mixer->AddSource(&participants[kAudioSources - 1])); |
254 for (int i = 0; i < kAudioSources; i++) { | 253 for (int i = 0; i < kAudioSources; i++) { |
255 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz)) | 254 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
256 .Times(Exactly(1)); | 255 .Times(Exactly(1)); |
257 } | 256 } |
258 | 257 |
259 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 258 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
260 | 259 |
261 // The quietest participant should not have been mixed. | 260 // The quietest participant should not have been mixed. |
262 EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0])) | 261 EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0])) |
263 << "Mixed status of AudioSource #0 wrong."; | 262 << "Mixed status of AudioSource #0 wrong."; |
264 | 263 |
265 // The loudest participants should have been mixed. | 264 // The loudest participants should have been mixed. |
(...skipping 14 matching lines...)
280 RTC_FROM_HERE, &AudioMixerImpl::Create); | 279 RTC_FROM_HERE, &AudioMixerImpl::Create); |
281 MockMixerAudioSource participant; | 280 MockMixerAudioSource participant; |
282 | 281 |
283 ResetFrame(participant.fake_frame()); | 282 ResetFrame(participant.fake_frame()); |
284 | 283 |
285 participant_thread->Start(); | 284 participant_thread->Start(); |
286 EXPECT_TRUE(participant_thread->Invoke<int>( | 285 EXPECT_TRUE(participant_thread->Invoke<int>( |
287 RTC_FROM_HERE, | 286 RTC_FROM_HERE, |
288 rtc::Bind(&AudioMixer::AddSource, mixer.get(), &participant))); | 287 rtc::Bind(&AudioMixer::AddSource, mixer.get(), &participant))); |
289 | 288 |
290 EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz)) | 289 EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
291 .Times(Exactly(1)); | 290 .Times(Exactly(1)); |
292 | 291 |
293 // Do one mixer iteration | 292 // Do one mixer iteration |
294 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 293 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
295 } | 294 } |
296 | 295 |
297 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { | 296 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { |
298 constexpr int kAudioSources = | 297 constexpr int kAudioSources = |
299 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; | 298 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
300 | 299 |
(...skipping 62 matching lines...)
363 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); | 362 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
364 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; | 363 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; |
365 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, | 364 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
366 std::numeric_limits<int16_t>::max()); | 365 std::numeric_limits<int16_t>::max()); |
367 std::vector<bool> expected_status(kAudioSources, true); | 366 std::vector<bool> expected_status(kAudioSources, true); |
368 expected_status[0] = false; | 367 expected_status[0] = false; |
369 | 368 |
370 MixAndCompare(frames, frame_info, expected_status); | 369 MixAndCompare(frames, frame_info, expected_status); |
371 } | 370 } |
372 } // namespace webrtc | 371 } // namespace webrtc |