OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <string.h> | 11 #include <string.h> |
12 | 12 |
13 #include <memory> | 13 #include <memory> |
14 #include <utility> | 14 #include <utility> |
15 | 15 |
16 #include "webrtc/base/bind.h" | 16 #include "webrtc/base/bind.h" |
17 #include "webrtc/base/thread.h" | 17 #include "webrtc/base/thread.h" |
18 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 18 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
19 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" | 19 #include "webrtc/modules/audio_mixer/audio_mixer.h" |
20 #include "webrtc/test/gmock.h" | 20 #include "webrtc/test/gmock.h" |
21 | 21 |
22 using testing::_; | 22 using testing::_; |
23 using testing::Exactly; | 23 using testing::Exactly; |
24 using testing::Invoke; | 24 using testing::Invoke; |
25 using testing::Return; | 25 using testing::Return; |
26 | 26 |
27 namespace webrtc { | 27 namespace webrtc { |
28 | 28 |
29 namespace { | 29 namespace { |
(...skipping 11 matching lines...) |
41 // Frame duration 10ms. | 41 // Frame duration 10ms. |
42 frame->samples_per_channel_ = kDefaultSampleRateHz / 100; | 42 frame->samples_per_channel_ = kDefaultSampleRateHz / 100; |
43 frame->vad_activity_ = AudioFrame::kVadActive; | 43 frame->vad_activity_ = AudioFrame::kVadActive; |
44 frame->speech_type_ = AudioFrame::kNormalSpeech; | 44 frame->speech_type_ = AudioFrame::kNormalSpeech; |
45 } | 45 } |
46 | 46 |
47 AudioFrame frame_for_mixing; | 47 AudioFrame frame_for_mixing; |
48 | 48 |
49 } // namespace | 49 } // namespace |
50 | 50 |
51 class MockMixerAudioSource : public MixerAudioSource { | 51 class MockMixerAudioSource : public AudioMixer::Source { |
52 public: | 52 public: |
53 MockMixerAudioSource() | 53 MockMixerAudioSource() |
54 : fake_audio_frame_info_(MixerAudioSource::AudioFrameInfo::kNormal) { | 54 : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) { |
55 ON_CALL(*this, GetAudioFrameWithMuted(_, _)) | 55 ON_CALL(*this, GetAudioFrameWithInfo(_, _)) |
56 .WillByDefault( | 56 .WillByDefault( |
57 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted)); | 57 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithInfo)); |
58 } | 58 } |
59 | 59 |
60 MOCK_METHOD2(GetAudioFrameWithMuted, | 60 MOCK_METHOD2(GetAudioFrameWithInfo, |
61 AudioFrameWithMuted(const int32_t id, int sample_rate_hz)); | 61 AudioFrameWithInfo(const int32_t id, int sample_rate_hz)); |
62 | 62 |
63 AudioFrame* fake_frame() { return &fake_frame_; } | 63 AudioFrame* fake_frame() { return &fake_frame_; } |
64 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } | 64 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } |
65 void set_fake_info(const AudioFrameInfo audio_frame_info) { | 65 void set_fake_info(const AudioFrameInfo audio_frame_info) { |
66 fake_audio_frame_info_ = audio_frame_info; | 66 fake_audio_frame_info_ = audio_frame_info; |
67 } | 67 } |
68 | 68 |
69 private: | 69 private: |
70 AudioFrame fake_frame_, fake_output_frame_; | 70 AudioFrame fake_frame_, fake_output_frame_; |
71 AudioFrameInfo fake_audio_frame_info_; | 71 AudioFrameInfo fake_audio_frame_info_; |
72 AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id, | 72 AudioFrameWithInfo FakeAudioFrameWithInfo(const int32_t id, |
73 int sample_rate_hz) { | 73 int sample_rate_hz) { |
74 fake_output_frame_.CopyFrom(fake_frame_); | 74 fake_output_frame_.CopyFrom(fake_frame_); |
75 return { | 75 return { |
76 &fake_output_frame_, // audio_frame_pointer | 76 &fake_output_frame_, // audio_frame_pointer |
77 fake_info(), // audio_frame_info | 77 fake_info(), // audio_frame_info |
78 }; | 78 }; |
79 } | 79 } |
80 }; | 80 }; |
81 | 81 |
82 // Creates participants from |frames| and |frame_info| and adds them | 82 // Creates participants from |frames| and |frame_info| and adds them |
83 // to the mixer. Compares mixed status with |expected_status| | 83 // to the mixer. Compares mixed status with |expected_status| |
84 void MixAndCompare( | 84 void MixAndCompare( |
85 const std::vector<AudioFrame>& frames, | 85 const std::vector<AudioFrame>& frames, |
86 const std::vector<MixerAudioSource::AudioFrameInfo>& frame_info, | 86 const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info, |
87 const std::vector<bool>& expected_status) { | 87 const std::vector<bool>& expected_status) { |
88 int num_audio_sources = frames.size(); | 88 int num_audio_sources = frames.size(); |
89 RTC_DCHECK(frames.size() == frame_info.size()); | 89 RTC_DCHECK(frames.size() == frame_info.size()); |
90 RTC_DCHECK(frame_info.size() == expected_status.size()); | 90 RTC_DCHECK(frame_info.size() == expected_status.size()); |
91 | 91 |
92 const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create(kId)); | 92 const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create(kId)); |
93 std::vector<MockMixerAudioSource> participants(num_audio_sources); | 93 std::vector<MockMixerAudioSource> participants(num_audio_sources); |
94 | 94 |
95 for (int i = 0; i < num_audio_sources; i++) { | 95 for (int i = 0; i < num_audio_sources; i++) { |
96 participants[i].fake_frame()->CopyFrom(frames[i]); | 96 participants[i].fake_frame()->CopyFrom(frames[i]); |
97 participants[i].set_fake_info(frame_info[i]); | 97 participants[i].set_fake_info(frame_info[i]); |
98 } | 98 } |
99 | 99 |
100 for (int i = 0; i < num_audio_sources; i++) { | 100 for (int i = 0; i < num_audio_sources; i++) { |
101 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 101 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
102 EXPECT_CALL(participants[i], | 102 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, kDefaultSampleRateHz)) |
103 GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | |
104 .Times(Exactly(1)); | 103 .Times(Exactly(1)); |
105 } | 104 } |
106 | 105 |
107 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 106 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
108 | 107 |
109 for (int i = 0; i < num_audio_sources; i++) { | 108 for (int i = 0; i < num_audio_sources; i++) { |
110 EXPECT_EQ(expected_status[i], | 109 EXPECT_EQ(expected_status[i], |
111 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) | 110 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
112 << "Mixed status of AudioSource #" << i << " wrong."; | 111 << "Mixed status of AudioSource #" << i << " wrong."; |
113 } | 112 } |
(...skipping 59 matching lines...) |
173 MockMixerAudioSource participants[kAudioSources]; | 172 MockMixerAudioSource participants[kAudioSources]; |
174 | 173 |
175 for (int i = 0; i < kAudioSources; ++i) { | 174 for (int i = 0; i < kAudioSources; ++i) { |
176 ResetFrame(participants[i].fake_frame()); | 175 ResetFrame(participants[i].fake_frame()); |
177 | 176 |
178 // We set the 80-th sample value since the first 80 samples may be | 177 // We set the 80-th sample value since the first 80 samples may be |
179 // modified by a ramped-in window. | 178 // modified by a ramped-in window. |
180 participants[i].fake_frame()->data_[80] = i; | 179 participants[i].fake_frame()->data_[80] = i; |
181 | 180 |
182 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 181 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
183 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_, _)) | 182 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, _)).Times(Exactly(1)); |
184 .Times(Exactly(1)); | |
185 } | 183 } |
186 | 184 |
187 // Last participant gives audio frame with passive VAD, although it has the | 185 // Last participant gives audio frame with passive VAD, although it has the |
188 // largest energy. | 186 // largest energy. |
189 participants[kAudioSources - 1].fake_frame()->vad_activity_ = | 187 participants[kAudioSources - 1].fake_frame()->vad_activity_ = |
190 AudioFrame::kVadPassive; | 188 AudioFrame::kVadPassive; |
191 | 189 |
192 AudioFrame audio_frame; | 190 AudioFrame audio_frame; |
193 mixer->Mix(kDefaultSampleRateHz, | 191 mixer->Mix(kDefaultSampleRateHz, |
194 1, // number of channels | 192 1, // number of channels |
(...skipping 20 matching lines...) |
215 | 213 |
216 ResetFrame(participant.fake_frame()); | 214 ResetFrame(participant.fake_frame()); |
217 const int n_samples = participant.fake_frame()->samples_per_channel_; | 215 const int n_samples = participant.fake_frame()->samples_per_channel_; |
218 | 216 |
219 // Modify the frame so that it's not zero. | 217 // Modify the frame so that it's not zero. |
220 for (int j = 0; j < n_samples; j++) { | 218 for (int j = 0; j < n_samples; j++) { |
221 participant.fake_frame()->data_[j] = j; | 219 participant.fake_frame()->data_[j] = j; |
222 } | 220 } |
223 | 221 |
224 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 222 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
225 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, _)).Times(Exactly(2)); | 223 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2)); |
226 | 224 |
227 AudioFrame audio_frame; | 225 AudioFrame audio_frame; |
228 // Two mix iteration to compare after the ramp-up step. | 226 // Two mix iteration to compare after the ramp-up step. |
229 for (int i = 0; i < 2; i++) { | 227 for (int i = 0; i < 2; i++) { |
230 mixer->Mix(kDefaultSampleRateHz, | 228 mixer->Mix(kDefaultSampleRateHz, |
231 1, // number of channels | 229 1, // number of channels |
232 &audio_frame); | 230 &audio_frame); |
233 } | 231 } |
234 | 232 |
235 EXPECT_EQ( | 233 EXPECT_EQ( |
236 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); | 234 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); |
237 } | 235 } |
238 | 236 |
239 TEST(AudioMixer, FrameNotModifiedForSingleAnonymousParticipant) { | 237 TEST(AudioMixer, FrameNotModifiedForSingleAnonymousParticipant) { |
240 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); | 238 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
241 | 239 |
242 MockMixerAudioSource participant; | 240 MockMixerAudioSource participant; |
243 | 241 |
244 ResetFrame(participant.fake_frame()); | 242 ResetFrame(participant.fake_frame()); |
245 const int n_samples = participant.fake_frame()->samples_per_channel_; | 243 const int n_samples = participant.fake_frame()->samples_per_channel_; |
246 | 244 |
247 // Modify the frame so that it's not zero. | 245 // Modify the frame so that it's not zero. |
248 for (int j = 0; j < n_samples; j++) { | 246 for (int j = 0; j < n_samples; j++) { |
249 participant.fake_frame()->data_[j] = j; | 247 participant.fake_frame()->data_[j] = j; |
250 } | 248 } |
251 | 249 |
252 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 250 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
253 EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&participant, true)); | 251 EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&participant, true)); |
254 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, _)).Times(Exactly(2)); | 252 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2)); |
255 | 253 |
256 AudioFrame audio_frame; | 254 AudioFrame audio_frame; |
257 // Two mix iteration to compare after the ramp-up step. | 255 // Two mix iteration to compare after the ramp-up step. |
258 for (int i = 0; i < 2; i++) { | 256 for (int i = 0; i < 2; i++) { |
259 mixer->Mix(kDefaultSampleRateHz, | 257 mixer->Mix(kDefaultSampleRateHz, |
260 1, // number of channels | 258 1, // number of channels |
261 &audio_frame); | 259 &audio_frame); |
262 } | 260 } |
263 | 261 |
264 EXPECT_EQ( | 262 EXPECT_EQ( |
265 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); | 263 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); |
266 } | 264 } |
267 | 265 |
268 TEST(AudioMixer, ParticipantSampleRate) { | 266 TEST(AudioMixer, ParticipantSampleRate) { |
269 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); | 267 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
270 | 268 |
271 MockMixerAudioSource participant; | 269 MockMixerAudioSource participant; |
272 ResetFrame(participant.fake_frame()); | 270 ResetFrame(participant.fake_frame()); |
273 | 271 |
274 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 272 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
275 for (auto frequency : {8000, 16000, 32000, 48000}) { | 273 for (auto frequency : {8000, 16000, 32000, 48000}) { |
276 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, frequency)) | 274 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, frequency)) |
277 .Times(Exactly(1)); | 275 .Times(Exactly(1)); |
278 participant.fake_frame()->sample_rate_hz_ = frequency; | 276 participant.fake_frame()->sample_rate_hz_ = frequency; |
279 participant.fake_frame()->samples_per_channel_ = frequency / 100; | 277 participant.fake_frame()->samples_per_channel_ = frequency / 100; |
280 mixer->Mix(frequency, 1, &frame_for_mixing); | 278 mixer->Mix(frequency, 1, &frame_for_mixing); |
281 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); | 279 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); |
282 } | 280 } |
283 } | 281 } |
284 | 282 |
285 TEST(AudioMixer, ParticipantNumberOfChannels) { | 283 TEST(AudioMixer, ParticipantNumberOfChannels) { |
286 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); | 284 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
287 | 285 |
288 MockMixerAudioSource participant; | 286 MockMixerAudioSource participant; |
289 ResetFrame(participant.fake_frame()); | 287 ResetFrame(participant.fake_frame()); |
290 | 288 |
291 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 289 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
292 for (size_t number_of_channels : {1, 2}) { | 290 for (size_t number_of_channels : {1, 2}) { |
293 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 291 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz)) |
294 .Times(Exactly(1)); | 292 .Times(Exactly(1)); |
295 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); | 293 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); |
296 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); | 294 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); |
297 } | 295 } |
298 } | 296 } |
299 | 297 |
300 // Test that the volume is reported as zero when the mixer input | 298 // Test that the volume is reported as zero when the mixer input |
301 // comprises only zero values. | 299 // comprises only zero values. |
302 TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) { | 300 TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) { |
303 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); | 301 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
304 | 302 |
305 MockMixerAudioSource participant; | 303 MockMixerAudioSource participant; |
306 ResetFrame(participant.fake_frame()); | 304 ResetFrame(participant.fake_frame()); |
307 | 305 |
308 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 306 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
309 for (int i = 0; i < 11; i++) { | 307 for (int i = 0; i < 11; i++) { |
310 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 308 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz)) |
311 .Times(Exactly(1)); | 309 .Times(Exactly(1)); |
312 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 310 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
313 } | 311 } |
314 | 312 |
315 EXPECT_EQ(0, mixer->GetOutputAudioLevel()); | 313 EXPECT_EQ(0, mixer->GetOutputAudioLevel()); |
316 EXPECT_EQ(0, mixer->GetOutputAudioLevelFullRange()); | 314 EXPECT_EQ(0, mixer->GetOutputAudioLevelFullRange()); |
317 } | 315 } |
318 | 316 |
319 // Test that the reported volume is maximal when the mixer | 317 // Test that the reported volume is maximal when the mixer |
320 // input comprises frames with maximal values. | 318 // input comprises frames with maximal values. |
321 TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) { | 319 TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) { |
322 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); | 320 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
323 | 321 |
324 MockMixerAudioSource participant; | 322 MockMixerAudioSource participant; |
325 ResetFrame(participant.fake_frame()); | 323 ResetFrame(participant.fake_frame()); |
326 | 324 |
327 // Fill participant frame data with maximal sound. | 325 // Fill participant frame data with maximal sound. |
328 std::fill(participant.fake_frame()->data_, | 326 std::fill(participant.fake_frame()->data_, |
329 participant.fake_frame()->data_ + kDefaultSampleRateHz / 100, | 327 participant.fake_frame()->data_ + kDefaultSampleRateHz / 100, |
330 std::numeric_limits<int16_t>::max()); | 328 std::numeric_limits<int16_t>::max()); |
331 | 329 |
332 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 330 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
333 | 331 |
334 // We do >10 iterations, because the audio level indicator only | 332 // We do >10 iterations, because the audio level indicator only |
335 // updates once every 10 calls. | 333 // updates once every 10 calls. |
336 for (int i = 0; i < 11; i++) { | 334 for (int i = 0; i < 11; i++) { |
337 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 335 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz)) |
338 .Times(Exactly(1)); | 336 .Times(Exactly(1)); |
339 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 337 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
340 } | 338 } |
341 | 339 |
342 // 9 is the highest possible audio level | 340 // 9 is the highest possible audio level |
343 EXPECT_EQ(9, mixer->GetOutputAudioLevel()); | 341 EXPECT_EQ(9, mixer->GetOutputAudioLevel()); |
344 | 342 |
345 // 0x7fff = 32767 is the highest full range audio level. | 343 // 0x7fff = 32767 is the highest full range audio level. |
346 EXPECT_EQ(std::numeric_limits<int16_t>::max(), | 344 EXPECT_EQ(std::numeric_limits<int16_t>::max(), |
347 mixer->GetOutputAudioLevelFullRange()); | 345 mixer->GetOutputAudioLevelFullRange()); |
(...skipping 11 matching lines...) |
359 for (int i = 0; i < kAudioSources; i++) { | 357 for (int i = 0; i < kAudioSources; i++) { |
360 ResetFrame(participants[i].fake_frame()); | 358 ResetFrame(participants[i].fake_frame()); |
361 // Set the participant audio energy to increase with the index | 359 // Set the participant audio energy to increase with the index |
362 // |i|. | 360 // |i|. |
363 participants[i].fake_frame()->data_[0] = 100 * i; | 361 participants[i].fake_frame()->data_[0] = 100 * i; |
364 } | 362 } |
365 | 363 |
366 // Add all participants but the loudest for mixing. | 364 // Add all participants but the loudest for mixing. |
367 for (int i = 0; i < kAudioSources - 1; i++) { | 365 for (int i = 0; i < kAudioSources - 1; i++) { |
368 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 366 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
369 EXPECT_CALL(participants[i], | 367 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, kDefaultSampleRateHz)) |
370 GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | |
371 .Times(Exactly(1)); | 368 .Times(Exactly(1)); |
372 } | 369 } |
373 | 370 |
374 // First mixer iteration | 371 // First mixer iteration |
375 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 372 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
376 | 373 |
377 // All participants but the loudest should have been mixed. | 374 // All participants but the loudest should have been mixed. |
378 for (int i = 0; i < kAudioSources - 1; i++) { | 375 for (int i = 0; i < kAudioSources - 1; i++) { |
379 EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) | 376 EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
380 << "Mixed status of AudioSource #" << i << " wrong."; | 377 << "Mixed status of AudioSource #" << i << " wrong."; |
381 } | 378 } |
382 | 379 |
383 // Add new participant with higher energy. | 380 // Add new participant with higher energy. |
384 EXPECT_EQ(0, | 381 EXPECT_EQ(0, |
385 mixer->SetMixabilityStatus(&participants[kAudioSources - 1], true)); | 382 mixer->SetMixabilityStatus(&participants[kAudioSources - 1], true)); |
386 for (int i = 0; i < kAudioSources; i++) { | 383 for (int i = 0; i < kAudioSources; i++) { |
387 EXPECT_CALL(participants[i], | 384 EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, kDefaultSampleRateHz)) |
388 GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | |
389 .Times(Exactly(1)); | 385 .Times(Exactly(1)); |
390 } | 386 } |
391 | 387 |
392 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 388 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
393 | 389 |
394 // The most quiet participant should not have been mixed. | 390 // The most quiet participant should not have been mixed. |
395 EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0])) | 391 EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0])) |
396 << "Mixed status of AudioSource #0 wrong."; | 392 << "Mixed status of AudioSource #0 wrong."; |
397 | 393 |
398 // The loudest participants should have been mixed. | 394 // The loudest participants should have been mixed. |
(...skipping 20 matching lines...) |
419 participant_thread->Start(); | 415 participant_thread->Start(); |
420 EXPECT_EQ(0, participant_thread->Invoke<int>( | 416 EXPECT_EQ(0, participant_thread->Invoke<int>( |
421 RTC_FROM_HERE, rtc::Bind(&AudioMixer::SetMixabilityStatus, | 417 RTC_FROM_HERE, rtc::Bind(&AudioMixer::SetMixabilityStatus, |
422 mixer.get(), &participant, true))); | 418 mixer.get(), &participant, true))); |
423 | 419 |
424 EXPECT_EQ( | 420 EXPECT_EQ( |
425 0, participant_thread->Invoke<int>( | 421 0, participant_thread->Invoke<int>( |
426 RTC_FROM_HERE, rtc::Bind(&AudioMixer::SetAnonymousMixabilityStatus, | 422 RTC_FROM_HERE, rtc::Bind(&AudioMixer::SetAnonymousMixabilityStatus, |
427 mixer.get(), &participant, true))); | 423 mixer.get(), &participant, true))); |
428 | 424 |
429 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 425 EXPECT_CALL(participant, GetAudioFrameWithInfo(_, kDefaultSampleRateHz)) |
430 .Times(Exactly(1)); | 426 .Times(Exactly(1)); |
431 | 427 |
432 // Do one mixer iteration | 428 // Do one mixer iteration |
433 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 429 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
434 } | 430 } |
435 | 431 |
436 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { | 432 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { |
437 constexpr int kAudioSources = | 433 constexpr int kAudioSources = |
438 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 434 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
439 | 435 |
440 std::vector<AudioFrame> frames(kAudioSources); | 436 std::vector<AudioFrame> frames(kAudioSources); |
441 for (auto& frame : frames) { | 437 for (auto& frame : frames) { |
442 ResetFrame(&frame); | 438 ResetFrame(&frame); |
443 } | 439 } |
444 | 440 |
445 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 441 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
446 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 442 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
447 frame_info[0] = MixerAudioSource::AudioFrameInfo::kMuted; | 443 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; |
448 std::vector<bool> expected_status(kAudioSources, true); | 444 std::vector<bool> expected_status(kAudioSources, true); |
449 expected_status[0] = false; | 445 expected_status[0] = false; |
450 | 446 |
451 MixAndCompare(frames, frame_info, expected_status); | 447 MixAndCompare(frames, frame_info, expected_status); |
452 } | 448 } |
453 | 449 |
454 TEST(AudioMixer, PassiveShouldMixAfterNormal) { | 450 TEST(AudioMixer, PassiveShouldMixAfterNormal) { |
455 constexpr int kAudioSources = | 451 constexpr int kAudioSources = |
456 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 452 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
457 | 453 |
458 std::vector<AudioFrame> frames(kAudioSources); | 454 std::vector<AudioFrame> frames(kAudioSources); |
459 for (auto& frame : frames) { | 455 for (auto& frame : frames) { |
460 ResetFrame(&frame); | 456 ResetFrame(&frame); |
461 } | 457 } |
462 | 458 |
463 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 459 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
464 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 460 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
465 frames[0].vad_activity_ = AudioFrame::kVadPassive; | 461 frames[0].vad_activity_ = AudioFrame::kVadPassive; |
466 std::vector<bool> expected_status(kAudioSources, true); | 462 std::vector<bool> expected_status(kAudioSources, true); |
467 expected_status[0] = false; | 463 expected_status[0] = false; |
468 | 464 |
469 MixAndCompare(frames, frame_info, expected_status); | 465 MixAndCompare(frames, frame_info, expected_status); |
470 } | 466 } |
471 | 467 |
472 TEST(AudioMixer, ActiveShouldMixBeforeLoud) { | 468 TEST(AudioMixer, ActiveShouldMixBeforeLoud) { |
473 constexpr int kAudioSources = | 469 constexpr int kAudioSources = |
474 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 470 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
475 | 471 |
476 std::vector<AudioFrame> frames(kAudioSources); | 472 std::vector<AudioFrame> frames(kAudioSources); |
477 for (auto& frame : frames) { | 473 for (auto& frame : frames) { |
478 ResetFrame(&frame); | 474 ResetFrame(&frame); |
479 } | 475 } |
480 | 476 |
481 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 477 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
482 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 478 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
483 frames[0].vad_activity_ = AudioFrame::kVadPassive; | 479 frames[0].vad_activity_ = AudioFrame::kVadPassive; |
484 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, | 480 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
485 std::numeric_limits<int16_t>::max()); | 481 std::numeric_limits<int16_t>::max()); |
486 std::vector<bool> expected_status(kAudioSources, true); | 482 std::vector<bool> expected_status(kAudioSources, true); |
487 expected_status[0] = false; | 483 expected_status[0] = false; |
488 | 484 |
489 MixAndCompare(frames, frame_info, expected_status); | 485 MixAndCompare(frames, frame_info, expected_status); |
490 } | 486 } |
491 | 487 |
492 TEST(AudioMixer, UnmutedShouldMixBeforeLoud) { | 488 TEST(AudioMixer, UnmutedShouldMixBeforeLoud) { |
493 constexpr int kAudioSources = | 489 constexpr int kAudioSources = |
494 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 490 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
495 | 491 |
496 std::vector<AudioFrame> frames(kAudioSources); | 492 std::vector<AudioFrame> frames(kAudioSources); |
497 for (auto& frame : frames) { | 493 for (auto& frame : frames) { |
498 ResetFrame(&frame); | 494 ResetFrame(&frame); |
499 } | 495 } |
500 | 496 |
501 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 497 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
502 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 498 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
503 frame_info[0] = MixerAudioSource::AudioFrameInfo::kMuted; | 499 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; |
504 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, | 500 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
505 std::numeric_limits<int16_t>::max()); | 501 std::numeric_limits<int16_t>::max()); |
506 std::vector<bool> expected_status(kAudioSources, true); | 502 std::vector<bool> expected_status(kAudioSources, true); |
507 expected_status[0] = false; | 503 expected_status[0] = false; |
508 | 504 |
509 MixAndCompare(frames, frame_info, expected_status); | 505 MixAndCompare(frames, frame_info, expected_status); |
510 } | 506 } |
511 } // namespace webrtc | 507 } // namespace webrtc |
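
For reference, below is a minimal sketch of a concrete AudioMixer::Source written against the renamed interface this change exercises. It assumes only the shape visible in the mock above: a virtual GetAudioFrameWithInfo(id, sample_rate_hz) returning an AudioFrameWithInfo aggregate of {audio_frame_pointer, audio_frame_info}. The ConstantSource name and the include path for AudioFrame are illustrative, not part of this CL.

// Sketch only: mirrors MockMixerAudioSource::FakeAudioFrameWithInfo without gmock.
#include <stdint.h>
#include <algorithm>

#include "webrtc/modules/audio_mixer/audio_mixer.h"
#include "webrtc/modules/include/module_common_types.h"  // AudioFrame (assumed path)

namespace webrtc {

// A source that always hands the mixer a 10 ms mono frame filled with a
// constant sample value, at whatever rate the mixer requests.
class ConstantSource : public AudioMixer::Source {
 public:
  explicit ConstantSource(int16_t value) : value_(value) {}

  AudioFrameWithInfo GetAudioFrameWithInfo(const int32_t id,
                                           int sample_rate_hz) override {
    frame_.sample_rate_hz_ = sample_rate_hz;
    frame_.samples_per_channel_ = sample_rate_hz / 100;  // 10 ms frame.
    frame_.num_channels_ = 1;
    frame_.vad_activity_ = AudioFrame::kVadActive;
    frame_.speech_type_ = AudioFrame::kNormalSpeech;
    std::fill(frame_.data_, frame_.data_ + frame_.samples_per_channel_, value_);
    return {
        &frame_,                  // audio_frame_pointer
        AudioFrameInfo::kNormal,  // audio_frame_info
    };
  }

 private:
  AudioFrame frame_;
  const int16_t value_;
};

}  // namespace webrtc

Registration and mixing would then follow the same calls the tests use: SetMixabilityStatus(&source, true) followed by Mix(sample_rate_hz, number_of_channels, &frame_for_mixing).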