OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 59 matching lines...) | |
70 MOCK_METHOD2(GetAudioFrameWithMuted, | 70 MOCK_METHOD2(GetAudioFrameWithMuted, |
71 AudioFrameWithMuted(const int32_t id, int sample_rate_hz)); | 71 AudioFrameWithMuted(const int32_t id, int sample_rate_hz)); |
72 | 72 |
73 AudioFrame* fake_frame() { return &fake_frame_; } | 73 AudioFrame* fake_frame() { return &fake_frame_; } |
74 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } | 74 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } |
75 void set_fake_info(const AudioFrameInfo audio_frame_info) { | 75 void set_fake_info(const AudioFrameInfo audio_frame_info) { |
76 fake_audio_frame_info_ = audio_frame_info; | 76 fake_audio_frame_info_ = audio_frame_info; |
77 } | 77 } |
78 | 78 |
79 private: | 79 private: |
80 AudioFrame fake_frame_; | 80 AudioFrame fake_frame_, output_frame_; |
81 AudioFrameInfo fake_audio_frame_info_; | 81 AudioFrameInfo fake_audio_frame_info_; |
82 AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id, | 82 AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id, |
83 int sample_rate_hz) { | 83 int sample_rate_hz) { |
 | 84 output_frame_.CopyFrom(fake_frame_); |
84 return { | 85 return { |
85 fake_frame(), // audio_frame_pointer | 86 &output_frame_, // audio_frame_pointer |
86 fake_info(), // audio_frame_info | 87 fake_info(), // audio_frame_info |
87 }; | 88 }; |
88 } | 89 } |
89 }; | 90 }; |
90 | 91 |
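A note on the change to FakeAudioFrameWithMuted above: the NEW side hands the mixer a scratch copy instead of fake_frame_ itself, presumably so that in-place processing by the mixer (ramping, mixing) cannot corrupt the canonical fake frame that later assertions copy and compare against. A minimal annotated sketch of that pattern, using the types from this file:

```cpp
// Sketch: the mock returns a pointer to output_frame_, a private scratch
// copy refreshed on every call, so the mixer may mutate the returned frame
// without touching fake_frame_.
AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id,
                                            int sample_rate_hz) {
  output_frame_.CopyFrom(fake_frame_);  // refresh the scratch copy each call
  return {
      &output_frame_,  // audio_frame_pointer: safe for the mixer to modify
      fake_info(),     // audio_frame_info
  };
}
```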
91 // Keeps two identical sets of participants and two mixers to test | 92 // Keeps two identical sets of participants and two mixers to test |
92 // that the same participants are chosen for mixing. | 93 // that the same participants are chosen for mixing. |
93 class CompareWithOldMixerTest : public testing::Test, AudioMixerOutputReceiver { | 94 class CompareWithOldMixerTest : public testing::Test, AudioMixerOutputReceiver { |
94 protected: | 95 protected: |
95 constexpr static int kId = 1; | 96 constexpr static int kId = 1; |
96 constexpr static int kSampleRateHz = 32000; | 97 constexpr static int kSampleRateHz = 32000; |
(...skipping 170 matching lines...) | |
267 } | 268 } |
268 | 269 |
269 // SetMixabilityStatus(anonymous, false) will remove anonymous from both | 270 // SetMixabilityStatus(anonymous, false) will remove anonymous from both |
270 // anonymous and named groups. | 271 // anonymous and named groups. |
271 EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false)); | 272 EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false)); |
272 EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1])); | 273 EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1])); |
273 EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1])); | 274 EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1])); |
274 } | 275 } |
275 | 276 |
276 TEST(AudioMixer, LargestEnergyVadActiveMixed) { | 277 TEST(AudioMixer, LargestEnergyVadActiveMixed) { |
277 const int kId = 1; | 278 constexpr int kId = 1; |
278 const int kAudioSources = | 279 constexpr int kAudioSources = |
279 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 3; | 280 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 3; |
280 const int kSampleRateHz = 32000; | 281 constexpr int kSampleRateHz = 32000; |
281 | 282 |
282 std::unique_ptr<NewAudioConferenceMixer> mixer( | 283 std::unique_ptr<NewAudioConferenceMixer> mixer( |
283 NewAudioConferenceMixer::Create(kId)); | 284 NewAudioConferenceMixer::Create(kId)); |
284 | 285 |
285 MockMixerAudioSource participants[kAudioSources]; | 286 MockMixerAudioSource participants[kAudioSources]; |
286 | 287 |
287 for (int i = 0; i < kAudioSources; ++i) { | 288 for (int i = 0; i < kAudioSources; ++i) { |
288 participants[i].fake_frame()->id_ = i; | 289 participants[i].fake_frame()->id_ = i; |
289 participants[i].fake_frame()->sample_rate_hz_ = kSampleRateHz; | 290 participants[i].fake_frame()->sample_rate_hz_ = kSampleRateHz; |
290 participants[i].fake_frame()->speech_type_ = AudioFrame::kNormalSpeech; | 291 participants[i].fake_frame()->speech_type_ = AudioFrame::kNormalSpeech; |
(...skipping 30 matching lines...) | |
321 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i | 322 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i |
322 << " wrong."; | 323 << " wrong."; |
323 } else { | 324 } else { |
324 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i | 325 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i |
325 << " wrong."; | 326 << " wrong."; |
326 } | 327 } |
327 } | 328 } |
328 } | 329 } |
329 | 330 |
330 TEST(AudioMixer, ParticipantSampleRate) { | 331 TEST(AudioMixer, ParticipantSampleRate) { |
331 const int kId = 1; | 332 constexpr int kId = 1; |
332 std::unique_ptr<NewAudioConferenceMixer> mixer( | 333 std::unique_ptr<NewAudioConferenceMixer> mixer( |
333 NewAudioConferenceMixer::Create(kId)); | 334 NewAudioConferenceMixer::Create(kId)); |
334 AudioFrame frame_for_mixing; | 335 AudioFrame frame_for_mixing; |
335 | 336 |
336 MockMixerAudioSource participant; | 337 MockMixerAudioSource participant; |
337 participant.fake_frame()->sample_rate_hz_ = 8000; | 338 participant.fake_frame()->sample_rate_hz_ = 8000; |
338 participant.fake_frame()->num_channels_ = 1; | 339 participant.fake_frame()->num_channels_ = 1; |
339 | 340 |
340 // Frame duration 10ms. | 341 // Frame duration 10ms. |
341 participant.fake_frame()->samples_per_channel_ = 8000 / 100; | 342 participant.fake_frame()->samples_per_channel_ = 8000 / 100; |
342 | 343 |
343 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 344 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
344 for (auto frequency : {8000, 16000, 32000, 48000}) { | 345 for (auto frequency : {8000, 16000, 32000, 48000}) { |
345 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, frequency)) | 346 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, frequency)) |
346 .Times(Exactly(1)); | 347 .Times(Exactly(1)); |
347 mixer->Mix(frequency, 1, &frame_for_mixing); | 348 mixer->Mix(frequency, 1, &frame_for_mixing); |
348 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); | 349 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); |
349 } | 350 } |
350 } | 351 } |
351 | 352 |
352 TEST(AudioMixer, ParticipantNumberOfChannels) { | 353 TEST(AudioMixer, ParticipantNumberOfChannels) { |
353 const int kId = 1; | 354 constexpr int kId = 1; |
354 std::unique_ptr<NewAudioConferenceMixer> mixer( | 355 std::unique_ptr<NewAudioConferenceMixer> mixer( |
355 NewAudioConferenceMixer::Create(kId)); | 356 NewAudioConferenceMixer::Create(kId)); |
356 AudioFrame frame_for_mixing; | 357 AudioFrame frame_for_mixing; |
357 | 358 |
358 MockMixerAudioSource participant; | 359 MockMixerAudioSource participant; |
359 participant.fake_frame()->sample_rate_hz_ = 8000; | 360 participant.fake_frame()->sample_rate_hz_ = 8000; |
360 participant.fake_frame()->num_channels_ = 1; | 361 participant.fake_frame()->num_channels_ = 1; |
361 | 362 |
362 // Frame duration 10ms. | 363 // Frame duration 10ms. |
363 participant.fake_frame()->samples_per_channel_ = 8000 / 100; | 364 participant.fake_frame()->samples_per_channel_ = 8000 / 100; |
364 | 365 |
365 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 366 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
366 for (size_t number_of_channels : {1, 2}) { | 367 for (size_t number_of_channels : {1, 2}) { |
367 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, 8000)).Times(Exactly(1)); | 368 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, 8000)).Times(Exactly(1)); |
368 mixer->Mix(8000, number_of_channels, &frame_for_mixing); | 369 mixer->Mix(8000, number_of_channels, &frame_for_mixing); |
369 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); | 370 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); |
370 } | 371 } |
371 } | 372 } |
372 | 373 |
374 // Test that the volume is reported as zero when the mixer input | |
375 // comprises only zero values. | |
376 TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) { | |
377 constexpr int kId = 1; | |
378 constexpr int kSampleRateHz = 8000; | |
379 std::unique_ptr<NewAudioConferenceMixer> mixer( | |
kwiberg-webrtc (2016/08/24 08:04:39): You can make mixer const (i.e., const std::unique_…
aleloi (2016/08/24 08:15:16): I'll do it in one of the dependent CLs. I've chang…
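For reference, the const-pointer change suggested here (deferred by the author to a dependent CL) would presumably look like the following sketch; the pointer is never reseated after construction, so only the declaration changes:

```cpp
// Sketch of the reviewer's suggestion: the unique_ptr itself can be const
// because the mixer is created once and only accessed through the pointer.
const std::unique_ptr<NewAudioConferenceMixer> mixer(
    NewAudioConferenceMixer::Create(kId));
```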
380 NewAudioConferenceMixer::Create(kId)); | |
381 AudioFrame frame_for_mixing; | |
382 | |
383 MockMixerAudioSource participant; | |
384 participant.fake_frame()->sample_rate_hz_ = kSampleRateHz; | |
385 participant.fake_frame()->num_channels_ = 1; | |
386 | |
387 // Frame duration 10ms. | |
388 participant.fake_frame()->samples_per_channel_ = kSampleRateHz / 100; | |
389 | |
390 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | |
391 for (size_t i = 0; i < 11; i++) { | |
kwiberg-webrtc (2016/08/24 08:04:39): I'm pretty sure these indexes will fit in an int..
aleloi (2016/08/24 08:15:16): This has been pointed out by ivoc@ in one of the d…
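Similarly, the loop-index suggestion (also deferred) would presumably amount to the sketch below; 11 iterations fit comfortably in an int, so size_t is not needed:

```cpp
// Sketch of the suggested change: use int for a small, fixed iteration count.
for (int i = 0; i < 11; ++i) {
  EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kSampleRateHz))
      .Times(Exactly(1));
  mixer->Mix(8000, 1, &frame_for_mixing);
}
```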
392 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kSampleRateHz)) | |
393 .Times(Exactly(1)); | |
394 mixer->Mix(8000, 1, &frame_for_mixing); | |
395 } | |
396 | |
397 EXPECT_EQ(0, mixer->GetOutputAudioLevel()); | |
398 EXPECT_EQ(0, mixer->GetOutputAudioLevelFullRange()); | |
399 } | |
400 | |
401 // Test that the reported volume is maximal when the mixer | |
402 // input comprises frames with maximal values. | |
403 TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) { | |
404 constexpr int kId = 1; | |
405 constexpr int kSampleRateHz = 8000; | |
406 std::unique_ptr<NewAudioConferenceMixer> mixer( | |
407 NewAudioConferenceMixer::Create(kId)); | |
kwiberg-webrtc (2016/08/24 08:04:39): const here too.
aleloi (2016/08/24 08:15:16): Same as above.
408 AudioFrame frame_for_mixing; | |
409 | |
410 MockMixerAudioSource participant; | |
411 participant.fake_frame()->sample_rate_hz_ = kSampleRateHz; | |
412 participant.fake_frame()->num_channels_ = 1; | |
413 | |
414 // Frame duration 10ms. | |
415 participant.fake_frame()->samples_per_channel_ = kSampleRateHz / 100; | |
416 | |
417 // Fill participant frame data with maximal sound. | |
418 std::fill(participant.fake_frame()->data_, | |
419 participant.fake_frame()->data_ + kSampleRateHz / 100, | |
420 std::numeric_limits<int16_t>::max()); | |
421 | |
422 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | |
423 for (size_t i = 0; i < 11; i++) { | |
424 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kSampleRateHz)) | |
425 .Times(Exactly(1)); | |
426 mixer->Mix(8000, 1, &frame_for_mixing); | |
427 } | |
428 | |
429 // 9 is the highest possible audio level | |
430 EXPECT_EQ(9, mixer->GetOutputAudioLevel()); | |
431 | |
432 // 0x7fff = 32767 is the highest full range audio level. | |
433 EXPECT_EQ(std::numeric_limits<int16_t>::max(), | |
434 mixer->GetOutputAudioLevelFullRange()); | |
435 } | |
436 | |
373 TEST_F(BothMixersTest, CompareInitialFrameAudio) { | 437 TEST_F(BothMixersTest, CompareInitialFrameAudio) { |
374 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1)); | 438 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1)); |
375 | 439 |
376 // Make sure the participant is marked as 'non-mixed' so that it is | 440 // Make sure the participant is marked as 'non-mixed' so that it is |
377 // ramped in next round. | 441 // ramped in next round. |
378 ResetAudioSource(); | 442 ResetAudioSource(); |
379 | 443 |
380 // Construct the expected sound for the first mixing round. | 444 // Construct the expected sound for the first mixing round. |
381 mixing_round_frame.CopyFrom(*participant_.fake_frame()); | 445 mixing_round_frame.CopyFrom(*participant_.fake_frame()); |
382 RampIn(mixing_round_frame); | 446 RampIn(mixing_round_frame); |
(...skipping 91 matching lines...) | |
474 MixerParticipant::AudioFrameInfo::kMuted); | 538 MixerParticipant::AudioFrameInfo::kMuted); |
475 } else { | 539 } else { |
476 AddParticipant(&audio_frames[i], | 540 AddParticipant(&audio_frames[i], |
477 MixerParticipant::AudioFrameInfo::kNormal); | 541 MixerParticipant::AudioFrameInfo::kNormal); |
478 } | 542 } |
479 MixAndCompare(); | 543 MixAndCompare(); |
480 } | 544 } |
481 } | 545 } |
482 | 546 |
483 } // namespace webrtc | 547 } // namespace webrtc |