OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 59 matching lines...) |
70 MOCK_METHOD2(GetAudioFrameWithMuted, | 70 MOCK_METHOD2(GetAudioFrameWithMuted, |
71 AudioFrameWithMuted(const int32_t id, int sample_rate_hz)); | 71 AudioFrameWithMuted(const int32_t id, int sample_rate_hz)); |
72 | 72 |
73 AudioFrame* fake_frame() { return &fake_frame_; } | 73 AudioFrame* fake_frame() { return &fake_frame_; } |
74 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } | 74 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } |
75 void set_fake_info(const AudioFrameInfo audio_frame_info) { | 75 void set_fake_info(const AudioFrameInfo audio_frame_info) { |
76 fake_audio_frame_info_ = audio_frame_info; | 76 fake_audio_frame_info_ = audio_frame_info; |
77 } | 77 } |
78 | 78 |
79 private: | 79 private: |
80 AudioFrame fake_frame_; | 80 AudioFrame fake_frame_, output_frame_; |
81 AudioFrameInfo fake_audio_frame_info_; | 81 AudioFrameInfo fake_audio_frame_info_; |
82 AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id, | 82 AudioFrameWithMuted FakeAudioFrameWithMuted(const int32_t id, |
83 int sample_rate_hz) { | 83 int sample_rate_hz) { |
| 84 output_frame_.CopyFrom(fake_frame_);  // Hand out a copy so the original fake frame stays unmodified. |
84 return { | 85 return { |
85 fake_frame(), // audio_frame_pointer | 86 &output_frame_, // audio_frame_pointer |
86 fake_info(), // audio_frame_info | 87 fake_info(), // audio_frame_info |
87 }; | 88 }; |
88 } | 89 } |
89 }; | 90 }; |
90 | 91 |
91 // Keeps two identical sets of participants and two mixers to test | 92 // Keeps two identical sets of participants and two mixers to test |
92 // that the same participants are chosen for mixing. | 93 // that the same participants are chosen for mixing. |
93 class CompareWithOldMixerTest : public testing::Test, AudioMixerOutputReceiver { | 94 class CompareWithOldMixerTest : public testing::Test, AudioMixerOutputReceiver { |
94 protected: | 95 protected: |
95 constexpr static int kId = 1; | 96 constexpr static int kId = 1; |
96 constexpr static int kSampleRateHz = 32000; | 97 constexpr static int kSampleRateHz = 32000; |
(...skipping 266 matching lines...) |
363 participant.fake_frame()->samples_per_channel_ = 8000 / 100; | 364 participant.fake_frame()->samples_per_channel_ = 8000 / 100; |
364 | 365 |
365 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 366 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
366 for (size_t number_of_channels : {1, 2}) { | 367 for (size_t number_of_channels : {1, 2}) { |
367 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, 8000)).Times(Exactly(1)); | 368 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, 8000)).Times(Exactly(1)); |
368 mixer->Mix(8000, number_of_channels, &frame_for_mixing); | 369 mixer->Mix(8000, number_of_channels, &frame_for_mixing); |
369 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); | 370 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); |
370 } | 371 } |
371 } | 372 } |
372 | 373 |
| 374 // Test that the volume is reported as zero when the mixer input |
| 375 // comprises only zero values. |
| 376 TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) { |
| 377 const int kId = 1; |
| 378 const int kSampleRateHz = 8000; |
| 379 std::unique_ptr<NewAudioConferenceMixer> mixer( |
| 380 NewAudioConferenceMixer::Create(kId)); |
| 381 AudioFrame frame_for_mixing; |
| 382 |
| 383 MockMixerAudioSource participant; |
| 384 participant.fake_frame()->sample_rate_hz_ = kSampleRateHz; |
| 385 participant.fake_frame()->num_channels_ = 1; |
| 386 |
| 387 // Frame duration 10ms. |
| 388 participant.fake_frame()->samples_per_channel_ = kSampleRateHz / 100; |
| 389 |
| 390 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
| 391 for (size_t i = 0; i < 11; i++) { |
| 392 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kSampleRateHz)) |
| 393 .Times(Exactly(1)); |
| 394 mixer->Mix(kSampleRateHz, 1, &frame_for_mixing); |
| 395 } |
| 396 |
| 397 EXPECT_EQ(static_cast<uint32_t>(0), mixer->GetAudioOutputLevel()); |
| 398 EXPECT_EQ(static_cast<uint32_t>(0), mixer->GetAudioOutputLevelFullRange()); |
| 399 } |
| 400 |
| 401 // Test that the reported volume is maximal when the mixer |
| 402 // input comprises frames with maximal values. |
| 403 TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) { |
| 404 const int kId = 1; |
| 405 const int kSampleRateHz = 8000; |
| 406 std::unique_ptr<NewAudioConferenceMixer> mixer( |
| 407 NewAudioConferenceMixer::Create(kId)); |
| 408 AudioFrame frame_for_mixing; |
| 409 |
| 410 MockMixerAudioSource participant; |
| 411 participant.fake_frame()->sample_rate_hz_ = kSampleRateHz; |
| 412 participant.fake_frame()->num_channels_ = 1; |
| 413 |
| 414 // Frame duration 10ms. |
| 415 participant.fake_frame()->samples_per_channel_ = kSampleRateHz / 100; |
| 416 |
| 417 // Fill participant frame data with maximal sound. |
| 418 std::fill(participant.fake_frame()->data_, |
| 419 participant.fake_frame()->data_ + kSampleRateHz / 100, |
| 420 std::numeric_limits<int16_t>::max()); |
| 421 |
| 422 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
| 423 for (size_t i = 0; i < 11; i++) { |
| 424 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kSampleRateHz)) |
| 425 .Times(Exactly(1)); |
| 426 mixer->Mix(kSampleRateHz, 1, &frame_for_mixing); |
| 427 } |
| 428 |
| 429 // 9 is the highest possible audio level. |
| 430 EXPECT_EQ(static_cast<uint32_t>(9), mixer->GetAudioOutputLevel()); |
| 431 |
| 432 // 0x7fff = 32767 is the highest full range audio level. |
| 433 EXPECT_EQ(static_cast<uint32_t>(std::numeric_limits<int16_t>::max()), |
| 434 mixer->GetAudioOutputLevelFullRange()); |
| 435 } |
| 436 |
373 TEST_F(BothMixersTest, CompareInitialFrameAudio) { | 437 TEST_F(BothMixersTest, CompareInitialFrameAudio) { |
374 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1)); | 438 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1)); |
375 | 439 |
376 // Make sure the participant is marked as 'non-mixed' so that it is | 440 // Make sure the participant is marked as 'non-mixed' so that it is |
377 // ramped in next round. | 441 // ramped in next round. |
378 ResetAudioSource(); | 442 ResetAudioSource(); |
379 | 443 |
380 // Construct the expected sound for the first mixing round. | 444 // Construct the expected sound for the first mixing round. |
381 mixing_round_frame.CopyFrom(*participant_.fake_frame()); | 445 mixing_round_frame.CopyFrom(*participant_.fake_frame()); |
382 RampIn(mixing_round_frame); | 446 RampIn(mixing_round_frame); |
(...skipping 91 matching lines...) |
474 MixerParticipant::AudioFrameInfo::kMuted); | 538 MixerParticipant::AudioFrameInfo::kMuted); |
475 } else { | 539 } else { |
476 AddParticipant(&audio_frames[i], | 540 AddParticipant(&audio_frames[i], |
477 MixerParticipant::AudioFrameInfo::kNormal); | 541 MixerParticipant::AudioFrameInfo::kNormal); |
478 } | 542 } |
479 MixAndCompare(); | 543 MixAndCompare(); |
480 } | 544 } |
481 } | 545 } |
482 | 546 |
483 } // namespace webrtc | 547 } // namespace webrtc |