Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 62 public: | 62 public: |
| 63 MockMixerAudioSource() | 63 MockMixerAudioSource() |
| 64 : fake_audio_frame_info_(MixerAudioSource::AudioFrameInfo::kNormal) { | 64 : fake_audio_frame_info_(MixerAudioSource::AudioFrameInfo::kNormal) { |
| 65 ON_CALL(*this, GetAudioFrameWithMuted(_, _)) | 65 ON_CALL(*this, GetAudioFrameWithMuted(_, _)) |
| 66 .WillByDefault( | 66 .WillByDefault( |
| 67 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted)); | 67 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted)); |
| 68 } | 68 } |
| 69 | 69 |
| 70 MOCK_METHOD2(GetAudioFrameWithMuted, | 70 MOCK_METHOD2(GetAudioFrameWithMuted, |
| 71 AudioFrameWithMuted(const int32_t id, int sample_rate_hz)); | 71 AudioFrameWithMuted(const int32_t id, int sample_rate_hz)); |
| 72 MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id)); | |
| 73 | 72 |
| 74 AudioFrame* fake_frame() { return &fake_frame_; } | 73 AudioFrame* fake_frame() { return &fake_frame_; } |
| 75 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } | 74 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } |
| 76 void set_fake_info(const AudioFrameInfo audio_frame_info) { | 75 void set_fake_info(const AudioFrameInfo audio_frame_info) { |
| 77 fake_audio_frame_info_ = audio_frame_info; | 76 fake_audio_frame_info_ = audio_frame_info; |
| 78 } | 77 } |
| 79 | 78 |
| 80 private: | 79 private: |
| 81 AudioFrame fake_frame_; | 80 AudioFrame fake_frame_; |
| 82 AudioFrameInfo fake_audio_frame_info_; | 81 AudioFrameInfo fake_audio_frame_info_; |
| (...skipping 16 matching lines...) Expand all Loading... | |
| 99 CompareWithOldMixerTest() | 98 CompareWithOldMixerTest() |
| 100 : old_mixer_(AudioConferenceMixer::Create(kId)), | 99 : old_mixer_(AudioConferenceMixer::Create(kId)), |
| 101 new_mixer_(NewAudioConferenceMixer::Create(kId)) {} | 100 new_mixer_(NewAudioConferenceMixer::Create(kId)) {} |
| 102 | 101 |
| 103 ~CompareWithOldMixerTest() { Reset(); } | 102 ~CompareWithOldMixerTest() { Reset(); } |
| 104 | 103 |
| 105 // Mixes with both mixers and compares results: resulting frames and | 104 // Mixes with both mixers and compares results: resulting frames and |
| 106 // mix statuses. | 105 // mix statuses. |
| 107 void MixAndCompare() { | 106 void MixAndCompare() { |
| 108 old_mixer_->Process(); | 107 old_mixer_->Process(); |
| 109 new_mixer_->Mix(&new_mixer_frame_); | 108 new_mixer_->Mix(kSampleRateHz, |
| 109 1, // number of channels | |
| 110 &new_mixer_frame_); | |
| 110 EXPECT_EQ(0, memcmp(old_mixer_frame_.data_, new_mixer_frame_.data_, | 111 EXPECT_EQ(0, memcmp(old_mixer_frame_.data_, new_mixer_frame_.data_, |
| 111 sizeof(old_mixer_frame_.data_))); | 112 sizeof(old_mixer_frame_.data_))); |
| 112 | 113 |
| 113 for (auto& participant_pair : participants_) { | 114 for (auto& participant_pair : participants_) { |
| 114 EXPECT_EQ(participant_pair.first->IsMixed(), | 115 EXPECT_EQ(participant_pair.first->IsMixed(), |
| 115 participant_pair.second->IsMixed()); | 116 participant_pair.second->IsMixed()); |
| 116 } | 117 } |
| 117 } | 118 } |
| 118 | 119 |
| 119 std::unique_ptr<AudioFrame> last_mixed_audio_old() { | 120 std::unique_ptr<AudioFrame> last_mixed_audio_old() { |
| (...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 195 | 196 |
| 196 // We modify one sample within the RampIn window and one sample | 197 // We modify one sample within the RampIn window and one sample |
| 197 // outside of it. | 198 // outside of it. |
| 198 participant_.fake_frame()->data_[10] = 100; | 199 participant_.fake_frame()->data_[10] = 100; |
| 199 participant_.fake_frame()->data_[20] = -200; | 200 participant_.fake_frame()->data_[20] = -200; |
| 200 participant_.fake_frame()->data_[30] = 300; | 201 participant_.fake_frame()->data_[30] = 300; |
| 201 participant_.fake_frame()->data_[90] = -400; | 202 participant_.fake_frame()->data_[90] = -400; |
| 202 | 203 |
| 203 // Frame duration 10ms. | 204 // Frame duration 10ms. |
| 204 participant_.fake_frame()->samples_per_channel_ = kSampleRateHz / 100; | 205 participant_.fake_frame()->samples_per_channel_ = kSampleRateHz / 100; |
| 205 EXPECT_CALL(participant_, NeededFrequency(_)) | |
| 206 .WillRepeatedly(Return(kSampleRateHz)); | |
| 207 } | 206 } |
| 208 | 207 |
| 209 ~BothMixersTest() { AudioMixer::Destroy(audio_mixer_); } | 208 ~BothMixersTest() { AudioMixer::Destroy(audio_mixer_); } |
| 210 | 209 |
| 211 // Mark the participant as 'unmixed' last round. | 210 // Mark the participant as 'unmixed' last round. |
| 212 void ResetAudioSource() { participant_._mixHistory->SetIsMixed(false); } | 211 void ResetAudioSource() { participant_._mixHistory->SetIsMixed(false); } |
| 213 | 212 |
| 214 AudioMixer* audio_mixer_; | 213 AudioMixer* audio_mixer_; |
| 215 MockMixerAudioSource participant_; | 214 MockMixerAudioSource participant_; |
| 216 AudioFrame mixing_round_frame, mixed_results_frame_; | 215 AudioFrame mixing_round_frame, mixed_results_frame_; |
| (...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 295 // Frame duration 10ms. | 294 // Frame duration 10ms. |
| 296 participants[i].fake_frame()->samples_per_channel_ = kSampleRateHz / 100; | 295 participants[i].fake_frame()->samples_per_channel_ = kSampleRateHz / 100; |
| 297 | 296 |
| 298 // We set the 80-th sample value since the first 80 samples may be | 297 // We set the 80-th sample value since the first 80 samples may be |
| 299 // modified by a ramped-in window. | 298 // modified by a ramped-in window. |
| 300 participants[i].fake_frame()->data_[80] = i; | 299 participants[i].fake_frame()->data_[80] = i; |
| 301 | 300 |
| 302 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 301 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
| 303 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_, _)) | 302 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_, _)) |
| 304 .Times(Exactly(1)); | 303 .Times(Exactly(1)); |
| 305 EXPECT_CALL(participants[i], NeededFrequency(_)) | |
| 306 .WillRepeatedly(Return(kSampleRateHz)); | |
| 307 } | 304 } |
| 308 | 305 |
| 309 // Last participant gives audio frame with passive VAD, although it has the | 306 // Last participant gives audio frame with passive VAD, although it has the |
| 310 // largest energy. | 307 // largest energy. |
| 311 participants[kAudioSources - 1].fake_frame()->vad_activity_ = | 308 participants[kAudioSources - 1].fake_frame()->vad_activity_ = |
| 312 AudioFrame::kVadPassive; | 309 AudioFrame::kVadPassive; |
| 313 | 310 |
| 314 AudioFrame audio_frame; | 311 AudioFrame audio_frame; |
| 315 mixer->Mix(&audio_frame); | 312 mixer->Mix(kSampleRateHz, |
| 313 1, // number of channels | |
| 314 &audio_frame); | |
| 316 | 315 |
| 317 for (int i = 0; i < kAudioSources; ++i) { | 316 for (int i = 0; i < kAudioSources; ++i) { |
| 318 bool is_mixed = participants[i].IsMixed(); | 317 bool is_mixed = participants[i].IsMixed(); |
| 319 if (i == kAudioSources - 1 || | 318 if (i == kAudioSources - 1 || |
| 320 i < kAudioSources - 1 - | 319 i < kAudioSources - 1 - |
| 321 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources) { | 320 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources) { |
| 322 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i | 321 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i |
| 323 << " wrong."; | 322 << " wrong."; |
| 324 } else { | 323 } else { |
| 325 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i | 324 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i |
| 326 << " wrong."; | 325 << " wrong."; |
| 327 } | 326 } |
| 328 } | 327 } |
| 329 } | 328 } |
| 330 | 329 |
| 330 TEST(AudioMixer, ParticipantSampleRate) { | |
| 331 const int kId = 1; | |
| 332 std::unique_ptr<NewAudioConferenceMixer> mixer( | |
| 333 NewAudioConferenceMixer::Create(kId)); | |
| 334 AudioFrame frame_for_mixing; | |
| 335 | |
| 336 MockMixerAudioSource participant; | |
| 337 participant.fake_frame()->sample_rate_hz_ = 8000; | |
| 338 participant.fake_frame()->num_channels_ = 1; | |
| 339 | |
| 340 // Frame duration 10ms. | |
| 341 participant.fake_frame()->samples_per_channel_ = 8000 / 100; | |
| 342 | |
| 343 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | |
| 344 for (auto frequency : {8000, 16000, 32000, 48000}) { | |
| 345 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, frequency)) | |
| 346 .Times(Exactly(1)); | |
| 347 mixer->Mix(frequency, 1, &frame_for_mixing); | |
| 348 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); | |
| 349 } | |
| 350 } | |
| 351 | |
| 352 TEST(AudioMixer, ParticipantNumberOfChannels) { | |
| 353 const int kId = 1; | |
| 354 std::unique_ptr<NewAudioConferenceMixer> mixer( | |
| 355 NewAudioConferenceMixer::Create(kId)); | |
| 356 AudioFrame frame_for_mixing; | |
| 357 | |
| 358 MockMixerAudioSource participant; | |
| 359 participant.fake_frame()->sample_rate_hz_ = 8000; | |
| 360 participant.fake_frame()->num_channels_ = 1; | |
| 361 | |
| 362 // Frame duration 10ms. | |
| 363 participant.fake_frame()->samples_per_channel_ = 8000 / 100; | |
| 364 | |
| 365 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | |
| 366 for (size_t number_of_channels : {1, 2}) { | |
| 367 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, 8000)).Times(Exactly(1)); | |
| 368 mixer->Mix(8000, number_of_channels, &frame_for_mixing); | |
| 369 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); | |
| 370 } | |
| 371 } | |
| 372 | |
|
aleloi2
2016/08/06 10:13:50
Two new tests to check that the mixer asks for the requested sample rate and number of channels.
| |
| 331 TEST_F(BothMixersTest, CompareInitialFrameAudio) { | 373 TEST_F(BothMixersTest, CompareInitialFrameAudio) { |
| 332 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1)); | 374 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1)); |
| 333 | 375 |
| 334 // Make sure the participant is marked as 'non-mixed' so that it is | 376 // Make sure the participant is marked as 'non-mixed' so that it is |
| 335 // ramped in next round. | 377 // ramped in next round. |
| 336 ResetAudioSource(); | 378 ResetAudioSource(); |
| 337 | 379 |
| 338 // Construct the expected sound for the first mixing round. | 380 // Construct the expected sound for the first mixing round. |
| 339 mixing_round_frame.CopyFrom(*participant_.fake_frame()); | 381 mixing_round_frame.CopyFrom(*participant_.fake_frame()); |
| 340 RampIn(mixing_round_frame); | 382 RampIn(mixing_round_frame); |
| 341 | 383 |
| 342 // Mix frames and put the result into a frame. | 384 // Mix frames and put the result into a frame. |
| 343 audio_mixer_->MixActiveChannels(); | |
| 344 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); | 385 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); |
| 345 | 386 |
| 346 // Compare the received frame with the expected. | 387 // Compare the received frame with the expected. |
| 347 EXPECT_EQ(mixing_round_frame.sample_rate_hz_, | 388 EXPECT_EQ(mixing_round_frame.sample_rate_hz_, |
| 348 mixed_results_frame_.sample_rate_hz_); | 389 mixed_results_frame_.sample_rate_hz_); |
| 349 EXPECT_EQ(mixing_round_frame.num_channels_, | 390 EXPECT_EQ(mixing_round_frame.num_channels_, |
| 350 mixed_results_frame_.num_channels_); | 391 mixed_results_frame_.num_channels_); |
| 351 EXPECT_EQ(mixing_round_frame.samples_per_channel_, | 392 EXPECT_EQ(mixing_round_frame.samples_per_channel_, |
| 352 mixed_results_frame_.samples_per_channel_); | 393 mixed_results_frame_.samples_per_channel_); |
| 353 EXPECT_EQ(0, memcmp(mixing_round_frame.data_, mixed_results_frame_.data_, | 394 EXPECT_EQ(0, memcmp(mixing_round_frame.data_, mixed_results_frame_.data_, |
| 354 sizeof(mixing_round_frame.data_))); | 395 sizeof(mixing_round_frame.data_))); |
| 355 } | 396 } |
| 356 | 397 |
| 357 TEST_F(BothMixersTest, CompareSecondFrameAudio) { | 398 TEST_F(BothMixersTest, CompareSecondFrameAudio) { |
| 358 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(2)); | 399 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(2)); |
| 359 | 400 |
| 360 // Make sure the participant is marked as 'non-mixed' so that it is | 401 // Make sure the participant is marked as 'non-mixed' so that it is |
| 361 // ramped in next round. | 402 // ramped in next round. |
| 362 ResetAudioSource(); | 403 ResetAudioSource(); |
| 363 | 404 |
| 364 // Do one mixing iteration. | 405 // Do one mixing iteration. |
| 365 audio_mixer_->MixActiveChannels(); | 406 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); |
| 366 | 407 |
| 367 // Mix frames a second time and compare with the expected frame | 408 // Mix frames a second time and compare with the expected frame |
| 368 // (which is the participant's frame). | 409 // (which is the participant's frame). |
| 369 audio_mixer_->MixActiveChannels(); | |
| 370 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); | 410 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); |
| 371 EXPECT_EQ(0, | 411 EXPECT_EQ(0, |
| 372 memcmp(participant_.fake_frame()->data_, mixed_results_frame_.data_, | 412 memcmp(participant_.fake_frame()->data_, mixed_results_frame_.data_, |
| 373 sizeof(mixing_round_frame.data_))); | 413 sizeof(mixing_round_frame.data_))); |
| 374 } | 414 } |
| 375 | 415 |
| 376 TEST_F(CompareWithOldMixerTest, TwoParticipantsNormalFrames) { | 416 TEST_F(CompareWithOldMixerTest, TwoParticipantsNormalFrames) { |
| 377 Reset(); | 417 Reset(); |
| 378 AudioFrame first_frame, second_frame; | 418 AudioFrame first_frame, second_frame; |
| 379 | 419 |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 434 MixerParticipant::AudioFrameInfo::kMuted); | 474 MixerParticipant::AudioFrameInfo::kMuted); |
| 435 } else { | 475 } else { |
| 436 AddParticipant(&audio_frames[i], | 476 AddParticipant(&audio_frames[i], |
| 437 MixerParticipant::AudioFrameInfo::kNormal); | 477 MixerParticipant::AudioFrameInfo::kNormal); |
| 438 } | 478 } |
| 439 MixAndCompare(); | 479 MixAndCompare(); |
| 440 } | 480 } |
| 441 } | 481 } |
| 442 | 482 |
| 443 } // namespace webrtc | 483 } // namespace webrtc |
| OLD | NEW |