Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc

Issue 2221443002: Changed mixing api and removed resampler (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 4 years, 4 months ago
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

(...skipping 88 matching lines...)
  CompareWithOldMixerTest()
      : old_mixer_(AudioConferenceMixer::Create(kId)),
        new_mixer_(NewAudioConferenceMixer::Create(kId)) {}

  ~CompareWithOldMixerTest() { Reset(); }

  // Mixes with both mixers and compares results: resulting frames and
  // mix statuses.
  void MixAndCompare() {
    old_mixer_->Process();
-    new_mixer_->Mix(&new_mixer_frame_);
+    new_mixer_->Mix(kSampleRateHz,
+                    1,  // number of channels
+                    &new_mixer_frame_);
aleloi 2016/08/05 09:52:22  Adapted tests to match new mixer API.
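The call above is the heart of the API change in this issue: the caller now passes the desired output sample rate and channel count on every Mix() call instead of relying on the mixer (and its internal resampler) to settle the output format. A minimal sketch of how a caller reads under the new API; the exact NewAudioConferenceMixer::Mix declaration is not shown in this diff, so the parameter names and the 48 kHz stereo values below are illustrative assumptions, not the documented signature:

// Shape of the call as used throughout this patch, roughly:
//   Mix(sample_rate_hz, number_of_channels, audio_frame_for_mixing)
// Hypothetical caller asking for 48 kHz stereo output; with the resampler
// gone, the mixer itself is expected to deliver this format.
AudioFrame mixed;
new_mixer_->Mix(48000,
                2,  // number of channels
                &mixed);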
    EXPECT_EQ(0, memcmp(old_mixer_frame_.data_, new_mixer_frame_.data_,
                        sizeof(old_mixer_frame_.data_)));

    for (auto& participant_pair : participants_) {
      EXPECT_EQ(participant_pair.first->IsMixed(),
                participant_pair.second->IsMixed());
    }
  }

  std::unique_ptr<AudioFrame> last_mixed_audio_old() {
(...skipping 185 matching lines...)
    EXPECT_CALL(participants[i], NeededFrequency(_))
        .WillRepeatedly(Return(kSampleRateHz));
  }

  // Last participant gives audio frame with passive VAD, although it has the
  // largest energy.
  participants[kAudioSources - 1].fake_frame()->vad_activity_ =
      AudioFrame::kVadPassive;

  AudioFrame audio_frame;
-  mixer->Mix(&audio_frame);
+  mixer->Mix(kSampleRateHz,
+             1,  // number of channels
+             &audio_frame);

  for (int i = 0; i < kAudioSources; ++i) {
    bool is_mixed = participants[i].IsMixed();
    if (i == kAudioSources - 1 ||
        i < kAudioSources - 1 -
                NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources) {
      EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i
                             << " wrong.";
    } else {
      EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i
                            << " wrong.";
    }
  }
}
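The expectation block above encodes which sources the new mixer should pick: the last participant is excluded because its VAD is passive even though it is the loudest, and of the remaining participants only the kMaximumAmountOfMixedAudioSources highest-energy ones are mixed (the skipped fixture code appears to give higher indices higher energy). A standalone restatement of that predicate, illustrative only and not part of the patch; the function and parameter names are invented for this sketch:

// Expected mixing status for participant |i|, assuming |audio_sources|
// participants ordered by increasing energy and a mixer that mixes at most
// |max_mixed| of them. Mirrors the condition in the loop above.
bool ExpectedMixed(int i, int audio_sources, int max_mixed) {
  if (i == audio_sources - 1)
    return false;  // Loudest source, but passive VAD: never mixed.
  return i >= audio_sources - 1 - max_mixed;  // The loudest active-VAD sources.
}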

TEST_F(BothMixersTest, CompareInitialFrameAudio) {
  EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1));

  // Make sure the participant is marked as 'non-mixed' so that it is
  // ramped in next round.
  ResetAudioSource();

  // Construct the expected sound for the first mixing round.
  mixing_round_frame.CopyFrom(*participant_.fake_frame());
  RampIn(mixing_round_frame);

  // Mix frames and put the result into a frame.
-  audio_mixer_->MixActiveChannels();
aleloi 2016/08/05 09:52:22  This is the test that touches OutputMixer. Now Ge
  audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);
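Going by the call sites alone (the OutputMixer interface itself is not part of this diff), the separate MixActiveChannels() step appears to be gone: GetMixedAudio() now both performs the mixing round and returns the result at the sample rate and channel count the caller asks for. A sketch of the before/after driving code in these tests, read off the diff rather than taken from OutputMixer documentation:

// Old driving sequence in these tests: mix, then fetch the mixed audio.
audio_mixer_->MixActiveChannels();
audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);

// New driving sequence: a single call per mixing round, with the output
// format supplied by the caller.
audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);

CompareSecondFrameAudio below follows the same pattern, calling GetMixedAudio() once per mixing iteration.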

  // Compare the received frame with the expected.
  EXPECT_EQ(mixing_round_frame.sample_rate_hz_,
            mixed_results_frame_.sample_rate_hz_);
  EXPECT_EQ(mixing_round_frame.num_channels_,
            mixed_results_frame_.num_channels_);
  EXPECT_EQ(mixing_round_frame.samples_per_channel_,
            mixed_results_frame_.samples_per_channel_);
  EXPECT_EQ(0, memcmp(mixing_round_frame.data_, mixed_results_frame_.data_,
                      sizeof(mixing_round_frame.data_)));
}

TEST_F(BothMixersTest, CompareSecondFrameAudio) {
  EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(2));

  // Make sure the participant is marked as 'non-mixed' so that it is
  // ramped in next round.
  ResetAudioSource();

  // Do one mixing iteration.
-  audio_mixer_->MixActiveChannels();
+  audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);

  // Mix frames a second time and compare with the expected frame
  // (which is the participant's frame).
-  audio_mixer_->MixActiveChannels();
  audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);
  EXPECT_EQ(0,
            memcmp(participant_.fake_frame()->data_, mixed_results_frame_.data_,
                   sizeof(mixing_round_frame.data_)));
}

TEST_F(CompareWithOldMixerTest, TwoParticipantsNormalFrames) {
  Reset();
  AudioFrame first_frame, second_frame;

(...skipping 54 matching lines...)
                     MixerParticipant::AudioFrameInfo::kMuted);
    } else {
      AddParticipant(&audio_frames[i],
                     MixerParticipant::AudioFrameInfo::kNormal);
    }
    MixAndCompare();
  }
}

}  // namespace webrtc