Index: webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc
diff --git a/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc b/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc
index 8e4d0fc1edc540f3329649a72e464e5d222c2876..7fe37587fed96e71ae9c6ba4041316e3f98e471e 100644
--- a/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc
+++ b/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc
@@ -106,7 +106,9 @@ class CompareWithOldMixerTest : public testing::Test, AudioMixerOutputReceiver {
   // mix statuses.
   void MixAndCompare() {
     old_mixer_->Process();
-    new_mixer_->Mix(&new_mixer_frame_);
+    new_mixer_->Mix(kSampleRateHz,
+                    1,  // number of channels
+                    &new_mixer_frame_);

aleloi 2016/08/05 09:52:22:
Adapted tests to match new mixer API.

     EXPECT_EQ(0, memcmp(old_mixer_frame_.data_, new_mixer_frame_.data_,
                         sizeof(old_mixer_frame_.data_)));
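For context on the API change the reviewer comment refers to, the sketch below shows the mixer interface these updated call sites appear to assume. It is an illustration only: the class and parameter names are inferred from this diff, not taken from the actual webrtc/modules/audio_mixer headers.

// Sketch only: names are assumptions inferred from the call sites in this
// diff, not the real WebRTC mixer interface.
#include <cstddef>

class AudioFrame;  // WebRTC's audio buffer type, used here only by pointer.

class MixerSketch {
 public:
  virtual ~MixerSketch() = default;

  // Old form, removed by this CL: the mixer decided the output format itself.
  //   virtual void Mix(AudioFrame* audio_frame_for_mixing) = 0;

  // New form used by the updated tests: the caller picks the output sample
  // rate and channel count of the mixed frame.
  virtual void Mix(int sample_rate_hz,
                   size_t number_of_channels,
                   AudioFrame* audio_frame_for_mixing) = 0;
};

Under that assumption, one mixing iteration in the tests reduces to a single call such as new_mixer_->Mix(kSampleRateHz, 1, &new_mixer_frame_), which is the pattern in the hunk above and the one below.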
@@ -312,7 +314,9 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) {
        AudioFrame::kVadPassive;
   AudioFrame audio_frame;
-  mixer->Mix(&audio_frame);
+  mixer->Mix(kSampleRateHz,
+             1,  // number of channels
+             &audio_frame);
   for (int i = 0; i < kAudioSources; ++i) {
     bool is_mixed = participants[i].IsMixed();
@@ -340,7 +344,6 @@ TEST_F(BothMixersTest, CompareInitialFrameAudio) {
   RampIn(mixing_round_frame);
   // Mix frames and put the result into a frame.
-  audio_mixer_->MixActiveChannels();

aleloi 2016/08/05 09:52:22:
This is the test that touches OutputMixer. Now Ge

   audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);
   // Compare the received frame with the expected.
@@ -362,11 +365,10 @@ TEST_F(BothMixersTest, CompareSecondFrameAudio) {
   ResetAudioSource();
   // Do one mixing iteration.
-  audio_mixer_->MixActiveChannels();
+  audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);
   // Mix frames a second time and compare with the expected frame
   // (which is the participant's frame).
-  audio_mixer_->MixActiveChannels();
   audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_);
   EXPECT_EQ(0,
             memcmp(participant_.fake_frame()->data_, mixed_results_frame_.data_,
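To make the last two hunks concrete: previously the tests drove the mixer in two steps, MixActiveChannels() to produce the mix and GetMixedAudio() to read it out, whereas the updated tests perform one mixing iteration per GetMixedAudio() call. The sketch below illustrates that call pattern only; OutputMixerStub and its body are invented placeholders, not the real voe::OutputMixer API.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Simplified stand-in for webrtc::AudioFrame, just enough for the sketch.
struct AudioFrame {
  int16_t data_[160] = {0};
};

// Invented placeholder for the mixer object held by the tests.
class OutputMixerStub {
 public:
  // Assumed new behavior: GetMixedAudio() runs the mixing round itself, so
  // the separate MixActiveChannels() step from the old flow is gone.
  int GetMixedAudio(int sample_rate_hz, size_t number_of_channels,
                    AudioFrame* frame) {
    (void)sample_rate_hz;
    (void)number_of_channels;
    // ... mix all active channels directly into |frame| ...
    return frame != nullptr ? 0 : -1;
  }
};

int main() {
  OutputMixerStub audio_mixer;
  AudioFrame mixed_results_frame;
  // One call per mixing iteration, matching the updated tests above.
  assert(audio_mixer.GetMixedAudio(48000, 1, &mixed_results_frame) == 0);
  // A second iteration is again a single call, as in CompareSecondFrameAudio.
  assert(audio_mixer.GetMixedAudio(48000, 1, &mixed_results_frame) == 0);
  return 0;
}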