Index: webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc |
diff --git a/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc b/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc |
index 36bd7c4211e6a9fc0398dfebe3773b91e69a4a1b..9a51e911f7202b193abb50e4ccc73042c759383c 100644 |
--- a/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc |
+++ b/webrtc/modules/audio_mixer/test/audio_mixer_unittest.cc |
@@ -8,6 +8,7 @@ |
* be found in the AUTHORS file in the root of the source tree. |
*/ |
+#include <cstring> |
ivoc
2016/08/31 14:34:25
I think we generally use the <string.h> style for C headers in this codebase.
aleloi
2016/08/31 15:16:22
Seems like that. 21 matches for cstring and 243 for string.h.
|
#include <memory> |
#include <utility> |
@@ -202,6 +203,65 @@ TEST(AudioMixer, LargestEnergyVadActiveMixed) { |
} |
} |
+TEST(AudioMixer, FrameNotModifiedForSingleParticipant) { |
+ const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
+ |
+ MockMixerAudioSource participant; |
+ |
+ ResetFrame(participant.fake_frame()); |
+ const int n_samples = participant.fake_frame()->samples_per_channel_; |
+ |
+ // Modify the frame so that it's not zero. |
+ for (int j = 0; j < n_samples; j++) { |
+ participant.fake_frame()->data_[j] = j; |
+ } |
+ |
+ EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
+ EXPECT_CALL(participant, GetAudioFrameWithMuted(_, _)).Times(Exactly(2)); |
+ |
+ AudioFrame audio_frame; |
+ // Two mix iteration to compare after the ramp-up step. |
+ for (int i = 0; i < 2; i++) { |
+ mixer->Mix(kDefaultSampleRateHz, |
+ 1, // number of channels |
+ &audio_frame); |
+ } |
+ |
+ EXPECT_EQ(std::memcmp(participant.fake_frame()->data_, audio_frame.data_, |
ivoc
2016/08/31 14:34:24
Please change the order, i.e. EXPECT_EQ(0, std::me
aleloi
2016/08/31 15:16:22
Done. I also changed in a few other tests.
|
+ n_samples), |
+ 0); |
+} |
+ |
+TEST(AudioMixer, FrameNotModifiedForSingleAnonymousParticipant) { |
aleloi
2016/08/31 11:07:46
This test compares mixer output in the anonymous c
|
+ const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
+ |
+ MockMixerAudioSource participant; |
+ |
+ ResetFrame(participant.fake_frame()); |
+ const int n_samples = participant.fake_frame()->samples_per_channel_; |
+ |
+ // Modify the frame so that it's not zero. |
+ for (int j = 0; j < n_samples; j++) { |
+ participant.fake_frame()->data_[j] = j; |
+ } |
+ |
+ EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
+ EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&participant, true)); |
+ EXPECT_CALL(participant, GetAudioFrameWithMuted(_, _)).Times(Exactly(2)); |
+ |
+ AudioFrame audio_frame; |
+ // Two mix iteration to compare after the ramp-up step. |
+ for (int i = 0; i < 2; i++) { |
+ mixer->Mix(kDefaultSampleRateHz, |
+ 1, // number of channels |
+ &audio_frame); |
+ } |
+ |
+ EXPECT_EQ(std::memcmp(participant.fake_frame()->data_, audio_frame.data_, |
ivoc
2016/08/31 14:34:24
Same here.
aleloi
2016/08/31 15:16:22
Done.
|
+ n_samples), |
+ 0); |
+} |
+ |
TEST(AudioMixer, ParticipantSampleRate) { |
const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |