OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <memory> | 11 #include <memory> |
12 | 12 |
13 #include "testing/gmock/include/gmock/gmock.h" | 13 #include "testing/gmock/include/gmock/gmock.h" |
14 | 14 |
15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h
" | 15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h
" |
16 #include "webrtc/modules/audio_mixer/audio_mixer.h" | 16 #include "webrtc/modules/audio_mixer/audio_mixer.h" |
17 #include "webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h" | 17 #include "webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h" |
18 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" | 18 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" |
19 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 19 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" |
20 | 20 |
21 namespace webrtc { | 21 namespace webrtc { |
22 | 22 |
23 using testing::_; | 23 using testing::_; |
24 using testing::AtLeast; | 24 using testing::Exactly; |
25 using testing::Invoke; | 25 using testing::Invoke; |
26 using testing::Return; | 26 using testing::Return; |
27 | 27 |
28 using voe::AudioMixer; | 28 using voe::AudioMixer; |
29 | 29 |
30 class MockMixerAudioSource : public MixerAudioSource { | 30 class MockMixerAudioSource : public MixerAudioSource { |
31 public: | 31 public: |
32 MockMixerAudioSource() { | 32 MockMixerAudioSource() { |
33 ON_CALL(*this, GetAudioFrame(_, _)) | 33 ON_CALL(*this, GetAudioFrameWithMuted(_, _)) |
34 .WillByDefault(Invoke(this, &MockMixerAudioSource::FakeAudioFrame)); | 34 .WillByDefault( |
| 35 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted)); |
35 } | 36 } |
36 MOCK_METHOD2(GetAudioFrame, | 37 MOCK_METHOD2(GetAudioFrameWithMuted, |
37 int32_t(const int32_t id, AudioFrame* audio_frame)); | 38 AudioFrameWithInfo(const int32_t id, int sample_rate_hz)); |
38 MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id)); | 39 MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id)); |
| 40 |
39 AudioFrame* fake_frame() { return &fake_frame_; } | 41 AudioFrame* fake_frame() { return &fake_frame_; } |
40 | 42 |
41 private: | 43 private: |
42 AudioFrame fake_frame_; | 44 AudioFrame fake_frame_; |
43 int32_t FakeAudioFrame(const int32_t id, AudioFrame* audio_frame) { | 45 AudioFrameWithInfo FakeAudioFrameWithMuted(const int32_t id, |
44 audio_frame->CopyFrom(fake_frame_); | 46 int sample_rate_hz) { |
45 return 0; | 47 return { |
| 48 fake_frame(), // audio_frame_pointer |
| 49 AudioFrameInfo::kNormal, // audio_frame_info |
| 50 }; |
46 } | 51 } |
47 }; | 52 }; |
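
For context, the interface change above replaces the old push-style GetAudioFrame(id, AudioFrame*) with a pull-style GetAudioFrameWithMuted(id, sample_rate_hz) that returns the frame together with a muted/normal flag. A minimal sketch of the shapes this implies, with member names taken from the mock's inline comments; the real definitions live in audio_mixer_defines.h and may differ:

    // Sketch only: inferred from the mock above, not the actual WebRTC
    // definitions in webrtc/modules/audio_mixer/include/audio_mixer_defines.h.
    class AudioFrame;  // Defined elsewhere in WebRTC.

    enum class AudioFrameInfo {
      kNormal,  // The frame pointer holds valid, unmuted audio.
      kMuted,   // The source is muted; the frame contents can be skipped.
    };

    struct AudioFrameWithInfo {
      AudioFrame* audio_frame_pointer;  // Frame owned by the audio source.
      AudioFrameInfo audio_frame_info;  // Muted/normal status of the frame.
    };
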
48 | 53 |
49 class BothMixersTest : public testing::Test { | 54 class BothMixersTest : public testing::Test { |
50 protected: | 55 protected: |
51 BothMixersTest() { | 56 BothMixersTest() { |
52 // Create an OutputMixer. | 57 // Create an OutputMixer. |
53 AudioMixer::Create(audio_mixer_, kId); | 58 AudioMixer::Create(audio_mixer_, kId); |
54 | 59 |
55 // Create one mixer participant and add it to the mixer. | 60 // Create one mixer participant and add it to the mixer. |
(...skipping 106 matching lines...)
162 participants[i].fake_frame()->num_channels_ = 1; | 167 participants[i].fake_frame()->num_channels_ = 1; |
163 | 168 |
164 // Frame duration 10ms. | 169 // Frame duration 10ms. |
165 participants[i].fake_frame()->samples_per_channel_ = kSampleRateHz / 100; | 170 participants[i].fake_frame()->samples_per_channel_ = kSampleRateHz / 100; |
166 | 171 |
167 // We set the 80-th sample value since the first 80 samples may be | 172 // We set the 80-th sample value since the first 80 samples may be |
168 // modified by a ramped-in window. | 173 // modified by a ramped-in window. |
169 participants[i].fake_frame()->data_[80] = i; | 174 participants[i].fake_frame()->data_[80] = i; |
170 | 175 |
171 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 176 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
172 EXPECT_CALL(participants[i], GetAudioFrame(_, _)).Times(AtLeast(1)); | 177 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_, _)) |
| 178 .Times(Exactly(1)); |
173 EXPECT_CALL(participants[i], NeededFrequency(_)) | 179 EXPECT_CALL(participants[i], NeededFrequency(_)) |
174 .WillRepeatedly(Return(kSampleRateHz)); | 180 .WillRepeatedly(Return(kSampleRateHz)); |
175 } | 181 } |
176 | 182 |
177 // Last participant gives audio frame with passive VAD, although it has the | 183 // Last participant gives audio frame with passive VAD, although it has the |
178 // largest energy. | 184 // largest energy. |
179 participants[kParticipants - 1].fake_frame()->vad_activity_ = | 185 participants[kParticipants - 1].fake_frame()->vad_activity_ = |
180 AudioFrame::kVadPassive; | 186 AudioFrame::kVadPassive; |
181 | 187 |
182 AudioFrame audio_frame; | 188 AudioFrame audio_frame; |
183 mixer->Mix(&audio_frame); | 189 mixer->Mix(&audio_frame); |
184 | 190 |
185 for (int i = 0; i < kParticipants; ++i) { | 191 for (int i = 0; i < kParticipants; ++i) { |
186 bool is_mixed = participants[i].IsMixed(); | 192 bool is_mixed = participants[i].IsMixed(); |
187 if (i == kParticipants - 1 || | 193 if (i == kParticipants - 1 || |
188 i < kParticipants - 1 - | 194 i < kParticipants - 1 - |
189 NewAudioConferenceMixer::kMaximumAmountOfMixedParticipants) { | 195 NewAudioConferenceMixer::kMaximumAmountOfMixedParticipants) { |
190 EXPECT_FALSE(is_mixed) << "Mixing status of Participant #" << i | 196 EXPECT_FALSE(is_mixed) << "Mixing status of Participant #" << i |
191 << " wrong."; | 197 << " wrong."; |
192 } else { | 198 } else { |
193 EXPECT_TRUE(is_mixed) << "Mixing status of Participant #" << i | 199 EXPECT_TRUE(is_mixed) << "Mixing status of Participant #" << i |
194 << " wrong."; | 200 << " wrong."; |
195 } | 201 } |
196 } | 202 } |
197 } | 203 } |
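
Two details of the loop above are worth spelling out: at most kMaximumAmountOfMixedParticipants sources end up mixed, favouring the highest-energy VAD-active ones, and the marker is written at data_[80] because a newly mixed participant is faded in, so earlier samples may be scaled. A minimal illustration of such a linear fade-in, assuming behaviour similar to RampIn from audio_frame_manipulator.h; the 80-sample window and the exact gain curve here are assumptions for illustration only:

    #include <cstddef>
    #include <cstdint>

    // Illustration only: fade a frame in linearly over the first
    // kRampSizeSamples samples so a newly mixed participant does not
    // pop in at full level. The real behaviour lives in
    // audio_frame_manipulator.h and may differ.
    constexpr size_t kRampSizeSamples = 80;  // Matches the probed index above.

    void IllustrativeRampIn(int16_t* samples, size_t samples_per_channel) {
      const size_t ramp = samples_per_channel < kRampSizeSamples
                              ? samples_per_channel
                              : kRampSizeSamples;
      for (size_t i = 0; i < ramp; ++i) {
        // Gain rises from 0 towards 1 across the ramp window.
        samples[i] =
            static_cast<int16_t>(samples[i] * (static_cast<float>(i) / ramp));
      }
    }
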
198 | 204 |
199 TEST_F(BothMixersTest, CompareInitialFrameAudio) { | 205 TEST_F(BothMixersTest, CompareInitialFrameAudio) { |
200 EXPECT_CALL(participant_, GetAudioFrame(_, _)).Times(AtLeast(1)); | 206 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(1)); |
201 | 207 |
202 // Make sure the participant is marked as 'non-mixed' so that it is | 208 // Make sure the participant is marked as 'non-mixed' so that it is |
203 // ramped in next round. | 209 // ramped in next round. |
204 ResetParticipant(); | 210 ResetParticipant(); |
205 | 211 |
206 // Construct the expected sound for the first mixing round. | 212 // Construct the expected sound for the first mixing round. |
207 mixing_round_frame.CopyFrom(*participant_.fake_frame()); | 213 mixing_round_frame.CopyFrom(*participant_.fake_frame()); |
208 RampIn(mixing_round_frame); | 214 RampIn(mixing_round_frame); |
209 | 215 |
210 // Mix frames and put the result into a frame. | 216 // Mix frames and put the result into a frame. |
211 audio_mixer_->MixActiveChannels(); | 217 audio_mixer_->MixActiveChannels(); |
212 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); | 218 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); |
213 | 219 |
214 // Compare the received frame with the expected. | 220 // Compare the received frame with the expected. |
215 EXPECT_EQ(mixing_round_frame.sample_rate_hz_, | 221 EXPECT_EQ(mixing_round_frame.sample_rate_hz_, |
216 mixed_results_frame_.sample_rate_hz_); | 222 mixed_results_frame_.sample_rate_hz_); |
217 EXPECT_EQ(mixing_round_frame.num_channels_, | 223 EXPECT_EQ(mixing_round_frame.num_channels_, |
218 mixed_results_frame_.num_channels_); | 224 mixed_results_frame_.num_channels_); |
219 EXPECT_EQ(mixing_round_frame.samples_per_channel_, | 225 EXPECT_EQ(mixing_round_frame.samples_per_channel_, |
220 mixed_results_frame_.samples_per_channel_); | 226 mixed_results_frame_.samples_per_channel_); |
221 EXPECT_EQ(0, memcmp(mixing_round_frame.data_, mixed_results_frame_.data_, | 227 EXPECT_EQ(0, memcmp(mixing_round_frame.data_, mixed_results_frame_.data_, |
222 sizeof(mixing_round_frame.data_))); | 228 sizeof(mixing_round_frame.data_))); |
223 } | 229 } |
224 | 230 |
225 TEST_F(BothMixersTest, CompareSecondFrameAudio) { | 231 TEST_F(BothMixersTest, CompareSecondFrameAudio) { |
226 EXPECT_CALL(participant_, GetAudioFrame(_, _)).Times(AtLeast(1)); | 232 EXPECT_CALL(participant_, GetAudioFrameWithMuted(_, _)).Times(Exactly(2)); |
227 | 233 |
228 // Make sure the participant is marked as 'non-mixed' so that it is | 234 // Make sure the participant is marked as 'non-mixed' so that it is |
229 // ramped in next round. | 235 // ramped in next round. |
230 ResetParticipant(); | 236 ResetParticipant(); |
231 | 237 |
232 // Do one mixing iteration. | 238 // Do one mixing iteration. |
233 audio_mixer_->MixActiveChannels(); | 239 audio_mixer_->MixActiveChannels(); |
234 | 240 |
235 // Mix frames a second time and compare with the expected frame | 241 // Mix frames a second time and compare with the expected frame |
236 // (which is the participant's frame). | 242 // (which is the participant's frame). |
237 audio_mixer_->MixActiveChannels(); | 243 audio_mixer_->MixActiveChannels(); |
238 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); | 244 audio_mixer_->GetMixedAudio(kSampleRateHz, 1, &mixed_results_frame_); |
239 EXPECT_EQ(0, | 245 EXPECT_EQ(0, |
240 memcmp(participant_.fake_frame()->data_, mixed_results_frame_.data_, | 246 memcmp(participant_.fake_frame()->data_, mixed_results_frame_.data_, |
241 sizeof(mixing_round_frame.data_))); | 247 sizeof(mixing_round_frame.data_))); |
242 } | 248 } |
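
The expectations in both tests are tightened from AtLeast(1) to Exactly(n), reflecting that the pull-based mixer is expected to call GetAudioFrameWithMuted exactly once per mixing round. As a standalone reminder of the gmock cardinality difference (Greeter and Hello are made-up names for illustration; the include path follows this file's convention, and gmock.h pulls in gtest here):

    #include "testing/gmock/include/gmock/gmock.h"

    class Greeter {
     public:
      virtual ~Greeter() {}
      virtual void Hello() = 0;
    };

    class MockGreeter : public Greeter {
     public:
      MOCK_METHOD0(Hello, void());
    };

    TEST(CardinalityExample, ExactlyVersusAtLeast) {
      MockGreeter greeter;
      // Exactly(2) fails if Hello() runs once or three times;
      // AtLeast(1) would have accepted any positive call count.
      EXPECT_CALL(greeter, Hello()).Times(::testing::Exactly(2));
      greeter.Hello();
      greeter.Hello();
    }
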
243 | 249 |
244 } // namespace webrtc | 250 } // namespace webrtc |