OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <memory> | 11 #include <memory> |
12 #include <utility> | 12 #include <utility> |
13 | 13 |
14 #include "testing/gmock/include/gmock/gmock.h" | 14 #include "testing/gmock/include/gmock/gmock.h" |
15 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" | 15 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" |
16 #include "webrtc/modules/audio_mixer/new_audio_conference_mixer.h" | 16 #include "webrtc/modules/audio_mixer/audio_mixer.h" |
17 | 17 |
18 using testing::_; | 18 using testing::_; |
19 using testing::Exactly; | 19 using testing::Exactly; |
20 using testing::Invoke; | 20 using testing::Invoke; |
21 using testing::Return; | 21 using testing::Return; |
22 | 22 |
23 namespace webrtc { | 23 namespace webrtc { |
24 | 24 |
25 namespace { | 25 namespace { |
26 | 26 |
(...skipping 51 matching lines...)
78 // Creates participants from |frames| and |frame_info| and adds them | 78 // Creates participants from |frames| and |frame_info| and adds them |
79 // to the mixer. Compares mixed status with |expected_status| | 79 // to the mixer. Compares mixed status with |expected_status| |
80 void MixAndCompare( | 80 void MixAndCompare( |
81 const std::vector<AudioFrame>& frames, | 81 const std::vector<AudioFrame>& frames, |
82 const std::vector<MixerAudioSource::AudioFrameInfo>& frame_info, | 82 const std::vector<MixerAudioSource::AudioFrameInfo>& frame_info, |
83 const std::vector<bool>& expected_status) { | 83 const std::vector<bool>& expected_status) { |
84 size_t num_audio_sources = frames.size(); | 84 size_t num_audio_sources = frames.size(); |
85 RTC_DCHECK(frames.size() == frame_info.size() && | 85 RTC_DCHECK(frames.size() == frame_info.size() && |
86 frame_info.size() == expected_status.size()); | 86 frame_info.size() == expected_status.size()); |
87 | 87 |
88 std::unique_ptr<NewAudioConferenceMixer> mixer( | 88 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
89 NewAudioConferenceMixer::Create(kId)); | |
90 std::vector<MockMixerAudioSource> participants(num_audio_sources); | 89 std::vector<MockMixerAudioSource> participants(num_audio_sources); |
91 | 90 |
92 for (size_t i = 0; i < num_audio_sources; i++) { | 91 for (size_t i = 0; i < num_audio_sources; i++) { |
93 participants[i].fake_frame()->CopyFrom(frames[i]); | 92 participants[i].fake_frame()->CopyFrom(frames[i]); |
94 participants[i].set_fake_info(frame_info[i]); | 93 participants[i].set_fake_info(frame_info[i]); |
95 } | 94 } |
96 | 95 |
97 for (size_t i = 0; i < num_audio_sources; i++) { | 96 for (size_t i = 0; i < num_audio_sources; i++) { |
98 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 97 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
99 EXPECT_CALL(participants[i], | 98 EXPECT_CALL(participants[i], |
100 GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 99 GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) |
101 .Times(Exactly(1)); | 100 .Times(Exactly(1)); |
102 } | 101 } |
103 | 102 |
104 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 103 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
105 | 104 |
106 for (size_t i = 0; i < num_audio_sources; i++) { | 105 for (size_t i = 0; i < num_audio_sources; i++) { |
107 EXPECT_EQ(participants[i].IsMixed(), expected_status[i]) | 106 EXPECT_EQ(participants[i].IsMixed(), expected_status[i]) |
108 << "Mixed status of AudioSource #" << i << " wrong."; | 107 << "Mixed status of AudioSource #" << i << " wrong."; |
109 } | 108 } |
110 } | 109 } |
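
A minimal usage sketch of the MixAndCompare() helper above, mirroring the contract exercised by the tests further down (the three vectors are parallel, one entry per audio source; the concrete values here are illustrative only):

    // Two sources with identical frames; the second reports its frame as
    // muted, so only the first is expected to be marked as mixed.
    std::vector<AudioFrame> frames(2);
    for (auto& frame : frames) {
      ResetFrame(&frame);
    }
    std::vector<MixerAudioSource::AudioFrameInfo> frame_info = {
        MixerAudioSource::AudioFrameInfo::kNormal,
        MixerAudioSource::AudioFrameInfo::kMuted};
    std::vector<bool> expected_status = {true, false};
    MixAndCompare(frames, frame_info, expected_status);
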
111 | 110 |
112 TEST(AudioMixer, AnonymousAndNamed) { | 111 TEST(AudioMixer, AnonymousAndNamed) { |
113 // Should not matter even if participants are more than | 112 // Should not matter even if participants are more than |
114 // kMaximumAmountOfMixedAudioSources. | 113 // kMaximumAmountOfMixedAudioSources. |
115 constexpr int kNamed = | 114 constexpr int kNamed = AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
116 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1; | 115 constexpr int kAnonymous = AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
117 constexpr int kAnonymous = | |
118 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1; | |
119 | 116 |
120 std::unique_ptr<NewAudioConferenceMixer> mixer( | 117 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
121 NewAudioConferenceMixer::Create(kId)); | |
122 | 118 |
123 MockMixerAudioSource named[kNamed]; | 119 MockMixerAudioSource named[kNamed]; |
124 MockMixerAudioSource anonymous[kAnonymous]; | 120 MockMixerAudioSource anonymous[kAnonymous]; |
125 | 121 |
126 for (int i = 0; i < kNamed; ++i) { | 122 for (int i = 0; i < kNamed; ++i) { |
127 EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], true)); | 123 EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], true)); |
128 EXPECT_TRUE(mixer->MixabilityStatus(named[i])); | 124 EXPECT_TRUE(mixer->MixabilityStatus(named[i])); |
129 } | 125 } |
130 | 126 |
131 for (int i = 0; i < kAnonymous; ++i) { | 127 for (int i = 0; i < kAnonymous; ++i) { |
(...skipping 25 matching lines...)
157 } | 153 } |
158 | 154 |
159 // SetMixabilityStatus(anonymous, false) will remove anonymous from both | 155 // SetMixabilityStatus(anonymous, false) will remove anonymous from both |
160 // anonymous and named groups. | 156 // anonymous and named groups. |
161 EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false)); | 157 EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false)); |
162 EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1])); | 158 EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1])); |
163 EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1])); | 159 EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1])); |
164 } | 160 } |
165 | 161 |
166 TEST(AudioMixer, LargestEnergyVadActiveMixed) { | 162 TEST(AudioMixer, LargestEnergyVadActiveMixed) { |
167 const int kAudioSources = | 163 const int kAudioSources = AudioMixer::kMaximumAmountOfMixedAudioSources + 3; |
168 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 3; | |
169 | 164 |
170 std::unique_ptr<NewAudioConferenceMixer> mixer( | 165 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
171 NewAudioConferenceMixer::Create(kId)); | |
172 | 166 |
173 MockMixerAudioSource participants[kAudioSources]; | 167 MockMixerAudioSource participants[kAudioSources]; |
174 | 168 |
175 for (int i = 0; i < kAudioSources; ++i) { | 169 for (int i = 0; i < kAudioSources; ++i) { |
176 ResetFrame(participants[i].fake_frame()); | 170 ResetFrame(participants[i].fake_frame()); |
177 | 171 |
178 // We set the 80-th sample value since the first 80 samples may be | 172 // We set the 80-th sample value since the first 80 samples may be |
179 // modified by a ramped-in window. | 173 // modified by a ramped-in window. |
180 participants[i].fake_frame()->data_[80] = i; | 174 participants[i].fake_frame()->data_[80] = i; |
181 | 175 |
182 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 176 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
183 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_, _)) | 177 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_, _)) |
184 .Times(Exactly(1)); | 178 .Times(Exactly(1)); |
185 } | 179 } |
186 | 180 |
187 // Last participant gives audio frame with passive VAD, although it has the | 181 // Last participant gives audio frame with passive VAD, although it has the |
188 // largest energy. | 182 // largest energy. |
189 participants[kAudioSources - 1].fake_frame()->vad_activity_ = | 183 participants[kAudioSources - 1].fake_frame()->vad_activity_ = |
190 AudioFrame::kVadPassive; | 184 AudioFrame::kVadPassive; |
191 | 185 |
192 AudioFrame audio_frame; | 186 AudioFrame audio_frame; |
193 mixer->Mix(kDefaultSampleRateHz, | 187 mixer->Mix(kDefaultSampleRateHz, |
194 1, // number of channels | 188 1, // number of channels |
195 &audio_frame); | 189 &audio_frame); |
196 | 190 |
197 for (int i = 0; i < kAudioSources; ++i) { | 191 for (int i = 0; i < kAudioSources; ++i) { |
198 bool is_mixed = participants[i].IsMixed(); | 192 bool is_mixed = participants[i].IsMixed(); |
199 if (i == kAudioSources - 1 || | 193 if (i == kAudioSources - 1 || |
200 i < kAudioSources - 1 - | 194 i < kAudioSources - 1 - AudioMixer::kMaximumAmountOfMixedAudioSources) { |
201 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources) { | |
202 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i | 195 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i |
203 << " wrong."; | 196 << " wrong."; |
204 } else { | 197 } else { |
205 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i | 198 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i |
206 << " wrong."; | 199 << " wrong."; |
207 } | 200 } |
208 } | 201 } |
209 } | 202 } |
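
The comment in the test above notes that the first 80 samples may be altered by a ramp-in window, which is why the energy marker is written to sample index 80 rather than an earlier one. The mixer's actual ramping code is not part of this diff; the following is only a sketch of the kind of linear ramp-in being assumed, with kRampInSamples and ApplyLinearRampIn as hypothetical names:

    #include <algorithm>  // std::min
    #include <cstdint>

    // Hypothetical illustration only: scale the first kRampInSamples samples
    // linearly from zero up to full amplitude, leaving sample 80 and onwards
    // untouched.
    constexpr size_t kRampInSamples = 80;  // assumed ramp length
    void ApplyLinearRampIn(int16_t* data, size_t samples_per_channel) {
      const size_t ramp = std::min(kRampInSamples, samples_per_channel);
      for (size_t i = 0; i < ramp; ++i) {
        data[i] = static_cast<int16_t>(data[i] * static_cast<float>(i) / ramp);
      }
    }
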
210 | 203 |
211 TEST(AudioMixer, ParticipantSampleRate) { | 204 TEST(AudioMixer, ParticipantSampleRate) { |
212 std::unique_ptr<NewAudioConferenceMixer> mixer( | 205 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
213 NewAudioConferenceMixer::Create(kId)); | |
214 | 206 |
215 MockMixerAudioSource participant; | 207 MockMixerAudioSource participant; |
216 ResetFrame(participant.fake_frame()); | 208 ResetFrame(participant.fake_frame()); |
217 | 209 |
218 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 210 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
219 for (auto frequency : {8000, 16000, 32000, 48000}) { | 211 for (auto frequency : {8000, 16000, 32000, 48000}) { |
220 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, frequency)) | 212 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, frequency)) |
221 .Times(Exactly(1)); | 213 .Times(Exactly(1)); |
222 mixer->Mix(frequency, 1, &frame_for_mixing); | 214 mixer->Mix(frequency, 1, &frame_for_mixing); |
223 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); | 215 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); |
224 } | 216 } |
225 } | 217 } |
226 | 218 |
227 TEST(AudioMixer, ParticipantNumberOfChannels) { | 219 TEST(AudioMixer, ParticipantNumberOfChannels) { |
228 std::unique_ptr<NewAudioConferenceMixer> mixer( | 220 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
229 NewAudioConferenceMixer::Create(kId)); | |
230 | 221 |
231 MockMixerAudioSource participant; | 222 MockMixerAudioSource participant; |
232 ResetFrame(participant.fake_frame()); | 223 ResetFrame(participant.fake_frame()); |
233 | 224 |
234 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 225 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
235 for (size_t number_of_channels : {1, 2}) { | 226 for (size_t number_of_channels : {1, 2}) { |
236 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 227 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) |
237 .Times(Exactly(1)); | 228 .Times(Exactly(1)); |
238 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); | 229 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); |
239 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); | 230 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); |
240 } | 231 } |
241 } | 232 } |
242 | 233 |
243 // Test that the volume is reported as zero when the mixer input | 234 // Test that the volume is reported as zero when the mixer input |
244 // comprises only zero values. | 235 // comprises only zero values. |
245 TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) { | 236 TEST(AudioMixer, LevelIsZeroWhenMixingZeroes) { |
246 std::unique_ptr<NewAudioConferenceMixer> mixer( | 237 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
247 NewAudioConferenceMixer::Create(kId)); | |
248 | 238 |
249 MockMixerAudioSource participant; | 239 MockMixerAudioSource participant; |
250 ResetFrame(participant.fake_frame()); | 240 ResetFrame(participant.fake_frame()); |
251 | 241 |
252 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 242 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
253 for (size_t i = 0; i < 11; i++) { | 243 for (size_t i = 0; i < 11; i++) { |
254 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 244 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) |
255 .Times(Exactly(1)); | 245 .Times(Exactly(1)); |
256 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 246 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
257 } | 247 } |
258 | 248 |
259 EXPECT_EQ(static_cast<uint32_t>(0), mixer->GetOutputAudioLevel()); | 249 EXPECT_EQ(static_cast<uint32_t>(0), mixer->GetOutputAudioLevel()); |
260 EXPECT_EQ(static_cast<uint32_t>(0), mixer->GetOutputAudioLevelFullRange()); | 250 EXPECT_EQ(static_cast<uint32_t>(0), mixer->GetOutputAudioLevelFullRange()); |
261 } | 251 } |
262 | 252 |
263 // Test that the reported volume is maximal when the mixer | 253 // Test that the reported volume is maximal when the mixer |
264 // input comprises frames with maximal values. | 254 // input comprises frames with maximal values. |
265 TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) { | 255 TEST(AudioMixer, LevelIsMaximalWhenMixingMaximalValues) { |
266 std::unique_ptr<NewAudioConferenceMixer> mixer( | 256 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
267 NewAudioConferenceMixer::Create(kId)); | |
268 | 257 |
269 MockMixerAudioSource participant; | 258 MockMixerAudioSource participant; |
270 ResetFrame(participant.fake_frame()); | 259 ResetFrame(participant.fake_frame()); |
271 | 260 |
272 // Fill participant frame data with maximal sound. | 261 // Fill participant frame data with maximal sound. |
273 std::fill(participant.fake_frame()->data_, | 262 std::fill(participant.fake_frame()->data_, |
274 participant.fake_frame()->data_ + kDefaultSampleRateHz / 100, | 263 participant.fake_frame()->data_ + kDefaultSampleRateHz / 100, |
275 std::numeric_limits<int16_t>::max()); | 264 std::numeric_limits<int16_t>::max()); |
276 | 265 |
277 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 266 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); |
278 for (size_t i = 0; i < 11; i++) { | 267 for (size_t i = 0; i < 11; i++) { |
279 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 268 EXPECT_CALL(participant, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) |
280 .Times(Exactly(1)); | 269 .Times(Exactly(1)); |
281 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 270 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
282 } | 271 } |
283 | 272 |
284 // 9 is the highest possible audio level | 273 // 9 is the highest possible audio level |
285 EXPECT_EQ(static_cast<uint32_t>(9), mixer->GetOutputAudioLevel()); | 274 EXPECT_EQ(static_cast<uint32_t>(9), mixer->GetOutputAudioLevel()); |
286 | 275 |
287 // 0x7fff = 32767 is the highest full range audio level. | 276 // 0x7fff = 32767 is the highest full range audio level. |
288 EXPECT_EQ(static_cast<uint32_t>(std::numeric_limits<int16_t>::max()), | 277 EXPECT_EQ(static_cast<uint32_t>(std::numeric_limits<int16_t>::max()), |
289 mixer->GetOutputAudioLevelFullRange()); | 278 mixer->GetOutputAudioLevelFullRange()); |
290 } | 279 } |
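
The two getters exercised above report the mixed output level on different scales: GetOutputAudioLevel() on a coarse 0-9 scale and GetOutputAudioLevelFullRange() on the raw 0-32767 range. The mapping between them is not visible in this diff; the sketch below shows one plausible threshold-based reduction, where CoarseLevelFromFullRange and its threshold values are purely illustrative assumptions:

    #include <cstdint>

    // Hypothetical helper, for illustration only: collapse a full-range level
    // (0..32767) onto a coarse 0..9 scale, so that silence maps to 0 and a
    // maximal input maps to 9, matching the expectations in the tests above.
    uint32_t CoarseLevelFromFullRange(uint32_t full_range_level) {
      static const uint32_t kThresholds[9] = {  // assumed, roughly exponential
          30, 60, 120, 240, 480, 1000, 2000, 4000, 8000};
      uint32_t level = 0;
      while (level < 9 && full_range_level > kThresholds[level]) {
        ++level;
      }
      return level;
    }
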
291 | 280 |
292 // Test mixing two participants, one with silent frames and one with | 281 // Test mixing two participants, one with silent frames and one with |
293 // maximal values; both should be polled on every mix iteration. | 282 // maximal values; both should be polled on every mix iteration. |
294 TEST(AudioMixer, VolumeTwoParticipants) { | 283 TEST(AudioMixer, VolumeTwoParticipants) { |
295 std::unique_ptr<NewAudioConferenceMixer> mixer( | 284 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
296 NewAudioConferenceMixer::Create(kId)); | |
297 | 285 |
298 MockMixerAudioSource participant1, participant2; | 286 MockMixerAudioSource participant1, participant2; |
299 ResetFrame(participant1.fake_frame()); | 287 ResetFrame(participant1.fake_frame()); |
300 ResetFrame(participant2.fake_frame()); | 288 ResetFrame(participant2.fake_frame()); |
301 | 289 |
302 // Fill participant1 with silence and participant2 with maximal sound. | 290 // Fill participant1 with silence and participant2 with maximal sound. |
303 std::fill(participant1.fake_frame()->data_, | 291 std::fill(participant1.fake_frame()->data_, |
304 participant1.fake_frame()->data_ + kDefaultSampleRateHz / 100, 0); | 292 participant1.fake_frame()->data_ + kDefaultSampleRateHz / 100, 0); |
305 std::fill(participant2.fake_frame()->data_, | 293 std::fill(participant2.fake_frame()->data_, |
306 participant2.fake_frame()->data_ + kDefaultSampleRateHz / 100, | 294 participant2.fake_frame()->data_ + kDefaultSampleRateHz / 100, |
307 std::numeric_limits<int16_t>::max()); | 295 std::numeric_limits<int16_t>::max()); |
308 | 296 |
309 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant1, true)); | 297 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant1, true)); |
310 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant2, true)); | 298 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant2, true)); |
311 for (size_t i = 0; i < 11; i++) { | 299 for (size_t i = 0; i < 11; i++) { |
312 EXPECT_CALL(participant1, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 300 EXPECT_CALL(participant1, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) |
313 .Times(Exactly(1)); | 301 .Times(Exactly(1)); |
314 EXPECT_CALL(participant2, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) | 302 EXPECT_CALL(participant2, GetAudioFrameWithMuted(_, kDefaultSampleRateHz)) |
315 .Times(Exactly(1)); | 303 .Times(Exactly(1)); |
316 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 304 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
317 } | 305 } |
318 } | 306 } |
319 | 307 |
320 // Maximal amount of participants are mixed one iteration, then | 308 // Maximal amount of participants are mixed one iteration, then |
321 // another participant with higher energy is added. | 309 // another participant with higher energy is added. |
322 TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { | 310 TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { |
323 const int kAudioSources = | 311 const int kAudioSources = AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
324 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1; | |
325 | 312 |
326 std::unique_ptr<NewAudioConferenceMixer> mixer( | 313 std::unique_ptr<AudioMixer> mixer(AudioMixer::Create(kId)); |
327 NewAudioConferenceMixer::Create(kId)); | |
328 MockMixerAudioSource participants[kAudioSources]; | 314 MockMixerAudioSource participants[kAudioSources]; |
329 | 315 |
330 for (size_t i = 0; i < kAudioSources; i++) { | 316 for (size_t i = 0; i < kAudioSources; i++) { |
331 ResetFrame(participants[i].fake_frame()); | 317 ResetFrame(participants[i].fake_frame()); |
332 participants[i].fake_frame()->data_[0] = 100 * i; | 318 participants[i].fake_frame()->data_[0] = 100 * i; |
333 } | 319 } |
334 | 320 |
335 for (size_t i = 0; i < kAudioSources - 1; i++) { | 321 for (size_t i = 0; i < kAudioSources - 1; i++) { |
336 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 322 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); |
337 EXPECT_CALL(participants[i], | 323 EXPECT_CALL(participants[i], |
(...skipping 23 matching lines...)
361 EXPECT_FALSE(participants[0].IsMixed()) | 347 EXPECT_FALSE(participants[0].IsMixed()) |
362 << "Mixed status of AudioSource #0 wrong."; | 348 << "Mixed status of AudioSource #0 wrong."; |
363 | 349 |
364 for (size_t i = 1; i < kAudioSources; i++) { | 350 for (size_t i = 1; i < kAudioSources; i++) { |
365 EXPECT_EQ(participants[i].IsMixed(), true) | 351 EXPECT_EQ(participants[i].IsMixed(), true) |
366 << "Mixed status of AudioSource #" << i << " wrong."; | 352 << "Mixed status of AudioSource #" << i << " wrong."; |
367 } | 353 } |
368 } | 354 } |
369 | 355 |
370 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { | 356 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { |
371 const int kAudioSources = | 357 const int kAudioSources = AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
372 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1; | |
373 | 358 |
374 std::vector<AudioFrame> frames(kAudioSources); | 359 std::vector<AudioFrame> frames(kAudioSources); |
375 for (auto& frame : frames) { | 360 for (auto& frame : frames) { |
376 ResetFrame(&frame); | 361 ResetFrame(&frame); |
377 } | 362 } |
378 | 363 |
379 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 364 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( |
380 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 365 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); |
381 frame_info[0] = MixerAudioSource::AudioFrameInfo::kMuted; | 366 frame_info[0] = MixerAudioSource::AudioFrameInfo::kMuted; |
382 std::vector<bool> expected_status(kAudioSources, true); | 367 std::vector<bool> expected_status(kAudioSources, true); |
383 expected_status[0] = false; | 368 expected_status[0] = false; |
384 | 369 |
385 MixAndCompare(frames, frame_info, expected_status); | 370 MixAndCompare(frames, frame_info, expected_status); |
386 } | 371 } |
387 | 372 |
388 TEST(AudioMixer, PassiveShouldMixAfterNormal) { | 373 TEST(AudioMixer, PassiveShouldMixAfterNormal) { |
389 const int kAudioSources = | 374 const int kAudioSources = AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
390 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1; | |
391 | 375 |
392 std::vector<AudioFrame> frames(kAudioSources); | 376 std::vector<AudioFrame> frames(kAudioSources); |
393 for (auto& frame : frames) { | 377 for (auto& frame : frames) { |
394 ResetFrame(&frame); | 378 ResetFrame(&frame); |
395 } | 379 } |
396 | 380 |
397 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 381 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( |
398 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 382 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); |
399 frames[0].vad_activity_ = AudioFrame::kVadPassive; | 383 frames[0].vad_activity_ = AudioFrame::kVadPassive; |
400 std::vector<bool> expected_status(kAudioSources, true); | 384 std::vector<bool> expected_status(kAudioSources, true); |
401 expected_status[0] = false; | 385 expected_status[0] = false; |
402 | 386 |
403 MixAndCompare(frames, frame_info, expected_status); | 387 MixAndCompare(frames, frame_info, expected_status); |
404 } | 388 } |
405 | 389 |
406 TEST(AudioMixer, ActiveShouldMixBeforeLoud) { | 390 TEST(AudioMixer, ActiveShouldMixBeforeLoud) { |
407 const int kAudioSources = | 391 const int kAudioSources = AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
408 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1; | |
409 | 392 |
410 std::vector<AudioFrame> frames(kAudioSources); | 393 std::vector<AudioFrame> frames(kAudioSources); |
411 for (auto& frame : frames) { | 394 for (auto& frame : frames) { |
412 ResetFrame(&frame); | 395 ResetFrame(&frame); |
413 } | 396 } |
414 | 397 |
415 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 398 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( |
416 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 399 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); |
417 frames[0].vad_activity_ = AudioFrame::kVadPassive; | 400 frames[0].vad_activity_ = AudioFrame::kVadPassive; |
418 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, | 401 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
419 std::numeric_limits<int16_t>::max()); | 402 std::numeric_limits<int16_t>::max()); |
420 std::vector<bool> expected_status(kAudioSources, true); | 403 std::vector<bool> expected_status(kAudioSources, true); |
421 expected_status[0] = false; | 404 expected_status[0] = false; |
422 | 405 |
423 MixAndCompare(frames, frame_info, expected_status); | 406 MixAndCompare(frames, frame_info, expected_status); |
424 } | 407 } |
425 | 408 |
426 TEST(AudioMixer, UnmutedShouldMixBeforeLoud) { | 409 TEST(AudioMixer, UnmutedShouldMixBeforeLoud) { |
427 const int kAudioSources = | 410 const int kAudioSources = AudioMixer::kMaximumAmountOfMixedAudioSources + 1; |
428 NewAudioConferenceMixer::kMaximumAmountOfMixedAudioSources + 1; | |
429 | 411 |
430 std::vector<AudioFrame> frames(kAudioSources); | 412 std::vector<AudioFrame> frames(kAudioSources); |
431 for (auto& frame : frames) { | 413 for (auto& frame : frames) { |
432 ResetFrame(&frame); | 414 ResetFrame(&frame); |
433 } | 415 } |
434 | 416 |
435 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( | 417 std::vector<MixerAudioSource::AudioFrameInfo> frame_info( |
436 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); | 418 kAudioSources, MixerAudioSource::AudioFrameInfo::kNormal); |
437 frame_info[0] = MixerAudioSource::AudioFrameInfo::kMuted; | 419 frame_info[0] = MixerAudioSource::AudioFrameInfo::kMuted; |
438 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, | 420 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
439 std::numeric_limits<int16_t>::max()); | 421 std::numeric_limits<int16_t>::max()); |
440 std::vector<bool> expected_status(kAudioSources, true); | 422 std::vector<bool> expected_status(kAudioSources, true); |
441 expected_status[0] = false; | 423 expected_status[0] = false; |
442 | 424 |
443 MixAndCompare(frames, frame_info, expected_status); | 425 MixAndCompare(frames, frame_info, expected_status); |
444 } | 426 } |
445 } // namespace webrtc | 427 } // namespace webrtc |