OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" |
12 | 12 |
(...skipping 19 matching lines...) | |
32 | 32 |
33 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList; | 33 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList; |
34 | 34 |
35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. |
36 // These effects are applied to |frame| itself prior to mixing. Assumes that | 36 // These effects are applied to |frame| itself prior to mixing. Assumes that |
37 // |mixed_frame| always has at least as many channels as |frame|. Supports | 37 // |mixed_frame| always has at least as many channels as |frame|. Supports |
38 // stereo at most. | 38 // stereo at most. |
39 // | 39 // |
40 // TODO(andrew): consider not modifying |frame| here. | 40 // TODO(andrew): consider not modifying |frame| here. |
41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
42 assert(mixed_frame->num_channels_ >= frame->num_channels_); | 42 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); |
43 if (use_limiter) { | 43 if (use_limiter) { |
44 // Divide by two to avoid saturation in the mixing. | 44 // Divide by two to avoid saturation in the mixing. |
45 // This is only meaningful if the limiter will be used. | 45 // This is only meaningful if the limiter will be used. |
46 *frame >>= 1; | 46 *frame >>= 1; |
47 } | 47 } |
48 if (mixed_frame->num_channels_ > frame->num_channels_) { | 48 if (mixed_frame->num_channels_ > frame->num_channels_) { |
49 // We only support mono-to-stereo. | 49 // We only support mono-to-stereo. |
50 assert(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1); | 50 RTC_DCHECK_EQ(mixed_frame->num_channels_, static_cast<size_t>(2)); |
51 RTC_DCHECK_EQ(frame->num_channels_, static_cast<size_t>(1)); | |
51 AudioFrameOperations::MonoToStereo(frame); | 52 AudioFrameOperations::MonoToStereo(frame); |
52 } | 53 } |
53 | 54 |
54 *mixed_frame += *frame; | 55 *mixed_frame += *frame; |
55 } | 56 } |
56 | 57 |
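A minimal calling sketch for MixFrames (the frame setup is hypothetical; AudioFrame and its public fields are assumed to come from the module_common_types.h header this file already pulls in):

    webrtc::AudioFrame mixed;   // stereo accumulator, already holding mixed audio
    webrtc::AudioFrame source;  // mono participant frame
    mixed.num_channels_ = 2;
    source.num_channels_ = 1;
    // ... fill both frames with one 10 ms block of samples ...
    // Halves |source| (limiter pre-scaling), upmixes it to stereo in place,
    // then adds it into |mixed|.
    MixFrames(&mixed, &source, /*use_limiter=*/true);
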
57 // Return the max number of channels from a |list| composed of AudioFrames. | 58 // Return the max number of channels from a |list| composed of AudioFrames. |
58 size_t MaxNumChannels(const AudioFrameList* list) { | 59 size_t MaxNumChannels(const AudioFrameList* list) { |
59 size_t max_num_channels = 1; | 60 size_t max_num_channels = 1; |
60 for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end(); | 61 for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end(); |
(...skipping 43 matching lines...) | |
104 if (!mixer->Init()) { | 105 if (!mixer->Init()) { |
105 delete mixer; | 106 delete mixer; |
106 return NULL; | 107 return NULL; |
107 } | 108 } |
108 return mixer; | 109 return mixer; |
109 } | 110 } |
110 | 111 |
111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) | 112 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) |
112 : _id(id), | 113 : _id(id), |
113 _minimumMixingFreq(kLowestPossible), | 114 _minimumMixingFreq(kLowestPossible), |
114 _mixReceiver(NULL), | |
115 _outputFrequency(kDefaultFrequency), | 115 _outputFrequency(kDefaultFrequency), |
116 _sampleSize(0), | 116 _sampleSize(0), |
117 _audioFramePool(NULL), | 117 _audioFramePool(NULL), |
118 _participantList(), | 118 _participantList(), |
119 _additionalParticipantList(), | 119 _additionalParticipantList(), |
120 _numMixedParticipants(0), | 120 _numMixedParticipants(0), |
121 use_limiter_(true), | 121 use_limiter_(true), |
122 _timeStamp(0), | 122 _timeStamp(0), |
123 _timeScheduler(kProcessPeriodicityInMs), | 123 _timeScheduler(kProcessPeriodicityInMs), |
124 _processCalls(0) {} | 124 _processCalls(0) {} |
(...skipping 39 matching lines...) | |
164 return false; | 164 return false; |
165 | 165 |
166 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 166 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
167 return false; | 167 return false; |
168 | 168 |
169 return true; | 169 return true; |
170 } | 170 } |
171 | 171 |
172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { | 172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { |
173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | 173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); |
174 assert(_audioFramePool == NULL); | 174 RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr)); |
175 } | 175 } |
176 | 176 |
177 // Process should be called every kProcessPeriodicityInMs ms | 177 // Process should be called every kProcessPeriodicityInMs ms |
178 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { | 178 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { |
179 int64_t timeUntilNextProcess = 0; | 179 int64_t timeUntilNextProcess = 0; |
180 CriticalSectionScoped cs(_crit.get()); | 180 CriticalSectionScoped cs(_crit.get()); |
181 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { | 181 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { |
182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
183 "failed in TimeToNextUpdate() call"); | 183 "failed in TimeToNextUpdate() call"); |
184 // Sanity check | 184 // Sanity check |
185 assert(false); | 185 RTC_DCHECK(false); |
186 return -1; | 186 return -1; |
187 } | 187 } |
188 return timeUntilNextProcess; | 188 return timeUntilNextProcess; |
189 } | 189 } |
190 | 190 |
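TimeUntilNextProcess() is meant to be polled by an external scheduler. A hypothetical driver loop (the |mixer| pointer, |running| flag, and SleepMs() helper are assumptions for illustration, not part of this CL):

    while (running) {
      int64_t wait_ms = mixer->TimeUntilNextProcess();
      if (wait_ms > 0)
        SleepMs(wait_ms);  // assumed sleep helper
      webrtc::AudioFrame mixed_frame;
      // With this CL, the mixed audio is returned through an out parameter
      // instead of the old NewMixedAudio() callback.
      mixer->Mix(&mixed_frame);
    }
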
191 void NewAudioConferenceMixerImpl::Process() { | 191 void NewAudioConferenceMixerImpl::Process() { |
192 // TODO(aleloi) Remove this method. | |
193 RTC_NOTREACHED(); | |
194 } | |
195 | |
196 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | |
192 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; | 197 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; |
193 { | 198 { |
194 CriticalSectionScoped cs(_crit.get()); | 199 CriticalSectionScoped cs(_crit.get()); |
195 assert(_processCalls == 0); | 200 RTC_DCHECK_EQ(_processCalls, 0); |
196 _processCalls++; | 201 _processCalls++; |
197 | 202 |
198 // Let the scheduler know that we are running one iteration. | 203 // Let the scheduler know that we are running one iteration. |
199 _timeScheduler.UpdateScheduler(); | 204 _timeScheduler.UpdateScheduler(); |
200 } | 205 } |
201 | 206 |
202 AudioFrameList mixList; | 207 AudioFrameList mixList; |
203 AudioFrameList rampOutList; | 208 AudioFrameList rampOutList; |
204 AudioFrameList additionalFramesList; | 209 AudioFrameList additionalFramesList; |
205 std::map<int, MixerAudioSource*> mixedParticipantsMap; | 210 std::map<int, MixerAudioSource*> mixedParticipantsMap; |
(...skipping 31 matching lines...) | |
237 if (OutputFrequency() != kSwbInHz) { | 242 if (OutputFrequency() != kSwbInHz) { |
238 SetOutputFrequency(kSwbInHz); | 243 SetOutputFrequency(kSwbInHz); |
239 } | 244 } |
240 break; | 245 break; |
241 case 48000: | 246 case 48000: |
242 if (OutputFrequency() != kFbInHz) { | 247 if (OutputFrequency() != kFbInHz) { |
243 SetOutputFrequency(kFbInHz); | 248 SetOutputFrequency(kFbInHz); |
244 } | 249 } |
245 break; | 250 break; |
246 default: | 251 default: |
247 assert(false); | 252 RTC_DCHECK(false); |
tommi 2016/07/08 12:24:17: nit: RTC_NOTREACHED()
aleloi 2016/07/08 12:57:40: Done.
248 | 253 |
249 CriticalSectionScoped cs(_crit.get()); | 254 CriticalSectionScoped cs(_crit.get()); |
250 _processCalls--; | 255 _processCalls--; |
251 return; | 256 return; |
252 } | 257 } |
253 } | 258 } |
254 | 259 |
255 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, | 260 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, |
256 &remainingParticipantsAllowedToMix); | 261 &remainingParticipantsAllowedToMix); |
257 | 262 |
258 GetAdditionalAudio(&additionalFramesList); | 263 GetAdditionalAudio(&additionalFramesList); |
259 UpdateMixedStatus(mixedParticipantsMap); | 264 UpdateMixedStatus(mixedParticipantsMap); |
260 } | 265 } |
261 | 266 |
262 // Get an AudioFrame for mixing from the memory pool. | 267 // TODO(henrike): it might be better to decide the number of channels |
tommi 2016/07/08 12:24:17: henrike doesn't work on webrtc anymore. Can you as
aleloi 2016/07/08 12:57:40: Done.
263 AudioFrame* mixedAudio = NULL; | 268 // with an API instead of dynamically. |
264 if (_audioFramePool->PopMemory(mixedAudio) == -1) { | 269 |
265 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 270 // Find the max channels over all mixing lists. |
266 "failed PopMemory() call"); | 271 const size_t num_mixed_channels = std::max( |
267 assert(false); | 272 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList), |
268 return; | 273 MaxNumChannels(&rampOutList))); |
269 } | 274 |
275 audio_frame_for_mixing->UpdateFrame( | |
276 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, | |
277 AudioFrame::kVadPassive, num_mixed_channels); | |
278 | |
279 _timeStamp += static_cast<uint32_t>(_sampleSize); | |
280 | |
281 use_limiter_ = _numMixedParticipants > 1 && | |
282 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | |
283 | |
284 // We only use the limiter if it supports the output sample rate and | |
285 // we're actually mixing multiple streams. | |
286 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); | |
270 | 287 |
271 { | 288 { |
272 CriticalSectionScoped cs(_crit.get()); | 289 CriticalSectionScoped cs(_crit.get()); |
290 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | |
291 MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList); | |
273 | 292 |
274 // TODO(henrike): it might be better to decide the number of channels | 293 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
275 // with an API instead of dynamically. | |
276 | |
277 // Find the max channels over all mixing lists. | |
278 const size_t num_mixed_channels = | |
279 std::max(MaxNumChannels(&mixList), | |
280 std::max(MaxNumChannels(&additionalFramesList), | |
281 MaxNumChannels(&rampOutList))); | |
282 | |
283 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | |
284 AudioFrame::kNormalSpeech, AudioFrame::kVadPassive, | |
285 num_mixed_channels); | |
286 | |
287 _timeStamp += static_cast<uint32_t>(_sampleSize); | |
288 | |
289 // We only use the limiter if it supports the output sample rate and | |
290 // we're actually mixing multiple streams. | |
291 use_limiter_ = _numMixedParticipants > 1 && | |
292 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | |
293 | |
294 MixFromList(mixedAudio, mixList); | |
295 MixAnonomouslyFromList(mixedAudio, additionalFramesList); | |
296 MixAnonomouslyFromList(mixedAudio, rampOutList); | |
297 | |
298 if (mixedAudio->samples_per_channel_ == 0) { | |
299 // Nothing was mixed, set the audio samples to silence. | 294 // Nothing was mixed, set the audio samples to silence. |
300 mixedAudio->samples_per_channel_ = _sampleSize; | 295 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; |
301 mixedAudio->Mute(); | 296 audio_frame_for_mixing->Mute(); |
302 } else { | 297 } else { |
303 // Only call the limiter if we have something to mix. | 298 // Only call the limiter if we have something to mix. |
304 LimitMixedAudio(mixedAudio); | 299 LimitMixedAudio(audio_frame_for_mixing); |
305 } | 300 } |
306 } | 301 } |
307 | 302 |
308 { | |
309 CriticalSectionScoped cs(_cbCrit.get()); | |
310 if (_mixReceiver != NULL) { | |
311 const AudioFrame** dummy = NULL; | |
312 _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0); | |
313 } | |
314 } | |
315 | |
316 // Reclaim all outstanding memory. | |
317 _audioFramePool->PushMemory(mixedAudio); | |
318 ClearAudioFrameList(&mixList); | 303 ClearAudioFrameList(&mixList); |
319 ClearAudioFrameList(&rampOutList); | 304 ClearAudioFrameList(&rampOutList); |
320 ClearAudioFrameList(&additionalFramesList); | 305 ClearAudioFrameList(&additionalFramesList); |
321 { | 306 { |
322 CriticalSectionScoped cs(_crit.get()); | 307 CriticalSectionScoped cs(_crit.get()); |
323 _processCalls--; | 308 _processCalls--; |
324 } | 309 } |
325 return; | 310 return; |
326 } | 311 } |
327 | 312 |
328 int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback( | |
329 OldAudioMixerOutputReceiver* mixReceiver) { | |
330 CriticalSectionScoped cs(_cbCrit.get()); | |
331 if (_mixReceiver != NULL) { | |
332 return -1; | |
333 } | |
334 _mixReceiver = mixReceiver; | |
335 return 0; | |
336 } | |
337 | |
338 int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { | |
339 CriticalSectionScoped cs(_cbCrit.get()); | |
340 if (_mixReceiver == NULL) { | |
341 return -1; | |
342 } | |
343 _mixReceiver = NULL; | |
344 return 0; | |
345 } | |
346 | |
347 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 313 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
348 const Frequency& frequency) { | 314 const Frequency& frequency) { |
349 CriticalSectionScoped cs(_crit.get()); | 315 CriticalSectionScoped cs(_crit.get()); |
350 | 316 |
351 _outputFrequency = frequency; | 317 _outputFrequency = frequency; |
352 _sampleSize = | 318 _sampleSize = |
353 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 319 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); |
354 | 320 |
355 return 0; | 321 return 0; |
356 } | 322 } |
(...skipping 24 matching lines...) | |
381 } | 347 } |
382 bool success = false; | 348 bool success = false; |
383 if (mixable) { | 349 if (mixable) { |
384 success = AddParticipantToList(participant, &_participantList); | 350 success = AddParticipantToList(participant, &_participantList); |
385 } else { | 351 } else { |
386 success = RemoveParticipantFromList(participant, &_participantList); | 352 success = RemoveParticipantFromList(participant, &_participantList); |
387 } | 353 } |
388 if (!success) { | 354 if (!success) { |
389 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 355 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
390 "failed to %s participant", mixable ? "add" : "remove"); | 356 "failed to %s participant", mixable ? "add" : "remove"); |
391 assert(false); | 357 RTC_DCHECK(false); |
tommi 2016/07/08 12:24:17: RTC_NOTREACHED() here as well (and below). Down t
aleloi 2016/07/08 12:57:40: Done.
392 return -1; | 358 return -1; |
393 } | 359 } |
394 | 360 |
395 size_t numMixedNonAnonymous = _participantList.size(); | 361 size_t numMixedNonAnonymous = _participantList.size(); |
396 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) { | 362 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) { |
397 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants; | 363 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants; |
398 } | 364 } |
399 numMixedParticipants = | 365 numMixedParticipants = |
400 numMixedNonAnonymous + _additionalParticipantList.size(); | 366 numMixedNonAnonymous + _additionalParticipantList.size(); |
401 } | 367 } |
(...skipping 15 matching lines...) | |
417 MixerAudioSource* participant, | 383 MixerAudioSource* participant, |
418 bool anonymous) { | 384 bool anonymous) { |
419 CriticalSectionScoped cs(_cbCrit.get()); | 385 CriticalSectionScoped cs(_cbCrit.get()); |
420 if (IsParticipantInList(*participant, _additionalParticipantList)) { | 386 if (IsParticipantInList(*participant, _additionalParticipantList)) { |
421 if (anonymous) { | 387 if (anonymous) { |
422 return 0; | 388 return 0; |
423 } | 389 } |
424 if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) { | 390 if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) { |
425 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 391 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
426 "unable to remove participant from anonymous list"); | 392 "unable to remove participant from anonymous list"); |
427 assert(false); | 393 RTC_DCHECK(false); |
428 return -1; | 394 return -1; |
429 } | 395 } |
430 return AddParticipantToList(participant, &_participantList) ? 0 : -1; | 396 return AddParticipantToList(participant, &_participantList) ? 0 : -1; |
431 } | 397 } |
432 if (!anonymous) { | 398 if (!anonymous) { |
433 return 0; | 399 return 0; |
434 } | 400 } |
435 const bool mixable = | 401 const bool mixable = |
436 RemoveParticipantFromList(participant, &_participantList); | 402 RemoveParticipantFromList(participant, &_participantList); |
437 if (!mixable) { | 403 if (!mixable) { |
(...skipping 23 matching lines...) | |
461 freq = kSwbInHz; | 427 freq = kSwbInHz; |
462 } | 428 } |
463 | 429 |
464 if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || | 430 if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || |
465 (freq == kLowestPossible)) { | 431 (freq == kLowestPossible)) { |
466 _minimumMixingFreq = freq; | 432 _minimumMixingFreq = freq; |
467 return 0; | 433 return 0; |
468 } else { | 434 } else { |
469 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 435 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
470 "SetMinimumMixingFrequency incorrect frequency: %i", freq); | 436 "SetMinimumMixingFrequency incorrect frequency: %i", freq); |
471 assert(false); | 437 RTC_DCHECK(false); |
472 return -1; | 438 return -1; |
473 } | 439 } |
474 } | 440 } |
475 | 441 |
476 // Check all AudioFrames that are to be mixed. The highest sampling frequency | 442 // Check all AudioFrames that are to be mixed. The highest sampling frequency |
477 // found is the lowest that can be used without losing information. | 443 // found is the lowest that can be used without losing information. |
478 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const { | 444 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const { |
479 const int participantListFrequency = | 445 const int participantListFrequency = |
480 GetLowestMixingFrequencyFromList(_participantList); | 446 GetLowestMixingFrequencyFromList(_participantList); |
481 const int anonymousListFrequency = | 447 const int anonymousListFrequency = |
(...skipping 46 matching lines...) | |
528 (*maxAudioFrameCounter > | 494 (*maxAudioFrameCounter > |
529 (activeList.size() + passiveWasMixedList.size() + | 495 (activeList.size() + passiveWasMixedList.size() + |
530 passiveWasNotMixedList.size())); | 496 passiveWasNotMixedList.size())); |
531 | 497 |
532 bool wasMixed = false; | 498 bool wasMixed = false; |
533 wasMixed = (*participant)->_mixHistory->WasMixed(); | 499 wasMixed = (*participant)->_mixHistory->WasMixed(); |
534 AudioFrame* audioFrame = NULL; | 500 AudioFrame* audioFrame = NULL; |
535 if (_audioFramePool->PopMemory(audioFrame) == -1) { | 501 if (_audioFramePool->PopMemory(audioFrame) == -1) { |
536 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 502 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
537 "failed PopMemory() call"); | 503 "failed PopMemory() call"); |
538 assert(false); | 504 RTC_DCHECK(false); |
539 return; | 505 return; |
540 } | 506 } |
541 audioFrame->sample_rate_hz_ = _outputFrequency; | 507 audioFrame->sample_rate_hz_ = _outputFrequency; |
542 | 508 |
543 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 509 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); |
544 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 510 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
545 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 511 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
546 "failed to GetAudioFrameWithMuted() from participant"); | 512 "failed to GetAudioFrameWithMuted() from participant"); |
547 _audioFramePool->PushMemory(audioFrame); | 513 _audioFramePool->PushMemory(audioFrame); |
548 continue; | 514 continue; |
549 } | 515 } |
550 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); | 516 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); |
551 if (_participantList.size() != 1) { | 517 if (_participantList.size() != 1) { |
552 // TODO(wu): Issue 3390, add support for multiple participants case. | 518 // TODO(wu): Issue 3390, add support for multiple participants case. |
553 audioFrame->ntp_time_ms_ = -1; | 519 audioFrame->ntp_time_ms_ = -1; |
554 } | 520 } |
555 | 521 |
556 // TODO(henrike): this assert triggers in some test cases where SRTP is | 522 // TODO(henrike): this assert triggers in some test cases where SRTP is |
557 // used which prevents NetEQ from making a VAD. Temporarily disable this | 523 // used which prevents NetEQ from making a VAD. Temporarily disable this |
558 // assert until the problem is fixed on a higher level. | 524 // assert until the problem is fixed on a higher level. |
559 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown); | 525 // RTC_DCHECK_NE(audioFrame->vad_activity_, AudioFrame::kVadUnknown); |
560 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { | 526 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { |
561 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 527 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
562 "invalid VAD state from participant"); | 528 "invalid VAD state from participant"); |
563 } | 529 } |
564 | 530 |
565 if (audioFrame->vad_activity_ == AudioFrame::kVadActive) { | 531 if (audioFrame->vad_activity_ == AudioFrame::kVadActive) { |
566 if (!wasMixed && !muted) { | 532 if (!wasMixed && !muted) { |
567 RampIn(*audioFrame); | 533 RampIn(*audioFrame); |
568 } | 534 } |
569 | 535 |
(...skipping 17 matching lines...) Expand all Loading... | |
587 RTC_DCHECK(!muted); // Cannot replace with a muted frame. | 553 RTC_DCHECK(!muted); // Cannot replace with a muted frame. |
588 FrameAndMuteInfo replaceFrame = *replaceItem; | 554 FrameAndMuteInfo replaceFrame = *replaceItem; |
589 | 555 |
590 bool replaceWasMixed = false; | 556 bool replaceWasMixed = false; |
591 std::map<int, MixerAudioSource*>::const_iterator it = | 557 std::map<int, MixerAudioSource*>::const_iterator it = |
592 mixParticipantList->find(replaceFrame.frame->id_); | 558 mixParticipantList->find(replaceFrame.frame->id_); |
593 | 559 |
594 // When a frame is pushed to |activeList| it is also pushed | 560 // When a frame is pushed to |activeList| it is also pushed |
595 // to mixParticipantList with the frame's id. This means | 561 // to mixParticipantList with the frame's id. This means |
596 // that the Find call above should never fail. | 562 // that the Find call above should never fail. |
597 assert(it != mixParticipantList->end()); | 563 RTC_DCHECK(it != mixParticipantList->end()); |
598 replaceWasMixed = it->second->_mixHistory->WasMixed(); | 564 replaceWasMixed = it->second->_mixHistory->WasMixed(); |
599 | 565 |
600 mixParticipantList->erase(replaceFrame.frame->id_); | 566 mixParticipantList->erase(replaceFrame.frame->id_); |
601 activeList.erase(replaceItem); | 567 activeList.erase(replaceItem); |
602 | 568 |
603 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); | 569 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); |
604 (*mixParticipantList)[audioFrame->id_] = *participant; | 570 (*mixParticipantList)[audioFrame->id_] = *participant; |
605 assert(mixParticipantList->size() <= | 571 RTC_DCHECK_LE(mixParticipantList->size(), |
606 kMaximumAmountOfMixedParticipants); | 572 static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
607 | 573 |
608 if (replaceWasMixed) { | 574 if (replaceWasMixed) { |
609 if (!replaceFrame.muted) { | 575 if (!replaceFrame.muted) { |
610 RampOut(*replaceFrame.frame); | 576 RampOut(*replaceFrame.frame); |
611 } | 577 } |
612 rampOutList->push_back(replaceFrame); | 578 rampOutList->push_back(replaceFrame); |
613 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 579 RTC_DCHECK_LE( |
580 rampOutList->size(), | |
581 static_cast<size_t>(kMaximumAmountOfMixedParticipants)); | |
614 } else { | 582 } else { |
615 _audioFramePool->PushMemory(replaceFrame.frame); | 583 _audioFramePool->PushMemory(replaceFrame.frame); |
616 } | 584 } |
617 } else { | 585 } else { |
618 if (wasMixed) { | 586 if (wasMixed) { |
619 if (!muted) { | 587 if (!muted) { |
620 RampOut(*audioFrame); | 588 RampOut(*audioFrame); |
621 } | 589 } |
622 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); | 590 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); |
623 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 591 RTC_DCHECK_LE( |
592 rampOutList->size(), | |
593 static_cast<size_t>(kMaximumAmountOfMixedParticipants)); | |
624 } else { | 594 } else { |
625 _audioFramePool->PushMemory(audioFrame); | 595 _audioFramePool->PushMemory(audioFrame); |
626 } | 596 } |
627 } | 597 } |
628 } else { | 598 } else { |
629 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); | 599 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); |
630 (*mixParticipantList)[audioFrame->id_] = *participant; | 600 (*mixParticipantList)[audioFrame->id_] = *participant; |
631 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 601 RTC_DCHECK_LE(mixParticipantList->size(), |
602 static_cast<size_t>(kMaximumAmountOfMixedParticipants)); | |
632 } | 603 } |
633 } else { | 604 } else { |
634 if (wasMixed) { | 605 if (wasMixed) { |
635 ParticipantFrameStruct* part_struct = | 606 ParticipantFrameStruct* part_struct = |
636 new ParticipantFrameStruct(*participant, audioFrame, muted); | 607 new ParticipantFrameStruct(*participant, audioFrame, muted); |
637 passiveWasMixedList.push_back(part_struct); | 608 passiveWasMixedList.push_back(part_struct); |
638 } else if (mustAddToPassiveList) { | 609 } else if (mustAddToPassiveList) { |
639 if (!muted) { | 610 if (!muted) { |
640 RampIn(*audioFrame); | 611 RampIn(*audioFrame); |
641 } | 612 } |
642 ParticipantFrameStruct* part_struct = | 613 ParticipantFrameStruct* part_struct = |
643 new ParticipantFrameStruct(*participant, audioFrame, muted); | 614 new ParticipantFrameStruct(*participant, audioFrame, muted); |
644 passiveWasNotMixedList.push_back(part_struct); | 615 passiveWasNotMixedList.push_back(part_struct); |
645 } else { | 616 } else { |
646 _audioFramePool->PushMemory(audioFrame); | 617 _audioFramePool->PushMemory(audioFrame); |
647 } | 618 } |
648 } | 619 } |
649 } | 620 } |
650 assert(activeList.size() <= *maxAudioFrameCounter); | 621 RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter); |
651 // At this point it is known which participants should be mixed. Transfer | 622 // At this point it is known which participants should be mixed. Transfer |
652 // this information to this function's output parameters. | 623 // this information to this function's output parameters. |
653 for (AudioFrameList::const_iterator iter = activeList.begin(); | 624 for (AudioFrameList::const_iterator iter = activeList.begin(); |
654 iter != activeList.end(); ++iter) { | 625 iter != activeList.end(); ++iter) { |
655 mixList->push_back(*iter); | 626 mixList->push_back(*iter); |
656 } | 627 } |
657 activeList.clear(); | 628 activeList.clear(); |
658 // Always mix a constant number of AudioFrames. If there aren't enough | 629 // Always mix a constant number of AudioFrames. If there aren't enough |
659 // active participants, mix passive ones, starting with those that were mixed | 630 // active participants, mix passive ones, starting with those that were mixed |
660 // last iteration. | 631 // last iteration. |
661 for (ParticipantFrameStructList::const_iterator iter = | 632 for (ParticipantFrameStructList::const_iterator iter = |
662 passiveWasMixedList.begin(); | 633 passiveWasMixedList.begin(); |
663 iter != passiveWasMixedList.end(); ++iter) { | 634 iter != passiveWasMixedList.end(); ++iter) { |
664 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 635 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
665 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 636 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
666 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 637 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
667 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 638 RTC_DCHECK_LE(mixParticipantList->size(), |
639 static_cast<size_t>(kMaximumAmountOfMixedParticipants)); | |
668 } else { | 640 } else { |
669 _audioFramePool->PushMemory((*iter)->audioFrame); | 641 _audioFramePool->PushMemory((*iter)->audioFrame); |
670 } | 642 } |
671 delete *iter; | 643 delete *iter; |
672 } | 644 } |
673 // And finally the ones that have not been mixed for a while. | 645 // And finally the ones that have not been mixed for a while. |
674 for (ParticipantFrameStructList::const_iterator iter = | 646 for (ParticipantFrameStructList::const_iterator iter = |
675 passiveWasNotMixedList.begin(); | 647 passiveWasNotMixedList.begin(); |
676 iter != passiveWasNotMixedList.end(); ++iter) { | 648 iter != passiveWasNotMixedList.end(); ++iter) { |
677 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 649 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
678 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 650 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
679 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 651 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
680 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 652 RTC_DCHECK_LE(mixParticipantList->size(), |
653 static_cast<size_t>(kMaximumAmountOfMixedParticipants)); | |
681 } else { | 654 } else { |
682 _audioFramePool->PushMemory((*iter)->audioFrame); | 655 _audioFramePool->PushMemory((*iter)->audioFrame); |
683 } | 656 } |
684 delete *iter; | 657 delete *iter; |
685 } | 658 } |
686 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); | 659 RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size()); |
687 *maxAudioFrameCounter += mixListStartSize - mixList->size(); | 660 *maxAudioFrameCounter += mixListStartSize - mixList->size(); |
688 } | 661 } |
689 | 662 |
690 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 663 void NewAudioConferenceMixerImpl::GetAdditionalAudio( |
691 AudioFrameList* additionalFramesList) const { | 664 AudioFrameList* additionalFramesList) const { |
692 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 665 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
693 "GetAdditionalAudio(additionalFramesList)"); | 666 "GetAdditionalAudio(additionalFramesList)"); |
694 // The GetAudioFrameWithMuted() callback may result in the participant being | 667 // The GetAudioFrameWithMuted() callback may result in the participant being |
695 // removed from additionalParticipantList_. If that happens it will | 668 // removed from additionalParticipantList_. If that happens it will |
696 // invalidate any iterators. Create a copy of the participants list such | 669 // invalidate any iterators. Create a copy of the participants list such |
697 // that the list of participants can be traversed safely. | 670 // that the list of participants can be traversed safely. |
698 MixerAudioSourceList additionalParticipantList; | 671 MixerAudioSourceList additionalParticipantList; |
699 additionalParticipantList.insert(additionalParticipantList.begin(), | 672 additionalParticipantList.insert(additionalParticipantList.begin(), |
700 _additionalParticipantList.begin(), | 673 _additionalParticipantList.begin(), |
701 _additionalParticipantList.end()); | 674 _additionalParticipantList.end()); |
702 | 675 |
703 for (MixerAudioSourceList::const_iterator participant = | 676 for (MixerAudioSourceList::const_iterator participant = |
704 additionalParticipantList.begin(); | 677 additionalParticipantList.begin(); |
705 participant != additionalParticipantList.end(); ++participant) { | 678 participant != additionalParticipantList.end(); ++participant) { |
706 AudioFrame* audioFrame = NULL; | 679 AudioFrame* audioFrame = NULL; |
707 if (_audioFramePool->PopMemory(audioFrame) == -1) { | 680 if (_audioFramePool->PopMemory(audioFrame) == -1) { |
708 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 681 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
709 "failed PopMemory() call"); | 682 "failed PopMemory() call"); |
710 assert(false); | 683 RTC_DCHECK(false); |
711 return; | 684 return; |
712 } | 685 } |
713 audioFrame->sample_rate_hz_ = _outputFrequency; | 686 audioFrame->sample_rate_hz_ = _outputFrequency; |
714 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 687 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); |
715 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 688 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
716 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 689 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
717 "failed to GetAudioFrameWithMuted() from participant"); | 690 "failed to GetAudioFrameWithMuted() from participant"); |
718 _audioFramePool->PushMemory(audioFrame); | 691 _audioFramePool->PushMemory(audioFrame); |
719 continue; | 692 continue; |
720 } | 693 } |
721 if (audioFrame->samples_per_channel_ == 0) { | 694 if (audioFrame->samples_per_channel_ == 0) { |
722 // Empty frame. Don't use it. | 695 // Empty frame. Don't use it. |
723 _audioFramePool->PushMemory(audioFrame); | 696 _audioFramePool->PushMemory(audioFrame); |
724 continue; | 697 continue; |
725 } | 698 } |
726 additionalFramesList->push_back(FrameAndMuteInfo( | 699 additionalFramesList->push_back(FrameAndMuteInfo( |
727 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 700 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
728 } | 701 } |
729 } | 702 } |
730 | 703 |
731 void NewAudioConferenceMixerImpl::UpdateMixedStatus( | 704 void NewAudioConferenceMixerImpl::UpdateMixedStatus( |
732 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { | 705 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { |
733 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 706 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
734 "UpdateMixedStatus(mixedParticipantsMap)"); | 707 "UpdateMixedStatus(mixedParticipantsMap)"); |
735 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); | 708 RTC_DCHECK_LE(mixedParticipantsMap.size(), |
709 static_cast<size_t>(kMaximumAmountOfMixedParticipants)); | |
736 | 710 |
737 // Loop through all participants. If they are in the mix map they | 711 // Loop through all participants. If they are in the mix map they |
738 // were mixed. | 712 // were mixed. |
739 for (MixerAudioSourceList::const_iterator participant = | 713 for (MixerAudioSourceList::const_iterator participant = |
740 _participantList.begin(); | 714 _participantList.begin(); |
741 participant != _participantList.end(); ++participant) { | 715 participant != _participantList.end(); ++participant) { |
742 bool isMixed = false; | 716 bool isMixed = false; |
743 for (std::map<int, MixerAudioSource*>::const_iterator it = | 717 for (std::map<int, MixerAudioSource*>::const_iterator it = |
744 mixedParticipantsMap.begin(); | 718 mixedParticipantsMap.begin(); |
745 it != mixedParticipantsMap.end(); ++it) { | 719 it != mixedParticipantsMap.end(); ++it) { |
(...skipping 54 matching lines...) | |
800 // Participant is no longer mixed, reset to default. | 774 // Participant is no longer mixed, reset to default. |
801 participant->_mixHistory->ResetMixedStatus(); | 775 participant->_mixHistory->ResetMixedStatus(); |
802 return true; | 776 return true; |
803 } | 777 } |
804 } | 778 } |
805 return false; | 779 return false; |
806 } | 780 } |
807 | 781 |
808 int32_t NewAudioConferenceMixerImpl::MixFromList( | 782 int32_t NewAudioConferenceMixerImpl::MixFromList( |
809 AudioFrame* mixedAudio, | 783 AudioFrame* mixedAudio, |
810 const AudioFrameList& audioFrameList) const { | 784 const AudioFrameList& audioFrameList, |
811 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 785 int32_t id, |
786 bool use_limiter) { | |
787 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | |
812 "MixFromList(mixedAudio, audioFrameList)"); | 788 "MixFromList(mixedAudio, audioFrameList)"); |
813 if (audioFrameList.empty()) | 789 if (audioFrameList.empty()) |
814 return 0; | 790 return 0; |
815 | 791 |
816 uint32_t position = 0; | 792 uint32_t position = 0; |
817 | 793 |
818 if (_numMixedParticipants == 1) { | 794 if (audioFrameList.size() == 1) { |
819 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; | 795 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; |
820 mixedAudio->elapsed_time_ms_ = | 796 mixedAudio->elapsed_time_ms_ = |
821 audioFrameList.front().frame->elapsed_time_ms_; | 797 audioFrameList.front().frame->elapsed_time_ms_; |
822 } else { | 798 } else { |
823 // TODO(wu): Issue 3390. | 799 // TODO(wu): Issue 3390. |
824 // Audio frame timestamp is only supported in one channel case. | 800 // Audio frame timestamp is only supported in one channel case. |
825 mixedAudio->timestamp_ = 0; | 801 mixedAudio->timestamp_ = 0; |
826 mixedAudio->elapsed_time_ms_ = -1; | 802 mixedAudio->elapsed_time_ms_ = -1; |
827 } | 803 } |
828 | 804 |
829 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | 805 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); |
830 iter != audioFrameList.end(); ++iter) { | 806 iter != audioFrameList.end(); ++iter) { |
831 if (position >= kMaximumAmountOfMixedParticipants) { | 807 if (position >= kMaximumAmountOfMixedParticipants) { |
832 WEBRTC_TRACE( | 808 WEBRTC_TRACE( |
833 kTraceMemory, kTraceAudioMixerServer, _id, | 809 kTraceMemory, kTraceAudioMixerServer, id, |
834 "Trying to mix more than max amount of mixed participants:%d!", | 810 "Trying to mix more than max amount of mixed participants:%d!", |
835 kMaximumAmountOfMixedParticipants); | 811 kMaximumAmountOfMixedParticipants); |
836 // Assert and avoid crash | 812 // Assert and avoid crash |
837 assert(false); | 813 RTC_DCHECK(false); |
838 position = 0; | 814 position = 0; |
839 } | 815 } |
840 if (!iter->muted) { | 816 if (!iter->muted) { |
841 MixFrames(mixedAudio, iter->frame, use_limiter_); | 817 MixFrames(mixedAudio, iter->frame, use_limiter); |
842 } | 818 } |
843 | 819 |
844 position++; | 820 position++; |
845 } | 821 } |
846 | 822 |
847 return 0; | 823 return 0; |
848 } | 824 } |
849 | 825 |
850 // TODO(andrew): consolidate this function with MixFromList. | 826 // TODO(andrew): consolidate this function with MixFromList. |
851 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList( | 827 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList( |
(...skipping 31 matching lines...) | |
883 // is enabled (moving from one to two participants) it has the potential to | 859 // is enabled (moving from one to two participants) it has the potential to |
884 // create discontinuities in the mixed frame. | 860 // create discontinuities in the mixed frame. |
885 // | 861 // |
886 // Instead we double the frame (with addition since left-shifting a | 862 // Instead we double the frame (with addition since left-shifting a |
887 // negative value is undefined). | 863 // negative value is undefined). |
888 *mixedAudio += *mixedAudio; | 864 *mixedAudio += *mixedAudio; |
889 | 865 |
890 if (error != _limiter->kNoError) { | 866 if (error != _limiter->kNoError) { |
891 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 867 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
892 "Error from AudioProcessing: %d", error); | 868 "Error from AudioProcessing: %d", error); |
893 assert(false); | 869 RTC_DCHECK(false); |
894 return false; | 870 return false; |
895 } | 871 } |
896 return true; | 872 return true; |
897 } | 873 } |
898 } // namespace webrtc | 874 } // namespace webrtc |
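A self-contained sketch (standard C++ only, illustrative values) of why LimitMixedAudio() doubles the frame by addition rather than left-shifting: shifting a negative signed value left is undefined behavior before C++20, while x + x is well defined whenever the result is in range.

    #include <cstdint>
    #include <cstdio>

    int main() {
      int16_t sample = -1000;             // a halved sample, as after *frame >>= 1
      int16_t doubled = sample + sample;  // well defined: -2000
      // int16_t bad = sample << 1;       // undefined for negative values pre-C++20
      std::printf("%d\n", doubled);
      return 0;
    }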