OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 | |
11 #include "modules/audio_conference_mixer/source/audio_conference_mixer_impl.h" | |
12 #include "audio/utility/audio_frame_operations.h" | |
13 #include "modules/audio_conference_mixer/include/audio_conference_mixer_defines.
h" | |
14 #include "modules/audio_conference_mixer/source/audio_frame_manipulator.h" | |
15 #include "modules/audio_processing/include/audio_processing.h" | |
16 #include "rtc_base/logging.h" | |
17 | |
18 namespace webrtc { | |
19 namespace { | |
20 | |
// Bundles a participant with the AudioFrame it produced for the current
// mixing round, plus whether that frame is muted. Used by UpdateToMix() to
// track passive (non-VAD-active) participants.
struct ParticipantFrameStruct {
  ParticipantFrameStruct(MixerParticipant* p, AudioFrame* a, bool m)
      : participant(p), audioFrame(a), muted(m) {}
  MixerParticipant* participant;  // Not owned.
  AudioFrame* audioFrame;         // Popped from the frame pool; must be
                                  // returned there or handed off.
  bool muted;
};

// Elements are heap-allocated; whoever drains the list must delete them.
typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
30 | |
// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
// These effects are applied to |frame| itself prior to mixing. Assumes that
// |mixed_frame| always has at least as many channels as |frame|. Supports
// stereo at most.
//
// TODO(andrew): consider not modifying |frame| here.
void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
  assert(mixed_frame->num_channels_ >= frame->num_channels_);
  if (use_limiter) {
    // This is to avoid saturation in the mixing. It is only
    // meaningful if the limiter will be used. The half gain is restored
    // later by LimitMixedAudio() doubling the limited mix.
    AudioFrameOperations::ApplyHalfGain(frame);
  }
  if (mixed_frame->num_channels_ > frame->num_channels_) {
    // We only support mono-to-stereo.
    assert(mixed_frame->num_channels_ == 2 &&
           frame->num_channels_ == 1);
    AudioFrameOperations::MonoToStereo(frame);
  }

  // Sample-wise addition of |frame| into |mixed_frame|.
  AudioFrameOperations::Add(*frame, mixed_frame);
}
53 | |
54 // Return the max number of channels from a |list| composed of AudioFrames. | |
55 size_t MaxNumChannels(const AudioFrameList* list) { | |
56 size_t max_num_channels = 1; | |
57 for (AudioFrameList::const_iterator iter = list->begin(); | |
58 iter != list->end(); | |
59 ++iter) { | |
60 max_num_channels = std::max(max_num_channels, (*iter).frame->num_channels_); | |
61 } | |
62 return max_num_channels; | |
63 } | |
64 | |
65 } // namespace | |
66 | |
// Each participant owns its own MixHistory for the mixer to record per-round
// mixed/not-mixed state in.
MixerParticipant::MixerParticipant()
    : _mixHistory(new MixHistory()) {
}

MixerParticipant::~MixerParticipant() {
  // _mixHistory is a raw owning pointer; release it here.
  delete _mixHistory;
}
74 | |
// Returns true if this participant was included in the most recent mix.
bool MixerParticipant::IsMixed() const {
  return _mixHistory->IsMixed();
}
78 | |
79 MixHistory::MixHistory() | |
80 : _isMixed(0) { | |
81 } | |
82 | |
83 MixHistory::~MixHistory() { | |
84 } | |
85 | |
// Current mixed state as recorded by the last SetIsMixed() call.
bool MixHistory::IsMixed() const {
  return _isMixed;
}
89 | |
// Whether the participant was mixed in the previous mixing iteration.
bool MixHistory::WasMixed() const {
  // Was mixed is the same as is mixed depending on perspective. This function
  // is for the perspective of AudioConferenceMixerImpl.
  return IsMixed();
}
95 | |
// Records whether the participant was mixed this round. Always returns 0.
int32_t MixHistory::SetIsMixed(const bool mixed) {
  _isMixed = mixed;
  return 0;
}
100 | |
// Resets to the default (not mixed) state, used when a participant is added
// to or removed from a mixing list.
void MixHistory::ResetMixedStatus() {
  _isMixed = false;
}
104 | |
105 AudioConferenceMixer* AudioConferenceMixer::Create(int id) { | |
106 AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id); | |
107 if(!mixer->Init()) { | |
108 delete mixer; | |
109 return NULL; | |
110 } | |
111 return mixer; | |
112 } | |
113 | |
// Members are only default-initialized here; the limiter, frame pool and
// output frequency are set up in Init(), which Create() calls before
// exposing the object.
AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
    : _id(id),
      _minimumMixingFreq(kLowestPossible),
      _mixReceiver(NULL),
      _outputFrequency(kDefaultFrequency),
      _sampleSize(0),
      _audioFramePool(NULL),
      _participantList(),
      _additionalParticipantList(),
      _numMixedParticipants(0),
      use_limiter_(true),
      _timeStamp(0),
      _timeScheduler(kProcessPeriodicityInMs),
      _processCalls(0) {}
128 | |
129 bool AudioConferenceMixerImpl::Init() { | |
130 Config config; | |
131 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | |
132 _limiter.reset(AudioProcessing::Create(config)); | |
133 if(!_limiter.get()) | |
134 return false; | |
135 | |
136 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool, | |
137 DEFAULT_AUDIO_FRAME_POOLSIZE); | |
138 if(_audioFramePool == NULL) | |
139 return false; | |
140 | |
141 if(SetOutputFrequency(kDefaultFrequency) == -1) | |
142 return false; | |
143 | |
144 if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != | |
145 _limiter->kNoError) | |
146 return false; | |
147 | |
148 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the | |
149 // divide-by-2 but -7 is used instead to give a bit of headroom since the | |
150 // AGC is not a hard limiter. | |
151 if(_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError) | |
152 return false; | |
153 | |
154 if(_limiter->gain_control()->set_compression_gain_db(0) | |
155 != _limiter->kNoError) | |
156 return false; | |
157 | |
158 if(_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) | |
159 return false; | |
160 | |
161 if(_limiter->gain_control()->Enable(true) != _limiter->kNoError) | |
162 return false; | |
163 | |
164 return true; | |
165 } | |
166 | |
AudioConferenceMixerImpl::~AudioConferenceMixerImpl() {
  MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
  // DeleteMemoryPool() is expected to null the pointer it was given.
  assert(_audioFramePool == NULL);
}
171 | |
// Process should be called every kProcessPeriodicityInMs ms
// Returns the number of ms until Process() is due, or -1 if the scheduler
// query fails.
int64_t AudioConferenceMixerImpl::TimeUntilNextProcess() {
  int64_t timeUntilNextProcess = 0;
  rtc::CritScope cs(&_crit);
  if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
    LOG(LS_ERROR) << "failed in TimeToNextUpdate() call";
    // Sanity check
    assert(false);
    return -1;
  }
  return timeUntilNextProcess;
}
184 | |
// Runs one mixing iteration: selects participants to mix, mixes their frames
// into a pooled AudioFrame, applies the limiter (or silence-fills) and hands
// the result to the registered receiver. Not re-entrant; guarded by the
// _processCalls counter under _crit.
void AudioConferenceMixerImpl::Process() {
  size_t remainingParticipantsAllowedToMix =
      kMaximumAmountOfMixedParticipants;
  {
    rtc::CritScope cs(&_crit);
    assert(_processCalls == 0);  // Process() must not be re-entered.
    _processCalls++;

    // Let the scheduler know that we are running one iteration.
    _timeScheduler.UpdateScheduler();
  }

  AudioFrameList mixList;
  AudioFrameList rampOutList;
  AudioFrameList additionalFramesList;
  std::map<int, MixerParticipant*> mixedParticipantsMap;
  {
    rtc::CritScope cs(&_cbCrit);

    int32_t lowFreq = GetLowestMixingFrequency();
    // SILK can run in 12 kHz and 24 kHz. These frequencies are not
    // supported so use the closest higher frequency to not lose any
    // information.
    // TODO(henrike): this is probably more appropriate to do in
    // GetLowestMixingFrequency().
    if (lowFreq == 12000) {
      lowFreq = 16000;
    } else if (lowFreq == 24000) {
      lowFreq = 32000;
    }
    if (lowFreq <= 0) {
      // No valid frequency: undo the re-entrancy guard and bail out.
      rtc::CritScope cs(&_crit);
      _processCalls--;
      return;
    } else {
      // Switch the output frequency only when it actually changes.
      switch (lowFreq) {
        case 8000:
          if (OutputFrequency() != kNbInHz) {
            SetOutputFrequency(kNbInHz);
          }
          break;
        case 16000:
          if (OutputFrequency() != kWbInHz) {
            SetOutputFrequency(kWbInHz);
          }
          break;
        case 32000:
          if (OutputFrequency() != kSwbInHz) {
            SetOutputFrequency(kSwbInHz);
          }
          break;
        case 48000:
          if (OutputFrequency() != kFbInHz) {
            SetOutputFrequency(kFbInHz);
          }
          break;
        default:
          assert(false);

          rtc::CritScope cs(&_crit);
          _processCalls--;
          return;
      }
    }

    // Select the frames to mix (and those to ramp out) under _cbCrit.
    UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
                &remainingParticipantsAllowedToMix);

    GetAdditionalAudio(&additionalFramesList);
    UpdateMixedStatus(mixedParticipantsMap);
  }

  // Get an AudioFrame for mixing from the memory pool.
  AudioFrame* mixedAudio = NULL;
  if (_audioFramePool->PopMemory(mixedAudio) == -1) {
    LOG(LS_ERROR) << "failed PopMemory() call";
    assert(false);
    return;
  }

  {
    rtc::CritScope cs(&_crit);

    // TODO(henrike): it might be better to decide the number of channels
    // with an API instead of dynamically.

    // Find the max channels over all mixing lists.
    const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList),
        std::max(MaxNumChannels(&additionalFramesList),
                 MaxNumChannels(&rampOutList)));

    mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
                            AudioFrame::kNormalSpeech,
                            AudioFrame::kVadPassive, num_mixed_channels);

    _timeStamp += static_cast<uint32_t>(_sampleSize);

    // We only use the limiter if it supports the output sample rate and
    // we're actually mixing multiple streams.
    use_limiter_ =
        _numMixedParticipants > 1 &&
        _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;

    MixFromList(mixedAudio, mixList);
    MixAnonomouslyFromList(mixedAudio, additionalFramesList);
    MixAnonomouslyFromList(mixedAudio, rampOutList);

    if (mixedAudio->samples_per_channel_ == 0) {
      // Nothing was mixed, set the audio samples to silence.
      mixedAudio->samples_per_channel_ = _sampleSize;
      AudioFrameOperations::Mute(mixedAudio);
    } else {
      // Only call the limiter if we have something to mix.
      LimitMixedAudio(mixedAudio);
    }
  }

  // Deliver the mixed frame to the registered receiver, if any.
  {
    rtc::CritScope cs(&_cbCrit);
    if (_mixReceiver != NULL) {
      const AudioFrame** dummy = NULL;
      _mixReceiver->NewMixedAudio(
          _id,
          *mixedAudio,
          dummy,
          0);
    }
  }

  // Reclaim all outstanding memory.
  _audioFramePool->PushMemory(mixedAudio);
  ClearAudioFrameList(&mixList);
  ClearAudioFrameList(&rampOutList);
  ClearAudioFrameList(&additionalFramesList);
  {
    rtc::CritScope cs(&_crit);
    _processCalls--;
  }
  return;
}
325 | |
// Registers |mixReceiver| as the sink for mixed audio. Only one receiver is
// supported at a time; returns -1 if one is already registered.
int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
    AudioMixerOutputReceiver* mixReceiver) {
  rtc::CritScope cs(&_cbCrit);
  if (_mixReceiver != NULL) {
    return -1;
  }
  _mixReceiver = mixReceiver;
  return 0;
}
335 | |
// Clears the registered receiver. Returns -1 if none was registered.
int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
  rtc::CritScope cs(&_cbCrit);
  if (_mixReceiver == NULL) {
    return -1;
  }
  _mixReceiver = NULL;
  return 0;
}
344 | |
// Sets the mixer output rate and derives the per-iteration sample count from
// the process periodicity. Always succeeds (returns 0).
int32_t AudioConferenceMixerImpl::SetOutputFrequency(
    const Frequency& frequency) {
  rtc::CritScope cs(&_crit);

  _outputFrequency = frequency;
  // Samples per channel per Process() call.
  _sampleSize =
      static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000);

  return 0;
}
355 | |
// Returns the current output frequency, guarded by _crit.
AudioConferenceMixer::Frequency
AudioConferenceMixerImpl::OutputFrequency() const {
  rtc::CritScope cs(&_crit);
  return _outputFrequency;
}
361 | |
362 int32_t AudioConferenceMixerImpl::SetMixabilityStatus( | |
363 MixerParticipant* participant, bool mixable) { | |
364 if (!mixable) { | |
365 // Anonymous participants are in a separate list. Make sure that the | |
366 // participant is in the _participantList if it is being mixed. | |
367 SetAnonymousMixabilityStatus(participant, false); | |
368 } | |
369 size_t numMixedParticipants; | |
370 { | |
371 rtc::CritScope cs(&_cbCrit); | |
372 const bool isMixed = | |
373 IsParticipantInList(*participant, _participantList); | |
374 // API must be called with a new state. | |
375 if(!(mixable ^ isMixed)) { | |
376 LOG(LS_ERROR) << "Mixable is aready " << | |
377 (isMixed ? "ON" : "off"); | |
378 return -1; | |
379 } | |
380 bool success = false; | |
381 if(mixable) { | |
382 success = AddParticipantToList(participant, &_participantList); | |
383 } else { | |
384 success = RemoveParticipantFromList(participant, &_participantList); | |
385 } | |
386 if(!success) { | |
387 LOG(LS_ERROR) << "failed to " << (mixable ? "add" : "remove") | |
388 << " participant"; | |
389 assert(false); | |
390 return -1; | |
391 } | |
392 | |
393 size_t numMixedNonAnonymous = _participantList.size(); | |
394 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) { | |
395 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants; | |
396 } | |
397 numMixedParticipants = | |
398 numMixedNonAnonymous + _additionalParticipantList.size(); | |
399 } | |
400 // A MixerParticipant was added or removed. Make sure the scratch | |
401 // buffer is updated if necessary. | |
402 // Note: The scratch buffer may only be updated in Process(). | |
403 rtc::CritScope cs(&_crit); | |
404 _numMixedParticipants = numMixedParticipants; | |
405 return 0; | |
406 } | |
407 | |
// Returns true if |participant| is currently in the regular (non-anonymous)
// mixing list.
bool AudioConferenceMixerImpl::MixabilityStatus(
    const MixerParticipant& participant) const {
  rtc::CritScope cs(&_cbCrit);
  return IsParticipantInList(participant, _participantList);
}
413 | |
// Moves |participant| between the regular list and the anonymous
// (always-mixed) list. A participant must already be registered as mixable
// before it can be made anonymous. Returns 0 on success, -1 on failure.
int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
    MixerParticipant* participant, bool anonymous) {
  rtc::CritScope cs(&_cbCrit);
  if (IsParticipantInList(*participant, _additionalParticipantList)) {
    // Already anonymous: either a no-op, or demote back to the regular list.
    if (anonymous) {
      return 0;
    }
    if (!RemoveParticipantFromList(participant,
                                   &_additionalParticipantList)) {
      LOG(LS_ERROR) << "unable to remove participant from anonymous list";
      assert(false);
      return -1;
    }
    return AddParticipantToList(participant, &_participantList) ? 0 : -1;
  }
  if (!anonymous) {
    // Not anonymous and not requested to be: nothing to do.
    return 0;
  }
  const bool mixable = RemoveParticipantFromList(participant,
                                                 &_participantList);
  if (!mixable) {
    LOG(LS_WARNING) <<
        "participant must be registered before turning it into anonymous";
    // Setting anonymous status is only possible if MixerParticipant is
    // already registered.
    return -1;
  }
  return AddParticipantToList(participant, &_additionalParticipantList) ?
      0 : -1;
}
444 | |
// Returns true if |participant| is currently in the anonymous mixing list.
bool AudioConferenceMixerImpl::AnonymousMixabilityStatus(
    const MixerParticipant& participant) const {
  rtc::CritScope cs(&_cbCrit);
  return IsParticipantInList(participant, _additionalParticipantList);
}
450 | |
451 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency( | |
452 Frequency freq) { | |
453 // Make sure that only allowed sampling frequencies are used. Use closest | |
454 // higher sampling frequency to avoid losing information. | |
455 if (static_cast<int>(freq) == 12000) { | |
456 freq = kWbInHz; | |
457 } else if (static_cast<int>(freq) == 24000) { | |
458 freq = kSwbInHz; | |
459 } | |
460 | |
461 if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || | |
462 (freq == kLowestPossible)) { | |
463 _minimumMixingFreq=freq; | |
464 return 0; | |
465 } else { | |
466 LOG(LS_ERROR) << "SetMinimumMixingFrequency incorrect frequency: " | |
467 << freq; | |
468 assert(false); | |
469 return -1; | |
470 } | |
471 } | |
472 | |
473 // Check all AudioFrames that are to be mixed. The highest sampling frequency | |
474 // found is the lowest that can be used without losing information. | |
475 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() const { | |
476 const int participantListFrequency = | |
477 GetLowestMixingFrequencyFromList(_participantList); | |
478 const int anonymousListFrequency = | |
479 GetLowestMixingFrequencyFromList(_additionalParticipantList); | |
480 const int highestFreq = | |
481 (participantListFrequency > anonymousListFrequency) ? | |
482 participantListFrequency : anonymousListFrequency; | |
483 // Check if the user specified a lowest mixing frequency. | |
484 if(_minimumMixingFreq != kLowestPossible) { | |
485 if(_minimumMixingFreq > highestFreq) { | |
486 return _minimumMixingFreq; | |
487 } | |
488 } | |
489 return highestFreq; | |
490 } | |
491 | |
492 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList( | |
493 const MixerParticipantList& mixList) const { | |
494 int32_t highestFreq = 8000; | |
495 for (MixerParticipantList::const_iterator iter = mixList.begin(); | |
496 iter != mixList.end(); | |
497 ++iter) { | |
498 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); | |
499 if(neededFrequency > highestFreq) { | |
500 highestFreq = neededFrequency; | |
501 } | |
502 } | |
503 return highestFreq; | |
504 } | |
505 | |
// Decides which participants are mixed this round.
// Outputs: |mixList| gets the frames to mix; |rampOutList| gets frames from
// participants that were mixed last round but lost their slot (ramped out to
// avoid discontinuities); |mixParticipantList| maps frame id -> participant
// for every mixed frame; |maxAudioFrameCounter| is reduced by the number of
// slots consumed. All frames come from _audioFramePool and are either handed
// to an output list or returned to the pool before this function exits.
void AudioConferenceMixerImpl::UpdateToMix(
    AudioFrameList* mixList,
    AudioFrameList* rampOutList,
    std::map<int, MixerParticipant*>* mixParticipantList,
    size_t* maxAudioFrameCounter) const {
  LOG(LS_VERBOSE) <<
      "UpdateToMix(mixList,rampOutList,mixParticipantList," <<
      *maxAudioFrameCounter << ")";
  const size_t mixListStartSize = mixList->size();
  AudioFrameList activeList;
  // Struct needed by the passive lists to keep track of which AudioFrame
  // belongs to which MixerParticipant.
  ParticipantFrameStructList passiveWasNotMixedList;
  ParticipantFrameStructList passiveWasMixedList;
  for (MixerParticipantList::const_iterator participant =
           _participantList.begin();
       participant != _participantList.end();
       ++participant) {
    // Stop keeping track of passive participants if there are already
    // enough participants available (they wont be mixed anyway).
    bool mustAddToPassiveList = (*maxAudioFrameCounter >
        (activeList.size() +
         passiveWasMixedList.size() +
         passiveWasNotMixedList.size()));

    bool wasMixed = false;
    wasMixed = (*participant)->_mixHistory->WasMixed();
    AudioFrame* audioFrame = NULL;
    if (_audioFramePool->PopMemory(audioFrame) == -1) {
      LOG(LS_ERROR) << "failed PopMemory() call";
      assert(false);
      return;
    }
    audioFrame->sample_rate_hz_ = _outputFrequency;

    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
    if (ret == MixerParticipant::AudioFrameInfo::kError) {
      LOG(LS_WARNING)
          << "failed to GetAudioFrameWithMuted() from participant";
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    const bool muted = (ret == MixerParticipant::AudioFrameInfo::kMuted);
    if (_participantList.size() != 1) {
      // TODO(wu): Issue 3390, add support for multiple participants case.
      audioFrame->ntp_time_ms_ = -1;
    }

    // TODO(henrike): this assert triggers in some test cases where SRTP is
    // used which prevents NetEQ from making a VAD. Temporarily disable this
    // assert until the problem is fixed on a higher level.
    // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
    if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
      LOG(LS_WARNING) << "invalid VAD state from participant";
    }

    if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
      // Active (speaking) participant. Ramp in frames that were not mixed
      // last round so they fade in smoothly.
      if (!wasMixed && !muted) {
        RampIn(*audioFrame);
      }

      if (activeList.size() >= *maxAudioFrameCounter) {
        // There are already more active participants than should be
        // mixed. Only keep the ones with the highest energy.
        AudioFrameList::iterator replaceItem;
        uint32_t lowestEnergy =
            muted ? 0 : CalculateEnergy(*audioFrame);

        bool found_replace_item = false;
        for (AudioFrameList::iterator iter = activeList.begin();
             iter != activeList.end();
             ++iter) {
          const uint32_t energy =
              muted ? 0 : CalculateEnergy(*iter->frame);
          if (energy < lowestEnergy) {
            replaceItem = iter;
            lowestEnergy = energy;
            found_replace_item = true;
          }
        }
        if (found_replace_item) {
          RTC_DCHECK(!muted);  // Cannot replace with a muted frame.
          FrameAndMuteInfo replaceFrame = *replaceItem;

          bool replaceWasMixed = false;
          std::map<int, MixerParticipant*>::const_iterator it =
              mixParticipantList->find(replaceFrame.frame->id_);

          // When a frame is pushed to |activeList| it is also pushed
          // to mixParticipantList with the frame's id. This means
          // that the Find call above should never fail.
          assert(it != mixParticipantList->end());
          replaceWasMixed = it->second->_mixHistory->WasMixed();

          mixParticipantList->erase(replaceFrame.frame->id_);
          activeList.erase(replaceItem);

          activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
          (*mixParticipantList)[audioFrame->id_] = *participant;
          assert(mixParticipantList->size() <=
                 kMaximumAmountOfMixedParticipants);

          if (replaceWasMixed) {
            // The displaced participant was audible last round; ramp it
            // out instead of cutting it off abruptly.
            if (!replaceFrame.muted) {
              RampOut(*replaceFrame.frame);
            }
            rampOutList->push_back(replaceFrame);
            assert(rampOutList->size() <=
                   kMaximumAmountOfMixedParticipants);
          } else {
            _audioFramePool->PushMemory(replaceFrame.frame);
          }
        } else {
          // This frame did not beat any current member of the active list.
          if (wasMixed) {
            if (!muted) {
              RampOut(*audioFrame);
            }
            rampOutList->push_back(FrameAndMuteInfo(audioFrame,
                                                    muted));
            assert(rampOutList->size() <=
                   kMaximumAmountOfMixedParticipants);
          } else {
            _audioFramePool->PushMemory(audioFrame);
          }
        }
      } else {
        // Room left in the active list; add directly.
        activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
        (*mixParticipantList)[audioFrame->id_] = *participant;
        assert(mixParticipantList->size() <=
               kMaximumAmountOfMixedParticipants);
      }
    } else {
      // Passive (non-speaking) participant; remember its frame so passive
      // participants can fill leftover slots below.
      if (wasMixed) {
        ParticipantFrameStruct* part_struct =
            new ParticipantFrameStruct(*participant, audioFrame, muted);
        passiveWasMixedList.push_back(part_struct);
      } else if (mustAddToPassiveList) {
        if (!muted) {
          RampIn(*audioFrame);
        }
        ParticipantFrameStruct* part_struct =
            new ParticipantFrameStruct(*participant, audioFrame, muted);
        passiveWasNotMixedList.push_back(part_struct);
      } else {
        _audioFramePool->PushMemory(audioFrame);
      }
    }
  }
  assert(activeList.size() <= *maxAudioFrameCounter);
  // At this point it is known which participants should be mixed. Transfer
  // this information to this functions output parameters.
  for (AudioFrameList::const_iterator iter = activeList.begin();
       iter != activeList.end();
       ++iter) {
    mixList->push_back(*iter);
  }
  activeList.clear();
  // Always mix a constant number of AudioFrames. If there aren't enough
  // active participants mix passive ones. Starting with those that was mixed
  // last iteration.
  for (ParticipantFrameStructList::const_iterator
           iter = passiveWasMixedList.begin();
       iter != passiveWasMixedList.end();
       ++iter) {
    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame,
                                          (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] =
          (*iter)->participant;
      assert(mixParticipantList->size() <=
             kMaximumAmountOfMixedParticipants);
    } else {
      _audioFramePool->PushMemory((*iter)->audioFrame);
    }
    delete *iter;
  }
  // And finally the ones that have not been mixed for a while.
  for (ParticipantFrameStructList::const_iterator iter =
           passiveWasNotMixedList.begin();
       iter != passiveWasNotMixedList.end();
       ++iter) {
    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame,
                                          (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] =
          (*iter)->participant;
      assert(mixParticipantList->size() <=
             kMaximumAmountOfMixedParticipants);
    } else {
      _audioFramePool->PushMemory((*iter)->audioFrame);
    }
    delete *iter;
  }
  assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
  // Report back how many mixing slots remain after this call.
  *maxAudioFrameCounter += mixListStartSize - mixList->size();
}
700 | |
// Collects one frame from every anonymous (always-mixed) participant into
// |additionalFramesList|. Frames that fail or come back empty are returned
// to the pool and skipped.
void AudioConferenceMixerImpl::GetAdditionalAudio(
    AudioFrameList* additionalFramesList) const {
  LOG(LS_VERBOSE) << "GetAdditionalAudio(additionalFramesList)";
  // The GetAudioFrameWithMuted() callback may result in the participant being
  // removed from additionalParticipantList_. If that happens it will
  // invalidate any iterators. Create a copy of the participants list such
  // that the list of participants can be traversed safely.
  MixerParticipantList additionalParticipantList;
  additionalParticipantList.insert(additionalParticipantList.begin(),
                                   _additionalParticipantList.begin(),
                                   _additionalParticipantList.end());

  for (MixerParticipantList::const_iterator participant =
           additionalParticipantList.begin();
       participant != additionalParticipantList.end();
       ++participant) {
    AudioFrame* audioFrame = NULL;
    if (_audioFramePool->PopMemory(audioFrame) == -1) {
      LOG(LS_ERROR) << "failed PopMemory() call";
      assert(false);
      return;
    }
    audioFrame->sample_rate_hz_ = _outputFrequency;
    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
    if (ret == MixerParticipant::AudioFrameInfo::kError) {
      LOG(LS_WARNING)
          << "failed to GetAudioFrameWithMuted() from participant";
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    if (audioFrame->samples_per_channel_ == 0) {
      // Empty frame. Don't use it.
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    additionalFramesList->push_back(FrameAndMuteInfo(
        audioFrame, ret == MixerParticipant::AudioFrameInfo::kMuted));
  }
}
740 | |
741 void AudioConferenceMixerImpl::UpdateMixedStatus( | |
742 const std::map<int, MixerParticipant*>& mixedParticipantsMap) const { | |
743 LOG(LS_VERBOSE) << "UpdateMixedStatus(mixedParticipantsMap)"; | |
744 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); | |
745 | |
746 // Loop through all participants. If they are in the mix map they | |
747 // were mixed. | |
748 for (MixerParticipantList::const_iterator | |
749 participant =_participantList.begin(); | |
750 participant != _participantList.end(); | |
751 ++participant) { | |
752 bool isMixed = false; | |
753 for (auto it = mixedParticipantsMap.begin(); | |
754 it != mixedParticipantsMap.end(); | |
755 ++it) { | |
756 if (it->second == *participant) { | |
757 isMixed = true; | |
758 break; | |
759 } | |
760 } | |
761 (*participant)->_mixHistory->SetIsMixed(isMixed); | |
762 } | |
763 } | |
764 | |
765 void AudioConferenceMixerImpl::ClearAudioFrameList( | |
766 AudioFrameList* audioFrameList) const { | |
767 LOG(LS_VERBOSE) << "ClearAudioFrameList(audioFrameList)"; | |
768 for (AudioFrameList::iterator iter = audioFrameList->begin(); | |
769 iter != audioFrameList->end(); | |
770 ++iter) { | |
771 _audioFramePool->PushMemory(iter->frame); | |
772 } | |
773 audioFrameList->clear(); | |
774 } | |
775 | |
776 bool AudioConferenceMixerImpl::IsParticipantInList( | |
777 const MixerParticipant& participant, | |
778 const MixerParticipantList& participantList) const { | |
779 LOG(LS_VERBOSE) << "IsParticipantInList(participant,participantList)"; | |
780 for (MixerParticipantList::const_iterator iter = participantList.begin(); | |
781 iter != participantList.end(); | |
782 ++iter) { | |
783 if(&participant == *iter) { | |
784 return true; | |
785 } | |
786 } | |
787 return false; | |
788 } | |
789 | |
// Appends |participant| to |participantList|. Always succeeds (returns
// true); the bool return mirrors RemoveParticipantFromList().
bool AudioConferenceMixerImpl::AddParticipantToList(
    MixerParticipant* participant,
    MixerParticipantList* participantList) const {
  LOG(LS_VERBOSE) << "AddParticipantToList(participant, participantList)";
  participantList->push_back(participant);
  // Make sure that the mixed status is correct for new MixerParticipant.
  participant->_mixHistory->ResetMixedStatus();
  return true;
}
799 | |
// Removes the first occurrence of |participant| from |participantList| and
// resets its mixed status. Returns false if it was not found.
bool AudioConferenceMixerImpl::RemoveParticipantFromList(
    MixerParticipant* participant,
    MixerParticipantList* participantList) const {
  LOG(LS_VERBOSE)
      << "RemoveParticipantFromList(participant, participantList)";
  for (MixerParticipantList::iterator iter = participantList->begin();
       iter != participantList->end();
       ++iter) {
    if (*iter == participant) {
      participantList->erase(iter);
      // Participant is no longer mixed, reset to default.
      participant->_mixHistory->ResetMixedStatus();
      return true;
    }
  }
  return false;
}
817 | |
818 int32_t AudioConferenceMixerImpl::MixFromList( | |
819 AudioFrame* mixedAudio, | |
820 const AudioFrameList& audioFrameList) const { | |
821 | |
822 LOG(LS_VERBOSE) << "MixFromList(mixedAudio, audioFrameList)"; | |
823 if(audioFrameList.empty()) return 0; | |
824 | |
825 uint32_t position = 0; | |
826 | |
827 if (_numMixedParticipants == 1) { | |
828 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; | |
829 mixedAudio->elapsed_time_ms_ = | |
830 audioFrameList.front().frame->elapsed_time_ms_; | |
831 } else { | |
832 // TODO(wu): Issue 3390. | |
833 // Audio frame timestamp is only supported in one channel case. | |
834 mixedAudio->timestamp_ = 0; | |
835 mixedAudio->elapsed_time_ms_ = -1; | |
836 } | |
837 | |
838 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | |
839 iter != audioFrameList.end(); | |
840 ++iter) { | |
841 if(position >= kMaximumAmountOfMixedParticipants) { | |
842 LOG(LS_ERROR) << | |
843 "Trying to mix more than max amount of mixed participants:" | |
844 << kMaximumAmountOfMixedParticipants << "!"; | |
845 // Assert and avoid crash | |
846 assert(false); | |
847 position = 0; | |
848 } | |
849 if (!iter->muted) { | |
850 MixFrames(mixedAudio, iter->frame, use_limiter_); | |
851 } | |
852 | |
853 position++; | |
854 } | |
855 | |
856 return 0; | |
857 } | |
858 | |
859 // TODO(andrew): consolidate this function with MixFromList. | |
860 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList( | |
861 AudioFrame* mixedAudio, | |
862 const AudioFrameList& audioFrameList) const { | |
863 LOG(LS_VERBOSE) << "MixAnonomouslyFromList(mixedAudio, audioFrameList)"; | |
864 | |
865 if(audioFrameList.empty()) return 0; | |
866 | |
867 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | |
868 iter != audioFrameList.end(); | |
869 ++iter) { | |
870 if (!iter->muted) { | |
871 MixFrames(mixedAudio, iter->frame, use_limiter_); | |
872 } | |
873 } | |
874 return 0; | |
875 } | |
876 | |
// Runs the AGC-based limiter over |mixedAudio| and restores the half gain
// applied in MixFrames() by doubling the signal. Returns false if the
// limiter reported an error. No-op (returns true) when the limiter is
// disabled for this round.
bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
  if (!use_limiter_) {
    return true;
  }

  // Smoothly limit the mixed frame.
  const int error = _limiter->ProcessStream(mixedAudio);

  // And now we can safely restore the level. This procedure results in
  // some loss of resolution, deemed acceptable.
  //
  // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
  // and compression gain of 6 dB). However, in the transition frame when this
  // is enabled (moving from one to two participants) it has the potential to
  // create discontinuities in the mixed frame.
  //
  // Instead we double the frame (with addition since left-shifting a
  // negative value is undefined).
  AudioFrameOperations::Add(*mixedAudio, mixedAudio);

  if (error != _limiter->kNoError) {
    LOG(LS_ERROR) << "Error from AudioProcessing: " << error;
    assert(false);
    return false;
  }
  return true;
}
904 } // namespace webrtc | |
OLD | NEW |