/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"

#include <algorithm>

#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"

namespace webrtc {
namespace {

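// Bundles a participant with its AudioFrame and mute flag while the passive
// candidate lists in UpdateToMix() are being built.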
struct ParticipantFrameStruct {
  ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m)
      : participant(p), audioFrame(a), muted(m) {}
  MixerAudioSource* participant;
  AudioFrame* audioFrame;
  bool muted;
};

typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;

// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
// These effects are applied to |frame| itself prior to mixing. Assumes that
// |mixed_frame| always has at least as many channels as |frame|. Supports
// stereo at most.
//
// TODO(andrew): consider not modifying |frame| here.
void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
  RTC_DCHECK(mixed_frame->num_channels_ >= frame->num_channels_);
  if (use_limiter) {
    // Divide by two to avoid saturation in the mixing.
    // This is only meaningful if the limiter will be used.
    *frame >>= 1;
  }
  if (mixed_frame->num_channels_ > frame->num_channels_) {
    // We only support mono-to-stereo.
    RTC_DCHECK(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1);
    AudioFrameOperations::MonoToStereo(frame);
  }

  *mixed_frame += *frame;
}

// Return the max number of channels from a |list| composed of AudioFrames.
size_t MaxNumChannels(const AudioFrameList* list) {
  size_t max_num_channels = 1;
  for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end();
       ++iter) {
    max_num_channels = std::max(max_num_channels, (*iter).frame->num_channels_);
  }
  return max_num_channels;
}
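
// (Aside, not part of this CL: with C++11 the helper above could use a
// range-based loop. A sketch, assuming AudioFrameList's value type keeps the
// |frame| member used above:
//
//   size_t MaxNumChannels(const AudioFrameList* list) {
//     size_t max_num_channels = 1;
//     for (const auto& frame_and_mute : *list) {
//       max_num_channels =
//           std::max(max_num_channels, frame_and_mute.frame->num_channels_);
//     }
//     return max_num_channels;
//   }
// )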

}  // namespace

MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}

MixerAudioSource::~MixerAudioSource() {
  delete _mixHistory;
}

bool MixerAudioSource::IsMixed() const {
  return _mixHistory->IsMixed();
}

NewMixHistory::NewMixHistory() : _isMixed(false) {}

NewMixHistory::~NewMixHistory() {}

bool NewMixHistory::IsMixed() const {
  return _isMixed;
}

bool NewMixHistory::WasMixed() const {
  // Whether a source 'was mixed' is the same as whether it 'is mixed'; the
  // difference is only one of perspective. This function takes the
  // perspective of NewAudioConferenceMixerImpl, for which the current status
  // is the result of the previous mix iteration.
  return IsMixed();
}

int32_t NewMixHistory::SetIsMixed(const bool mixed) {
  _isMixed = mixed;
  return 0;
}

void NewMixHistory::ResetMixedStatus() {
  _isMixed = false;
}

NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) {
  NewAudioConferenceMixerImpl* mixer = new NewAudioConferenceMixerImpl(id);
  if (!mixer->Init()) {
    delete mixer;
    return NULL;
  }
  return mixer;
}

NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
    : _id(id),
      _minimumMixingFreq(kLowestPossible),
      _outputFrequency(kDefaultFrequency),
      _sampleSize(0),
      _audioFramePool(NULL),
      _participantList(),
      _additionalParticipantList(),
      _numMixedParticipants(0),
      use_limiter_(true),
      _timeStamp(0),
      _timeScheduler(kProcessPeriodicityInMs),
      _processCalls(0) {}

bool NewAudioConferenceMixerImpl::Init() {
  _crit.reset(CriticalSectionWrapper::CreateCriticalSection());
  if (_crit.get() == NULL)
    return false;

  _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection());
  if (_cbCrit.get() == NULL)
    return false;

  Config config;
  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
  _limiter.reset(AudioProcessing::Create(config));
  if (!_limiter.get())
    return false;

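  // UpdateToMix() and GetAdditionalAudio() pop frames from this pool on each
  // mix iteration instead of allocating on the audio path.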
  MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
                                           DEFAULT_AUDIO_FRAME_POOLSIZE);
  if (_audioFramePool == NULL)
    return false;

  if (SetOutputFrequency(kDefaultFrequency) == -1)
    return false;

  if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
      _limiter->kNoError)
    return false;

  // We smoothly limit the mixed frame to -7 dBFS. -6 would correspond to the
  // divide-by-2 but -7 is used instead to give a bit of headroom since the
  // AGC is not a hard limiter.
  if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError)
    return false;

  if (_limiter->gain_control()->set_compression_gain_db(0) !=
      _limiter->kNoError)
    return false;

  if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
    return false;

  if (_limiter->gain_control()->Enable(true) != _limiter->kNoError)
    return false;

  return true;
}

NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
  MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
  RTC_DCHECK(_audioFramePool == NULL);
}

// Process should be called every kProcessPeriodicityInMs ms.
int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() {
  int64_t timeUntilNextProcess = 0;
  CriticalSectionScoped cs(_crit.get());
  if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                 "failed in TimeToNextUpdate() call");
    // Sanity check
    RTC_DCHECK(false);
    return -1;
  }
  return timeUntilNextProcess;
}

void NewAudioConferenceMixerImpl::Process() {
  // TODO(aleloi) Remove this method.
  RTC_NOTREACHED();
}

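// Runs one mixing iteration: picks the output frequency from the
// participants' needed frequencies, selects which participants to mix,
// mixes their frames into |audio_frame_for_mixing| and applies the limiter.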
void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
  size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
  {
    CriticalSectionScoped cs(_crit.get());
    RTC_DCHECK(_processCalls == 0);

ivoc 2016/07/07 13:46:16: Please use RTC_DCHECK_EQ, RTC_DCHECK_NEQ, RTC_DCHE
aleloi 2016/07/07 14:29:12: Done.
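(For reference, the two-argument form the reviewer asks for would read roughly
as follows; RTC_DCHECK_EQ reports both operand values when the check fails,
which the plain RTC_DCHECK above cannot.)

    RTC_DCHECK_EQ(0, _processCalls);
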
    _processCalls++;

    // Let the scheduler know that we are running one iteration.
    _timeScheduler.UpdateScheduler();
  }

  AudioFrameList mixList;
  AudioFrameList rampOutList;
  AudioFrameList additionalFramesList;
  std::map<int, MixerAudioSource*> mixedParticipantsMap;
  {
    CriticalSectionScoped cs(_cbCrit.get());

    int32_t lowFreq = GetLowestMixingFrequency();
    // SILK can run at 12 kHz and 24 kHz. These frequencies are not
    // supported so use the closest higher frequency to not lose any
    // information.
    // TODO(henrike): this is probably more appropriate to do in
    // GetLowestMixingFrequency().
    if (lowFreq == 12000) {
      lowFreq = 16000;
    } else if (lowFreq == 24000) {
      lowFreq = 32000;
    }
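    // After this remapping, lowFreq should match one of the natively
    // supported rates handled by the switch below (8, 16, 32 or 48 kHz);
    // anything else ends up in the default branch.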
    if (lowFreq <= 0) {
      CriticalSectionScoped cs(_crit.get());
      _processCalls--;
      return;
    } else {
      switch (lowFreq) {
        case 8000:
          if (OutputFrequency() != kNbInHz) {
            SetOutputFrequency(kNbInHz);
          }
          break;
        case 16000:
          if (OutputFrequency() != kWbInHz) {
            SetOutputFrequency(kWbInHz);
          }
          break;
        case 32000:
          if (OutputFrequency() != kSwbInHz) {
            SetOutputFrequency(kSwbInHz);
          }
          break;
        case 48000:
          if (OutputFrequency() != kFbInHz) {
            SetOutputFrequency(kFbInHz);
          }
          break;
        default:
          RTC_DCHECK(false);

          CriticalSectionScoped cs(_crit.get());
          _processCalls--;
          return;
      }
    }

    UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
                &remainingParticipantsAllowedToMix);

    GetAdditionalAudio(&additionalFramesList);
    UpdateMixedStatus(mixedParticipantsMap);
  }

  // TODO(henrike): it might be better to decide the number of channels
  // with an API instead of dynamically.

  // Find the max channels over all mixing lists.
  const size_t num_mixed_channels = std::max(
      MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList),
                                         MaxNumChannels(&rampOutList)));

  audio_frame_for_mixing->UpdateFrame(
      -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech,
      AudioFrame::kVadPassive, num_mixed_channels);

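  // Advance the timestamp by one output frame's worth of samples per channel.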
  _timeStamp += static_cast<uint32_t>(_sampleSize);

  // We only use the limiter if it supports the output sample rate and
  // we're actually mixing multiple streams.
  use_limiter_ = _numMixedParticipants > 1 &&
                 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;

  MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_);

  {
    CriticalSectionScoped cs(_crit.get());
    MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
    MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList);

    if (audio_frame_for_mixing->samples_per_channel_ == 0) {
      // Nothing was mixed, set the audio samples to silence.
      audio_frame_for_mixing->samples_per_channel_ = _sampleSize;
      audio_frame_for_mixing->Mute();
    } else {
      // Only call the limiter if we have something to mix.
      LimitMixedAudio(audio_frame_for_mixing);
    }
  }

  ClearAudioFrameList(&mixList);
  ClearAudioFrameList(&rampOutList);
  ClearAudioFrameList(&additionalFramesList);
  {
    CriticalSectionScoped cs(_crit.get());
    _processCalls--;
  }
  return;
}

int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
    const Frequency& frequency) {
  CriticalSectionScoped cs(_crit.get());

  _outputFrequency = frequency;
  _sampleSize =
      static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000);

  return 0;
}

NewAudioConferenceMixer::Frequency
NewAudioConferenceMixerImpl::OutputFrequency() const {
  CriticalSectionScoped cs(_crit.get());
  return _outputFrequency;
}

int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
    MixerAudioSource* participant,
    bool mixable) {
  if (!mixable) {
    // Anonymous participants are in a separate list. Make sure that the
    // participant is in the _participantList if it is being mixed.
    SetAnonymousMixabilityStatus(participant, false);
  }
  size_t numMixedParticipants;
  {
    CriticalSectionScoped cs(_cbCrit.get());
    const bool isMixed = IsParticipantInList(*participant, _participantList);
    // API must be called with a new state.
    if (!(mixable ^ isMixed)) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "Mixable is already %s", isMixed ? "on" : "off");
      return -1;
    }
    bool success = false;
    if (mixable) {
      success = AddParticipantToList(participant, &_participantList);
    } else {
      success = RemoveParticipantFromList(participant, &_participantList);
    }
    if (!success) {
      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                   "failed to %s participant", mixable ? "add" : "remove");
      RTC_DCHECK(false);
      return -1;
    }

    size_t numMixedNonAnonymous = _participantList.size();
    if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) {
      numMixedNonAnonymous = kMaximumAmountOfMixedParticipants;
    }
    numMixedParticipants =
        numMixedNonAnonymous + _additionalParticipantList.size();
  }
  // A MixerAudioSource was added or removed. Make sure the scratch
  // buffer is updated if necessary.
  // Note: The scratch buffer may only be updated in Process().
  CriticalSectionScoped cs(_crit.get());
  _numMixedParticipants = numMixedParticipants;
  return 0;
}

bool NewAudioConferenceMixerImpl::MixabilityStatus(
    const MixerAudioSource& participant) const {
  CriticalSectionScoped cs(_cbCrit.get());
  return IsParticipantInList(participant, _participantList);
}

int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
    MixerAudioSource* participant,
    bool anonymous) {
  CriticalSectionScoped cs(_cbCrit.get());
  if (IsParticipantInList(*participant, _additionalParticipantList)) {
    if (anonymous) {
      return 0;
    }
    if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) {
      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                   "unable to remove participant from anonymous list");
      RTC_DCHECK(false);
      return -1;
    }
    return AddParticipantToList(participant, &_participantList) ? 0 : -1;
  }
  if (!anonymous) {
    return 0;
  }
  const bool mixable =
      RemoveParticipantFromList(participant, &_participantList);
  if (!mixable) {
    WEBRTC_TRACE(
        kTraceWarning, kTraceAudioMixerServer, _id,
        "participant must be registered before it can be made anonymous");
    // Setting anonymous status is only possible if the MixerAudioSource is
    // already registered.
    return -1;
  }
  return AddParticipantToList(participant, &_additionalParticipantList) ? 0
                                                                        : -1;
}

bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
    const MixerAudioSource& participant) const {
  CriticalSectionScoped cs(_cbCrit.get());
  return IsParticipantInList(participant, _additionalParticipantList);
}

int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) {
  // Make sure that only allowed sampling frequencies are used. Use closest
  // higher sampling frequency to avoid losing information.
  if (static_cast<int>(freq) == 12000) {
    freq = kWbInHz;
  } else if (static_cast<int>(freq) == 24000) {
    freq = kSwbInHz;
  }

  if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
      (freq == kLowestPossible)) {
    _minimumMixingFreq = freq;
    return 0;
  } else {
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                 "SetMinimumMixingFrequency incorrect frequency: %i", freq);
    RTC_DCHECK(false);
    return -1;
  }
}

// Check all AudioFrames that are to be mixed. The highest sampling frequency
// found is the lowest that can be used without losing information.
int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const {
  const int participantListFrequency =
      GetLowestMixingFrequencyFromList(_participantList);
  const int anonymousListFrequency =
      GetLowestMixingFrequencyFromList(_additionalParticipantList);
  const int highestFreq = (participantListFrequency > anonymousListFrequency)
                              ? participantListFrequency
                              : anonymousListFrequency;
  // Check if the user specified a lowest mixing frequency.
  if (_minimumMixingFreq != kLowestPossible) {
    if (_minimumMixingFreq > highestFreq) {
      return _minimumMixingFreq;
    }
  }
  return highestFreq;
}

int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
    const MixerAudioSourceList& mixList) const {
  int32_t highestFreq = 8000;
  for (MixerAudioSourceList::const_iterator iter = mixList.begin();
       iter != mixList.end(); ++iter) {
    const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
    if (neededFrequency > highestFreq) {
      highestFreq = neededFrequency;
    }
  }
  return highestFreq;
}

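// Selects which participants to mix in this iteration. Sources with active
// VAD are preferred; when more of them are active than
// |*maxAudioFrameCounter| allows, the frames with the highest energy win.
// Remaining slots are filled with passive sources, starting with those that
// were mixed in the previous iteration. Frames that drop out of the mix are
// ramped out via |rampOutList| to avoid discontinuities.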
void NewAudioConferenceMixerImpl::UpdateToMix(
    AudioFrameList* mixList,
    AudioFrameList* rampOutList,
    std::map<int, MixerAudioSource*>* mixParticipantList,
    size_t* maxAudioFrameCounter) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
               *maxAudioFrameCounter);
  const size_t mixListStartSize = mixList->size();
  AudioFrameList activeList;
  // Struct needed by the passive lists to keep track of which AudioFrame
  // belongs to which MixerAudioSource.
  ParticipantFrameStructList passiveWasNotMixedList;
  ParticipantFrameStructList passiveWasMixedList;
  for (MixerAudioSourceList::const_iterator participant =
           _participantList.begin();
       participant != _participantList.end(); ++participant) {
    // Stop keeping track of passive participants if there are already
    // enough participants available (they won't be mixed anyway).
    bool mustAddToPassiveList =
        (*maxAudioFrameCounter >
         (activeList.size() + passiveWasMixedList.size() +
          passiveWasNotMixedList.size()));

    const bool wasMixed = (*participant)->_mixHistory->WasMixed();
    AudioFrame* audioFrame = NULL;
    if (_audioFramePool->PopMemory(audioFrame) == -1) {
      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
                   "failed PopMemory() call");
      RTC_DCHECK(false);
      return;
    }
    audioFrame->sample_rate_hz_ = _outputFrequency;

    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "failed to GetAudioFrameWithMuted() from participant");
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
    if (_participantList.size() != 1) {
      // TODO(wu): Issue 3390, add support for multiple participants case.
      audioFrame->ntp_time_ms_ = -1;
    }

    // TODO(henrike): this assert triggers in some test cases where SRTP is
    // used which prevents NetEQ from making a VAD. Temporarily disable this
    // assert until the problem is fixed on a higher level.
    // RTC_DCHECK(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
    if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "invalid VAD state from participant");
    }

    if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
      if (!wasMixed && !muted) {
        RampIn(*audioFrame);
      }

      if (activeList.size() >= *maxAudioFrameCounter) {
        // There are already more active participants than should be
        // mixed. Only keep the ones with the highest energy.
        AudioFrameList::iterator replaceItem;
        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);

        bool found_replace_item = false;
        for (AudioFrameList::iterator iter = activeList.begin();
             iter != activeList.end(); ++iter) {
          const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
          if (energy < lowestEnergy) {
            replaceItem = iter;
            lowestEnergy = energy;
            found_replace_item = true;
          }
        }
        if (found_replace_item) {
          RTC_DCHECK(!muted);  // Cannot replace with a muted frame.
          FrameAndMuteInfo replaceFrame = *replaceItem;

          bool replaceWasMixed = false;
          std::map<int, MixerAudioSource*>::const_iterator it =
              mixParticipantList->find(replaceFrame.frame->id_);

          // When a frame is pushed to |activeList| it is also pushed
          // to mixParticipantList with the frame's id. This means
          // that the find call above should never fail.
          RTC_DCHECK(it != mixParticipantList->end());
          replaceWasMixed = it->second->_mixHistory->WasMixed();

          mixParticipantList->erase(replaceFrame.frame->id_);
          activeList.erase(replaceItem);

          activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
          (*mixParticipantList)[audioFrame->id_] = *participant;
          RTC_DCHECK(mixParticipantList->size() <=
                     kMaximumAmountOfMixedParticipants);

          if (replaceWasMixed) {
            if (!replaceFrame.muted) {
              RampOut(*replaceFrame.frame);
            }
            rampOutList->push_back(replaceFrame);
            RTC_DCHECK(rampOutList->size() <=
                       kMaximumAmountOfMixedParticipants);
          } else {
            _audioFramePool->PushMemory(replaceFrame.frame);
          }
        } else {
          if (wasMixed) {
            if (!muted) {
              RampOut(*audioFrame);
            }
            rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
            RTC_DCHECK(rampOutList->size() <=
                       kMaximumAmountOfMixedParticipants);
          } else {
            _audioFramePool->PushMemory(audioFrame);
          }
        }
      } else {
        activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
        (*mixParticipantList)[audioFrame->id_] = *participant;
        RTC_DCHECK(mixParticipantList->size() <=
                   kMaximumAmountOfMixedParticipants);
      }
    } else {
      if (wasMixed) {
        ParticipantFrameStruct* part_struct =
            new ParticipantFrameStruct(*participant, audioFrame, muted);
        passiveWasMixedList.push_back(part_struct);
      } else if (mustAddToPassiveList) {
        if (!muted) {
          RampIn(*audioFrame);
        }
        ParticipantFrameStruct* part_struct =
            new ParticipantFrameStruct(*participant, audioFrame, muted);
        passiveWasNotMixedList.push_back(part_struct);
      } else {
        _audioFramePool->PushMemory(audioFrame);
      }
    }
  }
  RTC_DCHECK(activeList.size() <= *maxAudioFrameCounter);
  // At this point it is known which participants should be mixed. Transfer
  // this information to this function's output parameters.
  for (AudioFrameList::const_iterator iter = activeList.begin();
       iter != activeList.end(); ++iter) {
    mixList->push_back(*iter);
  }
  activeList.clear();
  // Always mix a constant number of AudioFrames. If there aren't enough
  // active participants, mix passive ones, starting with those that were
  // mixed last iteration.
  for (ParticipantFrameStructList::const_iterator iter =
           passiveWasMixedList.begin();
       iter != passiveWasMixedList.end(); ++iter) {
    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
      RTC_DCHECK(mixParticipantList->size() <=
                 kMaximumAmountOfMixedParticipants);
    } else {
      _audioFramePool->PushMemory((*iter)->audioFrame);
    }
    delete *iter;
  }
  // And finally the ones that have not been mixed for a while.
  for (ParticipantFrameStructList::const_iterator iter =
           passiveWasNotMixedList.begin();
       iter != passiveWasNotMixedList.end(); ++iter) {
    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
      RTC_DCHECK(mixParticipantList->size() <=
                 kMaximumAmountOfMixedParticipants);
    } else {
      _audioFramePool->PushMemory((*iter)->audioFrame);
    }
    delete *iter;
  }
  RTC_DCHECK(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
  *maxAudioFrameCounter += mixListStartSize - mixList->size();
}

void NewAudioConferenceMixerImpl::GetAdditionalAudio(
    AudioFrameList* additionalFramesList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "GetAdditionalAudio(additionalFramesList)");
  // The GetAudioFrameWithMuted() callback may result in the participant being
  // removed from additionalParticipantList_. If that happens it will
  // invalidate any iterators. Create a copy of the participants list such
  // that the list of participants can be traversed safely.
  MixerAudioSourceList additionalParticipantList;
  additionalParticipantList.insert(additionalParticipantList.begin(),
                                   _additionalParticipantList.begin(),
                                   _additionalParticipantList.end());

  for (MixerAudioSourceList::const_iterator participant =
           additionalParticipantList.begin();
       participant != additionalParticipantList.end(); ++participant) {
    AudioFrame* audioFrame = NULL;
    if (_audioFramePool->PopMemory(audioFrame) == -1) {
      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
                   "failed PopMemory() call");
      RTC_DCHECK(false);
      return;
    }
    audioFrame->sample_rate_hz_ = _outputFrequency;
    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "failed to GetAudioFrameWithMuted() from participant");
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    if (audioFrame->samples_per_channel_ == 0) {
      // Empty frame. Don't use it.
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    additionalFramesList->push_back(FrameAndMuteInfo(
        audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
  }
}

void NewAudioConferenceMixerImpl::UpdateMixedStatus(
    const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "UpdateMixedStatus(mixedParticipantsMap)");
  RTC_DCHECK(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);

  // Loop through all participants. If they are in the mix map they
  // were mixed.
  for (MixerAudioSourceList::const_iterator participant =
           _participantList.begin();
       participant != _participantList.end(); ++participant) {
    bool isMixed = false;
    for (std::map<int, MixerAudioSource*>::const_iterator it =
             mixedParticipantsMap.begin();
         it != mixedParticipantsMap.end(); ++it) {
      if (it->second == *participant) {
        isMixed = true;
        break;
      }
    }
    (*participant)->_mixHistory->SetIsMixed(isMixed);
  }
}

void NewAudioConferenceMixerImpl::ClearAudioFrameList(
    AudioFrameList* audioFrameList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "ClearAudioFrameList(audioFrameList)");
  for (AudioFrameList::iterator iter = audioFrameList->begin();
       iter != audioFrameList->end(); ++iter) {
    _audioFramePool->PushMemory(iter->frame);
  }
  audioFrameList->clear();
}

bool NewAudioConferenceMixerImpl::IsParticipantInList(
    const MixerAudioSource& participant,
    const MixerAudioSourceList& participantList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "IsParticipantInList(participant,participantList)");
  for (MixerAudioSourceList::const_iterator iter = participantList.begin();
       iter != participantList.end(); ++iter) {
    if (&participant == *iter) {
      return true;
    }
  }
  return false;
}

bool NewAudioConferenceMixerImpl::AddParticipantToList(
    MixerAudioSource* participant,
    MixerAudioSourceList* participantList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "AddParticipantToList(participant, participantList)");
  participantList->push_back(participant);
  // Make sure that the mixed status is correct for the new MixerAudioSource.
  participant->_mixHistory->ResetMixedStatus();
  return true;
}

bool NewAudioConferenceMixerImpl::RemoveParticipantFromList(
    MixerAudioSource* participant,
    MixerAudioSourceList* participantList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "RemoveParticipantFromList(participant, participantList)");
  for (MixerAudioSourceList::iterator iter = participantList->begin();
       iter != participantList->end(); ++iter) {
    if (*iter == participant) {
      participantList->erase(iter);
      // Participant is no longer mixed, reset to default.
      participant->_mixHistory->ResetMixedStatus();
      return true;
    }
  }
  return false;
}

int32_t NewAudioConferenceMixerImpl::MixFromList(
    AudioFrame* mixedAudio,
    const AudioFrameList& audioFrameList,
    int32_t id,
    bool use_limiter) {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
               "MixFromList(mixedAudio, audioFrameList)");
  if (audioFrameList.empty())
    return 0;

  uint32_t position = 0;

  if (audioFrameList.size() == 1) {
    mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
    mixedAudio->elapsed_time_ms_ =
        audioFrameList.front().frame->elapsed_time_ms_;
  } else {
    // TODO(wu): Issue 3390.
    // Audio frame timestamp is only supported in one channel case.
    mixedAudio->timestamp_ = 0;
    mixedAudio->elapsed_time_ms_ = -1;
  }

  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
       iter != audioFrameList.end(); ++iter) {
    if (position >= kMaximumAmountOfMixedParticipants) {
      WEBRTC_TRACE(
          kTraceMemory, kTraceAudioMixerServer, id,
          "Trying to mix more than max amount of mixed participants:%d!",
          kMaximumAmountOfMixedParticipants);
      // Assert and avoid crash
      RTC_DCHECK(false);
      position = 0;
    }
    if (!iter->muted) {
      MixFrames(mixedAudio, iter->frame, use_limiter);
    }

    position++;
  }

  return 0;
}

// TODO(andrew): consolidate this function with MixFromList.
int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
    AudioFrame* mixedAudio,
    const AudioFrameList& audioFrameList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "MixAnonomouslyFromList(mixedAudio, audioFrameList)");

  if (audioFrameList.empty())
    return 0;

  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
       iter != audioFrameList.end(); ++iter) {
    if (!iter->muted) {
      MixFrames(mixedAudio, iter->frame, use_limiter_);
    }
  }
  return 0;
}

bool NewAudioConferenceMixerImpl::LimitMixedAudio(
    AudioFrame* mixedAudio) const {
  if (!use_limiter_) {
    return true;
  }

  // Smoothly limit the mixed frame.
  const int error = _limiter->ProcessStream(mixedAudio);

  // And now we can safely restore the level. This procedure results in
  // some loss of resolution, deemed acceptable.
  //
  // It's possible to apply the gain in the AGC (with a target level of 0 dBFS
  // and compression gain of 6 dB). However, in the transition frame when this
  // is enabled (moving from one to two participants) it has the potential to
  // create discontinuities in the mixed frame.
  //
  // Instead we double the frame (with addition since left-shifting a
  // negative value is undefined).
  *mixedAudio += *mixedAudio;

  if (error != _limiter->kNoError) {
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                 "Error from AudioProcessing: %d", error);
    RTC_DCHECK(false);
    return false;
  }
  return true;
}
}  // namespace webrtc