Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(291)

Side by Side Diff: webrtc/modules/audio_mixer/source/audio_conference_mixer_impl.cc

Issue 2109133003: Added empty directory with myself as owner for new mixer. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Renamed to avoid compilation crashes and added build file. Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
#include "webrtc/modules/audio_mixer/include/audio_conference_mixer_defines.h"
#include "webrtc/modules/audio_mixer/source/audio_conference_mixer_impl.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
18
19 namespace webrtc {
20 namespace {
21
22 struct ParticipantFrameStruct {
23 ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m)
24 : participant(p), audioFrame(a), muted(m) {}
25 MixerAudioSource* participant;
26 AudioFrame* audioFrame;
27 bool muted;
28 };
29
30 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
31
32 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
33 // These effects are applied to |frame| itself prior to mixing. Assumes that
34 // |mixed_frame| always has at least as many channels as |frame|. Supports
35 // stereo at most.
36 //
37 // TODO(andrew): consider not modifying |frame| here.
38 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
39 assert(mixed_frame->num_channels_ >= frame->num_channels_);
40 if (use_limiter) {
41 // Divide by two to avoid saturation in the mixing.
42 // This is only meaningful if the limiter will be used.
43 *frame >>= 1;
44 }
45 if (mixed_frame->num_channels_ > frame->num_channels_) {
46 // We only support mono-to-stereo.
47 assert(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1);
48 AudioFrameOperations::MonoToStereo(frame);
49 }
50
51 *mixed_frame += *frame;
52 }
53
54 // Return the max number of channels from a |list| composed of AudioFrames.
55 size_t MaxNumChannels(const AudioFrameList* list) {
56 size_t max_num_channels = 1;
57 for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end();
58 ++iter) {
59 max_num_channels = std::max(max_num_channels, (*iter).frame->num_channels_);
60 }
61 return max_num_channels;
62 }
63
64 } // namespace
65
66 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}
67
// Releases the mixing-history record allocated in the constructor.
MixerAudioSource::~MixerAudioSource() {
  delete _mixHistory;
}
71
// Returns whether this source was included in the most recent mixing round,
// as recorded in _mixHistory by the mixer.
bool MixerAudioSource::IsMixed() const {
  return _mixHistory->IsMixed();
}
75
76 NewMixHistory::NewMixHistory() : _isMixed(0) {}
77
78 NewMixHistory::~NewMixHistory() {}
79
// True if the source was mixed during the current mixing iteration.
bool NewMixHistory::IsMixed() const {
  return _isMixed;
}
83
bool NewMixHistory::WasMixed() const {
  // Was mixed is the same as is mixed depending on perspective. This function
  // is for the perspective of NewAudioConferenceMixerImpl: the flag is only
  // rewritten once per Process() round, so at the start of a round "is mixed"
  // still holds the previous round's answer.
  return IsMixed();
}
89
// Records whether the source was mixed this iteration. Always returns 0;
// the int32_t return value exists to match the historical mixer interface.
int32_t NewMixHistory::SetIsMixed(const bool mixed) {
  _isMixed = mixed;
  return 0;
}
94
// Restores the default "not mixed" state. Called when a source is added to
// or removed from the mixer's participant lists.
void NewMixHistory::ResetMixedStatus() {
  _isMixed = false;
}
98
99 NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) {
100 NewAudioConferenceMixerImpl* mixer = new NewAudioConferenceMixerImpl(id);
101 if (!mixer->Init()) {
102 delete mixer;
103 return NULL;
104 }
105 return mixer;
106 }
107
// Member-initializing constructor only; all fallible setup (critical
// sections, limiter, frame pool) happens in Init(), which Create() calls.
NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
    : _id(id),
      _minimumMixingFreq(kLowestPossible),
      _mixReceiver(NULL),
      _outputFrequency(kDefaultFrequency),
      _sampleSize(0),
      _audioFramePool(NULL),
      _participantList(),
      _additionalParticipantList(),
      _numMixedParticipants(0),
      use_limiter_(true),
      _timeStamp(0),
      _timeScheduler(kProcessPeriodicityInMs),
      _processCalls(0) {}
122
123 bool NewAudioConferenceMixerImpl::Init() {
124 _crit.reset(CriticalSectionWrapper::CreateCriticalSection());
125 if (_crit.get() == NULL)
126 return false;
127
128 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection());
129 if (_cbCrit.get() == NULL)
130 return false;
131
132 Config config;
133 config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
134 _limiter.reset(AudioProcessing::Create(config));
135 if (!_limiter.get())
136 return false;
137
138 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
139 DEFAULT_AUDIO_FRAME_POOLSIZE);
140 if (_audioFramePool == NULL)
141 return false;
142
143 if (SetOutputFrequency(kDefaultFrequency) == -1)
144 return false;
145
146 if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
147 _limiter->kNoError)
148 return false;
149
150 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
151 // divide-by-2 but -7 is used instead to give a bit of headroom since the
152 // AGC is not a hard limiter.
153 if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError)
154 return false;
155
156 if (_limiter->gain_control()->set_compression_gain_db(0) !=
157 _limiter->kNoError)
158 return false;
159
160 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
161 return false;
162
163 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError)
164 return false;
165
166 return true;
167 }
168
// Tears down the frame memory pool. The assert presumes DeleteMemoryPool()
// nulls the pointer on success, i.e. that every frame popped during mixing
// was pushed back.
NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
  MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
  assert(_audioFramePool == NULL);
}
173
174 // Process should be called every kProcessPeriodicityInMs ms
175 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() {
176 int64_t timeUntilNextProcess = 0;
177 CriticalSectionScoped cs(_crit.get());
178 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
179 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
180 "failed in TimeToNextUpdate() call");
181 // Sanity check
182 assert(false);
183 return -1;
184 }
185 return timeUntilNextProcess;
186 }
187
188 void NewAudioConferenceMixerImpl::Process() {
189 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
190 {
191 CriticalSectionScoped cs(_crit.get());
192 assert(_processCalls == 0);
193 _processCalls++;
194
195 // Let the scheduler know that we are running one iteration.
196 _timeScheduler.UpdateScheduler();
197 }
198
199 AudioFrameList mixList;
200 AudioFrameList rampOutList;
201 AudioFrameList additionalFramesList;
202 std::map<int, MixerAudioSource*> mixedParticipantsMap;
203 {
204 CriticalSectionScoped cs(_cbCrit.get());
205
206 int32_t lowFreq = GetLowestMixingFrequency();
207 // SILK can run in 12 kHz and 24 kHz. These frequencies are not
208 // supported so use the closest higher frequency to not lose any
209 // information.
210 // TODO(henrike): this is probably more appropriate to do in
211 // GetLowestMixingFrequency().
212 if (lowFreq == 12000) {
213 lowFreq = 16000;
214 } else if (lowFreq == 24000) {
215 lowFreq = 32000;
216 }
217 if (lowFreq <= 0) {
218 CriticalSectionScoped cs(_crit.get());
219 _processCalls--;
220 return;
221 } else {
222 switch (lowFreq) {
223 case 8000:
224 if (OutputFrequency() != kNbInHz) {
225 SetOutputFrequency(kNbInHz);
226 }
227 break;
228 case 16000:
229 if (OutputFrequency() != kWbInHz) {
230 SetOutputFrequency(kWbInHz);
231 }
232 break;
233 case 32000:
234 if (OutputFrequency() != kSwbInHz) {
235 SetOutputFrequency(kSwbInHz);
236 }
237 break;
238 case 48000:
239 if (OutputFrequency() != kFbInHz) {
240 SetOutputFrequency(kFbInHz);
241 }
242 break;
243 default:
244 assert(false);
245
246 CriticalSectionScoped cs(_crit.get());
247 _processCalls--;
248 return;
249 }
250 }
251
252 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
253 &remainingParticipantsAllowedToMix);
254
255 GetAdditionalAudio(&additionalFramesList);
256 UpdateMixedStatus(mixedParticipantsMap);
257 }
258
259 // Get an AudioFrame for mixing from the memory pool.
260 AudioFrame* mixedAudio = NULL;
261 if (_audioFramePool->PopMemory(mixedAudio) == -1) {
262 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
263 "failed PopMemory() call");
264 assert(false);
265 return;
266 }
267
268 {
269 CriticalSectionScoped cs(_crit.get());
270
271 // TODO(henrike): it might be better to decide the number of channels
272 // with an API instead of dynamically.
273
274 // Find the max channels over all mixing lists.
275 const size_t num_mixed_channels =
276 std::max(MaxNumChannels(&mixList),
277 std::max(MaxNumChannels(&additionalFramesList),
278 MaxNumChannels(&rampOutList)));
279
280 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
281 AudioFrame::kNormalSpeech, AudioFrame::kVadPassive,
282 num_mixed_channels);
283
284 _timeStamp += static_cast<uint32_t>(_sampleSize);
285
286 // We only use the limiter if it supports the output sample rate and
287 // we're actually mixing multiple streams.
288 use_limiter_ = _numMixedParticipants > 1 &&
289 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
290
291 MixFromList(mixedAudio, mixList);
292 MixAnonomouslyFromList(mixedAudio, additionalFramesList);
293 MixAnonomouslyFromList(mixedAudio, rampOutList);
294
295 if (mixedAudio->samples_per_channel_ == 0) {
296 // Nothing was mixed, set the audio samples to silence.
297 mixedAudio->samples_per_channel_ = _sampleSize;
298 mixedAudio->Mute();
299 } else {
300 // Only call the limiter if we have something to mix.
301 LimitMixedAudio(mixedAudio);
302 }
303 }
304
305 {
306 CriticalSectionScoped cs(_cbCrit.get());
307 if (_mixReceiver != NULL) {
308 const AudioFrame** dummy = NULL;
309 _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0);
310 }
311 }
312
313 // Reclaim all outstanding memory.
314 _audioFramePool->PushMemory(mixedAudio);
315 ClearAudioFrameList(&mixList);
316 ClearAudioFrameList(&rampOutList);
317 ClearAudioFrameList(&additionalFramesList);
318 {
319 CriticalSectionScoped cs(_crit.get());
320 _processCalls--;
321 }
322 return;
323 }
324
325 int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback(
326 AudioMixerOutputReceiver* mixReceiver) {
327 CriticalSectionScoped cs(_cbCrit.get());
328 if (_mixReceiver != NULL) {
329 return -1;
330 }
331 _mixReceiver = mixReceiver;
332 return 0;
333 }
334
335 int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
336 CriticalSectionScoped cs(_cbCrit.get());
337 if (_mixReceiver == NULL) {
338 return -1;
339 }
340 _mixReceiver = NULL;
341 return 0;
342 }
343
// Sets the mixing output frequency and derives the per-Process() sample
// count (kProcessPeriodicityInMs worth of samples per channel). Always
// returns 0; the int32_t return matches the historical interface.
int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
    const Frequency& frequency) {
  CriticalSectionScoped cs(_crit.get());

  _outputFrequency = frequency;
  _sampleSize =
      static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000);

  return 0;
}
354
// Returns the current mixing output frequency (guarded by _crit, as it is
// updated from Process()).
NewAudioConferenceMixer::Frequency
NewAudioConferenceMixerImpl::OutputFrequency() const {
  CriticalSectionScoped cs(_crit.get());
  return _outputFrequency;
}
360
361 int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
362 MixerAudioSource* participant,
363 bool mixable) {
364 if (!mixable) {
365 // Anonymous participants are in a separate list. Make sure that the
366 // participant is in the _participantList if it is being mixed.
367 SetAnonymousMixabilityStatus(participant, false);
368 }
369 size_t numMixedParticipants;
370 {
371 CriticalSectionScoped cs(_cbCrit.get());
372 const bool isMixed = IsParticipantInList(*participant, _participantList);
373 // API must be called with a new state.
374 if (!(mixable ^ isMixed)) {
375 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
376 "Mixable is aready %s", isMixed ? "ON" : "off");
377 return -1;
378 }
379 bool success = false;
380 if (mixable) {
381 success = AddParticipantToList(participant, &_participantList);
382 } else {
383 success = RemoveParticipantFromList(participant, &_participantList);
384 }
385 if (!success) {
386 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
387 "failed to %s participant", mixable ? "add" : "remove");
388 assert(false);
389 return -1;
390 }
391
392 size_t numMixedNonAnonymous = _participantList.size();
393 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) {
394 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants;
395 }
396 numMixedParticipants =
397 numMixedNonAnonymous + _additionalParticipantList.size();
398 }
399 // A MixerAudioSource was added or removed. Make sure the scratch
400 // buffer is updated if necessary.
401 // Note: The scratch buffer may only be updated in Process().
402 CriticalSectionScoped cs(_crit.get());
403 _numMixedParticipants = numMixedParticipants;
404 return 0;
405 }
406
// Returns true if |participant| is currently in the regular mixing list
// (anonymous participants are tracked separately).
bool NewAudioConferenceMixerImpl::MixabilityStatus(
    const MixerAudioSource& participant) const {
  CriticalSectionScoped cs(_cbCrit.get());
  return IsParticipantInList(participant, _participantList);
}
412
// Moves |participant| between the regular list and the anonymous
// (always-mixed, never-counted) list. Transitions:
//   anonymous -> anonymous : no-op (0)
//   anonymous -> regular   : move back to _participantList
//   regular   -> anonymous : move to _additionalParticipantList
//   unknown   -> anonymous : error (-1); must be registered first
int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
    MixerAudioSource* participant,
    bool anonymous) {
  CriticalSectionScoped cs(_cbCrit.get());
  if (IsParticipantInList(*participant, _additionalParticipantList)) {
    if (anonymous) {
      return 0;
    }
    if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) {
      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                   "unable to remove participant from anonymous list");
      assert(false);
      return -1;
    }
    return AddParticipantToList(participant, &_participantList) ? 0 : -1;
  }
  if (!anonymous) {
    // Not anonymous now and not asked to become anonymous: nothing to do.
    return 0;
  }
  // Becoming anonymous requires the participant to already be registered in
  // the regular list; the removal doubles as that membership check.
  const bool mixable =
      RemoveParticipantFromList(participant, &_participantList);
  if (!mixable) {
    WEBRTC_TRACE(
        kTraceWarning, kTraceAudioMixerServer, _id,
        "participant must be registered before turning it into anonymous");
    // Setting anonymous status is only possible if MixerAudioSource is
    // already registered.
    return -1;
  }
  return AddParticipantToList(participant, &_additionalParticipantList) ? 0
                                                                        : -1;
}
445
// Returns true if |participant| is currently in the anonymous mixing list.
bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
    const MixerAudioSource& participant) const {
  CriticalSectionScoped cs(_cbCrit.get());
  return IsParticipantInList(participant, _additionalParticipantList);
}
451
// Sets a floor on the mixing frequency chosen by GetLowestMixingFrequency().
// 12/24 kHz are snapped up to 16/32 kHz so no information is lost; any other
// value outside the supported set is rejected with -1.
int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) {
  // Make sure that only allowed sampling frequencies are used. Use closest
  // higher sampling frequency to avoid losing information.
  if (static_cast<int>(freq) == 12000) {
    freq = kWbInHz;
  } else if (static_cast<int>(freq) == 24000) {
    freq = kSwbInHz;
  }

  // NOTE(review): kFbInHz (48 kHz) is handled by Process() and
  // SetOutputFrequency() but is rejected here — confirm whether excluding it
  // from the accepted set is intentional.
  if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
      (freq == kLowestPossible)) {
    _minimumMixingFreq = freq;
    return 0;
  } else {
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                 "SetMinimumMixingFrequency incorrect frequency: %i", freq);
    assert(false);
    return -1;
  }
}
472
473 // Check all AudioFrames that are to be mixed. The highest sampling frequency
474 // found is the lowest that can be used without losing information.
475 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const {
476 const int participantListFrequency =
477 GetLowestMixingFrequencyFromList(_participantList);
478 const int anonymousListFrequency =
479 GetLowestMixingFrequencyFromList(_additionalParticipantList);
480 const int highestFreq = (participantListFrequency > anonymousListFrequency)
481 ? participantListFrequency
482 : anonymousListFrequency;
483 // Check if the user specified a lowest mixing frequency.
484 if (_minimumMixingFreq != kLowestPossible) {
485 if (_minimumMixingFreq > highestFreq) {
486 return _minimumMixingFreq;
487 }
488 }
489 return highestFreq;
490 }
491
492 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
493 const MixerAudioSourceList& mixList) const {
494 int32_t highestFreq = 8000;
495 for (MixerAudioSourceList::const_iterator iter = mixList.begin();
496 iter != mixList.end(); ++iter) {
497 const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
498 if (neededFrequency > highestFreq) {
499 highestFreq = neededFrequency;
500 }
501 }
502 return highestFreq;
503 }
504
// Selects which participants' frames get mixed this round.
//
// Outputs:
//   mixList            - frames to mix (appended to; may already hold
//                        entries from the caller).
//   rampOutList        - frames from participants that were mixed last round
//                        but lost their slot; ramped out to avoid clicks.
//   mixParticipantList - map frame id -> source for every mixed frame.
//   maxAudioFrameCounter - in: slots available; out: slots left over.
//
// Active (VAD) participants win slots first, highest energy preferred; the
// remaining slots go to passive participants, those mixed last round first.
// Every AudioFrame popped from _audioFramePool is either handed to an output
// list or pushed back to the pool before returning.
void NewAudioConferenceMixerImpl::UpdateToMix(
    AudioFrameList* mixList,
    AudioFrameList* rampOutList,
    std::map<int, MixerAudioSource*>* mixParticipantList,
    size_t* maxAudioFrameCounter) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
               *maxAudioFrameCounter);
  const size_t mixListStartSize = mixList->size();
  AudioFrameList activeList;
  // Struct needed by the passive lists to keep track of which AudioFrame
  // belongs to which MixerAudioSource.
  ParticipantFrameStructList passiveWasNotMixedList;
  ParticipantFrameStructList passiveWasMixedList;
  for (MixerAudioSourceList::const_iterator participant =
           _participantList.begin();
       participant != _participantList.end(); ++participant) {
    // Stop keeping track of passive participants if there are already
    // enough participants available (they wont be mixed anyway).
    bool mustAddToPassiveList =
        (*maxAudioFrameCounter >
         (activeList.size() + passiveWasMixedList.size() +
          passiveWasNotMixedList.size()));

    bool wasMixed = false;
    wasMixed = (*participant)->_mixHistory->WasMixed();
    // Pop a frame from the pool for this participant to fill in.
    AudioFrame* audioFrame = NULL;
    if (_audioFramePool->PopMemory(audioFrame) == -1) {
      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
                   "failed PopMemory() call");
      assert(false);
      return;
    }
    audioFrame->sample_rate_hz_ = _outputFrequency;

    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "failed to GetAudioFrameWithMuted() from participant");
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
    if (_participantList.size() != 1) {
      // TODO(wu): Issue 3390, add support for multiple participants case.
      audioFrame->ntp_time_ms_ = -1;
    }

    // TODO(henrike): this assert triggers in some test cases where SRTP is
    // used which prevents NetEQ from making a VAD. Temporarily disable this
    // assert until the problem is fixed on a higher level.
    // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
    if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "invalid VAD state from participant");
    }

    if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
      // Newly active participants are faded in to avoid clicks.
      if (!wasMixed && !muted) {
        RampIn(*audioFrame);
      }

      if (activeList.size() >= *maxAudioFrameCounter) {
        // There are already more active participants than should be
        // mixed. Only keep the ones with the highest energy.
        AudioFrameList::iterator replaceItem;
        // NOTE(review): |muted| here is the *candidate's* flag but it is
        // also applied when scoring the frames already in |activeList|
        // below — confirm this asymmetry is intended.
        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);

        bool found_replace_item = false;
        for (AudioFrameList::iterator iter = activeList.begin();
             iter != activeList.end(); ++iter) {
          const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
          if (energy < lowestEnergy) {
            replaceItem = iter;
            lowestEnergy = energy;
            found_replace_item = true;
          }
        }
        if (found_replace_item) {
          RTC_DCHECK(!muted);  // Cannot replace with a muted frame.
          FrameAndMuteInfo replaceFrame = *replaceItem;

          bool replaceWasMixed = false;
          std::map<int, MixerAudioSource*>::const_iterator it =
              mixParticipantList->find(replaceFrame.frame->id_);

          // When a frame is pushed to |activeList| it is also pushed
          // to mixParticipantList with the frame's id. This means
          // that the Find call above should never fail.
          assert(it != mixParticipantList->end());
          replaceWasMixed = it->second->_mixHistory->WasMixed();

          mixParticipantList->erase(replaceFrame.frame->id_);
          activeList.erase(replaceItem);

          activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
          (*mixParticipantList)[audioFrame->id_] = *participant;
          assert(mixParticipantList->size() <=
                 kMaximumAmountOfMixedParticipants);

          if (replaceWasMixed) {
            // The displaced participant was audible last round: ramp it
            // out instead of dropping it abruptly.
            if (!replaceFrame.muted) {
              RampOut(*replaceFrame.frame);
            }
            rampOutList->push_back(replaceFrame);
            assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
          } else {
            _audioFramePool->PushMemory(replaceFrame.frame);
          }
        } else {
          // Candidate loses; if it was mixed last round, ramp it out.
          if (wasMixed) {
            if (!muted) {
              RampOut(*audioFrame);
            }
            rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
            assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
          } else {
            _audioFramePool->PushMemory(audioFrame);
          }
        }
      } else {
        // Room left among the active slots: take one.
        activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
        (*mixParticipantList)[audioFrame->id_] = *participant;
        assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
      }
    } else {
      // Passive (no voice activity): remember the frame in case there are
      // not enough active participants to fill all slots.
      if (wasMixed) {
        ParticipantFrameStruct* part_struct =
            new ParticipantFrameStruct(*participant, audioFrame, muted);
        passiveWasMixedList.push_back(part_struct);
      } else if (mustAddToPassiveList) {
        if (!muted) {
          RampIn(*audioFrame);
        }
        ParticipantFrameStruct* part_struct =
            new ParticipantFrameStruct(*participant, audioFrame, muted);
        passiveWasNotMixedList.push_back(part_struct);
      } else {
        _audioFramePool->PushMemory(audioFrame);
      }
    }
  }
  assert(activeList.size() <= *maxAudioFrameCounter);
  // At this point it is known which participants should be mixed. Transfer
  // this information to this functions output parameters.
  for (AudioFrameList::const_iterator iter = activeList.begin();
       iter != activeList.end(); ++iter) {
    mixList->push_back(*iter);
  }
  activeList.clear();
  // Always mix a constant number of AudioFrames. If there aren't enough
  // active participants mix passive ones. Starting with those that was mixed
  // last iteration.
  for (ParticipantFrameStructList::const_iterator iter =
           passiveWasMixedList.begin();
       iter != passiveWasMixedList.end(); ++iter) {
    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
      assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
    } else {
      _audioFramePool->PushMemory((*iter)->audioFrame);
    }
    delete *iter;
  }
  // And finally the ones that have not been mixed for a while.
  for (ParticipantFrameStructList::const_iterator iter =
           passiveWasNotMixedList.begin();
       iter != passiveWasNotMixedList.end(); ++iter) {
    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
      assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
    } else {
      _audioFramePool->PushMemory((*iter)->audioFrame);
    }
    delete *iter;
  }
  assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
  *maxAudioFrameCounter += mixListStartSize - mixList->size();
}
686
687 void NewAudioConferenceMixerImpl::GetAdditionalAudio(
688 AudioFrameList* additionalFramesList) const {
689 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
690 "GetAdditionalAudio(additionalFramesList)");
691 // The GetAudioFrameWithMuted() callback may result in the participant being
692 // removed from additionalParticipantList_. If that happens it will
693 // invalidate any iterators. Create a copy of the participants list such
694 // that the list of participants can be traversed safely.
695 MixerAudioSourceList additionalParticipantList;
696 additionalParticipantList.insert(additionalParticipantList.begin(),
697 _additionalParticipantList.begin(),
698 _additionalParticipantList.end());
699
700 for (MixerAudioSourceList::const_iterator participant =
701 additionalParticipantList.begin();
702 participant != additionalParticipantList.end(); ++participant) {
703 AudioFrame* audioFrame = NULL;
704 if (_audioFramePool->PopMemory(audioFrame) == -1) {
705 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
706 "failed PopMemory() call");
707 assert(false);
708 return;
709 }
710 audioFrame->sample_rate_hz_ = _outputFrequency;
711 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
712 if (ret == MixerAudioSource::AudioFrameInfo::kError) {
713 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
714 "failed to GetAudioFrameWithMuted() from participant");
715 _audioFramePool->PushMemory(audioFrame);
716 continue;
717 }
718 if (audioFrame->samples_per_channel_ == 0) {
719 // Empty frame. Don't use it.
720 _audioFramePool->PushMemory(audioFrame);
721 continue;
722 }
723 additionalFramesList->push_back(FrameAndMuteInfo(
724 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
725 }
726 }
727
728 void NewAudioConferenceMixerImpl::UpdateMixedStatus(
729 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const {
730 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
731 "UpdateMixedStatus(mixedParticipantsMap)");
732 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
733
734 // Loop through all participants. If they are in the mix map they
735 // were mixed.
736 for (MixerAudioSourceList::const_iterator participant =
737 _participantList.begin();
738 participant != _participantList.end(); ++participant) {
739 bool isMixed = false;
740 for (std::map<int, MixerAudioSource*>::const_iterator it =
741 mixedParticipantsMap.begin();
742 it != mixedParticipantsMap.end(); ++it) {
743 if (it->second == *participant) {
744 isMixed = true;
745 break;
746 }
747 }
748 (*participant)->_mixHistory->SetIsMixed(isMixed);
749 }
750 }
751
752 void NewAudioConferenceMixerImpl::ClearAudioFrameList(
753 AudioFrameList* audioFrameList) const {
754 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
755 "ClearAudioFrameList(audioFrameList)");
756 for (AudioFrameList::iterator iter = audioFrameList->begin();
757 iter != audioFrameList->end(); ++iter) {
758 _audioFramePool->PushMemory(iter->frame);
759 }
760 audioFrameList->clear();
761 }
762
763 bool NewAudioConferenceMixerImpl::IsParticipantInList(
764 const MixerAudioSource& participant,
765 const MixerAudioSourceList& participantList) const {
766 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
767 "IsParticipantInList(participant,participantList)");
768 for (MixerAudioSourceList::const_iterator iter = participantList.begin();
769 iter != participantList.end(); ++iter) {
770 if (&participant == *iter) {
771 return true;
772 }
773 }
774 return false;
775 }
776
// Appends |participant| to |participantList| and resets its mix history so
// the new entry starts from the default "not mixed" state. Always succeeds.
bool NewAudioConferenceMixerImpl::AddParticipantToList(
    MixerAudioSource* participant,
    MixerAudioSourceList* participantList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
               "AddParticipantToList(participant, participantList)");
  participantList->push_back(participant);
  // Make sure that the mixed status is correct for new MixerAudioSource.
  participant->_mixHistory->ResetMixedStatus();
  return true;
}
787
788 bool NewAudioConferenceMixerImpl::RemoveParticipantFromList(
789 MixerAudioSource* participant,
790 MixerAudioSourceList* participantList) const {
791 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
792 "RemoveParticipantFromList(participant, participantList)");
793 for (MixerAudioSourceList::iterator iter = participantList->begin();
794 iter != participantList->end(); ++iter) {
795 if (*iter == participant) {
796 participantList->erase(iter);
797 // Participant is no longer mixed, reset to default.
798 participant->_mixHistory->ResetMixedStatus();
799 return true;
800 }
801 }
802 return false;
803 }
804
805 int32_t NewAudioConferenceMixerImpl::MixFromList(
806 AudioFrame* mixedAudio,
807 const AudioFrameList& audioFrameList) const {
808 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
809 "MixFromList(mixedAudio, audioFrameList)");
810 if (audioFrameList.empty())
811 return 0;
812
813 uint32_t position = 0;
814
815 if (_numMixedParticipants == 1) {
816 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
817 mixedAudio->elapsed_time_ms_ =
818 audioFrameList.front().frame->elapsed_time_ms_;
819 } else {
820 // TODO(wu): Issue 3390.
821 // Audio frame timestamp is only supported in one channel case.
822 mixedAudio->timestamp_ = 0;
823 mixedAudio->elapsed_time_ms_ = -1;
824 }
825
826 for (AudioFrameList::const_iterator iter = audioFrameList.begin();
827 iter != audioFrameList.end(); ++iter) {
828 if (position >= kMaximumAmountOfMixedParticipants) {
829 WEBRTC_TRACE(
830 kTraceMemory, kTraceAudioMixerServer, _id,
831 "Trying to mix more than max amount of mixed participants:%d!",
832 kMaximumAmountOfMixedParticipants);
833 // Assert and avoid crash
834 assert(false);
835 position = 0;
836 }
837 if (!iter->muted) {
838 MixFrames(mixedAudio, iter->frame, use_limiter_);
839 }
840
841 position++;
842 }
843
844 return 0;
845 }
846
847 // TODO(andrew): consolidate this function with MixFromList.
848 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
849 AudioFrame* mixedAudio,
850 const AudioFrameList& audioFrameList) const {
851 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
852 "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
853
854 if (audioFrameList.empty())
855 return 0;
856
857 for (AudioFrameList::const_iterator iter = audioFrameList.begin();
858 iter != audioFrameList.end(); ++iter) {
859 if (!iter->muted) {
860 MixFrames(mixedAudio, iter->frame, use_limiter_);
861 }
862 }
863 return 0;
864 }
865
// Applies the AGC-based limiter to |mixedAudio| and restores the level that
// MixFrames() halved. No-op (returns true) when the limiter is disabled.
// Note the ordering: the frame is doubled even if ProcessStream() reported
// an error, matching the halving that already happened during mixing.
bool NewAudioConferenceMixerImpl::LimitMixedAudio(
    AudioFrame* mixedAudio) const {
  if (!use_limiter_) {
    return true;
  }

  // Smoothly limit the mixed frame.
  const int error = _limiter->ProcessStream(mixedAudio);

  // And now we can safely restore the level. This procedure results in
  // some loss of resolution, deemed acceptable.
  //
  // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
  // and compression gain of 6 dB). However, in the transition frame when this
  // is enabled (moving from one to two participants) it has the potential to
  // create discontinuities in the mixed frame.
  //
  // Instead we double the frame (with addition since left-shifting a
  // negative value is undefined).
  *mixedAudio += *mixedAudio;

  if (error != _limiter->kNoError) {
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                 "Error from AudioProcessing: %d", error);
    assert(false);
    return false;
  }
  return true;
}
895 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698