/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"

#include <algorithm>
#include <functional>
#include <map>
#include <vector>

#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/utility.h"

namespace webrtc {
namespace {

class SourceFrame {
 public:
  SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before)
      : audio_source_(p),
        audio_frame_(a),
        muted_(m),
        energy_(0),
        was_mixed_before_(was_mixed_before) {
    if (!muted_) {
      energy_ = CalculateEnergy(*a);
    }
  }

  // a.shouldMixBefore(b) is used to select mixer participants.
  bool shouldMixBefore(const SourceFrame& other) const {
    if (muted_ != other.muted_) {
      return other.muted_;
    }

    auto our_activity = audio_frame_->vad_activity_;
    auto other_activity = other.audio_frame_->vad_activity_;

    if (our_activity != other_activity) {
      return our_activity == AudioFrame::kVadActive;
    }

    return energy_ > other.energy_;
  }

  MixerAudioSource* audio_source_;
  AudioFrame* audio_frame_;
  bool muted_;
  uint32_t energy_;
  bool was_mixed_before_;
};
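
// Example of the ordering shouldMixBefore() induces on hypothetical frames:
// unmuted sorts before muted, VAD-active before VAD-passive, and higher
// energy first within each group. So {active, energy 50}, {active, energy
// 10}, {passive, energy 90}, {muted} is the sorted order: VAD activity
// dominates raw energy.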

// Remixes a frame between stereo and mono.
void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
  if (frame->num_channels_ == 1 && number_of_channels == 2) {
    AudioFrameOperations::MonoToStereo(frame);
  } else if (frame->num_channels_ == 2 && number_of_channels == 1) {
    AudioFrameOperations::StereoToMono(frame);
  }
}

// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
// These effects are applied to |frame| itself prior to mixing. Assumes that
// |mixed_frame| always has at least as many channels as |frame|. Supports
// stereo at most.
//
// TODO(andrew): consider not modifying |frame| here.
void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
  RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_);
  if (use_limiter) {
    // Divide by two to avoid saturation in the mixing.
    // This is only meaningful if the limiter will be used.
    *frame >>= 1;
  }
  RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_);
  *mixed_frame += *frame;
}
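
// Note on levels: the halving above is undone in LimitMixedAudio(), which
// doubles the mixed frame after the limiter has run, so overall gain through
// the mixer stays near unity and the halving only creates limiter headroom.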

}  // namespace

MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}

MixerAudioSource::~MixerAudioSource() {
  delete _mixHistory;
}

bool MixerAudioSource::IsMixed() const {
  return _mixHistory->IsMixed();
}

NewMixHistory::NewMixHistory() : is_mixed_(false) {}

NewMixHistory::~NewMixHistory() {}

bool NewMixHistory::IsMixed() const {
  return is_mixed_;
}

bool NewMixHistory::WasMixed() const {
  // WasMixed() returns the same thing as IsMixed(); which name fits depends
  // on the caller's perspective. This one reads naturally from the
  // perspective of NewAudioConferenceMixerImpl.
  return IsMixed();
}

int32_t NewMixHistory::SetIsMixed(const bool mixed) {
  is_mixed_ = mixed;
  return 0;
}

void NewMixHistory::ResetMixedStatus() {
  is_mixed_ = false;
}

NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) {
  NewAudioConferenceMixerImpl* mixer = new NewAudioConferenceMixerImpl(id);
  if (!mixer->Init()) {
    delete mixer;
    return NULL;
  }
  return mixer;
}

NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
    : id_(id),
      output_frequency_(kDefaultFrequency),
      sample_size_(0),
      audio_source_list_(),
      additional_audio_source_list_(),
      num_mixed_audio_sources_(0),
      use_limiter_(true),
      time_stamp_(0) {
  thread_checker_.DetachFromThread();
}

bool NewAudioConferenceMixerImpl::Init() {
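  // Locking note (inferred from usage in this file): |cb_crit_| guards the
  // audio source lists, while |crit_| guards mixing state such as the output
  // frequency and |num_mixed_audio_sources_|.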
  crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
  if (crit_.get() == NULL)
    return false;

  cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
  if (cb_crit_.get() == NULL)
    return false;

  Config config;
  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
  limiter_.reset(AudioProcessing::Create(config));
  if (!limiter_.get())
    return false;

  if (SetOutputFrequency(kDefaultFrequency) == -1)
    return false;

  if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) !=
      limiter_->kNoError)
    return false;

  // We smoothly limit the mixed frame to -7 dBFS. -6 would correspond to the
  // divide-by-2 but -7 is used instead to give a bit of headroom, since the
  // AGC is not a hard limiter.
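  // (Halving the amplitude attenuates by 20*log10(2), roughly 6.02 dB, so a
  // -7 dBFS target leaves about 1 dB of margin over the exact halving.)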
  if (limiter_->gain_control()->set_target_level_dbfs(7) != limiter_->kNoError)
    return false;

  if (limiter_->gain_control()->set_compression_gain_db(0) !=
      limiter_->kNoError)
    return false;

  if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError)
    return false;

  if (limiter_->gain_control()->Enable(true) != limiter_->kNoError)
    return false;

  return true;
}

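// Usage sketch (hypothetical caller, not part of this file), assuming some
// concrete MixerAudioSource implementation |source|:
//
//   NewAudioConferenceMixer* mixer = NewAudioConferenceMixer::Create(0);
//   mixer->SetMixabilityStatus(&source, true);
//   AudioFrame mixed;
//   mixer->Mix(48000, 2, &mixed);  // Mix() must always run on one thread.
//
// Mix() selects frames under cb_crit_, remixes them to the requested channel
// count, mixes them with limiter headroom, adds anonymous sources, and then
// either mutes the output (nothing was mixed) or runs the limiter.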
void NewAudioConferenceMixerImpl::Mix(int sample_rate,
                                      size_t number_of_channels,
                                      AudioFrame* audio_frame_for_mixing) {
  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
  size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  AudioFrameList mixList;
  AudioFrameList additionalFramesList;
  std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
  {
    CriticalSectionScoped cs(cb_crit_.get());
    Frequency mixing_frequency;

    switch (sample_rate) {
      case 8000:
        mixing_frequency = kNbInHz;
        break;
      case 16000:
        mixing_frequency = kWbInHz;
        break;
      case 32000:
        mixing_frequency = kSwbInHz;
        break;
      case 48000:
        mixing_frequency = kFbInHz;
        break;
      default:
        RTC_NOTREACHED();
        return;
    }

    if (OutputFrequency() != mixing_frequency) {
      SetOutputFrequency(mixing_frequency);
    }

    mixList = UpdateToMix(remainingAudioSourcesAllowedToMix);
    remainingAudioSourcesAllowedToMix -= mixList.size();
    GetAdditionalAudio(&additionalFramesList);
  }

  for (FrameAndMuteInfo& frame_and_mute : mixList) {
    RemixFrame(frame_and_mute.frame, number_of_channels);
  }
  for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) {
    RemixFrame(frame_and_mute.frame, number_of_channels);
  }

  audio_frame_for_mixing->UpdateFrame(
      -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
      AudioFrame::kVadPassive, number_of_channels);

  time_stamp_ += static_cast<uint32_t>(sample_size_);

  // We only use the limiter if it supports the output sample rate and
  // we're actually mixing multiple streams.
  use_limiter_ = num_mixed_audio_sources_ > 1 &&
                 output_frequency_ <= AudioProcessing::kMaxNativeSampleRateHz;

  MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_);

  {
    CriticalSectionScoped cs(crit_.get());
    MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);

    if (audio_frame_for_mixing->samples_per_channel_ == 0) {
      // Nothing was mixed, set the audio samples to silence.
      audio_frame_for_mixing->samples_per_channel_ = sample_size_;
      audio_frame_for_mixing->Mute();
    } else {
      // Only call the limiter if we have something to mix.
      LimitMixedAudio(audio_frame_for_mixing);
    }
  }
}

int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
    const Frequency& frequency) {
  CriticalSectionScoped cs(crit_.get());

  output_frequency_ = frequency;
  sample_size_ =
      static_cast<size_t>((output_frequency_ * kProcessPeriodicityInMs) / 1000);
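  // E.g., assuming kProcessPeriodicityInMs is 10 ms: at 48000 Hz this gives
  // 48000 * 10 / 1000 = 480 samples per channel per mixing pass.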

  return 0;
}

NewAudioConferenceMixer::Frequency
NewAudioConferenceMixerImpl::OutputFrequency() const {
  CriticalSectionScoped cs(crit_.get());
  return output_frequency_;
}

int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
    MixerAudioSource* audio_source,
    bool mixable) {
  if (!mixable) {
    // Anonymous audio sources are in a separate list. Make sure that the
    // audio source is in audio_source_list_ if it is being mixed.
    SetAnonymousMixabilityStatus(audio_source, false);
  }
  size_t numMixedAudioSources;
  {
    CriticalSectionScoped cs(cb_crit_.get());
    const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
    // API must be called with a new state.
    if (!(mixable ^ isMixed)) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "Mixable is already %s", isMixed ? "on" : "off");
      return -1;
    }
    bool success = false;
    if (mixable) {
      success = AddAudioSourceToList(audio_source, &audio_source_list_);
    } else {
      success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
    }
    if (!success) {
      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                   "failed to %s audio_source", mixable ? "add" : "remove");
      RTC_NOTREACHED();
      return -1;
    }

    size_t numMixedNonAnonymous = audio_source_list_.size();
    if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
      numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
    }
    numMixedAudioSources =
        numMixedNonAnonymous + additional_audio_source_list_.size();
  }
  // A MixerAudioSource was added or removed. Update num_mixed_audio_sources_,
  // which is read during mixing, under the mixing lock.
  CriticalSectionScoped cs(crit_.get());
  num_mixed_audio_sources_ = numMixedAudioSources;
  return 0;
}

bool NewAudioConferenceMixerImpl::MixabilityStatus(
    const MixerAudioSource& audio_source) const {
  CriticalSectionScoped cs(cb_crit_.get());
  return IsAudioSourceInList(audio_source, audio_source_list_);
}

int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
    MixerAudioSource* audio_source,
    bool anonymous) {
  CriticalSectionScoped cs(cb_crit_.get());
  if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
    if (anonymous) {
      return 0;
    }
    if (!RemoveAudioSourceFromList(audio_source,
                                   &additional_audio_source_list_)) {
      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                   "unable to remove audio_source from anonymous list");
      RTC_NOTREACHED();
      return -1;
    }
    return AddAudioSourceToList(audio_source, &audio_source_list_) ? 0 : -1;
  }
  if (!anonymous) {
    return 0;
  }
  const bool mixable =
      RemoveAudioSourceFromList(audio_source, &audio_source_list_);
  if (!mixable) {
    WEBRTC_TRACE(
        kTraceWarning, kTraceAudioMixerServer, id_,
        "audio_source must be registered before it can be made anonymous");
    // Setting anonymous status is only possible if the MixerAudioSource is
    // already registered.
    return -1;
  }
  return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
             ? 0
             : -1;
}

bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
    const MixerAudioSource& audio_source) const {
  CriticalSectionScoped cs(cb_crit_.get());
  return IsAudioSourceInList(audio_source, additional_audio_source_list_);
}

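// Summary (derived from the code below): UpdateToMix() asks every registered
// source for a frame, ranks the frames with SourceFrame::shouldMixBefore(),
// and admits at most |maxAudioFrameCounter| unmuted frames. Frames entering
// the mix are ramped in; frames that were mixed last time but dropped out
// are ramped out, to avoid audible discontinuities.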
AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
    size_t maxAudioFrameCounter) const {
  AudioFrameList result;
  std::vector<SourceFrame> audioSourceMixingDataList;

  // Get audio source audio and put it in the struct vector.
  for (MixerAudioSource* audio_source : audio_source_list_) {
    auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
        id_, static_cast<int>(output_frequency_));

    auto audio_frame_info = audio_frame_with_info.audio_frame_info;
    AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;

    if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "failed to GetAudioFrameWithMuted() from audio source");
      continue;
    }
    audioSourceMixingDataList.emplace_back(
        audio_source, audio_source_audio_frame,
        audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
        audio_source->_mixHistory->WasMixed());
  }

  // Sort frames by the mixing-priority order defined by shouldMixBefore().
  std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
            std::mem_fn(&SourceFrame::shouldMixBefore));

  // Go through the list in order and put frames in |result|.
  for (SourceFrame& p : audioSourceMixingDataList) {
    // Filter muted.
    if (p.muted_) {
      p.audio_source_->_mixHistory->SetIsMixed(false);
      continue;
    }

    // Add the frame to the result vector for mixing.
    bool is_mixed = false;
    if (maxAudioFrameCounter > 0) {
      --maxAudioFrameCounter;
      if (!p.was_mixed_before_) {
        RampIn(*p.audio_frame_);
      }
      result.emplace_back(p.audio_frame_, false);
      is_mixed = true;
    }

    // Ramp out unmuted frames that just dropped out of the mix.
    if (p.was_mixed_before_ && !is_mixed) {
      RampOut(*p.audio_frame_);
      result.emplace_back(p.audio_frame_, false);
    }

    p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
  }
  return result;
}

void NewAudioConferenceMixerImpl::GetAdditionalAudio(
    AudioFrameList* additionalFramesList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "GetAdditionalAudio(additionalFramesList)");
  // The GetAudioFrameWithMuted() callback may result in the audio source
  // being removed from additional_audio_source_list_. If that happens it
  // will invalidate any iterators. Create a copy of the audio source list so
  // that it can be traversed safely.
  MixerAudioSourceList additionalAudioSourceList;
  additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
                                   additional_audio_source_list_.begin(),
                                   additional_audio_source_list_.end());

  for (MixerAudioSourceList::const_iterator audio_source =
           additionalAudioSourceList.begin();
       audio_source != additionalAudioSourceList.end(); ++audio_source) {
    auto audio_frame_with_info =
        (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_);
    auto ret = audio_frame_with_info.audio_frame_info;
    AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "failed to GetAudioFrameWithMuted() from audio_source");
      continue;
    }
    if (audio_frame->samples_per_channel_ == 0) {
      // Empty frame. Don't use it.
      continue;
    }
    additionalFramesList->push_back(FrameAndMuteInfo(
        audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
  }
}
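
// Note (derived from the code above): anonymous sources bypass the ranking
// in UpdateToMix() and are not counted against
// kMaximumAmountOfMixedAudioSources; every non-empty anonymous frame is
// mixed.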

bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
    const MixerAudioSource& audio_source,
    const MixerAudioSourceList& audioSourceList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "IsAudioSourceInList(audio_source,audioSourceList)");
  return std::find(audioSourceList.begin(), audioSourceList.end(),
                   &audio_source) != audioSourceList.end();
}

bool NewAudioConferenceMixerImpl::AddAudioSourceToList(
    MixerAudioSource* audio_source,
    MixerAudioSourceList* audioSourceList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "AddAudioSourceToList(audio_source, audioSourceList)");
  audioSourceList->push_back(audio_source);
  // Make sure that the mixed status is correct for the new MixerAudioSource.
  audio_source->_mixHistory->ResetMixedStatus();
  return true;
}

bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList(
    MixerAudioSource* audio_source,
    MixerAudioSourceList* audioSourceList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "RemoveAudioSourceFromList(audio_source, audioSourceList)");
  auto iter =
      std::find(audioSourceList->begin(), audioSourceList->end(), audio_source);
  if (iter != audioSourceList->end()) {
    audioSourceList->erase(iter);
    // AudioSource is no longer mixed, reset to default.
    audio_source->_mixHistory->ResetMixedStatus();
    return true;
  } else {
    return false;
  }
}

int32_t NewAudioConferenceMixerImpl::MixFromList(
    AudioFrame* mixedAudio,
    const AudioFrameList& audioFrameList,
    int32_t id,
    bool use_limiter) {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
               "MixFromList(mixedAudio, audioFrameList)");
  if (audioFrameList.empty())
    return 0;

  uint32_t position = 0;

  if (audioFrameList.size() == 1) {
    mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
    mixedAudio->elapsed_time_ms_ =
        audioFrameList.front().frame->elapsed_time_ms_;
  } else {
    // TODO(wu): Issue 3390.
    // Audio frame timestamp is only supported in one channel case.
    mixedAudio->timestamp_ = 0;
    mixedAudio->elapsed_time_ms_ = -1;
  }

  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
       iter != audioFrameList.end(); ++iter) {
    if (position >= kMaximumAmountOfMixedAudioSources) {
      WEBRTC_TRACE(
          kTraceMemory, kTraceAudioMixerServer, id,
          "Trying to mix more than max amount of mixed audio sources:%d!",
          kMaximumAmountOfMixedAudioSources);
      // Assert and avoid crash.
      RTC_NOTREACHED();
      position = 0;
    }
    if (!iter->muted) {
      MixFrames(mixedAudio, iter->frame, use_limiter);
    }

    position++;
  }

  return 0;
}

// TODO(andrew): consolidate this function with MixFromList.
int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
    AudioFrame* mixedAudio,
    const AudioFrameList& audioFrameList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "MixAnonomouslyFromList(mixedAudio, audioFrameList)");

  if (audioFrameList.empty())
    return 0;

  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
       iter != audioFrameList.end(); ++iter) {
    if (!iter->muted) {
      MixFrames(mixedAudio, iter->frame, use_limiter_);
    }
  }
  return 0;
}

bool NewAudioConferenceMixerImpl::LimitMixedAudio(
    AudioFrame* mixedAudio) const {
  if (!use_limiter_) {
    return true;
  }

  // Smoothly limit the mixed frame.
  const int error = limiter_->ProcessStream(mixedAudio);

  // And now we can safely restore the level. This procedure results in
  // some loss of resolution, deemed acceptable.
  //
  // It's possible to apply the gain in the AGC (with a target level of 0 dBFS
  // and compression gain of 6 dB). However, in the transition frame when this
  // is enabled (moving from one to two audio sources) it has the potential to
  // create discontinuities in the mixed frame.
  //
  // Instead we double the frame (with addition since left-shifting a
  // negative value is undefined).
  *mixedAudio += *mixedAudio;

  if (error != limiter_->kNoError) {
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                 "Error from AudioProcessing: %d", error);
    RTC_NOTREACHED();
    return false;
  }
  return true;
}
}  // namespace webrtc