OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/new_audio_conference_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | 14 #include <functional> |
15 | 15 |
16 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 16 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
17 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" | 17 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" |
18 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 18 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
19 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
21 #include "webrtc/system_wrappers/include/trace.h" | 21 #include "webrtc/system_wrappers/include/trace.h" |
(...skipping 90 matching lines...) |
112 | 112 |
113 int32_t NewMixHistory::SetIsMixed(const bool mixed) { | 113 int32_t NewMixHistory::SetIsMixed(const bool mixed) { |
114 is_mixed_ = mixed; | 114 is_mixed_ = mixed; |
115 return 0; | 115 return 0; |
116 } | 116 } |
117 | 117 |
118 void NewMixHistory::ResetMixedStatus() { | 118 void NewMixHistory::ResetMixedStatus() { |
119 is_mixed_ = false; | 119 is_mixed_ = false; |
120 } | 120 } |
121 | 121 |
122 NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) { | 122 AudioMixer* AudioMixer::Create(int id) { |
123 NewAudioConferenceMixerImpl* mixer = new NewAudioConferenceMixerImpl(id); | 123 AudioMixerImpl* mixer = new AudioMixerImpl(id); |
124 if (!mixer->Init()) { | 124 if (!mixer->Init()) { |
125 delete mixer; | 125 delete mixer; |
126 return NULL; | 126 return NULL; |
127 } | 127 } |
128 return mixer; | 128 return mixer; |
129 } | 129 } |
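
A minimal caller sketch for the factory above (illustrative only; the id value is arbitrary, and the caller is assumed to own and delete the returned mixer, as with the old NewAudioConferenceMixer):

    webrtc::AudioMixer* mixer = webrtc::AudioMixer::Create(0 /* id */);
    if (mixer == NULL) {
      // Init() failed; Create() already deleted the half-built instance.
      return;
    }
    // ... register sources and mix ...
    delete mixer;
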
130 | 130 |
131 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) | 131 AudioMixerImpl::AudioMixerImpl(int id) |
132 : id_(id), | 132 : id_(id), |
133 output_frequency_(kDefaultFrequency), | 133 output_frequency_(kDefaultFrequency), |
134 sample_size_(0), | 134 sample_size_(0), |
135 audio_source_list_(), | 135 audio_source_list_(), |
136 additional_audio_source_list_(), | 136 additional_audio_source_list_(), |
137 num_mixed_audio_sources_(0), | 137 num_mixed_audio_sources_(0), |
138 use_limiter_(true), | 138 use_limiter_(true), |
139 time_stamp_(0) { | 139 time_stamp_(0) { |
140 thread_checker_.DetachFromThread(); | 140 thread_checker_.DetachFromThread(); |
141 } | 141 } |
142 | 142 |
143 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {} | 143 AudioMixerImpl::~AudioMixerImpl() {} |
144 | 144 |
145 bool NewAudioConferenceMixerImpl::Init() { | 145 bool AudioMixerImpl::Init() { |
146 crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); | 146 crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); |
147 if (crit_.get() == NULL) | 147 if (crit_.get() == NULL) |
148 return false; | 148 return false; |
149 | 149 |
150 cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); | 150 cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); |
151 if (cb_crit_.get() == NULL) | 151 if (cb_crit_.get() == NULL) |
152 return false; | 152 return false; |
153 | 153 |
154 Config config; | 154 Config config; |
155 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 155 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
(...skipping 20 matching lines...) |
176 | 176 |
177 if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError) | 177 if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError) |
178 return false; | 178 return false; |
179 | 179 |
180 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) | 180 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) |
181 return false; | 181 return false; |
182 | 182 |
183 return true; | 183 return true; |
184 } | 184 } |
185 | 185 |
186 void NewAudioConferenceMixerImpl::Mix(int sample_rate, | 186 void AudioMixerImpl::Mix(int sample_rate, |
187 size_t number_of_channels, | 187 size_t number_of_channels, |
188 AudioFrame* audio_frame_for_mixing) { | 188 AudioFrame* audio_frame_for_mixing) { |
189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
190 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 190 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
191 AudioFrameList mixList; | 191 AudioFrameList mixList; |
192 AudioFrameList additionalFramesList; | 192 AudioFrameList additionalFramesList; |
193 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 193 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
194 { | 194 { |
195 CriticalSectionScoped cs(cb_crit_.get()); | 195 CriticalSectionScoped cs(cb_crit_.get()); |
196 Frequency mixing_frequency; | 196 Frequency mixing_frequency; |
197 | 197 |
198 switch (sample_rate) { | 198 switch (sample_rate) { |
(...skipping 54 matching lines...) |
253 LimitMixedAudio(audio_frame_for_mixing); | 253 LimitMixedAudio(audio_frame_for_mixing); |
254 } | 254 } |
255 } | 255 } |
256 | 256 |
257 // Pass the final result to the level indicator. | 257 // Pass the final result to the level indicator. |
258 audio_level_.ComputeLevel(*audio_frame_for_mixing); | 258 audio_level_.ComputeLevel(*audio_frame_for_mixing); |
259 | 259 |
260 return; | 260 return; |
261 } | 261 } |
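
A sketch of one mixing tick against this entry point (hypothetical caller; it assumes 48 kHz is among the rates handled by the elided Frequency switch, and it must run on the same thread every call because of the thread checker above):

    webrtc::AudioFrame mixed;
    mixer->Mix(48000 /* sample_rate */, 2 /* number_of_channels */, &mixed);
    // mixed now holds the limited mix; its level was just recorded via
    // audio_level_.ComputeLevel() and is readable from GetOutputAudioLevel().
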
262 | 262 |
263 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 263 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { |
264 const Frequency& frequency) { | |
265 CriticalSectionScoped cs(crit_.get()); | 264 CriticalSectionScoped cs(crit_.get()); |
266 | 265 |
267 output_frequency_ = frequency; | 266 output_frequency_ = frequency; |
268 sample_size_ = | 267 sample_size_ = |
269 static_cast<size_t>((output_frequency_ * kProcessPeriodicityInMs) / 1000); | 268 static_cast<size_t>((output_frequency_ * kProcessPeriodicityInMs) / 1000); |
270 | 269 |
271 return 0; | 270 return 0; |
272 } | 271 } |
273 | 272 |
274 NewAudioConferenceMixer::Frequency | 273 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { |
275 NewAudioConferenceMixerImpl::OutputFrequency() const { | |
276 CriticalSectionScoped cs(crit_.get()); | 274 CriticalSectionScoped cs(crit_.get()); |
277 return output_frequency_; | 275 return output_frequency_; |
278 } | 276 } |
279 | 277 |
280 int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus( | 278 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
281 MixerAudioSource* audio_source, | 279 bool mixable) { |
282 bool mixable) { | |
283 if (!mixable) { | 280 if (!mixable) { |
284 // Anonymous audio sources are in a separate list. Make sure that the | 281 // Anonymous audio sources are in a separate list. Make sure that the |
285 // audio source is in the _audioSourceList if it is being mixed. | 282 // audio source is in the _audioSourceList if it is being mixed. |
286 SetAnonymousMixabilityStatus(audio_source, false); | 283 SetAnonymousMixabilityStatus(audio_source, false); |
287 } | 284 } |
288 size_t numMixedAudioSources; | 285 size_t numMixedAudioSources; |
289 { | 286 { |
290 CriticalSectionScoped cs(cb_crit_.get()); | 287 CriticalSectionScoped cs(cb_crit_.get()); |
291 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); | 288 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); |
292 // API must be called with a new state. | 289 // API must be called with a new state. |
(...skipping 23 matching lines...) |
316 numMixedNonAnonymous + additional_audio_source_list_.size(); | 313 numMixedNonAnonymous + additional_audio_source_list_.size(); |
317 } | 314 } |
318 // A MixerAudioSource was added or removed. Make sure the scratch | 315 // A MixerAudioSource was added or removed. Make sure the scratch |
319 // buffer is updated if necessary. | 316 // buffer is updated if necessary. |
320 // Note: The scratch buffer may only be updated in Process(). | 317 // Note: The scratch buffer may only be updated in Process(). |
321 CriticalSectionScoped cs(crit_.get()); | 318 CriticalSectionScoped cs(crit_.get()); |
322 num_mixed_audio_sources_ = numMixedAudioSources; | 319 num_mixed_audio_sources_ = numMixedAudioSources; |
323 return 0; | 320 return 0; |
324 } | 321 } |
325 | 322 |
326 bool NewAudioConferenceMixerImpl::MixabilityStatus( | 323 bool AudioMixerImpl::MixabilityStatus( |
327 const MixerAudioSource& audio_source) const { | 324 const MixerAudioSource& audio_source) const { |
328 CriticalSectionScoped cs(cb_crit_.get()); | 325 CriticalSectionScoped cs(cb_crit_.get()); |
329 return IsAudioSourceInList(audio_source, audio_source_list_); | 326 return IsAudioSourceInList(audio_source, audio_source_list_); |
330 } | 327 } |
331 | 328 |
332 int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus( | 329 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( |
333 MixerAudioSource* audio_source, | 330 MixerAudioSource* audio_source, |
334 bool anonymous) { | 331 bool anonymous) { |
335 CriticalSectionScoped cs(cb_crit_.get()); | 332 CriticalSectionScoped cs(cb_crit_.get()); |
336 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { | 333 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { |
337 if (anonymous) { | 334 if (anonymous) { |
338 return 0; | 335 return 0; |
339 } | 336 } |
340 if (!RemoveAudioSourceFromList(audio_source, | 337 if (!RemoveAudioSourceFromList(audio_source, |
341 &additional_audio_source_list_)) { | 338 &additional_audio_source_list_)) { |
342 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 339 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
(...skipping 14 matching lines...) |
357 "audio_source must be registered before turning it into anonymous"); | 354 "audio_source must be registered before turning it into anonymous"); |
358 // Setting anonymous status is only possible if MixerAudioSource is | 355 // Setting anonymous status is only possible if MixerAudioSource is |
359 // already registered. | 356 // already registered. |
360 return -1; | 357 return -1; |
361 } | 358 } |
362 return AddAudioSourceToList(audio_source, &additional_audio_source_list_) | 359 return AddAudioSourceToList(audio_source, &additional_audio_source_list_) |
363 ? 0 | 360 ? 0 |
364 : -1; | 361 : -1; |
365 } | 362 } |
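
The registration check above imposes a call order; a hedged sketch (source is a MixerAudioSource* the caller already manages):

    mixer->SetMixabilityStatus(source, true);           // register first: returns 0
    mixer->SetAnonymousMixabilityStatus(source, true);  // then anonymize: returns 0
    // Reversed order fails: anonymizing an unregistered source returns -1.
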
366 | 363 |
367 bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus( | 364 bool AudioMixerImpl::AnonymousMixabilityStatus( |
368 const MixerAudioSource& audio_source) const { | 365 const MixerAudioSource& audio_source) const { |
369 CriticalSectionScoped cs(cb_crit_.get()); | 366 CriticalSectionScoped cs(cb_crit_.get()); |
370 return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 367 return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
371 } | 368 } |
372 | 369 |
373 AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix( | 370 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { |
374 size_t maxAudioFrameCounter) const { | |
375 AudioFrameList result; | 371 AudioFrameList result; |
376 std::vector<SourceFrame> audioSourceMixingDataList; | 372 std::vector<SourceFrame> audioSourceMixingDataList; |
377 | 373 |
378 // Get audio source audio and put it in the struct vector. | 374 // Get audio source audio and put it in the struct vector. |
379 for (MixerAudioSource* audio_source : audio_source_list_) { | 375 for (MixerAudioSource* audio_source : audio_source_list_) { |
380 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 376 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( |
381 id_, static_cast<int>(output_frequency_)); | 377 id_, static_cast<int>(output_frequency_)); |
382 | 378 |
383 auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 379 auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
384 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 380 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
(...skipping 36 matching lines...) |
421 if (p.was_mixed_before_ && !is_mixed) { | 417 if (p.was_mixed_before_ && !is_mixed) { |
422 NewMixerRampOut(p.audio_frame_); | 418 NewMixerRampOut(p.audio_frame_); |
423 result.emplace_back(p.audio_frame_, false); | 419 result.emplace_back(p.audio_frame_, false); |
424 } | 420 } |
425 | 421 |
426 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); | 422 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); |
427 } | 423 } |
428 return result; | 424 return result; |
429 } | 425 } |
430 | 426 |
431 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 427 void AudioMixerImpl::GetAdditionalAudio( |
432 AudioFrameList* additionalFramesList) const { | 428 AudioFrameList* additionalFramesList) const { |
433 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 429 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
434 "GetAdditionalAudio(additionalFramesList)"); | 430 "GetAdditionalAudio(additionalFramesList)"); |
435 // The GetAudioFrameWithMuted() callback may result in the audio source being | 431 // The GetAudioFrameWithMuted() callback may result in the audio source being |
436 // removed from additionalAudioFramesList_. If that happens it will | 432 // removed from additionalAudioFramesList_. If that happens it will |
437 // invalidate any iterators. Create a copy of the audio sources list such | 433 // invalidate any iterators. Create a copy of the audio sources list such |
438 // that the list of participants can be traversed safely. | 434 // that the list of participants can be traversed safely. |
439 MixerAudioSourceList additionalAudioSourceList; | 435 MixerAudioSourceList additionalAudioSourceList; |
440 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), | 436 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), |
441 additional_audio_source_list_.begin(), | 437 additional_audio_source_list_.begin(), |
(...skipping 13 matching lines...) |
455 } | 451 } |
456 if (audio_frame->samples_per_channel_ == 0) { | 452 if (audio_frame->samples_per_channel_ == 0) { |
457 // Empty frame. Don't use it. | 453 // Empty frame. Don't use it. |
458 continue; | 454 continue; |
459 } | 455 } |
460 additionalFramesList->push_back(FrameAndMuteInfo( | 456 additionalFramesList->push_back(FrameAndMuteInfo( |
461 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 457 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
462 } | 458 } |
463 } | 459 } |
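
The snapshot-before-iterating defense used above is worth isolating; a self-contained illustration with hypothetical types (not the WebRTC API):

    #include <list>

    struct Source {
      bool wants_removal = false;
    };

    // May erase s from *registry - exactly the mutation that would
    // invalidate iterators of a direct traversal.
    void Visit(Source* s, std::list<Source*>* registry) {
      if (s->wants_removal)
        registry->remove(s);
    }

    void VisitAll(std::list<Source*>* registry) {
      // Copy first: Visit() may erase elements of *registry, but
      // iterators into the snapshot remain valid throughout.
      std::list<Source*> snapshot(registry->begin(), registry->end());
      for (Source* s : snapshot)
        Visit(s, registry);
    }
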
464 | 460 |
465 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( | 461 bool AudioMixerImpl::IsAudioSourceInList( |
466 const MixerAudioSource& audio_source, | 462 const MixerAudioSource& audio_source, |
467 const MixerAudioSourceList& audioSourceList) const { | 463 const MixerAudioSourceList& audioSourceList) const { |
468 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 464 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
469 "IsAudioSourceInList(audio_source,audioSourceList)"); | 465 "IsAudioSourceInList(audio_source,audioSourceList)"); |
470 return std::find(audioSourceList.begin(), audioSourceList.end(), | 466 return std::find(audioSourceList.begin(), audioSourceList.end(), |
471 &audio_source) != audioSourceList.end(); | 467 &audio_source) != audioSourceList.end(); |
472 } | 468 } |
473 | 469 |
474 bool NewAudioConferenceMixerImpl::AddAudioSourceToList( | 470 bool AudioMixerImpl::AddAudioSourceToList( |
475 MixerAudioSource* audio_source, | 471 MixerAudioSource* audio_source, |
476 MixerAudioSourceList* audioSourceList) const { | 472 MixerAudioSourceList* audioSourceList) const { |
477 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 473 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
478 "AddAudioSourceToList(audio_source, audioSourceList)"); | 474 "AddAudioSourceToList(audio_source, audioSourceList)"); |
479 audioSourceList->push_back(audio_source); | 475 audioSourceList->push_back(audio_source); |
480 // Make sure that the mixed status is correct for new MixerAudioSource. | 476 // Make sure that the mixed status is correct for new MixerAudioSource. |
481 audio_source->_mixHistory->ResetMixedStatus(); | 477 audio_source->_mixHistory->ResetMixedStatus(); |
482 return true; | 478 return true; |
483 } | 479 } |
484 | 480 |
485 bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList( | 481 bool AudioMixerImpl::RemoveAudioSourceFromList( |
486 MixerAudioSource* audio_source, | 482 MixerAudioSource* audio_source, |
487 MixerAudioSourceList* audioSourceList) const { | 483 MixerAudioSourceList* audioSourceList) const { |
488 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 484 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
489 "RemoveAudioSourceFromList(audio_source, audioSourceList)"); | 485 "RemoveAudioSourceFromList(audio_source, audioSourceList)"); |
490 auto iter = | 486 auto iter = |
491 std::find(audioSourceList->begin(), audioSourceList->end(), audio_source); | 487 std::find(audioSourceList->begin(), audioSourceList->end(), audio_source); |
492 if (iter != audioSourceList->end()) { | 488 if (iter != audioSourceList->end()) { |
493 audioSourceList->erase(iter); | 489 audioSourceList->erase(iter); |
494 // AudioSource is no longer mixed, reset to default. | 490 // AudioSource is no longer mixed, reset to default. |
495 audio_source->_mixHistory->ResetMixedStatus(); | 491 audio_source->_mixHistory->ResetMixedStatus(); |
496 return true; | 492 return true; |
497 } else { | 493 } else { |
498 return false; | 494 return false; |
499 } | 495 } |
500 } | 496 } |
501 | 497 |
502 int32_t NewAudioConferenceMixerImpl::MixFromList( | 498 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio, |
503 AudioFrame* mixedAudio, | 499 const AudioFrameList& audioFrameList, |
504 const AudioFrameList& audioFrameList, | 500 int32_t id, |
505 int32_t id, | 501 bool use_limiter) { |
506 bool use_limiter) { | |
507 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | 502 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
508 "MixFromList(mixedAudio, audioFrameList)"); | 503 "MixFromList(mixedAudio, audioFrameList)"); |
509 if (audioFrameList.empty()) | 504 if (audioFrameList.empty()) |
510 return 0; | 505 return 0; |
511 | 506 |
512 uint32_t position = 0; | 507 uint32_t position = 0; |
513 | 508 |
514 if (audioFrameList.size() == 1) { | 509 if (audioFrameList.size() == 1) { |
515 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; | 510 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; |
516 mixedAudio->elapsed_time_ms_ = | 511 mixedAudio->elapsed_time_ms_ = |
(...skipping 11 matching lines...) |
528 MixFrames(mixedAudio, iter->frame, use_limiter); | 523 MixFrames(mixedAudio, iter->frame, use_limiter); |
529 } | 524 } |
530 | 525 |
531 position++; | 526 position++; |
532 } | 527 } |
533 | 528 |
534 return 0; | 529 return 0; |
535 } | 530 } |
536 | 531 |
537 // TODO(andrew): consolidate this function with MixFromList. | 532 // TODO(andrew): consolidate this function with MixFromList. |
538 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList( | 533 int32_t AudioMixerImpl::MixAnonomouslyFromList( |
539 AudioFrame* mixedAudio, | 534 AudioFrame* mixedAudio, |
540 const AudioFrameList& audioFrameList) const { | 535 const AudioFrameList& audioFrameList) const { |
541 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 536 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
542 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); | 537 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); |
543 | 538 |
544 if (audioFrameList.empty()) | 539 if (audioFrameList.empty()) |
545 return 0; | 540 return 0; |
546 | 541 |
547 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | 542 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); |
548 iter != audioFrameList.end(); ++iter) { | 543 iter != audioFrameList.end(); ++iter) { |
549 if (!iter->muted) { | 544 if (!iter->muted) { |
550 MixFrames(mixedAudio, iter->frame, use_limiter_); | 545 MixFrames(mixedAudio, iter->frame, use_limiter_); |
551 } | 546 } |
552 } | 547 } |
553 return 0; | 548 return 0; |
554 } | 549 } |
555 | 550 |
556 bool NewAudioConferenceMixerImpl::LimitMixedAudio( | 551 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { |
557 AudioFrame* mixedAudio) const { | |
558 if (!use_limiter_) { | 552 if (!use_limiter_) { |
559 return true; | 553 return true; |
560 } | 554 } |
561 | 555 |
562 // Smoothly limit the mixed frame. | 556 // Smoothly limit the mixed frame. |
563 const int error = limiter_->ProcessStream(mixedAudio); | 557 const int error = limiter_->ProcessStream(mixedAudio); |
564 | 558 |
565 // And now we can safely restore the level. This procedure results in | 559 // And now we can safely restore the level. This procedure results in |
566 // some loss of resolution, deemed acceptable. | 560 // some loss of resolution, deemed acceptable. |
567 // | 561 // |
568 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS | 562 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS |
569 // and compression gain of 6 dB). However, in the transition frame when this | 563 // and compression gain of 6 dB). However, in the transition frame when this |
570 // is enabled (moving from one to two audio sources) it has the potential to | 564 // is enabled (moving from one to two audio sources) it has the potential to |
571 // create discontinuities in the mixed frame. | 565 // create discontinuities in the mixed frame. |
572 // | 566 // |
573 // Instead we double the frame (with addition since left-shifting a | 567 // Instead we double the frame (with addition since left-shifting a |
574 // negative value is undefined). | 568 // negative value is undefined). |
575 *mixedAudio += *mixedAudio; | 569 *mixedAudio += *mixedAudio; |
576 | 570 |
577 if (error != limiter_->kNoError) { | 571 if (error != limiter_->kNoError) { |
578 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 572 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
579 "Error from AudioProcessing: %d", error); | 573 "Error from AudioProcessing: %d", error); |
580 RTC_NOTREACHED(); | 574 RTC_NOTREACHED(); |
581 return false; | 575 return false; |
582 } | 576 } |
583 return true; | 577 return true; |
584 } | 578 } |
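
Why the comment insists on addition rather than a shift: left-shifting a negative value is undefined behavior in C++, while addition on the promoted value is well defined. A standalone sample-level sketch of the same doubling with explicit saturation (hypothetical helper, not the AudioFrame operator):

    #include <cstdint>
    #include <limits>

    int16_t DoubleSample(int16_t s) {
      // Promote to 32 bits and add; defined for negative s, unlike s << 1.
      int32_t doubled = static_cast<int32_t>(s) + s;
      // Saturate back into the 16-bit sample range.
      if (doubled > std::numeric_limits<int16_t>::max())
        return std::numeric_limits<int16_t>::max();
      if (doubled < std::numeric_limits<int16_t>::min())
        return std::numeric_limits<int16_t>::min();
      return static_cast<int16_t>(doubled);
    }
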
585 | 579 |
586 uint32_t NewAudioConferenceMixerImpl::GetOutputAudioLevel() { | 580 uint32_t AudioMixerImpl::GetOutputAudioLevel() { |
587 int8_t current_level = audio_level_.Level(); | 581 int8_t current_level = audio_level_.Level(); |
588 uint32_t level = static_cast<uint32_t>(current_level); | 582 uint32_t level = static_cast<uint32_t>(current_level); |
589 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 583 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
590 "GetAudioOutputLevel() => level=%u", level); | 584 "GetAudioOutputLevel() => level=%u", level); |
591 return level; | 585 return level; |
592 } | 586 } |
593 | 587 |
594 uint32_t NewAudioConferenceMixerImpl::GetOutputAudioLevelFullRange() { | 588 uint32_t AudioMixerImpl::GetOutputAudioLevelFullRange() { |
595 int16_t current_level = audio_level_.LevelFullRange(); | 589 int16_t current_level = audio_level_.LevelFullRange(); |
596 uint32_t level = static_cast<uint32_t>(current_level); | 590 uint32_t level = static_cast<uint32_t>(current_level); |
597 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 591 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
598 "GetAudioOutputLevelFullRange() => level=%u", level); | 592 "GetAudioOutputLevelFullRange() => level=%u", level); |
599 return level; | 593 return level; |
600 } | 594 } |
601 } // namespace webrtc | 595 } // namespace webrtc |