OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 18 matching lines...) | |
29 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 29 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) |
30 : audio_source_(p), | 30 : audio_source_(p), |
31 audio_frame_(a), | 31 audio_frame_(a), |
32 muted_(m), | 32 muted_(m), |
33 was_mixed_before_(was_mixed_before) { | 33 was_mixed_before_(was_mixed_before) { |
34 if (!muted_) { | 34 if (!muted_) { |
35 energy_ = NewMixerCalculateEnergy(*a); | 35 energy_ = NewMixerCalculateEnergy(*a); |
36 } | 36 } |
37 } | 37 } |
38 | 38 |
39 SourceFrame(MixerAudioSource* p, | |
40 AudioFrame* a, | |
41 bool m, | |
42 bool was_mixed_before, | |
43 uint32_t energy) | |
44 : audio_source_(p), | |
45 audio_frame_(a), | |
46 muted_(m), | |
47 energy_(energy), | |
48 was_mixed_before_(was_mixed_before) {} | |
49 | |
39 // a.shouldMixBefore(b) is used to select mixer participants. | 50 // a.shouldMixBefore(b) is used to select mixer participants. |
40 bool shouldMixBefore(const SourceFrame& other) const { | 51 bool shouldMixBefore(const SourceFrame& other) const { |
41 if (muted_ != other.muted_) { | 52 if (muted_ != other.muted_) { |
42 return other.muted_; | 53 return other.muted_; |
43 } | 54 } |
44 | 55 |
45 auto our_activity = audio_frame_->vad_activity_; | 56 auto our_activity = audio_frame_->vad_activity_; |
46 auto other_activity = other.audio_frame_->vad_activity_; | 57 auto other_activity = other.audio_frame_->vad_activity_; |
47 | 58 |
48 if (our_activity != other_activity) { | 59 if (our_activity != other_activity) { |
(...skipping 13 matching lines...) | |
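Note on shouldMixBefore(): the elided lines above complete the comparator. A hypothetical sketch of the usual tail (active voice ranked first, then higher cached frame energy; the actual elided code may differ):

    // Hypothetical continuation, not shown in this hunk.
    if (our_activity != other_activity) {
      return our_activity == AudioFrame::kVadActive;
    }
    return energy_ > other.energy_;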
62 // Remixes a frame between stereo and mono. | 73 // Remixes a frame between stereo and mono. |
63 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { | 74 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { |
64 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 75 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
65 if (frame->num_channels_ == 1 && number_of_channels == 2) { | 76 if (frame->num_channels_ == 1 && number_of_channels == 2) { |
66 AudioFrameOperations::MonoToStereo(frame); | 77 AudioFrameOperations::MonoToStereo(frame); |
67 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { | 78 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { |
68 AudioFrameOperations::StereoToMono(frame); | 79 AudioFrameOperations::StereoToMono(frame); |
69 } | 80 } |
70 } | 81 } |
71 | 82 |
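RemixFrame() delegates the sample shuffling to AudioFrameOperations. As a rough sketch of what those two calls do for interleaved 16-bit audio (illustrative only; the real helpers also update num_channels_):

    #include <cstddef>
    #include <cstdint>

    void MonoToStereoSketch(const int16_t* mono, int16_t* stereo, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        stereo[2 * i] = mono[i];      // duplicate into left...
        stereo[2 * i + 1] = mono[i];  // ...and into right.
      }
    }

    void StereoToMonoSketch(const int16_t* stereo, int16_t* mono, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        // Average the channels; int promotion avoids overflow and the
        // shift keeps the result in int16 range.
        mono[i] = static_cast<int16_t>((stereo[2 * i] + stereo[2 * i + 1]) >> 1);
      }
    }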
72 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 83 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
73 // These effects are applied to |frame| itself prior to mixing. Assumes that | 84 for (const auto& source_frame : mixed_sources_and_frames) { |
74 // |mixed_frame| always has at least as many channels as |frame|. Supports | 85 // Ramp in previously unmixed. |
75 // stereo at most. | 86 if (!source_frame.was_mixed_before_) { |
76 // | 87 NewMixerRampIn(source_frame.audio_frame_); |
77 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 88 } |
78 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); | 89 |
79 if (use_limiter) { | 90 const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed(); |
80 // Divide by two to avoid saturation in the mixing. | 91 // Ramp out currently unmixed. |
81 // This is only meaningful if the limiter will be used. | 92 if (source_frame.was_mixed_before_ && !is_mixed) { |
82 *frame >>= 1; | 93 NewMixerRampOut(source_frame.audio_frame_); |
94 } | |
83 } | 95 } |
84 RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_); | |
85 *mixed_frame += *frame; | |
86 } | 96 } |
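The new Ramp() helper centralizes the fade-in/fade-out that UpdateToMix() used to do inline. NewMixerRampIn/NewMixerRampOut are defined elsewhere; for illustration, a hypothetical linear ramp-in, assuming the AudioFrame layout of this era (public int16_t data_ buffer):

    // Fade the frame from silence up to full scale so a source that
    // enters the mix does not produce an audible click. The real
    // NewMixerRampIn may use a different gain curve.
    void LinearRampInSketch(AudioFrame* frame) {
      const size_t total = frame->samples_per_channel_ * frame->num_channels_;
      for (size_t i = 0; i < total; ++i) {
        frame->data_[i] = static_cast<int16_t>(
            frame->data_[i] * (static_cast<float>(i) / total));
      }
    }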
87 | 97 |
88 } // namespace | 98 } // namespace |
89 | 99 |
90 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {} | 100 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {} |
91 | 101 |
92 MixerAudioSource::~MixerAudioSource() { | 102 MixerAudioSource::~MixerAudioSource() { |
93 delete _mixHistory; | 103 delete _mixHistory; |
94 } | 104 } |
95 | 105 |
(...skipping 96 matching lines...) | |
192 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 202 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
193 "Invalid frequency: %d", sample_rate); | 203 "Invalid frequency: %d", sample_rate); |
194 RTC_NOTREACHED(); | 204 RTC_NOTREACHED(); |
195 return; | 205 return; |
196 } | 206 } |
197 | 207 |
198 if (OutputFrequency() != sample_rate) { | 208 if (OutputFrequency() != sample_rate) { |
199 SetOutputFrequency(static_cast<Frequency>(sample_rate)); | 209 SetOutputFrequency(static_cast<Frequency>(sample_rate)); |
200 } | 210 } |
201 | 211 |
202 AudioFrameList mixList; | 212 AudioFrameList mix_list; |
203 AudioFrameList additionalFramesList; | 213 AudioFrameList anonymous_mix_list; |
204 int num_mixed_audio_sources; | 214 int num_mixed_audio_sources; |
205 { | 215 { |
206 CriticalSectionScoped cs(crit_.get()); | 216 CriticalSectionScoped cs(crit_.get()); |
207 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); | 217 mix_list = GetNonAnonymousAudio(); |
208 GetAdditionalAudio(&additionalFramesList); | 218 anonymous_mix_list = GetAnonymousAudio(); |
209 num_mixed_audio_sources = num_mixed_audio_sources_; | 219 num_mixed_audio_sources = num_mixed_audio_sources_; |
210 } | 220 } |
211 | 221 |
212 for (FrameAndMuteInfo& frame_and_mute : mixList) { | 222 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), |
213 RemixFrame(frame_and_mute.frame, number_of_channels); | 223 anonymous_mix_list.end()); |
214 } | 224 |
215 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { | 225 for (const auto& frame : mix_list) { |
216 RemixFrame(frame_and_mute.frame, number_of_channels); | 226 RemixFrame(frame, number_of_channels); |
217 } | 227 } |
218 | 228 |
219 audio_frame_for_mixing->UpdateFrame( | 229 audio_frame_for_mixing->UpdateFrame( |
220 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, | 230 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, |
221 AudioFrame::kVadPassive, number_of_channels); | 231 AudioFrame::kVadPassive, number_of_channels); |
222 | 232 |
223 time_stamp_ += static_cast<uint32_t>(sample_size_); | 233 time_stamp_ += static_cast<uint32_t>(sample_size_); |
224 | 234 |
225 use_limiter_ = num_mixed_audio_sources > 1; | 235 use_limiter_ = num_mixed_audio_sources > 1; |
226 | 236 |
227 // We only use the limiter if it supports the output sample rate and | 237 // We only use the limiter if we're actually mixing multiple streams. |
228 // we're actually mixing multiple streams. | 238 MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_); |
229 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); | 239 |
230 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | |
231 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 240 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
232 // Nothing was mixed, set the audio samples to silence. | 241 // Nothing was mixed, set the audio samples to silence. |
233 audio_frame_for_mixing->samples_per_channel_ = sample_size_; | 242 audio_frame_for_mixing->samples_per_channel_ = sample_size_; |
234 audio_frame_for_mixing->Mute(); | 243 audio_frame_for_mixing->Mute(); |
235 } else { | 244 } else { |
236 // Only call the limiter if we have something to mix. | 245 // Only call the limiter if we have something to mix. |
237 LimitMixedAudio(audio_frame_for_mixing); | 246 LimitMixedAudio(audio_frame_for_mixing); |
238 } | 247 } |
239 | 248 |
240 // Pass the final result to the level indicator. | 249 // Pass the final result to the level indicator. |
(...skipping 95 matching lines...) | |
336 ? 0 | 345 ? 0 |
337 : -1; | 346 : -1; |
338 } | 347 } |
339 | 348 |
340 bool AudioMixerImpl::AnonymousMixabilityStatus( | 349 bool AudioMixerImpl::AnonymousMixabilityStatus( |
341 const MixerAudioSource& audio_source) const { | 350 const MixerAudioSource& audio_source) const { |
342 CriticalSectionScoped cs(crit_.get()); | 351 CriticalSectionScoped cs(crit_.get()); |
343 return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 352 return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
344 } | 353 } |
345 | 354 |
346 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { | 355 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const { |
356 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | |
357 "GetNonAnonymousAudio()"); | |
347 AudioFrameList result; | 358 AudioFrameList result; |
348 std::vector<SourceFrame> audioSourceMixingDataList; | 359 std::vector<SourceFrame> audioSourceMixingDataList; |
360 std::vector<SourceFrame> ramp_list; | |
349 | 361 |
350 // Get audio source audio and put it in the struct vector. | 362 // Get audio source audio and put it in the struct vector. |
351 for (MixerAudioSource* audio_source : audio_source_list_) { | 363 for (MixerAudioSource* audio_source : audio_source_list_) { |
352 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 364 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( |
353 id_, static_cast<int>(output_frequency_)); | 365 id_, static_cast<int>(output_frequency_)); |
354 | 366 |
355 auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 367 auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
356 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 368 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
357 | 369 |
358 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | 370 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { |
359 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 371 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
360 "failed to GetAudioFrameWithMuted() from participant"); | 372 "failed to GetAudioFrameWithMuted() from participant"); |
361 continue; | 373 continue; |
362 } | 374 } |
363 audioSourceMixingDataList.emplace_back( | 375 audioSourceMixingDataList.emplace_back( |
364 audio_source, audio_source_audio_frame, | 376 audio_source, audio_source_audio_frame, |
365 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, | 377 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, |
366 audio_source->_mixHistory->WasMixed()); | 378 audio_source->_mixHistory->WasMixed()); |
367 } | 379 } |
368 | 380 |
369 // Sort frames by sorting function. | 381 // Sort frames by sorting function. |
370 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), | 382 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), |
371 std::mem_fn(&SourceFrame::shouldMixBefore)); | 383 std::mem_fn(&SourceFrame::shouldMixBefore)); |
372 | 384 |
385 int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources; | |
373 // Go through list in order and put things in mixList. | 386 // Go through list in order and put things in mixList. |
374 for (SourceFrame& p : audioSourceMixingDataList) { | 387 for (SourceFrame& p : audioSourceMixingDataList) { |
375 // Filter muted. | 388 // Filter muted. |
376 if (p.muted_) { | 389 if (p.muted_) { |
377 p.audio_source_->_mixHistory->SetIsMixed(false); | 390 p.audio_source_->_mixHistory->SetIsMixed(false); |
378 continue; | 391 continue; |
379 } | 392 } |
380 | 393 |
381 // Add frame to result vector for mixing. | 394 // Add frame to result vector for mixing. |
382 bool is_mixed = false; | 395 bool is_mixed = false; |
383 if (maxAudioFrameCounter > 0) { | 396 if (maxAudioFrameCounter > 0) { |
384 --maxAudioFrameCounter; | 397 --maxAudioFrameCounter; |
385 if (!p.was_mixed_before_) { | 398 result.push_back(p.audio_frame_); |
386 NewMixerRampIn(p.audio_frame_); | 399 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, |
387 } | 400 p.was_mixed_before_, -1); |
388 result.emplace_back(p.audio_frame_, false); | |
389 is_mixed = true; | 401 is_mixed = true; |
390 } | 402 } |
391 | |
392 // Ramp out unmuted. | |
393 if (p.was_mixed_before_ && !is_mixed) { | |
394 NewMixerRampOut(p.audio_frame_); | |
395 result.emplace_back(p.audio_frame_, false); | |
396 } | |
397 | |
398 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); | 403 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); |
399 } | 404 } |
405 Ramp(ramp_list); | |
400 return result; | 406 return result; |
401 } | 407 } |
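A note on the std::sort call in GetNonAnonymousAudio(): std::mem_fn adapts the member function so that the comparator invocation comp(a, b) evaluates a.shouldMixBefore(b). The equivalent lambda spelling:

    std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
              [](const SourceFrame& a, const SourceFrame& b) {
                return a.shouldMixBefore(b);
              });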
402 | 408 |
403 void AudioMixerImpl::GetAdditionalAudio( | 409 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { |
404 AudioFrameList* additionalFramesList) const { | |
405 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 410 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
406 "GetAdditionalAudio(additionalFramesList)"); | 411 "GetAnonymousAudio()"); |
407 // The GetAudioFrameWithMuted() callback may result in the audio source being | 412 // The GetAudioFrameWithMuted() callback may result in the audio source being |
408 // removed from additional_audio_source_list_. If that happens it will | 413 // removed from additional_audio_source_list_. If that happens it will |
409 // invalidate any iterators. Create a copy of the audio sources list such | 414 // invalidate any iterators. Create a copy of the audio sources list such |
410 // that the list of participants can be traversed safely. | 415 // that the list of participants can be traversed safely. |
416 std::vector<SourceFrame> ramp_list; | |
411 MixerAudioSourceList additionalAudioSourceList; | 417 MixerAudioSourceList additionalAudioSourceList; |
418 AudioFrameList result; | |
412 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), | 419 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), |
413 additional_audio_source_list_.begin(), | 420 additional_audio_source_list_.begin(), |
414 additional_audio_source_list_.end()); | 421 additional_audio_source_list_.end()); |
415 | 422 |
416 for (MixerAudioSourceList::const_iterator audio_source = | 423 for (MixerAudioSourceList::const_iterator audio_source = |
417 additionalAudioSourceList.begin(); | 424 additionalAudioSourceList.begin(); |
418 audio_source != additionalAudioSourceList.end(); ++audio_source) { | 425 audio_source != additionalAudioSourceList.end(); ++audio_source) { |
419 auto audio_frame_with_info = | 426 auto audio_frame_with_info = |
420 (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_); | 427 (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_); |
421 auto ret = audio_frame_with_info.audio_frame_info; | 428 auto ret = audio_frame_with_info.audio_frame_info; |
422 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 429 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
423 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 430 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
424 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 431 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
425 "failed to GetAudioFrameWithMuted() from audio_source"); | 432 "failed to GetAudioFrameWithMuted() from audio_source"); |
426 continue; | 433 continue; |
427 } | 434 } |
428 if (audio_frame->samples_per_channel_ == 0) { | 435 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { |
429 // Empty frame. Don't use it. | 436 result.push_back(audio_frame); |
430 continue; | 437 ramp_list.emplace_back((*audio_source), audio_frame, false, |
kwiberg-webrtc (2016/09/01 11:43:45): Unnecessary parentheses. |
438 (*audio_source)->_mixHistory->IsMixed(), -1); | |
439 (*audio_source)->_mixHistory->SetIsMixed(true); | |
431 } | 440 } |
432 additionalFramesList->push_back(FrameAndMuteInfo( | |
433 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | |
434 } | 441 } |
442 Ramp(ramp_list); | |
443 return result; | |
435 } | 444 } |
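The copy-before-iterate pattern in GetAnonymousAudio() is worth calling out: iterating a snapshot keeps the loop valid even if a GetAudioFrameWithMuted() callback removes a source from the member list. In isolation:

    // Iterate over a snapshot so the callback may safely mutate
    // additional_audio_source_list_ without invalidating this loop.
    MixerAudioSourceList snapshot(additional_audio_source_list_.begin(),
                                  additional_audio_source_list_.end());
    for (MixerAudioSource* source : snapshot) {
      // ... call source->GetAudioFrameWithMuted(...) here.
    }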
436 | 445 |
437 bool AudioMixerImpl::IsAudioSourceInList( | 446 bool AudioMixerImpl::IsAudioSourceInList( |
438 const MixerAudioSource& audio_source, | 447 const MixerAudioSource& audio_source, |
439 const MixerAudioSourceList& audioSourceList) const { | 448 const MixerAudioSourceList& audioSourceList) const { |
440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 449 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
441 "IsAudioSourceInList(audio_source,audioSourceList)"); | 450 "IsAudioSourceInList(audio_source,audioSourceList)"); |
442 return std::find(audioSourceList.begin(), audioSourceList.end(), | 451 return std::find(audioSourceList.begin(), audioSourceList.end(), |
443 &audio_source) != audioSourceList.end(); | 452 &audio_source) != audioSourceList.end(); |
444 } | 453 } |
(...skipping 31 matching lines...) | |
476 int32_t id, | 485 int32_t id, |
477 bool use_limiter) { | 486 bool use_limiter) { |
478 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | 487 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
479 "MixFromList(mixedAudio, audioFrameList)"); | 488 "MixFromList(mixedAudio, audioFrameList)"); |
480 if (audioFrameList.empty()) | 489 if (audioFrameList.empty()) |
481 return 0; | 490 return 0; |
482 | 491 |
483 uint32_t position = 0; | 492 uint32_t position = 0; |
484 | 493 |
485 if (audioFrameList.size() == 1) { | 494 if (audioFrameList.size() == 1) { |
486 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; | 495 mixedAudio->timestamp_ = audioFrameList.front()->timestamp_; |
487 mixedAudio->elapsed_time_ms_ = | 496 mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_; |
488 audioFrameList.front().frame->elapsed_time_ms_; | |
489 } else { | 497 } else { |
490 // TODO(wu): Issue 3390. | 498 // TODO(wu): Issue 3390. |
491 // Audio frame timestamp is only supported in one channel case. | 499 // Audio frame timestamp is only supported in one channel case. |
492 mixedAudio->timestamp_ = 0; | 500 mixedAudio->timestamp_ = 0; |
493 mixedAudio->elapsed_time_ms_ = -1; | 501 mixedAudio->elapsed_time_ms_ = -1; |
494 } | 502 } |
495 | 503 |
496 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | 504 for (const auto& frame : audioFrameList) { |
497 iter != audioFrameList.end(); ++iter) { | 505 RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_); |
498 if (!iter->muted) { | 506 RTC_DCHECK_EQ( |
499 MixFrames(mixedAudio, iter->frame, use_limiter); | 507 frame->samples_per_channel_, |
508 static_cast<size_t>((mixedAudio->sample_rate_hz_ * kFrameDurationInMs) / | |
509 1000)); | |
510 | |
511 // Mix |frame| into |mixedAudio|, with saturation protection. |
512 // This effect is applied to |frame| itself prior to mixing. |
513 if (use_limiter) { | |
514 // Divide by two to avoid saturation in the mixing. | |
515 // This is only meaningful if the limiter will be used. | |
516 *frame >>= 1; | |
500 } | 517 } |
501 | 518 RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_); |
519 *mixedAudio += *frame; | |
502 position++; | 520 position++; |
503 } | 521 } |
504 | |
505 return 0; | |
506 } | |
507 | |
508 // TODO(andrew): consolidate this function with MixFromList. | |
509 int32_t AudioMixerImpl::MixAnonomouslyFromList( | |
510 AudioFrame* mixedAudio, | |
511 const AudioFrameList& audioFrameList) const { | |
512 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | |
513 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); | |
514 | |
515 if (audioFrameList.empty()) | |
516 return 0; | |
517 | |
518 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | |
519 iter != audioFrameList.end(); ++iter) { | |
520 if (!iter->muted) { | |
521 MixFrames(mixedAudio, iter->frame, use_limiter_); | |
522 } | |
523 } | |
524 return 0; | 522 return 0; |
525 } | 523 } |
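Two pieces of arithmetic behind MixFromList(): the samples_per_channel_ DCHECK expects rate * duration / 1000 samples per channel (e.g. 48000 Hz * 10 ms / 1000 = 480), and the pre-mix >>= 1 exists because summing two near-full-scale int16 sources overflows (30000 + 30000 = 60000 > 32767). A hedged sketch of both, assuming AudioFrame::operator+= saturates on overflow:

    #include <algorithm>
    #include <cstdint>

    // Samples the DCHECK expects: 48000 * 10 / 1000 == 480.
    constexpr size_t ExpectedSamplesPerChannel(int rate_hz, int duration_ms) {
      return static_cast<size_t>(rate_hz * duration_ms / 1000);
    }

    // Halving first keeps headroom for the sum; the limiter stage later
    // restores loudness smoothly instead of hard-clipping.
    int16_t MixTwoSamplesSketch(int16_t a, int16_t b, bool use_limiter) {
      if (use_limiter) {
        a >>= 1;
        b >>= 1;
      }
      const int32_t sum = static_cast<int32_t>(a) + b;
      return static_cast<int16_t>(
          std::min<int32_t>(32767, std::max<int32_t>(-32768, sum)));
    }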
526 | 524 |
527 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { | 525 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { |
528 if (!use_limiter_) { | 526 if (!use_limiter_) { |
529 return true; | 527 return true; |
530 } | 528 } |
531 | 529 |
532 // Smoothly limit the mixed frame. | 530 // Smoothly limit the mixed frame. |
533 const int error = limiter_->ProcessStream(mixedAudio); | 531 const int error = limiter_->ProcessStream(mixedAudio); |
(...skipping 26 matching lines...) | |
560 return level; | 558 return level; |
561 } | 559 } |
562 | 560 |
563 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 561 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
564 const int level = audio_level_.LevelFullRange(); | 562 const int level = audio_level_.LevelFullRange(); |
565 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 563 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
566 "GetAudioOutputLevelFullRange() => level=%d", level); | 564 "GetAudioOutputLevelFullRange() => level=%d", level); |
567 return level; | 565 return level; |
568 } | 566 } |
569 } // namespace webrtc | 567 } // namespace webrtc |