Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2298163002: Simplifications of the mixing algorithm. (Closed)
Patch Set: Initial PS (created 4 years, 3 months ago)
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

(...skipping 18 matching lines...)
  SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before)
      : audio_source_(p),
        audio_frame_(a),
        muted_(m),
        was_mixed_before_(was_mixed_before) {
    if (!muted_) {
      energy_ = NewMixerCalculateEnergy(*a);
    }
  }

+  SourceFrame(MixerAudioSource* p,
+              AudioFrame* a,
+              bool m,
+              bool was_mixed_before,
+              uint32_t energy)
+      : audio_source_(p),
+        audio_frame_(a),
+        muted_(m),
+        energy_(energy),
+        was_mixed_before_(was_mixed_before) {}

aleloi 2016/08/31 11:34:29: Differs from the above constructor by not calculating the energy.
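
For context, a minimal compilable sketch of the two-constructor pattern (hypothetical stand-in types, not the real WebRTC classes): the overload taking an explicit energy lets callers that do not need a fresh measurement, such as the ramp bookkeeping later in this patch, skip the per-sample energy scan.

    #include <cstdint>
    #include <vector>

    // Illustrative stand-ins only; not the real WebRTC types.
    struct Frame { std::vector<int16_t> samples; };

    uint32_t CalculateEnergy(const Frame& f) {
      // O(samples) scan; this is the work the second constructor avoids.
      uint64_t acc = 0;
      for (int16_t s : f.samples) acc += static_cast<int64_t>(s) * s;
      return static_cast<uint32_t>(acc);
    }

    struct SourceFrameSketch {
      // Measures energy on construction unless muted.
      SourceFrameSketch(Frame* f, bool muted) : frame_(f), muted_(muted) {
        if (!muted_) energy_ = CalculateEnergy(*f);
      }
      // Accepts a caller-supplied (or dummy) energy and skips the scan.
      SourceFrameSketch(Frame* f, bool muted, uint32_t energy)
          : frame_(f), muted_(muted), energy_(energy) {}

      Frame* frame_;
      bool muted_;
      uint32_t energy_ = 0;
    };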
  // a.shouldMixBefore(b) is used to select mixer participants.
  bool shouldMixBefore(const SourceFrame& other) const {
    if (muted_ != other.muted_) {
      return other.muted_;
    }

    auto our_activity = audio_frame_->vad_activity_;
    auto other_activity = other.audio_frame_->vad_activity_;

    if (our_activity != other_activity) {
(...skipping 13 matching lines...)
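
Later in the file, the mixer sorts SourceFrames with this member function via std::mem_fn. A compilable sketch of that pattern, with simplified fields and an assumed energy tie-break (the energy comparison falls inside the skipped lines above):

    #include <algorithm>
    #include <cstdint>
    #include <functional>
    #include <vector>

    struct S {
      bool muted;
      bool active;      // stand-in for vad_activity_
      uint32_t energy;  // assumed tie-break: higher energy first

      // Strict weak ordering: unmuted before muted, voice-active before
      // passive, then higher energy first.
      bool shouldMixBefore(const S& other) const {
        if (muted != other.muted) return other.muted;
        if (active != other.active) return active;
        return energy > other.energy;
      }
    };

    int main() {
      std::vector<S> v = {{true, false, 0}, {false, true, 7}, {false, false, 9}};
      // std::mem_fn adapts the member function into a binary comparator.
      std::sort(v.begin(), v.end(), std::mem_fn(&S::shouldMixBefore));
      // Order is now: active talker (energy 7), passive unmuted (9), muted.
    }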
// Remixes a frame between stereo and mono.
void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
  if (frame->num_channels_ == 1 && number_of_channels == 2) {
    AudioFrameOperations::MonoToStereo(frame);
  } else if (frame->num_channels_ == 2 && number_of_channels == 1) {
    AudioFrameOperations::StereoToMono(frame);
  }
}

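As a rough illustration of what the two AudioFrameOperations calls do conceptually (interleaved int16 samples assumed; the real implementations operate on AudioFrame in place):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Mono -> stereo: duplicate each sample into left and right.
    std::vector<int16_t> MonoToStereoSketch(const std::vector<int16_t>& mono) {
      std::vector<int16_t> stereo;
      stereo.reserve(mono.size() * 2);
      for (int16_t s : mono) {
        stereo.push_back(s);  // left
        stereo.push_back(s);  // right
      }
      return stereo;
    }

    // Stereo -> mono: average each interleaved L/R pair.
    std::vector<int16_t> StereoToMonoSketch(const std::vector<int16_t>& stereo) {
      std::vector<int16_t> mono;
      mono.reserve(stereo.size() / 2);
      for (size_t i = 0; i + 1 < stereo.size(); i += 2) {
        mono.push_back(static_cast<int16_t>((stereo[i] + stereo[i + 1]) / 2));
      }
      return mono;
    }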
-// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
-// These effects are applied to |frame| itself prior to mixing. Assumes that
-// |mixed_frame| always has at least as many channels as |frame|. Supports
-// stereo at most.
-//
-void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
-  RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_);
-  if (use_limiter) {
-    // Divide by two to avoid saturation in the mixing.
-    // This is only meaningful if the limiter will be used.
-    *frame >>= 1;
-  }
-  RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_);
-  *mixed_frame += *frame;
-}
+void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
+  for (const auto& source_frame : mixed_sources_and_frames) {
+    // Ramp in previously unmixed.
+    if (!source_frame.was_mixed_before_) {
+      NewMixerRampIn(source_frame.audio_frame_);
+    }
+
+    const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed();
+    // Ramp out currently unmixed.
+    if (source_frame.was_mixed_before_ && !is_mixed) {
+      NewMixerRampOut(source_frame.audio_frame_);
+    }
+  }
+}

aleloi 2016/08/31 11:34:29: MixFrames is now done in MixFromList.

aleloi 2016/08/31 11:34:29: Ramping in/out should be done both for anonymous and non-anonymous sources.

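NewMixerRampIn and NewMixerRampOut are WebRTC helpers not shown in this diff; conceptually they apply a short gain ramp so a stream entering or leaving the mix does not produce an audible click. A minimal sketch of a linear ramp-in under that assumption:

    #include <cstddef>
    #include <cstdint>

    // Fade a buffer from silence up to full gain over its length
    // (illustrative; the real helpers operate on AudioFrame).
    void RampInSketch(int16_t* samples, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        const float gain = static_cast<float>(i + 1) / count;
        samples[i] = static_cast<int16_t>(samples[i] * gain);
      }
    }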
}  // namespace

MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}

MixerAudioSource::~MixerAudioSource() {
  delete _mixHistory;
}

(...skipping 96 matching lines...)
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                 "Invalid frequency: %d", sample_rate);
    RTC_NOTREACHED();
    return;
  }

  if (OutputFrequency() != sample_rate) {
    SetOutputFrequency(static_cast<Frequency>(sample_rate));
  }

-  AudioFrameList mixList;
-  AudioFrameList additionalFramesList;
+  AudioFrameList mix_list;
+  AudioFrameList anonymous_mix_list;
  {
    CriticalSectionScoped cs(crit_.get());
-    mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources);
-    GetAdditionalAudio(&additionalFramesList);
+    mix_list = GetNonAnonymousAudio();
+    anonymous_mix_list = GetAnonymousAudio();
  }

-  for (FrameAndMuteInfo& frame_and_mute : mixList) {
-    RemixFrame(frame_and_mute.frame, number_of_channels);
-  }
-  for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) {
-    RemixFrame(frame_and_mute.frame, number_of_channels);
+  mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(),
+                  anonymous_mix_list.end());
+
+  for (const auto& frame : mix_list) {
+    RemixFrame(frame, number_of_channels);
  }

  audio_frame_for_mixing->UpdateFrame(
      -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
      AudioFrame::kVadPassive, number_of_channels);

  time_stamp_ += static_cast<uint32_t>(sample_size_);

  use_limiter_ = num_mixed_audio_sources_ > 1;

-  // We only use the limiter if it supports the output sample rate and
-  // we're actually mixing multiple streams.
-  MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_);
-  MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
+  // We only use the limiter if we're actually mixing multiple streams.
+  MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_);
+
  if (audio_frame_for_mixing->samples_per_channel_ == 0) {
    // Nothing was mixed, set the audio samples to silence.
    audio_frame_for_mixing->samples_per_channel_ = sample_size_;
    audio_frame_for_mixing->Mute();
  } else {
    // Only call the limiter if we have something to mix.
    LimitMixedAudio(audio_frame_for_mixing);
  }

  // Pass the final result to the level indicator.
(...skipping 95 matching lines...)
             ? 0
             : -1;
}

bool AudioMixerImpl::AnonymousMixabilityStatus(
    const MixerAudioSource& audio_source) const {
  CriticalSectionScoped cs(crit_.get());
  return IsAudioSourceInList(audio_source, additional_audio_source_list_);
}

-AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const {
+AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
+               "GetNonAnonymousAudio()");
  AudioFrameList result;
  std::vector<SourceFrame> audioSourceMixingDataList;
+  std::vector<SourceFrame> ramp_list;

aleloi 2016/08/31 11:34:29: See comment about the 2nd SourceFrame constructor.

  // Get audio source audio and put it in the struct vector.
  for (MixerAudioSource* audio_source : audio_source_list_) {
    auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
        id_, static_cast<int>(output_frequency_));

    auto audio_frame_info = audio_frame_with_info.audio_frame_info;
    AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;

    if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "failed to GetAudioFrameWithMuted() from participant");
      continue;
    }
    audioSourceMixingDataList.emplace_back(
        audio_source, audio_source_audio_frame,
        audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
        audio_source->_mixHistory->WasMixed());
  }

  // Sort frames by sorting function.
  std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
            std::mem_fn(&SourceFrame::shouldMixBefore));

+  int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources;
  // Go through list in order and put things in mixList.
  for (SourceFrame& p : audioSourceMixingDataList) {
    // Filter muted.
    if (p.muted_) {
      p.audio_source_->_mixHistory->SetIsMixed(false);
      continue;
    }

    // Add frame to result vector for mixing.
    bool is_mixed = false;
    if (maxAudioFrameCounter > 0) {
      --maxAudioFrameCounter;
-      if (!p.was_mixed_before_) {
-        NewMixerRampIn(p.audio_frame_);
-      }
-      result.emplace_back(p.audio_frame_, false);
+      result.push_back(p.audio_frame_);
+      ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false,
+                             p.was_mixed_before_, -1);
      is_mixed = true;
    }
-
-    // Ramp out unmuted.
-    if (p.was_mixed_before_ && !is_mixed) {
-      NewMixerRampOut(p.audio_frame_);
-      result.emplace_back(p.audio_frame_, false);
-    }
-
    p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
  }
+  Ramp(ramp_list);
  return result;
}
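
One subtlety in the ramp_list.emplace_back call above: -1 is passed as the energy argument, but the SourceFrame constructor takes uint32_t, so the value converts (well-defined modular conversion) to 0xFFFFFFFF. That is harmless only because the energy is never read on this path. A two-line check of the conversion:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t energy = -1;  // modular conversion: 2^32 - 1
      assert(energy == 0xFFFFFFFFu);
    }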

-void AudioMixerImpl::GetAdditionalAudio(
-    AudioFrameList* additionalFramesList) const {
+AudioFrameList AudioMixerImpl::GetAnonymousAudio() const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
-               "GetAdditionalAudio(additionalFramesList)");
+               "GetAnonymousAudio()");
  // The GetAudioFrameWithMuted() callback may result in the audio source being
  // removed from additionalAudioFramesList_. If that happens it will
  // invalidate any iterators. Create a copy of the audio sources list such
  // that the list of participants can be traversed safely.
+  std::vector<SourceFrame> ramp_list;
  MixerAudioSourceList additionalAudioSourceList;
+  AudioFrameList result;
  additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
                                   additional_audio_source_list_.begin(),
                                   additional_audio_source_list_.end());

  for (MixerAudioSourceList::const_iterator audio_source =
           additionalAudioSourceList.begin();
       audio_source != additionalAudioSourceList.end(); ++audio_source) {
    auto audio_frame_with_info =
        (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_);
    auto ret = audio_frame_with_info.audio_frame_info;
    AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "failed to GetAudioFrameWithMuted() from audio_source");
      continue;
    }
-    if (audio_frame->samples_per_channel_ == 0) {
-      // Empty frame. Don't use it.
-      continue;
-    }
-    additionalFramesList->push_back(FrameAndMuteInfo(
-        audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
+    if (ret != MixerAudioSource::AudioFrameInfo::kMuted) {
+      result.push_back(audio_frame);
+      ramp_list.emplace_back((*audio_source), audio_frame, false,
+                             (*audio_source)->_mixHistory->IsMixed(), -1);
+      (*audio_source)->_mixHistory->SetIsMixed(true);
+    }

aleloi 2016/08/31 11:34:29: Replaced this with a RTC_DCHECK_EQ in MixFromList.

  }
+  Ramp(ramp_list);
+  return result;
}
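
The snapshot-before-iterating pattern in the comment above is worth calling out; a minimal sketch of it, assuming a hypothetical Source whose callback may mutate the original registry:

    #include <list>

    struct Source {
      // Hypothetical callback; in the mixer, GetAudioFrameWithMuted() may
      // cause the source to be removed from the registry.
      bool GetAudio() { return true; }
    };

    void VisitAllSafely(const std::list<Source*>& registry) {
      // Snapshot first: removal from |registry| during the loop would
      // invalidate iterators into the original container.
      const std::list<Source*> snapshot(registry.begin(), registry.end());
      for (Source* s : snapshot) {
        s->GetAudio();
      }
    }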

bool AudioMixerImpl::IsAudioSourceInList(
    const MixerAudioSource& audio_source,
    const MixerAudioSourceList& audioSourceList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "IsAudioSourceInList(audio_source,audioSourceList)");
  return std::find(audioSourceList.begin(), audioSourceList.end(),
                   &audio_source) != audioSourceList.end();
}
(...skipping 28 matching lines...)

int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio,
                                    const AudioFrameList& audioFrameList,
                                    int32_t id,
                                    bool use_limiter) {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
               "MixFromList(mixedAudio, audioFrameList)");
  if (audioFrameList.empty())
    return 0;

  uint32_t position = 0;

ivoc 2016/08/31 15:01:06: It looks like this is not used anywhere, if so, please remove it.
aleloi 2016/08/31 15:25:34: Removed in dependent CL, https://codereview.webrtc

  if (audioFrameList.size() == 1) {
-    mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
-    mixedAudio->elapsed_time_ms_ =
-        audioFrameList.front().frame->elapsed_time_ms_;
+    mixedAudio->timestamp_ = audioFrameList.front()->timestamp_;
+    mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_;
  } else {
    // TODO(wu): Issue 3390.
    // Audio frame timestamp is only supported in one channel case.
    mixedAudio->timestamp_ = 0;
    mixedAudio->elapsed_time_ms_ = -1;
  }

-  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
-       iter != audioFrameList.end(); ++iter) {
-    if (!iter->muted) {
-      MixFrames(mixedAudio, iter->frame, use_limiter);
+  for (const auto& frame : audioFrameList) {
+    RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_);
+    RTC_DCHECK_EQ(
+        frame->samples_per_channel_,
+        static_cast<size_t>(
+            (mixedAudio->sample_rate_hz_ * kFrameDurationInMs) / 1000));
+
+    // Mix |frame| into |mixedAudio|, with saturation protection.
+    // This effect is applied to |frame| itself prior to mixing.
+    if (use_limiter) {
+      // Divide by two to avoid saturation in the mixing.
+      // This is only meaningful if the limiter will be used.
+      *frame >>= 1;
    }

ivoc 2016/08/31 15:01:06: Should we try to come up with a better approach here?
aleloi 2016/08/31 15:25:34: I think that was the decision. I am going to take

+    RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_);
+    *mixedAudio += *frame;
    position++;
  }

-  return 0;
-}
-
-// TODO(andrew): consolidate this function with MixFromList.

aleloi 2016/08/31 11:34:29: Done! It took only five years or so :)

-int32_t AudioMixerImpl::MixAnonomouslyFromList(
-    AudioFrame* mixedAudio,
-    const AudioFrameList& audioFrameList) const {
-  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
-               "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
-
-  if (audioFrameList.empty())
-    return 0;
-
-  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
-       iter != audioFrameList.end(); ++iter) {
-    if (!iter->muted) {
-      MixFrames(mixedAudio, iter->frame, use_limiter_);
-    }
-  }
  return 0;
}
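
The >>= 1 exists for saturation headroom: two full-scale int16 streams can sum to roughly twice the representable range, so each stream is halved before summation and the limiter later restores loudness. Worked numbers in a standalone snippet:

    #include <cstdint>
    #include <iostream>

    int main() {
      const int16_t a = 30000, b = 30000;
      // Summed at full scale, 60000 cannot be stored back into an int16_t
      // sample (max 32767); it would wrap or need saturation.
      const int32_t unsafe_sum = int32_t{a} + b;     // 60000
      // One bit of headroom per stream keeps the sum representable.
      const int32_t safe_sum = (a >> 1) + (b >> 1);  // 30000
      std::cout << unsafe_sum << " " << safe_sum << "\n";
    }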

bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
  if (!use_limiter_) {
    return true;
  }

  // Smoothly limit the mixed frame.
  const int error = limiter_->ProcessStream(mixedAudio);
(...skipping 26 matching lines...)
  return level;
}

int AudioMixerImpl::GetOutputAudioLevelFullRange() {
  const int level = audio_level_.LevelFullRange();
  WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
               "GetAudioOutputLevelFullRange() => level=%d", level);
  return level;
}
}  // namespace webrtc