| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 19 matching lines...) |
| 30 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 30 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) |
| 31 : audio_source_(p), | 31 : audio_source_(p), |
| 32 audio_frame_(a), | 32 audio_frame_(a), |
| 33 muted_(m), | 33 muted_(m), |
| 34 was_mixed_before_(was_mixed_before) { | 34 was_mixed_before_(was_mixed_before) { |
| 35 if (!muted_) { | 35 if (!muted_) { |
| 36 energy_ = NewMixerCalculateEnergy(*a); | 36 energy_ = NewMixerCalculateEnergy(*a); |
| 37 } | 37 } |
| 38 } | 38 } |
| 39 | 39 |
| 40 SourceFrame(MixerAudioSource* p, |
| 41 AudioFrame* a, |
| 42 bool m, |
| 43 bool was_mixed_before, |
| 44 uint32_t energy) |
| 45 : audio_source_(p), |
| 46 audio_frame_(a), |
| 47 muted_(m), |
| 48 energy_(energy), |
| 49 was_mixed_before_(was_mixed_before) {} |
| 50 |
| 40 // a.shouldMixBefore(b) is used to select mixer participants. | 51 // a.shouldMixBefore(b) is used to select mixer participants. |
| 41 bool shouldMixBefore(const SourceFrame& other) const { | 52 bool shouldMixBefore(const SourceFrame& other) const { |
| 42 if (muted_ != other.muted_) { | 53 if (muted_ != other.muted_) { |
| 43 return other.muted_; | 54 return other.muted_; |
| 44 } | 55 } |
| 45 | 56 |
| 46 auto our_activity = audio_frame_->vad_activity_; | 57 auto our_activity = audio_frame_->vad_activity_; |
| 47 auto other_activity = other.audio_frame_->vad_activity_; | 58 auto other_activity = other.audio_frame_->vad_activity_; |
| 48 | 59 |
| 49 if (our_activity != other_activity) { | 60 if (our_activity != other_activity) { |
| (...skipping 13 matching lines...) |
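The tail of `shouldMixBefore()` is collapsed above. As a reading aid, here is a minimal self-contained sketch of the ordering it appears to implement: unmuted before muted, voice-active before passive, then higher energy first. The struct and field names are hypothetical stand-ins, and the energy tiebreak is an assumption since that part of the diff is elided.

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

// Hypothetical stand-in for SourceFrame; only the fields the comparator
// needs are modeled here.
struct FrameSketch {
  bool muted = false;
  bool vad_active = false;
  uint32_t energy = 0;

  bool ShouldMixBefore(const FrameSketch& other) const {
    if (muted != other.muted)
      return other.muted;  // Unmuted frames sort first.
    if (vad_active != other.vad_active)
      return vad_active;  // Voice-active frames sort first.
    return energy > other.energy;  // Assumed tiebreak: louder first.
  }
};

// Mirrors the std::sort + std::mem_fn call used later in this file.
void SortForMixing(std::vector<FrameSketch>* frames) {
  std::sort(frames->begin(), frames->end(),
            std::mem_fn(&FrameSketch::ShouldMixBefore));
}
```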
| 63 // Remixes a frame between stereo and mono. | 74 // Remixes a frame between stereo and mono. |
| 64 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { | 75 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { |
| 65 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 76 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
| 66 if (frame->num_channels_ == 1 && number_of_channels == 2) { | 77 if (frame->num_channels_ == 1 && number_of_channels == 2) { |
| 67 AudioFrameOperations::MonoToStereo(frame); | 78 AudioFrameOperations::MonoToStereo(frame); |
| 68 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { | 79 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { |
| 69 AudioFrameOperations::StereoToMono(frame); | 80 AudioFrameOperations::StereoToMono(frame); |
| 70 } | 81 } |
| 71 } | 82 } |
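`RemixFrame()` only dispatches to `AudioFrameOperations`. For orientation, a sketch of what the two conversions conventionally do: duplicate the mono channel on upmix, average the interleaved pair on downmix. This is an assumption about the helpers' behavior, not a copy of their implementation.

```cpp
#include <cstddef>
#include <cstdint>

// Upmix: write the mono sample into both interleaved output channels.
void MonoToStereoSketch(const int16_t* mono, int16_t* stereo, size_t samples) {
  for (size_t i = 0; i < samples; ++i) {
    stereo[2 * i] = mono[i];      // Left.
    stereo[2 * i + 1] = mono[i];  // Right.
  }
}

// Downmix: average each interleaved pair, summing in 32 bits to avoid
// int16 overflow before halving.
void StereoToMonoSketch(const int16_t* stereo, int16_t* mono, size_t samples) {
  for (size_t i = 0; i < samples; ++i) {
    mono[i] = static_cast<int16_t>(
        (static_cast<int32_t>(stereo[2 * i]) + stereo[2 * i + 1]) / 2);
  }
}
```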
| 72 | 83 |
| 73 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 84 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
| 74 // These effects are applied to |frame| itself prior to mixing. Assumes that | 85 for (const auto& source_frame : mixed_sources_and_frames) { |
| 75 // |mixed_frame| always has at least as many channels as |frame|. Supports | 86 // Ramp in previously unmixed. |
| 76 // stereo at most. | 87 if (!source_frame.was_mixed_before_) { |
| 77 // | 88 NewMixerRampIn(source_frame.audio_frame_); |
| 78 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 89 } |
| 79 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); | 90 |
| 80 if (use_limiter) { | 91 const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed(); |
| 81 // Divide by two to avoid saturation in the mixing. | 92 // Ramp out currently unmixed. |
| 82 // This is only meaningful if the limiter will be used. | 93 if (source_frame.was_mixed_before_ && !is_mixed) { |
| 83 *frame >>= 1; | 94 NewMixerRampOut(source_frame.audio_frame_); |
| 95 } |
| 84 } | 96 } |
| 85 RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_); | |
| 86 *mixed_frame += *frame; | |
| 87 } | 97 } |
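The new `Ramp()` helper centralizes the `NewMixerRampIn`/`NewMixerRampOut` calls that were previously inlined in the selection loop. A minimal sketch of a ramp-in, assuming a linear gain sweep from 0 to 1 across one frame; the real helpers may shape the ramp differently.

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative only: fade a frame's samples in linearly so that a
// source entering the mix does not click. A ramp-out would sweep the
// gain from 1 down to 0 instead.
void LinearRampInSketch(int16_t* samples, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    const float gain = static_cast<float>(i + 1) / static_cast<float>(count);
    samples[i] = static_cast<int16_t>(samples[i] * gain);
  }
}
```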
| 88 | 98 |
| 89 } // namespace | 99 } // namespace |
| 90 | 100 |
| 91 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {} | 101 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {} |
| 92 | 102 |
| 93 MixerAudioSource::~MixerAudioSource() { | 103 MixerAudioSource::~MixerAudioSource() { |
| 94 delete _mixHistory; | 104 delete _mixHistory; |
| 95 } | 105 } |
| 96 | 106 |
| (...skipping 84 matching lines...) |
| 181 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 191 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
| 182 "Invalid frequency: %d", sample_rate); | 192 "Invalid frequency: %d", sample_rate); |
| 183 RTC_NOTREACHED(); | 193 RTC_NOTREACHED(); |
| 184 return; | 194 return; |
| 185 } | 195 } |
| 186 | 196 |
| 187 if (OutputFrequency() != sample_rate) { | 197 if (OutputFrequency() != sample_rate) { |
| 188 SetOutputFrequency(static_cast<Frequency>(sample_rate)); | 198 SetOutputFrequency(static_cast<Frequency>(sample_rate)); |
| 189 } | 199 } |
| 190 | 200 |
| 191 AudioFrameList mixList; | 201 AudioFrameList mix_list; |
| 192 AudioFrameList additionalFramesList; | 202 AudioFrameList anonymous_mix_list; |
| 193 int num_mixed_audio_sources; | 203 int num_mixed_audio_sources; |
| 194 { | 204 { |
| 195 rtc::CritScope lock(&crit_); | 205 rtc::CritScope lock(&crit_); |
| 196 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); | 206 mix_list = GetNonAnonymousAudio(); |
| 197 GetAdditionalAudio(&additionalFramesList); | 207 anonymous_mix_list = GetAnonymousAudio(); |
| 198 num_mixed_audio_sources = static_cast<int>(num_mixed_audio_sources_); | 208 num_mixed_audio_sources = static_cast<int>(num_mixed_audio_sources_); |
| 199 } | 209 } |
| 200 | 210 |
| 201 for (FrameAndMuteInfo& frame_and_mute : mixList) { | 211 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), |
| 202 RemixFrame(frame_and_mute.frame, number_of_channels); | 212 anonymous_mix_list.end()); |
| 203 } | 213 |
| 204 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { | 214 for (const auto& frame : mix_list) { |
| 205 RemixFrame(frame_and_mute.frame, number_of_channels); | 215 RemixFrame(frame, number_of_channels); |
| 206 } | 216 } |
| 207 | 217 |
| 208 audio_frame_for_mixing->UpdateFrame( | 218 audio_frame_for_mixing->UpdateFrame( |
| 209 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, | 219 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, |
| 210 AudioFrame::kVadPassive, number_of_channels); | 220 AudioFrame::kVadPassive, number_of_channels); |
| 211 | 221 |
| 212 time_stamp_ += static_cast<uint32_t>(sample_size_); | 222 time_stamp_ += static_cast<uint32_t>(sample_size_); |
| 213 | 223 |
| 214 use_limiter_ = num_mixed_audio_sources > 1; | 224 use_limiter_ = num_mixed_audio_sources > 1; |
| 215 | 225 |
| 216 // We only use the limiter if it supports the output sample rate and | 226 // We only use the limiter if we're actually mixing multiple streams. |
| 217 // we're actually mixing multiple streams. | 227 MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_); |
| 218 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); | 228 |
| 219 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | |
| 220 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 229 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
| 221 // Nothing was mixed; set the audio samples to silence. | 230 // Nothing was mixed; set the audio samples to silence. |
| 222 audio_frame_for_mixing->samples_per_channel_ = sample_size_; | 231 audio_frame_for_mixing->samples_per_channel_ = sample_size_; |
| 223 audio_frame_for_mixing->Mute(); | 232 audio_frame_for_mixing->Mute(); |
| 224 } else { | 233 } else { |
| 225 // Only call the limiter if we have something to mix. | 234 // Only call the limiter if we have something to mix. |
| 226 LimitMixedAudio(audio_frame_for_mixing); | 235 LimitMixedAudio(audio_frame_for_mixing); |
| 227 } | 236 } |
| 228 | 237 |
| 229 // Pass the final result to the level indicator. | 238 // Pass the final result to the level indicator. |
| (...skipping 95 matching lines...) |
| 325 ? 0 | 334 ? 0 |
| 326 : -1; | 335 : -1; |
| 327 } | 336 } |
| 328 | 337 |
| 329 bool AudioMixerImpl::AnonymousMixabilityStatus( | 338 bool AudioMixerImpl::AnonymousMixabilityStatus( |
| 330 const MixerAudioSource& audio_source) const { | 339 const MixerAudioSource& audio_source) const { |
| 331 rtc::CritScope lock(&crit_); | 340 rtc::CritScope lock(&crit_); |
| 332 return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 341 return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
| 333 } | 342 } |
| 334 | 343 |
| 335 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { | 344 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const { |
| 336 RTC_DCHECK_RUN_ON(&thread_checker_); | 345 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 346 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
| 347 "GetNonAnonymousAudio()"); |
| 337 AudioFrameList result; | 348 AudioFrameList result; |
| 338 std::vector<SourceFrame> audioSourceMixingDataList; | 349 std::vector<SourceFrame> audioSourceMixingDataList; |
| 350 std::vector<SourceFrame> ramp_list; |
| 339 | 351 |
| 340 // Get each audio source's frame and append it to the struct vector. | 352 // Get each audio source's frame and append it to the struct vector. |
| 341 for (MixerAudioSource* audio_source : audio_source_list_) { | 353 for (MixerAudioSource* audio_source : audio_source_list_) { |
| 342 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 354 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( |
| 343 id_, static_cast<int>(output_frequency_)); | 355 id_, static_cast<int>(output_frequency_)); |
| 344 | 356 |
| 345 auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 357 auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
| 346 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 358 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
| 347 | 359 |
| 348 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | 360 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { |
| 349 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 361 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
| 350 "failed to GetAudioFrameWithMuted() from participant"); | 362 "failed to GetAudioFrameWithMuted() from participant"); |
| 351 continue; | 363 continue; |
| 352 } | 364 } |
| 353 audioSourceMixingDataList.emplace_back( | 365 audioSourceMixingDataList.emplace_back( |
| 354 audio_source, audio_source_audio_frame, | 366 audio_source, audio_source_audio_frame, |
| 355 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, | 367 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, |
| 356 audio_source->_mixHistory->WasMixed()); | 368 audio_source->_mixHistory->WasMixed()); |
| 357 } | 369 } |
| 358 | 370 |
| 359 // Sort frames in mixing-preference order using shouldMixBefore(). | 371 // Sort frames in mixing-preference order using shouldMixBefore(). |
| 360 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), | 372 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), |
| 361 std::mem_fn(&SourceFrame::shouldMixBefore)); | 373 std::mem_fn(&SourceFrame::shouldMixBefore)); |
| 362 | 374 |
| 375 int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources; |
| 363 // Go through the list in order and add eligible frames to the mix. | 376 // Go through the list in order and add eligible frames to the mix. |
| 364 for (SourceFrame& p : audioSourceMixingDataList) { | 377 for (SourceFrame& p : audioSourceMixingDataList) { |
| 365 // Filter muted. | 378 // Filter muted. |
| 366 if (p.muted_) { | 379 if (p.muted_) { |
| 367 p.audio_source_->_mixHistory->SetIsMixed(false); | 380 p.audio_source_->_mixHistory->SetIsMixed(false); |
| 368 continue; | 381 continue; |
| 369 } | 382 } |
| 370 | 383 |
| 371 // Add frame to result vector for mixing. | 384 // Add frame to result vector for mixing. |
| 372 bool is_mixed = false; | 385 bool is_mixed = false; |
| 373 if (maxAudioFrameCounter > 0) { | 386 if (maxAudioFrameCounter > 0) { |
| 374 --maxAudioFrameCounter; | 387 --maxAudioFrameCounter; |
| 375 if (!p.was_mixed_before_) { | 388 result.push_back(p.audio_frame_); |
| 376 NewMixerRampIn(p.audio_frame_); | 389 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, |
| 377 } | 390 p.was_mixed_before_, -1); |
| 378 result.emplace_back(p.audio_frame_, false); | |
| 379 is_mixed = true; | 391 is_mixed = true; |
| 380 } | 392 } |
| 381 | |
| 382 // Ramp out unmuted. | |
| 383 if (p.was_mixed_before_ && !is_mixed) { | |
| 384 NewMixerRampOut(p.audio_frame_); | |
| 385 result.emplace_back(p.audio_frame_, false); | |
| 386 } | |
| 387 | |
| 388 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); | 393 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); |
| 389 } | 394 } |
| 395 Ramp(ramp_list); |
| 390 return result; | 396 return result; |
| 391 } | 397 } |
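To summarize the selection step above: after sorting, the loop mixes at most `kMaximumAmountOfMixedAudioSources` unmuted frames and records every other source as unmixed. A compact sketch with hypothetical simplified types:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical simplified source record.
struct SourceSketch {
  bool muted = false;
  bool is_mixed = false;  // Written back, like SetIsMixed() above.
};

// Pick at most |max_count| unmuted sources, in their sorted order.
std::vector<SourceSketch*> SelectForMixing(std::vector<SourceSketch>* sorted,
                                           size_t max_count) {
  std::vector<SourceSketch*> selected;
  for (SourceSketch& s : *sorted) {
    const bool mix = !s.muted && selected.size() < max_count;
    s.is_mixed = mix;  // Muted or over-budget sources are marked unmixed.
    if (mix)
      selected.push_back(&s);
  }
  return selected;
}
```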
| 392 | 398 |
| 393 void AudioMixerImpl::GetAdditionalAudio( | 399 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { |
| 394 AudioFrameList* additionalFramesList) const { | |
| 395 RTC_DCHECK_RUN_ON(&thread_checker_); | 400 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 396 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 401 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
| 397 "GetAdditionalAudio(additionalFramesList)"); | 402 "GetAnonymousAudio()"); |
| 398 // The GetAudioFrameWithMuted() callback may result in the audio source being | 403 // The GetAudioFrameWithMuted() callback may result in the audio source being |
| 399 // removed from additional_audio_source_list_. If that happens it will | 404 // removed from additional_audio_source_list_. If that happens it will |
| 400 // invalidate any iterators. Create a copy of the audio sources list such | 405 // invalidate any iterators. Create a copy of the audio sources list such |
| 401 // that the list of participants can be traversed safely. | 406 // that the list of participants can be traversed safely. |
| 407 std::vector<SourceFrame> ramp_list; |
| 402 MixerAudioSourceList additionalAudioSourceList; | 408 MixerAudioSourceList additionalAudioSourceList; |
| 409 AudioFrameList result; |
| 403 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), | 410 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), |
| 404 additional_audio_source_list_.begin(), | 411 additional_audio_source_list_.begin(), |
| 405 additional_audio_source_list_.end()); | 412 additional_audio_source_list_.end()); |
| 406 | 413 |
| 407 for (MixerAudioSourceList::const_iterator audio_source = | 414 for (MixerAudioSourceList::const_iterator audio_source = |
| 408 additionalAudioSourceList.begin(); | 415 additionalAudioSourceList.begin(); |
| 409 audio_source != additionalAudioSourceList.end(); ++audio_source) { | 416 audio_source != additionalAudioSourceList.end(); ++audio_source) { |
| 410 auto audio_frame_with_info = | 417 auto audio_frame_with_info = |
| 411 (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_); | 418 (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_); |
| 412 auto ret = audio_frame_with_info.audio_frame_info; | 419 auto ret = audio_frame_with_info.audio_frame_info; |
| 413 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 420 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
| 414 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 421 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
| 415 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 422 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
| 416 "failed to GetAudioFrameWithMuted() from audio_source"); | 423 "failed to GetAudioFrameWithMuted() from audio_source"); |
| 417 continue; | 424 continue; |
| 418 } | 425 } |
| 419 if (audio_frame->samples_per_channel_ == 0) { | 426 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { |
| 420 // Empty frame. Don't use it. | 427 result.push_back(audio_frame); |
| 421 continue; | 428 ramp_list.emplace_back(*audio_source, audio_frame, false, |
| 429 (*audio_source)->_mixHistory->IsMixed(), -1); |
| 430 (*audio_source)->_mixHistory->SetIsMixed(true); |
| 422 } | 431 } |
| 423 additionalFramesList->push_back(FrameAndMuteInfo( | |
| 424 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | |
| 425 } | 432 } |
| 433 Ramp(ramp_list); |
| 434 return result; |
| 426 } | 435 } |
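`GetAnonymousAudio()` iterates over a snapshot because, as the comment above notes, the `GetAudioFrameWithMuted()` callback can remove a source from the live list mid-loop. The pattern in isolation, with hypothetical types:

```cpp
#include <vector>

// Copy-then-iterate: the callback may erase entries from |live_list|,
// which would invalidate iterators into it, so we walk a snapshot.
template <typename Source, typename Callback>
void ForEachOnSnapshot(const std::vector<Source*>& live_list,
                       Callback callback) {
  const std::vector<Source*> snapshot = live_list;
  for (Source* source : snapshot)
    callback(source);
}
```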
| 427 | 436 |
| 428 bool AudioMixerImpl::IsAudioSourceInList( | 437 bool AudioMixerImpl::IsAudioSourceInList( |
| 429 const MixerAudioSource& audio_source, | 438 const MixerAudioSource& audio_source, |
| 430 const MixerAudioSourceList& audioSourceList) const { | 439 const MixerAudioSourceList& audioSourceList) const { |
| 431 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
| 432 "IsAudioSourceInList(audio_source,audioSourceList)"); | 441 "IsAudioSourceInList(audio_source,audioSourceList)"); |
| 433 return std::find(audioSourceList.begin(), audioSourceList.end(), | 442 return std::find(audioSourceList.begin(), audioSourceList.end(), |
| 434 &audio_source) != audioSourceList.end(); | 443 &audio_source) != audioSourceList.end(); |
| 435 } | 444 } |
| (...skipping 31 matching lines...) |
| 467 int32_t id, | 476 int32_t id, |
| 468 bool use_limiter) { | 477 bool use_limiter) { |
| 469 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | 478 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
| 470 "MixFromList(mixedAudio, audioFrameList)"); | 479 "MixFromList(mixedAudio, audioFrameList)"); |
| 471 if (audioFrameList.empty()) | 480 if (audioFrameList.empty()) |
| 472 return 0; | 481 return 0; |
| 473 | 482 |
| 474 uint32_t position = 0; | 483 uint32_t position = 0; |
| 475 | 484 |
| 476 if (audioFrameList.size() == 1) { | 485 if (audioFrameList.size() == 1) { |
| 477 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; | 486 mixedAudio->timestamp_ = audioFrameList.front()->timestamp_; |
| 478 mixedAudio->elapsed_time_ms_ = | 487 mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_; |
| 479 audioFrameList.front().frame->elapsed_time_ms_; | |
| 480 } else { | 488 } else { |
| 481 // TODO(wu): Issue 3390. | 489 // TODO(wu): Issue 3390. |
| 482 // Audio frame timestamp is only supported in the one-channel case. | 490 // Audio frame timestamp is only supported in the one-channel case. |
| 483 mixedAudio->timestamp_ = 0; | 491 mixedAudio->timestamp_ = 0; |
| 484 mixedAudio->elapsed_time_ms_ = -1; | 492 mixedAudio->elapsed_time_ms_ = -1; |
| 485 } | 493 } |
| 486 | 494 |
| 487 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | 495 for (const auto& frame : audioFrameList) { |
| 488 iter != audioFrameList.end(); ++iter) { | 496 RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_); |
| 489 if (!iter->muted) { | 497 RTC_DCHECK_EQ( |
| 490 MixFrames(mixedAudio, iter->frame, use_limiter); | 498 frame->samples_per_channel_, |
| 499 static_cast<size_t>((mixedAudio->sample_rate_hz_ * kFrameDurationInMs) / |
| 500 1000)); |
| 501 |
| 502 // Mix |frame| into |mixedAudio|, with saturation protection. |
| 503 // This effect is applied to |frame| itself prior to mixing. |
| 504 if (use_limiter) { |
| 505 // Divide by two to avoid saturation in the mixing. |
| 506 // This is only meaningful if the limiter will be used. |
| 507 *frame >>= 1; |
| 491 } | 508 } |
| 492 | 509 RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_); |
| 510 *mixedAudio += *frame; |
| 493 position++; | 511 position++; |
| 494 } | 512 } |
| 495 | |
| 496 return 0; | |
| 497 } | |
| 498 | |
| 499 // TODO(andrew): consolidate this function with MixFromList. | |
| 500 int32_t AudioMixerImpl::MixAnonomouslyFromList( | |
| 501 AudioFrame* mixedAudio, | |
| 502 const AudioFrameList& audioFrameList) const { | |
| 503 RTC_DCHECK_RUN_ON(&thread_checker_); | |
| 504 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | |
| 505 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); | |
| 506 | |
| 507 if (audioFrameList.empty()) | |
| 508 return 0; | |
| 509 | |
| 510 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | |
| 511 iter != audioFrameList.end(); ++iter) { | |
| 512 if (!iter->muted) { | |
| 513 MixFrames(mixedAudio, iter->frame, use_limiter_); | |
| 514 } | |
| 515 } | |
| 516 return 0; | 513 return 0; |
| 517 } | 514 } |
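The divide-by-two before summation exists because two full-scale int16 signals overflow on addition; halving guarantees the sum stays in range, and the limiter later restores loudness. A worked one-sample illustration (not mixer code):

```cpp
#include <cstdint>

// Adding two near-full-scale samples directly would wrap around; with
// half gain the worst case is 2 * (32767 / 2), which still fits.
int16_t MixTwoWithHeadroom(int16_t a, int16_t b) {
  return static_cast<int16_t>((a >> 1) + (b >> 1));  // Same '>>= 1' idea.
}
```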
| 518 | 515 |
| 519 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { | 516 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { |
| 520 RTC_DCHECK_RUN_ON(&thread_checker_); | 517 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 521 if (!use_limiter_) { | 518 if (!use_limiter_) { |
| 522 return true; | 519 return true; |
| 523 } | 520 } |
| 524 | 521 |
| 525 // Smoothly limit the mixed frame. | 522 // Smoothly limit the mixed frame. |
| (...skipping 29 matching lines...) |
| 555 } | 552 } |
| 556 | 553 |
| 557 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 554 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
| 558 RTC_DCHECK_RUN_ON(&thread_checker_); | 555 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 559 const int level = audio_level_.LevelFullRange(); | 556 const int level = audio_level_.LevelFullRange(); |
| 560 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 557 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
| 561 "GetAudioOutputLevelFullRange() => level=%d", level); | 558 "GetAudioOutputLevelFullRange() => level=%d", level); |
| 562 return level; | 559 return level; |
| 563 } | 560 } |
| 564 } // namespace webrtc | 561 } // namespace webrtc |