| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
| 12 | 12 |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 #include <functional> | 14 #include <functional> |
| 15 #include <utility> | 15 #include <utility> |
| 16 | 16 |
| 17 #include "webrtc/base/thread_annotations.h" | |
| 18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
| 19 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" | |
| 20 #include "webrtc/modules/audio_processing/include/audio_processing.h" | |
| 21 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 18 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
| 22 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | |
| 23 #include "webrtc/system_wrappers/include/trace.h" | 19 #include "webrtc/system_wrappers/include/trace.h" |
| 24 | 20 |
| 25 namespace webrtc { | 21 namespace webrtc { |
| 26 namespace { | 22 namespace { |
| 27 | 23 |
| 28 class SourceFrame { | 24 class SourceFrame { |
| 29 public: | 25 public: |
| 30 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 26 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) |
| 31 : audio_source_(p), | 27 : audio_source_(p), |
| 32 audio_frame_(a), | 28 audio_frame_(a), |
| (...skipping 48 matching lines...) |
| 81 } | 77 } |
| 82 } | 78 } |
| 83 | 79 |
| 84 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { | 80 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
| 85 for (const auto& source_frame : mixed_sources_and_frames) { | 81 for (const auto& source_frame : mixed_sources_and_frames) { |
| 86 // Ramp in previously unmixed. | 82 // Ramp in previously unmixed. |
| 87 if (!source_frame.was_mixed_before_) { | 83 if (!source_frame.was_mixed_before_) { |
| 88 NewMixerRampIn(source_frame.audio_frame_); | 84 NewMixerRampIn(source_frame.audio_frame_); |
| 89 } | 85 } |
| 90 | 86 |
| 91 const bool is_mixed = source_frame.audio_source_->mix_history_->IsMixed(); | 87 const bool is_mixed = source_frame.audio_source_->IsMixed(); |
| 92 // Ramp out currently unmixed. | 88 // Ramp out currently unmixed. |
| 93 if (source_frame.was_mixed_before_ && !is_mixed) { | 89 if (source_frame.was_mixed_before_ && !is_mixed) { |
| 94 NewMixerRampOut(source_frame.audio_frame_); | 90 NewMixerRampOut(source_frame.audio_frame_); |
| 95 } | 91 } |
| 96 } | 92 } |
| 97 } | 93 } |
| 98 | 94 |
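Note: Ramp() fades sources in as they enter the mix and out as they leave it, which avoids audible clicks at mix-membership changes. A minimal sketch of a one-frame linear gain ramp, assuming NewMixerRampIn/NewMixerRampOut sweep the gain 0 to 1 and 1 to 0 respectively (the helper below is illustrative, not the actual audio_frame_manipulator implementation):

    #include <cstddef>
    #include <cstdint>

    // Sweep a gain from |start_gain| to |end_gain| across one frame of
    // samples; ramp-in would be (0.f, 1.f), ramp-out (1.f, 0.f).
    void LinearRamp(int16_t* samples, size_t count, float start_gain,
                    float end_gain) {
      for (size_t i = 0; i < count; ++i) {
        const float gain = start_gain + (end_gain - start_gain) *
                                            (static_cast<float>(i) / count);
        samples[i] = static_cast<int16_t>(samples[i] * gain);
      }
    }
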
| 95 // Mix the AudioFrames stored in |audio_frame_list| into |mixed_audio|. |
| 96 int32_t MixFromList(AudioFrame* mixed_audio, |
| 97 const AudioFrameList& audio_frame_list, |
| 98 int32_t id, |
| 99 bool use_limiter) { |
| 100 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
| 101 "MixFromList(mixed_audio, audio_frame_list)"); |
| 102 if (audio_frame_list.empty()) |
| 103 return 0; |
| 104 |
| 105 if (audio_frame_list.size() == 1) { |
| 106 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; |
| 107 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; |
| 108 } else { |
| 109 // TODO(wu): Issue 3390. |
| 110 // Audio frame timestamp is only supported in the one-channel case. |
| 111 mixed_audio->timestamp_ = 0; |
| 112 mixed_audio->elapsed_time_ms_ = -1; |
| 113 } |
| 114 |
| 115 for (const auto& frame : audio_frame_list) { |
| 116 RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_); |
| 117 RTC_DCHECK_EQ( |
| 118 frame->samples_per_channel_, |
| 119 static_cast<size_t>((mixed_audio->sample_rate_hz_ * |
| 120 webrtc::AudioMixerImpl::kFrameDurationInMs) / |
| 121 1000)); |
| 122 |
| 123 // Mix |frame| into |mixed_audio|, with saturation protection. |
| 124 // This effect is applied to |frame| itself prior to mixing. |
| 125 if (use_limiter) { |
| 126 // Divide by two to avoid saturation in the mixing. |
| 127 // This is only meaningful if the limiter will be used. |
| 128 *frame >>= 1; |
| 129 } |
| 130 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); |
| 131 *mixed_audio += *frame; |
| 132 } |
| 133 return 0; |
| 134 } |
| 135 |
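Note: the |*frame >>= 1| halving trades 6 dB of level for headroom: summing full-scale int16 frames could wrap, so each source is attenuated before accumulation and the limiter stage restores the level afterwards. A sketch of the same idea on raw buffers, assuming |acc| starts zeroed and every source is halved, as MixFromList does via AudioFrame's operators:

    #include <cstddef>
    #include <cstdint>

    // Accumulate |src| into |acc| with the headroom trick MixFromList uses:
    // halve each source so two full-scale sources cannot wrap the int16 sum.
    void MixWithHeadroom(int16_t* acc, const int16_t* src, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        acc[i] = static_cast<int16_t>(acc[i] + (src[i] >> 1));
      }
    }
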
| 99 } // namespace | 136 } // namespace |
| 100 | 137 |
| 101 MixerAudioSource::MixerAudioSource() : mix_history_(new NewMixHistory()) {} | |
| 102 | |
| 103 MixerAudioSource::~MixerAudioSource() { | |
| 104 delete mix_history_; | |
| 105 } | |
| 106 | |
| 107 bool MixerAudioSource::IsMixed() const { | |
| 108 return mix_history_->IsMixed(); | |
| 109 } | |
| 110 | |
| 111 NewMixHistory::NewMixHistory() : is_mixed_(0) {} | |
| 112 | |
| 113 NewMixHistory::~NewMixHistory() {} | |
| 114 | |
| 115 bool NewMixHistory::IsMixed() const { | |
| 116 return is_mixed_; | |
| 117 } | |
| 118 | |
| 119 bool NewMixHistory::WasMixed() const { | |
| 120 // Was mixed is the same as is mixed depending on perspective. This function | |
| 121 // is for the perspective of NewAudioConferenceMixerImpl. | |
| 122 return IsMixed(); | |
| 123 } | |
| 124 | |
| 125 int32_t NewMixHistory::SetIsMixed(const bool mixed) { | |
| 126 is_mixed_ = mixed; | |
| 127 return 0; | |
| 128 } | |
| 129 | |
| 130 void NewMixHistory::ResetMixedStatus() { | |
| 131 is_mixed_ = false; | |
| 132 } | |
| 133 | |
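Note: the hunk above deletes the public NewMixHistory indirection; callers in this file now use IsMixed(), WasMixed(), SetIsMixed(), and ResetMixedStatus() directly on MixerAudioSource. A sketch of the state those calls imply (illustrative only; the real declaration lives in the mixer headers):

    // Sketch of the mixed-state bookkeeping now owned by MixerAudioSource.
    class MixerAudioSourceSketch {
     public:
      bool IsMixed() const { return is_mixed_; }
      // Read at the start of a mixing round, before SetIsMixed() runs for
      // the current round, so it reports the previous round's state.
      bool WasMixed() const { return is_mixed_; }
      void SetIsMixed(bool mixed) { is_mixed_ = mixed; }
      void ResetMixedStatus() { is_mixed_ = false; }

     private:
      bool is_mixed_ = false;
    };
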
| 134 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { | 138 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { |
| 135 return AudioMixerImpl::Create(id); | 139 return AudioMixerImpl::Create(id); |
| 136 } | 140 } |
| 137 | 141 |
| 138 AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter) | 142 AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter) |
| 139 : id_(id), | 143 : id_(id), |
| 140 audio_source_list_(), | 144 audio_source_list_(), |
| 141 additional_audio_source_list_(), | 145 additional_audio_source_list_(), |
| 142 num_mixed_audio_sources_(0), | 146 num_mixed_audio_sources_(0), |
| 143 use_limiter_(true), | 147 use_limiter_(true), |
| (...skipping 48 matching lines...) |
| 192 RTC_NOTREACHED(); | 196 RTC_NOTREACHED(); |
| 193 return; | 197 return; |
| 194 } | 198 } |
| 195 | 199 |
| 196 if (OutputFrequency() != sample_rate) { | 200 if (OutputFrequency() != sample_rate) { |
| 197 SetOutputFrequency(static_cast<Frequency>(sample_rate)); | 201 SetOutputFrequency(static_cast<Frequency>(sample_rate)); |
| 198 } | 202 } |
| 199 | 203 |
| 200 AudioFrameList mix_list; | 204 AudioFrameList mix_list; |
| 201 AudioFrameList anonymous_mix_list; | 205 AudioFrameList anonymous_mix_list; |
| 202 int num_mixed_audio_sources; | 206 size_t num_mixed_audio_sources; |
| 203 { | 207 { |
| 204 rtc::CritScope lock(&crit_); | 208 rtc::CritScope lock(&crit_); |
| 205 mix_list = GetNonAnonymousAudio(); | 209 mix_list = GetNonAnonymousAudio(); |
| 206 anonymous_mix_list = GetAnonymousAudio(); | 210 anonymous_mix_list = GetAnonymousAudio(); |
| 207 num_mixed_audio_sources = static_cast<int>(num_mixed_audio_sources_); | 211 num_mixed_audio_sources = num_mixed_audio_sources_; |
| 208 } | 212 } |
| 209 | 213 |
| 210 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), | 214 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), |
| 211 anonymous_mix_list.end()); | 215 anonymous_mix_list.end()); |
| 212 | 216 |
| 213 for (const auto& frame : mix_list) { | 217 for (const auto& frame : mix_list) { |
| 214 RemixFrame(frame, number_of_channels); | 218 RemixFrame(frame, number_of_channels); |
| 215 } | 219 } |
| 216 | 220 |
| 217 audio_frame_for_mixing->UpdateFrame( | 221 audio_frame_for_mixing->UpdateFrame( |
| (...skipping 18 matching lines...) |
| 236 | 240 |
| 237 // Pass the final result to the level indicator. | 241 // Pass the final result to the level indicator. |
| 238 audio_level_.ComputeLevel(*audio_frame_for_mixing); | 242 audio_level_.ComputeLevel(*audio_frame_for_mixing); |
| 239 | 243 |
| 240 return; | 244 return; |
| 241 } | 245 } |
| 242 | 246 |
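Note: for context, this is roughly how a caller drives Mix() once per 10 ms tick. The signature is inferred from this file; the id, rate, and channel count below are illustrative, and WebRTC includes are elided:

    // Illustrative usage; one call per 10 ms tick.
    std::unique_ptr<webrtc::AudioMixer> mixer = webrtc::AudioMixer::Create(0);
    webrtc::AudioFrame mixed;
    mixer->Mix(48000 /* sample_rate */, 2 /* number_of_channels */, &mixed);
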
| 243 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { | 247 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { |
| 244 RTC_DCHECK_RUN_ON(&thread_checker_); | 248 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 245 output_frequency_ = frequency; | 249 output_frequency_ = frequency; |
| 246 sample_size_ = | 250 sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000; |
| 247 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); | |
| 248 | 251 |
| 249 return 0; | 252 return 0; |
| 250 } | 253 } |
| 251 | 254 |
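Note: sample_size_ is the per-channel sample count of one frame, assuming kFrameDurationInMs is the usual 10 ms WebRTC frame. A compile-time check of the arithmetic:

    #include <cstddef>

    // Per-channel samples in one frame: rate (Hz) * duration (ms) / 1000.
    constexpr size_t SampleSizeFor(int rate_hz, int duration_ms) {
      return static_cast<size_t>(rate_hz * duration_ms) / 1000;
    }
    static_assert(SampleSizeFor(48000, 10) == 480, "48 kHz, 10 ms frame");
    static_assert(SampleSizeFor(16000, 10) == 160, "16 kHz, 10 ms frame");
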
| 252 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { | 255 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { |
| 253 RTC_DCHECK_RUN_ON(&thread_checker_); | 256 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 254 return output_frequency_; | 257 return output_frequency_; |
| 255 } | 258 } |
| 256 | 259 |
| 257 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, | 260 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
| (...skipping 100 matching lines...) |
| 358 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 361 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
| 359 | 362 |
| 360 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | 363 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { |
| 361 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 364 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
| 362 "failed to GetAudioFrameWithMuted() from participant"); | 365 "failed to GetAudioFrameWithMuted() from participant"); |
| 363 continue; | 366 continue; |
| 364 } | 367 } |
| 365 audio_source_mixing_data_list.emplace_back( | 368 audio_source_mixing_data_list.emplace_back( |
| 366 audio_source, audio_source_audio_frame, | 369 audio_source, audio_source_audio_frame, |
| 367 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, | 370 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, |
| 368 audio_source->mix_history_->WasMixed()); | 371 audio_source->WasMixed()); |
| 369 } | 372 } |
| 370 | 373 |
| 371 // Sort frames by the shouldMixBefore() ordering. | 374 // Sort frames by the shouldMixBefore() ordering. |
| 372 std::sort(audio_source_mixing_data_list.begin(), | 375 std::sort(audio_source_mixing_data_list.begin(), |
| 373 audio_source_mixing_data_list.end(), | 376 audio_source_mixing_data_list.end(), |
| 374 std::mem_fn(&SourceFrame::shouldMixBefore)); | 377 std::mem_fn(&SourceFrame::shouldMixBefore)); |
| 375 | 378 |
| 376 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; | 379 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; |
| 377 | 380 |
| 378 // Go through list in order and put unmuted frames in result list. | 381 // Go through list in order and put unmuted frames in result list. |
| 379 for (const SourceFrame& p : audio_source_mixing_data_list) { | 382 for (const SourceFrame& p : audio_source_mixing_data_list) { |
| 380 // Filter muted. | 383 // Filter muted. |
| 381 if (p.muted_) { | 384 if (p.muted_) { |
| 382 p.audio_source_->mix_history_->SetIsMixed(false); | 385 p.audio_source_->SetIsMixed(false); |
| 383 continue; | 386 continue; |
| 384 } | 387 } |
| 385 | 388 |
| 386 // Add frame to result vector for mixing. | 389 // Add frame to result vector for mixing. |
| 387 bool is_mixed = false; | 390 bool is_mixed = false; |
| 388 if (max_audio_frame_counter > 0) { | 391 if (max_audio_frame_counter > 0) { |
| 389 --max_audio_frame_counter; | 392 --max_audio_frame_counter; |
| 390 result.push_back(p.audio_frame_); | 393 result.push_back(p.audio_frame_); |
| 391 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, | 394 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, |
| 392 p.was_mixed_before_, -1); | 395 p.was_mixed_before_, -1); |
| 393 is_mixed = true; | 396 is_mixed = true; |
| 394 } | 397 } |
| 395 p.audio_source_->mix_history_->SetIsMixed(is_mixed); | 398 p.audio_source_->SetIsMixed(is_mixed); |
| 396 } | 399 } |
| 397 Ramp(ramp_list); | 400 Ramp(ramp_list); |
| 398 return result; | 401 return result; |
| 399 } | 402 } |
| 400 | 403 |
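Note: GetNonAnonymousAudio() is a top-N selection: order all candidate frames with SourceFrame::shouldMixBefore, skip muted ones, and mix at most kMaximumAmountOfMixedAudioSources. A distilled sketch with a hypothetical comparator standing in for shouldMixBefore (the actual criteria live in the elided SourceFrame definition):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Candidate {
      bool muted = false;
      bool was_mixed_before = false;
      int energy = 0;  // stand-in for whatever shouldMixBefore compares
    };

    // Keep at most |max_sources| unmuted candidates, best first.
    std::vector<Candidate> SelectForMix(std::vector<Candidate> all,
                                        size_t max_sources) {
      std::sort(all.begin(), all.end(),
                [](const Candidate& a, const Candidate& b) {
                  // Hypothetical order: previously mixed first, then louder.
                  if (a.was_mixed_before != b.was_mixed_before)
                    return a.was_mixed_before;
                  return a.energy > b.energy;
                });
      std::vector<Candidate> picked;
      for (const Candidate& c : all) {
        if (c.muted) continue;  // muted sources are never mixed
        if (picked.size() == max_sources) break;
        picked.push_back(c);
      }
      return picked;
    }
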
| 401 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { | 404 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { |
| 402 RTC_DCHECK_RUN_ON(&thread_checker_); | 405 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 403 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 406 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
| 404 "GetAnonymousAudio()"); | 407 "GetAnonymousAudio()"); |
| 405 // The GetAudioFrameWithMuted() callback may result in the audio source being | 408 // The GetAudioFrameWithMuted() callback may result in the audio source being |
| (...skipping 13 matching lines...) |
| 419 const auto ret = audio_frame_with_info.audio_frame_info; | 422 const auto ret = audio_frame_with_info.audio_frame_info; |
| 420 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 423 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
| 421 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 424 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
| 422 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 425 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
| 423 "failed to GetAudioFrameWithMuted() from audio_source"); | 426 "failed to GetAudioFrameWithMuted() from audio_source"); |
| 424 continue; | 427 continue; |
| 425 } | 428 } |
| 426 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { | 429 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { |
| 427 result.push_back(audio_frame); | 430 result.push_back(audio_frame); |
| 428 ramp_list.emplace_back(audio_source, audio_frame, false, | 431 ramp_list.emplace_back(audio_source, audio_frame, false, |
| 429 audio_source->mix_history_->IsMixed(), 0); | 432 audio_source->IsMixed(), 0); |
| 430 audio_source->mix_history_->SetIsMixed(true); | 433 audio_source->SetIsMixed(true); |
| 431 } | 434 } |
| 432 } | 435 } |
| 433 Ramp(ramp_list); | 436 Ramp(ramp_list); |
| 434 return result; | 437 return result; |
| 435 } | 438 } |
| 436 | 439 |
| 437 bool AudioMixerImpl::IsAudioSourceInList( | 440 bool AudioMixerImpl::IsAudioSourceInList( |
| 438 const MixerAudioSource& audio_source, | 441 const MixerAudioSource& audio_source, |
| 439 const MixerAudioSourceList& audio_source_list) const { | 442 const MixerAudioSourceList& audio_source_list) const { |
| 440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 443 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
| 441 "IsAudioSourceInList(audio_source,audio_source_list)"); | 444 "IsAudioSourceInList(audio_source,audio_source_list)"); |
| 442 return std::find(audio_source_list.begin(), audio_source_list.end(), | 445 return std::find(audio_source_list.begin(), audio_source_list.end(), |
| 443 &audio_source) != audio_source_list.end(); | 446 &audio_source) != audio_source_list.end(); |
| 444 } | 447 } |
| 445 | 448 |
| 446 bool AudioMixerImpl::AddAudioSourceToList( | 449 bool AudioMixerImpl::AddAudioSourceToList( |
| 447 MixerAudioSource* audio_source, | 450 MixerAudioSource* audio_source, |
| 448 MixerAudioSourceList* audio_source_list) const { | 451 MixerAudioSourceList* audio_source_list) const { |
| 449 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 452 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
| 450 "AddAudioSourceToList(audio_source, audio_source_list)"); | 453 "AddAudioSourceToList(audio_source, audio_source_list)"); |
| 451 audio_source_list->push_back(audio_source); | 454 audio_source_list->push_back(audio_source); |
| 452 // Make sure that the mixed status is correct for new MixerAudioSource. | 455 // Make sure that the mixed status is correct for new MixerAudioSource. |
| 453 audio_source->mix_history_->ResetMixedStatus(); | 456 audio_source->ResetMixedStatus(); |
| 454 return true; | 457 return true; |
| 455 } | 458 } |
| 456 | 459 |
| 457 bool AudioMixerImpl::RemoveAudioSourceFromList( | 460 bool AudioMixerImpl::RemoveAudioSourceFromList( |
| 458 MixerAudioSource* audio_source, | 461 MixerAudioSource* audio_source, |
| 459 MixerAudioSourceList* audio_source_list) const { | 462 MixerAudioSourceList* audio_source_list) const { |
| 460 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 463 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
| 461 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); | 464 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); |
| 462 const auto iter = std::find(audio_source_list->begin(), | 465 const auto iter = std::find(audio_source_list->begin(), |
| 463 audio_source_list->end(), audio_source); | 466 audio_source_list->end(), audio_source); |
| 464 if (iter != audio_source_list->end()) { | 467 if (iter != audio_source_list->end()) { |
| 465 audio_source_list->erase(iter); | 468 audio_source_list->erase(iter); |
| 466 // AudioSource is no longer mixed, reset to default. | 469 // AudioSource is no longer mixed, reset to default. |
| 467 audio_source->mix_history_->ResetMixedStatus(); | 470 audio_source->ResetMixedStatus(); |
| 468 return true; | 471 return true; |
| 469 } else { | 472 } else { |
| 470 return false; | 473 return false; |
| 471 } | 474 } |
| 472 } | 475 } |
| 473 | 476 |
| 474 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixed_audio, | |
| 475 const AudioFrameList& audio_frame_list, | |
| 476 int32_t id, | |
| 477 bool use_limiter) { | |
| 478 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | |
| 479 "MixFromList(mixed_audio, audio_frame_list)"); | |
| 480 if (audio_frame_list.empty()) | |
| 481 return 0; | |
| 482 | |
| 483 if (audio_frame_list.size() == 1) { | |
| 484 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; | |
| 485 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; | |
| 486 } else { | |
| 487 // TODO(wu): Issue 3390. | |
| 488 // Audio frame timestamp is only supported in one channel case. | |
| 489 mixed_audio->timestamp_ = 0; | |
| 490 mixed_audio->elapsed_time_ms_ = -1; | |
| 491 } | |
| 492 | |
| 493 for (const auto& frame : audio_frame_list) { | |
| 494 RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_); | |
| 495 RTC_DCHECK_EQ( | |
| 496 frame->samples_per_channel_, | |
| 497 static_cast<size_t>( | |
| 498 (mixed_audio->sample_rate_hz_ * kFrameDurationInMs) / 1000)); | |
| 499 | |
| 500 // Mix |f.frame| into |mixed_audio|, with saturation protection. | |
| 501 // These effect is applied to |f.frame| itself prior to mixing. | |
| 502 if (use_limiter) { | |
| 503 // Divide by two to avoid saturation in the mixing. | |
| 504 // This is only meaningful if the limiter will be used. | |
| 505 *frame >>= 1; | |
| 506 } | |
| 507 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); | |
| 508 *mixed_audio += *frame; | |
| 509 } | |
| 510 return 0; | |
| 511 } | |
| 512 | |
| 513 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { | 477 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { |
| 514 RTC_DCHECK_RUN_ON(&thread_checker_); | 478 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 515 if (!use_limiter_) { | 479 if (!use_limiter_) { |
| 516 return true; | 480 return true; |
| 517 } | 481 } |
| 518 | 482 |
| 519 // Smoothly limit the mixed frame. | 483 // Smoothly limit the mixed frame. |
| 520 const int error = limiter_->ProcessStream(mixed_audio); | 484 const int error = limiter_->ProcessStream(mixed_audio); |
| 521 | 485 |
| 522 // And now we can safely restore the level. This procedure results in | 486 // And now we can safely restore the level. This procedure results in |
| (...skipping 26 matching lines...) |
| 549 } | 513 } |
| 550 | 514 |
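Note: the elided lines after the limiter presumably compensate for the earlier halving in MixFromList by doubling the frame back to nominal level. A saturating sketch of that restore step (illustrative; the real code would use AudioFrame arithmetic):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Double each sample with saturation, restoring the 6 dB of headroom
    // that MixFromList removed before accumulation.
    void RestoreLevel(int16_t* samples, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        const int32_t doubled = 2 * static_cast<int32_t>(samples[i]);
        samples[i] = static_cast<int16_t>(std::min<int32_t>(
            INT16_MAX, std::max<int32_t>(INT16_MIN, doubled)));
      }
    }
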
| 551 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 515 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
| 552 RTC_DCHECK_RUN_ON(&thread_checker_); | 516 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 553 const int level = audio_level_.LevelFullRange(); | 517 const int level = audio_level_.LevelFullRange(); |
| 554 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 518 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
| 555 "GetAudioOutputLevelFullRange() => level=%d", level); | 519 "GetAudioOutputLevelFullRange() => level=%d", level); |
| 556 return level; | 520 return level; |
| 557 } | 521 } |
| 558 } // namespace webrtc | 522 } // namespace webrtc |