| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
| 12 | 12 |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 #include <functional> | 14 #include <functional> |
| 15 #include <iterator> | 15 #include <iterator> |
| 16 #include <utility> | 16 #include <utility> |
| 17 | 17 |
| 18 #include "webrtc/audio/utility/audio_frame_operations.h" | |
| 19 #include "webrtc/base/logging.h" | 18 #include "webrtc/base/logging.h" |
| 20 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 19 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
| 21 #include "webrtc/modules/audio_mixer/default_output_rate_calculator.h" | 20 #include "webrtc/modules/audio_mixer/default_output_rate_calculator.h" |
| 22 | 21 |
| 23 namespace webrtc { | 22 namespace webrtc { |
| 24 namespace { | 23 namespace { |
| 25 | 24 |
| 26 struct SourceFrame { | 25 struct SourceFrame { |
| 27 SourceFrame(AudioMixerImpl::SourceStatus* source_status, | 26 SourceFrame(AudioMixerImpl::SourceStatus* source_status, |
| 28 AudioFrame* audio_frame, | 27 AudioFrame* audio_frame, |
| (...skipping 43 matching lines...) |
| 72 void RampAndUpdateGain( | 71 void RampAndUpdateGain( |
| 73 const std::vector<SourceFrame>& mixed_sources_and_frames) { | 72 const std::vector<SourceFrame>& mixed_sources_and_frames) { |
| 74 for (const auto& source_frame : mixed_sources_and_frames) { | 73 for (const auto& source_frame : mixed_sources_and_frames) { |
| 75 float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f; | 74 float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f; |
| 76 Ramp(source_frame.source_status->gain, target_gain, | 75 Ramp(source_frame.source_status->gain, target_gain, |
| 77 source_frame.audio_frame); | 76 source_frame.audio_frame); |
| 78 source_frame.source_status->gain = target_gain; | 77 source_frame.source_status->gain = target_gain; |
| 79 } | 78 } |
| 80 } | 79 } |
| 81 | 80 |
| 82 // Mix the AudioFrames stored in audioFrameList into mixed_audio. | |
| 83 int32_t MixFromList(AudioFrame* mixed_audio, | |
| 84 const AudioFrameList& audio_frame_list, | |
| 85 bool use_limiter) { | |
| 86 if (audio_frame_list.empty()) { | |
| 87 return 0; | |
| 88 } | |
| 89 | |
| 90 if (audio_frame_list.size() == 1) { | |
| 91 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; | |
| 92 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; | |
| 93 } else { | |
| 94 // TODO(wu): Issue 3390. | |
| 95 // Audio frame timestamp is only supported in one channel case. | |
| 96 mixed_audio->timestamp_ = 0; | |
| 97 mixed_audio->elapsed_time_ms_ = -1; | |
| 98 } | |
| 99 | |
| 100 for (const auto& frame : audio_frame_list) { | |
| 101 RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_); | |
| 102 RTC_DCHECK_EQ( | |
| 103 frame->samples_per_channel_, | |
| 104 static_cast<size_t>((mixed_audio->sample_rate_hz_ * | |
| 105 webrtc::AudioMixerImpl::kFrameDurationInMs) / | |
| 106 1000)); | |
| 107 | |
| 108 // Mix |f.frame| into |mixed_audio|, with saturation protection. | |
| 109 // This effect is applied to |f.frame| itself prior to mixing. | |
| 110 if (use_limiter) { | |
| 111 // This is to avoid saturation in the mixing. It is only | |
| 112 // meaningful if the limiter will be used. | |
| 113 AudioFrameOperations::ApplyHalfGain(frame); | |
| 114 } | |
| 115 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); | |
| 116 AudioFrameOperations::Add(*frame, mixed_audio); | |
| 117 } | |
| 118 return 0; | |
| 119 } | |
| 120 | |
| 121 AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList( | 81 AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList( |
| 122 AudioMixerImpl::Source const* audio_source, | 82 AudioMixerImpl::Source const* audio_source, |
| 123 AudioMixerImpl::SourceStatusList const* audio_source_list) { | 83 AudioMixerImpl::SourceStatusList const* audio_source_list) { |
| 124 return std::find_if( | 84 return std::find_if( |
| 125 audio_source_list->begin(), audio_source_list->end(), | 85 audio_source_list->begin(), audio_source_list->end(), |
| 126 [audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) { | 86 [audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) { |
| 127 return p->audio_source == audio_source; | 87 return p->audio_source == audio_source; |
| 128 }); | 88 }); |
| 129 } | 89 } |
| 130 | 90 |
| 131 // TODO(aleloi): remove non-const version when WEBRTC only supports modern STL. | 91 // TODO(aleloi): remove non-const version when WEBRTC only supports modern STL. |
| 132 AudioMixerImpl::SourceStatusList::iterator FindSourceInList( | 92 AudioMixerImpl::SourceStatusList::iterator FindSourceInList( |
| 133 AudioMixerImpl::Source const* audio_source, | 93 AudioMixerImpl::Source const* audio_source, |
| 134 AudioMixerImpl::SourceStatusList* audio_source_list) { | 94 AudioMixerImpl::SourceStatusList* audio_source_list) { |
| 135 return std::find_if( | 95 return std::find_if( |
| 136 audio_source_list->begin(), audio_source_list->end(), | 96 audio_source_list->begin(), audio_source_list->end(), |
| 137 [audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) { | 97 [audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) { |
| 138 return p->audio_source == audio_source; | 98 return p->audio_source == audio_source; |
| 139 }); | 99 }); |
| 140 } | 100 } |
| 141 | 101 |
| 142 std::unique_ptr<AudioProcessing> CreateLimiter() { | |
| 143 Config config; | |
| 144 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | |
| 145 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config)); | |
| 146 if (!limiter.get()) { | |
| 147 return nullptr; | |
| 148 } | |
| 149 | |
| 150 if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) != | |
| 151 limiter->kNoError) { | |
| 152 return nullptr; | |
| 153 } | |
| 154 | |
| 155 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the | |
| 156 // divide-by-2 but -7 is used instead to give a bit of headroom since the | |
| 157 // AGC is not a hard limiter. | |
| 158 if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError) { | |
| 159 return nullptr; | |
| 160 } | |
| 161 | |
| 162 if (limiter->gain_control()->set_compression_gain_db(0) != | |
| 163 limiter->kNoError) { | |
| 164 return nullptr; | |
| 165 } | |
| 166 | |
| 167 if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError) { | |
| 168 return nullptr; | |
| 169 } | |
| 170 | |
| 171 if (limiter->gain_control()->Enable(true) != limiter->kNoError) { | |
| 172 return nullptr; | |
| 173 } | |
| 174 return limiter; | |
| 175 } | |
| 176 | |
| 177 } // namespace | 102 } // namespace |
| 178 | 103 |
| 179 AudioMixerImpl::AudioMixerImpl( | 104 AudioMixerImpl::AudioMixerImpl( |
| 180 std::unique_ptr<AudioProcessing> limiter, | 105 std::unique_ptr<OutputRateCalculator> output_rate_calculator, |
| 181 std::unique_ptr<OutputRateCalculator> output_rate_calculator) | 106 bool use_limiter) |
| 182 : output_rate_calculator_(std::move(output_rate_calculator)), | 107 : output_rate_calculator_(std::move(output_rate_calculator)), |
| 183 output_frequency_(0), | 108 output_frequency_(0), |
| 184 sample_size_(0), | 109 sample_size_(0), |
| 185 audio_source_list_(), | 110 audio_source_list_(), |
| 186 use_limiter_(true), | 111 frame_combiner_(use_limiter) {} |
| 187 time_stamp_(0), | |
| 188 limiter_(std::move(limiter)) {} | |
| 189 | 112 |
| 190 AudioMixerImpl::~AudioMixerImpl() {} | 113 AudioMixerImpl::~AudioMixerImpl() {} |
| 191 | 114 |
| 192 rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create() { | 115 rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create() { |
| 193 return CreateWithOutputRateCalculator( | 116 return CreateWithOutputRateCalculatorAndLimiter( |
| 194 std::unique_ptr<DefaultOutputRateCalculator>( | 117 std::unique_ptr<DefaultOutputRateCalculator>( |
| 195 new DefaultOutputRateCalculator())); | 118 new DefaultOutputRateCalculator()), |
| 119 true); |
| 196 } | 120 } |
| 197 | 121 |
| 198 rtc::scoped_refptr<AudioMixerImpl> | 122 rtc::scoped_refptr<AudioMixerImpl> |
| 199 AudioMixerImpl::CreateWithOutputRateCalculator( | 123 AudioMixerImpl::CreateWithOutputRateCalculator( |
| 200 std::unique_ptr<OutputRateCalculator> output_rate_calculator) { | 124 std::unique_ptr<OutputRateCalculator> output_rate_calculator) { |
| 125 return CreateWithOutputRateCalculatorAndLimiter( |
| 126 std::move(output_rate_calculator), true); |
| 127 } |
| 128 |
| 129 rtc::scoped_refptr<AudioMixerImpl> |
| 130 AudioMixerImpl::CreateWithOutputRateCalculatorAndLimiter( |
| 131 std::unique_ptr<OutputRateCalculator> output_rate_calculator, |
| 132 bool use_limiter) { |
| 201 return rtc::scoped_refptr<AudioMixerImpl>( | 133 return rtc::scoped_refptr<AudioMixerImpl>( |
| 202 new rtc::RefCountedObject<AudioMixerImpl>( | 134 new rtc::RefCountedObject<AudioMixerImpl>( |
| 203 CreateLimiter(), std::move(output_rate_calculator))); | 135 std::move(output_rate_calculator), use_limiter)); |
| 204 } | 136 } |
| 205 | 137 |
| 206 void AudioMixerImpl::Mix(size_t number_of_channels, | 138 void AudioMixerImpl::Mix(size_t number_of_channels, |
| 207 AudioFrame* audio_frame_for_mixing) { | 139 AudioFrame* audio_frame_for_mixing) { |
| 208 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 140 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
| 209 RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); | 141 RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); |
| 210 | 142 |
| 211 CalculateOutputFrequency(); | 143 CalculateOutputFrequency(); |
| 212 | 144 |
| 213 AudioFrameList mix_list; | |
| 214 { | 145 { |
| 215 rtc::CritScope lock(&crit_); | 146 rtc::CritScope lock(&crit_); |
| 216 mix_list = GetAudioFromSources(); | 147 frame_combiner_.Combine(GetAudioFromSources(), number_of_channels, |
| 217 | 148 OutputFrequency(), audio_frame_for_mixing); |
| 218 for (const auto& frame : mix_list) { | |
| 219 RemixFrame(number_of_channels, frame); | |
| 220 } | |
| 221 | |
| 222 audio_frame_for_mixing->UpdateFrame( | |
| 223 -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech, | |
| 224 AudioFrame::kVadPassive, number_of_channels); | |
| 225 | |
| 226 time_stamp_ += static_cast<uint32_t>(sample_size_); | |
| 227 | |
| 228 use_limiter_ = mix_list.size() > 1; | |
| 229 | |
| 230 // We only use the limiter if we're actually mixing multiple streams. | |
| 231 MixFromList(audio_frame_for_mixing, mix_list, use_limiter_); | |
| 232 } | |
| 233 | |
| 234 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | |
| 235 // Nothing was mixed, set the audio samples to silence. | |
| 236 audio_frame_for_mixing->samples_per_channel_ = sample_size_; | |
| 237 AudioFrameOperations::Mute(audio_frame_for_mixing); | |
| 238 } else { | |
| 239 // Only call the limiter if we have something to mix. | |
| 240 LimitMixedAudio(audio_frame_for_mixing); | |
| 241 } | 149 } |
| 242 | 150 |
| 243 return; | 151 return; |
| 244 } | 152 } |
| 245 | 153 |
| 246 void AudioMixerImpl::CalculateOutputFrequency() { | 154 void AudioMixerImpl::CalculateOutputFrequency() { |
| 247 RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); | 155 RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); |
| 248 rtc::CritScope lock(&crit_); | 156 rtc::CritScope lock(&crit_); |
| 249 | 157 |
| 250 std::vector<int> preferred_rates; | 158 std::vector<int> preferred_rates; |
| (...skipping 73 matching lines...) |
| 324 result.push_back(p.audio_frame); | 232 result.push_back(p.audio_frame); |
| 325 ramp_list.emplace_back(p.source_status, p.audio_frame, false, -1); | 233 ramp_list.emplace_back(p.source_status, p.audio_frame, false, -1); |
| 326 is_mixed = true; | 234 is_mixed = true; |
| 327 } | 235 } |
| 328 p.source_status->is_mixed = is_mixed; | 236 p.source_status->is_mixed = is_mixed; |
| 329 } | 237 } |
| 330 RampAndUpdateGain(ramp_list); | 238 RampAndUpdateGain(ramp_list); |
| 331 return result; | 239 return result; |
| 332 } | 240 } |
| 333 | 241 |
| 334 | |
| 335 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { | |
| 336 RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); | |
| 337 if (!use_limiter_) { | |
| 338 return true; | |
| 339 } | |
| 340 | |
| 341 // Smoothly limit the mixed frame. | |
| 342 const int error = limiter_->ProcessStream(mixed_audio); | |
| 343 | |
| 344 // And now we can safely restore the level. This procedure results in | |
| 345 // some loss of resolution, deemed acceptable. | |
| 346 // | |
| 347 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS | |
| 348 // and compression gain of 6 dB). However, in the transition frame when this | |
| 349 // is enabled (moving from one to two audio sources) it has the potential to | |
| 350 // create discontinuities in the mixed frame. | |
| 351 // | |
| 352 // Instead we double the frame (with addition since left-shifting a | |
| 353 // negative value is undefined). | |
| 354 AudioFrameOperations::Add(*mixed_audio, mixed_audio); | |
| 355 | |
| 356 if (error != limiter_->kNoError) { | |
| 357 LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error; | |
| 358 RTC_NOTREACHED(); | |
| 359 return false; | |
| 360 } | |
| 361 return true; | |
| 362 } | |
| 363 | |
| 364 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( | 242 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( |
| 365 AudioMixerImpl::Source* audio_source) const { | 243 AudioMixerImpl::Source* audio_source) const { |
| 366 RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); | 244 RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); |
| 367 rtc::CritScope lock(&crit_); | 245 rtc::CritScope lock(&crit_); |
| 368 | 246 |
| 369 const auto iter = FindSourceInList(audio_source, &audio_source_list_); | 247 const auto iter = FindSourceInList(audio_source, &audio_source_list_); |
| 370 if (iter != audio_source_list_.end()) { | 248 if (iter != audio_source_list_.end()) { |
| 371 return (*iter)->is_mixed; | 249 return (*iter)->is_mixed; |
| 372 } | 250 } |
| 373 | 251 |
| 374 LOG(LS_ERROR) << "Audio source unknown"; | 252 LOG(LS_ERROR) << "Audio source unknown"; |
| 375 return false; | 253 return false; |
| 376 } | 254 } |
| 377 } // namespace webrtc | 255 } // namespace webrtc |
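For context, a minimal sketch of how the refactored API could be driven after this change: the mixer is constructed through the new CreateWithOutputRateCalculatorAndLimiter() factory (here with the limiter disabled) and one mixed frame is pulled via Mix(), which now delegates combining and optional limiting to FrameCombiner instead of the removed MixFromList()/LimitMixedAudio() path. The AudioFrame include path and the AddSource() registration step are assumptions not shown in this diff.

```cpp
// Minimal usage sketch (not part of this CL). Assumes the WebRTC tree layout
// of the time: AudioFrame lives in module_common_types.h and sources are
// registered through AudioMixer::AddSource(), neither of which appears in
// this diff.
#include <memory>

#include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
#include "webrtc/modules/audio_mixer/default_output_rate_calculator.h"
#include "webrtc/modules/include/module_common_types.h"  // AudioFrame (assumed path).

void MixOneFrameWithoutLimiter() {
  // The new factory exposes the use_limiter flag directly; passing false
  // makes AudioMixerImpl construct its FrameCombiner with limiting disabled.
  rtc::scoped_refptr<webrtc::AudioMixerImpl> mixer =
      webrtc::AudioMixerImpl::CreateWithOutputRateCalculatorAndLimiter(
          std::unique_ptr<webrtc::OutputRateCalculator>(
              new webrtc::DefaultOutputRateCalculator()),
          /*use_limiter=*/false);

  // Sources would normally be registered here, e.g. mixer->AddSource(&source).

  // Mix() now hands the per-source frames to FrameCombiner, which combines
  // them at the calculated output rate into |mixed_frame|.
  webrtc::AudioFrame mixed_frame;
  mixer->Mix(/*number_of_channels=*/1, &mixed_frame);
}
```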