Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2286343002: Less lock acquisitions for AudioMixer. (Closed)
Patch Set: Added ACCESS_ON to limiter and added threading unit test. Created 4 years, 3 months ago
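
For context on the patch set title: ACCESS_ON is a thread-annotation macro (the patch adds an include of webrtc/base/thread_annotations.h below) and RTC_DCHECK_RUN_ON is the matching runtime check used throughout the new code. A minimal sketch of the idiom, using a hypothetical class and assumed header paths rather than anything from this CL:

  // Hypothetical example, not part of the patch.
  #include "webrtc/base/thread_annotations.h"  // ACCESS_ON (assumed location)
  #include "webrtc/base/thread_checker.h"      // rtc::ThreadChecker (assumed location)

  class Counter {
   public:
    void Increment() {
      RTC_DCHECK_RUN_ON(&thread_checker_);  // DCHECKs we are on the bound thread.
      ++value_;                             // No lock needed: single thread by contract.
    }

   private:
    rtc::ThreadChecker thread_checker_;
    int value_ ACCESS_ON(&thread_checker_) = 0;  // Annotated: only touched on that thread.
  };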
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <functional> 14 #include <functional>
15 15
16 #include "webrtc/base/thread_annotations.h"
16 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" 17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
17 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" 18 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
18 #include "webrtc/modules/audio_processing/include/audio_processing.h" 19 #include "webrtc/modules/audio_processing/include/audio_processing.h"
19 #include "webrtc/modules/utility/include/audio_frame_operations.h" 20 #include "webrtc/modules/utility/include/audio_frame_operations.h"
20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" 21 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
21 #include "webrtc/system_wrappers/include/trace.h" 22 #include "webrtc/system_wrappers/include/trace.h"
22 23
23 namespace webrtc { 24 namespace webrtc {
24 namespace { 25 namespace {
25 26
(...skipping 110 matching lines...)
136 additional_audio_source_list_(), 137 additional_audio_source_list_(),
137 num_mixed_audio_sources_(0), 138 num_mixed_audio_sources_(0),
138 use_limiter_(true), 139 use_limiter_(true),
139 time_stamp_(0) { 140 time_stamp_(0) {
140 thread_checker_.DetachFromThread(); 141 thread_checker_.DetachFromThread();
141 } 142 }
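
Annotation on the constructor chunk above (not part of the review): DetachFromThread() forgets the construction thread so that the checker re-binds to whichever thread makes the first checked call; the patch relies on the same trick again at the end of Init(). A sketch of that idiom with a hypothetical class and assumed header paths:

  #include "webrtc/base/checks.h"          // RTC_DCHECK (assumed location)
  #include "webrtc/base/thread_checker.h"  // rtc::ThreadChecker (assumed location)

  class Widget {
   public:
    Widget() {
      // The object may be built on one thread and used on another, so
      // forget the construction thread here.
      thread_checker_.DetachFromThread();
    }

    void Use() {
      // The first call after a detach binds the checker to the current
      // thread; later calls from any other thread fail this check.
      RTC_DCHECK(thread_checker_.CalledOnValidThread());
    }

   private:
    rtc::ThreadChecker thread_checker_;
  };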
142 143
143 AudioMixerImpl::~AudioMixerImpl() {} 144 AudioMixerImpl::~AudioMixerImpl() {}
144 145
145 bool AudioMixerImpl::Init() { 146 bool AudioMixerImpl::Init() {
147 RTC_DCHECK_RUN_ON(&thread_checker_);
146 crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); 148 crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
147 if (crit_.get() == NULL) 149 if (crit_.get() == NULL)
148 return false; 150 return false;
aleloi 2016/09/02 08:51:21 This can't fail and is set in the constructor now.
149 151
150 cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
151 if (cb_crit_.get() == NULL)
152 return false;
153
154 Config config; 152 Config config;
155 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); 153 config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
156 limiter_.reset(AudioProcessing::Create(config)); 154 limiter_.reset(AudioProcessing::Create(config));
157 if (!limiter_.get()) 155 if (!limiter_.get())
158 return false; 156 return false;
159 157
160 if (SetOutputFrequency(kDefaultFrequency) == -1) 158 if (SetOutputFrequency(kDefaultFrequency) == -1)
161 return false; 159 return false;
162 160
163 if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) != 161 if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) !=
164 limiter_->kNoError) 162 limiter_->kNoError)
165 return false; 163 return false;
166 164
167 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the 165 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
168 // divide-by-2 but -7 is used instead to give a bit of headroom since the 166 // divide-by-2 but -7 is used instead to give a bit of headroom since the
169 // AGC is not a hard limiter. 167 // AGC is not a hard limiter.
170 if (limiter_->gain_control()->set_target_level_dbfs(7) != limiter_->kNoError) 168 if (limiter_->gain_control()->set_target_level_dbfs(7) != limiter_->kNoError)
171 return false; 169 return false;
172 170
173 if (limiter_->gain_control()->set_compression_gain_db(0) != 171 if (limiter_->gain_control()->set_compression_gain_db(0) !=
174 limiter_->kNoError) 172 limiter_->kNoError)
175 return false; 173 return false;
176 174
177 if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError) 175 if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError)
178 return false; 176 return false;
179 177
180 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) 178 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError)
181 return false; 179 return false;
182 180 thread_checker_.DetachFromThread();
183 return true; 181 return true;
184 } 182 }
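
A note on the "-7 dbFS" comment in the Init() chunk above (annotation, not part of the patch): halving an amplitude lowers its level by 20 * log10(1/2) ≈ -6.02 dB, so the divide-by-2 mentioned in the comment corresponds to roughly -6 dBFS; targeting -7 leaves about 1 dB of headroom because, as the comment says, the AGC is not a hard limiter.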
185 183
186 void AudioMixerImpl::Mix(int sample_rate, 184 void AudioMixerImpl::Mix(int sample_rate,
187 size_t number_of_channels, 185 size_t number_of_channels,
188 AudioFrame* audio_frame_for_mixing) { 186 AudioFrame* audio_frame_for_mixing) {
189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); 187 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
190 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 188 RTC_DCHECK_RUN_ON(&thread_checker_);
189 std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
190
191 if (sample_rate != kNbInHz && sample_rate != kWbInHz &&
192 sample_rate != kSwbInHz && sample_rate != kFbInHz) {
193 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
194 "Invalid frequency: %d", sample_rate);
195 RTC_NOTREACHED();
196 return;
197 }
198
199 if (OutputFrequency() != sample_rate) {
200 SetOutputFrequency(static_cast<Frequency>(sample_rate));
201 }
202
191 AudioFrameList mixList; 203 AudioFrameList mixList;
192 AudioFrameList additionalFramesList; 204 AudioFrameList additionalFramesList;
193 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; 205 int num_mixed_audio_sources;
194 { 206 {
195 CriticalSectionScoped cs(cb_crit_.get()); 207 CriticalSectionScoped cs(crit_.get());
196 Frequency mixing_frequency;
197
198 switch (sample_rate) {
199 case 8000:
200 mixing_frequency = kNbInHz;
201 break;
202 case 16000:
203 mixing_frequency = kWbInHz;
204 break;
205 case 32000:
206 mixing_frequency = kSwbInHz;
207 break;
208 case 48000:
209 mixing_frequency = kFbInHz;
210 break;
211 default:
212 RTC_NOTREACHED();
213 return;
214 }
215
216 if (OutputFrequency() != mixing_frequency) {
217 SetOutputFrequency(mixing_frequency);
218 }
219
220 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); 208 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources);
221 GetAdditionalAudio(&additionalFramesList); 209 GetAdditionalAudio(&additionalFramesList);
210 num_mixed_audio_sources = static_cast<int>(num_mixed_audio_sources_);
222 } 211 }
223 212
224 for (FrameAndMuteInfo& frame_and_mute : mixList) { 213 for (FrameAndMuteInfo& frame_and_mute : mixList) {
225 RemixFrame(frame_and_mute.frame, number_of_channels); 214 RemixFrame(frame_and_mute.frame, number_of_channels);
226 } 215 }
227 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { 216 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) {
228 RemixFrame(frame_and_mute.frame, number_of_channels); 217 RemixFrame(frame_and_mute.frame, number_of_channels);
229 } 218 }
230 219
231 audio_frame_for_mixing->UpdateFrame( 220 audio_frame_for_mixing->UpdateFrame(
232 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, 221 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
233 AudioFrame::kVadPassive, number_of_channels); 222 AudioFrame::kVadPassive, number_of_channels);
234 223
235 time_stamp_ += static_cast<uint32_t>(sample_size_); 224 time_stamp_ += static_cast<uint32_t>(sample_size_);
236 225
237 use_limiter_ = num_mixed_audio_sources_ > 1; 226 use_limiter_ = num_mixed_audio_sources > 1;
238 227
239 // We only use the limiter if it supports the output sample rate and 228 // We only use the limiter if it supports the output sample rate and
240 // we're actually mixing multiple streams. 229 // we're actually mixing multiple streams.
241 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); 230 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_);
242 231 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
243 { 232 if (audio_frame_for_mixing->samples_per_channel_ == 0) {
244 CriticalSectionScoped cs(crit_.get()); 233 // Nothing was mixed, set the audio samples to silence.
245 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); 234 audio_frame_for_mixing->samples_per_channel_ = sample_size_;
246 235 audio_frame_for_mixing->Mute();
247 if (audio_frame_for_mixing->samples_per_channel_ == 0) { 236 } else {
248 // Nothing was mixed, set the audio samples to silence. 237 // Only call the limiter if we have something to mix.
249 audio_frame_for_mixing->samples_per_channel_ = sample_size_; 238 LimitMixedAudio(audio_frame_for_mixing);
250 audio_frame_for_mixing->Mute();
251 } else {
252 // Only call the limiter if we have something to mix.
253 LimitMixedAudio(audio_frame_for_mixing);
254 }
255 } 239 }
256 240
257 // Pass the final result to the level indicator. 241 // Pass the final result to the level indicator.
258 audio_level_.ComputeLevel(*audio_frame_for_mixing); 242 audio_level_.ComputeLevel(*audio_frame_for_mixing);
259 243
260 return; 244 return;
261 } 245 }
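
The restructured Mix() above follows a snapshot-under-lock pattern: the single remaining critical section only copies the frame list and the source count, and all remixing and limiting happens with the lock released. A generic sketch of the pattern (hypothetical names, std::mutex instead of CriticalSectionWrapper):

  #include <cstddef>
  #include <mutex>
  #include <vector>

  struct Frame { /* samples, mute flag, ... */ };

  class SnapshotMixer {
   public:
    void Mix() {
      std::vector<Frame> to_mix;
      std::size_t num_sources = 0;
      {
        // Keep the critical section minimal: just copy shared state.
        std::lock_guard<std::mutex> lock(mutex_);
        to_mix = pending_;
        num_sources = pending_.size();
      }
      // The heavy per-frame work runs without the lock held.
      Process(to_mix, num_sources);
    }

   private:
    void Process(const std::vector<Frame>& frames, std::size_t n) { /* mix + limit */ }

    std::mutex mutex_;
    std::vector<Frame> pending_;  // Written by other threads under mutex_.
  };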
262 246
263 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { 247 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) {
264 CriticalSectionScoped cs(crit_.get()); 248 RTC_DCHECK_RUN_ON(&thread_checker_);
265
266 output_frequency_ = frequency; 249 output_frequency_ = frequency;
267 sample_size_ = 250 sample_size_ =
268 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); 251 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000);
269 252
270 return 0; 253 return 0;
271 } 254 }
272 255
273 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { 256 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
274 CriticalSectionScoped cs(crit_.get()); 257 RTC_DCHECK_RUN_ON(&thread_checker_);
275 return output_frequency_; 258 return output_frequency_;
276 } 259 }
277 260
278 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, 261 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
279 bool mixable) { 262 bool mixable) {
280 if (!mixable) { 263 if (!mixable) {
281 // Anonymous audio sources are in a separate list. Make sure that the 264 // Anonymous audio sources are in a separate list. Make sure that the
282 // audio source is in the _audioSourceList if it is being mixed. 265 // audio source is in the _audioSourceList if it is being mixed.
283 SetAnonymousMixabilityStatus(audio_source, false); 266 SetAnonymousMixabilityStatus(audio_source, false);
284 } 267 }
285 size_t numMixedAudioSources;
286 { 268 {
287 CriticalSectionScoped cs(cb_crit_.get()); 269 CriticalSectionScoped cs(crit_.get());
288 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); 270 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
289 // API must be called with a new state. 271 // API must be called with a new state.
290 if (!(mixable ^ isMixed)) { 272 if (!(mixable ^ isMixed)) {
291 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, 273 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
292 "Mixable is aready %s", isMixed ? "ON" : "off"); 274 "Mixable is aready %s", isMixed ? "ON" : "off");
293 return -1; 275 return -1;
294 } 276 }
295 bool success = false; 277 bool success = false;
296 if (mixable) { 278 if (mixable) {
297 success = AddAudioSourceToList(audio_source, &audio_source_list_); 279 success = AddAudioSourceToList(audio_source, &audio_source_list_);
298 } else { 280 } else {
299 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); 281 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
300 } 282 }
301 if (!success) { 283 if (!success) {
302 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 284 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
303 "failed to %s audio_source", mixable ? "add" : "remove"); 285 "failed to %s audio_source", mixable ? "add" : "remove");
304 RTC_NOTREACHED(); 286 RTC_NOTREACHED();
305 return -1; 287 return -1;
306 } 288 }
307 289
308 size_t numMixedNonAnonymous = audio_source_list_.size(); 290 size_t numMixedNonAnonymous = audio_source_list_.size();
309 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { 291 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
310 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; 292 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
311 } 293 }
312 numMixedAudioSources = 294 num_mixed_audio_sources_ =
313 numMixedNonAnonymous + additional_audio_source_list_.size(); 295 numMixedNonAnonymous + additional_audio_source_list_.size();
314 } 296 }
315 // A MixerAudioSource was added or removed. Make sure the scratch
316 // buffer is updated if necessary.
317 // Note: The scratch buffer may only be updated in Process().
318 CriticalSectionScoped cs(crit_.get());
319 num_mixed_audio_sources_ = numMixedAudioSources;
320 return 0; 297 return 0;
321 } 298 }
322 299
323 bool AudioMixerImpl::MixabilityStatus( 300 bool AudioMixerImpl::MixabilityStatus(
324 const MixerAudioSource& audio_source) const { 301 const MixerAudioSource& audio_source) const {
325 CriticalSectionScoped cs(cb_crit_.get()); 302 CriticalSectionScoped cs(crit_.get());
326 return IsAudioSourceInList(audio_source, audio_source_list_); 303 return IsAudioSourceInList(audio_source, audio_source_list_);
327 } 304 }
328 305
329 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( 306 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
330 MixerAudioSource* audio_source, 307 MixerAudioSource* audio_source,
331 bool anonymous) { 308 bool anonymous) {
332 CriticalSectionScoped cs(cb_crit_.get()); 309 CriticalSectionScoped cs(crit_.get());
333 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { 310 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
334 if (anonymous) { 311 if (anonymous) {
335 return 0; 312 return 0;
336 } 313 }
337 if (!RemoveAudioSourceFromList(audio_source, 314 if (!RemoveAudioSourceFromList(audio_source,
338 &additional_audio_source_list_)) { 315 &additional_audio_source_list_)) {
339 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 316 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
340 "unable to remove audio_source from anonymous list"); 317 "unable to remove audio_source from anonymous list");
341 RTC_NOTREACHED(); 318 RTC_NOTREACHED();
342 return -1; 319 return -1;
(...skipping 13 matching lines...)
356 // already registered. 333 // already registered.
357 return -1; 334 return -1;
358 } 335 }
359 return AddAudioSourceToList(audio_source, &additional_audio_source_list_) 336 return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
360 ? 0 337 ? 0
361 : -1; 338 : -1;
362 } 339 }
363 340
364 bool AudioMixerImpl::AnonymousMixabilityStatus( 341 bool AudioMixerImpl::AnonymousMixabilityStatus(
365 const MixerAudioSource& audio_source) const { 342 const MixerAudioSource& audio_source) const {
366 CriticalSectionScoped cs(cb_crit_.get()); 343 CriticalSectionScoped cs(crit_.get());
367 return IsAudioSourceInList(audio_source, additional_audio_source_list_); 344 return IsAudioSourceInList(audio_source, additional_audio_source_list_);
368 } 345 }
369 346
370 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { 347 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const {
348 RTC_DCHECK_RUN_ON(&thread_checker_);
371 AudioFrameList result; 349 AudioFrameList result;
372 std::vector<SourceFrame> audioSourceMixingDataList; 350 std::vector<SourceFrame> audioSourceMixingDataList;
373 351
374 // Get audio source audio and put it in the struct vector. 352 // Get audio source audio and put it in the struct vector.
375 for (MixerAudioSource* audio_source : audio_source_list_) { 353 for (MixerAudioSource* audio_source : audio_source_list_) {
376 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( 354 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
377 id_, static_cast<int>(output_frequency_)); 355 id_, static_cast<int>(output_frequency_));
378 356
379 auto audio_frame_info = audio_frame_with_info.audio_frame_info; 357 auto audio_frame_info = audio_frame_with_info.audio_frame_info;
380 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; 358 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
(...skipping 38 matching lines...)
419 result.emplace_back(p.audio_frame_, false); 397 result.emplace_back(p.audio_frame_, false);
420 } 398 }
421 399
422 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); 400 p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
423 } 401 }
424 return result; 402 return result;
425 } 403 }
426 404
427 void AudioMixerImpl::GetAdditionalAudio( 405 void AudioMixerImpl::GetAdditionalAudio(
428 AudioFrameList* additionalFramesList) const { 406 AudioFrameList* additionalFramesList) const {
407 RTC_DCHECK_RUN_ON(&thread_checker_);
429 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 408 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
430 "GetAdditionalAudio(additionalFramesList)"); 409 "GetAdditionalAudio(additionalFramesList)");
431 // The GetAudioFrameWithMuted() callback may result in the audio source being 410 // The GetAudioFrameWithMuted() callback may result in the audio source being
432 // removed from additionalAudioFramesList_. If that happens it will 411 // removed from additionalAudioFramesList_. If that happens it will
433 // invalidate any iterators. Create a copy of the audio sources list such 412 // invalidate any iterators. Create a copy of the audio sources list such
434 // that the list of participants can be traversed safely. 413 // that the list of participants can be traversed safely.
435 MixerAudioSourceList additionalAudioSourceList; 414 MixerAudioSourceList additionalAudioSourceList;
436 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), 415 additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
437 additional_audio_source_list_.begin(), 416 additional_audio_source_list_.begin(),
438 additional_audio_source_list_.end()); 417 additional_audio_source_list_.end());
(...skipping 87 matching lines...)
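
The copy-before-iterate comment at the top of GetAdditionalAudio() above is the usual guard against callback reentrancy: GetAudioFrameWithMuted() may remove a source from the very list being walked, which would invalidate the iterator. A minimal illustration of the hazard and the fix, with hypothetical types unrelated to this CL:

  #include <list>

  struct Source;
  using SourceList = std::list<Source*>;

  // If pulling audio from a source can trigger its removal from |sources|,
  // iterate over a copy so erasures cannot invalidate the loop's iterator.
  void PullAudioSafely(const SourceList& sources) {
    SourceList snapshot(sources.begin(), sources.end());
    for (Source* source : snapshot) {
      // source->GetAudio();  // may call back and erase |source| from |sources|
    }
  }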
526 position++; 505 position++;
527 } 506 }
528 507
529 return 0; 508 return 0;
530 } 509 }
531 510
532 // TODO(andrew): consolidate this function with MixFromList. 511 // TODO(andrew): consolidate this function with MixFromList.
533 int32_t AudioMixerImpl::MixAnonomouslyFromList( 512 int32_t AudioMixerImpl::MixAnonomouslyFromList(
534 AudioFrame* mixedAudio, 513 AudioFrame* mixedAudio,
535 const AudioFrameList& audioFrameList) const { 514 const AudioFrameList& audioFrameList) const {
515 RTC_DCHECK_RUN_ON(&thread_checker_);
536 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 516 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
537 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); 517 "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
538 518
539 if (audioFrameList.empty()) 519 if (audioFrameList.empty())
540 return 0; 520 return 0;
541 521
542 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); 522 for (AudioFrameList::const_iterator iter = audioFrameList.begin();
543 iter != audioFrameList.end(); ++iter) { 523 iter != audioFrameList.end(); ++iter) {
544 if (!iter->muted) { 524 if (!iter->muted) {
545 MixFrames(mixedAudio, iter->frame, use_limiter_); 525 MixFrames(mixedAudio, iter->frame, use_limiter_);
546 } 526 }
547 } 527 }
548 return 0; 528 return 0;
549 } 529 }
550 530
551 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { 531 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
532 RTC_DCHECK_RUN_ON(&thread_checker_);
552 if (!use_limiter_) { 533 if (!use_limiter_) {
553 return true; 534 return true;
554 } 535 }
555 536
556 // Smoothly limit the mixed frame. 537 // Smoothly limit the mixed frame.
557 const int error = limiter_->ProcessStream(mixedAudio); 538 const int error = limiter_->ProcessStream(mixedAudio);
558 539
559 // And now we can safely restore the level. This procedure results in 540 // And now we can safely restore the level. This procedure results in
560 // some loss of resolution, deemed acceptable. 541 // some loss of resolution, deemed acceptable.
561 // 542 //
562 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS 543 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
563 // and compression gain of 6 dB). However, in the transition frame when this 544 // and compression gain of 6 dB). However, in the transition frame when this
564 // is enabled (moving from one to two audio sources) it has the potential to 545 // is enabled (moving from one to two audio sources) it has the potential to
565 // create discontinuities in the mixed frame. 546 // create discontinuities in the mixed frame.
566 // 547 //
567 // Instead we double the frame (with addition since left-shifting a 548 // Instead we double the frame (with addition since left-shifting a
568 // negative value is undefined). 549 // negative value is undefined).
569 *mixedAudio += *mixedAudio; 550 *mixedAudio += *mixedAudio;
570 551
571 if (error != limiter_->kNoError) { 552 if (error != limiter_->kNoError) {
572 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 553 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
573 "Error from AudioProcessing: %d", error); 554 "Error from AudioProcessing: %d", error);
574 RTC_NOTREACHED(); 555 RTC_NOTREACHED();
575 return false; 556 return false;
576 } 557 }
577 return true; 558 return true;
578 } 559 }
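
On the "addition since left-shifting a negative value is undefined" comment inside LimitMixedAudio() above (annotation, not part of the patch): for signed integers, x << 1 is undefined behavior when x is negative (prior to C++20), while x + x is well defined as long as the result is in range, which is why the frame is doubled with operator+=. A tiny illustration with a hypothetical helper:

  #include <cstdint>

  // Doubling one signed 16-bit sample. x + x is well defined for in-range
  // results; x << 1 would be UB for negative x before C++20. The mixer's
  // AudioFrame::operator+= applies the same idea per sample (and may also
  // clamp, which this sketch does not).
  int16_t DoubleSample(int16_t x) {
    return static_cast<int16_t>(x + x);
  }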
579 560
580 int AudioMixerImpl::GetOutputAudioLevel() { 561 int AudioMixerImpl::GetOutputAudioLevel() {
562 RTC_DCHECK_RUN_ON(&thread_checker_);
581 const int level = audio_level_.Level(); 563 const int level = audio_level_.Level();
582 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 564 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
583 "GetAudioOutputLevel() => level=%d", level); 565 "GetAudioOutputLevel() => level=%d", level);
584 return level; 566 return level;
585 } 567 }
586 568
587 int AudioMixerImpl::GetOutputAudioLevelFullRange() { 569 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
570 RTC_DCHECK_RUN_ON(&thread_checker_);
588 const int level = audio_level_.LevelFullRange(); 571 const int level = audio_level_.LevelFullRange();
589 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 572 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
590 "GetAudioOutputLevelFullRange() => level=%d", level); 573 "GetAudioOutputLevelFullRange() => level=%d", level);
591 return level; 574 return level;
592 } 575 }
593 } // namespace webrtc 576 } // namespace webrtc