Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2294263002: Fix style guide issues in audio mixer. (Closed)
Patch Set: Created 4 years, 3 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 35 matching lines...)
46 muted_(m), 46 muted_(m),
47 energy_(energy), 47 energy_(energy),
48 was_mixed_before_(was_mixed_before) {} 48 was_mixed_before_(was_mixed_before) {}
49 49
50 // a.shouldMixBefore(b) is used to select mixer participants. 50 // a.shouldMixBefore(b) is used to select mixer participants.
51 bool shouldMixBefore(const SourceFrame& other) const { 51 bool shouldMixBefore(const SourceFrame& other) const {
52 if (muted_ != other.muted_) { 52 if (muted_ != other.muted_) {
53 return other.muted_; 53 return other.muted_;
54 } 54 }
55 55
56 auto our_activity = audio_frame_->vad_activity_; 56 const auto our_activity = audio_frame_->vad_activity_;
57 auto other_activity = other.audio_frame_->vad_activity_; 57 const auto other_activity = other.audio_frame_->vad_activity_;
58 58
59 if (our_activity != other_activity) { 59 if (our_activity != other_activity) {
60 return our_activity == AudioFrame::kVadActive; 60 return our_activity == AudioFrame::kVadActive;
61 } 61 }
62 62
63 return energy_ > other.energy_; 63 return energy_ > other.energy_;
64 } 64 }
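
Note: shouldMixBefore() is handed to std::sort further down (via std::mem_fn), so it must define a strict weak ordering: unmuted before muted, VAD-active before passive, then higher energy first. A minimal standalone sketch of the same pattern, with simplified, hypothetical types:

#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

// Simplified stand-in for SourceFrame above; fields reduced to the
// three keys the comparator actually inspects.
struct Frame {
  bool muted;
  bool vad_active;
  uint32_t energy;

  // Strict weak ordering: unmuted < muted, active < passive,
  // then higher energy first.
  bool ShouldMixBefore(const Frame& other) const {
    if (muted != other.muted) return other.muted;
    if (vad_active != other.vad_active) return vad_active;
    return energy > other.energy;
  }
};

void SortForMixing(std::vector<Frame>* frames) {
  // Same call shape as in GetNonAnonymousAudio(): a member predicate
  // adapted with std::mem_fn.
  std::sort(frames->begin(), frames->end(),
            std::mem_fn(&Frame::ShouldMixBefore));
}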
65 65
66 MixerAudioSource* audio_source_; 66 MixerAudioSource* audio_source_;
67 AudioFrame* audio_frame_; 67 AudioFrame* audio_frame_;
(...skipping 12 matching lines...)
80 } 80 }
81 } 81 }
82 82
83 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { 83 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
84 for (const auto& source_frame : mixed_sources_and_frames) { 84 for (const auto& source_frame : mixed_sources_and_frames) {
85 // Ramp in previously unmixed. 85 // Ramp in previously unmixed.
86 if (!source_frame.was_mixed_before_) { 86 if (!source_frame.was_mixed_before_) {
87 NewMixerRampIn(source_frame.audio_frame_); 87 NewMixerRampIn(source_frame.audio_frame_);
88 } 88 }
89 89
90 const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed(); 90 const bool is_mixed = source_frame.audio_source_->mix_history_->IsMixed();
91 // Ramp out currently unmixed. 91 // Ramp out currently unmixed.
92 if (source_frame.was_mixed_before_ && !is_mixed) { 92 if (source_frame.was_mixed_before_ && !is_mixed) {
93 NewMixerRampOut(source_frame.audio_frame_); 93 NewMixerRampOut(source_frame.audio_frame_);
94 } 94 }
95 } 95 }
96 } 96 }
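
Note: NewMixerRampIn()/NewMixerRampOut() are defined elsewhere in this file and are not part of this diff. The idea is a short gain ramp across one frame to avoid clicks when a source enters or leaves the mix; a plausible linear version (a sketch under that assumption, not the actual WebRTC implementation) looks like:

#include <cstddef>
#include <cstdint>

// Linear fade applied in place over one frame of int16 samples.
// Ramp-in is (from=0, to=1); ramp-out is (from=1, to=0).
void LinearRamp(int16_t* samples, size_t count, float from, float to) {
  if (count == 0) return;
  const float step = (to - from) / static_cast<float>(count);
  float gain = from;
  for (size_t i = 0; i < count; ++i) {
    samples[i] = static_cast<int16_t>(samples[i] * gain);
    gain += step;
  }
}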
97 97
98 } // namespace 98 } // namespace
99 99
100 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {} 100 MixerAudioSource::MixerAudioSource() : mix_history_(new NewMixHistory()) {}
101 101
102 MixerAudioSource::~MixerAudioSource() { 102 MixerAudioSource::~MixerAudioSource() {
103 delete _mixHistory; 103 delete mix_history_;
104 } 104 }
105 105
106 bool MixerAudioSource::IsMixed() const { 106 bool MixerAudioSource::IsMixed() const {
107 return _mixHistory->IsMixed(); 107 return mix_history_->IsMixed();
108 } 108 }
109 109
110 NewMixHistory::NewMixHistory() : is_mixed_(0) {} 110 NewMixHistory::NewMixHistory() : is_mixed_(0) {}
111 111
112 NewMixHistory::~NewMixHistory() {} 112 NewMixHistory::~NewMixHistory() {}
113 113
114 bool NewMixHistory::IsMixed() const { 114 bool NewMixHistory::IsMixed() const {
115 return is_mixed_; 115 return is_mixed_;
116 } 116 }
117 117
(...skipping 70 matching lines...)
188 return false; 188 return false;
189 189
190 return true; 190 return true;
191 } 191 }
192 192
193 void AudioMixerImpl::Mix(int sample_rate, 193 void AudioMixerImpl::Mix(int sample_rate,
194 size_t number_of_channels, 194 size_t number_of_channels,
195 AudioFrame* audio_frame_for_mixing) { 195 AudioFrame* audio_frame_for_mixing) {
196 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); 196 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
197 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 197 RTC_DCHECK(thread_checker_.CalledOnValidThread());
198 std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
aleloi 2016/08/31 12:35:05 Isn't used any longer. Strange that this was unnoticed.
199 198
200 if (sample_rate != kNbInHz && sample_rate != kWbInHz && 199 if (sample_rate != kNbInHz && sample_rate != kWbInHz &&
201 sample_rate != kSwbInHz && sample_rate != kFbInHz) { 200 sample_rate != kSwbInHz && sample_rate != kFbInHz) {
202 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 201 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
203 "Invalid frequency: %d", sample_rate); 202 "Invalid frequency: %d", sample_rate);
204 RTC_NOTREACHED(); 203 RTC_NOTREACHED();
205 return; 204 return;
206 } 205 }
207 206
208 if (OutputFrequency() != sample_rate) { 207 if (OutputFrequency() != sample_rate) {
209 SetOutputFrequency(static_cast<Frequency>(sample_rate)); 208 SetOutputFrequency(static_cast<Frequency>(sample_rate));
210 } 209 }
211 210
212 AudioFrameList mix_list; 211 AudioFrameList mix_list;
213 AudioFrameList anonymous_mix_list; 212 AudioFrameList anonymous_mix_list;
214 { 213 {
215 CriticalSectionScoped cs(crit_.get()); 214 CriticalSectionScoped cs(crit_.get());
216 mix_list = GetNonAnonymousAudio(); 215 mix_list = GetNonAnonymousAudio();
217 anonymous_mix_list = GetAnonymousAudio(); 216 anonymous_mix_list = GetAnonymousAudio();
218 } 217 }
219 218
220 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), 219 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(),
221 anonymous_mix_list.end()); 220 anonymous_mix_list.end());
222 221
223 for (const auto& frame : mix_list) { 222 for (const auto& frame : mix_list) {
224 RemixFrame(frame, number_of_channels); 223 RemixFrame(frame, number_of_channels);
225 } 224 }
226 225
227 audio_frame_for_mixing->UpdateFrame( 226 audio_frame_for_mixing->UpdateFrame(
228 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, 227 -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech,
229 AudioFrame::kVadPassive, number_of_channels); 228 AudioFrame::kVadPassive, number_of_channels);
230 229
231 time_stamp_ += static_cast<uint32_t>(sample_size_); 230 time_stamp_ += static_cast<uint32_t>(sample_size_);
232 231
233 use_limiter_ = num_mixed_audio_sources_ > 1; 232 use_limiter_ = num_mixed_audio_sources_ > 1;
234 233
235 // We only use the limiter if we're actually mixing multiple streams. 234 // We only use the limiter if we're actually mixing multiple streams.
236 MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_); 235 MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_);
237 236
238 if (audio_frame_for_mixing->samples_per_channel_ == 0) { 237 if (audio_frame_for_mixing->samples_per_channel_ == 0) {
(...skipping 47 matching lines...)
286 } else { 285 } else {
287 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); 286 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
288 } 287 }
289 if (!success) { 288 if (!success) {
290 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 289 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
291 "failed to %s audio_source", mixable ? "add" : "remove"); 290 "failed to %s audio_source", mixable ? "add" : "remove");
292 RTC_NOTREACHED(); 291 RTC_NOTREACHED();
293 return -1; 292 return -1;
294 } 293 }
295 294
296 size_t numMixedNonAnonymous = audio_source_list_.size(); 295 size_t num_mixed_non_anonymous = audio_source_list_.size();
297 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { 296 if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) {
298 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; 297 num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources;
299 } 298 }
300 num_mixed_audio_sources_ = 299 num_mixed_audio_sources_ =
301 numMixedNonAnonymous + additional_audio_source_list_.size(); 300 num_mixed_non_anonymous + additional_audio_source_list_.size();
302 } 301 }
303 return 0; 302 return 0;
304 } 303 }
305 304
306 bool AudioMixerImpl::MixabilityStatus( 305 bool AudioMixerImpl::MixabilityStatus(
307 const MixerAudioSource& audio_source) const { 306 const MixerAudioSource& audio_source) const {
308 CriticalSectionScoped cs(crit_.get()); 307 CriticalSectionScoped cs(crit_.get());
309 return IsAudioSourceInList(audio_source, audio_source_list_); 308 return IsAudioSourceInList(audio_source, audio_source_list_);
310 } 309 }
311 310
(...skipping 35 matching lines...)
347 bool AudioMixerImpl::AnonymousMixabilityStatus( 346 bool AudioMixerImpl::AnonymousMixabilityStatus(
348 const MixerAudioSource& audio_source) const { 347 const MixerAudioSource& audio_source) const {
349 CriticalSectionScoped cs(crit_.get()); 348 CriticalSectionScoped cs(crit_.get());
350 return IsAudioSourceInList(audio_source, additional_audio_source_list_); 349 return IsAudioSourceInList(audio_source, additional_audio_source_list_);
351 } 350 }
352 351
353 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const { 352 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const {
354 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 353 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
355 "GetNonAnonymousAudio()"); 354 "GetNonAnonymousAudio()");
356 AudioFrameList result; 355 AudioFrameList result;
357 std::vector<SourceFrame> audioSourceMixingDataList; 356 std::vector<SourceFrame> audio_source_mixing_data_list;
358 std::vector<SourceFrame> ramp_list; 357 std::vector<SourceFrame> ramp_list;
359 358
360 // Get audio source audio and put it in the struct vector. 359 // Get audio source audio and put it in the struct vector.
361 for (MixerAudioSource* audio_source : audio_source_list_) { 360 for (const auto audio_source : audio_source_list_) {
362 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( 361 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
363 id_, static_cast<int>(output_frequency_)); 362 id_, static_cast<int>(OutputFrequency()));
364 363
365 auto audio_frame_info = audio_frame_with_info.audio_frame_info; 364 const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
366 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; 365 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
367 366
368 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { 367 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
369 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, 368 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
370 "failed to GetAudioFrameWithMuted() from participant"); 369 "failed to GetAudioFrameWithMuted() from participant");
371 continue; 370 continue;
372 } 371 }
373 audioSourceMixingDataList.emplace_back( 372 audio_source_mixing_data_list.emplace_back(
374 audio_source, audio_source_audio_frame, 373 audio_source, audio_source_audio_frame,
375 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, 374 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
376 audio_source->_mixHistory->WasMixed()); 375 audio_source->mix_history_->WasMixed());
377 } 376 }
378 377
379 // Sort frames by sorting function. 378 // Sort frames by sorting function.
380 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), 379 std::sort(audio_source_mixing_data_list.begin(),
380 audio_source_mixing_data_list.end(),
381 std::mem_fn(&SourceFrame::shouldMixBefore)); 381 std::mem_fn(&SourceFrame::shouldMixBefore));
382 382
383 int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources; 383 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;
384 // Go through list in order and put things in mixList. 384
385 for (SourceFrame& p : audioSourceMixingDataList) { 385 // Go through list in order and put unmuted frames in result list.
386 for (const SourceFrame& p : audio_source_mixing_data_list) {
386 // Filter muted. 387 // Filter muted.
387 if (p.muted_) { 388 if (p.muted_) {
388 p.audio_source_->_mixHistory->SetIsMixed(false); 389 p.audio_source_->mix_history_->SetIsMixed(false);
389 continue; 390 continue;
390 } 391 }
391 392
392 // Add frame to result vector for mixing. 393 // Add frame to result vector for mixing.
393 bool is_mixed = false; 394 bool is_mixed = false;
394 if (maxAudioFrameCounter > 0) { 395 if (max_audio_frame_counter > 0) {
395 --maxAudioFrameCounter; 396 --max_audio_frame_counter;
396 result.push_back(p.audio_frame_); 397 result.push_back(p.audio_frame_);
397 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, 398 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false,
398 p.was_mixed_before_, -1); 399 p.was_mixed_before_, -1);
399 is_mixed = true; 400 is_mixed = true;
400 } 401 }
401 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); 402 p.audio_source_->mix_history_->SetIsMixed(is_mixed);
402 } 403 }
403 Ramp(ramp_list); 404 Ramp(ramp_list);
404 return result; 405 return result;
405 } 406 }
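
Note: only the first kMaximumAmountOfMixedAudioSources unmuted frames survive the selection loop above, so a full sort does more work than the selection strictly needs; std::partial_sort with the same predicate yields an equivalent selection (up to ties). A sketch of that alternative formulation (not what this patch does; Frame is the hypothetical type from the earlier sketch):

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Order only the first `max_mixed` elements; the remainder is left
// unspecified. The selected prefix matches the full-sort result.
template <typename Frame>
void PartialSortForMixing(std::vector<Frame>* frames, size_t max_mixed) {
  const size_t n = std::min(max_mixed, frames->size());
  std::partial_sort(frames->begin(), frames->begin() + n, frames->end(),
                    std::mem_fn(&Frame::ShouldMixBefore));
}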
406 407
407 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { 408 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const {
408 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 409 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
409 "GetAnonymousAudio()"); 410 "GetAnonymousAudio()");
410 // The GetAudioFrameWithMuted() callback may result in the audio source being 411 // The GetAudioFrameWithMuted() callback may result in the audio source being
411 // removed from additionalAudioFramesList_. If that happens it will 412 // removed from additionalAudioFramesList_. If that happens it will
412 // invalidate any iterators. Create a copy of the audio sources list such 413 // invalidate any iterators. Create a copy of the audio sources list such
413 // that the list of participants can be traversed safely. 414 // that the list of participants can be traversed safely.
414 std::vector<SourceFrame> ramp_list; 415 std::vector<SourceFrame> ramp_list;
415 MixerAudioSourceList additionalAudioSourceList; 416 MixerAudioSourceList additional_audio_sources_list;
416 AudioFrameList result; 417 AudioFrameList result;
417 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), 418 additional_audio_sources_list.insert(additional_audio_sources_list.begin(),
418 additional_audio_source_list_.begin(), 419 additional_audio_source_list_.begin(),
419 additional_audio_source_list_.end()); 420 additional_audio_source_list_.end());
420 421
421 for (MixerAudioSourceList::const_iterator audio_source = 422 for (const auto& audio_source : additional_audio_sources_list) {
422 additionalAudioSourceList.begin(); 423 const auto audio_frame_with_info =
423 audio_source != additionalAudioSourceList.end(); ++audio_source) { 424 audio_source->GetAudioFrameWithMuted(id_, OutputFrequency());
424 auto audio_frame_with_info = 425 const auto ret = audio_frame_with_info.audio_frame_info;
425 (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_);
426 auto ret = audio_frame_with_info.audio_frame_info;
427 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; 426 AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
428 if (ret == MixerAudioSource::AudioFrameInfo::kError) { 427 if (ret == MixerAudioSource::AudioFrameInfo::kError) {
429 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, 428 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
430 "failed to GetAudioFrameWithMuted() from audio_source"); 429 "failed to GetAudioFrameWithMuted() from audio_source");
431 continue; 430 continue;
432 } 431 }
433 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { 432 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) {
434 result.push_back(audio_frame); 433 result.push_back(audio_frame);
435 ramp_list.emplace_back((*audio_source), audio_frame, false, 434 ramp_list.emplace_back(audio_source, audio_frame, false,
436 (*audio_source)->_mixHistory->IsMixed(), -1); 435 audio_source->mix_history_->IsMixed(), 0);
437 (*audio_source)->_mixHistory->SetIsMixed(true); 436 audio_source->mix_history_->SetIsMixed(true);
438 } 437 }
439 } 438 }
440 Ramp(ramp_list); 439 Ramp(ramp_list);
441 return result; 440 return result;
442 } 441 }
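
Note: the copy-then-iterate pattern at the top of this function is the standard guard against iterator invalidation when a callback can mutate the container being traversed. Reduced to its essentials (hypothetical names):

#include <list>
#include <vector>

struct Source {
  // In the mixer, the analogous call may remove `this` from the
  // owning list as a side effect.
  void GetAudio() {}
};

void SafeTraversal(std::list<Source*>* live_sources) {
  // Snapshot first: iterators into `snapshot` stay valid even if
  // GetAudio() erases entries from *live_sources mid-loop.
  std::vector<Source*> snapshot(live_sources->begin(), live_sources->end());
  for (Source* source : snapshot) {
    source->GetAudio();
  }
}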
443 442
444 bool AudioMixerImpl::IsAudioSourceInList( 443 bool AudioMixerImpl::IsAudioSourceInList(
445 const MixerAudioSource& audio_source, 444 const MixerAudioSource& audio_source,
446 const MixerAudioSourceList& audioSourceList) const { 445 const MixerAudioSourceList& audio_source_list) const {
447 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 446 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
448 "IsAudioSourceInList(audio_source,audioSourceList)"); 447 "IsAudioSourceInList(audio_source,audio_source_list)");
449 return std::find(audioSourceList.begin(), audioSourceList.end(), 448 return std::find(audio_source_list.begin(), audio_source_list.end(),
450 &audio_source) != audioSourceList.end(); 449 &audio_source) != audio_source_list.end();
451 } 450 }
452 451
453 bool AudioMixerImpl::AddAudioSourceToList( 452 bool AudioMixerImpl::AddAudioSourceToList(
454 MixerAudioSource* audio_source, 453 MixerAudioSource* audio_source,
455 MixerAudioSourceList* audioSourceList) const { 454 MixerAudioSourceList* audio_source_list) const {
456 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 455 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
457 "AddAudioSourceToList(audio_source, audioSourceList)"); 456 "AddAudioSourceToList(audio_source, audio_source_list)");
458 audioSourceList->push_back(audio_source); 457 audio_source_list->push_back(audio_source);
459 // Make sure that the mixed status is correct for new MixerAudioSource. 458 // Make sure that the mixed status is correct for new MixerAudioSource.
460 audio_source->_mixHistory->ResetMixedStatus(); 459 audio_source->mix_history_->ResetMixedStatus();
461 return true; 460 return true;
462 } 461 }
463 462
464 bool AudioMixerImpl::RemoveAudioSourceFromList( 463 bool AudioMixerImpl::RemoveAudioSourceFromList(
465 MixerAudioSource* audio_source, 464 MixerAudioSource* audio_source,
466 MixerAudioSourceList* audioSourceList) const { 465 MixerAudioSourceList* audio_source_list) const {
467 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 466 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
468 "RemoveAudioSourceFromList(audio_source, audioSourceList)"); 467 "RemoveAudioSourceFromList(audio_source, audio_source_list)");
469 auto iter = 468 const auto iter = std::find(audio_source_list->begin(),
470 std::find(audioSourceList->begin(), audioSourceList->end(), audio_source); 469 audio_source_list->end(), audio_source);
471 if (iter != audioSourceList->end()) { 470 if (iter != audio_source_list->end()) {
472 audioSourceList->erase(iter); 471 audio_source_list->erase(iter);
473 // AudioSource is no longer mixed, reset to default. 472 // AudioSource is no longer mixed, reset to default.
474 audio_source->_mixHistory->ResetMixedStatus(); 473 audio_source->mix_history_->ResetMixedStatus();
475 return true; 474 return true;
476 } else { 475 } else {
477 return false; 476 return false;
478 } 477 }
479 } 478 }
480 479
481 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio, 480 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixed_audio,
482 const AudioFrameList& audioFrameList, 481 const AudioFrameList& audio_frame_list,
483 int32_t id, 482 int32_t id,
484 bool use_limiter) { 483 bool use_limiter) {
485 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, 484 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
486 "MixFromList(mixedAudio, audioFrameList)"); 485 "MixFromList(mixed_audio, audio_frame_list)");
487 if (audioFrameList.empty()) 486 if (audio_frame_list.empty())
488 return 0; 487 return 0;
489 488
490 uint32_t position = 0; 489 if (audio_frame_list.size() == 1) {
491 490 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_;
492 if (audioFrameList.size() == 1) { 491 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_;
493 mixedAudio->timestamp_ = audioFrameList.front()->timestamp_;
494 mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_;
495 } else { 492 } else {
496 // TODO(wu): Issue 3390. 493 // TODO(wu): Issue 3390.
497 // Audio frame timestamp is only supported in one channel case. 494 // Audio frame timestamp is only supported in one channel case.
498 mixedAudio->timestamp_ = 0; 495 mixed_audio->timestamp_ = 0;
499 mixedAudio->elapsed_time_ms_ = -1; 496 mixed_audio->elapsed_time_ms_ = -1;
500 } 497 }
501 498
502 for (const auto& frame : audioFrameList) { 499 for (const auto& frame : audio_frame_list) {
503 RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_); 500 RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_);
504 RTC_DCHECK_EQ( 501 RTC_DCHECK_EQ(
505 frame->samples_per_channel_, 502 frame->samples_per_channel_,
506 static_cast<size_t>((mixedAudio->sample_rate_hz_ * kFrameDurationInMs) / 503 static_cast<size_t>(
507 1000)); 504 (mixed_audio->sample_rate_hz_ * kFrameDurationInMs) / 1000));
508 505
509 // Mix |frame| into |mixedAudio|, with saturation protection. 506 // Mix |frame| into |mixed_audio|, with saturation protection.
510 // This effect is applied to |frame| itself prior to mixing. 507 // This effect is applied to |frame| itself prior to mixing.
511 if (use_limiter) { 508 if (use_limiter) {
512 // Divide by two to avoid saturation in the mixing. 509 // Divide by two to avoid saturation in the mixing.
513 // This is only meaningful if the limiter will be used. 510 // This is only meaningful if the limiter will be used.
514 *frame >>= 1; 511 *frame >>= 1;
515 } 512 }
516 RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_); 513 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
517 *mixedAudio += *frame; 514 *mixed_audio += *frame;
518 position++;
519 } 515 }
520 return 0; 516 return 0;
521 } 517 }
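
Note: the *frame >>= 1 above trades one bit of resolution for 6 dB of mixing headroom: two full-scale int16 streams summed directly would overflow, so each contribution is halved before accumulation and the limiter restores the level afterwards (see LimitMixedAudio() below). The arithmetic, spelled out with a hypothetical helper (the frame mixing operators in WebRTC handle this internally; the sketch just makes it explicit):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Saturating 16-bit add via a wider intermediate type.
int16_t SaturatingAdd(int16_t a, int16_t b) {
  const int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  return static_cast<int16_t>(
      std::min<int32_t>(32767, std::max<int32_t>(-32768, sum)));
}

// Accumulate `src` into `dst`; each contribution is halved once
// before it enters the mix, as in MixFromList() above.
void MixWithHeadroom(int16_t* dst, const int16_t* src, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    dst[i] = SaturatingAdd(dst[i], src[i] / 2);
  }
}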
522 518
523 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { 519 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const {
524 if (!use_limiter_) { 520 if (!use_limiter_) {
525 return true; 521 return true;
526 } 522 }
527 523
528 // Smoothly limit the mixed frame. 524 // Smoothly limit the mixed frame.
529 const int error = limiter_->ProcessStream(mixedAudio); 525 const int error = limiter_->ProcessStream(mixed_audio);
530 526
531 // And now we can safely restore the level. This procedure results in 527 // And now we can safely restore the level. This procedure results in
532 // some loss of resolution, deemed acceptable. 528 // some loss of resolution, deemed acceptable.
533 // 529 //
534 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS 530 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
535 // and compression gain of 6 dB). However, in the transition frame when this 531 // and compression gain of 6 dB). However, in the transition frame when this
536 // is enabled (moving from one to two audio sources) it has the potential to 532 // is enabled (moving from one to two audio sources) it has the potential to
537 // create discontinuities in the mixed frame. 533 // create discontinuities in the mixed frame.
538 // 534 //
539 // Instead we double the frame (with addition since left-shifting a 535 // Instead we double the frame (with addition since left-shifting a
540 // negative value is undefined). 536 // negative value is undefined).
541 *mixedAudio += *mixedAudio; 537 *mixed_audio += *mixed_audio;
542 538
543 if (error != limiter_->kNoError) { 539 if (error != limiter_->kNoError) {
544 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 540 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
545 "Error from AudioProcessing: %d", error); 541 "Error from AudioProcessing: %d", error);
546 RTC_NOTREACHED(); 542 RTC_NOTREACHED();
547 return false; 543 return false;
548 } 544 }
549 return true; 545 return true;
550 } 546 }
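
Note on *mixed_audio += *mixed_audio: doubling by addition rather than by << 1 is deliberate, as the comment says, because left-shifting a negative signed value was undefined behavior in the C++ standards of the time (it only became defined in C++20). A minimal illustration with explicit saturation (hypothetical helper):

#include <cstdint>

int16_t DoubleSample(int16_t x) {
  // Not `x << 1`: shifting a negative signed value is UB pre-C++20.
  // Widen, add, then clamp back into the int16 range.
  const int32_t doubled = 2 * static_cast<int32_t>(x);
  if (doubled > 32767) return 32767;
  if (doubled < -32768) return -32768;
  return static_cast<int16_t>(doubled);
}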
551 547
552 int AudioMixerImpl::GetOutputAudioLevel() { 548 int AudioMixerImpl::GetOutputAudioLevel() {
553 const int level = audio_level_.Level(); 549 const int level = audio_level_.Level();
554 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 550 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
555 "GetAudioOutputLevel() => level=%d", level); 551 "GetAudioOutputLevel() => level=%d", level);
556 return level; 552 return level;
557 } 553 }
558 554
559 int AudioMixerImpl::GetOutputAudioLevelFullRange() { 555 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
560 const int level = audio_level_.LevelFullRange(); 556 const int level = audio_level_.LevelFullRange();
561 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 557 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
562 "GetAudioOutputLevelFullRange() => level=%d", level); 558 "GetAudioOutputLevelFullRange() => level=%d", level);
563 return level; 559 return level;
564 } 560 }
565 } // namespace webrtc 561 } // namespace webrtc
