Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2294263002: Fix style guide issues in audio mixer. (Closed)
Patch Set: Rebase. Created 4 years, 3 months ago
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 35 matching lines...)
         muted_(m),
         energy_(energy),
         was_mixed_before_(was_mixed_before) {}

   // a.shouldMixBefore(b) is used to select mixer participants.
   bool shouldMixBefore(const SourceFrame& other) const {
     if (muted_ != other.muted_) {
       return other.muted_;
     }

-    auto our_activity = audio_frame_->vad_activity_;
-    auto other_activity = other.audio_frame_->vad_activity_;
+    const auto our_activity = audio_frame_->vad_activity_;
+    const auto other_activity = other.audio_frame_->vad_activity_;

     if (our_activity != other_activity) {
       return our_activity == AudioFrame::kVadActive;
     }

     return energy_ > other.energy_;
   }

   MixerAudioSource* audio_source_;
   AudioFrame* audio_frame_;
(...skipping 12 matching lines...)
   }
 }

 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
   for (const auto& source_frame : mixed_sources_and_frames) {
     // Ramp in previously unmixed.
     if (!source_frame.was_mixed_before_) {
       NewMixerRampIn(source_frame.audio_frame_);
     }

-    const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed();
+    const bool is_mixed = source_frame.audio_source_->mix_history_->IsMixed();
     // Ramp out currently unmixed.
     if (source_frame.was_mixed_before_ && !is_mixed) {
       NewMixerRampOut(source_frame.audio_frame_);
     }
   }
 }

 }  // namespace

-MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}
+MixerAudioSource::MixerAudioSource() : mix_history_(new NewMixHistory()) {}

 MixerAudioSource::~MixerAudioSource() {
-  delete _mixHistory;
+  delete mix_history_;

kwiberg-webrtc 2016/09/01 11:59:11 Could you use a unique_ptr instead of a raw pointer?

aleloi 2016/09/01 13:36:17 I'll do it in the dependent CL https://codereview.

 }
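
The suggested follow-up would look roughly like the sketch below. This is hypothetical (the dependent CL is not shown here) and assumes NewMixHistory is a complete type where the member is declared:

#include <memory>

class NewMixHistory { /* as defined earlier in this file */ };

struct MixerAudioSource {
  MixerAudioSource() : mix_history_(new NewMixHistory()) {}
  // No user-written destructor needed: the unique_ptr deletes the
  // NewMixHistory automatically, replacing the manual delete above.
  std::unique_ptr<NewMixHistory> mix_history_;
};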

 bool MixerAudioSource::IsMixed() const {
-  return _mixHistory->IsMixed();
+  return mix_history_->IsMixed();
 }

 NewMixHistory::NewMixHistory() : is_mixed_(0) {}

 NewMixHistory::~NewMixHistory() {}

 bool NewMixHistory::IsMixed() const {
   return is_mixed_;
 }

(...skipping 70 matching lines...)
     return false;

   return true;
 }

 void AudioMixerImpl::Mix(int sample_rate,
                          size_t number_of_channels,
                          AudioFrame* audio_frame_for_mixing) {
   RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  std::map<int, MixerAudioSource*> mixedAudioSourcesMap;

   if (sample_rate != kNbInHz && sample_rate != kWbInHz &&
       sample_rate != kSwbInHz && sample_rate != kFbInHz) {
     WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                  "Invalid frequency: %d", sample_rate);
     RTC_NOTREACHED();
     return;
   }

   if (OutputFrequency() != sample_rate) {
(...skipping 11 matching lines...)
   }

   mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(),
                   anonymous_mix_list.end());

   for (const auto& frame : mix_list) {
     RemixFrame(frame, number_of_channels);
   }

   audio_frame_for_mixing->UpdateFrame(
-      -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
+      -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech,
       AudioFrame::kVadPassive, number_of_channels);

   time_stamp_ += static_cast<uint32_t>(sample_size_);

   use_limiter_ = num_mixed_audio_sources > 1;

   // We only use the limiter if we're actually mixing multiple streams.
   MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_);

   if (audio_frame_for_mixing->samples_per_channel_ == 0) {
(...skipping 27 matching lines...)

 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
                                             bool mixable) {
   if (!mixable) {
     // Anonymous audio sources are in a separate list. Make sure that the
     // audio source is in the _audioSourceList if it is being mixed.
     SetAnonymousMixabilityStatus(audio_source, false);
   }
   {
     CriticalSectionScoped cs(crit_.get());
-    const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
+    const bool is_mixed =
+        IsAudioSourceInList(*audio_source, audio_source_list_);
     // API must be called with a new state.
-    if (!(mixable ^ isMixed)) {
+    if (!(mixable ^ is_mixed)) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
-                   "Mixable is aready %s", isMixed ? "ON" : "off");
+                   "Mixable is aready %s", is_mixed ? "ON" : "off");
       return -1;
     }
     bool success = false;
     if (mixable) {
       success = AddAudioSourceToList(audio_source, &audio_source_list_);
     } else {
       success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
     }
     if (!success) {
       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                    "failed to %s audio_source", mixable ? "add" : "remove");
       RTC_NOTREACHED();
       return -1;
     }

-    size_t numMixedNonAnonymous = audio_source_list_.size();
-    if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
-      numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
+    size_t num_mixed_non_anonymous = audio_source_list_.size();
+    if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) {
+      num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources;
     }
     num_mixed_audio_sources_ =
-        numMixedNonAnonymous + additional_audio_source_list_.size();
+        num_mixed_non_anonymous + additional_audio_source_list_.size();
   }
   return 0;
 }

 bool AudioMixerImpl::MixabilityStatus(
     const MixerAudioSource& audio_source) const {
   CriticalSectionScoped cs(crit_.get());
   return IsAudioSourceInList(audio_source, audio_source_list_);
 }

(...skipping 35 matching lines...)
 bool AudioMixerImpl::AnonymousMixabilityStatus(
     const MixerAudioSource& audio_source) const {
   CriticalSectionScoped cs(crit_.get());
   return IsAudioSourceInList(audio_source, additional_audio_source_list_);
 }

 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
                "GetNonAnonymousAudio()");
   AudioFrameList result;
-  std::vector<SourceFrame> audioSourceMixingDataList;
+  std::vector<SourceFrame> audio_source_mixing_data_list;
   std::vector<SourceFrame> ramp_list;

   // Get audio source audio and put it in the struct vector.
-  for (MixerAudioSource* audio_source : audio_source_list_) {
+  for (const auto audio_source : audio_source_list_) {

kwiberg-webrtc 2016/09/01 11:59:11 If it's a pointer type, readability is greatly improved by spelling it auto* so the pointer is visible.

aleloi 2016/09/01 13:36:17 It's auto * const here since only the pointer is const.

kwiberg-webrtc 2016/09/01 19:46:33 Ah, right.
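
To make the exchange above concrete: for a pointer element type, const auto and auto * const deduce the same type; the starred form only makes the pointer visible. A minimal sketch (the stub type and function are illustrative, not from this CL):

#include <type_traits>

struct MixerAudioSource {};

void Example(MixerAudioSource* source) {
  const auto a = source;   // Deduces MixerAudioSource* const: the pointer
                           // is const, the pointee stays mutable.
  auto* const b = source;  // Same deduced type; pointer-ness is explicit.
  static_assert(std::is_same<decltype(a), decltype(b)>::value,
                "both forms declare a const pointer to non-const source");
  (void)a;
  (void)b;
}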
     auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
-        id_, static_cast<int>(output_frequency_));
+        id_, static_cast<int>(OutputFrequency()));

-    auto audio_frame_info = audio_frame_with_info.audio_frame_info;
+    const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
     AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;

     if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                    "failed to GetAudioFrameWithMuted() from participant");
       continue;
     }
-    audioSourceMixingDataList.emplace_back(
+    audio_source_mixing_data_list.emplace_back(
         audio_source, audio_source_audio_frame,
         audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
-        audio_source->_mixHistory->WasMixed());
+        audio_source->mix_history_->WasMixed());
   }

   // Sort frames by sorting function.
-  std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
+  std::sort(audio_source_mixing_data_list.begin(),
+            audio_source_mixing_data_list.end(),
             std::mem_fn(&SourceFrame::shouldMixBefore));

-  int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources;
-  // Go through list in order and put things in mixList.
-  for (SourceFrame& p : audioSourceMixingDataList) {
+  int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;
+
+  // Go through list in order and put unmuted frames in result list.
+  for (const SourceFrame& p : audio_source_mixing_data_list) {
     // Filter muted.
     if (p.muted_) {
-      p.audio_source_->_mixHistory->SetIsMixed(false);
+      p.audio_source_->mix_history_->SetIsMixed(false);
       continue;
     }

     // Add frame to result vector for mixing.
     bool is_mixed = false;
-    if (maxAudioFrameCounter > 0) {
-      --maxAudioFrameCounter;
+    if (max_audio_frame_counter > 0) {
+      --max_audio_frame_counter;
       result.push_back(p.audio_frame_);
       ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false,
                              p.was_mixed_before_, -1);
       is_mixed = true;
     }
-    p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
+    p.audio_source_->mix_history_->SetIsMixed(is_mixed);
   }
   Ramp(ramp_list);
   return result;
 }
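
A note on the std::sort call in this function: std::mem_fn adapts the member function so the sort invokes cmp(a, b) as a.shouldMixBefore(b). A self-contained sketch of the same pattern, with a simplified stand-in type and made-up values:

#include <algorithm>
#include <functional>
#include <vector>

struct Frame {
  bool muted;
  double energy;
  // Unmuted frames sort first; among equals, higher energy sorts first.
  bool ShouldMixBefore(const Frame& other) const {
    if (muted != other.muted) return other.muted;
    return energy > other.energy;
  }
};

int main() {
  std::vector<Frame> frames = {{true, 9.0}, {false, 1.0}, {false, 5.0}};
  std::sort(frames.begin(), frames.end(),
            std::mem_fn(&Frame::ShouldMixBefore));
  // Order is now: {false, 5.0}, {false, 1.0}, {true, 9.0}.
  return 0;
}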

 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
                "GetAnonymousAudio()");
   // The GetAudioFrameWithMuted() callback may result in the audio source being
   // removed from additionalAudioFramesList_. If that happens it will
   // invalidate any iterators. Create a copy of the audio sources list such
   // that the list of participants can be traversed safely.
   std::vector<SourceFrame> ramp_list;
-  MixerAudioSourceList additionalAudioSourceList;
+  MixerAudioSourceList additional_audio_sources_list;
   AudioFrameList result;
-  additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
-                                   additional_audio_source_list_.begin(),
-                                   additional_audio_source_list_.end());
+  additional_audio_sources_list.insert(additional_audio_sources_list.begin(),
+                                       additional_audio_source_list_.begin(),
+                                       additional_audio_source_list_.end());

-  for (MixerAudioSourceList::const_iterator audio_source =
-           additionalAudioSourceList.begin();
-       audio_source != additionalAudioSourceList.end(); ++audio_source) {
-    auto audio_frame_with_info =
-        (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_);
-    auto ret = audio_frame_with_info.audio_frame_info;
+  for (const auto& audio_source : additional_audio_sources_list) {
+    const auto audio_frame_with_info =
+        audio_source->GetAudioFrameWithMuted(id_, OutputFrequency());
+    const auto ret = audio_frame_with_info.audio_frame_info;
     AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                    "failed to GetAudioFrameWithMuted() from audio_source");
       continue;
     }
     if (ret != MixerAudioSource::AudioFrameInfo::kMuted) {
       result.push_back(audio_frame);
-      ramp_list.emplace_back((*audio_source), audio_frame, false,
-                             (*audio_source)->_mixHistory->IsMixed(), -1);
-      (*audio_source)->_mixHistory->SetIsMixed(true);
+      ramp_list.emplace_back(audio_source, audio_frame, false,
+                             audio_source->mix_history_->IsMixed(), 0);
+      audio_source->mix_history_->SetIsMixed(true);
     }
   }
   Ramp(ramp_list);
   return result;
 }

 bool AudioMixerImpl::IsAudioSourceInList(
     const MixerAudioSource& audio_source,
-    const MixerAudioSourceList& audioSourceList) const {
+    const MixerAudioSourceList& audio_source_list) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
-               "IsAudioSourceInList(audio_source,audioSourceList)");
-  return std::find(audioSourceList.begin(), audioSourceList.end(),
-                   &audio_source) != audioSourceList.end();
+               "IsAudioSourceInList(audio_source,audio_source_list)");
+  return std::find(audio_source_list.begin(), audio_source_list.end(),
+                   &audio_source) != audio_source_list.end();
 }

 bool AudioMixerImpl::AddAudioSourceToList(
     MixerAudioSource* audio_source,
-    MixerAudioSourceList* audioSourceList) const {
+    MixerAudioSourceList* audio_source_list) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
-               "AddAudioSourceToList(audio_source, audioSourceList)");
-  audioSourceList->push_back(audio_source);
+               "AddAudioSourceToList(audio_source, audio_source_list)");
+  audio_source_list->push_back(audio_source);
   // Make sure that the mixed status is correct for new MixerAudioSource.
-  audio_source->_mixHistory->ResetMixedStatus();
+  audio_source->mix_history_->ResetMixedStatus();
   return true;
 }

 bool AudioMixerImpl::RemoveAudioSourceFromList(
     MixerAudioSource* audio_source,
-    MixerAudioSourceList* audioSourceList) const {
+    MixerAudioSourceList* audio_source_list) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
-               "RemoveAudioSourceFromList(audio_source, audioSourceList)");
-  auto iter =
-      std::find(audioSourceList->begin(), audioSourceList->end(), audio_source);
-  if (iter != audioSourceList->end()) {
-    audioSourceList->erase(iter);
+               "RemoveAudioSourceFromList(audio_source, audio_source_list)");
+  const auto iter = std::find(audio_source_list->begin(),
+                              audio_source_list->end(), audio_source);
+  if (iter != audio_source_list->end()) {
+    audio_source_list->erase(iter);
     // AudioSource is no longer mixed, reset to default.
-    audio_source->_mixHistory->ResetMixedStatus();
+    audio_source->mix_history_->ResetMixedStatus();
     return true;
   } else {
     return false;
   }
 }

-int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio,
-                                    const AudioFrameList& audioFrameList,
+int32_t AudioMixerImpl::MixFromList(AudioFrame* mixed_audio,
+                                    const AudioFrameList& audio_frame_list,
                                     int32_t id,
                                     bool use_limiter) {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
-               "MixFromList(mixedAudio, audioFrameList)");
-  if (audioFrameList.empty())
+               "MixFromList(mixed_audio, audio_frame_list)");
+  if (audio_frame_list.empty())
     return 0;

-  uint32_t position = 0;
-
-  if (audioFrameList.size() == 1) {
-    mixedAudio->timestamp_ = audioFrameList.front()->timestamp_;
-    mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_;
+  if (audio_frame_list.size() == 1) {
+    mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_;
+    mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_;
   } else {
     // TODO(wu): Issue 3390.
     // Audio frame timestamp is only supported in one channel case.
-    mixedAudio->timestamp_ = 0;
-    mixedAudio->elapsed_time_ms_ = -1;
+    mixed_audio->timestamp_ = 0;
+    mixed_audio->elapsed_time_ms_ = -1;
   }

-  for (const auto& frame : audioFrameList) {
-    RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_);
+  for (const auto& frame : audio_frame_list) {
+    RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_);
     RTC_DCHECK_EQ(
         frame->samples_per_channel_,
-        static_cast<size_t>((mixedAudio->sample_rate_hz_ * kFrameDurationInMs) /
-                            1000));
+        static_cast<size_t>(
+            (mixed_audio->sample_rate_hz_ * kFrameDurationInMs) / 1000));

-    // Mix |f.frame| into |mixedAudio|, with saturation protection.
+    // Mix |f.frame| into |mixed_audio|, with saturation protection.
     // These effect is applied to |f.frame| itself prior to mixing.
     if (use_limiter) {
       // Divide by two to avoid saturation in the mixing.
       // This is only meaningful if the limiter will be used.
       *frame >>= 1;
     }
-    RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_);
-    *mixedAudio += *frame;
-    position++;
+    RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
+    *mixed_audio += *frame;
   }
   return 0;
 }

-bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
+bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const {
   if (!use_limiter_) {
     return true;
   }

   // Smoothly limit the mixed frame.
-  const int error = limiter_->ProcessStream(mixedAudio);
+  const int error = limiter_->ProcessStream(mixed_audio);

   // And now we can safely restore the level. This procedure results in
   // some loss of resolution, deemed acceptable.
   //
   // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
   // and compression gain of 6 dB). However, in the transition frame when this
   // is enabled (moving from one to two audio sources) it has the potential to
   // create discontinuities in the mixed frame.
   //
   // Instead we double the frame (with addition since left-shifting a
   // negative value is undefined).
-  *mixedAudio += *mixedAudio;
+  *mixed_audio += *mixed_audio;

   if (error != limiter_->kNoError) {
     WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                  "Error from AudioProcessing: %d", error);
     RTC_NOTREACHED();
     return false;
   }
   return true;
 }
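
On the doubling-by-addition comment above: left-shifting a negative signed value is undefined behavior (before C++20), so the level is restored with += instead. A standalone sketch of the halve-then-double round trip on a single sample (illustrative values; the real code applies AudioFrame's operator>>= and operator+= across all samples):

#include <cstdint>

int main() {
  int16_t sample = -1001;
  sample >>= 1;      // Halve before mixing, as in MixFromList: -501.
                     // (Right shift of a negative value is
                     // implementation-defined; arithmetic shift in practice.)
  sample += sample;  // Restore the level by addition: -1002, not -1001.
                     // `sample << 1` would be UB for a negative value; the
                     // off-by-one is the "loss of resolution" the comment
                     // deems acceptable.
  return 0;
}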

 int AudioMixerImpl::GetOutputAudioLevel() {
   const int level = audio_level_.Level();
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
                "GetAudioOutputLevel() => level=%d", level);
   return level;
 }

 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
   const int level = audio_level_.LevelFullRange();
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
                "GetAudioOutputLevelFullRange() => level=%d", level);
   return level;
 }
 }  // namespace webrtc