Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc

Issue 2127763002: Removed the memory pool from the mixer. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@removed_time_scheduler
Patch Set: Removed '_pointer'. Created 4 years, 5 months ago
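The change replaces the mixer-owned MemoryPool<AudioFrame> with frames returned directly by each audio source: instead of popping a pooled frame, setting its sample rate and passing it into GetAudioFrameWithMuted(), the mixer now passes the desired output frequency and gets back a frame together with its muted/error status. The snippet below is a minimal, self-contained sketch of that pattern; the member names audio_frame_info and audio_frame come from the diff, but the struct and class declarations are stand-ins for illustration, not the real WebRTC headers.

// Minimal, self-contained sketch of the interface shape this patch moves to.
// The member names audio_frame_info and audio_frame appear in the diff below;
// every other name here (AudioFrameWithMuted, the stand-in AudioFrame and
// MixerAudioSource) is illustrative, not the real WebRTC declaration.
#include <cstddef>

namespace sketch {

struct AudioFrame {
  int sample_rate_hz_ = 0;
  std::size_t samples_per_channel_ = 0;
};

enum class AudioFrameInfo { kNormal, kMuted, kError };

// What GetAudioFrameWithMuted() now returns: a status plus a frame that is
// owned by the audio source instead of being popped from a shared pool.
struct AudioFrameWithMuted {
  AudioFrameInfo audio_frame_info;
  AudioFrame* audio_frame;
};

class MixerAudioSource {
 public:
  // The mixer passes the desired sample rate instead of a pre-allocated
  // pooled frame; the source fills and hands back its own frame.
  AudioFrameWithMuted GetAudioFrameWithMuted(int id, int sample_rate_hz) {
    (void)id;  // Unused in this sketch.
    frame_.sample_rate_hz_ = sample_rate_hz;
    frame_.samples_per_channel_ = 0;  // A real source would decode audio here.
    return {AudioFrameInfo::kNormal, &frame_};
  }

 private:
  AudioFrame frame_;  // Reused on every call; lifetime is tied to the source.
};

}  // namespace sketch

Because frame allocation and reuse now live with the source, the mixer-side MemoryPool<AudioFrame>, its failure handling, and every PushMemory() call can be deleted, which is what the diff below does.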
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 96 matching lines...)
     return NULL;
   }
   return mixer;
 }

 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
     : _id(id),
       _minimumMixingFreq(kLowestPossible),
       _outputFrequency(kDefaultFrequency),
       _sampleSize(0),
-      _audioFramePool(NULL),
       audio_source_list_(),
       additional_audio_source_list_(),
       num_mixed_audio_sources_(0),
       use_limiter_(true),
       _timeStamp(0) {
   thread_checker_.DetachFromThread();
 }

 bool NewAudioConferenceMixerImpl::Init() {
   _crit.reset(CriticalSectionWrapper::CreateCriticalSection());
   if (_crit.get() == NULL)
     return false;

   _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection());
   if (_cbCrit.get() == NULL)
     return false;

   Config config;
   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
   _limiter.reset(AudioProcessing::Create(config));
   if (!_limiter.get())
     return false;

-  MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
-                                           DEFAULT_AUDIO_FRAME_POOLSIZE);
-  if (_audioFramePool == NULL)
-    return false;
-
   if (SetOutputFrequency(kDefaultFrequency) == -1)
     return false;

   if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
       _limiter->kNoError)
     return false;

   // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
   // divide-by-2 but -7 is used instead to give a bit of headroom since the
   // AGC is not a hard limiter.
   if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError)
     return false;

   if (_limiter->gain_control()->set_compression_gain_db(0) !=
       _limiter->kNoError)
     return false;

   if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
     return false;

   if (_limiter->gain_control()->Enable(true) != _limiter->kNoError)
     return false;

   return true;
 }

-NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
-  MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
-  RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr));
-}
-
 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
   size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   AudioFrameList mixList;
   AudioFrameList rampOutList;
   AudioFrameList additionalFramesList;
   std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
   {
     CriticalSectionScoped cs(_cbCrit.get());

(...skipping 269 matching lines...)
        audio_source != audio_source_list_.end(); ++audio_source) {
     // Stop keeping track of passive audioSources if there are already
     // enough audio sources available (they wont be mixed anyway).
     bool mustAddToPassiveList =
         (*maxAudioFrameCounter >
          (activeList.size() + passiveWasMixedList.size() +
           passiveWasNotMixedList.size()));

     bool wasMixed = false;
     wasMixed = (*audio_source)->_mixHistory->WasMixed();
-    AudioFrame* audioFrame = NULL;
-    if (_audioFramePool->PopMemory(audioFrame) == -1) {
-      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
-                   "failed PopMemory() call");
-      RTC_NOTREACHED();
-      return;
-    }
-    audioFrame->sample_rate_hz_ = _outputFrequency;

-    auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
+    auto audio_frame_with_info =
+        (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
+    auto ret = audio_frame_with_info.audio_frame_info;
+    AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
-                   "failed to GetAudioFrameWithMuted() from audio source");
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
     const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
     if (audio_source_list_.size() != 1) {
       // TODO(wu): Issue 3390, add support for multiple audio sources case.
-      audioFrame->ntp_time_ms_ = -1;
+      audio_frame->ntp_time_ms_ = -1;
     }

     // TODO(aleloi): this assert triggers in some test cases where SRTP is
     // used which prevents NetEQ from making a VAD. Temporarily disable this
     // assert until the problem is fixed on a higher level.
-    // RTC_DCHECK_NE(audioFrame->vad_activity_, AudioFrame::kVadUnknown);
-    if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
+    // RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown);
+    if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                    "invalid VAD state from audio source");
     }

-    if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
+    if (audio_frame->vad_activity_ == AudioFrame::kVadActive) {
       if (!wasMixed && !muted) {
-        RampIn(*audioFrame);
+        RampIn(*audio_frame);
       }

       if (activeList.size() >= *maxAudioFrameCounter) {
         // There are already more active audio sources than should be
         // mixed. Only keep the ones with the highest energy.
         AudioFrameList::iterator replaceItem;
-        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);
+        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);

         bool found_replace_item = false;
         for (AudioFrameList::iterator iter = activeList.begin();
              iter != activeList.end(); ++iter) {
           const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
           if (energy < lowestEnergy) {
             replaceItem = iter;
             lowestEnergy = energy;
             found_replace_item = true;
           }
         }
         if (found_replace_item) {
           RTC_DCHECK(!muted);  // Cannot replace with a muted frame.
           FrameAndMuteInfo replaceFrame = *replaceItem;

           bool replaceWasMixed = false;
           std::map<int, MixerAudioSource*>::const_iterator it =
               mixAudioSourceList->find(replaceFrame.frame->id_);

           // When a frame is pushed to |activeList| it is also pushed
           // to mixAudioSourceList with the frame's id. This means
           // that the Find call above should never fail.
           RTC_DCHECK(it != mixAudioSourceList->end());
           replaceWasMixed = it->second->_mixHistory->WasMixed();

           mixAudioSourceList->erase(replaceFrame.frame->id_);
           activeList.erase(replaceItem);

-          activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
-          (*mixAudioSourceList)[audioFrame->id_] = *audio_source;
+          activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
+          (*mixAudioSourceList)[audio_frame->id_] = *audio_source;
           RTC_DCHECK_LE(mixAudioSourceList->size(),
                         static_cast<size_t>(kMaximumAmountOfMixedAudioSources));

           if (replaceWasMixed) {
             if (!replaceFrame.muted) {
               RampOut(*replaceFrame.frame);
             }
             rampOutList->push_back(replaceFrame);
             RTC_DCHECK_LE(
                 rampOutList->size(),
                 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
-          } else {
-            _audioFramePool->PushMemory(replaceFrame.frame);
           }
         } else {
           if (wasMixed) {
             if (!muted) {
-              RampOut(*audioFrame);
+              RampOut(*audio_frame);
             }
-            rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
+            rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
             RTC_DCHECK_LE(
                 rampOutList->size(),
                 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
-          } else {
-            _audioFramePool->PushMemory(audioFrame);
           }
         }
       } else {
-        activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
-        (*mixAudioSourceList)[audioFrame->id_] = *audio_source;
+        activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
+        (*mixAudioSourceList)[audio_frame->id_] = *audio_source;
         RTC_DCHECK_LE(mixAudioSourceList->size(),
                       static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
       }
     } else {
       if (wasMixed) {
         AudioSourceWithFrame* part_struct =
-            new AudioSourceWithFrame(*audio_source, audioFrame, muted);
+            new AudioSourceWithFrame(*audio_source, audio_frame, muted);
         passiveWasMixedList.push_back(part_struct);
       } else if (mustAddToPassiveList) {
         if (!muted) {
-          RampIn(*audioFrame);
+          RampIn(*audio_frame);
         }
         AudioSourceWithFrame* part_struct =
-            new AudioSourceWithFrame(*audio_source, audioFrame, muted);
+            new AudioSourceWithFrame(*audio_source, audio_frame, muted);
         passiveWasNotMixedList.push_back(part_struct);
-      } else {
-        _audioFramePool->PushMemory(audioFrame);
       }
     }
   }
   RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter);
   // At this point it is known which audio sources should be mixed. Transfer
   // this information to this functions output parameters.
   for (AudioFrameList::const_iterator iter = activeList.begin();
        iter != activeList.end(); ++iter) {
     mixList->push_back(*iter);
   }
   activeList.clear();
   // Always mix a constant number of AudioFrames. If there aren't enough
   // active audio sources mix passive ones. Starting with those that was mixed
   // last iteration.
   for (AudioSourceWithFrameList::const_iterator iter =
            passiveWasMixedList.begin();
        iter != passiveWasMixedList.end(); ++iter) {
     if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
       mixList->push_back(
           FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
       (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
       RTC_DCHECK_LE(mixAudioSourceList->size(),
                     static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
-    } else {
-      _audioFramePool->PushMemory((*iter)->audio_frame);
     }
     delete *iter;
   }
   // And finally the ones that have not been mixed for a while.
   for (AudioSourceWithFrameList::const_iterator iter =
            passiveWasNotMixedList.begin();
        iter != passiveWasNotMixedList.end(); ++iter) {
     if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
       mixList->push_back(
           FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
       (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
       RTC_DCHECK_LE(mixAudioSourceList->size(),
                     static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
-    } else {
-      _audioFramePool->PushMemory((*iter)->audio_frame);
     }
     delete *iter;
   }
   RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size());
   *maxAudioFrameCounter += mixListStartSize - mixList->size();
 }

 void NewAudioConferenceMixerImpl::GetAdditionalAudio(
     AudioFrameList* additionalFramesList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "GetAdditionalAudio(additionalFramesList)");
   // The GetAudioFrameWithMuted() callback may result in the audio source being
-  // removed from additionalAudioSourceList_. If that happens it will
+  // removed from additionalAudioFramesList_. If that happens it will
   // invalidate any iterators. Create a copy of the audio sources list such
-  // that the list of audio sources can be traversed safely.
+  // that the list of participants can be traversed safely.
   MixerAudioSourceList additionalAudioSourceList;
   additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
                                    additional_audio_source_list_.begin(),
                                    additional_audio_source_list_.end());

   for (MixerAudioSourceList::const_iterator audio_source =
            additionalAudioSourceList.begin();
        audio_source != additionalAudioSourceList.end(); ++audio_source) {
-    AudioFrame* audioFrame = NULL;
-    if (_audioFramePool->PopMemory(audioFrame) == -1) {
-      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
-                   "failed PopMemory() call");
-      RTC_NOTREACHED();
-      return;
-    }
-    audioFrame->sample_rate_hz_ = _outputFrequency;
-    auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
+    auto audio_frame_with_info =
+        (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
+    auto ret = audio_frame_with_info.audio_frame_info;
+    AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                    "failed to GetAudioFrameWithMuted() from audio_source");
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
-    if (audioFrame->samples_per_channel_ == 0) {
+    if (audio_frame->samples_per_channel_ == 0) {
       // Empty frame. Don't use it.
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
     additionalFramesList->push_back(FrameAndMuteInfo(
-        audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
+        audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
   }
 }

 void NewAudioConferenceMixerImpl::UpdateMixedStatus(
     const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "UpdateMixedStatus(mixedAudioSourcesMap)");
   RTC_DCHECK_LE(mixedAudioSourcesMap.size(),
                 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));

(...skipping 12 matching lines...)
       }
     }
     (*audio_source)->_mixHistory->SetIsMixed(isMixed);
   }
 }

 void NewAudioConferenceMixerImpl::ClearAudioFrameList(
     AudioFrameList* audioFrameList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "ClearAudioFrameList(audioFrameList)");
-  for (AudioFrameList::iterator iter = audioFrameList->begin();
-       iter != audioFrameList->end(); ++iter) {
-    _audioFramePool->PushMemory(iter->frame);
-  }
   audioFrameList->clear();
 }

 bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
     const MixerAudioSource& audio_source,
     const MixerAudioSourceList& audioSourceList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "IsAudioSourceInList(audio_source,audioSourceList)");
   for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin();
        iter != audioSourceList.end(); ++iter) {
(...skipping 118 matching lines...)

   if (error != _limiter->kNoError) {
     WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                  "Error from AudioProcessing: %d", error);
     RTC_NOTREACHED();
     return false;
   }
   return true;
 }
 }  // namespace webrtc