OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 95 matching lines...)
106 return NULL; | 106 return NULL; |
107 } | 107 } |
108 return mixer; | 108 return mixer; |
109 } | 109 } |
110 | 110 |
111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) | 111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) |
112 : _id(id), | 112 : _id(id), |
113 _minimumMixingFreq(kLowestPossible), | 113 _minimumMixingFreq(kLowestPossible), |
114 _outputFrequency(kDefaultFrequency), | 114 _outputFrequency(kDefaultFrequency), |
115 _sampleSize(0), | 115 _sampleSize(0), |
116 _audioFramePool(NULL), | |
117 _participantList(), | 116 _participantList(), |
118 _additionalParticipantList(), | 117 _additionalParticipantList(), |
119 _numMixedParticipants(0), | 118 _numMixedParticipants(0), |
120 use_limiter_(true), | 119 use_limiter_(true), |
121 _timeStamp(0), | 120 _timeStamp(0), |
122 mix_calls_(0) {} | 121 mix_calls_(0) {} |
123 | 122 |
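The dropped _audioFramePool(NULL) initializer reflects the core change in this patch: mixed frames are no longer checked out of a shared MemoryPool<AudioFrame> and returned to it on every non-mixed path; each participant hands out a frame it owns. A minimal, self-contained sketch of the ownership model this implies is below. GetFramePointer() appears in the new code further down, but the stub types and member layout here are assumptions, not the real WebRTC classes:

    // Hypothetical sketch: the audio source owns one reusable frame and lends
    // the mixer a raw pointer to it. The mixer must not hold the pointer past
    // the current Mix() call, because the source will overwrite the frame on
    // the next iteration.
    struct AudioFrameStub {
      int sample_rate_hz = 0;
      // ... samples, timestamps, etc.
    };

    class AudioSourceStub {
     public:
      AudioFrameStub* GetFramePointer() { return &frame_for_mixing_; }

     private:
      AudioFrameStub frame_for_mixing_;  // reused on every mixing round
    };

With the source owning the frame, the early-continue paths in UpdateToMix() and GetAdditionalAudio() below no longer need the PushMemory() bookkeeping that this patch deletes.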
124 bool NewAudioConferenceMixerImpl::Init() { | 123 bool NewAudioConferenceMixerImpl::Init() { |
125 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 124 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
126 if (_crit.get() == NULL) | 125 if (_crit.get() == NULL) |
127 return false; | 126 return false; |
128 | 127 |
129 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 128 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
130 if (_cbCrit.get() == NULL) | 129 if (_cbCrit.get() == NULL) |
131 return false; | 130 return false; |
132 | 131 |
133 Config config; | 132 Config config; |
134 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 133 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
135 _limiter.reset(AudioProcessing::Create(config)); | 134 _limiter.reset(AudioProcessing::Create(config)); |
136 if (!_limiter.get()) | 135 if (!_limiter.get()) |
137 return false; | 136 return false; |
138 | 137 |
139 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool, | |
140 DEFAULT_AUDIO_FRAME_POOLSIZE); | |
141 if (_audioFramePool == NULL) | |
142 return false; | |
143 | |
144 if (SetOutputFrequency(kDefaultFrequency) == -1) | 138 if (SetOutputFrequency(kDefaultFrequency) == -1) |
145 return false; | 139 return false; |
146 | 140 |
147 if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != | 141 if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != |
148 _limiter->kNoError) | 142 _limiter->kNoError) |
149 return false; | 143 return false; |
150 | 144 |
151 // We smoothly limit the mixed frame to -7 dBFS. -6 would correspond to the | 145 // We smoothly limit the mixed frame to -7 dBFS. -6 would correspond to the |
152 // divide-by-2, but -7 is used instead to give a bit of headroom since the | 146 // divide-by-2, but -7 is used instead to give a bit of headroom since the |
153 // AGC is not a hard limiter. | 147 // AGC is not a hard limiter. |
154 if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError) | 148 if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError) |
155 return false; | 149 return false; |
156 | 150 |
157 if (_limiter->gain_control()->set_compression_gain_db(0) != | 151 if (_limiter->gain_control()->set_compression_gain_db(0) != |
158 _limiter->kNoError) | 152 _limiter->kNoError) |
159 return false; | 153 return false; |
160 | 154 |
161 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) | 155 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) |
162 return false; | 156 return false; |
163 | 157 |
164 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 158 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
165 return false; | 159 return false; |
166 | 160 |
167 return true; | 161 return true; |
168 } | 162 } |
169 | 163 |
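The -7 dBFS comment in Init() leans on the fact that halving a signal's amplitude lowers its level by roughly 6 dB, so targeting 7 dB below full scale leaves about 1 dB of headroom over the divide-by-2 applied before limiting. A one-line check of that arithmetic (standalone, not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main() {
      // 20 * log10(0.5) evaluates to about -6.02, i.e. halving the amplitude
      // costs ~6 dB, which is why a -7 dBFS target gives ~1 dB of headroom.
      std::printf("%.2f dB\n", 20.0 * std::log10(0.5));
      return 0;
    }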
170 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { | |
171 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | |
172 assert(_audioFramePool == NULL); | |
173 } | |
174 | |
175 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 164 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
176 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; | 165 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; |
177 { | 166 { |
178 CriticalSectionScoped cs(_crit.get()); | 167 CriticalSectionScoped cs(_crit.get()); |
179 RTC_DCHECK(mix_calls_ == 0); | 168 RTC_DCHECK(mix_calls_ == 0); |
180 mix_calls_++; | 169 mix_calls_++; |
181 } | 170 } |
182 | 171 |
183 AudioFrameList mixList; | 172 AudioFrameList mixList; |
184 AudioFrameList rampOutList; | 173 AudioFrameList rampOutList; |
(...skipping 282 matching lines...)
467 participant != _participantList.end(); ++participant) { | 456 participant != _participantList.end(); ++participant) { |
468 // Stop keeping track of passive participants if there are already | 457 // Stop keeping track of passive participants if there are already |
469 // enough participants available (they won't be mixed anyway). | 458 // enough participants available (they won't be mixed anyway). |
470 bool mustAddToPassiveList = | 459 bool mustAddToPassiveList = |
471 (*maxAudioFrameCounter > | 460 (*maxAudioFrameCounter > |
472 (activeList.size() + passiveWasMixedList.size() + | 461 (activeList.size() + passiveWasMixedList.size() + |
473 passiveWasNotMixedList.size())); | 462 passiveWasNotMixedList.size())); |
474 | 463 |
475 bool wasMixed = false; | 464 bool wasMixed = false; |
476 wasMixed = (*participant)->_mixHistory->WasMixed(); | 465 wasMixed = (*participant)->_mixHistory->WasMixed(); |
477 AudioFrame* audioFrame = NULL; | 466 AudioFrame* audioFrame = (*participant)->GetFramePointer(); |
478 if (_audioFramePool->PopMemory(audioFrame) == -1) { | |
479 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | |
480 "failed PopMemory() call"); | |
481 assert(false); | |
482 return; | |
483 } | |
484 audioFrame->sample_rate_hz_ = _outputFrequency; | 467 audioFrame->sample_rate_hz_ = _outputFrequency; |
485 | 468 |
486 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 469 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); |
487 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 470 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
488 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | |
489 "failed to GetAudioFrameWithMuted() from participant"); | |
490 _audioFramePool->PushMemory(audioFrame); | |
491 continue; | 471 continue; |
492 } | 472 } |
493 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); | 473 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); |
494 if (_participantList.size() != 1) { | 474 if (_participantList.size() != 1) { |
495 // TODO(wu): Issue 3390, add support for multiple participants case. | 475 // TODO(wu): Issue 3390, add support for multiple participants case. |
496 audioFrame->ntp_time_ms_ = -1; | 476 audioFrame->ntp_time_ms_ = -1; |
497 } | 477 } |
498 | 478 |
499 // TODO(henrike): this assert triggers in some test cases where SRTP is | 479 // TODO(henrike): this assert triggers in some test cases where SRTP is |
500 // used which prevents NetEQ from making a VAD. Temporarily disable this | 480 // used which prevents NetEQ from making a VAD. Temporarily disable this |
(...skipping 46 matching lines...)
547 (*mixParticipantList)[audioFrame->id_] = *participant; | 527 (*mixParticipantList)[audioFrame->id_] = *participant; |
548 assert(mixParticipantList->size() <= | 528 assert(mixParticipantList->size() <= |
549 kMaximumAmountOfMixedParticipants); | 529 kMaximumAmountOfMixedParticipants); |
550 | 530 |
551 if (replaceWasMixed) { | 531 if (replaceWasMixed) { |
552 if (!replaceFrame.muted) { | 532 if (!replaceFrame.muted) { |
553 RampOut(*replaceFrame.frame); | 533 RampOut(*replaceFrame.frame); |
554 } | 534 } |
555 rampOutList->push_back(replaceFrame); | 535 rampOutList->push_back(replaceFrame); |
556 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 536 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); |
557 } else { | |
558 _audioFramePool->PushMemory(replaceFrame.frame); | |
559 } | 537 } |
560 } else { | 538 } else { |
561 if (wasMixed) { | 539 if (wasMixed) { |
562 if (!muted) { | 540 if (!muted) { |
563 RampOut(*audioFrame); | 541 RampOut(*audioFrame); |
564 } | 542 } |
565 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); | 543 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); |
566 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 544 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); |
567 } else { | |
568 _audioFramePool->PushMemory(audioFrame); | |
569 } | 545 } |
570 } | 546 } |
571 } else { | 547 } else { |
572 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); | 548 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); |
573 (*mixParticipantList)[audioFrame->id_] = *participant; | 549 (*mixParticipantList)[audioFrame->id_] = *participant; |
574 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 550 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
575 } | 551 } |
576 } else { | 552 } else { |
577 if (wasMixed) { | 553 if (wasMixed) { |
578 ParticipantFrameStruct* part_struct = | 554 ParticipantFrameStruct* part_struct = |
579 new ParticipantFrameStruct(*participant, audioFrame, muted); | 555 new ParticipantFrameStruct(*participant, audioFrame, muted); |
580 passiveWasMixedList.push_back(part_struct); | 556 passiveWasMixedList.push_back(part_struct); |
581 } else if (mustAddToPassiveList) { | 557 } else if (mustAddToPassiveList) { |
582 if (!muted) { | 558 if (!muted) { |
583 RampIn(*audioFrame); | 559 RampIn(*audioFrame); |
584 } | 560 } |
585 ParticipantFrameStruct* part_struct = | 561 ParticipantFrameStruct* part_struct = |
586 new ParticipantFrameStruct(*participant, audioFrame, muted); | 562 new ParticipantFrameStruct(*participant, audioFrame, muted); |
587 passiveWasNotMixedList.push_back(part_struct); | 563 passiveWasNotMixedList.push_back(part_struct); |
588 } else { | |
589 _audioFramePool->PushMemory(audioFrame); | |
590 } | 564 } |
591 } | 565 } |
592 } | 566 } |
593 assert(activeList.size() <= *maxAudioFrameCounter); | 567 assert(activeList.size() <= *maxAudioFrameCounter); |
594 // At this point it is known which participants should be mixed. Transfer | 568 // At this point it is known which participants should be mixed. Transfer |
595 // this information to this function's output parameters. | 569 // this information to this function's output parameters. |
596 for (AudioFrameList::const_iterator iter = activeList.begin(); | 570 for (AudioFrameList::const_iterator iter = activeList.begin(); |
597 iter != activeList.end(); ++iter) { | 571 iter != activeList.end(); ++iter) { |
598 mixList->push_back(*iter); | 572 mixList->push_back(*iter); |
599 } | 573 } |
600 activeList.clear(); | 574 activeList.clear(); |
601 // Always mix a constant number of AudioFrames. If there aren't enough | 575 // Always mix a constant number of AudioFrames. If there aren't enough |
602 // active participants, mix passive ones. Starting with those that were mixed | 576 // active participants, mix passive ones. Starting with those that were mixed |
603 // last iteration. | 577 // last iteration. |
604 for (ParticipantFrameStructList::const_iterator iter = | 578 for (ParticipantFrameStructList::const_iterator iter = |
605 passiveWasMixedList.begin(); | 579 passiveWasMixedList.begin(); |
606 iter != passiveWasMixedList.end(); ++iter) { | 580 iter != passiveWasMixedList.end(); ++iter) { |
607 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 581 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
608 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 582 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
609 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 583 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
610 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 584 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
611 } else { | |
612 _audioFramePool->PushMemory((*iter)->audioFrame); | |
613 } | 585 } |
614 delete *iter; | 586 delete *iter; |
615 } | 587 } |
616 // And finally the ones that have not been mixed for a while. | 588 // And finally the ones that have not been mixed for a while. |
617 for (ParticipantFrameStructList::const_iterator iter = | 589 for (ParticipantFrameStructList::const_iterator iter = |
618 passiveWasNotMixedList.begin(); | 590 passiveWasNotMixedList.begin(); |
619 iter != passiveWasNotMixedList.end(); ++iter) { | 591 iter != passiveWasNotMixedList.end(); ++iter) { |
620 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 592 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
621 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 593 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
622 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 594 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
623 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 595 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
624 } else { | |
625 _audioFramePool->PushMemory((*iter)->audioFrame); | |
626 } | 596 } |
627 delete *iter; | 597 delete *iter; |
628 } | 598 } |
629 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); | 599 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); |
630 *maxAudioFrameCounter += mixListStartSize - mixList->size(); | 600 *maxAudioFrameCounter += mixListStartSize - mixList->size(); |
631 } | 601 } |
632 | 602 |
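The tail of UpdateToMix() above fills the output list by priority, as its comments describe: all active frames first, then passive participants that were mixed last round, then the remaining passive ones, stopping once the frame budget is reached. A compact sketch of that selection logic with generic containers; the function name and signature are hypothetical, and the real code additionally tracks mute state and per-participant bookkeeping:

    #include <cstddef>
    #include <initializer_list>
    #include <vector>

    // Fill 'out' with every active item, then top it up from the passive
    // tiers in priority order until 'budget' entries have been selected.
    template <typename T>
    void FillByPriority(std::vector<T>* out,
                        size_t budget,
                        const std::vector<T>& active,
                        const std::vector<T>& passive_was_mixed,
                        const std::vector<T>& passive_was_not_mixed) {
      out->assign(active.begin(), active.end());
      for (const std::vector<T>* tier :
           {&passive_was_mixed, &passive_was_not_mixed}) {
        for (const T& item : *tier) {
          if (out->size() >= budget)
            return;
          out->push_back(item);
        }
      }
    }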
633 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 603 void NewAudioConferenceMixerImpl::GetAdditionalAudio( |
634 AudioFrameList* additionalFramesList) const { | 604 AudioFrameList* additionalFramesList) const { |
635 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 605 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
636 "GetAdditionalAudio(additionalFramesList)"); | 606 "GetAdditionalAudio(additionalFramesList)"); |
637 // The GetAudioFrameWithMuted() callback may result in the participant being | 607 // The GetAudioFrameWithMuted() callback may result in the participant being |
638 // removed from additionalParticipantList_. If that happens it will | 608 // removed from additionalParticipantList_. If that happens it will |
639 // invalidate any iterators. Create a copy of the participants list such | 609 // invalidate any iterators. Create a copy of the participants list such |
640 // that the list of participants can be traversed safely. | 610 // that the list of participants can be traversed safely. |
641 MixerAudioSourceList additionalParticipantList; | 611 MixerAudioSourceList additionalParticipantList; |
642 additionalParticipantList.insert(additionalParticipantList.begin(), | 612 additionalParticipantList.insert(additionalParticipantList.begin(), |
643 _additionalParticipantList.begin(), | 613 _additionalParticipantList.begin(), |
644 _additionalParticipantList.end()); | 614 _additionalParticipantList.end()); |
645 | 615 |
646 for (MixerAudioSourceList::const_iterator participant = | 616 for (MixerAudioSourceList::const_iterator participant = |
647 additionalParticipantList.begin(); | 617 additionalParticipantList.begin(); |
648 participant != additionalParticipantList.end(); ++participant) { | 618 participant != additionalParticipantList.end(); ++participant) { |
649 AudioFrame* audioFrame = NULL; | 619 AudioFrame* audioFrame = (*participant)->GetFramePointer(); |
650 if (_audioFramePool->PopMemory(audioFrame) == -1) { | |
651 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | |
652 "failed PopMemory() call"); | |
653 assert(false); | |
654 return; | |
655 } | |
656 audioFrame->sample_rate_hz_ = _outputFrequency; | 620 audioFrame->sample_rate_hz_ = _outputFrequency; |
657 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 621 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); |
658 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 622 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
659 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 623 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
660 "failed to GetAudioFrameWithMuted() from participant"); | 624 "failed to GetAudioFrameWithMuted() from participant"); |
661 _audioFramePool->PushMemory(audioFrame); | |
662 continue; | 625 continue; |
663 } | 626 } |
664 if (audioFrame->samples_per_channel_ == 0) { | 627 if (audioFrame->samples_per_channel_ == 0) { |
665 // Empty frame. Don't use it. | 628 // Empty frame. Don't use it. |
666 _audioFramePool->PushMemory(audioFrame); | |
667 continue; | 629 continue; |
668 } | 630 } |
669 additionalFramesList->push_back(FrameAndMuteInfo( | 631 additionalFramesList->push_back(FrameAndMuteInfo( |
670 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 632 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
671 } | 633 } |
672 } | 634 } |
673 | 635 |
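The comment at the top of GetAdditionalAudio() describes a general C++ idiom: when a callback invoked during iteration may add or remove elements from the container being iterated, walk a snapshot instead of the live container. A generic sketch of the same pattern, independent of the mixer types (names are hypothetical):

    #include <list>

    // Iterate over a copy so the loop stays valid even if on_each() mutates
    // the original list (e.g. a participant removing itself in a callback).
    template <typename T, typename Fn>
    void ForEachOnSnapshot(const std::list<T>& original, Fn on_each) {
      std::list<T> snapshot(original.begin(), original.end());
      for (const T& item : snapshot)
        on_each(item);
    }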
674 void NewAudioConferenceMixerImpl::UpdateMixedStatus( | 636 void NewAudioConferenceMixerImpl::UpdateMixedStatus( |
675 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { | 637 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { |
676 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 638 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
(...skipping 15 matching lines...)
692 } | 654 } |
693 } | 655 } |
694 (*participant)->_mixHistory->SetIsMixed(isMixed); | 656 (*participant)->_mixHistory->SetIsMixed(isMixed); |
695 } | 657 } |
696 } | 658 } |
697 | 659 |
698 void NewAudioConferenceMixerImpl::ClearAudioFrameList( | 660 void NewAudioConferenceMixerImpl::ClearAudioFrameList( |
699 AudioFrameList* audioFrameList) const { | 661 AudioFrameList* audioFrameList) const { |
700 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 662 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
701 "ClearAudioFrameList(audioFrameList)"); | 663 "ClearAudioFrameList(audioFrameList)"); |
702 for (AudioFrameList::iterator iter = audioFrameList->begin(); | |
703 iter != audioFrameList->end(); ++iter) { | |
704 _audioFramePool->PushMemory(iter->frame); | |
705 } | |
706 audioFrameList->clear(); | 664 audioFrameList->clear(); |
707 } | 665 } |
708 | 666 |
709 bool NewAudioConferenceMixerImpl::IsParticipantInList( | 667 bool NewAudioConferenceMixerImpl::IsParticipantInList( |
710 const MixerAudioSource& participant, | 668 const MixerAudioSource& participant, |
711 const MixerAudioSourceList& participantList) const { | 669 const MixerAudioSourceList& participantList) const { |
712 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 670 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
713 "IsParticipantInList(participant,participantList)"); | 671 "IsParticipantInList(participant,participantList)"); |
714 for (MixerAudioSourceList::const_iterator iter = participantList.begin(); | 672 for (MixerAudioSourceList::const_iterator iter = participantList.begin(); |
715 iter != participantList.end(); ++iter) { | 673 iter != participantList.end(); ++iter) { |
(...skipping 116 matching lines...)
832 | 790 |
833 if (error != _limiter->kNoError) { | 791 if (error != _limiter->kNoError) { |
834 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 792 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
835 "Error from AudioProcessing: %d", error); | 793 "Error from AudioProcessing: %d", error); |
836 assert(false); | 794 assert(false); |
837 return false; | 795 return false; |
838 } | 796 } |
839 return true; | 797 return true; |
840 } | 798 } |
841 } // namespace webrtc | 799 } // namespace webrtc |