| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 95 matching lines...) |
| 106 return NULL; | 106 return NULL; |
| 107 } | 107 } |
| 108 return mixer; | 108 return mixer; |
| 109 } | 109 } |
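The lines above are the tail of the factory (its body is elided in this diff): if Init() fails the mixer is freed and NULL is returned. A hedged usage sketch, assuming the factory is NewAudioConferenceMixer::Create(int id), that Mix() is reachable through that interface, and that the caller owns the returned raw pointer:

    // Hedged sketch: the factory name, the Mix() exposure and the ownership
    // convention are assumptions based on the Create()/Init() tail above.
    NewAudioConferenceMixer* mixer = NewAudioConferenceMixer::Create(/*id=*/0);
    if (mixer == NULL)
      return;  // Construction or Init() failed inside Create().
    AudioFrame mixed_frame;
    mixer->Mix(&mixed_frame);  // One mixed output frame per call; see Mix() below.
    delete mixer;
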
| 110 | 110 |
| 111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) | 111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) |
| 112 : _id(id), | 112 : _id(id), |
| 113 _minimumMixingFreq(kLowestPossible), | 113 _minimumMixingFreq(kLowestPossible), |
| 114 _outputFrequency(kDefaultFrequency), | 114 _outputFrequency(kDefaultFrequency), |
| 115 _sampleSize(0), | 115 _sampleSize(0), |
| 116 _audioFramePool(NULL), | |
| 117 _participantList(), | 116 _participantList(), |
| 118 _additionalParticipantList(), | 117 _additionalParticipantList(), |
| 119 _numMixedParticipants(0), | 118 _numMixedParticipants(0), |
| 120 use_limiter_(true), | 119 use_limiter_(true), |
| 121 _timeStamp(0), | 120 _timeStamp(0), |
| 122 mix_calls_(0) {} | 121 mix_calls_(0) {} |
| 123 | 122 |
| 124 bool NewAudioConferenceMixerImpl::Init() { | 123 bool NewAudioConferenceMixerImpl::Init() { |
| 125 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 124 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
| 126 if (_crit.get() == NULL) | 125 if (_crit.get() == NULL) |
| 127 return false; | 126 return false; |
| 128 | 127 |
| 129 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 128 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
| 130 if (_cbCrit.get() == NULL) | 129 if (_cbCrit.get() == NULL) |
| 131 return false; | 130 return false; |
| 132 | 131 |
| 133 Config config; | 132 Config config; |
| 134 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 133 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
| 135 _limiter.reset(AudioProcessing::Create(config)); | 134 _limiter.reset(AudioProcessing::Create(config)); |
| 136 if (!_limiter.get()) | 135 if (!_limiter.get()) |
| 137 return false; | 136 return false; |
| 138 | 137 |
| 139 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool, | |
| 140 DEFAULT_AUDIO_FRAME_POOLSIZE); | |
| 141 if (_audioFramePool == NULL) | |
| 142 return false; | |
| 143 | |
| 144 if (SetOutputFrequency(kDefaultFrequency) == -1) | 138 if (SetOutputFrequency(kDefaultFrequency) == -1) |
| 145 return false; | 139 return false; |
| 146 | 140 |
| 147 if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != | 141 if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != |
| 148 _limiter->kNoError) | 142 _limiter->kNoError) |
| 149 return false; | 143 return false; |
| 150 | 144 |
| 151 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the | 145 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the |
| 152 // divide-by-2 but -7 is used instead to give a bit of headroom since the | 146 // divide-by-2 but -7 is used instead to give a bit of headroom since the |
| 153 // AGC is not a hard limiter. | 147 // AGC is not a hard limiter. |
| 154 if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError) | 148 if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError) |
| 155 return false; | 149 return false; |
| 156 | 150 |
| 157 if (_limiter->gain_control()->set_compression_gain_db(0) != | 151 if (_limiter->gain_control()->set_compression_gain_db(0) != |
| 158 _limiter->kNoError) | 152 _limiter->kNoError) |
| 159 return false; | 153 return false; |
| 160 | 154 |
| 161 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) | 155 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) |
| 162 return false; | 156 return false; |
| 163 | 157 |
| 164 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 158 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
| 165 return false; | 159 return false; |
| 166 | 160 |
| 167 return true; | 161 return true; |
| 168 } | 162 } |
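For reference, the -6 dB in the comment is the divide-by-two it mentions (20*log10(1/2) is about -6.02 dB), so targeting -7 dBFS leaves roughly 1 dB of headroom because the fixed-digital AGC only limits softly. A minimal sketch of the same limiter setup pulled out of Init(); the helper name is hypothetical and the same headers as this file are assumed, while every call mirrors one made above:

    // Sketch only: configures AudioProcessing the same way Init() does above.
    // CreateMixerLimiter() is a hypothetical helper name.
    AudioProcessing* CreateMixerLimiter() {
      Config config;
      config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
      AudioProcessing* limiter = AudioProcessing::Create(config);
      if (!limiter)
        return NULL;
      GainControl* gc = limiter->gain_control();
      if (gc->set_mode(GainControl::kFixedDigital) != AudioProcessing::kNoError ||
          gc->set_target_level_dbfs(7) != AudioProcessing::kNoError ||  // -7 dBFS
          gc->set_compression_gain_db(0) != AudioProcessing::kNoError ||
          gc->enable_limiter(true) != AudioProcessing::kNoError ||
          gc->Enable(true) != AudioProcessing::kNoError) {
        delete limiter;
        return NULL;
      }
      return limiter;
    }
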
| 169 | 163 |
| 170 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { | |
| 171 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | |
| 172 assert(_audioFramePool == NULL); | |
| 173 } | |
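With the MemoryPool gone, the destructor above has nothing left to release, and frame ownership moves to the audio sources: GetAudioFrameWithMuted() now takes the desired sample rate and returns the frame together with its status. A sketch of the assumed shape of that interface; the field and enumerator names used at the call sites below are taken from this diff, while the struct name, kNormal, the parameter types and the ownership comment are assumptions:

    // Assumed shape of the new MixerAudioSource callback; only the part this
    // file uses is sketched.
    class MixerAudioSource {
     public:
      enum class AudioFrameInfo { kNormal, kMuted, kError };
      struct AudioFrameWithInfo {
        AudioFrameInfo audio_frame_info;  // kError means "skip this source".
        AudioFrame* audio_frame_pointer;  // Owned by the source, not the mixer.
      };
      // The source delivers (and, if needed, resamples) its frame at
      // |desired_frequency_hz|; the mixer never frees the pointer.
      virtual AudioFrameWithInfo GetAudioFrameWithMuted(
          int id, int desired_frequency_hz) = 0;
    };
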
| 174 | |
| 175 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 164 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
| 176 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; | 165 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; |
| 177 { | 166 { |
| 178 CriticalSectionScoped cs(_crit.get()); | 167 CriticalSectionScoped cs(_crit.get()); |
| 179 RTC_DCHECK(mix_calls_ == 0); | 168 RTC_DCHECK(mix_calls_ == 0); |
| 180 mix_calls_++; | 169 mix_calls_++; |
| 181 } | 170 } |
| 182 | 171 |
| 183 AudioFrameList mixList; | 172 AudioFrameList mixList; |
| 184 AudioFrameList rampOutList; | 173 AudioFrameList rampOutList; |
| (...skipping 282 matching lines...) |
| 467 participant != _participantList.end(); ++participant) { | 456 participant != _participantList.end(); ++participant) { |
| 468 // Stop keeping track of passive participants if there are already | 457 // Stop keeping track of passive participants if there are already |
| 469 // enough participants available (they won't be mixed anyway). | 458 // enough participants available (they won't be mixed anyway). |
| 470 bool mustAddToPassiveList = | 459 bool mustAddToPassiveList = |
| 471 (*maxAudioFrameCounter > | 460 (*maxAudioFrameCounter > |
| 472 (activeList.size() + passiveWasMixedList.size() + | 461 (activeList.size() + passiveWasMixedList.size() + |
| 473 passiveWasNotMixedList.size())); | 462 passiveWasNotMixedList.size())); |
| 474 | 463 |
| 475 bool wasMixed = false; | 464 bool wasMixed = false; |
| 476 wasMixed = (*participant)->_mixHistory->WasMixed(); | 465 wasMixed = (*participant)->_mixHistory->WasMixed(); |
| 477 AudioFrame* audioFrame = NULL; | |
| 478 if (_audioFramePool->PopMemory(audioFrame) == -1) { | |
| 479 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | |
| 480 "failed PopMemory() call"); | |
| 481 assert(false); | |
| 482 return; | |
| 483 } | |
| 484 audioFrame->sample_rate_hz_ = _outputFrequency; | |
| 485 | 466 |
| 486 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 467 auto audio_frame_with_info = |
| 468 (*participant)->GetAudioFrameWithMuted(_id, _outputFrequency); |
| 469 |
| 470 auto ret = audio_frame_with_info.audio_frame_info; |
| 471 AudioFrame* audio_frame = audio_frame_with_info.audio_frame_pointer; |
| 487 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 472 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
| 488 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | |
| 489 "failed to GetAudioFrameWithMuted() from participant"); | |
| 490 _audioFramePool->PushMemory(audioFrame); | |
| 491 continue; | 473 continue; |
| 492 } | 474 } |
| 493 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); | 475 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); |
| 494 if (_participantList.size() != 1) { | 476 if (_participantList.size() != 1) { |
| 495 // TODO(wu): Issue 3390, add support for multiple participants case. | 477 // TODO(wu): Issue 3390, add support for multiple participants case. |
| 496 audioFrame->ntp_time_ms_ = -1; | 478 audio_frame->ntp_time_ms_ = -1; |
| 497 } | 479 } |
| 498 | 480 |
| 499 // TODO(henrike): this assert triggers in some test cases where SRTP is | 481 // TODO(henrike): this assert triggers in some test cases where SRTP is |
| 500 // used which prevents NetEQ from making a VAD. Temporarily disable this | 482 // used which prevents NetEQ from making a VAD. Temporarily disable this |
| 501 // assert until the problem is fixed on a higher level. | 483 // assert until the problem is fixed on a higher level. |
| 502 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown); | 484 // assert(audio_frame->vad_activity_ != AudioFrame::kVadUnknown); |
| 503 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { | 485 if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) { |
| 504 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 486 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
| 505 "invalid VAD state from participant"); | 487 "invalid VAD state from participant"); |
| 506 } | 488 } |
| 507 | 489 |
| 508 if (audioFrame->vad_activity_ == AudioFrame::kVadActive) { | 490 if (audio_frame->vad_activity_ == AudioFrame::kVadActive) { |
| 509 if (!wasMixed && !muted) { | 491 if (!wasMixed && !muted) { |
| 510 RampIn(*audioFrame); | 492 RampIn(*audio_frame); |
| 511 } | 493 } |
| 512 | 494 |
| 513 if (activeList.size() >= *maxAudioFrameCounter) { | 495 if (activeList.size() >= *maxAudioFrameCounter) { |
| 514 // There are already more active participants than should be | 496 // There are already more active participants than should be |
| 515 // mixed. Only keep the ones with the highest energy. | 497 // mixed. Only keep the ones with the highest energy. |
| 516 AudioFrameList::iterator replaceItem; | 498 AudioFrameList::iterator replaceItem; |
| 517 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame); | 499 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame); |
| 518 | 500 |
| 519 bool found_replace_item = false; | 501 bool found_replace_item = false; |
| 520 for (AudioFrameList::iterator iter = activeList.begin(); | 502 for (AudioFrameList::iterator iter = activeList.begin(); |
| 521 iter != activeList.end(); ++iter) { | 503 iter != activeList.end(); ++iter) { |
| 522 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); | 504 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); |
| 523 if (energy < lowestEnergy) { | 505 if (energy < lowestEnergy) { |
| 524 replaceItem = iter; | 506 replaceItem = iter; |
| 525 lowestEnergy = energy; | 507 lowestEnergy = energy; |
| 526 found_replace_item = true; | 508 found_replace_item = true; |
| 527 } | 509 } |
| 528 } | 510 } |
| 529 if (found_replace_item) { | 511 if (found_replace_item) { |
| 530 RTC_DCHECK(!muted); // Cannot replace with a muted frame. | 512 RTC_DCHECK(!muted); // Cannot replace with a muted frame. |
| 531 FrameAndMuteInfo replaceFrame = *replaceItem; | 513 FrameAndMuteInfo replaceFrame = *replaceItem; |
| 532 | 514 |
| 533 bool replaceWasMixed = false; | 515 bool replaceWasMixed = false; |
| 534 std::map<int, MixerAudioSource*>::const_iterator it = | 516 std::map<int, MixerAudioSource*>::const_iterator it = |
| 535 mixParticipantList->find(replaceFrame.frame->id_); | 517 mixParticipantList->find(replaceFrame.frame->id_); |
| 536 | 518 |
| 537 // When a frame is pushed to |activeList| it is also pushed | 519 // When a frame is pushed to |activeList| it is also pushed |
| 538 // to mixParticipantList with the frame's id. This means | 520 // to mixParticipantList with the frame's id. This means |
| 539 // that the Find call above should never fail. | 521 // that the Find call above should never fail. |
| 540 assert(it != mixParticipantList->end()); | 522 assert(it != mixParticipantList->end()); |
| 541 replaceWasMixed = it->second->_mixHistory->WasMixed(); | 523 replaceWasMixed = it->second->_mixHistory->WasMixed(); |
| 542 | 524 |
| 543 mixParticipantList->erase(replaceFrame.frame->id_); | 525 mixParticipantList->erase(replaceFrame.frame->id_); |
| 544 activeList.erase(replaceItem); | 526 activeList.erase(replaceItem); |
| 545 | 527 |
| 546 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); | 528 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); |
| 547 (*mixParticipantList)[audioFrame->id_] = *participant; | 529 (*mixParticipantList)[audio_frame->id_] = *participant; |
| 548 assert(mixParticipantList->size() <= | 530 assert(mixParticipantList->size() <= |
| 549 kMaximumAmountOfMixedParticipants); | 531 kMaximumAmountOfMixedParticipants); |
| 550 | 532 |
| 551 if (replaceWasMixed) { | 533 if (replaceWasMixed) { |
| 552 if (!replaceFrame.muted) { | 534 if (!replaceFrame.muted) { |
| 553 RampOut(*replaceFrame.frame); | 535 RampOut(*replaceFrame.frame); |
| 554 } | 536 } |
| 555 rampOutList->push_back(replaceFrame); | 537 rampOutList->push_back(replaceFrame); |
| 556 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 538 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); |
| 557 } else { | |
| 558 _audioFramePool->PushMemory(replaceFrame.frame); | |
| 559 } | 539 } |
| 560 } else { | 540 } else { |
| 561 if (wasMixed) { | 541 if (wasMixed) { |
| 562 if (!muted) { | 542 if (!muted) { |
| 563 RampOut(*audioFrame); | 543 RampOut(*audio_frame); |
| 564 } | 544 } |
| 565 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); | 545 rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted)); |
| 566 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 546 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); |
| 567 } else { | |
| 568 _audioFramePool->PushMemory(audioFrame); | |
| 569 } | 547 } |
| 570 } | 548 } |
| 571 } else { | 549 } else { |
| 572 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); | 550 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); |
| 573 (*mixParticipantList)[audioFrame->id_] = *participant; | 551 (*mixParticipantList)[audio_frame->id_] = *participant; |
| 574 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 552 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
| 575 } | 553 } |
| 576 } else { | 554 } else { |
| 577 if (wasMixed) { | 555 if (wasMixed) { |
| 578 ParticipantFrameStruct* part_struct = | 556 ParticipantFrameStruct* part_struct = |
| 579 new ParticipantFrameStruct(*participant, audioFrame, muted); | 557 new ParticipantFrameStruct(*participant, audio_frame, muted); |
| 580 passiveWasMixedList.push_back(part_struct); | 558 passiveWasMixedList.push_back(part_struct); |
| 581 } else if (mustAddToPassiveList) { | 559 } else if (mustAddToPassiveList) { |
| 582 if (!muted) { | 560 if (!muted) { |
| 583 RampIn(*audioFrame); | 561 RampIn(*audio_frame); |
| 584 } | 562 } |
| 585 ParticipantFrameStruct* part_struct = | 563 ParticipantFrameStruct* part_struct = |
| 586 new ParticipantFrameStruct(*participant, audioFrame, muted); | 564 new ParticipantFrameStruct(*participant, audio_frame, muted); |
| 587 passiveWasNotMixedList.push_back(part_struct); | 565 passiveWasNotMixedList.push_back(part_struct); |
| 588 } else { | |
| 589 _audioFramePool->PushMemory(audioFrame); | |
| 590 } | 566 } |
| 591 } | 567 } |
| 592 } | 568 } |
| 593 assert(activeList.size() <= *maxAudioFrameCounter); | 569 assert(activeList.size() <= *maxAudioFrameCounter); |
| 594 // At this point it is known which participants should be mixed. Transfer | 570 // At this point it is known which participants should be mixed. Transfer |
| 595 // this information to this function's output parameters. | 571 // this information to this function's output parameters. |
| 596 for (AudioFrameList::const_iterator iter = activeList.begin(); | 572 for (AudioFrameList::const_iterator iter = activeList.begin(); |
| 597 iter != activeList.end(); ++iter) { | 573 iter != activeList.end(); ++iter) { |
| 598 mixList->push_back(*iter); | 574 mixList->push_back(*iter); |
| 599 } | 575 } |
| 600 activeList.clear(); | 576 activeList.clear(); |
| 601 // Always mix a constant number of AudioFrames. If there aren't enough | 577 // Always mix a constant number of AudioFrames. If there aren't enough |
| 602 // active participants, mix passive ones. Starting with those that were mixed | 578 // active participants, mix passive ones. Starting with those that were mixed |
| 603 // last iteration. | 579 // last iteration. |
| 604 for (ParticipantFrameStructList::const_iterator iter = | 580 for (ParticipantFrameStructList::const_iterator iter = |
| 605 passiveWasMixedList.begin(); | 581 passiveWasMixedList.begin(); |
| 606 iter != passiveWasMixedList.end(); ++iter) { | 582 iter != passiveWasMixedList.end(); ++iter) { |
| 607 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 583 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
| 608 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 584 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
| 609 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 585 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
| 610 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 586 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
| 611 } else { | |
| 612 _audioFramePool->PushMemory((*iter)->audioFrame); | |
| 613 } | 587 } |
| 614 delete *iter; | 588 delete *iter; |
| 615 } | 589 } |
| 616 // And finally the ones that have not been mixed for a while. | 590 // And finally the ones that have not been mixed for a while. |
| 617 for (ParticipantFrameStructList::const_iterator iter = | 591 for (ParticipantFrameStructList::const_iterator iter = |
| 618 passiveWasNotMixedList.begin(); | 592 passiveWasNotMixedList.begin(); |
| 619 iter != passiveWasNotMixedList.end(); ++iter) { | 593 iter != passiveWasNotMixedList.end(); ++iter) { |
| 620 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 594 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
| 621 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 595 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
| 622 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 596 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
| 623 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 597 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
| 624 } else { | |
| 625 _audioFramePool->PushMemory((*iter)->audioFrame); | |
| 626 } | 598 } |
| 627 delete *iter; | 599 delete *iter; |
| 628 } | 600 } |
| 629 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); | 601 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); |
| 630 *maxAudioFrameCounter += mixListStartSize - mixList->size(); | 602 *maxAudioFrameCounter += mixListStartSize - mixList->size(); |
| 631 } | 603 } |
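The block above implements a "keep the loudest" rule: once activeList already holds *maxAudioFrameCounter frames, an unmuted newcomer only enters by displacing the lowest-energy entry, and a displaced frame that was mixed in the previous round is ramped out rather than cut. Isolated from the participant-map bookkeeping, the rule looks roughly like this sketch, assuming AudioFrameList is a std::list<FrameAndMuteInfo> and reusing the CalculateEnergy() helper already called above:

    // Sketch of the replacement step only; ramp-out and mixParticipantList
    // updates stay with the caller, as in the loop above.
    bool MaybeReplaceWeakest(AudioFrameList* activeList,
                             const FrameAndMuteInfo& candidate) {
      AudioFrameList::iterator weakest = activeList->end();
      // A muted candidate never wins: both sides of the comparison become 0.
      uint32_t lowestEnergy =
          candidate.muted ? 0 : CalculateEnergy(*candidate.frame);
      for (AudioFrameList::iterator iter = activeList->begin();
           iter != activeList->end(); ++iter) {
        const uint32_t energy =
            candidate.muted ? 0 : CalculateEnergy(*iter->frame);
        if (energy < lowestEnergy) {
          weakest = iter;
          lowestEnergy = energy;
        }
      }
      if (weakest == activeList->end())
        return false;  // Candidate is not louder than any current entry.
      activeList->erase(weakest);  // Caller ramps the displaced frame out.
      activeList->push_front(candidate);
      return true;
    }
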
| 632 | 604 |
| 633 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 605 void NewAudioConferenceMixerImpl::GetAdditionalAudio( |
| 634 AudioFrameList* additionalFramesList) const { | 606 AudioFrameList* additionalFramesList) const { |
| 635 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 607 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 636 "GetAdditionalAudio(additionalFramesList)"); | 608 "GetAdditionalAudio(additionalFramesList)"); |
| 637 // The GetAudioFrameWithMuted() callback may result in the participant being | 609 // The GetAudioFrameWithMuted() callback may result in the participant being |
| 638 // removed from additionalParticipantList_. If that happens it will | 610 // removed from additionalParticipantList_. If that happens it will |
| 639 // invalidate any iterators. Create a copy of the participants list such | 611 // invalidate any iterators. Create a copy of the participants list such |
| 640 // that the list of participants can be traversed safely. | 612 // that the list of participants can be traversed safely. |
| 641 MixerAudioSourceList additionalParticipantList; | 613 MixerAudioSourceList additionalParticipantList; |
| 642 additionalParticipantList.insert(additionalParticipantList.begin(), | 614 additionalParticipantList.insert(additionalParticipantList.begin(), |
| 643 _additionalParticipantList.begin(), | 615 _additionalParticipantList.begin(), |
| 644 _additionalParticipantList.end()); | 616 _additionalParticipantList.end()); |
| 645 | 617 |
| 646 for (MixerAudioSourceList::const_iterator participant = | 618 for (MixerAudioSourceList::const_iterator participant = |
| 647 additionalParticipantList.begin(); | 619 additionalParticipantList.begin(); |
| 648 participant != additionalParticipantList.end(); ++participant) { | 620 participant != additionalParticipantList.end(); ++participant) { |
| 649 AudioFrame* audioFrame = NULL; | 621 auto audio_frame_with_info = |
| 650 if (_audioFramePool->PopMemory(audioFrame) == -1) { | 622 (*participant)->GetAudioFrameWithMuted(_id, _outputFrequency); |
| 651 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 623 auto ret = audio_frame_with_info.audio_frame_info; |
| 652 "failed PopMemory() call"); | 624 AudioFrame* audio_frame = audio_frame_with_info.audio_frame_pointer; |
| 653 assert(false); | |
| 654 return; | |
| 655 } | |
| 656 audioFrame->sample_rate_hz_ = _outputFrequency; | |
| 657 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | |
| 658 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 625 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
| 659 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 626 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
| 660 "failed to GetAudioFrameWithMuted() from participant"); | 627 "failed to GetAudioFrameWithMuted() from participant"); |
| 661 _audioFramePool->PushMemory(audioFrame); | |
| 662 continue; | 628 continue; |
| 663 } | 629 } |
| 664 if (audioFrame->samples_per_channel_ == 0) { | 630 if (audio_frame->samples_per_channel_ == 0) { |
| 665 // Empty frame. Don't use it. | 631 // Empty frame. Don't use it. |
| 666 _audioFramePool->PushMemory(audioFrame); | |
| 667 continue; | 632 continue; |
| 668 } | 633 } |
| 669 additionalFramesList->push_back(FrameAndMuteInfo( | 634 additionalFramesList->push_back(FrameAndMuteInfo( |
| 670 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 635 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
| 671 } | 636 } |
| 672 } | 637 } |
| 673 | 638 |
| 674 void NewAudioConferenceMixerImpl::UpdateMixedStatus( | 639 void NewAudioConferenceMixerImpl::UpdateMixedStatus( |
| 675 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { | 640 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { |
| 676 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 641 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 677 "UpdateMixedStatus(mixedParticipantsMap)"); | 642 "UpdateMixedStatus(mixedParticipantsMap)"); |
| 678 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); | 643 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); |
| 679 | 644 |
| 680 // Loop through all participants. If they are in the mix map they | 645 // Loop through all participants. If they are in the mix map they |
| (...skipping 11 matching lines...) |
| 692 } | 657 } |
| 693 } | 658 } |
| 694 (*participant)->_mixHistory->SetIsMixed(isMixed); | 659 (*participant)->_mixHistory->SetIsMixed(isMixed); |
| 695 } | 660 } |
| 696 } | 661 } |
| 697 | 662 |
| 698 void NewAudioConferenceMixerImpl::ClearAudioFrameList( | 663 void NewAudioConferenceMixerImpl::ClearAudioFrameList( |
| 699 AudioFrameList* audioFrameList) const { | 664 AudioFrameList* audioFrameList) const { |
| 700 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 665 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 701 "ClearAudioFrameList(audioFrameList)"); | 666 "ClearAudioFrameList(audioFrameList)"); |
| 702 for (AudioFrameList::iterator iter = audioFrameList->begin(); | |
| 703 iter != audioFrameList->end(); ++iter) { | |
| 704 _audioFramePool->PushMemory(iter->frame); | |
| 705 } | |
| 706 audioFrameList->clear(); | 667 audioFrameList->clear(); |
| 707 } | 668 } |
| 708 | 669 |
| 709 bool NewAudioConferenceMixerImpl::IsParticipantInList( | 670 bool NewAudioConferenceMixerImpl::IsParticipantInList( |
| 710 const MixerAudioSource& participant, | 671 const MixerAudioSource& participant, |
| 711 const MixerAudioSourceList& participantList) const { | 672 const MixerAudioSourceList& participantList) const { |
| 712 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 673 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 713 "IsParticipantInList(participant,participantList)"); | 674 "IsParticipantInList(participant,participantList)"); |
| 714 for (MixerAudioSourceList::const_iterator iter = participantList.begin(); | 675 for (MixerAudioSourceList::const_iterator iter = participantList.begin(); |
| 715 iter != participantList.end(); ++iter) { | 676 iter != participantList.end(); ++iter) { |
| (...skipping 116 matching lines...) |
| 832 | 793 |
| 833 if (error != _limiter->kNoError) { | 794 if (error != _limiter->kNoError) { |
| 834 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 795 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 835 "Error from AudioProcessing: %d", error); | 796 "Error from AudioProcessing: %d", error); |
| 836 assert(false); | 797 assert(false); |
| 837 return false; | 798 return false; |
| 838 } | 799 } |
| 839 return true; | 800 return true; |
| 840 } | 801 } |
| 841 } // namespace webrtc | 802 } // namespace webrtc |