Index: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
diff --git a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
index 5355382745f3e92ab14c42d8b5fd21fffe819e33..466506a3c651630d64bf0cdf4b4b0f551e2138f3 100644
--- a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
@@ -113,7 +113,6 @@ NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
       _minimumMixingFreq(kLowestPossible),
       _outputFrequency(kDefaultFrequency),
       _sampleSize(0),
-      _audioFramePool(NULL),
       _participantList(),
       _additionalParticipantList(),
      _numMixedParticipants(0),
@@ -136,11 +135,6 @@ bool NewAudioConferenceMixerImpl::Init() {
   if (!_limiter.get())
     return false;
 
-  MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
-                                           DEFAULT_AUDIO_FRAME_POOLSIZE);
-  if (_audioFramePool == NULL)
-    return false;
-
   if (SetOutputFrequency(kDefaultFrequency) == -1)
     return false;
 
@@ -167,11 +161,6 @@ bool NewAudioConferenceMixerImpl::Init() {
   return true;
 }
 
-NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
-  MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
-  assert(_audioFramePool == NULL);
-}
-
 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
   size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
   {
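Note: the hunks that follow consume a new struct-returning form of GetAudioFrameWithMuted() instead of filling a pool-allocated frame in place. For orientation, here is a minimal sketch of the interface shape those call sites imply. The member names audio_frame_pointer and audio_frame_info and the kError/kMuted enumerators are taken directly from the call sites; the kNormal enumerator, the exact signature, and the ownership comment are assumptions, not part of this patch.

class MixerAudioSource {
 public:
  enum class AudioFrameInfo { kNormal, kMuted, kError };

  struct AudioFrameWithInfo {
    AudioFrame* audio_frame_pointer;  // Owned by the source; the mixer
                                      // only borrows it (assumption).
    AudioFrameInfo audio_frame_info;
  };

  // The source now receives the target rate and resamples internally,
  // replacing the old pattern where the mixer set sample_rate_hz_ on a
  // pool-allocated frame before the call.
  virtual AudioFrameWithInfo GetAudioFrameWithMuted(int id,
                                                    int sample_rate_hz) = 0;
};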
@@ -474,47 +463,40 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
 
     bool wasMixed = false;
     wasMixed = (*participant)->_mixHistory->WasMixed();
-    AudioFrame* audioFrame = NULL;
-    if (_audioFramePool->PopMemory(audioFrame) == -1) {
-      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
-                   "failed PopMemory() call");
-      assert(false);
-      return;
-    }
-    audioFrame->sample_rate_hz_ = _outputFrequency;
 
-    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
+    auto audio_frame_with_info =
+        (*participant)->GetAudioFrameWithMuted(_id, _outputFrequency);
+
+    auto ret = audio_frame_with_info.audio_frame_info;
+    AudioFrame* audio_frame = audio_frame_with_info.audio_frame_pointer;
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
-                   "failed to GetAudioFrameWithMuted() from participant");
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
     const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
     if (_participantList.size() != 1) {
       // TODO(wu): Issue 3390, add support for multiple participants case.
-      audioFrame->ntp_time_ms_ = -1;
+      audio_frame->ntp_time_ms_ = -1;
     }
 
     // TODO(henrike): this assert triggers in some test cases where SRTP is
     // used which prevents NetEQ from making a VAD. Temporarily disable this
     // assert until the problem is fixed on a higher level.
-    // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
-    if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
+    // assert(audio_frame->vad_activity_ != AudioFrame::kVadUnknown);
+    if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                    "invalid VAD state from participant");
     }
 
-    if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
+    if (audio_frame->vad_activity_ == AudioFrame::kVadActive) {
       if (!wasMixed && !muted) {
-        RampIn(*audioFrame);
+        RampIn(*audio_frame);
       }
 
       if (activeList.size() >= *maxAudioFrameCounter) {
         // There are already more active participants than should be
         // mixed. Only keep the ones with the highest energy.
         AudioFrameList::iterator replaceItem;
-        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);
+        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);
 
         bool found_replace_item = false;
         for (AudioFrameList::iterator iter = activeList.begin();
@@ -543,8 +525,8 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
             mixParticipantList->erase(replaceFrame.frame->id_);
             activeList.erase(replaceItem);
 
-            activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
-            (*mixParticipantList)[audioFrame->id_] = *participant;
+            activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
+            (*mixParticipantList)[audio_frame->id_] = *participant;
             assert(mixParticipantList->size() <=
                    kMaximumAmountOfMixedParticipants);
 
@@ -554,39 +536,33 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
             }
             rampOutList->push_back(replaceFrame);
             assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
-          } else {
-            _audioFramePool->PushMemory(replaceFrame.frame);
           }
         } else {
           if (wasMixed) {
            if (!muted) {
-              RampOut(*audioFrame);
+              RampOut(*audio_frame);
            }
-            rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
+            rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
            assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
-          } else {
-            _audioFramePool->PushMemory(audioFrame);
           }
         }
       } else {
-        activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
-        (*mixParticipantList)[audioFrame->id_] = *participant;
+        activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
+        (*mixParticipantList)[audio_frame->id_] = *participant;
         assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
       }
     } else {
       if (wasMixed) {
         ParticipantFrameStruct* part_struct =
-            new ParticipantFrameStruct(*participant, audioFrame, muted);
+            new ParticipantFrameStruct(*participant, audio_frame, muted);
         passiveWasMixedList.push_back(part_struct);
       } else if (mustAddToPassiveList) {
         if (!muted) {
-          RampIn(*audioFrame);
+          RampIn(*audio_frame);
         }
         ParticipantFrameStruct* part_struct =
-            new ParticipantFrameStruct(*participant, audioFrame, muted);
+            new ParticipantFrameStruct(*participant, audio_frame, muted);
         passiveWasNotMixedList.push_back(part_struct);
-      } else {
-        _audioFramePool->PushMemory(audioFrame);
       }
     }
   }
@@ -608,8 +584,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
       mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
       assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
-    } else {
-      _audioFramePool->PushMemory((*iter)->audioFrame);
     }
     delete *iter;
   }
@@ -621,8 +595,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
       mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
       assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
-    } else {
-      _audioFramePool->PushMemory((*iter)->audioFrame);
     }
     delete *iter;
   }
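Note: the net effect in UpdateToMix() is that every _audioFramePool->PushMemory() cleanup branch disappears; since the mixer never owns a frame, dropping one simply means not putting it on a list. The per-participant fetch reduces to the pattern below (a sketch reusing the assumed interface above; the lifetime comment is an assumption).

    auto audio_frame_with_info =
        (*participant)->GetAudioFrameWithMuted(_id, _outputFrequency);
    if (audio_frame_with_info.audio_frame_info ==
        MixerAudioSource::AudioFrameInfo::kError) {
      continue;  // Nothing to release; the source keeps its own frame.
    }
    AudioFrame* audio_frame = audio_frame_with_info.audio_frame_pointer;
    const bool muted = (audio_frame_with_info.audio_frame_info ==
                        MixerAudioSource::AudioFrameInfo::kMuted);
    // |audio_frame| presumably stays valid for the duration of this mix
    // pass, so it can sit in mixList or rampOutList without a deep copy.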
@@ -646,28 +618,21 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
   for (MixerAudioSourceList::const_iterator participant =
            additionalParticipantList.begin();
        participant != additionalParticipantList.end(); ++participant) {
-    AudioFrame* audioFrame = NULL;
-    if (_audioFramePool->PopMemory(audioFrame) == -1) {
-      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
-                   "failed PopMemory() call");
-      assert(false);
-      return;
-    }
-    audioFrame->sample_rate_hz_ = _outputFrequency;
-    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
+    auto audio_frame_with_info =
+        (*participant)->GetAudioFrameWithMuted(_id, _outputFrequency);
+    auto ret = audio_frame_with_info.audio_frame_info;
+    AudioFrame* audio_frame = audio_frame_with_info.audio_frame_pointer;
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                    "failed to GetAudioFrameWithMuted() from participant");
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
-    if (audioFrame->samples_per_channel_ == 0) {
+    if (audio_frame->samples_per_channel_ == 0) {
       // Empty frame. Don't use it.
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
     additionalFramesList->push_back(FrameAndMuteInfo(
-        audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
+        audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
   }
 }
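Note: the same simplification applies here; the kError path and the empty-frame path (samples_per_channel_ == 0) previously had to return the popped frame to the pool before continuing, and both now shrink to the bare continue.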
@@ -699,10 +664,6 @@ void NewAudioConferenceMixerImpl::ClearAudioFrameList(
     AudioFrameList* audioFrameList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "ClearAudioFrameList(audioFrameList)");
-  for (AudioFrameList::iterator iter = audioFrameList->begin();
-       iter != audioFrameList->end(); ++iter) {
-    _audioFramePool->PushMemory(iter->frame);
-  }
   audioFrameList->clear();
 }
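Note: taken together, the patch moves frame ownership out of the mixer entirely. Init() no longer creates a MemoryPool, the destructor whose only job was deleting that pool is removed (presumably its declaration is dropped from the header, which is outside this file), and ClearAudioFrameList() now only drops pointers. A conforming source would then keep its frame alive across the call, along these lines (a sketch under that assumption; FillFrameFromSource is a hypothetical helper, not part of this patch).

class SomeAudioSource : public MixerAudioSource {
 public:
  AudioFrameWithInfo GetAudioFrameWithMuted(int id,
                                            int sample_rate_hz) override {
    frame_.sample_rate_hz_ = sample_rate_hz;
    const bool muted = FillFrameFromSource(&frame_);  // Hypothetical helper.
    return {&frame_,
            muted ? AudioFrameInfo::kMuted : AudioFrameInfo::kNormal};
  }

 private:
  // Reused between calls; the pointer handed to the mixer stays valid
  // until the next GetAudioFrameWithMuted() call.
  AudioFrame frame_;
};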