Index: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
diff --git a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
index 5355382745f3e92ab14c42d8b5fd21fffe819e33..edeecabdeb6f474b4f9813f4ff6b778c205bf982 100644
--- a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
@@ -113,7 +113,6 @@ NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
       _minimumMixingFreq(kLowestPossible),
       _outputFrequency(kDefaultFrequency),
       _sampleSize(0),
-      _audioFramePool(NULL),
       _participantList(),
       _additionalParticipantList(),
       _numMixedParticipants(0),
@@ -136,11 +135,6 @@ bool NewAudioConferenceMixerImpl::Init() {
   if (!_limiter.get())
     return false;
 
-  MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
-                                           DEFAULT_AUDIO_FRAME_POOLSIZE);
-  if (_audioFramePool == NULL)
-    return false;
-
   if (SetOutputFrequency(kDefaultFrequency) == -1)
     return false;
 
@@ -167,11 +161,6 @@ bool NewAudioConferenceMixerImpl::Init() {
   return true;
 }
 
-NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
-  MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
-  assert(_audioFramePool == NULL);
-}
-
 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
   size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
   {
@@ -474,20 +463,11 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
 
     bool wasMixed = false;
     wasMixed = (*participant)->_mixHistory->WasMixed();
-    AudioFrame* audioFrame = NULL;
-    if (_audioFramePool->PopMemory(audioFrame) == -1) {
-      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
-                   "failed PopMemory() call");
-      assert(false);
-      return;
-    }
+    AudioFrame* audioFrame = (*participant)->GetFramePointer();
     audioFrame->sample_rate_hz_ = _outputFrequency;
 
     auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
-                   "failed to GetAudioFrameWithMuted() from participant");
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
     const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
@@ -554,8 +534,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
             }
             rampOutList->push_back(replaceFrame);
             assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
-          } else {
-            _audioFramePool->PushMemory(replaceFrame.frame);
           }
         } else {
           if (wasMixed) {
@@ -564,8 +542,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
             }
             rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
             assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
-          } else {
-            _audioFramePool->PushMemory(audioFrame);
           }
         }
       } else {
@@ -585,8 +561,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
         ParticipantFrameStruct* part_struct =
             new ParticipantFrameStruct(*participant, audioFrame, muted);
         passiveWasNotMixedList.push_back(part_struct);
-      } else {
-        _audioFramePool->PushMemory(audioFrame);
       }
     }
   }
@@ -608,8 +582,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
       mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
       (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
       assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
-    } else {
-      _audioFramePool->PushMemory((*iter)->audioFrame);
     }
     delete *iter;
   }
@@ -621,8 +593,6 @@ void NewAudioConferenceMixerImpl::UpdateToMix(
       mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
       (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
       assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
-    } else {
-      _audioFramePool->PushMemory((*iter)->audioFrame);
     }
     delete *iter;
   }
@@ -646,24 +616,16 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
   for (MixerAudioSourceList::const_iterator participant =
            additionalParticipantList.begin();
        participant != additionalParticipantList.end(); ++participant) {
-    AudioFrame* audioFrame = NULL;
-    if (_audioFramePool->PopMemory(audioFrame) == -1) {
-      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
-                   "failed PopMemory() call");
-      assert(false);
-      return;
-    }
+    AudioFrame* audioFrame = (*participant)->GetFramePointer();
     audioFrame->sample_rate_hz_ = _outputFrequency;
     auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                    "failed to GetAudioFrameWithMuted() from participant");
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
     if (audioFrame->samples_per_channel_ == 0) {
       // Empty frame. Don't use it.
-      _audioFramePool->PushMemory(audioFrame);
       continue;
     }
     additionalFramesList->push_back(FrameAndMuteInfo(
@@ -699,10 +661,6 @@ void NewAudioConferenceMixerImpl::ClearAudioFrameList(
     AudioFrameList* audioFrameList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "ClearAudioFrameList(audioFrameList)");
-  for (AudioFrameList::iterator iter = audioFrameList->begin();
-       iter != audioFrameList->end(); ++iter) {
-    _audioFramePool->PushMemory(iter->frame);
-  }
   audioFrameList->clear();
 }
 
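---
Reviewer note (not part of the patch): this change swaps the shared
MemoryPool<AudioFrame> for a frame that each participant hands out via
GetFramePointer(), so frames are owned by their MixerAudioSource and the
PopMemory()/PushMemory() bookkeeping disappears. Below is a minimal sketch
of the ownership model the new code appears to assume; the member name
|mix_frame_| and the surrounding layout are illustrative, not taken from
this CL.

    // Hypothetical sketch, for review discussion only.
    #include <memory>

    #include "webrtc/modules/include/module_common_types.h"  // AudioFrame

    namespace webrtc {

    class MixerAudioSource {
     public:
      // Hands the mixer a frame owned by this source. The mixer may write
      // into it (sample rate, PCM data) during a mixing round, but must not
      // delete it or keep the pointer past the source's lifetime.
      AudioFrame* GetFramePointer() { return mix_frame_.get(); }

     private:
      // One preallocated, reusable frame per source replaces the pool:
      // allocation happens once, when the source is constructed.
      std::unique_ptr<AudioFrame> mix_frame_{new AudioFrame()};
    };

    }  // namespace webrtc

Under this model the kError and empty-frame paths in UpdateToMix() and
GetAdditionalAudio() can simply continue, and ClearAudioFrameList() only
clears the list, since there is no pool to return frames to. The implied
constraint is that a pointer obtained from GetFramePointer() stays valid
only while its participant remains registered with the mixer.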