Index: webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index af91c69f694d7cd112a536d4e0260fe022436d57..e64ad932567615a0d9615f3925049e97e4fcaa8e 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -107,7 +107,7 @@ AudioConferenceMixer* AudioConferenceMixer::Create(int id) {
AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id);
if(!mixer->Init()) {
delete mixer;
- return NULL;
+ return nullptr;
}
return mixer;
}
@@ -115,10 +115,10 @@ AudioConferenceMixer* AudioConferenceMixer::Create(int id) {
AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
: _id(id),
_minimumMixingFreq(kLowestPossible),
- _mixReceiver(NULL),
+ _mixReceiver(nullptr),
_outputFrequency(kDefaultFrequency),
_sampleSize(0),
- _audioFramePool(NULL),
+ _audioFramePool(nullptr),
_participantList(),
_additionalParticipantList(),
_numMixedParticipants(0),
@@ -129,12 +129,12 @@ AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
bool AudioConferenceMixerImpl::Init() {
_crit.reset(CriticalSectionWrapper::CreateCriticalSection());
- if (_crit.get() == NULL)
- return false;
+ if (_crit.get() == nullptr)
+ return false;
_cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection());
- if(_cbCrit.get() == NULL)
- return false;
+ if (_cbCrit.get() == nullptr)
+ return false;
Config config;
config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
@@ -144,8 +144,8 @@ bool AudioConferenceMixerImpl::Init() {
MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
DEFAULT_AUDIO_FRAME_POOLSIZE);
- if(_audioFramePool == NULL)
- return false;
+ if (_audioFramePool == nullptr)
+ return false;
if(SetOutputFrequency(kDefaultFrequency) == -1)
return false;
@@ -175,7 +175,7 @@ bool AudioConferenceMixerImpl::Init() {
AudioConferenceMixerImpl::~AudioConferenceMixerImpl() {
MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
- assert(_audioFramePool == NULL);
+ assert(_audioFramePool == nullptr);
}
// Process should be called every kProcessPeriodicityInMs ms
@@ -265,7 +265,7 @@ void AudioConferenceMixerImpl::Process() {
}
// Get an AudioFrame for mixing from the memory pool.
- AudioFrame* mixedAudio = NULL;
+ AudioFrame* mixedAudio = nullptr;
if(_audioFramePool->PopMemory(mixedAudio) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
"failed PopMemory() call");
@@ -284,7 +284,7 @@ void AudioConferenceMixerImpl::Process() {
std::max(MaxNumChannels(&additionalFramesList),
MaxNumChannels(&rampOutList)));
- mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
+ mixedAudio->UpdateFrame(-1, _timeStamp, nullptr, 0, _outputFrequency,
AudioFrame::kNormalSpeech,
AudioFrame::kVadPassive, num_mixed_channels);
@@ -312,13 +312,9 @@ void AudioConferenceMixerImpl::Process() {
{
CriticalSectionScoped cs(_cbCrit.get());
- if(_mixReceiver != NULL) {
- const AudioFrame** dummy = NULL;
- _mixReceiver->NewMixedAudio(
- _id,
- *mixedAudio,
- dummy,
- 0);
+ if (_mixReceiver != nullptr) {
+ const AudioFrame** dummy = nullptr;
+ _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0);
}
}
@@ -337,8 +333,8 @@ void AudioConferenceMixerImpl::Process() {
int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
AudioMixerOutputReceiver* mixReceiver) {
CriticalSectionScoped cs(_cbCrit.get());
- if(_mixReceiver != NULL) {
- return -1;
+ if (_mixReceiver != nullptr) {
+ return -1;
}
_mixReceiver = mixReceiver;
return 0;
@@ -346,10 +342,10 @@ int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
CriticalSectionScoped cs(_cbCrit.get());
- if(_mixReceiver == NULL) {
- return -1;
+ if (_mixReceiver == nullptr) {
+ return -1;
}
- _mixReceiver = NULL;
+ _mixReceiver = nullptr;
return 0;
}
@@ -546,7 +542,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
bool wasMixed = false;
wasMixed = (*participant)->_mixHistory->WasMixed();
- AudioFrame* audioFrame = NULL;
+ AudioFrame* audioFrame = nullptr;
if(_audioFramePool->PopMemory(audioFrame) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
"failed PopMemory() call");
@@ -734,12 +730,12 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
additionalParticipantList.begin();
participant != additionalParticipantList.end();
++participant) {
- AudioFrame* audioFrame = NULL;
- if(_audioFramePool->PopMemory(audioFrame) == -1) {
- WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
- "failed PopMemory() call");
- assert(false);
- return;
+ AudioFrame* audioFrame = nullptr;
+ if (_audioFramePool->PopMemory(audioFrame) == -1) {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
+ "failed PopMemory() call");
+ assert(false);
+ return;
}
audioFrame->sample_rate_hz_ = _outputFrequency;
auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);