Chromium Code Reviews
| Index: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc |
| diff --git a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc |
| index 36d70b217133e9f77643eadb4c02516b1f9109ca..128da74a317b4574f02cb2f9369c2abf1af8b2f0 100644 |
| --- a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc |
| +++ b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc |
| @@ -1,5 +1,5 @@ |
| /* |
| - * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
| * |
| * Use of this source code is governed by a BSD-style license |
| * that can be found in the LICENSE file in the root of the source |
| @@ -39,7 +39,7 @@ typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList; |
| // |
| // TODO(andrew): consider not modifying |frame| here. |
| void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
| - assert(mixed_frame->num_channels_ >= frame->num_channels_); |
| + RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); |
| if (use_limiter) { |
| // Divide by two to avoid saturation in the mixing. |
| // This is only meaningful if the limiter will be used. |
| @@ -47,7 +47,8 @@ void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
| } |
| if (mixed_frame->num_channels_ > frame->num_channels_) { |
| // We only support mono-to-stereo. |
| - assert(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1); |
| + RTC_DCHECK_EQ(mixed_frame->num_channels_, static_cast<size_t>(2)); |
| + RTC_DCHECK_EQ(frame->num_channels_, static_cast<size_t>(1)); |
| AudioFrameOperations::MonoToStereo(frame); |
| } |
| @@ -111,7 +112,6 @@ NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) { |
| NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) |
| : _id(id), |
| _minimumMixingFreq(kLowestPossible), |
| - _mixReceiver(NULL), |
| _outputFrequency(kDefaultFrequency), |
| _sampleSize(0), |
| _audioFramePool(NULL), |
| @@ -171,7 +171,7 @@ bool NewAudioConferenceMixerImpl::Init() { |
| NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { |
| MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); |
| - assert(_audioFramePool == NULL); |
| + RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr)); |
| } |
| // Process should be called every kProcessPeriodicityInMs ms |
| @@ -182,17 +182,22 @@ int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { |
| WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| "failed in TimeToNextUpdate() call"); |
| // Sanity check |
| - assert(false); |
| + RTC_DCHECK(false); |
| return -1; |
| } |
| return timeUntilNextProcess; |
| } |
| void NewAudioConferenceMixerImpl::Process() { |
| + // TODO(aleloi) Remove this method. |
| + RTC_NOTREACHED(); |
| +} |
| + |
| +void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
| size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; |
| { |
| CriticalSectionScoped cs(_crit.get()); |
| - assert(_processCalls == 0); |
| + RTC_DCHECK_EQ(_processCalls, 0); |
| _processCalls++; |
| // Let the scheduler know that we are running one iteration. |
| @@ -244,7 +249,7 @@ void NewAudioConferenceMixerImpl::Process() { |
| } |
| break; |
| default: |
| - assert(false); |
| + RTC_DCHECK(false); |
|
tommi
2016/07/08 12:24:17
nit: RTC_NOTREACHED()
aleloi
2016/07/08 12:57:40
Done.
|
| CriticalSectionScoped cs(_crit.get()); |
| _processCalls--; |
| @@ -259,62 +264,42 @@ void NewAudioConferenceMixerImpl::Process() { |
| UpdateMixedStatus(mixedParticipantsMap); |
| } |
| - // Get an AudioFrame for mixing from the memory pool. |
| - AudioFrame* mixedAudio = NULL; |
| - if (_audioFramePool->PopMemory(mixedAudio) == -1) { |
| - WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
| - "failed PopMemory() call"); |
| - assert(false); |
| - return; |
| - } |
| + // TODO(henrike): it might be better to decide the number of channels |
|
tommi
2016/07/08 12:24:17
henrike doesn't work on webrtc anymore. Can you assign the TODO to someone else? [text truncated in original]
aleloi
2016/07/08 12:57:40
Done.
|
| + // with an API instead of dynamically. |
| - { |
| - CriticalSectionScoped cs(_crit.get()); |
| - |
| - // TODO(henrike): it might be better to decide the number of channels |
| - // with an API instead of dynamically. |
| + // Find the max channels over all mixing lists. |
| + const size_t num_mixed_channels = std::max( |
| + MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList), |
| + MaxNumChannels(&rampOutList))); |
| - // Find the max channels over all mixing lists. |
| - const size_t num_mixed_channels = |
| - std::max(MaxNumChannels(&mixList), |
| - std::max(MaxNumChannels(&additionalFramesList), |
| - MaxNumChannels(&rampOutList))); |
| + audio_frame_for_mixing->UpdateFrame( |
| + -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, |
| + AudioFrame::kVadPassive, num_mixed_channels); |
| - mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, |
| - AudioFrame::kNormalSpeech, AudioFrame::kVadPassive, |
| - num_mixed_channels); |
| + _timeStamp += static_cast<uint32_t>(_sampleSize); |
| - _timeStamp += static_cast<uint32_t>(_sampleSize); |
| + use_limiter_ = _numMixedParticipants > 1 && |
| + _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
| - // We only use the limiter if it supports the output sample rate and |
| - // we're actually mixing multiple streams. |
| - use_limiter_ = _numMixedParticipants > 1 && |
| - _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
| + // We only use the limiter if it supports the output sample rate and |
| + // we're actually mixing multiple streams. |
| + MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); |
| - MixFromList(mixedAudio, mixList); |
| - MixAnonomouslyFromList(mixedAudio, additionalFramesList); |
| - MixAnonomouslyFromList(mixedAudio, rampOutList); |
| + { |
| + CriticalSectionScoped cs(_crit.get()); |
| + MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
| + MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList); |
| - if (mixedAudio->samples_per_channel_ == 0) { |
| + if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
| // Nothing was mixed, set the audio samples to silence. |
| - mixedAudio->samples_per_channel_ = _sampleSize; |
| - mixedAudio->Mute(); |
| + audio_frame_for_mixing->samples_per_channel_ = _sampleSize; |
| + audio_frame_for_mixing->Mute(); |
| } else { |
| // Only call the limiter if we have something to mix. |
| - LimitMixedAudio(mixedAudio); |
| + LimitMixedAudio(audio_frame_for_mixing); |
| } |
| } |
| - { |
| - CriticalSectionScoped cs(_cbCrit.get()); |
| - if (_mixReceiver != NULL) { |
| - const AudioFrame** dummy = NULL; |
| - _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0); |
| - } |
| - } |
| - |
| - // Reclaim all outstanding memory. |
| - _audioFramePool->PushMemory(mixedAudio); |
| ClearAudioFrameList(&mixList); |
| ClearAudioFrameList(&rampOutList); |
| ClearAudioFrameList(&additionalFramesList); |
| @@ -325,25 +310,6 @@ void NewAudioConferenceMixerImpl::Process() { |
| return; |
| } |
| -int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback( |
| - OldAudioMixerOutputReceiver* mixReceiver) { |
| - CriticalSectionScoped cs(_cbCrit.get()); |
| - if (_mixReceiver != NULL) { |
| - return -1; |
| - } |
| - _mixReceiver = mixReceiver; |
| - return 0; |
| -} |
| - |
| -int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { |
| - CriticalSectionScoped cs(_cbCrit.get()); |
| - if (_mixReceiver == NULL) { |
| - return -1; |
| - } |
| - _mixReceiver = NULL; |
| - return 0; |
| -} |
| - |
| int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
| const Frequency& frequency) { |
| CriticalSectionScoped cs(_crit.get()); |
| @@ -388,7 +354,7 @@ int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus( |
| if (!success) { |
| WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| "failed to %s participant", mixable ? "add" : "remove"); |
| - assert(false); |
| + RTC_DCHECK(false); |
|
tommi
2016/07/08 12:24:17
RTC_NOTREACHED() here as well (and below). Down the rest of the file too. [text truncated in original]
aleloi
2016/07/08 12:57:40
Done.
|
| return -1; |
| } |
| @@ -424,7 +390,7 @@ int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus( |
| if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) { |
| WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| "unable to remove participant from anonymous list"); |
| - assert(false); |
| + RTC_DCHECK(false); |
| return -1; |
| } |
| return AddParticipantToList(participant, &_participantList) ? 0 : -1; |
| @@ -468,7 +434,7 @@ int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) { |
| } else { |
| WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| "SetMinimumMixingFrequency incorrect frequency: %i", freq); |
| - assert(false); |
| + RTC_DCHECK(false); |
| return -1; |
| } |
| } |
| @@ -535,7 +501,7 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| if (_audioFramePool->PopMemory(audioFrame) == -1) { |
| WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
| "failed PopMemory() call"); |
| - assert(false); |
| + RTC_DCHECK(false); |
| return; |
| } |
| audioFrame->sample_rate_hz_ = _outputFrequency; |
| @@ -556,7 +522,7 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| // TODO(henrike): this assert triggers in some test cases where SRTP is |
| // used which prevents NetEQ from making a VAD. Temporarily disable this |
| // assert until the problem is fixed on a higher level. |
| - // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown); |
| + // RTC_DCHECK_NE(audioFrame->vad_activity_, AudioFrame::kVadUnknown); |
| if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { |
| WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
| "invalid VAD state from participant"); |
| @@ -594,7 +560,7 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| // When a frame is pushed to |activeList| it is also pushed |
| // to mixParticipantList with the frame's id. This means |
| // that the Find call above should never fail. |
| - assert(it != mixParticipantList->end()); |
| + RTC_DCHECK(it != mixParticipantList->end()); |
| replaceWasMixed = it->second->_mixHistory->WasMixed(); |
| mixParticipantList->erase(replaceFrame.frame->id_); |
| @@ -602,15 +568,17 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); |
| (*mixParticipantList)[audioFrame->id_] = *participant; |
| - assert(mixParticipantList->size() <= |
| - kMaximumAmountOfMixedParticipants); |
| + RTC_DCHECK_LE(mixParticipantList->size(), |
| + static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
| if (replaceWasMixed) { |
| if (!replaceFrame.muted) { |
| RampOut(*replaceFrame.frame); |
| } |
| rampOutList->push_back(replaceFrame); |
| - assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); |
| + RTC_DCHECK_LE( |
| + rampOutList->size(), |
| + static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
| } else { |
| _audioFramePool->PushMemory(replaceFrame.frame); |
| } |
| @@ -620,7 +588,9 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| RampOut(*audioFrame); |
| } |
| rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); |
| - assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); |
| + RTC_DCHECK_LE( |
| + rampOutList->size(), |
| + static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
| } else { |
| _audioFramePool->PushMemory(audioFrame); |
| } |
| @@ -628,7 +598,8 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| } else { |
| activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); |
| (*mixParticipantList)[audioFrame->id_] = *participant; |
| - assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
| + RTC_DCHECK_LE(mixParticipantList->size(), |
| + static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
| } |
| } else { |
| if (wasMixed) { |
| @@ -647,7 +618,7 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| } |
| } |
| } |
| - assert(activeList.size() <= *maxAudioFrameCounter); |
| + RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter); |
| // At this point it is known which participants should be mixed. Transfer |
| // this information to this functions output parameters. |
| for (AudioFrameList::const_iterator iter = activeList.begin(); |
| @@ -664,7 +635,8 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
| mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
| (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
| - assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
| + RTC_DCHECK_LE(mixParticipantList->size(), |
| + static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
| } else { |
| _audioFramePool->PushMemory((*iter)->audioFrame); |
| } |
| @@ -677,13 +649,14 @@ void NewAudioConferenceMixerImpl::UpdateToMix( |
| if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
| mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); |
| (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; |
| - assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); |
| + RTC_DCHECK_LE(mixParticipantList->size(), |
| + static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
| } else { |
| _audioFramePool->PushMemory((*iter)->audioFrame); |
| } |
| delete *iter; |
| } |
| - assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); |
| + RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size()); |
| *maxAudioFrameCounter += mixListStartSize - mixList->size(); |
| } |
| @@ -707,7 +680,7 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio( |
| if (_audioFramePool->PopMemory(audioFrame) == -1) { |
| WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
| "failed PopMemory() call"); |
| - assert(false); |
| + RTC_DCHECK(false); |
| return; |
| } |
| audioFrame->sample_rate_hz_ = _outputFrequency; |
| @@ -732,7 +705,8 @@ void NewAudioConferenceMixerImpl::UpdateMixedStatus( |
| const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { |
| WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| "UpdateMixedStatus(mixedParticipantsMap)"); |
| - assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); |
| + RTC_DCHECK_LE(mixedParticipantsMap.size(), |
| + static_cast<size_t>(kMaximumAmountOfMixedParticipants)); |
| // Loop through all participants. If they are in the mix map they |
| // were mixed. |
| @@ -807,15 +781,17 @@ bool NewAudioConferenceMixerImpl::RemoveParticipantFromList( |
| int32_t NewAudioConferenceMixerImpl::MixFromList( |
| AudioFrame* mixedAudio, |
| - const AudioFrameList& audioFrameList) const { |
| - WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| + const AudioFrameList& audioFrameList, |
| + int32_t id, |
| + bool use_limiter) { |
| + WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
| "MixFromList(mixedAudio, audioFrameList)"); |
| if (audioFrameList.empty()) |
| return 0; |
| uint32_t position = 0; |
| - if (_numMixedParticipants == 1) { |
| + if (audioFrameList.size() == 1) { |
| mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; |
| mixedAudio->elapsed_time_ms_ = |
| audioFrameList.front().frame->elapsed_time_ms_; |
| @@ -830,15 +806,15 @@ int32_t NewAudioConferenceMixerImpl::MixFromList( |
| iter != audioFrameList.end(); ++iter) { |
| if (position >= kMaximumAmountOfMixedParticipants) { |
| WEBRTC_TRACE( |
| - kTraceMemory, kTraceAudioMixerServer, _id, |
| + kTraceMemory, kTraceAudioMixerServer, id, |
| "Trying to mix more than max amount of mixed participants:%d!", |
| kMaximumAmountOfMixedParticipants); |
| // Assert and avoid crash |
| - assert(false); |
| + RTC_DCHECK(false); |
| position = 0; |
| } |
| if (!iter->muted) { |
| - MixFrames(mixedAudio, iter->frame, use_limiter_); |
| + MixFrames(mixedAudio, iter->frame, use_limiter); |
| } |
| position++; |
| @@ -890,7 +866,7 @@ bool NewAudioConferenceMixerImpl::LimitMixedAudio( |
| if (error != _limiter->kNoError) { |
| WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| "Error from AudioProcessing: %d", error); |
| - assert(false); |
| + RTC_DCHECK(false); |
| return false; |
| } |
| return true; |