Index: webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index dce3d0b5451b85a7558cb4abf9d5976cde2b005f..d6aa97714f2404f58dda787225c280ad502da518 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -40,7 +40,7 @@ void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
   if (use_limiter) {
     // Divide by two to avoid saturation in the mixing.
     // This is only meaningful if the limiter will be used.
-    *frame >>= 1;
+    AudioFrameOperations::ShiftDown(frame);
the sun (2016/10/19 09:23:16):
Note, if this function was called DivideBy2(), App

aleloi (2016/10/20 08:27:05):
Done.

   }
   if (mixed_frame->num_channels_ > frame->num_channels_) {
     // We only support mono-to-stereo.
@@ -49,7 +49,7 @@ void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
     AudioFrameOperations::MonoToStereo(frame);
   }
-  *mixed_frame += *frame;
+  AudioFrameOperations::AddFrames(*frame, mixed_frame);
 }
 // Return the max number of channels from a |list| composed of AudioFrames.
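
The comments in MixFrames above capture the core of the mixing arithmetic: each frame is shifted down one bit before mixing so that summing two signals cannot overflow the 16-bit samples, and the frames are then summed into the mixed frame. Below is a minimal standalone sketch of those two operations on raw interleaved int16_t buffers; HalveSamples and AddSamplesSaturated are hypothetical names, not the actual AudioFrameOperations implementation, and the clamping in the add is an assumption about how the real helper guards against wrap-around.

// Hypothetical sketch, not the real AudioFrameOperations code.
#include <cstddef>
#include <cstdint>
#include <limits>

// Halve every sample. Relies on the usual arithmetic-shift behavior for
// negative values on two's-complement targets.
void HalveSamples(int16_t* data, size_t num_samples) {
  for (size_t i = 0; i < num_samples; ++i) {
    data[i] = static_cast<int16_t>(data[i] >> 1);
  }
}

// Mix |src| into |dst|, clamping the sum to the int16_t range so the mix
// cannot wrap around.
void AddSamplesSaturated(const int16_t* src, int16_t* dst, size_t num_samples) {
  for (size_t i = 0; i < num_samples; ++i) {
    int32_t sum = static_cast<int32_t>(dst[i]) + static_cast<int32_t>(src[i]);
    if (sum > std::numeric_limits<int16_t>::max())
      sum = std::numeric_limits<int16_t>::max();
    if (sum < std::numeric_limits<int16_t>::min())
      sum = std::numeric_limits<int16_t>::min();
    dst[i] = static_cast<int16_t>(sum);
  }
}

The 32-bit accumulator is what makes the clamping safe; adding two int16_t values in 16-bit arithmetic could wrap before the comparison.
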
@@ -284,9 +284,10 @@ void AudioConferenceMixerImpl::Process() {
         std::max(MaxNumChannels(&additionalFramesList),
                  MaxNumChannels(&rampOutList)));
-    mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
-                            AudioFrame::kNormalSpeech,
-                            AudioFrame::kVadPassive, num_mixed_channels);
+    AudioFrameOperations::UpdateFrame(
+        -1, _timeStamp, NULL, 0, _outputFrequency,
+        AudioFrame::kNormalSpeech, AudioFrame::kVadPassive,
+        num_mixed_channels, mixedAudio);
     _timeStamp += static_cast<uint32_t>(_sampleSize);
@@ -303,7 +304,7 @@ void AudioConferenceMixerImpl::Process() {
     if(mixedAudio->samples_per_channel_ == 0) {
         // Nothing was mixed, set the audio samples to silence.
         mixedAudio->samples_per_channel_ = _sampleSize;
-        mixedAudio->Mute();
+        AudioFrameOperations::Mute(mixedAudio);
     } else {
         // Only call the limiter if we have something to mix.
         LimitMixedAudio(mixedAudio);
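
For context on the Mute() replacement above, writing silence when nothing was mixed amounts to zero-filling the frame's sample buffer for its current size. A hypothetical standalone equivalent (MuteSamples is not a WebRTC API):

// Hypothetical sketch: muting a frame is a zero-fill of the interleaved
// sample buffer for the current frame size.
#include <cstddef>
#include <cstdint>
#include <cstring>

void MuteSamples(int16_t* data, size_t samples_per_channel,
                 size_t num_channels) {
  std::memset(data, 0, samples_per_channel * num_channels * sizeof(int16_t));
}
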
@@ -922,7 +923,7 @@ bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
     //
     // Instead we double the frame (with addition since left-shifting a
     // negative value is undefined).
-    *mixedAudio += *mixedAudio;
+    AudioFrameOperations::AddFrames(*mixedAudio, mixedAudio);
     if(error != _limiter->kNoError) {
         WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
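
The comment in LimitMixedAudio explains why the frame is doubled by adding it to itself: left-shifting a negative signed value is undefined behavior in C++ (prior to C++20), whereas addition in a wider type is well defined and can be clamped. A hedged single-sample illustration (DoubleSampleSaturated is hypothetical, not part of the WebRTC API):

// Hypothetical sketch: double a sample by self-addition in 32-bit arithmetic,
// then clamp back to the int16_t range. Avoids the UB of shifting a negative
// signed value left.
#include <cstdint>

int16_t DoubleSampleSaturated(int16_t sample) {
  const int32_t doubled = static_cast<int32_t>(sample) + sample;
  if (doubled > INT16_MAX) return INT16_MAX;
  if (doubled < INT16_MIN) return INT16_MIN;
  return static_cast<int16_t>(doubled);
}
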