Index: webrtc/modules/audio_coding/acm2/audio_coding_module.cc
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
index daeea3577e23a854d26f9b702c6d84f1c5ec989d..a06bb1e79fe423666b055c5877a691bd0d4a9f41 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -335,8 +335,13 @@ int DownMix(const AudioFrame& frame,
   if (length_out_buff < frame.samples_per_channel_) {
     return -1;
   }
-  for (size_t n = 0; n < frame.samples_per_channel_; ++n)
-    out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
+  if (!frame.muted()) {
+    const int16_t* frame_data = frame.data();
+    for (size_t n = 0; n < frame.samples_per_channel_; ++n)
+      out_buff[n] = (frame_data[2 * n] + frame_data[2 * n + 1]) >> 1;
+  } else {
+    memset(out_buff, 0, 2 * frame.samples_per_channel_);
+  }
   return 0;
 }
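[Note] The new muted branch never reads sample memory for silent frames: it zero-fills the mono output instead (memset counts bytes, hence the factor 2 for int16_t samples), while an unmuted frame averages each interleaved L/R pair. A minimal standalone sketch of the same arithmetic, with the AudioFrame dependency replaced by plain arguments (illustrative names, not WebRTC API):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Average interleaved stereo into mono: out[n] = (L + R) / 2.
    // Both operands are promoted to int before the addition, so the
    // sum of two int16_t values cannot overflow before the shift.
    void DownMixStereo(const int16_t* interleaved,
                       size_t samples_per_channel,
                       bool muted,
                       int16_t* out) {
      if (muted) {
        // Muted frames are defined to be silence; skip the reads.
        memset(out, 0, samples_per_channel * sizeof(int16_t));
        return;
      }
      for (size_t n = 0; n < samples_per_channel; ++n) {
        out[n] = static_cast<int16_t>(
            (interleaved[2 * n] + interleaved[2 * n + 1]) >> 1);
      }
    }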
@@ -345,9 +350,10 @@ int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
   if (length_out_buff < frame.samples_per_channel_) {
     return -1;
   }
+  const int16_t* frame_data = frame.data();
   for (size_t n = frame.samples_per_channel_; n != 0; --n) {
     size_t i = n - 1;
-    int16_t sample = frame.data_[i];
+    int16_t sample = frame_data[i];
     out_buff[2 * i + 1] = sample;
     out_buff[2 * i] = sample;
   }
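[Note] UpMix walks the buffer from the last sample down to the first. That order makes the mono-to-stereo widening safe even if |out_buff| aliases the frame's own buffer: sample i is read before positions 2*i and 2*i+1 are written, and 2*i >= i, so no source sample is overwritten before it is read. A self-contained sketch of the pattern (illustrative names):

    #include <cstddef>
    #include <cstdint>

    // Duplicate each mono sample into an interleaved stereo pair,
    // iterating backwards so the expansion also works in place.
    void UpMixMono(const int16_t* mono, size_t samples_per_channel,
                   int16_t* out) {
      for (size_t n = samples_per_channel; n != 0; --n) {
        const size_t i = n - 1;
        const int16_t sample = mono[i];
        out[2 * i + 1] = sample;
        out[2 * i] = sample;
      }
    }

Whether any caller actually relies on in-place widening here is not visible in this hunk; the backwards loop simply keeps that option safe.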
@@ -732,12 +738,13 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
   // When adding data to encoders this pointer is pointing to an audio buffer
   // with correct number of channels.
-  const int16_t* ptr_audio = ptr_frame->data_;
+  const int16_t* ptr_audio = ptr_frame->data();
   // For pushing data to primary, point the |ptr_audio| to correct buffer.
   if (!same_num_channels)
     ptr_audio = input_data->buffer;
+  // TODO(yujo): Skip encode of muted frames.
[Inline review thread on the TODO above]

hlundin-webrtc (2017/03/16 14:47:48):
> See above: I don't think we will have muted input

yujo (2017/03/16 23:37:21):
> Muted input is actually my motivation for this who[...]

hlundin-webrtc (2017/03/17 14:29:38):
> Oh! Then I see. I thought you were only in it to sh[...]
   input_data->input_timestamp = ptr_frame->timestamp_;
   input_data->audio = ptr_audio;
   input_data->length_per_channel = ptr_frame->samples_per_channel_;
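[Note] The thread above gives the motivation for the data()/mutable_data() split: a frame can be marked muted without carrying zeroed samples, so readers get silence cheaply, and taking a writable pointer has the side effect of materializing (zeroing) the buffer. A sketch of what such an accessor pair could look like; this is an assumption drawn from the diff, not the actual webrtc::AudioFrame implementation:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Hypothetical frame with a cheap "muted" state (not the real API).
    class Frame {
     public:
      static const size_t kMaxSamples = 960;  // e.g. 10 ms stereo @ 48 kHz

      // Readers of a muted frame see silence without the frame ever
      // having to zero its own buffer.
      const int16_t* data() const { return muted_ ? zero_data_ : data_; }

      // Taking a writable pointer un-mutes the frame; the buffer is
      // cleared first so stale samples cannot leak into the output.
      int16_t* mutable_data() {
        if (muted_) {
          memset(data_, 0, sizeof(data_));
          muted_ = false;
        }
        return data_;
      }

      void Mute() { muted_ = true; }
      bool muted() const { return muted_; }

     private:
      static const int16_t zero_data_[kMaxSamples];
      int16_t data_[kMaxSamples];
      bool muted_ = true;
    };

    const int16_t Frame::zero_data_[Frame::kMaxSamples] = {0};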
@@ -751,6 +758,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
 // encoders has to be mono for down-mix to take place.
 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
 // is required, |*ptr_out| points to |in_frame|.
+// TODO(yujo): Make this more efficient for muted frames.
 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                                const AudioFrame** ptr_out) {
   const bool resample =
@@ -800,13 +808,12 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
   *ptr_out = &preprocess_frame_;
   preprocess_frame_.num_channels_ = in_frame.num_channels_;
   int16_t audio[WEBRTC_10MS_PCM_AUDIO];
-  const int16_t* src_ptr_audio = in_frame.data_;
-  int16_t* dest_ptr_audio = preprocess_frame_.data_;
+  const int16_t* src_ptr_audio = in_frame.data();
   if (down_mix) {
     // If a resampling is required the output of a down-mix is written into a
     // local buffer, otherwise, it will be written to the output frame.
-    if (resample)
-      dest_ptr_audio = audio;
+    int16_t* dest_ptr_audio = resample ?
+        audio : preprocess_frame_.mutable_data();
     if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
       return -1;
     preprocess_frame_.num_channels_ = 1;
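[Note] The refactor above scopes |dest_ptr_audio| into the branch that uses it and fetches preprocess_frame_.mutable_data() only when the down-mix result really lands in the output frame. Under the muted-frame design, mutable_data() is not a free getter (it may clear the buffer and flip the muted flag), so the writable pointer should be taken only on the path that writes. A condensed sketch of the routing decision, reusing the hypothetical Frame from the sketch above:

    #include <cstdint>

    // The mono mix is only an intermediate product when a resample
    // step follows, so it goes to the local scratch buffer in that
    // case and straight into the output frame otherwise. The writable
    // pointer is taken lazily because mutable_data() may clear the
    // buffer and un-mute the frame.
    int16_t* SelectDownMixDest(bool resample, int16_t* scratch, Frame* out) {
      return resample ? scratch : out->mutable_data();
    }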
@@ -820,7 +827,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
   // If it is required, we have to do a resampling.
   if (resample) {
     // The result of the resampler is written to output frame.
-    dest_ptr_audio = preprocess_frame_.data_;
+    int16_t* dest_ptr_audio = preprocess_frame_.mutable_data();
     int samples_per_channel = resampler_.Resample10Msec(
         src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(),