Index: webrtc/modules/audio_coding/acm2/audio_coding_module.cc
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
index daeea3577e23a854d26f9b702c6d84f1c5ec989d..3e0de4a8940c3e42f4178a8cb21b8d498bc84a0f 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -332,24 +332,36 @@ bool IsCodecCN(const CodecInst& codec) {
 int DownMix(const AudioFrame& frame,
             size_t length_out_buff,
             int16_t* out_buff) {
-  if (length_out_buff < frame.samples_per_channel_) {
+  if (frame.num_channels_ != 2 ||
the sun, 2017/03/23 19:34:38:
Make this a DCHECK instead - it doesn't look like…

yujo, 2017/03/24 07:30:14:
Here's one major place where the result is used: h…

the sun, 2017/03/24 08:05:58:
Well, tracing that upwards:
https://cs.chromium.or…

yujo, 2017/03/24 17:10:32:
Done. I meant that there was a difference in behav…
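As context for the thread above, a minimal sketch of what the DCHECK variant could look like, assuming the 2017-era header locations and the AudioFrame accessors this CL introduces; the landed patchset may differ, and the same change applies to UpMix below:

#include <string.h>

#include "webrtc/base/checks.h"
#include "webrtc/modules/include/module_common_types.h"

// Sketch only: treat bad arguments as programming errors. RTC_DCHECKs
// fire in debug builds and compile away in release builds, so callers
// must guarantee the preconditions instead of handling a -1 return.
int DownMix(const AudioFrame& frame,
            size_t length_out_buff,
            int16_t* out_buff) {
  RTC_DCHECK_EQ(frame.num_channels_, 2u);
  RTC_DCHECK_GE(length_out_buff, frame.samples_per_channel_);
  if (!frame.muted()) {
    const int16_t* frame_data = frame.data();
    for (size_t n = 0; n < frame.samples_per_channel_; ++n)
      out_buff[n] = (frame_data[2 * n] + frame_data[2 * n + 1]) >> 1;
  } else {
    memset(out_buff, 0, frame.samples_per_channel_ * sizeof(int16_t));
  }
  return 0;
}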
+      length_out_buff < frame.samples_per_channel_) {
     return -1;
   }
-  for (size_t n = 0; n < frame.samples_per_channel_; ++n)
-    out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
+  if (!frame.muted()) {
+    const int16_t* frame_data = frame.data();
+    for (size_t n = 0; n < frame.samples_per_channel_; ++n)
+      out_buff[n] = (frame_data[2 * n] + frame_data[2 * n + 1]) >> 1;
the sun, 2017/03/23 19:34:38:
out of scope for this CL, but IIUC this risks wrap…

yujo, 2017/03/24 07:30:14:
Fixed.
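The "Fixed." reply indicates the averaging was reworked in a later patchset. A hedged sketch of that fix (the helper name is invented for illustration): on all current WebRTC targets the implicit promotion to int already makes the sum 32-bit, but the explicit casts document the intent and stay safe even where int is only 16 bits wide.

#include <stddef.h>
#include <stdint.h>

// Average interleaved stereo down to mono without risking wraparound:
// widen each sample to int32_t before adding, then narrow. The mean of
// two in-range int16_t values is itself in range, so the final cast is
// lossless.
void AverageStereoToMono(const int16_t* frame_data,
                         size_t samples_per_channel,
                         int16_t* out_buff) {
  for (size_t n = 0; n < samples_per_channel; ++n) {
    out_buff[n] = static_cast<int16_t>(
        (static_cast<int32_t>(frame_data[2 * n]) +
         static_cast<int32_t>(frame_data[2 * n + 1])) >> 1);
  }
}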
+  } else {
+    memset(out_buff, 0, frame.samples_per_channel_ * sizeof(int16_t));
+  }
   return 0;
 }
 
 // Mono-to-stereo can be used as in-place.
 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
-  if (length_out_buff < frame.samples_per_channel_) {
+  if (frame.num_channels_ != 1 ||
the sun, 2017/03/23 19:34:38:
DCHECK

yujo, 2017/03/24 17:10:32:
Done.
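Since the "can be used as in-place" comment on UpMix is easy to miss, a hypothetical caller (kMaxDataSizeSamples and mutable_data() are real AudioFrame members, but this exact call site is invented for illustration):

// Mono-to-stereo up-mix inside the frame's own buffer. This is safe
// because UpMix fills out_buff from the highest index downwards: sample
// i is read before slots 2*i and 2*i+1 are written, and 2*i >= i.
// Note that mutable_data() unmutes the frame first, so the muted() fast
// path in UpMix only ever triggers for non-aliased output buffers.
AudioFrame frame;
// ... fill |frame| with mono audio ...
if (UpMix(frame, AudioFrame::kMaxDataSizeSamples, frame.mutable_data()) == 0)
  frame.num_channels_ = 2;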
+      length_out_buff < 2 * frame.samples_per_channel_) {
     return -1;
   }
-  for (size_t n = frame.samples_per_channel_; n != 0; --n) {
-    size_t i = n - 1;
-    int16_t sample = frame.data_[i];
-    out_buff[2 * i + 1] = sample;
-    out_buff[2 * i] = sample;
+  if (!frame.muted()) {
+    const int16_t* frame_data = frame.data();
+    for (size_t n = frame.samples_per_channel_; n != 0; --n) {
+      size_t i = n - 1;
+      int16_t sample = frame_data[i];
+      out_buff[2 * i + 1] = sample;
+      out_buff[2 * i] = sample;
+    }
+  } else {
+    memset(out_buff, 0, 2 * frame.samples_per_channel_ * sizeof(int16_t));
   }
   return 0;
 }
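For readers new to this CL, a short sketch of the mute-aware accessors that the data_ -> data()/mutable_data() migration relies on (a hedged summary of the semantics this change introduces, not the header's documentation):

AudioFrame frame;
frame.Mute();                        // frame.muted() is now true.
const int16_t* ro = frame.data();    // Read-only view. For a muted frame
                                     // this points at an all-zero buffer,
                                     // so no per-frame zero-fill is paid.
int16_t* rw = frame.mutable_data();  // Clears the mute flag, zero-fills
                                     // the buffer once, and returns a
                                     // writable pointer; muted() is now
                                     // false.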
@@ -732,12 +744,13 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
   // When adding data to encoders this pointer is pointing to an audio buffer
   // with correct number of channels.
-  const int16_t* ptr_audio = ptr_frame->data_;
+  const int16_t* ptr_audio = ptr_frame->data();
 
   // For pushing data to primary, point the |ptr_audio| to correct buffer.
   if (!same_num_channels)
     ptr_audio = input_data->buffer;
 
+  // TODO(yujo): Skip encode of muted frames.
   input_data->input_timestamp = ptr_frame->timestamp_;
   input_data->audio = ptr_audio;
   input_data->length_per_channel = ptr_frame->samples_per_channel_;
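The TODO above is future work; purely as a hypothetical shape of it (nothing like this exists in the patch, and EncodeMutedFrame is an invented name):

// Invented illustration of the TODO, not part of this CL: once the
// encoder stack can be told about silence directly, a muted frame could
// bypass the staging and per-sample work above entirely, e.g. by
// emitting a DTX/comfort-noise frame or reusing a cached silent packet.
if (ptr_frame->muted())
  return EncodeMutedFrame(ptr_frame->samples_per_channel_);  // Invented.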
@@ -751,6 +764,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
 // encoders has to be mono for down-mix to take place.
 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
 // is required, |*ptr_out| points to |in_frame|.
+// TODO(yujo): Make this more efficient for muted frames.
 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                                const AudioFrame** ptr_out) {
   const bool resample =
@@ -800,13 +814,12 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
   *ptr_out = &preprocess_frame_;
   preprocess_frame_.num_channels_ = in_frame.num_channels_;
   int16_t audio[WEBRTC_10MS_PCM_AUDIO];
-  const int16_t* src_ptr_audio = in_frame.data_;
-  int16_t* dest_ptr_audio = preprocess_frame_.data_;
+  const int16_t* src_ptr_audio = in_frame.data();
   if (down_mix) {
     // If a resampling is required the output of a down-mix is written into a
     // local buffer, otherwise, it will be written to the output frame.
-    if (resample)
-      dest_ptr_audio = audio;
+    int16_t* dest_ptr_audio =
+        resample ? audio : preprocess_frame_.mutable_data();
     if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
       return -1;
     preprocess_frame_.num_channels_ = 1;
@@ -820,7 +833,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
   // If it is required, we have to do a resampling.
   if (resample) {
     // The result of the resampler is written to output frame.
-    dest_ptr_audio = preprocess_frame_.data_;
+    int16_t* dest_ptr_audio = preprocess_frame_.mutable_data();
 
     int samples_per_channel = resampler_.Resample10Msec(
         src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(),