Index: webrtc/modules/audio_processing/audio_buffer.cc
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index 04dcaea799d60af6bbc48d899e9ded8134d6ce03..cc8375e449b1e300b96d616c7d6cd782e4e618e8 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -436,20 +436,28 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
 }
 
 void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
-  assert(proc_num_frames_ == output_num_frames_);
-  assert(num_channels_ == num_input_channels_);
-  assert(frame->num_channels_ == num_channels_);
-  assert(frame->samples_per_channel_ == proc_num_frames_);
   frame->vad_activity_ = activity_;
-
   if (!data_changed) {
     return;
  }
 
-  Interleave(data_->ibuf()->channels(),
-             proc_num_frames_,
-             num_channels_,
-             frame->data_);
+  assert(proc_num_frames_ == output_num_frames_);
+  assert(frame->num_channels_ == num_channels_ || num_channels_ == 1);
+  assert(frame->samples_per_channel_ == proc_num_frames_);
+
+  if (frame->num_channels_ == num_channels_) {
+    Interleave(data_->ibuf()->channels(), proc_num_frames_, num_channels_,
+               frame->data_);
+  } else {
+    // Copy the single AudioBuffer channel into all AudioFrame channels.
+    rtc::scoped_ptr<int16_t*[]> channel_ptr_copies(
+        new int16_t*[frame->num_channels_]);
+    for (int i = 0; i < frame->num_channels_; ++i) {
+      channel_ptr_copies[i] = data_->ibuf()->channels()[0];
+    }
+    Interleave(channel_ptr_copies.get(), proc_num_frames_,
+               frame->num_channels_, frame->data_);
+  }
 }
 
 void AudioBuffer::CopyLowPassToReference() {
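
For context, here is a minimal, self-contained sketch of the mono-to-multichannel path taken by the new else branch: the single processed channel pointer is duplicated once per output channel and then written into an interleaved frame buffer. The Interleave helper below is a local stand-in assumed to mirror the semantics of the helper called in the patch; the channel counts and sample values are illustrative only.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Local stand-in: writes num_channels deinterleaved channel pointers into one
// interleaved buffer of samples_per_channel * num_channels samples.
void Interleave(const int16_t* const* deinterleaved,
                size_t samples_per_channel,
                size_t num_channels,
                int16_t* interleaved) {
  for (size_t ch = 0; ch < num_channels; ++ch) {
    const int16_t* channel = deinterleaved[ch];
    size_t out_index = ch;
    for (size_t i = 0; i < samples_per_channel; ++i) {
      interleaved[out_index] = channel[i];
      out_index += num_channels;
    }
  }
}

int main() {
  const size_t kSamplesPerChannel = 4;  // Illustrative; real frames are larger.
  const size_t kFrameChannels = 2;      // AudioFrame channel count (stereo).
  int16_t mono[kSamplesPerChannel] = {10, 20, 30, 40};  // One processed channel.

  // Duplicate the single channel pointer once per output channel, as the patch
  // does with channel_ptr_copies, then interleave into the frame's data.
  std::vector<const int16_t*> channel_ptr_copies(kFrameChannels, mono);
  std::vector<int16_t> frame_data(kSamplesPerChannel * kFrameChannels);
  Interleave(channel_ptr_copies.data(), kSamplesPerChannel, kFrameChannels,
             frame_data.data());

  for (int16_t sample : frame_data)
    std::printf("%d ", sample);  // Prints: 10 10 20 20 30 30 40 40
  std::printf("\n");
  return 0;
}

With two output channels each mono sample appears back-to-back in the interleaved output, which is why the frame's channel count, not the buffer's, determines the interleaving stride in this branch.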