Index: webrtc/modules/audio_processing/audio_buffer.cc
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index 579a5c24904aab28a857f9d7beca753888825404..5f90e0f54776f9e34c0e1c4f7ab918367dcd8000 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -394,13 +394,14 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
   } else {
     deinterleaved = input_buffer_->ibuf()->channels();
   }
+  // TODO(yujo): handle muted frames more efficiently.
   if (num_proc_channels_ == 1) {
     // Downmix and deinterleave simultaneously.
-    DownmixInterleavedToMono(frame->data_, input_num_frames_,
+    DownmixInterleavedToMono(frame->data(), input_num_frames_,
                              num_input_channels_, deinterleaved[0]);
   } else {
     RTC_DCHECK_EQ(num_proc_channels_, num_input_channels_);
-    Deinterleave(frame->data_,
+    Deinterleave(frame->data(),
                  input_num_frames_,
                  num_proc_channels_,
                  deinterleaved);
@@ -437,12 +438,13 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
     data_ptr = output_buffer_.get();
   }
+  // TODO(yujo): handle muted frames more efficiently.
   if (frame->num_channels_ == num_channels_) {
     Interleave(data_ptr->ibuf()->channels(), output_num_frames_, num_channels_,
-               frame->data_);
+               frame->mutable_data());
   } else {
     UpmixMonoToInterleaved(data_ptr->ibuf()->channels()[0], output_num_frames_,
-                           frame->num_channels_, frame->mutable_data());
   }
 }
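
Note: the switch from the raw data_ member to data()/mutable_data() and the new TODOs go together: accessors let AudioFrame track a muted flag instead of forcing every caller to touch raw samples. The patch does not include AudioFrame itself, so the snippet below is only a minimal sketch of that read/write split, with a simplified class and an assumed buffer capacity; it is illustrative, not the actual WebRTC implementation.

// Sketch of the accessor split these call sites now rely on. The buffer
// size and helper names are assumptions for illustration only.
#include <cstddef>
#include <cstdint>
#include <cstring>

class AudioFrame {
 public:
  static constexpr size_t kMaxDataSizeSamples = 3840;  // assumed capacity

  // Read path (DeinterleaveFrom above): a muted frame exposes a shared
  // all-zero buffer instead of stale samples.
  const int16_t* data() const { return muted_ ? empty_data() : data_; }

  // Write path (InterleaveTo above): unmute and zero the buffer before
  // handing out a writable pointer, so writers start from silence.
  int16_t* mutable_data() {
    if (muted_) {
      std::memset(data_, 0, sizeof(data_));
      muted_ = false;
    }
    return data_;
  }

  void Mute() { muted_ = true; }
  bool muted() const { return muted_; }

 private:
  // Shared zeroed buffer returned for all muted frames.
  static const int16_t* empty_data() {
    static const int16_t kZeroed[kMaxDataSizeSamples] = {};
    return kZeroed;
  }

  int16_t data_[kMaxDataSizeSamples];
  bool muted_ = true;
};

Under a split like this, the TODOs mark the remaining inefficiency: DeinterleaveFrom() still downmixes or deinterleaves a muted frame's zeros sample by sample, and InterleaveTo() zeroes and rewrites the whole buffer, where both could in principle short-circuit on frame->muted().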