Index: webrtc/audio/audio_transport_proxy.cc
diff --git a/webrtc/audio/audio_transport_proxy.cc b/webrtc/audio/audio_transport_proxy.cc
index 4d2f9e30e1c217bae2381b3f75ee27b8aeb5fe85..d6ce9397c71fdd2d597f36754aef0deb45f6b3fa 100644
--- a/webrtc/audio/audio_transport_proxy.cc
+++ b/webrtc/audio/audio_transport_proxy.cc
@@ -25,9 +25,11 @@ int Resample(const AudioFrame& frame,
   resampler->InitializeIfNeeded(frame.sample_rate_hz_, destination_sample_rate,
                                 number_of_channels);
+  // TODO(yujo): make resampler take an AudioFrame, and add special case
+  // handling of muted frames.
[Review comment] the sun, 2017/03/23 19:34:38:
  Note: That would mean the resampler needs to maint…
[Review comment] yujo, 2017/03/24 07:30:14:
  I'm not clear on how the resampler works, but mayb…
   return resampler->Resample(
-      frame.data_, frame.samples_per_channel_ * number_of_channels, destination,
-      number_of_channels * target_number_of_samples_per_channel);
+      frame.data(), frame.samples_per_channel_ * number_of_channels,
+      destination, number_of_channels * target_number_of_samples_per_channel);
 }
 }  // namespace
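For context on the TODO in the hunk above (and the review exchange attached to it), here is a minimal sketch of what a resampler path that consumes an AudioFrame directly and special-cases muted frames might look like. The ResampleFrame() name, the AudioFrame::muted() accessor, the zero-fill behavior, and the include paths are assumptions for illustration only, not part of this CL:

// Hypothetical helper sketching the TODO above; not the checked-in API.
// Include paths reflect the tree layout at the time of this CL (assumption).
#include <cstdint>
#include <cstring>

#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/modules/include/module_common_types.h"  // webrtc::AudioFrame

int ResampleFrame(const webrtc::AudioFrame& frame,
                  int destination_sample_rate,
                  webrtc::PushResampler<int16_t>* resampler,
                  int16_t* destination) {
  // Each AudioFrame carries 10 ms of audio, so the target length per channel
  // is destination_sample_rate / 100.
  const size_t target_samples_per_channel = destination_sample_rate / 100;
  const size_t number_of_channels = frame.num_channels_;
  resampler->InitializeIfNeeded(frame.sample_rate_hz_, destination_sample_rate,
                                number_of_channels);
  if (frame.muted()) {
    // Assumption: a muted frame is defined to be all zeros, so skip the
    // resampling arithmetic and zero-fill the destination buffer instead.
    const size_t out_samples = number_of_channels * target_samples_per_channel;
    std::memset(destination, 0, out_samples * sizeof(int16_t));
    return static_cast<int>(out_samples);
  }
  return resampler->Resample(
      frame.data(), frame.samples_per_channel_ * number_of_channels,
      destination, number_of_channels * target_samples_per_channel);
}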
@@ -77,7 +79,7 @@ int32_t AudioTransportProxy::NeedMorePlayData(const size_t nSamples,
   // 100 = 1 second / data duration (10 ms).
   RTC_DCHECK_EQ(nSamples * 100, samplesPerSec);
   RTC_DCHECK_LE(nBytesPerSample * nSamples * nChannels,
-                sizeof(AudioFrame::data_));
+                AudioFrame::kMaxDataSizeBytes);
   mixer_->Mix(nChannels, &mixed_frame_);
   *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
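The two checks in the hunk above encode the 10 ms framing assumption of the playout callback. A worked example with illustrative constants (48 kHz stereo, 16-bit PCM; the numbers are assumptions, not taken from the CL):

// Standalone arithmetic mirroring the RTC_DCHECKs in NeedMorePlayData().
#include <cstddef>

constexpr size_t kSamplesPerSec = 48000;
constexpr size_t kNSamples = kSamplesPerSec / 100;  // 10 ms => 480 samples.
constexpr size_t kNChannels = 2;
constexpr size_t kNBytesPerSample = 2;  // 16-bit PCM.

// RTC_DCHECK_EQ(nSamples * 100, samplesPerSec): each callback delivers
// exactly 10 ms of audio, i.e. 1/100 of a second.
static_assert(kNSamples * 100 == kSamplesPerSec, "10 ms per callback");

// RTC_DCHECK_LE(nBytesPerSample * nSamples * nChannels,
//               AudioFrame::kMaxDataSizeBytes): the 10 ms block must fit in
// the fixed-size AudioFrame buffer. Here that block is 2 * 480 * 2 bytes.
static_assert(kNBytesPerSample * kNSamples * kNChannels == 1920,
              "10 ms of 48 kHz stereo 16-bit PCM is 1920 bytes");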
@@ -120,7 +122,7 @@ void AudioTransportProxy::PullRenderData(int bits_per_sample,
   // 8 = bits per byte.
   RTC_DCHECK_LE(bits_per_sample / 8 * number_of_frames * number_of_channels,
-                sizeof(AudioFrame::data_));
+                AudioFrame::kMaxDataSizeBytes);
   mixer_->Mix(number_of_channels, &mixed_frame_);
   *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
   *ntp_time_ms = mixed_frame_.ntp_time_ms_;
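For reference, a minimal sketch of the accessor-based AudioFrame usage this CL migrates callers to: reads go through data() and the buffer capacity is the named constant kMaxDataSizeBytes rather than sizeof(AudioFrame::data_). The CopyOutput() helper, the memcpy pattern, the mutable_data() mention, and the include path are assumptions for illustration:

// Sketch of post-migration caller code; not copied from the CL.
#include <cassert>
#include <cstdint>
#include <cstring>

#include "webrtc/modules/include/module_common_types.h"  // webrtc::AudioFrame

void CopyOutput(const webrtc::AudioFrame& mixed_frame, int16_t* audio_data) {
  const size_t num_bytes = mixed_frame.samples_per_channel_ *
                           mixed_frame.num_channels_ * sizeof(int16_t);
  // The CL expresses its bounds checks as RTC_DCHECK_LE(...,
  // AudioFrame::kMaxDataSizeBytes); a plain assert keeps this sketch
  // self-contained.
  assert(num_bytes <= webrtc::AudioFrame::kMaxDataSizeBytes);
  // data_ is no longer touched directly: data() gives read access, and
  // writes would go through the (assumed) mutable_data() accessor.
  std::memcpy(audio_data, mixed_frame.data(), num_bytes);
}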