Index: webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
index ae5a04f25e05aebd80ac21d6b222929e1b3ba896..a21b32b9211d84b5dea99f66593c537b58d39852 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
@@ -344,7 +344,7 @@ int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
 
 int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   enum NetEqOutputType type;
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   bool return_silence = false;
 
@@ -394,7 +394,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   }
 
   // NetEq always returns 10 ms of audio.
-  current_sample_rate_hz_ = samples_per_channel * 100;
+  current_sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
 
   // Update if resampling is required.
   bool need_resampling = (desired_freq_hz != -1) &&
@@ -403,18 +403,19 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   if (need_resampling && !resampled_last_output_frame_) {
     // Prime the resampler with the last frame.
     int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
-    samples_per_channel =
+    int samples_per_channel_int =
         resampler_.Resample10Msec(last_audio_buffer_.get(),
                                   current_sample_rate_hz_,
                                   desired_freq_hz,
                                   num_channels,
                                   AudioFrame::kMaxDataSizeSamples,
                                   temp_output);
-    if (samples_per_channel < 0) {
+    if (samples_per_channel_int < 0) {
       LOG(LERROR) << "AcmReceiver::GetAudio - "
                      "Resampling last_audio_buffer_ failed.";
       return -1;
     }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
   }
 
   // The audio in |audio_buffer_| is tansferred to |audio_frame_| below, either
@@ -422,17 +423,18 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   // TODO(henrik.lundin) Glitches in the output may appear if the output rate
   // from NetEq changes. See WebRTC issue 3923.
   if (need_resampling) {
-    samples_per_channel =
+    int samples_per_channel_int =
         resampler_.Resample10Msec(audio_buffer_.get(),
                                   current_sample_rate_hz_,
                                   desired_freq_hz,
                                   num_channels,
                                   AudioFrame::kMaxDataSizeSamples,
                                   audio_frame->data_);
-    if (samples_per_channel < 0) {
+    if (samples_per_channel_int < 0) {
       LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
       return -1;
     }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
     resampled_last_output_frame_ = true;
   } else {
     resampled_last_output_frame_ = false;
@@ -448,7 +450,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
 
   audio_frame->num_channels_ = num_channels;
   audio_frame->samples_per_channel_ = samples_per_channel;
-  audio_frame->sample_rate_hz_ = samples_per_channel * 100;
+  audio_frame->sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
 
   // Should set |vad_activity| before calling SetAudioFrameActivityAndType().
   audio_frame->vad_activity_ = previous_audio_activity_;
@@ -784,10 +786,11 @@ bool AcmReceiver::GetSilence(int desired_sample_rate_hz, AudioFrame* frame) {
     frame->sample_rate_hz_ = current_sample_rate_hz_;
   }
 
-  frame->samples_per_channel_ = frame->sample_rate_hz_ / 100;  // Always 10 ms.
+  frame->samples_per_channel_ =
+      static_cast<size_t>(frame->sample_rate_hz_ / 100);  // Always 10 ms.
   frame->speech_type_ = AudioFrame::kCNG;
   frame->vad_activity_ = AudioFrame::kVadPassive;
-  int samples = frame->samples_per_channel_ * frame->num_channels_;
+  size_t samples = frame->samples_per_channel_ * frame->num_channels_;
   memset(frame->data_, 0, samples * sizeof(int16_t));
   return true;
 }
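The two resampling hunks above share one idiom: Resample10Msec() still returns a signed int (negative on failure), so the change keeps a temporary int for the error check and converts to the now-unsigned samples_per_channel only after the value is known to be non-negative. A minimal standalone sketch of that idiom, outside the WebRTC codebase (ResampleOrFail and GetSamplesPerChannel are hypothetical stand-ins, not WebRTC APIs):

#include <cstddef>
#include <iostream>

// Hypothetical stand-in for a call like resampler_.Resample10Msec():
// returns samples per channel on success, or a negative value on error.
int ResampleOrFail(bool fail) {
  return fail ? -1 : 480;  // e.g. 10 ms of audio at 48 kHz
}

// Returns true on success and writes the unsigned sample count to |out|.
bool GetSamplesPerChannel(bool fail, size_t* out) {
  // Keep the signed return value so the error check stays meaningful;
  // an unsigned value compared with < 0 would never be true.
  int samples_int = ResampleOrFail(fail);
  if (samples_int < 0)
    return false;
  // Only after the check is the conversion to size_t safe.
  *out = static_cast<size_t>(samples_int);
  return true;
}

int main() {
  size_t samples = 0;
  if (GetSamplesPerChannel(false, &samples))
    std::cout << "samples_per_channel = " << samples << "\n";  // prints 480
  if (!GetSamplesPerChannel(true, &samples))
    std::cout << "resampling failed\n";
  return 0;
}

The same reasoning explains the static_cast<int>(samples_per_channel * 100) lines: sample_rate_hz_ stays an int, so the unsigned product is cast back explicitly to avoid an implicit-conversion warning.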