Index: webrtc/modules/audio_coding/neteq/time_stretch.cc |
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch.cc b/webrtc/modules/audio_coding/neteq/time_stretch.cc |
index 5577cd2ecbbd80886e7cda49a9d050417bc7e407..b129b4eed79c722b1959dc326812060bdfa877cb 100644 |
--- a/webrtc/modules/audio_coding/neteq/time_stretch.cc |
+++ b/webrtc/modules/audio_coding/neteq/time_stretch.cc |
@@ -23,9 +23,9 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input, |
size_t input_len, |
bool fast_mode, |
AudioMultiVector* output, |
- int16_t* length_change_samples) { |
+ size_t* length_change_samples) { |
// Pre-calculate common multiplication with |fs_mult_|. |
- int fs_mult_120 = fs_mult_ * 120; // Corresponds to 15 ms. |
+ size_t fs_mult_120 = fs_mult_ * 120; // Corresponds to 15 ms. |
hlundin-webrtc
2015/08/10 11:30:02
int
|
const int16_t* signal; |
rtc::scoped_ptr<int16_t[]> signal_array; |
@@ -48,8 +48,7 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input, |
} |
// Find maximum absolute value of input signal. |
- max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, |
- static_cast<int>(signal_len)); |
+ max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, signal_len); |
// Downsample to 4 kHz sample rate and calculate auto-correlation. |
DspHelper::DownsampleTo4kHz(signal, signal_len, kDownsampledLen, |
@@ -58,13 +57,12 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input, |
AutoCorrelation(); |
// Find the strongest correlation peak. |
- static const int kNumPeaks = 1; |
- int peak_index; |
+ static const size_t kNumPeaks = 1; |
+ size_t peak_index; |
int16_t peak_value; |
DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks, |
fs_mult_, &peak_index, &peak_value); |
// Assert that |peak_index| stays within boundaries. |
- assert(peak_index >= 0); |
assert(peak_index <= (2 * kCorrelationLen - 1) * fs_mult_); |
// Compensate peak_index for displaced starting position. The displacement |
@@ -79,7 +77,7 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input, |
// Calculate scaling to ensure that |peak_index| samples can be square-summed |
// without overflowing. |
int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) - |
- WebRtcSpl_NormW32(peak_index); |
+ WebRtcSpl_NormW32(static_cast<int32_t>(peak_index)); |
hlundin-webrtc
2015/08/10 11:30:02
rtc::checked_cast
Peter Kasting
2015/08/17 22:49:47
This shouldn't be necessary, as the asserts above
hlundin-webrtc
2015/08/18 07:19:18
Acknowledged.
|
scaling = std::max(0, scaling); |
// |vec1| starts at 15 ms minus one pitch period. |
@@ -177,7 +175,7 @@ void TimeStretch::AutoCorrelation() { |
} |
bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy, |
- int peak_index, int scaling) const { |
+ size_t peak_index, int scaling) const { |
// Check if the signal seems to be active speech or not (simple VAD). |
// If (vec1_energy + vec2_energy) / (2 * peak_index) <= |
// 8 * background_noise_energy, then we say that the signal contains no |
@@ -197,7 +195,7 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy, |
int right_scale = 16 - WebRtcSpl_NormW32(right_side); |
right_scale = std::max(0, right_scale); |
left_side = left_side >> right_scale; |
- right_side = peak_index * (right_side >> right_scale); |
+ right_side = static_cast<int32_t>(peak_index) * (right_side >> right_scale); |
hlundin-webrtc
2015/08/10 11:30:02
rtc::checked_cast
Peter Kasting
2015/08/17 22:49:47
While today this shouldn't be necessary as |peak_index|
hlundin-webrtc
2015/08/18 07:19:18
Thanks. Future changes are what worries me sometimes.
|
// Scale |left_side| properly before comparing with |right_side|. |
// (|scaling| is the scale factor before energy calculation, thus the scale |