Index: webrtc/common_audio/vad/vad_sp_unittest.cc
diff --git a/webrtc/common_audio/vad/vad_sp_unittest.cc b/webrtc/common_audio/vad/vad_sp_unittest.cc
index 9127bf329901faae94941a3a18ec03bb83f8f18f..6d5e2a646b636d7085a346a3eabd28012babb9bf 100644
--- a/webrtc/common_audio/vad/vad_sp_unittest.cc
+++ b/webrtc/common_audio/vad/vad_sp_unittest.cc
@@ -23,7 +23,7 @@ namespace {
 
 TEST_F(VadTest, vad_sp) {
   VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
-  const int kMaxFrameLenSp = 960;  // Maximum frame length in this unittest.
+  const size_t kMaxFrameLenSp = 960;  // Maximum frame length in this unittest.
   int16_t zeros[kMaxFrameLenSp] = { 0 };
   int32_t state[2] = { 0 };
   int16_t data_in[kMaxFrameLenSp];
@@ -40,14 +40,14 @@ TEST_F(VadTest, vad_sp) {
 
   // Construct a speech signal that will trigger the VAD in all modes. It is
   // known that (i * i) will wrap around, but that doesn't matter in this case.
-  for (int16_t i = 0; i < kMaxFrameLenSp; ++i) {
+  for (size_t i = 0; i < kMaxFrameLenSp; ++i) {
     data_in[i] = static_cast<int16_t>(i * i);
   }
   // Input values all zeros, expect all zeros out.
   WebRtcVad_Downsampling(zeros, data_out, state, kMaxFrameLenSp);
   EXPECT_EQ(0, state[0]);
   EXPECT_EQ(0, state[1]);
-  for (int16_t i = 0; i < kMaxFrameLenSp / 2; ++i) {
+  for (size_t i = 0; i < kMaxFrameLenSp / 2; ++i) {
     EXPECT_EQ(0, data_out[i]);
   }
   // Make a simple non-zero data test.
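For readers following the int-to-size_t change, below is a minimal, self-contained sketch of the all-zeros downsampling check that the second hunk touches, written with size_t for both the length constant and the loop index. The include path and the exact WebRtcVad_Downsampling parameter types are assumptions inferred from this diff rather than checked against the tree.

// Sketch only: mirrors the all-zeros check in the test above.
// Assumes webrtc/common_audio/vad/vad_sp.h declares WebRtcVad_Downsampling
// with a size_t input-length parameter, matching the call in the diff.
#include <stddef.h>
#include <stdint.h>

#include "webrtc/common_audio/vad/vad_sp.h"

bool DownsampledZerosStayZero() {
  const size_t kFrameLen = 960;      // Same maximum frame length as the test.
  int16_t zeros[kFrameLen] = { 0 };  // All-zero input frame.
  int16_t data_out[kFrameLen];       // Sized generously, as in the test.
  int32_t state[2] = { 0 };          // Downsampling filter state, zeroed.

  WebRtcVad_Downsampling(zeros, data_out, state, kFrameLen);

  // Zero input through a zeroed filter state must yield only zero samples in
  // the downsampled-by-two output, i.e. the first kFrameLen / 2 values.
  for (size_t i = 0; i < kFrameLen / 2; ++i) {
    if (data_out[i] != 0)
      return false;
  }
  return state[0] == 0 && state[1] == 0;
}

Using size_t for the length constant and the loop indices keeps the loop comparisons unsigned-vs-unsigned, which is what the int16_t-to-size_t changes in the hunks above are after.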