Index: webrtc/modules/audio_processing/test/audio_processing_unittest.cc
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index b9bae205c1afefd384d51e8bd96a30529326d6ca..b2e2e2a8d4aedd9612925dc456149a51de4b8a9b 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -14,6 +14,7 @@
#include <limits>
#include <queue>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
@@ -47,11 +48,8 @@ namespace {
// file. This is the typical case. When the file should be updated, it can
// be set to true with the command-line switch --write_ref_data.
bool write_ref_data = false;
-const int kChannels[] = {1, 2};
-const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
-
+const google::protobuf::int32 kChannels[] = {1, 2};
const int kSampleRates[] = {8000, 16000, 32000, 48000};
-const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// AECM doesn't support super-wb.
@@ -59,8 +57,6 @@ const int kProcessSampleRates[] = {8000, 16000};
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
#endif
-const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
- sizeof(*kProcessSampleRates);
void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
ChannelBuffer<int16_t> cb_int(cb->num_frames(),
@@ -69,7 +65,7 @@ void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
cb->num_frames(),
cb->num_channels(),
cb_int.channels());
- for (int i = 0; i < cb->num_channels(); ++i) {
+ for (size_t i = 0; i < cb->num_channels(); ++i) {
S16ToFloat(cb_int.channels()[i],
cb->num_frames(),
cb->channels()[i]);
@@ -81,7 +77,7 @@ void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
}
// Number of channels including the keyboard channel.
-int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
switch (layout) {
case AudioProcessing::kMono:
return 1;
@@ -92,7 +88,7 @@ int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
return 3;
}
assert(false);
- return -1;
+ return 0;
}
int TruncateToMultipleOf10(int value) {
@@ -100,25 +96,25 @@ int TruncateToMultipleOf10(int value) {
}
void MixStereoToMono(const float* stereo, float* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
}
void MixStereoToMono(const int16_t* stereo, int16_t* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
}
-void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
stereo[i * 2 + 1] = stereo[i * 2];
}
}
-void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void VerifyChannelsAreEqual(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
}
}
@@ -191,9 +187,9 @@ T AbsValue(T a) {
}
int16_t MaxAudioFrame(const AudioFrame& frame) {
- const int length = frame.samples_per_channel_ * frame.num_channels_;
+ const size_t length = frame.samples_per_channel_ * frame.num_channels_;
int16_t max_data = AbsValue(frame.data_[0]);
- for (int i = 1; i < length; i++) {
+ for (size_t i = 1; i < length; i++) {
max_data = std::max(max_data, AbsValue(frame.data_[i]));
}
@@ -255,9 +251,9 @@ std::string OutputFilePath(std::string name,
int input_rate,
int output_rate,
int reverse_rate,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
std::ostringstream ss;
ss << name << "_i" << num_input_channels << "_" << input_rate / 1000
<< "_r" << num_reverse_channels << "_" << reverse_rate / 1000 << "_";
@@ -342,9 +338,9 @@ class ApmTest : public ::testing::Test {
void Init(int sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
- int num_reverse_channels,
- int num_input_channels,
- int num_output_channels,
+ size_t num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
bool open_output_file);
void Init(AudioProcessing* ap);
void EnableAllComponents();
@@ -357,12 +353,12 @@ class ApmTest : public ::testing::Test {
void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
int delay_min, int delay_max);
void TestChangingChannelsInt16Interface(
- int num_channels,
+ size_t num_channels,
AudioProcessing::Error expected_return);
- void TestChangingForwardChannels(int num_in_channels,
- int num_out_channels,
+ void TestChangingForwardChannels(size_t num_in_channels,
+ size_t num_out_channels,
AudioProcessing::Error expected_return);
- void TestChangingReverseChannels(int num_rev_channels,
+ void TestChangingReverseChannels(size_t num_rev_channels,
AudioProcessing::Error expected_return);
void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
void RunManualVolumeChangeIsPossibleTest(int sample_rate);
@@ -383,7 +379,7 @@ class ApmTest : public ::testing::Test {
rtc::scoped_ptr<ChannelBuffer<float> > float_cb_;
rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
int output_sample_rate_hz_;
- int num_output_channels_;
+ size_t num_output_channels_;
FILE* far_file_;
FILE* near_file_;
FILE* out_file_;
@@ -466,9 +462,9 @@ void ApmTest::Init(AudioProcessing* ap) {
void ApmTest::Init(int sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
bool open_output_file) {
SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
output_sample_rate_hz_ = output_sample_rate_hz;
@@ -803,7 +799,7 @@ TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
}
void ApmTest::TestChangingChannelsInt16Interface(
- int num_channels,
+ size_t num_channels,
AudioProcessing::Error expected_return) {
frame_->num_channels_ = num_channels;
EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
@@ -811,8 +807,8 @@ void ApmTest::TestChangingChannelsInt16Interface(
}
void ApmTest::TestChangingForwardChannels(
- int num_in_channels,
- int num_out_channels,
+ size_t num_in_channels,
+ size_t num_out_channels,
AudioProcessing::Error expected_return) {
const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels};
const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
@@ -823,7 +819,7 @@ void ApmTest::TestChangingForwardChannels(
}
void ApmTest::TestChangingReverseChannels(
- int num_rev_channels,
+ size_t num_rev_channels,
AudioProcessing::Error expected_return) {
const ProcessingConfig processing_config = {
{{ frame_->sample_rate_hz_, apm_->num_input_channels() },
@@ -841,7 +837,7 @@ TEST_F(ApmTest, ChannelsInt16Interface) {
TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
- for (int i = 1; i < 4; i++) {
+ for (size_t i = 1; i < 4; i++) {
TestChangingChannelsInt16Interface(i, kNoErr);
EXPECT_EQ(i, apm_->num_input_channels());
// We always force the number of reverse channels used for processing to 1.
@@ -856,8 +852,8 @@ TEST_F(ApmTest, Channels) {
TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
- for (int i = 1; i < 4; ++i) {
- for (int j = 0; j < 1; ++j) {
+ for (size_t i = 1; i < 4; ++i) {
+ for (size_t j = 0; j < 1; ++j) {
// Output channels much be one or match input channels.
if (j == 1 || i == j) {
TestChangingForwardChannels(i, j, kNoErr);
@@ -881,7 +877,7 @@ TEST_F(ApmTest, SampleRatesInt) {
EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(kIntFormat));
// Testing valid sample rates
int fs[] = {8000, 16000, 32000, 48000};
- for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
+ for (size_t i = 0; i < arraysize(fs); i++) {
SetContainerFormat(fs[i], 2, frame_, &float_cb_);
EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
EXPECT_EQ(fs[i], apm_->input_sample_rate_hz());
@@ -901,7 +897,7 @@ TEST_F(ApmTest, EchoCancellation) {
EchoCancellation::kModerateSuppression,
EchoCancellation::kHighSuppression,
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->set_suppression_level(level[i]));
EXPECT_EQ(level[i],
@@ -978,7 +974,7 @@ TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
// Test a couple of corner cases and verify that the estimated delay is
// within a valid region (set to +-1.5 blocks). Note that these cases are
// sampling frequency dependent.
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1050,7 +1046,7 @@ TEST_F(ApmTest, EchoControlMobile) {
EchoControlMobile::kSpeakerphone,
EchoControlMobile::kLoudSpeakerphone,
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_control_mobile()->set_routing_mode(mode[i]));
EXPECT_EQ(mode[i],
@@ -1115,7 +1111,7 @@ TEST_F(ApmTest, GainControl) {
GainControl::kAdaptiveDigital,
GainControl::kFixedDigital
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_mode(mode[i]));
EXPECT_EQ(mode[i], apm_->gain_control()->mode());
@@ -1131,7 +1127,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->target_level_dbfs()));
int level_dbfs[] = {0, 6, 31};
- for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
+ for (size_t i = 0; i < arraysize(level_dbfs); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
@@ -1149,7 +1145,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->compression_gain_db()));
int gain_db[] = {0, 10, 90};
- for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
+ for (size_t i = 0; i < arraysize(gain_db); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_compression_gain_db(gain_db[i]));
EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
@@ -1180,14 +1176,14 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->analog_level_maximum()));
int min_level[] = {0, 255, 1024};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
}
int max_level[] = {0, 1024, 65535};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
@@ -1226,7 +1222,7 @@ void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
// Verifies that despite volume slider quantization, the AGC can continue to
// increase its volume.
TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunQuantizedVolumeDoesNotGetStuckTest(kSampleRates[i]);
}
}
@@ -1271,7 +1267,7 @@ void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
}
TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
}
}
@@ -1279,11 +1275,11 @@ TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
const int kSampleRateHz = 16000;
- const int kSamplesPerChannel =
- AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000;
- const int kNumInputChannels = 2;
- const int kNumOutputChannels = 1;
- const int kNumChunks = 700;
+ const size_t kSamplesPerChannel =
+ static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
+ const size_t kNumInputChannels = 2;
+ const size_t kNumOutputChannels = 1;
+ const size_t kNumChunks = 700;
const float kScaleFactor = 0.25f;
Config config;
std::vector<webrtc::Point> geometry;
@@ -1297,8 +1293,8 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
ChannelBuffer<float> src_buf(kSamplesPerChannel, kNumInputChannels);
ChannelBuffer<float> dest_buf(kSamplesPerChannel, kNumOutputChannels);
- const int max_length = kSamplesPerChannel * std::max(kNumInputChannels,
- kNumOutputChannels);
+ const size_t max_length = kSamplesPerChannel * std::max(kNumInputChannels,
+ kNumOutputChannels);
rtc::scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
rtc::scoped_ptr<float[]> float_data(new float[max_length]);
std::string filename = ResourceFilePath("far", kSampleRateHz);
@@ -1310,13 +1306,13 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
bool is_target = false;
EXPECT_CALL(*beamformer, is_target_present())
.WillRepeatedly(testing::ReturnPointee(&is_target));
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
&src_buf));
- for (int j = 0; j < kNumInputChannels; ++j) {
- for (int k = 0; k < kSamplesPerChannel; ++k) {
+ for (size_t j = 0; j < kNumInputChannels; ++j) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
src_buf.channels()[j][k] *= kScaleFactor;
}
}
@@ -1335,7 +1331,7 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
apm->gain_control()->compression_gain_db());
rewind(far_file);
is_target = true;
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
@@ -1370,7 +1366,7 @@ TEST_F(ApmTest, NoiseSuppression) {
NoiseSuppression::kHigh,
NoiseSuppression::kVeryHigh
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->noise_suppression()->set_level(level[i]));
EXPECT_EQ(level[i], apm_->noise_suppression()->level());
@@ -1472,7 +1468,7 @@ TEST_F(ApmTest, VoiceDetection) {
VoiceDetection::kModerateLikelihood,
VoiceDetection::kHighLikelihood
};
- for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
+ for (size_t i = 0; i < arraysize(likelihood); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->voice_detection()->set_likelihood(likelihood[i]));
EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
@@ -1504,7 +1500,7 @@ TEST_F(ApmTest, VoiceDetection) {
AudioFrame::kVadPassive,
AudioFrame::kVadUnknown
};
- for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
+ for (size_t i = 0; i < arraysize(activity); i++) {
frame_->vad_activity_ = activity[i];
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_EQ(activity[i], frame_->vad_activity_);
@@ -1530,7 +1526,7 @@ TEST_F(ApmTest, AllProcessingDisabledByDefault) {
}
TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
- for (size_t i = 0; i < kSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kSampleRates); i++) {
Init(kSampleRates[i], kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
SetFrameTo(frame_, 1000, 2000);
AudioFrame frame_copy;
@@ -1567,7 +1563,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
EnableAllComponents();
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1903,11 +1899,14 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
if (test->num_input_channels() != test->num_output_channels())
continue;
- const int num_render_channels = test->num_reverse_channels();
- const int num_input_channels = test->num_input_channels();
- const int num_output_channels = test->num_output_channels();
- const int samples_per_channel = test->sample_rate() *
- AudioProcessing::kChunkSizeMs / 1000;
+ const size_t num_render_channels =
+ static_cast<size_t>(test->num_reverse_channels());
+ const size_t num_input_channels =
+ static_cast<size_t>(test->num_input_channels());
+ const size_t num_output_channels =
+ static_cast<size_t>(test->num_output_channels());
+ const size_t samples_per_channel = static_cast<size_t>(
+ test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
num_input_channels, num_output_channels, num_render_channels, true);
@@ -1948,7 +1947,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
test->sample_rate(),
LayoutFromChannels(num_output_channels),
float_cb_->channels()));
- for (int j = 0; j < num_output_channels; ++j) {
+ for (size_t j = 0; j < num_output_channels; ++j) {
FloatToS16(float_cb_->channels()[j],
samples_per_channel,
output_cb.channels()[j]);
@@ -1999,9 +1998,9 @@ TEST_F(ApmTest, Process) {
OpenFileAndReadMessage(ref_filename_, &ref_data);
} else {
// Write the desired tests to the protobuf reference file.
- for (size_t i = 0; i < kChannelsSize; i++) {
- for (size_t j = 0; j < kChannelsSize; j++) {
- for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
+ for (size_t i = 0; i < arraysize(kChannels); i++) {
+ for (size_t j = 0; j < arraysize(kChannels); j++) {
+ for (size_t l = 0; l < arraysize(kProcessSampleRates); l++) {
audioproc::Test* test = ref_data.add_test();
test->set_num_reverse_channels(kChannels[i]);
test->set_num_input_channels(kChannels[j]);
@@ -2042,9 +2041,9 @@ TEST_F(ApmTest, Process) {
Init(test->sample_rate(),
test->sample_rate(),
test->sample_rate(),
- test->num_input_channels(),
- test->num_output_channels(),
- test->num_reverse_channels(),
+ static_cast<size_t>(test->num_input_channels()),
+ static_cast<size_t>(test->num_output_channels()),
+ static_cast<size_t>(test->num_reverse_channels()),
true);
int frame_count = 0;
@@ -2069,7 +2068,8 @@ TEST_F(ApmTest, Process) {
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
// Ensure the frame was downmixed properly.
- EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
+ EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
+ frame_->num_channels_);
max_output_average += MaxAudioFrame(*frame_);
@@ -2099,7 +2099,7 @@ TEST_F(ApmTest, Process) {
ASSERT_EQ(frame_size, write_count);
// Reset in case of downmixing.
- frame_->num_channels_ = test->num_input_channels();
+ frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
frame_count++;
}
max_output_average /= frame_count;
@@ -2228,12 +2228,11 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono},
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
// Enable one component just to ensure some processing takes place.
ap->noise_suppression()->Enable(true);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
const int in_rate = 44100;
const int out_rate = 48000;
ChannelBuffer<float> in_cb(SamplesFromRate(in_rate),
@@ -2321,14 +2320,10 @@ class AudioProcessingTest
static void SetUpTestCase() {
// Create all needed output reference files.
const int kNativeRates[] = {8000, 16000, 32000, 48000};
- const size_t kNativeRatesSize =
- sizeof(kNativeRates) / sizeof(*kNativeRates);
- const int kNumChannels[] = {1, 2};
- const size_t kNumChannelsSize =
- sizeof(kNumChannels) / sizeof(*kNumChannels);
- for (size_t i = 0; i < kNativeRatesSize; ++i) {
- for (size_t j = 0; j < kNumChannelsSize; ++j) {
- for (size_t k = 0; k < kNumChannelsSize; ++k) {
+ const size_t kNumChannels[] = {1, 2};
+ for (size_t i = 0; i < arraysize(kNativeRates); ++i) {
+ for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
+ for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
// The reference files always have matching input and output channels.
ProcessFormat(kNativeRates[i],
kNativeRates[i],
@@ -2350,9 +2345,9 @@ class AudioProcessingTest
static void ProcessFormat(int input_rate,
int output_rate,
int reverse_rate,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
std::string output_file_prefix) {
Config config;
config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
@@ -2450,9 +2445,8 @@ TEST_P(AudioProcessingTest, Formats) {
{2, 2, 1},
{2, 2, 2},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
ProcessFormat(input_rate_,
output_rate_,
reverse_rate_,
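
Background on the arraysize() idiom this patch switches to, as a minimal standalone sketch: the helper below is assumed to match the Chromium-style implementation behind webrtc/base/arraysize.h. Unlike the sizeof(a) / sizeof(*a) division it replaces, the macro only compiles for genuine arrays, so accidentally passing a pointer (where the division would silently yield a wrong count) becomes a build error.

    #include <stddef.h>

    // Declared but never defined: it is only used inside sizeof(), an
    // unevaluated context. Its return type is a reference to a char array
    // of N elements, so sizeof() of it is the element count, computed at
    // compile time from the array type.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];

    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    int main() {
      const int kSampleRates[] = {8000, 16000, 32000, 48000};
      static_assert(arraysize(kSampleRates) == 4, "unexpected element count");
      // const int* p = kSampleRates;
      // arraysize(p);  // does not compile, unlike sizeof(p) / sizeof(*p)
      return 0;
    }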