Index: webrtc/modules/audio_processing/test/audio_processing_unittest.cc
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index 667ed2aafcae3a7b69c188bf1815a560984225bf..f186a7af100cd791c82542eb82cda99f33089750 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -203,10 +203,10 @@ int16_t MaxAudioFrame(const AudioFrame& frame) {
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
 void TestStats(const AudioProcessing::Statistic& test,
                const audioproc::Test::Statistic& reference) {
-  EXPECT_NEAR(reference.instant(), test.instant, 1);
    peah-webrtc 2016/02/22 09:04:22:
    I think it would be better if you could change the
-  EXPECT_EQ(reference.average(), test.average);
-  EXPECT_EQ(reference.maximum(), test.maximum);
-  EXPECT_NEAR(reference.minimum(), test.minimum, 1);
+  EXPECT_NEAR(reference.instant(), test.instant, 2);
+  EXPECT_NEAR(reference.average(), test.average, 2);
+  EXPECT_NEAR(reference.maximum(), test.maximum, 2);
+  EXPECT_NEAR(reference.minimum(), test.minimum, 2);
 }
 
 void WriteStatsMessage(const AudioProcessing::Statistic& output,
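
For context, a minimal standalone gtest sketch (not part of this CL; the test name and values below are made up) of why switching from EXPECT_EQ to EXPECT_NEAR relaxes the check: EXPECT_NEAR(expected, actual, abs_error) passes whenever |expected - actual| <= abs_error, so the recorded reference statistics no longer have to match the computed ones exactly, only to within the tolerance of 2.

    // Hypothetical illustration, linked against gtest_main; not WebRTC code.
    #include "gtest/gtest.h"

    TEST(ToleranceSketch, NearVersusEq) {
      const float reference = 10.0f;
      const float measured = 11.5f;  // Assume the computed statistic drifted slightly.
      EXPECT_NEAR(reference, measured, 2.0f);  // Passes: |10.0 - 11.5| <= 2.
      // EXPECT_EQ(reference, measured);       // Would fail: values are not equal.
    }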