OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 186 matching lines...) | |
197 for (size_t i = 1; i < length; i++) { | 197 for (size_t i = 1; i < length; i++) { |
198 max_data = std::max(max_data, AbsValue(frame.data_[i])); | 198 max_data = std::max(max_data, AbsValue(frame.data_[i])); |
199 } | 199 } |
200 | 200 |
201 return max_data; | 201 return max_data; |
202 } | 202 } |
203 | 203 |
204 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) | 204 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) |
205 void TestStats(const AudioProcessing::Statistic& test, | 205 void TestStats(const AudioProcessing::Statistic& test, |
206 const audioproc::Test::Statistic& reference) { | 206 const audioproc::Test::Statistic& reference) { |
207 EXPECT_NEAR(reference.instant(), test.instant, 2); | 207 ASSERT_EQ(reference.instant(), test.instant); |
peah-webrtc 2016/05/20 12:21:23:
Why not use EXPECT_EQ? Does it make sense to conti…
minyue-webrtc 2016/05/23 03:19:45:
It was EXPECT_EQ because the test checked the valu…
peah-webrtc 2016/05/23 05:11:53:
I don't agree with that. Please see the related co…
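The thread above turns on the difference between the EXPECT_* and ASSERT_* families in GoogleTest: an ASSERT_* failure returns from the enclosing function immediately, while EXPECT_* records the failure and lets the test continue, and EXPECT_NEAR additionally allows a small tolerance. A minimal sketch of those semantics (illustrative test only, not part of this CL):

```cpp
#include <gtest/gtest.h>

// Illustrative test only; the values are made up to show failure semantics.
TEST(AssertVsExpectSketch, FailureSemantics) {
  const int reference = 100;
  const int measured = 102;  // Hypothetical value that drifts by 2.

  EXPECT_NEAR(reference, measured, 2);  // Passes: difference is within +/-2.
  EXPECT_EQ(reference, measured);       // Fails, but execution continues, so
                                        // later checks are still reported.
  ASSERT_EQ(reference, measured);       // Fails and returns from the test
  EXPECT_EQ(reference + 1, measured);   // body here; this line never runs.
}
```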
208 EXPECT_NEAR(reference.average(), test.average, 2); | 208 ASSERT_EQ(reference.average(), test.average); |
209 EXPECT_NEAR(reference.maximum(), test.maximum, 3); | 209 ASSERT_EQ(reference.maximum(), test.maximum); |
210 EXPECT_NEAR(reference.minimum(), test.minimum, 2); | 210 ASSERT_EQ(reference.minimum(), test.minimum); |
211 } | 211 } |
212 | 212 |
213 void WriteStatsMessage(const AudioProcessing::Statistic& output, | 213 void WriteStatsMessage(const AudioProcessing::Statistic& output, |
214 audioproc::Test::Statistic* msg) { | 214 audioproc::Test::Statistic* msg) { |
215 msg->set_instant(output.instant); | 215 msg->set_instant(output.instant); |
216 msg->set_average(output.average); | 216 msg->set_average(output.average); |
217 msg->set_maximum(output.maximum); | 217 msg->set_maximum(output.maximum); |
218 msg->set_minimum(output.minimum); | 218 msg->set_minimum(output.minimum); |
219 } | 219 } |
220 #endif | 220 #endif |
221 | 221 |
222 void OpenFileAndWriteMessage(const std::string filename, | 222 void OpenFileAndWriteMessage(const std::string filename, |
223 const ::google::protobuf::MessageLite& msg) { | 223 const ::google::protobuf::MessageLite& msg) { |
224 #if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID) | |
225 FILE* file = fopen(filename.c_str(), "wb"); | 224 FILE* file = fopen(filename.c_str(), "wb"); |
226 ASSERT_TRUE(file != NULL); | 225 ASSERT_TRUE(file != NULL); |
227 | 226 |
228 int32_t size = msg.ByteSize(); | 227 int32_t size = msg.ByteSize(); |
229 ASSERT_GT(size, 0); | 228 ASSERT_GT(size, 0); |
230 std::unique_ptr<uint8_t[]> array(new uint8_t[size]); | 229 std::unique_ptr<uint8_t[]> array(new uint8_t[size]); |
231 ASSERT_TRUE(msg.SerializeToArray(array.get(), size)); | 230 ASSERT_TRUE(msg.SerializeToArray(array.get(), size)); |
232 | 231 |
233 ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file)); | 232 ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file)); |
234 ASSERT_EQ(static_cast<size_t>(size), | 233 ASSERT_EQ(static_cast<size_t>(size), |
235 fwrite(array.get(), sizeof(array[0]), size, file)); | 234 fwrite(array.get(), sizeof(array[0]), size, file)); |
236 fclose(file); | 235 fclose(file); |
237 #else | |
238 std::cout << "Warning: Writing new reference is only allowed on Linux!" | |
239 << std::endl; | |
240 #endif | |
241 } | 236 } |
242 | 237 |
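OpenFileAndWriteMessage above stores the reference data as a raw int32_t byte count followed by the serialized protobuf payload. A hypothetical reader sketch for that layout (OpenFileAndReadMessageSketch is an assumed name, not part of this CL) would mirror it:

```cpp
#include <cstdint>
#include <cstdio>
#include <memory>
#include <string>

#include <google/protobuf/message_lite.h>
#include <gtest/gtest.h>

// Sketch of reading back the length-prefixed message written above.
void OpenFileAndReadMessageSketch(const std::string& filename,
                                  ::google::protobuf::MessageLite* msg) {
  FILE* file = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(file != NULL);

  int32_t size = 0;
  ASSERT_EQ(1u, fread(&size, sizeof(size), 1, file));  // Size prefix first.
  ASSERT_GT(size, 0);

  std::unique_ptr<uint8_t[]> array(new uint8_t[size]);
  ASSERT_EQ(static_cast<size_t>(size),
            fread(array.get(), sizeof(array[0]), size, file));
  ASSERT_TRUE(msg->ParseFromArray(array.get(), size));  // Then the payload.
  fclose(file);
}
```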
243 std::string ResourceFilePath(std::string name, int sample_rate_hz) { | 238 std::string ResourceFilePath(std::string name, int sample_rate_hz) { |
244 std::ostringstream ss; | 239 std::ostringstream ss; |
245 // Resource files are all stereo. | 240 // Resource files are all stereo. |
246 ss << name << sample_rate_hz / 1000 << "_stereo"; | 241 ss << name << sample_rate_hz / 1000 << "_stereo"; |
247 return test::ResourcePath(ss.str(), "pcm"); | 242 return test::ResourcePath(ss.str(), "pcm"); |
248 } | 243 } |
249 | 244 |
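As a usage sketch of ResourceFilePath (the "far" prefix is assumed from the surrounding fixture, not shown in this section):

```cpp
// Hypothetical call: the helper embeds the rate in kHz plus the fixed
// "_stereo" suffix, i.e. this resolves to
// test::ResourcePath("far32_stereo", "pcm").
const std::string far_path = ResourceFilePath("far", 32000);
```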
250 // Temporary filenames unique to this process. Used to be able to run these | 245 // Temporary filenames unique to this process. Used to be able to run these |
(...skipping 1843 matching lines...) | |
2094 true); | 2089 true); |
2095 | 2090 |
2096 int frame_count = 0; | 2091 int frame_count = 0; |
2097 int has_echo_count = 0; | 2092 int has_echo_count = 0; |
2098 int has_voice_count = 0; | 2093 int has_voice_count = 0; |
2099 int is_saturated_count = 0; | 2094 int is_saturated_count = 0; |
2100 int analog_level = 127; | 2095 int analog_level = 127; |
2101 int analog_level_average = 0; | 2096 int analog_level_average = 0; |
2102 int max_output_average = 0; | 2097 int max_output_average = 0; |
2103 float ns_speech_prob_average = 0.0f; | 2098 float ns_speech_prob_average = 0.0f; |
2099 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) | |
2100 int stats_index = 0; | |
2101 #endif | |
2104 | 2102 |
2105 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) { | 2103 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) { |
2106 EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_)); | 2104 EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_)); |
2107 | 2105 |
2108 frame_->vad_activity_ = AudioFrame::kVadUnknown; | 2106 frame_->vad_activity_ = AudioFrame::kVadUnknown; |
2109 | 2107 |
2110 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0)); | 2108 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0)); |
2111 apm_->echo_cancellation()->set_stream_drift_samples(0); | 2109 apm_->echo_cancellation()->set_stream_drift_samples(0); |
2112 EXPECT_EQ(apm_->kNoError, | 2110 EXPECT_EQ(apm_->kNoError, |
2113 apm_->gain_control()->set_stream_analog_level(analog_level)); | 2111 apm_->gain_control()->set_stream_analog_level(analog_level)); |
(...skipping 27 matching lines...) | |
2141 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_; | 2139 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_; |
2142 size_t write_count = fwrite(frame_->data_, | 2140 size_t write_count = fwrite(frame_->data_, |
2143 sizeof(int16_t), | 2141 sizeof(int16_t), |
2144 frame_size, | 2142 frame_size, |
2145 out_file_); | 2143 out_file_); |
2146 ASSERT_EQ(frame_size, write_count); | 2144 ASSERT_EQ(frame_size, write_count); |
2147 | 2145 |
2148 // Reset in case of downmixing. | 2146 // Reset in case of downmixing. |
2149 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels()); | 2147 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels()); |
2150 frame_count++; | 2148 frame_count++; |
2149 | |
2150 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) | |
2151 const int kStatsAggregationFrameNum = 100; // 1 second. | |
2152 if (frame_count % kStatsAggregationFrameNum == 0) { | |
2153 // Get echo metrics. | |
2154 EchoCancellation::Metrics echo_metrics; | |
2155 EXPECT_EQ(apm_->kNoError, | |
2156 apm_->echo_cancellation()->GetMetrics(&echo_metrics)); | |
2157 | |
2158 // Get delay metrics. | |
2159 int median = 0; | |
2160 int std = 0; | |
2161 float fraction_poor_delays = 0; | |
2162 EXPECT_EQ(apm_->kNoError, | |
2163 apm_->echo_cancellation()->GetDelayMetrics( | |
2164 &median, &std, &fraction_poor_delays)); | |
2165 | |
2166 // Get RMS. | |
2167 int rms_level = apm_->level_estimator()->RMS(); | |
2168 EXPECT_LE(0, rms_level); | |
2169 EXPECT_GE(127, rms_level); | |
2170 | |
2171 if (!write_ref_data) { | |
2172 const audioproc::Test::EchoMetrics& reference = | |
2173 test->echo_metrics(stats_index); | |
2174 TestStats(echo_metrics.residual_echo_return_loss, | |
2175 reference.residual_echo_return_loss()); | |
2176 TestStats(echo_metrics.echo_return_loss, | |
2177 reference.echo_return_loss()); | |
2178 TestStats(echo_metrics.echo_return_loss_enhancement, | |
2179 reference.echo_return_loss_enhancement()); | |
2180 TestStats(echo_metrics.a_nlp, | |
2181 reference.a_nlp()); | |
2182 ASSERT_EQ(echo_metrics.divergent_filter_fraction, | |
2183 reference.divergent_filter_fraction()); | |
2184 | |
2185 const audioproc::Test::DelayMetrics& reference_delay = | |
2186 test->delay_metrics(stats_index); | |
2187 ASSERT_EQ(reference_delay.median(), median); | |
peah-webrtc 2016/05/20 12:21:23:
Why have you changed this to ASSERT_EQ? That does…
minyue-webrtc 2016/05/23 03:19:45:
I think if it fails one time, no benefits can be o…
peah-webrtc 2016/05/23 05:11:53:
That depends on how it is to be used. If it is to…
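One direction the EXPECT_* side of this thread points at (a sketch only, not what this CL does) is to keep EXPECT_* inside the per-interval loop and use SCOPED_TRACE, so a single run still reports every mismatching interval, tagged with its index:

```cpp
#include <gtest/gtest.h>

// Sketch only: CheckMedianSketch is a hypothetical helper.
void CheckMedianSketch(int stats_index, int reference_median, int median) {
  // Any failure below is reported with the interval index attached, and the
  // test keeps running instead of aborting on the first bad interval.
  SCOPED_TRACE(::testing::Message() << "stats_index=" << stats_index);
  EXPECT_EQ(reference_median, median);
}
```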
2188 ASSERT_EQ(reference_delay.std(), std); | |
2189 ASSERT_EQ(reference_delay.fraction_poor_delays(), | |
2190 fraction_poor_delays); | |
2191 | |
2192 ASSERT_EQ(test->rms_level(stats_index), rms_level); | |
2193 | |
2194 ++stats_index; | |
2195 } else { | |
2196 audioproc::Test::EchoMetrics* message = | |
2197 test->add_echo_metrics(); | |
2198 WriteStatsMessage(echo_metrics.residual_echo_return_loss, | |
2199 message->mutable_residual_echo_return_loss()); | |
2200 WriteStatsMessage(echo_metrics.echo_return_loss, | |
2201 message->mutable_echo_return_loss()); | |
2202 WriteStatsMessage(echo_metrics.echo_return_loss_enhancement, | |
2203 message->mutable_echo_return_loss_enhancement()); | |
2204 WriteStatsMessage(echo_metrics.a_nlp, | |
2205 message->mutable_a_nlp()); | |
2206 message->set_divergent_filter_fraction( | |
2207 echo_metrics.divergent_filter_fraction); | |
2208 | |
2209 audioproc::Test::DelayMetrics* message_delay = | |
2210 test->add_delay_metrics(); | |
2211 message_delay->set_median(median); | |
2212 message_delay->set_std(std); | |
2213 message_delay->set_fraction_poor_delays(fraction_poor_delays); | |
2214 | |
2215 test->add_rms_level(rms_level); | |
2216 } | |
2217 } | |
2218 #endif // defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE). | |
2151 } | 2219 } |
2152 max_output_average /= frame_count; | 2220 max_output_average /= frame_count; |
2153 analog_level_average /= frame_count; | 2221 analog_level_average /= frame_count; |
2154 ns_speech_prob_average /= frame_count; | 2222 ns_speech_prob_average /= frame_count; |
2155 | 2223 |
2156 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) | |
2157 EchoCancellation::Metrics echo_metrics; | |
2158 EXPECT_EQ(apm_->kNoError, | |
2159 apm_->echo_cancellation()->GetMetrics(&echo_metrics)); | |
2160 int median = 0; | |
2161 int std = 0; | |
2162 float fraction_poor_delays = 0; | |
2163 EXPECT_EQ(apm_->kNoError, | |
2164 apm_->echo_cancellation()->GetDelayMetrics( | |
2165 &median, &std, &fraction_poor_delays)); | |
2166 | |
2167 int rms_level = apm_->level_estimator()->RMS(); | |
2168 EXPECT_LE(0, rms_level); | |
2169 EXPECT_GE(127, rms_level); | |
2170 #endif | |
2171 | |
2172 if (!write_ref_data) { | 2224 if (!write_ref_data) { |
2173 const int kIntNear = 1; | 2225 const int kIntNear = 1; |
2174 // When running the test on a N7 we get a {2, 6} difference of | 2226 // When running the test on a N7 we get a {2, 6} difference of |
2175 // |has_voice_count| and |max_output_average| is up to 18 higher. | 2227 // |has_voice_count| and |max_output_average| is up to 18 higher. |
2176 // All numbers being consistently higher on N7 compare to ref_data. | 2228 // All numbers being consistently higher on N7 compare to ref_data. |
2177 // TODO(bjornv): If we start getting more of these offsets on Android we | 2229 // TODO(bjornv): If we start getting more of these offsets on Android we |
2178 // should consider a different approach. Either using one slack for all, | 2230 // should consider a different approach. Either using one slack for all, |
2179 // or generate a separate android reference. | 2231 // or generate a separate android reference. |
2180 #if defined(WEBRTC_ANDROID) | 2232 #if defined(WEBRTC_ANDROID) |
2181 const int kHasVoiceCountOffset = 3; | 2233 const int kHasVoiceCountOffset = 3; |
2182 const int kHasVoiceCountNear = 4; | 2234 const int kHasVoiceCountNear = 4; |
2183 const int kMaxOutputAverageOffset = 9; | 2235 const int kMaxOutputAverageOffset = 9; |
2184 const int kMaxOutputAverageNear = 9; | 2236 const int kMaxOutputAverageNear = 9; |
2185 #else | 2237 #else |
2186 const int kHasVoiceCountOffset = 0; | 2238 const int kHasVoiceCountOffset = 0; |
2187 const int kHasVoiceCountNear = kIntNear; | 2239 const int kHasVoiceCountNear = kIntNear; |
2188 const int kMaxOutputAverageOffset = 0; | 2240 const int kMaxOutputAverageOffset = 0; |
2189 const int kMaxOutputAverageNear = kIntNear; | 2241 const int kMaxOutputAverageNear = kIntNear; |
2190 #endif | 2242 #endif |
2191 EXPECT_NEAR(test->has_echo_count(), has_echo_count, kIntNear); | 2243 EXPECT_NEAR(test->has_echo_count(), has_echo_count, kIntNear); |
2192 EXPECT_NEAR(test->has_voice_count(), | 2244 EXPECT_NEAR(test->has_voice_count(), |
2193 has_voice_count - kHasVoiceCountOffset, | 2245 has_voice_count - kHasVoiceCountOffset, |
2194 kHasVoiceCountNear); | 2246 kHasVoiceCountNear); |
2195 EXPECT_NEAR(test->is_saturated_count(), is_saturated_count, kIntNear); | 2247 EXPECT_NEAR(test->is_saturated_count(), is_saturated_count, kIntNear); |
2196 | 2248 |
2197 EXPECT_NEAR(test->analog_level_average(), analog_level_average, kIntNear); | 2249 EXPECT_NEAR(test->analog_level_average(), analog_level_average, kIntNear); |
2198 EXPECT_NEAR(test->max_output_average(), | 2250 EXPECT_NEAR(test->max_output_average(), |
2199 max_output_average - kMaxOutputAverageOffset, | 2251 max_output_average - kMaxOutputAverageOffset, |
2200 kMaxOutputAverageNear); | 2252 kMaxOutputAverageNear); |
2201 | |
2202 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) | 2253 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) |
peah-webrtc 2016/05/20 12:21:23:
Why were only parts of the stats moved to test for…
minyue-webrtc 2016/05/23 03:19:45:
Because has_echo_count, has_voice_count, analog_le…
peah-webrtc 2016/05/23 05:13:09:
Acknowledged.
2203 audioproc::Test::EchoMetrics reference = test->echo_metrics(); | |
2204 TestStats(echo_metrics.residual_echo_return_loss, | |
2205 reference.residual_echo_return_loss()); | |
2206 TestStats(echo_metrics.echo_return_loss, | |
2207 reference.echo_return_loss()); | |
2208 TestStats(echo_metrics.echo_return_loss_enhancement, | |
2209 reference.echo_return_loss_enhancement()); | |
2210 TestStats(echo_metrics.a_nlp, | |
2211 reference.a_nlp()); | |
2212 | |
2213 const double kFloatNear = 0.0005; | 2254 const double kFloatNear = 0.0005; |
2214 audioproc::Test::DelayMetrics reference_delay = test->delay_metrics(); | |
2215 EXPECT_NEAR(reference_delay.median(), median, kIntNear); | |
2216 EXPECT_NEAR(reference_delay.std(), std, kIntNear); | |
2217 EXPECT_NEAR(reference_delay.fraction_poor_delays(), fraction_poor_delays, | |
2218 kFloatNear); | |
2219 | |
2220 EXPECT_NEAR(test->rms_level(), rms_level, kIntNear); | |
2221 | |
2222 EXPECT_NEAR(test->ns_speech_probability_average(), | 2255 EXPECT_NEAR(test->ns_speech_probability_average(), |
2223 ns_speech_prob_average, | 2256 ns_speech_prob_average, |
2224 kFloatNear); | 2257 kFloatNear); |
2225 #endif | 2258 #endif |
2226 } else { | 2259 } else { |
2227 test->set_has_echo_count(has_echo_count); | 2260 test->set_has_echo_count(has_echo_count); |
2228 test->set_has_voice_count(has_voice_count); | 2261 test->set_has_voice_count(has_voice_count); |
2229 test->set_is_saturated_count(is_saturated_count); | 2262 test->set_is_saturated_count(is_saturated_count); |
2230 | 2263 |
2231 test->set_analog_level_average(analog_level_average); | 2264 test->set_analog_level_average(analog_level_average); |
2232 test->set_max_output_average(max_output_average); | 2265 test->set_max_output_average(max_output_average); |
2233 | 2266 |
2234 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) | 2267 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) |
2235 audioproc::Test::EchoMetrics* message = test->mutable_echo_metrics(); | |
2236 WriteStatsMessage(echo_metrics.residual_echo_return_loss, | |
2237 message->mutable_residual_echo_return_loss()); | |
2238 WriteStatsMessage(echo_metrics.echo_return_loss, | |
2239 message->mutable_echo_return_loss()); | |
2240 WriteStatsMessage(echo_metrics.echo_return_loss_enhancement, | |
2241 message->mutable_echo_return_loss_enhancement()); | |
2242 WriteStatsMessage(echo_metrics.a_nlp, | |
2243 message->mutable_a_nlp()); | |
2244 | |
2245 audioproc::Test::DelayMetrics* message_delay = | |
2246 test->mutable_delay_metrics(); | |
2247 message_delay->set_median(median); | |
2248 message_delay->set_std(std); | |
2249 message_delay->set_fraction_poor_delays(fraction_poor_delays); | |
2250 | |
2251 test->set_rms_level(rms_level); | |
2252 | |
2253 EXPECT_LE(0.0f, ns_speech_prob_average); | 2268 EXPECT_LE(0.0f, ns_speech_prob_average); |
2254 EXPECT_GE(1.0f, ns_speech_prob_average); | 2269 EXPECT_GE(1.0f, ns_speech_prob_average); |
2255 test->set_ns_speech_probability_average(ns_speech_prob_average); | 2270 test->set_ns_speech_probability_average(ns_speech_prob_average); |
2256 #endif | 2271 #endif |
2257 } | 2272 } |
2258 | 2273 |
2259 rewind(far_file_); | 2274 rewind(far_file_); |
2260 rewind(near_file_); | 2275 rewind(near_file_); |
2261 } | 2276 } |
2262 | 2277 |
(...skipping 489 matching lines...) | |
2752 std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35), | 2767 std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35), |
2753 std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0), | 2768 std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0), |
2754 std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20), | 2769 std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20), |
2755 std::tr1::make_tuple(16000, 16000, 48000, 16000, 35, 20), | 2770 std::tr1::make_tuple(16000, 16000, 48000, 16000, 35, 20), |
2756 std::tr1::make_tuple(16000, 16000, 32000, 16000, 35, 20), | 2771 std::tr1::make_tuple(16000, 16000, 32000, 16000, 35, 20), |
2757 std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0))); | 2772 std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0))); |
2758 #endif | 2773 #endif |
2759 | 2774 |
2760 } // namespace | 2775 } // namespace |
2761 } // namespace webrtc | 2776 } // namespace webrtc |