Index: webrtc/voice_engine/test/auto_test/standard/audio_processing_test.cc
diff --git a/webrtc/voice_engine/test/auto_test/standard/audio_processing_test.cc b/webrtc/voice_engine/test/auto_test/standard/audio_processing_test.cc
index 4aab3639b94bcf2968d282b7ec15631a471942d2..b9e9ff7ff166f5e8d1d7b5f65572d6458cfced1b 100644
--- a/webrtc/voice_engine/test/auto_test/standard/audio_processing_test.cc
+++ b/webrtc/voice_engine/test/auto_test/standard/audio_processing_test.cc
@@ -66,37 +66,6 @@ class AudioProcessingTest : public AfterStreamingFixture {
EXPECT_TRUE(ns_status);
EXPECT_EQ(expected_ns_mode, ns_mode);
}
-
- void TryDetectingSilence() {
- // Here, speech is running. Shut down speech.
- EXPECT_EQ(0, voe_codec_->SetVADStatus(channel_, true));
- EXPECT_EQ(0, voe_volume_control_->SetInputMute(channel_, true));
- EXPECT_EQ(0, voe_file_->StopPlayingFileAsMicrophone(channel_));
-
- // We should detect the silence after a short time.
- Sleep(50);
- for (int i = 0; i < 25; i++) {
- EXPECT_EQ(0, voe_apm_->VoiceActivityIndicator(channel_));
- Sleep(10);
- }
- }
-
- void TryDetectingSpeechAfterSilence() {
- // Re-enable speech.
- RestartFakeMicrophone();
- EXPECT_EQ(0, voe_codec_->SetVADStatus(channel_, false));
- EXPECT_EQ(0, voe_volume_control_->SetInputMute(channel_, false));
-
- // We should detect the speech after a short time.
- for (int i = 0; i < 50; i++) {
- if (voe_apm_->VoiceActivityIndicator(channel_) == 1) {
- return;
- }
- Sleep(10);
- }
-
- ADD_FAILURE() << "Failed to detect speech within 500 ms.";
- }
};
#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
@@ -328,10 +297,4 @@ TEST_F(AudioProcessingTest, EcIsDisabledAndAecmIsDefaultEcMode) {
EXPECT_EQ(webrtc::kEcAecm, ec_mode);
}
-// Not needed anymore - API is unused.
-TEST_F(AudioProcessingTest, TestVoiceActivityDetection) {
- TryDetectingSilence();
- TryDetectingSpeechAfterSilence();
-}
-
#endif // WEBRTC_IOS || WEBRTC_ANDROID