OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
62 | 62 |
63 enum StreamDirection { kForward = 0, kReverse }; | 63 enum StreamDirection { kForward = 0, kReverse }; |
64 | 64 |
65 void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) { | 65 void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) { |
66 ChannelBuffer<int16_t> cb_int(cb->num_frames(), | 66 ChannelBuffer<int16_t> cb_int(cb->num_frames(), |
67 cb->num_channels()); | 67 cb->num_channels()); |
68 Deinterleave(int_data, | 68 Deinterleave(int_data, |
69 cb->num_frames(), | 69 cb->num_frames(), |
70 cb->num_channels(), | 70 cb->num_channels(), |
71 cb_int.channels()); | 71 cb_int.channels()); |
72 for (int i = 0; i < cb->num_channels(); ++i) { | 72 for (size_t i = 0; i < cb->num_channels(); ++i) { |
73 S16ToFloat(cb_int.channels()[i], | 73 S16ToFloat(cb_int.channels()[i], |
74 cb->num_frames(), | 74 cb->num_frames(), |
75 cb->channels()[i]); | 75 cb->channels()[i]); |
76 } | 76 } |
77 } | 77 } |
78 | 78 |
79 void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) { | 79 void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) { |
80 ConvertToFloat(frame.data_, cb); | 80 ConvertToFloat(frame.data_, cb); |
81 } | 81 } |
82 | 82 |
83 // Number of channels including the keyboard channel. | 83 // Number of channels including the keyboard channel. |
84 int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) { | 84 size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) { |
85 switch (layout) { | 85 switch (layout) { |
86 case AudioProcessing::kMono: | 86 case AudioProcessing::kMono: |
87 return 1; | 87 return 1; |
88 case AudioProcessing::kMonoAndKeyboard: | 88 case AudioProcessing::kMonoAndKeyboard: |
89 case AudioProcessing::kStereo: | 89 case AudioProcessing::kStereo: |
90 return 2; | 90 return 2; |
91 case AudioProcessing::kStereoAndKeyboard: | 91 case AudioProcessing::kStereoAndKeyboard: |
92 return 3; | 92 return 3; |
93 } | 93 } |
94 assert(false); | 94 assert(false); |
(...skipping 29 matching lines...) Expand all Loading... |
124 } | 124 } |
125 | 125 |
126 void SetFrameTo(AudioFrame* frame, int16_t value) { | 126 void SetFrameTo(AudioFrame* frame, int16_t value) { |
127 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; | 127 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; |
128 ++i) { | 128 ++i) { |
129 frame->data_[i] = value; | 129 frame->data_[i] = value; |
130 } | 130 } |
131 } | 131 } |
132 | 132 |
133 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) { | 133 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) { |
134 ASSERT_EQ(2, frame->num_channels_); | 134 ASSERT_EQ(2u, frame->num_channels_); |
135 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) { | 135 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) { |
136 frame->data_[i] = left; | 136 frame->data_[i] = left; |
137 frame->data_[i + 1] = right; | 137 frame->data_[i + 1] = right; |
138 } | 138 } |
139 } | 139 } |
140 | 140 |
141 void ScaleFrame(AudioFrame* frame, float scale) { | 141 void ScaleFrame(AudioFrame* frame, float scale) { |
142 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; | 142 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; |
143 ++i) { | 143 ++i) { |
144 frame->data_[i] = FloatS16ToS16(frame->data_[i] * scale); | 144 frame->data_[i] = FloatS16ToS16(frame->data_[i] * scale); |
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
249 // Temporary filenames unique to this process. Used to be able to run these | 249 // Temporary filenames unique to this process. Used to be able to run these |
250 // tests in parallel; as each process needs to run in isolation, they can't | 250 // tests in parallel; as each process needs to run in isolation, they can't |
251 // have competing filenames. | 251 // have competing filenames. |
252 std::map<std::string, std::string> temp_filenames; | 252 std::map<std::string, std::string> temp_filenames; |
253 | 253 |
254 std::string OutputFilePath(std::string name, | 254 std::string OutputFilePath(std::string name, |
255 int input_rate, | 255 int input_rate, |
256 int output_rate, | 256 int output_rate, |
257 int reverse_input_rate, | 257 int reverse_input_rate, |
258 int reverse_output_rate, | 258 int reverse_output_rate, |
259 int num_input_channels, | 259 size_t num_input_channels, |
260 int num_output_channels, | 260 size_t num_output_channels, |
261 int num_reverse_input_channels, | 261 size_t num_reverse_input_channels, |
262 int num_reverse_output_channels, | 262 size_t num_reverse_output_channels, |
263 StreamDirection file_direction) { | 263 StreamDirection file_direction) { |
264 std::ostringstream ss; | 264 std::ostringstream ss; |
265 ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir" | 265 ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir" |
266 << num_reverse_input_channels << "_" << reverse_input_rate / 1000 << "_"; | 266 << num_reverse_input_channels << "_" << reverse_input_rate / 1000 << "_"; |
267 if (num_output_channels == 1) { | 267 if (num_output_channels == 1) { |
268 ss << "mono"; | 268 ss << "mono"; |
269 } else if (num_output_channels == 2) { | 269 } else if (num_output_channels == 2) { |
270 ss << "stereo"; | 270 ss << "stereo"; |
271 } else { | 271 } else { |
272 assert(false); | 272 assert(false); |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
347 | 347 |
348 // Used to select between int and float interface tests. | 348 // Used to select between int and float interface tests. |
349 enum Format { | 349 enum Format { |
350 kIntFormat, | 350 kIntFormat, |
351 kFloatFormat | 351 kFloatFormat |
352 }; | 352 }; |
353 | 353 |
354 void Init(int sample_rate_hz, | 354 void Init(int sample_rate_hz, |
355 int output_sample_rate_hz, | 355 int output_sample_rate_hz, |
356 int reverse_sample_rate_hz, | 356 int reverse_sample_rate_hz, |
357 int num_input_channels, | 357 size_t num_input_channels, |
358 int num_output_channels, | 358 size_t num_output_channels, |
359 int num_reverse_channels, | 359 size_t num_reverse_channels, |
360 bool open_output_file); | 360 bool open_output_file); |
361 void Init(AudioProcessing* ap); | 361 void Init(AudioProcessing* ap); |
362 void EnableAllComponents(); | 362 void EnableAllComponents(); |
363 bool ReadFrame(FILE* file, AudioFrame* frame); | 363 bool ReadFrame(FILE* file, AudioFrame* frame); |
364 bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb); | 364 bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb); |
365 void ReadFrameWithRewind(FILE* file, AudioFrame* frame); | 365 void ReadFrameWithRewind(FILE* file, AudioFrame* frame); |
366 void ReadFrameWithRewind(FILE* file, AudioFrame* frame, | 366 void ReadFrameWithRewind(FILE* file, AudioFrame* frame, |
367 ChannelBuffer<float>* cb); | 367 ChannelBuffer<float>* cb); |
368 void ProcessWithDefaultStreamParameters(AudioFrame* frame); | 368 void ProcessWithDefaultStreamParameters(AudioFrame* frame); |
369 void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms, | 369 void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms, |
370 int delay_min, int delay_max); | 370 int delay_min, int delay_max); |
371 void TestChangingChannelsInt16Interface( | 371 void TestChangingChannelsInt16Interface( |
372 int num_channels, | 372 size_t num_channels, |
373 AudioProcessing::Error expected_return); | 373 AudioProcessing::Error expected_return); |
374 void TestChangingForwardChannels(int num_in_channels, | 374 void TestChangingForwardChannels(size_t num_in_channels, |
375 int num_out_channels, | 375 size_t num_out_channels, |
376 AudioProcessing::Error expected_return); | 376 AudioProcessing::Error expected_return); |
377 void TestChangingReverseChannels(int num_rev_channels, | 377 void TestChangingReverseChannels(size_t num_rev_channels, |
378 AudioProcessing::Error expected_return); | 378 AudioProcessing::Error expected_return); |
379 void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate); | 379 void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate); |
380 void RunManualVolumeChangeIsPossibleTest(int sample_rate); | 380 void RunManualVolumeChangeIsPossibleTest(int sample_rate); |
381 void StreamParametersTest(Format format); | 381 void StreamParametersTest(Format format); |
382 int ProcessStreamChooser(Format format); | 382 int ProcessStreamChooser(Format format); |
383 int AnalyzeReverseStreamChooser(Format format); | 383 int AnalyzeReverseStreamChooser(Format format); |
384 void ProcessDebugDump(const std::string& in_filename, | 384 void ProcessDebugDump(const std::string& in_filename, |
385 const std::string& out_filename, | 385 const std::string& out_filename, |
386 Format format); | 386 Format format); |
387 void VerifyDebugDumpTest(Format format); | 387 void VerifyDebugDumpTest(Format format); |
388 | 388 |
389 const std::string output_path_; | 389 const std::string output_path_; |
390 const std::string ref_path_; | 390 const std::string ref_path_; |
391 const std::string ref_filename_; | 391 const std::string ref_filename_; |
392 rtc::scoped_ptr<AudioProcessing> apm_; | 392 rtc::scoped_ptr<AudioProcessing> apm_; |
393 AudioFrame* frame_; | 393 AudioFrame* frame_; |
394 AudioFrame* revframe_; | 394 AudioFrame* revframe_; |
395 rtc::scoped_ptr<ChannelBuffer<float> > float_cb_; | 395 rtc::scoped_ptr<ChannelBuffer<float> > float_cb_; |
396 rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_; | 396 rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_; |
397 int output_sample_rate_hz_; | 397 int output_sample_rate_hz_; |
398 int num_output_channels_; | 398 size_t num_output_channels_; |
399 FILE* far_file_; | 399 FILE* far_file_; |
400 FILE* near_file_; | 400 FILE* near_file_; |
401 FILE* out_file_; | 401 FILE* out_file_; |
402 }; | 402 }; |
403 | 403 |
404 ApmTest::ApmTest() | 404 ApmTest::ApmTest() |
405 : output_path_(test::OutputPath()), | 405 : output_path_(test::OutputPath()), |
406 ref_path_(test::ProjectRootPath() + "data/audio_processing/"), | 406 ref_path_(test::ProjectRootPath() + "data/audio_processing/"), |
407 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) | 407 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) |
408 ref_filename_(ref_path_ + "output_data_fixed.pb"), | 408 ref_filename_(ref_path_ + "output_data_fixed.pb"), |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
472 ap->Initialize( | 472 ap->Initialize( |
473 {{{frame_->sample_rate_hz_, frame_->num_channels_}, | 473 {{{frame_->sample_rate_hz_, frame_->num_channels_}, |
474 {output_sample_rate_hz_, num_output_channels_}, | 474 {output_sample_rate_hz_, num_output_channels_}, |
475 {revframe_->sample_rate_hz_, revframe_->num_channels_}, | 475 {revframe_->sample_rate_hz_, revframe_->num_channels_}, |
476 {revframe_->sample_rate_hz_, revframe_->num_channels_}}})); | 476 {revframe_->sample_rate_hz_, revframe_->num_channels_}}})); |
477 } | 477 } |
478 | 478 |
479 void ApmTest::Init(int sample_rate_hz, | 479 void ApmTest::Init(int sample_rate_hz, |
480 int output_sample_rate_hz, | 480 int output_sample_rate_hz, |
481 int reverse_sample_rate_hz, | 481 int reverse_sample_rate_hz, |
482 int num_input_channels, | 482 size_t num_input_channels, |
483 int num_output_channels, | 483 size_t num_output_channels, |
484 int num_reverse_channels, | 484 size_t num_reverse_channels, |
485 bool open_output_file) { | 485 bool open_output_file) { |
486 SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_); | 486 SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_); |
487 output_sample_rate_hz_ = output_sample_rate_hz; | 487 output_sample_rate_hz_ = output_sample_rate_hz; |
488 num_output_channels_ = num_output_channels; | 488 num_output_channels_ = num_output_channels; |
489 | 489 |
490 SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_, | 490 SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_, |
491 &revfloat_cb_); | 491 &revfloat_cb_); |
492 Init(apm_.get()); | 492 Init(apm_.get()); |
493 | 493 |
494 if (far_file_) { | 494 if (far_file_) { |
(...skipping 311 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
806 // Low limit of 0 ms. | 806 // Low limit of 0 ms. |
807 apm_->set_delay_offset_ms(-50); | 807 apm_->set_delay_offset_ms(-50); |
808 EXPECT_EQ(-50, apm_->delay_offset_ms()); | 808 EXPECT_EQ(-50, apm_->delay_offset_ms()); |
809 EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20)); | 809 EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20)); |
810 EXPECT_EQ(0, apm_->stream_delay_ms()); | 810 EXPECT_EQ(0, apm_->stream_delay_ms()); |
811 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100)); | 811 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100)); |
812 EXPECT_EQ(50, apm_->stream_delay_ms()); | 812 EXPECT_EQ(50, apm_->stream_delay_ms()); |
813 } | 813 } |
814 | 814 |
815 void ApmTest::TestChangingChannelsInt16Interface( | 815 void ApmTest::TestChangingChannelsInt16Interface( |
816 int num_channels, | 816 size_t num_channels, |
817 AudioProcessing::Error expected_return) { | 817 AudioProcessing::Error expected_return) { |
818 frame_->num_channels_ = num_channels; | 818 frame_->num_channels_ = num_channels; |
819 EXPECT_EQ(expected_return, apm_->ProcessStream(frame_)); | 819 EXPECT_EQ(expected_return, apm_->ProcessStream(frame_)); |
820 EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_)); | 820 EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_)); |
821 } | 821 } |
822 | 822 |
823 void ApmTest::TestChangingForwardChannels( | 823 void ApmTest::TestChangingForwardChannels( |
824 int num_in_channels, | 824 size_t num_in_channels, |
825 int num_out_channels, | 825 size_t num_out_channels, |
826 AudioProcessing::Error expected_return) { | 826 AudioProcessing::Error expected_return) { |
827 const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels}; | 827 const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels}; |
828 const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels}; | 828 const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels}; |
829 | 829 |
830 EXPECT_EQ(expected_return, | 830 EXPECT_EQ(expected_return, |
831 apm_->ProcessStream(float_cb_->channels(), input_stream, | 831 apm_->ProcessStream(float_cb_->channels(), input_stream, |
832 output_stream, float_cb_->channels())); | 832 output_stream, float_cb_->channels())); |
833 } | 833 } |
834 | 834 |
835 void ApmTest::TestChangingReverseChannels( | 835 void ApmTest::TestChangingReverseChannels( |
836 int num_rev_channels, | 836 size_t num_rev_channels, |
837 AudioProcessing::Error expected_return) { | 837 AudioProcessing::Error expected_return) { |
838 const ProcessingConfig processing_config = { | 838 const ProcessingConfig processing_config = { |
839 {{frame_->sample_rate_hz_, apm_->num_input_channels()}, | 839 {{frame_->sample_rate_hz_, apm_->num_input_channels()}, |
840 {output_sample_rate_hz_, apm_->num_output_channels()}, | 840 {output_sample_rate_hz_, apm_->num_output_channels()}, |
841 {frame_->sample_rate_hz_, num_rev_channels}, | 841 {frame_->sample_rate_hz_, num_rev_channels}, |
842 {frame_->sample_rate_hz_, num_rev_channels}}}; | 842 {frame_->sample_rate_hz_, num_rev_channels}}}; |
843 | 843 |
844 EXPECT_EQ( | 844 EXPECT_EQ( |
845 expected_return, | 845 expected_return, |
846 apm_->ProcessReverseStream( | 846 apm_->ProcessReverseStream( |
847 float_cb_->channels(), processing_config.reverse_input_stream(), | 847 float_cb_->channels(), processing_config.reverse_input_stream(), |
848 processing_config.reverse_output_stream(), float_cb_->channels())); | 848 processing_config.reverse_output_stream(), float_cb_->channels())); |
849 } | 849 } |
850 | 850 |
851 TEST_F(ApmTest, ChannelsInt16Interface) { | 851 TEST_F(ApmTest, ChannelsInt16Interface) { |
852 // Testing number of invalid and valid channels. | 852 // Testing number of invalid and valid channels. |
853 Init(16000, 16000, 16000, 4, 4, 4, false); | 853 Init(16000, 16000, 16000, 4, 4, 4, false); |
854 | 854 |
855 TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError); | 855 TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError); |
856 | 856 |
857 for (int i = 1; i < 4; i++) { | 857 for (size_t i = 1; i < 4; i++) { |
858 TestChangingChannelsInt16Interface(i, kNoErr); | 858 TestChangingChannelsInt16Interface(i, kNoErr); |
859 EXPECT_EQ(i, apm_->num_input_channels()); | 859 EXPECT_EQ(i, apm_->num_input_channels()); |
860 // We always force the number of reverse channels used for processing to 1. | 860 // We always force the number of reverse channels used for processing to 1. |
861 EXPECT_EQ(1, apm_->num_reverse_channels()); | 861 EXPECT_EQ(1u, apm_->num_reverse_channels()); |
862 } | 862 } |
863 } | 863 } |
864 | 864 |
865 TEST_F(ApmTest, Channels) { | 865 TEST_F(ApmTest, Channels) { |
866 // Testing number of invalid and valid channels. | 866 // Testing number of invalid and valid channels. |
867 Init(16000, 16000, 16000, 4, 4, 4, false); | 867 Init(16000, 16000, 16000, 4, 4, 4, false); |
868 | 868 |
869 TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError); | 869 TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError); |
870 TestChangingReverseChannels(0, apm_->kBadNumberChannelsError); | 870 TestChangingReverseChannels(0, apm_->kBadNumberChannelsError); |
871 | 871 |
872 for (int i = 1; i < 4; ++i) { | 872 for (size_t i = 1; i < 4; ++i) { |
873 for (int j = 0; j < 1; ++j) { | 873 for (size_t j = 0; j < 1; ++j) { |
874 // Output channels must be one or match input channels. | 874 // Output channels must be one or match input channels. |
875 if (j == 1 || i == j) { | 875 if (j == 1 || i == j) { |
876 TestChangingForwardChannels(i, j, kNoErr); | 876 TestChangingForwardChannels(i, j, kNoErr); |
877 TestChangingReverseChannels(i, kNoErr); | 877 TestChangingReverseChannels(i, kNoErr); |
878 | 878 |
879 EXPECT_EQ(i, apm_->num_input_channels()); | 879 EXPECT_EQ(i, apm_->num_input_channels()); |
880 EXPECT_EQ(j, apm_->num_output_channels()); | 880 EXPECT_EQ(j, apm_->num_output_channels()); |
881 // The number of reverse channels used for processing is always 1. | 881 // The number of reverse channels used for processing is always 1. |
882 EXPECT_EQ(1, apm_->num_reverse_channels()); | 882 EXPECT_EQ(1u, apm_->num_reverse_channels()); |
883 } else { | 883 } else { |
884 TestChangingForwardChannels(i, j, | 884 TestChangingForwardChannels(i, j, |
885 AudioProcessing::kBadNumberChannelsError); | 885 AudioProcessing::kBadNumberChannelsError); |
886 } | 886 } |
887 } | 887 } |
888 } | 888 } |
889 } | 889 } |
890 | 890 |
891 TEST_F(ApmTest, SampleRatesInt) { | 891 TEST_F(ApmTest, SampleRatesInt) { |
892 // Testing invalid sample rates | 892 // Testing invalid sample rates |
(...skipping 393 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1286 for (size_t i = 0; i < arraysize(kSampleRates); ++i) { | 1286 for (size_t i = 0; i < arraysize(kSampleRates); ++i) { |
1287 RunManualVolumeChangeIsPossibleTest(kSampleRates[i]); | 1287 RunManualVolumeChangeIsPossibleTest(kSampleRates[i]); |
1288 } | 1288 } |
1289 } | 1289 } |
1290 | 1290 |
1291 #if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) | 1291 #if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) |
1292 TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) { | 1292 TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) { |
1293 const int kSampleRateHz = 16000; | 1293 const int kSampleRateHz = 16000; |
1294 const size_t kSamplesPerChannel = | 1294 const size_t kSamplesPerChannel = |
1295 static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000); | 1295 static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000); |
1296 const int kNumInputChannels = 2; | 1296 const size_t kNumInputChannels = 2; |
1297 const int kNumOutputChannels = 1; | 1297 const size_t kNumOutputChannels = 1; |
1298 const size_t kNumChunks = 700; | 1298 const size_t kNumChunks = 700; |
1299 const float kScaleFactor = 0.25f; | 1299 const float kScaleFactor = 0.25f; |
1300 Config config; | 1300 Config config; |
1301 std::vector<webrtc::Point> geometry; | 1301 std::vector<webrtc::Point> geometry; |
1302 geometry.push_back(webrtc::Point(0.f, 0.f, 0.f)); | 1302 geometry.push_back(webrtc::Point(0.f, 0.f, 0.f)); |
1303 geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f)); | 1303 geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f)); |
1304 config.Set<Beamforming>(new Beamforming(true, geometry)); | 1304 config.Set<Beamforming>(new Beamforming(true, geometry)); |
1305 testing::NiceMock<MockNonlinearBeamformer>* beamformer = | 1305 testing::NiceMock<MockNonlinearBeamformer>* beamformer = |
1306 new testing::NiceMock<MockNonlinearBeamformer>(geometry); | 1306 new testing::NiceMock<MockNonlinearBeamformer>(geometry); |
1307 rtc::scoped_ptr<AudioProcessing> apm( | 1307 rtc::scoped_ptr<AudioProcessing> apm( |
(...skipping 12 matching lines...) Expand all Loading... |
1320 const int kDefaultCompressionGain = | 1320 const int kDefaultCompressionGain = |
1321 apm->gain_control()->compression_gain_db(); | 1321 apm->gain_control()->compression_gain_db(); |
1322 bool is_target = false; | 1322 bool is_target = false; |
1323 EXPECT_CALL(*beamformer, is_target_present()) | 1323 EXPECT_CALL(*beamformer, is_target_present()) |
1324 .WillRepeatedly(testing::ReturnPointee(&is_target)); | 1324 .WillRepeatedly(testing::ReturnPointee(&is_target)); |
1325 for (size_t i = 0; i < kNumChunks; ++i) { | 1325 for (size_t i = 0; i < kNumChunks; ++i) { |
1326 ASSERT_TRUE(ReadChunk(far_file, | 1326 ASSERT_TRUE(ReadChunk(far_file, |
1327 int_data.get(), | 1327 int_data.get(), |
1328 float_data.get(), | 1328 float_data.get(), |
1329 &src_buf)); | 1329 &src_buf)); |
1330 for (int j = 0; j < kNumInputChannels; ++j) { | 1330 for (size_t j = 0; j < kNumInputChannels; ++j) { |
1331 for (size_t k = 0; k < kSamplesPerChannel; ++k) { | 1331 for (size_t k = 0; k < kSamplesPerChannel; ++k) { |
1332 src_buf.channels()[j][k] *= kScaleFactor; | 1332 src_buf.channels()[j][k] *= kScaleFactor; |
1333 } | 1333 } |
1334 } | 1334 } |
1335 EXPECT_EQ(kNoErr, | 1335 EXPECT_EQ(kNoErr, |
1336 apm->ProcessStream(src_buf.channels(), | 1336 apm->ProcessStream(src_buf.channels(), |
1337 src_buf.num_frames(), | 1337 src_buf.num_frames(), |
1338 kSampleRateHz, | 1338 kSampleRateHz, |
1339 LayoutFromChannels(src_buf.num_channels()), | 1339 LayoutFromChannels(src_buf.num_channels()), |
1340 kSampleRateHz, | 1340 kSampleRateHz, |
1341 LayoutFromChannels(dest_buf.num_channels()), | 1341 LayoutFromChannels(dest_buf.num_channels()), |
1342 dest_buf.channels())); | 1342 dest_buf.channels())); |
1343 } | 1343 } |
1344 EXPECT_EQ(kDefaultVolume, | 1344 EXPECT_EQ(kDefaultVolume, |
1345 apm->gain_control()->stream_analog_level()); | 1345 apm->gain_control()->stream_analog_level()); |
1346 EXPECT_EQ(kDefaultCompressionGain, | 1346 EXPECT_EQ(kDefaultCompressionGain, |
1347 apm->gain_control()->compression_gain_db()); | 1347 apm->gain_control()->compression_gain_db()); |
1348 rewind(far_file); | 1348 rewind(far_file); |
1349 is_target = true; | 1349 is_target = true; |
1350 for (size_t i = 0; i < kNumChunks; ++i) { | 1350 for (size_t i = 0; i < kNumChunks; ++i) { |
1351 ASSERT_TRUE(ReadChunk(far_file, | 1351 ASSERT_TRUE(ReadChunk(far_file, |
1352 int_data.get(), | 1352 int_data.get(), |
1353 float_data.get(), | 1353 float_data.get(), |
1354 &src_buf)); | 1354 &src_buf)); |
1355 for (int j = 0; j < kNumInputChannels; ++j) { | 1355 for (size_t j = 0; j < kNumInputChannels; ++j) { |
1356 for (size_t k = 0; k < kSamplesPerChannel; ++k) { | 1356 for (size_t k = 0; k < kSamplesPerChannel; ++k) { |
1357 src_buf.channels()[j][k] *= kScaleFactor; | 1357 src_buf.channels()[j][k] *= kScaleFactor; |
1358 } | 1358 } |
1359 } | 1359 } |
1360 EXPECT_EQ(kNoErr, | 1360 EXPECT_EQ(kNoErr, |
1361 apm->ProcessStream(src_buf.channels(), | 1361 apm->ProcessStream(src_buf.channels(), |
1362 src_buf.num_frames(), | 1362 src_buf.num_frames(), |
1363 kSampleRateHz, | 1363 kSampleRateHz, |
1364 LayoutFromChannels(src_buf.num_channels()), | 1364 LayoutFromChannels(src_buf.num_channels()), |
1365 kSampleRateHz, | 1365 kSampleRateHz, |
(...skipping 369 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1735 // StartDebugRecording() writes an additional init message. Don't start | 1735 // StartDebugRecording() writes an additional init message. Don't start |
1736 // recording until after the first init to avoid the extra message. | 1736 // recording until after the first init to avoid the extra message. |
1737 EXPECT_NOERR(apm_->StartDebugRecording(out_filename.c_str())); | 1737 EXPECT_NOERR(apm_->StartDebugRecording(out_filename.c_str())); |
1738 first_init = false; | 1738 first_init = false; |
1739 } | 1739 } |
1740 | 1740 |
1741 } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) { | 1741 } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) { |
1742 const audioproc::ReverseStream msg = event_msg.reverse_stream(); | 1742 const audioproc::ReverseStream msg = event_msg.reverse_stream(); |
1743 | 1743 |
1744 if (msg.channel_size() > 0) { | 1744 if (msg.channel_size() > 0) { |
1745 ASSERT_EQ(revframe_->num_channels_, msg.channel_size()); | 1745 ASSERT_EQ(revframe_->num_channels_, |
| 1746 static_cast<size_t>(msg.channel_size())); |
1746 for (int i = 0; i < msg.channel_size(); ++i) { | 1747 for (int i = 0; i < msg.channel_size(); ++i) { |
1747 memcpy(revfloat_cb_->channels()[i], | 1748 memcpy(revfloat_cb_->channels()[i], |
1748 msg.channel(i).data(), | 1749 msg.channel(i).data(), |
1749 msg.channel(i).size()); | 1750 msg.channel(i).size()); |
1750 } | 1751 } |
1751 } else { | 1752 } else { |
1752 memcpy(revframe_->data_, msg.data().data(), msg.data().size()); | 1753 memcpy(revframe_->data_, msg.data().data(), msg.data().size()); |
1753 if (format == kFloatFormat) { | 1754 if (format == kFloatFormat) { |
1754 // We're using an int16 input file; convert to float. | 1755 // We're using an int16 input file; convert to float. |
1755 ConvertToFloat(*revframe_, revfloat_cb_.get()); | 1756 ConvertToFloat(*revframe_, revfloat_cb_.get()); |
1756 } | 1757 } |
1757 } | 1758 } |
1758 AnalyzeReverseStreamChooser(format); | 1759 AnalyzeReverseStreamChooser(format); |
1759 | 1760 |
1760 } else if (event_msg.type() == audioproc::Event::STREAM) { | 1761 } else if (event_msg.type() == audioproc::Event::STREAM) { |
1761 const audioproc::Stream msg = event_msg.stream(); | 1762 const audioproc::Stream msg = event_msg.stream(); |
1762 // ProcessStream could have changed this for the output frame. | 1763 // ProcessStream could have changed this for the output frame. |
1763 frame_->num_channels_ = apm_->num_input_channels(); | 1764 frame_->num_channels_ = apm_->num_input_channels(); |
1764 | 1765 |
1765 EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level())); | 1766 EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level())); |
1766 EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay())); | 1767 EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay())); |
1767 apm_->echo_cancellation()->set_stream_drift_samples(msg.drift()); | 1768 apm_->echo_cancellation()->set_stream_drift_samples(msg.drift()); |
1768 if (msg.has_keypress()) { | 1769 if (msg.has_keypress()) { |
1769 apm_->set_stream_key_pressed(msg.keypress()); | 1770 apm_->set_stream_key_pressed(msg.keypress()); |
1770 } else { | 1771 } else { |
1771 apm_->set_stream_key_pressed(true); | 1772 apm_->set_stream_key_pressed(true); |
1772 } | 1773 } |
1773 | 1774 |
1774 if (msg.input_channel_size() > 0) { | 1775 if (msg.input_channel_size() > 0) { |
1775 ASSERT_EQ(frame_->num_channels_, msg.input_channel_size()); | 1776 ASSERT_EQ(frame_->num_channels_, |
| 1777 static_cast<size_t>(msg.input_channel_size())); |
1776 for (int i = 0; i < msg.input_channel_size(); ++i) { | 1778 for (int i = 0; i < msg.input_channel_size(); ++i) { |
1777 memcpy(float_cb_->channels()[i], | 1779 memcpy(float_cb_->channels()[i], |
1778 msg.input_channel(i).data(), | 1780 msg.input_channel(i).data(), |
1779 msg.input_channel(i).size()); | 1781 msg.input_channel(i).size()); |
1780 } | 1782 } |
1781 } else { | 1783 } else { |
1782 memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size()); | 1784 memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size()); |
1783 if (format == kFloatFormat) { | 1785 if (format == kFloatFormat) { |
1784 // We're using an int16 input file; convert to float. | 1786 // We're using an int16 input file; convert to float. |
1785 ConvertToFloat(*frame_, float_cb_.get()); | 1787 ConvertToFloat(*frame_, float_cb_.get()); |
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1923 EnableAllComponents(); | 1925 EnableAllComponents(); |
1924 EnableAllAPComponents(fapm.get()); | 1926 EnableAllAPComponents(fapm.get()); |
1925 for (int i = 0; i < ref_data.test_size(); i++) { | 1927 for (int i = 0; i < ref_data.test_size(); i++) { |
1926 printf("Running test %d of %d...\n", i + 1, ref_data.test_size()); | 1928 printf("Running test %d of %d...\n", i + 1, ref_data.test_size()); |
1927 | 1929 |
1928 audioproc::Test* test = ref_data.mutable_test(i); | 1930 audioproc::Test* test = ref_data.mutable_test(i); |
1929 // TODO(ajm): Restore downmixing test cases. | 1931 // TODO(ajm): Restore downmixing test cases. |
1930 if (test->num_input_channels() != test->num_output_channels()) | 1932 if (test->num_input_channels() != test->num_output_channels()) |
1931 continue; | 1933 continue; |
1932 | 1934 |
1933 const int num_render_channels = test->num_reverse_channels(); | 1935 const size_t num_render_channels = |
1934 const int num_input_channels = test->num_input_channels(); | 1936 static_cast<size_t>(test->num_reverse_channels()); |
1935 const int num_output_channels = test->num_output_channels(); | 1937 const size_t num_input_channels = |
| 1938 static_cast<size_t>(test->num_input_channels()); |
| 1939 const size_t num_output_channels = |
| 1940 static_cast<size_t>(test->num_output_channels()); |
1936 const size_t samples_per_channel = static_cast<size_t>( | 1941 const size_t samples_per_channel = static_cast<size_t>( |
1937 test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000); | 1942 test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000); |
1938 | 1943 |
1939 Init(test->sample_rate(), test->sample_rate(), test->sample_rate(), | 1944 Init(test->sample_rate(), test->sample_rate(), test->sample_rate(), |
1940 num_input_channels, num_output_channels, num_render_channels, true); | 1945 num_input_channels, num_output_channels, num_render_channels, true); |
1941 Init(fapm.get()); | 1946 Init(fapm.get()); |
1942 | 1947 |
1943 ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels); | 1948 ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels); |
1944 ChannelBuffer<int16_t> output_int16(samples_per_channel, | 1949 ChannelBuffer<int16_t> output_int16(samples_per_channel, |
1945 num_input_channels); | 1950 num_input_channels); |
(...skipping 22 matching lines...) Expand all Loading... |
1968 output_int16.channels()); | 1973 output_int16.channels()); |
1969 | 1974 |
1970 EXPECT_NOERR(fapm->ProcessStream( | 1975 EXPECT_NOERR(fapm->ProcessStream( |
1971 float_cb_->channels(), | 1976 float_cb_->channels(), |
1972 samples_per_channel, | 1977 samples_per_channel, |
1973 test->sample_rate(), | 1978 test->sample_rate(), |
1974 LayoutFromChannels(num_input_channels), | 1979 LayoutFromChannels(num_input_channels), |
1975 test->sample_rate(), | 1980 test->sample_rate(), |
1976 LayoutFromChannels(num_output_channels), | 1981 LayoutFromChannels(num_output_channels), |
1977 float_cb_->channels())); | 1982 float_cb_->channels())); |
1978 for (int j = 0; j < num_output_channels; ++j) { | 1983 for (size_t j = 0; j < num_output_channels; ++j) { |
1979 FloatToS16(float_cb_->channels()[j], | 1984 FloatToS16(float_cb_->channels()[j], |
1980 samples_per_channel, | 1985 samples_per_channel, |
1981 output_cb.channels()[j]); | 1986 output_cb.channels()[j]); |
1982 float variance = 0; | 1987 float variance = 0; |
1983 float snr = ComputeSNR(output_int16.channels()[j], | 1988 float snr = ComputeSNR(output_int16.channels()[j], |
1984 output_cb.channels()[j], | 1989 output_cb.channels()[j], |
1985 samples_per_channel, &variance); | 1990 samples_per_channel, &variance); |
1986 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) | 1991 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) |
1987 // There are a few chunks in the fixed-point profile that give low SNR. | 1992 // There are a few chunks in the fixed-point profile that give low SNR. |
1988 // Listening confirmed the difference is acceptable. | 1993 // Listening confirmed the difference is acceptable. |
(...skipping 12 matching lines...) Expand all Loading... |
2001 analog_level = fapm->gain_control()->stream_analog_level(); | 2006 analog_level = fapm->gain_control()->stream_analog_level(); |
2002 EXPECT_EQ(apm_->gain_control()->stream_analog_level(), | 2007 EXPECT_EQ(apm_->gain_control()->stream_analog_level(), |
2003 fapm->gain_control()->stream_analog_level()); | 2008 fapm->gain_control()->stream_analog_level()); |
2004 EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(), | 2009 EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(), |
2005 fapm->echo_cancellation()->stream_has_echo()); | 2010 fapm->echo_cancellation()->stream_has_echo()); |
2006 EXPECT_NEAR(apm_->noise_suppression()->speech_probability(), | 2011 EXPECT_NEAR(apm_->noise_suppression()->speech_probability(), |
2007 fapm->noise_suppression()->speech_probability(), | 2012 fapm->noise_suppression()->speech_probability(), |
2008 0.01); | 2013 0.01); |
2009 | 2014 |
2010 // Reset in case of downmixing. | 2015 // Reset in case of downmixing. |
2011 frame_->num_channels_ = test->num_input_channels(); | 2016 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels()); |
2012 } | 2017 } |
2013 rewind(far_file_); | 2018 rewind(far_file_); |
2014 rewind(near_file_); | 2019 rewind(near_file_); |
2015 } | 2020 } |
2016 } | 2021 } |
2017 | 2022 |
2018 // TODO(andrew): Add a test to process a few frames with different combinations | 2023 // TODO(andrew): Add a test to process a few frames with different combinations |
2019 // of enabled components. | 2024 // of enabled components. |
2020 | 2025 |
2021 TEST_F(ApmTest, Process) { | 2026 TEST_F(ApmTest, Process) { |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2062 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 2067 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
2063 config.Set<ExtendedFilter>( | 2068 config.Set<ExtendedFilter>( |
2064 new ExtendedFilter(test->use_aec_extended_filter())); | 2069 new ExtendedFilter(test->use_aec_extended_filter())); |
2065 apm_.reset(AudioProcessing::Create(config)); | 2070 apm_.reset(AudioProcessing::Create(config)); |
2066 | 2071 |
2067 EnableAllComponents(); | 2072 EnableAllComponents(); |
2068 | 2073 |
2069 Init(test->sample_rate(), | 2074 Init(test->sample_rate(), |
2070 test->sample_rate(), | 2075 test->sample_rate(), |
2071 test->sample_rate(), | 2076 test->sample_rate(), |
2072 test->num_input_channels(), | 2077 static_cast<size_t>(test->num_input_channels()), |
2073 test->num_output_channels(), | 2078 static_cast<size_t>(test->num_output_channels()), |
2074 test->num_reverse_channels(), | 2079 static_cast<size_t>(test->num_reverse_channels()), |
2075 true); | 2080 true); |
2076 | 2081 |
2077 int frame_count = 0; | 2082 int frame_count = 0; |
2078 int has_echo_count = 0; | 2083 int has_echo_count = 0; |
2079 int has_voice_count = 0; | 2084 int has_voice_count = 0; |
2080 int is_saturated_count = 0; | 2085 int is_saturated_count = 0; |
2081 int analog_level = 127; | 2086 int analog_level = 127; |
2082 int analog_level_average = 0; | 2087 int analog_level_average = 0; |
2083 int max_output_average = 0; | 2088 int max_output_average = 0; |
2084 float ns_speech_prob_average = 0.0f; | 2089 float ns_speech_prob_average = 0.0f; |
2085 | 2090 |
2086 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) { | 2091 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) { |
2087 EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_)); | 2092 EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_)); |
2088 | 2093 |
2089 frame_->vad_activity_ = AudioFrame::kVadUnknown; | 2094 frame_->vad_activity_ = AudioFrame::kVadUnknown; |
2090 | 2095 |
2091 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0)); | 2096 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0)); |
2092 apm_->echo_cancellation()->set_stream_drift_samples(0); | 2097 apm_->echo_cancellation()->set_stream_drift_samples(0); |
2093 EXPECT_EQ(apm_->kNoError, | 2098 EXPECT_EQ(apm_->kNoError, |
2094 apm_->gain_control()->set_stream_analog_level(analog_level)); | 2099 apm_->gain_control()->set_stream_analog_level(analog_level)); |
2095 | 2100 |
2096 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_)); | 2101 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_)); |
2097 | 2102 |
2098 // Ensure the frame was downmixed properly. | 2103 // Ensure the frame was downmixed properly. |
2099 EXPECT_EQ(test->num_output_channels(), frame_->num_channels_); | 2104 EXPECT_EQ(static_cast<size_t>(test->num_output_channels()), |
| 2105 frame_->num_channels_); |
2100 | 2106 |
2101 max_output_average += MaxAudioFrame(*frame_); | 2107 max_output_average += MaxAudioFrame(*frame_); |
2102 | 2108 |
2103 if (apm_->echo_cancellation()->stream_has_echo()) { | 2109 if (apm_->echo_cancellation()->stream_has_echo()) { |
2104 has_echo_count++; | 2110 has_echo_count++; |
2105 } | 2111 } |
2106 | 2112 |
2107 analog_level = apm_->gain_control()->stream_analog_level(); | 2113 analog_level = apm_->gain_control()->stream_analog_level(); |
2108 analog_level_average += analog_level; | 2114 analog_level_average += analog_level; |
2109 if (apm_->gain_control()->stream_is_saturated()) { | 2115 if (apm_->gain_control()->stream_is_saturated()) { |
2110 is_saturated_count++; | 2116 is_saturated_count++; |
2111 } | 2117 } |
2112 if (apm_->voice_detection()->stream_has_voice()) { | 2118 if (apm_->voice_detection()->stream_has_voice()) { |
2113 has_voice_count++; | 2119 has_voice_count++; |
2114 EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_); | 2120 EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_); |
2115 } else { | 2121 } else { |
2116 EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_); | 2122 EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_); |
2117 } | 2123 } |
2118 | 2124 |
2119 ns_speech_prob_average += apm_->noise_suppression()->speech_probability(); | 2125 ns_speech_prob_average += apm_->noise_suppression()->speech_probability(); |
2120 | 2126 |
2121 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_; | 2127 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_; |
2122 size_t write_count = fwrite(frame_->data_, | 2128 size_t write_count = fwrite(frame_->data_, |
2123 sizeof(int16_t), | 2129 sizeof(int16_t), |
2124 frame_size, | 2130 frame_size, |
2125 out_file_); | 2131 out_file_); |
2126 ASSERT_EQ(frame_size, write_count); | 2132 ASSERT_EQ(frame_size, write_count); |
2127 | 2133 |
2128 // Reset in case of downmixing. | 2134 // Reset in case of downmixing. |
2129 frame_->num_channels_ = test->num_input_channels(); | 2135 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels()); |
2130 frame_count++; | 2136 frame_count++; |
2131 } | 2137 } |
2132 max_output_average /= frame_count; | 2138 max_output_average /= frame_count; |
2133 analog_level_average /= frame_count; | 2139 analog_level_average /= frame_count; |
2134 ns_speech_prob_average /= frame_count; | 2140 ns_speech_prob_average /= frame_count; |
2135 | 2141 |
2136 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) | 2142 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) |
2137 EchoCancellation::Metrics echo_metrics; | 2143 EchoCancellation::Metrics echo_metrics; |
2138 EXPECT_EQ(apm_->kNoError, | 2144 EXPECT_EQ(apm_->kNoError, |
2139 apm_->echo_cancellation()->GetMetrics(&echo_metrics)); | 2145 apm_->echo_cancellation()->GetMetrics(&echo_metrics)); |
(...skipping 203 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2343 reverse_input_rate_(std::tr1::get<2>(GetParam())), | 2349 reverse_input_rate_(std::tr1::get<2>(GetParam())), |
2344 reverse_output_rate_(std::tr1::get<3>(GetParam())), | 2350 reverse_output_rate_(std::tr1::get<3>(GetParam())), |
2345 expected_snr_(std::tr1::get<4>(GetParam())), | 2351 expected_snr_(std::tr1::get<4>(GetParam())), |
2346 expected_reverse_snr_(std::tr1::get<5>(GetParam())) {} | 2352 expected_reverse_snr_(std::tr1::get<5>(GetParam())) {} |
2347 | 2353 |
2348 virtual ~AudioProcessingTest() {} | 2354 virtual ~AudioProcessingTest() {} |
2349 | 2355 |
2350 static void SetUpTestCase() { | 2356 static void SetUpTestCase() { |
2351 // Create all needed output reference files. | 2357 // Create all needed output reference files. |
2352 const int kNativeRates[] = {8000, 16000, 32000, 48000}; | 2358 const int kNativeRates[] = {8000, 16000, 32000, 48000}; |
2353 const int kNumChannels[] = {1, 2}; | 2359 const size_t kNumChannels[] = {1, 2}; |
2354 for (size_t i = 0; i < arraysize(kNativeRates); ++i) { | 2360 for (size_t i = 0; i < arraysize(kNativeRates); ++i) { |
2355 for (size_t j = 0; j < arraysize(kNumChannels); ++j) { | 2361 for (size_t j = 0; j < arraysize(kNumChannels); ++j) { |
2356 for (size_t k = 0; k < arraysize(kNumChannels); ++k) { | 2362 for (size_t k = 0; k < arraysize(kNumChannels); ++k) { |
2357 // The reference files always have matching input and output channels. | 2363 // The reference files always have matching input and output channels. |
2358 ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i], | 2364 ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i], |
2359 kNativeRates[i], kNumChannels[j], kNumChannels[j], | 2365 kNativeRates[i], kNumChannels[j], kNumChannels[j], |
2360 kNumChannels[k], kNumChannels[k], "ref"); | 2366 kNumChannels[k], kNumChannels[k], "ref"); |
2361 } | 2367 } |
2362 } | 2368 } |
2363 } | 2369 } |
2364 } | 2370 } |
2365 | 2371 |
2366 static void TearDownTestCase() { | 2372 static void TearDownTestCase() { |
2367 ClearTempFiles(); | 2373 ClearTempFiles(); |
2368 } | 2374 } |
2369 | 2375 |
2370 // Runs a process pass on files with the given parameters and dumps the output | 2376 // Runs a process pass on files with the given parameters and dumps the output |
2371 // to a file specified with |output_file_prefix|. Both forward and reverse | 2377 // to a file specified with |output_file_prefix|. Both forward and reverse |
2372 // output streams are dumped. | 2378 // output streams are dumped. |
2373 static void ProcessFormat(int input_rate, | 2379 static void ProcessFormat(int input_rate, |
2374 int output_rate, | 2380 int output_rate, |
2375 int reverse_input_rate, | 2381 int reverse_input_rate, |
2376 int reverse_output_rate, | 2382 int reverse_output_rate, |
2377 int num_input_channels, | 2383 size_t num_input_channels, |
2378 int num_output_channels, | 2384 size_t num_output_channels, |
2379 int num_reverse_input_channels, | 2385 size_t num_reverse_input_channels, |
2380 int num_reverse_output_channels, | 2386 size_t num_reverse_output_channels, |
2381 std::string output_file_prefix) { | 2387 std::string output_file_prefix) { |
2382 Config config; | 2388 Config config; |
2383 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 2389 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
2384 rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config)); | 2390 rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config)); |
2385 EnableAllAPComponents(ap.get()); | 2391 EnableAllAPComponents(ap.get()); |
2386 | 2392 |
2387 ProcessingConfig processing_config = { | 2393 ProcessingConfig processing_config = { |
2388 {{input_rate, num_input_channels}, | 2394 {{input_rate, num_input_channels}, |
2389 {output_rate, num_output_channels}, | 2395 {output_rate, num_output_channels}, |
2390 {reverse_input_rate, num_reverse_input_channels}, | 2396 {reverse_input_rate, num_reverse_input_channels}, |
(...skipping 343 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2734 std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35), | 2740 std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35), |
2735 std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0), | 2741 std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0), |
2736 std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20), | 2742 std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20), |
2737 std::tr1::make_tuple(16000, 16000, 48000, 16000, 35, 20), | 2743 std::tr1::make_tuple(16000, 16000, 48000, 16000, 35, 20), |
2738 std::tr1::make_tuple(16000, 16000, 32000, 16000, 40, 20), | 2744 std::tr1::make_tuple(16000, 16000, 32000, 16000, 40, 20), |
2739 std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0))); | 2745 std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0))); |
2740 #endif | 2746 #endif |
2741 | 2747 |
2742 } // namespace | 2748 } // namespace |
2743 } // namespace webrtc | 2749 } // namespace webrtc |
OLD | NEW |