Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_processing/test/audio_processing_unittest.cc

Issue 1316523002: Convert channel counts to size_t. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc@master
Patch Set: Rebase onto cleanup change Created 4 years, 12 months ago
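For readers skimming the diff below: the change is mechanical. Channel counts move from int to size_t throughout the test, which ripples into loop index types, unsigned literals in gtest expectations (e.g. EXPECT_EQ(2u, ...)), and explicit static_cast<size_t> where protobuf accessors such as channel_size() still return int. A minimal sketch of the pattern follows; the helper names are hypothetical and not part of this patch.

#include <cstddef>

// Channel counts are size_t after the conversion (hypothetical helper).
size_t TotalChannels(bool stereo, bool has_keyboard_channel) {
  return (stereo ? 2u : 1u) + (has_keyboard_channel ? 1u : 0u);
}

// Loop indices follow the unsigned count type to avoid signed/unsigned mismatches.
void ZeroChannels(float* const* channels, size_t num_channels, size_t num_frames) {
  for (size_t i = 0; i < num_channels; ++i) {
    for (size_t j = 0; j < num_frames; ++j) {
      channels[i][j] = 0.f;
    }
  }
}

// Where an API still returns int (e.g. protobuf's channel_size()), the patch casts
// explicitly, and test expectations compare against unsigned literals:
//   const size_t n = static_cast<size_t>(msg.channel_size());
//   EXPECT_EQ(2u, frame->num_channels_);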
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 52 matching lines...)
63 63
64 enum StreamDirection { kForward = 0, kReverse }; 64 enum StreamDirection { kForward = 0, kReverse };
65 65
66 void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) { 66 void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
67 ChannelBuffer<int16_t> cb_int(cb->num_frames(), 67 ChannelBuffer<int16_t> cb_int(cb->num_frames(),
68 cb->num_channels()); 68 cb->num_channels());
69 Deinterleave(int_data, 69 Deinterleave(int_data,
70 cb->num_frames(), 70 cb->num_frames(),
71 cb->num_channels(), 71 cb->num_channels(),
72 cb_int.channels()); 72 cb_int.channels());
73 for (int i = 0; i < cb->num_channels(); ++i) { 73 for (size_t i = 0; i < cb->num_channels(); ++i) {
74 S16ToFloat(cb_int.channels()[i], 74 S16ToFloat(cb_int.channels()[i],
75 cb->num_frames(), 75 cb->num_frames(),
76 cb->channels()[i]); 76 cb->channels()[i]);
77 } 77 }
78 } 78 }
79 79
80 void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) { 80 void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
81 ConvertToFloat(frame.data_, cb); 81 ConvertToFloat(frame.data_, cb);
82 } 82 }
83 83
84 // Number of channels including the keyboard channel. 84 // Number of channels including the keyboard channel.
85 int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) { 85 size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
86 switch (layout) { 86 switch (layout) {
87 case AudioProcessing::kMono: 87 case AudioProcessing::kMono:
88 return 1; 88 return 1;
89 case AudioProcessing::kMonoAndKeyboard: 89 case AudioProcessing::kMonoAndKeyboard:
90 case AudioProcessing::kStereo: 90 case AudioProcessing::kStereo:
91 return 2; 91 return 2;
92 case AudioProcessing::kStereoAndKeyboard: 92 case AudioProcessing::kStereoAndKeyboard:
93 return 3; 93 return 3;
94 } 94 }
95 assert(false); 95 assert(false);
(...skipping 29 matching lines...)
125 } 125 }
126 126
127 void SetFrameTo(AudioFrame* frame, int16_t value) { 127 void SetFrameTo(AudioFrame* frame, int16_t value) {
128 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; 128 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
129 ++i) { 129 ++i) {
130 frame->data_[i] = value; 130 frame->data_[i] = value;
131 } 131 }
132 } 132 }
133 133
134 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) { 134 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
135 ASSERT_EQ(2, frame->num_channels_); 135 ASSERT_EQ(2u, frame->num_channels_);
136 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) { 136 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
137 frame->data_[i] = left; 137 frame->data_[i] = left;
138 frame->data_[i + 1] = right; 138 frame->data_[i + 1] = right;
139 } 139 }
140 } 140 }
141 141
142 void ScaleFrame(AudioFrame* frame, float scale) { 142 void ScaleFrame(AudioFrame* frame, float scale) {
143 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; 143 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
144 ++i) { 144 ++i) {
145 frame->data_[i] = FloatS16ToS16(frame->data_[i] * scale); 145 frame->data_[i] = FloatS16ToS16(frame->data_[i] * scale);
(...skipping 104 matching lines...)
250 // Temporary filenames unique to this process. Used to be able to run these 250 // Temporary filenames unique to this process. Used to be able to run these
251 // tests in parallel. As each process needs to be running in isolation, they can't 251 // tests in parallel. As each process needs to be running in isolation, they can't
252 // have competing filenames. 252 // have competing filenames.
253 std::map<std::string, std::string> temp_filenames; 253 std::map<std::string, std::string> temp_filenames;
254 254
255 std::string OutputFilePath(std::string name, 255 std::string OutputFilePath(std::string name,
256 int input_rate, 256 int input_rate,
257 int output_rate, 257 int output_rate,
258 int reverse_input_rate, 258 int reverse_input_rate,
259 int reverse_output_rate, 259 int reverse_output_rate,
260 int num_input_channels, 260 size_t num_input_channels,
261 int num_output_channels, 261 size_t num_output_channels,
262 int num_reverse_input_channels, 262 size_t num_reverse_input_channels,
263 int num_reverse_output_channels, 263 size_t num_reverse_output_channels,
264 StreamDirection file_direction) { 264 StreamDirection file_direction) {
265 std::ostringstream ss; 265 std::ostringstream ss;
266 ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir" 266 ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir"
267 << num_reverse_input_channels << "_" << reverse_input_rate / 1000 << "_"; 267 << num_reverse_input_channels << "_" << reverse_input_rate / 1000 << "_";
268 if (num_output_channels == 1) { 268 if (num_output_channels == 1) {
269 ss << "mono"; 269 ss << "mono";
270 } else if (num_output_channels == 2) { 270 } else if (num_output_channels == 2) {
271 ss << "stereo"; 271 ss << "stereo";
272 } else { 272 } else {
273 assert(false); 273 assert(false);
(...skipping 74 matching lines...)
348 348
349 // Used to select between int and float interface tests. 349 // Used to select between int and float interface tests.
350 enum Format { 350 enum Format {
351 kIntFormat, 351 kIntFormat,
352 kFloatFormat 352 kFloatFormat
353 }; 353 };
354 354
355 void Init(int sample_rate_hz, 355 void Init(int sample_rate_hz,
356 int output_sample_rate_hz, 356 int output_sample_rate_hz,
357 int reverse_sample_rate_hz, 357 int reverse_sample_rate_hz,
358 int num_input_channels, 358 size_t num_input_channels,
359 int num_output_channels, 359 size_t num_output_channels,
360 int num_reverse_channels, 360 size_t num_reverse_channels,
361 bool open_output_file); 361 bool open_output_file);
362 void Init(AudioProcessing* ap); 362 void Init(AudioProcessing* ap);
363 void EnableAllComponents(); 363 void EnableAllComponents();
364 bool ReadFrame(FILE* file, AudioFrame* frame); 364 bool ReadFrame(FILE* file, AudioFrame* frame);
365 bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb); 365 bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb);
366 void ReadFrameWithRewind(FILE* file, AudioFrame* frame); 366 void ReadFrameWithRewind(FILE* file, AudioFrame* frame);
367 void ReadFrameWithRewind(FILE* file, AudioFrame* frame, 367 void ReadFrameWithRewind(FILE* file, AudioFrame* frame,
368 ChannelBuffer<float>* cb); 368 ChannelBuffer<float>* cb);
369 void ProcessWithDefaultStreamParameters(AudioFrame* frame); 369 void ProcessWithDefaultStreamParameters(AudioFrame* frame);
370 void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms, 370 void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
371 int delay_min, int delay_max); 371 int delay_min, int delay_max);
372 void TestChangingChannelsInt16Interface( 372 void TestChangingChannelsInt16Interface(
373 int num_channels, 373 size_t num_channels,
374 AudioProcessing::Error expected_return); 374 AudioProcessing::Error expected_return);
375 void TestChangingForwardChannels(int num_in_channels, 375 void TestChangingForwardChannels(size_t num_in_channels,
376 int num_out_channels, 376 size_t num_out_channels,
377 AudioProcessing::Error expected_return); 377 AudioProcessing::Error expected_return);
378 void TestChangingReverseChannels(int num_rev_channels, 378 void TestChangingReverseChannels(size_t num_rev_channels,
379 AudioProcessing::Error expected_return); 379 AudioProcessing::Error expected_return);
380 void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate); 380 void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
381 void RunManualVolumeChangeIsPossibleTest(int sample_rate); 381 void RunManualVolumeChangeIsPossibleTest(int sample_rate);
382 void StreamParametersTest(Format format); 382 void StreamParametersTest(Format format);
383 int ProcessStreamChooser(Format format); 383 int ProcessStreamChooser(Format format);
384 int AnalyzeReverseStreamChooser(Format format); 384 int AnalyzeReverseStreamChooser(Format format);
385 void ProcessDebugDump(const std::string& in_filename, 385 void ProcessDebugDump(const std::string& in_filename,
386 const std::string& out_filename, 386 const std::string& out_filename,
387 Format format); 387 Format format);
388 void VerifyDebugDumpTest(Format format); 388 void VerifyDebugDumpTest(Format format);
389 389
390 const std::string output_path_; 390 const std::string output_path_;
391 const std::string ref_path_; 391 const std::string ref_path_;
392 const std::string ref_filename_; 392 const std::string ref_filename_;
393 rtc::scoped_ptr<AudioProcessing> apm_; 393 rtc::scoped_ptr<AudioProcessing> apm_;
394 AudioFrame* frame_; 394 AudioFrame* frame_;
395 AudioFrame* revframe_; 395 AudioFrame* revframe_;
396 rtc::scoped_ptr<ChannelBuffer<float> > float_cb_; 396 rtc::scoped_ptr<ChannelBuffer<float> > float_cb_;
397 rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_; 397 rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
398 int output_sample_rate_hz_; 398 int output_sample_rate_hz_;
399 int num_output_channels_; 399 size_t num_output_channels_;
400 FILE* far_file_; 400 FILE* far_file_;
401 FILE* near_file_; 401 FILE* near_file_;
402 FILE* out_file_; 402 FILE* out_file_;
403 }; 403 };
404 404
405 ApmTest::ApmTest() 405 ApmTest::ApmTest()
406 : output_path_(test::OutputPath()), 406 : output_path_(test::OutputPath()),
407 ref_path_(test::ProjectRootPath() + "data/audio_processing/"), 407 ref_path_(test::ProjectRootPath() + "data/audio_processing/"),
408 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) 408 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
409 ref_filename_(ref_path_ + "output_data_fixed.pb"), 409 ref_filename_(ref_path_ + "output_data_fixed.pb"),
(...skipping 63 matching lines...)
473 ap->Initialize( 473 ap->Initialize(
474 {{{frame_->sample_rate_hz_, frame_->num_channels_}, 474 {{{frame_->sample_rate_hz_, frame_->num_channels_},
475 {output_sample_rate_hz_, num_output_channels_}, 475 {output_sample_rate_hz_, num_output_channels_},
476 {revframe_->sample_rate_hz_, revframe_->num_channels_}, 476 {revframe_->sample_rate_hz_, revframe_->num_channels_},
477 {revframe_->sample_rate_hz_, revframe_->num_channels_}}})); 477 {revframe_->sample_rate_hz_, revframe_->num_channels_}}}));
478 } 478 }
479 479
480 void ApmTest::Init(int sample_rate_hz, 480 void ApmTest::Init(int sample_rate_hz,
481 int output_sample_rate_hz, 481 int output_sample_rate_hz,
482 int reverse_sample_rate_hz, 482 int reverse_sample_rate_hz,
483 int num_input_channels, 483 size_t num_input_channels,
484 int num_output_channels, 484 size_t num_output_channels,
485 int num_reverse_channels, 485 size_t num_reverse_channels,
486 bool open_output_file) { 486 bool open_output_file) {
487 SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_); 487 SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
488 output_sample_rate_hz_ = output_sample_rate_hz; 488 output_sample_rate_hz_ = output_sample_rate_hz;
489 num_output_channels_ = num_output_channels; 489 num_output_channels_ = num_output_channels;
490 490
491 SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_, 491 SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_,
492 &revfloat_cb_); 492 &revfloat_cb_);
493 Init(apm_.get()); 493 Init(apm_.get());
494 494
495 if (far_file_) { 495 if (far_file_) {
(...skipping 311 matching lines...)
807 // Low limit of 0 ms. 807 // Low limit of 0 ms.
808 apm_->set_delay_offset_ms(-50); 808 apm_->set_delay_offset_ms(-50);
809 EXPECT_EQ(-50, apm_->delay_offset_ms()); 809 EXPECT_EQ(-50, apm_->delay_offset_ms());
810 EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20)); 810 EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20));
811 EXPECT_EQ(0, apm_->stream_delay_ms()); 811 EXPECT_EQ(0, apm_->stream_delay_ms());
812 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100)); 812 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
813 EXPECT_EQ(50, apm_->stream_delay_ms()); 813 EXPECT_EQ(50, apm_->stream_delay_ms());
814 } 814 }
815 815
816 void ApmTest::TestChangingChannelsInt16Interface( 816 void ApmTest::TestChangingChannelsInt16Interface(
817 int num_channels, 817 size_t num_channels,
818 AudioProcessing::Error expected_return) { 818 AudioProcessing::Error expected_return) {
819 frame_->num_channels_ = num_channels; 819 frame_->num_channels_ = num_channels;
820 EXPECT_EQ(expected_return, apm_->ProcessStream(frame_)); 820 EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
821 EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_)); 821 EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_));
822 } 822 }
823 823
824 void ApmTest::TestChangingForwardChannels( 824 void ApmTest::TestChangingForwardChannels(
825 int num_in_channels, 825 size_t num_in_channels,
826 int num_out_channels, 826 size_t num_out_channels,
827 AudioProcessing::Error expected_return) { 827 AudioProcessing::Error expected_return) {
828 const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels}; 828 const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels};
829 const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels}; 829 const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
830 830
831 EXPECT_EQ(expected_return, 831 EXPECT_EQ(expected_return,
832 apm_->ProcessStream(float_cb_->channels(), input_stream, 832 apm_->ProcessStream(float_cb_->channels(), input_stream,
833 output_stream, float_cb_->channels())); 833 output_stream, float_cb_->channels()));
834 } 834 }
835 835
836 void ApmTest::TestChangingReverseChannels( 836 void ApmTest::TestChangingReverseChannels(
837 int num_rev_channels, 837 size_t num_rev_channels,
838 AudioProcessing::Error expected_return) { 838 AudioProcessing::Error expected_return) {
839 const ProcessingConfig processing_config = { 839 const ProcessingConfig processing_config = {
840 {{frame_->sample_rate_hz_, apm_->num_input_channels()}, 840 {{frame_->sample_rate_hz_, apm_->num_input_channels()},
841 {output_sample_rate_hz_, apm_->num_output_channels()}, 841 {output_sample_rate_hz_, apm_->num_output_channels()},
842 {frame_->sample_rate_hz_, num_rev_channels}, 842 {frame_->sample_rate_hz_, num_rev_channels},
843 {frame_->sample_rate_hz_, num_rev_channels}}}; 843 {frame_->sample_rate_hz_, num_rev_channels}}};
844 844
845 EXPECT_EQ( 845 EXPECT_EQ(
846 expected_return, 846 expected_return,
847 apm_->ProcessReverseStream( 847 apm_->ProcessReverseStream(
848 float_cb_->channels(), processing_config.reverse_input_stream(), 848 float_cb_->channels(), processing_config.reverse_input_stream(),
849 processing_config.reverse_output_stream(), float_cb_->channels())); 849 processing_config.reverse_output_stream(), float_cb_->channels()));
850 } 850 }
851 851
852 TEST_F(ApmTest, ChannelsInt16Interface) { 852 TEST_F(ApmTest, ChannelsInt16Interface) {
853 // Testing number of invalid and valid channels. 853 // Testing number of invalid and valid channels.
854 Init(16000, 16000, 16000, 4, 4, 4, false); 854 Init(16000, 16000, 16000, 4, 4, 4, false);
855 855
856 TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError); 856 TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
857 857
858 for (int i = 1; i < 4; i++) { 858 for (size_t i = 1; i < 4; i++) {
859 TestChangingChannelsInt16Interface(i, kNoErr); 859 TestChangingChannelsInt16Interface(i, kNoErr);
860 EXPECT_EQ(i, apm_->num_input_channels()); 860 EXPECT_EQ(i, apm_->num_input_channels());
861 // We always force the number of reverse channels used for processing to 1. 861 // We always force the number of reverse channels used for processing to 1.
862 EXPECT_EQ(1, apm_->num_reverse_channels()); 862 EXPECT_EQ(1u, apm_->num_reverse_channels());
863 } 863 }
864 } 864 }
865 865
866 TEST_F(ApmTest, Channels) { 866 TEST_F(ApmTest, Channels) {
867 // Testing number of invalid and valid channels. 867 // Testing number of invalid and valid channels.
868 Init(16000, 16000, 16000, 4, 4, 4, false); 868 Init(16000, 16000, 16000, 4, 4, 4, false);
869 869
870 TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError); 870 TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
871 TestChangingReverseChannels(0, apm_->kBadNumberChannelsError); 871 TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
872 872
873 for (int i = 1; i < 4; ++i) { 873 for (size_t i = 1; i < 4; ++i) {
874 for (int j = 0; j < 1; ++j) { 874 for (size_t j = 0; j < 1; ++j) {
875 // Output channels must be one or match input channels. 875 // Output channels must be one or match input channels.
876 if (j == 1 || i == j) { 876 if (j == 1 || i == j) {
877 TestChangingForwardChannels(i, j, kNoErr); 877 TestChangingForwardChannels(i, j, kNoErr);
878 TestChangingReverseChannels(i, kNoErr); 878 TestChangingReverseChannels(i, kNoErr);
879 879
880 EXPECT_EQ(i, apm_->num_input_channels()); 880 EXPECT_EQ(i, apm_->num_input_channels());
881 EXPECT_EQ(j, apm_->num_output_channels()); 881 EXPECT_EQ(j, apm_->num_output_channels());
882 // The number of reverse channels used for processing is always 1. 882 // The number of reverse channels used for processing is always 1.
883 EXPECT_EQ(1, apm_->num_reverse_channels()); 883 EXPECT_EQ(1u, apm_->num_reverse_channels());
884 } else { 884 } else {
885 TestChangingForwardChannels(i, j, 885 TestChangingForwardChannels(i, j,
886 AudioProcessing::kBadNumberChannelsError); 886 AudioProcessing::kBadNumberChannelsError);
887 } 887 }
888 } 888 }
889 } 889 }
890 } 890 }
891 891
892 TEST_F(ApmTest, SampleRatesInt) { 892 TEST_F(ApmTest, SampleRatesInt) {
893 // Testing invalid sample rates 893 // Testing invalid sample rates
(...skipping 393 matching lines...)
1287 for (size_t i = 0; i < arraysize(kSampleRates); ++i) { 1287 for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
1288 RunManualVolumeChangeIsPossibleTest(kSampleRates[i]); 1288 RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
1289 } 1289 }
1290 } 1290 }
1291 1291
1292 #if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) 1292 #if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
1293 TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) { 1293 TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
1294 const int kSampleRateHz = 16000; 1294 const int kSampleRateHz = 16000;
1295 const size_t kSamplesPerChannel = 1295 const size_t kSamplesPerChannel =
1296 static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000); 1296 static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
1297 const int kNumInputChannels = 2; 1297 const size_t kNumInputChannels = 2;
1298 const int kNumOutputChannels = 1; 1298 const size_t kNumOutputChannels = 1;
1299 const size_t kNumChunks = 700; 1299 const size_t kNumChunks = 700;
1300 const float kScaleFactor = 0.25f; 1300 const float kScaleFactor = 0.25f;
1301 Config config; 1301 Config config;
1302 std::vector<webrtc::Point> geometry; 1302 std::vector<webrtc::Point> geometry;
1303 geometry.push_back(webrtc::Point(0.f, 0.f, 0.f)); 1303 geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
1304 geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f)); 1304 geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));
1305 config.Set<Beamforming>(new Beamforming(true, geometry)); 1305 config.Set<Beamforming>(new Beamforming(true, geometry));
1306 testing::NiceMock<MockNonlinearBeamformer>* beamformer = 1306 testing::NiceMock<MockNonlinearBeamformer>* beamformer =
1307 new testing::NiceMock<MockNonlinearBeamformer>(geometry); 1307 new testing::NiceMock<MockNonlinearBeamformer>(geometry);
1308 rtc::scoped_ptr<AudioProcessing> apm( 1308 rtc::scoped_ptr<AudioProcessing> apm(
(...skipping 12 matching lines...)
1321 const int kDefaultCompressionGain = 1321 const int kDefaultCompressionGain =
1322 apm->gain_control()->compression_gain_db(); 1322 apm->gain_control()->compression_gain_db();
1323 bool is_target = false; 1323 bool is_target = false;
1324 EXPECT_CALL(*beamformer, is_target_present()) 1324 EXPECT_CALL(*beamformer, is_target_present())
1325 .WillRepeatedly(testing::ReturnPointee(&is_target)); 1325 .WillRepeatedly(testing::ReturnPointee(&is_target));
1326 for (size_t i = 0; i < kNumChunks; ++i) { 1326 for (size_t i = 0; i < kNumChunks; ++i) {
1327 ASSERT_TRUE(ReadChunk(far_file, 1327 ASSERT_TRUE(ReadChunk(far_file,
1328 int_data.get(), 1328 int_data.get(),
1329 float_data.get(), 1329 float_data.get(),
1330 &src_buf)); 1330 &src_buf));
1331 for (int j = 0; j < kNumInputChannels; ++j) { 1331 for (size_t j = 0; j < kNumInputChannels; ++j) {
1332 for (size_t k = 0; k < kSamplesPerChannel; ++k) { 1332 for (size_t k = 0; k < kSamplesPerChannel; ++k) {
1333 src_buf.channels()[j][k] *= kScaleFactor; 1333 src_buf.channels()[j][k] *= kScaleFactor;
1334 } 1334 }
1335 } 1335 }
1336 EXPECT_EQ(kNoErr, 1336 EXPECT_EQ(kNoErr,
1337 apm->ProcessStream(src_buf.channels(), 1337 apm->ProcessStream(src_buf.channels(),
1338 src_buf.num_frames(), 1338 src_buf.num_frames(),
1339 kSampleRateHz, 1339 kSampleRateHz,
1340 LayoutFromChannels(src_buf.num_channels()), 1340 LayoutFromChannels(src_buf.num_channels()),
1341 kSampleRateHz, 1341 kSampleRateHz,
1342 LayoutFromChannels(dest_buf.num_channels()), 1342 LayoutFromChannels(dest_buf.num_channels()),
1343 dest_buf.channels())); 1343 dest_buf.channels()));
1344 } 1344 }
1345 EXPECT_EQ(kDefaultVolume, 1345 EXPECT_EQ(kDefaultVolume,
1346 apm->gain_control()->stream_analog_level()); 1346 apm->gain_control()->stream_analog_level());
1347 EXPECT_EQ(kDefaultCompressionGain, 1347 EXPECT_EQ(kDefaultCompressionGain,
1348 apm->gain_control()->compression_gain_db()); 1348 apm->gain_control()->compression_gain_db());
1349 rewind(far_file); 1349 rewind(far_file);
1350 is_target = true; 1350 is_target = true;
1351 for (size_t i = 0; i < kNumChunks; ++i) { 1351 for (size_t i = 0; i < kNumChunks; ++i) {
1352 ASSERT_TRUE(ReadChunk(far_file, 1352 ASSERT_TRUE(ReadChunk(far_file,
1353 int_data.get(), 1353 int_data.get(),
1354 float_data.get(), 1354 float_data.get(),
1355 &src_buf)); 1355 &src_buf));
1356 for (int j = 0; j < kNumInputChannels; ++j) { 1356 for (size_t j = 0; j < kNumInputChannels; ++j) {
1357 for (size_t k = 0; k < kSamplesPerChannel; ++k) { 1357 for (size_t k = 0; k < kSamplesPerChannel; ++k) {
1358 src_buf.channels()[j][k] *= kScaleFactor; 1358 src_buf.channels()[j][k] *= kScaleFactor;
1359 } 1359 }
1360 } 1360 }
1361 EXPECT_EQ(kNoErr, 1361 EXPECT_EQ(kNoErr,
1362 apm->ProcessStream(src_buf.channels(), 1362 apm->ProcessStream(src_buf.channels(),
1363 src_buf.num_frames(), 1363 src_buf.num_frames(),
1364 kSampleRateHz, 1364 kSampleRateHz,
1365 LayoutFromChannels(src_buf.num_channels()), 1365 LayoutFromChannels(src_buf.num_channels()),
1366 kSampleRateHz, 1366 kSampleRateHz,
(...skipping 369 matching lines...)
1736 // StartDebugRecording() writes an additional init message. Don't start 1736 // StartDebugRecording() writes an additional init message. Don't start
1737 // recording until after the first init to avoid the extra message. 1737 // recording until after the first init to avoid the extra message.
1738 EXPECT_NOERR(apm_->StartDebugRecording(out_filename.c_str())); 1738 EXPECT_NOERR(apm_->StartDebugRecording(out_filename.c_str()));
1739 first_init = false; 1739 first_init = false;
1740 } 1740 }
1741 1741
1742 } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) { 1742 } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) {
1743 const audioproc::ReverseStream msg = event_msg.reverse_stream(); 1743 const audioproc::ReverseStream msg = event_msg.reverse_stream();
1744 1744
1745 if (msg.channel_size() > 0) { 1745 if (msg.channel_size() > 0) {
1746 ASSERT_EQ(revframe_->num_channels_, msg.channel_size()); 1746 ASSERT_EQ(revframe_->num_channels_,
1747 static_cast<size_t>(msg.channel_size()));
1747 for (int i = 0; i < msg.channel_size(); ++i) { 1748 for (int i = 0; i < msg.channel_size(); ++i) {
1748 memcpy(revfloat_cb_->channels()[i], 1749 memcpy(revfloat_cb_->channels()[i],
1749 msg.channel(i).data(), 1750 msg.channel(i).data(),
1750 msg.channel(i).size()); 1751 msg.channel(i).size());
1751 } 1752 }
1752 } else { 1753 } else {
1753 memcpy(revframe_->data_, msg.data().data(), msg.data().size()); 1754 memcpy(revframe_->data_, msg.data().data(), msg.data().size());
1754 if (format == kFloatFormat) { 1755 if (format == kFloatFormat) {
1755 // We're using an int16 input file; convert to float. 1756 // We're using an int16 input file; convert to float.
1756 ConvertToFloat(*revframe_, revfloat_cb_.get()); 1757 ConvertToFloat(*revframe_, revfloat_cb_.get());
1757 } 1758 }
1758 } 1759 }
1759 AnalyzeReverseStreamChooser(format); 1760 AnalyzeReverseStreamChooser(format);
1760 1761
1761 } else if (event_msg.type() == audioproc::Event::STREAM) { 1762 } else if (event_msg.type() == audioproc::Event::STREAM) {
1762 const audioproc::Stream msg = event_msg.stream(); 1763 const audioproc::Stream msg = event_msg.stream();
1763 // ProcessStream could have changed this for the output frame. 1764 // ProcessStream could have changed this for the output frame.
1764 frame_->num_channels_ = apm_->num_input_channels(); 1765 frame_->num_channels_ = apm_->num_input_channels();
1765 1766
1766 EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level())); 1767 EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level()));
1767 EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay())); 1768 EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
1768 apm_->echo_cancellation()->set_stream_drift_samples(msg.drift()); 1769 apm_->echo_cancellation()->set_stream_drift_samples(msg.drift());
1769 if (msg.has_keypress()) { 1770 if (msg.has_keypress()) {
1770 apm_->set_stream_key_pressed(msg.keypress()); 1771 apm_->set_stream_key_pressed(msg.keypress());
1771 } else { 1772 } else {
1772 apm_->set_stream_key_pressed(true); 1773 apm_->set_stream_key_pressed(true);
1773 } 1774 }
1774 1775
1775 if (msg.input_channel_size() > 0) { 1776 if (msg.input_channel_size() > 0) {
1776 ASSERT_EQ(frame_->num_channels_, msg.input_channel_size()); 1777 ASSERT_EQ(frame_->num_channels_,
1778 static_cast<size_t>(msg.input_channel_size()));
1777 for (int i = 0; i < msg.input_channel_size(); ++i) { 1779 for (int i = 0; i < msg.input_channel_size(); ++i) {
1778 memcpy(float_cb_->channels()[i], 1780 memcpy(float_cb_->channels()[i],
1779 msg.input_channel(i).data(), 1781 msg.input_channel(i).data(),
1780 msg.input_channel(i).size()); 1782 msg.input_channel(i).size());
1781 } 1783 }
1782 } else { 1784 } else {
1783 memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size()); 1785 memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size());
1784 if (format == kFloatFormat) { 1786 if (format == kFloatFormat) {
1785 // We're using an int16 input file; convert to float. 1787 // We're using an int16 input file; convert to float.
1786 ConvertToFloat(*frame_, float_cb_.get()); 1788 ConvertToFloat(*frame_, float_cb_.get());
(...skipping 137 matching lines...)
1924 EnableAllComponents(); 1926 EnableAllComponents();
1925 EnableAllAPComponents(fapm.get()); 1927 EnableAllAPComponents(fapm.get());
1926 for (int i = 0; i < ref_data.test_size(); i++) { 1928 for (int i = 0; i < ref_data.test_size(); i++) {
1927 printf("Running test %d of %d...\n", i + 1, ref_data.test_size()); 1929 printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
1928 1930
1929 audioproc::Test* test = ref_data.mutable_test(i); 1931 audioproc::Test* test = ref_data.mutable_test(i);
1930 // TODO(ajm): Restore downmixing test cases. 1932 // TODO(ajm): Restore downmixing test cases.
1931 if (test->num_input_channels() != test->num_output_channels()) 1933 if (test->num_input_channels() != test->num_output_channels())
1932 continue; 1934 continue;
1933 1935
1934 const int num_render_channels = test->num_reverse_channels(); 1936 const size_t num_render_channels =
1935 const int num_input_channels = test->num_input_channels(); 1937 static_cast<size_t>(test->num_reverse_channels());
1936 const int num_output_channels = test->num_output_channels(); 1938 const size_t num_input_channels =
1939 static_cast<size_t>(test->num_input_channels());
1940 const size_t num_output_channels =
1941 static_cast<size_t>(test->num_output_channels());
1937 const size_t samples_per_channel = static_cast<size_t>( 1942 const size_t samples_per_channel = static_cast<size_t>(
1938 test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000); 1943 test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
1939 1944
1940 Init(test->sample_rate(), test->sample_rate(), test->sample_rate(), 1945 Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
1941 num_input_channels, num_output_channels, num_render_channels, true); 1946 num_input_channels, num_output_channels, num_render_channels, true);
1942 Init(fapm.get()); 1947 Init(fapm.get());
1943 1948
1944 ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels); 1949 ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels);
1945 ChannelBuffer<int16_t> output_int16(samples_per_channel, 1950 ChannelBuffer<int16_t> output_int16(samples_per_channel,
1946 num_input_channels); 1951 num_input_channels);
(...skipping 22 matching lines...)
1969 output_int16.channels()); 1974 output_int16.channels());
1970 1975
1971 EXPECT_NOERR(fapm->ProcessStream( 1976 EXPECT_NOERR(fapm->ProcessStream(
1972 float_cb_->channels(), 1977 float_cb_->channels(),
1973 samples_per_channel, 1978 samples_per_channel,
1974 test->sample_rate(), 1979 test->sample_rate(),
1975 LayoutFromChannels(num_input_channels), 1980 LayoutFromChannels(num_input_channels),
1976 test->sample_rate(), 1981 test->sample_rate(),
1977 LayoutFromChannels(num_output_channels), 1982 LayoutFromChannels(num_output_channels),
1978 float_cb_->channels())); 1983 float_cb_->channels()));
1979 for (int j = 0; j < num_output_channels; ++j) { 1984 for (size_t j = 0; j < num_output_channels; ++j) {
1980 FloatToS16(float_cb_->channels()[j], 1985 FloatToS16(float_cb_->channels()[j],
1981 samples_per_channel, 1986 samples_per_channel,
1982 output_cb.channels()[j]); 1987 output_cb.channels()[j]);
1983 float variance = 0; 1988 float variance = 0;
1984 float snr = ComputeSNR(output_int16.channels()[j], 1989 float snr = ComputeSNR(output_int16.channels()[j],
1985 output_cb.channels()[j], 1990 output_cb.channels()[j],
1986 samples_per_channel, &variance); 1991 samples_per_channel, &variance);
1987 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) 1992 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
1988 // There are a few chunks in the fixed-point profile that give low SNR. 1993 // There are a few chunks in the fixed-point profile that give low SNR.
1989 // Listening confirmed the difference is acceptable. 1994 // Listening confirmed the difference is acceptable.
(...skipping 12 matching lines...)
2002 analog_level = fapm->gain_control()->stream_analog_level(); 2007 analog_level = fapm->gain_control()->stream_analog_level();
2003 EXPECT_EQ(apm_->gain_control()->stream_analog_level(), 2008 EXPECT_EQ(apm_->gain_control()->stream_analog_level(),
2004 fapm->gain_control()->stream_analog_level()); 2009 fapm->gain_control()->stream_analog_level());
2005 EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(), 2010 EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(),
2006 fapm->echo_cancellation()->stream_has_echo()); 2011 fapm->echo_cancellation()->stream_has_echo());
2007 EXPECT_NEAR(apm_->noise_suppression()->speech_probability(), 2012 EXPECT_NEAR(apm_->noise_suppression()->speech_probability(),
2008 fapm->noise_suppression()->speech_probability(), 2013 fapm->noise_suppression()->speech_probability(),
2009 0.01); 2014 0.01);
2010 2015
2011 // Reset in case of downmixing. 2016 // Reset in case of downmixing.
2012 frame_->num_channels_ = test->num_input_channels(); 2017 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
2013 } 2018 }
2014 rewind(far_file_); 2019 rewind(far_file_);
2015 rewind(near_file_); 2020 rewind(near_file_);
2016 } 2021 }
2017 } 2022 }
2018 2023
2019 // TODO(andrew): Add a test to process a few frames with different combinations 2024 // TODO(andrew): Add a test to process a few frames with different combinations
2020 // of enabled components. 2025 // of enabled components.
2021 2026
2022 TEST_F(ApmTest, Process) { 2027 TEST_F(ApmTest, Process) {
(...skipping 40 matching lines...)
2063 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); 2068 config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
2064 config.Set<ExtendedFilter>( 2069 config.Set<ExtendedFilter>(
2065 new ExtendedFilter(test->use_aec_extended_filter())); 2070 new ExtendedFilter(test->use_aec_extended_filter()));
2066 apm_.reset(AudioProcessing::Create(config)); 2071 apm_.reset(AudioProcessing::Create(config));
2067 2072
2068 EnableAllComponents(); 2073 EnableAllComponents();
2069 2074
2070 Init(test->sample_rate(), 2075 Init(test->sample_rate(),
2071 test->sample_rate(), 2076 test->sample_rate(),
2072 test->sample_rate(), 2077 test->sample_rate(),
2073 test->num_input_channels(), 2078 static_cast<size_t>(test->num_input_channels()),
2074 test->num_output_channels(), 2079 static_cast<size_t>(test->num_output_channels()),
2075 test->num_reverse_channels(), 2080 static_cast<size_t>(test->num_reverse_channels()),
2076 true); 2081 true);
2077 2082
2078 int frame_count = 0; 2083 int frame_count = 0;
2079 int has_echo_count = 0; 2084 int has_echo_count = 0;
2080 int has_voice_count = 0; 2085 int has_voice_count = 0;
2081 int is_saturated_count = 0; 2086 int is_saturated_count = 0;
2082 int analog_level = 127; 2087 int analog_level = 127;
2083 int analog_level_average = 0; 2088 int analog_level_average = 0;
2084 int max_output_average = 0; 2089 int max_output_average = 0;
2085 float ns_speech_prob_average = 0.0f; 2090 float ns_speech_prob_average = 0.0f;
2086 2091
2087 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) { 2092 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
2088 EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_)); 2093 EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
2089 2094
2090 frame_->vad_activity_ = AudioFrame::kVadUnknown; 2095 frame_->vad_activity_ = AudioFrame::kVadUnknown;
2091 2096
2092 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0)); 2097 EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
2093 apm_->echo_cancellation()->set_stream_drift_samples(0); 2098 apm_->echo_cancellation()->set_stream_drift_samples(0);
2094 EXPECT_EQ(apm_->kNoError, 2099 EXPECT_EQ(apm_->kNoError,
2095 apm_->gain_control()->set_stream_analog_level(analog_level)); 2100 apm_->gain_control()->set_stream_analog_level(analog_level));
2096 2101
2097 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_)); 2102 EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
2098 2103
2099 // Ensure the frame was downmixed properly. 2104 // Ensure the frame was downmixed properly.
2100 EXPECT_EQ(test->num_output_channels(), frame_->num_channels_); 2105 EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
2106 frame_->num_channels_);
2101 2107
2102 max_output_average += MaxAudioFrame(*frame_); 2108 max_output_average += MaxAudioFrame(*frame_);
2103 2109
2104 if (apm_->echo_cancellation()->stream_has_echo()) { 2110 if (apm_->echo_cancellation()->stream_has_echo()) {
2105 has_echo_count++; 2111 has_echo_count++;
2106 } 2112 }
2107 2113
2108 analog_level = apm_->gain_control()->stream_analog_level(); 2114 analog_level = apm_->gain_control()->stream_analog_level();
2109 analog_level_average += analog_level; 2115 analog_level_average += analog_level;
2110 if (apm_->gain_control()->stream_is_saturated()) { 2116 if (apm_->gain_control()->stream_is_saturated()) {
2111 is_saturated_count++; 2117 is_saturated_count++;
2112 } 2118 }
2113 if (apm_->voice_detection()->stream_has_voice()) { 2119 if (apm_->voice_detection()->stream_has_voice()) {
2114 has_voice_count++; 2120 has_voice_count++;
2115 EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_); 2121 EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
2116 } else { 2122 } else {
2117 EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_); 2123 EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
2118 } 2124 }
2119 2125
2120 ns_speech_prob_average += apm_->noise_suppression()->speech_probability(); 2126 ns_speech_prob_average += apm_->noise_suppression()->speech_probability();
2121 2127
2122 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_; 2128 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_;
2123 size_t write_count = fwrite(frame_->data_, 2129 size_t write_count = fwrite(frame_->data_,
2124 sizeof(int16_t), 2130 sizeof(int16_t),
2125 frame_size, 2131 frame_size,
2126 out_file_); 2132 out_file_);
2127 ASSERT_EQ(frame_size, write_count); 2133 ASSERT_EQ(frame_size, write_count);
2128 2134
2129 // Reset in case of downmixing. 2135 // Reset in case of downmixing.
2130 frame_->num_channels_ = test->num_input_channels(); 2136 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
2131 frame_count++; 2137 frame_count++;
2132 } 2138 }
2133 max_output_average /= frame_count; 2139 max_output_average /= frame_count;
2134 analog_level_average /= frame_count; 2140 analog_level_average /= frame_count;
2135 ns_speech_prob_average /= frame_count; 2141 ns_speech_prob_average /= frame_count;
2136 2142
2137 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) 2143 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
2138 EchoCancellation::Metrics echo_metrics; 2144 EchoCancellation::Metrics echo_metrics;
2139 EXPECT_EQ(apm_->kNoError, 2145 EXPECT_EQ(apm_->kNoError,
2140 apm_->echo_cancellation()->GetMetrics(&echo_metrics)); 2146 apm_->echo_cancellation()->GetMetrics(&echo_metrics));
(...skipping 203 matching lines...)
2344 reverse_input_rate_(std::tr1::get<2>(GetParam())), 2350 reverse_input_rate_(std::tr1::get<2>(GetParam())),
2345 reverse_output_rate_(std::tr1::get<3>(GetParam())), 2351 reverse_output_rate_(std::tr1::get<3>(GetParam())),
2346 expected_snr_(std::tr1::get<4>(GetParam())), 2352 expected_snr_(std::tr1::get<4>(GetParam())),
2347 expected_reverse_snr_(std::tr1::get<5>(GetParam())) {} 2353 expected_reverse_snr_(std::tr1::get<5>(GetParam())) {}
2348 2354
2349 virtual ~AudioProcessingTest() {} 2355 virtual ~AudioProcessingTest() {}
2350 2356
2351 static void SetUpTestCase() { 2357 static void SetUpTestCase() {
2352 // Create all needed output reference files. 2358 // Create all needed output reference files.
2353 const int kNativeRates[] = {8000, 16000, 32000, 48000}; 2359 const int kNativeRates[] = {8000, 16000, 32000, 48000};
2354 const int kNumChannels[] = {1, 2}; 2360 const size_t kNumChannels[] = {1, 2};
2355 for (size_t i = 0; i < arraysize(kNativeRates); ++i) { 2361 for (size_t i = 0; i < arraysize(kNativeRates); ++i) {
2356 for (size_t j = 0; j < arraysize(kNumChannels); ++j) { 2362 for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
2357 for (size_t k = 0; k < arraysize(kNumChannels); ++k) { 2363 for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
2358 // The reference files always have matching input and output channels. 2364 // The reference files always have matching input and output channels.
2359 ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i], 2365 ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i],
2360 kNativeRates[i], kNumChannels[j], kNumChannels[j], 2366 kNativeRates[i], kNumChannels[j], kNumChannels[j],
2361 kNumChannels[k], kNumChannels[k], "ref"); 2367 kNumChannels[k], kNumChannels[k], "ref");
2362 } 2368 }
2363 } 2369 }
2364 } 2370 }
2365 } 2371 }
2366 2372
2367 static void TearDownTestCase() { 2373 static void TearDownTestCase() {
2368 ClearTempFiles(); 2374 ClearTempFiles();
2369 } 2375 }
2370 2376
2371 // Runs a process pass on files with the given parameters and dumps the output 2377 // Runs a process pass on files with the given parameters and dumps the output
2372 // to a file specified with |output_file_prefix|. Both forward and reverse 2378 // to a file specified with |output_file_prefix|. Both forward and reverse
2373 // output streams are dumped. 2379 // output streams are dumped.
2374 static void ProcessFormat(int input_rate, 2380 static void ProcessFormat(int input_rate,
2375 int output_rate, 2381 int output_rate,
2376 int reverse_input_rate, 2382 int reverse_input_rate,
2377 int reverse_output_rate, 2383 int reverse_output_rate,
2378 int num_input_channels, 2384 size_t num_input_channels,
2379 int num_output_channels, 2385 size_t num_output_channels,
2380 int num_reverse_input_channels, 2386 size_t num_reverse_input_channels,
2381 int num_reverse_output_channels, 2387 size_t num_reverse_output_channels,
2382 std::string output_file_prefix) { 2388 std::string output_file_prefix) {
2383 Config config; 2389 Config config;
2384 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); 2390 config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
2385 rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config)); 2391 rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config));
2386 EnableAllAPComponents(ap.get()); 2392 EnableAllAPComponents(ap.get());
2387 2393
2388 ProcessingConfig processing_config = { 2394 ProcessingConfig processing_config = {
2389 {{input_rate, num_input_channels}, 2395 {{input_rate, num_input_channels},
2390 {output_rate, num_output_channels}, 2396 {output_rate, num_output_channels},
2391 {reverse_input_rate, num_reverse_input_channels}, 2397 {reverse_input_rate, num_reverse_input_channels},
(...skipping 343 matching lines...)
2735 std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35), 2741 std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35),
2736 std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0), 2742 std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0),
2737 std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20), 2743 std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20),
2738 std::tr1::make_tuple(16000, 16000, 48000, 16000, 35, 20), 2744 std::tr1::make_tuple(16000, 16000, 48000, 16000, 35, 20),
2739 std::tr1::make_tuple(16000, 16000, 32000, 16000, 40, 20), 2745 std::tr1::make_tuple(16000, 16000, 32000, 16000, 40, 20),
2740 std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0))); 2746 std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0)));
2741 #endif 2747 #endif
2742 2748
2743 } // namespace 2749 } // namespace
2744 } // namespace webrtc 2750 } // namespace webrtc