Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(256)

Side by Side Diff: webrtc/modules/audio_processing/audio_processing_unittest.cc

Issue 2750783004: Add mute state field to AudioFrame. (Closed)
Patch Set: Update new usages of AudioFrame::data_ Created 3 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
80 cb->num_channels(), 80 cb->num_channels(),
81 cb_int.channels()); 81 cb_int.channels());
82 for (size_t i = 0; i < cb->num_channels(); ++i) { 82 for (size_t i = 0; i < cb->num_channels(); ++i) {
83 S16ToFloat(cb_int.channels()[i], 83 S16ToFloat(cb_int.channels()[i],
84 cb->num_frames(), 84 cb->num_frames(),
85 cb->channels()[i]); 85 cb->channels()[i]);
86 } 86 }
87 } 87 }
88 88
// Convenience overload: converts the interleaved int16 samples of |frame|
// into the deinterleaved float buffer |cb| via the pointer-based overload.
void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
  ConvertToFloat(frame.data(), cb);
}
92 92
93 // Number of channels including the keyboard channel. 93 // Number of channels including the keyboard channel.
94 size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) { 94 size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
95 switch (layout) { 95 switch (layout) {
96 case AudioProcessing::kMono: 96 case AudioProcessing::kMono:
97 return 1; 97 return 1;
98 case AudioProcessing::kMonoAndKeyboard: 98 case AudioProcessing::kMonoAndKeyboard:
99 case AudioProcessing::kStereo: 99 case AudioProcessing::kStereo:
100 return 2; 100 return 2;
(...skipping 19 matching lines...) Expand all
120 for (size_t i = 0; i < samples_per_channel; ++i) 120 for (size_t i = 0; i < samples_per_channel; ++i)
121 mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1; 121 mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
122 } 122 }
123 123
// Duplicates the left channel into the right channel of an interleaved
// stereo buffer, making both channels identical.
void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {
  for (size_t frame = 0; frame < samples_per_channel; ++frame) {
    const size_t left_index = frame * 2;
    stereo[left_index + 1] = stereo[left_index];
  }
}
129 129
130 void VerifyChannelsAreEqual(int16_t* stereo, size_t samples_per_channel) { 130 void VerifyChannelsAreEqual(const int16_t* stereo, size_t samples_per_channel) {
131 for (size_t i = 0; i < samples_per_channel; i++) { 131 for (size_t i = 0; i < samples_per_channel; i++) {
132 EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]); 132 EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
133 } 133 }
134 } 134 }
135 135
136 void SetFrameTo(AudioFrame* frame, int16_t value) { 136 void SetFrameTo(AudioFrame* frame, int16_t value) {
137 int16_t* frame_data = frame->mutable_data();
137 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; 138 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
138 ++i) { 139 ++i) {
139 frame->data_[i] = value; 140 frame_data[i] = value;
140 } 141 }
141 } 142 }
142 143
143 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) { 144 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
144 ASSERT_EQ(2u, frame->num_channels_); 145 ASSERT_EQ(2u, frame->num_channels_);
146 int16_t* frame_data = frame->mutable_data();
145 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) { 147 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
146 frame->data_[i] = left; 148 frame_data[i] = left;
147 frame->data_[i + 1] = right; 149 frame_data[i + 1] = right;
148 } 150 }
149 } 151 }
150 152
151 void ScaleFrame(AudioFrame* frame, float scale) { 153 void ScaleFrame(AudioFrame* frame, float scale) {
154 int16_t* frame_data = frame->mutable_data();
152 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; 155 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
153 ++i) { 156 ++i) {
154 frame->data_[i] = FloatS16ToS16(frame->data_[i] * scale); 157 frame_data[i] = FloatS16ToS16(frame_data[i] * scale);
155 } 158 }
156 } 159 }
157 160
158 bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) { 161 bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
159 if (frame1.samples_per_channel_ != frame2.samples_per_channel_) { 162 if (frame1.samples_per_channel_ != frame2.samples_per_channel_) {
160 return false; 163 return false;
161 } 164 }
162 if (frame1.num_channels_ != frame2.num_channels_) { 165 if (frame1.num_channels_ != frame2.num_channels_) {
163 return false; 166 return false;
164 } 167 }
165 if (memcmp(frame1.data_, frame2.data_, 168 if (memcmp(frame1.data(), frame2.data(),
166 frame1.samples_per_channel_ * frame1.num_channels_ * 169 frame1.samples_per_channel_ * frame1.num_channels_ *
167 sizeof(int16_t))) { 170 sizeof(int16_t))) {
168 return false; 171 return false;
169 } 172 }
170 return true; 173 return true;
171 } 174 }
172 175
173 void EnableAllAPComponents(AudioProcessing* ap) { 176 void EnableAllAPComponents(AudioProcessing* ap) {
174 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) 177 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
175 EXPECT_NOERR(ap->echo_control_mobile()->Enable(true)); 178 EXPECT_NOERR(ap->echo_control_mobile()->Enable(true));
(...skipping 22 matching lines...) Expand all
198 } 201 }
199 202
200 // These functions are only used by ApmTest.Process. 203 // These functions are only used by ApmTest.Process.
// Returns the absolute value of |a|.
template <class T>
T AbsValue(T a) {
  if (a > 0)
    return a;
  return -a;
}
205 208
206 int16_t MaxAudioFrame(const AudioFrame& frame) { 209 int16_t MaxAudioFrame(const AudioFrame& frame) {
207 const size_t length = frame.samples_per_channel_ * frame.num_channels_; 210 const size_t length = frame.samples_per_channel_ * frame.num_channels_;
208 int16_t max_data = AbsValue(frame.data_[0]); 211 const int16_t* frame_data = frame.data();
212 int16_t max_data = AbsValue(frame_data[0]);
209 for (size_t i = 1; i < length; i++) { 213 for (size_t i = 1; i < length; i++) {
210 max_data = std::max(max_data, AbsValue(frame.data_[i])); 214 max_data = std::max(max_data, AbsValue(frame_data[i]));
211 } 215 }
212 216
213 return max_data; 217 return max_data;
214 } 218 }
215 219
216 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) 220 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
217 void TestStats(const AudioProcessing::Statistic& test, 221 void TestStats(const AudioProcessing::Statistic& test,
218 const audioproc::Test::Statistic& reference) { 222 const audioproc::Test::Statistic& reference) {
219 EXPECT_EQ(reference.instant(), test.instant); 223 EXPECT_EQ(reference.instant(), test.instant);
220 EXPECT_EQ(reference.average(), test.average); 224 EXPECT_EQ(reference.average(), test.average);
(...skipping 306 matching lines...) Expand 10 before | Expand all | Expand 10 after
527 } 531 }
528 532
// Turns on every audio processing component on this fixture's APM instance.
void ApmTest::EnableAllComponents() {
  EnableAllAPComponents(apm_.get());
}
532 536
// Reads one frame of interleaved int16 stereo audio from |file| into
// |frame|. If |frame| is mono, the stereo data is downmixed in place.
// If |cb| is non-null, the resulting samples are also converted to float
// into |cb|. Returns false on end-of-file (and EXPECTs that the short read
// really was EOF rather than an I/O error).
bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame,
                        ChannelBuffer<float>* cb) {
  // The files always contain stereo audio.
  size_t frame_size = frame->samples_per_channel_ * 2;
  size_t read_count = fread(frame->mutable_data(),
                            sizeof(int16_t),
                            frame_size,
                            file);
  if (read_count != frame_size) {
    // Check that the file really ended.
    EXPECT_NE(0, feof(file));
    return false;  // This is expected.
  }

  if (frame->num_channels_ == 1) {
    // In-place downmix: reading and writing the same buffer is safe here
    // because the mono write index never runs ahead of the stereo read index.
    MixStereoToMono(frame->data(), frame->mutable_data(),
                    frame->samples_per_channel_);
  }

  if (cb) {
    ConvertToFloat(*frame, cb);
  }
  return true;
}
557 561
558 bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) { 562 bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
(...skipping 1037 matching lines...) Expand 10 before | Expand all | Expand 10 after
1596 kProcessSampleRates[i], 1600 kProcessSampleRates[i],
1597 kProcessSampleRates[i], 1601 kProcessSampleRates[i],
1598 2, 1602 2,
1599 2, 1603 2,
1600 2, 1604 2,
1601 false); 1605 false);
1602 int analog_level = 127; 1606 int analog_level = 127;
1603 ASSERT_EQ(0, feof(far_file_)); 1607 ASSERT_EQ(0, feof(far_file_));
1604 ASSERT_EQ(0, feof(near_file_)); 1608 ASSERT_EQ(0, feof(near_file_));
1605 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) { 1609 while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
1606 CopyLeftToRightChannel(revframe_->data_, revframe_->samples_per_channel_); 1610 CopyLeftToRightChannel(revframe_->mutable_data(),
1611 revframe_->samples_per_channel_);
1607 1612
1608 ASSERT_EQ(kNoErr, apm_->ProcessReverseStream(revframe_)); 1613 ASSERT_EQ(kNoErr, apm_->ProcessReverseStream(revframe_));
1609 1614
1610 CopyLeftToRightChannel(frame_->data_, frame_->samples_per_channel_); 1615 CopyLeftToRightChannel(frame_->mutable_data(),
1616 frame_->samples_per_channel_);
1611 frame_->vad_activity_ = AudioFrame::kVadUnknown; 1617 frame_->vad_activity_ = AudioFrame::kVadUnknown;
1612 1618
1613 ASSERT_EQ(kNoErr, apm_->set_stream_delay_ms(0)); 1619 ASSERT_EQ(kNoErr, apm_->set_stream_delay_ms(0));
1614 apm_->echo_cancellation()->set_stream_drift_samples(0); 1620 apm_->echo_cancellation()->set_stream_drift_samples(0);
1615 ASSERT_EQ(kNoErr, 1621 ASSERT_EQ(kNoErr,
1616 apm_->gain_control()->set_stream_analog_level(analog_level)); 1622 apm_->gain_control()->set_stream_analog_level(analog_level));
1617 ASSERT_EQ(kNoErr, apm_->ProcessStream(frame_)); 1623 ASSERT_EQ(kNoErr, apm_->ProcessStream(frame_));
1618 analog_level = apm_->gain_control()->stream_analog_level(); 1624 analog_level = apm_->gain_control()->stream_analog_level();
1619 1625
1620 VerifyChannelsAreEqual(frame_->data_, frame_->samples_per_channel_); 1626 VerifyChannelsAreEqual(frame_->data(), frame_->samples_per_channel_);
1621 } 1627 }
1622 rewind(far_file_); 1628 rewind(far_file_);
1623 rewind(near_file_); 1629 rewind(near_file_);
1624 } 1630 }
1625 } 1631 }
1626 1632
1627 TEST_F(ApmTest, SplittingFilter) { 1633 TEST_F(ApmTest, SplittingFilter) {
1628 // Verify the filter is not active through undistorted audio when: 1634 // Verify the filter is not active through undistorted audio when:
1629 // 1. No components are enabled... 1635 // 1. No components are enabled...
1630 SetFrameTo(frame_, 1000); 1636 SetFrameTo(frame_, 1000);
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after
1742 1748
1743 if (msg.channel_size() > 0) { 1749 if (msg.channel_size() > 0) {
1744 ASSERT_EQ(revframe_->num_channels_, 1750 ASSERT_EQ(revframe_->num_channels_,
1745 static_cast<size_t>(msg.channel_size())); 1751 static_cast<size_t>(msg.channel_size()));
1746 for (int i = 0; i < msg.channel_size(); ++i) { 1752 for (int i = 0; i < msg.channel_size(); ++i) {
1747 memcpy(revfloat_cb_->channels()[i], 1753 memcpy(revfloat_cb_->channels()[i],
1748 msg.channel(i).data(), 1754 msg.channel(i).data(),
1749 msg.channel(i).size()); 1755 msg.channel(i).size());
1750 } 1756 }
1751 } else { 1757 } else {
1752 memcpy(revframe_->data_, msg.data().data(), msg.data().size()); 1758 memcpy(revframe_->mutable_data(), msg.data().data(), msg.data().size());
1753 if (format == kFloatFormat) { 1759 if (format == kFloatFormat) {
1754 // We're using an int16 input file; convert to float. 1760 // We're using an int16 input file; convert to float.
1755 ConvertToFloat(*revframe_, revfloat_cb_.get()); 1761 ConvertToFloat(*revframe_, revfloat_cb_.get());
1756 } 1762 }
1757 } 1763 }
1758 AnalyzeReverseStreamChooser(format); 1764 AnalyzeReverseStreamChooser(format);
1759 1765
1760 } else if (event_msg.type() == audioproc::Event::STREAM) { 1766 } else if (event_msg.type() == audioproc::Event::STREAM) {
1761 const audioproc::Stream msg = event_msg.stream(); 1767 const audioproc::Stream msg = event_msg.stream();
1762 // ProcessStream could have changed this for the output frame. 1768 // ProcessStream could have changed this for the output frame.
(...skipping 10 matching lines...) Expand all
1773 1779
1774 if (msg.input_channel_size() > 0) { 1780 if (msg.input_channel_size() > 0) {
1775 ASSERT_EQ(frame_->num_channels_, 1781 ASSERT_EQ(frame_->num_channels_,
1776 static_cast<size_t>(msg.input_channel_size())); 1782 static_cast<size_t>(msg.input_channel_size()));
1777 for (int i = 0; i < msg.input_channel_size(); ++i) { 1783 for (int i = 0; i < msg.input_channel_size(); ++i) {
1778 memcpy(float_cb_->channels()[i], 1784 memcpy(float_cb_->channels()[i],
1779 msg.input_channel(i).data(), 1785 msg.input_channel(i).data(),
1780 msg.input_channel(i).size()); 1786 msg.input_channel(i).size());
1781 } 1787 }
1782 } else { 1788 } else {
1783 memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size()); 1789 memcpy(frame_->mutable_data(), msg.input_data().data(),
1790 msg.input_data().size());
1784 if (format == kFloatFormat) { 1791 if (format == kFloatFormat) {
1785 // We're using an int16 input file; convert to float. 1792 // We're using an int16 input file; convert to float.
1786 ConvertToFloat(*frame_, float_cb_.get()); 1793 ConvertToFloat(*frame_, float_cb_.get());
1787 } 1794 }
1788 } 1795 }
1789 ProcessStreamChooser(format); 1796 ProcessStreamChooser(format);
1790 } 1797 }
1791 } 1798 }
1792 EXPECT_NOERR(apm_->StopDebugRecording()); 1799 EXPECT_NOERR(apm_->StopDebugRecording());
1793 fclose(in_file); 1800 fclose(in_file);
(...skipping 188 matching lines...) Expand 10 before | Expand all | Expand 10 after
1982 LayoutFromChannels(num_render_channels))); 1989 LayoutFromChannels(num_render_channels)));
1983 1990
1984 EXPECT_NOERR(apm_->set_stream_delay_ms(0)); 1991 EXPECT_NOERR(apm_->set_stream_delay_ms(0));
1985 EXPECT_NOERR(fapm->set_stream_delay_ms(0)); 1992 EXPECT_NOERR(fapm->set_stream_delay_ms(0));
1986 apm_->echo_cancellation()->set_stream_drift_samples(0); 1993 apm_->echo_cancellation()->set_stream_drift_samples(0);
1987 fapm->echo_cancellation()->set_stream_drift_samples(0); 1994 fapm->echo_cancellation()->set_stream_drift_samples(0);
1988 EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(analog_level)); 1995 EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(analog_level));
1989 EXPECT_NOERR(fapm->gain_control()->set_stream_analog_level(analog_level)); 1996 EXPECT_NOERR(fapm->gain_control()->set_stream_analog_level(analog_level));
1990 1997
1991 EXPECT_NOERR(apm_->ProcessStream(frame_)); 1998 EXPECT_NOERR(apm_->ProcessStream(frame_));
1992 Deinterleave(frame_->data_, samples_per_channel, num_output_channels, 1999 Deinterleave(frame_->data(), samples_per_channel, num_output_channels,
1993 output_int16.channels()); 2000 output_int16.channels());
1994 2001
1995 EXPECT_NOERR(fapm->ProcessStream( 2002 EXPECT_NOERR(fapm->ProcessStream(
1996 float_cb_->channels(), 2003 float_cb_->channels(),
1997 samples_per_channel, 2004 samples_per_channel,
1998 test->sample_rate(), 2005 test->sample_rate(),
1999 LayoutFromChannels(num_input_channels), 2006 LayoutFromChannels(num_input_channels),
2000 test->sample_rate(), 2007 test->sample_rate(),
2001 LayoutFromChannels(num_output_channels), 2008 LayoutFromChannels(num_output_channels),
2002 float_cb_->channels())); 2009 float_cb_->channels()));
(...skipping 143 matching lines...) Expand 10 before | Expand all | Expand 10 after
2146 if (apm_->voice_detection()->stream_has_voice()) { 2153 if (apm_->voice_detection()->stream_has_voice()) {
2147 has_voice_count++; 2154 has_voice_count++;
2148 EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_); 2155 EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
2149 } else { 2156 } else {
2150 EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_); 2157 EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
2151 } 2158 }
2152 2159
2153 ns_speech_prob_average += apm_->noise_suppression()->speech_probability(); 2160 ns_speech_prob_average += apm_->noise_suppression()->speech_probability();
2154 2161
2155 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_; 2162 size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_;
2156 size_t write_count = fwrite(frame_->data_, 2163 size_t write_count = fwrite(frame_->data(),
2157 sizeof(int16_t), 2164 sizeof(int16_t),
2158 frame_size, 2165 frame_size,
2159 out_file_); 2166 out_file_);
2160 ASSERT_EQ(frame_size, write_count); 2167 ASSERT_EQ(frame_size, write_count);
2161 2168
2162 // Reset in case of downmixing. 2169 // Reset in case of downmixing.
2163 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels()); 2170 frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
2164 frame_count++; 2171 frame_count++;
2165 2172
2166 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) 2173 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
(...skipping 709 matching lines...) Expand 10 before | Expand all | Expand 10 after
2876 // TODO(peah): Remove the testing for 2883 // TODO(peah): Remove the testing for
2877 // apm->capture_nonlocked_.level_controller_enabled once the value in config_ 2884 // apm->capture_nonlocked_.level_controller_enabled once the value in config_
2878 // is instead used to activate the level controller. 2885 // is instead used to activate the level controller.
2879 EXPECT_FALSE(apm->capture_nonlocked_.level_controller_enabled); 2886 EXPECT_FALSE(apm->capture_nonlocked_.level_controller_enabled);
2880 EXPECT_NEAR(kTargetLcPeakLeveldBFS, 2887 EXPECT_NEAR(kTargetLcPeakLeveldBFS,
2881 apm->config_.level_controller.initial_peak_level_dbfs, 2888 apm->config_.level_controller.initial_peak_level_dbfs,
2882 std::numeric_limits<float>::epsilon()); 2889 std::numeric_limits<float>::epsilon());
2883 } 2890 }
2884 2891
2885 } // namespace webrtc 2892 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698