Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(812)

Side by Side Diff: webrtc/modules/audio_processing/test/audio_processing_simulator.cc

Issue 1907223003: Extension and refactoring of the audioproc_f tool to be a fully fledged tool for audio processing m… (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Changes in response to reviewer comments Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 /*
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "webrtc/modules/audio_processing/test/audio_processing_simulator.h"
12
#include <algorithm>
#include <sstream>
#include <string>

#include "webrtc/base/stringutils.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
18
19 namespace webrtc {
20 namespace test {
21 namespace {
22
23 void CopyFromAudioFrame(const AudioFrame& src, ChannelBuffer<float>* dest) {
24 RTC_CHECK_EQ(src.num_channels_, dest->num_channels());
25 RTC_CHECK_EQ(src.samples_per_channel_, dest->num_frames());
26 // Copy the data from the input buffer.
27 for (size_t ch = 0; ch < src.num_channels_; ++ch) {
28 for (size_t sample = 0; sample < src.samples_per_channel_; ++sample) {
29 dest->channels()[ch][sample] =
30 src.data_[sample * src.num_channels_ + ch] / 32767.0f;
aluebs-webrtc 2016/04/27 16:06:47 There are de-interleave and format methods in audi
peah-webrtc 2016/04/28 07:41:11 Thanks! Done.
31 }
32 }
33 }
34
// Returns |wav_name| with |counter| inserted between the base name and the
// extension, e.g. ("output.wav", 3) -> "output_3.wav".
// NOTE(review): assumes the name ends in a four-character extension such as
// ".wav" — confirm callers guarantee this, as shorter names would misbehave.
std::string GetIndexedOutputWavFilename(const std::string& wav_name,
                                        int counter) {
  // A stringstream avoids the fixed-size char buffer (and its silent
  // truncation risk) that a sprintf-style formatter would need.
  std::ostringstream ss;
  ss << wav_name.substr(0, wav_name.size() - 4) << "_" << counter
     << wav_name.substr(wav_name.size() - 4);
  return ss.str();
}
46
47 } // namespace
48
49 void CopyToAudioFrame(const ChannelBuffer<float>& src, AudioFrame* dest) {
50 RTC_CHECK_EQ(src.num_channels(), dest->num_channels_);
51 RTC_CHECK_EQ(src.num_frames(), dest->samples_per_channel_);
52 for (size_t ch = 0; ch < dest->num_channels_; ++ch) {
53 for (size_t sample = 0; sample < dest->samples_per_channel_; ++sample) {
54 dest->data_[sample * dest->num_channels_ + ch] =
55 src.channels()[ch][sample] * 32767;
56 }
57 }
58 }
59
// Accumulates the elapsed time since construction into the ApiCallStatistics
// pointed to by |proc_time_|: the interval is added to the running sum and
// folded into the running max/min.
AudioProcessingSimulator::ScopedTimer::~ScopedTimer() {
  TickInterval interval = TickTime::Now() - start_time_;
  proc_time_->sum += interval;
  proc_time_->max = std::max(proc_time_->max, interval);
  proc_time_->min = std::min(proc_time_->min, interval);
}
66
67 void AudioProcessingSimulator::ProcessStream(bool fixed_interface) {
68 if (fixed_interface) {
69 const auto st = ScopedTimer(mutable_proc_time());
70 RTC_CHECK_EQ(AudioProcessing::kNoError, ap_->ProcessStream(&fwd_frame_));
71 CopyFromAudioFrame(fwd_frame_, out_buf_.get());
aluebs-webrtc 2016/04/27 16:06:47 The st should have a tighter scope to not take int
peah-webrtc 2016/04/28 07:41:11 Good find! Done.
72 } else {
73 const auto st = ScopedTimer(mutable_proc_time());
74 RTC_CHECK_EQ(AudioProcessing::kNoError,
75 ap_->ProcessStream(in_buf_->channels(), in_config_,
76 out_config_, out_buf_->channels()));
77 }
78
79 if (buffer_writer_) {
80 buffer_writer_->Write(*out_buf_);
81 }
82
83 ++num_process_stream_calls_;
84 }
85
86 void AudioProcessingSimulator::ProcessReverseStream(bool fixed_interface) {
87 if (fixed_interface) {
88 const auto st = ScopedTimer(mutable_proc_time());
89 RTC_CHECK_EQ(AudioProcessing::kNoError, ap_->ProcessStream(&rev_frame_));
90 CopyFromAudioFrame(rev_frame_, out_buf_.get());
aluebs-webrtc 2016/04/27 16:06:47 rev_out_buf_
peah-webrtc 2016/04/28 07:41:11 Great find! Done.
91
92 } else {
93 const auto st = ScopedTimer(mutable_proc_time());
94 RTC_CHECK_EQ(AudioProcessing::kNoError,
95 ap_->ProcessReverseStream(
96 reverse_in_buf_->channels(), reverse_in_config_,
97 reverse_out_config_, reverse_out_buf_->channels()));
98 }
99
100 if (reverse_buffer_writer_) {
101 reverse_buffer_writer_->Write(*reverse_out_buf_);
102 }
103
104 ++num_reverse_process_stream_calls_;
105 }
106
107 void AudioProcessingSimulator::SetupBuffersConfigsOutputs(
108 int input_sample_rate_hz,
109 int output_sample_rate_hz,
110 int reverse_input_sample_rate_hz,
111 int reverse_output_sample_rate_hz,
112 int input_num_channels,
113 int output_num_channels,
114 int reverse_input_num_channels,
115 int reverse_output_num_channels) {
116 in_config_ = StreamConfig(input_sample_rate_hz, input_num_channels);
117 in_buf_.reset(new ChannelBuffer<float>(
118 rtc::CheckedDivExact(input_sample_rate_hz,
119 AudioProcessingSimulator::kChunksPerSecond),
aluebs-webrtc 2016/04/27 16:06:47 AudioProcessingSimulator:: not needed.
peah-webrtc 2016/04/28 07:41:11 Done.
120 input_num_channels));
121
122 reverse_in_config_ =
123 StreamConfig(reverse_input_sample_rate_hz, reverse_input_num_channels);
124 reverse_in_buf_.reset(new ChannelBuffer<float>(
125 rtc::CheckedDivExact(reverse_input_sample_rate_hz,
126 AudioProcessingSimulator::kChunksPerSecond),
127 reverse_input_num_channels));
128
129 out_config_ = StreamConfig(output_sample_rate_hz, output_num_channels);
130 out_buf_.reset(new ChannelBuffer<float>(
131 rtc::CheckedDivExact(output_sample_rate_hz,
132 AudioProcessingSimulator::kChunksPerSecond),
133 output_num_channels));
134
135 reverse_out_config_ =
136 StreamConfig(reverse_output_sample_rate_hz, reverse_output_num_channels);
137 reverse_out_buf_.reset(new ChannelBuffer<float>(
138 rtc::CheckedDivExact(reverse_output_sample_rate_hz,
139 AudioProcessingSimulator::kChunksPerSecond),
140 reverse_output_num_channels));
141
142 fwd_frame_.sample_rate_hz_ = input_sample_rate_hz;
143 fwd_frame_.samples_per_channel_ =
144 rtc::CheckedDivExact(fwd_frame_.sample_rate_hz_, kChunksPerSecond);
145 fwd_frame_.num_channels_ = input_num_channels;
146
147 rev_frame_.sample_rate_hz_ = reverse_input_sample_rate_hz;
148 rev_frame_.samples_per_channel_ =
149 rtc::CheckedDivExact(rev_frame_.sample_rate_hz_, kChunksPerSecond);
150 rev_frame_.num_channels_ = reverse_input_num_channels;
151
152 if (settings_.use_verbose_logging) {
153 printf("Sample rates:\n");
154 printf(" Forward input: %d\n", input_sample_rate_hz);
155 printf(" Forward output: %d\n", output_sample_rate_hz);
156 printf(" Reverse input: %d\n", reverse_input_sample_rate_hz);
157 printf(" Reverse output: %d\n", reverse_output_sample_rate_hz);
158 printf("Number of channels:\n");
159 printf(" Forward input: %d\n", input_num_channels);
160 printf(" Forward output: %d\n", output_num_channels);
161 printf(" Reverse input: %d\n", reverse_input_num_channels);
162 printf(" Reverse output: %d\n", reverse_output_num_channels);
163 }
164
165 SetupOutput();
166 }
167
168 void AudioProcessingSimulator::SetupOutput() {
169 if (settings_.output_filename) {
170 std::string filename;
171 if (settings_.store_intermediate_output) {
172 filename = GetIndexedOutputWavFilename(*settings_.output_filename,
173 output_reset_counter_);
174 } else {
175 filename = *settings_.output_filename;
176 }
177
178 std::unique_ptr<WavWriter> out_file(
179 new WavWriter(filename, out_config_.sample_rate_hz(),
aluebs-webrtc 2016/04/27 16:06:47 Is this going to work for the AudioFrame interface
peah-webrtc 2016/04/28 07:41:11 It will if the simulation is set up properly. I di
aluebs-webrtc 2016/04/30 02:08:04 Sounds good.
peah-webrtc 2016/05/09 11:37:30 Acknowledged.
180 static_cast<size_t>(out_config_.num_channels())));
181 buffer_writer_.reset(new ChannelBufferWavWriter(std::move(out_file)));
182 }
183
184 if (settings_.reverse_output_filename) {
185 std::string filename;
186 if (settings_.store_intermediate_output) {
187 filename = GetIndexedOutputWavFilename(*settings_.reverse_output_filename,
188 output_reset_counter_);
189 } else {
190 filename = *settings_.reverse_output_filename;
191 }
192
193 std::unique_ptr<WavWriter> reverse_out_file(
194 new WavWriter(filename, reverse_out_config_.sample_rate_hz(),
195 static_cast<size_t>(reverse_out_config_.num_channels())));
196 reverse_buffer_writer_.reset(
197 new ChannelBufferWavWriter(std::move(reverse_out_file)));
198 }
199
200 ++output_reset_counter_;
201 }
202
// Tears down the audio processor: stops the AEC debug (aecdump) recording if
// one was started in CreateAudioProcessor().
void AudioProcessingSimulator::DestroyAudioProcessor() {
  if (settings_.aec_dump_output_filename) {
    RTC_CHECK_EQ(AudioProcessing::kNoError, ap_->StopDebugRecording());
  }
}
208
209 void AudioProcessingSimulator::CreateAudioProcessor() {
210 Config config;
211 if (settings_.use_bf && *settings_.use_bf) {
212 config.Set<Beamforming>(new Beamforming(
213 true, ParseArrayGeometry(*settings_.microphone_positions),
214 SphericalPointf(DegreesToRadians(settings_.target_angle_degrees), 0.f,
215 1.f)));
216 }
217 if (settings_.use_ts) {
218 config.Set<ExperimentalNs>(new ExperimentalNs(*settings_.use_ts));
219 }
220 if (settings_.use_ie) {
221 config.Set<Intelligibility>(new Intelligibility(*settings_.use_ie));
222 }
223 if (settings_.use_aec3) {
224 config.Set<EchoCanceller3>(new EchoCanceller3(*settings_.use_aec3));
225 }
226 if (settings_.use_refined_adaptive_filter) {
227 config.Set<RefinedAdaptiveFilter>(
228 new RefinedAdaptiveFilter(*settings_.use_refined_adaptive_filter));
229 }
230 if ((settings_.use_extended_filter && *settings_.use_extended_filter) ||
231 !settings_.use_extended_filter) {
aluebs-webrtc 2016/04/27 16:06:47 Instead of these complicated conditions, wouldn't
peah-webrtc 2016/04/28 07:41:11 Definitely, and I think that the reason that it is
aluebs-webrtc 2016/04/30 02:08:04 It can be simplified as: config.Set<ExtendedFilter
peah-webrtc 2016/05/02 06:18:42 Of course! Nice! Done.
232 config.Set<ExtendedFilter>(new ExtendedFilter(true));
233 }
234 if ((settings_.use_delay_agnostic && *settings_.use_delay_agnostic) ||
235 !settings_.use_delay_agnostic) {
236 config.Set<DelayAgnostic>(new DelayAgnostic(true));
237 }
238
239 ap_.reset(AudioProcessing::Create(config));
240
241 if (settings_.use_aec) {
242 RTC_CHECK_EQ(AudioProcessing::kNoError,
243 ap_->echo_cancellation()->Enable(*settings_.use_aec));
244 }
245 if (settings_.use_aecm) {
246 RTC_CHECK_EQ(AudioProcessing::kNoError,
247 ap_->echo_control_mobile()->Enable(*settings_.use_aecm));
248 }
249 if (settings_.use_agc) {
250 RTC_CHECK_EQ(AudioProcessing::kNoError,
251 ap_->gain_control()->Enable(*settings_.use_agc));
252 }
253 if (settings_.use_hpf) {
254 RTC_CHECK_EQ(AudioProcessing::kNoError,
255 ap_->high_pass_filter()->Enable(*settings_.use_hpf));
256 }
257 if (settings_.use_ns) {
258 RTC_CHECK_EQ(AudioProcessing::kNoError,
259 ap_->noise_suppression()->Enable(*settings_.use_ns));
260 }
261 if (settings_.use_le) {
262 RTC_CHECK_EQ(AudioProcessing::kNoError,
263 ap_->level_estimator()->Enable(*settings_.use_le));
264 }
265 if (settings_.use_vad) {
266 RTC_CHECK_EQ(AudioProcessing::kNoError,
267 ap_->voice_detection()->Enable(*settings_.use_vad));
268 }
269 if (settings_.use_agc_limiter) {
270 RTC_CHECK_EQ(AudioProcessing::kNoError, ap_->gain_control()->enable_limiter(
271 *settings_.use_agc_limiter));
272 }
273 if (settings_.agc_target_level) {
274 RTC_CHECK_EQ(AudioProcessing::kNoError,
275 ap_->gain_control()->set_target_level_dbfs(
276 *settings_.agc_target_level));
277 }
278
279 if (settings_.agc_mode) {
280 RTC_CHECK_EQ(
281 AudioProcessing::kNoError,
282 ap_->gain_control()->set_mode(
283 static_cast<webrtc::GainControl::Mode>(*settings_.agc_mode)));
284 }
285
286 if (settings_.use_drift_compensation) {
287 RTC_CHECK_EQ(AudioProcessing::kNoError,
288 ap_->echo_cancellation()->enable_drift_compensation(
289 *settings_.use_drift_compensation));
290 }
291
292 if (settings_.aec_suppression_level) {
293 RTC_CHECK_EQ(AudioProcessing::kNoError,
294 ap_->echo_cancellation()->set_suppression_level(
295 static_cast<webrtc::EchoCancellation::SuppressionLevel>(
296 *settings_.aec_suppression_level)));
297 }
298
299 if (settings_.aecm_routing_mode) {
300 RTC_CHECK_EQ(AudioProcessing::kNoError,
301 ap_->echo_control_mobile()->set_routing_mode(
302 static_cast<webrtc::EchoControlMobile::RoutingMode>(
303 *settings_.aecm_routing_mode)));
304 }
305
306 if (settings_.use_aecm_comfort_noise) {
307 RTC_CHECK_EQ(AudioProcessing::kNoError,
308 ap_->echo_control_mobile()->enable_comfort_noise(
309 *settings_.use_aecm_comfort_noise));
310 }
311
312 if (settings_.vad_likelihood) {
313 RTC_CHECK_EQ(AudioProcessing::kNoError,
314 ap_->voice_detection()->set_likelihood(
315 static_cast<webrtc::VoiceDetection::Likelihood>(
316 *settings_.vad_likelihood)));
317 }
318 if (settings_.ns_level) {
319 RTC_CHECK_EQ(
320 AudioProcessing::kNoError,
321 ap_->noise_suppression()->set_level(
322 static_cast<NoiseSuppression::Level>(*settings_.ns_level)));
323 }
324
325 if (settings_.use_ts) {
326 ap_->set_stream_key_pressed(*settings_.use_ts);
327 }
328
329 if (settings_.aec_dump_output_filename) {
330 size_t kMaxFilenameSize = AudioProcessing::kMaxFilenameSize;
331 RTC_CHECK_LE(settings_.aec_dump_output_filename->size(), kMaxFilenameSize);
332 RTC_CHECK_EQ(AudioProcessing::kNoError,
333 ap_->StartDebugRecording(
334 settings_.aec_dump_output_filename->c_str(), -1));
335 }
336 }
337
338 } // namespace test
339 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698