OLD | NEW |
---|---|
1 /* | |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 | |
11 #include "webrtc/modules/audio_processing/test/aec_dump_based_simulator.h" | |
12 | |
13 #include "webrtc/base/checks.h" | |
14 #include "webrtc/modules/audio_processing/test/protobuf_utils.h" | |
15 #include "webrtc/test/testsupport/trace_to_stderr.h" | |
16 | |
17 namespace webrtc { | |
18 namespace test { | |
19 namespace { | |
20 | |
21 // Verify output bitexactness for the fixed interface. | |
22 bool VerifyFixedBitExactness(const webrtc::audioproc::Stream& msg, | |
23 const AudioFrame& frame) { | |
24 RTC_CHECK_EQ( | |
25 sizeof(int16_t) * frame.samples_per_channel_ * frame.num_channels_, | |
26 msg.output_data().size()); | |
27 for (size_t k = 0; k < frame.num_channels_ * frame.samples_per_channel_; | |
28 ++k) { | |
29 if (msg.output_data().data()[k] != frame.data_[k]) { | |
aluebs-webrtc
2016/05/03 22:30:40
Did you verify that it is possible to achieve bite
peah-webrtc
2016/05/09 11:37:31
It is/should definitely be possible to achieve bitex
aluebs-webrtc
2016/05/09 16:27:32
As I said before, it would have been better to firs
peah-webrtc
2016/05/11 12:19:27
Done.
| |
30 return false; | |
31 } | |
32 } | |
33 return true; | |
34 } | |
35 | |
36 // Verify output bitexactness for the float interface. | |
37 bool VerifyFloatBitExactness(const webrtc::audioproc::Stream& msg, | |
38 const StreamConfig& out_config, | |
39 const ChannelBuffer<float>& out_buf) { | |
40 if (static_cast<size_t>(msg.output_channel_size()) != | |
41 out_config.num_channels() || | |
42 msg.output_channel(0).size() != out_config.num_frames()) { | |
43 return false; | |
aluebs-webrtc
2016/05/03 22:30:40
Why does this return false while the above CHECKs
peah-webrtc
2016/05/09 11:37:31
Good point! Will fix that.
Done.
| |
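Editor's note: a minimal sketch of what the agreed change could look like, assuming the dimension mismatch simply becomes a fatal check (mirroring the fixed-interface verifier above) while the per-sample comparisons are kept exactly as in this patch:

```cpp
// Sketch only: dimension mismatches are treated as hard failures via
// RTC_CHECK_EQ; only per-sample differences report non-bitexactness.
bool VerifyFloatBitExactness(const webrtc::audioproc::Stream& msg,
                             const StreamConfig& out_config,
                             const ChannelBuffer<float>& out_buf) {
  RTC_CHECK_EQ(out_config.num_channels(),
               static_cast<size_t>(msg.output_channel_size()));
  RTC_CHECK_EQ(out_config.num_frames(), msg.output_channel(0).size());
  for (int ch = 0; ch < msg.output_channel_size(); ++ch) {
    for (size_t sample = 0; sample < out_config.num_frames(); ++sample) {
      if (msg.output_channel(ch).data()[sample] !=
          out_buf.channels()[ch][sample]) {
        return false;
      }
    }
  }
  return true;
}
```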
44 } else { | |
45 for (int ch = 0; ch < msg.output_channel_size(); ++ch) { | |
46 for (size_t sample = 0; sample < out_config.num_frames(); ++sample) { | |
47 if (msg.output_channel(ch).data()[sample] != | |
48 out_buf.channels()[ch][sample]) { | |
49 return false; | |
50 } | |
51 } | |
52 } | |
53 } | |
54 return true; | |
55 } | |
56 | |
57 } // namespace | |
58 | |
59 void AecDumpBasedSimulator::PrepareProcessStreamCall( | |
60 const webrtc::audioproc::Stream& msg) { | |
61 if (msg.has_input_data()) { | |
aluebs-webrtc
2016/05/03 22:30:40
Why does this decide the interface? Is it just a
peah-webrtc
2016/05/09 11:37:31
Yes, that is how it is currently in the protobuf f
aluebs-webrtc
2016/05/09 16:27:32
Agreed.
peah-webrtc
2016/05/11 12:19:27
Acknowledged.
| |
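Editor's note: for context, the dispatch follows how the two capture paths serialize audio in the dump: the fixed (AudioFrame) interface writes a single interleaved int16_t blob, while the float interface writes one bytes entry per channel. A rough illustration of the writer side, using only the accessors implied by the reader code in this file; the helper names and setter overloads are assumptions, not part of this patch:

```cpp
// Illustration only: the two capture paths populate different fields,
// which is why has_input_data() can act as the interface discriminator.
void FillFixed(const AudioFrame& frame, webrtc::audioproc::Stream* msg) {
  // Fixed interface: one interleaved int16_t blob in input_data.
  msg->set_input_data(reinterpret_cast<const char*>(frame.data_),
                      sizeof(int16_t) * frame.samples_per_channel_ *
                          frame.num_channels_);
}

void FillFloat(const StreamConfig& config,
               const ChannelBuffer<float>& buf,
               webrtc::audioproc::Stream* msg) {
  // Float interface: one bytes entry per channel; input_data stays unset.
  for (size_t ch = 0; ch < config.num_channels(); ++ch) {
    msg->add_input_channel(reinterpret_cast<const char*>(buf.channels()[ch]),
                           sizeof(float) * config.num_frames());
  }
}
```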
62 // Fixed interface processing. | |
63 // Verify interface invariance. | |
64 RTC_CHECK(interface_used_ == InterfaceType::kFixedInterface || | |
65 interface_used_ == InterfaceType::kNotSpecified); | |
66 interface_used_ = InterfaceType::kFixedInterface; | |
67 | |
68 // ProcessStream could have changed this for the output frame. | |
aluebs-webrtc
2016/05/03 22:30:40
It doesn't though, right?
peah-webrtc
2016/05/09 11:37:31
This is how it is currently done in process_test.c
aluebs-webrtc
2016/05/09 16:27:32
The reason for moving away from process_test is to
peah-webrtc
2016/05/11 12:19:27
You mean that implicitly this happens in the line
aluebs-webrtc
2016/05/11 15:41:55
It seemed you agreed with removing this line, but t
peah-webrtc
2016/05/18 12:18:50
Sorry, my mistake! Thanks for catching that!
Done.
| |
69 fwd_frame_.num_channels_ = ap_->num_input_channels(); | |
70 | |
71 // Populate input buffer. | |
72 RTC_CHECK_EQ(sizeof(int16_t) * fwd_frame_.samples_per_channel_ * | |
aluebs-webrtc
2016/05/03 22:30:40
sizeof(fwd_frame_.data_[0])?
peah-webrtc
2016/05/09 11:37:31
Done.
| |
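Editor's note: a sketch of the check after applying the suggestion, assuming it simply swaps the hard-coded int16_t for the element type of the destination buffer:

```cpp
// Sketch: derive the sample size from the frame's data buffer.
RTC_CHECK_EQ(sizeof(fwd_frame_.data_[0]) * fwd_frame_.samples_per_channel_ *
                 fwd_frame_.num_channels_,
             msg.input_data().size());
memcpy(fwd_frame_.data_, msg.input_data().data(), msg.input_data().size());
```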
73 fwd_frame_.num_channels_, | |
74 msg.input_data().size()); | |
75 memcpy(fwd_frame_.data_, msg.input_data().data(), msg.input_data().size()); | |
76 } else { | |
77 // Float interface processing. | |
78 // Verify interface invariance. | |
79 RTC_CHECK(interface_used_ == InterfaceType::kFloatInterface || | |
80 interface_used_ == InterfaceType::kNotSpecified); | |
81 interface_used_ = InterfaceType::kFloatInterface; | |
82 | |
83 RTC_CHECK_EQ(in_buf_->num_channels(), | |
84 static_cast<size_t>(msg.input_channel_size())); | |
85 | |
86 // Populate input buffer. | |
87 for (int i = 0; i < msg.input_channel_size(); ++i) { | |
aluebs-webrtc
2016/05/03 22:30:40
size_t?
peah-webrtc
2016/05/09 11:37:31
No, msg.input_channel_size() is of type int.
aluebs-webrtc
2016/05/09 16:27:32
Acknowledged.
peah-webrtc
2016/05/11 12:19:27
Acknowledged.
| |
88 RTC_CHECK_EQ(in_buf_->num_frames() * sizeof(*in_buf_->channels()[i]), | |
89 msg.input_channel(i).size()); | |
90 std::memcpy(in_buf_->channels()[i], msg.input_channel(i).data(), | |
91 msg.input_channel(i).size()); | |
92 } | |
93 } | |
94 | |
95 if (!settings_.stream_delay) { | |
96 if (msg.has_delay()) { | |
aluebs-webrtc
2016/05/03 22:30:40
You probably still want to set_stream_delay_ms to
peah-webrtc
2016/05/09 11:37:31
No, I don't think so. If the recording has no stre
aluebs-webrtc
2016/05/09 16:27:32
Good point.
peah-webrtc
2016/05/11 12:19:27
Acknowledged.
| |
97 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
98 ap_->set_stream_delay_ms(msg.delay())); | |
99 } | |
100 } else { | |
101 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
102 ap_->set_stream_delay_ms(*settings_.stream_delay)); | |
103 } | |
104 | |
105 if (!settings_.stream_drift_samples) { | |
106 if (msg.has_drift()) { | |
107 ap_->echo_cancellation()->set_stream_drift_samples(msg.drift()); | |
108 } | |
109 } else { | |
110 ap_->echo_cancellation()->set_stream_drift_samples( | |
111 *settings_.stream_drift_samples); | |
112 } | |
113 | |
114 if (msg.has_keypress()) { | |
aluebs-webrtc
2016/05/03 22:30:40
If it doesn't, set it depending on use_ts.
peah-webrtc
2016/05/09 11:37:31
Done.
| |
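Editor's note: the resolved code is not shown here; one literal reading of the reviewer's suggestion, heavily hedged since the actual fallback chosen in the later patch set may differ (settings_.use_ts is the existing transient-suppression flag in this file):

```cpp
// Sketch: fall back to the transient-suppression flag when the dump
// carries no keypress information.
if (msg.has_keypress()) {
  ap_->set_stream_key_pressed(msg.keypress());
} else {
  ap_->set_stream_key_pressed(settings_.use_ts && *settings_.use_ts);
}
```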
115 ap_->set_stream_key_pressed(msg.keypress()); | |
116 } | |
117 } | |
aluebs-webrtc
2016/05/03 22:30:41
Aren't you missing set_stream_analog_level here?
peah-webrtc
2016/05/09 11:37:31
Great find!
Done.
| |
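Editor's note: the call the reviewer is pointing at would presumably mirror what the legacy process_test does with the recorded mic level, along these lines (assuming the Stream message exposes the level via msg.level()):

```cpp
// Sketch: forward the recorded analog mic level to the AGC before
// ProcessStream(), so gain_control() sees the same input as in the call.
if (msg.has_level()) {
  RTC_CHECK_EQ(AudioProcessing::kNoError,
               ap_->gain_control()->set_stream_analog_level(msg.level()));
}
```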
118 | |
119 void AecDumpBasedSimulator::VerifyProcessStreamBitExactness( | |
120 const webrtc::audioproc::Stream& msg) { | |
121 if (bitexact_output_) { | |
122 if (interface_used_ == InterfaceType::kFixedInterface) { | |
123 bitexact_output_ = VerifyFixedBitExactness(msg, fwd_frame_); | |
124 } else { | |
125 bitexact_output_ = VerifyFloatBitExactness(msg, out_config_, *out_buf_); | |
126 } | |
127 } | |
128 } | |
129 | |
130 void AecDumpBasedSimulator::PrepareReverseProcessStreamCall( | |
131 const webrtc::audioproc::ReverseStream& msg) { | |
132 if (msg.has_data()) { | |
133 // Fixed interface processing. | |
134 // Verify interface invariance. | |
135 RTC_CHECK(interface_used_ == InterfaceType::kFixedInterface || | |
136 interface_used_ == InterfaceType::kNotSpecified); | |
137 interface_used_ = InterfaceType::kFixedInterface; | |
138 | |
139 // Populate input buffer. | |
140 RTC_CHECK_EQ(sizeof(int16_t) * rev_frame_.samples_per_channel_ * | |
141 rev_frame_.num_channels_, | |
142 msg.data().size()); | |
143 memcpy(rev_frame_.data_, msg.data().data(), msg.data().size()); | |
144 } else { | |
145 // Float interface processing. | |
146 // Verify interface invariance. | |
147 RTC_CHECK(interface_used_ == InterfaceType::kFloatInterface || | |
148 interface_used_ == InterfaceType::kNotSpecified); | |
149 interface_used_ = InterfaceType::kFloatInterface; | |
150 | |
151 RTC_CHECK_EQ(reverse_in_buf_->num_channels(), | |
152 static_cast<size_t>(msg.channel_size())); | |
153 | |
154 // Populate input buffer. | |
155 for (int i = 0; i < msg.channel_size(); ++i) { | |
156 RTC_CHECK_EQ( | |
157 reverse_in_buf_->num_frames() * sizeof(*in_buf_->channels()[i]), | |
aluebs-webrtc
2016/05/03 22:30:40
reverse_in_buf_
peah-webrtc
2016/05/09 11:37:31
Good find!
Done.
| |
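Editor's note: after the fix, the size check presumably references the reverse buffer consistently:

```cpp
// Sketch: both the frame count and the element size come from
// reverse_in_buf_ rather than the forward-path in_buf_.
RTC_CHECK_EQ(
    reverse_in_buf_->num_frames() * sizeof(*reverse_in_buf_->channels()[i]),
    msg.channel(i).size());
```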
158 msg.channel(i).size()); | |
159 std::memcpy(reverse_in_buf_->channels()[i], msg.channel(i).data(), | |
160 msg.channel(i).size()); | |
161 } | |
162 } | |
163 } | |
164 | |
165 void AecDumpBasedSimulator::Process() { | |
166 std::unique_ptr<test::TraceToStderr> trace_to_stderr; | |
167 if (settings_.use_verbose_logging) { | |
168 trace_to_stderr.reset(new test::TraceToStderr(true)); | |
169 } | |
170 | |
171 CreateAudioProcessor(); | |
172 dump_input_file_ = OpenFile(settings_.aec_dump_input_filename->c_str(), "rb"); | |
173 | |
174 webrtc::audioproc::Event event_msg; | |
175 int num_forward_chunks_processed = 0; | |
176 while (ReadMessageFromFile(dump_input_file_, &event_msg)) { | |
177 switch (event_msg.type()) { | |
178 case webrtc::audioproc::Event::INIT: | |
179 RTC_CHECK(event_msg.has_init()); | |
180 HandleMessage(event_msg.init()); | |
181 break; | |
182 case webrtc::audioproc::Event::STREAM: | |
183 RTC_CHECK(event_msg.has_stream()); | |
184 HandleMessage(event_msg.stream()); | |
185 ++num_forward_chunks_processed; | |
186 break; | |
187 case webrtc::audioproc::Event::REVERSE_STREAM: | |
188 RTC_CHECK(event_msg.has_reverse_stream()); | |
189 HandleMessage(event_msg.reverse_stream()); | |
190 break; | |
191 case webrtc::audioproc::Event::CONFIG: | |
192 RTC_CHECK(event_msg.has_config()); | |
193 HandleMessage(event_msg.config()); | |
194 break; | |
195 default: | |
196 RTC_CHECK(false); | |
197 } | |
198 if (trace_to_stderr) { | |
199 trace_to_stderr->SetTimeSeconds( | |
200 num_forward_chunks_processed * 1.f / | |
aluebs-webrtc
2016/05/03 22:30:40
Again, cast to avoid multiplication?
peah-webrtc
2016/05/09 11:37:31
Done.
| |
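Editor's note: a sketch of the suggested form, replacing the multiply-by-1.f idiom with an explicit cast:

```cpp
// Sketch: cast once instead of forcing a float multiplication.
trace_to_stderr->SetTimeSeconds(
    static_cast<float>(num_forward_chunks_processed) /
    AudioProcessingSimulator::kChunksPerSecond);
```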
201 AudioProcessingSimulator::kChunksPerSecond); | |
202 } | |
203 } | |
204 | |
205 fclose(dump_input_file_); | |
206 | |
207 DestroyAudioProcessor(); | |
208 } | |
209 | |
210 void AecDumpBasedSimulator::HandleMessage( | |
211 const webrtc::audioproc::Config& msg) { | |
212 if (settings_.use_verbose_logging) { | |
213 printf("Config at frame: "); | |
aluebs-webrtc
2016/05/03 22:30:40
Use cout instead of printf? You also probably want
peah-webrtc
2016/05/09 11:37:31
Fixed the new-line. Not sure about using cout thou
aluebs-webrtc
2016/05/09 16:27:32
I meant that we probably want to use cout everywhe
peah-webrtc
2016/05/11 12:19:27
I think the only reason that applies here is the t
| |
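Editor's note: the newline fix mentioned above would presumably make this block match the Init handler further down, terminating the header line before the per-direction counters:

```cpp
// Sketch: end the header line so the counters print on their own lines.
printf("Config at frame:\n");
printf(" Forward: %zu\n", get_num_process_stream_calls());
printf(" Reverse: %zu\n", get_num_reverse_process_stream_calls());
```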
214 printf(" Forward: %zu\n", get_num_process_stream_calls()); | |
215 printf(" Reverse: %zu\n", get_num_reverse_process_stream_calls()); | |
216 } | |
217 | |
218 if (!settings_.discard_all_settings_in_aecdump) { | |
219 if (settings_.use_verbose_logging) { | |
220 printf("Setting used in config:\n"); | |
221 } | |
222 Config config; | |
223 | |
224 if (msg.has_aec_enabled()) { | |
aluebs-webrtc
2016/05/03 22:30:40
This logic makes it so that if you set a parameter
peah-webrtc
2016/05/09 11:37:31
Agree! Good find!
Done.
| |
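Editor's note: the issue is that each command-line override is only consulted inside the corresponding has_*() branch, so a flag passed on the command line is silently dropped whenever the dump never carried that field. A sketch of how the agreed fix could look, using the AEC toggle as an example (the same pattern would apply to the other settings):

```cpp
// Sketch: enter the branch when either the dump or the command line
// provides a value, so command-line overrides always take effect.
if (msg.has_aec_enabled() || settings_.use_aec) {
  bool enable = settings_.use_aec ? *settings_.use_aec : msg.aec_enabled();
  RTC_CHECK_EQ(AudioProcessing::kNoError,
               ap_->echo_cancellation()->Enable(enable));
  if (settings_.use_verbose_logging) {
    printf(" aec_enabled: %s\n", enable ? "true" : "false");
  }
}
```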
225 bool enable = settings_.use_aec ? *settings_.use_aec : msg.aec_enabled(); | |
226 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
227 ap_->echo_cancellation()->Enable(enable)); | |
228 if (settings_.use_verbose_logging) { | |
229 printf(" aec_enabled: %s\n", enable ? "true" : "false"); | |
230 } | |
231 } | |
232 | |
233 if (msg.has_aec_delay_agnostic_enabled()) { | |
234 bool enable = settings_.use_delay_agnostic | |
235 ? *settings_.use_delay_agnostic | |
236 : msg.aec_delay_agnostic_enabled(); | |
237 config.Set<DelayAgnostic>(new DelayAgnostic(enable)); | |
238 if (settings_.use_verbose_logging) { | |
239 printf(" aec_delay_agnostic_enabled: %s\n", enable ? "true" : "false"); | |
240 } | |
241 } | |
242 | |
243 if (msg.has_aec_drift_compensation_enabled()) { | |
244 bool enable = settings_.use_drift_compensation | |
245 ? *settings_.use_drift_compensation | |
246 : msg.aec_drift_compensation_enabled(); | |
247 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
248 ap_->echo_cancellation()->enable_drift_compensation(enable)); | |
249 if (settings_.use_verbose_logging) { | |
250 printf(" aec_drift_compensation_enabled: %s\n", | |
251 enable ? "true" : "false"); | |
252 } | |
253 } | |
254 | |
255 if (msg.has_aec_extended_filter_enabled()) { | |
256 bool enable = settings_.use_extended_filter | |
257 ? *settings_.use_extended_filter | |
258 : msg.aec_extended_filter_enabled(); | |
259 config.Set<ExtendedFilter>(new ExtendedFilter(enable)); | |
260 if (settings_.use_verbose_logging) { | |
261 printf(" aec_extended_filter_enabled: %s\n", enable ? "true" : "false"); | |
262 } | |
263 } | |
264 | |
265 if (msg.has_aec_suppression_level()) { | |
266 int level = settings_.aec_suppression_level | |
267 ? *settings_.aec_suppression_level | |
268 : msg.aec_suppression_level(); | |
269 RTC_CHECK_EQ( | |
270 AudioProcessing::kNoError, | |
271 ap_->echo_cancellation()->set_suppression_level( | |
272 static_cast<webrtc::EchoCancellation::SuppressionLevel>(level))); | |
273 if (settings_.use_verbose_logging) { | |
274 printf(" aec_suppression_level: %d\n", level); | |
275 } | |
276 } | |
277 | |
278 if (msg.has_aecm_enabled()) { | |
279 bool enable = | |
280 settings_.use_aecm ? *settings_.use_aecm : msg.aecm_enabled(); | |
281 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
282 ap_->echo_control_mobile()->Enable(enable)); | |
283 if (settings_.use_verbose_logging) { | |
284 printf(" aecm_enabled: %s\n", enable ? "true" : "false"); | |
285 } | |
286 } | |
287 | |
288 if (msg.has_aecm_comfort_noise_enabled()) { | |
289 bool enable = settings_.use_aecm_comfort_noise | |
290 ? *settings_.use_aecm_comfort_noise | |
291 : msg.aecm_comfort_noise_enabled(); | |
292 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
293 ap_->echo_control_mobile()->enable_comfort_noise(enable)); | |
294 if (settings_.use_verbose_logging) { | |
295 printf(" aecm_comfort_noise_enabled: %s\n", enable ? "true" : "false"); | |
296 } | |
297 } | |
298 | |
299 if (msg.has_aecm_routing_mode()) { | |
300 int routing_mode = settings_.aecm_routing_mode | |
301 ? *settings_.aecm_routing_mode | |
302 : msg.aecm_routing_mode(); | |
303 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
304 ap_->echo_control_mobile()->set_routing_mode( | |
305 static_cast<webrtc::EchoControlMobile::RoutingMode>( | |
306 routing_mode))); | |
307 if (settings_.use_verbose_logging) { | |
308 printf(" aecm_routing_mode: %d\n", routing_mode); | |
309 } | |
310 } | |
311 | |
312 if (msg.has_agc_enabled()) { | |
313 bool enable = settings_.use_agc ? *settings_.use_agc : msg.agc_enabled(); | |
314 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
315 ap_->gain_control()->Enable(enable)); | |
316 if (settings_.use_verbose_logging) { | |
317 printf(" agc_enabled: %s\n", enable ? "true" : "false"); | |
318 } | |
319 } | |
320 | |
321 if (msg.has_agc_mode()) { | |
322 int mode = settings_.agc_mode ? *settings_.agc_mode : msg.agc_mode(); | |
323 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
324 ap_->gain_control()->set_mode( | |
325 static_cast<webrtc::GainControl::Mode>(mode))); | |
326 if (settings_.use_verbose_logging) { | |
327 printf(" agc_mode: %d\n", mode); | |
328 } | |
329 } | |
330 | |
331 if (msg.has_agc_limiter_enabled()) { | |
332 bool enable = settings_.use_agc_limiter ? *settings_.use_agc_limiter | |
333 : msg.agc_limiter_enabled(); | |
334 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
335 ap_->gain_control()->enable_limiter(enable)); | |
336 if (settings_.use_verbose_logging) { | |
337 printf(" agc_limiter_enabled: %s\n", enable ? "true" : "false"); | |
338 } | |
339 } | |
340 | |
341 if (msg.has_noise_robust_agc_enabled()) { | |
342 bool enable = | |
343 settings_.use_ts ? *settings_.use_ts : msg.noise_robust_agc_enabled(); | |
aluebs-webrtc
2016/05/03 22:30:40
What does the noise_robust_agc have to do with TS?
peah-webrtc
2016/05/09 11:37:31
Good find! I modified this code.
PTAL!
aluebs-webrtc
2016/05/09 16:27:32
Now it makes more sense.
peah-webrtc
2016/05/11 12:19:27
Acknowledged.
| |
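Editor's note: the mismatch is that noise_robust_agc is tied here to the transient-suppression flag and the ExperimentalNs config option. A plausible shape of the corrected mapping, assuming a dedicated settings_.use_experimental_agc flag and the ExperimentalAgc config option (both names assumed, not taken from this patch):

```cpp
// Sketch: noise_robust_agc maps to ExperimentalAgc; the transient
// suppressor (use_ts) would be wired to ExperimentalNs separately.
if (msg.has_noise_robust_agc_enabled()) {
  bool enable = settings_.use_experimental_agc
                    ? *settings_.use_experimental_agc
                    : msg.noise_robust_agc_enabled();
  config.Set<ExperimentalAgc>(new ExperimentalAgc(enable));
  if (settings_.use_verbose_logging) {
    printf(" noise_robust_agc_enabled: %s\n", enable ? "true" : "false");
  }
}
```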
344 config.Set<ExperimentalNs>(new ExperimentalNs(enable)); | |
345 if (settings_.use_verbose_logging) { | |
346 printf(" noise_robust_agc_enabled: %s\n", enable ? "true" : "false"); | |
347 } | |
348 } | |
349 | |
350 if (msg.has_hpf_enabled()) { | |
351 bool enable = settings_.use_hpf ? *settings_.use_hpf : msg.hpf_enabled(); | |
352 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
353 ap_->high_pass_filter()->Enable(enable)); | |
354 if (settings_.use_verbose_logging) { | |
355 printf(" hpf_enabled: %s\n", enable ? "true" : "false"); | |
356 } | |
357 } | |
358 | |
359 if (msg.has_ns_enabled()) { | |
360 bool enable = settings_.use_ns ? *settings_.use_ns : msg.ns_enabled(); | |
361 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
362 ap_->noise_suppression()->Enable(enable)); | |
363 if (settings_.use_verbose_logging) { | |
364 printf(" ns_enabled: %s\n", enable ? "true" : "false"); | |
365 } | |
366 } | |
367 | |
368 if (msg.has_ns_level()) { | |
369 int level = settings_.ns_level ? *settings_.ns_level : msg.ns_level(); | |
370 RTC_CHECK_EQ(AudioProcessing::kNoError, | |
371 ap_->noise_suppression()->set_level( | |
372 static_cast<NoiseSuppression::Level>(level))); | |
373 if (settings_.use_verbose_logging) { | |
374 printf(" ns_level: %d\n", level); | |
375 } | |
376 } | |
377 | |
378 if (settings_.use_verbose_logging && msg.has_experiments_description() && | |
379 msg.experiments_description().size() > 0) { | |
380 printf(" experiments not included by default in the simulation: %s\n", | |
381 msg.experiments_description().c_str()); | |
382 } | |
383 | |
384 if (settings_.use_refined_adaptive_filter) { | |
385 config.Set<RefinedAdaptiveFilter>( | |
386 new RefinedAdaptiveFilter(*settings_.use_refined_adaptive_filter)); | |
387 } | |
388 | |
389 if (settings_.use_aec3) { | |
390 config.Set<EchoCanceller3>(new EchoCanceller3(*settings_.use_aec3)); | |
391 } | |
392 | |
393 ap_->SetExtraOptions(config); | |
394 } | |
395 } | |
396 | |
397 void AecDumpBasedSimulator::HandleMessage(const webrtc::audioproc::Init& msg) { | |
398 RTC_CHECK(msg.has_sample_rate()); | |
399 RTC_CHECK(msg.has_num_input_channels()); | |
400 RTC_CHECK(msg.has_num_reverse_channels()); | |
401 RTC_CHECK(msg.has_reverse_sample_rate()); | |
402 | |
403 if (settings_.use_verbose_logging) { | |
404 printf("Init at frame:\n"); | |
405 printf(" Forward: %zu\n", get_num_process_stream_calls()); | |
406 printf(" Reverse: %zu\n", get_num_reverse_process_stream_calls()); | |
407 } | |
408 | |
409 int num_output_channels; | |
410 if (settings_.output_num_channels) { | |
411 num_output_channels = *settings_.output_num_channels; | |
412 } else { | |
413 num_output_channels = msg.has_num_output_channels() | |
414 ? msg.num_output_channels() | |
415 : msg.num_input_channels(); | |
416 } | |
417 | |
418 int output_sample_rate; | |
419 if (settings_.output_sample_rate_hz) { | |
420 output_sample_rate = *settings_.output_sample_rate_hz; | |
421 } else { | |
422 output_sample_rate = msg.has_output_sample_rate() ? msg.output_sample_rate() | |
423 : msg.sample_rate(); | |
424 } | |
425 | |
426 int num_reverse_output_channels; | |
427 if (settings_.reverse_output_num_channels) { | |
428 num_reverse_output_channels = *settings_.reverse_output_num_channels; | |
429 } else { | |
430 num_reverse_output_channels = msg.has_num_reverse_output_channels() | |
431 ? msg.num_reverse_output_channels() | |
432 : msg.num_reverse_channels(); | |
433 } | |
434 | |
435 int reverse_output_sample_rate; | |
436 if (settings_.reverse_output_sample_rate_hz) { | |
437 reverse_output_sample_rate = *settings_.reverse_output_sample_rate_hz; | |
438 } else { | |
439 reverse_output_sample_rate = msg.has_reverse_output_sample_rate() | |
440 ? msg.reverse_output_sample_rate() | |
441 : msg.reverse_sample_rate(); | |
442 } | |
443 | |
444 SetupBuffersConfigsOutputs( | |
445 msg.sample_rate(), output_sample_rate, msg.reverse_sample_rate(), | |
446 reverse_output_sample_rate, msg.num_input_channels(), num_output_channels, | |
447 msg.num_reverse_channels(), num_reverse_output_channels); | |
448 } | |
449 | |
450 void AecDumpBasedSimulator::HandleMessage( | |
451 const webrtc::audioproc::Stream& msg) { | |
452 PrepareProcessStreamCall(msg); | |
453 ProcessStream(interface_used_ == InterfaceType::kFixedInterface); | |
454 VerifyProcessStreamBitExactness(msg); | |
455 } | |
456 | |
457 void AecDumpBasedSimulator::HandleMessage( | |
458 const webrtc::audioproc::ReverseStream& msg) { | |
459 PrepareReverseProcessStreamCall(msg); | |
460 ProcessReverseStream(interface_used_ == InterfaceType::kFixedInterface); | |
461 } | |
462 | |
463 } // namespace test | |
464 } // namespace webrtc | |