OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 205 matching lines...) | |
216 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 216 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
217 transient_suppressor_enabled_(false), | 217 transient_suppressor_enabled_(false), |
218 #else | 218 #else |
219 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), | 219 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), |
220 #endif | 220 #endif |
221 beamformer_enabled_(config.Get<Beamforming>().enabled), | 221 beamformer_enabled_(config.Get<Beamforming>().enabled), |
222 beamformer_(beamformer), | 222 beamformer_(beamformer), |
223 array_geometry_(config.Get<Beamforming>().array_geometry), | 223 array_geometry_(config.Get<Beamforming>().array_geometry), |
224 target_direction_(config.Get<Beamforming>().target_direction), | 224 target_direction_(config.Get<Beamforming>().target_direction), |
225 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { | 225 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { |
226 echo_cancellation_ = new EchoCancellationImpl(this, crit_); | 226 render_thread_checker_.DetachFromThread(); |
227 capture_thread_checker_.DetachFromThread(); | |
228 | |
229 echo_cancellation_ = | |
230 new EchoCancellationImpl(this, crit_, &render_thread_checker_); | |
227 component_list_.push_back(echo_cancellation_); | 231 component_list_.push_back(echo_cancellation_); |
228 | 232 |
229 echo_control_mobile_ = new EchoControlMobileImpl(this, crit_); | 233 echo_control_mobile_ = |
234 new EchoControlMobileImpl(this, crit_, &render_thread_checker_); | |
230 component_list_.push_back(echo_control_mobile_); | 235 component_list_.push_back(echo_control_mobile_); |
231 | 236 |
232 gain_control_ = new GainControlImpl(this, crit_); | 237 gain_control_ = new GainControlImpl(this, crit_, &render_thread_checker_, |
238 &capture_thread_checker_); | |
233 component_list_.push_back(gain_control_); | 239 component_list_.push_back(gain_control_); |
234 | 240 |
235 high_pass_filter_ = new HighPassFilterImpl(this, crit_); | 241 high_pass_filter_ = new HighPassFilterImpl(this, crit_); |
236 component_list_.push_back(high_pass_filter_); | 242 component_list_.push_back(high_pass_filter_); |
237 | 243 |
238 level_estimator_ = new LevelEstimatorImpl(this, crit_); | 244 level_estimator_ = new LevelEstimatorImpl(this, crit_); |
239 component_list_.push_back(level_estimator_); | 245 component_list_.push_back(level_estimator_); |
240 | 246 |
241 noise_suppression_ = new NoiseSuppressionImpl(this, crit_); | 247 noise_suppression_ = new NoiseSuppressionImpl(this, crit_); |
242 component_list_.push_back(noise_suppression_); | 248 component_list_.push_back(noise_suppression_); |
243 | 249 |
244 voice_detection_ = new VoiceDetectionImpl(this, crit_); | 250 voice_detection_ = new VoiceDetectionImpl(this, crit_); |
245 component_list_.push_back(voice_detection_); | 251 component_list_.push_back(voice_detection_); |
246 | 252 |
247 gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); | 253 gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); |
248 | 254 |
249 SetExtraOptions(config); | 255 SetExtraOptions(config); |
250 } | 256 } |
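Side note for reviewers on the two DetachFromThread() calls added in the constructor: the checkers are constructed on whatever thread creates APM, which is usually neither the render nor the capture thread, so the constructor detaches them and they re-bind to the first thread that later calls CalledOnValidThread(). Below is a minimal stand-alone sketch of that detach-then-bind behaviour, written against std::thread rather than rtc::ThreadChecker (and ignoring the locking the real class does); it is an illustration, not part of this CL.

#include <thread>

// Simplified stand-in for rtc::ThreadChecker; illustration only.
class SimpleThreadChecker {
 public:
  SimpleThreadChecker() : valid_id_(std::this_thread::get_id()) {}

  // Forget the construction-time binding (cf. the constructor above).
  void DetachFromThread() { valid_id_ = std::thread::id(); }

  // After a detach, the first caller becomes the "valid" thread; every later
  // call from a different thread returns false (and would trip RTC_DCHECK).
  bool CalledOnValidThread() {
    if (valid_id_ == std::thread::id()) {
      valid_id_ = std::this_thread::get_id();
    }
    return valid_id_ == std::this_thread::get_id();
  }

 private:
  std::thread::id valid_id_;
};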
251 | 257 |
252 AudioProcessingImpl::~AudioProcessingImpl() { | 258 AudioProcessingImpl::~AudioProcessingImpl() { |
259 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
253 { | 260 { |
254 CriticalSectionScoped crit_scoped(crit_); | 261 CriticalSectionScoped crit_scoped(crit_); |
255 // Depends on gain_control_ and gain_control_for_new_agc_. | 262 // Depends on gain_control_ and gain_control_for_new_agc_. |
256 agc_manager_.reset(); | 263 agc_manager_.reset(); |
257 // Depends on gain_control_. | 264 // Depends on gain_control_. |
258 gain_control_for_new_agc_.reset(); | 265 gain_control_for_new_agc_.reset(); |
259 while (!component_list_.empty()) { | 266 while (!component_list_.empty()) { |
260 ProcessingComponent* component = component_list_.front(); | 267 ProcessingComponent* component = component_list_.front(); |
261 component->Destroy(); | 268 component->Destroy(); |
262 delete component; | 269 delete component; |
263 component_list_.pop_front(); | 270 component_list_.pop_front(); |
264 } | 271 } |
265 | 272 |
266 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 273 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
267 if (debug_file_->Open()) { | 274 if (debug_file_->Open()) { |
268 debug_file_->CloseFile(); | 275 debug_file_->CloseFile(); |
269 } | 276 } |
270 #endif | 277 #endif |
271 } | 278 } |
272 delete crit_; | 279 delete crit_; |
273 crit_ = NULL; | 280 crit_ = NULL; |
274 } | 281 } |
275 | 282 |
276 int AudioProcessingImpl::Initialize() { | 283 int AudioProcessingImpl::Initialize() { |
284 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
277 CriticalSectionScoped crit_scoped(crit_); | 285 CriticalSectionScoped crit_scoped(crit_); |
278 return InitializeLocked(); | 286 return InitializeLocked(); |
279 } | 287 } |
280 | 288 |
281 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, | 289 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
282 int output_sample_rate_hz, | 290 int output_sample_rate_hz, |
283 int reverse_sample_rate_hz, | 291 int reverse_sample_rate_hz, |
284 ChannelLayout input_layout, | 292 ChannelLayout input_layout, |
285 ChannelLayout output_layout, | 293 ChannelLayout output_layout, |
286 ChannelLayout reverse_layout) { | 294 ChannelLayout reverse_layout) { |
295 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
287 const ProcessingConfig processing_config = { | 296 const ProcessingConfig processing_config = { |
288 {{input_sample_rate_hz, | 297 {{input_sample_rate_hz, |
289 ChannelsFromLayout(input_layout), | 298 ChannelsFromLayout(input_layout), |
290 LayoutHasKeyboard(input_layout)}, | 299 LayoutHasKeyboard(input_layout)}, |
291 {output_sample_rate_hz, | 300 {output_sample_rate_hz, |
292 ChannelsFromLayout(output_layout), | 301 ChannelsFromLayout(output_layout), |
293 LayoutHasKeyboard(output_layout)}, | 302 LayoutHasKeyboard(output_layout)}, |
294 {reverse_sample_rate_hz, | 303 {reverse_sample_rate_hz, |
295 ChannelsFromLayout(reverse_layout), | 304 ChannelsFromLayout(reverse_layout), |
296 LayoutHasKeyboard(reverse_layout)}, | 305 LayoutHasKeyboard(reverse_layout)}, |
297 {reverse_sample_rate_hz, | 306 {reverse_sample_rate_hz, |
298 ChannelsFromLayout(reverse_layout), | 307 ChannelsFromLayout(reverse_layout), |
299 LayoutHasKeyboard(reverse_layout)}}}; | 308 LayoutHasKeyboard(reverse_layout)}}}; |
300 | 309 |
301 return Initialize(processing_config); | 310 return Initialize(processing_config); |
302 } | 311 } |
303 | 312 |
304 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 313 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
314 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
305 CriticalSectionScoped crit_scoped(crit_); | 315 CriticalSectionScoped crit_scoped(crit_); |
306 return InitializeLocked(processing_config); | 316 return InitializeLocked(processing_config); |
307 } | 317 } |
308 | 318 |
319 int AudioProcessingImpl::MaybeInitializeLockedRender( | |
320 const ProcessingConfig& processing_config) { | |
321 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
322 return MaybeInitializeLocked(processing_config); | |
323 } | |
324 | |
325 int AudioProcessingImpl::MaybeInitializeLockedCapture( | |
326 const ProcessingConfig& processing_config) { | |
327 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
328 return MaybeInitializeLocked(processing_config); | |
329 } | |
330 | |
309 // Calls InitializeLocked() if any of the audio parameters have changed from | 331 // Calls InitializeLocked() if any of the audio parameters have changed from |
310 // their current values. | 332 // their current values. |
311 int AudioProcessingImpl::MaybeInitializeLocked( | 333 int AudioProcessingImpl::MaybeInitializeLocked( |
312 const ProcessingConfig& processing_config) { | 334 const ProcessingConfig& processing_config) { |
313 if (processing_config == shared_state_.api_format_) { | 335 if (processing_config == shared_state_.api_format_) { |
314 return kNoError; | 336 return kNoError; |
315 } | 337 } |
316 return InitializeLocked(processing_config); | 338 return InitializeLocked(processing_config); |
317 } | 339 } |
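The MaybeInitializeLockedRender/Capture pair above follows a small delegation pattern: each side gets an entry point that only asserts the calling thread and then forwards to the single shared, unchecked implementation, so the format-comparison logic is not duplicated. A rough sketch of that shape, assuming the rtc::ThreadChecker and RTC_DCHECK facilities this CL already uses (header paths as in the 2015 tree); the FormatGuard class and its method names are made up for the illustration.

#include "webrtc/base/checks.h"
#include "webrtc/base/thread_checker.h"

// Hypothetical class illustrating the checked-wrapper / shared-body split.
class FormatGuard {
 public:
  int MaybeReinitRender(int rate_hz) {
    RTC_DCHECK(render_checker_.CalledOnValidThread());
    return MaybeReinitShared(rate_hz);
  }
  int MaybeReinitCapture(int rate_hz) {
    RTC_DCHECK(capture_checker_.CalledOnValidThread());
    return MaybeReinitShared(rate_hz);
  }

 private:
  // Reached from both threads (and during construction), so no check here.
  int MaybeReinitShared(int rate_hz) {
    if (rate_hz == rate_hz_) {
      return 0;  // Nothing changed; skip the expensive reinitialization.
    }
    rate_hz_ = rate_hz;  // A real implementation would reinitialize here.
    return 0;
  }

  rtc::ThreadChecker render_checker_;
  rtc::ThreadChecker capture_checker_;
  int rate_hz_ = 48000;
};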
318 | 340 |
(...skipping 53 matching lines...) | |
372 if (err != kNoError) { | 394 if (err != kNoError) { |
373 return err; | 395 return err; |
374 } | 396 } |
375 } | 397 } |
376 #endif | 398 #endif |
377 | 399 |
378 return kNoError; | 400 return kNoError; |
379 } | 401 } |
380 | 402 |
381 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 403 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
404 // This is called from the initialization functionality which is shared | |
(inline review thread on the new comment above)
the sun 2015/11/25 08:53:56: I'm not a fan of repeated, verbose comments. Can y
peah-webrtc 2015/11/25 15:40:17: Could not be happier to oblige! :-) Done.
 | |
405 // between the render and capture parts, and also during the APM creation. | |
406 // Therefore it is neither possible to do thread checks nor to separate | |
407 // into different thread-specific implementations. | |
382 for (const auto& stream : config.streams) { | 408 for (const auto& stream : config.streams) { |
383 if (stream.num_channels() < 0) { | 409 if (stream.num_channels() < 0) { |
384 return kBadNumberChannelsError; | 410 return kBadNumberChannelsError; |
385 } | 411 } |
386 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 412 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
387 return kBadSampleRateError; | 413 return kBadSampleRateError; |
388 } | 414 } |
389 } | 415 } |
390 | 416 |
391 const int num_in_channels = config.input_stream().num_channels(); | 417 const int num_in_channels = config.input_stream().num_channels(); |
(...skipping 54 matching lines...) | |
446 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 472 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
447 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 473 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
448 split_rate_ = kSampleRate16kHz; | 474 split_rate_ = kSampleRate16kHz; |
449 } else { | 475 } else { |
450 split_rate_ = fwd_proc_format_.sample_rate_hz(); | 476 split_rate_ = fwd_proc_format_.sample_rate_hz(); |
451 } | 477 } |
452 | 478 |
453 return InitializeLocked(); | 479 return InitializeLocked(); |
454 } | 480 } |
455 | 481 |
456 | |
457 void AudioProcessingImpl::SetExtraOptions(const Config& config) { | 482 void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
458 CriticalSectionScoped crit_scoped(crit_); | 483 CriticalSectionScoped crit_scoped(crit_); |
484 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
459 for (auto item : component_list_) { | 485 for (auto item : component_list_) { |
460 item->SetExtraOptions(config); | 486 item->SetExtraOptions(config); |
461 } | 487 } |
462 | 488 |
463 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { | 489 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { |
464 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; | 490 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; |
465 InitializeTransient(); | 491 InitializeTransient(); |
466 } | 492 } |
467 } | 493 } |
468 | 494 |
469 | 495 |
470 int AudioProcessingImpl::proc_sample_rate_hz() const { | 496 int AudioProcessingImpl::proc_sample_rate_hz() const { |
497 // This is called from the initialization functionality which is shared | |
498 // between the render and capture parts, and also during the APM creation. | |
499 // Therefore it is neither possible to do thread checks nor to separate | |
500 // into different thread-specific implementations. | |
471 return fwd_proc_format_.sample_rate_hz(); | 501 return fwd_proc_format_.sample_rate_hz(); |
472 } | 502 } |
473 | 503 |
474 int AudioProcessingImpl::proc_split_sample_rate_hz() const { | 504 int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
505 // This is called from within the VAD submodule. It is used from code that is | |
506 // run by the capture and creation threads and is called via the | |
507 // public AudioProcessing API. Therefore it is not possible to do thread | |
508 // checks on these calls without extending the public APM API. | |
475 return split_rate_; | 509 return split_rate_; |
476 } | 510 } |
477 | 511 |
478 int AudioProcessingImpl::num_reverse_channels() const { | 512 int AudioProcessingImpl::num_reverse_channels() const { |
513 // This is called from within the submodules. It is used from code that is | |
514 // run by the render, capture and creation threads and is called via the | |
515 // public AudioProcessing API. Therefore it is not possible to do thread | |
516 // checks on these calls without extending the public APM API. | |
479 return rev_proc_format_.num_channels(); | 517 return rev_proc_format_.num_channels(); |
480 } | 518 } |
481 | 519 |
482 int AudioProcessingImpl::num_input_channels() const { | 520 int AudioProcessingImpl::num_input_channels() const { |
521 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
483 return shared_state_.api_format_.input_stream().num_channels(); | 522 return shared_state_.api_format_.input_stream().num_channels(); |
484 } | 523 } |
485 | 524 |
486 int AudioProcessingImpl::num_output_channels() const { | 525 int AudioProcessingImpl::num_output_channels() const { |
526 // This is called from within the submodules. It is used from code that is | |
527 // run by the render, capture and creation threads and is called via the | |
528 // public AudioProcessing API. Therefore it is not possible to do thread | |
529 // checks on these calls without extending the public APM API. | |
487 return shared_state_.api_format_.output_stream().num_channels(); | 530 return shared_state_.api_format_.output_stream().num_channels(); |
488 } | 531 } |
489 | 532 |
490 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 533 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
491 CriticalSectionScoped lock(crit_); | 534 CriticalSectionScoped lock(crit_); |
535 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
492 output_will_be_muted_ = muted; | 536 output_will_be_muted_ = muted; |
493 if (agc_manager_.get()) { | 537 if (agc_manager_.get()) { |
494 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 538 agc_manager_->SetCaptureMuted(output_will_be_muted_); |
495 } | 539 } |
496 } | 540 } |
497 | 541 |
498 | 542 |
499 int AudioProcessingImpl::ProcessStream(const float* const* src, | 543 int AudioProcessingImpl::ProcessStream(const float* const* src, |
500 size_t samples_per_channel, | 544 size_t samples_per_channel, |
501 int input_sample_rate_hz, | 545 int input_sample_rate_hz, |
502 ChannelLayout input_layout, | 546 ChannelLayout input_layout, |
503 int output_sample_rate_hz, | 547 int output_sample_rate_hz, |
504 ChannelLayout output_layout, | 548 ChannelLayout output_layout, |
505 float* const* dest) { | 549 float* const* dest) { |
506 CriticalSectionScoped crit_scoped(crit_); | 550 CriticalSectionScoped crit_scoped(crit_); |
551 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
507 StreamConfig input_stream = shared_state_.api_format_.input_stream(); | 552 StreamConfig input_stream = shared_state_.api_format_.input_stream(); |
508 input_stream.set_sample_rate_hz(input_sample_rate_hz); | 553 input_stream.set_sample_rate_hz(input_sample_rate_hz); |
509 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); | 554 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
510 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); | 555 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); |
511 | 556 |
512 StreamConfig output_stream = shared_state_.api_format_.output_stream(); | 557 StreamConfig output_stream = shared_state_.api_format_.output_stream(); |
513 output_stream.set_sample_rate_hz(output_sample_rate_hz); | 558 output_stream.set_sample_rate_hz(output_sample_rate_hz); |
514 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); | 559 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); |
515 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); | 560 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); |
516 | 561 |
517 if (samples_per_channel != input_stream.num_frames()) { | 562 if (samples_per_channel != input_stream.num_frames()) { |
518 return kBadDataLengthError; | 563 return kBadDataLengthError; |
519 } | 564 } |
520 return ProcessStream(src, input_stream, output_stream, dest); | 565 return ProcessStream(src, input_stream, output_stream, dest); |
521 } | 566 } |
522 | 567 |
523 int AudioProcessingImpl::ProcessStream(const float* const* src, | 568 int AudioProcessingImpl::ProcessStream(const float* const* src, |
524 const StreamConfig& input_config, | 569 const StreamConfig& input_config, |
525 const StreamConfig& output_config, | 570 const StreamConfig& output_config, |
526 float* const* dest) { | 571 float* const* dest) { |
527 CriticalSectionScoped crit_scoped(crit_); | 572 CriticalSectionScoped crit_scoped(crit_); |
573 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
528 if (!src || !dest) { | 574 if (!src || !dest) { |
529 return kNullPointerError; | 575 return kNullPointerError; |
530 } | 576 } |
531 | 577 |
532 echo_cancellation_->ReadQueuedRenderData(); | 578 echo_cancellation_->ReadQueuedRenderData(); |
533 echo_control_mobile_->ReadQueuedRenderData(); | 579 echo_control_mobile_->ReadQueuedRenderData(); |
534 gain_control_->ReadQueuedRenderData(); | 580 gain_control_->ReadQueuedRenderData(); |
535 | 581 |
536 ProcessingConfig processing_config = shared_state_.api_format_; | 582 ProcessingConfig processing_config = shared_state_.api_format_; |
537 processing_config.input_stream() = input_config; | 583 processing_config.input_stream() = input_config; |
538 processing_config.output_stream() = output_config; | 584 processing_config.output_stream() = output_config; |
539 | 585 |
540 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 586 RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config)); |
541 assert(processing_config.input_stream().num_frames() == | 587 assert(processing_config.input_stream().num_frames() == |
542 shared_state_.api_format_.input_stream().num_frames()); | 588 shared_state_.api_format_.input_stream().num_frames()); |
543 | 589 |
544 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 590 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
545 if (debug_file_->Open()) { | 591 if (debug_file_->Open()) { |
546 RETURN_ON_ERR(WriteConfigMessage(false)); | 592 RETURN_ON_ERR(WriteConfigMessage(false)); |
547 | 593 |
548 event_msg_->set_type(audioproc::Event::STREAM); | 594 event_msg_->set_type(audioproc::Event::STREAM); |
549 audioproc::Stream* msg = event_msg_->mutable_stream(); | 595 audioproc::Stream* msg = event_msg_->mutable_stream(); |
550 const size_t channel_size = | 596 const size_t channel_size = |
(...skipping 18 matching lines...) | |
569 msg->add_output_channel(dest[i], channel_size); | 615 msg->add_output_channel(dest[i], channel_size); |
570 RETURN_ON_ERR(WriteMessageToDebugFile()); | 616 RETURN_ON_ERR(WriteMessageToDebugFile()); |
571 } | 617 } |
572 #endif | 618 #endif |
573 | 619 |
574 return kNoError; | 620 return kNoError; |
575 } | 621 } |
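For context on the ReadQueuedRenderData() calls at the top of the capture-side entry points: the render thread hands far-end data to the AEC/AECM/AGC components from ProcessReverseStreamLocked(), and the capture thread drains whatever has queued up before it processes the near-end frame. The sketch below is a hypothetical stand-in using a plain mutex-guarded buffer, not the components' actual queues, and is only meant to show the shape of that hand-off.

#include <mutex>
#include <vector>

// Hypothetical render->capture hand-off buffer; illustration only.
class RenderDataQueue {
 public:
  // Render thread: called while processing the reverse (far-end) stream.
  void PushRenderFrame(const std::vector<float>& frame) {
    std::lock_guard<std::mutex> lock(mutex_);
    pending_.push_back(frame);
  }

  // Capture thread: called at the top of ProcessStream() so the component
  // consumes all queued far-end frames before the near-end frame.
  std::vector<std::vector<float>> ReadQueuedRenderData() {
    std::lock_guard<std::mutex> lock(mutex_);
    std::vector<std::vector<float>> drained;
    drained.swap(pending_);
    return drained;
  }

 private:
  std::mutex mutex_;
  std::vector<std::vector<float>> pending_;
};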
576 | 622 |
577 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 623 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
578 CriticalSectionScoped crit_scoped(crit_); | 624 CriticalSectionScoped crit_scoped(crit_); |
625 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
579 echo_cancellation_->ReadQueuedRenderData(); | 626 echo_cancellation_->ReadQueuedRenderData(); |
580 echo_control_mobile_->ReadQueuedRenderData(); | 627 echo_control_mobile_->ReadQueuedRenderData(); |
581 gain_control_->ReadQueuedRenderData(); | 628 gain_control_->ReadQueuedRenderData(); |
582 | 629 |
583 if (!frame) { | 630 if (!frame) { |
584 return kNullPointerError; | 631 return kNullPointerError; |
585 } | 632 } |
586 // Must be a native rate. | 633 // Must be a native rate. |
587 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 634 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
588 frame->sample_rate_hz_ != kSampleRate16kHz && | 635 frame->sample_rate_hz_ != kSampleRate16kHz && |
589 frame->sample_rate_hz_ != kSampleRate32kHz && | 636 frame->sample_rate_hz_ != kSampleRate32kHz && |
590 frame->sample_rate_hz_ != kSampleRate48kHz) { | 637 frame->sample_rate_hz_ != kSampleRate48kHz) { |
591 return kBadSampleRateError; | 638 return kBadSampleRateError; |
592 } | 639 } |
593 | 640 |
594 if (echo_control_mobile_->is_enabled() && | 641 if (echo_control_mobile_->is_enabled() && |
595 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { | 642 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { |
596 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; | 643 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; |
597 return kUnsupportedComponentError; | 644 return kUnsupportedComponentError; |
598 } | 645 } |
599 | 646 |
600 // TODO(ajm): The input and output rates and channels are currently | 647 // TODO(ajm): The input and output rates and channels are currently |
601 // constrained to be identical in the int16 interface. | 648 // constrained to be identical in the int16 interface. |
602 ProcessingConfig processing_config = shared_state_.api_format_; | 649 ProcessingConfig processing_config = shared_state_.api_format_; |
603 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 650 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
604 processing_config.input_stream().set_num_channels(frame->num_channels_); | 651 processing_config.input_stream().set_num_channels(frame->num_channels_); |
605 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 652 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
606 processing_config.output_stream().set_num_channels(frame->num_channels_); | 653 processing_config.output_stream().set_num_channels(frame->num_channels_); |
607 | 654 |
608 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 655 RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config)); |
609 if (frame->samples_per_channel_ != | 656 if (frame->samples_per_channel_ != |
610 shared_state_.api_format_.input_stream().num_frames()) { | 657 shared_state_.api_format_.input_stream().num_frames()) { |
611 return kBadDataLengthError; | 658 return kBadDataLengthError; |
612 } | 659 } |
613 | 660 |
614 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 661 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
615 if (debug_file_->Open()) { | 662 if (debug_file_->Open()) { |
616 event_msg_->set_type(audioproc::Event::STREAM); | 663 event_msg_->set_type(audioproc::Event::STREAM); |
617 audioproc::Stream* msg = event_msg_->mutable_stream(); | 664 audioproc::Stream* msg = event_msg_->mutable_stream(); |
618 const size_t data_size = | 665 const size_t data_size = |
(...skipping 13 matching lines...) | |
632 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 679 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
633 msg->set_output_data(frame->data_, data_size); | 680 msg->set_output_data(frame->data_, data_size); |
634 RETURN_ON_ERR(WriteMessageToDebugFile()); | 681 RETURN_ON_ERR(WriteMessageToDebugFile()); |
635 } | 682 } |
636 #endif | 683 #endif |
637 | 684 |
638 return kNoError; | 685 return kNoError; |
639 } | 686 } |
640 | 687 |
641 int AudioProcessingImpl::ProcessStreamLocked() { | 688 int AudioProcessingImpl::ProcessStreamLocked() { |
689 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
642 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 690 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
643 if (debug_file_->Open()) { | 691 if (debug_file_->Open()) { |
644 audioproc::Stream* msg = event_msg_->mutable_stream(); | 692 audioproc::Stream* msg = event_msg_->mutable_stream(); |
645 msg->set_delay(stream_delay_ms_); | 693 msg->set_delay(stream_delay_ms_); |
646 msg->set_drift(echo_cancellation_->stream_drift_samples()); | 694 msg->set_drift(echo_cancellation_->stream_drift_samples()); |
647 msg->set_level(gain_control()->stream_analog_level()); | 695 msg->set_level(gain_control()->stream_analog_level()); |
648 msg->set_keypress(key_pressed_); | 696 msg->set_keypress(key_pressed_); |
649 } | 697 } |
650 #endif | 698 #endif |
651 | 699 |
(...skipping 61 matching lines...) | |
713 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); | 761 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); |
714 | 762 |
715 was_stream_delay_set_ = false; | 763 was_stream_delay_set_ = false; |
716 return kNoError; | 764 return kNoError; |
717 } | 765 } |
718 | 766 |
719 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, | 767 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
720 size_t samples_per_channel, | 768 size_t samples_per_channel, |
721 int rev_sample_rate_hz, | 769 int rev_sample_rate_hz, |
722 ChannelLayout layout) { | 770 ChannelLayout layout) { |
771 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
723 const StreamConfig reverse_config = { | 772 const StreamConfig reverse_config = { |
724 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), | 773 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
725 }; | 774 }; |
726 if (samples_per_channel != reverse_config.num_frames()) { | 775 if (samples_per_channel != reverse_config.num_frames()) { |
727 return kBadDataLengthError; | 776 return kBadDataLengthError; |
728 } | 777 } |
729 return AnalyzeReverseStream(data, reverse_config, reverse_config); | 778 return AnalyzeReverseStream(data, reverse_config, reverse_config); |
730 } | 779 } |
731 | 780 |
732 int AudioProcessingImpl::ProcessReverseStream( | 781 int AudioProcessingImpl::ProcessReverseStream( |
733 const float* const* src, | 782 const float* const* src, |
734 const StreamConfig& reverse_input_config, | 783 const StreamConfig& reverse_input_config, |
735 const StreamConfig& reverse_output_config, | 784 const StreamConfig& reverse_output_config, |
736 float* const* dest) { | 785 float* const* dest) { |
786 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
737 RETURN_ON_ERR( | 787 RETURN_ON_ERR( |
738 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); | 788 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); |
739 if (is_rev_processed()) { | 789 if (is_rev_processed()) { |
740 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), | 790 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), |
741 dest); | 791 dest); |
742 } else if (rev_conversion_needed()) { | 792 } else if (render_check_rev_conversion_needed()) { |
743 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, | 793 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, |
744 reverse_output_config.num_samples()); | 794 reverse_output_config.num_samples()); |
745 } else { | 795 } else { |
746 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | 796 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
747 reverse_input_config.num_channels(), dest); | 797 reverse_input_config.num_channels(), dest); |
748 } | 798 } |
749 | 799 |
750 return kNoError; | 800 return kNoError; |
751 } | 801 } |
752 | 802 |
753 int AudioProcessingImpl::AnalyzeReverseStream( | 803 int AudioProcessingImpl::AnalyzeReverseStream( |
754 const float* const* src, | 804 const float* const* src, |
755 const StreamConfig& reverse_input_config, | 805 const StreamConfig& reverse_input_config, |
756 const StreamConfig& reverse_output_config) { | 806 const StreamConfig& reverse_output_config) { |
757 CriticalSectionScoped crit_scoped(crit_); | 807 CriticalSectionScoped crit_scoped(crit_); |
808 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
758 if (src == NULL) { | 809 if (src == NULL) { |
759 return kNullPointerError; | 810 return kNullPointerError; |
760 } | 811 } |
761 | 812 |
762 if (reverse_input_config.num_channels() <= 0) { | 813 if (reverse_input_config.num_channels() <= 0) { |
763 return kBadNumberChannelsError; | 814 return kBadNumberChannelsError; |
764 } | 815 } |
765 | 816 |
766 ProcessingConfig processing_config = shared_state_.api_format_; | 817 ProcessingConfig processing_config = shared_state_.api_format_; |
767 processing_config.reverse_input_stream() = reverse_input_config; | 818 processing_config.reverse_input_stream() = reverse_input_config; |
768 processing_config.reverse_output_stream() = reverse_output_config; | 819 processing_config.reverse_output_stream() = reverse_output_config; |
769 | 820 |
770 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 821 RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config)); |
771 assert(reverse_input_config.num_frames() == | 822 assert(reverse_input_config.num_frames() == |
772 shared_state_.api_format_.reverse_input_stream().num_frames()); | 823 shared_state_.api_format_.reverse_input_stream().num_frames()); |
773 | 824 |
774 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 825 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
775 if (debug_file_->Open()) { | 826 if (debug_file_->Open()) { |
776 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 827 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
777 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 828 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
778 const size_t channel_size = | 829 const size_t channel_size = |
779 sizeof(float) * | 830 sizeof(float) * |
780 shared_state_.api_format_.reverse_input_stream().num_frames(); | 831 shared_state_.api_format_.reverse_input_stream().num_frames(); |
781 for (int i = 0; | 832 for (int i = 0; |
782 i < shared_state_.api_format_.reverse_input_stream().num_channels(); | 833 i < shared_state_.api_format_.reverse_input_stream().num_channels(); |
783 ++i) | 834 ++i) |
784 msg->add_channel(src[i], channel_size); | 835 msg->add_channel(src[i], channel_size); |
785 RETURN_ON_ERR(WriteMessageToDebugFile()); | 836 RETURN_ON_ERR(WriteMessageToDebugFile()); |
786 } | 837 } |
787 #endif | 838 #endif |
788 | 839 |
789 render_audio_->CopyFrom(src, | 840 render_audio_->CopyFrom(src, |
790 shared_state_.api_format_.reverse_input_stream()); | 841 shared_state_.api_format_.reverse_input_stream()); |
791 return ProcessReverseStreamLocked(); | 842 return ProcessReverseStreamLocked(); |
792 } | 843 } |
793 | 844 |
794 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 845 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
846 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
795 RETURN_ON_ERR(AnalyzeReverseStream(frame)); | 847 RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
796 if (is_rev_processed()) { | 848 if (is_rev_processed()) { |
797 render_audio_->InterleaveTo(frame, true); | 849 render_audio_->InterleaveTo(frame, true); |
798 } | 850 } |
799 | 851 |
800 return kNoError; | 852 return kNoError; |
801 } | 853 } |
802 | 854 |
803 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | 855 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
856 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
804 CriticalSectionScoped crit_scoped(crit_); | 857 CriticalSectionScoped crit_scoped(crit_); |
805 if (frame == NULL) { | 858 if (frame == NULL) { |
806 return kNullPointerError; | 859 return kNullPointerError; |
807 } | 860 } |
808 // Must be a native rate. | 861 // Must be a native rate. |
809 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 862 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
810 frame->sample_rate_hz_ != kSampleRate16kHz && | 863 frame->sample_rate_hz_ != kSampleRate16kHz && |
811 frame->sample_rate_hz_ != kSampleRate32kHz && | 864 frame->sample_rate_hz_ != kSampleRate32kHz && |
812 frame->sample_rate_hz_ != kSampleRate48kHz) { | 865 frame->sample_rate_hz_ != kSampleRate48kHz) { |
813 return kBadSampleRateError; | 866 return kBadSampleRateError; |
(...skipping 11 matching lines...) | |
825 ProcessingConfig processing_config = shared_state_.api_format_; | 878 ProcessingConfig processing_config = shared_state_.api_format_; |
826 processing_config.reverse_input_stream().set_sample_rate_hz( | 879 processing_config.reverse_input_stream().set_sample_rate_hz( |
827 frame->sample_rate_hz_); | 880 frame->sample_rate_hz_); |
828 processing_config.reverse_input_stream().set_num_channels( | 881 processing_config.reverse_input_stream().set_num_channels( |
829 frame->num_channels_); | 882 frame->num_channels_); |
830 processing_config.reverse_output_stream().set_sample_rate_hz( | 883 processing_config.reverse_output_stream().set_sample_rate_hz( |
831 frame->sample_rate_hz_); | 884 frame->sample_rate_hz_); |
832 processing_config.reverse_output_stream().set_num_channels( | 885 processing_config.reverse_output_stream().set_num_channels( |
833 frame->num_channels_); | 886 frame->num_channels_); |
834 | 887 |
835 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 888 RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config)); |
836 if (frame->samples_per_channel_ != | 889 if (frame->samples_per_channel_ != |
837 shared_state_.api_format_.reverse_input_stream().num_frames()) { | 890 shared_state_.api_format_.reverse_input_stream().num_frames()) { |
838 return kBadDataLengthError; | 891 return kBadDataLengthError; |
839 } | 892 } |
840 | 893 |
841 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 894 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
842 if (debug_file_->Open()) { | 895 if (debug_file_->Open()) { |
843 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 896 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
844 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 897 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
845 const size_t data_size = | 898 const size_t data_size = |
846 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 899 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
847 msg->set_data(frame->data_, data_size); | 900 msg->set_data(frame->data_, data_size); |
848 RETURN_ON_ERR(WriteMessageToDebugFile()); | 901 RETURN_ON_ERR(WriteMessageToDebugFile()); |
849 } | 902 } |
850 #endif | 903 #endif |
851 render_audio_->DeinterleaveFrom(frame); | 904 render_audio_->DeinterleaveFrom(frame); |
852 return ProcessReverseStreamLocked(); | 905 return ProcessReverseStreamLocked(); |
853 } | 906 } |
854 | 907 |
855 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 908 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
909 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
856 AudioBuffer* ra = render_audio_.get(); // For brevity. | 910 AudioBuffer* ra = render_audio_.get(); // For brevity. |
857 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { | 911 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { |
858 ra->SplitIntoFrequencyBands(); | 912 ra->SplitIntoFrequencyBands(); |
859 } | 913 } |
860 | 914 |
861 if (intelligibility_enabled_) { | 915 if (intelligibility_enabled_) { |
862 intelligibility_enhancer_->ProcessRenderAudio( | 916 intelligibility_enhancer_->ProcessRenderAudio( |
863 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); | 917 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); |
864 } | 918 } |
865 | 919 |
866 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); | 920 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); |
867 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); | 921 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); |
868 if (!use_new_agc_) { | 922 if (!use_new_agc_) { |
869 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); | 923 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); |
870 } | 924 } |
871 | 925 |
872 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && | 926 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && |
873 is_rev_processed()) { | 927 is_rev_processed()) { |
874 ra->MergeFrequencyBands(); | 928 ra->MergeFrequencyBands(); |
875 } | 929 } |
876 | 930 |
877 return kNoError; | 931 return kNoError; |
878 } | 932 } |
879 | 933 |
880 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 934 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
935 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
881 Error retval = kNoError; | 936 Error retval = kNoError; |
882 was_stream_delay_set_ = true; | 937 was_stream_delay_set_ = true; |
883 delay += delay_offset_ms_; | 938 delay += delay_offset_ms_; |
884 | 939 |
885 if (delay < 0) { | 940 if (delay < 0) { |
886 delay = 0; | 941 delay = 0; |
887 retval = kBadStreamParameterWarning; | 942 retval = kBadStreamParameterWarning; |
888 } | 943 } |
889 | 944 |
890 // TODO(ajm): the max is rather arbitrarily chosen; investigate. | 945 // TODO(ajm): the max is rather arbitrarily chosen; investigate. |
891 if (delay > 500) { | 946 if (delay > 500) { |
892 delay = 500; | 947 delay = 500; |
893 retval = kBadStreamParameterWarning; | 948 retval = kBadStreamParameterWarning; |
894 } | 949 } |
895 | 950 |
896 stream_delay_ms_ = delay; | 951 stream_delay_ms_ = delay; |
897 return retval; | 952 return retval; |
898 } | 953 } |
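A small worked example of the clamping above, with invented delay values, assuming an AudioProcessing instance created through the usual public interface (header path as in the 2015 tree):

#include "webrtc/modules/audio_processing/include/audio_processing.h"

// Illustration only; the delay values are made up.
void ReportCaptureDelay(webrtc::AudioProcessing* apm) {
  apm->set_delay_offset_ms(30);
  int warn = apm->set_stream_delay_ms(490);
  // 490 + 30 exceeds the 500 ms cap, so stream_delay_ms() now returns 500 and
  // warn == webrtc::AudioProcessing::kBadStreamParameterWarning.
  warn = apm->set_stream_delay_ms(-50);
  // -50 + 30 is negative, so the stored delay clamps to 0 with the same warning.
  (void)warn;
}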
899 | 954 |
900 int AudioProcessingImpl::stream_delay_ms() const { | 955 int AudioProcessingImpl::stream_delay_ms() const { |
956 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
901 return stream_delay_ms_; | 957 return stream_delay_ms_; |
902 } | 958 } |
903 | 959 |
904 bool AudioProcessingImpl::was_stream_delay_set() const { | 960 bool AudioProcessingImpl::was_stream_delay_set() const { |
961 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
905 return was_stream_delay_set_; | 962 return was_stream_delay_set_; |
906 } | 963 } |
907 | 964 |
908 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { | 965 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { |
966 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
909 key_pressed_ = key_pressed; | 967 key_pressed_ = key_pressed; |
910 } | 968 } |
911 | 969 |
912 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 970 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
971 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
913 CriticalSectionScoped crit_scoped(crit_); | 972 CriticalSectionScoped crit_scoped(crit_); |
914 delay_offset_ms_ = offset; | 973 delay_offset_ms_ = offset; |
915 } | 974 } |
916 | 975 |
917 int AudioProcessingImpl::delay_offset_ms() const { | 976 int AudioProcessingImpl::delay_offset_ms() const { |
977 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
918 return delay_offset_ms_; | 978 return delay_offset_ms_; |
919 } | 979 } |
920 | 980 |
921 int AudioProcessingImpl::StartDebugRecording( | 981 int AudioProcessingImpl::StartDebugRecording( |
922 const char filename[AudioProcessing::kMaxFilenameSize]) { | 982 const char filename[AudioProcessing::kMaxFilenameSize]) { |
923 CriticalSectionScoped crit_scoped(crit_); | 983 CriticalSectionScoped crit_scoped(crit_); |
984 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
924 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 985 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
925 | 986 |
926 if (filename == NULL) { | 987 if (filename == NULL) { |
927 return kNullPointerError; | 988 return kNullPointerError; |
928 } | 989 } |
929 | 990 |
930 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 991 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
931 // Stop any ongoing recording. | 992 // Stop any ongoing recording. |
932 if (debug_file_->Open()) { | 993 if (debug_file_->Open()) { |
933 if (debug_file_->CloseFile() == -1) { | 994 if (debug_file_->CloseFile() == -1) { |
934 return kFileError; | 995 return kFileError; |
935 } | 996 } |
936 } | 997 } |
937 | 998 |
938 if (debug_file_->OpenFile(filename, false) == -1) { | 999 if (debug_file_->OpenFile(filename, false) == -1) { |
939 debug_file_->CloseFile(); | 1000 debug_file_->CloseFile(); |
940 return kFileError; | 1001 return kFileError; |
941 } | 1002 } |
942 | 1003 |
943 RETURN_ON_ERR(WriteConfigMessage(true)); | 1004 RETURN_ON_ERR(WriteConfigMessage(true)); |
944 RETURN_ON_ERR(WriteInitMessage()); | 1005 RETURN_ON_ERR(WriteInitMessage()); |
945 return kNoError; | 1006 return kNoError; |
946 #else | 1007 #else |
947 return kUnsupportedFunctionError; | 1008 return kUnsupportedFunctionError; |
948 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1009 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
949 } | 1010 } |
950 | 1011 |
951 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { | 1012 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
952 CriticalSectionScoped crit_scoped(crit_); | 1013 CriticalSectionScoped crit_scoped(crit_); |
1014 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
953 | 1015 |
954 if (handle == NULL) { | 1016 if (handle == NULL) { |
955 return kNullPointerError; | 1017 return kNullPointerError; |
956 } | 1018 } |
957 | 1019 |
958 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1020 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
959 // Stop any ongoing recording. | 1021 // Stop any ongoing recording. |
960 if (debug_file_->Open()) { | 1022 if (debug_file_->Open()) { |
961 if (debug_file_->CloseFile() == -1) { | 1023 if (debug_file_->CloseFile() == -1) { |
962 return kFileError; | 1024 return kFileError; |
963 } | 1025 } |
964 } | 1026 } |
965 | 1027 |
966 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { | 1028 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { |
967 return kFileError; | 1029 return kFileError; |
968 } | 1030 } |
969 | 1031 |
970 RETURN_ON_ERR(WriteConfigMessage(true)); | 1032 RETURN_ON_ERR(WriteConfigMessage(true)); |
971 RETURN_ON_ERR(WriteInitMessage()); | 1033 RETURN_ON_ERR(WriteInitMessage()); |
972 return kNoError; | 1034 return kNoError; |
973 #else | 1035 #else |
974 return kUnsupportedFunctionError; | 1036 return kUnsupportedFunctionError; |
975 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1037 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
976 } | 1038 } |
977 | 1039 |
978 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | 1040 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
979 rtc::PlatformFile handle) { | 1041 rtc::PlatformFile handle) { |
1042 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
980 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1043 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
981 return StartDebugRecording(stream); | 1044 return StartDebugRecording(stream); |
982 } | 1045 } |
983 | 1046 |
984 int AudioProcessingImpl::StopDebugRecording() { | 1047 int AudioProcessingImpl::StopDebugRecording() { |
985 CriticalSectionScoped crit_scoped(crit_); | 1048 CriticalSectionScoped crit_scoped(crit_); |
1049 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
986 | 1050 |
987 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1051 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
988 // We just return if recording hasn't started. | 1052 // We just return if recording hasn't started. |
989 if (debug_file_->Open()) { | 1053 if (debug_file_->Open()) { |
990 if (debug_file_->CloseFile() == -1) { | 1054 if (debug_file_->CloseFile() == -1) { |
991 return kFileError; | 1055 return kFileError; |
992 } | 1056 } |
993 } | 1057 } |
994 return kNoError; | 1058 return kNoError; |
995 #else | 1059 #else |
(...skipping 26 matching lines...) | |
1022 | 1086 |
1023 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { | 1087 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { |
1024 return noise_suppression_; | 1088 return noise_suppression_; |
1025 } | 1089 } |
1026 | 1090 |
1027 VoiceDetection* AudioProcessingImpl::voice_detection() const { | 1091 VoiceDetection* AudioProcessingImpl::voice_detection() const { |
1028 return voice_detection_; | 1092 return voice_detection_; |
1029 } | 1093 } |
1030 | 1094 |
1031 bool AudioProcessingImpl::is_data_processed() const { | 1095 bool AudioProcessingImpl::is_data_processed() const { |
1096 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1032 if (beamformer_enabled_) { | 1097 if (beamformer_enabled_) { |
1033 return true; | 1098 return true; |
1034 } | 1099 } |
1035 | 1100 |
1036 int enabled_count = 0; | 1101 int enabled_count = 0; |
1037 for (auto item : component_list_) { | 1102 for (auto item : component_list_) { |
1038 if (item->is_component_enabled()) { | 1103 if (item->is_component_enabled()) { |
1039 enabled_count++; | 1104 enabled_count++; |
1040 } | 1105 } |
1041 } | 1106 } |
1042 | 1107 |
1043 // Data is unchanged if no components are enabled, or if only level_estimator_ | 1108 // Data is unchanged if no components are enabled, or if only level_estimator_ |
1044 // or voice_detection_ is enabled. | 1109 // or voice_detection_ is enabled. |
1045 if (enabled_count == 0) { | 1110 if (enabled_count == 0) { |
1046 return false; | 1111 return false; |
1047 } else if (enabled_count == 1) { | 1112 } else if (enabled_count == 1) { |
1048 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) { | 1113 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) { |
1049 return false; | 1114 return false; |
1050 } | 1115 } |
1051 } else if (enabled_count == 2) { | 1116 } else if (enabled_count == 2) { |
1052 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { | 1117 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { |
1053 return false; | 1118 return false; |
1054 } | 1119 } |
1055 } | 1120 } |
1056 return true; | 1121 return true; |
1057 } | 1122 } |
1058 | 1123 |
1059 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { | 1124 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { |
1125 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1060 // Check if we've upmixed or downmixed the audio. | 1126 // Check if we've upmixed or downmixed the audio. |
1061 return ((shared_state_.api_format_.output_stream().num_channels() != | 1127 return ((shared_state_.api_format_.output_stream().num_channels() != |
1062 shared_state_.api_format_.input_stream().num_channels()) || | 1128 shared_state_.api_format_.input_stream().num_channels()) || |
1063 is_data_processed || transient_suppressor_enabled_); | 1129 is_data_processed || transient_suppressor_enabled_); |
1064 } | 1130 } |
1065 | 1131 |
1066 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { | 1132 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { |
1133 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1067 return (is_data_processed && | 1134 return (is_data_processed && |
1068 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1135 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
1069 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); | 1136 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); |
1070 } | 1137 } |
1071 | 1138 |
1072 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { | 1139 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
1140 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1073 if (!is_data_processed && !voice_detection_->is_enabled() && | 1141 if (!is_data_processed && !voice_detection_->is_enabled() && |
1074 !transient_suppressor_enabled_) { | 1142 !transient_suppressor_enabled_) { |
1075 // Only level_estimator_ is enabled. | 1143 // Only level_estimator_ is enabled. |
1076 return false; | 1144 return false; |
1077 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1145 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
1078 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 1146 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
1079 // Something besides level_estimator_ is enabled, and we have super-wb. | 1147 // Something besides level_estimator_ is enabled, and we have super-wb. |
1080 return true; | 1148 return true; |
1081 } | 1149 } |
1082 return false; | 1150 return false; |
1083 } | 1151 } |
1084 | 1152 |
1085 bool AudioProcessingImpl::is_rev_processed() const { | 1153 bool AudioProcessingImpl::is_rev_processed() const { |
1154 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
1086 return intelligibility_enabled_ && intelligibility_enhancer_->active(); | 1155 return intelligibility_enabled_ && intelligibility_enhancer_->active(); |
1087 } | 1156 } |
1088 | 1157 |
1158 bool AudioProcessingImpl::render_check_rev_conversion_needed() const { | |
1159 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | |
1160 return rev_conversion_needed(); | |
1161 } | |
1162 | |
1089 bool AudioProcessingImpl::rev_conversion_needed() const { | 1163 bool AudioProcessingImpl::rev_conversion_needed() const { |
1164 // This is called from the initialization functionality, which is shared | |
1165 // between the render and capture parts, and also during the APM creation. | |
1166 // Therefore it is neither possible to do thread checks nor to separate | |
1167 // into different thread-specific implementations. | |
1090 return (shared_state_.api_format_.reverse_input_stream() != | 1168 return (shared_state_.api_format_.reverse_input_stream() != |
1091 shared_state_.api_format_.reverse_output_stream()); | 1169 shared_state_.api_format_.reverse_output_stream()); |
1092 } | 1170 } |
1093 | 1171 |
1094 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1172 void AudioProcessingImpl::InitializeExperimentalAgc() { |
1173 // This is called from the initialization functionality, which is shared | |
1174 // between the render and capture parts, and also during the APM creation. | |
1175 // Therefore it is neither possible to do thread checks nor to separate | |
1176 // into different thread-specific implementations. | |
1095 if (use_new_agc_) { | 1177 if (use_new_agc_) { |
1096 if (!agc_manager_.get()) { | 1178 if (!agc_manager_.get()) { |
1097 agc_manager_.reset(new AgcManagerDirect(gain_control_, | 1179 agc_manager_.reset(new AgcManagerDirect(gain_control_, |
1098 gain_control_for_new_agc_.get(), | 1180 gain_control_for_new_agc_.get(), |
1099 agc_startup_min_volume_)); | 1181 agc_startup_min_volume_)); |
1100 } | 1182 } |
1101 agc_manager_->Initialize(); | 1183 agc_manager_->Initialize(); |
1102 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 1184 agc_manager_->SetCaptureMuted(output_will_be_muted_); |
1103 } | 1185 } |
1104 } | 1186 } |
1105 | 1187 |
1106 void AudioProcessingImpl::InitializeTransient() { | 1188 void AudioProcessingImpl::InitializeTransient() { |
1189 // This is called from the initialization functionality, which is shared | |
1190 // between the render and capture parts, and also during the APM creation. | |
1191 // Therefore it is neither possible to do thread checks nor to separate | |
1192 // into different thread-specific implementations. | |
1107 if (transient_suppressor_enabled_) { | 1193 if (transient_suppressor_enabled_) { |
1108 if (!transient_suppressor_.get()) { | 1194 if (!transient_suppressor_.get()) { |
1109 transient_suppressor_.reset(new TransientSuppressor()); | 1195 transient_suppressor_.reset(new TransientSuppressor()); |
1110 } | 1196 } |
1111 transient_suppressor_->Initialize( | 1197 transient_suppressor_->Initialize( |
1112 fwd_proc_format_.sample_rate_hz(), split_rate_, | 1198 fwd_proc_format_.sample_rate_hz(), split_rate_, |
1113 shared_state_.api_format_.output_stream().num_channels()); | 1199 shared_state_.api_format_.output_stream().num_channels()); |
1114 } | 1200 } |
1115 } | 1201 } |
1116 | 1202 |
1117 void AudioProcessingImpl::InitializeBeamformer() { | 1203 void AudioProcessingImpl::InitializeBeamformer() { |
1204 // This is called from the initialization functionality, which is shared | |
1205 // between the render and capture parts, and also during the APM creation. | |
1206 // Therefore it is neither possible to do thread checks nor to separate | |
1207 // into different thread-specific implementations. | |
1118 if (beamformer_enabled_) { | 1208 if (beamformer_enabled_) { |
1119 if (!beamformer_) { | 1209 if (!beamformer_) { |
1120 beamformer_.reset( | 1210 beamformer_.reset( |
1121 new NonlinearBeamformer(array_geometry_, target_direction_)); | 1211 new NonlinearBeamformer(array_geometry_, target_direction_)); |
1122 } | 1212 } |
1123 beamformer_->Initialize(kChunkSizeMs, split_rate_); | 1213 beamformer_->Initialize(kChunkSizeMs, split_rate_); |
1124 } | 1214 } |
1125 } | 1215 } |
1126 | 1216 |
1127 void AudioProcessingImpl::InitializeIntelligibility() { | 1217 void AudioProcessingImpl::InitializeIntelligibility() { |
1218 // This is called from the initialization functionality, which is shared | |
1219 // between the render and capture parts, and also during the APM creation. | |
1220 // Therefore it is neither possible to do thread checks nor to separate | |
1221 // into different thread-specific implementations. | |
1128 if (intelligibility_enabled_) { | 1222 if (intelligibility_enabled_) { |
1129 IntelligibilityEnhancer::Config config; | 1223 IntelligibilityEnhancer::Config config; |
1130 config.sample_rate_hz = split_rate_; | 1224 config.sample_rate_hz = split_rate_; |
1131 config.num_capture_channels = capture_audio_->num_channels(); | 1225 config.num_capture_channels = capture_audio_->num_channels(); |
1132 config.num_render_channels = render_audio_->num_channels(); | 1226 config.num_render_channels = render_audio_->num_channels(); |
1133 intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); | 1227 intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); |
1134 } | 1228 } |
1135 } | 1229 } |
1136 | 1230 |
1137 void AudioProcessingImpl::MaybeUpdateHistograms() { | 1231 void AudioProcessingImpl::MaybeUpdateHistograms() { |
1232 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1138 static const int kMinDiffDelayMs = 60; | 1233 static const int kMinDiffDelayMs = 60; |
1139 | 1234 |
1140 if (echo_cancellation()->is_enabled()) { | 1235 if (echo_cancellation()->is_enabled()) { |
1141 // Activate delay_jumps_ counters if we know echo_cancellation is running. | 1236 // Activate delay_jumps_ counters if we know echo_cancellation is running. |
1142 // If a stream has echo we know that the echo_cancellation is in process. | 1237 // If a stream has echo we know that the echo_cancellation is in process. |
1143 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { | 1238 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { |
1144 stream_delay_jumps_ = 0; | 1239 stream_delay_jumps_ = 0; |
1145 } | 1240 } |
1146 if (aec_system_delay_jumps_ == -1 && | 1241 if (aec_system_delay_jumps_ == -1 && |
1147 echo_cancellation()->stream_has_echo()) { | 1242 echo_cancellation()->stream_has_echo()) { |
(...skipping 26 matching lines...) | |
1174 if (aec_system_delay_jumps_ == -1) { | 1269 if (aec_system_delay_jumps_ == -1) { |
1175 aec_system_delay_jumps_ = 0; // Activate counter if needed. | 1270 aec_system_delay_jumps_ = 0; // Activate counter if needed. |
1176 } | 1271 } |
1177 aec_system_delay_jumps_++; | 1272 aec_system_delay_jumps_++; |
1178 } | 1273 } |
1179 last_aec_system_delay_ms_ = aec_system_delay_ms; | 1274 last_aec_system_delay_ms_ = aec_system_delay_ms; |
1180 } | 1275 } |
1181 } | 1276 } |
1182 | 1277 |
1183 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { | 1278 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
1279 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1184 CriticalSectionScoped crit_scoped(crit_); | 1280 CriticalSectionScoped crit_scoped(crit_); |
1185 if (stream_delay_jumps_ > -1) { | 1281 if (stream_delay_jumps_ > -1) { |
1186 RTC_HISTOGRAM_ENUMERATION( | 1282 RTC_HISTOGRAM_ENUMERATION( |
1187 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", | 1283 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", |
1188 stream_delay_jumps_, 51); | 1284 stream_delay_jumps_, 51); |
1189 } | 1285 } |
1190 stream_delay_jumps_ = -1; | 1286 stream_delay_jumps_ = -1; |
1191 last_stream_delay_ms_ = 0; | 1287 last_stream_delay_ms_ = 0; |
1192 | 1288 |
1193 if (aec_system_delay_jumps_ > -1) { | 1289 if (aec_system_delay_jumps_ > -1) { |
1194 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1290 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
1195 aec_system_delay_jumps_, 51); | 1291 aec_system_delay_jumps_, 51); |
1196 } | 1292 } |
1197 aec_system_delay_jumps_ = -1; | 1293 aec_system_delay_jumps_ = -1; |
1198 last_aec_system_delay_ms_ = 0; | 1294 last_aec_system_delay_ms_ = 0; |
1199 } | 1295 } |
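Editor's note: the jump counters in the two functions above follow a simple convention: -1 means a counter is inactive, it is set to 0 when activated, incremented on each detected delay jump, and reported as an enumerated histogram (capped at 51 buckets) in UpdateHistogramsOnCallEnd(). The exact jump-detection condition is elided in this view, so the sketch below only illustrates that convention; DelayJumpCounter and its fields are hypothetical stand-ins, not code from this CL.

  #include <cstdlib>

  // Illustrative only: mirrors the -1 / 0 / increment convention of the
  // delay-jump counters; the real detection condition lives in the lines
  // skipped above.
  struct DelayJumpCounter {
    int jumps = -1;        // -1 means "not yet activated".
    int last_delay_ms = 0;

    void Update(int delay_ms, int min_diff_ms) {
      if (last_delay_ms != 0 &&
          std::abs(delay_ms - last_delay_ms) > min_diff_ms) {
        if (jumps == -1)
          jumps = 0;       // Activate the counter on the first jump.
        ++jumps;
      }
      last_delay_ms = delay_ms;
    }
  };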
1200 | 1296 |
1201 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1297 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1202 int AudioProcessingImpl::WriteMessageToDebugFile() { | 1298 int AudioProcessingImpl::WriteMessageToDebugFile() { |
1299 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1203 int32_t size = event_msg_->ByteSize(); | 1300 int32_t size = event_msg_->ByteSize(); |
1204 if (size <= 0) { | 1301 if (size <= 0) { |
1205 return kUnspecifiedError; | 1302 return kUnspecifiedError; |
1206 } | 1303 } |
1207 #if defined(WEBRTC_ARCH_BIG_ENDIAN) | 1304 #if defined(WEBRTC_ARCH_BIG_ENDIAN) |
1208 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be | 1305 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be |
1209 // pretty safe in assuming little-endian. | 1306 // pretty safe in assuming little-endian. |
1210 #endif | 1307 #endif |
1211 | 1308 |
1212 if (!event_msg_->SerializeToString(&event_str_)) { | 1309 if (!event_msg_->SerializeToString(&event_str_)) { |
1213 return kUnspecifiedError; | 1310 return kUnspecifiedError; |
1214 } | 1311 } |
1215 | 1312 |
1216 // Write message preceded by its size. | 1313 // Write message preceded by its size. |
1217 if (!debug_file_->Write(&size, sizeof(int32_t))) { | 1314 if (!debug_file_->Write(&size, sizeof(int32_t))) { |
1218 return kFileError; | 1315 return kFileError; |
1219 } | 1316 } |
1220 if (!debug_file_->Write(event_str_.data(), event_str_.length())) { | 1317 if (!debug_file_->Write(event_str_.data(), event_str_.length())) { |
1221 return kFileError; | 1318 return kFileError; |
1222 } | 1319 } |
1223 | 1320 |
1224 event_msg_->Clear(); | 1321 event_msg_->Clear(); |
1225 | 1322 |
1226 return kNoError; | 1323 return kNoError; |
1227 } | 1324 } |
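Editor's note: each debug-dump record written by this function is an int32 size followed by the serialized protobuf bytes, with the endianness caveat noted in the TODO above. As a hedged illustration (not part of this CL), a reader for that record format could look like the following; the include path of the generated audioproc proto header is an assumption.

  #include <cstdint>
  #include <cstdio>
  #include <string>

  #include "webrtc/audio_processing/debug.pb.h"  // Assumed generated header path.

  // Reads one size-prefixed audioproc::Event record; returns false at EOF or
  // on a malformed record. Assumes a little-endian host, matching the file
  // format produced by WriteMessageToDebugFile().
  bool ReadNextEvent(FILE* file, webrtc::audioproc::Event* event) {
    int32_t size = 0;
    if (fread(&size, sizeof(size), 1, file) != 1 || size <= 0)
      return false;
    std::string bytes(static_cast<size_t>(size), '\0');
    if (fread(&bytes[0], 1, bytes.size(), file) != bytes.size())
      return false;
    return event->ParseFromString(bytes);
  }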
1228 | 1325 |
1229 int AudioProcessingImpl::WriteInitMessage() { | 1326 int AudioProcessingImpl::WriteInitMessage() { |
1327 // This is called from the initialization code, which is shared between | 
1328 // the render and capture parts, and it also runs during APM creation. | 
1329 // Therefore it is neither possible to do thread checks here nor to | 
1330 // separate this into thread-specific implementations. | 
1230 event_msg_->set_type(audioproc::Event::INIT); | 1331 event_msg_->set_type(audioproc::Event::INIT); |
1231 audioproc::Init* msg = event_msg_->mutable_init(); | 1332 audioproc::Init* msg = event_msg_->mutable_init(); |
1232 msg->set_sample_rate( | 1333 msg->set_sample_rate( |
1233 shared_state_.api_format_.input_stream().sample_rate_hz()); | 1334 shared_state_.api_format_.input_stream().sample_rate_hz()); |
1234 msg->set_num_input_channels( | 1335 msg->set_num_input_channels( |
1235 shared_state_.api_format_.input_stream().num_channels()); | 1336 shared_state_.api_format_.input_stream().num_channels()); |
1236 msg->set_num_output_channels( | 1337 msg->set_num_output_channels( |
1237 shared_state_.api_format_.output_stream().num_channels()); | 1338 shared_state_.api_format_.output_stream().num_channels()); |
1238 msg->set_num_reverse_channels( | 1339 msg->set_num_reverse_channels( |
1239 shared_state_.api_format_.reverse_input_stream().num_channels()); | 1340 shared_state_.api_format_.reverse_input_stream().num_channels()); |
1240 msg->set_reverse_sample_rate( | 1341 msg->set_reverse_sample_rate( |
1241 shared_state_.api_format_.reverse_input_stream().sample_rate_hz()); | 1342 shared_state_.api_format_.reverse_input_stream().sample_rate_hz()); |
1242 msg->set_output_sample_rate( | 1343 msg->set_output_sample_rate( |
1243 shared_state_.api_format_.output_stream().sample_rate_hz()); | 1344 shared_state_.api_format_.output_stream().sample_rate_hz()); |
1244 // TODO(ekmeyerson): Add reverse output fields to event_msg_. | 1345 // TODO(ekmeyerson): Add reverse output fields to event_msg_. |
1245 | 1346 |
1246 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1347 RETURN_ON_ERR(WriteMessageToDebugFile()); |
1247 return kNoError; | 1348 return kNoError; |
1248 } | 1349 } |
1249 | 1350 |
1250 int AudioProcessingImpl::WriteConfigMessage(bool forced) { | 1351 int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
1352 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | |
1251 audioproc::Config config; | 1353 audioproc::Config config; |
1252 | 1354 |
1253 config.set_aec_enabled(echo_cancellation_->is_enabled()); | 1355 config.set_aec_enabled(echo_cancellation_->is_enabled()); |
1254 config.set_aec_delay_agnostic_enabled( | 1356 config.set_aec_delay_agnostic_enabled( |
1255 echo_cancellation_->is_delay_agnostic_enabled()); | 1357 echo_cancellation_->is_delay_agnostic_enabled()); |
1256 config.set_aec_drift_compensation_enabled( | 1358 config.set_aec_drift_compensation_enabled( |
1257 echo_cancellation_->is_drift_compensation_enabled()); | 1359 echo_cancellation_->is_drift_compensation_enabled()); |
1258 config.set_aec_extended_filter_enabled( | 1360 config.set_aec_extended_filter_enabled( |
1259 echo_cancellation_->is_extended_filter_enabled()); | 1361 echo_cancellation_->is_extended_filter_enabled()); |
1260 config.set_aec_suppression_level( | 1362 config.set_aec_suppression_level( |
(...skipping 26 matching lines...) | 
1287 | 1389 |
1288 event_msg_->set_type(audioproc::Event::CONFIG); | 1390 event_msg_->set_type(audioproc::Event::CONFIG); |
1289 event_msg_->mutable_config()->CopyFrom(config); | 1391 event_msg_->mutable_config()->CopyFrom(config); |
1290 | 1392 |
1291 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1393 RETURN_ON_ERR(WriteMessageToDebugFile()); |
1292 return kNoError; | 1394 return kNoError; |
1293 } | 1395 } |
1294 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1396 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1295 | 1397 |
1296 } // namespace webrtc | 1398 } // namespace webrtc |