Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" | 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" |
| 12 | 12 |
| 13 #include <assert.h> | 13 #include <assert.h> |
| 14 #include <algorithm> | 14 #include <algorithm> |
| 15 | 15 |
| 16 #include "webrtc/base/checks.h" | 16 #include "webrtc/base/checks.h" |
| 17 #include "webrtc/base/platform_file.h" | 17 #include "webrtc/base/platform_file.h" |
| 18 #include "webrtc/common_audio/audio_converter.h" | |
| 19 #include "webrtc/common_audio/channel_buffer.h" | |
| 18 #include "webrtc/common_audio/include/audio_util.h" | 20 #include "webrtc/common_audio/include/audio_util.h" |
| 19 #include "webrtc/common_audio/channel_buffer.h" | |
| 20 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" | 21 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" |
| 21 extern "C" { | 22 extern "C" { |
| 22 #include "webrtc/modules/audio_processing/aec/aec_core.h" | 23 #include "webrtc/modules/audio_processing/aec/aec_core.h" |
| 23 } | 24 } |
| 24 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" | 25 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" |
| 25 #include "webrtc/modules/audio_processing/audio_buffer.h" | 26 #include "webrtc/modules/audio_processing/audio_buffer.h" |
| 26 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" | 27 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" |
| 27 #include "webrtc/modules/audio_processing/common.h" | 28 #include "webrtc/modules/audio_processing/common.h" |
| 28 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" | 29 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" |
| 29 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" | 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
| 30 #include "webrtc/modules/audio_processing/gain_control_impl.h" | 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" |
| 31 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" | 32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" |
| 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" | |
| 32 #include "webrtc/modules/audio_processing/level_estimator_impl.h" | 34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" |
| 33 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" | 35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
| 34 #include "webrtc/modules/audio_processing/processing_component.h" | 36 #include "webrtc/modules/audio_processing/processing_component.h" |
| 35 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" | 37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
| 36 #include "webrtc/modules/audio_processing/voice_detection_impl.h" | 38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
| 37 #include "webrtc/modules/interface/module_common_types.h" | 39 #include "webrtc/modules/interface/module_common_types.h" |
| 38 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" | 40 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" |
| 39 #include "webrtc/system_wrappers/interface/file_wrapper.h" | 41 #include "webrtc/system_wrappers/interface/file_wrapper.h" |
| 40 #include "webrtc/system_wrappers/interface/logging.h" | 42 #include "webrtc/system_wrappers/interface/logging.h" |
| 41 #include "webrtc/system_wrappers/interface/metrics.h" | 43 #include "webrtc/system_wrappers/interface/metrics.h" |
| (...skipping 135 matching lines...) | |
| 177 level_estimator_(NULL), | 179 level_estimator_(NULL), |
| 178 noise_suppression_(NULL), | 180 noise_suppression_(NULL), |
| 179 voice_detection_(NULL), | 181 voice_detection_(NULL), |
| 180 crit_(CriticalSectionWrapper::CreateCriticalSection()), | 182 crit_(CriticalSectionWrapper::CreateCriticalSection()), |
| 181 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 183 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 182 debug_file_(FileWrapper::Create()), | 184 debug_file_(FileWrapper::Create()), |
| 183 event_msg_(new audioproc::Event()), | 185 event_msg_(new audioproc::Event()), |
| 184 #endif | 186 #endif |
| 185 api_format_({{{kSampleRate16kHz, 1, false}, | 187 api_format_({{{kSampleRate16kHz, 1, false}, |
| 186 {kSampleRate16kHz, 1, false}, | 188 {kSampleRate16kHz, 1, false}, |
| 189 {kSampleRate16kHz, 1, false}, | |
| 187 {kSampleRate16kHz, 1, false}}}), | 190 {kSampleRate16kHz, 1, false}}}), |
| 188 fwd_proc_format_(kSampleRate16kHz), | 191 fwd_proc_format_(kSampleRate16kHz), |
| 189 rev_proc_format_(kSampleRate16kHz, 1), | 192 rev_proc_format_(kSampleRate16kHz, 1), |
| 190 split_rate_(kSampleRate16kHz), | 193 split_rate_(kSampleRate16kHz), |
| 191 stream_delay_ms_(0), | 194 stream_delay_ms_(0), |
| 192 delay_offset_ms_(0), | 195 delay_offset_ms_(0), |
| 193 was_stream_delay_set_(false), | 196 was_stream_delay_set_(false), |
| 194 last_stream_delay_ms_(0), | 197 last_stream_delay_ms_(0), |
| 195 last_aec_system_delay_ms_(0), | 198 last_aec_system_delay_ms_(0), |
| 196 stream_delay_jumps_(-1), | 199 stream_delay_jumps_(-1), |
| 197 aec_system_delay_jumps_(-1), | 200 aec_system_delay_jumps_(-1), |
| 198 output_will_be_muted_(false), | 201 output_will_be_muted_(false), |
| 199 key_pressed_(false), | 202 key_pressed_(false), |
| 200 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 203 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 201 use_new_agc_(false), | 204 use_new_agc_(false), |
| 202 #else | 205 #else |
| 203 use_new_agc_(config.Get<ExperimentalAgc>().enabled), | 206 use_new_agc_(config.Get<ExperimentalAgc>().enabled), |
| 204 #endif | 207 #endif |
| 205 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), | 208 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), |
| 206 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 209 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 207 transient_suppressor_enabled_(false), | 210 transient_suppressor_enabled_(false), |
| 208 #else | 211 #else |
| 209 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), | 212 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), |
| 210 #endif | 213 #endif |
| 211 beamformer_enabled_(config.Get<Beamforming>().enabled), | 214 beamformer_enabled_(config.Get<Beamforming>().enabled), |
| 212 beamformer_(beamformer), | 215 beamformer_(beamformer), |
| 213 array_geometry_(config.Get<Beamforming>().array_geometry) { | 216 array_geometry_(config.Get<Beamforming>().array_geometry), |
| 217 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { | |
| 214 echo_cancellation_ = new EchoCancellationImpl(this, crit_); | 218 echo_cancellation_ = new EchoCancellationImpl(this, crit_); |
| 215 component_list_.push_back(echo_cancellation_); | 219 component_list_.push_back(echo_cancellation_); |
| 216 | 220 |
| 217 echo_control_mobile_ = new EchoControlMobileImpl(this, crit_); | 221 echo_control_mobile_ = new EchoControlMobileImpl(this, crit_); |
| 218 component_list_.push_back(echo_control_mobile_); | 222 component_list_.push_back(echo_control_mobile_); |
| 219 | 223 |
| 220 gain_control_ = new GainControlImpl(this, crit_); | 224 gain_control_ = new GainControlImpl(this, crit_); |
| 221 component_list_.push_back(gain_control_); | 225 component_list_.push_back(gain_control_); |
| 222 | 226 |
| 223 high_pass_filter_ = new HighPassFilterImpl(this, crit_); | 227 high_pass_filter_ = new HighPassFilterImpl(this, crit_); |
| (...skipping 51 matching lines...) | |
| 275 return InitializeLocked(processing_config); | 279 return InitializeLocked(processing_config); |
| 276 } | 280 } |
| 277 | 281 |
| 278 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, | 282 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
| 279 int output_sample_rate_hz, | 283 int output_sample_rate_hz, |
| 280 int reverse_sample_rate_hz, | 284 int reverse_sample_rate_hz, |
| 281 ChannelLayout input_layout, | 285 ChannelLayout input_layout, |
| 282 ChannelLayout output_layout, | 286 ChannelLayout output_layout, |
| 283 ChannelLayout reverse_layout) { | 287 ChannelLayout reverse_layout) { |
| 284 const ProcessingConfig processing_config = { | 288 const ProcessingConfig processing_config = { |
| 285 {{input_sample_rate_hz, ChannelsFromLayout(input_layout), | 289 {{input_sample_rate_hz, |
| 290 ChannelsFromLayout(input_layout), | |
| 286 LayoutHasKeyboard(input_layout)}, | 291 LayoutHasKeyboard(input_layout)}, |
| 287 {output_sample_rate_hz, ChannelsFromLayout(output_layout), | 292 {output_sample_rate_hz, |
| 293 ChannelsFromLayout(output_layout), | |
| 288 LayoutHasKeyboard(output_layout)}, | 294 LayoutHasKeyboard(output_layout)}, |
| 289 {reverse_sample_rate_hz, ChannelsFromLayout(reverse_layout), | 295 {reverse_sample_rate_hz, |
| 296 ChannelsFromLayout(reverse_layout), | |
| 297 LayoutHasKeyboard(reverse_layout)}, | |
| 298 {reverse_sample_rate_hz, | |
| 299 ChannelsFromLayout(reverse_layout), | |
| 290 LayoutHasKeyboard(reverse_layout)}}}; | 300 LayoutHasKeyboard(reverse_layout)}}}; |
| 291 | 301 |
| 292 return Initialize(processing_config); | 302 return Initialize(processing_config); |
| 293 } | 303 } |
| 294 | 304 |
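Note on the ChannelLayout-based Initialize() overload above: it now fans out into a four-stream ProcessingConfig (input, output, reverse input, reverse output), with the reverse config duplicated for both reverse streams. Below is a minimal caller-side sketch using the aggregate-initialization form seen in this file; the 48 kHz / stereo values and the `apm` instance are illustrative assumptions, not part of this CL.

```cpp
#include "webrtc/modules/audio_processing/include/audio_processing.h"

// Hypothetical caller: configure all four streams explicitly.
// Each StreamConfig is {sample_rate_hz, num_channels, has_keyboard},
// matching the brace-initializers in the diff above.
int InitializeApm(webrtc::AudioProcessing* apm) {
  const webrtc::ProcessingConfig processing_config = {
      {{48000, 2, false},    // input (capture) stream
       {48000, 1, false},    // output stream
       {48000, 2, false},    // reverse input (render) stream
       {16000, 1, false}}};  // reverse output stream
  return apm->Initialize(processing_config);
}
```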
| 295 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 305 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
| 296 CriticalSectionScoped crit_scoped(crit_); | 306 CriticalSectionScoped crit_scoped(crit_); |
| 297 return InitializeLocked(processing_config); | 307 return InitializeLocked(processing_config); |
| 298 } | 308 } |
| 299 | 309 |
| 300 int AudioProcessingImpl::InitializeLocked() { | 310 int AudioProcessingImpl::InitializeLocked() { |
| 301 const int fwd_audio_buffer_channels = | 311 const int fwd_audio_buffer_channels = |
| 302 beamformer_enabled_ ? api_format_.input_stream().num_channels() | 312 beamformer_enabled_ ? api_format_.input_stream().num_channels() |
| 303 : api_format_.output_stream().num_channels(); | 313 : api_format_.output_stream().num_channels(); |
| 304 if (api_format_.reverse_stream().num_channels() > 0) { | 314 const int rev_audio_buffer_out_num_frames = |
| 315 api_format_.reverse_output_stream().num_frames() == 0 | |
| 316 ? rev_proc_format_.num_frames() | |
| 317 : api_format_.reverse_output_stream().num_frames(); | |
| 318 if (api_format_.reverse_input_stream().num_channels() > 0) { | |
| 305 render_audio_.reset(new AudioBuffer( | 319 render_audio_.reset(new AudioBuffer( |
| 306 api_format_.reverse_stream().num_frames(), | 320 api_format_.reverse_input_stream().num_frames(), |
| 307 api_format_.reverse_stream().num_channels(), | 321 api_format_.reverse_input_stream().num_channels(), |
| 308 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), | 322 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), |
| 309 rev_proc_format_.num_frames())); | 323 rev_audio_buffer_out_num_frames)); |
| 324 if (rev_conversion_needed()) { | |
| 325 render_converter_ = AudioConverter::Create( | |
| 326 api_format_.reverse_input_stream().num_channels(), | |
| 327 api_format_.reverse_input_stream().num_frames(), | |
| 328 api_format_.reverse_output_stream().num_channels(), | |
| 329 api_format_.reverse_output_stream().num_frames()); | |
| 330 } else { | |
| 331 render_converter_.reset(nullptr); | |
| 332 } | |
| 310 } else { | 333 } else { |
| 311 render_audio_.reset(nullptr); | 334 render_audio_.reset(nullptr); |
| 335 render_converter_.reset(nullptr); | |
| 312 } | 336 } |
| 313 capture_audio_.reset(new AudioBuffer( | 337 capture_audio_.reset(new AudioBuffer( |
| 314 api_format_.input_stream().num_frames(), | 338 api_format_.input_stream().num_frames(), |
| 315 api_format_.input_stream().num_channels(), fwd_proc_format_.num_frames(), | 339 api_format_.input_stream().num_channels(), fwd_proc_format_.num_frames(), |
| 316 fwd_audio_buffer_channels, api_format_.output_stream().num_frames())); | 340 fwd_audio_buffer_channels, api_format_.output_stream().num_frames())); |
| 317 | 341 |
| 318 // Initialize all components. | 342 // Initialize all components. |
| 319 for (auto item : component_list_) { | 343 for (auto item : component_list_) { |
| 320 int err = item->Initialize(); | 344 int err = item->Initialize(); |
| 321 if (err != kNoError) { | 345 if (err != kNoError) { |
| 322 return err; | 346 return err; |
| 323 } | 347 } |
| 324 } | 348 } |
| 325 | 349 |
| 326 InitializeExperimentalAgc(); | 350 InitializeExperimentalAgc(); |
| 327 | 351 |
| 328 InitializeTransient(); | 352 InitializeTransient(); |
| 329 | 353 |
| 330 InitializeBeamformer(); | 354 InitializeBeamformer(); |
| 331 | 355 |
| 356 InitializeIntelligibility(); | |
| 357 | |
| 332 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 358 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 333 if (debug_file_->Open()) { | 359 if (debug_file_->Open()) { |
| 334 int err = WriteInitMessage(); | 360 int err = WriteInitMessage(); |
| 335 if (err != kNoError) { | 361 if (err != kNoError) { |
| 336 return err; | 362 return err; |
| 337 } | 363 } |
| 338 } | 364 } |
| 339 #endif | 365 #endif |
| 340 | 366 |
| 341 return kNoError; | 367 return kNoError; |
| (...skipping 47 matching lines...) | |
| 389 } | 415 } |
| 390 | 416 |
| 391 fwd_proc_format_ = StreamConfig(fwd_proc_rate); | 417 fwd_proc_format_ = StreamConfig(fwd_proc_rate); |
| 392 | 418 |
| 393 // We normally process the reverse stream at 16 kHz. Unless... | 419 // We normally process the reverse stream at 16 kHz. Unless... |
| 394 int rev_proc_rate = kSampleRate16kHz; | 420 int rev_proc_rate = kSampleRate16kHz; |
| 395 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { | 421 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { |
| 396 // ...the forward stream is at 8 kHz. | 422 // ...the forward stream is at 8 kHz. |
| 397 rev_proc_rate = kSampleRate8kHz; | 423 rev_proc_rate = kSampleRate8kHz; |
| 398 } else { | 424 } else { |
| 399 if (api_format_.reverse_stream().sample_rate_hz() == kSampleRate32kHz) { | 425 if (api_format_.reverse_input_stream().sample_rate_hz() == |
| 426 kSampleRate32kHz) { | |
| 400 // ...or the input is at 32 kHz, in which case we use the splitting | 427 // ...or the input is at 32 kHz, in which case we use the splitting |
| 401 // filter rather than the resampler. | 428 // filter rather than the resampler. |
| 402 rev_proc_rate = kSampleRate32kHz; | 429 rev_proc_rate = kSampleRate32kHz; |
| 403 } | 430 } |
| 404 } | 431 } |
| 405 | 432 |
| 406 // Always downmix the reverse stream to mono for analysis. This has been | 433 // Always downmix the reverse stream to mono for analysis. This has been |
| 407 // demonstrated to work well for AEC in most practical scenarios. | 434 // demonstrated to work well for AEC in most practical scenarios. |
| 408 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); | 435 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); |
| 409 | 436 |
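For readability, the reverse-rate selection spread across the branches above can be restated as a standalone sketch (hypothetical helper, not part of the CL; the logic mirrors the diff: 16 kHz by default, 8 kHz when the forward path runs at 8 kHz, and 32 kHz when the reverse input is at 32 kHz so the splitting filter is used instead of the resampler).

```cpp
#include "webrtc/modules/audio_processing/include/audio_processing.h"

// Condensed restatement of the decision above; illustrative only.
int ChooseRevProcRate(int fwd_proc_rate_hz, int rev_input_rate_hz) {
  if (fwd_proc_rate_hz == webrtc::AudioProcessing::kSampleRate8kHz) {
    return webrtc::AudioProcessing::kSampleRate8kHz;  // Match the 8 kHz forward path.
  }
  if (rev_input_rate_hz == webrtc::AudioProcessing::kSampleRate32kHz) {
    return webrtc::AudioProcessing::kSampleRate32kHz;  // Splitting filter, no resampler.
  }
  return webrtc::AudioProcessing::kSampleRate16kHz;  // Default analysis rate.
}
```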
| (...skipping 206 matching lines...) | |
| 616 msg->set_delay(stream_delay_ms_); | 643 msg->set_delay(stream_delay_ms_); |
| 617 msg->set_drift(echo_cancellation_->stream_drift_samples()); | 644 msg->set_drift(echo_cancellation_->stream_drift_samples()); |
| 618 msg->set_level(gain_control()->stream_analog_level()); | 645 msg->set_level(gain_control()->stream_analog_level()); |
| 619 msg->set_keypress(key_pressed_); | 646 msg->set_keypress(key_pressed_); |
| 620 } | 647 } |
| 621 #endif | 648 #endif |
| 622 | 649 |
| 623 MaybeUpdateHistograms(); | 650 MaybeUpdateHistograms(); |
| 624 | 651 |
| 625 AudioBuffer* ca = capture_audio_.get(); // For brevity. | 652 AudioBuffer* ca = capture_audio_.get(); // For brevity. |
| 653 | |
| 626 if (use_new_agc_ && gain_control_->is_enabled()) { | 654 if (use_new_agc_ && gain_control_->is_enabled()) { |
| 627 agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(), | 655 agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(), |
| 628 fwd_proc_format_.num_frames()); | 656 fwd_proc_format_.num_frames()); |
| 629 } | 657 } |
| 630 | 658 |
| 631 bool data_processed = is_data_processed(); | 659 bool data_processed = is_data_processed(); |
| 632 if (analysis_needed(data_processed)) { | 660 if (analysis_needed(data_processed)) { |
| 633 ca->SplitIntoFrequencyBands(); | 661 ca->SplitIntoFrequencyBands(); |
| 634 } | 662 } |
| 635 | 663 |
| 664 if (intelligibility_enabled_) { | |
| 665 intelligibility_enhancer_->AnalyzeCaptureAudio( | |
| 666 ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels()); | |
| 667 } | |
| 668 | |
| 636 if (beamformer_enabled_) { | 669 if (beamformer_enabled_) { |
| 637 beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); | 670 beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); |
| 638 ca->set_num_channels(1); | 671 ca->set_num_channels(1); |
| 639 } | 672 } |
| 640 | 673 |
| 641 RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); | 674 RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); |
| 642 RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); | 675 RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); |
| 643 RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); | 676 RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); |
| 644 RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); | 677 RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); |
| 645 | 678 |
| (...skipping 30 matching lines...) | |
| 676 | 709 |
| 677 // The level estimator operates on the recombined data. | 710 // The level estimator operates on the recombined data. |
| 678 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); | 711 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); |
| 679 | 712 |
| 680 was_stream_delay_set_ = false; | 713 was_stream_delay_set_ = false; |
| 681 return kNoError; | 714 return kNoError; |
| 682 } | 715 } |
| 683 | 716 |
| 684 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, | 717 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
| 685 int samples_per_channel, | 718 int samples_per_channel, |
| 686 int sample_rate_hz, | 719 int rev_sample_rate_hz, |
| 687 ChannelLayout layout) { | 720 ChannelLayout layout) { |
| 688 const StreamConfig reverse_config = { | 721 const StreamConfig reverse_config = { |
| 689 sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), | 722 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
| 690 }; | 723 }; |
| 691 if (samples_per_channel != reverse_config.num_frames()) { | 724 if (samples_per_channel != reverse_config.num_frames()) { |
| 692 return kBadDataLengthError; | 725 return kBadDataLengthError; |
| 693 } | 726 } |
| 694 return AnalyzeReverseStream(data, reverse_config); | 727 return AnalyzeReverseStream(data, reverse_config, reverse_config, data); |
| 728 } | |
| 729 | |
| 730 int AudioProcessingImpl::ProcessReverseStream( | |
| 731 const float* const* src, | |
| 732 const StreamConfig& reverse_input_config, | |
| 733 const StreamConfig& reverse_output_config, | |
| 734 float* const* dest) { | |
| 735 RETURN_ON_ERR(AnalyzeReverseStream(src, reverse_input_config, | |
| 736 reverse_output_config, dest)); | |
| 737 if (is_rev_processed()) { | |
| 738 render_audio_->CopyTo(api_format_.reverse_output_stream(), dest); | |
| 739 } else if (rev_conversion_needed()) { | |
| 740 render_converter_->Convert(src, reverse_input_config.num_frames() * | |
| (review comment thread on new line 740) | |
| Andrew MacDonald, 2015/08/10 19:24:05: Perhaps add a helper to StreamConfig? int num_samp… | |
| ekm, 2015/08/11 23:59:36: Done. | |
| 741 reverse_input_config.num_channels(), | |
| 742 dest, reverse_output_config.num_frames() * | |
| 743 reverse_output_config.num_channels()); | |
| 744 } else { | |
| 745 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | |
| 746 reverse_input_config.num_channels(), dest); | |
| 747 } | |
| 748 | |
| 749 return kNoError; | |
| 695 } | 750 } |
| 696 | 751 |
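Regarding the reviewer comment inside ProcessReverseStream() above ("Perhaps add a helper to StreamConfig? int num_samp…", answered "Done."): the helper itself is not visible in this patch set, so the following is only a plausible sketch of what such an accessor and the resulting Convert() call could look like, with StreamConfig reduced to the two fields involved.

```cpp
// Hypothetical sketch only; the actual helper (if any) is not shown in
// this patch set.
struct StreamConfigSketch {
  int sample_rate_hz = 0;
  int num_channels = 0;

  // 10 ms worth of frames at the configured rate, as in StreamConfig.
  int num_frames() const { return sample_rate_hz / 100; }

  // The suggested convenience accessor: total samples per 10 ms chunk.
  int num_samples() const { return num_frames() * num_channels; }
};

// With such a helper, the Convert() call above would shrink to:
//   render_converter_->Convert(src, reverse_input_config.num_samples(),
//                              dest, reverse_output_config.num_samples());
```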
| 697 int AudioProcessingImpl::AnalyzeReverseStream( | 752 int AudioProcessingImpl::AnalyzeReverseStream( |
| 698 const float* const* data, | 753 const float* const* src, |
| 699 const StreamConfig& reverse_config) { | 754 const StreamConfig& reverse_input_config, |
| 755 const StreamConfig& reverse_output_config, | |
| 756 const float* const* dest) { | |
| 700 CriticalSectionScoped crit_scoped(crit_); | 757 CriticalSectionScoped crit_scoped(crit_); |
| 701 if (data == NULL) { | 758 if (src == NULL) { |
| 702 return kNullPointerError; | 759 return kNullPointerError; |
| 703 } | 760 } |
| 704 | 761 |
| 705 if (reverse_config.num_channels() <= 0) { | 762 if (reverse_input_config.num_channels() <= 0) { |
| 706 return kBadNumberChannelsError; | 763 return kBadNumberChannelsError; |
| 707 } | 764 } |
| 708 | 765 |
| 709 ProcessingConfig processing_config = api_format_; | 766 ProcessingConfig processing_config = api_format_; |
| 710 processing_config.reverse_stream() = reverse_config; | 767 processing_config.reverse_input_stream() = reverse_input_config; |
| 768 processing_config.reverse_output_stream() = reverse_output_config; | |
| 711 | 769 |
| 712 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 770 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 713 assert(reverse_config.num_frames() == | 771 assert(reverse_input_config.num_frames() == |
| 714 api_format_.reverse_stream().num_frames()); | 772 api_format_.reverse_input_stream().num_frames()); |
| 715 | 773 |
| 716 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 774 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 717 if (debug_file_->Open()) { | 775 if (debug_file_->Open()) { |
| 718 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 776 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
| 719 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 777 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
| 720 const size_t channel_size = | 778 const size_t channel_size = |
| 721 sizeof(float) * api_format_.reverse_stream().num_frames(); | 779 sizeof(float) * api_format_.reverse_input_stream().num_frames(); |
| 722 for (int i = 0; i < api_format_.reverse_stream().num_channels(); ++i) | 780 for (int i = 0; i < api_format_.reverse_input_stream().num_channels(); ++i) |
| 723 msg->add_channel(data[i], channel_size); | 781 msg->add_channel(src[i], channel_size); |
| 724 RETURN_ON_ERR(WriteMessageToDebugFile()); | 782 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 725 } | 783 } |
| 726 #endif | 784 #endif |
| 727 | 785 |
| 728 render_audio_->CopyFrom(data, api_format_.reverse_stream()); | 786 render_audio_->CopyFrom(src, api_format_.reverse_input_stream()); |
| 729 return AnalyzeReverseStreamLocked(); | 787 return ProcessReverseStreamLocked(); |
| 788 } | |
| 789 | |
| 790 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | |
| 791 RETURN_ON_ERR(AnalyzeReverseStream(frame)); | |
| 792 if (is_rev_processed()) { | |
| 793 render_audio_->InterleaveTo(frame, true); | |
| 794 } | |
| 795 | |
| 796 return kNoError; | |
| 730 } | 797 } |
| 731 | 798 |
| 732 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | 799 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| 733 CriticalSectionScoped crit_scoped(crit_); | 800 CriticalSectionScoped crit_scoped(crit_); |
| 734 if (frame == NULL) { | 801 if (frame == NULL) { |
| 735 return kNullPointerError; | 802 return kNullPointerError; |
| 736 } | 803 } |
| 737 // Must be a native rate. | 804 // Must be a native rate. |
| 738 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 805 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 739 frame->sample_rate_hz_ != kSampleRate16kHz && | 806 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 740 frame->sample_rate_hz_ != kSampleRate32kHz && | 807 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 741 frame->sample_rate_hz_ != kSampleRate48kHz) { | 808 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 742 return kBadSampleRateError; | 809 return kBadSampleRateError; |
| 743 } | 810 } |
| 744 // This interface does not tolerate different forward and reverse rates. | 811 // This interface does not tolerate different forward and reverse rates. |
| 745 if (frame->sample_rate_hz_ != api_format_.input_stream().sample_rate_hz()) { | 812 if (frame->sample_rate_hz_ != api_format_.input_stream().sample_rate_hz()) { |
| 746 return kBadSampleRateError; | 813 return kBadSampleRateError; |
| 747 } | 814 } |
| 748 | 815 |
| 749 if (frame->num_channels_ <= 0) { | 816 if (frame->num_channels_ <= 0) { |
| 750 return kBadNumberChannelsError; | 817 return kBadNumberChannelsError; |
| 751 } | 818 } |
| 752 | 819 |
| 753 ProcessingConfig processing_config = api_format_; | 820 ProcessingConfig processing_config = api_format_; |
| 754 processing_config.reverse_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 821 processing_config.reverse_input_stream().set_sample_rate_hz( |
| 755 processing_config.reverse_stream().set_num_channels(frame->num_channels_); | 822 frame->sample_rate_hz_); |
| 823 processing_config.reverse_input_stream().set_num_channels( | |
| 824 frame->num_channels_); | |
| 825 processing_config.reverse_output_stream().set_sample_rate_hz( | |
| 826 frame->sample_rate_hz_); | |
| 827 processing_config.reverse_output_stream().set_num_channels( | |
| 828 frame->num_channels_); | |
| 756 | 829 |
| 757 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 830 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 758 if (frame->samples_per_channel_ != | 831 if (frame->samples_per_channel_ != |
| 759 api_format_.reverse_stream().num_frames()) { | 832 api_format_.reverse_input_stream().num_frames()) { |
| 760 return kBadDataLengthError; | 833 return kBadDataLengthError; |
| 761 } | 834 } |
| 762 | 835 |
| 763 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 836 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 764 if (debug_file_->Open()) { | 837 if (debug_file_->Open()) { |
| 765 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 838 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
| 766 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 839 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
| 767 const size_t data_size = | 840 const size_t data_size = |
| 768 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 841 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 769 msg->set_data(frame->data_, data_size); | 842 msg->set_data(frame->data_, data_size); |
| 770 RETURN_ON_ERR(WriteMessageToDebugFile()); | 843 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 771 } | 844 } |
| 772 #endif | 845 #endif |
| 773 | |
| 774 render_audio_->DeinterleaveFrom(frame); | 846 render_audio_->DeinterleaveFrom(frame); |
| 775 return AnalyzeReverseStreamLocked(); | 847 return ProcessReverseStreamLocked(); |
| 776 } | 848 } |
| 777 | 849 |
| 778 int AudioProcessingImpl::AnalyzeReverseStreamLocked() { | 850 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| 779 AudioBuffer* ra = render_audio_.get(); // For brevity. | 851 AudioBuffer* ra = render_audio_.get(); // For brevity. |
| 780 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { | 852 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { |
| 781 ra->SplitIntoFrequencyBands(); | 853 ra->SplitIntoFrequencyBands(); |
| 782 } | 854 } |
| 783 | 855 |
| 856 if (intelligibility_enabled_) { | |
| 857 intelligibility_enhancer_->ProcessRenderAudio( | |
| 858 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); | |
| 859 } | |
| 860 | |
| 784 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); | 861 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); |
| 785 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); | 862 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); |
| 786 if (!use_new_agc_) { | 863 if (!use_new_agc_) { |
| 787 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); | 864 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); |
| 788 } | 865 } |
| 789 | 866 |
| 867 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && | |
| 868 is_rev_processed()) { | |
| 869 ra->MergeFrequencyBands(); | |
| 870 } | |
| 871 | |
| 790 return kNoError; | 872 return kNoError; |
| 791 } | 873 } |
| 792 | 874 |
| 793 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 875 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| 794 Error retval = kNoError; | 876 Error retval = kNoError; |
| 795 was_stream_delay_set_ = true; | 877 was_stream_delay_set_ = true; |
| 796 delay += delay_offset_ms_; | 878 delay += delay_offset_ms_; |
| 797 | 879 |
| 798 if (delay < 0) { | 880 if (delay < 0) { |
| 799 delay = 0; | 881 delay = 0; |
| (...skipping 196 matching lines...) | |
| 996 // Only level_estimator_ is enabled. | 1078 // Only level_estimator_ is enabled. |
| 997 return false; | 1079 return false; |
| 998 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1080 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
| 999 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 1081 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
| 1000 // Something besides level_estimator_ is enabled, and we have super-wb. | 1082 // Something besides level_estimator_ is enabled, and we have super-wb. |
| 1001 return true; | 1083 return true; |
| 1002 } | 1084 } |
| 1003 return false; | 1085 return false; |
| 1004 } | 1086 } |
| 1005 | 1087 |
| 1088 bool AudioProcessingImpl::is_rev_processed() const { | |
| 1089 return intelligibility_enabled_ && intelligibility_enhancer_->active(); | |
| 1090 } | |
| 1091 | |
| 1092 bool AudioProcessingImpl::rev_conversion_needed() const { | |
| 1093 return (api_format_.reverse_input_stream() != | |
| 1094 api_format_.reverse_output_stream()); | |
| 1095 } | |
| 1096 | |
| 1006 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1097 void AudioProcessingImpl::InitializeExperimentalAgc() { |
| 1007 if (use_new_agc_) { | 1098 if (use_new_agc_) { |
| 1008 if (!agc_manager_.get()) { | 1099 if (!agc_manager_.get()) { |
| 1009 agc_manager_.reset(new AgcManagerDirect(gain_control_, | 1100 agc_manager_.reset(new AgcManagerDirect(gain_control_, |
| 1010 gain_control_for_new_agc_.get(), | 1101 gain_control_for_new_agc_.get(), |
| 1011 agc_startup_min_volume_)); | 1102 agc_startup_min_volume_)); |
| 1012 } | 1103 } |
| 1013 agc_manager_->Initialize(); | 1104 agc_manager_->Initialize(); |
| 1014 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 1105 agc_manager_->SetCaptureMuted(output_will_be_muted_); |
| 1015 } | 1106 } |
| (...skipping 12 matching lines...) | |
| 1028 | 1119 |
| 1029 void AudioProcessingImpl::InitializeBeamformer() { | 1120 void AudioProcessingImpl::InitializeBeamformer() { |
| 1030 if (beamformer_enabled_) { | 1121 if (beamformer_enabled_) { |
| 1031 if (!beamformer_) { | 1122 if (!beamformer_) { |
| 1032 beamformer_.reset(new NonlinearBeamformer(array_geometry_)); | 1123 beamformer_.reset(new NonlinearBeamformer(array_geometry_)); |
| 1033 } | 1124 } |
| 1034 beamformer_->Initialize(kChunkSizeMs, split_rate_); | 1125 beamformer_->Initialize(kChunkSizeMs, split_rate_); |
| 1035 } | 1126 } |
| 1036 } | 1127 } |
| 1037 | 1128 |
| 1129 void AudioProcessingImpl::InitializeIntelligibility() { | |
| 1130 if (intelligibility_enabled_) { | |
| 1131 IntelligibilityEnhancer::Config config; | |
| 1132 config.sample_rate_hz = split_rate_; | |
| 1133 config.num_capture_channels = capture_audio_->num_channels(); | |
| 1134 config.num_render_channels = render_audio_->num_channels(); | |
| 1135 intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); | |
| 1136 } | |
| 1137 } | |
| 1138 | |
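For context on InitializeIntelligibility(): the feature is gated by the Config passed to the AudioProcessingImpl constructor (config.Get<Intelligibility>().enabled in the constructor diff above). Below is a hedged sketch of enabling it, assuming Intelligibility follows the same bool-constructor pattern as ExperimentalAgc/ExperimentalNs; this is not verified against this patch set.

```cpp
#include "webrtc/common.h"  // webrtc::Config
#include "webrtc/modules/audio_processing/include/audio_processing.h"

// Create an APM instance with the intelligibility enhancer enabled.
webrtc::AudioProcessing* CreateApmWithIntelligibility() {
  webrtc::Config config;
  // Assumption: Intelligibility(bool enabled), like ExperimentalNs.
  config.Set<webrtc::Intelligibility>(new webrtc::Intelligibility(true));
  return webrtc::AudioProcessing::Create(config);
}
```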
| 1038 void AudioProcessingImpl::MaybeUpdateHistograms() { | 1139 void AudioProcessingImpl::MaybeUpdateHistograms() { |
| 1039 static const int kMinDiffDelayMs = 60; | 1140 static const int kMinDiffDelayMs = 60; |
| 1040 | 1141 |
| 1041 if (echo_cancellation()->is_enabled()) { | 1142 if (echo_cancellation()->is_enabled()) { |
| 1042 // Activate delay_jumps_ counters if we know echo_cancellation is running. | 1143 // Activate delay_jumps_ counters if we know echo_cancellation is running. |
| 1043 // If a stream has echo we know that the echo_cancellation is in process. | 1144 // If a stream has echo we know that the echo_cancellation is in process. |
| 1044 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { | 1145 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { |
| 1045 stream_delay_jumps_ = 0; | 1146 stream_delay_jumps_ = 0; |
| 1046 } | 1147 } |
| 1047 if (aec_system_delay_jumps_ == -1 && | 1148 if (aec_system_delay_jumps_ == -1 && |
| (...skipping 78 matching lines...) | |
| 1126 | 1227 |
| 1127 return kNoError; | 1228 return kNoError; |
| 1128 } | 1229 } |
| 1129 | 1230 |
| 1130 int AudioProcessingImpl::WriteInitMessage() { | 1231 int AudioProcessingImpl::WriteInitMessage() { |
| 1131 event_msg_->set_type(audioproc::Event::INIT); | 1232 event_msg_->set_type(audioproc::Event::INIT); |
| 1132 audioproc::Init* msg = event_msg_->mutable_init(); | 1233 audioproc::Init* msg = event_msg_->mutable_init(); |
| 1133 msg->set_sample_rate(api_format_.input_stream().sample_rate_hz()); | 1234 msg->set_sample_rate(api_format_.input_stream().sample_rate_hz()); |
| 1134 msg->set_num_input_channels(api_format_.input_stream().num_channels()); | 1235 msg->set_num_input_channels(api_format_.input_stream().num_channels()); |
| 1135 msg->set_num_output_channels(api_format_.output_stream().num_channels()); | 1236 msg->set_num_output_channels(api_format_.output_stream().num_channels()); |
| 1136 msg->set_num_reverse_channels(api_format_.reverse_stream().num_channels()); | 1237 msg->set_num_reverse_channels( |
| 1137 msg->set_reverse_sample_rate(api_format_.reverse_stream().sample_rate_hz()); | 1238 api_format_.reverse_input_stream().num_channels()); |
| 1239 msg->set_reverse_sample_rate( | |
| 1240 api_format_.reverse_input_stream().sample_rate_hz()); | |
| 1138 msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz()); | 1241 msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz()); |
| 1242 // TODO(ekmeyerson): Add reverse output fields to event_msg_. | |
| 1139 | 1243 |
| 1140 int err = WriteMessageToDebugFile(); | 1244 int err = WriteMessageToDebugFile(); |
| 1141 if (err != kNoError) { | 1245 if (err != kNoError) { |
| 1142 return err; | 1246 return err; |
| 1143 } | 1247 } |
| 1144 | 1248 |
| 1145 return kNoError; | 1249 return kNoError; |
| 1146 } | 1250 } |
| 1147 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1251 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1148 | 1252 |
| 1149 } // namespace webrtc | 1253 } // namespace webrtc |