| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 177 matching lines...) | |
| 188 gain_control_(NULL), | 188 gain_control_(NULL), |
| 189 high_pass_filter_(NULL), | 189 high_pass_filter_(NULL), |
| 190 level_estimator_(NULL), | 190 level_estimator_(NULL), |
| 191 noise_suppression_(NULL), | 191 noise_suppression_(NULL), |
| 192 voice_detection_(NULL), | 192 voice_detection_(NULL), |
| 193 crit_(CriticalSectionWrapper::CreateCriticalSection()), | 193 crit_(CriticalSectionWrapper::CreateCriticalSection()), |
| 194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 195 debug_file_(FileWrapper::Create()), | 195 debug_file_(FileWrapper::Create()), |
| 196 event_msg_(new audioproc::Event()), | 196 event_msg_(new audioproc::Event()), |
| 197 #endif | 197 #endif |
| 198 api_format_({{{kSampleRate16kHz, 1, false}, | |
| 199 {kSampleRate16kHz, 1, false}, | |
| 200 {kSampleRate16kHz, 1, false}, | |
| 201 {kSampleRate16kHz, 1, false}}}), | |
| 202 fwd_proc_format_(kSampleRate16kHz), | 198 fwd_proc_format_(kSampleRate16kHz), |
| 203 rev_proc_format_(kSampleRate16kHz, 1), | 199 rev_proc_format_(kSampleRate16kHz, 1), |
| 204 split_rate_(kSampleRate16kHz), | 200 split_rate_(kSampleRate16kHz), |
| 205 stream_delay_ms_(0), | 201 stream_delay_ms_(0), |
| 206 delay_offset_ms_(0), | 202 delay_offset_ms_(0), |
| 207 was_stream_delay_set_(false), | 203 was_stream_delay_set_(false), |
| 208 last_stream_delay_ms_(0), | 204 last_stream_delay_ms_(0), |
| 209 last_aec_system_delay_ms_(0), | 205 last_aec_system_delay_ms_(0), |
| 210 stream_delay_jumps_(-1), | 206 stream_delay_jumps_(-1), |
| 211 aec_system_delay_jumps_(-1), | 207 aec_system_delay_jumps_(-1), |
| (...skipping 91 matching lines...) | |
| 303 LayoutHasKeyboard(reverse_layout)}}}; | 299 LayoutHasKeyboard(reverse_layout)}}}; |
| 304 | 300 |
| 305 return Initialize(processing_config); | 301 return Initialize(processing_config); |
| 306 } | 302 } |
| 307 | 303 |
| 308 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 304 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
| 309 CriticalSectionScoped crit_scoped(crit_); | 305 CriticalSectionScoped crit_scoped(crit_); |
| 310 return InitializeLocked(processing_config); | 306 return InitializeLocked(processing_config); |
| 311 } | 307 } |
| 312 | 308 |
| 309 // Calls InitializeLocked() if any of the audio parameters have changed from |
| 310 // their current values. |
| 311 int AudioProcessingImpl::MaybeInitializeLocked( |
| 312 const ProcessingConfig& processing_config) { |
| 313 if (processing_config == shared_state_.api_format_) { |
| 314 return kNoError; |
| 315 } |
| 316 return InitializeLocked(processing_config); |
| 317 } |
| 318 |
| 313 int AudioProcessingImpl::InitializeLocked() { | 319 int AudioProcessingImpl::InitializeLocked() { |
| 314 const int fwd_audio_buffer_channels = | 320 const int fwd_audio_buffer_channels = |
| 315 beamformer_enabled_ ? api_format_.input_stream().num_channels() | 321 beamformer_enabled_ |
| 316 : api_format_.output_stream().num_channels(); | 322 ? shared_state_.api_format_.input_stream().num_channels() |
| 323 : shared_state_.api_format_.output_stream().num_channels(); |
| 317 const int rev_audio_buffer_out_num_frames = | 324 const int rev_audio_buffer_out_num_frames = |
| 318 api_format_.reverse_output_stream().num_frames() == 0 | 325 shared_state_.api_format_.reverse_output_stream().num_frames() == 0 |
| 319 ? rev_proc_format_.num_frames() | 326 ? rev_proc_format_.num_frames() |
| 320 : api_format_.reverse_output_stream().num_frames(); | 327 : shared_state_.api_format_.reverse_output_stream().num_frames(); |
| 321 if (api_format_.reverse_input_stream().num_channels() > 0) { | 328 if (shared_state_.api_format_.reverse_input_stream().num_channels() > 0) { |
| 322 render_audio_.reset(new AudioBuffer( | 329 render_audio_.reset(new AudioBuffer( |
| 323 api_format_.reverse_input_stream().num_frames(), | 330 shared_state_.api_format_.reverse_input_stream().num_frames(), |
| 324 api_format_.reverse_input_stream().num_channels(), | 331 shared_state_.api_format_.reverse_input_stream().num_channels(), |
| 325 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), | 332 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), |
| 326 rev_audio_buffer_out_num_frames)); | 333 rev_audio_buffer_out_num_frames)); |
| 327 if (rev_conversion_needed()) { | 334 if (rev_conversion_needed()) { |
| 328 render_converter_ = AudioConverter::Create( | 335 render_converter_ = AudioConverter::Create( |
| 329 api_format_.reverse_input_stream().num_channels(), | 336 shared_state_.api_format_.reverse_input_stream().num_channels(), |
| 330 api_format_.reverse_input_stream().num_frames(), | 337 shared_state_.api_format_.reverse_input_stream().num_frames(), |
| 331 api_format_.reverse_output_stream().num_channels(), | 338 shared_state_.api_format_.reverse_output_stream().num_channels(), |
| 332 api_format_.reverse_output_stream().num_frames()); | 339 shared_state_.api_format_.reverse_output_stream().num_frames()); |
| 333 } else { | 340 } else { |
| 334 render_converter_.reset(nullptr); | 341 render_converter_.reset(nullptr); |
| 335 } | 342 } |
| 336 } else { | 343 } else { |
| 337 render_audio_.reset(nullptr); | 344 render_audio_.reset(nullptr); |
| 338 render_converter_.reset(nullptr); | 345 render_converter_.reset(nullptr); |
| 339 } | 346 } |
| 340 capture_audio_.reset(new AudioBuffer( | 347 capture_audio_.reset( |
| 341 api_format_.input_stream().num_frames(), | 348 new AudioBuffer(shared_state_.api_format_.input_stream().num_frames(), |
| 342 api_format_.input_stream().num_channels(), fwd_proc_format_.num_frames(), | 349 shared_state_.api_format_.input_stream().num_channels(), |
| 343 fwd_audio_buffer_channels, api_format_.output_stream().num_frames())); | 350 fwd_proc_format_.num_frames(), fwd_audio_buffer_channels, |
| 351 shared_state_.api_format_.output_stream().num_frames())); |
| 344 | 352 |
| 345 // Initialize all components. | 353 // Initialize all components. |
| 346 for (auto item : component_list_) { | 354 for (auto item : component_list_) { |
| 347 int err = item->Initialize(); | 355 int err = item->Initialize(); |
| 348 if (err != kNoError) { | 356 if (err != kNoError) { |
| 349 return err; | 357 return err; |
| 350 } | 358 } |
| 351 } | 359 } |
| 352 | 360 |
| 353 InitializeExperimentalAgc(); | 361 InitializeExperimentalAgc(); |
| (...skipping 35 matching lines...) | |
| 389 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { | 397 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { |
| 390 return kBadNumberChannelsError; | 398 return kBadNumberChannelsError; |
| 391 } | 399 } |
| 392 | 400 |
| 393 if (beamformer_enabled_ && | 401 if (beamformer_enabled_ && |
| 394 (static_cast<size_t>(num_in_channels) != array_geometry_.size() || | 402 (static_cast<size_t>(num_in_channels) != array_geometry_.size() || |
| 395 num_out_channels > 1)) { | 403 num_out_channels > 1)) { |
| 396 return kBadNumberChannelsError; | 404 return kBadNumberChannelsError; |
| 397 } | 405 } |
| 398 | 406 |
| 399 api_format_ = config; | 407 shared_state_.api_format_ = config; |
| 400 | 408 |
| 401 // We process at the closest native rate >= min(input rate, output rate)... | 409 // We process at the closest native rate >= min(input rate, output rate)... |
| 402 const int min_proc_rate = | 410 const int min_proc_rate = |
| 403 std::min(api_format_.input_stream().sample_rate_hz(), | 411 std::min(shared_state_.api_format_.input_stream().sample_rate_hz(), |
| 404 api_format_.output_stream().sample_rate_hz()); | 412 shared_state_.api_format_.output_stream().sample_rate_hz()); |
| 405 int fwd_proc_rate; | 413 int fwd_proc_rate; |
| 406 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { | 414 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { |
| 407 fwd_proc_rate = kNativeSampleRatesHz[i]; | 415 fwd_proc_rate = kNativeSampleRatesHz[i]; |
| 408 if (fwd_proc_rate >= min_proc_rate) { | 416 if (fwd_proc_rate >= min_proc_rate) { |
| 409 break; | 417 break; |
| 410 } | 418 } |
| 411 } | 419 } |
| 412 // ...with one exception. | 420 // ...with one exception. |
| 413 if (echo_control_mobile_->is_enabled() && | 421 if (echo_control_mobile_->is_enabled() && |
| 414 min_proc_rate > kMaxAECMSampleRateHz) { | 422 min_proc_rate > kMaxAECMSampleRateHz) { |
| 415 fwd_proc_rate = kMaxAECMSampleRateHz; | 423 fwd_proc_rate = kMaxAECMSampleRateHz; |
| 416 } | 424 } |
| 417 | 425 |
| 418 fwd_proc_format_ = StreamConfig(fwd_proc_rate); | 426 fwd_proc_format_ = StreamConfig(fwd_proc_rate); |
| 419 | 427 |
| 420 // We normally process the reverse stream at 16 kHz. Unless... | 428 // We normally process the reverse stream at 16 kHz. Unless... |
| 421 int rev_proc_rate = kSampleRate16kHz; | 429 int rev_proc_rate = kSampleRate16kHz; |
| 422 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { | 430 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { |
| 423 // ...the forward stream is at 8 kHz. | 431 // ...the forward stream is at 8 kHz. |
| 424 rev_proc_rate = kSampleRate8kHz; | 432 rev_proc_rate = kSampleRate8kHz; |
| 425 } else { | 433 } else { |
| 426 if (api_format_.reverse_input_stream().sample_rate_hz() == | 434 if (shared_state_.api_format_.reverse_input_stream().sample_rate_hz() == |
| 427 kSampleRate32kHz) { | 435 kSampleRate32kHz) { |
| 428 // ...or the input is at 32 kHz, in which case we use the splitting | 436 // ...or the input is at 32 kHz, in which case we use the splitting |
| 429 // filter rather than the resampler. | 437 // filter rather than the resampler. |
| 430 rev_proc_rate = kSampleRate32kHz; | 438 rev_proc_rate = kSampleRate32kHz; |
| 431 } | 439 } |
| 432 } | 440 } |
| 433 | 441 |
| 434 // Always downmix the reverse stream to mono for analysis. This has been | 442 // Always downmix the reverse stream to mono for analysis. This has been |
| 435 // demonstrated to work well for AEC in most practical scenarios. | 443 // demonstrated to work well for AEC in most practical scenarios. |
| 436 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); | 444 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); |
| 437 | 445 |
| 438 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 446 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
| 439 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 447 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
| 440 split_rate_ = kSampleRate16kHz; | 448 split_rate_ = kSampleRate16kHz; |
| 441 } else { | 449 } else { |
| 442 split_rate_ = fwd_proc_format_.sample_rate_hz(); | 450 split_rate_ = fwd_proc_format_.sample_rate_hz(); |
| 443 } | 451 } |
| 444 | 452 |
| 445 return InitializeLocked(); | 453 return InitializeLocked(); |
| 446 } | 454 } |
| 447 | 455 |
| 448 // Calls InitializeLocked() if any of the audio parameters have changed from | |
| 449 // their current values. | |
| 450 int AudioProcessingImpl::MaybeInitializeLocked( | |
| 451 const ProcessingConfig& processing_config) { | |
| 452 if (processing_config == api_format_) { | |
| 453 return kNoError; | |
| 454 } | |
| 455 return InitializeLocked(processing_config); | |
| 456 } | |
| 457 | 456 |
| 458 void AudioProcessingImpl::SetExtraOptions(const Config& config) { | 457 void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
| 459 CriticalSectionScoped crit_scoped(crit_); | 458 CriticalSectionScoped crit_scoped(crit_); |
| 460 for (auto item : component_list_) { | 459 for (auto item : component_list_) { |
| 461 item->SetExtraOptions(config); | 460 item->SetExtraOptions(config); |
| 462 } | 461 } |
| 463 | 462 |
| 464 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { | 463 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { |
| 465 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; | 464 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; |
| 466 InitializeTransient(); | 465 InitializeTransient(); |
| 467 } | 466 } |
| 468 } | 467 } |
| 469 | 468 |
| 470 | 469 |
| 471 int AudioProcessingImpl::proc_sample_rate_hz() const { | 470 int AudioProcessingImpl::proc_sample_rate_hz() const { |
| 472 return fwd_proc_format_.sample_rate_hz(); | 471 return fwd_proc_format_.sample_rate_hz(); |
| 473 } | 472 } |
| 474 | 473 |
| 475 int AudioProcessingImpl::proc_split_sample_rate_hz() const { | 474 int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
| 476 return split_rate_; | 475 return split_rate_; |
| 477 } | 476 } |
| 478 | 477 |
| 479 int AudioProcessingImpl::num_reverse_channels() const { | 478 int AudioProcessingImpl::num_reverse_channels() const { |
| 480 return rev_proc_format_.num_channels(); | 479 return rev_proc_format_.num_channels(); |
| 481 } | 480 } |
| 482 | 481 |
| 483 int AudioProcessingImpl::num_input_channels() const { | 482 int AudioProcessingImpl::num_input_channels() const { |
| 484 return api_format_.input_stream().num_channels(); | 483 return shared_state_.api_format_.input_stream().num_channels(); |
| 485 } | 484 } |
| 486 | 485 |
| 487 int AudioProcessingImpl::num_output_channels() const { | 486 int AudioProcessingImpl::num_output_channels() const { |
| 488 return api_format_.output_stream().num_channels(); | 487 return shared_state_.api_format_.output_stream().num_channels(); |
| 489 } | 488 } |
| 490 | 489 |
| 491 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 490 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
| 492 CriticalSectionScoped lock(crit_); | 491 CriticalSectionScoped lock(crit_); |
| 493 output_will_be_muted_ = muted; | 492 output_will_be_muted_ = muted; |
| 494 if (agc_manager_.get()) { | 493 if (agc_manager_.get()) { |
| 495 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 494 agc_manager_->SetCaptureMuted(output_will_be_muted_); |
| 496 } | 495 } |
| 497 } | 496 } |
| 498 | 497 |
| 499 | 498 |
| 500 int AudioProcessingImpl::ProcessStream(const float* const* src, | 499 int AudioProcessingImpl::ProcessStream(const float* const* src, |
| 501 size_t samples_per_channel, | 500 size_t samples_per_channel, |
| 502 int input_sample_rate_hz, | 501 int input_sample_rate_hz, |
| 503 ChannelLayout input_layout, | 502 ChannelLayout input_layout, |
| 504 int output_sample_rate_hz, | 503 int output_sample_rate_hz, |
| 505 ChannelLayout output_layout, | 504 ChannelLayout output_layout, |
| 506 float* const* dest) { | 505 float* const* dest) { |
| 507 CriticalSectionScoped crit_scoped(crit_); | 506 CriticalSectionScoped crit_scoped(crit_); |
| 508 StreamConfig input_stream = api_format_.input_stream(); | 507 StreamConfig input_stream = shared_state_.api_format_.input_stream(); |
| 509 input_stream.set_sample_rate_hz(input_sample_rate_hz); | 508 input_stream.set_sample_rate_hz(input_sample_rate_hz); |
| 510 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); | 509 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
| 511 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); | 510 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); |
| 512 | 511 |
| 513 StreamConfig output_stream = api_format_.output_stream(); | 512 StreamConfig output_stream = shared_state_.api_format_.output_stream(); |
| 514 output_stream.set_sample_rate_hz(output_sample_rate_hz); | 513 output_stream.set_sample_rate_hz(output_sample_rate_hz); |
| 515 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); | 514 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); |
| 516 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); | 515 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); |
| 517 | 516 |
| 518 if (samples_per_channel != input_stream.num_frames()) { | 517 if (samples_per_channel != input_stream.num_frames()) { |
| 519 return kBadDataLengthError; | 518 return kBadDataLengthError; |
| 520 } | 519 } |
| 521 return ProcessStream(src, input_stream, output_stream, dest); | 520 return ProcessStream(src, input_stream, output_stream, dest); |
| 522 } | 521 } |
| 523 | 522 |
| 524 int AudioProcessingImpl::ProcessStream(const float* const* src, | 523 int AudioProcessingImpl::ProcessStream(const float* const* src, |
| 525 const StreamConfig& input_config, | 524 const StreamConfig& input_config, |
| 526 const StreamConfig& output_config, | 525 const StreamConfig& output_config, |
| 527 float* const* dest) { | 526 float* const* dest) { |
| 528 CriticalSectionScoped crit_scoped(crit_); | 527 CriticalSectionScoped crit_scoped(crit_); |
| 529 if (!src || !dest) { | 528 if (!src || !dest) { |
| 530 return kNullPointerError; | 529 return kNullPointerError; |
| 531 } | 530 } |
| 532 | 531 |
| 533 echo_cancellation_->ReadQueuedRenderData(); | 532 echo_cancellation_->ReadQueuedRenderData(); |
| 534 echo_control_mobile_->ReadQueuedRenderData(); | 533 echo_control_mobile_->ReadQueuedRenderData(); |
| 535 gain_control_->ReadQueuedRenderData(); | 534 gain_control_->ReadQueuedRenderData(); |
| 536 | 535 |
| 537 ProcessingConfig processing_config = api_format_; | 536 ProcessingConfig processing_config = shared_state_.api_format_; |
| 538 processing_config.input_stream() = input_config; | 537 processing_config.input_stream() = input_config; |
| 539 processing_config.output_stream() = output_config; | 538 processing_config.output_stream() = output_config; |
| 540 | 539 |
| 541 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 540 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 542 assert(processing_config.input_stream().num_frames() == | 541 assert(processing_config.input_stream().num_frames() == |
| 543 api_format_.input_stream().num_frames()); | 542 shared_state_.api_format_.input_stream().num_frames()); |
| 544 | 543 |
| 545 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 544 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 546 if (debug_file_->Open()) { | 545 if (debug_file_->Open()) { |
| 547 RETURN_ON_ERR(WriteConfigMessage(false)); | 546 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 548 | 547 |
| 549 event_msg_->set_type(audioproc::Event::STREAM); | 548 event_msg_->set_type(audioproc::Event::STREAM); |
| 550 audioproc::Stream* msg = event_msg_->mutable_stream(); | 549 audioproc::Stream* msg = event_msg_->mutable_stream(); |
| 551 const size_t channel_size = | 550 const size_t channel_size = |
| 552 sizeof(float) * api_format_.input_stream().num_frames(); | 551 sizeof(float) * shared_state_.api_format_.input_stream().num_frames(); |
| 553 for (int i = 0; i < api_format_.input_stream().num_channels(); ++i) | 552 for (int i = 0; i < shared_state_.api_format_.input_stream().num_channels(); |
| 553 ++i) |
| 554 msg->add_input_channel(src[i], channel_size); | 554 msg->add_input_channel(src[i], channel_size); |
| 555 } | 555 } |
| 556 #endif | 556 #endif |
| 557 | 557 |
| 558 capture_audio_->CopyFrom(src, api_format_.input_stream()); | 558 capture_audio_->CopyFrom(src, shared_state_.api_format_.input_stream()); |
| 559 RETURN_ON_ERR(ProcessStreamLocked()); | 559 RETURN_ON_ERR(ProcessStreamLocked()); |
| 560 capture_audio_->CopyTo(api_format_.output_stream(), dest); | 560 capture_audio_->CopyTo(shared_state_.api_format_.output_stream(), dest); |
| 561 | 561 |
| 562 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 562 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 563 if (debug_file_->Open()) { | 563 if (debug_file_->Open()) { |
| 564 audioproc::Stream* msg = event_msg_->mutable_stream(); | 564 audioproc::Stream* msg = event_msg_->mutable_stream(); |
| 565 const size_t channel_size = | 565 const size_t channel_size = |
| 566 sizeof(float) * api_format_.output_stream().num_frames(); | 566 sizeof(float) * shared_state_.api_format_.output_stream().num_frames(); |
| 567 for (int i = 0; i < api_format_.output_stream().num_channels(); ++i) | 567 for (int i = 0; |
| 568 i < shared_state_.api_format_.output_stream().num_channels(); ++i) |
| 568 msg->add_output_channel(dest[i], channel_size); | 569 msg->add_output_channel(dest[i], channel_size); |
| 569 RETURN_ON_ERR(WriteMessageToDebugFile()); | 570 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 570 } | 571 } |
| 571 #endif | 572 #endif |
| 572 | 573 |
| 573 return kNoError; | 574 return kNoError; |
| 574 } | 575 } |
| 575 | 576 |
| 576 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 577 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| 577 CriticalSectionScoped crit_scoped(crit_); | 578 CriticalSectionScoped crit_scoped(crit_); |
| 578 echo_cancellation_->ReadQueuedRenderData(); | 579 echo_cancellation_->ReadQueuedRenderData(); |
| 579 echo_control_mobile_->ReadQueuedRenderData(); | 580 echo_control_mobile_->ReadQueuedRenderData(); |
| 580 gain_control_->ReadQueuedRenderData(); | 581 gain_control_->ReadQueuedRenderData(); |
| 581 | 582 |
| 582 if (!frame) { | 583 if (!frame) { |
| 583 return kNullPointerError; | 584 return kNullPointerError; |
| 584 } | 585 } |
| 585 // Must be a native rate. | 586 // Must be a native rate. |
| 586 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 587 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 587 frame->sample_rate_hz_ != kSampleRate16kHz && | 588 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 588 frame->sample_rate_hz_ != kSampleRate32kHz && | 589 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 589 frame->sample_rate_hz_ != kSampleRate48kHz) { | 590 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 590 return kBadSampleRateError; | 591 return kBadSampleRateError; |
| 591 } | 592 } |
| 593 |
| 592 if (echo_control_mobile_->is_enabled() && | 594 if (echo_control_mobile_->is_enabled() && |
| 593 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { | 595 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { |
| 594 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; | 596 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; |
| 595 return kUnsupportedComponentError; | 597 return kUnsupportedComponentError; |
| 596 } | 598 } |
| 597 | 599 |
| 598 // TODO(ajm): The input and output rates and channels are currently | 600 // TODO(ajm): The input and output rates and channels are currently |
| 599 // constrained to be identical in the int16 interface. | 601 // constrained to be identical in the int16 interface. |
| 600 ProcessingConfig processing_config = api_format_; | 602 ProcessingConfig processing_config = shared_state_.api_format_; |
| 601 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 603 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| 602 processing_config.input_stream().set_num_channels(frame->num_channels_); | 604 processing_config.input_stream().set_num_channels(frame->num_channels_); |
| 603 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 605 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| 604 processing_config.output_stream().set_num_channels(frame->num_channels_); | 606 processing_config.output_stream().set_num_channels(frame->num_channels_); |
| 605 | 607 |
| 606 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 608 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 607 if (frame->samples_per_channel_ != api_format_.input_stream().num_frames()) { | 609 if (frame->samples_per_channel_ != |
| 610 shared_state_.api_format_.input_stream().num_frames()) { |
| 608 return kBadDataLengthError; | 611 return kBadDataLengthError; |
| 609 } | 612 } |
| 610 | 613 |
| 611 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 614 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 612 if (debug_file_->Open()) { | 615 if (debug_file_->Open()) { |
| 613 event_msg_->set_type(audioproc::Event::STREAM); | 616 event_msg_->set_type(audioproc::Event::STREAM); |
| 614 audioproc::Stream* msg = event_msg_->mutable_stream(); | 617 audioproc::Stream* msg = event_msg_->mutable_stream(); |
| 615 const size_t data_size = | 618 const size_t data_size = |
| 616 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 619 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 617 msg->set_input_data(frame->data_, data_size); | 620 msg->set_input_data(frame->data_, data_size); |
| (...skipping 109 matching lines...) | |
| 727 } | 730 } |
| 728 | 731 |
| 729 int AudioProcessingImpl::ProcessReverseStream( | 732 int AudioProcessingImpl::ProcessReverseStream( |
| 730 const float* const* src, | 733 const float* const* src, |
| 731 const StreamConfig& reverse_input_config, | 734 const StreamConfig& reverse_input_config, |
| 732 const StreamConfig& reverse_output_config, | 735 const StreamConfig& reverse_output_config, |
| 733 float* const* dest) { | 736 float* const* dest) { |
| 734 RETURN_ON_ERR( | 737 RETURN_ON_ERR( |
| 735 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); | 738 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); |
| 736 if (is_rev_processed()) { | 739 if (is_rev_processed()) { |
| 737 render_audio_->CopyTo(api_format_.reverse_output_stream(), dest); | 740 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), |
| 741 dest); |
| 738 } else if (rev_conversion_needed()) { | 742 } else if (rev_conversion_needed()) { |
| 739 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, | 743 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, |
| 740 reverse_output_config.num_samples()); | 744 reverse_output_config.num_samples()); |
| 741 } else { | 745 } else { |
| 742 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | 746 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
| 743 reverse_input_config.num_channels(), dest); | 747 reverse_input_config.num_channels(), dest); |
| 744 } | 748 } |
| 745 | 749 |
| 746 return kNoError; | 750 return kNoError; |
| 747 } | 751 } |
| 748 | 752 |
| 749 int AudioProcessingImpl::AnalyzeReverseStream( | 753 int AudioProcessingImpl::AnalyzeReverseStream( |
| 750 const float* const* src, | 754 const float* const* src, |
| 751 const StreamConfig& reverse_input_config, | 755 const StreamConfig& reverse_input_config, |
| 752 const StreamConfig& reverse_output_config) { | 756 const StreamConfig& reverse_output_config) { |
| 753 CriticalSectionScoped crit_scoped(crit_); | 757 CriticalSectionScoped crit_scoped(crit_); |
| 754 if (src == NULL) { | 758 if (src == NULL) { |
| 755 return kNullPointerError; | 759 return kNullPointerError; |
| 756 } | 760 } |
| 757 | 761 |
| 758 if (reverse_input_config.num_channels() <= 0) { | 762 if (reverse_input_config.num_channels() <= 0) { |
| 759 return kBadNumberChannelsError; | 763 return kBadNumberChannelsError; |
| 760 } | 764 } |
| 761 | 765 |
| 762 ProcessingConfig processing_config = api_format_; | 766 ProcessingConfig processing_config = shared_state_.api_format_; |
| 763 processing_config.reverse_input_stream() = reverse_input_config; | 767 processing_config.reverse_input_stream() = reverse_input_config; |
| 764 processing_config.reverse_output_stream() = reverse_output_config; | 768 processing_config.reverse_output_stream() = reverse_output_config; |
| 765 | 769 |
| 766 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 770 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 767 assert(reverse_input_config.num_frames() == | 771 assert(reverse_input_config.num_frames() == |
| 768 api_format_.reverse_input_stream().num_frames()); | 772 shared_state_.api_format_.reverse_input_stream().num_frames()); |
| 769 | 773 |
| 770 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 774 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 771 if (debug_file_->Open()) { | 775 if (debug_file_->Open()) { |
| 772 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 776 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
| 773 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 777 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
| 774 const size_t channel_size = | 778 const size_t channel_size = |
| 775 sizeof(float) * api_format_.reverse_input_stream().num_frames(); | 779 sizeof(float) * |
| 776 for (int i = 0; i < api_format_.reverse_input_stream().num_channels(); ++i) | 780 shared_state_.api_format_.reverse_input_stream().num_frames(); |
| 781 for (int i = 0; |
| 782 i < shared_state_.api_format_.reverse_input_stream().num_channels(); |
| 783 ++i) |
| 777 msg->add_channel(src[i], channel_size); | 784 msg->add_channel(src[i], channel_size); |
| 778 RETURN_ON_ERR(WriteMessageToDebugFile()); | 785 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 779 } | 786 } |
| 780 #endif | 787 #endif |
| 781 | 788 |
| 782 render_audio_->CopyFrom(src, api_format_.reverse_input_stream()); | 789 render_audio_->CopyFrom(src, |
| 790 shared_state_.api_format_.reverse_input_stream()); |
| 783 return ProcessReverseStreamLocked(); | 791 return ProcessReverseStreamLocked(); |
| 784 } | 792 } |
| 785 | 793 |
| 786 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 794 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 787 RETURN_ON_ERR(AnalyzeReverseStream(frame)); | 795 RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
| 788 if (is_rev_processed()) { | 796 if (is_rev_processed()) { |
| 789 render_audio_->InterleaveTo(frame, true); | 797 render_audio_->InterleaveTo(frame, true); |
| 790 } | 798 } |
| 791 | 799 |
| 792 return kNoError; | 800 return kNoError; |
| 793 } | 801 } |
| 794 | 802 |
| 795 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | 803 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| 796 CriticalSectionScoped crit_scoped(crit_); | 804 CriticalSectionScoped crit_scoped(crit_); |
| 797 if (frame == NULL) { | 805 if (frame == NULL) { |
| 798 return kNullPointerError; | 806 return kNullPointerError; |
| 799 } | 807 } |
| 800 // Must be a native rate. | 808 // Must be a native rate. |
| 801 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 809 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 802 frame->sample_rate_hz_ != kSampleRate16kHz && | 810 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 803 frame->sample_rate_hz_ != kSampleRate32kHz && | 811 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 804 frame->sample_rate_hz_ != kSampleRate48kHz) { | 812 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 805 return kBadSampleRateError; | 813 return kBadSampleRateError; |
| 806 } | 814 } |
| 807 // This interface does not tolerate different forward and reverse rates. | 815 // This interface does not tolerate different forward and reverse rates. |
| 808 if (frame->sample_rate_hz_ != api_format_.input_stream().sample_rate_hz()) { | 816 if (frame->sample_rate_hz_ != |
| 817 shared_state_.api_format_.input_stream().sample_rate_hz()) { |
| 809 return kBadSampleRateError; | 818 return kBadSampleRateError; |
| 810 } | 819 } |
| 811 | 820 |
| 812 if (frame->num_channels_ <= 0) { | 821 if (frame->num_channels_ <= 0) { |
| 813 return kBadNumberChannelsError; | 822 return kBadNumberChannelsError; |
| 814 } | 823 } |
| 815 | 824 |
| 816 ProcessingConfig processing_config = api_format_; | 825 ProcessingConfig processing_config = shared_state_.api_format_; |
| 817 processing_config.reverse_input_stream().set_sample_rate_hz( | 826 processing_config.reverse_input_stream().set_sample_rate_hz( |
| 818 frame->sample_rate_hz_); | 827 frame->sample_rate_hz_); |
| 819 processing_config.reverse_input_stream().set_num_channels( | 828 processing_config.reverse_input_stream().set_num_channels( |
| 820 frame->num_channels_); | 829 frame->num_channels_); |
| 821 processing_config.reverse_output_stream().set_sample_rate_hz( | 830 processing_config.reverse_output_stream().set_sample_rate_hz( |
| 822 frame->sample_rate_hz_); | 831 frame->sample_rate_hz_); |
| 823 processing_config.reverse_output_stream().set_num_channels( | 832 processing_config.reverse_output_stream().set_num_channels( |
| 824 frame->num_channels_); | 833 frame->num_channels_); |
| 825 | 834 |
| 826 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 835 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 827 if (frame->samples_per_channel_ != | 836 if (frame->samples_per_channel_ != |
| 828 api_format_.reverse_input_stream().num_frames()) { | 837 shared_state_.api_format_.reverse_input_stream().num_frames()) { |
| 829 return kBadDataLengthError; | 838 return kBadDataLengthError; |
| 830 } | 839 } |
| 831 | 840 |
| 832 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 841 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 833 if (debug_file_->Open()) { | 842 if (debug_file_->Open()) { |
| 834 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 843 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
| 835 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 844 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
| 836 const size_t data_size = | 845 const size_t data_size = |
| 837 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 846 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 838 msg->set_data(frame->data_, data_size); | 847 msg->set_data(frame->data_, data_size); |
| (...skipping 203 matching lines...) | |
| 1042 } else if (enabled_count == 2) { | 1051 } else if (enabled_count == 2) { |
| 1043 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { | 1052 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { |
| 1044 return false; | 1053 return false; |
| 1045 } | 1054 } |
| 1046 } | 1055 } |
| 1047 return true; | 1056 return true; |
| 1048 } | 1057 } |
| 1049 | 1058 |
| 1050 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { | 1059 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { |
| 1051 // Check if we've upmixed or downmixed the audio. | 1060 // Check if we've upmixed or downmixed the audio. |
| 1052 return ((api_format_.output_stream().num_channels() != | 1061 return ((shared_state_.api_format_.output_stream().num_channels() != |
| 1053 api_format_.input_stream().num_channels()) || | 1062 shared_state_.api_format_.input_stream().num_channels()) || |
| 1054 is_data_processed || transient_suppressor_enabled_); | 1063 is_data_processed || transient_suppressor_enabled_); |
| 1055 } | 1064 } |
| 1056 | 1065 |
| 1057 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { | 1066 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { |
| 1058 return (is_data_processed && | 1067 return (is_data_processed && |
| 1059 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1068 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
| 1060 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); | 1069 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); |
| 1061 } | 1070 } |
| 1062 | 1071 |
| 1063 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { | 1072 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
| 1064 if (!is_data_processed && !voice_detection_->is_enabled() && | 1073 if (!is_data_processed && !voice_detection_->is_enabled() && |
| 1065 !transient_suppressor_enabled_) { | 1074 !transient_suppressor_enabled_) { |
| 1066 // Only level_estimator_ is enabled. | 1075 // Only level_estimator_ is enabled. |
| 1067 return false; | 1076 return false; |
| 1068 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1077 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
| 1069 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 1078 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
| 1070 // Something besides level_estimator_ is enabled, and we have super-wb. | 1079 // Something besides level_estimator_ is enabled, and we have super-wb. |
| 1071 return true; | 1080 return true; |
| 1072 } | 1081 } |
| 1073 return false; | 1082 return false; |
| 1074 } | 1083 } |
| 1075 | 1084 |
| 1076 bool AudioProcessingImpl::is_rev_processed() const { | 1085 bool AudioProcessingImpl::is_rev_processed() const { |
| 1077 return intelligibility_enabled_ && intelligibility_enhancer_->active(); | 1086 return intelligibility_enabled_ && intelligibility_enhancer_->active(); |
| 1078 } | 1087 } |
| 1079 | 1088 |
| 1080 bool AudioProcessingImpl::rev_conversion_needed() const { | 1089 bool AudioProcessingImpl::rev_conversion_needed() const { |
| 1081 return (api_format_.reverse_input_stream() != | 1090 return (shared_state_.api_format_.reverse_input_stream() != |
| 1082 api_format_.reverse_output_stream()); | 1091 shared_state_.api_format_.reverse_output_stream()); |
| 1083 } | 1092 } |
| 1084 | 1093 |
| 1085 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1094 void AudioProcessingImpl::InitializeExperimentalAgc() { |
| 1086 if (use_new_agc_) { | 1095 if (use_new_agc_) { |
| 1087 if (!agc_manager_.get()) { | 1096 if (!agc_manager_.get()) { |
| 1088 agc_manager_.reset(new AgcManagerDirect(gain_control_, | 1097 agc_manager_.reset(new AgcManagerDirect(gain_control_, |
| 1089 gain_control_for_new_agc_.get(), | 1098 gain_control_for_new_agc_.get(), |
| 1090 agc_startup_min_volume_)); | 1099 agc_startup_min_volume_)); |
| 1091 } | 1100 } |
| 1092 agc_manager_->Initialize(); | 1101 agc_manager_->Initialize(); |
| 1093 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 1102 agc_manager_->SetCaptureMuted(output_will_be_muted_); |
| 1094 } | 1103 } |
| 1095 } | 1104 } |
| 1096 | 1105 |
| 1097 void AudioProcessingImpl::InitializeTransient() { | 1106 void AudioProcessingImpl::InitializeTransient() { |
| 1098 if (transient_suppressor_enabled_) { | 1107 if (transient_suppressor_enabled_) { |
| 1099 if (!transient_suppressor_.get()) { | 1108 if (!transient_suppressor_.get()) { |
| 1100 transient_suppressor_.reset(new TransientSuppressor()); | 1109 transient_suppressor_.reset(new TransientSuppressor()); |
| 1101 } | 1110 } |
| 1102 transient_suppressor_->Initialize( | 1111 transient_suppressor_->Initialize( |
| 1103 fwd_proc_format_.sample_rate_hz(), split_rate_, | 1112 fwd_proc_format_.sample_rate_hz(), split_rate_, |
| 1104 api_format_.output_stream().num_channels()); | 1113 shared_state_.api_format_.output_stream().num_channels()); |
| 1105 } | 1114 } |
| 1106 } | 1115 } |
| 1107 | 1116 |
| 1108 void AudioProcessingImpl::InitializeBeamformer() { | 1117 void AudioProcessingImpl::InitializeBeamformer() { |
| 1109 if (beamformer_enabled_) { | 1118 if (beamformer_enabled_) { |
| 1110 if (!beamformer_) { | 1119 if (!beamformer_) { |
| 1111 beamformer_.reset( | 1120 beamformer_.reset( |
| 1112 new NonlinearBeamformer(array_geometry_, target_direction_)); | 1121 new NonlinearBeamformer(array_geometry_, target_direction_)); |
| 1113 } | 1122 } |
| 1114 beamformer_->Initialize(kChunkSizeMs, split_rate_); | 1123 beamformer_->Initialize(kChunkSizeMs, split_rate_); |
| (...skipping 98 matching lines...) | |
| 1213 } | 1222 } |
| 1214 | 1223 |
| 1215 event_msg_->Clear(); | 1224 event_msg_->Clear(); |
| 1216 | 1225 |
| 1217 return kNoError; | 1226 return kNoError; |
| 1218 } | 1227 } |
| 1219 | 1228 |
| 1220 int AudioProcessingImpl::WriteInitMessage() { | 1229 int AudioProcessingImpl::WriteInitMessage() { |
| 1221 event_msg_->set_type(audioproc::Event::INIT); | 1230 event_msg_->set_type(audioproc::Event::INIT); |
| 1222 audioproc::Init* msg = event_msg_->mutable_init(); | 1231 audioproc::Init* msg = event_msg_->mutable_init(); |
| 1223 msg->set_sample_rate(api_format_.input_stream().sample_rate_hz()); | 1232 msg->set_sample_rate( |
| 1224 msg->set_num_input_channels(api_format_.input_stream().num_channels()); | 1233 shared_state_.api_format_.input_stream().sample_rate_hz()); |
| 1225 msg->set_num_output_channels(api_format_.output_stream().num_channels()); | 1234 msg->set_num_input_channels( |
| 1235 shared_state_.api_format_.input_stream().num_channels()); |
| 1236 msg->set_num_output_channels( |
| 1237 shared_state_.api_format_.output_stream().num_channels()); |
| 1226 msg->set_num_reverse_channels( | 1238 msg->set_num_reverse_channels( |
| 1227 api_format_.reverse_input_stream().num_channels()); | 1239 shared_state_.api_format_.reverse_input_stream().num_channels()); |
| 1228 msg->set_reverse_sample_rate( | 1240 msg->set_reverse_sample_rate( |
| 1229 api_format_.reverse_input_stream().sample_rate_hz()); | 1241 shared_state_.api_format_.reverse_input_stream().sample_rate_hz()); |
| 1230 msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz()); | 1242 msg->set_output_sample_rate( |
| 1243 shared_state_.api_format_.output_stream().sample_rate_hz()); |
| 1231 // TODO(ekmeyerson): Add reverse output fields to event_msg_. | 1244 // TODO(ekmeyerson): Add reverse output fields to event_msg_. |
| 1232 | 1245 |
| 1233 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1246 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 1234 return kNoError; | 1247 return kNoError; |
| 1235 } | 1248 } |
| 1236 | 1249 |
| 1237 int AudioProcessingImpl::WriteConfigMessage(bool forced) { | 1250 int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
| 1238 audioproc::Config config; | 1251 audioproc::Config config; |
| 1239 | 1252 |
| 1240 config.set_aec_enabled(echo_cancellation_->is_enabled()); | 1253 config.set_aec_enabled(echo_cancellation_->is_enabled()); |
| (...skipping 33 matching lines...) | |
| 1274 | 1287 |
| 1275 event_msg_->set_type(audioproc::Event::CONFIG); | 1288 event_msg_->set_type(audioproc::Event::CONFIG); |
| 1276 event_msg_->mutable_config()->CopyFrom(config); | 1289 event_msg_->mutable_config()->CopyFrom(config); |
| 1277 | 1290 |
| 1278 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1291 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 1279 return kNoError; | 1292 return kNoError; |
| 1280 } | 1293 } |
| 1281 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1294 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1282 | 1295 |
| 1283 } // namespace webrtc | 1296 } // namespace webrtc |
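A minimal, self-contained sketch of the pattern the relocated `MaybeInitializeLocked()` follows on the NEW side of the diff: cache the last-applied stream configuration and fall through to a full re-initialization only when a caller passes a different one. This is not WebRTC code; the `StreamSetup` and `Processor` names and the console output are hypothetical, standing in for `ProcessingConfig`, `AudioProcessingImpl`, and the buffer rebuilds done by `InitializeLocked()`.

```cpp
#include <iostream>

// Hypothetical stand-in for ProcessingConfig: two fields and an equality test.
struct StreamSetup {
  int sample_rate_hz = 16000;
  int num_channels = 1;
  bool operator==(const StreamSetup& other) const {
    return sample_rate_hz == other.sample_rate_hz &&
           num_channels == other.num_channels;
  }
};

class Processor {
 public:
  // Mirrors the MaybeInitialize idea: a no-op when the requested format
  // already matches the cached one, a full re-init otherwise.
  int MaybeInitialize(const StreamSetup& setup) {
    if (setup == current_) {
      return 0;  // Nothing changed; skip the expensive rebuild.
    }
    return Initialize(setup);
  }

 private:
  int Initialize(const StreamSetup& setup) {
    current_ = setup;  // Adopt the new format; real code rebuilds buffers here.
    std::cout << "Reinitialized at " << setup.sample_rate_hz << " Hz, "
              << setup.num_channels << " channel(s)\n";
    return 0;
  }

  StreamSetup current_;
};

int main() {
  Processor p;
  StreamSetup setup;
  setup.sample_rate_hz = 48000;
  p.MaybeInitialize(setup);  // Re-initializes: format differs from the cache.
  p.MaybeInitialize(setup);  // No-op: the cached format already matches.
  return 0;
}
```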