Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_processing/audio_processing_impl.cc

Issue 1413093002: Lock scheme #6: Preparational work before introducing the locks in order to harmonize the code (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@introduce_queue_agc_CL
Patch Set: Merge (created 5 years, 1 month ago)
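
To make the intent of the change easier to follow, here is a minimal, self-contained C++ sketch of the pattern this CL prepares for: the API format state is grouped behind a shared_state_ member, and the pipeline is re-initialized only when the requested configuration actually changes, as MaybeInitializeLocked() does in the patch below. All names here (ApmLite, StreamConfigLite, SharedState) are hypothetical stand-ins rather than the actual WebRTC declarations, and std::mutex only stands in for the lock a later CL would introduce; the patch itself still uses CriticalSectionWrapper.

#include <mutex>

// Stand-in for webrtc::StreamConfig: just enough state to compare formats.
struct StreamConfigLite {
  int sample_rate_hz = 16000;
  int num_channels = 1;
  bool operator==(const StreamConfigLite& o) const {
    return sample_rate_hz == o.sample_rate_hz &&
           num_channels == o.num_channels;
  }
};

class ApmLite {
 public:
  // Re-initialize only when the format changed (cf. MaybeInitializeLocked()).
  int MaybeInitialize(const StreamConfigLite& config) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (config == shared_state_.api_format) {
      return 0;  // kNoError: format unchanged, skip buffer re-allocation.
    }
    shared_state_.api_format = config;
    return InitializeLocked();
  }

 private:
  int InitializeLocked() {
    // The real code re-creates audio buffers and converters for the new format.
    return 0;
  }

  // State shared between the capture- and render-side APIs, grouped so that a
  // single lock can guard it once the lock scheme lands.
  struct SharedState {
    StreamConfigLite api_format;
  } shared_state_;
  std::mutex mutex_;
};
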
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 177 matching lines...)
188 gain_control_(NULL), 188 gain_control_(NULL),
189 high_pass_filter_(NULL), 189 high_pass_filter_(NULL),
190 level_estimator_(NULL), 190 level_estimator_(NULL),
191 noise_suppression_(NULL), 191 noise_suppression_(NULL),
192 voice_detection_(NULL), 192 voice_detection_(NULL),
193 crit_(CriticalSectionWrapper::CreateCriticalSection()), 193 crit_(CriticalSectionWrapper::CreateCriticalSection()),
194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
195 debug_file_(FileWrapper::Create()), 195 debug_file_(FileWrapper::Create()),
196 event_msg_(new audioproc::Event()), 196 event_msg_(new audioproc::Event()),
197 #endif 197 #endif
198 api_format_({{{kSampleRate16kHz, 1, false},
199 {kSampleRate16kHz, 1, false},
200 {kSampleRate16kHz, 1, false},
201 {kSampleRate16kHz, 1, false}}}),
202 fwd_proc_format_(kSampleRate16kHz), 198 fwd_proc_format_(kSampleRate16kHz),
203 rev_proc_format_(kSampleRate16kHz, 1), 199 rev_proc_format_(kSampleRate16kHz, 1),
204 split_rate_(kSampleRate16kHz), 200 split_rate_(kSampleRate16kHz),
205 stream_delay_ms_(0), 201 stream_delay_ms_(0),
206 delay_offset_ms_(0), 202 delay_offset_ms_(0),
207 was_stream_delay_set_(false), 203 was_stream_delay_set_(false),
208 last_stream_delay_ms_(0), 204 last_stream_delay_ms_(0),
209 last_aec_system_delay_ms_(0), 205 last_aec_system_delay_ms_(0),
210 stream_delay_jumps_(-1), 206 stream_delay_jumps_(-1),
211 aec_system_delay_jumps_(-1), 207 aec_system_delay_jumps_(-1),
(...skipping 90 matching lines...)
302 LayoutHasKeyboard(reverse_layout)}}}; 298 LayoutHasKeyboard(reverse_layout)}}};
303 299
304 return Initialize(processing_config); 300 return Initialize(processing_config);
305 } 301 }
306 302
307 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { 303 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
308 CriticalSectionScoped crit_scoped(crit_); 304 CriticalSectionScoped crit_scoped(crit_);
309 return InitializeLocked(processing_config); 305 return InitializeLocked(processing_config);
310 } 306 }
311 307
308 // Calls InitializeLocked() if any of the audio parameters have changed from
309 // their current values.
310 int AudioProcessingImpl::MaybeInitializeLocked(
311 const ProcessingConfig& processing_config) {
312 if (processing_config == shared_state_.api_format_) {
313 return kNoError;
314 }
315 return InitializeLocked(processing_config);
316 }
317
312 int AudioProcessingImpl::InitializeLocked() { 318 int AudioProcessingImpl::InitializeLocked() {
313 const int fwd_audio_buffer_channels = 319 const int fwd_audio_buffer_channels =
314 beamformer_enabled_ ? api_format_.input_stream().num_channels() 320 beamformer_enabled_
315 : api_format_.output_stream().num_channels(); 321 ? shared_state_.api_format_.input_stream().num_channels()
322 : shared_state_.api_format_.output_stream().num_channels();
316 const int rev_audio_buffer_out_num_frames = 323 const int rev_audio_buffer_out_num_frames =
317 api_format_.reverse_output_stream().num_frames() == 0 324 shared_state_.api_format_.reverse_output_stream().num_frames() == 0
318 ? rev_proc_format_.num_frames() 325 ? rev_proc_format_.num_frames()
319 : api_format_.reverse_output_stream().num_frames(); 326 : shared_state_.api_format_.reverse_output_stream().num_frames();
320 if (api_format_.reverse_input_stream().num_channels() > 0) { 327 if (shared_state_.api_format_.reverse_input_stream().num_channels() > 0) {
321 render_audio_.reset(new AudioBuffer( 328 render_audio_.reset(new AudioBuffer(
322 api_format_.reverse_input_stream().num_frames(), 329 shared_state_.api_format_.reverse_input_stream().num_frames(),
323 api_format_.reverse_input_stream().num_channels(), 330 shared_state_.api_format_.reverse_input_stream().num_channels(),
324 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), 331 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(),
325 rev_audio_buffer_out_num_frames)); 332 rev_audio_buffer_out_num_frames));
326 if (rev_conversion_needed()) { 333 if (rev_conversion_needed()) {
327 render_converter_ = AudioConverter::Create( 334 render_converter_ = AudioConverter::Create(
328 api_format_.reverse_input_stream().num_channels(), 335 shared_state_.api_format_.reverse_input_stream().num_channels(),
329 api_format_.reverse_input_stream().num_frames(), 336 shared_state_.api_format_.reverse_input_stream().num_frames(),
330 api_format_.reverse_output_stream().num_channels(), 337 shared_state_.api_format_.reverse_output_stream().num_channels(),
331 api_format_.reverse_output_stream().num_frames()); 338 shared_state_.api_format_.reverse_output_stream().num_frames());
332 } else { 339 } else {
333 render_converter_.reset(nullptr); 340 render_converter_.reset(nullptr);
334 } 341 }
335 } else { 342 } else {
336 render_audio_.reset(nullptr); 343 render_audio_.reset(nullptr);
337 render_converter_.reset(nullptr); 344 render_converter_.reset(nullptr);
338 } 345 }
339 capture_audio_.reset(new AudioBuffer( 346 capture_audio_.reset(
340 api_format_.input_stream().num_frames(), 347 new AudioBuffer(shared_state_.api_format_.input_stream().num_frames(),
341 api_format_.input_stream().num_channels(), fwd_proc_format_.num_frames(), 348 shared_state_.api_format_.input_stream().num_channels(),
342 fwd_audio_buffer_channels, api_format_.output_stream().num_frames())); 349 fwd_proc_format_.num_frames(), fwd_audio_buffer_channels,
350 shared_state_.api_format_.output_stream().num_frames()));
343 351
344 // Initialize all components. 352 // Initialize all components.
345 for (auto item : component_list_) { 353 for (auto item : component_list_) {
346 int err = item->Initialize(); 354 int err = item->Initialize();
347 if (err != kNoError) { 355 if (err != kNoError) {
348 return err; 356 return err;
349 } 357 }
350 } 358 }
351 359
352 InitializeExperimentalAgc(); 360 InitializeExperimentalAgc();
(...skipping 35 matching lines...)
388 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { 396 !(num_out_channels == 1 || num_out_channels == num_in_channels)) {
389 return kBadNumberChannelsError; 397 return kBadNumberChannelsError;
390 } 398 }
391 399
392 if (beamformer_enabled_ && 400 if (beamformer_enabled_ &&
393 (static_cast<size_t>(num_in_channels) != array_geometry_.size() || 401 (static_cast<size_t>(num_in_channels) != array_geometry_.size() ||
394 num_out_channels > 1)) { 402 num_out_channels > 1)) {
395 return kBadNumberChannelsError; 403 return kBadNumberChannelsError;
396 } 404 }
397 405
398 api_format_ = config; 406 shared_state_.api_format_ = config;
399 407
400 // We process at the closest native rate >= min(input rate, output rate)... 408 // We process at the closest native rate >= min(input rate, output rate)...
401 const int min_proc_rate = 409 const int min_proc_rate =
402 std::min(api_format_.input_stream().sample_rate_hz(), 410 std::min(shared_state_.api_format_.input_stream().sample_rate_hz(),
403 api_format_.output_stream().sample_rate_hz()); 411 shared_state_.api_format_.output_stream().sample_rate_hz());
404 int fwd_proc_rate; 412 int fwd_proc_rate;
405 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { 413 for (size_t i = 0; i < kNumNativeSampleRates; ++i) {
406 fwd_proc_rate = kNativeSampleRatesHz[i]; 414 fwd_proc_rate = kNativeSampleRatesHz[i];
407 if (fwd_proc_rate >= min_proc_rate) { 415 if (fwd_proc_rate >= min_proc_rate) {
408 break; 416 break;
409 } 417 }
410 } 418 }
411 // ...with one exception. 419 // ...with one exception.
412 if (echo_control_mobile_->is_enabled() && 420 if (echo_control_mobile_->is_enabled() &&
413 min_proc_rate > kMaxAECMSampleRateHz) { 421 min_proc_rate > kMaxAECMSampleRateHz) {
414 fwd_proc_rate = kMaxAECMSampleRateHz; 422 fwd_proc_rate = kMaxAECMSampleRateHz;
415 } 423 }
416 424
417 fwd_proc_format_ = StreamConfig(fwd_proc_rate); 425 fwd_proc_format_ = StreamConfig(fwd_proc_rate);
418 426
419 // We normally process the reverse stream at 16 kHz. Unless... 427 // We normally process the reverse stream at 16 kHz. Unless...
420 int rev_proc_rate = kSampleRate16kHz; 428 int rev_proc_rate = kSampleRate16kHz;
421 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { 429 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) {
422 // ...the forward stream is at 8 kHz. 430 // ...the forward stream is at 8 kHz.
423 rev_proc_rate = kSampleRate8kHz; 431 rev_proc_rate = kSampleRate8kHz;
424 } else { 432 } else {
425 if (api_format_.reverse_input_stream().sample_rate_hz() == 433 if (shared_state_.api_format_.reverse_input_stream().sample_rate_hz() ==
426 kSampleRate32kHz) { 434 kSampleRate32kHz) {
427 // ...or the input is at 32 kHz, in which case we use the splitting 435 // ...or the input is at 32 kHz, in which case we use the splitting
428 // filter rather than the resampler. 436 // filter rather than the resampler.
429 rev_proc_rate = kSampleRate32kHz; 437 rev_proc_rate = kSampleRate32kHz;
430 } 438 }
431 } 439 }
432 440
433 // Always downmix the reverse stream to mono for analysis. This has been 441 // Always downmix the reverse stream to mono for analysis. This has been
434 // demonstrated to work well for AEC in most practical scenarios. 442 // demonstrated to work well for AEC in most practical scenarios.
435 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); 443 rev_proc_format_ = StreamConfig(rev_proc_rate, 1);
436 444
437 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || 445 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
438 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { 446 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
439 split_rate_ = kSampleRate16kHz; 447 split_rate_ = kSampleRate16kHz;
440 } else { 448 } else {
441 split_rate_ = fwd_proc_format_.sample_rate_hz(); 449 split_rate_ = fwd_proc_format_.sample_rate_hz();
442 } 450 }
443 451
444 return InitializeLocked(); 452 return InitializeLocked();
445 } 453 }
446 454
447 // Calls InitializeLocked() if any of the audio parameters have changed from
448 // their current values.
449 int AudioProcessingImpl::MaybeInitializeLocked(
450 const ProcessingConfig& processing_config) {
451 if (processing_config == api_format_) {
452 return kNoError;
453 }
454 return InitializeLocked(processing_config);
455 }
456 455
457 void AudioProcessingImpl::SetExtraOptions(const Config& config) { 456 void AudioProcessingImpl::SetExtraOptions(const Config& config) {
458 CriticalSectionScoped crit_scoped(crit_); 457 CriticalSectionScoped crit_scoped(crit_);
459 for (auto item : component_list_) { 458 for (auto item : component_list_) {
460 item->SetExtraOptions(config); 459 item->SetExtraOptions(config);
461 } 460 }
462 461
463 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { 462 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
464 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; 463 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
465 InitializeTransient(); 464 InitializeTransient();
466 } 465 }
467 } 466 }
468 467
469 468
470 int AudioProcessingImpl::proc_sample_rate_hz() const { 469 int AudioProcessingImpl::proc_sample_rate_hz() const {
471 return fwd_proc_format_.sample_rate_hz(); 470 return fwd_proc_format_.sample_rate_hz();
472 } 471 }
473 472
474 int AudioProcessingImpl::proc_split_sample_rate_hz() const { 473 int AudioProcessingImpl::proc_split_sample_rate_hz() const {
475 return split_rate_; 474 return split_rate_;
476 } 475 }
477 476
478 int AudioProcessingImpl::num_reverse_channels() const { 477 int AudioProcessingImpl::num_reverse_channels() const {
479 return rev_proc_format_.num_channels(); 478 return rev_proc_format_.num_channels();
480 } 479 }
481 480
482 int AudioProcessingImpl::num_input_channels() const { 481 int AudioProcessingImpl::num_input_channels() const {
483 return api_format_.input_stream().num_channels(); 482 return shared_state_.api_format_.input_stream().num_channels();
484 } 483 }
485 484
486 int AudioProcessingImpl::num_output_channels() const { 485 int AudioProcessingImpl::num_output_channels() const {
487 return api_format_.output_stream().num_channels(); 486 return shared_state_.api_format_.output_stream().num_channels();
488 } 487 }
489 488
490 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { 489 void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
491 CriticalSectionScoped lock(crit_); 490 CriticalSectionScoped lock(crit_);
492 output_will_be_muted_ = muted; 491 output_will_be_muted_ = muted;
493 if (agc_manager_.get()) { 492 if (agc_manager_.get()) {
494 agc_manager_->SetCaptureMuted(output_will_be_muted_); 493 agc_manager_->SetCaptureMuted(output_will_be_muted_);
495 } 494 }
496 } 495 }
497 496
498 497
499 int AudioProcessingImpl::ProcessStream(const float* const* src, 498 int AudioProcessingImpl::ProcessStream(const float* const* src,
500 size_t samples_per_channel, 499 size_t samples_per_channel,
501 int input_sample_rate_hz, 500 int input_sample_rate_hz,
502 ChannelLayout input_layout, 501 ChannelLayout input_layout,
503 int output_sample_rate_hz, 502 int output_sample_rate_hz,
504 ChannelLayout output_layout, 503 ChannelLayout output_layout,
505 float* const* dest) { 504 float* const* dest) {
506 CriticalSectionScoped crit_scoped(crit_); 505 CriticalSectionScoped crit_scoped(crit_);
507 StreamConfig input_stream = api_format_.input_stream(); 506 StreamConfig input_stream = shared_state_.api_format_.input_stream();
508 input_stream.set_sample_rate_hz(input_sample_rate_hz); 507 input_stream.set_sample_rate_hz(input_sample_rate_hz);
509 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); 508 input_stream.set_num_channels(ChannelsFromLayout(input_layout));
510 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); 509 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout));
511 510
512 StreamConfig output_stream = api_format_.output_stream(); 511 StreamConfig output_stream = shared_state_.api_format_.output_stream();
513 output_stream.set_sample_rate_hz(output_sample_rate_hz); 512 output_stream.set_sample_rate_hz(output_sample_rate_hz);
514 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); 513 output_stream.set_num_channels(ChannelsFromLayout(output_layout));
515 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); 514 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout));
516 515
517 if (samples_per_channel != input_stream.num_frames()) { 516 if (samples_per_channel != input_stream.num_frames()) {
518 return kBadDataLengthError; 517 return kBadDataLengthError;
519 } 518 }
520 return ProcessStream(src, input_stream, output_stream, dest); 519 return ProcessStream(src, input_stream, output_stream, dest);
521 } 520 }
522 521
523 int AudioProcessingImpl::ProcessStream(const float* const* src, 522 int AudioProcessingImpl::ProcessStream(const float* const* src,
524 const StreamConfig& input_config, 523 const StreamConfig& input_config,
525 const StreamConfig& output_config, 524 const StreamConfig& output_config,
526 float* const* dest) { 525 float* const* dest) {
527 CriticalSectionScoped crit_scoped(crit_); 526 CriticalSectionScoped crit_scoped(crit_);
528 if (!src || !dest) { 527 if (!src || !dest) {
529 return kNullPointerError; 528 return kNullPointerError;
530 } 529 }
531 530
532 echo_cancellation_->ReadQueuedRenderData(); 531 echo_cancellation_->ReadQueuedRenderData();
533 echo_control_mobile_->ReadQueuedRenderData(); 532 echo_control_mobile_->ReadQueuedRenderData();
534 gain_control_->ReadQueuedRenderData(); 533 gain_control_->ReadQueuedRenderData();
535 534
536 ProcessingConfig processing_config = api_format_; 535 ProcessingConfig processing_config = shared_state_.api_format_;
537 processing_config.input_stream() = input_config; 536 processing_config.input_stream() = input_config;
538 processing_config.output_stream() = output_config; 537 processing_config.output_stream() = output_config;
539 538
540 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); 539 RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
541 assert(processing_config.input_stream().num_frames() == 540 assert(processing_config.input_stream().num_frames() ==
542 api_format_.input_stream().num_frames()); 541 shared_state_.api_format_.input_stream().num_frames());
543 542
544 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 543 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
545 if (debug_file_->Open()) { 544 if (debug_file_->Open()) {
546 RETURN_ON_ERR(WriteConfigMessage(false)); 545 RETURN_ON_ERR(WriteConfigMessage(false));
547 546
548 event_msg_->set_type(audioproc::Event::STREAM); 547 event_msg_->set_type(audioproc::Event::STREAM);
549 audioproc::Stream* msg = event_msg_->mutable_stream(); 548 audioproc::Stream* msg = event_msg_->mutable_stream();
550 const size_t channel_size = 549 const size_t channel_size =
551 sizeof(float) * api_format_.input_stream().num_frames(); 550 sizeof(float) * shared_state_.api_format_.input_stream().num_frames();
552 for (int i = 0; i < api_format_.input_stream().num_channels(); ++i) 551 for (int i = 0; i < shared_state_.api_format_.input_stream().num_channels();
552 ++i)
553 msg->add_input_channel(src[i], channel_size); 553 msg->add_input_channel(src[i], channel_size);
554 } 554 }
555 #endif 555 #endif
556 556
557 capture_audio_->CopyFrom(src, api_format_.input_stream()); 557 capture_audio_->CopyFrom(src, shared_state_.api_format_.input_stream());
558 RETURN_ON_ERR(ProcessStreamLocked()); 558 RETURN_ON_ERR(ProcessStreamLocked());
559 capture_audio_->CopyTo(api_format_.output_stream(), dest); 559 capture_audio_->CopyTo(shared_state_.api_format_.output_stream(), dest);
560 560
561 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 561 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
562 if (debug_file_->Open()) { 562 if (debug_file_->Open()) {
563 audioproc::Stream* msg = event_msg_->mutable_stream(); 563 audioproc::Stream* msg = event_msg_->mutable_stream();
564 const size_t channel_size = 564 const size_t channel_size =
565 sizeof(float) * api_format_.output_stream().num_frames(); 565 sizeof(float) * shared_state_.api_format_.output_stream().num_frames();
566 for (int i = 0; i < api_format_.output_stream().num_channels(); ++i) 566 for (int i = 0;
567 i < shared_state_.api_format_.output_stream().num_channels(); ++i)
567 msg->add_output_channel(dest[i], channel_size); 568 msg->add_output_channel(dest[i], channel_size);
568 RETURN_ON_ERR(WriteMessageToDebugFile()); 569 RETURN_ON_ERR(WriteMessageToDebugFile());
569 } 570 }
570 #endif 571 #endif
571 572
572 return kNoError; 573 return kNoError;
573 } 574 }
574 575
575 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { 576 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
576 CriticalSectionScoped crit_scoped(crit_); 577 CriticalSectionScoped crit_scoped(crit_);
577 echo_cancellation_->ReadQueuedRenderData(); 578 echo_cancellation_->ReadQueuedRenderData();
578 echo_control_mobile_->ReadQueuedRenderData(); 579 echo_control_mobile_->ReadQueuedRenderData();
579 gain_control_->ReadQueuedRenderData(); 580 gain_control_->ReadQueuedRenderData();
580 581
581 if (!frame) { 582 if (!frame) {
582 return kNullPointerError; 583 return kNullPointerError;
583 } 584 }
584 // Must be a native rate. 585 // Must be a native rate.
585 if (frame->sample_rate_hz_ != kSampleRate8kHz && 586 if (frame->sample_rate_hz_ != kSampleRate8kHz &&
586 frame->sample_rate_hz_ != kSampleRate16kHz && 587 frame->sample_rate_hz_ != kSampleRate16kHz &&
587 frame->sample_rate_hz_ != kSampleRate32kHz && 588 frame->sample_rate_hz_ != kSampleRate32kHz &&
588 frame->sample_rate_hz_ != kSampleRate48kHz) { 589 frame->sample_rate_hz_ != kSampleRate48kHz) {
589 return kBadSampleRateError; 590 return kBadSampleRateError;
590 } 591 }
592
591 if (echo_control_mobile_->is_enabled() && 593 if (echo_control_mobile_->is_enabled() &&
592 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { 594 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
593 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; 595 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
594 return kUnsupportedComponentError; 596 return kUnsupportedComponentError;
595 } 597 }
596 598
597 // TODO(ajm): The input and output rates and channels are currently 599 // TODO(ajm): The input and output rates and channels are currently
598 // constrained to be identical in the int16 interface. 600 // constrained to be identical in the int16 interface.
599 ProcessingConfig processing_config = api_format_; 601 ProcessingConfig processing_config = shared_state_.api_format_;
600 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); 602 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
601 processing_config.input_stream().set_num_channels(frame->num_channels_); 603 processing_config.input_stream().set_num_channels(frame->num_channels_);
602 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); 604 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
603 processing_config.output_stream().set_num_channels(frame->num_channels_); 605 processing_config.output_stream().set_num_channels(frame->num_channels_);
604 606
605 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); 607 RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
606 if (frame->samples_per_channel_ != api_format_.input_stream().num_frames()) { 608 if (frame->samples_per_channel_ !=
609 shared_state_.api_format_.input_stream().num_frames()) {
607 return kBadDataLengthError; 610 return kBadDataLengthError;
608 } 611 }
609 612
610 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 613 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
611 if (debug_file_->Open()) { 614 if (debug_file_->Open()) {
612 event_msg_->set_type(audioproc::Event::STREAM); 615 event_msg_->set_type(audioproc::Event::STREAM);
613 audioproc::Stream* msg = event_msg_->mutable_stream(); 616 audioproc::Stream* msg = event_msg_->mutable_stream();
614 const size_t data_size = 617 const size_t data_size =
615 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 618 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
616 msg->set_input_data(frame->data_, data_size); 619 msg->set_input_data(frame->data_, data_size);
(...skipping 109 matching lines...)
726 } 729 }
727 730
728 int AudioProcessingImpl::ProcessReverseStream( 731 int AudioProcessingImpl::ProcessReverseStream(
729 const float* const* src, 732 const float* const* src,
730 const StreamConfig& reverse_input_config, 733 const StreamConfig& reverse_input_config,
731 const StreamConfig& reverse_output_config, 734 const StreamConfig& reverse_output_config,
732 float* const* dest) { 735 float* const* dest) {
733 RETURN_ON_ERR( 736 RETURN_ON_ERR(
734 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); 737 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config));
735 if (is_rev_processed()) { 738 if (is_rev_processed()) {
736 render_audio_->CopyTo(api_format_.reverse_output_stream(), dest); 739 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(),
740 dest);
737 } else if (rev_conversion_needed()) { 741 } else if (rev_conversion_needed()) {
738 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, 742 render_converter_->Convert(src, reverse_input_config.num_samples(), dest,
739 reverse_output_config.num_samples()); 743 reverse_output_config.num_samples());
740 } else { 744 } else {
741 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), 745 CopyAudioIfNeeded(src, reverse_input_config.num_frames(),
742 reverse_input_config.num_channels(), dest); 746 reverse_input_config.num_channels(), dest);
743 } 747 }
744 748
745 return kNoError; 749 return kNoError;
746 } 750 }
747 751
748 int AudioProcessingImpl::AnalyzeReverseStream( 752 int AudioProcessingImpl::AnalyzeReverseStream(
749 const float* const* src, 753 const float* const* src,
750 const StreamConfig& reverse_input_config, 754 const StreamConfig& reverse_input_config,
751 const StreamConfig& reverse_output_config) { 755 const StreamConfig& reverse_output_config) {
752 CriticalSectionScoped crit_scoped(crit_); 756 CriticalSectionScoped crit_scoped(crit_);
753 if (src == NULL) { 757 if (src == NULL) {
754 return kNullPointerError; 758 return kNullPointerError;
755 } 759 }
756 760
757 if (reverse_input_config.num_channels() <= 0) { 761 if (reverse_input_config.num_channels() <= 0) {
758 return kBadNumberChannelsError; 762 return kBadNumberChannelsError;
759 } 763 }
760 764
761 ProcessingConfig processing_config = api_format_; 765 ProcessingConfig processing_config = shared_state_.api_format_;
762 processing_config.reverse_input_stream() = reverse_input_config; 766 processing_config.reverse_input_stream() = reverse_input_config;
763 processing_config.reverse_output_stream() = reverse_output_config; 767 processing_config.reverse_output_stream() = reverse_output_config;
764 768
765 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); 769 RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
766 assert(reverse_input_config.num_frames() == 770 assert(reverse_input_config.num_frames() ==
767 api_format_.reverse_input_stream().num_frames()); 771 shared_state_.api_format_.reverse_input_stream().num_frames());
768 772
769 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 773 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
770 if (debug_file_->Open()) { 774 if (debug_file_->Open()) {
771 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); 775 event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
772 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); 776 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
773 const size_t channel_size = 777 const size_t channel_size =
774 sizeof(float) * api_format_.reverse_input_stream().num_frames(); 778 sizeof(float) *
775 for (int i = 0; i < api_format_.reverse_input_stream().num_channels(); ++i) 779 shared_state_.api_format_.reverse_input_stream().num_frames();
780 for (int i = 0;
781 i < shared_state_.api_format_.reverse_input_stream().num_channels();
782 ++i)
776 msg->add_channel(src[i], channel_size); 783 msg->add_channel(src[i], channel_size);
777 RETURN_ON_ERR(WriteMessageToDebugFile()); 784 RETURN_ON_ERR(WriteMessageToDebugFile());
778 } 785 }
779 #endif 786 #endif
780 787
781 render_audio_->CopyFrom(src, api_format_.reverse_input_stream()); 788 render_audio_->CopyFrom(src,
789 shared_state_.api_format_.reverse_input_stream());
782 return ProcessReverseStreamLocked(); 790 return ProcessReverseStreamLocked();
783 } 791 }
784 792
785 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { 793 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
786 RETURN_ON_ERR(AnalyzeReverseStream(frame)); 794 RETURN_ON_ERR(AnalyzeReverseStream(frame));
787 if (is_rev_processed()) { 795 if (is_rev_processed()) {
788 render_audio_->InterleaveTo(frame, true); 796 render_audio_->InterleaveTo(frame, true);
789 } 797 }
790 798
791 return kNoError; 799 return kNoError;
792 } 800 }
793 801
794 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { 802 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
795 CriticalSectionScoped crit_scoped(crit_); 803 CriticalSectionScoped crit_scoped(crit_);
796 if (frame == NULL) { 804 if (frame == NULL) {
797 return kNullPointerError; 805 return kNullPointerError;
798 } 806 }
799 // Must be a native rate. 807 // Must be a native rate.
800 if (frame->sample_rate_hz_ != kSampleRate8kHz && 808 if (frame->sample_rate_hz_ != kSampleRate8kHz &&
801 frame->sample_rate_hz_ != kSampleRate16kHz && 809 frame->sample_rate_hz_ != kSampleRate16kHz &&
802 frame->sample_rate_hz_ != kSampleRate32kHz && 810 frame->sample_rate_hz_ != kSampleRate32kHz &&
803 frame->sample_rate_hz_ != kSampleRate48kHz) { 811 frame->sample_rate_hz_ != kSampleRate48kHz) {
804 return kBadSampleRateError; 812 return kBadSampleRateError;
805 } 813 }
806 // This interface does not tolerate different forward and reverse rates. 814 // This interface does not tolerate different forward and reverse rates.
807 if (frame->sample_rate_hz_ != api_format_.input_stream().sample_rate_hz()) { 815 if (frame->sample_rate_hz_ !=
816 shared_state_.api_format_.input_stream().sample_rate_hz()) {
808 return kBadSampleRateError; 817 return kBadSampleRateError;
809 } 818 }
810 819
811 if (frame->num_channels_ <= 0) { 820 if (frame->num_channels_ <= 0) {
812 return kBadNumberChannelsError; 821 return kBadNumberChannelsError;
813 } 822 }
814 823
815 ProcessingConfig processing_config = api_format_; 824 ProcessingConfig processing_config = shared_state_.api_format_;
816 processing_config.reverse_input_stream().set_sample_rate_hz( 825 processing_config.reverse_input_stream().set_sample_rate_hz(
817 frame->sample_rate_hz_); 826 frame->sample_rate_hz_);
818 processing_config.reverse_input_stream().set_num_channels( 827 processing_config.reverse_input_stream().set_num_channels(
819 frame->num_channels_); 828 frame->num_channels_);
820 processing_config.reverse_output_stream().set_sample_rate_hz( 829 processing_config.reverse_output_stream().set_sample_rate_hz(
821 frame->sample_rate_hz_); 830 frame->sample_rate_hz_);
822 processing_config.reverse_output_stream().set_num_channels( 831 processing_config.reverse_output_stream().set_num_channels(
823 frame->num_channels_); 832 frame->num_channels_);
824 833
825 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); 834 RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
826 if (frame->samples_per_channel_ != 835 if (frame->samples_per_channel_ !=
827 api_format_.reverse_input_stream().num_frames()) { 836 shared_state_.api_format_.reverse_input_stream().num_frames()) {
828 return kBadDataLengthError; 837 return kBadDataLengthError;
829 } 838 }
830 839
831 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 840 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
832 if (debug_file_->Open()) { 841 if (debug_file_->Open()) {
833 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); 842 event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
834 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); 843 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
835 const size_t data_size = 844 const size_t data_size =
836 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 845 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
837 msg->set_data(frame->data_, data_size); 846 msg->set_data(frame->data_, data_size);
(...skipping 203 matching lines...)
1041 } else if (enabled_count == 2) { 1050 } else if (enabled_count == 2) {
1042 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { 1051 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
1043 return false; 1052 return false;
1044 } 1053 }
1045 } 1054 }
1046 return true; 1055 return true;
1047 } 1056 }
1048 1057
1049 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { 1058 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
1050 // Check if we've upmixed or downmixed the audio. 1059 // Check if we've upmixed or downmixed the audio.
1051 return ((api_format_.output_stream().num_channels() != 1060 return ((shared_state_.api_format_.output_stream().num_channels() !=
1052 api_format_.input_stream().num_channels()) || 1061 shared_state_.api_format_.input_stream().num_channels()) ||
1053 is_data_processed || transient_suppressor_enabled_); 1062 is_data_processed || transient_suppressor_enabled_);
1054 } 1063 }
1055 1064
1056 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { 1065 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
1057 return (is_data_processed && 1066 return (is_data_processed &&
1058 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || 1067 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
1059 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); 1068 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz));
1060 } 1069 }
1061 1070
1062 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { 1071 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
1063 if (!is_data_processed && !voice_detection_->is_enabled() && 1072 if (!is_data_processed && !voice_detection_->is_enabled() &&
1064 !transient_suppressor_enabled_) { 1073 !transient_suppressor_enabled_) {
1065 // Only level_estimator_ is enabled. 1074 // Only level_estimator_ is enabled.
1066 return false; 1075 return false;
1067 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || 1076 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
1068 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { 1077 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
1069 // Something besides level_estimator_ is enabled, and we have super-wb. 1078 // Something besides level_estimator_ is enabled, and we have super-wb.
1070 return true; 1079 return true;
1071 } 1080 }
1072 return false; 1081 return false;
1073 } 1082 }
1074 1083
1075 bool AudioProcessingImpl::is_rev_processed() const { 1084 bool AudioProcessingImpl::is_rev_processed() const {
1076 return intelligibility_enabled_ && intelligibility_enhancer_->active(); 1085 return intelligibility_enabled_ && intelligibility_enhancer_->active();
1077 } 1086 }
1078 1087
1079 bool AudioProcessingImpl::rev_conversion_needed() const { 1088 bool AudioProcessingImpl::rev_conversion_needed() const {
1080 return (api_format_.reverse_input_stream() != 1089 return (shared_state_.api_format_.reverse_input_stream() !=
1081 api_format_.reverse_output_stream()); 1090 shared_state_.api_format_.reverse_output_stream());
1082 } 1091 }
1083 1092
1084 void AudioProcessingImpl::InitializeExperimentalAgc() { 1093 void AudioProcessingImpl::InitializeExperimentalAgc() {
1085 if (use_new_agc_) { 1094 if (use_new_agc_) {
1086 if (!agc_manager_.get()) { 1095 if (!agc_manager_.get()) {
1087 agc_manager_.reset(new AgcManagerDirect(gain_control_, 1096 agc_manager_.reset(new AgcManagerDirect(gain_control_,
1088 gain_control_for_new_agc_.get(), 1097 gain_control_for_new_agc_.get(),
1089 agc_startup_min_volume_)); 1098 agc_startup_min_volume_));
1090 } 1099 }
1091 agc_manager_->Initialize(); 1100 agc_manager_->Initialize();
1092 agc_manager_->SetCaptureMuted(output_will_be_muted_); 1101 agc_manager_->SetCaptureMuted(output_will_be_muted_);
1093 } 1102 }
1094 } 1103 }
1095 1104
1096 void AudioProcessingImpl::InitializeTransient() { 1105 void AudioProcessingImpl::InitializeTransient() {
1097 if (transient_suppressor_enabled_) { 1106 if (transient_suppressor_enabled_) {
1098 if (!transient_suppressor_.get()) { 1107 if (!transient_suppressor_.get()) {
1099 transient_suppressor_.reset(new TransientSuppressor()); 1108 transient_suppressor_.reset(new TransientSuppressor());
1100 } 1109 }
1101 transient_suppressor_->Initialize( 1110 transient_suppressor_->Initialize(
1102 fwd_proc_format_.sample_rate_hz(), split_rate_, 1111 fwd_proc_format_.sample_rate_hz(), split_rate_,
1103 api_format_.output_stream().num_channels()); 1112 shared_state_.api_format_.output_stream().num_channels());
1104 } 1113 }
1105 } 1114 }
1106 1115
1107 void AudioProcessingImpl::InitializeBeamformer() { 1116 void AudioProcessingImpl::InitializeBeamformer() {
1108 if (beamformer_enabled_) { 1117 if (beamformer_enabled_) {
1109 if (!beamformer_) { 1118 if (!beamformer_) {
1110 beamformer_.reset(new NonlinearBeamformer(array_geometry_)); 1119 beamformer_.reset(new NonlinearBeamformer(array_geometry_));
1111 } 1120 }
1112 beamformer_->Initialize(kChunkSizeMs, split_rate_); 1121 beamformer_->Initialize(kChunkSizeMs, split_rate_);
1113 } 1122 }
(...skipping 97 matching lines...)
1211 } 1220 }
1212 1221
1213 event_msg_->Clear(); 1222 event_msg_->Clear();
1214 1223
1215 return kNoError; 1224 return kNoError;
1216 } 1225 }
1217 1226
1218 int AudioProcessingImpl::WriteInitMessage() { 1227 int AudioProcessingImpl::WriteInitMessage() {
1219 event_msg_->set_type(audioproc::Event::INIT); 1228 event_msg_->set_type(audioproc::Event::INIT);
1220 audioproc::Init* msg = event_msg_->mutable_init(); 1229 audioproc::Init* msg = event_msg_->mutable_init();
1221 msg->set_sample_rate(api_format_.input_stream().sample_rate_hz()); 1230 msg->set_sample_rate(
1222 msg->set_num_input_channels(api_format_.input_stream().num_channels()); 1231 shared_state_.api_format_.input_stream().sample_rate_hz());
1223 msg->set_num_output_channels(api_format_.output_stream().num_channels()); 1232 msg->set_num_input_channels(
1233 shared_state_.api_format_.input_stream().num_channels());
1234 msg->set_num_output_channels(
1235 shared_state_.api_format_.output_stream().num_channels());
1224 msg->set_num_reverse_channels( 1236 msg->set_num_reverse_channels(
1225 api_format_.reverse_input_stream().num_channels()); 1237 shared_state_.api_format_.reverse_input_stream().num_channels());
1226 msg->set_reverse_sample_rate( 1238 msg->set_reverse_sample_rate(
1227 api_format_.reverse_input_stream().sample_rate_hz()); 1239 shared_state_.api_format_.reverse_input_stream().sample_rate_hz());
1228 msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz()); 1240 msg->set_output_sample_rate(
1241 shared_state_.api_format_.output_stream().sample_rate_hz());
1229 // TODO(ekmeyerson): Add reverse output fields to event_msg_. 1242 // TODO(ekmeyerson): Add reverse output fields to event_msg_.
1230 1243
1231 RETURN_ON_ERR(WriteMessageToDebugFile()); 1244 RETURN_ON_ERR(WriteMessageToDebugFile());
1232 return kNoError; 1245 return kNoError;
1233 } 1246 }
1234 1247
1235 int AudioProcessingImpl::WriteConfigMessage(bool forced) { 1248 int AudioProcessingImpl::WriteConfigMessage(bool forced) {
1236 audioproc::Config config; 1249 audioproc::Config config;
1237 1250
1238 config.set_aec_enabled(echo_cancellation_->is_enabled()); 1251 config.set_aec_enabled(echo_cancellation_->is_enabled());
(...skipping 33 matching lines...)
1272 1285
1273 event_msg_->set_type(audioproc::Event::CONFIG); 1286 event_msg_->set_type(audioproc::Event::CONFIG);
1274 event_msg_->mutable_config()->CopyFrom(config); 1287 event_msg_->mutable_config()->CopyFrom(config);
1275 1288
1276 RETURN_ON_ERR(WriteMessageToDebugFile()); 1289 RETURN_ON_ERR(WriteMessageToDebugFile());
1277 return kNoError; 1290 return kNoError;
1278 } 1291 }
1279 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP 1292 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
1280 1293
1281 } // namespace webrtc 1294 } // namespace webrtc