OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 392 matching lines...)
403 return err; | 403 return err; |
404 } | 404 } |
405 } | 405 } |
406 #endif | 406 #endif |
407 | 407 |
408 return kNoError; | 408 return kNoError; |
409 } | 409 } |
410 | 410 |
411 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 411 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
412 for (const auto& stream : config.streams) { | 412 for (const auto& stream : config.streams) { |
413 if (stream.num_channels() < 0) { | |
414 return kBadNumberChannelsError; | |
415 } | |
416 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 413 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
417 return kBadSampleRateError; | 414 return kBadSampleRateError; |
418 } | 415 } |
419 } | 416 } |
420 | 417 |
421 const int num_in_channels = config.input_stream().num_channels(); | 418 const size_t num_in_channels = config.input_stream().num_channels(); |
422 const int num_out_channels = config.output_stream().num_channels(); | 419 const size_t num_out_channels = config.output_stream().num_channels(); |
423 | 420 |
424 // Need at least one input channel. | 421 // Need at least one input channel. |
425 // Need either one output channel or as many outputs as there are inputs. | 422 // Need either one output channel or as many outputs as there are inputs. |
426 if (num_in_channels == 0 || | 423 if (num_in_channels == 0 || |
427 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { | 424 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { |
428 return kBadNumberChannelsError; | 425 return kBadNumberChannelsError; |
429 } | 426 } |
430 | 427 |
431 if (constants_.beamformer_enabled && (static_cast<size_t>(num_in_channels) != | 428 if (constants_.beamformer_enabled && |
432 constants_.array_geometry.size() || | 429 (num_in_channels != constants_.array_geometry.size() || |
433 num_out_channels > 1)) { | 430 num_out_channels > 1)) { |
434 return kBadNumberChannelsError; | 431 return kBadNumberChannelsError; |
435 } | 432 } |
436 | 433 |
437 formats_.api_format = config; | 434 formats_.api_format = config; |
438 | 435 |
439 // We process at the closest native rate >= min(input rate, output rate)... | 436 // We process at the closest native rate >= min(input rate, output rate)... |
440 const int min_proc_rate = | 437 const int min_proc_rate = |
441 std::min(formats_.api_format.input_stream().sample_rate_hz(), | 438 std::min(formats_.api_format.input_stream().sample_rate_hz(), |
442 formats_.api_format.output_stream().sample_rate_hz()); | 439 formats_.api_format.output_stream().sample_rate_hz()); |
443 int fwd_proc_rate; | 440 int fwd_proc_rate; |
(...skipping 65 matching lines...)
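Note: the dropped `stream.num_channels() < 0` check follows from StreamConfig::num_channels() now returning size_t; an unsigned count can never be negative, so only the zero-channel and sample-rate consistency checks remain. A minimal sketch of the surviving validation, using a simplified stand-in type (hypothetical names, not the real WebRTC classes):

    #include <cstddef>
    #include <vector>

    // Simplified stand-in for webrtc::StreamConfig (illustration only).
    struct Stream {
      size_t num_channels;  // unsigned: a negative count is unrepresentable
      int sample_rate_hz;
    };

    // Mirrors the validation kept in InitializeLocked() after the change.
    bool StreamsAreValid(const std::vector<Stream>& streams) {
      for (const auto& s : streams) {
        if (s.num_channels > 0 && s.sample_rate_hz <= 0)
          return false;  // corresponds to kBadSampleRateError
      }
      return true;
    }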
509 int AudioProcessingImpl::proc_sample_rate_hz() const { | 506 int AudioProcessingImpl::proc_sample_rate_hz() const { |
510 // Used as callback from submodules, hence locking is not allowed. | 507 // Used as callback from submodules, hence locking is not allowed. |
511 return capture_nonlocked_.fwd_proc_format.sample_rate_hz(); | 508 return capture_nonlocked_.fwd_proc_format.sample_rate_hz(); |
512 } | 509 } |
513 | 510 |
514 int AudioProcessingImpl::proc_split_sample_rate_hz() const { | 511 int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
515 // Used as callback from submodules, hence locking is not allowed. | 512 // Used as callback from submodules, hence locking is not allowed. |
516 return capture_nonlocked_.split_rate; | 513 return capture_nonlocked_.split_rate; |
517 } | 514 } |
518 | 515 |
519 int AudioProcessingImpl::num_reverse_channels() const { | 516 size_t AudioProcessingImpl::num_reverse_channels() const { |
520 // Used as callback from submodules, hence locking is not allowed. | 517 // Used as callback from submodules, hence locking is not allowed. |
521 return formats_.rev_proc_format.num_channels(); | 518 return formats_.rev_proc_format.num_channels(); |
522 } | 519 } |
523 | 520 |
524 int AudioProcessingImpl::num_input_channels() const { | 521 size_t AudioProcessingImpl::num_input_channels() const { |
525 // Used as callback from submodules, hence locking is not allowed. | 522 // Used as callback from submodules, hence locking is not allowed. |
526 return formats_.api_format.input_stream().num_channels(); | 523 return formats_.api_format.input_stream().num_channels(); |
527 } | 524 } |
528 | 525 |
529 int AudioProcessingImpl::num_output_channels() const { | 526 size_t AudioProcessingImpl::num_output_channels() const { |
530 // Used as callback from submodules, hence locking is not allowed. | 527 // Used as callback from submodules, hence locking is not allowed. |
531 return formats_.api_format.output_stream().num_channels(); | 528 return formats_.api_format.output_stream().num_channels(); |
532 } | 529 } |
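Note: with the channel-count accessors returning size_t, callers that iterate over channels switch their loop index from int to size_t as well (the debug-dump hunks further down show exactly that pattern); keeping a signed index against an unsigned bound would trip sign-compare warnings. A hedged sketch of the caller-side pattern, with made-up names:

    #include <cstddef>

    // Hypothetical caller; num_channels/num_frames mirror the new
    // size_t-returning accessors, and the loop index matches them.
    void ForEachChannel(const float* const* data, size_t num_channels,
                        size_t num_frames) {
      for (size_t i = 0; i < num_channels; ++i) {
        const float* channel = data[i];
        // ... process channel[0 .. num_frames) ...
        (void)channel;
        (void)num_frames;
      }
    }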
533 | 530 |
534 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 531 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
535 rtc::CritScope cs(&crit_capture_); | 532 rtc::CritScope cs(&crit_capture_); |
536 capture_.output_will_be_muted = muted; | 533 capture_.output_will_be_muted = muted; |
537 if (private_submodules_->agc_manager.get()) { | 534 if (private_submodules_->agc_manager.get()) { |
538 private_submodules_->agc_manager->SetCaptureMuted( | 535 private_submodules_->agc_manager->SetCaptureMuted( |
539 capture_.output_will_be_muted); | 536 capture_.output_will_be_muted); |
(...skipping 68 matching lines...)
608 formats_.api_format.input_stream().num_frames()); | 605 formats_.api_format.input_stream().num_frames()); |
609 | 606 |
610 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 607 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
611 if (debug_dump_.debug_file->Open()) { | 608 if (debug_dump_.debug_file->Open()) { |
612 RETURN_ON_ERR(WriteConfigMessage(false)); | 609 RETURN_ON_ERR(WriteConfigMessage(false)); |
613 | 610 |
614 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 611 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
615 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 612 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
616 const size_t channel_size = | 613 const size_t channel_size = |
617 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 614 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
618 for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i) | 615 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| 616 ++i) |
619 msg->add_input_channel(src[i], channel_size); | 617 msg->add_input_channel(src[i], channel_size); |
620 } | 618 } |
621 #endif | 619 #endif |
622 | 620 |
623 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 621 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
624 RETURN_ON_ERR(ProcessStreamLocked()); | 622 RETURN_ON_ERR(ProcessStreamLocked()); |
625 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 623 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
626 | 624 |
627 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 625 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
628 if (debug_dump_.debug_file->Open()) { | 626 if (debug_dump_.debug_file->Open()) { |
629 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 627 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
630 const size_t channel_size = | 628 const size_t channel_size = |
631 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 629 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
632 for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i) | 630 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
| 631 ++i) |
633 msg->add_output_channel(dest[i], channel_size); | 632 msg->add_output_channel(dest[i], channel_size); |
634 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 633 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
635 &crit_debug_, &debug_dump_.capture)); | 634 &crit_debug_, &debug_dump_.capture)); |
636 } | 635 } |
637 #endif | 636 #endif |
638 | 637 |
639 return kNoError; | 638 return kNoError; |
640 } | 639 } |
641 | 640 |
642 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 641 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
(...skipping 213 matching lines...)
856 } | 855 } |
857 | 856 |
858 int AudioProcessingImpl::AnalyzeReverseStreamLocked( | 857 int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
859 const float* const* src, | 858 const float* const* src, |
860 const StreamConfig& reverse_input_config, | 859 const StreamConfig& reverse_input_config, |
861 const StreamConfig& reverse_output_config) { | 860 const StreamConfig& reverse_output_config) { |
862 if (src == nullptr) { | 861 if (src == nullptr) { |
863 return kNullPointerError; | 862 return kNullPointerError; |
864 } | 863 } |
865 | 864 |
866 if (reverse_input_config.num_channels() <= 0) { | 865 if (reverse_input_config.num_channels() == 0) { |
867 return kBadNumberChannelsError; | 866 return kBadNumberChannelsError; |
868 } | 867 } |
869 | 868 |
870 ProcessingConfig processing_config = formats_.api_format; | 869 ProcessingConfig processing_config = formats_.api_format; |
871 processing_config.reverse_input_stream() = reverse_input_config; | 870 processing_config.reverse_input_stream() = reverse_input_config; |
872 processing_config.reverse_output_stream() = reverse_output_config; | 871 processing_config.reverse_output_stream() = reverse_output_config; |
873 | 872 |
874 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 873 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
875 assert(reverse_input_config.num_frames() == | 874 assert(reverse_input_config.num_frames() == |
876 formats_.api_format.reverse_input_stream().num_frames()); | 875 formats_.api_format.reverse_input_stream().num_frames()); |
877 | 876 |
878 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 877 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
879 if (debug_dump_.debug_file->Open()) { | 878 if (debug_dump_.debug_file->Open()) { |
880 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | 879 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
881 audioproc::ReverseStream* msg = | 880 audioproc::ReverseStream* msg = |
882 debug_dump_.render.event_msg->mutable_reverse_stream(); | 881 debug_dump_.render.event_msg->mutable_reverse_stream(); |
883 const size_t channel_size = | 882 const size_t channel_size = |
884 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 883 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
885 for (int i = 0; | 884 for (size_t i = 0; |
886 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 885 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
887 msg->add_channel(src[i], channel_size); | 886 msg->add_channel(src[i], channel_size); |
888 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 887 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
889 &crit_debug_, &debug_dump_.render)); | 888 &crit_debug_, &debug_dump_.render)); |
890 } | 889 } |
891 #endif | 890 #endif |
892 | 891 |
893 render_.render_audio->CopyFrom(src, | 892 render_.render_audio->CopyFrom(src, |
894 formats_.api_format.reverse_input_stream()); | 893 formats_.api_format.reverse_input_stream()); |
895 return ProcessReverseStreamLocked(); | 894 return ProcessReverseStreamLocked(); |
(...skipping 536 matching lines...)
1432 debug_state->event_msg->Clear(); | 1431 debug_state->event_msg->Clear(); |
1433 | 1432 |
1434 return kNoError; | 1433 return kNoError; |
1435 } | 1434 } |
1436 | 1435 |
1437 int AudioProcessingImpl::WriteInitMessage() { | 1436 int AudioProcessingImpl::WriteInitMessage() { |
1438 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); | 1437 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); |
1439 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); | 1438 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); |
1440 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); | 1439 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); |
1441 | 1440 |
1442 msg->set_num_input_channels( | 1441 msg->set_num_input_channels(static_cast<google::protobuf::int32>( |
1443 formats_.api_format.input_stream().num_channels()); | 1442 formats_.api_format.input_stream().num_channels())); |
1444 msg->set_num_output_channels( | 1443 msg->set_num_output_channels(static_cast<google::protobuf::int32>( |
1445 formats_.api_format.output_stream().num_channels()); | 1444 formats_.api_format.output_stream().num_channels())); |
1446 msg->set_num_reverse_channels( | 1445 msg->set_num_reverse_channels(static_cast<google::protobuf::int32>( |
1447 formats_.api_format.reverse_input_stream().num_channels()); | 1446 formats_.api_format.reverse_input_stream().num_channels())); |
1448 msg->set_reverse_sample_rate( | 1447 msg->set_reverse_sample_rate( |
1449 formats_.api_format.reverse_input_stream().sample_rate_hz()); | 1448 formats_.api_format.reverse_input_stream().sample_rate_hz()); |
1450 msg->set_output_sample_rate( | 1449 msg->set_output_sample_rate( |
1451 formats_.api_format.output_stream().sample_rate_hz()); | 1450 formats_.api_format.output_stream().sample_rate_hz()); |
1452 // TODO(ekmeyerson): Add reverse output fields to | 1451 // TODO(ekmeyerson): Add reverse output fields to |
1453 // debug_dump_.capture.event_msg. | 1452 // debug_dump_.capture.event_msg. |
1454 | 1453 |
1455 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1454 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
1456 &crit_debug_, &debug_dump_.capture)); | 1455 &crit_debug_, &debug_dump_.capture)); |
1457 return kNoError; | 1456 return kNoError; |
(...skipping 46 matching lines...)
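Note: the explicit static_cast<google::protobuf::int32>(...) in WriteInitMessage() exists because the generated protobuf setters take int32 while the channel counts are now size_t; passing the size_t directly would be an implicit unsigned-to-signed narrowing conversion. A minimal sketch of the pattern under that assumption (SetNumChannels is a hypothetical setter, not the audioproc API):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Stand-in for a generated protobuf setter that takes int32.
    void SetNumChannels(int32_t value) { std::cout << value << '\n'; }

    void WriteChannelCount(size_t num_channels) {
      // Channel counts are small in practice, so the narrowing cast is safe;
      // making it explicit documents the signedness change and avoids warnings.
      SetNumChannels(static_cast<int32_t>(num_channels));
    }

    int main() {
      WriteChannelCount(2);  // e.g. a stereo input stream
    }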
1504 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); | 1503 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
1505 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 1504 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
1506 | 1505 |
1507 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1506 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
1508 &crit_debug_, &debug_dump_.capture)); | 1507 &crit_debug_, &debug_dump_.capture)); |
1509 return kNoError; | 1508 return kNoError; |
1510 } | 1509 } |
1511 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1510 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1512 | 1511 |
1513 } // namespace webrtc | 1512 } // namespace webrtc |