| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 392 matching lines...) |
| 403 return err; | 403 return err; |
| 404 } | 404 } |
| 405 } | 405 } |
| 406 #endif | 406 #endif |
| 407 | 407 |
| 408 return kNoError; | 408 return kNoError; |
| 409 } | 409 } |
| 410 | 410 |
| 411 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 411 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 412 for (const auto& stream : config.streams) { | 412 for (const auto& stream : config.streams) { |
| 413 if (stream.num_channels() < 0) { | |
| 414 return kBadNumberChannelsError; | |
| 415 } | |
| 416 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 413 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 417 return kBadSampleRateError; | 414 return kBadSampleRateError; |
| 418 } | 415 } |
| 419 } | 416 } |
| 420 | 417 |
| 421 const int num_in_channels = config.input_stream().num_channels(); | 418 const size_t num_in_channels = config.input_stream().num_channels(); |
| 422 const int num_out_channels = config.output_stream().num_channels(); | 419 const size_t num_out_channels = config.output_stream().num_channels(); |
| 423 | 420 |
| 424 // Need at least one input channel. | 421 // Need at least one input channel. |
| 425 // Need either one output channel or as many outputs as there are inputs. | 422 // Need either one output channel or as many outputs as there are inputs. |
| 426 if (num_in_channels == 0 || | 423 if (num_in_channels == 0 || |
| 427 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { | 424 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { |
| 428 return kBadNumberChannelsError; | 425 return kBadNumberChannelsError; |
| 429 } | 426 } |
| 430 | 427 |
| 431 if (capture_nonlocked_.beamformer_enabled && | 428 if (capture_nonlocked_.beamformer_enabled && |
| 432 static_cast<size_t>(num_in_channels) != capture_.array_geometry.size()) { | 429 num_in_channels != capture_.array_geometry.size()) { |
| 433 return kBadNumberChannelsError; | 430 return kBadNumberChannelsError; |
| 434 } | 431 } |
| 435 | 432 |
| 436 formats_.api_format = config; | 433 formats_.api_format = config; |
| 437 | 434 |
| 438 // We process at the closest native rate >= min(input rate, output rate)... | 435 // We process at the closest native rate >= min(input rate, output rate)... |
| 439 const int min_proc_rate = | 436 const int min_proc_rate = |
| 440 std::min(formats_.api_format.input_stream().sample_rate_hz(), | 437 std::min(formats_.api_format.input_stream().sample_rate_hz(), |
| 441 formats_.api_format.output_stream().sample_rate_hz()); | 438 formats_.api_format.output_stream().sample_rate_hz()); |
| 442 int fwd_proc_rate; | 439 int fwd_proc_rate; |
| (...skipping 77 matching lines...) |
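The hunk above drops the old `num_channels() < 0` guard because `StreamConfig::num_channels()` now returns `size_t`, which cannot be negative, and switches the local channel counts to `size_t` so the comparison against `capture_.array_geometry.size()` no longer needs a `static_cast`. A minimal sketch of the validation that remains, using a hypothetical standalone `ValidateChannelCounts` helper (not part of this patch):

```cpp
#include <cstddef>

// Hypothetical helper mirroring the checks in InitializeLocked() after the
// switch to unsigned channel counts: a negative count can no longer occur,
// so only "no input channels" and "output count neither 1 nor equal to the
// input count" remain as failure cases.
bool ValidateChannelCounts(size_t num_in_channels, size_t num_out_channels) {
  if (num_in_channels == 0) {
    return false;  // Need at least one input channel.
  }
  // Need either one output channel or as many outputs as there are inputs.
  return num_out_channels == 1 || num_out_channels == num_in_channels;
}
```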
| 520 int AudioProcessingImpl::proc_sample_rate_hz() const { | 517 int AudioProcessingImpl::proc_sample_rate_hz() const { |
| 521 // Used as callback from submodules, hence locking is not allowed. | 518 // Used as callback from submodules, hence locking is not allowed. |
| 522 return capture_nonlocked_.fwd_proc_format.sample_rate_hz(); | 519 return capture_nonlocked_.fwd_proc_format.sample_rate_hz(); |
| 523 } | 520 } |
| 524 | 521 |
| 525 int AudioProcessingImpl::proc_split_sample_rate_hz() const { | 522 int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
| 526 // Used as callback from submodules, hence locking is not allowed. | 523 // Used as callback from submodules, hence locking is not allowed. |
| 527 return capture_nonlocked_.split_rate; | 524 return capture_nonlocked_.split_rate; |
| 528 } | 525 } |
| 529 | 526 |
| 530 int AudioProcessingImpl::num_reverse_channels() const { | 527 size_t AudioProcessingImpl::num_reverse_channels() const { |
| 531 // Used as callback from submodules, hence locking is not allowed. | 528 // Used as callback from submodules, hence locking is not allowed. |
| 532 return formats_.rev_proc_format.num_channels(); | 529 return formats_.rev_proc_format.num_channels(); |
| 533 } | 530 } |
| 534 | 531 |
| 535 int AudioProcessingImpl::num_input_channels() const { | 532 size_t AudioProcessingImpl::num_input_channels() const { |
| 536 // Used as callback from submodules, hence locking is not allowed. | 533 // Used as callback from submodules, hence locking is not allowed. |
| 537 return formats_.api_format.input_stream().num_channels(); | 534 return formats_.api_format.input_stream().num_channels(); |
| 538 } | 535 } |
| 539 | 536 |
| 540 int AudioProcessingImpl::num_proc_channels() const { | 537 size_t AudioProcessingImpl::num_proc_channels() const { |
| 541 // Used as callback from submodules, hence locking is not allowed. | 538 // Used as callback from submodules, hence locking is not allowed. |
| 542 return capture_nonlocked_.beamformer_enabled ? 1 : num_output_channels(); | 539 return capture_nonlocked_.beamformer_enabled ? 1 : num_output_channels(); |
| 543 } | 540 } |
| 544 | 541 |
| 545 int AudioProcessingImpl::num_output_channels() const { | 542 size_t AudioProcessingImpl::num_output_channels() const { |
| 546 // Used as callback from submodules, hence locking is not allowed. | 543 // Used as callback from submodules, hence locking is not allowed. |
| 547 return formats_.api_format.output_stream().num_channels(); | 544 return formats_.api_format.output_stream().num_channels(); |
| 548 } | 545 } |
| 549 | 546 |
| 550 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 547 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
| 551 rtc::CritScope cs(&crit_capture_); | 548 rtc::CritScope cs(&crit_capture_); |
| 552 capture_.output_will_be_muted = muted; | 549 capture_.output_will_be_muted = muted; |
| 553 if (private_submodules_->agc_manager.get()) { | 550 if (private_submodules_->agc_manager.get()) { |
| 554 private_submodules_->agc_manager->SetCaptureMuted( | 551 private_submodules_->agc_manager->SetCaptureMuted( |
| 555 capture_.output_will_be_muted); | 552 capture_.output_will_be_muted); |
| (...skipping 68 matching lines...) |
| 624 formats_.api_format.input_stream().num_frames()); | 621 formats_.api_format.input_stream().num_frames()); |
| 625 | 622 |
| 626 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 623 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 627 if (debug_dump_.debug_file->Open()) { | 624 if (debug_dump_.debug_file->Open()) { |
| 628 RETURN_ON_ERR(WriteConfigMessage(false)); | 625 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 629 | 626 |
| 630 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 627 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 631 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 628 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 632 const size_t channel_size = | 629 const size_t channel_size = |
| 633 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 630 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 634 for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i) | 631 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| 632 ++i) |
| 635 msg->add_input_channel(src[i], channel_size); | 633 msg->add_input_channel(src[i], channel_size); |
| 636 } | 634 } |
| 637 #endif | 635 #endif |
| 638 | 636 |
| 639 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 637 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 640 RETURN_ON_ERR(ProcessStreamLocked()); | 638 RETURN_ON_ERR(ProcessStreamLocked()); |
| 641 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 639 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 642 | 640 |
| 643 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 641 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 644 if (debug_dump_.debug_file->Open()) { | 642 if (debug_dump_.debug_file->Open()) { |
| 645 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 643 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 646 const size_t channel_size = | 644 const size_t channel_size = |
| 647 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 645 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| 648 for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i) | 646 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
| 647 ++i) |
| 649 msg->add_output_channel(dest[i], channel_size); | 648 msg->add_output_channel(dest[i], channel_size); |
| 650 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 649 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 651 &crit_debug_, &debug_dump_.capture)); | 650 &crit_debug_, &debug_dump_.capture)); |
| 652 } | 651 } |
| 653 #endif | 652 #endif |
| 654 | 653 |
| 655 return kNoError; | 654 return kNoError; |
| 656 } | 655 } |
| 657 | 656 |
| 658 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 657 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| (...skipping 213 matching lines...) |
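In the capture-path debug dump above, the per-channel loop index changes from `int` to `size_t` to match the now-unsigned `num_channels()`, avoiding signed/unsigned comparison warnings. A rough sketch of what that loop does, where `add_bytes` is a stand-in for `msg->add_input_channel()` / `msg->add_output_channel()` rather than a real API:

```cpp
#include <cstddef>

// Each channel is a contiguous block of num_frames floats, so channel_size is
// the byte count handed to the dump message for every channel.
void DumpChannels(const float* const* channels,
                  size_t num_channels,
                  size_t num_frames,
                  void (*add_bytes)(const void* data, size_t size)) {
  const size_t channel_size = sizeof(float) * num_frames;
  for (size_t i = 0; i < num_channels; ++i) {
    add_bytes(channels[i], channel_size);
  }
}
```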
| 872 } | 871 } |
| 873 | 872 |
| 874 int AudioProcessingImpl::AnalyzeReverseStreamLocked( | 873 int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
| 875 const float* const* src, | 874 const float* const* src, |
| 876 const StreamConfig& reverse_input_config, | 875 const StreamConfig& reverse_input_config, |
| 877 const StreamConfig& reverse_output_config) { | 876 const StreamConfig& reverse_output_config) { |
| 878 if (src == nullptr) { | 877 if (src == nullptr) { |
| 879 return kNullPointerError; | 878 return kNullPointerError; |
| 880 } | 879 } |
| 881 | 880 |
| 882 if (reverse_input_config.num_channels() <= 0) { | 881 if (reverse_input_config.num_channels() == 0) { |
| 883 return kBadNumberChannelsError; | 882 return kBadNumberChannelsError; |
| 884 } | 883 } |
| 885 | 884 |
| 886 ProcessingConfig processing_config = formats_.api_format; | 885 ProcessingConfig processing_config = formats_.api_format; |
| 887 processing_config.reverse_input_stream() = reverse_input_config; | 886 processing_config.reverse_input_stream() = reverse_input_config; |
| 888 processing_config.reverse_output_stream() = reverse_output_config; | 887 processing_config.reverse_output_stream() = reverse_output_config; |
| 889 | 888 |
| 890 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 889 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
| 891 assert(reverse_input_config.num_frames() == | 890 assert(reverse_input_config.num_frames() == |
| 892 formats_.api_format.reverse_input_stream().num_frames()); | 891 formats_.api_format.reverse_input_stream().num_frames()); |
| 893 | 892 |
| 894 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 893 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 895 if (debug_dump_.debug_file->Open()) { | 894 if (debug_dump_.debug_file->Open()) { |
| 896 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | 895 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| 897 audioproc::ReverseStream* msg = | 896 audioproc::ReverseStream* msg = |
| 898 debug_dump_.render.event_msg->mutable_reverse_stream(); | 897 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 899 const size_t channel_size = | 898 const size_t channel_size = |
| 900 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 899 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 901 for (int i = 0; | 900 for (size_t i = 0; |
| 902 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 901 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| 903 msg->add_channel(src[i], channel_size); | 902 msg->add_channel(src[i], channel_size); |
| 904 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 903 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 905 &crit_debug_, &debug_dump_.render)); | 904 &crit_debug_, &debug_dump_.render)); |
| 906 } | 905 } |
| 907 #endif | 906 #endif |
| 908 | 907 |
| 909 render_.render_audio->CopyFrom(src, | 908 render_.render_audio->CopyFrom(src, |
| 910 formats_.api_format.reverse_input_stream()); | 909 formats_.api_format.reverse_input_stream()); |
| 911 return ProcessReverseStreamLocked(); | 910 return ProcessReverseStreamLocked(); |
| (...skipping 536 matching lines...) |
| 1448 debug_state->event_msg->Clear(); | 1447 debug_state->event_msg->Clear(); |
| 1449 | 1448 |
| 1450 return kNoError; | 1449 return kNoError; |
| 1451 } | 1450 } |
| 1452 | 1451 |
| 1453 int AudioProcessingImpl::WriteInitMessage() { | 1452 int AudioProcessingImpl::WriteInitMessage() { |
| 1454 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); | 1453 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); |
| 1455 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); | 1454 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); |
| 1456 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); | 1455 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); |
| 1457 | 1456 |
| 1458 msg->set_num_input_channels( | 1457 msg->set_num_input_channels(static_cast<google::protobuf::int32>( |
| 1459 formats_.api_format.input_stream().num_channels()); | 1458 formats_.api_format.input_stream().num_channels())); |
| 1460 msg->set_num_output_channels( | 1459 msg->set_num_output_channels(static_cast<google::protobuf::int32>( |
| 1461 formats_.api_format.output_stream().num_channels()); | 1460 formats_.api_format.output_stream().num_channels())); |
| 1462 msg->set_num_reverse_channels( | 1461 msg->set_num_reverse_channels(static_cast<google::protobuf::int32>( |
| 1463 formats_.api_format.reverse_input_stream().num_channels()); | 1462 formats_.api_format.reverse_input_stream().num_channels())); |
| 1464 msg->set_reverse_sample_rate( | 1463 msg->set_reverse_sample_rate( |
| 1465 formats_.api_format.reverse_input_stream().sample_rate_hz()); | 1464 formats_.api_format.reverse_input_stream().sample_rate_hz()); |
| 1466 msg->set_output_sample_rate( | 1465 msg->set_output_sample_rate( |
| 1467 formats_.api_format.output_stream().sample_rate_hz()); | 1466 formats_.api_format.output_stream().sample_rate_hz()); |
| 1468 // TODO(ekmeyerson): Add reverse output fields to | 1467 // TODO(ekmeyerson): Add reverse output fields to |
| 1469 // debug_dump_.capture.event_msg. | 1468 // debug_dump_.capture.event_msg. |
| 1470 | 1469 |
| 1471 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1470 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1472 &crit_debug_, &debug_dump_.capture)); | 1471 &crit_debug_, &debug_dump_.capture)); |
| 1473 return kNoError; | 1472 return kNoError; |
| (...skipping 46 matching lines...) |
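The `WriteInitMessage()` hunk adds explicit casts because the channel-count getters now return `size_t` while the generated protobuf setters take a 32-bit signed integer; the `static_cast<google::protobuf::int32>` makes the narrowing conversion intentional rather than implicit. An illustrative sketch, where the message type is a stand-in rather than the real `audioproc::Init` and `int32_t` stands in for `google::protobuf::int32`:

```cpp
#include <cstddef>
#include <cstdint>

// Stand-in for a generated protobuf message with an int32 channel-count field.
struct FakeInitMessage {
  void set_num_input_channels(int32_t value) { num_input_channels_ = value; }

 private:
  int32_t num_input_channels_ = 0;
};

void WriteChannelCount(FakeInitMessage* msg, size_t num_input_channels) {
  // Explicit narrowing from size_t to the 32-bit signed field type.
  msg->set_num_input_channels(static_cast<int32_t>(num_input_channels));
}
```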
| 1520 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); | 1519 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1521 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 1520 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| 1522 | 1521 |
| 1523 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1522 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1524 &crit_debug_, &debug_dump_.capture)); | 1523 &crit_debug_, &debug_dump_.capture)); |
| 1525 return kNoError; | 1524 return kNoError; |
| 1526 } | 1525 } |
| 1527 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1526 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1528 | 1527 |
| 1529 } // namespace webrtc | 1528 } // namespace webrtc |