| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 325 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 336 return err; | 336 return err; |
| 337 } | 337 } |
| 338 } | 338 } |
| 339 #endif | 339 #endif |
| 340 | 340 |
| 341 return kNoError; | 341 return kNoError; |
| 342 } | 342 } |
| 343 | 343 |
| 344 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 344 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 345 for (const auto& stream : config.streams) { | 345 for (const auto& stream : config.streams) { |
| 346 if (stream.num_channels() < 0) { | |
| 347 return kBadNumberChannelsError; | |
| 348 } | |
| 349 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 346 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 350 return kBadSampleRateError; | 347 return kBadSampleRateError; |
| 351 } | 348 } |
| 352 } | 349 } |
| 353 | 350 |
| 354 const int num_in_channels = config.input_stream().num_channels(); | 351 const size_t num_in_channels = config.input_stream().num_channels(); |
| 355 const int num_out_channels = config.output_stream().num_channels(); | 352 const size_t num_out_channels = config.output_stream().num_channels(); |
| 356 | 353 |
| 357 // Need at least one input channel. | 354 // Need at least one input channel. |
| 358 // Need either one output channel or as many outputs as there are inputs. | 355 // Need either one output channel or as many outputs as there are inputs. |
| 359 if (num_in_channels == 0 || | 356 if (num_in_channels == 0 || |
| 360 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { | 357 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { |
| 361 return kBadNumberChannelsError; | 358 return kBadNumberChannelsError; |
| 362 } | 359 } |
| 363 | 360 |
| 364 if (beamformer_enabled_ && | 361 if (beamformer_enabled_ && |
| 365 (static_cast<size_t>(num_in_channels) != array_geometry_.size() || | 362 (num_in_channels != array_geometry_.size() || num_out_channels > 1)) { |
| 366 num_out_channels > 1)) { | |
| 367 return kBadNumberChannelsError; | 363 return kBadNumberChannelsError; |
| 368 } | 364 } |
| 369 | 365 |
| 370 api_format_ = config; | 366 api_format_ = config; |
| 371 | 367 |
| 372 // We process at the closest native rate >= min(input rate, output rate)... | 368 // We process at the closest native rate >= min(input rate, output rate)... |
| 373 const int min_proc_rate = | 369 const int min_proc_rate = |
| 374 std::min(api_format_.input_stream().sample_rate_hz(), | 370 std::min(api_format_.input_stream().sample_rate_hz(), |
| 375 api_format_.output_stream().sample_rate_hz()); | 371 api_format_.output_stream().sample_rate_hz()); |
| 376 int fwd_proc_rate; | 372 int fwd_proc_rate; |
| (...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 450 } | 446 } |
| 451 | 447 |
| 452 int AudioProcessingImpl::proc_sample_rate_hz() const { | 448 int AudioProcessingImpl::proc_sample_rate_hz() const { |
| 453 return fwd_proc_format_.sample_rate_hz(); | 449 return fwd_proc_format_.sample_rate_hz(); |
| 454 } | 450 } |
| 455 | 451 |
| 456 int AudioProcessingImpl::proc_split_sample_rate_hz() const { | 452 int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
| 457 return split_rate_; | 453 return split_rate_; |
| 458 } | 454 } |
| 459 | 455 |
| 460 int AudioProcessingImpl::num_reverse_channels() const { | 456 size_t AudioProcessingImpl::num_reverse_channels() const { |
| 461 return rev_proc_format_.num_channels(); | 457 return rev_proc_format_.num_channels(); |
| 462 } | 458 } |
| 463 | 459 |
| 464 int AudioProcessingImpl::num_input_channels() const { | 460 size_t AudioProcessingImpl::num_input_channels() const { |
| 465 return api_format_.input_stream().num_channels(); | 461 return api_format_.input_stream().num_channels(); |
| 466 } | 462 } |
| 467 | 463 |
| 468 int AudioProcessingImpl::num_output_channels() const { | 464 size_t AudioProcessingImpl::num_output_channels() const { |
| 469 return api_format_.output_stream().num_channels(); | 465 return api_format_.output_stream().num_channels(); |
| 470 } | 466 } |
| 471 | 467 |
| 472 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 468 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
| 473 CriticalSectionScoped lock(crit_); | 469 CriticalSectionScoped lock(crit_); |
| 474 output_will_be_muted_ = muted; | 470 output_will_be_muted_ = muted; |
| 475 if (agc_manager_.get()) { | 471 if (agc_manager_.get()) { |
| 476 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 472 agc_manager_->SetCaptureMuted(output_will_be_muted_); |
| 477 } | 473 } |
| 478 } | 474 } |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 521 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 517 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 522 assert(processing_config.input_stream().num_frames() == | 518 assert(processing_config.input_stream().num_frames() == |
| 523 api_format_.input_stream().num_frames()); | 519 api_format_.input_stream().num_frames()); |
| 524 | 520 |
| 525 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 521 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 526 if (debug_file_->Open()) { | 522 if (debug_file_->Open()) { |
| 527 event_msg_->set_type(audioproc::Event::STREAM); | 523 event_msg_->set_type(audioproc::Event::STREAM); |
| 528 audioproc::Stream* msg = event_msg_->mutable_stream(); | 524 audioproc::Stream* msg = event_msg_->mutable_stream(); |
| 529 const size_t channel_size = | 525 const size_t channel_size = |
| 530 sizeof(float) * api_format_.input_stream().num_frames(); | 526 sizeof(float) * api_format_.input_stream().num_frames(); |
| 531 for (int i = 0; i < api_format_.input_stream().num_channels(); ++i) | 527 for (size_t i = 0; i < api_format_.input_stream().num_channels(); ++i) |
| 532 msg->add_input_channel(src[i], channel_size); | 528 msg->add_input_channel(src[i], channel_size); |
| 533 } | 529 } |
| 534 #endif | 530 #endif |
| 535 | 531 |
| 536 capture_audio_->CopyFrom(src, api_format_.input_stream()); | 532 capture_audio_->CopyFrom(src, api_format_.input_stream()); |
| 537 RETURN_ON_ERR(ProcessStreamLocked()); | 533 RETURN_ON_ERR(ProcessStreamLocked()); |
| 538 capture_audio_->CopyTo(api_format_.output_stream(), dest); | 534 capture_audio_->CopyTo(api_format_.output_stream(), dest); |
| 539 | 535 |
| 540 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 536 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 541 if (debug_file_->Open()) { | 537 if (debug_file_->Open()) { |
| 542 audioproc::Stream* msg = event_msg_->mutable_stream(); | 538 audioproc::Stream* msg = event_msg_->mutable_stream(); |
| 543 const size_t channel_size = | 539 const size_t channel_size = |
| 544 sizeof(float) * api_format_.output_stream().num_frames(); | 540 sizeof(float) * api_format_.output_stream().num_frames(); |
| 545 for (int i = 0; i < api_format_.output_stream().num_channels(); ++i) | 541 for (size_t i = 0; i < api_format_.output_stream().num_channels(); ++i) |
| 546 msg->add_output_channel(dest[i], channel_size); | 542 msg->add_output_channel(dest[i], channel_size); |
| 547 RETURN_ON_ERR(WriteMessageToDebugFile()); | 543 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 548 } | 544 } |
| 549 #endif | 545 #endif |
| 550 | 546 |
| 551 return kNoError; | 547 return kNoError; |
| 552 } | 548 } |
| 553 | 549 |
| 554 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 550 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| 555 CriticalSectionScoped crit_scoped(crit_); | 551 CriticalSectionScoped crit_scoped(crit_); |
| (...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 695 } | 691 } |
| 696 | 692 |
| 697 int AudioProcessingImpl::AnalyzeReverseStream( | 693 int AudioProcessingImpl::AnalyzeReverseStream( |
| 698 const float* const* data, | 694 const float* const* data, |
| 699 const StreamConfig& reverse_config) { | 695 const StreamConfig& reverse_config) { |
| 700 CriticalSectionScoped crit_scoped(crit_); | 696 CriticalSectionScoped crit_scoped(crit_); |
| 701 if (data == NULL) { | 697 if (data == NULL) { |
| 702 return kNullPointerError; | 698 return kNullPointerError; |
| 703 } | 699 } |
| 704 | 700 |
| 705 if (reverse_config.num_channels() <= 0) { | 701 if (reverse_config.num_channels() == 0) { |
| 706 return kBadNumberChannelsError; | 702 return kBadNumberChannelsError; |
| 707 } | 703 } |
| 708 | 704 |
| 709 ProcessingConfig processing_config = api_format_; | 705 ProcessingConfig processing_config = api_format_; |
| 710 processing_config.reverse_stream() = reverse_config; | 706 processing_config.reverse_stream() = reverse_config; |
| 711 | 707 |
| 712 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 708 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| 713 assert(reverse_config.num_frames() == | 709 assert(reverse_config.num_frames() == |
| 714 api_format_.reverse_stream().num_frames()); | 710 api_format_.reverse_stream().num_frames()); |
| 715 | 711 |
| 716 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 712 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 717 if (debug_file_->Open()) { | 713 if (debug_file_->Open()) { |
| 718 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 714 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
| 719 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 715 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
| 720 const size_t channel_size = | 716 const size_t channel_size = |
| 721 sizeof(float) * api_format_.reverse_stream().num_frames(); | 717 sizeof(float) * api_format_.reverse_stream().num_frames(); |
| 722 for (int i = 0; i < api_format_.reverse_stream().num_channels(); ++i) | 718 for (size_t i = 0; i < api_format_.reverse_stream().num_channels(); ++i) |
| 723 msg->add_channel(data[i], channel_size); | 719 msg->add_channel(data[i], channel_size); |
| 724 RETURN_ON_ERR(WriteMessageToDebugFile()); | 720 RETURN_ON_ERR(WriteMessageToDebugFile()); |
| 725 } | 721 } |
| 726 #endif | 722 #endif |
| 727 | 723 |
| 728 render_audio_->CopyFrom(data, api_format_.reverse_stream()); | 724 render_audio_->CopyFrom(data, api_format_.reverse_stream()); |
| 729 return AnalyzeReverseStreamLocked(); | 725 return AnalyzeReverseStreamLocked(); |
| 730 } | 726 } |
| 731 | 727 |
| 732 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | 728 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| (...skipping 391 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1124 | 1120 |
| 1125 event_msg_->Clear(); | 1121 event_msg_->Clear(); |
| 1126 | 1122 |
| 1127 return kNoError; | 1123 return kNoError; |
| 1128 } | 1124 } |
| 1129 | 1125 |
| 1130 int AudioProcessingImpl::WriteInitMessage() { | 1126 int AudioProcessingImpl::WriteInitMessage() { |
| 1131 event_msg_->set_type(audioproc::Event::INIT); | 1127 event_msg_->set_type(audioproc::Event::INIT); |
| 1132 audioproc::Init* msg = event_msg_->mutable_init(); | 1128 audioproc::Init* msg = event_msg_->mutable_init(); |
| 1133 msg->set_sample_rate(api_format_.input_stream().sample_rate_hz()); | 1129 msg->set_sample_rate(api_format_.input_stream().sample_rate_hz()); |
| 1134 msg->set_num_input_channels(api_format_.input_stream().num_channels()); | 1130 msg->set_num_input_channels(static_cast<google::protobuf::int32>( |
| 1135 msg->set_num_output_channels(api_format_.output_stream().num_channels()); | 1131 api_format_.input_stream().num_channels())); |
| 1136 msg->set_num_reverse_channels(api_format_.reverse_stream().num_channels()); | 1132 msg->set_num_output_channels(static_cast<google::protobuf::int32>( |
| 1133 api_format_.output_stream().num_channels())); |
| 1134 msg->set_num_reverse_channels(static_cast<google::protobuf::int32>( |
| 1135 api_format_.reverse_stream().num_channels())); |
| 1137 msg->set_reverse_sample_rate(api_format_.reverse_stream().sample_rate_hz()); | 1136 msg->set_reverse_sample_rate(api_format_.reverse_stream().sample_rate_hz()); |
| 1138 msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz()); | 1137 msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz()); |
| 1139 | 1138 |
| 1140 int err = WriteMessageToDebugFile(); | 1139 int err = WriteMessageToDebugFile(); |
| 1141 if (err != kNoError) { | 1140 if (err != kNoError) { |
| 1142 return err; | 1141 return err; |
| 1143 } | 1142 } |
| 1144 | 1143 |
| 1145 return kNoError; | 1144 return kNoError; |
| 1146 } | 1145 } |
| 1147 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1146 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1148 | 1147 |
| 1149 } // namespace webrtc | 1148 } // namespace webrtc |
| OLD | NEW |