| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 819 matching lines...) |
| 830 #endif | 830 #endif |
| 831 | 831 |
| 832 render_.render_audio->CopyFrom(src, | 832 render_.render_audio->CopyFrom(src, |
| 833 formats_.api_format.reverse_input_stream()); | 833 formats_.api_format.reverse_input_stream()); |
| 834 return ProcessReverseStreamLocked(); | 834 return ProcessReverseStreamLocked(); |
| 835 } | 835 } |
| 836 | 836 |
| 837 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 837 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 838 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 838 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
| 839 rtc::CritScope cs(&crit_render_); | 839 rtc::CritScope cs(&crit_render_); |
| 840 RETURN_ON_ERR(AnalyzeReverseStream(frame)); | |
| 841 if (is_rev_processed()) { | |
| 842 render_.render_audio->InterleaveTo(frame, true); | |
| 843 } | |
| 844 | |
| 845 return kNoError; | |
| 846 } | |
| 847 | |
| 848 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | |
| 849 TRACE_EVENT0("webrtc", "AudioProcessing::AnalyzeReverseStream_AudioFrame"); | |
| 850 rtc::CritScope cs(&crit_render_); | |
| 851 if (frame == nullptr) { | 840 if (frame == nullptr) { |
| 852 return kNullPointerError; | 841 return kNullPointerError; |
| 853 } | 842 } |
| 854 // Must be a native rate. | 843 // Must be a native rate. |
| 855 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 844 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 856 frame->sample_rate_hz_ != kSampleRate16kHz && | 845 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 857 frame->sample_rate_hz_ != kSampleRate32kHz && | 846 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 858 frame->sample_rate_hz_ != kSampleRate48kHz) { | 847 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 859 return kBadSampleRateError; | 848 return kBadSampleRateError; |
| 860 } | 849 } |
| (...skipping 25 matching lines...) |
| 886 debug_dump_.render.event_msg->mutable_reverse_stream(); | 875 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 887 const size_t data_size = | 876 const size_t data_size = |
| 888 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 877 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 889 msg->set_data(frame->data_, data_size); | 878 msg->set_data(frame->data_, data_size); |
| 890 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 879 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 891 &debug_dump_.num_bytes_left_for_log_, | 880 &debug_dump_.num_bytes_left_for_log_, |
| 892 &crit_debug_, &debug_dump_.render)); | 881 &crit_debug_, &debug_dump_.render)); |
| 893 } | 882 } |
| 894 #endif | 883 #endif |
| 895 render_.render_audio->DeinterleaveFrom(frame); | 884 render_.render_audio->DeinterleaveFrom(frame); |
| 896 return ProcessReverseStreamLocked(); | 885 RETURN_ON_ERR(ProcessReverseStreamLocked()); |
| | 886 if (is_rev_processed()) { |
| | 887 render_.render_audio->InterleaveTo(frame, true); |
| | 888 } |
| | 889 return kNoError; |
| 897 } | 890 } |
| 898 | 891 |
| 899 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 892 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| 900 AudioBuffer* ra = render_.render_audio.get(); // For brevity. | 893 AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
| 901 if (rev_analysis_needed()) { | 894 if (rev_analysis_needed()) { |
| 902 ra->SplitIntoFrequencyBands(); | 895 ra->SplitIntoFrequencyBands(); |
| 903 } | 896 } |
| 904 | 897 |
| 905 if (constants_.intelligibility_enabled) { | 898 if (constants_.intelligibility_enabled) { |
| 906 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( | 899 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( |
| (...skipping 540 matching lines...) |
| 1447 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 1440 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| 1448 | 1441 |
| 1449 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1442 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1450 &debug_dump_.num_bytes_left_for_log_, | 1443 &debug_dump_.num_bytes_left_for_log_, |
| 1451 &crit_debug_, &debug_dump_.capture)); | 1444 &crit_debug_, &debug_dump_.capture)); |
| 1452 return kNoError; | 1445 return kNoError; |
| 1453 } | 1446 } |
| 1454 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1447 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1455 | 1448 |
| 1456 } // namespace webrtc | 1449 } // namespace webrtc |
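
For context on the diff above: the NEW side removes the AudioFrame overload of AnalyzeReverseStream() and folds its null-pointer check, native-rate check, debug-dump write, and deinterleaving into ProcessReverseStream(AudioFrame*), which now also re-interleaves the processed render audio back into the frame. A minimal caller-side sketch of the resulting far-end path follows; the helper name FeedFarEndFrame and the include paths are illustrative assumptions, not part of this change.

#include "webrtc/modules/audio_processing/include/audio_processing.h"  // AudioProcessing (path assumed for this branch layout)
#include "webrtc/modules/include/module_common_types.h"                // AudioFrame (path assumed for this branch layout)

// Feeds one 10 ms far-end (render) frame through APM. After this change, the
// single ProcessReverseStream(AudioFrame*) call performs the analysis that the
// old AnalyzeReverseStream(AudioFrame*) overload used to do, and writes the
// processed samples back into |frame| when reverse processing is active.
int FeedFarEndFrame(webrtc::AudioProcessing* apm, webrtc::AudioFrame* frame) {
  // |frame| must use a native rate (8, 16, 32 or 48 kHz); otherwise the call
  // returns AudioProcessing::kBadSampleRateError, per the checks in the diff.
  return apm->ProcessReverseStream(frame);
}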