| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" | 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" |
| 12 | 12 |
| 13 #include <assert.h> | |
| 14 #include <algorithm> | 13 #include <algorithm> |
| 15 | 14 |
| 16 #include "webrtc/base/checks.h" | 15 #include "webrtc/base/checks.h" |
| 17 #include "webrtc/base/platform_file.h" | 16 #include "webrtc/base/platform_file.h" |
| 18 #include "webrtc/base/trace_event.h" | 17 #include "webrtc/base/trace_event.h" |
| 19 #include "webrtc/common_audio/audio_converter.h" | 18 #include "webrtc/common_audio/audio_converter.h" |
| 20 #include "webrtc/common_audio/channel_buffer.h" | 19 #include "webrtc/common_audio/channel_buffer.h" |
| 21 #include "webrtc/common_audio/include/audio_util.h" | 20 #include "webrtc/common_audio/include/audio_util.h" |
| 22 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" | 21 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" |
| 23 #include "webrtc/modules/audio_processing/aec/aec_core.h" | 22 #include "webrtc/modules/audio_processing/aec/aec_core.h" |
| (...skipping 61 matching lines...) | |
| 85 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { | 84 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
| 86 switch (layout) { | 85 switch (layout) { |
| 87 case AudioProcessing::kMono: | 86 case AudioProcessing::kMono: |
| 88 case AudioProcessing::kStereo: | 87 case AudioProcessing::kStereo: |
| 89 return false; | 88 return false; |
| 90 case AudioProcessing::kMonoAndKeyboard: | 89 case AudioProcessing::kMonoAndKeyboard: |
| 91 case AudioProcessing::kStereoAndKeyboard: | 90 case AudioProcessing::kStereoAndKeyboard: |
| 92 return true; | 91 return true; |
| 93 } | 92 } |
| 94 | 93 |
| 95 assert(false); | 94 RTC_NOTREACHED(); |
| 96 return false; | 95 return false; |
| 97 } | 96 } |
| 98 | 97 |
| 99 bool SampleRateSupportsMultiBand(int sample_rate_hz) { | 98 bool SampleRateSupportsMultiBand(int sample_rate_hz) { |
| 100 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || | 99 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || |
| 101 sample_rate_hz == AudioProcessing::kSampleRate48kHz; | 100 sample_rate_hz == AudioProcessing::kSampleRate48kHz; |
| 102 } | 101 } |
| 103 | 102 |
| 104 int FindNativeProcessRateToUse(int minimum_rate, bool band_splitting_required) { | 103 int FindNativeProcessRateToUse(int minimum_rate, bool band_splitting_required) { |
| 105 #ifdef WEBRTC_ARCH_ARM_FAMILY | 104 #ifdef WEBRTC_ARCH_ARM_FAMILY |
| (...skipping 569 matching lines...) | |
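The hunk above replaces the old `assert(false)` fallback in `LayoutHasKeyboard()` with `RTC_NOTREACHED()` and, correspondingly, drops `#include <assert.h>` now that the checks go through `webrtc/base/checks.h`. Below is a minimal, self-contained sketch of the pattern; the `SKETCH_NOTREACHED` macro and the trimmed-down `ChannelLayout` enum are illustrative stand-ins, not WebRTC's actual definitions.

```cpp
// Minimal sketch (not the real WebRTC macro): a NOTREACHED-style helper that
// aborts in debug builds when an "impossible" switch case is reached.
#include <cstdio>
#include <cstdlib>

#if !defined(NDEBUG)
#define SKETCH_NOTREACHED()                                            \
  do {                                                                 \
    std::fprintf(stderr, "Not reached: %s:%d\n", __FILE__, __LINE__);  \
    std::abort();                                                      \
  } while (0)
#else
#define SKETCH_NOTREACHED() do {} while (0)
#endif

enum class ChannelLayout { kMono, kStereo, kMonoAndKeyboard, kStereoAndKeyboard };

bool LayoutHasKeyboard(ChannelLayout layout) {
  switch (layout) {
    case ChannelLayout::kMono:
    case ChannelLayout::kStereo:
      return false;
    case ChannelLayout::kMonoAndKeyboard:
    case ChannelLayout::kStereoAndKeyboard:
      return true;
  }
  // Unreachable for valid enum values; flags corruption in debug builds.
  SKETCH_NOTREACHED();
  return false;
}

int main() {
  std::printf("%d\n", LayoutHasKeyboard(ChannelLayout::kMonoAndKeyboard) ? 1 : 0);
  return 0;
}
```

Keeping the `return false;` after the unreachable marker, as the diff does, leaves release builds (where the macro compiles to nothing) well defined and avoids missing-return warnings.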
| 675 processing_config.input_stream() = input_config; | 674 processing_config.input_stream() = input_config; |
| 676 processing_config.output_stream() = output_config; | 675 processing_config.output_stream() = output_config; |
| 677 | 676 |
| 678 { | 677 { |
| 679 // Do conditional reinitialization. | 678 // Do conditional reinitialization. |
| 680 rtc::CritScope cs_render(&crit_render_); | 679 rtc::CritScope cs_render(&crit_render_); |
| 681 RETURN_ON_ERR( | 680 RETURN_ON_ERR( |
| 682 MaybeInitializeCapture(processing_config, reinitialization_required)); | 681 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 683 } | 682 } |
| 684 rtc::CritScope cs_capture(&crit_capture_); | 683 rtc::CritScope cs_capture(&crit_capture_); |
| 685 assert(processing_config.input_stream().num_frames() == | 684 RTC_DCHECK_EQ(processing_config.input_stream().num_frames(), |
| 686 formats_.api_format.input_stream().num_frames()); | 685 formats_.api_format.input_stream().num_frames()); |
| 687 | 686 |
| 688 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 687 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 689 if (debug_dump_.debug_file->is_open()) { | 688 if (debug_dump_.debug_file->is_open()) { |
| 690 RETURN_ON_ERR(WriteConfigMessage(false)); | 689 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 691 | 690 |
| 692 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 691 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 693 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 692 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 694 const size_t channel_size = | 693 const size_t channel_size = |
| 695 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 694 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 696 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 695 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| (...skipping 295 matching lines...) | |
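In the `ProcessStream()` hunk above, the frame-count sanity check moves from a bare `assert()` to `RTC_DCHECK_EQ()`, which is only active in debug-style builds. Here is a minimal sketch of that kind of macro; `SKETCH_DCHECK_EQ` and the toy `StreamConfig` are assumptions for illustration and make no claim about the real `webrtc/base/checks.h` beyond its general shape.

```cpp
// Minimal sketch (not WebRTC's checks.h): a DCHECK_EQ-style macro that compiles
// to a real comparison only in debug builds, mirroring the RTC_DCHECK_EQ on the
// input-stream frame counts above.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

#if !defined(NDEBUG)
#define SKETCH_DCHECK_EQ(a, b)                                              \
  do {                                                                      \
    if ((a) != (b)) {                                                       \
      std::fprintf(stderr, "Check failed: %s == %s (%s:%d)\n", #a, #b,      \
                   __FILE__, __LINE__);                                     \
      std::abort();                                                         \
    }                                                                       \
  } while (0)
#else
// Release builds: type-check the comparison without evaluating the operands.
#define SKETCH_DCHECK_EQ(a, b) do { (void)sizeof((a) == (b)); } while (0)
#endif

// Toy stand-in for a stream configuration carrying a per-10-ms frame count.
struct StreamConfig {
  size_t num_frames() const { return num_frames_; }
  size_t num_frames_ = 480;  // e.g. 10 ms at 48 kHz
};

int main() {
  StreamConfig input, api_input;
  // Same spirit as the check on the two stream configs in the hunk above.
  SKETCH_DCHECK_EQ(input.num_frames(), api_input.num_frames());
  std::puts("frame counts match");
  return 0;
}
```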
| 992 | 991 |
| 993 if (reverse_input_config.num_channels() == 0) { | 992 if (reverse_input_config.num_channels() == 0) { |
| 994 return kBadNumberChannelsError; | 993 return kBadNumberChannelsError; |
| 995 } | 994 } |
| 996 | 995 |
| 997 ProcessingConfig processing_config = formats_.api_format; | 996 ProcessingConfig processing_config = formats_.api_format; |
| 998 processing_config.reverse_input_stream() = reverse_input_config; | 997 processing_config.reverse_input_stream() = reverse_input_config; |
| 999 processing_config.reverse_output_stream() = reverse_output_config; | 998 processing_config.reverse_output_stream() = reverse_output_config; |
| 1000 | 999 |
| 1001 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 1000 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
| 1002 assert(reverse_input_config.num_frames() == | 1001 RTC_DCHECK_EQ(reverse_input_config.num_frames(), |
| 1003 formats_.api_format.reverse_input_stream().num_frames()); | 1002 formats_.api_format.reverse_input_stream().num_frames()); |
| 1004 | 1003 |
| 1005 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1004 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1006 if (debug_dump_.debug_file->is_open()) { | 1005 if (debug_dump_.debug_file->is_open()) { |
| 1007 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | 1006 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| 1008 audioproc::ReverseStream* msg = | 1007 audioproc::ReverseStream* msg = |
| 1009 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1008 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 1010 const size_t channel_size = | 1009 const size_t channel_size = |
| 1011 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1010 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 1012 for (size_t i = 0; | 1011 for (size_t i = 0; |
| 1013 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1012 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| (...skipping 584 matching lines...) | |
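Both stream hunks also serialize the float input for the debug dump as one raw blob per channel, sized `sizeof(float) * num_frames`. A small stand-alone sketch of that per-channel layout follows; the buffers and the byte-blob destination are hypothetical, not the APM debug-dump writer.

```cpp
// Sketch: compute the per-channel payload size the same way the hunks above do
// (sizeof(float) * num_frames) and serialize each deinterleaved channel.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t num_channels = 2;
  const size_t num_frames = 160;  // 10 ms at 16 kHz
  const size_t channel_size = sizeof(float) * num_frames;

  // Deinterleaved input: one contiguous float buffer per channel.
  std::vector<std::vector<float>> channels(
      num_channels, std::vector<float>(num_frames, 0.f));

  // Append each channel as a raw byte blob, as a dump writer might.
  std::vector<unsigned char> blob;
  for (size_t i = 0; i < num_channels; ++i) {
    const unsigned char* bytes =
        reinterpret_cast<const unsigned char*>(channels[i].data());
    blob.insert(blob.end(), bytes, bytes + channel_size);
  }
  std::printf("wrote %zu bytes (%zu per channel)\n", blob.size(), channel_size);
  return 0;
}
```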
| 1598 fwd_proc_format(kSampleRate16kHz), | 1597 fwd_proc_format(kSampleRate16kHz), |
| 1599 split_rate(kSampleRate16kHz) {} | 1598 split_rate(kSampleRate16kHz) {} |
| 1600 | 1599 |
| 1601 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 1600 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
| 1602 | 1601 |
| 1603 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 1602 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
| 1604 | 1603 |
| 1605 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 1604 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
| 1606 | 1605 |
| 1607 } // namespace webrtc | 1606 } // namespace webrtc |
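The final hunk keeps the out-of-line defaulted constructors and destructors for `ApmCaptureState` and `ApmRenderState`, with the capture state initializing its processing and split rates to 16 kHz. One common reason for this idiom (an assumption here, not something the diff states) is that the struct holds smart pointers to types that are only forward-declared in the header, so the special members must be defined where those types are complete. A hypothetical single-file sketch:

```cpp
// Sketch with made-up types: defaulted special members defined after the
// pointed-to type is complete, so a std::unique_ptr member can be declared
// against a forward declaration.
#include <cstdio>
#include <memory>

class BandSplitter;  // forward declaration; full type appears below

struct CaptureState {
  CaptureState();   // defined out of line, after BandSplitter is complete
  ~CaptureState();  // ditto; needed so unique_ptr can delete the splitter
  int fwd_proc_rate_hz = 16000;
  int split_rate_hz = 16000;
  std::unique_ptr<BandSplitter> splitter;
};

class BandSplitter {};  // stand-in for the real helper

CaptureState::CaptureState() = default;
CaptureState::~CaptureState() = default;

int main() {
  CaptureState state;
  std::printf("split rate: %d Hz\n", state.split_rate_hz);
  return 0;
}
```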