| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" | 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" |
| 12 | 12 |
| 13 #include <assert.h> | |
| 14 #include <algorithm> | 13 #include <algorithm> |
| 15 | 14 |
| 16 #include "webrtc/base/checks.h" | 15 #include "webrtc/base/checks.h" |
| 17 #include "webrtc/base/platform_file.h" | 16 #include "webrtc/base/platform_file.h" |
| 18 #include "webrtc/base/trace_event.h" | 17 #include "webrtc/base/trace_event.h" |
| 19 #include "webrtc/common_audio/audio_converter.h" | 18 #include "webrtc/common_audio/audio_converter.h" |
| 20 #include "webrtc/common_audio/channel_buffer.h" | 19 #include "webrtc/common_audio/channel_buffer.h" |
| 21 #include "webrtc/common_audio/include/audio_util.h" | 20 #include "webrtc/common_audio/include/audio_util.h" |
| 22 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" | 21 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" |
| 23 #include "webrtc/modules/audio_processing/aec/aec_core.h" | 22 #include "webrtc/modules/audio_processing/aec/aec_core.h" |
| (...skipping 61 matching lines...) |
| 85 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { | 84 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
| 86 switch (layout) { | 85 switch (layout) { |
| 87 case AudioProcessing::kMono: | 86 case AudioProcessing::kMono: |
| 88 case AudioProcessing::kStereo: | 87 case AudioProcessing::kStereo: |
| 89 return false; | 88 return false; |
| 90 case AudioProcessing::kMonoAndKeyboard: | 89 case AudioProcessing::kMonoAndKeyboard: |
| 91 case AudioProcessing::kStereoAndKeyboard: | 90 case AudioProcessing::kStereoAndKeyboard: |
| 92 return true; | 91 return true; |
| 93 } | 92 } |
| 94 | 93 |
| 95 assert(false); | 94 RTC_NOTREACHED(); |
| 96 return false; | 95 return false; |
| 97 } | 96 } |
| 98 | 97 |
| 99 bool SampleRateSupportsMultiBand(int sample_rate_hz) { | 98 bool SampleRateSupportsMultiBand(int sample_rate_hz) { |
| 100 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || | 99 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || |
| 101 sample_rate_hz == AudioProcessing::kSampleRate48kHz; | 100 sample_rate_hz == AudioProcessing::kSampleRate48kHz; |
| 102 } | 101 } |
| 103 | 102 |
| 104 int FindNativeProcessRateToUse(int minimum_rate, bool band_splitting_required) { | 103 int FindNativeProcessRateToUse(int minimum_rate, bool band_splitting_required) { |
| 105 #ifdef WEBRTC_ARCH_ARM_FAMILY | 104 #ifdef WEBRTC_ARCH_ARM_FAMILY |
| (...skipping 585 matching lines...) |
| 691 processing_config.input_stream() = input_config; | 690 processing_config.input_stream() = input_config; |
| 692 processing_config.output_stream() = output_config; | 691 processing_config.output_stream() = output_config; |
| 693 | 692 |
| 694 { | 693 { |
| 695 // Do conditional reinitialization. | 694 // Do conditional reinitialization. |
| 696 rtc::CritScope cs_render(&crit_render_); | 695 rtc::CritScope cs_render(&crit_render_); |
| 697 RETURN_ON_ERR( | 696 RETURN_ON_ERR( |
| 698 MaybeInitializeCapture(processing_config, reinitialization_required)); | 697 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 699 } | 698 } |
| 700 rtc::CritScope cs_capture(&crit_capture_); | 699 rtc::CritScope cs_capture(&crit_capture_); |
| 701 assert(processing_config.input_stream().num_frames() == | 700 RTC_DCHECK_EQ(processing_config.input_stream().num_frames(), |
| 702 formats_.api_format.input_stream().num_frames()); | 701 formats_.api_format.input_stream().num_frames()); |
| 703 | 702 |
| 704 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 703 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 705 if (debug_dump_.debug_file->is_open()) { | 704 if (debug_dump_.debug_file->is_open()) { |
| 706 RETURN_ON_ERR(WriteConfigMessage(false)); | 705 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 707 | 706 |
| 708 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 707 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 709 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 708 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 710 const size_t channel_size = | 709 const size_t channel_size = |
| 711 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 710 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 712 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 711 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| (...skipping 295 matching lines...) |
| 1008 | 1007 |
| 1009 if (reverse_input_config.num_channels() == 0) { | 1008 if (reverse_input_config.num_channels() == 0) { |
| 1010 return kBadNumberChannelsError; | 1009 return kBadNumberChannelsError; |
| 1011 } | 1010 } |
| 1012 | 1011 |
| 1013 ProcessingConfig processing_config = formats_.api_format; | 1012 ProcessingConfig processing_config = formats_.api_format; |
| 1014 processing_config.reverse_input_stream() = reverse_input_config; | 1013 processing_config.reverse_input_stream() = reverse_input_config; |
| 1015 processing_config.reverse_output_stream() = reverse_output_config; | 1014 processing_config.reverse_output_stream() = reverse_output_config; |
| 1016 | 1015 |
| 1017 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 1016 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
| 1018 assert(reverse_input_config.num_frames() == | 1017 RTC_DCHECK_EQ(reverse_input_config.num_frames(), |
| 1019 formats_.api_format.reverse_input_stream().num_frames()); | 1018 formats_.api_format.reverse_input_stream().num_frames()); |
| 1020 | 1019 |
| 1021 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1020 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1022 if (debug_dump_.debug_file->is_open()) { | 1021 if (debug_dump_.debug_file->is_open()) { |
| 1023 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | 1022 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| 1024 audioproc::ReverseStream* msg = | 1023 audioproc::ReverseStream* msg = |
| 1025 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1024 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 1026 const size_t channel_size = | 1025 const size_t channel_size = |
| 1027 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1026 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 1028 for (size_t i = 0; | 1027 for (size_t i = 0; |
| 1029 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1028 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| (...skipping 584 matching lines...) |
| 1614 fwd_proc_format(kSampleRate16kHz), | 1613 fwd_proc_format(kSampleRate16kHz), |
| 1615 split_rate(kSampleRate16kHz) {} | 1614 split_rate(kSampleRate16kHz) {} |
| 1616 | 1615 |
| 1617 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 1616 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
| 1618 | 1617 |
| 1619 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 1618 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
| 1620 | 1619 |
| 1621 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 1620 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
| 1622 | 1621 |
| 1623 } // namespace webrtc | 1622 } // namespace webrtc |
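A minimal sketch of the assertion pattern this change moves to, assuming the Chromium-style convention that the RTC_DCHECK*/RTC_NOTREACHED macros from webrtc/base/checks.h are active only in DCHECK-enabled builds; the function below is a hypothetical illustration of the replacement for assert(), not code taken from this CL:

    #include <algorithm>
    #include <cstddef>

    #include "webrtc/base/checks.h"

    // Copies |num_frames| samples, checking (debug-only) that the destination
    // was sized for exactly that many frames.
    // Was: assert(num_frames == dst_capacity);
    size_t CopyFrames(const float* src, size_t num_frames,
                      float* dst, size_t dst_capacity) {
      RTC_DCHECK_EQ(num_frames, dst_capacity);
      std::copy(src, src + num_frames, dst);
      return num_frames;
    }

    // For enum switches that cover every value, RTC_NOTREACHED() replaces
    // assert(false) on the unreachable fall-through path, as in
    // LayoutHasKeyboard() above.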