| OLD | NEW | 
|---|---|
| 1 /* | 1 /* | 
| 2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 
| 3  * | 3  * | 
| 4  *  Use of this source code is governed by a BSD-style license | 4  *  Use of this source code is governed by a BSD-style license | 
| 5  *  that can be found in the LICENSE file in the root of the source | 5  *  that can be found in the LICENSE file in the root of the source | 
| 6  *  tree. An additional intellectual property rights grant can be found | 6  *  tree. An additional intellectual property rights grant can be found | 
| 7  *  in the file PATENTS.  All contributing project authors may | 7  *  in the file PATENTS.  All contributing project authors may | 
| 8  *  be found in the AUTHORS file in the root of the source tree. | 8  *  be found in the AUTHORS file in the root of the source tree. | 
| 9  */ | 9  */ | 
| 10 | 10 | 
| 11 #include "webrtc/voice_engine/utility.h" | 11 #include "webrtc/voice_engine/utility.h" | 
| 12 | 12 | 
| 13 #include "webrtc/common_audio/resampler/include/push_resampler.h" | 13 #include "webrtc/common_audio/resampler/include/push_resampler.h" | 
| 14 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
     y.h" | 14 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
     y.h" | 
| 15 #include "webrtc/common_types.h" | 15 #include "webrtc/common_types.h" | 
| 16 #include "webrtc/modules/interface/module_common_types.h" | 16 #include "webrtc/modules/interface/module_common_types.h" | 
| 17 #include "webrtc/modules/utility/interface/audio_frame_operations.h" | 17 #include "webrtc/modules/utility/interface/audio_frame_operations.h" | 
| 18 #include "webrtc/system_wrappers/interface/logging.h" | 18 #include "webrtc/system_wrappers/interface/logging.h" | 
| 19 #include "webrtc/voice_engine/voice_engine_defines.h" | 19 #include "webrtc/voice_engine/voice_engine_defines.h" | 
| 20 | 20 | 
| 21 namespace webrtc { | 21 namespace webrtc { | 
| 22 namespace voe { | 22 namespace voe { | 
| 23 | 23 | 
| 24 // TODO(ajm): There is significant overlap between RemixAndResample and | 24 // TODO(ajm): There is significant overlap between RemixAndResample and | 
| 25 // ConvertToCodecFormat. Consolidate using AudioConverter. | 25 // ConvertToCodecFormat. Consolidate using AudioConverter. | 
| 26 void RemixAndResample(const AudioFrame& src_frame, | 26 void RemixAndResample(const AudioFrame& src_frame, | 
| 27                       PushResampler<int16_t>* resampler, | 27                       PushResampler<int16_t>* resampler, | 
| 28                       AudioFrame* dst_frame) { | 28                       AudioFrame* dst_frame) { | 
| 29   const int16_t* audio_ptr = src_frame.data_; | 29   const int16_t* audio_ptr = src_frame.data_; | 
| 30   int audio_ptr_num_channels = src_frame.num_channels_; | 30   size_t audio_ptr_num_channels = src_frame.num_channels_; | 
| 31   int16_t mono_audio[AudioFrame::kMaxDataSizeSamples]; | 31   int16_t mono_audio[AudioFrame::kMaxDataSizeSamples]; | 
| 32 | 32 | 
| 33   // Downmix before resampling. | 33   // Downmix before resampling. | 
| 34   if (src_frame.num_channels_ == 2 && dst_frame->num_channels_ == 1) { | 34   if (src_frame.num_channels_ == 2 && dst_frame->num_channels_ == 1) { | 
| 35     AudioFrameOperations::StereoToMono(src_frame.data_, | 35     AudioFrameOperations::StereoToMono(src_frame.data_, | 
| 36                                        src_frame.samples_per_channel_, | 36                                        src_frame.samples_per_channel_, | 
| 37                                        mono_audio); | 37                                        mono_audio); | 
| 38     audio_ptr = mono_audio; | 38     audio_ptr = mono_audio; | 
| 39     audio_ptr_num_channels = 1; | 39     audio_ptr_num_channels = 1; | 
| 40   } | 40   } | 
| 41 | 41 | 
| 42   if (resampler->InitializeIfNeeded(src_frame.sample_rate_hz_, | 42   if (resampler->InitializeIfNeeded(src_frame.sample_rate_hz_, | 
| 43                                     dst_frame->sample_rate_hz_, | 43                                     dst_frame->sample_rate_hz_, | 
| 44                                     audio_ptr_num_channels) == -1) { | 44                                     audio_ptr_num_channels) == -1) { | 
| 45     LOG_FERR3(LS_ERROR, InitializeIfNeeded, src_frame.sample_rate_hz_, | 45     LOG_FERR3(LS_ERROR, InitializeIfNeeded, src_frame.sample_rate_hz_, | 
| 46               dst_frame->sample_rate_hz_, audio_ptr_num_channels); | 46               dst_frame->sample_rate_hz_, audio_ptr_num_channels); | 
| 47     assert(false); | 47     assert(false); | 
| 48   } | 48   } | 
| 49 | 49 | 
| 50   const size_t src_length = src_frame.samples_per_channel_ * | 50   const size_t src_length = | 
| 51                          audio_ptr_num_channels; | 51       src_frame.samples_per_channel_ * audio_ptr_num_channels; | 
| 52   int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_, | 52   int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_, | 
| 53                                        AudioFrame::kMaxDataSizeSamples); | 53                                        AudioFrame::kMaxDataSizeSamples); | 
| 54   if (out_length == -1) { | 54   if (out_length == -1) { | 
| 55     LOG_FERR3(LS_ERROR, Resample, audio_ptr, src_length, dst_frame->data_); | 55     LOG_FERR3(LS_ERROR, Resample, audio_ptr, src_length, dst_frame->data_); | 
| 56     assert(false); | 56     assert(false); | 
| 57   } | 57   } | 
| 58   dst_frame->samples_per_channel_ = | 58   dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels; | 
| 59       static_cast<size_t>(out_length / audio_ptr_num_channels); |  | 
| 60 | 59 | 
| 61   // Upmix after resampling. | 60   // Upmix after resampling. | 
| 62   if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) { | 61   if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) { | 
| 63     // The audio in dst_frame really is mono at this point; MonoToStereo will | 62     // The audio in dst_frame really is mono at this point; MonoToStereo will | 
| 64     // set this back to stereo. | 63     // set this back to stereo. | 
| 65     dst_frame->num_channels_ = 1; | 64     dst_frame->num_channels_ = 1; | 
| 66     AudioFrameOperations::MonoToStereo(dst_frame); | 65     AudioFrameOperations::MonoToStereo(dst_frame); | 
| 67   } | 66   } | 
| 68 | 67 | 
| 69   dst_frame->timestamp_ = src_frame.timestamp_; | 68   dst_frame->timestamp_ = src_frame.timestamp_; | 
| 70   dst_frame->elapsed_time_ms_ = src_frame.elapsed_time_ms_; | 69   dst_frame->elapsed_time_ms_ = src_frame.elapsed_time_ms_; | 
| 71   dst_frame->ntp_time_ms_ = src_frame.ntp_time_ms_; | 70   dst_frame->ntp_time_ms_ = src_frame.ntp_time_ms_; | 
| 72 } | 71 } | 
| 73 | 72 | 
| 74 void DownConvertToCodecFormat(const int16_t* src_data, | 73 void DownConvertToCodecFormat(const int16_t* src_data, | 
| 75                               size_t samples_per_channel, | 74                               size_t samples_per_channel, | 
| 76                               int num_channels, | 75                               size_t num_channels, | 
| 77                               int sample_rate_hz, | 76                               int sample_rate_hz, | 
| 78                               int codec_num_channels, | 77                               size_t codec_num_channels, | 
| 79                               int codec_rate_hz, | 78                               int codec_rate_hz, | 
| 80                               int16_t* mono_buffer, | 79                               int16_t* mono_buffer, | 
| 81                               PushResampler<int16_t>* resampler, | 80                               PushResampler<int16_t>* resampler, | 
| 82                               AudioFrame* dst_af) { | 81                               AudioFrame* dst_af) { | 
| 83   assert(samples_per_channel <= kMaxMonoDataSizeSamples); | 82   assert(samples_per_channel <= kMaxMonoDataSizeSamples); | 
| 84   assert(num_channels == 1 || num_channels == 2); | 83   assert(num_channels == 1 || num_channels == 2); | 
| 85   assert(codec_num_channels == 1 || codec_num_channels == 2); | 84   assert(codec_num_channels == 1 || codec_num_channels == 2); | 
| 86   dst_af->Reset(); | 85   dst_af->Reset(); | 
| 87 | 86 | 
| 88   // Never upsample the capture signal here. This should be done at the | 87   // Never upsample the capture signal here. This should be done at the | 
| (...skipping 20 matching lines...) | 
| 109   } | 108   } | 
| 110 | 109 | 
| 111   const size_t in_length = samples_per_channel * num_channels; | 110   const size_t in_length = samples_per_channel * num_channels; | 
| 112   int out_length = resampler->Resample( | 111   int out_length = resampler->Resample( | 
| 113       src_data, in_length, dst_af->data_, AudioFrame::kMaxDataSizeSamples); | 112       src_data, in_length, dst_af->data_, AudioFrame::kMaxDataSizeSamples); | 
| 114   if (out_length == -1) { | 113   if (out_length == -1) { | 
| 115     LOG_FERR3(LS_ERROR, Resample, src_data, in_length, dst_af->data_); | 114     LOG_FERR3(LS_ERROR, Resample, src_data, in_length, dst_af->data_); | 
| 116     assert(false); | 115     assert(false); | 
| 117   } | 116   } | 
| 118 | 117 | 
| 119   dst_af->samples_per_channel_ = static_cast<size_t>(out_length / num_channels); | 118   dst_af->samples_per_channel_ = out_length / num_channels; | 
| 120   dst_af->sample_rate_hz_ = destination_rate; | 119   dst_af->sample_rate_hz_ = destination_rate; | 
| 121   dst_af->num_channels_ = num_channels; | 120   dst_af->num_channels_ = num_channels; | 
| 122 } | 121 } | 
| 123 | 122 | 
| 124 void MixWithSat(int16_t target[], | 123 void MixWithSat(int16_t target[], | 
| 125                 int target_channel, | 124                 size_t target_channel, | 
| 126                 const int16_t source[], | 125                 const int16_t source[], | 
| 127                 int source_channel, | 126                 size_t source_channel, | 
| 128                 size_t source_len) { | 127                 size_t source_len) { | 
| 129   assert(target_channel == 1 || target_channel == 2); | 128   assert(target_channel == 1 || target_channel == 2); | 
| 130   assert(source_channel == 1 || source_channel == 2); | 129   assert(source_channel == 1 || source_channel == 2); | 
| 131 | 130 | 
| 132   if (target_channel == 2 && source_channel == 1) { | 131   if (target_channel == 2 && source_channel == 1) { | 
| 133     // Convert source from mono to stereo. | 132     // Convert source from mono to stereo. | 
| 134     int32_t left = 0; | 133     int32_t left = 0; | 
| 135     int32_t right = 0; | 134     int32_t right = 0; | 
| 136     for (size_t i = 0; i < source_len; ++i) { | 135     for (size_t i = 0; i < source_len; ++i) { | 
| 137       left = source[i] + target[i * 2]; | 136       left = source[i] + target[i * 2]; | 
| (...skipping 12 matching lines...) | 
| 150     int32_t temp = 0; | 149     int32_t temp = 0; | 
| 151     for (size_t i = 0; i < source_len; ++i) { | 150     for (size_t i = 0; i < source_len; ++i) { | 
| 152       temp = source[i] + target[i]; | 151       temp = source[i] + target[i]; | 
| 153       target[i] = WebRtcSpl_SatW32ToW16(temp); | 152       target[i] = WebRtcSpl_SatW32ToW16(temp); | 
| 154     } | 153     } | 
| 155   } | 154   } | 
| 156 } | 155 } | 
| 157 | 156 | 
| 158 }  // namespace voe | 157 }  // namespace voe | 
| 159 }  // namespace webrtc | 158 }  // namespace webrtc | 
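Below is a minimal sketch (not part of this change) of how the updated `RemixAndResample` might be driven after the `int` to `size_t` channel-count switch above. The `AudioFrame` fields, the `PushResampler<int16_t>` type, and the call shape are taken from the diff; the sample rates, 10 ms framing, and the `ExampleRemix` wrapper are illustrative assumptions.

```cpp
// Sketch only: mono 48 kHz capture remixed/resampled to stereo 32 kHz.
// Field names and the call follow the diff above; the setup values here
// (rates, 10 ms framing, ExampleRemix wrapper) are illustrative assumptions.
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/voice_engine/utility.h"

namespace {

void ExampleRemix() {
  webrtc::AudioFrame src;
  src.sample_rate_hz_ = 48000;
  src.num_channels_ = 1;           // mono capture
  src.samples_per_channel_ = 480;  // 10 ms at 48 kHz
  // src.data_ would be filled with captured samples here.

  webrtc::AudioFrame dst;
  dst.sample_rate_hz_ = 32000;  // target rate
  dst.num_channels_ = 2;        // upmixed to stereo after resampling

  webrtc::PushResampler<int16_t> resampler;
  webrtc::voe::RemixAndResample(src, &resampler, &dst);
  // dst.samples_per_channel_ is now 320 (10 ms at 32 kHz).
}

}  // namespace
```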
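A similarly hedged sketch of `MixWithSat` with the new `size_t` channel arguments, mixing a mono source into a stereo target. `WebRtcSpl_SatW32ToW16` (used inside `MixWithSat`) clamps the 32-bit sum to the `int16_t` range, so overlapping loud signals clip rather than wrap. The 16 kHz / 10 ms framing and buffer names are assumptions for illustration.

```cpp
// Sketch only: mix a mono source into a stereo accumulation buffer with
// saturation. The 16 kHz / 10 ms framing and buffer names are assumptions.
#include "webrtc/voice_engine/utility.h"

namespace {

void ExampleMix() {
  const size_t kSamplesPerChannel = 160;         // 10 ms at 16 kHz
  int16_t target[kSamplesPerChannel * 2] = {0};  // interleaved stereo target
  int16_t source[kSamplesPerChannel] = {0};      // mono source to add in
  // source would be filled with decoded audio here.

  // target_channel == 2, source_channel == 1: each mono sample is added to
  // both the left and right samples of target, clamped to the int16_t range.
  webrtc::voe::MixWithSat(target, 2, source, 1, kSamplesPerChannel);
}

}  // namespace
```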