OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/voice_engine/utility.h" | 11 #include "webrtc/voice_engine/utility.h" |
12 | 12 |
13 #include "webrtc/common_audio/resampler/include/push_resampler.h" | 13 #include "webrtc/common_audio/resampler/include/push_resampler.h" |
14 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" | 14 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" |
15 #include "webrtc/common_types.h" | 15 #include "webrtc/common_types.h" |
16 #include "webrtc/modules/interface/module_common_types.h" | 16 #include "webrtc/modules/interface/module_common_types.h" |
17 #include "webrtc/modules/utility/interface/audio_frame_operations.h" | 17 #include "webrtc/modules/utility/interface/audio_frame_operations.h" |
18 #include "webrtc/system_wrappers/interface/logging.h" | 18 #include "webrtc/system_wrappers/interface/logging.h" |
19 #include "webrtc/voice_engine/voice_engine_defines.h" | 19 #include "webrtc/voice_engine/voice_engine_defines.h" |
20 | 20 |
21 namespace webrtc { | 21 namespace webrtc { |
22 namespace voe { | 22 namespace voe { |
23 | 23 |
24 // TODO(ajm): There is significant overlap between RemixAndResample and | |
25 // ConvertToCodecFormat. Consolidate using AudioConverter. | |
26 void RemixAndResample(const AudioFrame& src_frame, | 24 void RemixAndResample(const AudioFrame& src_frame, |
27 PushResampler<int16_t>* resampler, | 25 PushResampler<int16_t>* resampler, |
28 AudioFrame* dst_frame) { | 26 AudioFrame* dst_frame) { |
29 const int16_t* audio_ptr = src_frame.data_; | 27 RemixAndResample(src_frame.data_, src_frame.samples_per_channel_, |
30 int audio_ptr_num_channels = src_frame.num_channels_; | 28 src_frame.num_channels_, src_frame.sample_rate_hz_, |
| 29 resampler, dst_frame); |
| 30 dst_frame->timestamp_ = src_frame.timestamp_; |
| 31 dst_frame->elapsed_time_ms_ = src_frame.elapsed_time_ms_; |
| 32 dst_frame->ntp_time_ms_ = src_frame.ntp_time_ms_; |
| 33 } |
| 34 |
| 35 void RemixAndResample(const int16_t* src_data, |
| 36 size_t samples_per_channel, |
| 37 int num_channels, |
| 38 int sample_rate_hz, |
| 39 PushResampler<int16_t>* resampler, |
| 40 AudioFrame* dst_frame) { |
| 41 const int16_t* audio_ptr = src_data; |
| 42 int audio_ptr_num_channels = num_channels; |
31 int16_t mono_audio[AudioFrame::kMaxDataSizeSamples]; | 43 int16_t mono_audio[AudioFrame::kMaxDataSizeSamples]; |
32 | 44 |
33 // Downmix before resampling. | 45 // Downmix before resampling. |
34 if (src_frame.num_channels_ == 2 && dst_frame->num_channels_ == 1) { | 46 if (num_channels == 2 && dst_frame->num_channels_ == 1) { |
35 AudioFrameOperations::StereoToMono(src_frame.data_, | 47 AudioFrameOperations::StereoToMono(src_data, samples_per_channel, |
36 src_frame.samples_per_channel_, | |
37 mono_audio); | 48 mono_audio); |
38 audio_ptr = mono_audio; | 49 audio_ptr = mono_audio; |
39 audio_ptr_num_channels = 1; | 50 audio_ptr_num_channels = 1; |
40 } | 51 } |
41 | 52 |
42 if (resampler->InitializeIfNeeded(src_frame.sample_rate_hz_, | 53 if (resampler->InitializeIfNeeded(sample_rate_hz, dst_frame->sample_rate_hz_, |
43 dst_frame->sample_rate_hz_, | |
44 audio_ptr_num_channels) == -1) { | 54 audio_ptr_num_channels) == -1) { |
45 LOG_FERR3(LS_ERROR, InitializeIfNeeded, src_frame.sample_rate_hz_, | 55 LOG_FERR3(LS_ERROR, InitializeIfNeeded, sample_rate_hz, |
46 dst_frame->sample_rate_hz_, audio_ptr_num_channels); | 56 dst_frame->sample_rate_hz_, audio_ptr_num_channels); |
47 assert(false); | 57 assert(false); |
48 } | 58 } |
49 | 59 |
50 const size_t src_length = src_frame.samples_per_channel_ * | 60 const size_t src_length = samples_per_channel * audio_ptr_num_channels; |
51 audio_ptr_num_channels; | |
52 int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_, | 61 int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_, |
53 AudioFrame::kMaxDataSizeSamples); | 62 AudioFrame::kMaxDataSizeSamples); |
54 if (out_length == -1) { | 63 if (out_length == -1) { |
55 LOG_FERR3(LS_ERROR, Resample, audio_ptr, src_length, dst_frame->data_); | 64 LOG_FERR3(LS_ERROR, Resample, audio_ptr, src_length, dst_frame->data_); |
56 assert(false); | 65 assert(false); |
57 } | 66 } |
58 dst_frame->samples_per_channel_ = | 67 dst_frame->samples_per_channel_ = |
59 static_cast<size_t>(out_length / audio_ptr_num_channels); | 68 static_cast<size_t>(out_length / audio_ptr_num_channels); |
60 | 69 |
61 // Upmix after resampling. | 70 // Upmix after resampling. |
62 if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) { | 71 if (num_channels == 1 && dst_frame->num_channels_ == 2) { |
63 // The audio in dst_frame really is mono at this point; MonoToStereo will | 72 // The audio in dst_frame really is mono at this point; MonoToStereo will |
64 // set this back to stereo. | 73 // set this back to stereo. |
65 dst_frame->num_channels_ = 1; | 74 dst_frame->num_channels_ = 1; |
66 AudioFrameOperations::MonoToStereo(dst_frame); | 75 AudioFrameOperations::MonoToStereo(dst_frame); |
67 } | 76 } |
68 | |
69 dst_frame->timestamp_ = src_frame.timestamp_; | |
70 dst_frame->elapsed_time_ms_ = src_frame.elapsed_time_ms_; | |
71 dst_frame->ntp_time_ms_ = src_frame.ntp_time_ms_; | |
72 } | |
73 | |
74 void DownConvertToCodecFormat(const int16_t* src_data, | |
75 size_t samples_per_channel, | |
76 int num_channels, | |
77 int sample_rate_hz, | |
78 int codec_num_channels, | |
79 int codec_rate_hz, | |
80 int16_t* mono_buffer, | |
81 PushResampler<int16_t>* resampler, | |
82 AudioFrame* dst_af) { | |
83 assert(samples_per_channel <= kMaxMonoDataSizeSamples); | |
84 assert(num_channels == 1 || num_channels == 2); | |
85 assert(codec_num_channels == 1 || codec_num_channels == 2); | |
86 dst_af->Reset(); | |
87 | |
88 // Never upsample the capture signal here. This should be done at the | |
89 // end of the send chain. | |
90 int destination_rate = std::min(codec_rate_hz, sample_rate_hz); | |
91 | |
92 // If no stereo codecs are in use, we downmix a stereo stream from the | |
93 // device early in the chain, before resampling. | |
94 if (num_channels == 2 && codec_num_channels == 1) { | |
95 AudioFrameOperations::StereoToMono(src_data, samples_per_channel, | |
96 mono_buffer); | |
97 src_data = mono_buffer; | |
98 num_channels = 1; | |
99 } | |
100 | |
101 if (resampler->InitializeIfNeeded( | |
102 sample_rate_hz, destination_rate, num_channels) != 0) { | |
103 LOG_FERR3(LS_ERROR, | |
104 InitializeIfNeeded, | |
105 sample_rate_hz, | |
106 destination_rate, | |
107 num_channels); | |
108 assert(false); | |
109 } | |
110 | |
111 const size_t in_length = samples_per_channel * num_channels; | |
112 int out_length = resampler->Resample( | |
113 src_data, in_length, dst_af->data_, AudioFrame::kMaxDataSizeSamples); | |
114 if (out_length == -1) { | |
115 LOG_FERR3(LS_ERROR, Resample, src_data, in_length, dst_af->data_); | |
116 assert(false); | |
117 } | |
118 | |
119 dst_af->samples_per_channel_ = static_cast<size_t>(out_length / num_channels); | |
120 dst_af->sample_rate_hz_ = destination_rate; | |
121 dst_af->num_channels_ = num_channels; | |
122 } | 77 } |
123 | 78 |
124 void MixWithSat(int16_t target[], | 79 void MixWithSat(int16_t target[], |
125 int target_channel, | 80 int target_channel, |
126 const int16_t source[], | 81 const int16_t source[], |
127 int source_channel, | 82 int source_channel, |
128 size_t source_len) { | 83 size_t source_len) { |
129 assert(target_channel == 1 || target_channel == 2); | 84 assert(target_channel == 1 || target_channel == 2); |
130 assert(source_channel == 1 || source_channel == 2); | 85 assert(source_channel == 1 || source_channel == 2); |
131 | 86 |
(...skipping 18 matching lines...) |
150 int32_t temp = 0; | 105 int32_t temp = 0; |
151 for (size_t i = 0; i < source_len; ++i) { | 106 for (size_t i = 0; i < source_len; ++i) { |
152 temp = source[i] + target[i]; | 107 temp = source[i] + target[i]; |
153 target[i] = WebRtcSpl_SatW32ToW16(temp); | 108 target[i] = WebRtcSpl_SatW32ToW16(temp); |
154 } | 109 } |
155 } | 110 } |
156 } | 111 } |
157 | 112 |
158 } // namespace voe | 113 } // namespace voe |
159 } // namespace webrtc | 114 } // namespace webrtc |
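For orientation, below is a minimal sketch (not part of the patch) of how the new raw-data RemixAndResample overload introduced on the NEW side might be driven from a capture path. The helper name RemixCapturedAudio and the concrete rates, channel counts and buffer sizes are illustrative assumptions; only RemixAndResample, PushResampler and AudioFrame come from the code above.

#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/voice_engine/utility.h"

namespace {

// Hypothetical capture-side helper: converts raw captured PCM into the format
// already configured on |dst_frame| (sample_rate_hz_ / num_channels_).
void RemixCapturedAudio(const int16_t* capture_data,
                        size_t samples_per_channel,  // e.g. 480 for 10 ms at 48 kHz
                        int capture_channels,        // 1 or 2
                        int capture_rate_hz,         // e.g. 48000
                        webrtc::PushResampler<int16_t>* resampler,
                        webrtc::AudioFrame* dst_frame) {
  // The destination frame decides the target format; RemixAndResample
  // downmixes or upmixes and resamples to match it.
  dst_frame->sample_rate_hz_ = 16000;  // illustrative target rate
  dst_frame->num_channels_ = 1;        // illustrative target channel count
  webrtc::voe::RemixAndResample(capture_data, samples_per_channel,
                                capture_channels, capture_rate_hz,
                                resampler, dst_frame);
  // Unlike the AudioFrame overload, the raw-data overload does not copy
  // timestamp_, elapsed_time_ms_ or ntp_time_ms_; set them here if the
  // caller needs them on the output frame.
}

}  // namespace

The delegation in the AudioFrame overload keeps that metadata copy in one place, so callers that already hold a full AudioFrame get it for free, while raw-buffer callers avoid constructing a temporary frame.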