| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/audio/utility/audio_frame_operations.h" |
| 12 |
| 13 #include <algorithm> |
| 14 |
| 15 #include "webrtc/base/checks.h" |
| 16 #include "webrtc/base/safe_conversions.h" |
| 11 #include "webrtc/modules/include/module_common_types.h" | 17 #include "webrtc/modules/include/module_common_types.h" |
| 12 #include "webrtc/modules/utility/include/audio_frame_operations.h" | |
| 13 #include "webrtc/base/checks.h" | |
| 14 | 18 |
namespace webrtc {
namespace {

// Number of samples (per channel) over which a partial mute is faded in or
// out: 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
const size_t kMuteFadeFrames = 128;
// Per-sample gain increment giving a linear ramp across kMuteFadeFrames.
const float kMuteFadeInc = 1.0f / kMuteFadeFrames;

}  // namespace
| 27 |
| 28 void AudioFrameOperations::Add(const AudioFrame& frame_to_add, |
| 29 AudioFrame* result_frame) { |
| 30 // Sanity check. |
| 31 RTC_DCHECK(result_frame); |
| 32 RTC_DCHECK_GT(result_frame->num_channels_, 0); |
| 33 RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_); |
| 34 |
| 35 bool no_previous_data = false; |
| 36 if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) { |
| 37 // Special case we have no data to start with. |
| 38 RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0); |
| 39 result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_; |
| 40 no_previous_data = true; |
| 41 } |
| 42 |
| 43 if (result_frame->vad_activity_ == AudioFrame::kVadActive || |
| 44 frame_to_add.vad_activity_ == AudioFrame::kVadActive) { |
| 45 result_frame->vad_activity_ = AudioFrame::kVadActive; |
| 46 } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown || |
| 47 frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) { |
| 48 result_frame->vad_activity_ = AudioFrame::kVadUnknown; |
| 49 } |
| 50 |
| 51 if (result_frame->speech_type_ != frame_to_add.speech_type_) |
| 52 result_frame->speech_type_ = AudioFrame::kUndefined; |
| 53 |
| 54 if (no_previous_data) { |
| 55 std::copy(frame_to_add.data_, frame_to_add.data_ + |
| 56 frame_to_add.samples_per_channel_ * |
| 57 result_frame->num_channels_, |
| 58 result_frame->data_); |
| 59 } else { |
| 60 for (size_t i = 0; |
| 61 i < result_frame->samples_per_channel_ * result_frame->num_channels_; |
| 62 i++) { |
| 63 const int32_t wrap_guard = static_cast<int32_t>(result_frame->data_[i]) + |
| 64 static_cast<int32_t>(frame_to_add.data_[i]); |
| 65 result_frame->data_[i] = rtc::saturated_cast<int16_t>(wrap_guard); |
| 66 } |
| 67 } |
| 68 return; |
| 69 } |
| 23 | 70 |
| 24 void AudioFrameOperations::MonoToStereo(const int16_t* src_audio, | 71 void AudioFrameOperations::MonoToStereo(const int16_t* src_audio, |
| 25 size_t samples_per_channel, | 72 size_t samples_per_channel, |
| 26 int16_t* dst_audio) { | 73 int16_t* dst_audio) { |
| 27 for (size_t i = 0; i < samples_per_channel; i++) { | 74 for (size_t i = 0; i < samples_per_channel; i++) { |
| 28 dst_audio[2 * i] = src_audio[i]; | 75 dst_audio[2 * i] = src_audio[i]; |
| 29 dst_audio[2 * i + 1] = src_audio[i]; | 76 dst_audio[2 * i + 1] = src_audio[i]; |
| 30 } | 77 } |
| 31 } | 78 } |
| 32 | 79 |
| (...skipping 28 matching lines...) Expand all Loading... |
| 61 return -1; | 108 return -1; |
| 62 } | 109 } |
| 63 | 110 |
| 64 StereoToMono(frame->data_, frame->samples_per_channel_, frame->data_); | 111 StereoToMono(frame->data_, frame->samples_per_channel_, frame->data_); |
| 65 frame->num_channels_ = 1; | 112 frame->num_channels_ = 1; |
| 66 | 113 |
| 67 return 0; | 114 return 0; |
| 68 } | 115 } |
| 69 | 116 |
| 70 void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) { | 117 void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) { |
| 71 if (frame->num_channels_ != 2) return; | 118 RTC_DCHECK(frame); |
| 119 if (frame->num_channels_ != 2) { |
| 120 return; |
| 121 } |
| 72 | 122 |
| 73 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) { | 123 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) { |
| 74 int16_t temp_data = frame->data_[i]; | 124 int16_t temp_data = frame->data_[i]; |
| 75 frame->data_[i] = frame->data_[i + 1]; | 125 frame->data_[i] = frame->data_[i + 1]; |
| 76 frame->data_[i + 1] = temp_data; | 126 frame->data_[i + 1] = temp_data; |
| 77 } | 127 } |
| 78 } | 128 } |
| 79 | 129 |
| 80 void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted, | 130 void AudioFrameOperations::Mute(AudioFrame* frame, |
| 131 bool previous_frame_muted, |
| 81 bool current_frame_muted) { | 132 bool current_frame_muted) { |
| 82 RTC_DCHECK(frame); | 133 RTC_DCHECK(frame); |
| 83 if (!previous_frame_muted && !current_frame_muted) { | 134 if (!previous_frame_muted && !current_frame_muted) { |
| 84 // Not muted, don't touch. | 135 // Not muted, don't touch. |
| 85 } else if (previous_frame_muted && current_frame_muted) { | 136 } else if (previous_frame_muted && current_frame_muted) { |
| 86 // Frame fully muted. | 137 // Frame fully muted. |
| 87 size_t total_samples = frame->samples_per_channel_ * frame->num_channels_; | 138 size_t total_samples = frame->samples_per_channel_ * frame->num_channels_; |
| 88 RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples); | 139 RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples); |
| 89 memset(frame->data_, 0, sizeof(frame->data_[0]) * total_samples); | 140 memset(frame->data_, 0, sizeof(frame->data_[0]) * total_samples); |
| 90 } else { | 141 } else { |
| (...skipping 27 matching lines...) Expand all Loading... |
| 118 for (size_t j = 0; j < channels; ++j) { | 169 for (size_t j = 0; j < channels; ++j) { |
| 119 float g = start_g; | 170 float g = start_g; |
| 120 for (size_t i = start * channels; i < end * channels; i += channels) { | 171 for (size_t i = start * channels; i < end * channels; i += channels) { |
| 121 g += inc; | 172 g += inc; |
| 122 frame->data_[i + j] *= g; | 173 frame->data_[i + j] *= g; |
| 123 } | 174 } |
| 124 } | 175 } |
| 125 } | 176 } |
| 126 } | 177 } |
| 127 | 178 |
// Fully mutes the frame (no fade) by delegating to the three-argument
// overload with both the previous and current frame marked muted.
void AudioFrameOperations::Mute(AudioFrame* frame) {
  Mute(frame, true, true);
}
| 182 |
| 183 void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) { |
| 184 RTC_DCHECK(frame); |
| 185 RTC_DCHECK_GT(frame->num_channels_, 0); |
| 186 if (frame->num_channels_ < 1) { |
| 187 return; |
| 188 } |
| 189 |
| 190 for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; |
| 191 i++) { |
| 192 frame->data_[i] = frame->data_[i] >> 1; |
| 193 } |
| 194 } |
| 195 |
| 128 int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) { | 196 int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) { |
| 129 if (frame.num_channels_ != 2) { | 197 if (frame.num_channels_ != 2) { |
| 130 return -1; | 198 return -1; |
| 131 } | 199 } |
| 132 | 200 |
| 133 for (size_t i = 0; i < frame.samples_per_channel_; i++) { | 201 for (size_t i = 0; i < frame.samples_per_channel_; i++) { |
| 134 frame.data_[2 * i] = | 202 frame.data_[2 * i] = static_cast<int16_t>(left * frame.data_[2 * i]); |
| 135 static_cast<int16_t>(left * frame.data_[2 * i]); | |
| 136 frame.data_[2 * i + 1] = | 203 frame.data_[2 * i + 1] = |
| 137 static_cast<int16_t>(right * frame.data_[2 * i + 1]); | 204 static_cast<int16_t>(right * frame.data_[2 * i + 1]); |
| 138 } | 205 } |
| 139 return 0; | 206 return 0; |
| 140 } | 207 } |
| 141 | 208 |
| 142 int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) { | 209 int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) { |
| 143 int32_t temp_data = 0; | 210 int32_t temp_data = 0; |
| 144 | 211 |
| 145 // Ensure that the output result is saturated [-32768, +32767]. | 212 // Ensure that the output result is saturated [-32768, +32767]. |
| 146 for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_; | 213 for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_; |
| 147 i++) { | 214 i++) { |
| 148 temp_data = static_cast<int32_t>(scale * frame.data_[i]); | 215 temp_data = static_cast<int32_t>(scale * frame.data_[i]); |
| 149 if (temp_data < -32768) { | 216 if (temp_data < -32768) { |
| 150 frame.data_[i] = -32768; | 217 frame.data_[i] = -32768; |
| 151 } else if (temp_data > 32767) { | 218 } else if (temp_data > 32767) { |
| 152 frame.data_[i] = 32767; | 219 frame.data_[i] = 32767; |
| 153 } else { | 220 } else { |
| 154 frame.data_[i] = static_cast<int16_t>(temp_data); | 221 frame.data_[i] = static_cast<int16_t>(temp_data); |
| 155 } | 222 } |
| 156 } | 223 } |
| 157 return 0; | 224 return 0; |
| 158 } | 225 } |
| 159 | |
| 160 } // namespace webrtc | 226 } // namespace webrtc |
| OLD | NEW |