/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <string.h>  // memcpy(), memset().

#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/base/checks.h"

namespace webrtc {
namespace {

// 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
const size_t kMuteFadeFrames = 128;
const float kMuteFadeInc = 1.0f / kMuteFadeFrames;

}  // namespace

void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
                                        size_t samples_per_channel,
                                        int16_t* dst_audio) {
  for (size_t i = 0; i < samples_per_channel; i++) {
    dst_audio[2 * i] = src_audio[i];
    dst_audio[2 * i + 1] = src_audio[i];
  }
}
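
// Illustration: for mono src_audio = {s0, s1, s2}, the interleaved output is
// dst_audio = {s0, s0, s1, s1, s2, s2}, so dst_audio must have room for
// 2 * samples_per_channel values and must not alias src_audio.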

int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
  if (frame->num_channels_ != 1) {
    return -1;
  }
  if ((frame->samples_per_channel_ * 2) >= AudioFrame::kMaxDataSizeSamples) {
    // Not enough memory to expand from mono to stereo.
    return -1;
  }

  int16_t data_copy[AudioFrame::kMaxDataSizeSamples];
  memcpy(data_copy, frame->data_,
         sizeof(int16_t) * frame->samples_per_channel_);
  MonoToStereo(data_copy, frame->samples_per_channel_, frame->data_);
  frame->num_channels_ = 2;

  return 0;
}
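
// Illustrative call site, assuming a 10 ms mono frame at 48 kHz
// (480 samples per channel); the frame is expanded in place:
//
//   AudioFrame frame;
//   frame.samples_per_channel_ = 480;
//   frame.num_channels_ = 1;
//   // ... fill frame.data_ with 480 mono samples ...
//   if (AudioFrameOperations::MonoToStereo(&frame) == 0) {
//     // frame.data_ now holds 960 interleaved L/R samples.
//   }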

void AudioFrameOperations::StereoToMono(const int16_t* src_audio,
                                        size_t samples_per_channel,
                                        int16_t* dst_audio) {
  for (size_t i = 0; i < samples_per_channel; i++) {
    dst_audio[i] = (src_audio[2 * i] + src_audio[2 * i + 1]) >> 1;
  }
}
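
// Illustration: each output sample is the average of a left/right pair, so
// interleaved src_audio = {l0, r0, l1, r1} downmixes to
// dst_audio = {(l0 + r0) / 2, (l1 + r1) / 2}. In-place use (dst_audio ==
// src_audio) is safe because index i is never written before it is read.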

int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
  if (frame->num_channels_ != 2) {
    return -1;
  }

  StereoToMono(frame->data_, frame->samples_per_channel_, frame->data_);
  frame->num_channels_ = 1;

  return 0;
}

void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
  if (frame->num_channels_ != 2) return;

  for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
    int16_t temp_data = frame->data_[i];
    frame->data_[i] = frame->data_[i + 1];
    frame->data_[i + 1] = temp_data;
  }
}

void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
                                bool current_frame_muted) {
  RTC_DCHECK(frame);
  if (!previous_frame_muted && !current_frame_muted) {
    // Not muted, don't touch.
  } else if (previous_frame_muted && current_frame_muted) {
    // Frame fully muted.
    size_t total_samples = frame->samples_per_channel_ * frame->num_channels_;
    RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples);
    memset(frame->data_, 0, sizeof(frame->data_[0]) * total_samples);
  } else {
    // Limit number of samples to fade, if frame isn't long enough.
    size_t count = kMuteFadeFrames;
    float inc = kMuteFadeInc;
    if (frame->samples_per_channel_ < kMuteFadeFrames) {
      count = frame->samples_per_channel_;
      if (count > 0) {
        inc = 1.0f / count;
      }
    }

    size_t start = 0;
    size_t end = count;
    float start_g = 0.0f;
    if (current_frame_muted) {
      // Fade out the last |count| samples of frame.
      RTC_DCHECK(!previous_frame_muted);
      start = frame->samples_per_channel_ - count;
      end = frame->samples_per_channel_;
      start_g = 1.0f;
      inc = -inc;
    } else {
      // Fade in the first |count| samples of frame.
      RTC_DCHECK(previous_frame_muted);
    }

    // Perform fade.
    size_t channels = frame->num_channels_;
    for (size_t j = 0; j < channels; ++j) {
      float g = start_g;
      for (size_t i = start * channels; i < end * channels; i += channels) {
        g += inc;
        frame->data_[i + j] *= g;
      }
    }
  }
}
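
// Worked example of the fade (illustrative): for a 480-sample frame going
// from unmuted to muted, count = 128, inc = -1/128, start = 352 and
// end = 480. The gain applied to samples 352..479 steps from
// 1 - 1/128 ~= 0.992 down to 0, so the last sample of the frame is silent
// and the next (fully muted) frame starts from zero.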

int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
  if (frame.num_channels_ != 2) {
    return -1;
  }

  for (size_t i = 0; i < frame.samples_per_channel_; i++) {
    frame.data_[2 * i] = static_cast<int16_t>(left * frame.data_[2 * i]);
    frame.data_[2 * i + 1] =
        static_cast<int16_t>(right * frame.data_[2 * i + 1]);
  }
  return 0;
}
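
// Illustrative use, e.g. to attenuate the right channel of a stereo frame by
// half while leaving the left channel untouched:
//
//   AudioFrameOperations::Scale(1.0f, 0.5f, frame);
//
// Note that, unlike ScaleWithSat() below, Scale() does not clamp: gains above
// 1.0 can overflow the int16_t cast.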

int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
  int32_t temp_data = 0;

  // Ensure that the output result is saturated [-32768, +32767].
  for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
       i++) {
    temp_data = static_cast<int32_t>(scale * frame.data_[i]);
    if (temp_data < -32768) {
      frame.data_[i] = -32768;
    } else if (temp_data > 32767) {
      frame.data_[i] = 32767;
    } else {
      frame.data_[i] = static_cast<int16_t>(temp_data);
    }
  }
  return 0;
}
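
// Worked example (illustrative): with scale = 2.5f, a sample of 10000 becomes
// 25000, while a sample of 20000 would compute to 50000 and is clamped to
// 32767; likewise -20000 would compute to -50000 and is clamped to -32768.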

}  // namespace webrtc