Chromium Code Reviews
Unified diff: webrtc/audio/utility/audio_frame_operations.cc

Issue 2424173003: Move functionality out from AudioFrame and into AudioFrameOperations. (Closed)
Patch Set: New tests, DCHECKS, style and default member initializers. Created 4 years ago
 /*
  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS. All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <algorithm>
the sun 2016/12/02 13:39:55: order!
aleloi 2016/12/05 09:18:37: Done.
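Not part of the patch: a minimal sketch of what the requested reordering could look like, assuming the usual Chromium/WebRTC include order (the file's own header first, then C++ standard headers, then other WebRTC headers). The actual follow-up patch set is not shown on this page.

    // Sketch only, not taken from the follow-up patch set.
    #include "webrtc/audio/utility/audio_frame_operations.h"

    #include <algorithm>

    #include "webrtc/base/checks.h"
    #include "webrtc/base/safe_conversions.h"
    #include "webrtc/modules/include/module_common_types.h"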
+
+#include "webrtc/audio/utility/audio_frame_operations.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
 #include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
-#include "webrtc/base/checks.h"
 
 namespace webrtc {
 namespace {
 
 // 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
 const size_t kMuteFadeFrames = 128;
 const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
 
-}  // namespace {
+}  // namespace
+
+void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
+                               AudioFrame* result_frame) {
+  // Sanity check.
+  RTC_DCHECK(result_frame);
+  RTC_DCHECK_GT(result_frame->num_channels_, 0);
+  RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);
+  if (result_frame->num_channels_ < 1) {
+    return;
+  }
+  if (result_frame->num_channels_ != frame_to_add.num_channels_) {
+    return;
+  }
+
+  bool no_previous_data = false;
+  if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
+    RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
+    if (result_frame->samples_per_channel_ == 0) {
the sun 2016/12/02 13:39:55: No need for this conditional handling now that you
aleloi 2016/12/05 09:18:37: Done.
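Not part of the patch: a sketch of how the entry checks of Add() might collapse once the reviewer's suggestion is applied, relying on the DCHECKs alone instead of the nested runtime fallback. The real follow-up patch set is not shown here.

    void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
                                   AudioFrame* result_frame) {
      // Sanity check.
      RTC_DCHECK(result_frame);
      RTC_DCHECK_GT(result_frame->num_channels_, 0);
      RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);

      bool no_previous_data = false;
      if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
        // Special case we have no data to start with.
        RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
        result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
        no_previous_data = true;
      }
      // ... rest of the function as in the patch below.
    }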
+      // Special case we have no data to start with.
+      result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
+      no_previous_data = true;
+    } else {
+      return;
+    }
+  }
+
+  if (result_frame->vad_activity_ == AudioFrame::kVadActive ||
+      frame_to_add.vad_activity_ == AudioFrame::kVadActive) {
+    result_frame->vad_activity_ = AudioFrame::kVadActive;
+  } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
+             frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) {
+    result_frame->vad_activity_ = AudioFrame::kVadUnknown;
+  }
+
+  if (result_frame->speech_type_ != frame_to_add.speech_type_)
+    result_frame->speech_type_ = AudioFrame::kUndefined;
+
+  if (no_previous_data) {
+    std::copy(frame_to_add.data_, frame_to_add.data_ +
+                                      frame_to_add.samples_per_channel_ *
+                                          result_frame->num_channels_,
+              result_frame->data_);
+  } else {
+    for (size_t i = 0;
+         i < result_frame->samples_per_channel_ * result_frame->num_channels_;
+         i++) {
+      const int32_t wrap_guard = static_cast<int32_t>(result_frame->data_[i]) +
+                                 static_cast<int32_t>(frame_to_add.data_[i]);
+      result_frame->data_[i] = rtc::saturated_cast<int16_t>(wrap_guard);
+    }
+  }
+  return;
+}
 
 void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
                                         size_t samples_per_channel,
                                         int16_t* dst_audio) {
   for (size_t i = 0; i < samples_per_channel; i++) {
     dst_audio[2 * i] = src_audio[i];
     dst_audio[2 * i + 1] = src_audio[i];
   }
 }
 
(...skipping 28 matching lines...)
     return -1;
   }
 
   StereoToMono(frame->data_, frame->samples_per_channel_, frame->data_);
   frame->num_channels_ = 1;
 
   return 0;
 }
 
 void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
-  if (frame->num_channels_ != 2) return;
+  RTC_DCHECK(frame);
+  if (frame->num_channels_ != 2) {
+    return;
+  }
 
   for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
     int16_t temp_data = frame->data_[i];
     frame->data_[i] = frame->data_[i + 1];
     frame->data_[i + 1] = temp_data;
   }
 }
 
-void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
+void AudioFrameOperations::Mute(AudioFrame* frame,
+                                bool previous_frame_muted,
                                 bool current_frame_muted) {
   RTC_DCHECK(frame);
   if (!previous_frame_muted && !current_frame_muted) {
     // Not muted, don't touch.
   } else if (previous_frame_muted && current_frame_muted) {
     // Frame fully muted.
     size_t total_samples = frame->samples_per_channel_ * frame->num_channels_;
     RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples);
     memset(frame->data_, 0, sizeof(frame->data_[0]) * total_samples);
   } else {
(...skipping 27 matching lines...)
     for (size_t j = 0; j < channels; ++j) {
       float g = start_g;
       for (size_t i = start * channels; i < end * channels; i += channels) {
         g += inc;
         frame->data_[i + j] *= g;
       }
     }
   }
 }
 
+void AudioFrameOperations::Mute(AudioFrame* frame) {
+  Mute(frame, true, true);
+}
+
+void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
the sun 2016/12/02 13:39:55: DCHECK(frame) since you deref it, not just pass on
aleloi 2016/12/05 09:18:37: Done.
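Not part of the patch: a sketch of the requested change, checking the pointer before it is dereferenced. The real follow-up patch set is not shown here.

    void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
      RTC_DCHECK(frame);  // Added per the comment: |frame| is dereferenced below.
      RTC_DCHECK_GT(frame->num_channels_, 0);
      // ... body unchanged from the patch below.
    }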
+  RTC_DCHECK_GT(frame->num_channels_, 0);
+  if (frame->num_channels_ < 1) {
+    return;
+  }
+
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+       i++) {
+    frame->data_[i] = frame->data_[i] >> 1;
+  }
+}
+
 int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
   if (frame.num_channels_ != 2) {
     return -1;
   }
 
   for (size_t i = 0; i < frame.samples_per_channel_; i++) {
-    frame.data_[2 * i] =
-        static_cast<int16_t>(left * frame.data_[2 * i]);
+    frame.data_[2 * i] = static_cast<int16_t>(left * frame.data_[2 * i]);
     frame.data_[2 * i + 1] =
         static_cast<int16_t>(right * frame.data_[2 * i + 1]);
   }
   return 0;
 }
 
 int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
   int32_t temp_data = 0;
 
   // Ensure that the output result is saturated [-32768, +32767].
   for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
        i++) {
     temp_data = static_cast<int32_t>(scale * frame.data_[i]);
     if (temp_data < -32768) {
       frame.data_[i] = -32768;
     } else if (temp_data > 32767) {
       frame.data_[i] = 32767;
     } else {
       frame.data_[i] = static_cast<int16_t>(temp_data);
     }
   }
   return 0;
 }
-
 }  // namespace webrtc
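For context, a short caller-side sketch of the functionality this CL moves into AudioFrameOperations. The helper MixAndFade and its call pattern are hypothetical, not taken from the CL; only the AudioFrameOperations calls themselves match the signatures in the patch above.

    #include "webrtc/audio/utility/audio_frame_operations.h"

    // Hypothetical helper: mix |incoming| into |mixed| with saturation, then
    // zero or fade the result depending on the mute-state transition.
    void MixAndFade(const webrtc::AudioFrame& incoming,
                    webrtc::AudioFrame* mixed,
                    bool previous_frame_muted,
                    bool current_frame_muted) {
      webrtc::AudioFrameOperations::Add(incoming, mixed);
      webrtc::AudioFrameOperations::Mute(mixed, previous_frame_muted,
                                         current_frame_muted);
    }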