Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/frame_combiner.cc

Issue 2750783004: Add mute state field to AudioFrame. (Closed)
Patch Set: Update new usages of AudioFrame::data_ (created 3 years, 6 months ago)
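The changes in this file replace direct reads and writes of the public AudioFrame::data_ member with the data() / mutable_data() accessors added alongside the new mute state. The sketch below is only an assumption about the accessor shape implied by how this file uses them (const reads through data(), writable access through mutable_data(), a muted() query); it is not the actual AudioFrame header from this issue.

// Hypothetical sketch, not the real header: how accessors like these could
// back a muted frame with a shared zero buffer. Names other than data(),
// mutable_data() and muted() are invented for illustration.
#include <cstddef>
#include <cstdint>
#include <cstring>

class AudioFrameSketch {
 public:
  static constexpr size_t kMaxDataSizeSamples = 3840;

  // Read access: a muted frame is served from a shared all-zero buffer,
  // so readers never have to special-case silence.
  const int16_t* data() const { return muted_ ? ZeroBuffer() : data_; }

  // Write access: handing out a writable pointer means the frame will
  // carry real samples again, so clear the flag and start from silence.
  int16_t* mutable_data() {
    if (muted_) {
      std::memset(data_, 0, sizeof(data_));
      muted_ = false;
    }
    return data_;
  }

  bool muted() const { return muted_; }

 private:
  static const int16_t* ZeroBuffer() {
    static const int16_t zeros[kMaxDataSizeSamples] = {};
    return zeros;
  }

  int16_t data_[kMaxDataSizeSamples] = {};
  bool muted_ = true;
};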
 /*
  * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 32 matching lines...)
     limiter->ProcessStream(audio_frame_for_mixing);
   }
 }

 void CombineOneFrame(const AudioFrame* input_frame,
                      bool use_limiter,
                      AudioProcessing* limiter,
                      AudioFrame* audio_frame_for_mixing) {
   audio_frame_for_mixing->timestamp_ = input_frame->timestamp_;
   audio_frame_for_mixing->elapsed_time_ms_ = input_frame->elapsed_time_ms_;
-  std::copy(input_frame->data_,
-            input_frame->data_ +
-                input_frame->num_channels_ * input_frame->samples_per_channel_,
-            audio_frame_for_mixing->data_);
+  // TODO(yujo): can we optimize muted frames?
+  std::copy(input_frame->data(),
+            input_frame->data() +
+                input_frame->num_channels_ * input_frame->samples_per_channel_,
+            audio_frame_for_mixing->mutable_data());
   if (use_limiter) {
     AudioFrameOperations::ApplyHalfGain(audio_frame_for_mixing);
     RTC_DCHECK(limiter);
     limiter->ProcessStream(audio_frame_for_mixing);
     AudioFrameOperations::Add(*audio_frame_for_mixing, audio_frame_for_mixing);
   }
 }

 // Lower-level helper function called from Combine(...) when there
 // are several input frames.
(...skipping 21 matching lines...)
   // statically allocated int32 buffer. For > 2 participants this is
   // more efficient than addition in place in the int16 audio
   // frame. The audio quality loss due to halving the samples is
   // smaller than 16-bit addition in place.
   RTC_DCHECK_GE(kMaximalFrameSize, frame_length);
   std::array<int32_t, kMaximalFrameSize> add_buffer;

   add_buffer.fill(0);

   for (const auto& frame : input_frames) {
+    // TODO(yujo): skip this for muted frames.
     std::transform(frame.begin(), frame.end(), add_buffer.begin(),
                    add_buffer.begin(), std::plus<int32_t>());
   }

   if (use_limiter) {
     // Halve all samples to avoid saturation before limiting.
     std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
-                   audio_frame_for_mixing->data_, [](int32_t a) {
+                   audio_frame_for_mixing->mutable_data(), [](int32_t a) {
                      return rtc::saturated_cast<int16_t>(a / 2);
                    });

     // Smoothly limit the audio.
     RTC_DCHECK(limiter);
     const int error = limiter->ProcessStream(audio_frame_for_mixing);
     if (error != limiter->kNoError) {
       LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error;
       RTC_NOTREACHED();
     }

     // And now we can safely restore the level. This procedure results in
     // some loss of resolution, deemed acceptable.
     //
     // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
     // and compression gain of 6 dB). However, in the transition frame when this
     // is enabled (moving from one to two audio sources) it has the potential to
     // create discontinuities in the mixed frame.
     //
     // Instead we double the frame (with addition since left-shifting a
     // negative value is undefined).
     AudioFrameOperations::Add(*audio_frame_for_mixing, audio_frame_for_mixing);
   } else {
     std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
-                   audio_frame_for_mixing->data_,
+                   audio_frame_for_mixing->mutable_data(),
                    [](int32_t a) { return rtc::saturated_cast<int16_t>(a); });
   }
 }

 std::unique_ptr<AudioProcessing> CreateLimiter() {
   Config config;
   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));

   std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config));
   RTC_DCHECK(limiter);
(...skipping 58 matching lines...)
   if (mix_list.empty()) {
     CombineZeroFrames(use_limiter_this_round, limiter_.get(),
                       audio_frame_for_mixing);
   } else if (mix_list.size() == 1) {
     CombineOneFrame(mix_list.front(), use_limiter_this_round, limiter_.get(),
                     audio_frame_for_mixing);
   } else {
     std::vector<rtc::ArrayView<const int16_t>> input_frames;
     for (size_t i = 0; i < mix_list.size(); ++i) {
       input_frames.push_back(rtc::ArrayView<const int16_t>(
-          mix_list[i]->data_, samples_per_channel * number_of_channels));
+          mix_list[i]->data(), samples_per_channel * number_of_channels));
     }
     CombineMultipleFrames(input_frames, use_limiter_this_round, limiter_.get(),
                           audio_frame_for_mixing);
   }
 }
+
 }  // namespace webrtc
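Both TODO(yujo) notes in this diff point at the same follow-up: once a frame can report that it is muted, the per-sample work for silent sources can be skipped. The sketch below is illustrative only and not part of this change; it assumes an AudioFrame::muted() accessor and that AudioFrameOperations::Mute() marks a frame as silent.

// Illustrative snippets only; AudioFrame::muted() is assumed to exist and
// AudioFrameOperations::Mute() is assumed to mark a frame as silent.

// In CombineOneFrame(): a muted input needs neither the sample copy nor
// the limiter pass, since the output is silence either way.
if (input_frame->muted()) {
  AudioFrameOperations::Mute(audio_frame_for_mixing);
  return;
}

// In FrameCombiner::Combine(): muted sources add nothing, so they could be
// dropped before the ArrayViews are handed to CombineMultipleFrames().
std::vector<rtc::ArrayView<const int16_t>> input_frames;
for (size_t i = 0; i < mix_list.size(); ++i) {
  if (mix_list[i]->muted())
    continue;  // Skip silent sources; they would only add zeros.
  input_frames.push_back(rtc::ArrayView<const int16_t>(
      mix_list[i]->data(), samples_per_channel * number_of_channels));
}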