OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 17 matching lines...) Expand all Loading... |
28 | 28 |
29 void CombineZeroFrames(AudioFrame* audio_frame_for_mixing) { | 29 void CombineZeroFrames(AudioFrame* audio_frame_for_mixing) { |
30 audio_frame_for_mixing->elapsed_time_ms_ = -1; | 30 audio_frame_for_mixing->elapsed_time_ms_ = -1; |
31 AudioFrameOperations::Mute(audio_frame_for_mixing); | 31 AudioFrameOperations::Mute(audio_frame_for_mixing); |
32 } | 32 } |
33 | 33 |
34 void CombineOneFrame(const AudioFrame* input_frame, | 34 void CombineOneFrame(const AudioFrame* input_frame, |
35 AudioFrame* audio_frame_for_mixing) { | 35 AudioFrame* audio_frame_for_mixing) { |
36 audio_frame_for_mixing->timestamp_ = input_frame->timestamp_; | 36 audio_frame_for_mixing->timestamp_ = input_frame->timestamp_; |
37 audio_frame_for_mixing->elapsed_time_ms_ = input_frame->elapsed_time_ms_; | 37 audio_frame_for_mixing->elapsed_time_ms_ = input_frame->elapsed_time_ms_; |
38 std::copy(input_frame->data_, | 38 if (!input_frame->muted()) { |
39 input_frame->data_ + | 39 size_t length = |
40 input_frame->num_channels_ * input_frame->samples_per_channel_, | 40 input_frame->num_channels_ * input_frame->samples_per_channel_; |
41 audio_frame_for_mixing->data_); | 41 const int16_t* input_data = input_frame->data(); |
| 42 std::copy(input_data, input_data + length, |
| 43 audio_frame_for_mixing->mutable_data()); |
| 44 } else { |
| 45 AudioFrameOperations::Mute(audio_frame_for_mixing); |
| 46 } |
42 } | 47 } |
43 | 48 |
44 std::unique_ptr<AudioProcessing> CreateLimiter() { | 49 std::unique_ptr<AudioProcessing> CreateLimiter() { |
45 Config config; | 50 Config config; |
46 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 51 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
47 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config)); | 52 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config)); |
48 RTC_DCHECK(limiter); | 53 RTC_DCHECK(limiter); |
49 | 54 |
50 const auto check_no_error = [](int x) { | 55 const auto check_no_error = [](int x) { |
51 RTC_DCHECK_EQ(x, AudioProcessing::kNoError); | 56 RTC_DCHECK_EQ(x, AudioProcessing::kNoError); |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
98 AudioFrame::kVadUnknown, number_of_channels); | 103 AudioFrame::kVadUnknown, number_of_channels); |
99 | 104 |
100 if (mix_list.empty()) { | 105 if (mix_list.empty()) { |
101 CombineZeroFrames(audio_frame_for_mixing); | 106 CombineZeroFrames(audio_frame_for_mixing); |
102 } else if (mix_list.size() == 1) { | 107 } else if (mix_list.size() == 1) { |
103 CombineOneFrame(mix_list.front(), audio_frame_for_mixing); | 108 CombineOneFrame(mix_list.front(), audio_frame_for_mixing); |
104 } else { | 109 } else { |
105 std::vector<rtc::ArrayView<const int16_t>> input_frames; | 110 std::vector<rtc::ArrayView<const int16_t>> input_frames; |
106 for (size_t i = 0; i < mix_list.size(); ++i) { | 111 for (size_t i = 0; i < mix_list.size(); ++i) { |
107 input_frames.push_back(rtc::ArrayView<const int16_t>( | 112 input_frames.push_back(rtc::ArrayView<const int16_t>( |
108 mix_list[i]->data_, samples_per_channel * number_of_channels)); | 113 mix_list[i]->data(), samples_per_channel * number_of_channels)); |
109 } | 114 } |
110 CombineMultipleFrames(input_frames, audio_frame_for_mixing); | 115 CombineMultipleFrames(input_frames, audio_frame_for_mixing); |
111 } | 116 } |
112 } | 117 } |
113 | 118 |
// Mixes two or more equally-sized int16 sample buffers into
// |audio_frame_for_mixing|. All inputs must have the same length (checked
// below). When the APM limiter is enabled, samples are halved before
// limiting and doubled afterwards to avoid saturation inside the limiter.
void FrameCombiner::CombineMultipleFrames(
    const std::vector<rtc::ArrayView<const int16_t>>& input_frames,
    AudioFrame* audio_frame_for_mixing) const {
  RTC_DCHECK(!input_frames.empty());
  RTC_DCHECK(audio_frame_for_mixing);

  // Every input view must cover the same number of samples; the first one
  // defines the expected length.
  const size_t frame_length = input_frames.front().size();
  for (const auto& frame : input_frames) {
    RTC_DCHECK_EQ(frame_length, frame.size());
  }

  // Algorithm: int16 frames are added to a sufficiently large
  // statically allocated int32 buffer. For > 2 participants this is
  // more efficient than addition in place in the int16 audio
  // frame. The audio quality loss due to halving the samples is
  // smaller than 16-bit addition in place.
  RTC_DCHECK_GE(kMaximalFrameSize, frame_length);
  std::array<int32_t, kMaximalFrameSize> add_buffer;

  add_buffer.fill(0);

  // Accumulate all inputs into the 32-bit buffer; int32 gives ample
  // headroom before the saturating downcast below.
  for (const auto& frame : input_frames) {
    // TODO(yujo): skip this for muted input frames.
    std::transform(frame.begin(), frame.end(), add_buffer.begin(),
                   add_buffer.begin(), std::plus<int32_t>());
  }

  if (use_apm_limiter_) {
    // Halve all samples to avoid saturation before limiting.
    std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
                   audio_frame_for_mixing->mutable_data(), [](int32_t a) {
                     return rtc::saturated_cast<int16_t>(a / 2);
                   });

    // Smoothly limit the audio.
    RTC_DCHECK(limiter_);
    const int error = limiter_->ProcessStream(audio_frame_for_mixing);
    if (error != limiter_->kNoError) {
      LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error;
      RTC_NOTREACHED();
    }

    // And now we can safely restore the level. This procedure results in
    // some loss of resolution, deemed acceptable.
    //
    // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
    // and compression gain of 6 dB). However, in the transition frame when this
    // is enabled (moving from one to two audio sources) it has the potential to
    // create discontinuities in the mixed frame.
    //
    // Instead we double the frame (with addition since left-shifting a
    // negative value is undefined).
    AudioFrameOperations::Add(*audio_frame_for_mixing, audio_frame_for_mixing);
  } else {
    // No limiter: clamp the 32-bit sums straight into the 16-bit output.
    std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
                   audio_frame_for_mixing->mutable_data(),
                   [](int32_t a) { return rtc::saturated_cast<int16_t>(a); });
  }
}
172 } // namespace webrtc | 178 } // namespace webrtc |
OLD | NEW |