OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
93 return speech_encoder_->Max10MsFramesInAPacket(); | 93 return speech_encoder_->Max10MsFramesInAPacket(); |
94 } | 94 } |
95 | 95 |
96 int AudioEncoderCng::GetTargetBitrate() const { | 96 int AudioEncoderCng::GetTargetBitrate() const { |
97 return speech_encoder_->GetTargetBitrate(); | 97 return speech_encoder_->GetTargetBitrate(); |
98 } | 98 } |
99 | 99 |
100 AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal( | 100 AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal( |
101 uint32_t rtp_timestamp, | 101 uint32_t rtp_timestamp, |
102 rtc::ArrayView<const int16_t> audio, | 102 rtc::ArrayView<const int16_t> audio, |
103 size_t max_encoded_bytes, | 103 rtc::Buffer* encoded) { |
104 uint8_t* encoded) { | |
105 RTC_CHECK_GE(max_encoded_bytes, | |
106 static_cast<size_t>(num_cng_coefficients_ + 1)); | |
107 const size_t samples_per_10ms_frame = SamplesPer10msFrame(); | 104 const size_t samples_per_10ms_frame = SamplesPer10msFrame(); |
108 RTC_CHECK_EQ(speech_buffer_.size(), | 105 RTC_CHECK_EQ(speech_buffer_.size(), |
109 rtp_timestamps_.size() * samples_per_10ms_frame); | 106 rtp_timestamps_.size() * samples_per_10ms_frame); |
110 rtp_timestamps_.push_back(rtp_timestamp); | 107 rtp_timestamps_.push_back(rtp_timestamp); |
111 RTC_DCHECK_EQ(samples_per_10ms_frame, audio.size()); | 108 RTC_DCHECK_EQ(samples_per_10ms_frame, audio.size()); |
112 speech_buffer_.insert(speech_buffer_.end(), audio.cbegin(), audio.cend()); | 109 speech_buffer_.insert(speech_buffer_.end(), audio.cbegin(), audio.cend()); |
113 const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket(); | 110 const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket(); |
114 if (rtp_timestamps_.size() < frames_to_encode) { | 111 if (rtp_timestamps_.size() < frames_to_encode) { |
115 return EncodedInfo(); | 112 return EncodedInfo(); |
116 } | 113 } |
(...skipping 21 matching lines...) Expand all Loading... | |
138 if (activity == Vad::kPassive && blocks_in_second_vad_call > 0) { | 135 if (activity == Vad::kPassive && blocks_in_second_vad_call > 0) { |
139 // Only check the second block if the first was passive. | 136 // Only check the second block if the first was passive. |
140 activity = vad_->VoiceActivity( | 137 activity = vad_->VoiceActivity( |
141 &speech_buffer_[samples_per_10ms_frame * blocks_in_first_vad_call], | 138 &speech_buffer_[samples_per_10ms_frame * blocks_in_first_vad_call], |
142 samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz()); | 139 samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz()); |
143 } | 140 } |
144 | 141 |
145 EncodedInfo info; | 142 EncodedInfo info; |
146 switch (activity) { | 143 switch (activity) { |
147 case Vad::kPassive: { | 144 case Vad::kPassive: { |
148 info = EncodePassive(frames_to_encode, max_encoded_bytes, encoded); | 145 info = EncodePassive(frames_to_encode, encoded); |
149 last_frame_active_ = false; | 146 last_frame_active_ = false; |
150 break; | 147 break; |
151 } | 148 } |
152 case Vad::kActive: { | 149 case Vad::kActive: { |
153 info = EncodeActive(frames_to_encode, max_encoded_bytes, encoded); | 150 info = EncodeActive(frames_to_encode, encoded); |
154 last_frame_active_ = true; | 151 last_frame_active_ = true; |
155 break; | 152 break; |
156 } | 153 } |
157 case Vad::kError: { | 154 case Vad::kError: { |
158 FATAL(); // Fails only if fed invalid data. | 155 FATAL(); // Fails only if fed invalid data. |
159 break; | 156 break; |
160 } | 157 } |
161 } | 158 } |
162 | 159 |
163 speech_buffer_.erase( | 160 speech_buffer_.erase( |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
197 void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) { | 194 void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) { |
198 speech_encoder_->SetProjectedPacketLossRate(fraction); | 195 speech_encoder_->SetProjectedPacketLossRate(fraction); |
199 } | 196 } |
200 | 197 |
201 void AudioEncoderCng::SetTargetBitrate(int bits_per_second) { | 198 void AudioEncoderCng::SetTargetBitrate(int bits_per_second) { |
202 speech_encoder_->SetTargetBitrate(bits_per_second); | 199 speech_encoder_->SetTargetBitrate(bits_per_second); |
203 } | 200 } |
204 | 201 |
205 AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive( | 202 AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive( |
206 size_t frames_to_encode, | 203 size_t frames_to_encode, |
207 size_t max_encoded_bytes, | 204 rtc::Buffer* encoded) { |
208 uint8_t* encoded) { | |
209 bool force_sid = last_frame_active_; | 205 bool force_sid = last_frame_active_; |
210 bool output_produced = false; | 206 bool output_produced = false; |
211 const size_t samples_per_10ms_frame = SamplesPer10msFrame(); | 207 const size_t samples_per_10ms_frame = SamplesPer10msFrame(); |
212 RTC_CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame); | 208 const size_t bytes_to_encode = frames_to_encode * samples_per_10ms_frame; |
213 AudioEncoder::EncodedInfo info; | 209 AudioEncoder::EncodedInfo info; |
214 for (size_t i = 0; i < frames_to_encode; ++i) { | 210 |
215 // It's important not to pass &info.encoded_bytes directly to | 211 encoded->AppendData(bytes_to_encode, [&] (rtc::ArrayView<uint8_t> encoded) { |
The Sun (google.com)
2016/03/01 21:06:15
FYI: Chromium C++11 style guide says to not use default lambda captures.
ossu
2016/03/02 08:39:42
Well yes, and no. It says don't use it, referring [comment truncated in capture]
kwiberg-webrtc
2016/03/02 09:40:24
Yes. The Google style guide used to blanket ban default captures [comment truncated in capture]
| |
216 // WebRtcCng_Encode(), since later loop iterations may return zero in that | 212 for (size_t i = 0; i < frames_to_encode; ++i) { |
217 // value, in which case we don't want to overwrite any value from an earlier | 213 // It's important not to pass &info.encoded_bytes directly to |
218 // iteration. | 214 // WebRtcCng_Encode(), since later loop iterations may return zero in |
219 size_t encoded_bytes_tmp = 0; | 215 // that value, in which case we don't want to overwrite any value from |
220 RTC_CHECK_GE(WebRtcCng_Encode(cng_inst_.get(), | 216 // an earlier iteration. |
221 &speech_buffer_[i * samples_per_10ms_frame], | 217 size_t encoded_bytes_tmp = 0; |
222 samples_per_10ms_frame, encoded, | 218 RTC_CHECK_GE( |
223 &encoded_bytes_tmp, force_sid), | 219 WebRtcCng_Encode(cng_inst_.get(), |
224 0); | 220 &speech_buffer_[i * samples_per_10ms_frame], |
225 if (encoded_bytes_tmp > 0) { | 221 samples_per_10ms_frame, encoded.data(), |
226 RTC_CHECK(!output_produced); | 222 &encoded_bytes_tmp, force_sid), |
227 info.encoded_bytes = encoded_bytes_tmp; | 223 0); |
228 output_produced = true; | 224 if (encoded_bytes_tmp > 0) { |
229 force_sid = false; | 225 RTC_CHECK(!output_produced); |
230 } | 226 info.encoded_bytes = encoded_bytes_tmp; |
231 } | 227 output_produced = true; |
228 force_sid = false; | |
229 } | |
230 } | |
231 | |
232 return info.encoded_bytes; | |
233 }); | |
234 | |
232 info.encoded_timestamp = rtp_timestamps_.front(); | 235 info.encoded_timestamp = rtp_timestamps_.front(); |
233 info.payload_type = cng_payload_type_; | 236 info.payload_type = cng_payload_type_; |
234 info.send_even_if_empty = true; | 237 info.send_even_if_empty = true; |
235 info.speech = false; | 238 info.speech = false; |
236 return info; | 239 return info; |
237 } | 240 } |
238 | 241 |
239 AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive( | 242 AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive( |
240 size_t frames_to_encode, | 243 size_t frames_to_encode, |
241 size_t max_encoded_bytes, | 244 rtc::Buffer* encoded) { |
242 uint8_t* encoded) { | |
243 const size_t samples_per_10ms_frame = SamplesPer10msFrame(); | 245 const size_t samples_per_10ms_frame = SamplesPer10msFrame(); |
244 AudioEncoder::EncodedInfo info; | 246 AudioEncoder::EncodedInfo info; |
245 for (size_t i = 0; i < frames_to_encode; ++i) { | 247 for (size_t i = 0; i < frames_to_encode; ++i) { |
246 info = | 248 info = |
247 speech_encoder_->Encode(rtp_timestamps_.front(), | 249 speech_encoder_->Encode(rtp_timestamps_.front(), |
248 rtc::ArrayView<const int16_t>( | 250 rtc::ArrayView<const int16_t>( |
249 &speech_buffer_[i * samples_per_10ms_frame], | 251 &speech_buffer_[i * samples_per_10ms_frame], |
250 samples_per_10ms_frame), | 252 samples_per_10ms_frame), |
251 max_encoded_bytes, encoded); | 253 encoded); |
252 if (i + 1 == frames_to_encode) { | 254 if (i + 1 == frames_to_encode) { |
253 RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data."; | 255 RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data."; |
254 } else { | 256 } else { |
255 RTC_CHECK_EQ(info.encoded_bytes, 0u) | 257 RTC_CHECK_EQ(info.encoded_bytes, 0u) |
256 << "Encoder delivered data too early."; | 258 << "Encoder delivered data too early."; |
257 } | 259 } |
258 } | 260 } |
259 return info; | 261 return info; |
260 } | 262 } |
261 | 263 |
262 size_t AudioEncoderCng::SamplesPer10msFrame() const { | 264 size_t AudioEncoderCng::SamplesPer10msFrame() const { |
263 return rtc::CheckedDivExact(10 * SampleRateHz(), 1000); | 265 return rtc::CheckedDivExact(10 * SampleRateHz(), 1000); |
264 } | 266 } |
265 | 267 |
266 } // namespace webrtc | 268 } // namespace webrtc |
OLD | NEW |