Chromium Code Reviews

Unified Diff: webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc

Issue 1303413003: AudioCodingModuleImpl::Encode: Use a Buffer instead of a stack-allocated array (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@acm-common-defs
Patch Set: Created 5 years, 3 months ago
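For context, the change replaces the fixed stack array stream[2 * MAX_PAYLOAD_SIZE_BYTE] in Encode() with the Buffer member encode_buffer_, which is grown to the encoder's MaxEncodedBytes() before encoding and shrunk to the number of bytes actually produced. A minimal sketch of that sizing pattern, using std::vector<uint8_t> as a stand-in for rtc::Buffer and a hypothetical FakeEncoder (not WebRTC's AudioEncoder API):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in for AudioEncoder; only the pieces relevant to the
// buffer-sizing pattern are modeled.
struct FakeEncoder {
  struct EncodedInfo {
    size_t encoded_bytes = 0;
  };
  size_t MaxEncodedBytes() const { return 1500; }  // Worst-case payload size.
  EncodedInfo Encode(size_t max_bytes, uint8_t* output) {
    EncodedInfo info;
    // Pretend the codec produced a 60-byte frame (capped by max_bytes).
    info.encoded_bytes = max_bytes < 60 ? max_bytes : 60;
    std::memset(output, 0xAB, info.encoded_bytes);
    return info;
  }
};

// The pattern the CL adopts: grow the buffer to the worst case, encode into
// it, then shrink it to the bytes actually written. std::vector stands in for
// rtc::Buffer here (SetSize corresponds to resize).
std::vector<uint8_t> EncodeOneFrame(FakeEncoder* encoder) {
  std::vector<uint8_t> buffer;
  buffer.resize(encoder->MaxEncodedBytes());
  FakeEncoder::EncodedInfo info =
      encoder->Encode(buffer.size(), buffer.data());
  buffer.resize(info.encoded_bytes);
  return buffer;  // Exactly info.encoded_bytes long; no fixed cap baked in.
}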
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 139 matching lines...)

   if (InitializeReceiverSafe() < 0) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                  "Cannot initialize receiver");
   }
   WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created");
 }

 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;

 int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
-  uint8_t stream[2 * MAX_PAYLOAD_SIZE_BYTE];  // Make room for 1 RED payload.
   AudioEncoder::EncodedInfo encoded_info;
   uint8_t previous_pltype;

   // Check if there is an encoder before.
   if (!HaveValidEncoder("Process"))
     return -1;

   AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
   // Scale the timestamp to the codec's RTP timestamp rate.
   uint32_t rtp_timestamp =
       first_frame_ ? input_data.input_timestamp
                    : last_rtp_timestamp_ +
                          rtc::CheckedDivExact(
                              input_data.input_timestamp - last_timestamp_,
                              static_cast<uint32_t>(rtc::CheckedDivExact(
                                  audio_encoder->SampleRateHz(),
                                  audio_encoder->RtpTimestampRateHz())));
   last_timestamp_ = input_data.input_timestamp;
   last_rtp_timestamp_ = rtp_timestamp;
   first_frame_ = false;

-  encoded_info = audio_encoder->Encode(rtp_timestamp, input_data.audio,
-                                       input_data.length_per_channel,
-                                       sizeof(stream), stream);
+  encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
+  encoded_info = audio_encoder->Encode(
+      rtp_timestamp, input_data.audio, input_data.length_per_channel,
+      encode_buffer_.size(), encode_buffer_.data());
+  encode_buffer_.SetSize(encoded_info.encoded_bytes);
   bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000);
-  if (encoded_info.encoded_bytes == 0 && !encoded_info.send_even_if_empty) {
+  if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
hlundin-webrtc 2015/08/25 12:43:43: Why change this while still using encoded_info.encoded_bytes…
kwiberg-webrtc 2015/08/25 13:20:24: Indeed, that was dismeticulous of me. Fixed (by using…
hlundin-webrtc 2015/08/26 13:10:17: Meticulosity is restored.
     // Not enough data.
     return 0;
   }
   previous_pltype = previous_pltype_;  // Read it while we have the critsect.

   RTPFragmentationHeader my_fragmentation;
   ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   FrameType frame_type;
   if (encoded_info.encoded_bytes == 0 && encoded_info.send_even_if_empty) {
     frame_type = kFrameEmpty;
     encoded_info.payload_type = previous_pltype;
   } else {
     DCHECK_GT(encoded_info.encoded_bytes, 0u);
     frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
   }

   {
     CriticalSectionScoped lock(callback_crit_sect_.get());
     if (packetization_callback_) {
       packetization_callback_->SendData(
           frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
-          stream, encoded_info.encoded_bytes,
+          encode_buffer_.data(), encode_buffer_.size(),
           my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
                                                        : nullptr);
     }

     if (vad_callback_) {
       // Callback with VAD decision.
       vad_callback_->InFrameType(frame_type);
     }
   }
   previous_pltype_ = encoded_info.payload_type;

(...skipping 930 matching lines...)

       *channels = 1;
       break;
 #endif
     default:
       FATAL() << "Codec type " << codec_type << " not supported.";
   }
   return true;
 }

 }  // namespace webrtc
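The timestamp scaling in Encode() divides the elapsed input timestamp by SampleRateHz() / RtpTimestampRateHz(), so the RTP timestamp advances at the codec's RTP clock rate rather than the raw sample rate. A minimal sketch of that arithmetic with the rates passed as parameters; the 16 kHz / 8 kHz numbers below are an assumption for illustration (in the spirit of G.722's 8 kHz RTP clock), not taken from this CL:

#include <cassert>
#include <cstdint>

// Sketch of the scaling above: the RTP timestamp advances by the
// input-timestamp delta divided by (sample rate / RTP timestamp rate).
// CheckedDivExact in the real code requires both divisions to be exact,
// which the asserts mimic here.
uint32_t NextRtpTimestamp(uint32_t last_rtp_timestamp,
                          uint32_t last_input_timestamp,
                          uint32_t input_timestamp,
                          int sample_rate_hz,
                          int rtp_timestamp_rate_hz) {
  assert(sample_rate_hz % rtp_timestamp_rate_hz == 0);
  uint32_t ratio =
      static_cast<uint32_t>(sample_rate_hz / rtp_timestamp_rate_hz);
  uint32_t delta = input_timestamp - last_input_timestamp;
  assert(delta % ratio == 0);
  return last_rtp_timestamp + delta / ratio;
}

// Example with the assumed rates: 10 ms of 16 kHz audio is 160 input samples,
// but on an 8000 Hz RTP clock the timestamp advances by only 80:
//   NextRtpTimestamp(1000, 0, 160, 16000, 8000) == 1080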
