Chromium Code Reviews

Unified Diff: webrtc/modules/video_coding/codecs/test/videoprocessor.cc

Issue 2696503003: Tighten up encode time measurement in VideoProcessor. (Closed)
Patch Set: sprang comments 1. Created 3 years, 10 months ago
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 202 matching lines...)
   }

   rtc::scoped_refptr<VideoFrameBuffer> buffer(frame_reader_->ReadFrame());
   if (buffer) {
     // Use the frame number as "timestamp" to identify frames.
     VideoFrame source_frame(buffer, frame_number, 0, webrtc::kVideoRotation_0);

     // Ensure we have a new statistics data object we can fill.
     FrameStatistic& stat = stats_->NewFrame(frame_number);

-    encode_start_ns_ = rtc::TimeNanos();
-
     // Decide if we are going to force a keyframe.
     std::vector<FrameType> frame_types(1, kVideoFrameDelta);
     if (config_.keyframe_interval > 0 &&
         frame_number % config_.keyframe_interval == 0) {
       frame_types[0] = kVideoFrameKey;
     }

     // For dropped frames, we regard them as zero size encoded frames.
     encoded_frame_size_ = 0;
     encoded_frame_type_ = kVideoFrameDelta;

+    // For the highest measurement accuracy of the encode time, the start/stop
+    // time recordings should wrap the Encode call as tightly as possible.
+    encode_start_ns_ = rtc::TimeNanos();
     int32_t encode_result =
         encoder_->Encode(source_frame, nullptr, &frame_types);

     if (encode_result != WEBRTC_VIDEO_CODEC_OK) {
       fprintf(stderr, "Failed to encode frame %d, return code: %d\n",
               frame_number, encode_result);
     }
     stat.encode_return_code = encode_result;

     return true;
   } else {
     // Last frame has been reached.
     return false;
   }
 }

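The point of the reordering above is that the timed region should contain nothing but the Encode() call: previously the keyframe decision and the stats bookkeeping were counted as encode time. Below is a minimal, self-contained sketch of the tightened pattern, with std::chrono standing in for rtc::TimeNanos() and a hypothetical SimulatedEncode() in place of encoder_->Encode(). Note that in the real code the stop time is taken in the FrameEncoded() callback, shown next; this simplified sketch is synchronous.

#include <chrono>
#include <cstdio>
#include <thread>

using Clock = std::chrono::steady_clock;

// Hypothetical stand-in for encoder_->Encode(); sleeps to simulate work.
static void SimulatedEncode() {
  std::this_thread::sleep_for(std::chrono::milliseconds(2));
}

int main() {
  // Untimed per-frame setup (keyframe decision, stats bookkeeping) goes here.
  Clock::time_point encode_start = Clock::now();  // Read the clock last...
  SimulatedEncode();
  Clock::time_point encode_stop = Clock::now();   // ...and again right after.
  auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                encode_stop - encode_start).count();
  std::printf("measured encode time: %lld us\n", static_cast<long long>(us));
  return 0;
}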
 void VideoProcessorImpl::FrameEncoded(
     webrtc::VideoCodecType codec,
     const EncodedImage& encoded_image,
     const webrtc::RTPFragmentationHeader* fragmentation) {
+  // For the highest measurement accuracy of the encode time, the start/stop
+  // time recordings should wrap the Encode call as tightly as possible.
+  int64_t encode_stop_ns = rtc::TimeNanos();
+
   // Timestamp is frame number, so this gives us #dropped frames.
   int num_dropped_from_prev_encode =
       encoded_image._timeStamp - prev_time_stamp_ - 1;
   num_dropped_frames_ += num_dropped_from_prev_encode;
   prev_time_stamp_ = encoded_image._timeStamp;
   if (num_dropped_from_prev_encode > 0) {
     // For dropped frames, we write out the last decoded frame to avoid getting
     // out of sync for the computation of PSNR and SSIM.
     for (int i = 0; i < num_dropped_from_prev_encode; i++) {
       frame_writer_->WriteFrame(last_successful_frame_buffer_.get());
     }
   }
   // Frame is not dropped, so update the encoded frame size
   // (encoder callback is only called for non-zero length frames).
   encoded_frame_size_ = encoded_image._length;
   encoded_frame_type_ = encoded_image._frameType;
-
-  int64_t encode_stop_ns = rtc::TimeNanos();
   int frame_number = encoded_image._timeStamp;

   FrameStatistic& stat = stats_->stats_[frame_number];
   stat.encode_time_in_us =
       GetElapsedTimeMicroseconds(encode_start_ns_, encode_stop_ns);
   stat.encoding_successful = true;
   stat.encoded_frame_length_in_bytes = encoded_image._length;
   stat.frame_number = encoded_image._timeStamp;
   stat.frame_type = encoded_image._frameType;
   stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;

(...skipping 31 matching lines...)
   copied_image._size = copied_buffer_size;
   copied_image._buffer = copied_buffer.get();

   if (!exclude_this_frame) {
     stat.packets_dropped =
         packet_manipulator_->ManipulatePackets(&copied_image);
   }

   // Keep track of whether frames are lost due to packet loss so we can tell
   // this to the encoder (this is handled by the RTP logic in the full stack).
-  decode_start_ns_ = rtc::TimeNanos();
   // TODO(kjellander): Pass fragmentation header to the decoder when
   // CL 172001 has been submitted and PacketManipulator supports this.
+
+  // For the highest measurement accuracy of the decode time, the start/stop
+  // time recordings should wrap the Decode call as tightly as possible.
+  decode_start_ns_ = rtc::TimeNanos();
   int32_t decode_result =
       decoder_->Decode(copied_image, last_frame_missing_, nullptr);
   stat.decode_return_code = decode_result;

   if (decode_result != WEBRTC_VIDEO_CODEC_OK) {
     // Write the last successful frame to the output file to avoid getting it
     // out of sync with the source file for SSIM and PSNR comparisons.
     frame_writer_->WriteFrame(last_successful_frame_buffer_.get());
   }

   // Save status for losses so we can inform the decoder for the next frame.
   last_frame_missing_ = copied_image._length == 0;
 }

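A side note on the dropped-frame accounting at the top of FrameEncoded(): because the frame number is used as the timestamp, the gap between the timestamps of two consecutive frames that reach the callback, minus one, is the number of frames the encoder dropped in between. A self-contained illustration of that arithmetic with made-up timestamps (the vector and variable names below are hypothetical, not from the CL):

#include <cstdio>
#include <vector>

int main() {
  // Timestamps (== frame numbers) of frames that reached FrameEncoded().
  std::vector<int> encoded_timestamps = {0, 1, 4, 5, 8};
  int prev_time_stamp = -1;  // "No frame seen yet", as in the real code.
  int num_dropped_frames = 0;
  for (int ts : encoded_timestamps) {
    // e.g. 4 - 1 - 1 == 2: frames 2 and 3 never produced encoder output.
    num_dropped_frames += ts - prev_time_stamp - 1;
    prev_time_stamp = ts;
  }
  std::printf("total dropped: %d\n", num_dropped_frames);  // 4 (frames 2,3,6,7).
  return 0;
}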
 void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
+  // For the highest measurement accuracy of the decode time, the start/stop
+  // time recordings should wrap the Decode call as tightly as possible.
   int64_t decode_stop_ns = rtc::TimeNanos();

   // Report stats.
   int frame_number = image.timestamp();
   FrameStatistic& stat = stats_->stats_[frame_number];
   stat.decode_time_in_us =
       GetElapsedTimeMicroseconds(decode_start_ns_, decode_stop_ns);
   stat.decoding_successful = true;

   // Check for resize action (either down or up).

(...skipping 50 matching lines...)
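Both measurements span an asynchronous boundary: the start time lives in a member (encode_start_ns_ / decode_start_ns_) set immediately before the codec call, and the stop time is read first thing in the corresponding callback. A hedged sketch of that split for the decode path, with a hypothetical Processor class, a synchronous FakeDecode() stand-in for the decoder, and std::chrono in place of rtc::TimeNanos():

#include <chrono>
#include <cstdio>

class Processor {
 public:
  void DecodeFrame() {
    // Record the start in a member right before handing off to the decoder.
    decode_start_ = std::chrono::steady_clock::now();
    FakeDecode();  // A real decoder would invoke the callback when done.
  }

  void OnFrameDecoded() {
    // Capture the stop time before doing any stats work in the callback.
    auto stop = std::chrono::steady_clock::now();
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  stop - decode_start_).count();
    std::printf("decode time: %lld us\n", static_cast<long long>(us));
  }

 private:
  void FakeDecode() { OnFrameDecoded(); }  // Synchronous stand-in.
  std::chrono::steady_clock::time_point decode_start_;
};

int main() {
  Processor p;
  p.DecodeFrame();
  return 0;
}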
 int VideoProcessorImpl::GetElapsedTimeMicroseconds(int64_t start,
                                                    int64_t stop) {
   int64_t encode_time = (stop - start) / rtc::kNumNanosecsPerMicrosec;
   RTC_DCHECK_GE(encode_time, std::numeric_limits<int>::min());
   RTC_DCHECK_LE(encode_time, std::numeric_limits<int>::max());
   return static_cast<int>(encode_time);
 }

 }  // namespace test
 }  // namespace webrtc
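GetElapsedTimeMicroseconds() above narrows a nanosecond interval into an int microsecond count, with DCHECKs guarding the narrowing (it serves the decode path too, despite the encode_time variable name). A standalone sketch of the same conversion, with assert() standing in for RTC_DCHECK_GE/RTC_DCHECK_LE and the literal 1000 for rtc::kNumNanosecsPerMicrosec:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <limits>

// Convert a nanosecond interval to microseconds and check that the
// narrowed value fits in an int before returning it.
int ElapsedMicroseconds(int64_t start_ns, int64_t stop_ns) {
  int64_t elapsed_us = (stop_ns - start_ns) / 1000;
  assert(elapsed_us >= std::numeric_limits<int>::min());
  assert(elapsed_us <= std::numeric_limits<int>::max());
  return static_cast<int>(elapsed_us);
}

int main() {
  std::printf("%d us\n", ElapsedMicroseconds(1'000'000, 4'500'000));  // 3500 us.
  return 0;
}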