| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/video_coding/codecs/test/videoprocessor.h" | 11 #include "webrtc/modules/video_coding/codecs/test/videoprocessor.h" |
| 12 | 12 |
| 13 #include <assert.h> | 13 #include <assert.h> |
| 14 #include <string.h> | 14 #include <string.h> |
| 15 | 15 |
| 16 #include <limits> | 16 #include <limits> |
| 17 #include <memory> | 17 #include <memory> |
| 18 #include <vector> | 18 #include <vector> |
| 19 | 19 |
| | 20 #include "webrtc/base/timeutils.h" |
| 20 #include "webrtc/system_wrappers/include/cpu_info.h" | 21 #include "webrtc/system_wrappers/include/cpu_info.h" |
| 21 | 22 |
| 22 namespace webrtc { | 23 namespace webrtc { |
| 23 namespace test { | 24 namespace test { |
| 24 | 25 |
| 25 TestConfig::TestConfig() | 26 TestConfig::TestConfig() |
| 26 : name(""), | 27 : name(""), |
| 27 description(""), | 28 description(""), |
| 28 test_number(0), | 29 test_number(0), |
| 29 input_filename(""), | 30 input_filename(""), |
| (...skipping 161 matching lines...) |
| 191 prev_time_stamp_ = -1; | 192 prev_time_stamp_ = -1; |
| 192 } | 193 } |
| 193 if (frame_reader_->ReadFrame(source_buffer_)) { | 194 if (frame_reader_->ReadFrame(source_buffer_)) { |
| 194 // Copy the source frame to the newly read frame data. | 195 // Copy the source frame to the newly read frame data. |
| 195 source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width, | 196 source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width, |
| 196 config_.codec_settings->height, kVideoRotation_0); | 197 config_.codec_settings->height, kVideoRotation_0); |
| 197 | 198 |
| 198 // Ensure we have a new statistics data object we can fill: | 199 // Ensure we have a new statistics data object we can fill: |
| 199 FrameStatistic& stat = stats_->NewFrame(frame_number); | 200 FrameStatistic& stat = stats_->NewFrame(frame_number); |
| 200 | 201 |
| 201 encode_start_ = TickTime::Now(); | 202 encode_start_ns_ = rtc::TimeNanos(); |
| 202 // Use the frame number as "timestamp" to identify frames | 203 // Use the frame number as "timestamp" to identify frames |
| 203 source_frame_.set_timestamp(frame_number); | 204 source_frame_.set_timestamp(frame_number); |
| 204 | 205 |
| 205 // Decide if we're going to force a keyframe: | 206 // Decide if we're going to force a keyframe: |
| 206 std::vector<FrameType> frame_types(1, kVideoFrameDelta); | 207 std::vector<FrameType> frame_types(1, kVideoFrameDelta); |
| 207 if (config_.keyframe_interval > 0 && | 208 if (config_.keyframe_interval > 0 && |
| 208 frame_number % config_.keyframe_interval == 0) { | 209 frame_number % config_.keyframe_interval == 0) { |
| 209 frame_types[0] = kVideoFrameKey; | 210 frame_types[0] = kVideoFrameKey; |
| 210 } | 211 } |
| 211 | 212 |
| (...skipping 29 matching lines...) |
| 241 for (int i = 0; i < num_dropped_from_prev_encode; i++) { | 242 for (int i = 0; i < num_dropped_from_prev_encode; i++) { |
| 242 frame_writer_->WriteFrame(last_successful_frame_buffer_); | 243 frame_writer_->WriteFrame(last_successful_frame_buffer_); |
| 243 } | 244 } |
| 244 } | 245 } |
| 245 // Frame is not dropped, so update the encoded frame size | 246 // Frame is not dropped, so update the encoded frame size |
| 246 // (encoder callback is only called for non-zero length frames). | 247 // (encoder callback is only called for non-zero length frames). |
| 247 encoded_frame_size_ = encoded_image._length; | 248 encoded_frame_size_ = encoded_image._length; |
| 248 | 249 |
| 249 encoded_frame_type_ = encoded_image._frameType; | 250 encoded_frame_type_ = encoded_image._frameType; |
| 250 | 251 |
| 251 TickTime encode_stop = TickTime::Now(); | 252 int64_t encode_stop_ns = rtc::TimeNanos(); |
| 252 int frame_number = encoded_image._timeStamp; | 253 int frame_number = encoded_image._timeStamp; |
| 253 FrameStatistic& stat = stats_->stats_[frame_number]; | 254 FrameStatistic& stat = stats_->stats_[frame_number]; |
| 254 stat.encode_time_in_us = | 255 stat.encode_time_in_us = |
| 255 GetElapsedTimeMicroseconds(encode_start_, encode_stop); | 256 GetElapsedTimeMicroseconds(encode_start_ns_, encode_stop_ns); |
| 256 stat.encoding_successful = true; | 257 stat.encoding_successful = true; |
| 257 stat.encoded_frame_length_in_bytes = encoded_image._length; | 258 stat.encoded_frame_length_in_bytes = encoded_image._length; |
| 258 stat.frame_number = encoded_image._timeStamp; | 259 stat.frame_number = encoded_image._timeStamp; |
| 259 stat.frame_type = encoded_image._frameType; | 260 stat.frame_type = encoded_image._frameType; |
| 260 stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_; | 261 stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_; |
| 261 stat.total_packets = | 262 stat.total_packets = |
| 262 encoded_image._length / config_.networking_config.packet_size_in_bytes + | 263 encoded_image._length / config_.networking_config.packet_size_in_bytes + |
| 263 1; | 264 1; |
| 264 | 265 |
| 265 // Perform packet loss if criteria are fulfilled: | 266 // Perform packet loss if criteria are fulfilled: |
| (...skipping 26 matching lines...) |
| 292 copied_image._size = copied_buffer_size; | 293 copied_image._size = copied_buffer_size; |
| 293 copied_image._buffer = copied_buffer.get(); | 294 copied_image._buffer = copied_buffer.get(); |
| 294 | 295 |
| 295 if (!exclude_this_frame) { | 296 if (!exclude_this_frame) { |
| 296 stat.packets_dropped = | 297 stat.packets_dropped = |
| 297 packet_manipulator_->ManipulatePackets(&copied_image); | 298 packet_manipulator_->ManipulatePackets(&copied_image); |
| 298 } | 299 } |
| 299 | 300 |
| 300 // Keep track of whether frames are lost due to packet loss so we can tell | 301 // Keep track of whether frames are lost due to packet loss so we can tell |
| 301 // this to the encoder (this is handled by the RTP logic in the full stack) | 302 // this to the encoder (this is handled by the RTP logic in the full stack) |
| 302 decode_start_ = TickTime::Now(); | 303 decode_start_ns_ = rtc::TimeNanos(); |
| 303 // TODO(kjellander): Pass fragmentation header to the decoder when | 304 // TODO(kjellander): Pass fragmentation header to the decoder when |
| 304 // CL 172001 has been submitted and PacketManipulator supports this. | 305 // CL 172001 has been submitted and PacketManipulator supports this. |
| 305 int32_t decode_result = | 306 int32_t decode_result = |
| 306 decoder_->Decode(copied_image, last_frame_missing_, NULL); | 307 decoder_->Decode(copied_image, last_frame_missing_, NULL); |
| 307 stat.decode_return_code = decode_result; | 308 stat.decode_return_code = decode_result; |
| 308 if (decode_result != WEBRTC_VIDEO_CODEC_OK) { | 309 if (decode_result != WEBRTC_VIDEO_CODEC_OK) { |
| 309 // Write the last successful frame to the output file to avoid getting it out | 310 // Write the last successful frame to the output file to avoid getting it out |
| 310 // of sync with the source file for SSIM and PSNR comparisons: | 311 // of sync with the source file for SSIM and PSNR comparisons: |
| 311 frame_writer_->WriteFrame(last_successful_frame_buffer_); | 312 frame_writer_->WriteFrame(last_successful_frame_buffer_); |
| 312 } | 313 } |
| 313 // save status for losses so we can inform the decoder for the next frame: | 314 // save status for losses so we can inform the decoder for the next frame: |
| 314 last_frame_missing_ = copied_image._length == 0; | 315 last_frame_missing_ = copied_image._length == 0; |
| 315 } | 316 } |
| 316 | 317 |
| 317 void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) { | 318 void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) { |
| 318 TickTime decode_stop = TickTime::Now(); | 319 int64_t decode_stop_ns = rtc::TimeNanos(); |
| 319 int frame_number = image.timestamp(); | 320 int frame_number = image.timestamp(); |
| 320 // Report stats | 321 // Report stats |
| 321 FrameStatistic& stat = stats_->stats_[frame_number]; | 322 FrameStatistic& stat = stats_->stats_[frame_number]; |
| 322 stat.decode_time_in_us = | 323 stat.decode_time_in_us = |
| 323 GetElapsedTimeMicroseconds(decode_start_, decode_stop); | 324 GetElapsedTimeMicroseconds(decode_start_ns_, decode_stop_ns); |
| 324 stat.decoding_successful = true; | 325 stat.decoding_successful = true; |
| 325 | 326 |
| 326 // Check for resize action (either down or up): | 327 // Check for resize action (either down or up): |
| 327 if (static_cast<int>(image.width()) != last_encoder_frame_width_ || | 328 if (static_cast<int>(image.width()) != last_encoder_frame_width_ || |
| 328 static_cast<int>(image.height()) != last_encoder_frame_height_) { | 329 static_cast<int>(image.height()) != last_encoder_frame_height_) { |
| 329 ++num_spatial_resizes_; | 330 ++num_spatial_resizes_; |
| 330 last_encoder_frame_width_ = image.width(); | 331 last_encoder_frame_width_ = image.width(); |
| 331 last_encoder_frame_height_ = image.height(); | 332 last_encoder_frame_height_ = image.height(); |
| 332 } | 333 } |
| 333 // Check if codec size is different from native/original size, and if so, | 334 // Check if codec size is different from native/original size, and if so, |
| (...skipping 37 matching lines...) |
| 371 memcpy(last_successful_frame_buffer_, image_buffer.get(), extracted_length); | 372 memcpy(last_successful_frame_buffer_, image_buffer.get(), extracted_length); |
| 372 | 373 |
| 373 bool write_success = frame_writer_->WriteFrame(image_buffer.get()); | 374 bool write_success = frame_writer_->WriteFrame(image_buffer.get()); |
| 374 assert(write_success); | 375 assert(write_success); |
| 375 if (!write_success) { | 376 if (!write_success) { |
| 376 fprintf(stderr, "Failed to write frame %d to disk!", frame_number); | 377 fprintf(stderr, "Failed to write frame %d to disk!", frame_number); |
| 377 } | 378 } |
| 378 } | 379 } |
| 379 } | 380 } |
| 380 | 381 |
| 381 int VideoProcessorImpl::GetElapsedTimeMicroseconds( | 382 int VideoProcessorImpl::GetElapsedTimeMicroseconds(int64_t start, |
| 382 const webrtc::TickTime& start, | 383 int64_t stop) { |
| 383 const webrtc::TickTime& stop) { | 384 uint64_t encode_time = (stop - start) / rtc::kNumNanosecsPerMicrosec; |
| 384 uint64_t encode_time = (stop - start).Microseconds(); | |
| 385 assert(encode_time < | 385 assert(encode_time < |
| 386 static_cast<unsigned int>(std::numeric_limits<int>::max())); | 386 static_cast<unsigned int>(std::numeric_limits<int>::max())); |
| 387 return static_cast<int>(encode_time); | 387 return static_cast<int>(encode_time); |
| 388 } | 388 } |
| 389 | 389 |
| 390 const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e) { | 390 const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e) { |
| 391 switch (e) { | 391 switch (e) { |
| 392 case kExcludeOnlyFirstKeyFrame: | 392 case kExcludeOnlyFirstKeyFrame: |
| 393 return "ExcludeOnlyFirstKeyFrame"; | 393 return "ExcludeOnlyFirstKeyFrame"; |
| 394 case kExcludeAllKeyFrames: | 394 case kExcludeAllKeyFrames: |
| (...skipping 36 matching lines...) |
| 431 } | 431 } |
| 432 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded( | 432 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded( |
| 433 VideoFrame& image) { | 433 VideoFrame& image) { |
| 434 // Forward to parent class. | 434 // Forward to parent class. |
| 435 video_processor_->FrameDecoded(image); | 435 video_processor_->FrameDecoded(image); |
| 436 return 0; | 436 return 0; |
| 437 } | 437 } |
| 438 | 438 |
| 439 } // namespace test | 439 } // namespace test |
| 440 } // namespace webrtc | 440 } // namespace webrtc |
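
For reference, the elapsed-time pattern this CL adopts can be sketched on its own. This is a minimal illustration, not part of the CL: it assumes only rtc::TimeNanos() and rtc::kNumNanosecsPerMicrosec from webrtc/base/timeutils.h (both appear in the new code above); the ElapsedMicroseconds helper name is hypothetical.

#include <cassert>
#include <cstdint>
#include <limits>

#include "webrtc/base/timeutils.h"

// Hypothetical helper mirroring the new GetElapsedTimeMicroseconds():
// take two rtc::TimeNanos() readings and convert the delta to microseconds.
static int ElapsedMicroseconds(int64_t start_ns, int64_t stop_ns) {
  int64_t elapsed_us = (stop_ns - start_ns) / rtc::kNumNanosecsPerMicrosec;
  assert(elapsed_us >= 0 && elapsed_us <= std::numeric_limits<int>::max());
  return static_cast<int>(elapsed_us);
}

// Usage, following the FrameEncoded()/FrameDecoded() flow above:
//   int64_t encode_start_ns = rtc::TimeNanos();
//   ... encode one frame ...
//   int64_t encode_stop_ns = rtc::TimeNanos();
//   stat.encode_time_in_us =
//       ElapsedMicroseconds(encode_start_ns, encode_stop_ns);

Storing raw rtc::TimeNanos() values in the member variables and converting to microseconds only inside GetElapsedTimeMicroseconds() matches the split the new code above uses.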