Index: webrtc/modules/video_coding/codecs/test/videoprocessor.cc
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index c814dfe0e78131d0b962e0cb51c7fa197bd0c9aa..7376000bd5d7937c0bc6804b0dd12558c69eb77e 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -93,14 +93,18 @@ bool VideoProcessorImpl::Init() {
int32_t register_result =
encoder_->RegisterEncodeCompleteCallback(encode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register encode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register encode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register decode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register decode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
// Init the encoder and decoder
@@ -146,13 +150,14 @@ VideoProcessorImpl::~VideoProcessorImpl() {
delete decode_callback_;
}
-
void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) {
int set_rates_result = encoder_->SetRates(bit_rate, frame_rate);
assert(set_rates_result >= 0);
if (set_rates_result < 0) {
- fprintf(stderr, "Failed to update encoder with new rate %d, "
- "return code: %d\n", bit_rate, set_rates_result);
+ fprintf(stderr,
+ "Failed to update encoder with new rate %d, "
+ "return code: %d\n",
+ bit_rate, set_rates_result);
}
num_dropped_frames_ = 0;
num_spatial_resizes_ = 0;
@@ -175,7 +180,7 @@ int VideoProcessorImpl::NumberSpatialResizes() {
}
bool VideoProcessorImpl::ProcessFrame(int frame_number) {
- assert(frame_number >=0);
+ assert(frame_number >= 0);
if (!initialized_) {
fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n");
return false;
@@ -186,10 +191,8 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
}
if (frame_reader_->ReadFrame(source_buffer_)) {
// Copy the source frame to the newly read frame data.
- source_frame_.CreateFrame(source_buffer_,
- config_.codec_settings->width,
- config_.codec_settings->height,
- kVideoRotation_0);
+ source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width,
+ config_.codec_settings->height, kVideoRotation_0);
// Ensure we have a new statistics data object we can fill:
FrameStatistic& stat = stats_->NewFrame(frame_number);
@@ -224,10 +227,10 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
// Timestamp is frame number, so this gives us #dropped frames.
- int num_dropped_from_prev_encode = encoded_image._timeStamp -
- prev_time_stamp_ - 1;
- num_dropped_frames_ += num_dropped_from_prev_encode;
- prev_time_stamp_ = encoded_image._timeStamp;
+ int num_dropped_from_prev_encode =
+ encoded_image._timeStamp - prev_time_stamp_ - 1;
+ num_dropped_frames_ += num_dropped_from_prev_encode;
+ prev_time_stamp_ = encoded_image._timeStamp;
if (num_dropped_from_prev_encode > 0) {
// For dropped frames, we write out the last decoded frame to avoid getting
// out of sync for the computation of PSNR and SSIM.
@@ -244,15 +247,16 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
TickTime encode_stop = TickTime::Now();
int frame_number = encoded_image._timeStamp;
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_,
- encode_stop);
+ stat.encode_time_in_us =
+ GetElapsedTimeMicroseconds(encode_start_, encode_stop);
stat.encoding_successful = true;
stat.encoded_frame_length_in_bytes = encoded_image._length;
stat.frame_number = encoded_image._timeStamp;
stat.frame_type = encoded_image._frameType;
stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;
- stat.total_packets = encoded_image._length /
- config_.networking_config.packet_size_in_bytes + 1;
+ stat.total_packets =
+ encoded_image._length / config_.networking_config.packet_size_in_bytes +
+ 1;
// Perform packet loss if criteria is fullfilled:
bool exclude_this_frame = false;
@@ -280,7 +284,7 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
copied_image._buffer = copied_buffer.get();
if (!exclude_this_frame) {
stat.packets_dropped =
- packet_manipulator_->ManipulatePackets(&copied_image);
+ packet_manipulator_->ManipulatePackets(&copied_image);
}
// Keep track of if frames are lost due to packet loss so we can tell
@@ -305,26 +309,25 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
int frame_number = image.timestamp();
// Report stats
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.decode_time_in_us = GetElapsedTimeMicroseconds(decode_start_,
- decode_stop);
+ stat.decode_time_in_us =
+ GetElapsedTimeMicroseconds(decode_start_, decode_stop);
stat.decoding_successful = true;
// Check for resize action (either down or up):
if (static_cast<int>(image.width()) != last_encoder_frame_width_ ||
- static_cast<int>(image.height()) != last_encoder_frame_height_ ) {
+ static_cast<int>(image.height()) != last_encoder_frame_height_) {
++num_spatial_resizes_;
last_encoder_frame_width_ = image.width();
last_encoder_frame_height_ = image.height();
}
// Check if codec size is different from native/original size, and if so,
// upsample back to original size: needed for PSNR and SSIM computations.
- if (image.width() != config_.codec_settings->width ||
+ if (image.width() != config_.codec_settings->width ||
image.height() != config_.codec_settings->height) {
VideoFrame up_image;
- int ret_val = scaler_.Set(image.width(), image.height(),
- config_.codec_settings->width,
- config_.codec_settings->height,
- kI420, kI420, kScaleBilinear);
+ int ret_val = scaler_.Set(
+ image.width(), image.height(), config_.codec_settings->width,
+ config_.codec_settings->height, kI420, kI420, kScaleBilinear);
assert(ret_val >= 0);
if (ret_val < 0) {
fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n",
@@ -366,7 +369,8 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
}
int VideoProcessorImpl::GetElapsedTimeMicroseconds(
- const webrtc::TickTime& start, const webrtc::TickTime& stop) {
+ const webrtc::TickTime& start,
+ const webrtc::TickTime& stop) {
uint64_t encode_time = (stop - start).Microseconds();
assert(encode_time <
static_cast<unsigned int>(std::numeric_limits<int>::max()));
@@ -404,8 +408,7 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
}
// Callbacks
-int32_t
-VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
+int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
const EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {