Chromium Code Reviews

Side by Side Diff: webrtc/modules/video_coding/codecs/test/videoprocessor.cc

Issue 2711133002: Step #1: Support pipelining codecs in VideoProcessor. (Closed)
Patch Set: Rebase. Created 3 years, 9 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 12 matching lines...)
23 #include "webrtc/common_types.h" 23 #include "webrtc/common_types.h"
24 #include "webrtc/modules/video_coding/include/video_codec_initializer.h" 24 #include "webrtc/modules/video_coding/include/video_codec_initializer.h"
25 #include "webrtc/modules/video_coding/utility/default_video_bitrate_allocator.h" 25 #include "webrtc/modules/video_coding/utility/default_video_bitrate_allocator.h"
26 #include "webrtc/modules/video_coding/utility/simulcast_rate_allocator.h" 26 #include "webrtc/modules/video_coding/utility/simulcast_rate_allocator.h"
27 #include "webrtc/system_wrappers/include/cpu_info.h" 27 #include "webrtc/system_wrappers/include/cpu_info.h"
28 28
29 namespace webrtc { 29 namespace webrtc {
30 namespace test { 30 namespace test {
31 31
32 namespace { 32 namespace {
33
34 // TODO(brandtr): Update this to use the real frame rate.
33 const int k90khzTimestampFrameDiff = 3000; // Assuming 30 fps. 35 const int k90khzTimestampFrameDiff = 3000; // Assuming 30 fps.
34 36
37 // All foreman_* files in //resources are 300 frames long.
sprang_webrtc 2017/03/06 10:28:24 Sounds hacky. Should we add a todo to fix this?
brandtr 2017/03/06 15:27:31 Removed, thanks to suggestion from Åsa :)
38 const size_t kInitialFrameInfoSize = 300;
39
35 std::unique_ptr<VideoBitrateAllocator> CreateBitrateAllocator( 40 std::unique_ptr<VideoBitrateAllocator> CreateBitrateAllocator(
36 const TestConfig& config) { 41 const TestConfig& config) {
37 std::unique_ptr<TemporalLayersFactory> tl_factory; 42 std::unique_ptr<TemporalLayersFactory> tl_factory;
38 if (config.codec_settings->codecType == VideoCodecType::kVideoCodecVP8) { 43 if (config.codec_settings->codecType == VideoCodecType::kVideoCodecVP8) {
39 tl_factory.reset(new TemporalLayersFactory()); 44 tl_factory.reset(new TemporalLayersFactory());
40 config.codec_settings->VP8()->tl_factory = tl_factory.get(); 45 config.codec_settings->VP8()->tl_factory = tl_factory.get();
41 } 46 }
42 return std::unique_ptr<VideoBitrateAllocator>( 47 return std::unique_ptr<VideoBitrateAllocator>(
43 VideoCodecInitializer::CreateBitrateAllocator(*config.codec_settings, 48 VideoCodecInitializer::CreateBitrateAllocator(*config.codec_settings,
44 std::move(tl_factory))); 49 std::move(tl_factory)));
(...skipping 45 matching lines...)
90 bitrate_allocator_(CreateBitrateAllocator(config)), 95 bitrate_allocator_(CreateBitrateAllocator(config)),
91 encode_callback_(new VideoProcessorEncodeCompleteCallback(this)), 96 encode_callback_(new VideoProcessorEncodeCompleteCallback(this)),
92 decode_callback_(new VideoProcessorDecodeCompleteCallback(this)), 97 decode_callback_(new VideoProcessorDecodeCompleteCallback(this)),
93 packet_manipulator_(packet_manipulator), 98 packet_manipulator_(packet_manipulator),
94 config_(config), 99 config_(config),
95 analysis_frame_reader_(analysis_frame_reader), 100 analysis_frame_reader_(analysis_frame_reader),
96 analysis_frame_writer_(analysis_frame_writer), 101 analysis_frame_writer_(analysis_frame_writer),
97 source_frame_writer_(source_frame_writer), 102 source_frame_writer_(source_frame_writer),
98 encoded_frame_writer_(encoded_frame_writer), 103 encoded_frame_writer_(encoded_frame_writer),
99 decoded_frame_writer_(decoded_frame_writer), 104 decoded_frame_writer_(decoded_frame_writer),
105 initialized_(false),
106 frame_infos_(),
sprang_webrtc 2017/03/06 10:28:25 nit: usually omit default constructors in initializer lists.
brandtr 2017/03/06 15:27:31 Done.
107 last_encoded_frame_num_(-1),
108 last_decoded_frame_num_(-1),
100 first_key_frame_has_been_excluded_(false), 109 first_key_frame_has_been_excluded_(false),
101 last_frame_missing_(false), 110 last_decoded_frame_buffer_(0, analysis_frame_reader_->FrameLength()),
102 initialized_(false),
103 encoded_frame_size_(0),
104 encoded_frame_type_(kVideoFrameKey),
105 prev_time_stamp_(0),
106 last_encoder_frame_width_(0),
107 last_encoder_frame_height_(0),
108 stats_(stats), 111 stats_(stats),
109 num_dropped_frames_(0), 112 num_dropped_frames_(0),
110 num_spatial_resizes_(0), 113 num_spatial_resizes_(0),
111 bit_rate_factor_(0.0), 114 bit_rate_factor_(0.0) {
112 encode_start_ns_(0),
113 decode_start_ns_(0) {
114 RTC_DCHECK(encoder); 115 RTC_DCHECK(encoder);
115 RTC_DCHECK(decoder); 116 RTC_DCHECK(decoder);
116 RTC_DCHECK(packet_manipulator); 117 RTC_DCHECK(packet_manipulator);
117 RTC_DCHECK(analysis_frame_reader); 118 RTC_DCHECK(analysis_frame_reader);
118 RTC_DCHECK(analysis_frame_writer); 119 RTC_DCHECK(analysis_frame_writer);
119 RTC_DCHECK(stats); 120 RTC_DCHECK(stats);
121
122 frame_infos_.reserve(kInitialFrameInfoSize);
åsapersson 2017/03/03 12:09:21 Could analysis_frame_reader_->NumberOfFrames() be used here instead?
brandtr 2017/03/06 15:27:32 Good idea. Done.
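A minimal sketch of the resolved suggestion, assuming FrameReader exposes the NumberOfFrames() accessor the comment refers to:

    // Reserve one FrameInfo slot per input frame, sized from the input
    // clip rather than a hard-coded constant, so no reallocation happens
    // during the test run.
    frame_infos_.reserve(analysis_frame_reader_->NumberOfFrames());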
120 } 123 }
121 124
122 bool VideoProcessorImpl::Init() { 125 bool VideoProcessorImpl::Init() {
126 RTC_CHECK(!initialized_)
127 << "This VideoProcessor has already been initialized.";
128
123 // Calculate a factor used for bit rate calculations. 129 // Calculate a factor used for bit rate calculations.
124 bit_rate_factor_ = config_.codec_settings->maxFramerate * 0.001 * 8; // bits 130 bit_rate_factor_ = config_.codec_settings->maxFramerate * 0.001 * 8; // bits
125 131
126 // Initialize data structures used by the encoder/decoder APIs.
127 size_t frame_length_in_bytes = analysis_frame_reader_->FrameLength();
128 last_successful_frame_buffer_.reset(new uint8_t[frame_length_in_bytes]);
129
130 // Set fixed properties common for all frames.
131 // To keep track of spatial resize actions by encoder.
132 last_encoder_frame_width_ = config_.codec_settings->width;
133 last_encoder_frame_height_ = config_.codec_settings->height;
134
135 // Set up required callbacks for the encoder/decoder. 132 // Set up required callbacks for the encoder/decoder.
136 RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(encode_callback_.get()), 133 RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(encode_callback_.get()),
137 WEBRTC_VIDEO_CODEC_OK) 134 WEBRTC_VIDEO_CODEC_OK)
138 << "Failed to register encode complete callback"; 135 << "Failed to register encode complete callback";
139 RTC_CHECK_EQ(decoder_->RegisterDecodeCompleteCallback(decode_callback_.get()), 136 RTC_CHECK_EQ(decoder_->RegisterDecodeCompleteCallback(decode_callback_.get()),
140 WEBRTC_VIDEO_CODEC_OK) 137 WEBRTC_VIDEO_CODEC_OK)
141 << "Failed to register decode complete callback"; 138 << "Failed to register decode complete callback";
142 139
143 // Initialize the encoder and decoder. 140 // Initialize the encoder and decoder.
144 uint32_t num_cores = 141 uint32_t num_cores =
(...skipping 32 matching lines...)
177 config_.codec_settings->VP8()->errorConcealmentOn); 174 config_.codec_settings->VP8()->errorConcealmentOn);
178 printf(" Frame dropping : %d\n", 175 printf(" Frame dropping : %d\n",
179 config_.codec_settings->VP8()->frameDroppingOn); 176 config_.codec_settings->VP8()->frameDroppingOn);
180 printf(" Resilience : %d\n", 177 printf(" Resilience : %d\n",
181 config_.codec_settings->VP8()->resilience); 178 config_.codec_settings->VP8()->resilience);
182 } else if (config_.codec_settings->codecType == kVideoCodecVP9) { 179 } else if (config_.codec_settings->codecType == kVideoCodecVP9) {
183 printf(" Resilience : %d\n", 180 printf(" Resilience : %d\n",
184 config_.codec_settings->VP9()->resilience); 181 config_.codec_settings->VP9()->resilience);
185 } 182 }
186 } 183 }
184
187 initialized_ = true; 185 initialized_ = true;
186
188 return true; 187 return true;
189 } 188 }
190 189
191 VideoProcessorImpl::~VideoProcessorImpl() { 190 VideoProcessorImpl::~VideoProcessorImpl() {
192 encoder_->RegisterEncodeCompleteCallback(nullptr); 191 encoder_->RegisterEncodeCompleteCallback(nullptr);
193 decoder_->RegisterDecodeCompleteCallback(nullptr); 192 decoder_->RegisterDecodeCompleteCallback(nullptr);
194 } 193 }
195 194
196 void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) { 195 void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) {
197 int set_rates_result = encoder_->SetRateAllocation( 196 int set_rates_result = encoder_->SetRateAllocation(
198 bitrate_allocator_->GetAllocation(bit_rate * 1000, frame_rate), 197 bitrate_allocator_->GetAllocation(bit_rate * 1000, frame_rate),
199 frame_rate); 198 frame_rate);
200 RTC_CHECK_GE(set_rates_result, 0) << "Failed to update encoder with new rate " 199 RTC_CHECK_GE(set_rates_result, 0) << "Failed to update encoder with new rate "
201 << bit_rate; 200 << bit_rate;
202 num_dropped_frames_ = 0; 201 num_dropped_frames_ = 0;
203 num_spatial_resizes_ = 0; 202 num_spatial_resizes_ = 0;
204 } 203 }
205 204
205 // TODO(brandtr): Update implementation of EncodedFrameSize and EncodedFrameType
206 // to support batch processing in the caller.
206 size_t VideoProcessorImpl::EncodedFrameSize() { 207 size_t VideoProcessorImpl::EncodedFrameSize() {
207 return encoded_frame_size_; 208 RTC_CHECK(!frame_infos_.empty());
209 return frame_infos_.back().encoded_frame_size;
208 } 210 }
209 211
210 FrameType VideoProcessorImpl::EncodedFrameType() { 212 FrameType VideoProcessorImpl::EncodedFrameType() {
211 return encoded_frame_type_; 213 RTC_CHECK(!frame_infos_.empty());
214 return frame_infos_.back().encoded_frame_type;
212 } 215 }
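A hedged sketch of the batch-aware accessors the TODO above hints at (hypothetical signatures, not part of this CL):

    // Look up encode results by frame number instead of implicitly
    // returning data for the most recently encoded frame.
    size_t VideoProcessorImpl::EncodedFrameSize(int frame_number) {
      RTC_CHECK_LT(static_cast<size_t>(frame_number), frame_infos_.size());
      return frame_infos_[frame_number].encoded_frame_size;
    }

    FrameType VideoProcessorImpl::EncodedFrameType(int frame_number) {
      RTC_CHECK_LT(static_cast<size_t>(frame_number), frame_infos_.size());
      return frame_infos_[frame_number].encoded_frame_type;
    }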
213 216
214 int VideoProcessorImpl::NumberDroppedFrames() { 217 int VideoProcessorImpl::NumberDroppedFrames() {
215 return num_dropped_frames_; 218 return num_dropped_frames_;
216 } 219 }
217 220
218 int VideoProcessorImpl::NumberSpatialResizes() { 221 int VideoProcessorImpl::NumberSpatialResizes() {
219 return num_spatial_resizes_; 222 return num_spatial_resizes_;
220 } 223 }
221 224
222 bool VideoProcessorImpl::ProcessFrame(int frame_number) { 225 bool VideoProcessorImpl::ProcessFrame(int frame_number) {
223 RTC_CHECK_GE(frame_number, 0); 226 RTC_CHECK_GE(frame_number, 0);
227 RTC_CHECK_LE(frame_number, frame_infos_.size())
228 << "Must process frames without gaps.";
224 RTC_CHECK(initialized_) << "Attempting to use uninitialized VideoProcessor"; 229 RTC_CHECK(initialized_) << "Attempting to use uninitialized VideoProcessor";
225 230
226 rtc::scoped_refptr<VideoFrameBuffer> buffer( 231 rtc::scoped_refptr<VideoFrameBuffer> buffer(
227 analysis_frame_reader_->ReadFrame()); 232 analysis_frame_reader_->ReadFrame());
228 if (buffer) {
229 if (source_frame_writer_) {
230 // TODO(brandtr): Introduce temp buffer as data member, to avoid
231 // allocating for every frame.
232 size_t length = CalcBufferSize(kI420, buffer->width(), buffer->height());
233 std::unique_ptr<uint8_t[]> extracted_buffer(new uint8_t[length]);
234 int extracted_length =
235 ExtractBuffer(buffer, length, extracted_buffer.get());
236 RTC_CHECK_EQ(extracted_length, source_frame_writer_->FrameLength());
237 source_frame_writer_->WriteFrame(extracted_buffer.get());
238 }
239 233
240 // Use the frame number as basis for timestamp to identify frames. Let the 234 if (!buffer) {
241 // first timestamp be non-zero, to not make the IvfFileWriter believe that
242 // we want to use capture timestamps in the IVF files.
243 VideoFrame source_frame(buffer,
244 (frame_number + 1) * k90khzTimestampFrameDiff, 0,
245 webrtc::kVideoRotation_0);
246
247 // Ensure we have a new statistics data object we can fill.
248 FrameStatistic& stat = stats_->NewFrame(frame_number);
249
250 // Decide if we are going to force a keyframe.
251 std::vector<FrameType> frame_types(1, kVideoFrameDelta);
252 if (config_.keyframe_interval > 0 &&
253 frame_number % config_.keyframe_interval == 0) {
254 frame_types[0] = kVideoFrameKey;
255 }
256
257 // For dropped frames, we regard them as zero size encoded frames.
258 encoded_frame_size_ = 0;
259 encoded_frame_type_ = kVideoFrameDelta;
260
261 // For the highest measurement accuracy of the encode time, the start/stop
262 // time recordings should wrap the Encode call as tightly as possible.
263 encode_start_ns_ = rtc::TimeNanos();
264 int32_t encode_result =
265 encoder_->Encode(source_frame, nullptr, &frame_types);
266
267 if (encode_result != WEBRTC_VIDEO_CODEC_OK) {
268 fprintf(stderr, "Failed to encode frame %d, return code: %d\n",
269 frame_number, encode_result);
270 }
271 stat.encode_return_code = encode_result;
272
273 return true;
274 } else {
275 // Last frame has been reached. 235 // Last frame has been reached.
276 return false; 236 return false;
277 } 237 }
238
239 if (source_frame_writer_) {
240 size_t length = CalcBufferSize(kI420, buffer->width(), buffer->height());
241 rtc::Buffer extracted_buffer(length);
242 int extracted_length =
243 ExtractBuffer(buffer, length, extracted_buffer.data());
244 RTC_CHECK_EQ(extracted_length, source_frame_writer_->FrameLength());
245 RTC_CHECK(source_frame_writer_->WriteFrame(extracted_buffer.data()));
246 }
247
248 // Use the frame number as the basis for timestamp to identify frames. Let the
249 // first timestamp be non-zero, to not make the IvfFileWriter believe that
250 // we want to use capture timestamps in the IVF files.
251 uint32_t timestamp = (frame_number + 1) * k90khzTimestampFrameDiff;
252 VideoFrame source_frame(buffer, timestamp, 0, webrtc::kVideoRotation_0);
253
254 // Store frame information during the different stages of encode and decode.
255 frame_infos_.emplace_back();
256 FrameInfo& frame_info = frame_infos_.back();
257 frame_info.timestamp = timestamp;
258
259 // Store frame statistics for aggregation at end of test run.
260 FrameStatistic& frame_stat = stats_->NewFrame(frame_number);
åsapersson 2017/03/03 12:09:21 maybe move closer to where it is used
brandtr 2017/03/06 15:27:32 Done. (Must be created before encode call, since it is used in the encode callback.)
261
262 // Decide if we are going to force a keyframe.
263 std::vector<FrameType> frame_types(1, kVideoFrameDelta);
264 if (config_.keyframe_interval > 0 &&
265 frame_number % config_.keyframe_interval == 0) {
266 frame_types[0] = kVideoFrameKey;
267 }
268
269 // For the highest measurement accuracy of the encode time, the start/stop
270 // time recordings should wrap the Encode call as tightly as possible.
271 frame_info.encode_start_ns = rtc::TimeNanos();
272 int32_t encode_result = encoder_->Encode(source_frame, nullptr, &frame_types);
sprang_webrtc 2017/03/06 10:28:25 Think you can drop encode_result and just use frame_stat.encode_return_code directly?
brandtr 2017/03/06 15:27:31 Done.
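A sketch of that simplification, using the FrameStatistic field added in this CL (no local temporary):

    frame_stat.encode_return_code =
        encoder_->Encode(source_frame, nullptr, &frame_types);
    if (frame_stat.encode_return_code != WEBRTC_VIDEO_CODEC_OK) {
      fprintf(stderr, "Failed to encode frame %d, return code: %d\n",
              frame_number, frame_stat.encode_return_code);
    }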
273 frame_stat.encode_return_code = encode_result;
274
275 if (encode_result != WEBRTC_VIDEO_CODEC_OK) {
276 fprintf(stderr, "Failed to encode frame %d, return code: %d\n",
277 frame_number, encode_result);
278 }
279
280 return true;
278 } 281 }
279 282
280 void VideoProcessorImpl::FrameEncoded( 283 void VideoProcessorImpl::FrameEncoded(
281 webrtc::VideoCodecType codec, 284 webrtc::VideoCodecType codec,
282 const EncodedImage& encoded_image, 285 const EncodedImage& encoded_image,
283 const webrtc::RTPFragmentationHeader* fragmentation) { 286 const webrtc::RTPFragmentationHeader* fragmentation) {
284 // For the highest measurement accuracy of the encode time, the start/stop 287 // For the highest measurement accuracy of the encode time, the start/stop
285 // time recordings should wrap the Encode call as tightly as possible. 288 // time recordings should wrap the Encode call as tightly as possible.
286 int64_t encode_stop_ns = rtc::TimeNanos(); 289 int64_t encode_stop_ns = rtc::TimeNanos();
287 290
288 if (encoded_frame_writer_) { 291 if (encoded_frame_writer_) {
289 RTC_CHECK(encoded_frame_writer_->WriteFrame(encoded_image, codec)); 292 RTC_CHECK(encoded_frame_writer_->WriteFrame(encoded_image, codec));
290 } 293 }
291 294
292 // Timestamp is proportional to frame number, so this gives us number of 295 // Timestamp is proportional to frame number, so this gives us number of
293 // dropped frames. 296 // dropped frames.
294 int num_dropped_from_prev_encode = 297 int frame_number = encoded_image._timeStamp / k90khzTimestampFrameDiff - 1;
sprang_webrtc 2017/03/06 10:28:24 nit: would prefer extra parentheses for clarity =)
brandtr 2017/03/06 15:27:32 Agree. Done in helper function.
295 (encoded_image._timeStamp - prev_time_stamp_) / k90khzTimestampFrameDiff - 298 bool last_frame_missing = false;
296 1; 299 if (frame_number > 0) {
297 num_dropped_frames_ += num_dropped_from_prev_encode; 300 RTC_CHECK_GE(last_encoded_frame_num_, 0);
sprang_webrtc 2017/03/06 10:28:24 Why CHECK rather than DCHECK everywhere? Will you run this in release builds?
brandtr 2017/03/06 15:27:31 Since this test will often be run on devices in Release mode, we want the checks to stay active there too.
sprang_webrtc 2017/03/06 17:21:38 Acknowledged.
298 prev_time_stamp_ = encoded_image._timeStamp; 301 const FrameInfo& last_encoded_frame_info =
299 if (num_dropped_from_prev_encode > 0) { 302 frame_infos_[last_encoded_frame_num_];
300 // For dropped frames, we write out the last decoded frame to avoid getting 303 last_frame_missing = (last_encoded_frame_info.manipulated_length == 0);
301 // out of sync for the computation of PSNR and SSIM. 304 int num_dropped_from_last_encode =
302 for (int i = 0; i < num_dropped_from_prev_encode; i++) { 305 (encoded_image._timeStamp - last_encoded_frame_info.timestamp) /
303 RTC_CHECK(analysis_frame_writer_->WriteFrame( 306 k90khzTimestampFrameDiff -
304 last_successful_frame_buffer_.get())); 307 1;
sprang_webrtc 2017/03/06 10:28:25 Sanity check that encoded_image._timeStamp % k90khzTimestampFrameDiff == 0?
sprang_webrtc 2017/03/06 10:28:25 parentheses
brandtr 2017/03/06 15:27:32 Good idea. Done.
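Both points (the parenthesization and the timestamp sanity check) fit naturally in the helper function mentioned above. A hedged sketch, with a hypothetical name since the CL only says the math moved into a helper:

    // Frame numbers are zero-based; the first frame carries timestamp
    // k90khzTimestampFrameDiff, so the inverse mapping subtracts one.
    int TimestampToFrameNumber(uint32_t timestamp) {
      RTC_CHECK_GT(timestamp, 0u);
      RTC_CHECK_EQ(timestamp % k90khzTimestampFrameDiff, 0u);
      return (timestamp / k90khzTimestampFrameDiff) - 1;
    }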
305 if (decoded_frame_writer_) { 308 num_dropped_frames_ += num_dropped_from_last_encode;
306 RTC_CHECK(decoded_frame_writer_->WriteFrame( 309 if (num_dropped_from_last_encode > 0) {
307 last_successful_frame_buffer_.get())); 310 // For dropped frames, we write out the last decoded frame to avoid
311 // getting out of sync for the computation of PSNR and SSIM.
312 for (int i = 0; i < num_dropped_from_last_encode; i++) {
313 RTC_CHECK_EQ(last_decoded_frame_buffer_.size(),
314 analysis_frame_writer_->FrameLength());
sprang_webrtc 2017/03/06 10:28:24 nit: git cl format
brandtr 2017/03/06 15:27:31 Done.
315 RTC_CHECK(analysis_frame_writer_->WriteFrame(
316 last_decoded_frame_buffer_.data()));
317 if (decoded_frame_writer_) {
318 RTC_CHECK_EQ(last_decoded_frame_buffer_.size(),
319 decoded_frame_writer_->FrameLength());
320 RTC_CHECK(decoded_frame_writer_->WriteFrame(
321 last_decoded_frame_buffer_.data()));
322 }
308 } 323 }
309 } 324 }
310 } 325 }
326 // Ensure strict monotonicity.
327 RTC_CHECK_GT(frame_number, last_encoded_frame_num_);
328 last_encoded_frame_num_ = frame_number;
311 329
312 // Frame is not dropped, so update the encoded frame size 330 // Frame is not dropped, so update frame information and statistics.
313 // (encoder callback is only called for non-zero length frames). 331 RTC_CHECK_LT(frame_number, frame_infos_.size());
314 encoded_frame_size_ = encoded_image._length; 332 FrameInfo& frame_info = frame_infos_[frame_number];
sprang_webrtc 2017/03/06 10:28:25 Use pointer instead of non-const ref.
brandtr 2017/03/06 15:27:31 Done.
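For reference, a sketch of the pointer-based form requested above (subsequent uses then go through frame_info->...):

    // Mutable access through a pointer makes the mutation visible at the
    // call site, per the style guide.
    FrameInfo* frame_info = &frame_infos_[frame_number];
    frame_info->encoded_frame_size = encoded_image._length;
    frame_info->encoded_frame_type = encoded_image._frameType;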
315 encoded_frame_type_ = encoded_image._frameType; 333 frame_info.encoded_frame_size = encoded_image._length;
316 int frame_number = encoded_image._timeStamp / k90khzTimestampFrameDiff - 1; 334 frame_info.encoded_frame_type = encoded_image._frameType;
317 FrameStatistic& stat = stats_->stats_[frame_number]; 335 FrameStatistic& frame_stat = stats_->stats_[frame_number];
sprang_webrtc 2017/03/06 10:28:24 ditto
brandtr 2017/03/06 15:27:32 Done.
318 stat.encode_time_in_us = 336 frame_stat.encode_time_in_us =
319 GetElapsedTimeMicroseconds(encode_start_ns_, encode_stop_ns); 337 GetElapsedTimeMicroseconds(frame_info.encode_start_ns, encode_stop_ns);
sprang_webrtc 2017/03/06 10:28:25 GetElapsedTimeMicroseconds(), but all the variables are named _ns?
brandtr 2017/03/06 15:27:32 Only the input variables are _ns; the output variable is in microseconds.
sprang_webrtc 2017/03/06 17:21:38 Right, I misread. Twice :)
320 stat.encoding_successful = true; 338 frame_stat.encoding_successful = true;
321 stat.encoded_frame_length_in_bytes = encoded_image._length; 339 frame_stat.encoded_frame_length_in_bytes = encoded_image._length;
322 stat.frame_number = frame_number; 340 frame_stat.frame_number = frame_number;
323 stat.frame_type = encoded_image._frameType; 341 frame_stat.frame_type = encoded_image._frameType;
324 stat.qp = encoded_image.qp_; 342 frame_stat.qp = encoded_image.qp_;
325 stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_; 343 frame_stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;
326 stat.total_packets = 344 frame_stat.total_packets =
327 encoded_image._length / config_.networking_config.packet_size_in_bytes + 345 encoded_image._length / config_.networking_config.packet_size_in_bytes +
328 1; 346 1;
329 347
330 // Simulate packet loss. 348 // Simulate packet loss.
331 bool exclude_this_frame = false; 349 bool exclude_this_frame = false;
332 if (encoded_image._frameType == kVideoFrameKey) { 350 if (encoded_image._frameType == kVideoFrameKey) {
333 // Only keyframes can be excluded. 351 // Only keyframes can be excluded.
334 switch (config_.exclude_frame_types) { 352 switch (config_.exclude_frame_types) {
335 case kExcludeOnlyFirstKeyFrame: 353 case kExcludeOnlyFirstKeyFrame:
336 if (!first_key_frame_has_been_excluded_) { 354 if (!first_key_frame_has_been_excluded_) {
(...skipping 13 matching lines...)
350 size_t copied_buffer_size = encoded_image._length + 368 size_t copied_buffer_size = encoded_image._length +
351 EncodedImage::GetBufferPaddingBytes(codec); 369 EncodedImage::GetBufferPaddingBytes(codec);
352 std::unique_ptr<uint8_t[]> copied_buffer(new uint8_t[copied_buffer_size]); 370 std::unique_ptr<uint8_t[]> copied_buffer(new uint8_t[copied_buffer_size]);
353 memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length); 371 memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length);
354 // The image to feed to the decoder. 372 // The image to feed to the decoder.
355 EncodedImage copied_image; 373 EncodedImage copied_image;
356 memcpy(&copied_image, &encoded_image, sizeof(copied_image)); 374 memcpy(&copied_image, &encoded_image, sizeof(copied_image));
357 copied_image._size = copied_buffer_size; 375 copied_image._size = copied_buffer_size;
358 copied_image._buffer = copied_buffer.get(); 376 copied_image._buffer = copied_buffer.get();
359 377
378 frame_info.manipulated_length = copied_image._length;
360 if (!exclude_this_frame) { 379 if (!exclude_this_frame) {
361 stat.packets_dropped = 380 frame_stat.packets_dropped =
362 packet_manipulator_->ManipulatePackets(&copied_image); 381 packet_manipulator_->ManipulatePackets(&copied_image);
382 frame_info.manipulated_length = copied_image._length;
363 } 383 }
364 384
365 // Keep track of whether frames are lost due to packet loss so we can tell 385 // Keep track of whether frames are lost due to packet loss so we can tell
366 // this to the encoder (this is handled by the RTP logic in the full stack). 386 // this to the encoder (this is handled by the RTP logic in the full stack).
367 // TODO(kjellander): Pass fragmentation header to the decoder when 387 // TODO(kjellander): Pass fragmentation header to the decoder when
368 // CL 172001 has been submitted and PacketManipulator supports this. 388 // CL 172001 has been submitted and PacketManipulator supports this.
369 389
370 // For the highest measurement accuracy of the decode time, the start/stop 390 // For the highest measurement accuracy of the decode time, the start/stop
371 // time recordings should wrap the Decode call as tightly as possible. 391 // time recordings should wrap the Decode call as tightly as possible.
372 decode_start_ns_ = rtc::TimeNanos(); 392 frame_info.decode_start_ns = rtc::TimeNanos();
373 int32_t decode_result = 393 int32_t decode_result =
374 decoder_->Decode(copied_image, last_frame_missing_, nullptr); 394 decoder_->Decode(copied_image, last_frame_missing, nullptr);
375 stat.decode_return_code = decode_result; 395 frame_stat.decode_return_code = decode_result;
376 396
377 if (decode_result != WEBRTC_VIDEO_CODEC_OK) { 397 if (decode_result != WEBRTC_VIDEO_CODEC_OK) {
378 // Write the last successful frame to the output file to avoid getting it out 398 // of sync with the source file for SSIM and PSNR comparisons.
379 // of sync with the source file for SSIM and PSNR comparisons. 399 // of sync with the source file for SSIM and PSNR comparisons.
380 RTC_CHECK(analysis_frame_writer_->WriteFrame( 400 RTC_CHECK_EQ(last_decoded_frame_buffer_.size(),
381 last_successful_frame_buffer_.get())); 401 analysis_frame_writer_->FrameLength());
402 RTC_CHECK(
403 analysis_frame_writer_->WriteFrame(last_decoded_frame_buffer_.data()));
382 if (decoded_frame_writer_) { 404 if (decoded_frame_writer_) {
383 RTC_CHECK(decoded_frame_writer_->WriteFrame( 405 RTC_CHECK_EQ(last_decoded_frame_buffer_.size(),
384 last_successful_frame_buffer_.get())); 406 decoded_frame_writer_->FrameLength());
407 RTC_CHECK(
408 decoded_frame_writer_->WriteFrame(last_decoded_frame_buffer_.data()));
385 } 409 }
386 } 410 }
387
388 // Save status for losses so we can inform the decoder for the next frame.
389 last_frame_missing_ = copied_image._length == 0;
390 } 411 }
391 412
392 void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) { 413 void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
393 // For the highest measurement accuracy of the decode time, the start/stop 414 // For the highest measurement accuracy of the decode time, the start/stop
394 // time recordings should wrap the Decode call as tightly as possible. 415 // time recordings should wrap the Decode call as tightly as possible.
395 int64_t decode_stop_ns = rtc::TimeNanos(); 416 int64_t decode_stop_ns = rtc::TimeNanos();
396 417
397 // Report stats. 418 // Update frame information and statistics.
398 int frame_number = image.timestamp() / k90khzTimestampFrameDiff - 1; 419 int frame_number = image.timestamp() / k90khzTimestampFrameDiff - 1;
399 FrameStatistic& stat = stats_->stats_[frame_number]; 420 RTC_CHECK_LE(frame_number, frame_infos_.size());
åsapersson 2017/03/03 12:09:21 LT?
brandtr 2017/03/06 15:27:32 Yes!
400 stat.decode_time_in_us = 421 FrameInfo& frame_info = frame_infos_[frame_number];
sprang_webrtc 2017/03/06 10:28:24 pointer
brandtr 2017/03/06 15:27:31 Done.
401 GetElapsedTimeMicroseconds(decode_start_ns_, decode_stop_ns); 422 frame_info.decoded_width = image.width();
402 stat.decoding_successful = true; 423 frame_info.decoded_height = image.height();
424 FrameStatistic& frame_stat = stats_->stats_[frame_number];
425 frame_stat.decode_time_in_us =
426 GetElapsedTimeMicroseconds(frame_info.decode_start_ns, decode_stop_ns);
sprang_webrtc 2017/03/06 10:28:25 units again?
brandtr 2017/03/06 15:27:32 Same rationale as above.
sprang_webrtc 2017/03/06 17:21:38 Acknowledged.
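To make the convention under discussion concrete: the inputs to GetElapsedTimeMicroseconds() are nanosecond timestamps, while the return value is in microseconds. A sketch with an explicitly suffixed result variable (the _us name is hypothetical):

    const int64_t decode_stop_ns = rtc::TimeNanos();
    const int decode_time_us =
        GetElapsedTimeMicroseconds(frame_info.decode_start_ns, decode_stop_ns);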
427 frame_stat.decoding_successful = true;
403 428
404 // Check for resize action (either down or up). 429 // Check if the codecs have resized the frame since the previously decoded frame.
405 if (static_cast<int>(image.width()) != last_encoder_frame_width_ || 430 if (frame_number > 0) {
406 static_cast<int>(image.height()) != last_encoder_frame_height_) { 431 RTC_CHECK_GE(last_decoded_frame_num_, 0);
407 ++num_spatial_resizes_; 432 const FrameInfo& last_decoded_frame_info =
408 last_encoder_frame_width_ = image.width(); 433 frame_infos_[last_decoded_frame_num_];
409 last_encoder_frame_height_ = image.height(); 434 if (static_cast<int>(image.width()) !=
435 last_decoded_frame_info.decoded_width ||
436 static_cast<int>(image.height()) !=
437 last_decoded_frame_info.decoded_height) {
438 ++num_spatial_resizes_;
439 }
410 } 440 }
411 // Check if codec size is different from native/original size, and if so, 441 // Ensure strict monotonicity.
412 // upsample back to original size. This is needed for PSNR and SSIM 442 RTC_CHECK_GT(frame_number, last_decoded_frame_num_);
443 last_decoded_frame_num_ = frame_number;
444
445 // Check if codec size is different from the original size, and if so,
446 // scale back to original size. This is needed for the PSNR and SSIM
413 // calculations. 447 // calculations.
448 size_t extracted_length;
449 rtc::Buffer extracted_buffer;
414 if (image.width() != config_.codec_settings->width || 450 if (image.width() != config_.codec_settings->width ||
415 image.height() != config_.codec_settings->height) { 451 image.height() != config_.codec_settings->height) {
416 rtc::scoped_refptr<I420Buffer> up_image( 452 rtc::scoped_refptr<I420Buffer> scaled_buffer(I420Buffer::Create(
417 I420Buffer::Create(config_.codec_settings->width, 453 config_.codec_settings->width, config_.codec_settings->height));
418 config_.codec_settings->height));
419
420 // Should be the same aspect ratio, no cropping needed. 454 // Should be the same aspect ratio, no cropping needed.
421 if (image.video_frame_buffer()->native_handle()) { 455 if (image.video_frame_buffer()->native_handle()) {
422 up_image->ScaleFrom(*image.video_frame_buffer()->NativeToI420Buffer()); 456 scaled_buffer->ScaleFrom(
457 *image.video_frame_buffer()->NativeToI420Buffer());
423 } else { 458 } else {
424 up_image->ScaleFrom(*image.video_frame_buffer()); 459 scaled_buffer->ScaleFrom(*image.video_frame_buffer());
425 } 460 }
426 461
427 // TODO(mikhal): Extracting the buffer for now - need to update test.
428 size_t length = 462 size_t length =
429 CalcBufferSize(kI420, up_image->width(), up_image->height()); 463 CalcBufferSize(kI420, scaled_buffer->width(), scaled_buffer->height());
430 std::unique_ptr<uint8_t[]> image_buffer(new uint8_t[length]); 464 extracted_buffer.SetSize(length);
431 int extracted_length = ExtractBuffer(up_image, length, image_buffer.get()); 465 extracted_length =
432 RTC_CHECK_GT(extracted_length, 0); 466 ExtractBuffer(scaled_buffer, length, extracted_buffer.data());
433 // Update our copy of the last successful frame. 467 } else {
434 memcpy(last_successful_frame_buffer_.get(), image_buffer.get(), 468 // No resize.
435 extracted_length);
436
437 RTC_CHECK(analysis_frame_writer_->WriteFrame(image_buffer.get()));
438 if (decoded_frame_writer_) {
439 RTC_CHECK(decoded_frame_writer_->WriteFrame(image_buffer.get()));
440 }
441 } else { // No resize.
442 // Update our copy of the last successful frame.
443 // TODO(mikhal): Add as a member function, so won't be allocated per frame.
444 size_t length = CalcBufferSize(kI420, image.width(), image.height()); 469 size_t length = CalcBufferSize(kI420, image.width(), image.height());
445 std::unique_ptr<uint8_t[]> image_buffer(new uint8_t[length]); 470 extracted_buffer.SetSize(length);
446 int extracted_length;
447 if (image.video_frame_buffer()->native_handle()) { 471 if (image.video_frame_buffer()->native_handle()) {
448 extracted_length = 472 extracted_length =
449 ExtractBuffer(image.video_frame_buffer()->NativeToI420Buffer(), 473 ExtractBuffer(image.video_frame_buffer()->NativeToI420Buffer(),
450 length, image_buffer.get()); 474 length, extracted_buffer.data());
451 } else { 475 } else {
452 extracted_length = 476 extracted_length = ExtractBuffer(image.video_frame_buffer(), length,
453 ExtractBuffer(image.video_frame_buffer(), length, image_buffer.get()); 477 extracted_buffer.data());
454 }
455 RTC_CHECK_GT(extracted_length, 0);
456 memcpy(last_successful_frame_buffer_.get(), image_buffer.get(),
457 extracted_length);
458
459 RTC_CHECK(analysis_frame_writer_->WriteFrame(image_buffer.get()));
460 if (decoded_frame_writer_) {
461 RTC_CHECK(decoded_frame_writer_->WriteFrame(image_buffer.get()));
462 } 478 }
463 } 479 }
480
481 RTC_CHECK_EQ(extracted_length, analysis_frame_writer_->FrameLength());
482 RTC_CHECK(analysis_frame_writer_->WriteFrame(extracted_buffer.data()));
483 if (decoded_frame_writer_) {
484 RTC_CHECK_EQ(extracted_length, decoded_frame_writer_->FrameLength());
485 RTC_CHECK(decoded_frame_writer_->WriteFrame(extracted_buffer.data()));
486 }
487
488 last_decoded_frame_buffer_ = std::move(extracted_buffer);
464 } 489 }
465 490
466 int VideoProcessorImpl::GetElapsedTimeMicroseconds(int64_t start, 491 int VideoProcessorImpl::GetElapsedTimeMicroseconds(int64_t start,
467 int64_t stop) { 492 int64_t stop) {
468 int64_t encode_time = (stop - start) / rtc::kNumNanosecsPerMicrosec; 493 int64_t encode_time = (stop - start) / rtc::kNumNanosecsPerMicrosec;
469 RTC_DCHECK_GE(encode_time, std::numeric_limits<int>::min()); 494 RTC_DCHECK_GE(encode_time, std::numeric_limits<int>::min());
470 RTC_DCHECK_LE(encode_time, std::numeric_limits<int>::max()); 495 RTC_DCHECK_LE(encode_time, std::numeric_limits<int>::max());
471 return static_cast<int>(encode_time); 496 return static_cast<int>(encode_time);
472 } 497 }
473 498
474 } // namespace test 499 } // namespace test
475 } // namespace webrtc 500 } // namespace webrtc