OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 120 matching lines...)
131 packet_manipulator_(packet_manipulator), | 131 packet_manipulator_(packet_manipulator), |
132 analysis_frame_reader_(analysis_frame_reader), | 132 analysis_frame_reader_(analysis_frame_reader), |
133 analysis_frame_writer_(analysis_frame_writer), | 133 analysis_frame_writer_(analysis_frame_writer), |
134 encoded_frame_writer_(encoded_frame_writer), | 134 encoded_frame_writer_(encoded_frame_writer), |
135 decoded_frame_writer_(decoded_frame_writer), | 135 decoded_frame_writer_(decoded_frame_writer), |
136 last_encoded_frame_num_(-1), | 136 last_encoded_frame_num_(-1), |
137 last_decoded_frame_num_(-1), | 137 last_decoded_frame_num_(-1), |
138 first_key_frame_has_been_excluded_(false), | 138 first_key_frame_has_been_excluded_(false), |
139 last_decoded_frame_buffer_(analysis_frame_reader->FrameLength()), | 139 last_decoded_frame_buffer_(analysis_frame_reader->FrameLength()), |
140 stats_(stats), | 140 stats_(stats), |
141 num_dropped_frames_(0), | 141 rate_update_index_(-1) { |
142 num_spatial_resizes_(0) { | |
143 RTC_DCHECK(encoder); | 142 RTC_DCHECK(encoder); |
144 RTC_DCHECK(decoder); | 143 RTC_DCHECK(decoder); |
145 RTC_DCHECK(packet_manipulator); | 144 RTC_DCHECK(packet_manipulator); |
146 RTC_DCHECK(analysis_frame_reader); | 145 RTC_DCHECK(analysis_frame_reader); |
147 RTC_DCHECK(analysis_frame_writer); | 146 RTC_DCHECK(analysis_frame_writer); |
148 RTC_DCHECK(stats); | 147 RTC_DCHECK(stats); |
149 frame_infos_.reserve(analysis_frame_reader->NumberOfFrames()); | 148 frame_infos_.reserve(analysis_frame_reader->NumberOfFrames()); |
150 } | 149 } |
151 | 150 |
152 VideoProcessor::~VideoProcessor() = default; | 151 VideoProcessor::~VideoProcessor() = default; |
(...skipping 60 matching lines...)
213 void VideoProcessor::ProcessFrame(int frame_number) { | 212 void VideoProcessor::ProcessFrame(int frame_number) { |
214 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 213 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
215 RTC_DCHECK_EQ(frame_number, frame_infos_.size()) | 214 RTC_DCHECK_EQ(frame_number, frame_infos_.size()) |
216 << "Must process frames in sequence."; | 215 << "Must process frames in sequence."; |
217 RTC_DCHECK(initialized_) << "VideoProcessor not initialized."; | 216 RTC_DCHECK(initialized_) << "VideoProcessor not initialized."; |
218 | 217 |
219 // Get frame from file. | 218 // Get frame from file. |
220 rtc::scoped_refptr<I420BufferInterface> buffer( | 219 rtc::scoped_refptr<I420BufferInterface> buffer( |
221 analysis_frame_reader_->ReadFrame()); | 220 analysis_frame_reader_->ReadFrame()); |
222 RTC_CHECK(buffer) << "Tried to read too many frames from the file."; | 221 RTC_CHECK(buffer) << "Tried to read too many frames from the file."; |
| 222 // Use the frame number as the basis for the RTP timestamp, so that frames can |
| 223 // be identified. Let the first timestamp be non-zero, so that the IvfFileWriter |
| 224 // does not think we want to use capture timestamps in the IVF files. |
| 225 const uint32_t rtp_timestamp = (frame_number + 1) * kRtpClockRateHz / |
| 226 config_.codec_settings.maxFramerate; |
| 227 rtp_timestamp_to_frame_num_[rtp_timestamp] = frame_number; |
223 const int64_t kNoRenderTime = 0; | 228 const int64_t kNoRenderTime = 0; |
224 VideoFrame source_frame(buffer, FrameNumberToTimestamp(frame_number), | 229 VideoFrame source_frame(buffer, rtp_timestamp, kNoRenderTime, |
225 kNoRenderTime, webrtc::kVideoRotation_0); | 230 webrtc::kVideoRotation_0); |
226 | 231 |
227 // Decide if we are going to force a keyframe. | 232 // Decide if we are going to force a keyframe. |
228 std::vector<FrameType> frame_types(1, kVideoFrameDelta); | 233 std::vector<FrameType> frame_types(1, kVideoFrameDelta); |
229 if (config_.keyframe_interval > 0 && | 234 if (config_.keyframe_interval > 0 && |
230 frame_number % config_.keyframe_interval == 0) { | 235 frame_number % config_.keyframe_interval == 0) { |
231 frame_types[0] = kVideoFrameKey; | 236 frame_types[0] = kVideoFrameKey; |
232 } | 237 } |
233 | 238 |
234 // Store frame information during the different stages of encode and decode. | 239 // Store frame information during the different stages of encode and decode. |
235 frame_infos_.emplace_back(); | 240 frame_infos_.emplace_back(); |
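Note on the new timestamp scheme above (illustration only, not part of the CL): with the usual 90 kHz RTP clock (kRtpClockRateHz = 90000 is an assumption here, inferred from the constant's name) and maxFramerate = 30, frame 0 maps to timestamp 3000, frame 1 to 6000, and so on. The first timestamp is therefore non-zero, as the comment requires, and rtp_timestamp_to_frame_num_ recovers the frame number in the encode/decode callbacks. A minimal standalone sketch of the arithmetic:

#include <cassert>
#include <cstdint>
#include <map>

int main() {
  // Assumed values: 90 kHz RTP clock, 30 fps. Mirrors the expression
  // (frame_number + 1) * kRtpClockRateHz / maxFramerate from the CL.
  const uint32_t kRtpClockRateHz = 90000;
  const uint32_t kMaxFramerate = 30;
  std::map<uint32_t, int> rtp_timestamp_to_frame_num;

  for (int frame_number = 0; frame_number < 3; ++frame_number) {
    const uint32_t rtp_timestamp =
        (frame_number + 1) * kRtpClockRateHz / kMaxFramerate;
    rtp_timestamp_to_frame_num[rtp_timestamp] = frame_number;
  }

  // 3000, 6000, 9000: strictly increasing and never zero, so the
  // IvfFileWriter capture-timestamp behavior is not triggered.
  assert(rtp_timestamp_to_frame_num.at(3000) == 0);
  assert(rtp_timestamp_to_frame_num.at(6000) == 1);
  assert(rtp_timestamp_to_frame_num.at(9000) == 2);
  return 0;
}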
(...skipping 10 matching lines...)
246 | 251 |
247 if (frame_stat->encode_return_code != WEBRTC_VIDEO_CODEC_OK) { | 252 if (frame_stat->encode_return_code != WEBRTC_VIDEO_CODEC_OK) { |
248 LOG(LS_WARNING) << "Failed to encode frame " << frame_number | 253 LOG(LS_WARNING) << "Failed to encode frame " << frame_number |
249 << ", return code: " << frame_stat->encode_return_code | 254 << ", return code: " << frame_stat->encode_return_code |
250 << "."; | 255 << "."; |
251 } | 256 } |
252 } | 257 } |
253 | 258 |
254 void VideoProcessor::SetRates(int bitrate_kbps, int framerate_fps) { | 259 void VideoProcessor::SetRates(int bitrate_kbps, int framerate_fps) { |
255 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 260 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
256 | |
257 config_.codec_settings.maxFramerate = framerate_fps; | 261 config_.codec_settings.maxFramerate = framerate_fps; |
258 int set_rates_result = encoder_->SetRateAllocation( | 262 int set_rates_result = encoder_->SetRateAllocation( |
259 bitrate_allocator_->GetAllocation(bitrate_kbps * 1000, framerate_fps), | 263 bitrate_allocator_->GetAllocation(bitrate_kbps * 1000, framerate_fps), |
260 framerate_fps); | 264 framerate_fps); |
261 RTC_DCHECK_GE(set_rates_result, 0) | 265 RTC_DCHECK_GE(set_rates_result, 0) |
262 << "Failed to update encoder with new rate " << bitrate_kbps << "."; | 266 << "Failed to update encoder with new rate " << bitrate_kbps << "."; |
263 num_dropped_frames_ = 0; | 267 ++rate_update_index_; |
264 num_spatial_resizes_ = 0; | 268 num_dropped_frames_.push_back(0); |
| 269 num_spatial_resizes_.push_back(0); |
265 } | 270 } |
266 | 271 |
267 int VideoProcessor::GetQpFromEncoder(int frame_number) const { | 272 int VideoProcessor::GetQpFromEncoder(int frame_number) const { |
268 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 273 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
269 RTC_CHECK_LT(frame_number, frame_infos_.size()); | 274 RTC_CHECK_LT(frame_number, frame_infos_.size()); |
270 return frame_infos_[frame_number].qp_encoder; | 275 return frame_infos_[frame_number].qp_encoder; |
271 } | 276 } |
272 | 277 |
273 int VideoProcessor::GetQpFromBitstream(int frame_number) const { | 278 int VideoProcessor::GetQpFromBitstream(int frame_number) const { |
274 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 279 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
275 RTC_CHECK_LT(frame_number, frame_infos_.size()); | 280 RTC_CHECK_LT(frame_number, frame_infos_.size()); |
276 return frame_infos_[frame_number].qp_bitstream; | 281 return frame_infos_[frame_number].qp_bitstream; |
277 } | 282 } |
278 | 283 |
279 int VideoProcessor::NumberDroppedFrames() { | 284 std::vector<int> VideoProcessor::NumberDroppedFramesPerRateUpdate() const { |
280 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 285 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
281 return num_dropped_frames_; | 286 return num_dropped_frames_; |
282 } | 287 } |
283 | 288 |
284 int VideoProcessor::NumberSpatialResizes() { | 289 std::vector<int> VideoProcessor::NumberSpatialResizesPerRateUpdate() const { |
285 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 290 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
286 return num_spatial_resizes_; | 291 return num_spatial_resizes_; |
287 } | 292 } |
288 | 293 |
289 void VideoProcessor::FrameEncoded( | 294 void VideoProcessor::FrameEncoded( |
290 webrtc::VideoCodecType codec, | 295 webrtc::VideoCodecType codec, |
291 const EncodedImage& encoded_image, | 296 const EncodedImage& encoded_image, |
292 const webrtc::RTPFragmentationHeader* fragmentation) { | 297 const webrtc::RTPFragmentationHeader* fragmentation) { |
293 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 298 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
294 | 299 |
295 // For the highest measurement accuracy of the encode time, the start/stop | 300 // For the highest measurement accuracy of the encode time, the start/stop |
296 // time recordings should wrap the Encode call as tightly as possible. | 301 // time recordings should wrap the Encode call as tightly as possible. |
297 int64_t encode_stop_ns = rtc::TimeNanos(); | 302 int64_t encode_stop_ns = rtc::TimeNanos(); |
298 | 303 |
299 if (encoded_frame_writer_) { | 304 if (encoded_frame_writer_) { |
300 RTC_CHECK(encoded_frame_writer_->WriteFrame(encoded_image, codec)); | 305 RTC_CHECK(encoded_frame_writer_->WriteFrame(encoded_image, codec)); |
301 } | 306 } |
302 | 307 |
303 // Timestamp is proportional to frame number, so this gives us number of | 308 // Check for dropped frames. |
304 // dropped frames. | 309 const int frame_number = |
305 int frame_number = TimestampToFrameNumber(encoded_image._timeStamp); | 310 rtp_timestamp_to_frame_num_[encoded_image._timeStamp]; |
306 bool last_frame_missing = false; | 311 bool last_frame_missing = false; |
307 if (frame_number > 0) { | 312 if (frame_number > 0) { |
308 RTC_DCHECK_GE(last_encoded_frame_num_, 0); | 313 RTC_DCHECK_GE(last_encoded_frame_num_, 0); |
309 int num_dropped_from_last_encode = | 314 int num_dropped_from_last_encode = |
310 frame_number - last_encoded_frame_num_ - 1; | 315 frame_number - last_encoded_frame_num_ - 1; |
311 RTC_DCHECK_GE(num_dropped_from_last_encode, 0); | 316 RTC_DCHECK_GE(num_dropped_from_last_encode, 0); |
312 num_dropped_frames_ += num_dropped_from_last_encode; | 317 RTC_CHECK_GE(rate_update_index_, 0); |
| 318 num_dropped_frames_[rate_update_index_] += num_dropped_from_last_encode; |
313 if (num_dropped_from_last_encode > 0) { | 319 if (num_dropped_from_last_encode > 0) { |
314 // For dropped frames, we write out the last decoded frame to avoid | 320 // For dropped frames, we write out the last decoded frame to avoid |
315 // getting out of sync for the computation of PSNR and SSIM. | 321 // getting out of sync for the computation of PSNR and SSIM. |
316 for (int i = 0; i < num_dropped_from_last_encode; i++) { | 322 for (int i = 0; i < num_dropped_from_last_encode; i++) { |
317 RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(), | 323 RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(), |
318 analysis_frame_writer_->FrameLength()); | 324 analysis_frame_writer_->FrameLength()); |
319 RTC_CHECK(analysis_frame_writer_->WriteFrame( | 325 RTC_CHECK(analysis_frame_writer_->WriteFrame( |
320 last_decoded_frame_buffer_.data())); | 326 last_decoded_frame_buffer_.data())); |
321 if (decoded_frame_writer_) { | 327 if (decoded_frame_writer_) { |
322 RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(), | 328 RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(), |
323 decoded_frame_writer_->FrameLength()); | 329 decoded_frame_writer_->FrameLength()); |
324 RTC_CHECK(decoded_frame_writer_->WriteFrame( | 330 RTC_CHECK(decoded_frame_writer_->WriteFrame( |
325 last_decoded_frame_buffer_.data())); | 331 last_decoded_frame_buffer_.data())); |
326 } | 332 } |
327 } | 333 } |
328 } | 334 } |
329 | |
330 last_frame_missing = | 335 last_frame_missing = |
331 (frame_infos_[last_encoded_frame_num_].manipulated_length == 0); | 336 (frame_infos_[last_encoded_frame_num_].manipulated_length == 0); |
332 } | 337 } |
333 // Ensure strict monotonicity. | 338 // Ensure strict monotonicity. |
334 RTC_CHECK_GT(frame_number, last_encoded_frame_num_); | 339 RTC_CHECK_GT(frame_number, last_encoded_frame_num_); |
335 last_encoded_frame_num_ = frame_number; | 340 last_encoded_frame_num_ = frame_number; |
336 | 341 |
337 // Frame is not dropped, so update frame information and statistics. | 342 // Update frame information and statistics. |
338 RTC_CHECK_LT(frame_number, frame_infos_.size()); | 343 RTC_CHECK_LT(frame_number, frame_infos_.size()); |
339 FrameInfo* frame_info = &frame_infos_[frame_number]; | 344 FrameInfo* frame_info = &frame_infos_[frame_number]; |
340 frame_info->qp_encoder = encoded_image.qp_; | 345 frame_info->qp_encoder = encoded_image.qp_; |
341 if (codec == kVideoCodecVP8) { | 346 if (codec == kVideoCodecVP8) { |
342 vp8::GetQp(encoded_image._buffer, encoded_image._length, | 347 vp8::GetQp(encoded_image._buffer, encoded_image._length, |
343 &frame_info->qp_bitstream); | 348 &frame_info->qp_bitstream); |
344 } else if (codec == kVideoCodecVP9) { | 349 } else if (codec == kVideoCodecVP9) { |
345 vp9::GetQp(encoded_image._buffer, encoded_image._length, | 350 vp9::GetQp(encoded_image._buffer, encoded_image._length, |
346 &frame_info->qp_bitstream); | 351 &frame_info->qp_bitstream); |
347 } | 352 } |
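The per-rate-update bookkeeping above works as follows: rate_update_index_ starts at -1, every SetRates() call pushes a zeroed counter and increments the index, and FrameEncoded() charges any gap in encoded frame numbers to the counter for the currently active rate. A small self-contained sketch of that accounting (the frame numbers and drop pattern are made up for illustration):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> num_dropped_frames;  // One entry per rate update.
  int rate_update_index = -1;
  int last_encoded_frame_num = -1;

  auto set_rates = [&]() {
    // Mirrors VideoProcessor::SetRates().
    ++rate_update_index;
    num_dropped_frames.push_back(0);
  };
  auto frame_encoded = [&](int frame_number) {
    // Mirrors the gap check in VideoProcessor::FrameEncoded().
    if (frame_number > 0) {
      num_dropped_frames[rate_update_index] +=
          frame_number - last_encoded_frame_num - 1;
    }
    last_encoded_frame_num = frame_number;
  };

  set_rates();        // Rate update 0.
  frame_encoded(0);
  frame_encoded(1);
  frame_encoded(3);   // Frame 2 was dropped under rate 0.
  set_rates();        // Rate update 1.
  frame_encoded(4);
  frame_encoded(7);   // Frames 5 and 6 were dropped under rate 1.

  assert(num_dropped_frames[0] == 1);
  assert(num_dropped_frames[1] == 2);
  return 0;
}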
(...skipping 40 matching lines...)
388 memcpy(&copied_image, &encoded_image, sizeof(copied_image)); | 393 memcpy(&copied_image, &encoded_image, sizeof(copied_image)); |
389 copied_image._size = copied_buffer_size; | 394 copied_image._size = copied_buffer_size; |
390 copied_image._buffer = copied_buffer.get(); | 395 copied_image._buffer = copied_buffer.get(); |
391 | 396 |
392 if (!exclude_this_frame) { | 397 if (!exclude_this_frame) { |
393 frame_stat->packets_dropped = | 398 frame_stat->packets_dropped = |
394 packet_manipulator_->ManipulatePackets(&copied_image); | 399 packet_manipulator_->ManipulatePackets(&copied_image); |
395 } | 400 } |
396 frame_info->manipulated_length = copied_image._length; | 401 frame_info->manipulated_length = copied_image._length; |
397 | 402 |
398 // Keep track of if frames are lost due to packet loss so we can tell | |
399 // this to the encoder (this is handled by the RTP logic in the full stack). | |
400 // TODO(kjellander): Pass fragmentation header to the decoder when | |
401 // CL 172001 has been submitted and PacketManipulator supports this. | |
402 | |
403 // For the highest measurement accuracy of the decode time, the start/stop | 403 // For the highest measurement accuracy of the decode time, the start/stop |
404 // time recordings should wrap the Decode call as tightly as possible. | 404 // time recordings should wrap the Decode call as tightly as possible. |
405 frame_info->decode_start_ns = rtc::TimeNanos(); | 405 frame_info->decode_start_ns = rtc::TimeNanos(); |
406 frame_stat->decode_return_code = | 406 frame_stat->decode_return_code = |
407 decoder_->Decode(copied_image, last_frame_missing, nullptr); | 407 decoder_->Decode(copied_image, last_frame_missing, nullptr); |
408 | 408 |
409 if (frame_stat->decode_return_code != WEBRTC_VIDEO_CODEC_OK) { | 409 if (frame_stat->decode_return_code != WEBRTC_VIDEO_CODEC_OK) { |
410 // Write the last successful frame to the output file to avoid getting it | 410 // Write the last successful frame to the output file to avoid getting it |
411 // out of sync with the source file for SSIM and PSNR comparisons. | 411 // out of sync with the source file for SSIM and PSNR comparisons. |
412 RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(), | 412 RTC_DCHECK_EQ(last_decoded_frame_buffer_.size(), |
(...skipping 10 matching lines...)
423 } | 423 } |
424 | 424 |
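Both paths above (frames dropped by the encoder and frames that fail to decode) re-write the last decoded frame so that the analysis output file keeps exactly one frame per source frame; PSNR/SSIM then compare source frame k against whatever was last shown at position k. A toy sketch of that index alignment (the strings stand in for frame buffers and are purely illustrative):

#include <cassert>
#include <string>
#include <vector>

int main() {
  const std::vector<std::string> source = {"f0", "f1", "f2", "f3"};
  std::vector<std::string> analysis_output;
  std::string last_decoded_frame_buffer;

  for (size_t k = 0; k < source.size(); ++k) {
    const bool frame_dropped = (k == 2);  // Pretend frame 2 was dropped.
    if (frame_dropped) {
      // Repeat the previous decode, as the VideoProcessor does, so the
      // output stays index-aligned with the source.
      analysis_output.push_back(last_decoded_frame_buffer);
    } else {
      last_decoded_frame_buffer = source[k];  // Stand-in for a real decode.
      analysis_output.push_back(last_decoded_frame_buffer);
    }
  }

  assert(analysis_output.size() == source.size());
  assert(analysis_output[2] == "f1");  // Position 2 holds the repeated frame.
  return 0;
}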
425 void VideoProcessor::FrameDecoded(const VideoFrame& image) { | 425 void VideoProcessor::FrameDecoded(const VideoFrame& image) { |
426 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 426 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
427 | 427 |
428 // For the highest measurement accuracy of the decode time, the start/stop | 428 // For the highest measurement accuracy of the decode time, the start/stop |
429 // time recordings should wrap the Decode call as tightly as possible. | 429 // time recordings should wrap the Decode call as tightly as possible. |
430 int64_t decode_stop_ns = rtc::TimeNanos(); | 430 int64_t decode_stop_ns = rtc::TimeNanos(); |
431 | 431 |
432 // Update frame information and statistics. | 432 // Update frame information and statistics. |
433 int frame_number = TimestampToFrameNumber(image.timestamp()); | 433 const int frame_number = rtp_timestamp_to_frame_num_[image.timestamp()]; |
434 RTC_DCHECK_LT(frame_number, frame_infos_.size()); | 434 RTC_CHECK_LT(frame_number, frame_infos_.size()); |
435 FrameInfo* frame_info = &frame_infos_[frame_number]; | 435 FrameInfo* frame_info = &frame_infos_[frame_number]; |
436 frame_info->decoded_width = image.width(); | 436 frame_info->decoded_width = image.width(); |
437 frame_info->decoded_height = image.height(); | 437 frame_info->decoded_height = image.height(); |
438 FrameStatistic* frame_stat = &stats_->stats_[frame_number]; | 438 FrameStatistic* frame_stat = &stats_->stats_[frame_number]; |
439 frame_stat->decode_time_in_us = | 439 frame_stat->decode_time_in_us = |
440 GetElapsedTimeMicroseconds(frame_info->decode_start_ns, decode_stop_ns); | 440 GetElapsedTimeMicroseconds(frame_info->decode_start_ns, decode_stop_ns); |
441 frame_stat->decoding_successful = true; | 441 frame_stat->decoding_successful = true; |
442 | 442 |
443 // Check if the codecs have resized the frame since the previously decoded frame. | 443 // Check if the codecs have resized the frame since the previously decoded frame. |
444 if (frame_number > 0) { | 444 if (frame_number > 0) { |
445 RTC_DCHECK_GE(last_decoded_frame_num_, 0); | 445 RTC_CHECK_GE(last_decoded_frame_num_, 0); |
446 const FrameInfo& last_decoded_frame_info = | 446 const FrameInfo& last_decoded_frame_info = |
447 frame_infos_[last_decoded_frame_num_]; | 447 frame_infos_[last_decoded_frame_num_]; |
448 if (static_cast<int>(image.width()) != | 448 if (static_cast<int>(image.width()) != |
449 last_decoded_frame_info.decoded_width || | 449 last_decoded_frame_info.decoded_width || |
450 static_cast<int>(image.height()) != | 450 static_cast<int>(image.height()) != |
451 last_decoded_frame_info.decoded_height) { | 451 last_decoded_frame_info.decoded_height) { |
452 ++num_spatial_resizes_; | 452 RTC_CHECK_GE(rate_update_index_, 0); |
| 453 ++num_spatial_resizes_[rate_update_index_]; |
453 } | 454 } |
454 } | 455 } |
455 // Ensure strict monotonicity. | 456 // Ensure strict monotonicity. |
456 RTC_CHECK_GT(frame_number, last_decoded_frame_num_); | 457 RTC_CHECK_GT(frame_number, last_decoded_frame_num_); |
457 last_decoded_frame_num_ = frame_number; | 458 last_decoded_frame_num_ = frame_number; |
458 | 459 |
459 // Check if codec size is different from the original size, and if so, | 460 // Check if codec size is different from the original size, and if so, |
460 // scale back to original size. This is needed for the PSNR and SSIM | 461 // scale back to original size. This is needed for the PSNR and SSIM |
461 // calculations. | 462 // calculations. |
462 size_t extracted_length; | 463 size_t extracted_length; |
(...skipping 22 matching lines...)
485 RTC_DCHECK_EQ(extracted_length, analysis_frame_writer_->FrameLength()); | 486 RTC_DCHECK_EQ(extracted_length, analysis_frame_writer_->FrameLength()); |
486 RTC_CHECK(analysis_frame_writer_->WriteFrame(extracted_buffer.data())); | 487 RTC_CHECK(analysis_frame_writer_->WriteFrame(extracted_buffer.data())); |
487 if (decoded_frame_writer_) { | 488 if (decoded_frame_writer_) { |
488 RTC_DCHECK_EQ(extracted_length, decoded_frame_writer_->FrameLength()); | 489 RTC_DCHECK_EQ(extracted_length, decoded_frame_writer_->FrameLength()); |
489 RTC_CHECK(decoded_frame_writer_->WriteFrame(extracted_buffer.data())); | 490 RTC_CHECK(decoded_frame_writer_->WriteFrame(extracted_buffer.data())); |
490 } | 491 } |
491 | 492 |
492 last_decoded_frame_buffer_ = std::move(extracted_buffer); | 493 last_decoded_frame_buffer_ = std::move(extracted_buffer); |
493 } | 494 } |
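The scaling mentioned in the comment above (a decoded frame that comes back at a different resolution is scaled back to the original size before being written for PSNR/SSIM) happens in the elided lines, so the exact call is not shown in this hunk. Purely as an illustration of the general approach, and not of the CL's actual code, an I420 upscale with libyuv could look like the sketch below (the frame sizes and the use of libyuv::I420Scale are assumptions):

#include <cstdint>
#include <vector>

#include "libyuv/scale.h"

int main() {
  // Assumed example: the codec produced 320x180, the source is 640x360.
  const int src_w = 320, src_h = 180;
  const int dst_w = 640, dst_h = 360;

  // I420 layout: full-size Y plane plus quarter-size U and V planes.
  std::vector<uint8_t> src(src_w * src_h * 3 / 2, 0);
  std::vector<uint8_t> dst(dst_w * dst_h * 3 / 2, 0);

  const uint8_t* src_y = src.data();
  const uint8_t* src_u = src_y + src_w * src_h;
  const uint8_t* src_v = src_u + (src_w / 2) * (src_h / 2);
  uint8_t* dst_y = dst.data();
  uint8_t* dst_u = dst_y + dst_w * dst_h;
  uint8_t* dst_v = dst_u + (dst_w / 2) * (dst_h / 2);

  // Scale the decoded frame back to the original resolution so it can be
  // compared pixel-for-pixel against the source frame.
  libyuv::I420Scale(src_y, src_w, src_u, src_w / 2, src_v, src_w / 2,
                    src_w, src_h,
                    dst_y, dst_w, dst_u, dst_w / 2, dst_v, dst_w / 2,
                    dst_w, dst_h, libyuv::kFilterBox);
  return 0;
}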
494 | 495 |
495 uint32_t VideoProcessor::FrameNumberToTimestamp(int frame_number) const { | 496 int VideoProcessor::LastDecodedFrameNumber() const { |
496 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | 497 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); |
497 | 498 return last_decoded_frame_num_; |
498 RTC_DCHECK_GE(frame_number, 0); | |
499 const int ticks_per_frame = | |
500 kRtpClockRateHz / config_.codec_settings.maxFramerate; | |
501 return (frame_number + 1) * ticks_per_frame; | |
502 } | |
503 | |
504 int VideoProcessor::TimestampToFrameNumber(uint32_t timestamp) const { | |
505 RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_); | |
506 | |
507 RTC_DCHECK_GT(timestamp, 0); | |
508 const int ticks_per_frame = | |
509 kRtpClockRateHz / config_.codec_settings.maxFramerate; | |
510 RTC_DCHECK_EQ(timestamp % ticks_per_frame, 0); | |
511 return (timestamp / ticks_per_frame) - 1; | |
512 } | 499 } |
513 | 500 |
514 } // namespace test | 501 } // namespace test |
515 } // namespace webrtc | 502 } // namespace webrtc |