Chromium Code Reviews
| Index: webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
| diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
| index 9cda6922b4ba250eccf3dd8d0d67f18a1ced1a42..25d419fd0c3affbba653db756e83c3eaf05dd6ea 100644 |
| --- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
| +++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
| @@ -30,8 +30,13 @@ namespace webrtc { |
| namespace test { |
| namespace { |
| + |
| +// TODO(brandtr): Update this to use the real frame rate. |
| const int k90khzTimestampFrameDiff = 3000; // Assuming 30 fps. |
| +// All foreman_* files in //resources are 300 frames long. |
|
sprang_webrtc
2017/03/06 10:28:24
Sounds hacky. Should we add a todo to fix this?
brandtr
2017/03/06 15:27:31
Removed, thanks to suggestion from Åsa :)
|
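As a minimal sketch of the arithmetic behind k90khzTimestampFrameDiff above (assuming a 90 kHz RTP clock and the 30 fps noted in the TODO; kRtpClockRateHz and kAssumedFramerateFps are illustrative names, not identifiers from this file):

    constexpr int kRtpClockRateHz = 90000;    // RTP video timestamps tick at 90 kHz.
    constexpr int kAssumedFramerateFps = 30;  // Matches the "Assuming 30 fps" comment.
    static_assert(kRtpClockRateHz / kAssumedFramerateFps == 3000,
                  "One frame at 30 fps spans 3000 ticks, i.e. k90khzTimestampFrameDiff.");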
| +const size_t kInitialFrameInfoSize = 300; |
| + |
| std::unique_ptr<VideoBitrateAllocator> CreateBitrateAllocator( |
| const TestConfig& config) { |
| std::unique_ptr<TemporalLayersFactory> tl_factory; |
| @@ -97,41 +102,33 @@ VideoProcessorImpl::VideoProcessorImpl(webrtc::VideoEncoder* encoder, |
| source_frame_writer_(source_frame_writer), |
| encoded_frame_writer_(encoded_frame_writer), |
| decoded_frame_writer_(decoded_frame_writer), |
| - first_key_frame_has_been_excluded_(false), |
| - last_frame_missing_(false), |
| initialized_(false), |
| - encoded_frame_size_(0), |
| - encoded_frame_type_(kVideoFrameKey), |
| - prev_time_stamp_(0), |
| - last_encoder_frame_width_(0), |
| - last_encoder_frame_height_(0), |
| + frame_infos_(), |
|
sprang_webrtc
2017/03/06 10:28:25
nit: usually omit default constructors in initializer lists.
brandtr
2017/03/06 15:27:31
Done.
|
| + last_encoded_frame_num_(-1), |
| + last_decoded_frame_num_(-1), |
| + first_key_frame_has_been_excluded_(false), |
| + last_decoded_frame_buffer_(0, analysis_frame_reader_->FrameLength()), |
| stats_(stats), |
| num_dropped_frames_(0), |
| num_spatial_resizes_(0), |
| - bit_rate_factor_(0.0), |
| - encode_start_ns_(0), |
| - decode_start_ns_(0) { |
| + bit_rate_factor_(0.0) { |
| RTC_DCHECK(encoder); |
| RTC_DCHECK(decoder); |
| RTC_DCHECK(packet_manipulator); |
| RTC_DCHECK(analysis_frame_reader); |
| RTC_DCHECK(analysis_frame_writer); |
| RTC_DCHECK(stats); |
| + |
| + frame_infos_.reserve(kInitialFrameInfoSize); |
|
åsapersson
2017/03/03 12:09:21
Could analysis_frame_reader_->NumberOfFrames() be used here instead?
brandtr
2017/03/06 15:27:32
Good idea. Done.
|
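A minimal sketch of the change agreed above, assuming the frame reader exposes NumberOfFrames() as the comment suggests (its exact return type is not visible in this diff):

    // Reserve capacity for the actual clip length instead of a hard-coded 300.
    frame_infos_.reserve(analysis_frame_reader_->NumberOfFrames());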
| } |
| bool VideoProcessorImpl::Init() { |
| + RTC_CHECK(!initialized_) |
| + << "This VideoProcessor has already been initialized."; |
| + |
| // Calculate a factor used for bit rate calculations. |
| bit_rate_factor_ = config_.codec_settings->maxFramerate * 0.001 * 8; // bits |
| - // Initialize data structures used by the encoder/decoder APIs. |
| - size_t frame_length_in_bytes = analysis_frame_reader_->FrameLength(); |
| - last_successful_frame_buffer_.reset(new uint8_t[frame_length_in_bytes]); |
| - |
| - // Set fixed properties common for all frames. |
| - // To keep track of spatial resize actions by encoder. |
| - last_encoder_frame_width_ = config_.codec_settings->width; |
| - last_encoder_frame_height_ = config_.codec_settings->height; |
| - |
| // Setup required callbacks for the encoder/decoder. |
| RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(encode_callback_.get()), |
| WEBRTC_VIDEO_CODEC_OK) |
| @@ -184,7 +181,9 @@ bool VideoProcessorImpl::Init() { |
| config_.codec_settings->VP9()->resilience); |
| } |
| } |
| + |
| initialized_ = true; |
| + |
| return true; |
| } |
| @@ -203,12 +202,16 @@ void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) { |
| num_spatial_resizes_ = 0; |
| } |
| +// TODO(brandtr): Update implementation of EncodedFrameSize and EncodedFrameType |
| +// to support batch processing in the caller. |
| size_t VideoProcessorImpl::EncodedFrameSize() { |
| - return encoded_frame_size_; |
| + RTC_CHECK(!frame_infos_.empty()); |
| + return frame_infos_.back().encoded_frame_size; |
| } |
| FrameType VideoProcessorImpl::EncodedFrameType() { |
| - return encoded_frame_type_; |
| + RTC_CHECK(!frame_infos_.empty()); |
| + return frame_infos_.back().encoded_frame_type; |
| } |
| int VideoProcessorImpl::NumberDroppedFrames() { |
| @@ -221,60 +224,60 @@ int VideoProcessorImpl::NumberSpatialResizes() { |
| bool VideoProcessorImpl::ProcessFrame(int frame_number) { |
| RTC_CHECK_GE(frame_number, 0); |
| + RTC_CHECK_LE(frame_number, frame_infos_.size()) |
| + << "Must process frames without gaps."; |
| RTC_CHECK(initialized_) << "Attempting to use uninitialized VideoProcessor"; |
| rtc::scoped_refptr<VideoFrameBuffer> buffer( |
| analysis_frame_reader_->ReadFrame()); |
| - if (buffer) { |
| - if (source_frame_writer_) { |
| - // TODO(brandtr): Introduce temp buffer as data member, to avoid |
| - // allocating for every frame. |
| - size_t length = CalcBufferSize(kI420, buffer->width(), buffer->height()); |
| - std::unique_ptr<uint8_t[]> extracted_buffer(new uint8_t[length]); |
| - int extracted_length = |
| - ExtractBuffer(buffer, length, extracted_buffer.get()); |
| - RTC_CHECK_EQ(extracted_length, source_frame_writer_->FrameLength()); |
| - source_frame_writer_->WriteFrame(extracted_buffer.get()); |
| - } |
| - // Use the frame number as basis for timestamp to identify frames. Let the |
| - // first timestamp be non-zero, to not make the IvfFileWriter believe that |
| - // we want to use capture timestamps in the IVF files. |
| - VideoFrame source_frame(buffer, |
| - (frame_number + 1) * k90khzTimestampFrameDiff, 0, |
| - webrtc::kVideoRotation_0); |
| - |
| - // Ensure we have a new statistics data object we can fill. |
| - FrameStatistic& stat = stats_->NewFrame(frame_number); |
| - |
| - // Decide if we are going to force a keyframe. |
| - std::vector<FrameType> frame_types(1, kVideoFrameDelta); |
| - if (config_.keyframe_interval > 0 && |
| - frame_number % config_.keyframe_interval == 0) { |
| - frame_types[0] = kVideoFrameKey; |
| - } |
| + if (!buffer) { |
| + // Last frame has been reached. |
| + return false; |
| + } |
| - // For dropped frames, we regard them as zero size encoded frames. |
| - encoded_frame_size_ = 0; |
| - encoded_frame_type_ = kVideoFrameDelta; |
| + if (source_frame_writer_) { |
| + size_t length = CalcBufferSize(kI420, buffer->width(), buffer->height()); |
| + rtc::Buffer extracted_buffer(length); |
| + int extracted_length = |
| + ExtractBuffer(buffer, length, extracted_buffer.data()); |
| + RTC_CHECK_EQ(extracted_length, source_frame_writer_->FrameLength()); |
| + RTC_CHECK(source_frame_writer_->WriteFrame(extracted_buffer.data())); |
| + } |
| - // For the highest measurement accuracy of the encode time, the start/stop |
| - // time recordings should wrap the Encode call as tightly as possible. |
| - encode_start_ns_ = rtc::TimeNanos(); |
| - int32_t encode_result = |
| - encoder_->Encode(source_frame, nullptr, &frame_types); |
| + // Use the frame number as the basis for timestamp to identify frames. Let the |
| + // first timestamp be non-zero, to not make the IvfFileWriter believe that |
| + // we want to use capture timestamps in the IVF files. |
| + uint32_t timestamp = (frame_number + 1) * k90khzTimestampFrameDiff; |
| + VideoFrame source_frame(buffer, timestamp, 0, webrtc::kVideoRotation_0); |
| + |
| + // Store frame information during the different stages of encode and decode. |
| + frame_infos_.emplace_back(); |
| + FrameInfo& frame_info = frame_infos_.back(); |
| + frame_info.timestamp = timestamp; |
| + |
| + // Store frame statistics for aggregation at end of test run. |
| + FrameStatistic& frame_stat = stats_->NewFrame(frame_number); |
|
åsapersson
2017/03/03 12:09:21
maybe move closer to where it is used
brandtr
2017/03/06 15:27:32
Done. (Must be created before encode call, since i
|
| + |
| + // Decide if we are going to force a keyframe. |
| + std::vector<FrameType> frame_types(1, kVideoFrameDelta); |
| + if (config_.keyframe_interval > 0 && |
| + frame_number % config_.keyframe_interval == 0) { |
| + frame_types[0] = kVideoFrameKey; |
| + } |
| - if (encode_result != WEBRTC_VIDEO_CODEC_OK) { |
| - fprintf(stderr, "Failed to encode frame %d, return code: %d\n", |
| - frame_number, encode_result); |
| - } |
| - stat.encode_return_code = encode_result; |
| + // For the highest measurement accuracy of the encode time, the start/stop |
| + // time recordings should wrap the Encode call as tightly as possible. |
| + frame_info.encode_start_ns = rtc::TimeNanos(); |
| + int32_t encode_result = encoder_->Encode(source_frame, nullptr, &frame_types); |
|
sprang_webrtc
2017/03/06 10:28:25
Think you can drop encode_result and just use frame_stat.encode_return_code directly.
brandtr
2017/03/06 15:27:31
Done.
|
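One way the surrounding lines could read after the simplification suggested above; this is a sketch, not the actual follow-up patch:

    frame_info.encode_start_ns = rtc::TimeNanos();
    frame_stat.encode_return_code =
        encoder_->Encode(source_frame, nullptr, &frame_types);
    if (frame_stat.encode_return_code != WEBRTC_VIDEO_CODEC_OK) {
      fprintf(stderr, "Failed to encode frame %d, return code: %d\n",
              frame_number, frame_stat.encode_return_code);
    }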
| + frame_stat.encode_return_code = encode_result; |
| - return true; |
| - } else { |
| - // Last frame has been reached. |
| - return false; |
| + if (encode_result != WEBRTC_VIDEO_CODEC_OK) { |
| + fprintf(stderr, "Failed to encode frame %d, return code: %d\n", |
| + frame_number, encode_result); |
| } |
| + |
| + return true; |
| } |
| void VideoProcessorImpl::FrameEncoded( |
| @@ -291,39 +294,54 @@ void VideoProcessorImpl::FrameEncoded( |
| // Timestamp is proportional to frame number, so this gives us number of |
| // dropped frames. |
| - int num_dropped_from_prev_encode = |
| - (encoded_image._timeStamp - prev_time_stamp_) / k90khzTimestampFrameDiff - |
| - 1; |
| - num_dropped_frames_ += num_dropped_from_prev_encode; |
| - prev_time_stamp_ = encoded_image._timeStamp; |
| - if (num_dropped_from_prev_encode > 0) { |
| - // For dropped frames, we write out the last decoded frame to avoid getting |
| - // out of sync for the computation of PSNR and SSIM. |
| - for (int i = 0; i < num_dropped_from_prev_encode; i++) { |
| - RTC_CHECK(analysis_frame_writer_->WriteFrame( |
| - last_successful_frame_buffer_.get())); |
| - if (decoded_frame_writer_) { |
| - RTC_CHECK(decoded_frame_writer_->WriteFrame( |
| - last_successful_frame_buffer_.get())); |
| + int frame_number = encoded_image._timeStamp / k90khzTimestampFrameDiff - 1; |
|
sprang_webrtc
2017/03/06 10:28:24
nit: would prefer extra parenthesis for clarity
brandtr
2017/03/06 15:27:32
Agree. Done in helper function.
|
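A sketch of the helper-function approach mentioned in the reply; the name TimestampToFrameNumber is hypothetical, since the helper actually added later is not visible in this diff:

    int TimestampToFrameNumber(uint32_t timestamp) {
      // Parenthesized for clarity, per the review comment.
      return static_cast<int>(timestamp / k90khzTimestampFrameDiff) - 1;
    }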
| + bool last_frame_missing = false; |
| + if (frame_number > 0) { |
| + RTC_CHECK_GE(last_encoded_frame_num_, 0); |
|
sprang_webrtc
2017/03/06 10:28:24
Why CHECK rather than DCHECK everywhere? Will you
brandtr
2017/03/06 15:27:31
Since this test will often be run on devices in Re
sprang_webrtc
2017/03/06 17:21:38
Acknowledged.
|
| + const FrameInfo& last_encoded_frame_info = |
| + frame_infos_[last_encoded_frame_num_]; |
| + last_frame_missing = (last_encoded_frame_info.manipulated_length == 0); |
| + int num_dropped_from_last_encode = |
| + (encoded_image._timeStamp - last_encoded_frame_info.timestamp) / |
| + k90khzTimestampFrameDiff - |
| + 1; |
|
sprang_webrtc
2017/03/06 10:28:25
Sanity check that encoded_image._timeStamp % k90khzTimestampFrameDiff == 0?
sprang_webrtc
2017/03/06 10:28:25
parenthesis
brandtr
2017/03/06 15:27:32
Good idea. Done.
|
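A sketch of the sanity check being discussed, assuming it sits before the division above; the timestamps written in ProcessFrame() are exact multiples of the step, so any remainder would indicate a bookkeeping bug:

    RTC_CHECK_EQ(encoded_image._timeStamp % k90khzTimestampFrameDiff, 0);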
| + num_dropped_frames_ += num_dropped_from_last_encode; |
| + if (num_dropped_from_last_encode > 0) { |
| + // For dropped frames, we write out the last decoded frame to avoid |
| + // getting out of sync for the computation of PSNR and SSIM. |
| + for (int i = 0; i < num_dropped_from_last_encode; i++) { |
| + RTC_CHECK_EQ(last_decoded_frame_buffer_.size(), |
| + analysis_frame_writer_->FrameLength()); |
|
sprang_webrtc
2017/03/06 10:28:24
nit: git cl format
brandtr
2017/03/06 15:27:31
Done.
|
| + RTC_CHECK(analysis_frame_writer_->WriteFrame( |
| + last_decoded_frame_buffer_.data())); |
| + if (decoded_frame_writer_) { |
| + RTC_CHECK_EQ(last_decoded_frame_buffer_.size(), |
| + decoded_frame_writer_->FrameLength()); |
| + RTC_CHECK(decoded_frame_writer_->WriteFrame( |
| + last_decoded_frame_buffer_.data())); |
| + } |
| } |
| } |
| } |
| - |
| - // Frame is not dropped, so update the encoded frame size |
| - // (encoder callback is only called for non-zero length frames). |
| - encoded_frame_size_ = encoded_image._length; |
| - encoded_frame_type_ = encoded_image._frameType; |
| - int frame_number = encoded_image._timeStamp / k90khzTimestampFrameDiff - 1; |
| - FrameStatistic& stat = stats_->stats_[frame_number]; |
| - stat.encode_time_in_us = |
| - GetElapsedTimeMicroseconds(encode_start_ns_, encode_stop_ns); |
| - stat.encoding_successful = true; |
| - stat.encoded_frame_length_in_bytes = encoded_image._length; |
| - stat.frame_number = frame_number; |
| - stat.frame_type = encoded_image._frameType; |
| - stat.qp = encoded_image.qp_; |
| - stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_; |
| - stat.total_packets = |
| + // Ensure strict monotonicity. |
| + RTC_CHECK_GT(frame_number, last_encoded_frame_num_); |
| + last_encoded_frame_num_ = frame_number; |
| + |
| + // Frame is not dropped, so update frame information and statistics. |
| + RTC_CHECK_LT(frame_number, frame_infos_.size()); |
| + FrameInfo& frame_info = frame_infos_[frame_number]; |
|
sprang_webrtc
2017/03/06 10:28:25
Use pointer instead of non-const ref.
brandtr
2017/03/06 15:27:31
Done.
|
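The shape the reviewer asks for (a sketch; only the binding changes, the assignments stay the same):

    FrameInfo* frame_info = &frame_infos_[frame_number];
    frame_info->encoded_frame_size = encoded_image._length;
    frame_info->encoded_frame_type = encoded_image._frameType;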
| + frame_info.encoded_frame_size = encoded_image._length; |
| + frame_info.encoded_frame_type = encoded_image._frameType; |
| + FrameStatistic& frame_stat = stats_->stats_[frame_number]; |
|
sprang_webrtc
2017/03/06 10:28:24
dito
brandtr
2017/03/06 15:27:32
Done.
|
| + frame_stat.encode_time_in_us = |
| + GetElapsedTimeMicroseconds(frame_info.encode_start_ns, encode_stop_ns); |
|
sprang_webrtc
2017/03/06 10:28:25
GetElapsedTimeMicroseconds(), but all the variables are named _ns?
brandtr
2017/03/06 15:27:32
Only the input variables are _ns; the output variable is in microseconds.
sprang_webrtc
2017/03/06 17:21:38
Right, I misread. Twice :)
|
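To make the units convention discussed above concrete: the inputs are nanosecond timestamps and the result is an elapsed time in microseconds. A sketch of what GetElapsedTimeMicroseconds() presumably computes (the name ElapsedMicroseconds and the constant rtc::kNumNanosecsPerMicrosec are assumptions here; the member's actual body is not shown in this diff):

    int ElapsedMicroseconds(int64_t start_ns, int64_t stop_ns) {
      return static_cast<int>((stop_ns - start_ns) / rtc::kNumNanosecsPerMicrosec);
    }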
| + frame_stat.encoding_successful = true; |
| + frame_stat.encoded_frame_length_in_bytes = encoded_image._length; |
| + frame_stat.frame_number = frame_number; |
| + frame_stat.frame_type = encoded_image._frameType; |
| + frame_stat.qp = encoded_image.qp_; |
| + frame_stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_; |
| + frame_stat.total_packets = |
| encoded_image._length / config_.networking_config.packet_size_in_bytes + |
| 1; |
| @@ -357,9 +375,11 @@ void VideoProcessorImpl::FrameEncoded( |
| copied_image._size = copied_buffer_size; |
| copied_image._buffer = copied_buffer.get(); |
| + frame_info.manipulated_length = copied_image._length; |
| if (!exclude_this_frame) { |
| - stat.packets_dropped = |
| + frame_stat.packets_dropped = |
| packet_manipulator_->ManipulatePackets(&copied_image); |
| + frame_info.manipulated_length = copied_image._length; |
| } |
| // Keep track of if frames are lost due to packet loss so we can tell |
| @@ -369,24 +389,25 @@ void VideoProcessorImpl::FrameEncoded( |
| // For the highest measurement accuracy of the decode time, the start/stop |
| // time recordings should wrap the Decode call as tightly as possible. |
| - decode_start_ns_ = rtc::TimeNanos(); |
| + frame_info.decode_start_ns = rtc::TimeNanos(); |
| int32_t decode_result = |
| - decoder_->Decode(copied_image, last_frame_missing_, nullptr); |
| - stat.decode_return_code = decode_result; |
| + decoder_->Decode(copied_image, last_frame_missing, nullptr); |
| + frame_stat.decode_return_code = decode_result; |
| if (decode_result != WEBRTC_VIDEO_CODEC_OK) { |
| // Write the last successful frame to the output file to avoid getting it out |
| // of sync with the source file for SSIM and PSNR comparisons. |
| - RTC_CHECK(analysis_frame_writer_->WriteFrame( |
| - last_successful_frame_buffer_.get())); |
| + RTC_CHECK_EQ(last_decoded_frame_buffer_.size(), |
| + analysis_frame_writer_->FrameLength()); |
| + RTC_CHECK( |
| + analysis_frame_writer_->WriteFrame(last_decoded_frame_buffer_.data())); |
| if (decoded_frame_writer_) { |
| - RTC_CHECK(decoded_frame_writer_->WriteFrame( |
| - last_successful_frame_buffer_.get())); |
| + RTC_CHECK_EQ(last_decoded_frame_buffer_.size(), |
| + decoded_frame_writer_->FrameLength()); |
| + RTC_CHECK( |
| + decoded_frame_writer_->WriteFrame(last_decoded_frame_buffer_.data())); |
| } |
| } |
| - |
| - // Save status for losses so we can inform the decoder for the next frame. |
| - last_frame_missing_ = copied_image._length == 0; |
| } |
| void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) { |
| @@ -394,73 +415,77 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) { |
| // time recordings should wrap the Decode call as tightly as possible. |
| int64_t decode_stop_ns = rtc::TimeNanos(); |
| - // Report stats. |
| + // Update frame information and statistics. |
| int frame_number = image.timestamp() / k90khzTimestampFrameDiff - 1; |
| - FrameStatistic& stat = stats_->stats_[frame_number]; |
| - stat.decode_time_in_us = |
| - GetElapsedTimeMicroseconds(decode_start_ns_, decode_stop_ns); |
| - stat.decoding_successful = true; |
| - |
| - // Check for resize action (either down or up). |
| - if (static_cast<int>(image.width()) != last_encoder_frame_width_ || |
| - static_cast<int>(image.height()) != last_encoder_frame_height_) { |
| - ++num_spatial_resizes_; |
| - last_encoder_frame_width_ = image.width(); |
| - last_encoder_frame_height_ = image.height(); |
| + RTC_CHECK_LE(frame_number, frame_infos_.size()); |
|
åsapersson
2017/03/03 12:09:21
LT?
brandtr
2017/03/06 15:27:32
Yes!
|
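A sketch of the agreed fix: valid indices into frame_infos_ are [0, size()), so the bound should be strict, mirroring the check already used in FrameEncoded():

    RTC_CHECK_LT(frame_number, frame_infos_.size());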
| + FrameInfo& frame_info = frame_infos_[frame_number]; |
|
sprang_webrtc
2017/03/06 10:28:24
pointer
brandtr
2017/03/06 15:27:31
Done.
|
| + frame_info.decoded_width = image.width(); |
| + frame_info.decoded_height = image.height(); |
| + FrameStatistic& frame_stat = stats_->stats_[frame_number]; |
| + frame_stat.decode_time_in_us = |
| + GetElapsedTimeMicroseconds(frame_info.decode_start_ns, decode_stop_ns); |
|
sprang_webrtc
2017/03/06 10:28:25
units again?
brandtr
2017/03/06 15:27:32
Same rationale as above.
sprang_webrtc
2017/03/06 17:21:38
Acknowledged.
|
| + frame_stat.decoding_successful = true; |
| + |
| + // Check if the codecs have resized the frame since the previously decoded frame. |
| + if (frame_number > 0) { |
| + RTC_CHECK_GE(last_decoded_frame_num_, 0); |
| + const FrameInfo& last_decoded_frame_info = |
| + frame_infos_[last_decoded_frame_num_]; |
| + if (static_cast<int>(image.width()) != |
| + last_decoded_frame_info.decoded_width || |
| + static_cast<int>(image.height()) != |
| + last_decoded_frame_info.decoded_height) { |
| + ++num_spatial_resizes_; |
| + } |
| } |
| - // Check if codec size is different from native/original size, and if so, |
| - // upsample back to original size. This is needed for PSNR and SSIM |
| + // Ensure strict monotonicity. |
| + RTC_CHECK_GT(frame_number, last_decoded_frame_num_); |
| + last_decoded_frame_num_ = frame_number; |
| + |
| + // Check if codec size is different from the original size, and if so, |
| + // scale back to original size. This is needed for the PSNR and SSIM |
| // calculations. |
| + size_t extracted_length; |
| + rtc::Buffer extracted_buffer; |
| if (image.width() != config_.codec_settings->width || |
| image.height() != config_.codec_settings->height) { |
| - rtc::scoped_refptr<I420Buffer> up_image( |
| - I420Buffer::Create(config_.codec_settings->width, |
| - config_.codec_settings->height)); |
| - |
| + rtc::scoped_refptr<I420Buffer> scaled_buffer(I420Buffer::Create( |
| + config_.codec_settings->width, config_.codec_settings->height)); |
| // Should be the same aspect ratio, no cropping needed. |
| if (image.video_frame_buffer()->native_handle()) { |
| - up_image->ScaleFrom(*image.video_frame_buffer()->NativeToI420Buffer()); |
| + scaled_buffer->ScaleFrom( |
| + *image.video_frame_buffer()->NativeToI420Buffer()); |
| } else { |
| - up_image->ScaleFrom(*image.video_frame_buffer()); |
| + scaled_buffer->ScaleFrom(*image.video_frame_buffer()); |
| } |
| - // TODO(mikhal): Extracting the buffer for now - need to update test. |
| size_t length = |
| - CalcBufferSize(kI420, up_image->width(), up_image->height()); |
| - std::unique_ptr<uint8_t[]> image_buffer(new uint8_t[length]); |
| - int extracted_length = ExtractBuffer(up_image, length, image_buffer.get()); |
| - RTC_CHECK_GT(extracted_length, 0); |
| - // Update our copy of the last successful frame. |
| - memcpy(last_successful_frame_buffer_.get(), image_buffer.get(), |
| - extracted_length); |
| - |
| - RTC_CHECK(analysis_frame_writer_->WriteFrame(image_buffer.get())); |
| - if (decoded_frame_writer_) { |
| - RTC_CHECK(decoded_frame_writer_->WriteFrame(image_buffer.get())); |
| - } |
| - } else { // No resize. |
| - // Update our copy of the last successful frame. |
| - // TODO(mikhal): Add as a member function, so won't be allocated per frame. |
| + CalcBufferSize(kI420, scaled_buffer->width(), scaled_buffer->height()); |
| + extracted_buffer.SetSize(length); |
| + extracted_length = |
| + ExtractBuffer(scaled_buffer, length, extracted_buffer.data()); |
| + } else { |
| + // No resize. |
| size_t length = CalcBufferSize(kI420, image.width(), image.height()); |
| - std::unique_ptr<uint8_t[]> image_buffer(new uint8_t[length]); |
| - int extracted_length; |
| + extracted_buffer.SetSize(length); |
| if (image.video_frame_buffer()->native_handle()) { |
| extracted_length = |
| ExtractBuffer(image.video_frame_buffer()->NativeToI420Buffer(), |
| - length, image_buffer.get()); |
| + length, extracted_buffer.data()); |
| } else { |
| - extracted_length = |
| - ExtractBuffer(image.video_frame_buffer(), length, image_buffer.get()); |
| + extracted_length = ExtractBuffer(image.video_frame_buffer(), length, |
| + extracted_buffer.data()); |
| } |
| - RTC_CHECK_GT(extracted_length, 0); |
| - memcpy(last_successful_frame_buffer_.get(), image_buffer.get(), |
| - extracted_length); |
| + } |
| - RTC_CHECK(analysis_frame_writer_->WriteFrame(image_buffer.get())); |
| - if (decoded_frame_writer_) { |
| - RTC_CHECK(decoded_frame_writer_->WriteFrame(image_buffer.get())); |
| - } |
| + RTC_CHECK_EQ(extracted_length, analysis_frame_writer_->FrameLength()); |
| + RTC_CHECK(analysis_frame_writer_->WriteFrame(extracted_buffer.data())); |
| + if (decoded_frame_writer_) { |
| + RTC_CHECK_EQ(extracted_length, decoded_frame_writer_->FrameLength()); |
| + RTC_CHECK(decoded_frame_writer_->WriteFrame(extracted_buffer.data())); |
| } |
| + |
| + last_decoded_frame_buffer_ = std::move(extracted_buffer); |
| } |
| int VideoProcessorImpl::GetElapsedTimeMicroseconds(int64_t start, |