Index: webrtc/modules/video_coding/codecs/test/videoprocessor.cc
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index e788f23b9dea1e3de62dcfb94c3caeb337600e51..dd6bd65448d44fa35967ac17d22ce45d59e714a9 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -16,6 +16,8 @@
 #include <limits>
 #include <vector>
 
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h"
 #include "webrtc/system_wrappers/interface/cpu_info.h"
 
 namespace webrtc {
@@ -216,7 +218,9 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
   }
 }
 
-void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
+void VideoProcessorImpl::FrameEncoded(
+    const EncodedImage& encoded_image,
+    const webrtc::RTPFragmentationHeader* fragmentation) {
   // Timestamp is frame number, so this gives us #dropped frames.
   int num_dropped_from_prev_encode = encoded_image._timeStamp -
       prev_time_stamp_ - 1;
@@ -264,12 +268,31 @@
         assert(false);
     }
   }
-  rtc::scoped_ptr<uint8_t[]> copied_buffer(new uint8_t[encoded_image._length]);
-  memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length);
+
+  // The image to feed to the decoder.
   EncodedImage copied_image;
   memcpy(&copied_image, &encoded_image, sizeof(copied_image));
-  copied_image._size = copied_image._length;
-  copied_image._buffer = copied_buffer.get();
+  rtc::scoped_ptr<uint8_t[]> copied_buffer;
+  if (config_.codec_settings->codecType != kVideoCodecH264) {
+    // Make a raw copy of the |encoded_image| buffer.
+    copied_buffer.reset(new uint8_t[encoded_image._length]);
+    memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length);
+    copied_image._size = copied_image._length;
+    copied_image._buffer = copied_buffer.get();
+  } else {
+    // H264: Defragmentization is necessary for the decoder to be able to
+    // decode the EncodedImage.
+    size_t copied_buffer_size =
+        H264EncoderImpl::RTPDefragmentizeBufferLengthWithNAL(encoded_image,
+                                                             fragmentation);
+    copied_buffer.reset(new uint8_t[copied_buffer_size]);
+    H264EncoderImpl::RTPDefragmentize(
+        encoded_image, fragmentation, copied_buffer.get(), copied_buffer_size);
+    copied_image._size = copied_buffer_size;
+    copied_image._length = copied_buffer_size;
+    copied_image._buffer = copied_buffer.get();
+  }
+
   if (!exclude_this_frame) {
     stat.packets_dropped =
         packet_manipulator_->ManipulatePackets(&copied_image);
@@ -401,12 +424,14 @@ VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
     const EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codec_specific_info,
     const webrtc::RTPFragmentationHeader* fragmentation) {
-  video_processor_->FrameEncoded(encoded_image);  // Forward to parent class.
+  // Forward to parent class.
+  video_processor_->FrameEncoded(encoded_image, fragmentation);
   return 0;
 }
 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded(
     VideoFrame& image) {
-  video_processor_->FrameDecoded(image);  // forward to parent class
+  // Forward to parent class.
+  video_processor_->FrameDecoded(image);
   return 0;
 }
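
Note on the H264 branch in the FrameEncoded() hunk: a non-H264 |encoded_image| buffer can be deep-copied byte for byte, but an H264 encoder hands back NAL units described by a webrtc::RTPFragmentationHeader, while the decoder expects one contiguous Annex B byte stream. The sketch below shows the general shape of such a defragmentization step, assuming it amounts to prepending a start code to each fragment; BuildAnnexBStream and the stand-in FragmentationHeader struct are hypothetical illustrations, not the actual H264EncoderImpl::RTPDefragmentize implementation.

// Minimal sketch of NAL-unit defragmentization: rebuild an Annex B byte
// stream from a payload whose NAL units are described by per-fragment
// offsets and lengths. Hypothetical helper, not H264EncoderImpl's code.
#include <cstdint>
#include <vector>

// Stand-in for the offset/length arrays of webrtc::RTPFragmentationHeader.
struct FragmentationHeader {
  std::vector<size_t> offsets;  // fragmentationOffset
  std::vector<size_t> lengths;  // fragmentationLength
};

std::vector<uint8_t> BuildAnnexBStream(const uint8_t* payload,
                                       const FragmentationHeader& frag) {
  static const uint8_t kStartCode[] = {0x00, 0x00, 0x00, 0x01};
  std::vector<uint8_t> stream;
  for (size_t i = 0; i < frag.offsets.size(); ++i) {
    // Each NAL unit gets a 4-byte Annex B start code followed by its payload.
    stream.insert(stream.end(), kStartCode, kStartCode + sizeof(kStartCode));
    stream.insert(stream.end(), payload + frag.offsets[i],
                  payload + frag.offsets[i] + frag.lengths[i]);
  }
  return stream;
}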
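A follow-up note on the deep-copy pattern itself: memcpy(&copied_image, &encoded_image, sizeof(copied_image)) makes a shallow copy of the EncodedImage struct, and both branches then repoint copied_image._buffer at a freshly allocated buffer owned by |copied_buffer|. This way packet_manipulator_->ManipulatePackets() can mutate the copy (with stat.packets_dropped recording the damage) without corrupting the encoder's own output buffer.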
|