Index: webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
index e788f23b9dea1e3de62dcfb94c3caeb337600e51..2d1c5eaf86b7a011a1d4e5c7a9701467eb0a9485 100644 |
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc |
@@ -16,6 +16,8 @@ |
#include <limits> |
#include <vector> |
+#include "webrtc/base/checks.h" |
+#include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h" |
#include "webrtc/system_wrappers/interface/cpu_info.h" |
namespace webrtc { |
@@ -216,7 +218,9 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) { |
} |
} |
-void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) { |
+void VideoProcessorImpl::FrameEncoded( |
+ const EncodedImage& encoded_image, |
+ const webrtc::RTPFragmentationHeader* fragmentation) { |
// Timestamp is frame number, so this gives us #dropped frames. |
int num_dropped_from_prev_encode = encoded_image._timeStamp - |
prev_time_stamp_ - 1; |
@@ -264,12 +268,37 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) { |
assert(false); |
} |
} |
- rtc::scoped_ptr<uint8_t[]> copied_buffer(new uint8_t[encoded_image._length]); |
- memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length); |
+ |
+ // The image to feed to the decoder. |
EncodedImage copied_image; |
memcpy(&copied_image, &encoded_image, sizeof(copied_image)); |
- copied_image._size = copied_image._length; |
- copied_image._buffer = copied_buffer.get(); |
+ rtc::scoped_ptr<uint8_t[]> copied_buffer; |
+ bool copied_image_done = false; |
+#if defined(WEBRTC_OPENH264) |
stefan-webrtc
2015/09/28 11:19:02
Do you need this ifdef if you have the if statement below?
hbos
2015/09/30 15:35:19
(Removed, no longer needed due to including start codes in the encoded image.)
|
+ if (config_.codec_settings->codecType == kVideoCodecH264 && |
+ H264Encoder::IsSupportedOpenH264()) { |
+ // H264 with OpenH264 encoder (H264EncoderImpl): Defragmentization necessary |
+ // for decoder to be able to decode the EncodedImage. |
+ size_t copied_buffer_size = |
+ H264EncoderImpl::RTPDefragmentizeBufferLengthWithNAL(encoded_image, |
stefan-webrtc
2015/09/28 11:19:02
Code should probably live in this test?
hbos
2015/09/30 15:35:19
(Removed, no longer needed due to including start codes in the encoded image.)
|
+ fragmentation); |
+ copied_buffer.reset(new uint8_t[copied_buffer_size]); |
+ H264EncoderImpl::RTPDefragmentize( |
+ encoded_image, fragmentation, copied_buffer.get(), copied_buffer_size); |
+ copied_image._size = copied_buffer_size; |
+ copied_image._length = copied_buffer_size; |
+ copied_image._buffer = copied_buffer.get(); |
+ copied_image_done = true; |
+ } |
+#endif |
+ if (!copied_image_done) { |
+ // Make a raw copy of the |encoded_image| buffer. |
+ copied_buffer.reset(new uint8_t[encoded_image._length]); |
+ memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length); |
+ copied_image._size = copied_image._length; |
+ copied_image._buffer = copied_buffer.get(); |
+ } |
+ |
if (!exclude_this_frame) { |
stat.packets_dropped = |
packet_manipulator_->ManipulatePackets(&copied_image); |
@@ -401,12 +430,14 @@ VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded( |
const EncodedImage& encoded_image, |
const webrtc::CodecSpecificInfo* codec_specific_info, |
const webrtc::RTPFragmentationHeader* fragmentation) { |
- video_processor_->FrameEncoded(encoded_image); // Forward to parent class. |
+ // Forward to parent class. |
+ video_processor_->FrameEncoded(encoded_image, fragmentation); |
return 0; |
} |
int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded( |
VideoFrame& image) { |
- video_processor_->FrameDecoded(image); // forward to parent class |
+ // Forward to parent class. |
+ video_processor_->FrameDecoded(image); |
return 0; |
} |