Index: webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index 75d2bfa73245a050aa9688e53b0f0fe1538884b0..4d1bd2d75b454252cbc6b58f6d8209b663e3c736 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -77,11 +77,7 @@ void InitializeFFmpeg() {
#endif // !defined(WEBRTC_CHROMIUM_BUILD)
-// Called by FFmpeg when it is done with a frame buffer, see AVGetBuffer2.
-void AVFreeBuffer2(void* opaque, uint8_t* data) {
- VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
- delete video_frame;
-}
+} // namespace
// Called by FFmpeg when it needs a frame buffer to store decoded frames in.
// The VideoFrames returned by FFmpeg at |Decode| originate from here. They are
@@ -90,10 +86,17 @@ void AVFreeBuffer2(void* opaque, uint8_t* data) {
// Could be owned by decoder, |static_cast<H264DecoderImpl*>(context->opaque)|.
stefan-webrtc
2016/02/01 14:02:09
Should this comment be removed now?
hbos
2016/02/02 16:13:03
Done.
// Consider verifying that the buffer was allocated by us to avoid unsafe type
// cast. See https://bugs.chromium.org/p/webrtc/issues/detail?id=5428.
-int AVGetBuffer2(AVCodecContext* context, AVFrame* av_frame, int flags) {
- RTC_CHECK_EQ(context->pix_fmt, kPixelFormat); // Same as in InitDecode.
+int H264DecoderImpl::AVGetBuffer2(
+ AVCodecContext* context, AVFrame* av_frame, int flags) {
+ H264DecoderImpl* decoder =
+ reinterpret_cast<H264DecoderImpl*>(context->opaque);
hbos
2016/01/29 10:16:56
To reinterpret_cast or static_cast, that is the question.
hbos
2016/02/02 16:13:03
static_cast, that is the answer.
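For reference, a minimal sketch of the cast being discussed, assuming |opaque| is assigned from a H264DecoderImpl* in |InitDecode| as the patch does below. Converting a void* back to the pointer type it was stored from is exactly what static_cast is for; reinterpret_cast also compiles here but is needlessly permissive:

    av_context_->opaque = this;  // Implicit H264DecoderImpl* -> void* in InitDecode.
    H264DecoderImpl* decoder =
        static_cast<H264DecoderImpl*>(context->opaque);  // Reverses the implicit conversion.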
+
+ // DCHECK values set in |InitDecode|.
+ RTC_DCHECK(decoder);
+ RTC_DCHECK_EQ(context->pix_fmt, kPixelFormat);
// Necessary capability to be allowed to provide our own buffers.
- RTC_CHECK(context->codec->capabilities | AV_CODEC_CAP_DR1);
+ RTC_DCHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);
+
// |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
// actual image's dimensions and may be different from |context->width| and
// |context->coded_width| due to reordering.
@@ -121,25 +124,18 @@ int AVGetBuffer2(AVCodecContext* context, AVFrame* av_frame, int flags) {
// The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
// of a video frame and will be set up to reference |video_frame|'s buffers.
VideoFrame* video_frame = new VideoFrame();
- int stride_y = width;
- int stride_uv = (width + 1) / 2;
- RTC_CHECK_EQ(0, video_frame->CreateEmptyFrame(
- width, height, stride_y, stride_uv, stride_uv));
- int total_size = video_frame->allocated_size(kYPlane) +
- video_frame->allocated_size(kUPlane) +
- video_frame->allocated_size(kVPlane);
- RTC_DCHECK_EQ(total_size, stride_y * height +
- (stride_uv + stride_uv) * ((height + 1) / 2));
-
// FFmpeg expects the initial allocation to be zero-initialized according to
- // http://crbug.com/390941.
- // Using a single |av_frame->buf| - YUV is required to be a continuous blob of
- // memory. We can zero-initialize with one memset operation for all planes.
+ // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
+ video_frame->set_video_frame_buffer(
+ decoder->pool_.CreateBuffer(width, height));
+ // DCHECK that we have a contiguous buffer as is required.
RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
- memset(video_frame->buffer(kYPlane), 0, total_size);
stefan-webrtc
2016/02/01 14:02:09
The memset you did in the pool doesn't memset the
hbos
2016/02/02 16:13:03
That memset and this memset are the same. See prev
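A sketch of the equivalence hbos refers to, assuming |pool_| is a webrtc::I420BufferPool constructed with zero_initialize == true (consistent with |pool_(true)| below); the helper and size names here are illustrative, not actual pool API:

    // Hypothetical allocation path inside the pool: the three planes share one
    // contiguous allocation, so a single memset zero-initializes Y, U and V at
    // once - the same bytes the removed memset below covered.
    rtc::scoped_refptr<VideoFrameBuffer> buffer =
        AllocateNewI420Buffer(width, height);  // Illustrative name.
    memset(buffer->MutableData(kYPlane), 0, total_allocation_size);

Only fresh allocations need zeroing; recycled buffers were zeroed when first created, which is the premise of the reply above.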
+ int total_size = video_frame->allocated_size(kYPlane) +
+ video_frame->allocated_size(kUPlane) +
+ video_frame->allocated_size(kVPlane);
av_frame->format = context->pix_fmt;
av_frame->reordered_opaque = context->reordered_opaque;
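Between these hunks (not shown in the review diff), |total_size| is presumably what ties the pieces together: the frame memory is handed to FFmpeg wrapped in an AVBufferRef whose free callback is AVFreeBuffer2. A sketch of that wiring, inferred from the surrounding code rather than quoted from the patch:

    // |video_frame| rides along as the buffer's |opaque| and is handed back to
    // AVFreeBuffer2 when FFmpeg releases its last reference to the buffer.
    av_frame->buf[0] = av_buffer_create(video_frame->buffer(kYPlane),
                                        total_size,
                                        AVFreeBuffer2,
                                        video_frame,
                                        0);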
@@ -162,10 +158,14 @@ int AVGetBuffer2(AVCodecContext* context, AVFrame* av_frame, int flags) {
return 0;
}
-} // namespace
+// Called by FFmpeg when it is done with a frame buffer, see AVGetBuffer2.
+void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
+ VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
+ delete video_frame;
stefan-webrtc
2016/02/01 14:02:09
Seems odd that we delete frames when we are using
hbos
2016/02/02 16:13:03
Added a comment explaining why.
+}
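The comment hbos added presumably makes this point: with the pool in place, the VideoFrame is only a thin holder that shares ownership of the pooled buffer, so deleting it does not free the pixel memory. An illustrative sketch of the ownership, not code from the patch:

    rtc::scoped_refptr<VideoFrameBuffer> buf = video_frame->video_frame_buffer();
    delete video_frame;  // Drops one reference; the pool still holds its own,
                         // so the pixels stay alive and the buffer can be reused.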
-H264DecoderImpl::H264DecoderImpl()
- : decoded_image_callback_(nullptr) {
+H264DecoderImpl::H264DecoderImpl() : pool_(true),
+ decoded_image_callback_(nullptr) {
}
H264DecoderImpl::~H264DecoderImpl() {
@@ -208,13 +208,15 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
av_context_->extradata = nullptr;
av_context_->extradata_size = 0;
+ // If this is ever increased, look at |av_context_->thread_safe_callbacks| and
+ // make it possible to disable the thread checker in the frame buffer pool.
av_context_->thread_count = 1;
av_context_->thread_type = FF_THREAD_SLICE;
- // FFmpeg will get video buffers from our AVGetBuffer2, memory managed by us.
+ // Function used by FFmpeg to get buffers to store decoded frames in.
av_context_->get_buffer2 = AVGetBuffer2;
- // get_buffer2 is called with the context, there |opaque| can be used to get a
- // pointer |this|.
+ // |get_buffer2| is called with the context, where |opaque| can be used to
+ // get a pointer to |this|.
av_context_->opaque = this;
// Use ref counted frames (av_frame_unref).
av_context_->refcounted_frames = 1; // true
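For context, the round trip these settings establish, with FFmpeg's side paraphrased (condensed, not verbatim library internals):

    AVFrame* av_frame = av_frame_alloc();
    // During decoding FFmpeg effectively calls
    //   context->get_buffer2(context, av_frame, flags);  // H264DecoderImpl::AVGetBuffer2
    // and writes the decoded image into the pooled buffer. With
    // |refcounted_frames| set, releasing the last reference runs the free callback:
    av_frame_unref(av_frame);  // -> AVFreeBuffer2(video_frame, data)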