Chromium Code Reviews

Unified Diff: webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc

Issue 1645543003: H264: Improve FFmpeg decoder performance by using I420BufferPool. (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: common_video not unnecessarily depending on webrtc_h264, fixed circular dependency. Created 4 years, 10 months ago.
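
For context on the change under review: the patch replaces the per-call CreateEmptyFrame()/memset() allocation in AVGetBuffer2 with buffers drawn from a webrtc::I420BufferPool member (|pool_|). Below is a minimal sketch of that pool usage, assuming the I420BufferPool header path and the VideoFrame::set_video_frame_buffer() setter from this era of WebRTC; the CreatePooledFrame() helper is illustrative only and not part of the patch:

#include "webrtc/common_video/include/i420_buffer_pool.h"  // Assumed path.
#include "webrtc/video_frame.h"                            // Assumed path.

// Passing |true| asks the pool to zero-initialize newly allocated buffers,
// matching the FFmpeg requirement referenced in the patch
// (http://crbug.com/390941). Recycled buffers are handed out as-is.
webrtc::I420BufferPool pool(true);

// Illustrative helper: hand out a VideoFrame backed by a pooled I420 buffer.
webrtc::VideoFrame* CreatePooledFrame(int width, int height) {
  webrtc::VideoFrame* frame = new webrtc::VideoFrame();
  // CreateBuffer() reuses a free buffer of matching size when one is
  // available and allocates a new one otherwise. The returned buffer is
  // ref counted, so the pool can recycle it once all references are released.
  frame->set_video_frame_buffer(pool.CreateBuffer(width, height));
  return frame;
}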
 /*
  *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS. All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  *
  */
(...skipping 58 matching lines...)
       RTC_NOTREACHED() << "av_lockmgr_register failed.";
       return;
     }
     av_register_all();
     ffmpeg_initialized = true;
   }
 }
 
 #endif  // defined(WEBRTC_INITIALIZE_FFMPEG)
 
-// Called by FFmpeg when it is done with a frame buffer, see AVGetBuffer2.
-void AVFreeBuffer2(void* opaque, uint8_t* data) {
-  VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
-  delete video_frame;
-}
-
-// Called by FFmpeg when it needs a frame buffer to store decoded frames in.
-// The VideoFrames returned by FFmpeg at |Decode| originate from here. They are
-// reference counted and freed by FFmpeg using |AVFreeBuffer2|.
-// TODO(hbos): Use a frame pool for better performance instead of create/free.
-// Could be owned by decoder, |static_cast<H264DecoderImpl*>(context->opaque)|.
-// Consider verifying that the buffer was allocated by us to avoid unsafe type
-// cast. See https://bugs.chromium.org/p/webrtc/issues/detail?id=5428.
-int AVGetBuffer2(AVCodecContext* context, AVFrame* av_frame, int flags) {
-  RTC_CHECK_EQ(context->pix_fmt, kPixelFormat);  // Same as in InitDecode.
+}  // namespace
+
+int H264DecoderImpl::AVGetBuffer2(
+    AVCodecContext* context, AVFrame* av_frame, int flags) {
+  // Set in |InitDecode|.
+  H264DecoderImpl* decoder = static_cast<H264DecoderImpl*>(context->opaque);
+  // DCHECK values set in |InitDecode|.
+  RTC_DCHECK(decoder);
+  RTC_DCHECK_EQ(context->pix_fmt, kPixelFormat);
   // Necessary capability to be allowed to provide our own buffers.
-  RTC_CHECK(context->codec->capabilities | AV_CODEC_CAP_DR1);
+  RTC_DCHECK(context->codec->capabilities | AV_CODEC_CAP_DR1);
+
   // |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
   // actual image's dimensions and may be different from |context->width| and
   // |context->coded_width| due to reordering.
   int width = av_frame->width;
   int height = av_frame->height;
   // See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
   // has implications on which resolutions are valid, but we don't use it.
   RTC_CHECK_EQ(context->lowres, 0);
   // Adjust the |width| and |height| to values acceptable by the decoder.
   // Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
   // |height| are larger than the actual image and the image has to be cropped
   // (top-left corner) after decoding to avoid visible borders to the right and
   // bottom of the actual image.
   avcodec_align_dimensions(context, &width, &height);
 
   RTC_CHECK_GE(width, 0);
   RTC_CHECK_GE(height, 0);
   int ret = av_image_check_size(static_cast<unsigned int>(width),
                                 static_cast<unsigned int>(height), 0, nullptr);
   if (ret < 0) {
     LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
     return ret;
   }
 
   // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
   // of a video frame and will be set up to reference |video_frame|'s buffers.
   VideoFrame* video_frame = new VideoFrame();
-  int stride_y = width;
-  int stride_uv = (width + 1) / 2;
-  RTC_CHECK_EQ(0, video_frame->CreateEmptyFrame(
-      width, height, stride_y, stride_uv, stride_uv));
-  int total_size = video_frame->allocated_size(kYPlane) +
-                   video_frame->allocated_size(kUPlane) +
-                   video_frame->allocated_size(kVPlane);
-  RTC_DCHECK_EQ(total_size, stride_y * height +
-                (stride_uv + stride_uv) * ((height + 1) / 2));
-
   // FFmpeg expects the initial allocation to be zero-initialized according to
-  // http://crbug.com/390941.
-  // Using a single |av_frame->buf| - YUV is required to be a continuous blob of
-  // memory. We can zero-initialize with one memset operation for all planes.
+  // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
+  video_frame->set_video_frame_buffer(
+      decoder->pool_.CreateBuffer(width, height));
+  // DCHECK that we have a continuous buffer as is required.
   RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
       video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
   RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
       video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
-  memset(video_frame->buffer(kYPlane), 0, total_size);
+  int total_size = video_frame->allocated_size(kYPlane) +
+                   video_frame->allocated_size(kUPlane) +
+                   video_frame->allocated_size(kVPlane);
 
   av_frame->format = context->pix_fmt;
   av_frame->reordered_opaque = context->reordered_opaque;
 
   // Set |av_frame| members as required by FFmpeg.
   av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
   av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
   av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
   av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
   av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
   av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
   RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
 
   av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
                                       total_size,
                                       AVFreeBuffer2,
                                       static_cast<void*>(video_frame),
                                       0);
   RTC_CHECK(av_frame->buf[0]);
   return 0;
 }
 
-}  // namespace
+void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
+  // The buffer pool recycles the buffer used by |video_frame| when there are no
+  // more references to it. |video_frame| is a thin buffer holder and is not
+  // recycled.
+  VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
+  delete video_frame;
+}
 
-H264DecoderImpl::H264DecoderImpl()
-    : decoded_image_callback_(nullptr) {
+H264DecoderImpl::H264DecoderImpl() : pool_(true),
+                                     decoded_image_callback_(nullptr) {
 }
 
 H264DecoderImpl::~H264DecoderImpl() {
   Release();
 }
 
 int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
                                     int32_t number_of_cores) {
   if (codec_settings &&
       codec_settings->codecType != kVideoCodecH264) {
(...skipping 24 matching lines...)
   av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
   av_context_->codec_id = AV_CODEC_ID_H264;
   if (codec_settings) {
     av_context_->coded_width = codec_settings->width;
     av_context_->coded_height = codec_settings->height;
   }
   av_context_->pix_fmt = kPixelFormat;
   av_context_->extradata = nullptr;
   av_context_->extradata_size = 0;
 
+  // If this is ever increased, look at |av_context_->thread_safe_callbacks| and
+  // make it possible to disable the thread checker in the frame buffer pool.
   av_context_->thread_count = 1;
   av_context_->thread_type = FF_THREAD_SLICE;
 
-  // FFmpeg will get video buffers from our AVGetBuffer2, memory managed by us.
+  // Function used by FFmpeg to get buffers to store decoded frames in.
   av_context_->get_buffer2 = AVGetBuffer2;
-  // get_buffer2 is called with the context, there |opaque| can be used to get a
-  // pointer |this|.
+  // |get_buffer2| is called with the context, there |opaque| can be used to get
+  // a pointer |this|.
   av_context_->opaque = this;
   // Use ref counted frames (av_frame_unref).
   av_context_->refcounted_frames = 1;  // true
 
   AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
   if (!codec) {
     // This is an indication that FFmpeg has not been initialized or it has not
     // been compiled/initialized with the correct set of codecs.
     LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
     Release();
(...skipping 125 matching lines...)
     return ret;
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 bool H264DecoderImpl::IsInitialized() const {
   return av_context_ != nullptr;
 }
 
 }  // namespace webrtc
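
A note on the lifetime contract the patch relies on: av_buffer_create() wraps memory we own in a ref-counted AVBufferRef, and the supplied free callback runs only once the last reference is dropped. That is the point where AVFreeBuffer2 deletes the VideoFrame holder, releasing its reference to the pooled buffer so the pool can recycle it. A standalone sketch of that FFmpeg API (not part of the CL; the Holder struct and printed message are illustrative):

extern "C" {
#include <libavutil/buffer.h>
}
#include <cstdint>
#include <cstdio>

struct Holder {
  // In the real code this role is played by a webrtc::VideoFrame that keeps a
  // reference to the pooled I420 buffer alive.
  uint8_t storage[64];
};

static void FreeHolder(void* opaque, uint8_t* /*data*/) {
  // Runs when the last AVBufferRef is released; deleting the holder is what
  // lets a buffer pool recycle the underlying memory.
  std::printf("last AVBufferRef released\n");
  delete static_cast<Holder*>(opaque);
}

int main() {
  Holder* holder = new Holder();
  AVBufferRef* buf = av_buffer_create(holder->storage, sizeof(holder->storage),
                                      FreeHolder, holder, 0);
  AVBufferRef* second_ref = av_buffer_ref(buf);  // e.g. FFmpeg keeping a ref.
  av_buffer_unref(&buf);         // Holder still alive: one reference remains.
  av_buffer_unref(&second_ref);  // Now FreeHolder() runs.
  return 0;
}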