Chromium Code Reviews

Side by Side Diff: webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc

Issue 1306813009: H.264 video codec support using OpenH264/FFmpeg (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Addressed noahric's comments + minor fixes Created 4 years, 11 months ago
1 /*
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 *
10 */
11
12 #include "webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h"
13
14 #include <algorithm>
15
16 extern "C" {
17 #include "third_party/ffmpeg/libavcodec/avcodec.h"
18 #include "third_party/ffmpeg/libavformat/avformat.h"
19 #include "third_party/ffmpeg/libavutil/imgutils.h"
20 } // extern "C"
21
22 #include "webrtc/base/checks.h"
23 #include "webrtc/base/criticalsection.h"
24 #include "webrtc/base/logging.h"
25
26 namespace webrtc {
27
28 namespace {
29
30 static const AVPixelFormat kPixelFormat = AV_PIX_FMT_YUV420P;
31 static const size_t kYPlaneIndex = 0;
32 static const size_t kUPlaneIndex = 1;
33 static const size_t kVPlaneIndex = 2;
34
35 #if !defined(WEBRTC_CHROMIUM_BUILD)
36
37 static bool ffmpeg_initialized = false;
38
39 // Called by FFmpeg to do mutex operations if initialized via InitializeFFmpeg.
40 static int LockManagerOperation(void** lock, AVLockOp op)
41 EXCLUSIVE_LOCK_FUNCTION() UNLOCK_FUNCTION() {
42 switch (op) {
43 case AV_LOCK_CREATE:
44 *lock = new rtc::CriticalSection();
45 return 0;
46 case AV_LOCK_OBTAIN:
47 static_cast<rtc::CriticalSection*>(*lock)->Enter();
48 return 0;
49 case AV_LOCK_RELEASE:
50 static_cast<rtc::CriticalSection*>(*lock)->Leave();
51 return 0;
52 case AV_LOCK_DESTROY:
53 delete static_cast<rtc::CriticalSection*>(*lock);
54 *lock = nullptr;
55 return 0;
56 }
57 return 1;
58 }
59
60 // TODO(hbos): Assumed to be called on a single thread. Should DCHECK that
61 // InitializeFFmpeg is only called on one thread or make it thread safe.
62 static bool InitializeFFmpeg() {
63 if (!ffmpeg_initialized) {
64 if (av_lockmgr_register(LockManagerOperation) < 0) {
65 LOG(LS_ERROR) << "av_lockmgr_register failed.";
66 return false;
67 }
68 av_register_all();
69 ffmpeg_initialized = true;
70 }
71 return true;
72 }
73
74 #endif // !defined(WEBRTC_CHROMIUM_BUILD)
75
76 static int NumberOfThreads(int width, int height, int number_of_cores) {
77 if (width * height >= 1920 * 1080 && number_of_cores > 8) {
78 return 8; // 8 threads for 1080p on high perf machines.
79 } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
80 // 3 threads for 1080p.
81 return 3;
stefan-webrtc 2016/01/08 09:25:04 Comment on the same line as above? Same below.
hbos 2016/01/11 16:21:53 Done.
82 } else if (width * height > 640 * 480 && number_of_cores >= 3) {
83 // 2 threads for qHD/HD.
84 return 2;
85 } else {
86 // 1 thread for VGA or less.
87 return 1;
88 }
89 }
90
91 // Called by FFmpeg when it is done with a frame buffer, see AVGetBuffer2.
92 static void AVFreeBuffer2(void* opaque, uint8_t* data) {
93 VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
94 delete video_frame;
95 }
96
97 // Called by FFmpeg when it needs a frame buffer to store decoded frames in.
98 // The VideoFrames returned by FFmpeg at Decode originate from here. They are
99 // reference counted and freed by FFmpeg using AVFreeBuffer2.
100 // TODO(hbos): Use a frame pool for better performance instead of create/free.
101 // Could be owned by decoder, static_cast<H264DecoderImpl*>(context->opaque).
102 static int AVGetBuffer2(AVCodecContext* context, AVFrame* av_frame, int flags) {
103 RTC_CHECK_EQ(context->pix_fmt, kPixelFormat); // Same as in InitDecode.
104 // width/height and coded_width/coded_height can be different due to cropping
105 // or |lowres|.
106 int width = std::max(context->width, context->coded_width);
107 int height = std::max(context->height, context->coded_height);
108 // See |lowres|: if used, the decoder scales the image by 1/2^(lowres). This
109 // has implications on which resolutions are valid, but we don't use it.
110 RTC_CHECK_EQ(context->lowres, 0);
111
112 RTC_CHECK_GE(width, 0);
113 RTC_CHECK_GE(height, 0);
114 int ret = av_image_check_size(width, height, 0, nullptr);
115 if (ret < 0) {
116 LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
117 return ret;
118 }
119
120 // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
121 // of a video frame and will be set up to reference |video_frame|'s buffers.
122 VideoFrame* video_frame = new VideoFrame();
123 int stride_y = width;
124 int stride_uv = (width + 1) / 2;
125 RTC_CHECK_EQ(0, video_frame->CreateEmptyFrame(
126 width, height, stride_y, stride_uv, stride_uv));
127 size_t total_size = video_frame->allocated_size(kYPlane) +
128 video_frame->allocated_size(kUPlane) +
129 video_frame->allocated_size(kVPlane);
130 RTC_DCHECK_EQ(total_size, static_cast<size_t>(stride_y * height +
131 (stride_uv + stride_uv) * ((height + 1) / 2)));
132 // FFmpeg note: "Each data plane must be aligned to the maximum required by
133 // the target CPU." See get_buffer2.
134 // TODO(hbos): Memory alignment on a per-plane basis. CreateEmptyFrame only
135 // guarantees that the buffer of all planes is memory aligned, not each
136 // individual plane. Or does "data plane" here refer to one data[] entry or
137 // one allocation?
138
139 // FFmpeg expects the initial allocation to be zero-initialized according to
140 // http://crbug.com/390941.
141 // Using a single |av_frame->buf|: YUV is required to be a contiguous blob of
142 // memory, so we can zero-initialize all planes with one memset operation.
143 RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
144 video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
145 RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
146 video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
147 memset(video_frame->buffer(kYPlane), 0, total_size);
148
149 RTC_DCHECK_EQ(av_frame->width, width);
150 RTC_DCHECK_EQ(av_frame->height, height);
151 av_frame->format = context->pix_fmt;
152 av_frame->reordered_opaque = context->reordered_opaque;
153
154 // Set |av_frame| members as required by FFmpeg.
155 av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
156 av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
157 av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
158 av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
159 av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
160 av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
161 RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
162
163 av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
164 total_size,
165 AVFreeBuffer2,
166 static_cast<void*>(video_frame),
167 0);
168 RTC_CHECK(av_frame->buf[0]);
169 return 0;
170 }
171
172 } // namespace
173
174 H264DecoderImpl::H264DecoderImpl()
175 : decoded_image_callback_(nullptr) {
176 }
177
178 H264DecoderImpl::~H264DecoderImpl() {
179 Release();
180 }
181
182 int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
183 int32_t number_of_cores) {
184 if (codec_settings &&
185 codec_settings->codecType != kVideoCodecH264) {
186 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
187 }
188
189 // In Chromium, FFmpeg will be initialized outside of WebRTC and we should not
190 // attempt to do so ourselves or it will be initialized twice.
191 // TODO(hbos): Put behind a different flag in case a non-Chromium project wants
192 // to initialize FFmpeg externally.
193 #if !defined(WEBRTC_CHROMIUM_BUILD)
194 // Make sure FFmpeg has been initialized.
195 InitializeFFmpeg();
196 #endif
197
198 // Release necessary in case of re-initializing.
199 int32_t ret = Release();
200 if (ret != WEBRTC_VIDEO_CODEC_OK)
201 return ret;
202 RTC_DCHECK(!av_context_);
203
204 // Initialize AVCodecContext.
205 av_context_.reset(avcodec_alloc_context3(nullptr));
206
207 av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
208 av_context_->codec_id = AV_CODEC_ID_H264;
209 if (codec_settings) {
210 av_context_->coded_width = codec_settings->width;
211 av_context_->coded_height = codec_settings->height;
212 }
213 av_context_->pix_fmt = kPixelFormat;
214 av_context_->extradata = nullptr;
215 av_context_->extradata_size = 0;
216
217 av_context_->thread_count = NumberOfThreads(av_context_->coded_width,
218 av_context_->coded_height,
219 number_of_cores);
220 av_context_->thread_type = FF_THREAD_SLICE;
221
222 // FFmpeg will get video buffers from our AVGetBuffer2, memory managed by us.
223 av_context_->get_buffer2 = AVGetBuffer2;
224 // get_buffer2 is called with the context, where |opaque| can be used to get a
225 // pointer to |this|.
226 av_context_->opaque = this;
227 // Use ref counted frames (av_frame_unref).
228 av_context_->refcounted_frames = 1; // true
229
230 AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
231 if (!codec) {
232 // This is an indication that FFmpeg has not been initialized or it has not
233 // been compiled/initialized with the correct set of codecs.
234 LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
235 Release();
236 return WEBRTC_VIDEO_CODEC_ERROR;
237 }
238 int res = avcodec_open2(av_context_.get(), codec, nullptr);
239 if (res < 0) {
240 LOG(LS_ERROR) << "avcodec_open2 error: " << res;
241 Release();
242 return WEBRTC_VIDEO_CODEC_ERROR;
243 }
244
245 av_frame_.reset(av_frame_alloc());
246 return WEBRTC_VIDEO_CODEC_OK;
247 }
248
249 int32_t H264DecoderImpl::Release() {
250 av_context_.reset();
251 av_frame_.reset();
252 return WEBRTC_VIDEO_CODEC_OK;
253 }
254
255 int32_t H264DecoderImpl::Reset() {
256 if (!IsInitialized())
257 return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
258 InitDecode(nullptr, 1);
259 return WEBRTC_VIDEO_CODEC_OK;
260 }
261
262 int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
263 DecodedImageCallback* callback) {
264 decoded_image_callback_ = callback;
265 return WEBRTC_VIDEO_CODEC_OK;
266 }
267
268 int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
269 bool /*missing_frames*/,
270 const RTPFragmentationHeader* /*fragmentation*/,
271 const CodecSpecificInfo* codec_specific_info,
272 int64_t /*render_time_ms*/) {
273 if (!IsInitialized())
274 return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
275 if (!decoded_image_callback_) {
276 LOG(LS_WARNING) << "InitDecode() has been called, but a callback function "
277 "has not been set with RegisterDecodeCompleteCallback()";
278 return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
279 }
280 if (!input_image._buffer || !input_image._length)
281 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
282 if (codec_specific_info &&
283 codec_specific_info->codecType != kVideoCodecH264) {
284 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
285 }
286
287 AVPacket packet;
288 av_init_packet(&packet);
289 // TODO(hbos): "The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger
290 // than the actual read bytes because some optimized bitstream readers read 32
291 // or 64 bits at once and could read over the end." See avcodec_decode_video2.
292 // - Is this an issue? Do we have to make sure EncodedImage is allocated with
293 // additional bytes or do we have to do an otherwise unnecessary copy? Might
294 // only be a problem with non-mul-32 frame widths?
295 // ("If the first 23 bits of the additional bytes are not 0, then damaged MPEG
296 // bitstreams could cause overread and segfault.")
297 packet.data = input_image._buffer;
298 packet.size = input_image._length;
299 av_context_->reordered_opaque = input_image.ntp_time_ms_ * 1000; // ms -> μs
300
301 int frame_decoded = 0;
302 int result = avcodec_decode_video2(av_context_.get(),
303 av_frame_.get(),
304 &frame_decoded,
305 &packet);
306 if (result < 0) {
307 LOG(LS_ERROR) << "avcodec_decode_video2 error: " << result;
308 return WEBRTC_VIDEO_CODEC_ERROR;
309 }
310 // |result| is the number of bytes used, which should be all of them.
311 if (result != packet.size) {
312 LOG(LS_ERROR) << "avcodec_decode_video2 consumed " << result << " bytes "
313 "when " << packet.size << " bytes were expected.";
314 return WEBRTC_VIDEO_CODEC_ERROR;
315 }
316
317 if (!frame_decoded) {
318 LOG(LS_WARNING) << "avcodec_decode_video2 successful but no frame was "
319 "decoded.";
320 return WEBRTC_VIDEO_CODEC_OK;
321 }
322
323 // Obtain the |video_frame| containing the decoded image.
324 VideoFrame* video_frame = static_cast<VideoFrame*>(
325 av_buffer_get_opaque(av_frame_->buf[0]));
326 RTC_DCHECK(video_frame);
327 RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
328 RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
329 RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
330 video_frame->set_timestamp(input_image._timeStamp);
331
332 // Return decoded frame.
333 int32_t ret = decoded_image_callback_->Decoded(*video_frame);
334 // Stop referencing it, possibly freeing |video_frame|.
335 av_frame_unref(av_frame_.get());
336 video_frame = nullptr;
337
338 if (ret) {
339 LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
340 return ret;
341 }
342 return WEBRTC_VIDEO_CODEC_OK;
343 }
344
345 bool H264DecoderImpl::IsInitialized() const {
346 return av_context_ != nullptr;
347 }
348
349 } // namespace webrtc
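
For context, a minimal sketch of how a client might drive H264DecoderImpl through the interface exercised in this CL. The callback type, EncodedImage fields, and return codes come from the diff above; the sample codec settings, the encoded input buffer, and the DecodeOneFrame helper are illustrative assumptions, not part of the patch.

// Sketch only (not part of this CL): a hypothetical caller of H264DecoderImpl.
// Assumes the WebRTC types pulled in transitively by h264_decoder_impl.h; the
// buffer and settings values below are placeholders.
#include <stdint.h>
#include <string.h>

#include "webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h"

namespace {

// Receives each decoded VideoFrame produced by H264DecoderImpl::Decode.
class SinkCallback : public webrtc::DecodedImageCallback {
 public:
  int32_t Decoded(webrtc::VideoFrame& frame) override {
    // A real client would render or forward |frame| here. A non-zero return
    // value is propagated out of H264DecoderImpl::Decode (see above).
    return 0;
  }
};

}  // namespace

int32_t DecodeOneFrame(uint8_t* encoded_data, size_t encoded_length) {
  webrtc::H264DecoderImpl decoder;

  // InitDecode only reads codecType, width and height from the settings.
  webrtc::VideoCodec settings;
  memset(&settings, 0, sizeof(settings));
  settings.codecType = webrtc::kVideoCodecH264;
  settings.width = 640;
  settings.height = 480;
  int32_t ret = decoder.InitDecode(&settings, /* number_of_cores */ 1);
  if (ret != WEBRTC_VIDEO_CODEC_OK)
    return ret;

  // A callback must be registered before Decode, otherwise Decode returns
  // WEBRTC_VIDEO_CODEC_UNINITIALIZED.
  SinkCallback callback;
  decoder.RegisterDecodeCompleteCallback(&callback);

  // Wrap the encoded H.264 bitstream; _timeStamp is copied onto the decoded
  // frame and ntp_time_ms_ feeds |reordered_opaque|.
  webrtc::EncodedImage input;
  input._buffer = encoded_data;
  input._length = encoded_length;
  input._timeStamp = 0;
  input.ntp_time_ms_ = 0;

  ret = decoder.Decode(input, /* missing_frames */ false,
                       /* fragmentation */ nullptr,
                       /* codec_specific_info */ nullptr,
                       /* render_time_ms */ 0);
  decoder.Release();
  return ret;
}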