| Index: media/filters/ffmpeg_video_decoder.cc
|
| diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
|
| index 92028dce636ab97364e71c1d09651689e3b5dd99..8f842dbd4825e7946178941921a4c1523316f626 100644
|
| --- a/media/filters/ffmpeg_video_decoder.cc
|
| +++ b/media/filters/ffmpeg_video_decoder.cc
|
| @@ -16,6 +16,7 @@
|
| #include "base/location.h"
|
| #include "base/single_thread_task_runner.h"
|
| #include "base/strings/string_number_conversions.h"
|
| +#include "base/sys_info.h"
|
| #include "media/base/bind_to_current_loop.h"
|
| #include "media/base/decoder_buffer.h"
|
| #include "media/base/limits.h"
|
| @@ -25,6 +26,7 @@
|
| #include "media/base/video_util.h"
|
| #include "media/ffmpeg/ffmpeg_common.h"
|
| #include "media/filters/ffmpeg_glue.h"
|
| +#include "third_party/ffmpeg/libavutil/intreadwrite.h"
|
|
|
| namespace media {
|
|
|
| @@ -42,14 +44,27 @@ static const int kMaxDecodeThreads = 16;
|
|
|
| // Returns the number of threads given the FFmpeg CodecID. Also inspects the
|
| // command line for a valid --video-threads flag.
|
| -static int GetThreadCount(AVCodecID codec_id) {
|
| +static int GetThreadCount(const VideoDecoderConfig& config) {
|
| // Refer to http://crbug.com/93932 for tsan suppressions on decoding.
|
| int decode_threads = kDecodeThreads;
|
|
|
| const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
|
| std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
|
| - if (threads.empty() || !base::StringToInt(threads, &decode_threads))
|
| + if (threads.empty() || !base::StringToInt(threads, &decode_threads)) {
|
| + // TODO(chcunningham): Investigate perf with more threads.
|
| + if (config.codec() == kCodecVP9) {
|
| + // For VP9 decode when using the default thread count, increase the number
|
| + // of decode threads to equal the maximum number of tiles possible for
|
| + // higher resolution streams.
|
| + if (config.coded_size().width() >= 2048)
|
| + decode_threads = 8;
|
| + else if (config.coded_size().width() >= 1024)
|
| + decode_threads = 4;
|
| + }
|
| + decode_threads =
|
| + std::min(decode_threads, base::SysInfo::NumberOfProcessors());
|
| return decode_threads;
|
| + }
|
|
|
| decode_threads = std::max(decode_threads, 0);
|
| decode_threads = std::min(decode_threads, kMaxDecodeThreads);
|
| @@ -75,7 +90,7 @@ bool FFmpegVideoDecoder::IsCodecSupported(VideoCodec codec) {
|
| }
|
|
|
| FFmpegVideoDecoder::FFmpegVideoDecoder()
|
| - : state_(kUninitialized), decode_nalus_(false) {
|
| + : state_(kUninitialized), decode_nalus_(false), parser_context_(nullptr) {
|
| thread_checker_.DetachFromThread();
|
| }
|
|
|
| @@ -283,64 +298,88 @@ bool FFmpegVideoDecoder::FFmpegDecode(
|
| bool* has_produced_frame) {
|
| DCHECK(!*has_produced_frame);
|
|
|
| + size_t remaining_size = buffer->end_of_stream() ? 0 : buffer->data_size();
|
| +
|
| // Create a packet for input data.
|
| // Due to FFmpeg API changes we no longer have const read-only pointers.
|
| AVPacket packet;
|
| - av_init_packet(&packet);
|
| - if (buffer->end_of_stream()) {
|
| - packet.data = NULL;
|
| - packet.size = 0;
|
| - } else {
|
| - packet.data = const_cast<uint8_t*>(buffer->data());
|
| - packet.size = buffer->data_size();
|
|
|
| - // Let FFmpeg handle presentation timestamp reordering.
|
| - codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
|
| - }
|
| + do {
|
| + av_init_packet(&packet);
|
| + if (buffer->end_of_stream()) {
|
| + packet.data = NULL;
|
| + packet.size = 0;
|
| + } else {
|
| +      // FFvp9 cannot decode VP9 superframes (several frames packed into one
|
| +      // packet). FFmpeg's demuxer normally splits them, but ChunkDemuxer
|
| +      // delivers them unsplit, so run a parser here to split them ourselves.
|
| + if (codec_context_->codec_id == AV_CODEC_ID_VP9) {
|
| + if (!parser_context_) {
|
| + parser_context_.reset(av_parser_init(AV_CODEC_ID_VP9));
|
| + }
|
| +        // TODO(chcunningham): PTS/DTS appear mostly unused by the internal
|
| +        // parsing code, so pass AV_NOPTS_VALUE for now. Frames within a
|
| +        // superframe have no separate presentation times (like alt-ref), so
|
| +        // reusing the buffer's timestamp for each one should be acceptable.
|
| + int len = av_parser_parse2(
|
| + parser_context_.get(), codec_context_.get(), &packet.data,
|
| + &packet.size,
|
| + buffer->writable_data() + (buffer->data_size() - remaining_size),
|
| + remaining_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, -1);
|
| + remaining_size -= len;
|
| + } else {
|
| + packet.data = const_cast<uint8_t*>(buffer->data());
|
| + packet.size = buffer->data_size();
|
| + remaining_size = 0;
|
| + }
|
| +
|
| + // Let FFmpeg handle presentation timestamp reordering.
|
| + codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
|
| + }
|
|
|
| - int frame_decoded = 0;
|
| - int result = avcodec_decode_video2(codec_context_.get(),
|
| - av_frame_.get(),
|
| - &frame_decoded,
|
| - &packet);
|
| - // Log the problem if we can't decode a video frame and exit early.
|
| - if (result < 0) {
|
| - LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
|
| - return false;
|
| - }
|
| + int got_picture = 0;
|
| + int result = avcodec_decode_video2(codec_context_.get(), av_frame_.get(),
|
| + &got_picture, &packet);
|
| + // Log the problem if we can't decode a video frame and exit early.
|
| + if (result < 0) {
|
| + LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
|
| + return false;
|
| + }
|
|
|
| - // FFmpeg says some codecs might have multiple frames per packet. Previous
|
| - // discussions with rbultje@ indicate this shouldn't be true for the codecs
|
| - // we use.
|
| - DCHECK_EQ(result, packet.size);
|
| -
|
| - // If no frame was produced then signal that more data is required to
|
| - // produce more frames. This can happen under two circumstances:
|
| - // 1) Decoder was recently initialized/flushed
|
| - // 2) End of stream was reached and all internal frames have been output
|
| - if (frame_decoded == 0) {
|
| - return true;
|
| - }
|
| + // FFmpeg says some codecs might have multiple frames per packet. Previous
|
| + // discussions with rbultje@ indicate this shouldn't be true for the codecs
|
| + // we use.
|
| + DCHECK_EQ(result, packet.size);
|
| +
|
| + // If no picture was produced then signal that more data is required to
|
| + // produce more frames. This can happen under three circumstances:
|
| + // 1) Decoder was recently initialized/flushed
|
| + // 2) End of stream was reached and all internal frames have been output
|
| + // 3) The frame is needed for decoding dependencies, but is not rendered
|
| + if (got_picture == 0) {
|
| + continue;
|
| + }
|
|
|
| - // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
|
| - // The decoder is in a bad state and not decoding correctly.
|
| - // Checking for NULL avoids a crash in CopyPlane().
|
| - if (!av_frame_->data[VideoFrame::kYPlane] ||
|
| - !av_frame_->data[VideoFrame::kUPlane] ||
|
| - !av_frame_->data[VideoFrame::kVPlane]) {
|
| - LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
|
| - av_frame_unref(av_frame_.get());
|
| - return false;
|
| - }
|
| + // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
|
| + // The decoder is in a bad state and not decoding correctly.
|
| + // Checking for NULL avoids a crash in CopyPlane().
|
| + if (!av_frame_->data[VideoFrame::kYPlane] ||
|
| + !av_frame_->data[VideoFrame::kUPlane] ||
|
| + !av_frame_->data[VideoFrame::kVPlane]) {
|
| + LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
|
| + av_frame_unref(av_frame_.get());
|
| + return false;
|
| + }
|
|
|
| - scoped_refptr<VideoFrame> frame =
|
| - reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
|
| - frame->set_timestamp(
|
| - base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
|
| - *has_produced_frame = true;
|
| - output_cb_.Run(frame);
|
| + scoped_refptr<VideoFrame> frame =
|
| + reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
|
| + frame->set_timestamp(
|
| + base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
|
| + *has_produced_frame = true;
|
| + output_cb_.Run(frame);
|
|
|
| - av_frame_unref(av_frame_.get());
|
| + av_frame_unref(av_frame_.get());
|
| + } while (remaining_size > 0);
|
| return true;
|
| }
|
|
|
| @@ -360,7 +399,7 @@ bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) {
|
| codec_context_.reset(avcodec_alloc_context3(NULL));
|
| VideoDecoderConfigToAVCodecContext(config_, codec_context_.get());
|
|
|
| - codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
|
| + codec_context_->thread_count = GetThreadCount(config_);
|
| codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME;
|
| codec_context_->opaque = this;
|
| codec_context_->flags |= CODEC_FLAG_EMU_EDGE;
|
| @@ -376,6 +415,10 @@ bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) {
|
| return false;
|
| }
|
|
|
| + LOG(ERROR) << "Using ffmpeg!";
|
| + LOG(ERROR) << "Configured FFmpeg decoder with thread count:"
|
| + << codec_context_->thread_count;
|
| +
|
| av_frame_.reset(av_frame_alloc());
|
| return true;
|
| }
|
|
|