| Index: content/common/gpu/media/vt_video_encode_accelerator_mac.cc
 | 
| diff --git a/media/cast/sender/h264_vt_encoder.cc b/content/common/gpu/media/vt_video_encode_accelerator_mac.cc
 | 
| similarity index 11%
 | 
| copy from media/cast/sender/h264_vt_encoder.cc
 | 
| copy to content/common/gpu/media/vt_video_encode_accelerator_mac.cc
 | 
| index 33e7366d80a3e534653e8b7cfd94c530fac3f28d..71c80ef3a9f1bfb58b4dc8b5f97769409af42fcb 100644
 | 
| --- a/media/cast/sender/h264_vt_encoder.cc
 | 
| +++ b/content/common/gpu/media/vt_video_encode_accelerator_mac.cc
 | 
| @@ -1,769 +1,552 @@
 | 
| -// Copyright 2014 The Chromium Authors. All rights reserved.
 | 
| +// Copyright 2016 The Chromium Authors. All rights reserved.
 | 
|  // Use of this source code is governed by a BSD-style license that can be
 | 
|  // found in the LICENSE file.
 | 
|  
 | 
| -#include "media/cast/sender/h264_vt_encoder.h"
 | 
| +#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
 | 
|  
 | 
| -#include <stddef.h>
 | 
| -
 | 
| -#include <string>
 | 
| -#include <vector>
 | 
| -
 | 
| -#include "base/big_endian.h"
 | 
| -#include "base/bind.h"
 | 
| -#include "base/bind_helpers.h"
 | 
| -#include "base/location.h"
 | 
| -#include "base/logging.h"
 | 
| -#include "base/macros.h"
 | 
| -#include "base/power_monitor/power_monitor.h"
 | 
| -#include "base/synchronization/lock.h"
 | 
| -#include "build/build_config.h"
 | 
| +#include "base/thread_task_runner_handle.h"
 | 
| +#include "media/base/mac/coremedia_glue.h"
 | 
|  #include "media/base/mac/corevideo_glue.h"
 | 
|  #include "media/base/mac/video_frame_mac.h"
 | 
| -#include "media/cast/common/rtp_time.h"
 | 
| -#include "media/cast/constants.h"
 | 
| -#include "media/cast/sender/video_frame_factory.h"
 | 
|  
 | 
| -namespace media {
 | 
| -namespace cast {
 | 
| +namespace content {
 | 
|  
 | 
|  namespace {
 | 
|  
 | 
| -// Container for the associated data of a video frame being processed.
 | 
| -struct InProgressFrameEncode {
 | 
| -  const RtpTimeTicks rtp_timestamp;
 | 
| +// TODO(emircan): Check if we can find the actual system capabilities by
 | 
| +// creating VTCompressionSessions with varying requirements.
 | 
| +// See crbug.com/584784.
 | 
| +const size_t kBitsPerByte = 8;
 | 
| +const size_t kDefaultResolutionWidth = 640;
 | 
| +const size_t kDefaultResolutionHeight = 480;
 | 
| +const size_t kMaxFrameRateNumerator = 30;
 | 
| +const size_t kMaxFrameRateDenominator = 1;
 | 
| +const size_t kMaxResolutionWidth = 4096;
 | 
| +const size_t kMaxResolutionHeight = 2160;
 | 
| +const size_t kNumInputBuffers = 3;
 | 
| +
 | 
| +}  // namespace
 | 
| +
 | 
| +struct VTVideoEncodeAccelerator::InProgressFrameEncode {
 | 
| +  InProgressFrameEncode(base::TimeDelta rtp_timestamp,
 | 
| +                        base::TimeTicks ref_time)
 | 
| +      : timestamp(rtp_timestamp), reference_time(ref_time) {}
 | 
| +  const base::TimeDelta timestamp;
 | 
|    const base::TimeTicks reference_time;
 | 
| -  const VideoEncoder::FrameEncodedCallback frame_encoded_callback;
 | 
| -
 | 
| -  InProgressFrameEncode(RtpTimeTicks rtp,
 | 
| -                        base::TimeTicks r_time,
 | 
| -                        VideoEncoder::FrameEncodedCallback callback)
 | 
| -      : rtp_timestamp(rtp),
 | 
| -        reference_time(r_time),
 | 
| -        frame_encoded_callback(callback) {}
 | 
| +
 | 
| + private:
 | 
| +  DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode);
 | 
|  };
 | 
|  
 | 
| -base::ScopedCFTypeRef<CFDictionaryRef>
 | 
| -DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) {
 | 
| -  return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate(
 | 
| -      kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks,
 | 
| -      &kCFTypeDictionaryValueCallBacks));
 | 
| -}
 | 
| +struct VTVideoEncodeAccelerator::EncodeOutput {
 | 
| +  EncodeOutput(VTEncodeInfoFlags info_flags, CMSampleBufferRef sbuf)
 | 
| +      : info(info_flags), sample_buffer(sbuf, base::scoped_policy::RETAIN) {}
 | 
| +  const VTEncodeInfoFlags info;
 | 
| +  const base::ScopedCFTypeRef<CMSampleBufferRef> sample_buffer;
 | 
|  
 | 
| -base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key,
 | 
| -                                                              CFTypeRef value) {
 | 
| -  CFTypeRef keys[1] = {key};
 | 
| -  CFTypeRef values[1] = {value};
 | 
| -  return DictionaryWithKeysAndValues(keys, values, 1);
 | 
| -}
 | 
| + private:
 | 
| +  DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
 | 
| +};
 | 
|  
 | 
| -base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) {
 | 
| -  std::vector<CFNumberRef> numbers;
 | 
| -  numbers.reserve(size);
 | 
| -  for (const int* end = v + size; v < end; ++v)
 | 
| -    numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v));
 | 
| -  base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
 | 
| -      kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]),
 | 
| -      numbers.size(), &kCFTypeArrayCallBacks));
 | 
| -  for (auto& number : numbers) {
 | 
| -    CFRelease(number);
 | 
| -  }
 | 
| -  return array;
 | 
| -}
 | 
| +struct VTVideoEncodeAccelerator::BitstreamBufferRef {
 | 
| +  BitstreamBufferRef(int32_t id,
 | 
| +                     scoped_ptr<base::SharedMemory> shm,
 | 
| +                     size_t size)
 | 
| +      : id(id), shm(std::move(shm)), size(size) {}
 | 
| +  const int32_t id;
 | 
| +  const scoped_ptr<base::SharedMemory> shm;
 | 
| +  const size_t size;
 | 
|  
 | 
| -template <typename NalSizeType>
 | 
| -void CopyNalsToAnnexB(char* avcc_buffer,
 | 
| -                      const size_t avcc_size,
 | 
| -                      std::string* annexb_buffer) {
 | 
| -  static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 ||
 | 
| -                    sizeof(NalSizeType) == 4,
 | 
| -                "NAL size type has unsupported size");
 | 
| -  static const char startcode_3[3] = {0, 0, 1};
 | 
| -  DCHECK(avcc_buffer);
 | 
| -  DCHECK(annexb_buffer);
 | 
| -  size_t bytes_left = avcc_size;
 | 
| -  while (bytes_left > 0) {
 | 
| -    DCHECK_GT(bytes_left, sizeof(NalSizeType));
 | 
| -    NalSizeType nal_size;
 | 
| -    base::ReadBigEndian(avcc_buffer, &nal_size);
 | 
| -    bytes_left -= sizeof(NalSizeType);
 | 
| -    avcc_buffer += sizeof(NalSizeType);
 | 
| -
 | 
| -    DCHECK_GE(bytes_left, nal_size);
 | 
| -    annexb_buffer->append(startcode_3, sizeof(startcode_3));
 | 
| -    annexb_buffer->append(avcc_buffer, nal_size);
 | 
| -    bytes_left -= nal_size;
 | 
| -    avcc_buffer += nal_size;
 | 
| -  }
 | 
| + private:
 | 
| +  DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
 | 
| +};
 | 
| +
 | 
| +VTVideoEncodeAccelerator::VTVideoEncodeAccelerator()
 | 
| +    : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
 | 
| +      encoder_thread_("VTEncoderThread"),
 | 
| +      encoder_task_weak_factory_(this) {
 | 
| +  encoder_weak_ptr_ = encoder_task_weak_factory_.GetWeakPtr();
 | 
|  }
 | 
|  
 | 
| -// Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies
 | 
| -// parameter sets for keyframes before the frame data as well.
 | 
| -void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
 | 
| -                                    std::string* annexb_buffer,
 | 
| -                                    bool keyframe) {
 | 
| -  // Perform two pass, one to figure out the total output size, and another to
 | 
| -  // copy the data after having performed a single output allocation. Note that
 | 
| -  // we'll allocate a bit more because we'll count 4 bytes instead of 3 for
 | 
| -  // video NALs.
 | 
| -
 | 
| -  OSStatus status;
 | 
| -
 | 
| -  // Get the sample buffer's block buffer and format description.
 | 
| -  auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
 | 
| -  DCHECK(bb);
 | 
| -  auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
 | 
| -  DCHECK(fdesc);
 | 
| -
 | 
| -  size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
 | 
| -  size_t total_bytes = bb_size;
 | 
| -
 | 
| -  size_t pset_count;
 | 
| -  int nal_size_field_bytes;
 | 
| -  status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
 | 
| -      fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes);
 | 
| -  if (status ==
 | 
| -      CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) {
 | 
| -    DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header";
 | 
| -    pset_count = 2;
 | 
| -    nal_size_field_bytes = 4;
 | 
| -  } else if (status != noErr) {
 | 
| -    DLOG(ERROR)
 | 
| -        << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
 | 
| -        << status;
 | 
| -    return;
 | 
| -  }
 | 
| +VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
 | 
| +  DVLOG(3) << __FUNCTION__;
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread());
 | 
|  
 | 
| -  if (keyframe) {
 | 
| -    const uint8_t* pset;
 | 
| -    size_t pset_size;
 | 
| -    for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
 | 
| -      status =
 | 
| -          CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
 | 
| -              fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
 | 
| -      if (status != noErr) {
 | 
| -        DLOG(ERROR)
 | 
| -            << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
 | 
| -            << status;
 | 
| -        return;
 | 
| -      }
 | 
| -      total_bytes += pset_size + nal_size_field_bytes;
 | 
| -    }
 | 
| -  }
 | 
| +  Destroy();
 | 
| +  DCHECK(!encoder_thread_.IsRunning());
 | 
| +  DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
 | 
| +}
 | 
|  
 | 
| -  annexb_buffer->reserve(total_bytes);
 | 
| -
 | 
| -  // Copy all parameter sets before keyframes.
 | 
| -  if (keyframe) {
 | 
| -    const uint8_t* pset;
 | 
| -    size_t pset_size;
 | 
| -    for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
 | 
| -      status =
 | 
| -          CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
 | 
| -              fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
 | 
| -      if (status != noErr) {
 | 
| -        DLOG(ERROR)
 | 
| -            << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
 | 
| -            << status;
 | 
| -        return;
 | 
| -      }
 | 
| -      static const char startcode_4[4] = {0, 0, 0, 1};
 | 
| -      annexb_buffer->append(startcode_4, sizeof(startcode_4));
 | 
| -      annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
 | 
| -    }
 | 
| -  }
 | 
| +media::VideoEncodeAccelerator::SupportedProfiles
 | 
| +VTVideoEncodeAccelerator::GetSupportedProfiles() {
 | 
| +  DVLOG(3) << __FUNCTION__;
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread());
 | 
|  
 | 
| -  // Block buffers can be composed of non-contiguous chunks. For the sake of
 | 
| -  // keeping this code simple, flatten non-contiguous block buffers.
 | 
| -  base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb(
 | 
| -      bb, base::scoped_policy::RETAIN);
 | 
| -  if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
 | 
| -    contiguous_bb.reset();
 | 
| -    status = CoreMediaGlue::CMBlockBufferCreateContiguous(
 | 
| -        kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0,
 | 
| -        contiguous_bb.InitializeInto());
 | 
| -    if (status != noErr) {
 | 
| -      DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
 | 
| -      return;
 | 
| -    }
 | 
| +  SupportedProfiles profiles;
 | 
| +  // Do an initial check of whether the HW encoder is supported.
 | 
| +  videotoolbox_glue_ = VideoToolboxGlue::Get();
 | 
| +  if (!videotoolbox_glue_) {
 | 
| +    DLOG(ERROR) << "Failed creating VideoToolbox glue.";
 | 
| +    return profiles;
 | 
|    }
 | 
| -
 | 
| -  // Copy all the NAL units. In the process convert them from AVCC format
 | 
| -  // (length header) to AnnexB format (start code).
 | 
| -  char* bb_data;
 | 
| -  status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr,
 | 
| -                                                      nullptr, &bb_data);
 | 
| -  if (status != noErr) {
 | 
| -    DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
 | 
| -    return;
 | 
| +  const bool rv = CreateCompressionSession(
 | 
| +      media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
 | 
| +      gfx::Size(kDefaultResolutionWidth, kDefaultResolutionHeight), true);
 | 
| +  DestroyCompressionSession();
 | 
| +  if (!rv) {
 | 
| +    VLOG(1)
 | 
| +        << "Hardware encode acceleration is not available on this platform.";
 | 
| +    return profiles;
 | 
|    }
 | 
|  
 | 
| -  if (nal_size_field_bytes == 1) {
 | 
| -    CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
 | 
| -  } else if (nal_size_field_bytes == 2) {
 | 
| -    CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
 | 
| -  } else if (nal_size_field_bytes == 4) {
 | 
| -    CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
 | 
| -  } else {
 | 
| -    NOTREACHED();
 | 
| -  }
 | 
| +  SupportedProfile profile;
 | 
| +  profile.profile = media::H264PROFILE_BASELINE;
 | 
| +  profile.max_framerate_numerator = kMaxFrameRateNumerator;
 | 
| +  profile.max_framerate_denominator = kMaxFrameRateDenominator;
 | 
| +  profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
 | 
| +  profiles.push_back(profile);
 | 
| +  return profiles;
 | 
|  }
 | 
|  
 | 
| -}  // namespace
 | 
| +bool VTVideoEncodeAccelerator::Initialize(
 | 
| +    media::VideoPixelFormat format,
 | 
| +    const gfx::Size& input_visible_size,
 | 
| +    media::VideoCodecProfile output_profile,
 | 
| +    uint32_t initial_bitrate,
 | 
| +    Client* client) {
 | 
| +  DVLOG(3) << __FUNCTION__
 | 
| +           << ": input_format=" << media::VideoPixelFormatToString(format)
 | 
| +           << ", input_visible_size=" << input_visible_size.ToString()
 | 
| +           << ", output_profile=" << output_profile
 | 
| +           << ", initial_bitrate=" << initial_bitrate;
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| +  DCHECK(client);
 | 
|  
 | 
| -class H264VideoToolboxEncoder::VideoFrameFactoryImpl
 | 
| -    : public base::RefCountedThreadSafe<VideoFrameFactoryImpl>,
 | 
| -      public VideoFrameFactory {
 | 
| - public:
 | 
| -  // Type that proxies the VideoFrameFactory interface to this class.
 | 
| -  class Proxy;
 | 
| -
 | 
| -  VideoFrameFactoryImpl(const base::WeakPtr<H264VideoToolboxEncoder>& encoder,
 | 
| -                        const scoped_refptr<CastEnvironment>& cast_environment)
 | 
| -      : encoder_(encoder), cast_environment_(cast_environment) {}
 | 
| -
 | 
| -  scoped_refptr<VideoFrame> MaybeCreateFrame(
 | 
| -      const gfx::Size& frame_size,
 | 
| -      base::TimeDelta timestamp) final {
 | 
| -    if (frame_size.IsEmpty()) {
 | 
| -      DVLOG(1) << "Rejecting empty video frame.";
 | 
| -      return nullptr;
 | 
| -    }
 | 
| -
 | 
| -    base::AutoLock auto_lock(lock_);
 | 
| -
 | 
| -    // If the pool size does not match, speculatively reset the encoder to use
 | 
| -    // the new size and return null. Cache the new frame size right away and
 | 
| -    // toss away the pixel buffer pool to avoid spurious tasks until the encoder
 | 
| -    // is done resetting.
 | 
| -    if (frame_size != pool_frame_size_) {
 | 
| -      DVLOG(1) << "MaybeCreateFrame: Detected frame size change.";
 | 
| -      cast_environment_->PostTask(
 | 
| -          CastEnvironment::MAIN, FROM_HERE,
 | 
| -          base::Bind(&H264VideoToolboxEncoder::UpdateFrameSize, encoder_,
 | 
| -                     frame_size));
 | 
| -      pool_frame_size_ = frame_size;
 | 
| -      pool_.reset();
 | 
| -      return nullptr;
 | 
| -    }
 | 
| -
 | 
| -    if (!pool_) {
 | 
| -      DVLOG(1) << "MaybeCreateFrame: No pixel buffer pool.";
 | 
| -      return nullptr;
 | 
| -    }
 | 
| -
 | 
| -    // Allocate a pixel buffer from the pool and return a wrapper VideoFrame.
 | 
| -    base::ScopedCFTypeRef<CVPixelBufferRef> buffer;
 | 
| -    auto status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool_,
 | 
| -                                                     buffer.InitializeInto());
 | 
| -    if (status != kCVReturnSuccess) {
 | 
| -      DLOG(ERROR) << "CVPixelBufferPoolCreatePixelBuffer failed: " << status;
 | 
| -      return nullptr;
 | 
| -    }
 | 
| -
 | 
| -    DCHECK(buffer);
 | 
| -    return VideoFrame::WrapCVPixelBuffer(buffer, timestamp);
 | 
| +  if (media::PIXEL_FORMAT_I420 != format) {
 | 
| +    DLOG(ERROR) << "Input format not supported= "
 | 
| +                << media::VideoPixelFormatToString(format);
 | 
| +    return false;
 | 
|    }
 | 
| -
 | 
| -  void Update(const base::ScopedCFTypeRef<CVPixelBufferPoolRef>& pool,
 | 
| -              const gfx::Size& frame_size) {
 | 
| -    base::AutoLock auto_lock(lock_);
 | 
| -    pool_ = pool;
 | 
| -    pool_frame_size_ = frame_size;
 | 
| +  if (media::H264PROFILE_BASELINE != output_profile) {
 | 
| +    DLOG(ERROR) << "Output profile not supported= "
 | 
| +                << output_profile;
 | 
| +    return false;
 | 
|    }
 | 
|  
 | 
| - private:
 | 
| -  friend class base::RefCountedThreadSafe<VideoFrameFactoryImpl>;
 | 
| -  ~VideoFrameFactoryImpl() final {}
 | 
| -
 | 
| -  base::Lock lock_;
 | 
| -  base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool_;
 | 
| -  gfx::Size pool_frame_size_;
 | 
| +  videotoolbox_glue_ = VideoToolboxGlue::Get();
 | 
| +  if (!videotoolbox_glue_) {
 | 
| +    DLOG(ERROR) << "Failed creating VideoToolbox glue.";
 | 
| +    return false;
 | 
| +  }
 | 
|  
 | 
| -  // Weak back reference to the encoder and the cast envrionment so we can
 | 
| -  // message the encoder when the frame size changes.
 | 
| -  const base::WeakPtr<H264VideoToolboxEncoder> encoder_;
 | 
| -  const scoped_refptr<CastEnvironment> cast_environment_;
 | 
| +  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
 | 
| +  client_ = client_ptr_factory_->GetWeakPtr();
 | 
| +  input_visible_size_ = input_visible_size;
 | 
| +  frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
 | 
| +  target_bitrate_ = initial_bitrate;
 | 
| +  bitstream_buffer_size_ = input_visible_size.GetArea();
 | 
|  
 | 
| -  DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryImpl);
 | 
| -};
 | 
| -
 | 
| -class H264VideoToolboxEncoder::VideoFrameFactoryImpl::Proxy
 | 
| -    : public VideoFrameFactory {
 | 
| - public:
 | 
| -  explicit Proxy(
 | 
| -      const scoped_refptr<VideoFrameFactoryImpl>& video_frame_factory)
 | 
| -      : video_frame_factory_(video_frame_factory) {
 | 
| -    DCHECK(video_frame_factory_);
 | 
| +  if (!encoder_thread_.Start()) {
 | 
| +    DLOG(ERROR) << "Failed spawning encoder thread.";
 | 
| +    return false;
 | 
|    }
 | 
| +  encoder_thread_task_runner_ = encoder_thread_.task_runner();
 | 
|  
 | 
| -  scoped_refptr<VideoFrame> MaybeCreateFrame(
 | 
| -      const gfx::Size& frame_size,
 | 
| -      base::TimeDelta timestamp) final {
 | 
| -    return video_frame_factory_->MaybeCreateFrame(frame_size, timestamp);
 | 
| +  if (!ResetCompressionSession()) {
 | 
| +    DLOG(ERROR) << "Failed creating compression session.";
 | 
| +    return false;
 | 
|    }
 | 
|  
 | 
| - private:
 | 
| -  ~Proxy() final {}
 | 
| -
 | 
| -  const scoped_refptr<VideoFrameFactoryImpl> video_frame_factory_;
 | 
| -
 | 
| -  DISALLOW_COPY_AND_ASSIGN(Proxy);
 | 
| -};
 | 
| -
 | 
| -// static
 | 
| -bool H264VideoToolboxEncoder::IsSupported(
 | 
| -    const VideoSenderConfig& video_config) {
 | 
| -  return video_config.codec == CODEC_VIDEO_H264 && VideoToolboxGlue::Get();
 | 
| -}
 | 
| -
 | 
| -H264VideoToolboxEncoder::H264VideoToolboxEncoder(
 | 
| -    const scoped_refptr<CastEnvironment>& cast_environment,
 | 
| -    const VideoSenderConfig& video_config,
 | 
| -    const StatusChangeCallback& status_change_cb)
 | 
| -    : cast_environment_(cast_environment),
 | 
| -      videotoolbox_glue_(VideoToolboxGlue::Get()),
 | 
| -      video_config_(video_config),
 | 
| -      status_change_cb_(status_change_cb),
 | 
| -      last_frame_id_(kFirstFrameId - 1),
 | 
| -      encode_next_frame_as_keyframe_(false),
 | 
| -      power_suspended_(false),
 | 
| -      weak_factory_(this) {
 | 
| -  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
 | 
| -  DCHECK(!status_change_cb_.is_null());
 | 
| -
 | 
| -  OperationalStatus operational_status =
 | 
| -      H264VideoToolboxEncoder::IsSupported(video_config)
 | 
| -          ? STATUS_INITIALIZED
 | 
| -          : STATUS_UNSUPPORTED_CODEC;
 | 
| -  cast_environment_->PostTask(
 | 
| -      CastEnvironment::MAIN, FROM_HERE,
 | 
| -      base::Bind(status_change_cb_, operational_status));
 | 
| -
 | 
| -  if (operational_status == STATUS_INITIALIZED) {
 | 
| -    // Create the shared video frame factory. It persists for the combined
 | 
| -    // lifetime of the encoder and all video frame factory proxies created by
 | 
| -    // |CreateVideoFrameFactory| that reference it.
 | 
| -    video_frame_factory_ =
 | 
| -        scoped_refptr<VideoFrameFactoryImpl>(new VideoFrameFactoryImpl(
 | 
| -            weak_factory_.GetWeakPtr(), cast_environment_));
 | 
| -
 | 
| -    // Register for power state changes.
 | 
| -    auto power_monitor = base::PowerMonitor::Get();
 | 
| -    if (power_monitor) {
 | 
| -      power_monitor->AddObserver(this);
 | 
| -      VLOG(1) << "Registered for power state changes.";
 | 
| -    } else {
 | 
| -      DLOG(WARNING) << "No power monitor. Process suspension will invalidate "
 | 
| -                       "the encoder.";
 | 
| -    }
 | 
| -  }
 | 
| +  client_task_runner_->PostTask(
 | 
| +      FROM_HERE,
 | 
| +      base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
 | 
| +                 input_visible_size_, bitstream_buffer_size_));
 | 
| +  return true;
 | 
|  }
 | 
|  
 | 
| -H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
 | 
| -  DestroyCompressionSession();
 | 
| +void VTVideoEncodeAccelerator::Encode(
 | 
| +    const scoped_refptr<media::VideoFrame>& frame,
 | 
| +    bool force_keyframe) {
 | 
| +  DVLOG(3) << __FUNCTION__;
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread());
 | 
|  
 | 
| -  // If video_frame_factory_ is not null, the encoder registered for power state
 | 
| -  // changes in the ctor and it must now unregister.
 | 
| -  if (video_frame_factory_) {
 | 
| -    auto power_monitor = base::PowerMonitor::Get();
 | 
| -    if (power_monitor)
 | 
| -      power_monitor->RemoveObserver(this);
 | 
| -  }
 | 
| +  encoder_thread_task_runner_->PostTask(
 | 
| +      FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::EncodeTask,
 | 
| +                            base::Unretained(this), frame, force_keyframe));
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::ResetCompressionSession() {
 | 
| +void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
 | 
| +    const media::BitstreamBuffer& buffer) {
 | 
| +  DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
 | 
|    DCHECK(thread_checker_.CalledOnValidThread());
 | 
|  
 | 
| -  // Ignore reset requests while power suspended.
 | 
| -  if (power_suspended_)
 | 
| +  if (buffer.size() < bitstream_buffer_size_) {
 | 
| +    DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
 | 
| +                << " vs. " << bitstream_buffer_size_;
 | 
| +    client_->NotifyError(kInvalidArgumentError);
 | 
|      return;
 | 
| +  }
 | 
|  
 | 
| -  // Notify that we're resetting the encoder.
 | 
| -  cast_environment_->PostTask(
 | 
| -      CastEnvironment::MAIN, FROM_HERE,
 | 
| -      base::Bind(status_change_cb_, STATUS_CODEC_REINIT_PENDING));
 | 
| -
 | 
| -  // Destroy the current session, if any.
 | 
| -  DestroyCompressionSession();
 | 
| -
 | 
| -  // On OS X, allow the hardware encoder. Don't require it, it does not support
 | 
| -  // all configurations (some of which are used for testing).
 | 
| -  base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
 | 
| -#if !defined(OS_IOS)
 | 
| -  encoder_spec = DictionaryWithKeyValue(
 | 
| -      videotoolbox_glue_
 | 
| -          ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
 | 
| -      kCFBooleanTrue);
 | 
| -#endif
 | 
| -
 | 
| -  // Force 420v so that clients can easily use these buffers as GPU textures.
 | 
| -  const int format[] = {
 | 
| -      CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
 | 
| -
 | 
| -  // Keep these attachment settings in-sync with those in ConfigureSession().
 | 
| -  CFTypeRef attachments_keys[] = {kCVImageBufferColorPrimariesKey,
 | 
| -                                  kCVImageBufferTransferFunctionKey,
 | 
| -                                  kCVImageBufferYCbCrMatrixKey};
 | 
| -  CFTypeRef attachments_values[] = {kCVImageBufferColorPrimaries_ITU_R_709_2,
 | 
| -                                    kCVImageBufferTransferFunction_ITU_R_709_2,
 | 
| -                                    kCVImageBufferYCbCrMatrix_ITU_R_709_2};
 | 
| -  CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey,
 | 
| -                                        kCVBufferPropagatedAttachmentsKey};
 | 
| -  CFTypeRef buffer_attributes_values[] = {
 | 
| -      ArrayWithIntegers(format, arraysize(format)).release(),
 | 
| -      DictionaryWithKeysAndValues(attachments_keys, attachments_values,
 | 
| -                                  arraysize(attachments_keys)).release()};
 | 
| -  const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
 | 
| -      DictionaryWithKeysAndValues(buffer_attributes_keys,
 | 
| -                                  buffer_attributes_values,
 | 
| -                                  arraysize(buffer_attributes_keys));
 | 
| -  for (auto& v : buffer_attributes_values)
 | 
| -    CFRelease(v);
 | 
| -
 | 
| -  // Create the compression session.
 | 
| -
 | 
| -  // Note that the encoder object is given to the compression session as the
 | 
| -  // callback context using a raw pointer. The C API does not allow us to use a
 | 
| -  // smart pointer, nor is this encoder ref counted. However, this is still
 | 
| -  // safe, because we 1) we own the compression session and 2) we tear it down
 | 
| -  // safely. When destructing the encoder, the compression session is flushed
 | 
| -  // and invalidated. Internally, VideoToolbox will join all of its threads
 | 
| -  // before returning to the client. Therefore, when control returns to us, we
 | 
| -  // are guaranteed that the output callback will not execute again.
 | 
| -  OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
 | 
| -      kCFAllocatorDefault, frame_size_.width(), frame_size_.height(),
 | 
| -      CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, buffer_attributes,
 | 
| -      nullptr /* compressedDataAllocator */,
 | 
| -      &H264VideoToolboxEncoder::CompressionCallback,
 | 
| -      reinterpret_cast<void*>(this), compression_session_.InitializeInto());
 | 
| -  if (status != noErr) {
 | 
| -    DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
 | 
| -    // Notify that reinitialization has failed.
 | 
| -    cast_environment_->PostTask(
 | 
| -        CastEnvironment::MAIN, FROM_HERE,
 | 
| -        base::Bind(status_change_cb_, STATUS_CODEC_INIT_FAILED));
 | 
| +  scoped_ptr<base::SharedMemory> shm(
 | 
| +      new base::SharedMemory(buffer.handle(), false));
 | 
| +  if (!shm->Map(buffer.size())) {
 | 
| +    DLOG(ERROR) << "Failed mapping shared memory.";
 | 
| +    client_->NotifyError(kPlatformFailureError);
 | 
|      return;
 | 
|    }
 | 
|  
 | 
| -  // Configure the session (apply session properties based on the current state
 | 
| -  // of the encoder, experimental tuning and requirements).
 | 
| -  ConfigureCompressionSession();
 | 
| -
 | 
| -  // Update the video frame factory.
 | 
| -  base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool(
 | 
| -      videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool(
 | 
| -          compression_session_),
 | 
| -      base::scoped_policy::RETAIN);
 | 
| -  video_frame_factory_->Update(pool, frame_size_);
 | 
| -
 | 
| -  // Notify that reinitialization is done.
 | 
| -  cast_environment_->PostTask(
 | 
| -      CastEnvironment::MAIN, FROM_HERE,
 | 
| -      base::Bind(status_change_cb_, STATUS_INITIALIZED));
 | 
| -}
 | 
| +  scoped_ptr<BitstreamBufferRef> buffer_ref(
 | 
| +      new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
 | 
|  
 | 
| -void H264VideoToolboxEncoder::ConfigureCompressionSession() {
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
 | 
| -      videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
 | 
| -  SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(),
 | 
| -                     true);
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
 | 
| -      false);
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240);
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_
 | 
| -          ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(),
 | 
| -      240);
 | 
| -  // TODO(jfroy): implement better bitrate control
 | 
| -  //              https://crbug.com/425352
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
 | 
| -      (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
 | 
| -      video_config_.max_frame_rate);
 | 
| -  // Keep these attachment settings in-sync with those in Initialize().
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
 | 
| -      kCVImageBufferColorPrimaries_ITU_R_709_2);
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
 | 
| -      kCVImageBufferTransferFunction_ITU_R_709_2);
 | 
| -  SetSessionProperty(
 | 
| -      videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
 | 
| -      kCVImageBufferYCbCrMatrix_ITU_R_709_2);
 | 
| -  if (video_config_.max_number_of_video_buffers_used > 0) {
 | 
| -    SetSessionProperty(
 | 
| -        videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
 | 
| -        video_config_.max_number_of_video_buffers_used);
 | 
| -  }
 | 
| +  encoder_thread_task_runner_->PostTask(
 | 
| +      FROM_HERE,
 | 
| +      base::Bind(&VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
 | 
| +                 base::Unretained(this), base::Passed(&buffer_ref)));
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::DestroyCompressionSession() {
 | 
| +void VTVideoEncodeAccelerator::RequestEncodingParametersChange(
 | 
| +    uint32_t bitrate,
 | 
| +    uint32_t framerate) {
 | 
| +  DVLOG(3) << __FUNCTION__ << ": bitrate=" << bitrate
 | 
| +           << ": framerate=" << framerate;
 | 
|    DCHECK(thread_checker_.CalledOnValidThread());
 | 
|  
 | 
| -  // If the compression session exists, invalidate it. This blocks until all
 | 
| -  // pending output callbacks have returned and any internal threads have
 | 
| -  // joined, ensuring no output callback ever sees a dangling encoder pointer.
 | 
| -  //
 | 
| -  // Before destroying the compression session, the video frame factory's pool
 | 
| -  // is updated to null so that no thread will produce new video frames via the
 | 
| -  // factory until a new compression session is created. The current frame size
 | 
| -  // is passed to prevent the video frame factory from posting |UpdateFrameSize|
 | 
| -  // tasks. Indeed, |DestroyCompressionSession| is either called from
 | 
| -  // |ResetCompressionSession|, in which case a new pool and frame size will be
 | 
| -  // set, or from callsites that require that there be no compression session
 | 
| -  // (ex: the dtor).
 | 
| -  if (compression_session_) {
 | 
| -    video_frame_factory_->Update(
 | 
| -        base::ScopedCFTypeRef<CVPixelBufferPoolRef>(nullptr), frame_size_);
 | 
| -    videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
 | 
| -    compression_session_.reset();
 | 
| -  }
 | 
| +  encoder_thread_task_runner_->PostTask(
 | 
| +      FROM_HERE,
 | 
| +      base::Bind(&VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask,
 | 
| +                 base::Unretained(this), bitrate, framerate));
 | 
|  }
 | 
|  
 | 
| -bool H264VideoToolboxEncoder::EncodeVideoFrame(
 | 
| -    const scoped_refptr<media::VideoFrame>& video_frame,
 | 
| -    const base::TimeTicks& reference_time,
 | 
| -    const FrameEncodedCallback& frame_encoded_callback) {
 | 
| +void VTVideoEncodeAccelerator::Destroy() {
 | 
| +  DVLOG(3) << __FUNCTION__;
 | 
|    DCHECK(thread_checker_.CalledOnValidThread());
 | 
| -  DCHECK(!frame_encoded_callback.is_null());
 | 
|  
 | 
| -  // Reject empty video frames.
 | 
| -  const gfx::Size frame_size = video_frame->visible_rect().size();
 | 
| -  if (frame_size.IsEmpty()) {
 | 
| -    DVLOG(1) << "Rejecting empty video frame.";
 | 
| -    return false;
 | 
| -  }
 | 
| -
 | 
| -  // Handle frame size changes. This will reset the compression session.
 | 
| -  if (frame_size != frame_size_) {
 | 
| -    DVLOG(1) << "EncodeVideoFrame: Detected frame size change.";
 | 
| -    UpdateFrameSize(frame_size);
 | 
| -  }
 | 
| +  // Cancel all callbacks.
 | 
| +  client_ptr_factory_.reset();
 | 
|  
 | 
| -  // Need a compression session to continue.
 | 
| -  if (!compression_session_) {
 | 
| -    DLOG(ERROR) << "No compression session.";
 | 
| -    return false;
 | 
| +  if (encoder_thread_.IsRunning()) {
 | 
| +    encoder_thread_task_runner_->PostTask(
 | 
| +        FROM_HERE,
 | 
| +        base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
 | 
| +                   base::Unretained(this)));
 | 
| +    encoder_thread_.Stop();
 | 
| +  } else {
 | 
| +    DestroyTask();
 | 
|    }
 | 
| +}
 | 
|  
 | 
| -  // Wrap the VideoFrame in a CVPixelBuffer. In all cases, no data will be
 | 
| -  // copied. If the VideoFrame was created by this encoder's video frame
 | 
| -  // factory, then the returned CVPixelBuffer will have been obtained from the
 | 
| -  // compression session's pixel buffer pool. This will eliminate a copy of the
 | 
| -  // frame into memory visible by the hardware encoder. The VideoFrame's
 | 
| -  // lifetime is extended for the lifetime of the returned CVPixelBuffer.
 | 
| -  auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame);
 | 
| -  if (!pixel_buffer) {
 | 
| -    DLOG(ERROR) << "WrapVideoFrameInCVPixelBuffer failed.";
 | 
| -    return false;
 | 
| +void VTVideoEncodeAccelerator::EncodeTask(
 | 
| +    const scoped_refptr<media::VideoFrame>& frame,
 | 
| +    bool force_keyframe) {
 | 
| +  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
 | 
| +  DCHECK(compression_session_);
 | 
| +  DCHECK(frame);
 | 
| +
 | 
| +  // TODO(emircan): See if we can eliminate a copy here by using
 | 
| +  // CVPixelBufferPool for the allocation of incoming VideoFrames.
 | 
| +  base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
 | 
| +      media::WrapVideoFrameInCVPixelBuffer(*frame);
 | 
| +  base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
 | 
| +      media::video_toolbox::DictionaryWithKeyValue(
 | 
| +          videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
 | 
| +          force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
 | 
| +
 | 
| +  base::TimeTicks ref_time;
 | 
| +  if (!frame->metadata()->GetTimeTicks(
 | 
| +          media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) {
 | 
| +    ref_time = base::TimeTicks::Now();
 | 
|    }
 | 
| -
 | 
| -  // Convert the frame timestamp to CMTime.
 | 
|    auto timestamp_cm = CoreMediaGlue::CMTimeMake(
 | 
| -      (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
 | 
| -
 | 
| +      frame->timestamp().InMicroseconds(), USEC_PER_SEC);
 | 
|    // Wrap information we'll need after the frame is encoded in a heap object.
 | 
|    // We'll get the pointer back from the VideoToolbox completion callback.
 | 
|    scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
 | 
| -      RtpTimeTicks::FromTimeDelta(video_frame->timestamp(), kVideoFrequency),
 | 
| -      reference_time, frame_encoded_callback));
 | 
| -
 | 
| -  // Build a suitable frame properties dictionary for keyframes.
 | 
| -  base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
 | 
| -  if (encode_next_frame_as_keyframe_) {
 | 
| -    frame_props = DictionaryWithKeyValue(
 | 
| -        videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
 | 
| -        kCFBooleanTrue);
 | 
| -    encode_next_frame_as_keyframe_ = false;
 | 
| -  }
 | 
| +      frame->timestamp(), ref_time));
 | 
|  
 | 
| -  // Submit the frame to the compression session. The function returns as soon
 | 
| -  // as the frame has been enqueued.
 | 
| +  // Ownership of |request| is passed to the encode callback only if the
 | 
| +  // call succeeds; otherwise it falls out of scope and is freed.
 | 
|    OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
 | 
|        compression_session_, pixel_buffer, timestamp_cm,
 | 
|        CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
 | 
| -      reinterpret_cast<void*>(request.release()), nullptr);
 | 
| +      reinterpret_cast<void*>(request.get()), nullptr);
 | 
|    if (status != noErr) {
 | 
|      DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
 | 
| -    return false;
 | 
| +    NotifyError(kPlatformFailureError);
 | 
| +  } else {
 | 
| +    CHECK(request.release());
 | 
|    }
 | 
| -
 | 
| -  return true;
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::UpdateFrameSize(const gfx::Size& size_needed) {
 | 
| -  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| +void VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
 | 
| +    scoped_ptr<BitstreamBufferRef> buffer_ref) {
 | 
| +  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
 | 
|  
 | 
| -  // Our video frame factory posts a task to update the frame size when its
 | 
| -  // cache of the frame size differs from what the client requested. To avoid
 | 
| -  // spurious encoder resets, check again here.
 | 
| -  if (size_needed == frame_size_) {
 | 
| -    DCHECK(compression_session_);
 | 
| +  // If there is already an EncodeOutput waiting, copy its output first.
 | 
| +  if (!encoder_output_queue_.empty()) {
 | 
| +    scoped_ptr<VTVideoEncodeAccelerator::EncodeOutput> encode_output =
 | 
| +        std::move(encoder_output_queue_.front());
 | 
| +    encoder_output_queue_.pop_front();
 | 
| +    ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
 | 
|      return;
 | 
|    }
 | 
|  
 | 
| -  VLOG(1) << "Resetting compression session (for frame size change from "
 | 
| -          << frame_size_.ToString() << " to " << size_needed.ToString() << ").";
 | 
| +  bitstream_buffer_queue_.push_back(std::move(buffer_ref));
 | 
| +}
 | 
|  
 | 
| -  // If there is an existing session, finish every pending frame.
 | 
| -  if (compression_session_) {
 | 
| -    EmitFrames();
 | 
| -  }
 | 
| +void VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask(
 | 
| +    uint32_t bitrate,
 | 
| +    uint32_t framerate) {
 | 
| +  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
 | 
|  
 | 
| -  // Store the new frame size.
 | 
| -  frame_size_ = size_needed;
 | 
| +  frame_rate_ = framerate > 1 ? framerate : 1;
 | 
| +  target_bitrate_ = bitrate > 1 ? bitrate : 1;
 | 
|  
 | 
| -  // Reset the compression session.
 | 
| -  ResetCompressionSession();
 | 
| +  if (!compression_session_) {
 | 
| +    NotifyError(kPlatformFailureError);
 | 
| +    return;
 | 
| +  }
 | 
| +
 | 
| +  media::video_toolbox::SessionPropertySetter session_property_setter(
 | 
| +      compression_session_, videotoolbox_glue_);
 | 
| +  // TODO(emircan): See crbug.com/425352.
 | 
| +  bool rv = session_property_setter.Set(
 | 
| +      videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
 | 
| +      target_bitrate_);
 | 
| +  rv &= session_property_setter.Set(
 | 
| +      videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
 | 
| +      frame_rate_);
 | 
| +  rv &= session_property_setter.Set(
 | 
| +      videotoolbox_glue_->kVTCompressionPropertyKey_DataRateLimits(),
 | 
| +      media::video_toolbox::ArrayWithIntegerAndFloat(
 | 
| +          target_bitrate_ / kBitsPerByte, 1.0f));
 | 
| +  DLOG_IF(ERROR, !rv) << "Couldn't change session encoding parameters.";
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::SetBitRate(int /*new_bit_rate*/) {
 | 
| -  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| -  // VideoToolbox does not seem to support bitrate reconfiguration.
 | 
| +void VTVideoEncodeAccelerator::DestroyTask() {
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread() ||
 | 
| +         (encoder_thread_.IsRunning() &&
 | 
| +          encoder_thread_task_runner_->BelongsToCurrentThread()));
 | 
| +
 | 
| +  // Cancel all encoder thread callbacks.
 | 
| +  encoder_task_weak_factory_.InvalidateWeakPtrs();
 | 
| +
 | 
| +  // This call blocks until all pending frames are flushed out.
 | 
| +  DestroyCompressionSession();
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::GenerateKeyFrame() {
 | 
| -  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| -  encode_next_frame_as_keyframe_ = true;
 | 
| +void VTVideoEncodeAccelerator::NotifyError(
 | 
| +    media::VideoEncodeAccelerator::Error error) {
 | 
| +  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
 | 
| +  client_task_runner_->PostTask(
 | 
| +      FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
 | 
|  }
 | 
|  
 | 
| -scoped_ptr<VideoFrameFactory>
 | 
| -H264VideoToolboxEncoder::CreateVideoFrameFactory() {
 | 
| -  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| -  return scoped_ptr<VideoFrameFactory>(
 | 
| -      new VideoFrameFactoryImpl::Proxy(video_frame_factory_));
 | 
| +// static
 | 
| +void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
 | 
| +                                                   void* request_opaque,
 | 
| +                                                   OSStatus status,
 | 
| +                                                   VTEncodeInfoFlags info,
 | 
| +                                                   CMSampleBufferRef sbuf) {
 | 
| +  // This function may be called asynchronously, on a different thread from the
 | 
| +  // one that calls VTCompressionSessionEncodeFrame.
 | 
| +  DVLOG(3) << __FUNCTION__;
 | 
| +
 | 
| +  auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
 | 
| +  DCHECK(encoder);
 | 
| +
 | 
| +  // Delete the InProgressFrameEncode immediately, since returning
 | 
| +  // timestamps is not supported at this point.
 | 
| +  scoped_ptr<InProgressFrameEncode> request(
 | 
| +      reinterpret_cast<InProgressFrameEncode*>(request_opaque));
 | 
| +  request.reset();
 | 
| +
 | 
| +  // EncodeOutput retains the CMSampleBufferRef while the task is posted
 | 
| +  // between threads.
 | 
| +  scoped_ptr<EncodeOutput> encode_output(new EncodeOutput(info, sbuf));
 | 
| +
 | 
| +  // This method is NOT called on |encoder_thread_|, so we still need to
 | 
| +  // post a task back to it to do work.
 | 
| +  encoder->encoder_thread_task_runner_->PostTask(
 | 
| +      FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
 | 
| +                            encoder->encoder_weak_ptr_, status,
 | 
| +                            base::Passed(&encode_output)));
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::EmitFrames() {
 | 
| -  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| -  if (!compression_session_)
 | 
| -    return;
 | 
| +void VTVideoEncodeAccelerator::CompressionCallbackTask(
 | 
| +    OSStatus status,
 | 
| +    scoped_ptr<EncodeOutput> encode_output) {
 | 
| +  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
 | 
|  
 | 
| -  OSStatus status = videotoolbox_glue_->VTCompressionSessionCompleteFrames(
 | 
| -      compression_session_, CoreMediaGlue::CMTime{0, 0, 0, 0});
 | 
|    if (status != noErr) {
 | 
| -    DLOG(ERROR) << " VTCompressionSessionCompleteFrames failed: " << status;
 | 
| +    DLOG(ERROR) << " encode failed: " << status;
 | 
| +    NotifyError(kPlatformFailureError);
 | 
| +    return;
 | 
|    }
 | 
| -}
 | 
|  
 | 
| -void H264VideoToolboxEncoder::OnSuspend() {
 | 
| -  VLOG(1)
 | 
| -      << "OnSuspend: Emitting all frames and destroying compression session.";
 | 
| -  EmitFrames();
 | 
| -  DestroyCompressionSession();
 | 
| -  power_suspended_ = true;
 | 
| +  // If there isn't any BitstreamBuffer to copy into, queue the output for
 | 
| +  // later use.
 | 
| +  if (bitstream_buffer_queue_.empty()) {
 | 
| +    encoder_output_queue_.push_back(std::move(encode_output));
 | 
| +    return;
 | 
| +  }
 | 
| +
 | 
| +  scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref =
 | 
| +      std::move(bitstream_buffer_queue_.front());
 | 
| +  bitstream_buffer_queue_.pop_front();
 | 
| +  ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::OnResume() {
 | 
| -  power_suspended_ = false;
 | 
| +void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
 | 
| +    scoped_ptr<EncodeOutput> encode_output,
 | 
| +    scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref) {
 | 
| +  DVLOG(3) << __FUNCTION__;
 | 
| +  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
 | 
|  
 | 
| -  // Reset the compression session only if the frame size is not zero (which
 | 
| -  // will obviously fail). It is possible for the frame size to be zero if no
 | 
| -  // frame was submitted for encoding or requested from the video frame factory
 | 
| -  // before suspension.
 | 
| -  if (!frame_size_.IsEmpty()) {
 | 
| -    VLOG(1) << "OnResume: Resetting compression session.";
 | 
| -    ResetCompressionSession();
 | 
| +  if (encode_output->info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped) {
 | 
| +    DVLOG(2) << " frame dropped";
 | 
| +    client_task_runner_->PostTask(
 | 
| +        FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
 | 
| +                              buffer_ref->id, 0, false));
 | 
| +    return;
 | 
|    }
 | 
| -}
 | 
|  
 | 
| -bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
 | 
| -                                                 int32_t value) {
 | 
| -  base::ScopedCFTypeRef<CFNumberRef> cfvalue(
 | 
| -      CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
 | 
| -  return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
 | 
| -                                                  cfvalue) == noErr;
 | 
| -}
 | 
| +  auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
 | 
| +      CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
 | 
| +          encode_output->sample_buffer.get(), true),
 | 
| +      0));
 | 
| +  const bool keyframe =
 | 
| +      !CFDictionaryContainsKey(sample_attachments,
 | 
| +                               CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
 | 
| +
 | 
| +  size_t used_buffer_size = 0;
 | 
| +  const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
 | 
| +      encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
 | 
| +      reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
 | 
| +  if (!copy_rv) {
 | 
| +    DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
 | 
| +    used_buffer_size = 0;
 | 
| +  }
 | 
|  
 | 
| -bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
 | 
| -  CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
 | 
| -  return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
 | 
| -                                                  cfvalue) == noErr;
 | 
| +  client_task_runner_->PostTask(
 | 
| +      FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
 | 
| +                            buffer_ref->id, used_buffer_size, keyframe));
 | 
|  }
 | 
|  
 | 
| -bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
 | 
| -                                                 CFStringRef value) {
 | 
| -  return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
 | 
| -                                                  value) == noErr;
 | 
| +bool VTVideoEncodeAccelerator::ResetCompressionSession() {
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| +
 | 
| +  DestroyCompressionSession();
 | 
| +
 | 
| +  CFTypeRef attributes_keys[] = {
 | 
| +    kCVPixelBufferOpenGLCompatibilityKey,
 | 
| +    kCVPixelBufferIOSurfacePropertiesKey,
 | 
| +    kCVPixelBufferPixelFormatTypeKey
 | 
| +  };
 | 
| +  const int format[] = {
 | 
| +      CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
 | 
| +  CFTypeRef attributes_values[] = {
 | 
| +      kCFBooleanTrue,
 | 
| +      media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0)
 | 
| +          .release(),
 | 
| +      media::video_toolbox::ArrayWithIntegers(format, arraysize(format))
 | 
| +          .release()};
 | 
| +  const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
 | 
| +      media::video_toolbox::DictionaryWithKeysAndValues(
 | 
| +          attributes_keys, attributes_values, arraysize(attributes_keys));
 | 
| +  for (auto& v : attributes_values)
 | 
| +    CFRelease(v);
 | 
| +
 | 
| +  bool session_rv =
 | 
| +      CreateCompressionSession(attributes, input_visible_size_, false);
 | 
| +  if (!session_rv) {
 | 
| +    DestroyCompressionSession();
 | 
| +    return false;
 | 
| +  }
 | 
| +
 | 
| +  const bool configure_rv = ConfigureCompressionSession();
 | 
| +  if (configure_rv)
 | 
| +    RequestEncodingParametersChange(target_bitrate_, frame_rate_);
 | 
| +  return configure_rv;
 | 
|  }
 | 
|  
 | 
| -void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
 | 
| -                                                  void* request_opaque,
 | 
| -                                                  OSStatus status,
 | 
| -                                                  VTEncodeInfoFlags info,
 | 
| -                                                  CMSampleBufferRef sbuf) {
 | 
| -  auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
 | 
| -  const scoped_ptr<InProgressFrameEncode> request(
 | 
| -      reinterpret_cast<InProgressFrameEncode*>(request_opaque));
 | 
| -  bool keyframe = false;
 | 
| -  bool has_frame_data = false;
 | 
| +bool VTVideoEncodeAccelerator::CreateCompressionSession(
 | 
| +    base::ScopedCFTypeRef<CFDictionaryRef> attributes,
 | 
| +    const gfx::Size& input_size,
 | 
| +    bool require_hw_encoding) {
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread());
 | 
|  
 | 
| -  if (status != noErr) {
 | 
| -    DLOG(ERROR) << " encoding failed: " << status;
 | 
| -    encoder->cast_environment_->PostTask(
 | 
| -        CastEnvironment::MAIN, FROM_HERE,
 | 
| -        base::Bind(encoder->status_change_cb_, STATUS_CODEC_RUNTIME_ERROR));
 | 
| -  } else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
 | 
| -    DVLOG(2) << " frame dropped";
 | 
| +  std::vector<CFTypeRef> encoder_keys;
 | 
| +  std::vector<CFTypeRef> encoder_values;
 | 
| +  if (require_hw_encoding) {
 | 
| +    encoder_keys.push_back(videotoolbox_glue_
 | 
| +      ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
 | 
| +    encoder_values.push_back(kCFBooleanTrue);
 | 
|    } else {
 | 
| -    auto sample_attachments =
 | 
| -        static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
 | 
| -            CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
 | 
| -            0));
 | 
| -
 | 
| -    // If the NotSync key is not present, it implies Sync, which indicates a
 | 
| -    // keyframe (at least I think, VT documentation is, erm, sparse). Could
 | 
| -    // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
 | 
| -    keyframe = !CFDictionaryContainsKey(
 | 
| -                   sample_attachments,
 | 
| -                   CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
 | 
| -    has_frame_data = true;
 | 
| +    encoder_keys.push_back(videotoolbox_glue_
 | 
| +        ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
 | 
| +    encoder_values.push_back(kCFBooleanTrue);
 | 
|    }
 | 
| +  base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
 | 
| +      media::video_toolbox::DictionaryWithKeysAndValues(
 | 
| +          encoder_keys.data(), encoder_values.data(), encoder_keys.size());
 | 
|  
 | 
| -  // Increment the encoder-scoped frame id and assign the new value to this
 | 
| -  // frame. VideoToolbox calls the output callback serially, so this is safe.
 | 
| -  const uint32_t frame_id = ++encoder->last_frame_id_;
 | 
| -
 | 
| -  scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame());
 | 
| -  encoded_frame->frame_id = frame_id;
 | 
| -  encoded_frame->reference_time = request->reference_time;
 | 
| -  encoded_frame->rtp_timestamp = request->rtp_timestamp;
 | 
| -  if (keyframe) {
 | 
| -    encoded_frame->dependency = EncodedFrame::KEY;
 | 
| -    encoded_frame->referenced_frame_id = frame_id;
 | 
| -  } else {
 | 
| -    encoded_frame->dependency = EncodedFrame::DEPENDENT;
 | 
| -    // H.264 supports complex frame reference schemes (multiple reference
 | 
| -    // frames, slice references, backward and forward references, etc). Cast
 | 
| -    // doesn't support the concept of forward-referencing frame dependencies or
 | 
| -    // multiple frame dependencies; so pretend that all frames are only
 | 
| -    // decodable after their immediately preceding frame is decoded. This will
 | 
| -    // ensure a Cast receiver only attempts to decode the frames sequentially
 | 
| -    // and in order. Furthermore, the encoder is configured to never use forward
 | 
| -    // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There
 | 
| -    // is no way to prevent multiple reference frames.
 | 
| -    encoded_frame->referenced_frame_id = frame_id - 1;
 | 
| +  // Create the compression session.
 | 
| +  // Note that the encoder object is given to the compression session as the
 | 
| +  // callback context using a raw pointer. The C API does not allow us to use a
 | 
| +  // smart pointer, nor is this encoder ref counted. However, this is still
 | 
| +  // safe, because 1) we own the compression session and 2) we tear it down
 | 
| +  // safely. When destructing the encoder, the compression session is flushed
 | 
| +  // and invalidated. Internally, VideoToolbox will join all of its threads
 | 
| +  // before returning to the client. Therefore, when control returns to us, we
 | 
| +  // are guaranteed that the output callback will not execute again.
 | 
| +  OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
 | 
| +      kCFAllocatorDefault,
 | 
| +      input_size.width(),
 | 
| +      input_size.height(),
 | 
| +      CoreMediaGlue::kCMVideoCodecType_H264,
 | 
| +      encoder_spec,
 | 
| +      attributes,
 | 
| +      nullptr /* compressedDataAllocator */,
 | 
| +      &VTVideoEncodeAccelerator::CompressionCallback,
 | 
| +      reinterpret_cast<void*>(this),
 | 
| +      compression_session_.InitializeInto());
 | 
| +  if (status != noErr) {
 | 
| +    DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
 | 
| +    return false;
 | 
|    }
 | 
| +  DVLOG(3) << " VTCompressionSession created with HW encode: "
 | 
| +           << require_hw_encoding << ", input size=" << input_size.ToString();
 | 
| +  return true;
 | 
| +}
 | 
|  
 | 
| -  if (has_frame_data)
 | 
| -    CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
 | 
| +bool VTVideoEncodeAccelerator::ConfigureCompressionSession() {
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread());
 | 
| +  DCHECK(compression_session_);
 | 
|  
 | 
| -  // TODO(miu): Compute and populate the |deadline_utilization| and
 | 
| -  // |lossy_utilization| performance metrics in |encoded_frame|.
 | 
| +  media::video_toolbox::SessionPropertySetter session_property_setter(
 | 
| +      compression_session_, videotoolbox_glue_);
 | 
| +  bool rv = true;
 | 
| +  rv &= session_property_setter.Set(
 | 
| +      videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
 | 
| +      videotoolbox_glue_->kVTProfileLevel_H264_Baseline_AutoLevel());
 | 
| +  rv &= session_property_setter.Set(
 | 
| +      videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
 | 
| +  rv &= session_property_setter.Set(
 | 
| +      videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
 | 
| +      false);
 | 
| +  DLOG_IF(ERROR, !rv) << " Setting session property failed.";
 | 
| +  return rv;
 | 
| +}
 | 
|  
 | 
| -  encoded_frame->encode_completion_time =
 | 
| -      encoder->cast_environment_->Clock()->NowTicks();
 | 
| -  encoder->cast_environment_->PostTask(
 | 
| -      CastEnvironment::MAIN, FROM_HERE,
 | 
| -      base::Bind(request->frame_encoded_callback,
 | 
| -                 base::Passed(&encoded_frame)));
 | 
| +void VTVideoEncodeAccelerator::DestroyCompressionSession() {
 | 
| +  DCHECK(thread_checker_.CalledOnValidThread() ||
 | 
| +         (encoder_thread_.IsRunning() &&
 | 
| +          encoder_thread_task_runner_->BelongsToCurrentThread()));
 | 
| +
 | 
| +  if (compression_session_) {
 | 
| +    videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
 | 
| +    compression_session_.reset();
 | 
| +  }
 | 
|  }
 | 
|  
 | 
| -}  // namespace cast
 | 
| -}  // namespace media
 | 
| +}  // namespace content
 | 
| 
 |