OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 * |
| 10 */ |
| 11 |
| 12 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h" |
| 13 |
| 14 #if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) |
| 15 |
| 16 #include "libyuv/convert.h" |
| 17 #include "webrtc/base/checks.h" |
| 18 #include "webrtc/base/logging.h" |
| 19 #include "webrtc/common_video/interface/video_frame_buffer.h" |
| 20 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h" |
| 21 #include "webrtc/video_frame.h" |
| 22 |
| 23 namespace internal { |
| 24 |
| 25 // Convenience function for creating a dictionary. |
| 26 inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys, |
| 27 CFTypeRef* values, |
| 28 size_t size) { |
| 29 return CFDictionaryCreate(nullptr, keys, values, size, |
| 30 &kCFTypeDictionaryKeyCallBacks, |
| 31 &kCFTypeDictionaryValueCallBacks); |
| 32 } |
| 33 |
| 34 // Struct that we pass to the decoder per frame to decode. We receive it again |
| 35 // in the decoder callback. |
| 36 struct FrameDecodeParams { |
| 37 FrameDecodeParams(webrtc::DecodedImageCallback* cb, int64_t ts) |
| 38 : callback(cb), timestamp(ts) {} |
| 39 webrtc::DecodedImageCallback* callback; |
| 40 int64_t timestamp; |
| 41 }; |
| 42 |
| 43 // On decode we receive a CVPixelBuffer, which we need to convert to a frame |
| 44 // buffer for use in the rest of WebRTC. Unfortunately this involves a frame |
| 45 // copy. |
| 46 // TODO(tkchin): Stuff CVPixelBuffer into a TextureBuffer and pass that along |
| 47 // instead once the pipeline supports it. |
| 48 rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer( |
| 49 CVPixelBufferRef pixel_buffer) { |
| 50 DCHECK(pixel_buffer); |
| 51 DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) == |
| 52 kCVPixelFormatType_420YpCbCr8BiPlanarFullRange); |
| 53 size_t width = CVPixelBufferGetWidthOfPlane(pixel_buffer, 0); |
| 54 size_t height = CVPixelBufferGetHeightOfPlane(pixel_buffer, 0); |
| 55 // TODO(tkchin): Use a frame buffer pool. |
| 56 rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer = |
| 57 new rtc::RefCountedObject<webrtc::I420Buffer>(width, height); |
| 58 CVPixelBufferLockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly); |
| 59 const uint8* src_y = reinterpret_cast<const uint8*>( |
| 60 CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0)); |
| 61 int src_y_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0); |
| 62 const uint8* src_uv = reinterpret_cast<const uint8*>( |
| 63 CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1)); |
| 64 int src_uv_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1); |
| 65 int ret = libyuv::NV12ToI420( |
| 66 src_y, src_y_stride, src_uv, src_uv_stride, |
| 67 buffer->data(webrtc::kYPlane), buffer->stride(webrtc::kYPlane), |
| 68 buffer->data(webrtc::kUPlane), buffer->stride(webrtc::kUPlane), |
| 69 buffer->data(webrtc::kVPlane), buffer->stride(webrtc::kVPlane), |
| 70 width, height); |
| 71 CVPixelBufferUnlockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly); |
| 72 if (ret) { |
| 73 LOG(LS_ERROR) << "Error converting NV12 to I420: " << ret; |
| 74 return nullptr; |
| 75 } |
| 76 return buffer; |
| 77 } |
| 78 |
| 79 // This is the callback function that VideoToolbox calls when decode is |
| 80 // complete. |
| 81 void VTDecompressionOutputCallback(void* decoder, |
| 82 void* params, |
| 83 OSStatus status, |
| 84 VTDecodeInfoFlags info_flags, |
| 85 CVImageBufferRef image_buffer, |
| 86 CMTime timestamp, |
| 87 CMTime duration) { |
| 88 rtc::scoped_ptr<FrameDecodeParams> decode_params( |
| 89 reinterpret_cast<FrameDecodeParams*>(params)); |
| 90 if (status != noErr) { |
| 91 LOG(LS_ERROR) << "Failed to decode frame. Status: " << status; |
| 92 return; |
| 93 } |
| 94 // TODO(tkchin): Handle CVO properly. |
| 95 rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer = |
| 96 VideoFrameBufferForPixelBuffer(image_buffer); |
| 97 webrtc::VideoFrame decoded_frame(buffer, decode_params->timestamp, 0, |
| 98 webrtc::kVideoRotation_0); |
| 99 decode_params->callback->Decoded(decoded_frame); |
| 100 } |
| 101 |
| 102 } // namespace internal |
| 103 |
| 104 namespace webrtc { |
| 105 |
// All members start out null; the video format and decompression session are
// created lazily once the first frame (and its SPS) arrives -- see Decode()
// and ResetDecompressionSession().
H264VideoToolboxDecoder::H264VideoToolboxDecoder()
    : callback_(nullptr),
      video_format_(nullptr),
      decompression_session_(nullptr) {
}
| 111 |
H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
  // Tear down the session first, then drop the retained format description:
  // SetVideoFormat(nullptr) releases |video_format_| if one is held.
  DestroyDecompressionSession();
  SetVideoFormat(nullptr);
}
| 116 |
// No-op: the decompression session cannot be configured until the stream's
// format description (SPS) is seen, so initialization is deferred to the
// first Decode() call. |video_codec| and |number_of_cores| are ignored.
int H264VideoToolboxDecoder::InitDecode(const VideoCodec* video_codec,
                                        int number_of_cores) {
  return WEBRTC_VIDEO_CODEC_OK;
}
| 121 |
// Decodes a single Annex B encoded frame. The buffer is repackaged into a
// CMSampleBuffer, the decompression session is recreated if the stream's
// format description changed, and the sample is submitted for asynchronous
// decode; results are delivered through VTDecompressionOutputCallback.
// |missing_frames|, |fragmentation|, |codec_specific_info| and
// |render_time_ms| are unused. Returns WEBRTC_VIDEO_CODEC_OK on successful
// submission, WEBRTC_VIDEO_CODEC_ERROR otherwise.
int H264VideoToolboxDecoder::Decode(
    const EncodedImage& input_image,
    bool missing_frames,
    const RTPFragmentationHeader* fragmentation,
    const CodecSpecificInfo* codec_specific_info,
    int64_t render_time_ms) {
  DCHECK(input_image._buffer);

  CMSampleBufferRef sample_buffer = nullptr;
  if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
                                        input_image._length,
                                        video_format_,
                                        &sample_buffer)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  DCHECK(sample_buffer);
  // Check if the video format has changed, and reinitialize decoder if needed.
  CMVideoFormatDescriptionRef description =
      CMSampleBufferGetFormatDescription(sample_buffer);
  if (!CMFormatDescriptionEqual(description, video_format_)) {
    SetVideoFormat(description);
    // NOTE(review): the return value is discarded; if session creation fails
    // the decode call below fails and reports the error instead.
    ResetDecompressionSession();
  }
  VTDecodeFrameFlags decode_flags =
      kVTDecodeFrame_EnableAsynchronousDecompression;
  rtc::scoped_ptr<internal::FrameDecodeParams> frame_decode_params;
  frame_decode_params.reset(
      new internal::FrameDecodeParams(callback_, input_image._timeStamp));
  // Ownership of the params is handed to the output callback, which deletes
  // them.
  // NOTE(review): if VTDecompressionSessionDecodeFrame fails synchronously
  // without ever invoking the callback, the released FrameDecodeParams leak
  // -- confirm VideoToolbox's callback guarantees on failure.
  OSStatus status = VTDecompressionSessionDecodeFrame(
      decompression_session_, sample_buffer, decode_flags,
      frame_decode_params.release(), nullptr);
  // We created |sample_buffer|; release it regardless of the decode result.
  CFRelease(sample_buffer);
  if (status != noErr) {
    LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
| 160 |
// Stores the sink for decoded frames. Expected to be set exactly once before
// decoding begins; Release() clears it again.
int H264VideoToolboxDecoder::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  DCHECK(!callback_);
  callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
| 167 |
// Drops the decoded-image callback. The decompression session and retained
// video format are kept; they are torn down in the destructor.
int H264VideoToolboxDecoder::Release() {
  callback_ = nullptr;
  return WEBRTC_VIDEO_CODEC_OK;
}
| 172 |
// Rebuilds the decompression session from the current video format.
// NOTE(review): the result of ResetDecompressionSession() is discarded, so a
// failed rebuild is reported only when the next Decode() call fails --
// consider propagating it.
int H264VideoToolboxDecoder::Reset() {
  ResetDecompressionSession();
  return WEBRTC_VIDEO_CODEC_OK;
}
| 177 |
| 178 int H264VideoToolboxDecoder::ResetDecompressionSession() { |
| 179 DestroyDecompressionSession(); |
| 180 |
| 181 // Need to wait for the first SPS to initialize decoder. |
| 182 if (!video_format_) { |
| 183 return WEBRTC_VIDEO_CODEC_OK; |
| 184 } |
| 185 |
| 186 // Set keys for OpenGL and IOSurface compatibilty, which makes the encoder |
| 187 // create pixel buffers with GPU backed memory. The intent here is to pass |
| 188 // the pixel buffers directly so we avoid a texture upload later during |
| 189 // rendering. This currently is moot because we are converting back to an |
| 190 // I420 frame after decode, but eventually we will be able to plumb |
| 191 // CVPixelBuffers directly to the renderer. |
| 192 // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that that |
| 193 // we can pass CVPixelBuffers as native handles in decoder output. |
| 194 static size_t const attributes_size = 3; |
| 195 CFTypeRef keys[attributes_size] = { |
| 196 #if defined(WEBRTC_IOS) |
| 197 kCVPixelBufferOpenGLESCompatibilityKey, |
| 198 #elif defined(WEBRTC_MAC) |
| 199 kCVPixelBufferOpenGLCompatibilityKey, |
| 200 #endif |
| 201 kCVPixelBufferIOSurfacePropertiesKey, |
| 202 kCVPixelBufferPixelFormatTypeKey |
| 203 }; |
| 204 CFDictionaryRef io_surface_value = |
| 205 internal::CreateCFDictionary(nullptr, nullptr, 0); |
| 206 int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange; |
| 207 CFNumberRef pixel_format = |
| 208 CFNumberCreate(nullptr, kCFNumberLongType, &nv12type); |
| 209 CFTypeRef values[attributes_size] = { |
| 210 kCFBooleanTrue, |
| 211 io_surface_value, |
| 212 pixel_format |
| 213 }; |
| 214 CFDictionaryRef attributes = |
| 215 internal::CreateCFDictionary(keys, values, attributes_size); |
| 216 if (io_surface_value) { |
| 217 CFRelease(io_surface_value); |
| 218 io_surface_value = nullptr; |
| 219 } |
| 220 if (pixel_format) { |
| 221 CFRelease(pixel_format); |
| 222 pixel_format = nullptr; |
| 223 } |
| 224 VTDecompressionOutputCallbackRecord record = { |
| 225 internal::VTDecompressionOutputCallback, this, |
| 226 }; |
| 227 OSStatus status = |
| 228 VTDecompressionSessionCreate(nullptr, video_format_, nullptr, attributes, |
| 229 &record, &decompression_session_); |
| 230 CFRelease(attributes); |
| 231 if (status != noErr) { |
| 232 DestroyDecompressionSession(); |
| 233 return WEBRTC_VIDEO_CODEC_ERROR; |
| 234 } |
| 235 ConfigureDecompressionSession(); |
| 236 |
| 237 return WEBRTC_VIDEO_CODEC_OK; |
| 238 } |
| 239 |
// Applies properties to a freshly created session. Real-time decoding is
// requested only on iOS; on Mac the session keeps its defaults.
void H264VideoToolboxDecoder::ConfigureDecompressionSession() {
  DCHECK(decompression_session_);
#if defined(WEBRTC_IOS)
  VTSessionSetProperty(decompression_session_,
                       kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
#endif
}
| 247 |
| 248 void H264VideoToolboxDecoder::DestroyDecompressionSession() { |
| 249 if (decompression_session_) { |
| 250 VTDecompressionSessionInvalidate(decompression_session_); |
| 251 decompression_session_ = nullptr; |
| 252 } |
| 253 } |
| 254 |
| 255 void H264VideoToolboxDecoder::SetVideoFormat( |
| 256 CMVideoFormatDescriptionRef video_format) { |
| 257 if (video_format_ == video_format) { |
| 258 return; |
| 259 } |
| 260 if (video_format_) { |
| 261 CFRelease(video_format_); |
| 262 } |
| 263 video_format_ = video_format; |
| 264 if (video_format_) { |
| 265 CFRetain(video_format_); |
| 266 } |
| 267 } |
| 268 |
| 269 } // namespace webrtc |
| 270 |
| 271 #endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) |
OLD | NEW |