OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 * | 9 * |
10 */ | 10 */ |
11 | 11 |
12 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h" | 12 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h" |
13 | 13 |
14 #if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) | 14 #if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) |
15 | 15 |
16 #include <memory> | 16 #include <memory> |
17 | 17 |
18 #include "libyuv/convert.h" | 18 #include "libyuv/convert.h" |
19 #include "webrtc/base/checks.h" | 19 #include "webrtc/base/checks.h" |
20 #include "webrtc/base/logging.h" | 20 #include "webrtc/base/logging.h" |
21 #if defined(WEBRTC_IOS) | 21 #if defined(WEBRTC_IOS) |
22 #include "webrtc/base/objc/RTCUIApplication.h" | 22 #include "webrtc/base/objc/RTCUIApplication.h" |
23 #endif | 23 #endif |
24 #include "webrtc/common_video/include/video_frame_buffer.h" | 24 #include "webrtc/common_video/include/corevideo_frame_buffer.h" |
25 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h" | 25 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h" |
26 #include "webrtc/video_frame.h" | 26 #include "webrtc/video_frame.h" |
27 | 27 |
28 namespace internal { | 28 namespace internal { |
29 | 29 |
| 30 static const int64_t kMsPerSec = 1000; |
| 31 |
30 // Convenience function for creating a dictionary. | 32 // Convenience function for creating a dictionary. |
31 inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys, | 33 inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys, |
32 CFTypeRef* values, | 34 CFTypeRef* values, |
33 size_t size) { | 35 size_t size) { |
34 return CFDictionaryCreate(nullptr, keys, values, size, | 36 return CFDictionaryCreate(nullptr, keys, values, size, |
35 &kCFTypeDictionaryKeyCallBacks, | 37 &kCFTypeDictionaryKeyCallBacks, |
36 &kCFTypeDictionaryValueCallBacks); | 38 &kCFTypeDictionaryValueCallBacks); |
37 } | 39 } |
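For context, a minimal usage sketch of CreateCFDictionary(): building the pixel-buffer attribute dictionary that a VideoToolbox decompression session can be created with. The pixel-format key/value pair and the variable names below are illustrative assumptions, not taken from this diff.

  // Hypothetical caller: request NV12 output buffers from the decoder.
  int64_t nv12_type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  CFNumberRef pixel_format =
      CFNumberCreate(nullptr, kCFNumberLongType, &nv12_type);
  CFTypeRef keys[] = {kCVPixelBufferPixelFormatTypeKey};
  CFTypeRef values[] = {pixel_format};
  CFDictionaryRef attributes = internal::CreateCFDictionary(keys, values, 1);
  // ... use |attributes| as the destination image buffer attributes ...
  CFRelease(pixel_format);
  CFRelease(attributes);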
38 | 40 |
39 // Struct that we pass to the decoder per frame to decode. We receive it again | 41 // Struct that we pass to the decoder per frame to decode. We receive it again |
40 // in the decoder callback. | 42 // in the decoder callback. |
41 struct FrameDecodeParams { | 43 struct FrameDecodeParams { |
42 FrameDecodeParams(webrtc::DecodedImageCallback* cb, int64_t ts) | 44 FrameDecodeParams(webrtc::DecodedImageCallback* cb, int64_t ts) |
43 : callback(cb), timestamp(ts) {} | 45 : callback(cb), timestamp(ts) {} |
44 webrtc::DecodedImageCallback* callback; | 46 webrtc::DecodedImageCallback* callback; |
45 int64_t timestamp; | 47 int64_t timestamp; |
46 }; | 48 }; |
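A hedged sketch of how FrameDecodeParams typically travels (the Decode() call itself is in the elided portion of this diff): it is heap-allocated once per frame, handed to VTDecompressionSessionDecodeFrame() as the source frame ref-con, and reclaimed into a std::unique_ptr by the output callback below, which frees it. Variable names here are assumptions.

  // Sketch only: per-frame ownership handoff to VideoToolbox.
  internal::FrameDecodeParams* frame_decode_params =
      new internal::FrameDecodeParams(callback, input_image._timeStamp);
  VTDecodeFrameFlags decode_flags =
      kVTDecodeFrame_EnableAsynchronousDecompression;
  OSStatus status = VTDecompressionSessionDecodeFrame(
      decompression_session, sample_buffer, decode_flags,
      frame_decode_params,  // Deleted in VTDecompressionOutputCallback.
      nullptr);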
47 | 49 |
48 // On decode we receive a CVPixelBuffer, which we need to convert to a frame | |
49 // buffer for use in the rest of WebRTC. Unfortunately this involves a frame | |
50 // copy. | |
51 // TODO(tkchin): Stuff CVPixelBuffer into a TextureBuffer and pass that along | |
52 // instead once the pipeline supports it. | |
53 rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer( | |
54 CVPixelBufferRef pixel_buffer) { | |
55 RTC_DCHECK(pixel_buffer); | |
56 RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) == | |
57 kCVPixelFormatType_420YpCbCr8BiPlanarFullRange); | |
58 size_t width = CVPixelBufferGetWidthOfPlane(pixel_buffer, 0); | |
59 size_t height = CVPixelBufferGetHeightOfPlane(pixel_buffer, 0); | |
60 // TODO(tkchin): Use a frame buffer pool. | |
61 rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer = | |
62 new rtc::RefCountedObject<webrtc::I420Buffer>(width, height); | |
63 CVPixelBufferLockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly); | |
64 const uint8_t* src_y = reinterpret_cast<const uint8_t*>( | |
65 CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0)); | |
66 int src_y_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0); | |
67 const uint8_t* src_uv = reinterpret_cast<const uint8_t*>( | |
68 CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1)); | |
69 int src_uv_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1); | |
70 int ret = libyuv::NV12ToI420( | |
71 src_y, src_y_stride, src_uv, src_uv_stride, | |
72 buffer->MutableData(webrtc::kYPlane), buffer->stride(webrtc::kYPlane), | |
73 buffer->MutableData(webrtc::kUPlane), buffer->stride(webrtc::kUPlane), | |
74 buffer->MutableData(webrtc::kVPlane), buffer->stride(webrtc::kVPlane), | |
75 width, height); | |
76 CVPixelBufferUnlockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly); | |
77 if (ret) { | |
78 LOG(LS_ERROR) << "Error converting NV12 to I420: " << ret; | |
79 return nullptr; | |
80 } | |
81 return buffer; | |
82 } | |
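The conversion function above is deleted by this change: instead of copying the NV12 planes into an I420Buffer, the NEW side of the callback below wraps the decoded CVPixelBuffer directly in a webrtc::CoreVideoFrameBuffer (NEW column, line 67), avoiding the per-frame copy flagged in the old comment. A minimal sketch of the replacement path as it appears in the callback:

  // Zero-copy handoff: the pixel buffer is wrapped rather than converted.
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
      new rtc::RefCountedObject<webrtc::CoreVideoFrameBuffer>(image_buffer);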
83 | |
84 // This is the callback function that VideoToolbox calls when decode is | 50 // This is the callback function that VideoToolbox calls when decode is |
85 // complete. | 51 // complete. |
86 void VTDecompressionOutputCallback(void* decoder, | 52 void VTDecompressionOutputCallback(void* decoder, |
87 void* params, | 53 void* params, |
88 OSStatus status, | 54 OSStatus status, |
89 VTDecodeInfoFlags info_flags, | 55 VTDecodeInfoFlags info_flags, |
90 CVImageBufferRef image_buffer, | 56 CVImageBufferRef image_buffer, |
91 CMTime timestamp, | 57 CMTime timestamp, |
92 CMTime duration) { | 58 CMTime duration) { |
93 std::unique_ptr<FrameDecodeParams> decode_params( | 59 std::unique_ptr<FrameDecodeParams> decode_params( |
94 reinterpret_cast<FrameDecodeParams*>(params)); | 60 reinterpret_cast<FrameDecodeParams*>(params)); |
95 if (status != noErr) { | 61 if (status != noErr) { |
96 LOG(LS_ERROR) << "Failed to decode frame. Status: " << status; | 62 LOG(LS_ERROR) << "Failed to decode frame. Status: " << status; |
97 return; | 63 return; |
98 } | 64 } |
99 // TODO(tkchin): Handle CVO properly. | 65 // TODO(tkchin): Handle CVO properly. |
100 rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer = | 66 rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer = |
101 VideoFrameBufferForPixelBuffer(image_buffer); | 67 new rtc::RefCountedObject<webrtc::CoreVideoFrameBuffer>(image_buffer); |
102 webrtc::VideoFrame decoded_frame(buffer, decode_params->timestamp, 0, | 68 webrtc::VideoFrame decoded_frame(buffer, decode_params->timestamp, |
| 69 CMTimeGetSeconds(timestamp) * kMsPerSec, |
103 webrtc::kVideoRotation_0); | 70 webrtc::kVideoRotation_0); |
104 decode_params->callback->Decoded(decoded_frame); | 71 decode_params->callback->Decoded(decoded_frame); |
105 } | 72 } |
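For orientation, the callback above is the one registered when the decompression session is created; that setup lives in the elided portion of this file, so the sketch below uses assumed variable names.

  // Sketch only: wiring VTDecompressionOutputCallback into a session.
  VTDecompressionOutputCallbackRecord record = {
      internal::VTDecompressionOutputCallback,
      this,  // Comes back as the |decoder| argument of the callback.
  };
  VTDecompressionSessionRef session = nullptr;
  OSStatus status = VTDecompressionSessionCreate(
      nullptr,       // Default allocator.
      video_format,  // CMVideoFormatDescriptionRef built from the SPS/PPS.
      nullptr,       // No decoder specification.
      attributes,    // Destination pixel buffer attributes (see sketch above).
      &record, &session);

Note also that the NEW side no longer hard-codes the frame's render time to 0: it is derived from the CMTime presentation timestamp as CMTimeGetSeconds(timestamp) * kMsPerSec, i.e. converted from seconds to milliseconds.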
106 | 73 |
107 } // namespace internal | 74 } // namespace internal |
108 | 75 |
109 namespace webrtc { | 76 namespace webrtc { |
110 | 77 |
111 H264VideoToolboxDecoder::H264VideoToolboxDecoder() | 78 H264VideoToolboxDecoder::H264VideoToolboxDecoder() |
112 : callback_(nullptr), | 79 : callback_(nullptr), |
(...skipping 192 matching lines...)
305 } | 272 } |
306 } | 273 } |
307 | 274 |
308 const char* H264VideoToolboxDecoder::ImplementationName() const { | 275 const char* H264VideoToolboxDecoder::ImplementationName() const { |
309 return "VideoToolbox"; | 276 return "VideoToolbox"; |
310 } | 277 } |
311 | 278 |
312 } // namespace webrtc | 279 } // namespace webrtc |
313 | 280 |
314 #endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) | 281 #endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) |