OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 * | |
10 */ | |
11 | |
12 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h" | |
13 | |
14 #if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) | |
15 | |
16 #include <memory> | |
17 #include <string> | |
18 #include <vector> | |
19 | |
20 #if defined(WEBRTC_IOS) | |
21 #include "RTCUIApplication.h" | |
22 #endif | |
23 #include "libyuv/convert_from.h" | |
24 #include "webrtc/base/checks.h" | |
25 #include "webrtc/base/logging.h" | |
26 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h" | |
27 #include "webrtc/system_wrappers/include/clock.h" | |
28 | |
29 namespace internal { | |
30 | |
31 // The ratio between kVTCompressionPropertyKey_DataRateLimits and | |
32 // kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher | |
33 // than the average bit rate to avoid undershooting the target. | |
34 const float kLimitToAverageBitRateFactor = 1.5f; | |
35 | |
36 // Convenience function for creating a dictionary. | |
37 inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys, | |
38 CFTypeRef* values, | |
39 size_t size) { | |
40 return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size, | |
41 &kCFTypeDictionaryKeyCallBacks, | |
42 &kCFTypeDictionaryValueCallBacks); | |
43 } | |
44 | |
45 // Copies characters from a CFStringRef into a std::string. | |
46 std::string CFStringToString(const CFStringRef cf_string) { | |
47 RTC_DCHECK(cf_string); | |
48 std::string std_string; | |
49 // Get the size needed for UTF8 plus terminating character. | |
50 size_t buffer_size = | |
51 CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string), | |
52 kCFStringEncodingUTF8) + | |
53 1; | |
54 std::unique_ptr<char[]> buffer(new char[buffer_size]); | |
55 if (CFStringGetCString(cf_string, buffer.get(), buffer_size, | |
56 kCFStringEncodingUTF8)) { | |
57 // Copy over the characters. | |
58 std_string.assign(buffer.get()); | |
59 } | |
60 return std_string; | |
61 } | |
62 | |
63 // Convenience function for setting a VT property. | |
64 void SetVTSessionProperty(VTSessionRef session, | |
65 CFStringRef key, | |
66 int32_t value) { | |
67 CFNumberRef cfNum = | |
68 CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value); | |
69 OSStatus status = VTSessionSetProperty(session, key, cfNum); | |
70 CFRelease(cfNum); | |
71 if (status != noErr) { | |
72 std::string key_string = CFStringToString(key); | |
73 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
74 << " to " << value << ": " << status; | |
75 } | |
76 } | |
77 | |
78 // Convenience function for setting a VT property. | |
79 void SetVTSessionProperty(VTSessionRef session, | |
80 CFStringRef key, | |
81 uint32_t value) { | |
82 int64_t value_64 = value; | |
83 CFNumberRef cfNum = | |
84 CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &value_64); | |
85 OSStatus status = VTSessionSetProperty(session, key, cfNum); | |
86 CFRelease(cfNum); | |
87 if (status != noErr) { | |
88 std::string key_string = CFStringToString(key); | |
89 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
90 << " to " << value << ": " << status; | |
91 } | |
92 } | |
93 | |
94 // Convenience function for setting a VT property. | |
95 void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) { | |
96 CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse; | |
97 OSStatus status = VTSessionSetProperty(session, key, cf_bool); | |
98 if (status != noErr) { | |
99 std::string key_string = CFStringToString(key); | |
100 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
101 << " to " << value << ": " << status; | |
102 } | |
103 } | |
104 | |
105 // Convenience function for setting a VT property. | |
106 void SetVTSessionProperty(VTSessionRef session, | |
107 CFStringRef key, | |
108 CFStringRef value) { | |
109 OSStatus status = VTSessionSetProperty(session, key, value); | |
110 if (status != noErr) { | |
111 std::string key_string = CFStringToString(key); | |
112 std::string val_string = CFStringToString(value); | |
113 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
114 << " to " << val_string << ": " << status; | |
115 } | |
116 } | |
117 | |
118 // Struct that we pass to the encoder per frame to encode. We receive it again | |
119 // in the encoder callback. | |
// Struct that we pass to the encoder per frame to encode. We receive it again
// in the encoder callback.
struct FrameEncodeParams {
  // |csi| may be null; in that case a default-constructed CodecSpecificInfo
  // tagged as H264 is used instead. All other arguments are stored as-is.
  FrameEncodeParams(webrtc::H264VideoToolboxEncoder* e,
                    const webrtc::CodecSpecificInfo* csi,
                    int32_t w,
                    int32_t h,
                    int64_t rtms,
                    uint32_t ts,
                    webrtc::VideoRotation r)
      : encoder(e),
        width(w),
        height(h),
        render_time_ms(rtms),
        timestamp(ts),
        rotation(r) {
    if (csi) {
      codec_specific_info = *csi;
    } else {
      codec_specific_info.codecType = webrtc::kVideoCodecH264;
    }
  }

  webrtc::H264VideoToolboxEncoder* encoder;  // Not owned.
  webrtc::CodecSpecificInfo codec_specific_info;
  int32_t width;   // Frame width passed through to OnEncodedFrame().
  int32_t height;  // Frame height passed through to OnEncodedFrame().
  int64_t render_time_ms;  // From VideoFrame::render_time_ms().
  uint32_t timestamp;      // From VideoFrame::timestamp().
  webrtc::VideoRotation rotation;
};
149 | |
150 // We receive I420Frames as input, but we need to feed CVPixelBuffers into the | |
151 // encoder. This performs the copy and format conversion. | |
152 // TODO(tkchin): See if encoder will accept i420 frames and compare performance. | |
153 bool CopyVideoFrameToPixelBuffer( | |
154 const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& frame, | |
155 CVPixelBufferRef pixel_buffer) { | |
156 RTC_DCHECK(pixel_buffer); | |
157 RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) == | |
158 kCVPixelFormatType_420YpCbCr8BiPlanarFullRange); | |
159 RTC_DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) == | |
160 static_cast<size_t>(frame->height())); | |
161 RTC_DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) == | |
162 static_cast<size_t>(frame->width())); | |
163 | |
164 CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0); | |
165 if (cvRet != kCVReturnSuccess) { | |
166 LOG(LS_ERROR) << "Failed to lock base address: " << cvRet; | |
167 return false; | |
168 } | |
169 uint8_t* dst_y = reinterpret_cast<uint8_t*>( | |
170 CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0)); | |
171 int dst_stride_y = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0); | |
172 uint8_t* dst_uv = reinterpret_cast<uint8_t*>( | |
173 CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1)); | |
174 int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1); | |
175 // Convert I420 to NV12. | |
176 int ret = libyuv::I420ToNV12( | |
177 frame->DataY(), frame->StrideY(), | |
178 frame->DataU(), frame->StrideU(), | |
179 frame->DataV(), frame->StrideV(), | |
180 dst_y, dst_stride_y, dst_uv, dst_stride_uv, | |
181 frame->width(), frame->height()); | |
182 CVPixelBufferUnlockBaseAddress(pixel_buffer, 0); | |
183 if (ret) { | |
184 LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret; | |
185 return false; | |
186 } | |
187 return true; | |
188 } | |
189 | |
190 // This is the callback function that VideoToolbox calls when encode is | |
191 // complete. From inspection this happens on its own queue. | |
192 void VTCompressionOutputCallback(void* encoder, | |
193 void* params, | |
194 OSStatus status, | |
195 VTEncodeInfoFlags info_flags, | |
196 CMSampleBufferRef sample_buffer) { | |
197 std::unique_ptr<FrameEncodeParams> encode_params( | |
198 reinterpret_cast<FrameEncodeParams*>(params)); | |
199 encode_params->encoder->OnEncodedFrame( | |
200 status, info_flags, sample_buffer, encode_params->codec_specific_info, | |
201 encode_params->width, encode_params->height, | |
202 encode_params->render_time_ms, encode_params->timestamp, | |
203 encode_params->rotation); | |
204 } | |
205 | |
206 } // namespace internal | |
207 | |
208 namespace webrtc { | |
209 | |
210 // .5 is set as a mininum to prevent overcompensating for large temporary | |
211 // overshoots. We don't want to degrade video quality too badly. | |
212 // .95 is set to prevent oscillations. When a lower bitrate is set on the | |
213 // encoder than previously set, its output seems to have a brief period of | |
214 // drastically reduced bitrate, so we want to avoid that. In steady state | |
215 // conditions, 0.95 seems to give us better overall bitrate over long periods | |
216 // of time. | |
// Constructs an uninitialized encoder; InitEncode() must be called before
// frames can be encoded. The bitrate adjuster's min/max fractions (.5, .95)
// are explained in the comment block directly above.
H264VideoToolboxEncoder::H264VideoToolboxEncoder()
    : callback_(nullptr),
      compression_session_(nullptr),
      bitrate_adjuster_(Clock::GetRealTimeClock(), .5, .95) {}
221 | |
// Tears down the active compression session, if any, invalidating it so no
// further callbacks fire.
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
  DestroyCompressionSession();
}
225 | |
226 int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings, | |
227 int number_of_cores, | |
228 size_t max_payload_size) { | |
229 RTC_DCHECK(codec_settings); | |
230 RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264); | |
231 { | |
232 rtc::CritScope lock(&quality_scaler_crit_); | |
233 quality_scaler_.Init(QualityScaler::kLowH264QpThreshold, | |
234 QualityScaler::kBadH264QpThreshold, | |
235 codec_settings->startBitrate, codec_settings->width, | |
236 codec_settings->height, codec_settings->maxFramerate); | |
237 QualityScaler::Resolution res = quality_scaler_.GetScaledResolution(); | |
238 // TODO(tkchin): We may need to enforce width/height dimension restrictions | |
239 // to match what the encoder supports. | |
240 width_ = res.width; | |
241 height_ = res.height; | |
242 } | |
243 // We can only set average bitrate on the HW encoder. | |
244 target_bitrate_bps_ = codec_settings->startBitrate; | |
245 bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_); | |
246 | |
247 // TODO(tkchin): Try setting payload size via | |
248 // kVTCompressionPropertyKey_MaxH264SliceBytes. | |
249 | |
250 return ResetCompressionSession(); | |
251 } | |
252 | |
253 rtc::scoped_refptr<VideoFrameBuffer> | |
254 H264VideoToolboxEncoder::GetScaledBufferOnEncode( | |
255 const rtc::scoped_refptr<VideoFrameBuffer>& frame) { | |
256 rtc::CritScope lock(&quality_scaler_crit_); | |
257 quality_scaler_.OnEncodeFrame(frame->width(), frame->height()); | |
258 if (!frame->native_handle()) | |
259 return quality_scaler_.GetScaledBuffer(frame); | |
260 | |
261 // Handle native (CVImageRef) scaling. | |
262 const QualityScaler::Resolution res = quality_scaler_.GetScaledResolution(); | |
263 if (res.width == frame->width() && res.height == frame->height()) | |
264 return frame; | |
265 // TODO(magjed): Implement efficient CVImageRef -> CVImageRef scaling instead | |
266 // of doing it via I420. | |
267 return quality_scaler_.GetScaledBuffer(frame->NativeToI420Buffer()); | |
268 } | |
269 | |
// Encodes one frame. Scales the input if the quality scaler asks for it,
// obtains or converts to an NV12 CVPixelBuffer, and submits it to the
// compression session. The encoded result arrives asynchronously via
// internal::VTCompressionOutputCallback -> OnEncodedFrame().
int H264VideoToolboxEncoder::Encode(
    const VideoFrame& frame,
    const CodecSpecificInfo* codec_specific_info,
    const std::vector<FrameType>* frame_types) {
  RTC_DCHECK(!frame.IsZeroSize());
  if (!callback_ || !compression_session_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
#if defined(WEBRTC_IOS)
  if (!RTCIsUIApplicationActive()) {
    // Ignore all encode requests when app isn't active. In this state, the
    // hardware encoder has been invalidated by the OS.
    return WEBRTC_VIDEO_CODEC_OK;
  }
#endif
  bool is_keyframe_required = false;
  rtc::scoped_refptr<VideoFrameBuffer> input_image(
      GetScaledBufferOnEncode(frame.video_frame_buffer()));

  // A resolution change (e.g. from the quality scaler) requires a brand-new
  // compression session.
  if (input_image->width() != width_ || input_image->height() != height_) {
    width_ = input_image->width();
    height_ = input_image->height();
    int ret = ResetCompressionSession();
    if (ret < 0)
      return ret;
  }

  CVPixelBufferRef pixel_buffer =
      static_cast<CVPixelBufferRef>(input_image->native_handle());
  if (pixel_buffer) {
    // Native frame: retain for the duration of this call; the matching
    // CVBufferRelease happens below after the encode submission.
    CVBufferRetain(pixel_buffer);
  } else {
    // Get a pixel buffer from the pool and copy frame data over.
    CVPixelBufferPoolRef pixel_buffer_pool =
        VTCompressionSessionGetPixelBufferPool(compression_session_);
#if defined(WEBRTC_IOS)
    if (!pixel_buffer_pool) {
      // Kind of a hack. On backgrounding, the compression session seems to get
      // invalidated, which causes this pool call to fail when the application
      // is foregrounded and frames are being sent for encoding again.
      // Resetting the session when this happens fixes the issue.
      // In addition we request a keyframe so video can recover quickly.
      ResetCompressionSession();
      pixel_buffer_pool =
          VTCompressionSessionGetPixelBufferPool(compression_session_);
      is_keyframe_required = true;
    }
#endif
    if (!pixel_buffer_pool) {
      LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(
        nullptr, pixel_buffer_pool, &pixel_buffer);
    if (ret != kCVReturnSuccess) {
      LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
      // We probably want to drop frames here, since failure probably means
      // that the pool is empty.
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    RTC_DCHECK(pixel_buffer);
    if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {
      LOG(LS_ERROR) << "Failed to copy frame data.";
      CVBufferRelease(pixel_buffer);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  // Check if we need a keyframe.
  if (!is_keyframe_required && frame_types) {
    for (auto frame_type : *frame_types) {
      if (frame_type == kVideoFrameKey) {
        is_keyframe_required = true;
        break;
      }
    }
  }

  CMTime presentation_time_stamp =
      CMTimeMake(frame.render_time_ms(), 1000);
  CFDictionaryRef frame_properties = nullptr;
  if (is_keyframe_required) {
    CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
    CFTypeRef values[] = {kCFBooleanTrue};
    frame_properties = internal::CreateCFDictionary(keys, values, 1);
  }
  // Ownership of |encode_params| transfers to the output callback via
  // release() below; internal::VTCompressionOutputCallback deletes it.
  // NOTE(review): if VTCompressionSessionEncodeFrame fails synchronously and
  // the callback never fires, the released params would leak — TODO confirm
  // VideoToolbox behavior in that case.
  std::unique_ptr<internal::FrameEncodeParams> encode_params;
  encode_params.reset(new internal::FrameEncodeParams(
      this, codec_specific_info, width_, height_, frame.render_time_ms(),
      frame.timestamp(), frame.rotation()));

  // Update the bitrate if needed.
  SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());

  OSStatus status = VTCompressionSessionEncodeFrame(
      compression_session_, pixel_buffer, presentation_time_stamp,
      kCMTimeInvalid, frame_properties, encode_params.release(), nullptr);
  if (frame_properties) {
    CFRelease(frame_properties);
  }
  if (pixel_buffer) {
    CVBufferRelease(pixel_buffer);
  }
  if (status != noErr) {
    LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
379 | |
380 int H264VideoToolboxEncoder::RegisterEncodeCompleteCallback( | |
381 EncodedImageCallback* callback) { | |
382 callback_ = callback; | |
383 return WEBRTC_VIDEO_CODEC_OK; | |
384 } | |
385 | |
386 void H264VideoToolboxEncoder::OnDroppedFrame() { | |
387 rtc::CritScope lock(&quality_scaler_crit_); | |
388 quality_scaler_.ReportDroppedFrame(); | |
389 } | |
390 | |
391 int H264VideoToolboxEncoder::SetChannelParameters(uint32_t packet_loss, | |
392 int64_t rtt) { | |
393 // Encoder doesn't know anything about packet loss or rtt so just return. | |
394 return WEBRTC_VIDEO_CODEC_OK; | |
395 } | |
396 | |
397 int H264VideoToolboxEncoder::SetRates(uint32_t new_bitrate_kbit, | |
398 uint32_t frame_rate) { | |
399 target_bitrate_bps_ = 1000 * new_bitrate_kbit; | |
400 bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_); | |
401 SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps()); | |
402 | |
403 rtc::CritScope lock(&quality_scaler_crit_); | |
404 quality_scaler_.ReportFramerate(frame_rate); | |
405 | |
406 return WEBRTC_VIDEO_CODEC_OK; | |
407 } | |
408 | |
// Releases encoder resources. Returns the result of the session reset.
// NOTE(review): ResetCompressionSession() also creates a fresh session after
// destroying the old one; only the invalidation side effect is needed here.
int H264VideoToolboxEncoder::Release() {
  // Need to reset so that the session is invalidated and won't use the
  // callback anymore. Do not remove callback until the session is invalidated
  // since async encoder callbacks can occur until invalidation.
  int ret = ResetCompressionSession();
  callback_ = nullptr;
  return ret;
}
417 | |
// Destroys any existing compression session and creates a fresh one sized to
// the current width_/height_. Returns WEBRTC_VIDEO_CODEC_OK, or
// WEBRTC_VIDEO_CODEC_ERROR when session creation fails.
int H264VideoToolboxEncoder::ResetCompressionSession() {
  DestroyCompressionSession();

  // Set source image buffer attributes. These attributes will be present on
  // buffers retrieved from the encoder's pixel buffer pool.
  const size_t attributes_size = 3;
  CFTypeRef keys[attributes_size] = {
#if defined(WEBRTC_IOS)
    kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
    kCVPixelBufferOpenGLCompatibilityKey,
#endif
    kCVPixelBufferIOSurfacePropertiesKey,
    kCVPixelBufferPixelFormatTypeKey
  };
  // Empty dictionary — presumably requests default IOSurface backing for the
  // pool's buffers; verify against CoreVideo documentation.
  CFDictionaryRef io_surface_value =
      internal::CreateCFDictionary(nullptr, nullptr, 0);
  int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  CFNumberRef pixel_format =
      CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
  CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
                                       pixel_format};
  CFDictionaryRef source_attributes =
      internal::CreateCFDictionary(keys, values, attributes_size);
  // The attributes dictionary retained these values on creation (CFType
  // callbacks), so our local references can be dropped now.
  if (io_surface_value) {
    CFRelease(io_surface_value);
    io_surface_value = nullptr;
  }
  if (pixel_format) {
    CFRelease(pixel_format);
    pixel_format = nullptr;
  }
  OSStatus status = VTCompressionSessionCreate(
      nullptr,  // use default allocator
      width_, height_, kCMVideoCodecType_H264,
      nullptr,  // use default encoder
      source_attributes,
      nullptr,  // use default compressed data allocator
      internal::VTCompressionOutputCallback, this, &compression_session_);
  if (source_attributes) {
    CFRelease(source_attributes);
    source_attributes = nullptr;
  }
  if (status != noErr) {
    LOG(LS_ERROR) << "Failed to create compression session: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  ConfigureCompressionSession();
  return WEBRTC_VIDEO_CODEC_OK;
}
468 | |
// Applies the static encoder settings — real-time mode, baseline profile,
// no frame reordering, the current target bitrate, and keyframe interval
// limits — to a freshly created compression session.
void H264VideoToolboxEncoder::ConfigureCompressionSession() {
  RTC_DCHECK(compression_session_);
  internal::SetVTSessionProperty(compression_session_,
                                 kVTCompressionPropertyKey_RealTime, true);
  internal::SetVTSessionProperty(compression_session_,
                                 kVTCompressionPropertyKey_ProfileLevel,
                                 kVTProfileLevel_H264_Baseline_AutoLevel);
  // Frame reordering is disabled — presumably to keep latency low; confirm
  // before changing.
  internal::SetVTSessionProperty(compression_session_,
                                 kVTCompressionPropertyKey_AllowFrameReordering,
                                 false);
  SetEncoderBitrateBps(target_bitrate_bps_);
  // TODO(tkchin): Look at entropy mode and colorspace matrices.
  // TODO(tkchin): Investigate to see if there's any way to make this work.
  // May need it to interop with Android. Currently this call just fails.
  // On inspecting encoder output on iOS8, this value is set to 6.
  // internal::SetVTSessionProperty(compression_session_,
  //                                kVTCompressionPropertyKey_MaxFrameDelayCount,
  //                                1);

  // Set a relatively large value for keyframe emission (7200 frames or
  // 4 minutes). Note: the plain int literals select the int32_t overload of
  // SetVTSessionProperty.
  internal::SetVTSessionProperty(
      compression_session_,
      kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
  internal::SetVTSessionProperty(
      compression_session_,
      kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
}
497 | |
498 void H264VideoToolboxEncoder::DestroyCompressionSession() { | |
499 if (compression_session_) { | |
500 VTCompressionSessionInvalidate(compression_session_); | |
501 CFRelease(compression_session_); | |
502 compression_session_ = nullptr; | |
503 } | |
504 } | |
505 | |
506 const char* H264VideoToolboxEncoder::ImplementationName() const { | |
507 return "VideoToolbox"; | |
508 } | |
509 | |
510 bool H264VideoToolboxEncoder::SupportsNativeHandle() const { | |
511 return true; | |
512 } | |
513 | |
514 void H264VideoToolboxEncoder::SetBitrateBps(uint32_t bitrate_bps) { | |
515 if (encoder_bitrate_bps_ != bitrate_bps) { | |
516 SetEncoderBitrateBps(bitrate_bps); | |
517 } | |
518 } | |
519 | |
520 void H264VideoToolboxEncoder::SetEncoderBitrateBps(uint32_t bitrate_bps) { | |
521 if (compression_session_) { | |
522 internal::SetVTSessionProperty(compression_session_, | |
523 kVTCompressionPropertyKey_AverageBitRate, | |
524 bitrate_bps); | |
525 | |
526 // TODO(tkchin): Add a helper method to set array value. | |
527 int64_t data_limit_bytes_per_second_value = static_cast<int64_t>( | |
528 bitrate_bps * internal::kLimitToAverageBitRateFactor / 8); | |
529 CFNumberRef bytes_per_second = | |
530 CFNumberCreate(kCFAllocatorDefault, | |
531 kCFNumberSInt64Type, | |
532 &data_limit_bytes_per_second_value); | |
533 int64_t one_second_value = 1; | |
534 CFNumberRef one_second = | |
535 CFNumberCreate(kCFAllocatorDefault, | |
536 kCFNumberSInt64Type, | |
537 &one_second_value); | |
538 const void* nums[2] = { bytes_per_second, one_second }; | |
539 CFArrayRef data_rate_limits = | |
540 CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks); | |
541 OSStatus status = | |
542 VTSessionSetProperty(compression_session_, | |
543 kVTCompressionPropertyKey_DataRateLimits, | |
544 data_rate_limits); | |
545 if (bytes_per_second) { | |
546 CFRelease(bytes_per_second); | |
547 } | |
548 if (one_second) { | |
549 CFRelease(one_second); | |
550 } | |
551 if (data_rate_limits) { | |
552 CFRelease(data_rate_limits); | |
553 } | |
554 if (status != noErr) { | |
555 LOG(LS_ERROR) << "Failed to set data rate limit"; | |
556 } | |
557 | |
558 encoder_bitrate_bps_ = bitrate_bps; | |
559 } | |
560 } | |
561 | |
// Called (via internal::VTCompressionOutputCallback, on VideoToolbox's own
// queue) when the encoder finishes a frame. Converts the CMSampleBuffer to an
// Annex-B bitstream, reports QP/drops to the quality scaler, and delivers the
// encoded image to the registered callback.
void H264VideoToolboxEncoder::OnEncodedFrame(
    OSStatus status,
    VTEncodeInfoFlags info_flags,
    CMSampleBufferRef sample_buffer,
    CodecSpecificInfo codec_specific_info,
    int32_t width,
    int32_t height,
    int64_t render_time_ms,
    uint32_t timestamp,
    VideoRotation rotation) {
  if (status != noErr) {
    LOG(LS_ERROR) << "H264 encode failed.";
    return;
  }
  if (info_flags & kVTEncodeInfo_FrameDropped) {
    LOG(LS_INFO) << "H264 encode dropped frame.";
    rtc::CritScope lock(&quality_scaler_crit_);
    quality_scaler_.ReportDroppedFrame();
    return;
  }

  // A sample is a keyframe when the NotSync attachment is absent.
  bool is_keyframe = false;
  CFArrayRef attachments =
      CMSampleBufferGetSampleAttachmentsArray(sample_buffer, 0);
  if (attachments != nullptr && CFArrayGetCount(attachments)) {
    CFDictionaryRef attachment =
        static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
    is_keyframe =
        !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
  }

  if (is_keyframe) {
    LOG(LS_INFO) << "Generated keyframe";
  }

  // Convert the sample buffer into a buffer suitable for RTP packetization.
  // TODO(tkchin): Allocate buffers through a pool.
  std::unique_ptr<rtc::Buffer> buffer(new rtc::Buffer());
  std::unique_ptr<webrtc::RTPFragmentationHeader> header;
  {
    webrtc::RTPFragmentationHeader* header_raw;
    bool result = H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
                                                   buffer.get(), &header_raw);
    // Adopt the header before checking |result| so it is freed on failure.
    header.reset(header_raw);
    if (!result) {
      return;
    }
  }
  webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
  frame._encodedWidth = width;
  frame._encodedHeight = height;
  frame._completeFrame = true;
  frame._frameType =
      is_keyframe ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta;
  frame.capture_time_ms_ = render_time_ms;
  frame._timeStamp = timestamp;
  frame.rotation_ = rotation;

  // Report the QP of the last slice so the quality scaler can react to the
  // encoder's output quality.
  h264_bitstream_parser_.ParseBitstream(buffer->data(), buffer->size());
  int qp;
  if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
    rtc::CritScope lock(&quality_scaler_crit_);
    quality_scaler_.ReportQP(qp);
  }

  int result = callback_->Encoded(frame, &codec_specific_info, header.get());
  if (result != 0) {
    LOG(LS_ERROR) << "Encode callback failed: " << result;
    return;
  }
  // Feed the actual encoded size back to the bitrate adjuster.
  bitrate_adjuster_.Update(frame._size);
}
634 | |
635 } // namespace webrtc | |
636 | |
637 #endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) | |
OLD | NEW |