| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license | |
| 5 * that can be found in the LICENSE file in the root of the source | |
| 6 * tree. An additional intellectual property rights grant can be found | |
| 7 * in the file PATENTS. All contributing project authors may | |
| 8 * be found in the AUTHORS file in the root of the source tree. | |
| 9 * | |
| 10 */ | |
| 11 | |
| 12 #include "webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.h" | |
| 13 | |
| 14 #include <memory> | |
| 15 #include <string> | |
| 16 #include <vector> | |
| 17 | |
| 18 #if defined(WEBRTC_IOS) | |
| 19 #import "WebRTC/UIDevice+RTCDevice.h" | |
| 20 #include "RTCUIApplication.h" | |
| 21 #endif | |
| 22 #include "libyuv/convert_from.h" | |
| 23 #include "webrtc/base/checks.h" | |
| 24 #include "webrtc/base/logging.h" | |
| 25 #include "webrtc/common_video/include/corevideo_frame_buffer.h" | |
| 26 #include "webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_nalu.h" | |
| 27 #include "webrtc/system_wrappers/include/clock.h" | |
| 28 | |
namespace internal {

// The ratio between kVTCompressionPropertyKey_DataRateLimits and
// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
// than the average bit rate to avoid undershooting the target.
const float kLimitToAverageBitRateFactor = 1.5f;
// These thresholds deviate from the default h264 QP thresholds, as they
// have been found to work better on devices that support VideoToolbox.
// They are passed to QualityScaler::Init in InitEncode; presumably low/high
// QP bounds that drive resolution scaling decisions -- see QualityScaler.
const int kLowH264QpThreshold = 28;
const int kHighH264QpThreshold = 39;
| 40 // Convenience function for creating a dictionary. | |
| 41 inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys, | |
| 42 CFTypeRef* values, | |
| 43 size_t size) { | |
| 44 return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size, | |
| 45 &kCFTypeDictionaryKeyCallBacks, | |
| 46 &kCFTypeDictionaryValueCallBacks); | |
| 47 } | |
| 48 | |
| 49 // Copies characters from a CFStringRef into a std::string. | |
| 50 std::string CFStringToString(const CFStringRef cf_string) { | |
| 51 RTC_DCHECK(cf_string); | |
| 52 std::string std_string; | |
| 53 // Get the size needed for UTF8 plus terminating character. | |
| 54 size_t buffer_size = | |
| 55 CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string), | |
| 56 kCFStringEncodingUTF8) + | |
| 57 1; | |
| 58 std::unique_ptr<char[]> buffer(new char[buffer_size]); | |
| 59 if (CFStringGetCString(cf_string, buffer.get(), buffer_size, | |
| 60 kCFStringEncodingUTF8)) { | |
| 61 // Copy over the characters. | |
| 62 std_string.assign(buffer.get()); | |
| 63 } | |
| 64 return std_string; | |
| 65 } | |
| 66 | |
| 67 // Convenience function for setting a VT property. | |
| 68 void SetVTSessionProperty(VTSessionRef session, | |
| 69 CFStringRef key, | |
| 70 int32_t value) { | |
| 71 CFNumberRef cfNum = | |
| 72 CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value); | |
| 73 OSStatus status = VTSessionSetProperty(session, key, cfNum); | |
| 74 CFRelease(cfNum); | |
| 75 if (status != noErr) { | |
| 76 std::string key_string = CFStringToString(key); | |
| 77 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
| 78 << " to " << value << ": " << status; | |
| 79 } | |
| 80 } | |
| 81 | |
| 82 // Convenience function for setting a VT property. | |
| 83 void SetVTSessionProperty(VTSessionRef session, | |
| 84 CFStringRef key, | |
| 85 uint32_t value) { | |
| 86 int64_t value_64 = value; | |
| 87 CFNumberRef cfNum = | |
| 88 CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &value_64); | |
| 89 OSStatus status = VTSessionSetProperty(session, key, cfNum); | |
| 90 CFRelease(cfNum); | |
| 91 if (status != noErr) { | |
| 92 std::string key_string = CFStringToString(key); | |
| 93 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
| 94 << " to " << value << ": " << status; | |
| 95 } | |
| 96 } | |
| 97 | |
| 98 // Convenience function for setting a VT property. | |
| 99 void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) { | |
| 100 CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse; | |
| 101 OSStatus status = VTSessionSetProperty(session, key, cf_bool); | |
| 102 if (status != noErr) { | |
| 103 std::string key_string = CFStringToString(key); | |
| 104 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
| 105 << " to " << value << ": " << status; | |
| 106 } | |
| 107 } | |
| 108 | |
| 109 // Convenience function for setting a VT property. | |
| 110 void SetVTSessionProperty(VTSessionRef session, | |
| 111 CFStringRef key, | |
| 112 CFStringRef value) { | |
| 113 OSStatus status = VTSessionSetProperty(session, key, value); | |
| 114 if (status != noErr) { | |
| 115 std::string key_string = CFStringToString(key); | |
| 116 std::string val_string = CFStringToString(value); | |
| 117 LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string | |
| 118 << " to " << val_string << ": " << status; | |
| 119 } | |
| 120 } | |
| 121 | |
// Struct that we pass to the encoder per frame to encode. We receive it again
// in the encoder callback (VTCompressionOutputCallback), which takes
// ownership and frees it.
struct FrameEncodeParams {
  // |csi| may be null; in that case a default-constructed CodecSpecificInfo
  // tagged as H264 is used.
  FrameEncodeParams(webrtc::H264VideoToolboxEncoder* e,
                    const webrtc::CodecSpecificInfo* csi,
                    int32_t w,
                    int32_t h,
                    int64_t rtms,
                    uint32_t ts,
                    webrtc::VideoRotation r)
      : encoder(e),
        width(w),
        height(h),
        render_time_ms(rtms),
        timestamp(ts),
        rotation(r) {
    if (csi) {
      codec_specific_info = *csi;
    } else {
      codec_specific_info.codecType = webrtc::kVideoCodecH264;
    }
  }

  webrtc::H264VideoToolboxEncoder* encoder;  // Not owned.
  webrtc::CodecSpecificInfo codec_specific_info;
  int32_t width;
  int32_t height;
  int64_t render_time_ms;
  uint32_t timestamp;
  webrtc::VideoRotation rotation;
};
| 153 | |
// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
// encoder. This performs the copy and format conversion.
// Returns true on success. |pixel_buffer| must already be NV12
// (420YpCbCr8BiPlanarFullRange) with plane 0 matching the frame's
// dimensions (DCHECKed below).
// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
bool CopyVideoFrameToPixelBuffer(
    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& frame,
    CVPixelBufferRef pixel_buffer) {
  RTC_DCHECK(pixel_buffer);
  RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixel_buffer),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
  RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0),
                static_cast<size_t>(frame->height()));
  RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0),
                static_cast<size_t>(frame->width()));

  // The base address must be locked for CPU access for the duration of the
  // copy, and unlocked on every exit path after a successful lock.
  CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
  if (cvRet != kCVReturnSuccess) {
    LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
    return false;
  }
  // Destination planes: plane 0 is Y, plane 1 is interleaved UV (NV12).
  uint8_t* dst_y = reinterpret_cast<uint8_t*>(
      CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0));
  int dst_stride_y = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0);
  uint8_t* dst_uv = reinterpret_cast<uint8_t*>(
      CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1));
  int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
  // Convert I420 to NV12.
  int ret = libyuv::I420ToNV12(
      frame->DataY(), frame->StrideY(),
      frame->DataU(), frame->StrideU(),
      frame->DataV(), frame->StrideV(),
      dst_y, dst_stride_y, dst_uv, dst_stride_uv,
      frame->width(), frame->height());
  CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
  if (ret) {
    LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
    return false;
  }
  return true;
}
| 193 | |
| 194 CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) { | |
| 195 if (!pixel_buffer_pool) { | |
| 196 LOG(LS_ERROR) << "Failed to get pixel buffer pool."; | |
| 197 return nullptr; | |
| 198 } | |
| 199 CVPixelBufferRef pixel_buffer; | |
| 200 CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool, | |
| 201 &pixel_buffer); | |
| 202 if (ret != kCVReturnSuccess) { | |
| 203 LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret; | |
| 204 // We probably want to drop frames here, since failure probably means | |
| 205 // that the pool is empty. | |
| 206 return nullptr; | |
| 207 } | |
| 208 return pixel_buffer; | |
| 209 } | |
| 210 | |
// This is the callback function that VideoToolbox calls when encode is
// complete. From inspection this happens on its own queue.
// |params| is the heap-allocated FrameEncodeParams released into
// VTCompressionSessionEncodeFrame by Encode(); ownership is taken back here
// via unique_ptr so it is freed exactly once per callback.
void VTCompressionOutputCallback(void* encoder,
                                 void* params,
                                 OSStatus status,
                                 VTEncodeInfoFlags info_flags,
                                 CMSampleBufferRef sample_buffer) {
  std::unique_ptr<FrameEncodeParams> encode_params(
      reinterpret_cast<FrameEncodeParams*>(params));
  encode_params->encoder->OnEncodedFrame(
      status, info_flags, sample_buffer, encode_params->codec_specific_info,
      encode_params->width, encode_params->height,
      encode_params->render_time_ms, encode_params->timestamp,
      encode_params->rotation);
}
| 226 | |
| 227 } // namespace internal | |
| 228 | |
| 229 namespace webrtc { | |
| 230 | |
// Bitrate-adjuster bounds rationale:
// .5 is set as a minimum to prevent overcompensating for large temporary
// overshoots. We don't want to degrade video quality too badly.
// .95 is set to prevent oscillations. When a lower bitrate is set on the
// encoder than previously set, its output seems to have a brief period of
// drastically reduced bitrate, so we want to avoid that. In steady state
// conditions, 0.95 seems to give us better overall bitrate over long periods
// of time.
H264VideoToolboxEncoder::H264VideoToolboxEncoder()
    : callback_(nullptr),
      compression_session_(nullptr),
      bitrate_adjuster_(Clock::GetRealTimeClock(), .5, .95) {
}
| 243 | |
// Invalidates and releases the compression session, if any, so no further
// async callbacks can fire into this object.
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
  DestroyCompressionSession();
}
| 247 | |
| 248 int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings, | |
| 249 int number_of_cores, | |
| 250 size_t max_payload_size) { | |
| 251 RTC_DCHECK(codec_settings); | |
| 252 RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264); | |
| 253 { | |
| 254 rtc::CritScope lock(&quality_scaler_crit_); | |
| 255 quality_scaler_.Init(internal::kLowH264QpThreshold, | |
| 256 internal::kHighH264QpThreshold, | |
| 257 codec_settings->startBitrate, codec_settings->width, | |
| 258 codec_settings->height, codec_settings->maxFramerate); | |
| 259 QualityScaler::Resolution res = quality_scaler_.GetScaledResolution(); | |
| 260 // TODO(tkchin): We may need to enforce width/height dimension restrictions | |
| 261 // to match what the encoder supports. | |
| 262 width_ = res.width; | |
| 263 height_ = res.height; | |
| 264 } | |
| 265 // We can only set average bitrate on the HW encoder. | |
| 266 target_bitrate_bps_ = codec_settings->startBitrate; | |
| 267 bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_); | |
| 268 | |
| 269 // TODO(tkchin): Try setting payload size via | |
| 270 // kVTCompressionPropertyKey_MaxH264SliceBytes. | |
| 271 | |
| 272 return ResetCompressionSession(); | |
| 273 } | |
| 274 | |
// Encodes |frame| asynchronously. Results are delivered on VideoToolbox's
// queue via VTCompressionOutputCallback -> OnEncodedFrame -> |callback_|.
// Returns a WEBRTC_VIDEO_CODEC_* status for the submission only.
int H264VideoToolboxEncoder::Encode(
    const VideoFrame& frame,
    const CodecSpecificInfo* codec_specific_info,
    const std::vector<FrameType>* frame_types) {
  RTC_DCHECK(!frame.IsZeroSize());
  if (!callback_ || !compression_session_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
#if defined(WEBRTC_IOS)
  if (!RTCIsUIApplicationActive()) {
    // Ignore all encode requests when app isn't active. In this state, the
    // hardware encoder has been invalidated by the OS.
    return WEBRTC_VIDEO_CODEC_OK;
  }
#endif
  bool is_keyframe_required = false;

  // NOTE(review): other accesses to quality_scaler_ in this file hold
  // quality_scaler_crit_, but these two calls do not -- presumably Encode
  // runs on the same thread as InitEncode. Confirm the threading model.
  quality_scaler_.OnEncodeFrame(frame.width(), frame.height());
  const QualityScaler::Resolution scaled_res =
      quality_scaler_.GetScaledResolution();

  // If the quality scaler changed the target resolution, the compression
  // session must be recreated at the new size.
  if (scaled_res.width != width_ || scaled_res.height != height_) {
    width_ = scaled_res.width;
    height_ = scaled_res.height;
    int ret = ResetCompressionSession();
    if (ret < 0)
      return ret;
  }

  // Get a pixel buffer from the pool and copy frame data over.
  CVPixelBufferPoolRef pixel_buffer_pool =
      VTCompressionSessionGetPixelBufferPool(compression_session_);
#if defined(WEBRTC_IOS)
  if (!pixel_buffer_pool) {
    // Kind of a hack. On backgrounding, the compression session seems to get
    // invalidated, which causes this pool call to fail when the application
    // is foregrounded and frames are being sent for encoding again.
    // Resetting the session when this happens fixes the issue.
    // In addition we request a keyframe so video can recover quickly.
    ResetCompressionSession();
    pixel_buffer_pool =
        VTCompressionSessionGetPixelBufferPool(compression_session_);
    is_keyframe_required = true;
    LOG(LS_INFO) << "Resetting compression session due to invalid pool.";
  }
#endif

  // |pixel_buffer| below is always released before returning (CVBufferRetain
  // or CreatePixelBuffer hands us a reference in every branch).
  CVPixelBufferRef pixel_buffer = static_cast<CVPixelBufferRef>(
      frame.video_frame_buffer()->native_handle());
  if (pixel_buffer) {
    // Native frame.
    rtc::scoped_refptr<CoreVideoFrameBuffer> core_video_frame_buffer(
        static_cast<CoreVideoFrameBuffer*>(frame.video_frame_buffer().get()));
    if (!core_video_frame_buffer->RequiresCropping()) {
      // This pixel buffer might have a higher resolution than what the
      // compression session is configured to. The compression session can
      // handle that and will output encoded frames in the configured
      // resolution regardless of the input pixel buffer resolution.
      CVBufferRetain(pixel_buffer);
    } else {
      // Cropping required, we need to crop and scale to a new pixel buffer.
      pixel_buffer = internal::CreatePixelBuffer(pixel_buffer_pool);
      if (!pixel_buffer) {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
      if (!core_video_frame_buffer->CropAndScaleTo(&nv12_scale_buffer_,
                                                   pixel_buffer)) {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
    }
  } else {
    // Non-native (I420) frame: scale if needed, then copy/convert into an
    // NV12 pixel buffer from the pool.
    pixel_buffer = internal::CreatePixelBuffer(pixel_buffer_pool);
    if (!pixel_buffer) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    // TODO(magjed): Optimize by merging scaling and NV12 pixel buffer
    // conversion once libyuv::MergeUVPlanes is available.
    rtc::scoped_refptr<VideoFrameBuffer> scaled_i420_buffer =
        quality_scaler_.GetScaledBuffer(frame.video_frame_buffer());
    if (!internal::CopyVideoFrameToPixelBuffer(scaled_i420_buffer,
                                               pixel_buffer)) {
      LOG(LS_ERROR) << "Failed to copy frame data.";
      CVBufferRelease(pixel_buffer);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  // Check if we need a keyframe.
  if (!is_keyframe_required && frame_types) {
    for (auto frame_type : *frame_types) {
      if (frame_type == kVideoFrameKey) {
        is_keyframe_required = true;
        break;
      }
    }
  }

  CMTime presentation_time_stamp =
      CMTimeMake(frame.render_time_ms(), 1000);
  CFDictionaryRef frame_properties = nullptr;
  if (is_keyframe_required) {
    CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
    CFTypeRef values[] = {kCFBooleanTrue};
    frame_properties = internal::CreateCFDictionary(keys, values, 1);
  }
  // Ownership of |encode_params| is released into the encode call below and
  // reclaimed in VTCompressionOutputCallback.
  std::unique_ptr<internal::FrameEncodeParams> encode_params;
  encode_params.reset(new internal::FrameEncodeParams(
      this, codec_specific_info, width_, height_, frame.render_time_ms(),
      frame.timestamp(), frame.rotation()));

  // Update the bitrate if needed.
  SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());

  OSStatus status = VTCompressionSessionEncodeFrame(
      compression_session_, pixel_buffer, presentation_time_stamp,
      kCMTimeInvalid, frame_properties, encode_params.release(), nullptr);
  if (frame_properties) {
    CFRelease(frame_properties);
  }
  if (pixel_buffer) {
    CVBufferRelease(pixel_buffer);
  }
  if (status != noErr) {
    LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
| 403 | |
// Stores the sink that receives encoded images from OnEncodedFrame. Not
// owned; must outlive encoding (see Release for the teardown ordering).
int H264VideoToolboxEncoder::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
| 409 | |
// Informs the quality scaler that a frame was dropped upstream, under the
// scaler's lock.
void H264VideoToolboxEncoder::OnDroppedFrame() {
  rtc::CritScope lock(&quality_scaler_crit_);
  quality_scaler_.ReportDroppedFrame();
}
| 414 | |
// No-op: this encoder has no use for packet loss or RTT information.
int H264VideoToolboxEncoder::SetChannelParameters(uint32_t packet_loss,
                                                  int64_t rtt) {
  // Encoder doesn't know anything about packet loss or rtt so just return.
  return WEBRTC_VIDEO_CODEC_OK;
}
| 420 | |
// Updates the target bitrate (|new_bitrate_kbit| is in kilobits/sec) and
// frame rate. Pushes the adjusted bitrate to the hardware encoder and
// reports the new framerate to the quality scaler.
int H264VideoToolboxEncoder::SetRates(uint32_t new_bitrate_kbit,
                                      uint32_t frame_rate) {
  // Convert kbit/s to bit/s for the adjuster and the encoder.
  target_bitrate_bps_ = 1000 * new_bitrate_kbit;
  bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_);
  SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());

  rtc::CritScope lock(&quality_scaler_crit_);
  quality_scaler_.ReportFramerate(frame_rate);

  return WEBRTC_VIDEO_CODEC_OK;
}
| 432 | |
// Releases encoder resources. The ordering here matters: the session is
// invalidated first so no further async callbacks can reach |callback_|.
int H264VideoToolboxEncoder::Release() {
  // Need to reset so that the session is invalidated and won't use the
  // callback anymore. Do not remove callback until the session is invalidated
  // since async encoder callbacks can occur until invalidation.
  int ret = ResetCompressionSession();
  callback_ = nullptr;
  return ret;
}
| 441 | |
// Tears down any existing compression session and creates a fresh one for
// the current width_/height_, then configures it. Returns a
// WEBRTC_VIDEO_CODEC_* status.
int H264VideoToolboxEncoder::ResetCompressionSession() {
  DestroyCompressionSession();

  // Set source image buffer attributes. These attributes will be present on
  // buffers retrieved from the encoder's pixel buffer pool.
  // NOTE(review): |attributes_size| is 3, but only two keys are initialized
  // when neither WEBRTC_IOS nor WEBRTC_MAC is defined -- presumably one of
  // the two is always defined for this file; confirm against the build.
  const size_t attributes_size = 3;
  CFTypeRef keys[attributes_size] = {
#if defined(WEBRTC_IOS)
    kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
    kCVPixelBufferOpenGLCompatibilityKey,
#endif
    kCVPixelBufferIOSurfacePropertiesKey,
    kCVPixelBufferPixelFormatTypeKey
  };
  // An empty IOSurface properties dictionary enables IOSurface backing.
  CFDictionaryRef io_surface_value =
      internal::CreateCFDictionary(nullptr, nullptr, 0);
  int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  CFNumberRef pixel_format =
      CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
  CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
                                       pixel_format};
  CFDictionaryRef source_attributes =
      internal::CreateCFDictionary(keys, values, attributes_size);
  // The dictionary retains its values, so release our local references.
  if (io_surface_value) {
    CFRelease(io_surface_value);
    io_surface_value = nullptr;
  }
  if (pixel_format) {
    CFRelease(pixel_format);
    pixel_format = nullptr;
  }
  OSStatus status = VTCompressionSessionCreate(
      nullptr,  // use default allocator
      width_, height_, kCMVideoCodecType_H264,
      nullptr,  // use default encoder
      source_attributes,
      nullptr,  // use default compressed data allocator
      internal::VTCompressionOutputCallback, this, &compression_session_);
  if (source_attributes) {
    CFRelease(source_attributes);
    source_attributes = nullptr;
  }
  if (status != noErr) {
    LOG(LS_ERROR) << "Failed to create compression session: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  ConfigureCompressionSession();
  return WEBRTC_VIDEO_CODEC_OK;
}
| 492 | |
// Applies the static encoder configuration to a freshly created session:
// real-time mode, baseline profile with automatic level, no B-frames, the
// current target bitrate, and keyframe interval limits.
void H264VideoToolboxEncoder::ConfigureCompressionSession() {
  RTC_DCHECK(compression_session_);
  internal::SetVTSessionProperty(compression_session_,
                                 kVTCompressionPropertyKey_RealTime, true);
  internal::SetVTSessionProperty(compression_session_,
                                 kVTCompressionPropertyKey_ProfileLevel,
                                 kVTProfileLevel_H264_Baseline_AutoLevel);
  // Disallowing frame reordering avoids B-frames, keeping latency low.
  internal::SetVTSessionProperty(compression_session_,
                                 kVTCompressionPropertyKey_AllowFrameReordering,
                                 false);
  SetEncoderBitrateBps(target_bitrate_bps_);
  // TODO(tkchin): Look at entropy mode and colorspace matrices.
  // TODO(tkchin): Investigate to see if there's any way to make this work.
  // May need it to interop with Android. Currently this call just fails.
  // On inspecting encoder output on iOS8, this value is set to 6.
  // internal::SetVTSessionProperty(compression_session_,
  //     kVTCompressionPropertyKey_MaxFrameDelayCount,
  //     1);

  // Set a relatively large value for keyframe emission (7200 frames or
  // 4 minutes).
  internal::SetVTSessionProperty(
      compression_session_,
      kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
  internal::SetVTSessionProperty(
      compression_session_,
      kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
}
| 521 | |
// Invalidates (stopping further callbacks) and releases the compression
// session, if one exists. Safe to call repeatedly.
void H264VideoToolboxEncoder::DestroyCompressionSession() {
  if (compression_session_) {
    VTCompressionSessionInvalidate(compression_session_);
    CFRelease(compression_session_);
    compression_session_ = nullptr;
  }
}
| 529 | |
// Human-readable encoder implementation name, reported for stats/logging.
const char* H264VideoToolboxEncoder::ImplementationName() const {
  return "VideoToolbox";
}
| 533 | |
// This encoder accepts frames backed by native CVPixelBuffers (see the
// native_handle() path in Encode).
bool H264VideoToolboxEncoder::SupportsNativeHandle() const {
  return true;
}
| 537 | |
| 538 void H264VideoToolboxEncoder::SetBitrateBps(uint32_t bitrate_bps) { | |
| 539 if (encoder_bitrate_bps_ != bitrate_bps) { | |
| 540 SetEncoderBitrateBps(bitrate_bps); | |
| 541 } | |
| 542 } | |
| 543 | |
// Unconditionally applies |bitrate_bps| to the compression session as both
// the average bitrate and a data-rate limit (average scaled by
// kLimitToAverageBitRateFactor, expressed as bytes per one-second window).
// Records the applied value in |encoder_bitrate_bps_|. No-op without a
// session.
void H264VideoToolboxEncoder::SetEncoderBitrateBps(uint32_t bitrate_bps) {
  if (compression_session_) {
    internal::SetVTSessionProperty(compression_session_,
                                   kVTCompressionPropertyKey_AverageBitRate,
                                   bitrate_bps);

    // TODO(tkchin): Add a helper method to set array value.
    // DataRateLimits takes [bytes, seconds] pairs; build {limit_bytes, 1s}.
    int64_t data_limit_bytes_per_second_value = static_cast<int64_t>(
        bitrate_bps * internal::kLimitToAverageBitRateFactor / 8);
    CFNumberRef bytes_per_second =
        CFNumberCreate(kCFAllocatorDefault,
                       kCFNumberSInt64Type,
                       &data_limit_bytes_per_second_value);
    int64_t one_second_value = 1;
    CFNumberRef one_second =
        CFNumberCreate(kCFAllocatorDefault,
                       kCFNumberSInt64Type,
                       &one_second_value);
    const void* nums[2] = { bytes_per_second, one_second };
    CFArrayRef data_rate_limits =
        CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
    OSStatus status =
        VTSessionSetProperty(compression_session_,
                             kVTCompressionPropertyKey_DataRateLimits,
                             data_rate_limits);
    // The array retains its elements; release all local CF references.
    if (bytes_per_second) {
      CFRelease(bytes_per_second);
    }
    if (one_second) {
      CFRelease(one_second);
    }
    if (data_rate_limits) {
      CFRelease(data_rate_limits);
    }
    if (status != noErr) {
      LOG(LS_ERROR) << "Failed to set data rate limit";
    }

    encoder_bitrate_bps_ = bitrate_bps;
  }
}
| 585 | |
// Handles one encoded frame from VideoToolbox (invoked via
// VTCompressionOutputCallback, on VideoToolbox's own queue): converts the
// sample buffer to Annex B, extracts QP for the quality scaler, and forwards
// the EncodedImage to |callback_|.
void H264VideoToolboxEncoder::OnEncodedFrame(
    OSStatus status,
    VTEncodeInfoFlags info_flags,
    CMSampleBufferRef sample_buffer,
    CodecSpecificInfo codec_specific_info,
    int32_t width,
    int32_t height,
    int64_t render_time_ms,
    uint32_t timestamp,
    VideoRotation rotation) {
  if (status != noErr) {
    LOG(LS_ERROR) << "H264 encode failed.";
    return;
  }
  if (info_flags & kVTEncodeInfo_FrameDropped) {
    LOG(LS_INFO) << "H264 encode dropped frame.";
    rtc::CritScope lock(&quality_scaler_crit_);
    quality_scaler_.ReportDroppedFrame();
    return;
  }

  // A sample without the NotSync attachment is a sync sample, i.e. keyframe.
  bool is_keyframe = false;
  CFArrayRef attachments =
      CMSampleBufferGetSampleAttachmentsArray(sample_buffer, 0);
  if (attachments != nullptr && CFArrayGetCount(attachments)) {
    CFDictionaryRef attachment =
        static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
    is_keyframe =
        !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
  }

  if (is_keyframe) {
    LOG(LS_INFO) << "Generated keyframe";
  }

  // Convert the sample buffer into a buffer suitable for RTP packetization.
  // TODO(tkchin): Allocate buffers through a pool.
  std::unique_ptr<rtc::Buffer> buffer(new rtc::Buffer());
  std::unique_ptr<webrtc::RTPFragmentationHeader> header;
  {
    // |header_raw| is adopted into |header| even on failure so it is not
    // leaked.
    webrtc::RTPFragmentationHeader* header_raw;
    bool result = H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
                                                   buffer.get(), &header_raw);
    header.reset(header_raw);
    if (!result) {
      return;
    }
  }
  webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
  frame._encodedWidth = width;
  frame._encodedHeight = height;
  frame._completeFrame = true;
  frame._frameType =
      is_keyframe ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta;
  frame.capture_time_ms_ = render_time_ms;
  frame._timeStamp = timestamp;
  frame.rotation_ = rotation;

  // Feed the slice QP of the encoded bitstream to the quality scaler.
  h264_bitstream_parser_.ParseBitstream(buffer->data(), buffer->size());
  int qp;
  if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
    rtc::CritScope lock(&quality_scaler_crit_);
    quality_scaler_.ReportQP(qp);
  }

  EncodedImageCallback::Result result =
      callback_->OnEncodedImage(frame, &codec_specific_info, header.get());
  if (result.error != EncodedImageCallback::Result::OK) {
    LOG(LS_ERROR) << "Encode callback failed: " << result.error;
    return;
  }
  // Feed the actual encoded size back so the adjuster can track overshoot.
  bitrate_adjuster_.Update(frame._size);
}
| 659 | |
| 660 } // namespace webrtc | |
| OLD | NEW |