OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 * | 9 * |
10 */ | 10 */ |
(...skipping 63 matching lines...)
74 // be deleted and reallocated if a bigger buffer is required. | 74 // be deleted and reallocated if a bigger buffer is required. |
75 // | 75 // |
76 // After OpenH264 encoding, the encoded bytes are stored in |info| spread out | 76 // After OpenH264 encoding, the encoded bytes are stored in |info| spread out |
77 // over a number of layers and "NAL units". Each NAL unit is a fragment starting | 77 // over a number of layers and "NAL units". Each NAL unit is a fragment starting |
78 // with the four-byte start code {0,0,0,1}. All of this data (including the | 78 // with the four-byte start code {0,0,0,1}. All of this data (including the |
79 // start codes) is copied to the |encoded_image->_buffer| and the |frag_header| | 79 // start codes) is copied to the |encoded_image->_buffer| and the |frag_header| |
80 // is updated to point to each fragment, with offsets and lengths set so as | 80 // is updated to point to each fragment, with offsets and lengths set so as |
81 // to exclude the start codes. | 81 // to exclude the start codes. |
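To make the layout described above concrete, here is a minimal, self-contained sketch (illustration only, not part of this change; the Fragment struct is a hypothetical stand-in for the frag_header entries) of how offsets and lengths are derived so that each four-byte start code is excluded:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    struct Fragment { size_t offset; size_t length; };

    std::vector<Fragment> FindFragments(const uint8_t* buf, size_t size) {
      static const uint8_t kStartCode[] = {0, 0, 0, 1};
      std::vector<Fragment> frags;
      size_t pos = 0;
      while (pos + sizeof(kStartCode) <= size) {
        if (memcmp(buf + pos, kStartCode, sizeof(kStartCode)) == 0) {
          if (!frags.empty())  // Previous fragment ends where this start code begins.
            frags.back().length = pos - frags.back().offset;
          frags.push_back({pos + sizeof(kStartCode), 0});  // Offset skips the start code.
          pos += sizeof(kStartCode);
        } else {
          ++pos;
        }
      }
      if (!frags.empty())
        frags.back().length = size - frags.back().offset;
      return frags;
    }

    int main() {
      // Two NAL units with payloads {0x67, 0x42} and {0x68, 0xce, 0x3c}.
      const uint8_t buf[] = {0, 0, 0, 1, 0x67, 0x42, 0, 0, 0, 1, 0x68, 0xce, 0x3c};
      for (const Fragment& f : FindFragments(buf, sizeof(buf)))
        std::cout << "offset=" << f.offset << " length=" << f.length << "\n";
      // Prints offset=4 length=2, then offset=10 length=3.
    }

RtpFragmentize itself never needs to scan for start codes this way: it can rely on the per-NAL byte lengths OpenH264 already reports in |info| (pNalLengthInByte in the loop below).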
82 static void RtpFragmentize(EncodedImage* encoded_image, | 82 static void RtpFragmentize(EncodedImage* encoded_image, |
83 std::unique_ptr<uint8_t[]>* encoded_image_buffer, | 83 std::unique_ptr<uint8_t[]>* encoded_image_buffer, |
84 const VideoFrame& frame, | 84 const VideoFrameBuffer& frame_buffer, |
85 SFrameBSInfo* info, | 85 SFrameBSInfo* info, |
86 RTPFragmentationHeader* frag_header) { | 86 RTPFragmentationHeader* frag_header) { |
87 // Calculate minimum buffer size required to hold encoded data. | 87 // Calculate minimum buffer size required to hold encoded data. |
88 size_t required_size = 0; | 88 size_t required_size = 0; |
89 size_t fragments_count = 0; | 89 size_t fragments_count = 0; |
90 for (int layer = 0; layer < info->iLayerNum; ++layer) { | 90 for (int layer = 0; layer < info->iLayerNum; ++layer) { |
91 const SLayerBSInfo& layerInfo = info->sLayerInfo[layer]; | 91 const SLayerBSInfo& layerInfo = info->sLayerInfo[layer]; |
92 for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) { | 92 for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) { |
93 RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0); | 93 RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0); |
94 // Ensure |required_size| will not overflow. | 94 // Ensure |required_size| will not overflow. |
95 RTC_CHECK_LE(static_cast<size_t>(layerInfo.pNalLengthInByte[nal]), | 95 RTC_CHECK_LE(static_cast<size_t>(layerInfo.pNalLengthInByte[nal]), |
96 std::numeric_limits<size_t>::max() - required_size); | 96 std::numeric_limits<size_t>::max() - required_size); |
97 required_size += layerInfo.pNalLengthInByte[nal]; | 97 required_size += layerInfo.pNalLengthInByte[nal]; |
98 } | 98 } |
99 } | 99 } |
100 if (encoded_image->_size < required_size) { | 100 if (encoded_image->_size < required_size) { |
101 // Increase buffer size. Allocate enough to hold an unencoded image; this | 101 // Increase buffer size. Allocate enough to hold an unencoded image; this |
102 // should be more than enough to hold any encoded data of future frames of | 102 // should be more than enough to hold any encoded data of future frames of |
103 // the same size (avoiding possible future reallocation due to variations in | 103 // the same size (avoiding possible future reallocation due to variations in |
104 // required size). | 104 // required size). |
105 encoded_image->_size = CalcBufferSize(kI420, frame.width(), frame.height()); | 105 encoded_image->_size = |
| 106 CalcBufferSize(kI420, frame_buffer.width(), frame_buffer.height()); |
106 if (encoded_image->_size < required_size) { | 107 if (encoded_image->_size < required_size) { |
107 // Encoded data > unencoded data. Allocate required bytes. | 108 // Encoded data > unencoded data. Allocate required bytes. |
108 LOG(LS_WARNING) << "Encoding produced more bytes than the original image " | 109 LOG(LS_WARNING) << "Encoding produced more bytes than the original image " |
109 << "data! Original bytes: " << encoded_image->_size | 110 << "data! Original bytes: " << encoded_image->_size |
110 << ", encoded bytes: " << required_size << "."; | 111 << ", encoded bytes: " << required_size << "."; |
111 encoded_image->_size = required_size; | 112 encoded_image->_size = required_size; |
112 } | 113 } |
113 encoded_image->_buffer = new uint8_t[encoded_image->_size]; | 114 encoded_image->_buffer = new uint8_t[encoded_image->_size]; |
114 encoded_image_buffer->reset(encoded_image->_buffer); | 115 encoded_image_buffer->reset(encoded_image->_buffer); |
115 } | 116 } |
(...skipping 75 matching lines...)
191 return WEBRTC_VIDEO_CODEC_ERROR; | 192 return WEBRTC_VIDEO_CODEC_ERROR; |
192 } | 193 } |
193 RTC_DCHECK(openh264_encoder_); | 194 RTC_DCHECK(openh264_encoder_); |
194 if (kOpenH264EncoderDetailedLogging) { | 195 if (kOpenH264EncoderDetailedLogging) { |
195 int trace_level = WELS_LOG_DETAIL; | 196 int trace_level = WELS_LOG_DETAIL; |
196 openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, | 197 openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, |
197 &trace_level); | 198 &trace_level); |
198 } | 199 } |
199 // else WELS_LOG_DEFAULT is used by default. | 200 // else WELS_LOG_DEFAULT is used by default. |
200 | 201 |
| 202 number_of_cores_ = number_of_cores; |
201 codec_settings_ = *codec_settings; | 203 codec_settings_ = *codec_settings; |
202 if (codec_settings_.targetBitrate == 0) | 204 if (codec_settings_.targetBitrate == 0) |
203 codec_settings_.targetBitrate = codec_settings_.startBitrate; | 205 codec_settings_.targetBitrate = codec_settings_.startBitrate; |
204 | 206 |
205 // Initialization parameters. | 207 SEncParamExt encoder_params = CreateEncoderParams(); |
206 // There are two ways to initialize. There is SEncParamBase (cleared with | |
207 // memset(&p, 0, sizeof(SEncParamBase)) used in Initialize, and SEncParamExt | |
208 // which is a superset of SEncParamBase (cleared with GetDefaultParams) used | |
209 // in InitializeExt. | |
210 SEncParamExt init_params; | |
211 openh264_encoder_->GetDefaultParams(&init_params); | |
212 if (codec_settings_.mode == kRealtimeVideo) { | |
213 init_params.iUsageType = CAMERA_VIDEO_REAL_TIME; | |
214 } else if (codec_settings_.mode == kScreensharing) { | |
215 init_params.iUsageType = SCREEN_CONTENT_REAL_TIME; | |
216 } else { | |
217 ReportError(); | |
218 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
219 } | |
220 init_params.iPicWidth = codec_settings_.width; | |
221 init_params.iPicHeight = codec_settings_.height; | |
222 // |init_params| uses bit/s, |codec_settings_| uses kbit/s. | |
223 init_params.iTargetBitrate = codec_settings_.targetBitrate * 1000; | |
224 init_params.iMaxBitrate = codec_settings_.maxBitrate * 1000; | |
225 // Rate Control mode | |
226 init_params.iRCMode = RC_BITRATE_MODE; | |
227 init_params.fMaxFrameRate = static_cast<float>(codec_settings_.maxFramerate); | |
228 | |
229 // The following parameters are extension parameters (they're in SEncParamExt, | |
230 // not in SEncParamBase). | |
231 init_params.bEnableFrameSkip = | |
232 codec_settings_.codecSpecific.H264.frameDroppingOn; | |
233 // |uiIntraPeriod| - multiple of GOP size | |
234 // |keyFrameInterval| - number of frames | |
235 init_params.uiIntraPeriod = | |
236 codec_settings_.codecSpecific.H264.keyFrameInterval; | |
237 init_params.uiMaxNalSize = 0; | |
238 // Threading model: use auto. | |
239 // 0: auto (dynamically decided by the internal encoder) |
240 // 1: single thread (default value) | |
241 // >1: number of threads | |
242 init_params.iMultipleThreadIdc = NumberOfThreads(init_params.iPicWidth, | |
243 init_params.iPicHeight, | |
244 number_of_cores); | |
245 // The base spatial layer 0 is the only one we use. | |
246 init_params.sSpatialLayers[0].iVideoWidth = init_params.iPicWidth; | |
247 init_params.sSpatialLayers[0].iVideoHeight = init_params.iPicHeight; | |
248 init_params.sSpatialLayers[0].fFrameRate = init_params.fMaxFrameRate; | |
249 init_params.sSpatialLayers[0].iSpatialBitrate = init_params.iTargetBitrate; | |
250 init_params.sSpatialLayers[0].iMaxSpatialBitrate = init_params.iMaxBitrate; | |
251 // Slice num according to number of threads. | |
252 init_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE; | |
253 | |
254 // Initialize. | 208 // Initialize. |
255 if (openh264_encoder_->InitializeExt(&init_params) != 0) { | 209 if (openh264_encoder_->InitializeExt(&encoder_params) != 0) { |
256 LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder"; | 210 LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder"; |
257 Release(); | 211 Release(); |
258 ReportError(); | 212 ReportError(); |
259 return WEBRTC_VIDEO_CODEC_ERROR; | 213 return WEBRTC_VIDEO_CODEC_ERROR; |
260 } | 214 } |
| 215 // TODO(pbos): Base init params on these values before submitting. |
| 216 quality_scaler_.Init(QualityScaler::kLowH264QpThreshold, |
| 217 QualityScaler::kBadH264QpThreshold, |
| 218 codec_settings_.startBitrate, codec_settings_.width, |
| 219 codec_settings_.height, codec_settings_.maxFramerate); |
261 int video_format = EVideoFormatType::videoFormatI420; | 220 int video_format = EVideoFormatType::videoFormatI420; |
262 openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, | 221 openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, |
263 &video_format); | 222 &video_format); |
264 | 223 |
265 // Initialize encoded image. Default buffer size: size of unencoded data. | 224 // Initialize encoded image. Default buffer size: size of unencoded data. |
266 encoded_image_._size = CalcBufferSize( | 225 encoded_image_._size = CalcBufferSize( |
267 kI420, codec_settings_.width, codec_settings_.height); | 226 kI420, codec_settings_.width, codec_settings_.height); |
268 encoded_image_._buffer = new uint8_t[encoded_image_._size]; | 227 encoded_image_._buffer = new uint8_t[encoded_image_._size]; |
269 encoded_image_buffer_.reset(encoded_image_._buffer); | 228 encoded_image_buffer_.reset(encoded_image_._buffer); |
270 encoded_image_._completeFrame = true; | 229 encoded_image_._completeFrame = true; |
271 encoded_image_._encodedWidth = 0; | 230 encoded_image_._encodedWidth = 0; |
272 encoded_image_._encodedHeight = 0; | 231 encoded_image_._encodedHeight = 0; |
273 encoded_image_._length = 0; | 232 encoded_image_._length = 0; |
274 return WEBRTC_VIDEO_CODEC_OK; | 233 return WEBRTC_VIDEO_CODEC_OK; |
275 } | 234 } |
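For scale, assuming CalcBufferSize(kI420, w, h) returns the standard I420 footprint of w * h * 3 / 2 bytes (a full-resolution luma plane plus two 2x2-subsampled chroma planes), the default buffer for a 640x480 stream works out as follows:

    #include <cstddef>
    #include <iostream>

    int main() {
      // I420: 8 bits per luma sample plus two chroma planes subsampled 2x2.
      const size_t width = 640, height = 480;
      const size_t luma = width * height;                    // 307200 bytes
      const size_t chroma = 2 * (width / 2) * (height / 2);  // 153600 bytes
      std::cout << luma + chroma << "\n";  // 460800, i.e. width * height * 3 / 2
    }

As the warning path in RtpFragmentize shows, encoded output can in rare cases exceed this, in which case the buffer is grown to the exact required size.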
276 | 235 |
277 int32_t H264EncoderImpl::Release() { | 236 int32_t H264EncoderImpl::Release() { |
278 if (openh264_encoder_) { | 237 if (openh264_encoder_) { |
279 int uninit_ret = openh264_encoder_->Uninitialize(); | 238 RTC_CHECK_EQ(0, openh264_encoder_->Uninitialize()); |
280 if (uninit_ret != 0) { | |
281 LOG(LS_WARNING) << "OpenH264 encoder's Uninitialize() returned " | |
282 << "unsuccessful: " << uninit_ret; | |
283 } | |
284 WelsDestroySVCEncoder(openh264_encoder_); | 239 WelsDestroySVCEncoder(openh264_encoder_); |
285 openh264_encoder_ = nullptr; | 240 openh264_encoder_ = nullptr; |
286 } | 241 } |
287 if (encoded_image_._buffer != nullptr) { | 242 encoded_image_._buffer = nullptr; |
288 encoded_image_._buffer = nullptr; | 243 encoded_image_buffer_.reset(); |
289 encoded_image_buffer_.reset(); | |
290 } | |
291 return WEBRTC_VIDEO_CODEC_OK; | 244 return WEBRTC_VIDEO_CODEC_OK; |
292 } | 245 } |
293 | 246 |
294 int32_t H264EncoderImpl::RegisterEncodeCompleteCallback( | 247 int32_t H264EncoderImpl::RegisterEncodeCompleteCallback( |
295 EncodedImageCallback* callback) { | 248 EncodedImageCallback* callback) { |
296 encoded_image_callback_ = callback; | 249 encoded_image_callback_ = callback; |
297 return WEBRTC_VIDEO_CODEC_OK; | 250 return WEBRTC_VIDEO_CODEC_OK; |
298 } | 251 } |
299 | 252 |
300 int32_t H264EncoderImpl::SetRates(uint32_t bitrate, uint32_t framerate) { | 253 int32_t H264EncoderImpl::SetRates(uint32_t bitrate, uint32_t framerate) { |
301 if (bitrate <= 0 || framerate <= 0) { | 254 if (bitrate <= 0 || framerate <= 0) { |
302 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 255 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
303 } | 256 } |
304 codec_settings_.targetBitrate = bitrate; | 257 codec_settings_.targetBitrate = bitrate; |
305 codec_settings_.maxFramerate = framerate; | 258 codec_settings_.maxFramerate = framerate; |
| 259 quality_scaler_.ReportFramerate(framerate); |
306 | 260 |
307 SBitrateInfo target_bitrate; | 261 SBitrateInfo target_bitrate; |
308 memset(&target_bitrate, 0, sizeof(SBitrateInfo)); | 262 memset(&target_bitrate, 0, sizeof(SBitrateInfo)); |
309 target_bitrate.iLayer = SPATIAL_LAYER_ALL; | 263 target_bitrate.iLayer = SPATIAL_LAYER_ALL; |
310 target_bitrate.iBitrate = codec_settings_.targetBitrate * 1000; | 264 target_bitrate.iBitrate = codec_settings_.targetBitrate * 1000; |
311 openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE, | 265 openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE, |
312 &target_bitrate); | 266 &target_bitrate); |
313 float max_framerate = static_cast<float>(codec_settings_.maxFramerate); | 267 float max_framerate = static_cast<float>(codec_settings_.maxFramerate); |
314 openh264_encoder_->SetOption(ENCODER_OPTION_FRAME_RATE, | 268 openh264_encoder_->SetOption(ENCODER_OPTION_FRAME_RATE, |
315 &max_framerate); | 269 &max_framerate); |
316 return WEBRTC_VIDEO_CODEC_OK; | 270 return WEBRTC_VIDEO_CODEC_OK; |
317 } | 271 } |
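Note the unit split this function has to respect: |codec_settings_| carries kbit/s while SBitrateInfo wants bit/s. For example, SetRates(500, 30) stores targetBitrate = 500 but hands OpenH264 iBitrate = 500 * 1000 = 500000, and passes the frame rate to ENCODER_OPTION_FRAME_RATE as a float (30.0). Mixing up the two bitrate units is the classic failure mode here.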
318 | 272 |
319 int32_t H264EncoderImpl::Encode( | 273 int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame, |
320 const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info, | 274 const CodecSpecificInfo* codec_specific_info, |
321 const std::vector<FrameType>* frame_types) { | 275 const std::vector<FrameType>* frame_types) { |
322 if (!IsInitialized()) { | 276 if (!IsInitialized()) { |
323 ReportError(); | 277 ReportError(); |
324 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 278 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
325 } | 279 } |
326 if (frame.IsZeroSize()) { | 280 if (input_frame.IsZeroSize()) { |
327 ReportError(); | 281 ReportError(); |
328 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 282 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
329 } | 283 } |
330 if (!encoded_image_callback_) { | 284 if (!encoded_image_callback_) { |
331 LOG(LS_WARNING) << "InitEncode() has been called, but a callback function " | 285 LOG(LS_WARNING) << "InitEncode() has been called, but a callback function " |
332 << "has not been set with RegisterEncodeCompleteCallback()"; | 286 << "has not been set with RegisterEncodeCompleteCallback()"; |
333 ReportError(); | 287 ReportError(); |
334 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 288 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
335 } | 289 } |
336 if (frame.width() != codec_settings_.width || | 290 |
337 frame.height() != codec_settings_.height) { | 291 quality_scaler_.OnEncodeFrame(input_frame.width(), input_frame.height()); |
338 LOG(LS_WARNING) << "Encoder initialized for " << codec_settings_.width | 292 rtc::scoped_refptr<const VideoFrameBuffer> frame_buffer = |
339 << "x" << codec_settings_.height << " but trying to encode " | 293 quality_scaler_.GetScaledBuffer(input_frame.video_frame_buffer()); |
340 << frame.width() << "x" << frame.height() << " frame."; | 294 if (frame_buffer->width() != codec_settings_.width || |
341 ReportError(); | 295 frame_buffer->height() != codec_settings_.height) { |
342 return WEBRTC_VIDEO_CODEC_ERR_SIZE; | 296 LOG(LS_INFO) << "Encoder reinitialized from " << codec_settings_.width |
| 297 << "x" << codec_settings_.height << " to " |
| 298 << frame_buffer->width() << "x" << frame_buffer->height(); |
| 299 codec_settings_.width = frame_buffer->width(); |
| 300 codec_settings_.height = frame_buffer->height(); |
| 301 SEncParamExt encoder_params = CreateEncoderParams(); |
| 302 openh264_encoder_->SetOption(ENCODER_OPTION_SVC_ENCODE_PARAM_EXT, |
| 303 &encoder_params); |
343 } | 304 } |
344 | 305 |
345 bool force_key_frame = false; | 306 bool force_key_frame = false; |
346 if (frame_types != nullptr) { | 307 if (frame_types != nullptr) { |
347 // We only support a single stream. | 308 // We only support a single stream. |
348 RTC_DCHECK_EQ(frame_types->size(), static_cast<size_t>(1)); | 309 RTC_DCHECK_EQ(frame_types->size(), static_cast<size_t>(1)); |
349 // Skip frame? | 310 // Skip frame? |
350 if ((*frame_types)[0] == kEmptyFrame) { | 311 if ((*frame_types)[0] == kEmptyFrame) { |
351 return WEBRTC_VIDEO_CODEC_OK; | 312 return WEBRTC_VIDEO_CODEC_OK; |
352 } | 313 } |
353 // Force key frame? | 314 // Force key frame? |
354 force_key_frame = (*frame_types)[0] == kVideoFrameKey; | 315 force_key_frame = (*frame_types)[0] == kVideoFrameKey; |
355 } | 316 } |
356 if (force_key_frame) { | 317 if (force_key_frame) { |
357 // API doc says ForceIntraFrame(false) does nothing, but calling this | 318 // API doc says ForceIntraFrame(false) does nothing, but calling this |
358 // function forces a key frame regardless of the |bIDR| argument's value. | 319 // function forces a key frame regardless of the |bIDR| argument's value. |
359 // (If every frame is a key frame we get lag/delays.) | 320 // (If every frame is a key frame we get lag/delays.) |
360 openh264_encoder_->ForceIntraFrame(true); | 321 openh264_encoder_->ForceIntraFrame(true); |
361 } | 322 } |
362 | 323 |
363 // EncodeFrame input. | 324 // EncodeFrame input. |
364 SSourcePicture picture; | 325 SSourcePicture picture; |
365 memset(&picture, 0, sizeof(SSourcePicture)); | 326 memset(&picture, 0, sizeof(SSourcePicture)); |
366 picture.iPicWidth = frame.width(); | 327 picture.iPicWidth = frame_buffer->width(); |
367 picture.iPicHeight = frame.height(); | 328 picture.iPicHeight = frame_buffer->height(); |
368 picture.iColorFormat = EVideoFormatType::videoFormatI420; | 329 picture.iColorFormat = EVideoFormatType::videoFormatI420; |
369 picture.uiTimeStamp = frame.ntp_time_ms(); | 330 picture.uiTimeStamp = input_frame.ntp_time_ms(); |
370 picture.iStride[0] = frame.video_frame_buffer()->StrideY(); | 331 picture.iStride[0] = frame_buffer->StrideY(); |
371 picture.iStride[1] = frame.video_frame_buffer()->StrideU(); | 332 picture.iStride[1] = frame_buffer->StrideU(); |
372 picture.iStride[2] = frame.video_frame_buffer()->StrideV(); | 333 picture.iStride[2] = frame_buffer->StrideV(); |
373 picture.pData[0] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataY()); | 334 picture.pData[0] = const_cast<uint8_t*>(frame_buffer->DataY()); |
374 picture.pData[1] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataU()); | 335 picture.pData[1] = const_cast<uint8_t*>(frame_buffer->DataU()); |
375 picture.pData[2] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataV()); | 336 picture.pData[2] = const_cast<uint8_t*>(frame_buffer->DataV()); |
376 | 337 |
377 // EncodeFrame output. | 338 // EncodeFrame output. |
378 SFrameBSInfo info; | 339 SFrameBSInfo info; |
379 memset(&info, 0, sizeof(SFrameBSInfo)); | 340 memset(&info, 0, sizeof(SFrameBSInfo)); |
380 | 341 |
381 // Encode! | 342 // Encode! |
382 int enc_ret = openh264_encoder_->EncodeFrame(&picture, &info); | 343 int enc_ret = openh264_encoder_->EncodeFrame(&picture, &info); |
383 if (enc_ret != 0) { | 344 if (enc_ret != 0) { |
384 LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned " | 345 LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned " |
385 << enc_ret << "."; | 346 << enc_ret << "."; |
386 ReportError(); | 347 ReportError(); |
387 return WEBRTC_VIDEO_CODEC_ERROR; | 348 return WEBRTC_VIDEO_CODEC_ERROR; |
388 } | 349 } |
389 | 350 |
390 encoded_image_._encodedWidth = frame.width(); | 351 encoded_image_._encodedWidth = frame_buffer->width(); |
391 encoded_image_._encodedHeight = frame.height(); | 352 encoded_image_._encodedHeight = frame_buffer->height(); |
392 encoded_image_._timeStamp = frame.timestamp(); | 353 encoded_image_._timeStamp = input_frame.timestamp(); |
393 encoded_image_.ntp_time_ms_ = frame.ntp_time_ms(); | 354 encoded_image_.ntp_time_ms_ = input_frame.ntp_time_ms(); |
394 encoded_image_.capture_time_ms_ = frame.render_time_ms(); | 355 encoded_image_.capture_time_ms_ = input_frame.render_time_ms(); |
395 encoded_image_.rotation_ = frame.rotation(); | 356 encoded_image_.rotation_ = input_frame.rotation(); |
396 encoded_image_._frameType = ConvertToVideoFrameType(info.eFrameType); | 357 encoded_image_._frameType = ConvertToVideoFrameType(info.eFrameType); |
397 | 358 |
398 // Split encoded image up into fragments. This also updates |encoded_image_|. | 359 // Split encoded image up into fragments. This also updates |encoded_image_|. |
399 RTPFragmentationHeader frag_header; | 360 RTPFragmentationHeader frag_header; |
400 RtpFragmentize(&encoded_image_, &encoded_image_buffer_, frame, &info, | 361 RtpFragmentize(&encoded_image_, &encoded_image_buffer_, *frame_buffer, &info, |
401 &frag_header); | 362 &frag_header); |
402 | 363 |
403 // Encoder can skip frames to save bandwidth in which case | 364 // Encoder can skip frames to save bandwidth in which case |
404 // |encoded_image_._length| == 0. | 365 // |encoded_image_._length| == 0. |
405 if (encoded_image_._length > 0) { | 366 if (encoded_image_._length > 0) { |
406 // Deliver encoded image. | 367 // Deliver encoded image. |
407 CodecSpecificInfo codec_specific; | 368 CodecSpecificInfo codec_specific; |
408 codec_specific.codecType = kVideoCodecH264; | 369 codec_specific.codecType = kVideoCodecH264; |
409 encoded_image_callback_->Encoded(encoded_image_, | 370 encoded_image_callback_->Encoded(encoded_image_, &codec_specific, |
410 &codec_specific, | |
411 &frag_header); | 371 &frag_header); |
| 372 |
| 373 // Parse and report QP. |
| 374 h264_bitstream_parser_.ParseBitstream(encoded_image_._buffer, |
| 375 encoded_image_._length); |
| 376 int qp = -1; |
| 377 if (h264_bitstream_parser_.GetLastSliceQp(&qp)) |
| 378 quality_scaler_.ReportQP(qp); |
| 379 } else { |
| 380 quality_scaler_.ReportDroppedFrame(); |
412 } | 381 } |
413 return WEBRTC_VIDEO_CODEC_OK; | 382 return WEBRTC_VIDEO_CODEC_OK; |
414 } | 383 } |
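The new quality-scaler hooks above close a feedback loop: every encoded frame reports its slice QP (or a drop), and the scaler decides whether the encoder should run at a reduced resolution. Below is a toy model of that loop, making no assumptions about the real QualityScaler internals; the threshold and window size are placeholders, not WebRTC's values:

    #include <deque>
    #include <iostream>
    #include <numeric>

    class ToyQualityScaler {
     public:
      void ReportQP(int qp) {
        window_.push_back(qp);
        if (window_.size() > 30)  // Placeholder: sliding window of 30 frames.
          window_.pop_front();
      }
      void ReportDroppedFrame() { ReportQP(kBadQp + 1); }  // Count drops as bad quality.
      // Scale divisor: 1 = full resolution, 2 = half width and height.
      int ScaleDivisor() const {
        if (window_.empty())
          return 1;
        const int avg = std::accumulate(window_.begin(), window_.end(), 0) /
                        static_cast<int>(window_.size());
        return avg > kBadQp ? 2 : 1;
      }
     private:
      static const int kBadQp = 37;  // Placeholder threshold.
      std::deque<int> window_;
    };

    int main() {
      ToyQualityScaler scaler;
      for (int i = 0; i < 30; ++i)
        scaler.ReportQP(45);  // Sustained high QP: encoder is starved for bits.
      std::cout << scaler.ScaleDivisor() << "\n";  // Prints 2: downscale.
    }

In this CL the downscaling is applied upstream of the codec: GetScaledBuffer() hands Encode() a possibly scaled VideoFrameBuffer, and the resolution check at the top of Encode() reinitializes the OpenH264 parameters when the size changes.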
415 | 384 |
416 bool H264EncoderImpl::IsInitialized() const { | 385 bool H264EncoderImpl::IsInitialized() const { |
417 return openh264_encoder_ != nullptr; | 386 return openh264_encoder_ != nullptr; |
418 } | 387 } |
419 | 388 |
| 389 // Initialization parameters. |
| 390 // There are two ways to initialize. There is SEncParamBase (cleared with |
| 391 // memset(&p, 0, sizeof(SEncParamBase)) used in Initialize, and SEncParamExt |
| 392 // which is a superset of SEncParamBase (cleared with GetDefaultParams) used |
| 393 // in InitializeExt. |
| 394 SEncParamExt H264EncoderImpl::CreateEncoderParams() const { |
| 395 RTC_DCHECK(openh264_encoder_); |
| 396 SEncParamExt encoder_params; |
| 397 openh264_encoder_->GetDefaultParams(&encoder_params); |
| 398 if (codec_settings_.mode == kRealtimeVideo) { |
| 399 encoder_params.iUsageType = CAMERA_VIDEO_REAL_TIME; |
| 400 } else if (codec_settings_.mode == kScreensharing) { |
| 401 encoder_params.iUsageType = SCREEN_CONTENT_REAL_TIME; |
| 402 } else { |
| 403 RTC_NOTREACHED(); |
| 404 } |
| 405 encoder_params.iPicWidth = codec_settings_.width; |
| 406 encoder_params.iPicHeight = codec_settings_.height; |
| 407 // |encoder_params| uses bit/s, |codec_settings_| uses kbit/s. |
| 408 encoder_params.iTargetBitrate = codec_settings_.targetBitrate * 1000; |
| 409 encoder_params.iMaxBitrate = codec_settings_.maxBitrate * 1000; |
| 410 // Rate Control mode |
| 411 encoder_params.iRCMode = RC_BITRATE_MODE; |
| 412 encoder_params.fMaxFrameRate = |
| 413 static_cast<float>(codec_settings_.maxFramerate); |
| 414 |
| 415 // The following parameters are extension parameters (they're in SEncParamExt, |
| 416 // not in SEncParamBase). |
| 417 encoder_params.bEnableFrameSkip = |
| 418 codec_settings_.codecSpecific.H264.frameDroppingOn; |
| 419 // |uiIntraPeriod| - multiple of GOP size |
| 420 // |keyFrameInterval| - number of frames |
| 421 encoder_params.uiIntraPeriod = |
| 422 codec_settings_.codecSpecific.H264.keyFrameInterval; |
| 423 encoder_params.uiMaxNalSize = 0; |
| 424 // Threading model: use auto. |
| 425 // 0: auto (dynamically decided by the internal encoder)
| 426 // 1: single thread (default value) |
| 427 // >1: number of threads |
| 428 encoder_params.iMultipleThreadIdc = NumberOfThreads( |
| 429 encoder_params.iPicWidth, encoder_params.iPicHeight, number_of_cores_); |
| 430 // The base spatial layer 0 is the only one we use. |
| 431 encoder_params.sSpatialLayers[0].iVideoWidth = encoder_params.iPicWidth; |
| 432 encoder_params.sSpatialLayers[0].iVideoHeight = encoder_params.iPicHeight; |
| 433 encoder_params.sSpatialLayers[0].fFrameRate = encoder_params.fMaxFrameRate; |
| 434 encoder_params.sSpatialLayers[0].iSpatialBitrate = |
| 435 encoder_params.iTargetBitrate; |
| 436 encoder_params.sSpatialLayers[0].iMaxSpatialBitrate = |
| 437 encoder_params.iMaxBitrate; |
| 438 // Slice num according to number of threads. |
| 439 encoder_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE; |
| 440 |
| 441 return encoder_params; |
| 442 } |
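The iMultipleThreadIdc contract documented above (0 = auto, 1 = single thread, >1 = explicit count) is fed by NumberOfThreads(), whose body falls in the lines this excerpt skips. A hypothetical heuristic of the shape such a function could take; the resolution and core cut-offs below are invented for illustration:

    // Hypothetical only: the real NumberOfThreads() is not shown in this diff.
    static int NumberOfThreadsSketch(int width, int height, int number_of_cores) {
      if (width * height >= 1280 * 720 && number_of_cores > 4)
        return 4;  // Spread HD encoding over several threads.
      if (width * height >= 640 * 480 && number_of_cores >= 2)
        return 2;
      return 1;  // Small frames encode fast enough single-threaded.
    }

Whatever the heuristic, SM_AUTO_SLICE then derives the slice count from the thread count (per the "Slice num according to number of threads" comment), which is why the two settings are configured together.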
| 443 |
420 void H264EncoderImpl::ReportInit() { | 444 void H264EncoderImpl::ReportInit() { |
421 if (has_reported_init_) | 445 if (has_reported_init_) |
422 return; | 446 return; |
423 RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event", | 447 RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event", |
424 kH264EncoderEventInit, | 448 kH264EncoderEventInit, |
425 kH264EncoderEventMax); | 449 kH264EncoderEventMax); |
426 has_reported_init_ = true; | 450 has_reported_init_ = true; |
427 } | 451 } |
428 | 452 |
429 void H264EncoderImpl::ReportError() { | 453 void H264EncoderImpl::ReportError() { |
430 if (has_reported_error_) | 454 if (has_reported_error_) |
431 return; | 455 return; |
432 RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event", | 456 RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event", |
433 kH264EncoderEventError, | 457 kH264EncoderEventError, |
434 kH264EncoderEventMax); | 458 kH264EncoderEventMax); |
435 has_reported_error_ = true; | 459 has_reported_error_ = true; |
436 } | 460 } |
437 | 461 |
438 int32_t H264EncoderImpl::SetChannelParameters( | 462 int32_t H264EncoderImpl::SetChannelParameters( |
439 uint32_t packet_loss, int64_t rtt) { | 463 uint32_t packet_loss, int64_t rtt) { |
440 return WEBRTC_VIDEO_CODEC_OK; | 464 return WEBRTC_VIDEO_CODEC_OK; |
441 } | 465 } |
442 | 466 |
443 int32_t H264EncoderImpl::SetPeriodicKeyFrames(bool enable) { | 467 int32_t H264EncoderImpl::SetPeriodicKeyFrames(bool enable) { |
444 return WEBRTC_VIDEO_CODEC_OK; | 468 return WEBRTC_VIDEO_CODEC_OK; |
445 } | 469 } |
446 | 470 |
447 void H264EncoderImpl::OnDroppedFrame() { | 471 void H264EncoderImpl::OnDroppedFrame() { |
| 472 quality_scaler_.ReportDroppedFrame(); |
448 } | 473 } |
449 | 474 |
450 } // namespace webrtc | 475 } // namespace webrtc |