OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 * | 9 * |
10 */ | 10 */ |
(...skipping 46 matching lines...)
57 } | 57 } |
58 | 58 |
59 void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt, | 59 void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt, |
60 void* user_data) { | 60 void* user_data) { |
61 VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data); | 61 VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data); |
62 enc->GetEncodedLayerFrame(pkt); | 62 enc->GetEncodedLayerFrame(pkt); |
63 } | 63 } |
64 | 64 |
65 VP9EncoderImpl::VP9EncoderImpl() | 65 VP9EncoderImpl::VP9EncoderImpl() |
66 : encoded_image_(), | 66 : encoded_image_(), |
67 encoded_complete_callback_(NULL), | 67 encoded_complete_callback_(nullptr), |
68 inited_(false), | 68 inited_(false), |
69 timestamp_(0), | 69 timestamp_(0), |
70 cpu_speed_(3), | 70 cpu_speed_(3), |
71 rc_max_intra_target_(0), | 71 rc_max_intra_target_(0), |
72 encoder_(NULL), | 72 encoder_(nullptr), |
73 config_(NULL), | 73 config_(nullptr), |
74 raw_(NULL), | 74 raw_(nullptr), |
75 input_image_(NULL), | 75 input_image_(nullptr), |
76 frames_since_kf_(0), | 76 frames_since_kf_(0), |
77 num_temporal_layers_(0), | 77 num_temporal_layers_(0), |
78 num_spatial_layers_(0), | 78 num_spatial_layers_(0), |
79 is_flexible_mode_(false), | 79 is_flexible_mode_(false), |
80 frames_encoded_(0), | 80 frames_encoded_(0), |
81 // Use two spatial layers when screensharing with flexible mode. | 81 // Use two spatial layers when screensharing with flexible mode. |
82 spatial_layer_(new ScreenshareLayersVP9(2)) { | 82 spatial_layer_(new ScreenshareLayersVP9(2)) { |
83 memset(&codec_, 0, sizeof(codec_)); | 83 memset(&codec_, 0, sizeof(codec_)); |
84 memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t)); | 84 memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t)); |
85 | 85 |
86 Random random(rtc::TimeMicros()); | 86 Random random(rtc::TimeMicros()); |
87 picture_id_ = random.Rand<uint16_t>() & 0x7FFF; | 87 picture_id_ = random.Rand<uint16_t>() & 0x7FFF; |
88 tl0_pic_idx_ = random.Rand<uint8_t>(); | 88 tl0_pic_idx_ = random.Rand<uint8_t>(); |
89 } | 89 } |
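
Note on the two random values above: the VP9 RTP payload descriptor carries the picture ID in at most 15 bits and tl0_pic_idx in 8 bits, so the starting values are masked/truncated to fit. A minimal sketch of the wrap-around arithmetic any later increment must preserve (kMaxVp9PictureId and NextPictureId are illustrative names, not part of this file):

    #include <cstdint>

    // 15-bit extended PICTURE ID space of the VP9 payload descriptor.
    constexpr uint16_t kMaxVp9PictureId = 0x7FFF;

    uint16_t NextPictureId(uint16_t picture_id) {
      // Wraps 0x7FFF back to 0 instead of spilling into the 16th bit.
      return (picture_id + 1) & kMaxVp9PictureId;
    }
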
90 | 90 |
91 VP9EncoderImpl::~VP9EncoderImpl() { | 91 VP9EncoderImpl::~VP9EncoderImpl() { |
92 Release(); | 92 Release(); |
93 } | 93 } |
94 | 94 |
95 int VP9EncoderImpl::Release() { | 95 int VP9EncoderImpl::Release() { |
96 if (encoded_image_._buffer != NULL) { | 96 if (encoded_image_._buffer != nullptr) { |
97 delete[] encoded_image_._buffer; | 97 delete[] encoded_image_._buffer; |
98 encoded_image_._buffer = NULL; | 98 encoded_image_._buffer = nullptr; |
99 } | 99 } |
100 if (encoder_ != NULL) { | 100 if (encoder_ != nullptr) { |
101 if (vpx_codec_destroy(encoder_)) { | 101 if (vpx_codec_destroy(encoder_)) { |
102 return WEBRTC_VIDEO_CODEC_MEMORY; | 102 return WEBRTC_VIDEO_CODEC_MEMORY; |
103 } | 103 } |
104 delete encoder_; | 104 delete encoder_; |
105 encoder_ = NULL; | 105 encoder_ = nullptr; |
106 } | 106 } |
107 if (config_ != NULL) { | 107 if (config_ != nullptr) { |
108 delete config_; | 108 delete config_; |
109 config_ = NULL; | 109 config_ = nullptr; |
110 } | 110 } |
111 if (raw_ != NULL) { | 111 if (raw_ != nullptr) { |
112 vpx_img_free(raw_); | 112 vpx_img_free(raw_); |
113 raw_ = NULL; | 113 raw_ = nullptr; |
114 } | 114 } |
115 inited_ = false; | 115 inited_ = false; |
116 return WEBRTC_VIDEO_CODEC_OK; | 116 return WEBRTC_VIDEO_CODEC_OK; |
117 } | 117 } |
118 | 118 |
119 bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const { | 119 bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const { |
120 // We check target_bitrate_bps of the 0th layer to see if the spatial layers | 120 // We check target_bitrate_bps of the 0th layer to see if the spatial layers |
121 // (i.e. bitrates) were explicitly configured. | 121 // (i.e. bitrates) were explicitly configured. |
122 return num_spatial_layers_ > 1 && | 122 return num_spatial_layers_ > 1 && |
123 codec_.spatialLayers[0].target_bitrate_bps > 0; | 123 codec_.spatialLayers[0].target_bitrate_bps > 0; |
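
For context, this predicate is true only when the application supplied per-layer bitrates itself. A hedged sketch of such a configuration (field names follow the accesses above; the bitrate values are made up):

    VideoCodec codec;  // Zero-initialized in real code, e.g. via memset.
    codec.codecType = kVideoCodecVP9;
    codec.VP9()->numberOfSpatialLayers = 2;
    // Explicit per-spatial-layer targets; leaving these at 0 lets the
    // encoder derive its own rate split instead.
    codec.spatialLayers[0].target_bitrate_bps = 150000;
    codec.spatialLayers[1].target_bitrate_bps = 500000;
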
(...skipping 101 matching lines...)
225 // Update encoder context | 225 // Update encoder context |
226 if (vpx_codec_enc_config_set(encoder_, config_)) { | 226 if (vpx_codec_enc_config_set(encoder_, config_)) { |
227 return WEBRTC_VIDEO_CODEC_ERROR; | 227 return WEBRTC_VIDEO_CODEC_ERROR; |
228 } | 228 } |
229 return WEBRTC_VIDEO_CODEC_OK; | 229 return WEBRTC_VIDEO_CODEC_OK; |
230 } | 230 } |
231 | 231 |
232 int VP9EncoderImpl::InitEncode(const VideoCodec* inst, | 232 int VP9EncoderImpl::InitEncode(const VideoCodec* inst, |
233 int number_of_cores, | 233 int number_of_cores, |
234 size_t /*max_payload_size*/) { | 234 size_t /*max_payload_size*/) { |
235 if (inst == NULL) { | 235 if (inst == nullptr) { |
236 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 236 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
237 } | 237 } |
238 if (inst->maxFramerate < 1) { | 238 if (inst->maxFramerate < 1) { |
239 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 239 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
240 } | 240 } |
241 // Allow zero to represent an unspecified maxBitRate | 241 // Allow zero to represent an unspecified maxBitRate |
242 if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) { | 242 if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) { |
243 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 243 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
244 } | 244 } |
245 if (inst->width < 1 || inst->height < 1) { | 245 if (inst->width < 1 || inst->height < 1) { |
246 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 246 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
247 } | 247 } |
248 if (number_of_cores < 1) { | 248 if (number_of_cores < 1) { |
249 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 249 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
250 } | 250 } |
251 if (inst->VP9().numberOfTemporalLayers > 3) { | 251 if (inst->VP9().numberOfTemporalLayers > 3) { |
252 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 252 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
253 } | 253 } |
254 // libvpx probably does not support more than 3 spatial layers. | 254 // libvpx probably does not support more than 3 spatial layers. |
255 if (inst->VP9().numberOfSpatialLayers > 3) { | 255 if (inst->VP9().numberOfSpatialLayers > 3) { |
256 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 256 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
257 } | 257 } |
258 | 258 |
259 int ret_val = Release(); | 259 int ret_val = Release(); |
260 if (ret_val < 0) { | 260 if (ret_val < 0) { |
261 return ret_val; | 261 return ret_val; |
262 } | 262 } |
263 if (encoder_ == NULL) { | 263 if (encoder_ == nullptr) { |
264 encoder_ = new vpx_codec_ctx_t; | 264 encoder_ = new vpx_codec_ctx_t; |
265 } | 265 } |
266 if (config_ == NULL) { | 266 if (config_ == nullptr) { |
267 config_ = new vpx_codec_enc_cfg_t; | 267 config_ = new vpx_codec_enc_cfg_t; |
268 } | 268 } |
269 timestamp_ = 0; | 269 timestamp_ = 0; |
270 if (&codec_ != inst) { | 270 if (&codec_ != inst) { |
271 codec_ = *inst; | 271 codec_ = *inst; |
272 } | 272 } |
273 | 273 |
274 num_spatial_layers_ = inst->VP9().numberOfSpatialLayers; | 274 num_spatial_layers_ = inst->VP9().numberOfSpatialLayers; |
275 num_temporal_layers_ = inst->VP9().numberOfTemporalLayers; | 275 num_temporal_layers_ = inst->VP9().numberOfTemporalLayers; |
276 if (num_temporal_layers_ == 0) | 276 if (num_temporal_layers_ == 0) |
277 num_temporal_layers_ = 1; | 277 num_temporal_layers_ = 1; |
278 | 278 |
279 // Allocate memory for encoded image | 279 // Allocate memory for encoded image |
280 if (encoded_image_._buffer != NULL) { | 280 if (encoded_image_._buffer != nullptr) { |
281 delete[] encoded_image_._buffer; | 281 delete[] encoded_image_._buffer; |
282 } | 282 } |
283 encoded_image_._size = | 283 encoded_image_._size = |
284 CalcBufferSize(VideoType::kI420, codec_.width, codec_.height); | 284 CalcBufferSize(VideoType::kI420, codec_.width, codec_.height); |
285 encoded_image_._buffer = new uint8_t[encoded_image_._size]; | 285 encoded_image_._buffer = new uint8_t[encoded_image_._size]; |
286 encoded_image_._completeFrame = true; | 286 encoded_image_._completeFrame = true; |
287 // Creating a wrapper to the image - setting image data to NULL. Actual | 287 // Create a wrapper around the image, setting image data to nullptr. The |
288 // pointer will be set in encode. Setting align to 1, as it is meaningless | 288 // actual pointer is set in Encode(). Align is set to 1, as it is |
289 // (actual memory is not allocated). | 289 // meaningless here (no actual memory is allocated). |
290 raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height, 1, | 290 raw_ = vpx_img_wrap(nullptr, VPX_IMG_FMT_I420, codec_.width, codec_.height, 1, |
291 NULL); | 291 nullptr); |
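
Because raw_ wraps no memory of its own, Encode() (elided in this diff) must point its planes at each input frame before calling vpx_codec_encode. A sketch of that hookup, assuming an I420 buffer named i420_buffer (a hypothetical local, not shown in this CL):

    // Point the vpx_image_t wrapper at the frame's pixel data; no copy.
    raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(i420_buffer->DataY());
    raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(i420_buffer->DataU());
    raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(i420_buffer->DataV());
    raw_->stride[VPX_PLANE_Y] = i420_buffer->StrideY();
    raw_->stride[VPX_PLANE_U] = i420_buffer->StrideU();
    raw_->stride[VPX_PLANE_V] = i420_buffer->StrideV();
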
292 // Populate encoder configuration with default values. | 292 // Populate encoder configuration with default values. |
293 if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) { | 293 if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) { |
294 return WEBRTC_VIDEO_CODEC_ERROR; | 294 return WEBRTC_VIDEO_CODEC_ERROR; |
295 } | 295 } |
296 config_->g_w = codec_.width; | 296 config_->g_w = codec_.width; |
297 config_->g_h = codec_.height; | 297 config_->g_h = codec_.height; |
298 config_->rc_target_bitrate = inst->startBitrate; // in kbit/s | 298 config_->rc_target_bitrate = inst->startBitrate; // in kbit/s |
299 config_->g_error_resilient = inst->VP9().resilienceOn ? 1 : 0; | 299 config_->g_error_resilient = inst->VP9().resilienceOn ? 1 : 0; |
300 // Setting the time base of the codec. | 300 // Setting the time base of the codec. |
301 config_->g_timebase.num = 1; | 301 config_->g_timebase.num = 1; |
(...skipping 181 matching lines...)
483 const uint32_t min_intra_size = 300; | 483 const uint32_t min_intra_size = 300; |
484 return (target_pct < min_intra_size) ? min_intra_size : target_pct; | 484 return (target_pct < min_intra_size) ? min_intra_size : target_pct; |
485 } | 485 } |
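
The tail of MaxIntraTarget() above is a floor clamp: libvpx reads rc_max_intra_target as a percentage of the per-frame average bitrate, and values under 300% can starve keyframes. A standalone restatement of just the clamp (the computation of target_pct is skipped in this diff):

    #include <algorithm>
    #include <cstdint>

    uint32_t ClampIntraTarget(uint32_t target_pct) {
      constexpr uint32_t kMinIntraSizePct = 300;  // Floor, in percent.
      return std::max(target_pct, kMinIntraSizePct);
    }
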
486 | 486 |
487 int VP9EncoderImpl::Encode(const VideoFrame& input_image, | 487 int VP9EncoderImpl::Encode(const VideoFrame& input_image, |
488 const CodecSpecificInfo* codec_specific_info, | 488 const CodecSpecificInfo* codec_specific_info, |
489 const std::vector<FrameType>* frame_types) { | 489 const std::vector<FrameType>* frame_types) { |
490 if (!inited_) { | 490 if (!inited_) { |
491 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 491 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
492 } | 492 } |
493 if (encoded_complete_callback_ == NULL) { | 493 if (encoded_complete_callback_ == nullptr) { |
494 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 494 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
495 } | 495 } |
496 FrameType frame_type = kVideoFrameDelta; | 496 FrameType frame_type = kVideoFrameDelta; |
497 // We only support one stream at the moment. | 497 // We only support one stream at the moment. |
498 if (frame_types && frame_types->size() > 0) { | 498 if (frame_types && frame_types->size() > 0) { |
499 frame_type = (*frame_types)[0]; | 499 frame_type = (*frame_types)[0]; |
500 } | 500 } |
501 RTC_DCHECK_EQ(input_image.width(), raw_->d_w); | 501 RTC_DCHECK_EQ(input_image.width(), raw_->d_w); |
502 RTC_DCHECK_EQ(input_image.height(), raw_->d_h); | 502 RTC_DCHECK_EQ(input_image.height(), raw_->d_h); |
503 | 503 |
(...skipping 36 matching lines...)
540 settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(), | 540 settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(), |
541 send_keyframe); | 541 send_keyframe); |
542 } | 542 } |
543 enc_layer_conf = GenerateRefsAndFlags(settings); | 543 enc_layer_conf = GenerateRefsAndFlags(settings); |
544 layer_id.temporal_layer_id = 0; | 544 layer_id.temporal_layer_id = 0; |
545 layer_id.spatial_layer_id = settings.start_layer; | 545 layer_id.spatial_layer_id = settings.start_layer; |
546 vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id); | 546 vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id); |
547 vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf); | 547 vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf); |
548 } | 548 } |
549 | 549 |
550 assert(codec_.maxFramerate > 0); | 550 RTC_CHECK_GT(codec_.maxFramerate, 0); |
551 uint32_t duration = 90000 / codec_.maxFramerate; | 551 uint32_t duration = 90000 / codec_.maxFramerate; |
552 if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags, | 552 if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags, |
553 VPX_DL_REALTIME)) { | 553 VPX_DL_REALTIME)) { |
554 return WEBRTC_VIDEO_CODEC_ERROR; | 554 return WEBRTC_VIDEO_CODEC_ERROR; |
555 } | 555 } |
556 timestamp_ += duration; | 556 timestamp_ += duration; |
557 | 557 |
558 return WEBRTC_VIDEO_CODEC_OK; | 558 return WEBRTC_VIDEO_CODEC_OK; |
559 } | 559 } |
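
timestamp_ and duration above are in 90 kHz RTP ticks, so the encoder clock advances by 90000 / maxFramerate per frame. A quick compile-time check of that arithmetic (framerate value illustrative):

    #include <cstdint>

    constexpr uint32_t kRtpClockRateHz = 90000;
    constexpr uint32_t kMaxFramerateFps = 30;
    constexpr uint32_t kDuration = kRtpClockRateHz / kMaxFramerateFps;
    static_assert(kDuration == 3000, "30 fps -> 3000 ticks (33.3 ms) per frame");
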
560 | 560 |
561 void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, | 561 void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, |
562 const vpx_codec_cx_pkt& pkt, | 562 const vpx_codec_cx_pkt& pkt, |
563 uint32_t timestamp) { | 563 uint32_t timestamp) { |
564 assert(codec_specific != NULL); | 564 RTC_CHECK(codec_specific != nullptr); |
565 codec_specific->codecType = kVideoCodecVP9; | 565 codec_specific->codecType = kVideoCodecVP9; |
566 codec_specific->codec_name = ImplementationName(); | 566 codec_specific->codec_name = ImplementationName(); |
567 CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9); | 567 CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9); |
568 // TODO(asapersson): Set correct value. | 568 // TODO(asapersson): Set correct value. |
569 vp9_info->inter_pic_predicted = | 569 vp9_info->inter_pic_predicted = |
570 (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true; | 570 !(pkt.data.frame.flags & VPX_FRAME_IS_KEY); |
571 vp9_info->flexible_mode = codec_.VP9()->flexibleMode; | 571 vp9_info->flexible_mode = codec_.VP9()->flexibleMode; |
572 vp9_info->ss_data_available = | 572 vp9_info->ss_data_available = |
573 ((pkt.data.frame.flags & VPX_FRAME_IS_KEY) && !codec_.VP9()->flexibleMode) | 573 ((pkt.data.frame.flags & VPX_FRAME_IS_KEY) && |
574 ? true | 574 !codec_.VP9()->flexibleMode); |
575 : false; | 575 |
576 | 576 |
577 vpx_svc_layer_id_t layer_id = {0}; | 577 vpx_svc_layer_id_t layer_id = {0}; |
578 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); | 578 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); |
579 | 579 |
580 assert(num_temporal_layers_ > 0); | 580 RTC_CHECK_GT(num_temporal_layers_, 0); |
581 assert(num_spatial_layers_ > 0); | 581 RTC_CHECK_GT(num_spatial_layers_, 0); |
582 if (num_temporal_layers_ == 1) { | 582 if (num_temporal_layers_ == 1) { |
583 assert(layer_id.temporal_layer_id == 0); | 583 RTC_CHECK_EQ(layer_id.temporal_layer_id, 0); |
584 vp9_info->temporal_idx = kNoTemporalIdx; | 584 vp9_info->temporal_idx = kNoTemporalIdx; |
585 } else { | 585 } else { |
586 vp9_info->temporal_idx = layer_id.temporal_layer_id; | 586 vp9_info->temporal_idx = layer_id.temporal_layer_id; |
587 } | 587 } |
588 if (num_spatial_layers_ == 1) { | 588 if (num_spatial_layers_ == 1) { |
589 assert(layer_id.spatial_layer_id == 0); | 589 RTC_CHECK_EQ(layer_id.spatial_layer_id, 0); |
590 vp9_info->spatial_idx = kNoSpatialIdx; | 590 vp9_info->spatial_idx = kNoSpatialIdx; |
591 } else { | 591 } else { |
592 vp9_info->spatial_idx = layer_id.spatial_layer_id; | 592 vp9_info->spatial_idx = layer_id.spatial_layer_id; |
593 } | 593 } |
594 if (layer_id.spatial_layer_id != 0) { | 594 if (layer_id.spatial_layer_id != 0) { |
595 vp9_info->ss_data_available = false; | 595 vp9_info->ss_data_available = false; |
596 } | 596 } |
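
The branches above reduce to one rule: report a real layer index only when more than one layer is configured, and otherwise signal kNoTemporalIdx/kNoSpatialIdx so the packetizer can omit the field. A compact restatement of just that mapping, using the same names (a behaviorally equivalent sketch, not a suggested rewrite):

    vp9_info->temporal_idx = (num_temporal_layers_ == 1)
                                 ? kNoTemporalIdx
                                 : layer_id.temporal_layer_id;
    vp9_info->spatial_idx = (num_spatial_layers_ == 1)
                                ? kNoSpatialIdx
                                : layer_id.spatial_layer_id;
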
597 | 597 |
598 // TODO(asapersson): this info has to be obtained from the encoder. | 598 // TODO(asapersson): this info has to be obtained from the encoder. |
599 vp9_info->temporal_up_switch = false; | 599 vp9_info->temporal_up_switch = false; |
(...skipping 239 matching lines...)
839 | 839 |
840 bool VP9Decoder::IsSupported() { | 840 bool VP9Decoder::IsSupported() { |
841 return true; | 841 return true; |
842 } | 842 } |
843 | 843 |
844 VP9Decoder* VP9Decoder::Create() { | 844 VP9Decoder* VP9Decoder::Create() { |
845 return new VP9DecoderImpl(); | 845 return new VP9DecoderImpl(); |
846 } | 846 } |
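
Create() returns an owning raw pointer, per WebRTC convention of the time; callers would typically take ownership immediately (a usage sketch, not a line from this CL):

    #include <memory>

    std::unique_ptr<VP9Decoder> decoder(VP9Decoder::Create());
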
847 | 847 |
848 VP9DecoderImpl::VP9DecoderImpl() | 848 VP9DecoderImpl::VP9DecoderImpl() |
849 : decode_complete_callback_(NULL), | 849 : decode_complete_callback_(nullptr), |
850 inited_(false), | 850 inited_(false), |
851 decoder_(NULL), | 851 decoder_(nullptr), |
852 key_frame_required_(true) { | 852 key_frame_required_(true) { |
853 memset(&codec_, 0, sizeof(codec_)); | 853 memset(&codec_, 0, sizeof(codec_)); |
854 } | 854 } |
855 | 855 |
856 VP9DecoderImpl::~VP9DecoderImpl() { | 856 VP9DecoderImpl::~VP9DecoderImpl() { |
857 inited_ = true; // in order to do the actual release | 857 inited_ = true; // in order to do the actual release |
858 Release(); | 858 Release(); |
859 int num_buffers_in_use = frame_buffer_pool_.GetNumBuffersInUse(); | 859 int num_buffers_in_use = frame_buffer_pool_.GetNumBuffersInUse(); |
860 if (num_buffers_in_use > 0) { | 860 if (num_buffers_in_use > 0) { |
861 // The frame buffers are reference counted and frames are exposed after | 861 // The frame buffers are reference counted and frames are exposed after |
862 // decoding. There may be valid use cases where previous frames are still | 862 // decoding. There may be valid use cases where previous frames are still |
863 // referenced after ~VP9DecoderImpl; that is not a leak. | 863 // referenced after ~VP9DecoderImpl; that is not a leak. |
864 LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still " | 864 LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still " |
865 << "referenced during ~VP9DecoderImpl."; | 865 << "referenced during ~VP9DecoderImpl."; |
866 } | 866 } |
867 } | 867 } |
868 | 868 |
869 int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) { | 869 int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) { |
870 if (inst == NULL) { | 870 if (inst == nullptr) { |
871 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 871 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
872 } | 872 } |
873 int ret_val = Release(); | 873 int ret_val = Release(); |
874 if (ret_val < 0) { | 874 if (ret_val < 0) { |
875 return ret_val; | 875 return ret_val; |
876 } | 876 } |
877 if (decoder_ == NULL) { | 877 if (decoder_ == nullptr) { |
878 decoder_ = new vpx_codec_ctx_t; | 878 decoder_ = new vpx_codec_ctx_t; |
879 } | 879 } |
880 vpx_codec_dec_cfg_t cfg; | 880 vpx_codec_dec_cfg_t cfg; |
881 // Setting the number of threads to a constant value (1). | 881 // Setting the number of threads to a constant value (1). |
882 cfg.threads = 1; | 882 cfg.threads = 1; |
883 cfg.h = cfg.w = 0; // set after decode | 883 cfg.h = cfg.w = 0; // set after decode |
884 vpx_codec_flags_t flags = 0; | 884 vpx_codec_flags_t flags = 0; |
885 if (vpx_codec_dec_init(decoder_, vpx_codec_vp9_dx(), &cfg, flags)) { | 885 if (vpx_codec_dec_init(decoder_, vpx_codec_vp9_dx(), &cfg, flags)) { |
886 return WEBRTC_VIDEO_CODEC_MEMORY; | 886 return WEBRTC_VIDEO_CODEC_MEMORY; |
887 } | 887 } |
(...skipping 13 matching lines...)
901 } | 901 } |
902 | 902 |
903 int VP9DecoderImpl::Decode(const EncodedImage& input_image, | 903 int VP9DecoderImpl::Decode(const EncodedImage& input_image, |
904 bool missing_frames, | 904 bool missing_frames, |
905 const RTPFragmentationHeader* fragmentation, | 905 const RTPFragmentationHeader* fragmentation, |
906 const CodecSpecificInfo* codec_specific_info, | 906 const CodecSpecificInfo* codec_specific_info, |
907 int64_t /*render_time_ms*/) { | 907 int64_t /*render_time_ms*/) { |
908 if (!inited_) { | 908 if (!inited_) { |
909 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 909 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
910 } | 910 } |
911 if (decode_complete_callback_ == NULL) { | 911 if (decode_complete_callback_ == nullptr) { |
912 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 912 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
913 } | 913 } |
914 // Always start with a complete key frame. | 914 // Always start with a complete key frame. |
915 if (key_frame_required_) { | 915 if (key_frame_required_) { |
916 if (input_image._frameType != kVideoFrameKey) | 916 if (input_image._frameType != kVideoFrameKey) |
917 return WEBRTC_VIDEO_CODEC_ERROR; | 917 return WEBRTC_VIDEO_CODEC_ERROR; |
918 // We have a key frame - is it complete? | 918 // We have a key frame - is it complete? |
919 if (input_image._completeFrame) { | 919 if (input_image._completeFrame) { |
920 key_frame_required_ = false; | 920 key_frame_required_ = false; |
921 } else { | 921 } else { |
922 return WEBRTC_VIDEO_CODEC_ERROR; | 922 return WEBRTC_VIDEO_CODEC_ERROR; |
923 } | 923 } |
924 } | 924 } |
925 vpx_codec_iter_t iter = NULL; | 925 vpx_codec_iter_t iter = nullptr; |
926 vpx_image_t* img; | 926 vpx_image_t* img; |
927 uint8_t* buffer = input_image._buffer; | 927 uint8_t* buffer = input_image._buffer; |
928 if (input_image._length == 0) { | 928 if (input_image._length == 0) { |
929 buffer = NULL; // Triggers full frame concealment. | 929 buffer = nullptr; // Triggers full frame concealment. |
930 } | 930 } |
931 // During decode libvpx may get and release buffers from |frame_buffer_pool_|. | 931 // During decode libvpx may get and release buffers from |frame_buffer_pool_|. |
932 // In practice libvpx keeps a few (~3-4) buffers alive at a time. | 932 // In practice libvpx keeps a few (~3-4) buffers alive at a time. |
933 if (vpx_codec_decode(decoder_, buffer, | 933 if (vpx_codec_decode(decoder_, buffer, |
934 static_cast<unsigned int>(input_image._length), 0, | 934 static_cast<unsigned int>(input_image._length), 0, |
935 VPX_DL_REALTIME)) { | 935 VPX_DL_REALTIME)) { |
936 return WEBRTC_VIDEO_CODEC_ERROR; | 936 return WEBRTC_VIDEO_CODEC_ERROR; |
937 } | 937 } |
938 // |img->fb_priv| contains the image data, a reference counted Vp9FrameBuffer. | 938 // |img->fb_priv| contains the image data, a reference counted Vp9FrameBuffer. |
939 // It may be released by libvpx during future vpx_codec_decode or | 939 // It may be released by libvpx during future vpx_codec_decode or |
940 // vpx_codec_destroy calls. | 940 // vpx_codec_destroy calls. |
941 img = vpx_codec_get_frame(decoder_, &iter); | 941 img = vpx_codec_get_frame(decoder_, &iter); |
942 int qp; | 942 int qp; |
943 vpx_codec_err_t vpx_ret = | 943 vpx_codec_err_t vpx_ret = |
944 vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp); | 944 vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp); |
945 RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK); | 945 RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK); |
946 int ret = | 946 int ret = |
947 ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp); | 947 ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp); |
948 if (ret != 0) { | 948 if (ret != 0) { |
949 return ret; | 949 return ret; |
950 } | 950 } |
951 return WEBRTC_VIDEO_CODEC_OK; | 951 return WEBRTC_VIDEO_CODEC_OK; |
952 } | 952 } |
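
vpx_codec_get_frame() is an iterator-style API; Decode() above fetches a single image because one VP9 packet yields at most one displayable frame. For reference, the generic libvpx drain pattern looks like this (sketch only; not needed in the code above):

    vpx_codec_iter_t iter = nullptr;
    vpx_image_t* img = nullptr;
    while ((img = vpx_codec_get_frame(decoder_, &iter)) != nullptr) {
      // Hand each image off, e.g. via ReturnFrame().
    }
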
953 | 953 |
954 int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, | 954 int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, |
955 uint32_t timestamp, | 955 uint32_t timestamp, |
956 int64_t ntp_time_ms, | 956 int64_t ntp_time_ms, |
957 int qp) { | 957 int qp) { |
958 if (img == NULL) { | 958 if (img == nullptr) { |
959 // Decoder OK and NULL image => No show frame. | 959 // Decoder OK and nullptr image => No show frame. |
960 return WEBRTC_VIDEO_CODEC_NO_OUTPUT; | 960 return WEBRTC_VIDEO_CODEC_NO_OUTPUT; |
961 } | 961 } |
962 | 962 |
963 // This buffer contains all of |img|'s image data, a reference counted | 963 // This buffer contains all of |img|'s image data, a reference counted |
964 // Vp9FrameBuffer. (libvpx is done with the buffers after a few | 964 // Vp9FrameBuffer. (libvpx is done with the buffers after a few |
965 // vpx_codec_decode calls or vpx_codec_destroy). | 965 // vpx_codec_decode calls or vpx_codec_destroy). |
966 Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer = | 966 Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer = |
967 static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv); | 967 static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv); |
968 // The buffer can be used directly by the VideoFrame (without copy) by | 968 // The buffer can be used directly by the VideoFrame (without copy) by |
969 // using a WrappedI420Buffer. | 969 // using a WrappedI420Buffer. |
(...skipping 17 matching lines...)
987 return WEBRTC_VIDEO_CODEC_OK; | 987 return WEBRTC_VIDEO_CODEC_OK; |
988 } | 988 } |
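
The zero-copy handoff in ReturnFrame() works because img->fb_priv is reference counted: taking a reference pins the libvpx-owned memory for as long as the wrapped VideoFrame lives. A conceptual sketch (assumes Vp9FrameBuffer is ref-counted, as the comments above state):

    // Pin the buffer; the VideoFrame's wrapper holds this reference, and the
    // memory returns to the pool only when the last reference is dropped.
    rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> pinned(img_buffer);
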
989 | 989 |
990 int VP9DecoderImpl::RegisterDecodeCompleteCallback( | 990 int VP9DecoderImpl::RegisterDecodeCompleteCallback( |
991 DecodedImageCallback* callback) { | 991 DecodedImageCallback* callback) { |
992 decode_complete_callback_ = callback; | 992 decode_complete_callback_ = callback; |
993 return WEBRTC_VIDEO_CODEC_OK; | 993 return WEBRTC_VIDEO_CODEC_OK; |
994 } | 994 } |
995 | 995 |
996 int VP9DecoderImpl::Release() { | 996 int VP9DecoderImpl::Release() { |
997 if (decoder_ != NULL) { | 997 if (decoder_ != nullptr) { |
998 // When a codec is destroyed libvpx will release any buffers of | 998 // When a codec is destroyed libvpx will release any buffers of |
999 // |frame_buffer_pool_| it is currently using. | 999 // |frame_buffer_pool_| it is currently using. |
1000 if (vpx_codec_destroy(decoder_)) { | 1000 if (vpx_codec_destroy(decoder_)) { |
1001 return WEBRTC_VIDEO_CODEC_MEMORY; | 1001 return WEBRTC_VIDEO_CODEC_MEMORY; |
1002 } | 1002 } |
1003 delete decoder_; | 1003 delete decoder_; |
1004 decoder_ = NULL; | 1004 decoder_ = nullptr; |
1005 } | 1005 } |
1006 // Releases buffers from the pool. Any buffers not in use are deleted. Buffers | 1006 // Releases buffers from the pool. Any buffers not in use are deleted. Buffers |
1007 // still referenced externally are deleted once fully released, rather than | 1007 // still referenced externally are deleted once fully released, rather than |
1008 // being returned to the pool. | 1008 // being returned to the pool. |
1009 frame_buffer_pool_.ClearPool(); | 1009 frame_buffer_pool_.ClearPool(); |
1010 inited_ = false; | 1010 inited_ = false; |
1011 return WEBRTC_VIDEO_CODEC_OK; | 1011 return WEBRTC_VIDEO_CODEC_OK; |
1012 } | 1012 } |
1013 | 1013 |
1014 const char* VP9DecoderImpl::ImplementationName() const { | 1014 const char* VP9DecoderImpl::ImplementationName() const { |
1015 return "libvpx"; | 1015 return "libvpx"; |
1016 } | 1016 } |
1017 | 1017 |
1018 } // namespace webrtc | 1018 } // namespace webrtc |