OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 * | 9 * |
10 */ | 10 */ |
(...skipping 227 matching lines...) | |
238 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 238 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
239 } | 239 } |
240 if (inst->codecSpecific.VP9.numberOfTemporalLayers > 3) { | 240 if (inst->codecSpecific.VP9.numberOfTemporalLayers > 3) { |
241 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 241 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
242 } | 242 } |
243 // libvpx currently supports only one or two spatial layers. | 243 // libvpx currently supports only one or two spatial layers. |
244 if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) { | 244 if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) { |
245 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 245 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
246 } | 246 } |
247 | 247 |
248 int retVal = Release(); | 248 int ret_val = Release(); |
249 if (retVal < 0) { | 249 if (ret_val < 0) { |
250 return retVal; | 250 return ret_val; |
251 } | 251 } |
252 if (encoder_ == NULL) { | 252 if (encoder_ == NULL) { |
253 encoder_ = new vpx_codec_ctx_t; | 253 encoder_ = new vpx_codec_ctx_t; |
254 } | 254 } |
255 if (config_ == NULL) { | 255 if (config_ == NULL) { |
256 config_ = new vpx_codec_enc_cfg_t; | 256 config_ = new vpx_codec_enc_cfg_t; |
257 } | 257 } |
258 timestamp_ = 0; | 258 timestamp_ = 0; |
259 if (&codec_ != inst) { | 259 if (&codec_ != inst) { |
260 codec_ = *inst; | 260 codec_ = *inst; |
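(Reader's aside, not part of the patch: the hunk above caps VP9 at three temporal and two spatial layers before any (re)initialization work happens. A minimal, self-contained sketch of that validation logic follows; the struct, function name, and error constant are illustrative stand-ins, not WebRTC's actual definitions — only the two field names and the 3-temporal / 2-spatial limits come from the code under review.)

```cpp
#include <cstdint>

// Stand-in for WEBRTC_VIDEO_CODEC_ERR_PARAMETER (hypothetical value).
constexpr int kErrParameter = -1;

// Hypothetical mirror of the codecSpecific.VP9 fields checked above.
struct Vp9Settings {
  uint8_t numberOfTemporalLayers = 1;
  uint8_t numberOfSpatialLayers = 1;
};

int ValidateVp9LayerCounts(const Vp9Settings& vp9) {
  if (vp9.numberOfTemporalLayers > 3)
    return kErrParameter;  // at most 3 temporal layers
  if (vp9.numberOfSpatialLayers > 2)
    return kErrParameter;  // libvpx currently supports at most 2 spatial layers
  return 0;
}
```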
(...skipping 382 matching lines...) | |
643 svc_internal_.svc_params.scaling_factor_num[i] / | 643 svc_internal_.svc_params.scaling_factor_num[i] / |
644 svc_internal_.svc_params.scaling_factor_den[i]; | 644 svc_internal_.svc_params.scaling_factor_den[i]; |
645 } | 645 } |
646 if (!vp9_info->flexible_mode) { | 646 if (!vp9_info->flexible_mode) { |
647 vp9_info->gof.CopyGofInfoVP9(gof_); | 647 vp9_info->gof.CopyGofInfoVP9(gof_); |
648 } | 648 } |
649 } | 649 } |
650 } | 650 } |
651 | 651 |
652 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { | 652 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { |
653 encoded_image_._length = 0; | 653 RTC_DCHECK_EQ(pkt->kind, VPX_CODEC_CX_FRAME_PKT); |
654 encoded_image_._frameType = kVideoFrameDelta; | |
655 RTPFragmentationHeader frag_info; | |
656 // Note: no data partitioning in VP9, so 1 partition only. We keep this | |
657 // fragmentation data for now, until VP9 packetizer is implemented. | |
658 frag_info.VerifyAndAllocateFragmentationHeader(1); | |
659 int part_idx = 0; | |
660 CodecSpecificInfo codec_specific; | |
661 | 654 |
662 if (pkt->data.frame.sz > encoded_image_._size) { | 655 if (pkt->data.frame.sz > encoded_image_._size) { |
663 delete[] encoded_image_._buffer; | 656 delete[] encoded_image_._buffer; |
664 encoded_image_._size = pkt->data.frame.sz; | 657 encoded_image_._size = pkt->data.frame.sz; |
665 encoded_image_._buffer = new uint8_t[encoded_image_._size]; | 658 encoded_image_._buffer = new uint8_t[encoded_image_._size]; |
666 } | 659 } |
660 memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz); | |
661 encoded_image_._length = pkt->data.frame.sz; | |
667 | 662 |
668 assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT); | 663 // Note: no data partitioning in VP9, so 1 partition only. We keep this |
669 memcpy(&encoded_image_._buffer[encoded_image_._length], pkt->data.frame.buf, | 664 // fragmentation data for now, until VP9 packetizer is implemented. |
stefan-webrtc  2016/01/25 13:57:38
It is already implemented, right? Is this comment
åsapersson  2016/01/26 07:55:32
Updated comment.
670 pkt->data.frame.sz); | 665 int part_idx = 0; |
671 frag_info.fragmentationOffset[part_idx] = encoded_image_._length; | 666 RTPFragmentationHeader frag_info; |
672 frag_info.fragmentationLength[part_idx] = | 667 frag_info.VerifyAndAllocateFragmentationHeader(1); |
673 static_cast<uint32_t>(pkt->data.frame.sz); | 668 frag_info.fragmentationOffset[part_idx] = 0; |
669 frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz; | |
674 frag_info.fragmentationPlType[part_idx] = 0; | 670 frag_info.fragmentationPlType[part_idx] = 0; |
675 frag_info.fragmentationTimeDiff[part_idx] = 0; | 671 frag_info.fragmentationTimeDiff[part_idx] = 0; |
676 encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz); | |
677 | 672 |
678 vpx_svc_layer_id_t layer_id = {0}; | 673 vpx_svc_layer_id_t layer_id = {0}; |
679 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); | 674 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); |
680 if (is_flexible_mode_ && codec_.mode == kScreensharing) | 675 if (is_flexible_mode_ && codec_.mode == kScreensharing) |
681 spatial_layer_->LayerFrameEncoded( | 676 spatial_layer_->LayerFrameEncoded( |
682 static_cast<unsigned int>(encoded_image_._length), | 677 static_cast<unsigned int>(encoded_image_._length), |
683 layer_id.spatial_layer_id); | 678 layer_id.spatial_layer_id); |
684 | 679 |
685 assert(encoded_image_._length <= encoded_image_._size); | |
686 | |
687 // End of frame. | 680 // End of frame. |
688 // Check if encoded frame is a key frame. | 681 // Check if encoded frame is a key frame. |
682 encoded_image_._frameType = kVideoFrameDelta; | |
689 if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) { | 683 if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) { |
690 encoded_image_._frameType = kVideoFrameKey; | 684 encoded_image_._frameType = kVideoFrameKey; |
691 } | 685 } |
686 RTC_DCHECK_LE(encoded_image_._length, encoded_image_._size); | |
687 | |
688 CodecSpecificInfo codec_specific; | |
692 PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp()); | 689 PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp()); |
693 | 690 |
694 if (encoded_image_._length > 0) { | 691 if (encoded_image_._length > 0) { |
695 TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length); | 692 TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length); |
696 encoded_image_._timeStamp = input_image_->timestamp(); | 693 encoded_image_._timeStamp = input_image_->timestamp(); |
697 encoded_image_.capture_time_ms_ = input_image_->render_time_ms(); | 694 encoded_image_.capture_time_ms_ = input_image_->render_time_ms(); |
698 encoded_image_._encodedHeight = raw_->d_h; | 695 encoded_image_._encodedHeight = raw_->d_h; |
699 encoded_image_._encodedWidth = raw_->d_w; | 696 encoded_image_._encodedWidth = raw_->d_w; |
700 encoded_complete_callback_->Encoded(encoded_image_, &codec_specific, | 697 encoded_complete_callback_->Encoded(encoded_image_, &codec_specific, |
701 &frag_info); | 698 &frag_info); |
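(Reader's summary of the refactor above, not extra patch code: the NEW column turns GetEncodedLayerFrame into a straight-line sequence — copy the packet into a grow-only buffer at offset 0, fill a single-partition RTPFragmentationHeader, report the layer frame in flexible screenshare mode, derive the frame type, then deliver via the Encoded() callback. Copying to offset 0 is what lets fragmentationOffset be hard-coded to 0. A minimal self-contained sketch of that grow-only copy pattern, with hypothetical names; the logic mirrors the NEW side of the diff:)

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Hypothetical EncodedBuffer type; data/capacity/length correspond to
// encoded_image_._buffer, ._size, and ._length in the diff.
struct EncodedBuffer {
  uint8_t* data = nullptr;
  size_t capacity = 0;
  size_t length = 0;

  void CopyFrame(const void* buf, size_t sz) {
    // Reallocate only when the incoming frame exceeds current capacity.
    if (sz > capacity) {
      delete[] data;  // safe on nullptr
      capacity = sz;
      data = new uint8_t[capacity];
    }
    // Always copy to offset 0, so a fragmentation offset of 0 is correct.
    std::memcpy(data, buf, sz);
    length = sz;
    assert(length <= capacity);  // counterpart of the RTC_DCHECK_LE above
  }

  ~EncodedBuffer() { delete[] data; }
};
```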
(...skipping 285 matching lines...) | |
987 frame_buffer_pool_.ClearPool(); | 984 frame_buffer_pool_.ClearPool(); |
988 inited_ = false; | 985 inited_ = false; |
989 return WEBRTC_VIDEO_CODEC_OK; | 986 return WEBRTC_VIDEO_CODEC_OK; |
990 } | 987 } |
991 | 988 |
992 const char* VP9DecoderImpl::ImplementationName() const { | 989 const char* VP9DecoderImpl::ImplementationName() const { |
993 return "libvpx"; | 990 return "libvpx"; |
994 } | 991 } |
995 | 992 |
996 } // namespace webrtc | 993 } // namespace webrtc |