| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 711 matching lines...) |
| 722 int VP8EncoderImpl::Encode(const VideoFrame& frame, | 722 int VP8EncoderImpl::Encode(const VideoFrame& frame, |
| 723 const CodecSpecificInfo* codec_specific_info, | 723 const CodecSpecificInfo* codec_specific_info, |
| 724 const std::vector<FrameType>* frame_types) { | 724 const std::vector<FrameType>* frame_types) { |
| 725 if (!inited_) | 725 if (!inited_) |
| 726 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 726 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 727 if (frame.IsZeroSize()) | 727 if (frame.IsZeroSize()) |
| 728 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 728 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 729 if (encoded_complete_callback_ == NULL) | 729 if (encoded_complete_callback_ == NULL) |
| 730 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 730 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 731 | 731 |
| 732 if (quality_scaler_enabled_) | 732 rtc::scoped_refptr<VideoFrameBuffer> input_image = frame.video_frame_buffer(); |
| 733 quality_scaler_.OnEncodeFrame(frame); | |
| 734 const VideoFrame& input_image = | |
| 735 quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame; | |
| 736 | 733 |
| 737 if (quality_scaler_enabled_ && (input_image.width() != codec_.width || | 734 if (quality_scaler_enabled_) { |
| 738 input_image.height() != codec_.height)) { | 735 quality_scaler_.OnEncodeFrame(frame.width(), frame.height()); |
| 739 int ret = UpdateCodecFrameSize(input_image); | 736 input_image = quality_scaler_.GetScaledBuffer(input_image); |
| 740 if (ret < 0) | 737 |
| 741 return ret; | 738 if (input_image->width() != codec_.width || |
| 739 input_image->height() != codec_.height) { |
| 740 int ret = |
| 741 UpdateCodecFrameSize(input_image->width(), input_image->height()); |
| 742 if (ret < 0) |
| 743 return ret; |
| 744 } |
| 742 } | 745 } |
| 743 | 746 |
| 744 // Since we are extracting raw pointers from |input_image| to | 747 // Since we are extracting raw pointers from |input_image| to |
| 745 // |raw_images_[0]|, the resolution of these frames must match. Note that | 748 // |raw_images_[0]|, the resolution of these frames must match. Note that |
| 746 // |input_image| might be scaled from |frame|. In that case, the resolution of | 749 // |input_image| might be scaled from |frame|. In that case, the resolution of |
| 747 // |raw_images_[0]| should have been updated in UpdateCodecFrameSize. | 750 // |raw_images_[0]| should have been updated in UpdateCodecFrameSize. |
| 748 RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w)); | 751 RTC_DCHECK_EQ(input_image->width(), static_cast<int>(raw_images_[0].d_w)); |
| 749 RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h)); | 752 RTC_DCHECK_EQ(input_image->height(), static_cast<int>(raw_images_[0].d_h)); |
| 750 | 753 |
| 751 // Image in vpx_image_t format. | 754 // Image in vpx_image_t format. |
| 752 // Input image is const. VP8's raw image is not defined as const. | 755 // Input image is const. VP8's raw image is not defined as const. |
| 753 raw_images_[0].planes[VPX_PLANE_Y] = | 756 raw_images_[0].planes[VPX_PLANE_Y] = |
| 754 const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY()); | 757 const_cast<uint8_t*>(input_image->DataY()); |
| 755 raw_images_[0].planes[VPX_PLANE_U] = | 758 raw_images_[0].planes[VPX_PLANE_U] = |
| 756 const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU()); | 759 const_cast<uint8_t*>(input_image->DataU()); |
| 757 raw_images_[0].planes[VPX_PLANE_V] = | 760 raw_images_[0].planes[VPX_PLANE_V] = |
| 758 const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV()); | 761 const_cast<uint8_t*>(input_image->DataV()); |
| 759 | 762 |
| 760 raw_images_[0].stride[VPX_PLANE_Y] = | 763 raw_images_[0].stride[VPX_PLANE_Y] = input_image->StrideY(); |
| 761 input_image.video_frame_buffer()->StrideY(); | 764 raw_images_[0].stride[VPX_PLANE_U] = input_image->StrideU(); |
| 762 raw_images_[0].stride[VPX_PLANE_U] = | 765 raw_images_[0].stride[VPX_PLANE_V] = input_image->StrideV(); |
| 763 input_image.video_frame_buffer()->StrideU(); | |
| 764 raw_images_[0].stride[VPX_PLANE_V] = | |
| 765 input_image.video_frame_buffer()->StrideV(); | |
| 766 | 766 |
| 767 for (size_t i = 1; i < encoders_.size(); ++i) { | 767 for (size_t i = 1; i < encoders_.size(); ++i) { |
| 768 // Scale the previous layer's image down by the downsampling factor. | 768 // Scale the previous layer's image down by the downsampling factor. |
| 769 libyuv::I420Scale( | 769 libyuv::I420Scale( |
| 770 raw_images_[i - 1].planes[VPX_PLANE_Y], | 770 raw_images_[i - 1].planes[VPX_PLANE_Y], |
| 771 raw_images_[i - 1].stride[VPX_PLANE_Y], | 771 raw_images_[i - 1].stride[VPX_PLANE_Y], |
| 772 raw_images_[i - 1].planes[VPX_PLANE_U], | 772 raw_images_[i - 1].planes[VPX_PLANE_U], |
| 773 raw_images_[i - 1].stride[VPX_PLANE_U], | 773 raw_images_[i - 1].stride[VPX_PLANE_U], |
| 774 raw_images_[i - 1].planes[VPX_PLANE_V], | 774 raw_images_[i - 1].planes[VPX_PLANE_V], |
| 775 raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w, | 775 raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w, |
| 776 raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y], | 776 raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y], |
| 777 raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U], | 777 raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U], |
| 778 raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V], | 778 raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V], |
| 779 raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w, | 779 raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w, |
| 780 raw_images_[i].d_h, libyuv::kFilterBilinear); | 780 raw_images_[i].d_h, libyuv::kFilterBilinear); |
| 781 } | 781 } |
| 782 vpx_enc_frame_flags_t flags[kMaxSimulcastStreams]; | 782 vpx_enc_frame_flags_t flags[kMaxSimulcastStreams]; |
| 783 for (size_t i = 0; i < encoders_.size(); ++i) { | 783 for (size_t i = 0; i < encoders_.size(); ++i) { |
| 784 int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp()); | 784 int ret = temporal_layers_[i]->EncodeFlags(frame.timestamp()); |
| 785 if (ret < 0) { | 785 if (ret < 0) { |
| 786 // Drop this frame. | 786 // Drop this frame. |
| 787 return WEBRTC_VIDEO_CODEC_OK; | 787 return WEBRTC_VIDEO_CODEC_OK; |
| 788 } | 788 } |
| 789 flags[i] = ret; | 789 flags[i] = ret; |
| 790 } | 790 } |
| 791 bool send_key_frame = false; | 791 bool send_key_frame = false; |
| 792 for (size_t i = 0; i < key_frame_request_.size() && i < send_stream_.size(); | 792 for (size_t i = 0; i < key_frame_request_.size() && i < send_stream_.size(); |
| 793 ++i) { | 793 ++i) { |
| 794 if (key_frame_request_[i] && send_stream_[i]) { | 794 if (key_frame_request_[i] && send_stream_[i]) { |
| (...skipping 31 matching lines...) |
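Reviewer note on the hunk above: the quality scaler now operates on a |VideoFrameBuffer| rather than a |VideoFrame|, decoupling the pixel data from the frame's metadata (timestamp, render time, rotation). A minimal sketch of the new flow, using only the QualityScaler methods visible in this diff; the final |VideoFrame| re-wrap is a hypothetical usage illustration, not part of this change:

```cpp
// Sketch only: the buffer-based scaling path introduced in this CL.
rtc::scoped_refptr<VideoFrameBuffer> buffer = frame.video_frame_buffer();
if (quality_scaler_enabled_) {
  // Feed the original resolution into the scaler's decision logic.
  quality_scaler_.OnEncodeFrame(frame.width(), frame.height());
  // May return a downscaled buffer, or the input buffer unchanged.
  buffer = quality_scaler_.GetScaledBuffer(buffer);
}
// Hypothetical usage: if a full VideoFrame is needed again, re-wrap the
// scaled buffer with the original frame's metadata.
VideoFrame scaled_frame(buffer, frame.timestamp(), frame.render_time_ms(),
                        frame.rotation());
```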
| 826 std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); | 826 std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); |
| 827 } else if (codec_specific_info && | 827 } else if (codec_specific_info && |
| 828 codec_specific_info->codecType == kVideoCodecVP8) { | 828 codec_specific_info->codecType == kVideoCodecVP8) { |
| 829 if (feedback_mode_) { | 829 if (feedback_mode_) { |
| 830 // Handle RPSI and SLI messages and set up the appropriate encode flags. | 830 // Handle RPSI and SLI messages and set up the appropriate encode flags. |
| 831 bool sendRefresh = false; | 831 bool sendRefresh = false; |
| 832 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { | 832 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { |
| 833 rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI); | 833 rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI); |
| 834 } | 834 } |
| 835 if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) { | 835 if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) { |
| 836 sendRefresh = rps_.ReceivedSLI(input_image.timestamp()); | 836 sendRefresh = rps_.ReceivedSLI(frame.timestamp()); |
| 837 } | 837 } |
| 838 for (size_t i = 0; i < encoders_.size(); ++i) { | 838 for (size_t i = 0; i < encoders_.size(); ++i) { |
| 839 flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh, | 839 flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh, |
| 840 input_image.timestamp()); | 840 frame.timestamp()); |
| 841 } | 841 } |
| 842 } else { | 842 } else { |
| 843 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { | 843 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { |
| 844 // Is this our last key frame? If not, ignore. | 844 // Is this our last key frame? If not, ignore. |
| 845 // |picture_id_| is defined per spatial stream/layer, so check that | 845 // |picture_id_| is defined per spatial stream/layer, so check that |
| 846 // |RPSI| matches the last key frame from any of the spatial streams. | 846 // |RPSI| matches the last key frame from any of the spatial streams. |
| 847 // If so, then all spatial streams for this encoding will predict from | 847 // If so, then all spatial streams for this encoding will predict from |
| 848 // its long-term reference (last key frame). | 848 // its long-term reference (last key frame). |
| 849 int RPSI = codec_specific_info->codecSpecific.VP8.pictureIdRPSI; | 849 int RPSI = codec_specific_info->codecSpecific.VP8.pictureIdRPSI; |
| 850 for (size_t i = 0; i < encoders_.size(); ++i) { | 850 for (size_t i = 0; i < encoders_.size(); ++i) { |
| (...skipping 47 matching lines...) |
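For context on the encode call in the next hunk, a minimal sketch of the libvpx realtime pattern on the single-stream path (error handling reduced). The 90 kHz duration computation is an assumption based on the surrounding encoder code, not shown in this diff:

```cpp
// Sketch only: pts and duration are in the encoder's configured timebase;
// this encoder uses the 90 kHz RTP clock (assumption), so one frame at
// |maxFramerate| fps lasts 90000 / maxFramerate ticks.
uint32_t duration = 90000 / codec_.maxFramerate;
vpx_codec_err_t err =
    vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_, duration,
                     0 /* flags */, VPX_DL_REALTIME);
if (err != VPX_CODEC_OK)
  return WEBRTC_VIDEO_CODEC_ERROR;
timestamp_ += duration;
```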
| 898 int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_, | 898 int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_, |
| 899 duration, 0, VPX_DL_REALTIME); | 899 duration, 0, VPX_DL_REALTIME); |
| 900 // Reset specific intra frame thresholds following the key frame. | 900 // Reset specific intra frame thresholds following the key frame. |
| 901 if (send_key_frame) { | 901 if (send_key_frame) { |
| 902 vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 902 vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, |
| 903 rc_max_intra_target_); | 903 rc_max_intra_target_); |
| 904 } | 904 } |
| 905 if (error) | 905 if (error) |
| 906 return WEBRTC_VIDEO_CODEC_ERROR; | 906 return WEBRTC_VIDEO_CODEC_ERROR; |
| 907 timestamp_ += duration; | 907 timestamp_ += duration; |
| 908 return GetEncodedPartitions(input_image, only_predict_from_key_frame); | 908 // GetEncodedPartitions only examines |frame|'s timestamps. |
| 909 return GetEncodedPartitions(frame, only_predict_from_key_frame); |
| 909 } | 910 } |
| 910 | 911 |
| 911 // TODO(pbos): Make sure this works properly for >1 encoders. | 912 // TODO(pbos): Make sure this works properly for >1 encoders. |
| 912 int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) { | 913 int VP8EncoderImpl::UpdateCodecFrameSize(int width, int height) { |
| 913 codec_.width = input_image.width(); | 914 codec_.width = width; |
| 914 codec_.height = input_image.height(); | 915 codec_.height = height; |
| 915 if (codec_.numberOfSimulcastStreams <= 1) { | 916 if (codec_.numberOfSimulcastStreams <= 1) { |
| 916 // For now scaling is only used for single-layer streams. | 917 // For now scaling is only used for single-layer streams. |
| 917 codec_.simulcastStream[0].width = input_image.width(); | 918 codec_.simulcastStream[0].width = width; |
| 918 codec_.simulcastStream[0].height = input_image.height(); | 919 codec_.simulcastStream[0].height = height; |
| 919 } | 920 } |
| 920 // Update the cpu_speed setting for resolution change. | 921 // Update the cpu_speed setting for resolution change. |
| 921 vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED, | 922 vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED, |
| 922 SetCpuSpeed(codec_.width, codec_.height)); | 923 SetCpuSpeed(codec_.width, codec_.height)); |
| 923 raw_images_[0].w = codec_.width; | 924 raw_images_[0].w = codec_.width; |
| 924 raw_images_[0].h = codec_.height; | 925 raw_images_[0].h = codec_.height; |
| 925 raw_images_[0].d_w = codec_.width; | 926 raw_images_[0].d_w = codec_.width; |
| 926 raw_images_[0].d_h = codec_.height; | 927 raw_images_[0].d_h = codec_.height; |
| 927 vpx_img_set_rect(&raw_images_[0], 0, 0, codec_.width, codec_.height); | 928 vpx_img_set_rect(&raw_images_[0], 0, 0, codec_.width, codec_.height); |
| 928 | 929 |
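Note on UpdateCodecFrameSize above: the top-level raw image is resized in place by patching its dimensions and re-deriving the plane layout, instead of reallocating. A standalone sketch of that vpx_image.h pattern (hypothetical helper; the new size must not exceed the allocated size):

```cpp
#include "vpx/vpx_image.h"

// Sketch only: shrink an already-allocated vpx_image_t to a new display
// size without reallocating, mirroring UpdateCodecFrameSize above.
void SetDisplaySize(vpx_image_t* img, unsigned int new_w, unsigned int new_h) {
  img->w = new_w;
  img->h = new_h;
  img->d_w = new_w;
  img->d_h = new_h;
  // Re-derives the plane pointers for the new rectangle; strides keep
  // their original (allocated) values.
  vpx_img_set_rect(img, 0, 0, new_w, new_h);
}
```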
| (...skipping 487 matching lines...) |
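The tail hunk below restores a saved reference frame on a copied decoder via VP8_SET_REFERENCE. For reference, a sketch of the full copy/set round trip (hypothetical standalone helper; |width|/|height| stand in for the decoded frame size, and the real code keeps |ref_frame_| as a member instead of allocating here):

```cpp
#include "vpx/vp8.h"
#include "vpx/vpx_decoder.h"

// Sketch only: copy the last reference frame from one VP8 decoder
// instance to another.
int CopyLastReference(vpx_codec_ctx_t* from, vpx_codec_ctx_t* to,
                      unsigned int width, unsigned int height) {
  vpx_ref_frame_t ref;
  ref.frame_type = VP8_LAST_FRAME;  // or VP8_GOLD_FRAME / VP8_ALTR_FRAME.
  // VP8_COPY_REFERENCE copies into a caller-allocated I420 image.
  if (!vpx_img_alloc(&ref.img, VPX_IMG_FMT_I420, width, height, 1))
    return -1;
  int ret = 0;
  if (vpx_codec_control(from, VP8_COPY_REFERENCE, &ref) != VPX_CODEC_OK ||
      vpx_codec_control(to, VP8_SET_REFERENCE, &ref) != VPX_CODEC_OK) {
    ret = -1;
  }
  vpx_img_free(&ref.img);
  return ret;
}
```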
| 1416 return -1; | 1417 return -1; |
| 1417 } | 1418 } |
| 1418 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != | 1419 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != |
| 1419 VPX_CODEC_OK) { | 1420 VPX_CODEC_OK) { |
| 1420 return -1; | 1421 return -1; |
| 1421 } | 1422 } |
| 1422 return 0; | 1423 return 0; |
| 1423 } | 1424 } |
| 1424 | 1425 |
| 1425 } // namespace webrtc | 1426 } // namespace webrtc |
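Closing note: the per-layer simulcast downscaling in Encode() above reduces to one libyuv call between consecutive |raw_images_|. A self-contained sketch of that step (hypothetical helper name):

```cpp
#include "libyuv/scale.h"
#include "vpx/vpx_image.h"

// Sketch only: downscale one I420 vpx_image_t into the next simulcast
// layer. |dst| must already be allocated with the lower layer's
// dimensions (d_w/d_h), as the |raw_images_| already are.
void DownscaleToNextLayer(const vpx_image_t& src, vpx_image_t* dst) {
  libyuv::I420Scale(
      src.planes[VPX_PLANE_Y], src.stride[VPX_PLANE_Y],
      src.planes[VPX_PLANE_U], src.stride[VPX_PLANE_U],
      src.planes[VPX_PLANE_V], src.stride[VPX_PLANE_V],
      src.d_w, src.d_h,
      dst->planes[VPX_PLANE_Y], dst->stride[VPX_PLANE_Y],
      dst->planes[VPX_PLANE_U], dst->stride[VPX_PLANE_U],
      dst->planes[VPX_PLANE_V], dst->stride[VPX_PLANE_V],
      dst->d_w, dst->d_h, libyuv::kFilterBilinear);
}
```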