Index: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc |
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc |
index 3409de3f65e407aea3dd1d477b4dc2117173376d..a1e56b5c4d1f54856bb830566bb637c333199d7f 100644 |
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc |
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc |
@@ -41,6 +41,7 @@ namespace { |
const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Arm"; |
const char kVp8GfBoostFieldTrial[] = "WebRTC-VP8-GfBoost"; |
+const int kTokenPartitions = VP8_ONE_TOKENPARTITION; |
enum { kVp8ErrorPropagationTh = 30 }; |
enum { kVp832ByteAlign = 32 }; |
@@ -131,18 +132,14 @@ VP8Decoder* VP8Decoder::Create() { |
} |
VP8EncoderImpl::VP8EncoderImpl() |
- : encoded_complete_callback_(nullptr), |
+ : use_gf_boost_(webrtc::field_trial::IsEnabled(kVp8GfBoostFieldTrial)), |
+ encoded_complete_callback_(nullptr), |
inited_(false), |
timestamp_(0), |
- feedback_mode_(false), |
qp_max_(56), // Setting for max quantizer. |
cpu_speed_default_(-6), |
number_of_cores_(0), |
rc_max_intra_target_(0), |
- token_partitions_(VP8_ONE_TOKENPARTITION), |
- down_scale_requested_(false), |
- down_scale_bitrate_(0), |
- use_gf_boost_(webrtc::field_trial::IsEnabled(kVp8GfBoostFieldTrial)), |
key_frame_request_(kMaxSimulcastStreams, false) { |
uint32_t seed = rtc::Time32(); |
srand(seed); |
@@ -153,7 +150,7 @@ VP8EncoderImpl::VP8EncoderImpl() |
raw_images_.reserve(kMaxSimulcastStreams); |
encoded_images_.reserve(kMaxSimulcastStreams); |
send_stream_.reserve(kMaxSimulcastStreams); |
- cpu_speed_.assign(kMaxSimulcastStreams, -6); // Set default to -6. |
+ cpu_speed_.assign(kMaxSimulcastStreams, cpu_speed_default_); |
encoders_.reserve(kMaxSimulcastStreams); |
configurations_.reserve(kMaxSimulcastStreams); |
downsampling_factors_.reserve(kMaxSimulcastStreams); |
@@ -221,28 +218,7 @@ int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate, |
codec_.maxFramerate = new_framerate; |
- if (encoders_.size() == 1) { |
- // 1:1. |
- // Calculate a rough limit for when to trigger a potental down scale. |
- uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; |
- // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work |
- // around the current limitations. |
- // Only trigger keyframes if we are allowed to scale down. |
- if (configurations_[0].rc_resize_allowed) { |
- if (!down_scale_requested_) { |
- if (k_pixels_per_frame > bitrate.get_sum_kbps()) { |
- down_scale_requested_ = true; |
- down_scale_bitrate_ = bitrate.get_sum_kbps(); |
- key_frame_request_[0] = true; |
- } |
- } else { |
- if (bitrate.get_sum_kbps() > (2 * down_scale_bitrate_) || |
- bitrate.get_sum_kbps() < (down_scale_bitrate_ / 2)) { |
- down_scale_requested_ = false; |
- } |
- } |
- } |
- } else { |
+ if (encoders_.size() > 1) { |
// If we have more than 1 stream, reduce the qp_max for the low resolution |
// stream if frame rate is not too low. The trade-off with lower qp_max is |
// possibly more dropped frames, so we only do this if the frame rate is |
@@ -325,9 +301,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst, |
if (number_of_cores < 1) { |
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
} |
- if (inst->VP8().feedbackModeOn && inst->numberOfSimulcastStreams > 1) { |
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
- } |
if (inst->VP8().automaticResizeOn && inst->numberOfSimulcastStreams > 1) { |
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
} |
@@ -350,8 +323,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst, |
SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); |
- feedback_mode_ = inst->VP8().feedbackModeOn; |
- |
number_of_cores_ = number_of_cores; |
timestamp_ = 0; |
codec_ = *inst; |
@@ -429,17 +400,8 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst, |
configurations_[0].rc_dropframe_thresh = inst->VP8().frameDroppingOn ? 30 : 0; |
configurations_[0].rc_end_usage = VPX_CBR; |
configurations_[0].g_pass = VPX_RC_ONE_PASS; |
- // TODO(hellner): investigate why the following two lines produce |
- // automaticResizeOn value of 3 when running |
- // WebRtcVideoMediaChannelTest.GetStatsMultipleSendStreams inside the talk |
- // framework. |
- // configurations_[0].rc_resize_allowed = |
- // inst->codecSpecific.VP8.automaticResizeOn ? 1 : 0; |
+ // Handle resizing outside of libvpx. |
configurations_[0].rc_resize_allowed = 0; |
- // Handle resizing outside of libvpx when doing single-stream. |
- if (inst->VP8().automaticResizeOn && number_of_streams > 1) { |
- configurations_[0].rc_resize_allowed = 1; |
- } |
configurations_[0].rc_min_quantizer = 2; |
if (inst->qpMax >= configurations_[0].rc_min_quantizer) { |
qp_max_ = inst->qpMax; |
@@ -454,11 +416,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst, |
// Set the maximum target size of any key-frame. |
rc_max_intra_target_ = MaxIntraTarget(configurations_[0].rc_buf_optimal_sz); |
- if (feedback_mode_) { |
- // Disable periodic key frames if we get feedback from the decoder |
- // through SLI and RPSI. |
- configurations_[0].kf_mode = VPX_KF_DISABLED; |
- } else if (inst->VP8().keyFrameInterval > 0) { |
+ if (inst->VP8().keyFrameInterval > 0) { |
configurations_[0].kf_mode = VPX_KF_AUTO; |
configurations_[0].kf_max_dist = inst->VP8().keyFrameInterval; |
} else { |
@@ -644,7 +602,7 @@ int VP8EncoderImpl::InitAndSetControlSettings() { |
codec_.mode == kScreensharing ? 300 : 1); |
vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]); |
vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS, |
- static_cast<vp8e_token_partitions>(token_partitions_)); |
+ static_cast<vp8e_token_partitions>(kTokenPartitions)); |
vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT, |
rc_max_intra_target_); |
// VP8E_SET_SCREEN_CONTENT_MODE 2 = screen content with more aggressive |
@@ -774,41 +732,26 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame, |
std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); |
} else if (codec_specific_info && |
codec_specific_info->codecType == kVideoCodecVP8) { |
- if (feedback_mode_) { |
- // Handle RPSI and SLI messages and set up the appropriate encode flags. |
- bool sendRefresh = false; |
- if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { |
- rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI); |
- } |
- if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) { |
- sendRefresh = rps_.ReceivedSLI(frame.timestamp()); |
- } |
+ if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { |
+ // Is this our last key frame? If not ignore. |
+ // |picture_id_| is defined per spatial stream/layer, so check that |
+ // |RPSI| matches the last key frame from any of the spatial streams. |
+ // If so, then all spatial streams for this encoding will predict from |
+ // its long-term reference (last key frame). |
+      int RPSI = codec_specific_info->codecSpecific.VP8.pictureIdRPSI; |
[review comment — not part of the patch] nisse-webrtc 2017/03/17 08:54:48: What's the state of rpsi (and sli) support after t… [truncated]
[review comment — not part of the patch] pbos-webrtc 2017/03/17 17:48:56: Not sure, I can make sure to take a second pass af… [truncated]
for (size_t i = 0; i < encoders_.size(); ++i) { |
- flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh, |
- frame.timestamp()); |
- } |
- } else { |
- if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { |
- // Is this our last key frame? If not ignore. |
- // |picture_id_| is defined per spatial stream/layer, so check that |
- // |RPSI| matches the last key frame from any of the spatial streams. |
- // If so, then all spatial streams for this encoding will predict from |
- // its long-term reference (last key frame). |
- int RPSI = codec_specific_info->codecSpecific.VP8.pictureIdRPSI; |
- for (size_t i = 0; i < encoders_.size(); ++i) { |
- if (last_key_frame_picture_id_[i] == RPSI) { |
- // Request for a long term reference frame. |
- // Note 1: overwrites any temporal settings. |
- // Note 2: VP8_EFLAG_NO_UPD_ENTROPY is not needed as that flag is |
- // set by error_resilient mode. |
- for (size_t j = 0; j < encoders_.size(); ++j) { |
- flags[j] = VP8_EFLAG_NO_UPD_ARF; |
- flags[j] |= VP8_EFLAG_NO_REF_GF; |
- flags[j] |= VP8_EFLAG_NO_REF_LAST; |
- } |
- only_predict_from_key_frame = true; |
- break; |
+ if (last_key_frame_picture_id_[i] == RPSI) { |
+ // Request for a long term reference frame. |
+ // Note 1: overwrites any temporal settings. |
+ // Note 2: VP8_EFLAG_NO_UPD_ENTROPY is not needed as that flag is |
+ // set by error_resilient mode. |
+ for (size_t j = 0; j < encoders_.size(); ++j) { |
+ flags[j] = VP8_EFLAG_NO_UPD_ARF; |
+ flags[j] |= VP8_EFLAG_NO_REF_GF; |
+ flags[j] |= VP8_EFLAG_NO_REF_LAST; |
} |
+ only_predict_from_key_frame = true; |
+ break; |
} |
} |
} |
@@ -925,9 +868,8 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image, |
encoded_images_[encoder_idx]._length = 0; |
encoded_images_[encoder_idx]._frameType = kVideoFrameDelta; |
RTPFragmentationHeader frag_info; |
- // token_partitions_ is number of bits used. |
- frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) + |
- 1); |
+ // kTokenPartitions is number of bits used. |
+ frag_info.VerifyAndAllocateFragmentationHeader((1 << kTokenPartitions) + 1); |
CodecSpecificInfo codec_specific; |
const vpx_codec_cx_pkt_t* pkt = NULL; |
while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) != |
@@ -1024,19 +966,16 @@ int VP8EncoderImpl::RegisterEncodeCompleteCallback( |
} |
VP8DecoderImpl::VP8DecoderImpl() |
- : buffer_pool_(false, 300 /* max_number_of_buffers*/), |
+ : use_postproc_arm_(webrtc::field_trial::FindFullName( |
+ kVp8PostProcArmFieldTrial) == "Enabled"), |
+ buffer_pool_(false, 300 /* max_number_of_buffers*/), |
decode_complete_callback_(NULL), |
inited_(false), |
- feedback_mode_(false), |
decoder_(NULL), |
- image_format_(VPX_IMG_FMT_NONE), |
- ref_frame_(NULL), |
propagation_cnt_(-1), |
last_frame_width_(0), |
last_frame_height_(0), |
- key_frame_required_(true), |
- use_postproc_arm_(webrtc::field_trial::FindFullName( |
- kVp8PostProcArmFieldTrial) == "Enabled") {} |
+ key_frame_required_(true) {} |
VP8DecoderImpl::~VP8DecoderImpl() { |
inited_ = true; // in order to do the actual release |
@@ -1052,21 +991,15 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) { |
decoder_ = new vpx_codec_ctx_t; |
memset(decoder_, 0, sizeof(*decoder_)); |
} |
- if (inst && inst->codecType == kVideoCodecVP8) { |
- feedback_mode_ = inst->VP8().feedbackModeOn; |
- } |
vpx_codec_dec_cfg_t cfg; |
// Setting number of threads to a constant value (1) |
cfg.threads = 1; |
cfg.h = cfg.w = 0; // set after decode |
- vpx_codec_flags_t flags = 0; |
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID) |
- if (use_postproc_arm_) { |
- flags = VPX_CODEC_USE_POSTPROC; |
- } |
+ vpx_codec_flags_t flags = use_postproc_arm_ ? VPX_CODEC_USE_POSTPROC : 0; |
#else |
- flags = VPX_CODEC_USE_POSTPROC; |
+ vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC; |
#endif |
if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) { |
@@ -1075,11 +1008,7 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) { |
return WEBRTC_VIDEO_CODEC_MEMORY; |
} |
- // Save VideoCodec instance for later; mainly for duplicating the decoder. |
- if (&codec_ != inst) |
- codec_ = *inst; |
propagation_cnt_ = -1; |
- |
inited_ = true; |
// Always start with a complete key frame. |
@@ -1143,21 +1072,16 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image, |
return WEBRTC_VIDEO_CODEC_ERROR; |
} |
} |
- // Restrict error propagation using key frame requests. Disabled when |
- // the feedback mode is enabled (RPS). |
- // Reset on a key frame refresh. |
- if (!feedback_mode_) { |
- if (input_image._frameType == kVideoFrameKey && |
- input_image._completeFrame) { |
- propagation_cnt_ = -1; |
- // Start count on first loss. |
- } else if ((!input_image._completeFrame || missing_frames) && |
- propagation_cnt_ == -1) { |
- propagation_cnt_ = 0; |
- } |
- if (propagation_cnt_ >= 0) { |
- propagation_cnt_++; |
- } |
+ // Restrict error propagation using key frame requests. |
+ if (input_image._frameType == kVideoFrameKey && input_image._completeFrame) { |
+ propagation_cnt_ = -1; |
+ // Start count on first loss. |
+ } else if ((!input_image._completeFrame || missing_frames) && |
+ propagation_cnt_ == -1) { |
+ propagation_cnt_ = 0; |
+ } |
+ if (propagation_cnt_ >= 0) { |
+ propagation_cnt_++; |
} |
vpx_codec_iter_t iter = NULL; |
@@ -1202,48 +1126,6 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image, |
propagation_cnt_ = 0; |
return ret; |
} |
- if (feedback_mode_) { |
- // Whenever we receive an incomplete key frame all reference buffers will |
- // be corrupt. If that happens we must request new key frames until we |
- // decode a complete key frame. |
- if (input_image._frameType == kVideoFrameKey && !input_image._completeFrame) |
- return WEBRTC_VIDEO_CODEC_ERROR; |
- // Check for reference updates and last reference buffer corruption and |
- // signal successful reference propagation or frame corruption to the |
- // encoder. |
- int reference_updates = 0; |
- if (vpx_codec_control(decoder_, VP8D_GET_LAST_REF_UPDATES, |
- &reference_updates)) { |
- // Reset to avoid requesting key frames too often. |
- if (propagation_cnt_ > 0) { |
- propagation_cnt_ = 0; |
- } |
- return WEBRTC_VIDEO_CODEC_ERROR; |
- } |
- int corrupted = 0; |
- if (vpx_codec_control(decoder_, VP8D_GET_FRAME_CORRUPTED, &corrupted)) { |
- // Reset to avoid requesting key frames too often. |
- if (propagation_cnt_ > 0) |
- propagation_cnt_ = 0; |
- return WEBRTC_VIDEO_CODEC_ERROR; |
- } |
- int16_t picture_id = -1; |
- if (codec_specific_info) { |
- picture_id = codec_specific_info->codecSpecific.VP8.pictureId; |
- } |
- if (picture_id > -1) { |
- if (((reference_updates & VP8_GOLD_FRAME) || |
- (reference_updates & VP8_ALTR_FRAME)) && |
- !corrupted) { |
- decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id); |
- } |
- decode_complete_callback_->ReceivedDecodedFrame(picture_id); |
- } |
- if (corrupted) { |
- // we can decode but with artifacts |
- return WEBRTC_VIDEO_CODEC_REQUEST_SLI; |
- } |
- } |
// Check Vs. threshold |
if (propagation_cnt_ > kVp8ErrorPropagationTh) { |
// Reset to avoid requesting key frames too often. |
@@ -1286,8 +1168,6 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img, |
decode_complete_callback_->Decoded(decoded_image, rtc::Optional<int32_t>(), |
rtc::Optional<uint8_t>(qp)); |
- // Remember image format for later |
- image_format_ = img->fmt; |
return WEBRTC_VIDEO_CODEC_OK; |
} |
@@ -1305,11 +1185,6 @@ int VP8DecoderImpl::Release() { |
delete decoder_; |
decoder_ = NULL; |
} |
- if (ref_frame_ != NULL) { |
- vpx_img_free(&ref_frame_->img); |
- delete ref_frame_; |
- ref_frame_ = NULL; |
- } |
buffer_pool_.Release(); |
inited_ = false; |
return WEBRTC_VIDEO_CODEC_OK; |
@@ -1319,18 +1194,4 @@ const char* VP8DecoderImpl::ImplementationName() const { |
return "libvpx"; |
} |
-int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) { |
- // The type of frame to copy should be set in ref_frame_->frame_type |
- // before the call to this function. |
- if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) != |
- VPX_CODEC_OK) { |
- return -1; |
- } |
- if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != |
- VPX_CODEC_OK) { |
- return -1; |
- } |
- return 0; |
-} |
- |
} // namespace webrtc |