Index: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 1d1a6b8c5f34e4891c1278afaeaa641964c5aaac..902a1c01b1a497e957306e8189b83ad56a1438a1 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -324,9 +324,6 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
   if (number_of_cores < 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
-  if (inst->VP8().feedbackModeOn && inst->numberOfSimulcastStreams > 1) {
-    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
-  }
   if (inst->VP8().automaticResizeOn && inst->numberOfSimulcastStreams > 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
@@ -743,10 +740,6 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
       }
     }
   }
-  // The flag modification below (due to forced key frame, RPS, etc.,) for now
-  // will be the same for all encoders/spatial layers.
-  // TODO(marpan/holmer): Allow for key frame request to be set per encoder.
-  bool only_predict_from_key_frame = false;
   if (send_key_frame) {
     // Adapt the size of the key frame when in screenshare with 1 temporal
     // layer.
@@ -805,7 +798,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
     return WEBRTC_VIDEO_CODEC_ERROR;
   timestamp_ += duration;
   // Examines frame timestamps only.
-  return GetEncodedPartitions(frame, only_predict_from_key_frame);
+  return GetEncodedPartitions(frame);
 }
 
 // TODO(pbos): Make sure this works properly for >1 encoders.
@@ -840,8 +833,7 @@ void VP8EncoderImpl::PopulateCodecSpecific(
     CodecSpecificInfo* codec_specific,
     const vpx_codec_cx_pkt_t& pkt,
     int stream_idx,
-    uint32_t timestamp,
-    bool only_predicting_from_key_frame) {
+    uint32_t timestamp) {
   assert(codec_specific != NULL);
   codec_specific->codecType = kVideoCodecVP8;
   codec_specific->codec_name = ImplementationName();
@@ -854,16 +846,14 @@ void VP8EncoderImpl::PopulateCodecSpecific(
   vp8Info->keyIdx = kNoKeyIdx;  // TODO(hlundin) populate this
   vp8Info->nonReference =
       (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? true : false;
-  bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
-                               only_predicting_from_key_frame;
+  bool base_layer_sync_point = pkt.data.frame.flags & VPX_FRAME_IS_KEY;
   temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
                                                       vp8Info, timestamp);
   // Prepare next.
   picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
 }
 
-int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
-                                         bool only_predicting_from_key_frame) {
+int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image) {
   int bw_resolutions_disabled =
       (encoders_.size() > 1) ? NumStreamsDisabled(send_stream_) : -1;
 
@@ -915,8 +905,7 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
           encoded_images_[encoder_idx]._frameType = kVideoFrameKey;
         }
         PopulateCodecSpecific(&codec_specific, *pkt, stream_idx,
-                              input_image.timestamp(),
-                              only_predicting_from_key_frame);
+                              input_image.timestamp());
         break;
       }
     }
@@ -1092,8 +1081,8 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
     // Reset on a key frame refresh.
     if (input_image._frameType == kVideoFrameKey &&
         input_image._completeFrame) {
-        propagation_cnt_ = -1;
-        // Start count on first loss.
+      propagation_cnt_ = -1;
+      // Start count on first loss.
     } else if ((!input_image._completeFrame || missing_frames) &&
                propagation_cnt_ == -1) {
       propagation_cnt_ = 0;
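
Reviewer note: after this patch, PopulateCodecSpecific derives the base-layer
sync point from the encoder's output frame flags alone; the
only_predicting_from_key_frame input is gone from every call site. A minimal
standalone sketch of the resulting predicate follows. kFrameIsKey is a
stand-in for libvpx's VPX_FRAME_IS_KEY (declared in vpx/vpx_encoder.h), and
the surrounding program is illustrative only, not part of the patch.

#include <cstdint>
#include <cstdio>

// Stand-in for libvpx's VPX_FRAME_IS_KEY bit (0x1 in vpx/vpx_encoder.h);
// include the real header when building against libvpx.
constexpr uint32_t kFrameIsKey = 0x1;

// A packet now marks a base-layer sync point exactly when its frame flags
// carry the key-frame bit, mirroring the simplified logic in
// PopulateCodecSpecific above.
bool IsBaseLayerSyncPoint(uint32_t frame_flags) {
  return (frame_flags & kFrameIsKey) != 0;
}

int main() {
  std::printf("key frame:   %d\n", IsBaseLayerSyncPoint(kFrameIsKey));  // 1
  std::printf("delta frame: %d\n", IsBaseLayerSyncPoint(0));            // 0
  return 0;
}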