Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(167)

Side by Side Diff: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc

Issue 2751133002: Remove dead code in vp8_impl files. (Closed)
Patch Set: rebase Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 23 matching lines...) Expand all
34 #include "webrtc/system_wrappers/include/clock.h" 34 #include "webrtc/system_wrappers/include/clock.h"
35 #include "webrtc/system_wrappers/include/field_trial.h" 35 #include "webrtc/system_wrappers/include/field_trial.h"
36 #include "webrtc/system_wrappers/include/metrics.h" 36 #include "webrtc/system_wrappers/include/metrics.h"
37 37
38 namespace webrtc { 38 namespace webrtc {
39 namespace { 39 namespace {
40 40
41 const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Arm"; 41 const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Arm";
42 const char kVp8GfBoostFieldTrial[] = "WebRTC-VP8-GfBoost"; 42 const char kVp8GfBoostFieldTrial[] = "WebRTC-VP8-GfBoost";
43 43
44 const int kTokenPartitions = VP8_ONE_TOKENPARTITION;
44 enum { kVp8ErrorPropagationTh = 30 }; 45 enum { kVp8ErrorPropagationTh = 30 };
45 enum { kVp832ByteAlign = 32 }; 46 enum { kVp832ByteAlign = 32 };
46 47
47 // VP8 denoiser states. 48 // VP8 denoiser states.
48 enum denoiserState { 49 enum denoiserState {
49 kDenoiserOff, 50 kDenoiserOff,
50 kDenoiserOnYOnly, 51 kDenoiserOnYOnly,
51 kDenoiserOnYUV, 52 kDenoiserOnYUV,
52 kDenoiserOnYUVAggressive, 53 kDenoiserOnYUVAggressive,
53 // Adaptive mode defaults to kDenoiserOnYUV on key frame, but may switch 54 // Adaptive mode defaults to kDenoiserOnYUV on key frame, but may switch
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
124 125
125 VP8Encoder* VP8Encoder::Create() { 126 VP8Encoder* VP8Encoder::Create() {
126 return new VP8EncoderImpl(); 127 return new VP8EncoderImpl();
127 } 128 }
128 129
129 VP8Decoder* VP8Decoder::Create() { 130 VP8Decoder* VP8Decoder::Create() {
130 return new VP8DecoderImpl(); 131 return new VP8DecoderImpl();
131 } 132 }
132 133
133 VP8EncoderImpl::VP8EncoderImpl() 134 VP8EncoderImpl::VP8EncoderImpl()
134 : encoded_complete_callback_(nullptr), 135 : use_gf_boost_(webrtc::field_trial::IsEnabled(kVp8GfBoostFieldTrial)),
136 encoded_complete_callback_(nullptr),
135 inited_(false), 137 inited_(false),
136 timestamp_(0), 138 timestamp_(0),
137 feedback_mode_(false),
138 qp_max_(56), // Setting for max quantizer. 139 qp_max_(56), // Setting for max quantizer.
139 cpu_speed_default_(-6), 140 cpu_speed_default_(-6),
140 number_of_cores_(0), 141 number_of_cores_(0),
141 rc_max_intra_target_(0), 142 rc_max_intra_target_(0),
142 token_partitions_(VP8_ONE_TOKENPARTITION),
143 down_scale_requested_(false),
144 down_scale_bitrate_(0),
145 use_gf_boost_(webrtc::field_trial::IsEnabled(kVp8GfBoostFieldTrial)),
146 key_frame_request_(kMaxSimulcastStreams, false) { 143 key_frame_request_(kMaxSimulcastStreams, false) {
147 uint32_t seed = rtc::Time32(); 144 uint32_t seed = rtc::Time32();
148 srand(seed); 145 srand(seed);
149 146
150 picture_id_.reserve(kMaxSimulcastStreams); 147 picture_id_.reserve(kMaxSimulcastStreams);
151 last_key_frame_picture_id_.reserve(kMaxSimulcastStreams); 148 last_key_frame_picture_id_.reserve(kMaxSimulcastStreams);
152 temporal_layers_.reserve(kMaxSimulcastStreams); 149 temporal_layers_.reserve(kMaxSimulcastStreams);
153 raw_images_.reserve(kMaxSimulcastStreams); 150 raw_images_.reserve(kMaxSimulcastStreams);
154 encoded_images_.reserve(kMaxSimulcastStreams); 151 encoded_images_.reserve(kMaxSimulcastStreams);
155 send_stream_.reserve(kMaxSimulcastStreams); 152 send_stream_.reserve(kMaxSimulcastStreams);
156 cpu_speed_.assign(kMaxSimulcastStreams, -6); // Set default to -6. 153 cpu_speed_.assign(kMaxSimulcastStreams, cpu_speed_default_);
157 encoders_.reserve(kMaxSimulcastStreams); 154 encoders_.reserve(kMaxSimulcastStreams);
158 configurations_.reserve(kMaxSimulcastStreams); 155 configurations_.reserve(kMaxSimulcastStreams);
159 downsampling_factors_.reserve(kMaxSimulcastStreams); 156 downsampling_factors_.reserve(kMaxSimulcastStreams);
160 } 157 }
161 158
162 VP8EncoderImpl::~VP8EncoderImpl() { 159 VP8EncoderImpl::~VP8EncoderImpl() {
163 Release(); 160 Release();
164 } 161 }
165 162
166 int VP8EncoderImpl::Release() { 163 int VP8EncoderImpl::Release() {
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
214 211
215 // At this point, bitrate allocation should already match codec settings. 212 // At this point, bitrate allocation should already match codec settings.
216 if (codec_.maxBitrate > 0) 213 if (codec_.maxBitrate > 0)
217 RTC_DCHECK_LE(bitrate.get_sum_kbps(), codec_.maxBitrate); 214 RTC_DCHECK_LE(bitrate.get_sum_kbps(), codec_.maxBitrate);
218 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.minBitrate); 215 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.minBitrate);
219 if (codec_.numberOfSimulcastStreams > 0) 216 if (codec_.numberOfSimulcastStreams > 0)
220 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.simulcastStream[0].minBitrate); 217 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.simulcastStream[0].minBitrate);
221 218
222 codec_.maxFramerate = new_framerate; 219 codec_.maxFramerate = new_framerate;
223 220
224 if (encoders_.size() == 1) { 221 if (encoders_.size() > 1) {
225 // 1:1.
 226 // Calculate a rough limit for when to trigger a potential down scale.
227 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000;
228 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work
229 // around the current limitations.
230 // Only trigger keyframes if we are allowed to scale down.
231 if (configurations_[0].rc_resize_allowed) {
232 if (!down_scale_requested_) {
233 if (k_pixels_per_frame > bitrate.get_sum_kbps()) {
234 down_scale_requested_ = true;
235 down_scale_bitrate_ = bitrate.get_sum_kbps();
236 key_frame_request_[0] = true;
237 }
238 } else {
239 if (bitrate.get_sum_kbps() > (2 * down_scale_bitrate_) ||
240 bitrate.get_sum_kbps() < (down_scale_bitrate_ / 2)) {
241 down_scale_requested_ = false;
242 }
243 }
244 }
245 } else {
246 // If we have more than 1 stream, reduce the qp_max for the low resolution 222 // If we have more than 1 stream, reduce the qp_max for the low resolution
247 // stream if frame rate is not too low. The trade-off with lower qp_max is 223 // stream if frame rate is not too low. The trade-off with lower qp_max is
248 // possibly more dropped frames, so we only do this if the frame rate is 224 // possibly more dropped frames, so we only do this if the frame rate is
249 // above some threshold (base temporal layer is down to 1/4 for 3 layers). 225 // above some threshold (base temporal layer is down to 1/4 for 3 layers).
250 // We may want to condition this on bitrate later. 226 // We may want to condition this on bitrate later.
251 if (new_framerate > 20) { 227 if (new_framerate > 20) {
252 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; 228 configurations_[encoders_.size() - 1].rc_max_quantizer = 45;
253 } else { 229 } else {
254 // Go back to default value set in InitEncode. 230 // Go back to default value set in InitEncode.
255 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; 231 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_;
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
318 // allow zero to represent an unspecified maxBitRate 294 // allow zero to represent an unspecified maxBitRate
319 if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) { 295 if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
320 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; 296 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
321 } 297 }
322 if (inst->width <= 1 || inst->height <= 1) { 298 if (inst->width <= 1 || inst->height <= 1) {
323 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; 299 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
324 } 300 }
325 if (number_of_cores < 1) { 301 if (number_of_cores < 1) {
326 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; 302 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
327 } 303 }
328 if (inst->VP8().feedbackModeOn && inst->numberOfSimulcastStreams > 1) {
329 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
330 }
331 if (inst->VP8().automaticResizeOn && inst->numberOfSimulcastStreams > 1) { 304 if (inst->VP8().automaticResizeOn && inst->numberOfSimulcastStreams > 1) {
332 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; 305 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
333 } 306 }
334 int retVal = Release(); 307 int retVal = Release();
335 if (retVal < 0) { 308 if (retVal < 0) {
336 return retVal; 309 return retVal;
337 } 310 }
338 311
339 int number_of_streams = NumberOfStreams(*inst); 312 int number_of_streams = NumberOfStreams(*inst);
340 bool doing_simulcast = (number_of_streams > 1); 313 bool doing_simulcast = (number_of_streams > 1);
341 314
342 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { 315 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) {
343 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; 316 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
344 } 317 }
345 318
346 int num_temporal_layers = 319 int num_temporal_layers =
347 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers 320 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
348 : inst->VP8().numberOfTemporalLayers; 321 : inst->VP8().numberOfTemporalLayers;
349 RTC_DCHECK_GT(num_temporal_layers, 0); 322 RTC_DCHECK_GT(num_temporal_layers, 0);
350 323
351 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); 324 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
352 325
353 feedback_mode_ = inst->VP8().feedbackModeOn;
354
355 number_of_cores_ = number_of_cores; 326 number_of_cores_ = number_of_cores;
356 timestamp_ = 0; 327 timestamp_ = 0;
357 codec_ = *inst; 328 codec_ = *inst;
358 329
359 // Code expects simulcastStream resolutions to be correct, make sure they are 330 // Code expects simulcastStream resolutions to be correct, make sure they are
360 // filled even when there are no simulcast layers. 331 // filled even when there are no simulcast layers.
361 if (codec_.numberOfSimulcastStreams == 0) { 332 if (codec_.numberOfSimulcastStreams == 0) {
362 codec_.simulcastStream[0].width = codec_.width; 333 codec_.simulcastStream[0].width = codec_.width;
363 codec_.simulcastStream[0].height = codec_.height; 334 codec_.simulcastStream[0].height = codec_.height;
364 } 335 }
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
422 // drop support for libvpx 9.6.0. 393 // drop support for libvpx 9.6.0.
423 break; 394 break;
424 case kResilientFrames: 395 case kResilientFrames:
425 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; // Not supported 396 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; // Not supported
426 } 397 }
427 398
428 // rate control settings 399 // rate control settings
429 configurations_[0].rc_dropframe_thresh = inst->VP8().frameDroppingOn ? 30 : 0; 400 configurations_[0].rc_dropframe_thresh = inst->VP8().frameDroppingOn ? 30 : 0;
430 configurations_[0].rc_end_usage = VPX_CBR; 401 configurations_[0].rc_end_usage = VPX_CBR;
431 configurations_[0].g_pass = VPX_RC_ONE_PASS; 402 configurations_[0].g_pass = VPX_RC_ONE_PASS;
432 // TODO(hellner): investigate why the following two lines produce 403 // Handle resizing outside of libvpx.
433 // automaticResizeOn value of 3 when running
434 // WebRtcVideoMediaChannelTest.GetStatsMultipleSendStreams inside the talk
435 // framework.
436 // configurations_[0].rc_resize_allowed =
437 // inst->codecSpecific.VP8.automaticResizeOn ? 1 : 0;
438 configurations_[0].rc_resize_allowed = 0; 404 configurations_[0].rc_resize_allowed = 0;
439 // Handle resizing outside of libvpx when doing single-stream.
440 if (inst->VP8().automaticResizeOn && number_of_streams > 1) {
441 configurations_[0].rc_resize_allowed = 1;
442 }
443 configurations_[0].rc_min_quantizer = 2; 405 configurations_[0].rc_min_quantizer = 2;
444 if (inst->qpMax >= configurations_[0].rc_min_quantizer) { 406 if (inst->qpMax >= configurations_[0].rc_min_quantizer) {
445 qp_max_ = inst->qpMax; 407 qp_max_ = inst->qpMax;
446 } 408 }
447 configurations_[0].rc_max_quantizer = qp_max_; 409 configurations_[0].rc_max_quantizer = qp_max_;
448 configurations_[0].rc_undershoot_pct = 100; 410 configurations_[0].rc_undershoot_pct = 100;
449 configurations_[0].rc_overshoot_pct = 15; 411 configurations_[0].rc_overshoot_pct = 15;
450 configurations_[0].rc_buf_initial_sz = 500; 412 configurations_[0].rc_buf_initial_sz = 500;
451 configurations_[0].rc_buf_optimal_sz = 600; 413 configurations_[0].rc_buf_optimal_sz = 600;
452 configurations_[0].rc_buf_sz = 1000; 414 configurations_[0].rc_buf_sz = 1000;
453 415
454 // Set the maximum target size of any key-frame. 416 // Set the maximum target size of any key-frame.
455 rc_max_intra_target_ = MaxIntraTarget(configurations_[0].rc_buf_optimal_sz); 417 rc_max_intra_target_ = MaxIntraTarget(configurations_[0].rc_buf_optimal_sz);
456 418
457 if (feedback_mode_) { 419 if (inst->VP8().keyFrameInterval > 0) {
458 // Disable periodic key frames if we get feedback from the decoder
459 // through SLI and RPSI.
460 configurations_[0].kf_mode = VPX_KF_DISABLED;
461 } else if (inst->VP8().keyFrameInterval > 0) {
462 configurations_[0].kf_mode = VPX_KF_AUTO; 420 configurations_[0].kf_mode = VPX_KF_AUTO;
463 configurations_[0].kf_max_dist = inst->VP8().keyFrameInterval; 421 configurations_[0].kf_max_dist = inst->VP8().keyFrameInterval;
464 } else { 422 } else {
465 configurations_[0].kf_mode = VPX_KF_DISABLED; 423 configurations_[0].kf_mode = VPX_KF_DISABLED;
466 } 424 }
467 425
468 // Allow the user to set the complexity for the base stream. 426 // Allow the user to set the complexity for the base stream.
469 switch (inst->VP8().complexity) { 427 switch (inst->VP8().complexity) {
470 case kComplexityHigh: 428 case kComplexityHigh:
471 cpu_speed_[0] = -5; 429 cpu_speed_[0] = -5;
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
637 vpx_codec_control( 595 vpx_codec_control(
638 &encoders_[1], VP8E_SET_NOISE_SENSITIVITY, 596 &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
639 codec_.VP8()->denoisingOn ? denoiser_state : kDenoiserOff); 597 codec_.VP8()->denoisingOn ? denoiser_state : kDenoiserOff);
640 } 598 }
641 for (size_t i = 0; i < encoders_.size(); ++i) { 599 for (size_t i = 0; i < encoders_.size(); ++i) {
642 // Allow more screen content to be detected as static. 600 // Allow more screen content to be detected as static.
643 vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD, 601 vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD,
644 codec_.mode == kScreensharing ? 300 : 1); 602 codec_.mode == kScreensharing ? 300 : 1);
645 vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]); 603 vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]);
646 vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS, 604 vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS,
647 static_cast<vp8e_token_partitions>(token_partitions_)); 605 static_cast<vp8e_token_partitions>(kTokenPartitions));
648 vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT, 606 vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
649 rc_max_intra_target_); 607 rc_max_intra_target_);
650 // VP8E_SET_SCREEN_CONTENT_MODE 2 = screen content with more aggressive 608 // VP8E_SET_SCREEN_CONTENT_MODE 2 = screen content with more aggressive
651 // rate control (drop frames on large target bitrate overshoot) 609 // rate control (drop frames on large target bitrate overshoot)
652 vpx_codec_control(&(encoders_[i]), VP8E_SET_SCREEN_CONTENT_MODE, 610 vpx_codec_control(&(encoders_[i]), VP8E_SET_SCREEN_CONTENT_MODE,
653 codec_.mode == kScreensharing ? 2 : 0); 611 codec_.mode == kScreensharing ? 2 : 0);
654 // Apply boost on golden frames (has only effect when resilience is off). 612 // Apply boost on golden frames (has only effect when resilience is off).
655 if (use_gf_boost_ && codec_.VP8()->resilience == kResilienceOff) { 613 if (use_gf_boost_ && codec_.VP8()->resilience == kResilienceOff) {
656 int gf_boost_percent; 614 int gf_boost_percent;
657 if (GetGfBoostPercentageFromFieldTrialGroup(&gf_boost_percent)) { 615 if (GetGfBoostPercentageFromFieldTrialGroup(&gf_boost_percent)) {
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after
767 forceKeyFrameIntraTh); 725 forceKeyFrameIntraTh);
768 } 726 }
769 // Key frame request from caller. 727 // Key frame request from caller.
770 // Will update both golden and alt-ref. 728 // Will update both golden and alt-ref.
771 for (size_t i = 0; i < encoders_.size(); ++i) { 729 for (size_t i = 0; i < encoders_.size(); ++i) {
772 flags[i] = VPX_EFLAG_FORCE_KF; 730 flags[i] = VPX_EFLAG_FORCE_KF;
773 } 731 }
774 std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); 732 std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
775 } else if (codec_specific_info && 733 } else if (codec_specific_info &&
776 codec_specific_info->codecType == kVideoCodecVP8) { 734 codec_specific_info->codecType == kVideoCodecVP8) {
777 if (feedback_mode_) { 735 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
778 // Handle RPSI and SLI messages and set up the appropriate encode flags. 736 // Is this our last key frame? If not ignore.
779 bool sendRefresh = false; 737 // |picture_id_| is defined per spatial stream/layer, so check that
780 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { 738 // |RPSI| matches the last key frame from any of the spatial streams.
781 rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI); 739 // If so, then all spatial streams for this encoding will predict from
782 } 740 // its long-term reference (last key frame).
783 if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) { 741 int RPSI = codec_specific_info->codecSpecific.VP8.pictureIdRPSI;
nisse-webrtc 2017/03/17 08:54:48 What's the state of rpsi (and sli) support after t
pbos-webrtc 2017/03/17 17:48:56 Not sure, I can make sure to take a second pass af
784 sendRefresh = rps_.ReceivedSLI(frame.timestamp());
785 }
786 for (size_t i = 0; i < encoders_.size(); ++i) { 742 for (size_t i = 0; i < encoders_.size(); ++i) {
787 flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh, 743 if (last_key_frame_picture_id_[i] == RPSI) {
788 frame.timestamp()); 744 // Request for a long term reference frame.
789 } 745 // Note 1: overwrites any temporal settings.
790 } else { 746 // Note 2: VP8_EFLAG_NO_UPD_ENTROPY is not needed as that flag is
791 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { 747 // set by error_resilient mode.
792 // Is this our last key frame? If not ignore. 748 for (size_t j = 0; j < encoders_.size(); ++j) {
793 // |picture_id_| is defined per spatial stream/layer, so check that 749 flags[j] = VP8_EFLAG_NO_UPD_ARF;
794 // |RPSI| matches the last key frame from any of the spatial streams. 750 flags[j] |= VP8_EFLAG_NO_REF_GF;
795 // If so, then all spatial streams for this encoding will predict from 751 flags[j] |= VP8_EFLAG_NO_REF_LAST;
796 // its long-term reference (last key frame).
797 int RPSI = codec_specific_info->codecSpecific.VP8.pictureIdRPSI;
798 for (size_t i = 0; i < encoders_.size(); ++i) {
799 if (last_key_frame_picture_id_[i] == RPSI) {
800 // Request for a long term reference frame.
801 // Note 1: overwrites any temporal settings.
802 // Note 2: VP8_EFLAG_NO_UPD_ENTROPY is not needed as that flag is
803 // set by error_resilient mode.
804 for (size_t j = 0; j < encoders_.size(); ++j) {
805 flags[j] = VP8_EFLAG_NO_UPD_ARF;
806 flags[j] |= VP8_EFLAG_NO_REF_GF;
807 flags[j] |= VP8_EFLAG_NO_REF_LAST;
808 }
809 only_predict_from_key_frame = true;
810 break;
811 } 752 }
753 only_predict_from_key_frame = true;
754 break;
812 } 755 }
813 } 756 }
814 } 757 }
815 } 758 }
816 // Set the encoder frame flags and temporal layer_id for each spatial stream. 759 // Set the encoder frame flags and temporal layer_id for each spatial stream.
817 // Note that |temporal_layers_| are defined starting from lowest resolution at 760 // Note that |temporal_layers_| are defined starting from lowest resolution at
818 // position 0 to highest resolution at position |encoders_.size() - 1|, 761 // position 0 to highest resolution at position |encoders_.size() - 1|,
819 // whereas |encoder_| is from highest to lowest resolution. 762 // whereas |encoder_| is from highest to lowest resolution.
820 size_t stream_idx = encoders_.size() - 1; 763 size_t stream_idx = encoders_.size() - 1;
821 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { 764 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after
918 861
919 int stream_idx = static_cast<int>(encoders_.size()) - 1; 862 int stream_idx = static_cast<int>(encoders_.size()) - 1;
920 int result = WEBRTC_VIDEO_CODEC_OK; 863 int result = WEBRTC_VIDEO_CODEC_OK;
921 for (size_t encoder_idx = 0; encoder_idx < encoders_.size(); 864 for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
922 ++encoder_idx, --stream_idx) { 865 ++encoder_idx, --stream_idx) {
923 vpx_codec_iter_t iter = NULL; 866 vpx_codec_iter_t iter = NULL;
924 int part_idx = 0; 867 int part_idx = 0;
925 encoded_images_[encoder_idx]._length = 0; 868 encoded_images_[encoder_idx]._length = 0;
926 encoded_images_[encoder_idx]._frameType = kVideoFrameDelta; 869 encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
927 RTPFragmentationHeader frag_info; 870 RTPFragmentationHeader frag_info;
928 // token_partitions_ is number of bits used. 871 // kTokenPartitions is number of bits used.
929 frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) + 872 frag_info.VerifyAndAllocateFragmentationHeader((1 << kTokenPartitions) + 1);
930 1);
931 CodecSpecificInfo codec_specific; 873 CodecSpecificInfo codec_specific;
932 const vpx_codec_cx_pkt_t* pkt = NULL; 874 const vpx_codec_cx_pkt_t* pkt = NULL;
933 while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) != 875 while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
934 NULL) { 876 NULL) {
935 switch (pkt->kind) { 877 switch (pkt->kind) {
936 case VPX_CODEC_CX_FRAME_PKT: { 878 case VPX_CODEC_CX_FRAME_PKT: {
937 size_t length = encoded_images_[encoder_idx]._length; 879 size_t length = encoded_images_[encoder_idx]._length;
938 if (pkt->data.frame.sz + length > 880 if (pkt->data.frame.sz + length >
939 encoded_images_[encoder_idx]._size) { 881 encoded_images_[encoder_idx]._size) {
940 uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length]; 882 uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after
1017 return WEBRTC_VIDEO_CODEC_OK; 959 return WEBRTC_VIDEO_CODEC_OK;
1018 } 960 }
1019 961
1020 int VP8EncoderImpl::RegisterEncodeCompleteCallback( 962 int VP8EncoderImpl::RegisterEncodeCompleteCallback(
1021 EncodedImageCallback* callback) { 963 EncodedImageCallback* callback) {
1022 encoded_complete_callback_ = callback; 964 encoded_complete_callback_ = callback;
1023 return WEBRTC_VIDEO_CODEC_OK; 965 return WEBRTC_VIDEO_CODEC_OK;
1024 } 966 }
1025 967
1026 VP8DecoderImpl::VP8DecoderImpl() 968 VP8DecoderImpl::VP8DecoderImpl()
1027 : buffer_pool_(false, 300 /* max_number_of_buffers*/), 969 : use_postproc_arm_(webrtc::field_trial::FindFullName(
970 kVp8PostProcArmFieldTrial) == "Enabled"),
971 buffer_pool_(false, 300 /* max_number_of_buffers*/),
1028 decode_complete_callback_(NULL), 972 decode_complete_callback_(NULL),
1029 inited_(false), 973 inited_(false),
1030 feedback_mode_(false),
1031 decoder_(NULL), 974 decoder_(NULL),
1032 image_format_(VPX_IMG_FMT_NONE),
1033 ref_frame_(NULL),
1034 propagation_cnt_(-1), 975 propagation_cnt_(-1),
1035 last_frame_width_(0), 976 last_frame_width_(0),
1036 last_frame_height_(0), 977 last_frame_height_(0),
1037 key_frame_required_(true), 978 key_frame_required_(true) {}
1038 use_postproc_arm_(webrtc::field_trial::FindFullName(
1039 kVp8PostProcArmFieldTrial) == "Enabled") {}
1040 979
1041 VP8DecoderImpl::~VP8DecoderImpl() { 980 VP8DecoderImpl::~VP8DecoderImpl() {
1042 inited_ = true; // in order to do the actual release 981 inited_ = true; // in order to do the actual release
1043 Release(); 982 Release();
1044 } 983 }
1045 984
1046 int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) { 985 int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
1047 int ret_val = Release(); 986 int ret_val = Release();
1048 if (ret_val < 0) { 987 if (ret_val < 0) {
1049 return ret_val; 988 return ret_val;
1050 } 989 }
1051 if (decoder_ == NULL) { 990 if (decoder_ == NULL) {
1052 decoder_ = new vpx_codec_ctx_t; 991 decoder_ = new vpx_codec_ctx_t;
1053 memset(decoder_, 0, sizeof(*decoder_)); 992 memset(decoder_, 0, sizeof(*decoder_));
1054 } 993 }
1055 if (inst && inst->codecType == kVideoCodecVP8) {
1056 feedback_mode_ = inst->VP8().feedbackModeOn;
1057 }
1058 vpx_codec_dec_cfg_t cfg; 994 vpx_codec_dec_cfg_t cfg;
1059 // Setting number of threads to a constant value (1) 995 // Setting number of threads to a constant value (1)
1060 cfg.threads = 1; 996 cfg.threads = 1;
1061 cfg.h = cfg.w = 0; // set after decode 997 cfg.h = cfg.w = 0; // set after decode
1062 998
1063 vpx_codec_flags_t flags = 0;
1064 #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID) 999 #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID)
1065 if (use_postproc_arm_) { 1000 vpx_codec_flags_t flags = use_postproc_arm_ ? VPX_CODEC_USE_POSTPROC : 0;
1066 flags = VPX_CODEC_USE_POSTPROC;
1067 }
1068 #else 1001 #else
1069 flags = VPX_CODEC_USE_POSTPROC; 1002 vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC;
1070 #endif 1003 #endif
1071 1004
1072 if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) { 1005 if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) {
1073 delete decoder_; 1006 delete decoder_;
1074 decoder_ = nullptr; 1007 decoder_ = nullptr;
1075 return WEBRTC_VIDEO_CODEC_MEMORY; 1008 return WEBRTC_VIDEO_CODEC_MEMORY;
1076 } 1009 }
1077 1010
1078 // Save VideoCodec instance for later; mainly for duplicating the decoder.
1079 if (&codec_ != inst)
1080 codec_ = *inst;
1081 propagation_cnt_ = -1; 1011 propagation_cnt_ = -1;
1082
1083 inited_ = true; 1012 inited_ = true;
1084 1013
1085 // Always start with a complete key frame. 1014 // Always start with a complete key frame.
1086 key_frame_required_ = true; 1015 key_frame_required_ = true;
1087 return WEBRTC_VIDEO_CODEC_OK; 1016 return WEBRTC_VIDEO_CODEC_OK;
1088 } 1017 }
1089 1018
1090 int VP8DecoderImpl::Decode(const EncodedImage& input_image, 1019 int VP8DecoderImpl::Decode(const EncodedImage& input_image,
1091 bool missing_frames, 1020 bool missing_frames,
1092 const RTPFragmentationHeader* fragmentation, 1021 const RTPFragmentationHeader* fragmentation,
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1136 if (key_frame_required_) { 1065 if (key_frame_required_) {
1137 if (input_image._frameType != kVideoFrameKey) 1066 if (input_image._frameType != kVideoFrameKey)
1138 return WEBRTC_VIDEO_CODEC_ERROR; 1067 return WEBRTC_VIDEO_CODEC_ERROR;
1139 // We have a key frame - is it complete? 1068 // We have a key frame - is it complete?
1140 if (input_image._completeFrame) { 1069 if (input_image._completeFrame) {
1141 key_frame_required_ = false; 1070 key_frame_required_ = false;
1142 } else { 1071 } else {
1143 return WEBRTC_VIDEO_CODEC_ERROR; 1072 return WEBRTC_VIDEO_CODEC_ERROR;
1144 } 1073 }
1145 } 1074 }
1146 // Restrict error propagation using key frame requests. Disabled when 1075 // Restrict error propagation using key frame requests.
1147 // the feedback mode is enabled (RPS). 1076 if (input_image._frameType == kVideoFrameKey && input_image._completeFrame) {
1148 // Reset on a key frame refresh. 1077 propagation_cnt_ = -1;
1149 if (!feedback_mode_) { 1078 // Start count on first loss.
1150 if (input_image._frameType == kVideoFrameKey && 1079 } else if ((!input_image._completeFrame || missing_frames) &&
1151 input_image._completeFrame) { 1080 propagation_cnt_ == -1) {
1152 propagation_cnt_ = -1; 1081 propagation_cnt_ = 0;
1153 // Start count on first loss. 1082 }
1154 } else if ((!input_image._completeFrame || missing_frames) && 1083 if (propagation_cnt_ >= 0) {
1155 propagation_cnt_ == -1) { 1084 propagation_cnt_++;
1156 propagation_cnt_ = 0;
1157 }
1158 if (propagation_cnt_ >= 0) {
1159 propagation_cnt_++;
1160 }
1161 } 1085 }
1162 1086
1163 vpx_codec_iter_t iter = NULL; 1087 vpx_codec_iter_t iter = NULL;
1164 vpx_image_t* img; 1088 vpx_image_t* img;
1165 int ret; 1089 int ret;
1166 1090
1167 // Check for missing frames. 1091 // Check for missing frames.
1168 if (missing_frames) { 1092 if (missing_frames) {
1169 // Call decoder with zero data length to signal missing frames. 1093 // Call decoder with zero data length to signal missing frames.
1170 if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME)) { 1094 if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME)) {
(...skipping 24 matching lines...) Expand all
1195 vpx_codec_err_t vpx_ret = 1119 vpx_codec_err_t vpx_ret =
1196 vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp); 1120 vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
1197 RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK); 1121 RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
1198 ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp); 1122 ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
1199 if (ret != 0) { 1123 if (ret != 0) {
1200 // Reset to avoid requesting key frames too often. 1124 // Reset to avoid requesting key frames too often.
1201 if (ret < 0 && propagation_cnt_ > 0) 1125 if (ret < 0 && propagation_cnt_ > 0)
1202 propagation_cnt_ = 0; 1126 propagation_cnt_ = 0;
1203 return ret; 1127 return ret;
1204 } 1128 }
1205 if (feedback_mode_) {
1206 // Whenever we receive an incomplete key frame all reference buffers will
1207 // be corrupt. If that happens we must request new key frames until we
1208 // decode a complete key frame.
1209 if (input_image._frameType == kVideoFrameKey && !input_image._completeFrame)
1210 return WEBRTC_VIDEO_CODEC_ERROR;
1211 // Check for reference updates and last reference buffer corruption and
1212 // signal successful reference propagation or frame corruption to the
1213 // encoder.
1214 int reference_updates = 0;
1215 if (vpx_codec_control(decoder_, VP8D_GET_LAST_REF_UPDATES,
1216 &reference_updates)) {
1217 // Reset to avoid requesting key frames too often.
1218 if (propagation_cnt_ > 0) {
1219 propagation_cnt_ = 0;
1220 }
1221 return WEBRTC_VIDEO_CODEC_ERROR;
1222 }
1223 int corrupted = 0;
1224 if (vpx_codec_control(decoder_, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
1225 // Reset to avoid requesting key frames too often.
1226 if (propagation_cnt_ > 0)
1227 propagation_cnt_ = 0;
1228 return WEBRTC_VIDEO_CODEC_ERROR;
1229 }
1230 int16_t picture_id = -1;
1231 if (codec_specific_info) {
1232 picture_id = codec_specific_info->codecSpecific.VP8.pictureId;
1233 }
1234 if (picture_id > -1) {
1235 if (((reference_updates & VP8_GOLD_FRAME) ||
1236 (reference_updates & VP8_ALTR_FRAME)) &&
1237 !corrupted) {
1238 decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
1239 }
1240 decode_complete_callback_->ReceivedDecodedFrame(picture_id);
1241 }
1242 if (corrupted) {
1243 // we can decode but with artifacts
1244 return WEBRTC_VIDEO_CODEC_REQUEST_SLI;
1245 }
1246 }
1247 // Check Vs. threshold 1129 // Check Vs. threshold
1248 if (propagation_cnt_ > kVp8ErrorPropagationTh) { 1130 if (propagation_cnt_ > kVp8ErrorPropagationTh) {
1249 // Reset to avoid requesting key frames too often. 1131 // Reset to avoid requesting key frames too often.
1250 propagation_cnt_ = 0; 1132 propagation_cnt_ = 0;
1251 return WEBRTC_VIDEO_CODEC_ERROR; 1133 return WEBRTC_VIDEO_CODEC_ERROR;
1252 } 1134 }
1253 return WEBRTC_VIDEO_CODEC_OK; 1135 return WEBRTC_VIDEO_CODEC_OK;
1254 } 1136 }
1255 1137
1256 int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img, 1138 int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
(...skipping 22 matching lines...) Expand all
1279 buffer->MutableDataY(), buffer->StrideY(), 1161 buffer->MutableDataY(), buffer->StrideY(),
1280 buffer->MutableDataU(), buffer->StrideU(), 1162 buffer->MutableDataU(), buffer->StrideU(),
1281 buffer->MutableDataV(), buffer->StrideV(), 1163 buffer->MutableDataV(), buffer->StrideV(),
1282 img->d_w, img->d_h); 1164 img->d_w, img->d_h);
1283 1165
1284 VideoFrame decoded_image(buffer, timestamp, 0, kVideoRotation_0); 1166 VideoFrame decoded_image(buffer, timestamp, 0, kVideoRotation_0);
1285 decoded_image.set_ntp_time_ms(ntp_time_ms); 1167 decoded_image.set_ntp_time_ms(ntp_time_ms);
1286 decode_complete_callback_->Decoded(decoded_image, rtc::Optional<int32_t>(), 1168 decode_complete_callback_->Decoded(decoded_image, rtc::Optional<int32_t>(),
1287 rtc::Optional<uint8_t>(qp)); 1169 rtc::Optional<uint8_t>(qp));
1288 1170
1289 // Remember image format for later
1290 image_format_ = img->fmt;
1291 return WEBRTC_VIDEO_CODEC_OK; 1171 return WEBRTC_VIDEO_CODEC_OK;
1292 } 1172 }
1293 1173
1294 int VP8DecoderImpl::RegisterDecodeCompleteCallback( 1174 int VP8DecoderImpl::RegisterDecodeCompleteCallback(
1295 DecodedImageCallback* callback) { 1175 DecodedImageCallback* callback) {
1296 decode_complete_callback_ = callback; 1176 decode_complete_callback_ = callback;
1297 return WEBRTC_VIDEO_CODEC_OK; 1177 return WEBRTC_VIDEO_CODEC_OK;
1298 } 1178 }
1299 1179
1300 int VP8DecoderImpl::Release() { 1180 int VP8DecoderImpl::Release() {
1301 if (decoder_ != NULL) { 1181 if (decoder_ != NULL) {
1302 if (vpx_codec_destroy(decoder_)) { 1182 if (vpx_codec_destroy(decoder_)) {
1303 return WEBRTC_VIDEO_CODEC_MEMORY; 1183 return WEBRTC_VIDEO_CODEC_MEMORY;
1304 } 1184 }
1305 delete decoder_; 1185 delete decoder_;
1306 decoder_ = NULL; 1186 decoder_ = NULL;
1307 } 1187 }
1308 if (ref_frame_ != NULL) {
1309 vpx_img_free(&ref_frame_->img);
1310 delete ref_frame_;
1311 ref_frame_ = NULL;
1312 }
1313 buffer_pool_.Release(); 1188 buffer_pool_.Release();
1314 inited_ = false; 1189 inited_ = false;
1315 return WEBRTC_VIDEO_CODEC_OK; 1190 return WEBRTC_VIDEO_CODEC_OK;
1316 } 1191 }
1317 1192
1318 const char* VP8DecoderImpl::ImplementationName() const { 1193 const char* VP8DecoderImpl::ImplementationName() const {
1319 return "libvpx"; 1194 return "libvpx";
1320 } 1195 }
1321 1196
1322 int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) {
1323 // The type of frame to copy should be set in ref_frame_->frame_type
1324 // before the call to this function.
1325 if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) !=
1326 VPX_CODEC_OK) {
1327 return -1;
1328 }
1329 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
1330 VPX_CODEC_OK) {
1331 return -1;
1332 }
1333 return 0;
1334 }
1335
1336 } // namespace webrtc 1197 } // namespace webrtc
OLDNEW
« webrtc/common_types.h ('K') | « webrtc/modules/video_coding/codecs/vp8/vp8_impl.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698