Chromium Code Reviews

Unified diff: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc ('-' = removed line, '+' = added line)

Issue 1193513006: In screenshare mode, suppress VP8 bitrate overshoot and increase quality (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Rebase Created 5 years, 6 months ago
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 260 matching lines...)
     // interpret the startBitrate as the encoder target bitrate. This is
     // to allow for a different max bitrate, so if the codec can't meet
     // the target we still allow it to overshoot up to the max before dropping
     // frames. This hack should be improved.
     if (codec_.targetBitrate > 0 &&
         (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 ||
          codec_.simulcastStream[0].numberOfTemporalLayers == 2)) {
       int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate);
       max_bitrate = std::min(codec_.maxBitrate, target_bitrate);
       target_bitrate = tl0_bitrate;
-      framerate = -1;
     }
     configurations_[i].rc_target_bitrate = target_bitrate;
     temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate,
                                                     max_bitrate,
                                                     framerate,
                                                     &configurations_[i]);
     if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
   }
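For readers outside the rate-control code, a small standalone sketch of the two-temporal-layer clamping above (not part of the patch; the bitrates are made-up kbps values, only the std::min logic mirrors the code):

#include <algorithm>
#include <cstdio>

int main() {
  // Hypothetical numbers, all in kbps.
  unsigned int codec_target_bitrate = 200;  // codec_.targetBitrate
  unsigned int codec_max_bitrate = 1000;    // codec_.maxBitrate
  unsigned int target_bitrate = 300;        // bitrate allocated to this stream

  // TL0 (the base temporal layer) is capped at the codec-level target, while
  // the max is capped at the allocated bitrate, so the encoder may still
  // overshoot up to 300 kbps here before frames are dropped.
  unsigned int tl0_bitrate = std::min(codec_target_bitrate, target_bitrate);
  unsigned int max_bitrate = std::min(codec_max_bitrate, target_bitrate);
  target_bitrate = tl0_bitrate;

  std::printf("tl0 = %u kbps, max = %u kbps\n", target_bitrate, max_bitrate);
  return 0;
}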
(...skipping 13 matching lines...)
 void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
                                          int num_temporal_layers,
                                          const VideoCodec& codec) {
   const Config default_options;
   const TemporalLayers::Factory& tl_factory =
       (codec.extra_options ? codec.extra_options : &default_options)
           ->Get<TemporalLayers::Factory>();
   if (num_streams == 1) {
     if (codec.mode == kScreensharing) {
       // Special mode when screensharing on a single stream.
-      temporal_layers_.push_back(new ScreenshareLayers(num_temporal_layers,
-                                                       rand(),
-                                                       &tl0_frame_dropper_,
-                                                       &tl1_frame_dropper_));
+      temporal_layers_.push_back(
+          new ScreenshareLayers(num_temporal_layers, rand()));
     } else {
       temporal_layers_.push_back(
           tl_factory.Create(num_temporal_layers, rand()));
     }
   } else {
     for (int i = 0; i < num_streams; ++i) {
       // TODO(andresp): crash if layers is invalid.
       int layers = codec.simulcastStream[i].numberOfTemporalLayers;
       if (layers < 1) layers = 1;
       temporal_layers_.push_back(tl_factory.Create(layers, rand()));
(...skipping 334 matching lines...)
                       codec_.codecSpecific.VP8.denoisingOn ?
                       denoiser_state : kDenoiserOff);
   }
   for (size_t i = 0; i < encoders_.size(); ++i) {
     vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD, 1);
     vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]);
     vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS,
                       static_cast<vp8e_token_partitions>(token_partitions_));
     vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
                       rc_max_intra_target_);
+    // VP8E_SET_SCREEN_CONTENT_MODE 2 = screen content with more aggressive
+    // rate control (drop frames on large target bitrate overshoot)
     vpx_codec_control(&(encoders_[i]), VP8E_SET_SCREEN_CONTENT_MODE,
-                      codec_.mode == kScreensharing);
+                      codec_.mode == kScreensharing ? 2 : 0);
   }
   inited_ = true;
   return WEBRTC_VIDEO_CODEC_OK;
 }

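An aside on the control toggled above (illustrative, not part of the patch): libvpx's vp8cx.h documents VP8E_SET_SCREEN_CONTENT_MODE as 0 = off, 1 = screen content, 2 = screen content with more aggressive rate control. A minimal sketch of driving it outside webrtc, with error handling omitted and an arbitrary 640x480 resolution:

#include <vpx/vpx_encoder.h>
#include <vpx/vp8cx.h>

int main() {
  vpx_codec_ctx_t encoder;
  vpx_codec_enc_cfg_t cfg;
  // Start from the default VP8 encoder configuration.
  vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, 0);
  cfg.g_w = 640;  // arbitrary resolution for the sketch
  cfg.g_h = 480;
  vpx_codec_enc_init(&encoder, vpx_codec_vp8_cx(), &cfg, 0);
  // Mode 2, as the patch sets for kScreensharing: screen content with more
  // aggressive rate control (frames are dropped on large overshoot).
  vpx_codec_control(&encoder, VP8E_SET_SCREEN_CONTENT_MODE, 2);
  vpx_codec_destroy(&encoder);
  return 0;
}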
 uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
   // Set max to the optimal buffer level (normalized by target BR),
   // and scaled by a scalePar.
   // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
   // This values is presented in percentage of perFrameBw:
   // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
   // The target in % is as follows:

   float scalePar = 0.5;
   uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10;

   // Don't go below 3 times the per frame bandwidth.
   const uint32_t minIntraTh = 300;
   return (targetPct < minIntraTh) ? minIntraTh: targetPct;
 }
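A worked example of the formula above, using illustrative inputs rather than values from a running encoder (a 600 ms optimal buffer and a 30 fps max frame rate):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed inputs: rc_buf_optimal_sz in ms and codec_.maxFramerate in fps.
  uint32_t optimalBuffersize = 600;
  uint32_t maxFramerate = 30;
  float scalePar = 0.5;
  // 600 * 0.5 * 30 / 10 = 900, i.e. a key frame may spend up to 900% of the
  // average per-frame bandwidth (perFrameBw = targetBR[Kbps] * 1000 / frameRate).
  uint32_t targetPct = optimalBuffersize * scalePar * maxFramerate / 10;
  const uint32_t minIntraTh = 300;  // floor: 3x the per-frame bandwidth
  std::printf("max intra target = %u%%\n", std::max(targetPct, minIntraTh));
  return 0;
}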

 int VP8EncoderImpl::Encode(const VideoFrame& frame,
                            const CodecSpecificInfo* codec_specific_info,
                            const std::vector<VideoFrameType>* frame_types) {
   TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp());

-  if (!inited_) {
+  if (!inited_)
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
-  }
-  if (frame.IsZeroSize()) {
+  if (frame.IsZeroSize())
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
-  }
-  if (encoded_complete_callback_ == NULL) {
+  if (encoded_complete_callback_ == NULL)
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
-  }

   // Only apply scaling to improve for single-layer streams. The scaling metrics
   // use frame drops as a signal and is only applicable when we drop frames.
   const bool use_quality_scaler = encoders_.size() == 1 &&
                                   configurations_[0].rc_dropframe_thresh > 0 &&
                                   codec_.codecSpecific.VP8.automaticResizeOn;
   const VideoFrame& input_image =
       use_quality_scaler ? quality_scaler_.GetScaledFrame(frame) : frame;

   if (use_quality_scaler && (input_image.width() != codec_.width ||
(...skipping 124 matching lines...)
         }
       }
     }
   }
   // Set the encoder frame flags and temporal layer_id for each spatial stream.
   // Note that |temporal_layers_| are defined starting from lowest resolution at
   // position 0 to highest resolution at position |encoders_.size() - 1|,
   // whereas |encoder_| is from highest to lowest resolution.
   size_t stream_idx = encoders_.size() - 1;
   for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
+    // Allow the layers adapter to temporarily modify the configuration. This
+    // change isn't stored in configurations_ so change will be discarded at
+    // the next update.
+    vpx_codec_enc_cfg_t temp_config;
+    memcpy(&temp_config, &configurations_[i], sizeof(vpx_codec_enc_cfg_t));
+    if (temporal_layers_[stream_idx]->UpdateConfiguration(&temp_config)) {
+      if (vpx_codec_enc_config_set(&encoders_[i], &temp_config))
+        return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+
     vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]);
     vpx_codec_control(&encoders_[i],
                       VP8E_SET_TEMPORAL_LAYER_ID,
                       temporal_layers_[stream_idx]->CurrentLayerId());
   }
   // TODO(holmer): Ideally the duration should be the timestamp diff of this
   // frame and the next frame to be encoded, which we don't have. Instead we
   // would like to use the duration of the previous frame. Unfortunately the
   // rate control seems to be off with that setup. Using the average input
   // frame rate to calculate an average duration for now.
   assert(codec_.maxFramerate > 0);
   uint32_t duration = 90000 / codec_.maxFramerate;

   // Note we must pass 0 for |flags| field in encode call below since they are
   // set above in |vpx_codec_control| function for each encoder/spatial layer.
   int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_,
                                duration, 0, VPX_DL_REALTIME);
   // Reset specific intra frame thresholds, following the key frame.
   if (send_key_frame) {
     vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
                       rc_max_intra_target_);
   }
-  if (error) {
+  if (error)
     return WEBRTC_VIDEO_CODEC_ERROR;
-  }
   timestamp_ += duration;
   return GetEncodedPartitions(input_image, only_predict_from_key_frame);
 }

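The temp_config block added above follows a scratch-copy pattern: tweak a copy of the stored configuration, push it to libvpx for this frame, and let it evaporate at the next regular update. A compile-only sketch of the same idea, where ApplyPerFrameTweaks() is a hypothetical stand-in for TemporalLayers::UpdateConfiguration():

#include <cstring>
#include <vpx/vpx_encoder.h>

// Returns true if anything changed and the encoder should be reconfigured.
static bool ApplyPerFrameTweaks(vpx_codec_enc_cfg_t* cfg) {
  cfg->rc_undershoot_pct = 100;  // example of a temporary per-frame tweak
  return true;
}

static int ConfigureForThisFrame(vpx_codec_ctx_t* encoder,
                                 const vpx_codec_enc_cfg_t& stored_config) {
  // Work on a copy so the persistent configuration is never modified; the
  // tweak is discarded the next time stored_config is pushed to the encoder.
  vpx_codec_enc_cfg_t temp_config;
  std::memcpy(&temp_config, &stored_config, sizeof(vpx_codec_enc_cfg_t));
  if (ApplyPerFrameTweaks(&temp_config)) {
    if (vpx_codec_enc_config_set(encoder, &temp_config))
      return -1;  // corresponds to WEBRTC_VIDEO_CODEC_ERROR in the patch
  }
  return 0;
}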
 // TODO(pbos): Make sure this works for properly for >1 encoders.
 int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
   codec_.width = input_image.width();
   codec_.height = input_image.height();
   // Update the cpu_speed setting for resolution change.
   vpx_codec_control(&(encoders_[0]),
(...skipping 37 matching lines...)
   temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
                                                       vp8Info,
                                                       timestamp);
   // Prepare next.
   picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
 }

 int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
                                          bool only_predicting_from_key_frame) {
   int stream_idx = static_cast<int>(encoders_.size()) - 1;
+  int result = WEBRTC_VIDEO_CODEC_OK;
   for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
     int part_idx = 0;
     encoded_images_[encoder_idx]._length = 0;
     encoded_images_[encoder_idx]._frameType = kDeltaFrame;
     RTPFragmentationHeader frag_info;
     // token_partitions_ is number of bits used.
     frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
                                                    + 1);
(...skipping 28 matching lines...)
         }
         PopulateCodecSpecific(&codec_specific, *pkt, stream_idx,
                               input_image.timestamp(),
                               only_predicting_from_key_frame);
         break;
       }
     }
     encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
     encoded_images_[encoder_idx].capture_time_ms_ =
         input_image.render_time_ms();
+
+    int qp = -1;
+    vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
     temporal_layers_[stream_idx]->FrameEncoded(
         encoded_images_[encoder_idx]._length,
-        encoded_images_[encoder_idx]._timeStamp);
+        encoded_images_[encoder_idx]._timeStamp, qp);
     if (send_stream_[stream_idx]) {
       if (encoded_images_[encoder_idx]._length > 0) {
         TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
                           encoded_images_[encoder_idx]._length);
         encoded_images_[encoder_idx]._encodedHeight =
             codec_.simulcastStream[stream_idx].height;
         encoded_images_[encoder_idx]._encodedWidth =
             codec_.simulcastStream[stream_idx].width;
         encoded_complete_callback_->Encoded(encoded_images_[encoder_idx],
                                             &codec_specific, &frag_info);
+      } else if (codec_.mode == kScreensharing) {
+        result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
       }
     } else {
       // Required in case padding is applied to dropped frames.
       encoded_images_[encoder_idx]._length = 0;
       encoded_images_[encoder_idx]._frameType = kSkipFrame;
       codec_specific.codecType = kVideoCodecVP8;
       CodecSpecificInfoVP8* vp8Info = &(codec_specific.codecSpecific.VP8);
       vp8Info->pictureId = picture_id_[stream_idx];
       vp8Info->simulcastIdx = stream_idx;
       vp8Info->keyIdx = kNoKeyIdx;
       encoded_complete_callback_->Encoded(encoded_images_[encoder_idx],
                                           &codec_specific, NULL);
     }
   }
   if (encoders_.size() == 1 && send_stream_[0]) {
     if (encoded_images_[0]._length > 0) {
       int qp;
       vpx_codec_control(&encoders_[0], VP8E_GET_LAST_QUANTIZER_64, &qp);
       quality_scaler_.ReportQP(qp);
     } else {
       quality_scaler_.ReportDroppedFrame();
     }
   }
-  return WEBRTC_VIDEO_CODEC_OK;
+  return result;
 }

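With this change, GetEncodedPartitions() returns WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT when a screenshare frame is dropped by rate control. A toy sketch of how a caller might treat that as a soft drop rather than a hard failure; the enum values and handler below are hypothetical, not the actual webrtc call site:

#include <cstdio>

enum EncodeResult {  // hypothetical stand-ins for the WEBRTC_VIDEO_CODEC_* codes
  kOk = 0,
  kTargetBitrateOvershoot = 1,
  kError = -1,
};

static void HandleEncodeResult(EncodeResult result) {
  if (result == kTargetBitrateOvershoot) {
    // The encoder skipped the frame on purpose to avoid overshooting the
    // target bitrate; just wait for the next captured frame.
    std::puts("frame dropped to respect target bitrate");
  } else if (result != kOk) {
    std::puts("hard encoder error");  // e.g. request a key frame or reinitialize
  }
}

int main() {
  HandleEncodeResult(kTargetBitrateOvershoot);
  return 0;
}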
 int VP8EncoderImpl::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
   rps_.SetRtt(rtt);
   return WEBRTC_VIDEO_CODEC_OK;
 }

 int VP8EncoderImpl::RegisterEncodeCompleteCallback(
     EncodedImageCallback* callback) {
   encoded_complete_callback_ = callback;
(...skipping 344 matching lines...)
     return -1;
   }
   if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_)
       != VPX_CODEC_OK) {
     return -1;
   }
   return 0;
 }

 }  // namespace webrtc
