OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 260 matching lines...)
271 // interpret the startBitrate as the encoder target bitrate. This is | 271 // interpret the startBitrate as the encoder target bitrate. This is |
272 // to allow for a different max bitrate, so if the codec can't meet | 272 // to allow for a different max bitrate, so if the codec can't meet |
273 // the target we still allow it to overshoot up to the max before dropping | 273 // the target we still allow it to overshoot up to the max before dropping |
274 // frames. This hack should be improved. | 274 // frames. This hack should be improved. |
275 if (codec_.targetBitrate > 0 && | 275 if (codec_.targetBitrate > 0 && |
276 (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 || | 276 (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 || |
277 codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { | 277 codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { |
278 int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); | 278 int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); |
279 max_bitrate = std::min(codec_.maxBitrate, target_bitrate); | 279 max_bitrate = std::min(codec_.maxBitrate, target_bitrate); |
280 target_bitrate = tl0_bitrate; | 280 target_bitrate = tl0_bitrate; |
281 framerate = -1; | |
282 } | 281 } |
283 configurations_[i].rc_target_bitrate = target_bitrate; | 282 configurations_[i].rc_target_bitrate = target_bitrate; |
284 temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate, | 283 temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate, |
285 max_bitrate, | 284 max_bitrate, |
286 framerate, | 285 framerate, |
287 &configurations_[i]); | 286 &configurations_[i]); |
288 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { | 287 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { |
289 return WEBRTC_VIDEO_CODEC_ERROR; | 288 return WEBRTC_VIDEO_CODEC_ERROR; |
290 } | 289 } |
291 } | 290 } |
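As an aside on the hunk above: the comment describes treating the configured target bitrate as the TL0 (base-layer) target while still letting the encoder overshoot up to a separately capped max bitrate. A minimal sketch of that capping for the two-temporal-layer case, using illustrative names rather than the actual WebRTC members:

    // Illustrative only: codec_target/codec_max are the configured kbps values,
    // allocated is what the rate controller just handed us.
    #include <algorithm>
    #include <cstdint>

    struct Caps { uint32_t target_kbps; uint32_t max_kbps; };

    Caps CapForTwoTemporalLayers(uint32_t codec_target, uint32_t codec_max,
                                 uint32_t allocated) {
      Caps caps;
      caps.target_kbps = std::min(codec_target, allocated);  // TL0 target
      caps.max_kbps = std::min(codec_max, allocated);        // overshoot ceiling
      return caps;
    }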
(...skipping 13 matching lines...)
305 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, | 304 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, |
306 int num_temporal_layers, | 305 int num_temporal_layers, |
307 const VideoCodec& codec) { | 306 const VideoCodec& codec) { |
308 const Config default_options; | 307 const Config default_options; |
309 const TemporalLayers::Factory& tl_factory = | 308 const TemporalLayers::Factory& tl_factory = |
310 (codec.extra_options ? codec.extra_options : &default_options) | 309 (codec.extra_options ? codec.extra_options : &default_options) |
311 ->Get<TemporalLayers::Factory>(); | 310 ->Get<TemporalLayers::Factory>(); |
312 if (num_streams == 1) { | 311 if (num_streams == 1) { |
313 if (codec.mode == kScreensharing) { | 312 if (codec.mode == kScreensharing) { |
314 // Special mode when screensharing on a single stream. | 313 // Special mode when screensharing on a single stream. |
315 temporal_layers_.push_back(new ScreenshareLayers(num_temporal_layers, | 314 temporal_layers_.push_back( |
316 rand(), | 315 new ScreenshareLayers(num_temporal_layers, rand())); |
317 &tl0_frame_dropper_, | |
318 &tl1_frame_dropper_)); | |
319 } else { | 316 } else { |
320 temporal_layers_.push_back( | 317 temporal_layers_.push_back( |
321 tl_factory.Create(num_temporal_layers, rand())); | 318 tl_factory.Create(num_temporal_layers, rand())); |
322 } | 319 } |
323 } else { | 320 } else { |
324 for (int i = 0; i < num_streams; ++i) { | 321 for (int i = 0; i < num_streams; ++i) { |
325 // TODO(andresp): crash if layers is invalid. | 322 // TODO(andresp): crash if layers is invalid. |
326 int layers = codec.simulcastStream[i].numberOfTemporalLayers; | 323 int layers = codec.simulcastStream[i].numberOfTemporalLayers; |
327 if (layers < 1) layers = 1; | 324 if (layers < 1) layers = 1; |
328 temporal_layers_.push_back(tl_factory.Create(layers, rand())); | 325 temporal_layers_.push_back(tl_factory.Create(layers, rand())); |
(...skipping 335 matching lines...)
664 denoiser_state : kDenoiserOff); | 661 denoiser_state : kDenoiserOff); |
665 } | 662 } |
666 for (size_t i = 0; i < encoders_.size(); ++i) { | 663 for (size_t i = 0; i < encoders_.size(); ++i) { |
667 vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD, 1); | 664 vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD, 1); |
668 vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]); | 665 vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]); |
669 vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS, | 666 vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS, |
670 static_cast<vp8e_token_partitions>(token_partitions_)); | 667 static_cast<vp8e_token_partitions>(token_partitions_)); |
671 vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 668 vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT, |
672 rc_max_intra_target_); | 669 rc_max_intra_target_); |
673 vpx_codec_control(&(encoders_[i]), VP8E_SET_SCREEN_CONTENT_MODE, | 670 vpx_codec_control(&(encoders_[i]), VP8E_SET_SCREEN_CONTENT_MODE, |
674 codec_.mode == kScreensharing); | 671 codec_.mode == kScreensharing ? 2 : 0); |
stefan-webrtc 2015/06/18 09:39:02:
What does 2 mean?
sprang_webrtc 2015/06/18 13:05:02:
Commented
stefan-webrtc 2015/06/23 14:28:32:
Still doesn't say what 2 means
sprang_webrtc 2015/06/23 15:07:30:
You mean you miss the actual 2? :)
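For context on the thread above: if memory serves, the libvpx header vpx/vp8cx.h documents VP8E_SET_SCREEN_CONTENT_MODE as 0 = off, 1 = on, 2 = on with more aggressive rate control, so the ternary requests the more aggressive screen-content mode only when screensharing. A usage sketch, assuming an already-initialized vpx_codec_ctx_t named encoder and a bool is_screenshare:

    // Values per vpx/vp8cx.h: 0 = off, 1 = on, 2 = on with more aggressive RC.
    vpx_codec_control(&encoder, VP8E_SET_SCREEN_CONTENT_MODE,
                      is_screenshare ? 2 : 0);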
675 } | 672 } |
676 inited_ = true; | 673 inited_ = true; |
677 return WEBRTC_VIDEO_CODEC_OK; | 674 return WEBRTC_VIDEO_CODEC_OK; |
678 } | 675 } |
679 | 676 |
680 uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) { | 677 uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) { |
681 // Set max to the optimal buffer level (normalized by target BR), | 678 // Set max to the optimal buffer level (normalized by target BR), |
682 // and scaled by a scalePar. | 679 // and scaled by a scalePar. |
683 // Max target size = scalePar * optimalBufferSize * targetBR[Kbps]. | 680 // Max target size = scalePar * optimalBufferSize * targetBR[Kbps]. |
684 // This value is presented as a percentage of perFrameBw: | 681 // This value is presented as a percentage of perFrameBw: |
685 // perFrameBw = targetBR[Kbps] * 1000 / frameRate. | 682 // perFrameBw = targetBR[Kbps] * 1000 / frameRate. |
686 // The target in % is as follows: | 683 // The target in % is as follows: |
687 | 684 |
688 float scalePar = 0.5; | 685 float scalePar = 0.5; |
689 uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10; | 686 uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10; |
690 | 687 |
691 // Don't go below 3 times the per frame bandwidth. | 688 // Don't go below 3 times the per frame bandwidth. |
692 const uint32_t minIntraTh = 300; | 689 const uint32_t minIntraTh = 300; |
693 return (targetPct < minIntraTh) ? minIntraTh: targetPct; | 690 return (targetPct < minIntraTh) ? minIntraTh: targetPct; |
694 } | 691 } |
695 | 692 |
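A worked instance of the MaxIntraTarget() arithmetic above, using illustrative numbers (a 600 ms optimal buffer level and a 30 fps cap, neither taken from this CL):

    // targetPct = optimalBuffersize * scalePar * maxFramerate / 10
    //           = 600 * 0.5 * 30 / 10 = 900
    // i.e. a key frame may spend up to 900% of perFrameBw, where
    // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
    // Anything below the minIntraTh floor of 300 is clamped, so a key frame
    // is always allowed at least 3x the per-frame bandwidth.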
696 int VP8EncoderImpl::Encode(const VideoFrame& frame, | 693 int VP8EncoderImpl::Encode(const VideoFrame& frame, |
697 const CodecSpecificInfo* codec_specific_info, | 694 const CodecSpecificInfo* codec_specific_info, |
698 const std::vector<VideoFrameType>* frame_types) { | 695 const std::vector<VideoFrameType>* frame_types) { |
699 TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp()); | 696 TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp()); |
700 | 697 |
701 if (!inited_) { | 698 if (!inited_) |
702 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 699 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
703 } | 700 if (frame.IsZeroSize()) |
704 if (frame.IsZeroSize()) { | |
705 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 701 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
706 } | 702 if (encoded_complete_callback_ == NULL) |
707 if (encoded_complete_callback_ == NULL) { | |
708 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 703 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
709 } | |
710 | 704 |
711 // Only apply scaling to improve for single-layer streams. The scaling metrics | 705 // Only apply scaling to improve for single-layer streams. The scaling metrics |
712 // use frame drops as a signal and are only applicable when we drop frames. | 706 // use frame drops as a signal and are only applicable when we drop frames. |
713 const bool use_quality_scaler = encoders_.size() == 1 && | 707 const bool use_quality_scaler = encoders_.size() == 1 && |
714 configurations_[0].rc_dropframe_thresh > 0 && | 708 configurations_[0].rc_dropframe_thresh > 0 && |
715 codec_.codecSpecific.VP8.automaticResizeOn; | 709 codec_.codecSpecific.VP8.automaticResizeOn; |
716 const VideoFrame& input_image = | 710 const VideoFrame& input_image = |
717 use_quality_scaler ? quality_scaler_.GetScaledFrame(frame) : frame; | 711 use_quality_scaler ? quality_scaler_.GetScaledFrame(frame) : frame; |
718 | 712 |
719 if (use_quality_scaler && (input_image.width() != codec_.width || | 713 if (use_quality_scaler && (input_image.width() != codec_.width || |
(...skipping 124 matching lines...)
844 } | 838 } |
845 } | 839 } |
846 } | 840 } |
847 } | 841 } |
848 // Set the encoder frame flags and temporal layer_id for each spatial stream. | 842 // Set the encoder frame flags and temporal layer_id for each spatial stream. |
849 // Note that |temporal_layers_| are defined starting from lowest resolution at | 843 // Note that |temporal_layers_| are defined starting from lowest resolution at |
850 // position 0 to highest resolution at position |encoders_.size() - 1|, | 844 // position 0 to highest resolution at position |encoders_.size() - 1|, |
851 // whereas |encoder_| is from highest to lowest resolution. | 845 // whereas |encoder_| is from highest to lowest resolution. |
852 size_t stream_idx = encoders_.size() - 1; | 846 size_t stream_idx = encoders_.size() - 1; |
853 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { | 847 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { |
848 vpx_codec_enc_cfg_t temp_config; | |
stefan-webrtc 2015/06/18 09:39:02:
What is temporary with this config? Isn't it a scr
sprang_webrtc 2015/06/18 13:05:02:
It's not stored in configurations_ and will be res
849 memcpy(&temp_config, &configurations_[i], sizeof(vpx_codec_enc_cfg_t)); | |
850 if (temporal_layers_[stream_idx]->UpdateConfiguration(&temp_config)) { | |
851 if (vpx_codec_enc_config_set(&encoders_[i], &temp_config)) | |
852 return WEBRTC_VIDEO_CODEC_ERROR; | |
853 } | |
854 | |
854 vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]); | 855 vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]); |
855 vpx_codec_control(&encoders_[i], | 856 vpx_codec_control(&encoders_[i], |
856 VP8E_SET_TEMPORAL_LAYER_ID, | 857 VP8E_SET_TEMPORAL_LAYER_ID, |
857 temporal_layers_[stream_idx]->CurrentLayerId()); | 858 temporal_layers_[stream_idx]->CurrentLayerId()); |
858 } | 859 } |
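On the temp_config question in the thread above: the new lines follow a copy-modify-apply pattern. The stored configurations_[i] stays untouched, a scratch copy is offered to the temporal-layers logic, and vpx_codec_enc_config_set() is called only if that logic reports a change. A minimal sketch of the same pattern, where ApplyPerFrameOverrides is a hypothetical stand-in for TemporalLayers::UpdateConfiguration():

    // Scratch copy so per-frame tweaks never leak into the stored baseline.
    vpx_codec_enc_cfg_t scratch = stored_config;
    if (ApplyPerFrameOverrides(&scratch)) {  // true if anything was changed
      if (vpx_codec_enc_config_set(&encoder, &scratch) != VPX_CODEC_OK)
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
    // The next frame starts again from the unmodified stored_config.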
859 // TODO(holmer): Ideally the duration should be the timestamp diff of this | 860 // TODO(holmer): Ideally the duration should be the timestamp diff of this |
860 // frame and the next frame to be encoded, which we don't have. Instead we | 861 // frame and the next frame to be encoded, which we don't have. Instead we |
861 // would like to use the duration of the previous frame. Unfortunately the | 862 // would like to use the duration of the previous frame. Unfortunately the |
862 // rate control seems to be off with that setup. Using the average input | 863 // rate control seems to be off with that setup. Using the average input |
863 // frame rate to calculate an average duration for now. | 864 // frame rate to calculate an average duration for now. |
864 assert(codec_.maxFramerate > 0); | 865 assert(codec_.maxFramerate > 0); |
865 uint32_t duration = 90000 / codec_.maxFramerate; | 866 uint32_t duration = 90000 / codec_.maxFramerate; |
866 | 867 |
867 // Note we must pass 0 for |flags| field in encode call below since they are | 868 // Note we must pass 0 for |flags| field in encode call below since they are |
868 // set above in |vpx_codec_control| function for each encoder/spatial layer. | 869 // set above in |vpx_codec_control| function for each encoder/spatial layer. |
869 int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_, | 870 int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_, |
870 duration, 0, VPX_DL_REALTIME); | 871 duration, 0, VPX_DL_REALTIME); |
871 // Reset specific intra frame thresholds, following the key frame. | 872 // Reset specific intra frame thresholds, following the key frame. |
872 if (send_key_frame) { | 873 if (send_key_frame) { |
873 vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 874 vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, |
874 rc_max_intra_target_); | 875 rc_max_intra_target_); |
875 } | 876 } |
876 if (error) { | 877 if (error) |
877 return WEBRTC_VIDEO_CODEC_ERROR; | 878 return WEBRTC_VIDEO_CODEC_ERROR; |
878 } | |
879 timestamp_ += duration; | 879 timestamp_ += duration; |
880 return GetEncodedPartitions(input_image, only_predict_from_key_frame); | 880 return GetEncodedPartitions(input_image, only_predict_from_key_frame); |
881 } | 881 } |
882 | 882 |
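A quick note on the duration passed to vpx_codec_encode() above: VP8 over RTP uses a 90 kHz timestamp clock, so 90000 / maxFramerate is the average frame duration in timestamp ticks, and timestamp_ advances by the same amount after each call. With an assumed 30 fps cap:

    // 90 kHz RTP clock: duration = 90000 / 30 = 3000 ticks per frame (~33 ms).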
883 // TODO(pbos): Make sure this works properly for >1 encoders. | 883 // TODO(pbos): Make sure this works properly for >1 encoders. |
884 int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) { | 884 int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) { |
885 codec_.width = input_image.width(); | 885 codec_.width = input_image.width(); |
886 codec_.height = input_image.height(); | 886 codec_.height = input_image.height(); |
887 // Update the cpu_speed setting for resolution change. | 887 // Update the cpu_speed setting for resolution change. |
888 vpx_codec_control(&(encoders_[0]), | 888 vpx_codec_control(&(encoders_[0]), |
(...skipping 85 matching lines...)
974 } | 974 } |
975 PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, | 975 PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, |
976 input_image.timestamp(), | 976 input_image.timestamp(), |
977 only_predicting_from_key_frame); | 977 only_predicting_from_key_frame); |
978 break; | 978 break; |
979 } | 979 } |
980 } | 980 } |
981 encoded_images_[encoder_idx]._timeStamp = input_image.timestamp(); | 981 encoded_images_[encoder_idx]._timeStamp = input_image.timestamp(); |
982 encoded_images_[encoder_idx].capture_time_ms_ = | 982 encoded_images_[encoder_idx].capture_time_ms_ = |
983 input_image.render_time_ms(); | 983 input_image.render_time_ms(); |
984 | |
985 int qp = -1; | |
986 vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp); | |
984 temporal_layers_[stream_idx]->FrameEncoded( | 987 temporal_layers_[stream_idx]->FrameEncoded( |
985 encoded_images_[encoder_idx]._length, | 988 encoded_images_[encoder_idx]._length, |
986 encoded_images_[encoder_idx]._timeStamp); | 989 encoded_images_[encoder_idx]._timeStamp, qp); |
987 if (send_stream_[stream_idx]) { | 990 if (send_stream_[stream_idx]) { |
988 if (encoded_images_[encoder_idx]._length > 0) { | 991 if (encoded_images_[encoder_idx]._length > 0) { |
989 TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx, | 992 TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx, |
990 encoded_images_[encoder_idx]._length); | 993 encoded_images_[encoder_idx]._length); |
991 encoded_images_[encoder_idx]._encodedHeight = | 994 encoded_images_[encoder_idx]._encodedHeight = |
992 codec_.simulcastStream[stream_idx].height; | 995 codec_.simulcastStream[stream_idx].height; |
993 encoded_images_[encoder_idx]._encodedWidth = | 996 encoded_images_[encoder_idx]._encodedWidth = |
994 codec_.simulcastStream[stream_idx].width; | 997 codec_.simulcastStream[stream_idx].width; |
995 encoded_complete_callback_->Encoded(encoded_images_[encoder_idx], | 998 encoded_complete_callback_->Encoded(encoded_images_[encoder_idx], |
996 &codec_specific, &frag_info); | 999 &codec_specific, &frag_info); |
(...skipping 378 matching lines...)
1375 return -1; | 1378 return -1; |
1376 } | 1379 } |
1377 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) | 1380 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) |
1378 != VPX_CODEC_OK) { | 1381 != VPX_CODEC_OK) { |
1379 return -1; | 1382 return -1; |
1380 } | 1383 } |
1381 return 0; | 1384 return 0; |
1382 } | 1385 } |
1383 | 1386 |
1384 } // namespace webrtc | 1387 } // namespace webrtc |