Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 * | 9 * |
| 10 */ | 10 */ |
| 11 | 11 |
| 12 #include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h" | 12 #include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h" |
| 13 | 13 |
| 14 #include <stdlib.h> | 14 #include <stdlib.h> |
| 15 #include <string.h> | 15 #include <string.h> |
| 16 #include <time.h> | 16 #include <time.h> |
| 17 #include <vector> | 17 #include <vector> |
| 18 | 18 |
| 19 #include "vpx/vpx_encoder.h" | 19 #include "vpx/vpx_encoder.h" |
| 20 #include "vpx/vpx_decoder.h" | 20 #include "vpx/vpx_decoder.h" |
| 21 #include "vpx/vp8cx.h" | 21 #include "vpx/vp8cx.h" |
| 22 #include "vpx/vp8dx.h" | 22 #include "vpx/vp8dx.h" |
| 23 | 23 |
| 24 #include "webrtc/base/bind.h" | 24 #include "webrtc/base/bind.h" |
| 25 #include "webrtc/base/checks.h" | 25 #include "webrtc/base/checks.h" |
| 26 #include "webrtc/common.h" | 26 #include "webrtc/common.h" |
| 27 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" | 27 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" |
| 28 #include "webrtc/modules/interface/module_common_types.h" | 28 #include "webrtc/modules/interface/module_common_types.h" |
| 29 #include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h" | |
| 29 #include "webrtc/system_wrappers/interface/logging.h" | 30 #include "webrtc/system_wrappers/interface/logging.h" |
| 30 #include "webrtc/system_wrappers/interface/tick_util.h" | 31 #include "webrtc/system_wrappers/interface/tick_util.h" |
| 31 #include "webrtc/system_wrappers/interface/trace_event.h" | 32 #include "webrtc/system_wrappers/interface/trace_event.h" |
| 32 | 33 |
| 33 namespace { | 34 namespace { |
| 34 | 35 |
| 35 // VP9DecoderImpl::ReturnFrame helper function used with WrappedI420Buffer. | 36 // VP9DecoderImpl::ReturnFrame helper function used with WrappedI420Buffer. |
| 36 static void WrappedI420BufferNoLongerUsedCb( | 37 static void WrappedI420BufferNoLongerUsedCb( |
| 37 webrtc::Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer) { | 38 webrtc::Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer) { |
| 38 img_buffer->Release(); | 39 img_buffer->Release(); |
| (...skipping 30 matching lines...) | |
| 69 inited_(false), | 70 inited_(false), |
| 70 timestamp_(0), | 71 timestamp_(0), |
| 71 picture_id_(0), | 72 picture_id_(0), |
| 72 cpu_speed_(3), | 73 cpu_speed_(3), |
| 73 rc_max_intra_target_(0), | 74 rc_max_intra_target_(0), |
| 74 encoder_(NULL), | 75 encoder_(NULL), |
| 75 config_(NULL), | 76 config_(NULL), |
| 76 raw_(NULL), | 77 raw_(NULL), |
| 77 input_image_(NULL), | 78 input_image_(NULL), |
| 78 tl0_pic_idx_(0), | 79 tl0_pic_idx_(0), |
| 79 gof_idx_(0), | 80 frames_since_kf_(0), |
| 80 num_temporal_layers_(0), | 81 num_temporal_layers_(0), |
| 81 num_spatial_layers_(0) { | 82 num_spatial_layers_(0), |
| 83 frames_encoded_(0), | |
| 84 spatial_layer_(new ScreenshareLayersVP9()) { | |
| 82 memset(&codec_, 0, sizeof(codec_)); | 85 memset(&codec_, 0, sizeof(codec_)); |
| 83 uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp()); | 86 uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp()); |
| 84 srand(seed); | 87 srand(seed); |
| 85 } | 88 } |
| 86 | 89 |
| 87 VP9EncoderImpl::~VP9EncoderImpl() { | 90 VP9EncoderImpl::~VP9EncoderImpl() { |
| 88 Release(); | 91 Release(); |
| 89 } | 92 } |
| 90 | 93 |
| 91 int VP9EncoderImpl::Release() { | 94 int VP9EncoderImpl::Release() { |
| (...skipping 79 matching lines...) | |
| 171 } | 174 } |
| 172 if (new_framerate < 1) { | 175 if (new_framerate < 1) { |
| 173 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 176 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 174 } | 177 } |
| 175 // Update bit rate | 178 // Update bit rate |
| 176 if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) { | 179 if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) { |
| 177 new_bitrate_kbit = codec_.maxBitrate; | 180 new_bitrate_kbit = codec_.maxBitrate; |
| 178 } | 181 } |
| 179 config_->rc_target_bitrate = new_bitrate_kbit; | 182 config_->rc_target_bitrate = new_bitrate_kbit; |
| 180 codec_.maxFramerate = new_framerate; | 183 codec_.maxFramerate = new_framerate; |
| 184 spatial_layer_->ConfigureBitrate(new_bitrate_kbit); | |
| 181 | 185 |
| 182 if (!SetSvcRates()) { | 186 if (!SetSvcRates()) { |
| 183 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 187 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 184 } | 188 } |
| 185 | 189 |
| 186 // Update encoder context | 190 // Update encoder context |
| 187 if (vpx_codec_enc_config_set(encoder_, config_)) { | 191 if (vpx_codec_enc_config_set(encoder_, config_)) { |
| 188 return WEBRTC_VIDEO_CODEC_ERROR; | 192 return WEBRTC_VIDEO_CODEC_ERROR; |
| 189 } | 193 } |
| 190 return WEBRTC_VIDEO_CODEC_OK; | 194 return WEBRTC_VIDEO_CODEC_OK; |
| (...skipping 18 matching lines...) | |
| 209 if (number_of_cores < 1) { | 213 if (number_of_cores < 1) { |
| 210 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 214 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 211 } | 215 } |
| 212 if (inst->codecSpecific.VP9.numberOfTemporalLayers > 3) { | 216 if (inst->codecSpecific.VP9.numberOfTemporalLayers > 3) { |
| 213 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 217 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 214 } | 218 } |
| 215 // libvpx currently supports only one or two spatial layers. | 219 // libvpx currently supports only one or two spatial layers. |
| 216 if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) { | 220 if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) { |
| 217 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 221 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 218 } | 222 } |
| 223 | |
| 219 int retVal = Release(); | 224 int retVal = Release(); |
| 220 if (retVal < 0) { | 225 if (retVal < 0) { |
| 221 return retVal; | 226 return retVal; |
| 222 } | 227 } |
| 223 if (encoder_ == NULL) { | 228 if (encoder_ == NULL) { |
| 224 encoder_ = new vpx_codec_ctx_t; | 229 encoder_ = new vpx_codec_ctx_t; |
| 225 } | 230 } |
| 226 if (config_ == NULL) { | 231 if (config_ == NULL) { |
| 227 config_ = new vpx_codec_enc_cfg_t; | 232 config_ = new vpx_codec_enc_cfg_t; |
| 228 } | 233 } |
| (...skipping 56 matching lines...) | |
| 285 } | 290 } |
| 286 // Determine number of threads based on the image size and #cores. | 291 // Determine number of threads based on the image size and #cores. |
| 287 config_->g_threads = NumberOfThreads(config_->g_w, | 292 config_->g_threads = NumberOfThreads(config_->g_w, |
| 288 config_->g_h, | 293 config_->g_h, |
| 289 number_of_cores); | 294 number_of_cores); |
| 290 | 295 |
| 291 cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h); | 296 cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h); |
| 292 | 297 |
| 293 // TODO(asapersson): Check configuration of temporal switch up and increase | 298 // TODO(asapersson): Check configuration of temporal switch up and increase |
| 294 // pattern length. | 299 // pattern length. |
| 295 if (num_temporal_layers_ == 1) { | 300 is_flexible_mode_ = inst->codecSpecific.VP9.flexibleMode; |
| 301 if (is_flexible_mode_) { | |
| 302 config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS; | |
| 303 config_->ts_number_layers = num_temporal_layers_; | |
| 304 | |
| 305 if (codec_.mode == kScreensharing) { | |
| 306 spatial_layer_->ConfigureBitrate(inst->startBitrate); | |
| 307 } | |
| 308 } else if (num_temporal_layers_ == 1) { | |
| 296 gof_.SetGofInfoVP9(kTemporalStructureMode1); | 309 gof_.SetGofInfoVP9(kTemporalStructureMode1); |
| 297 config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING; | 310 config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING; |
| 298 config_->ts_number_layers = 1; | 311 config_->ts_number_layers = 1; |
| 299 config_->ts_rate_decimator[0] = 1; | 312 config_->ts_rate_decimator[0] = 1; |
| 300 config_->ts_periodicity = 1; | 313 config_->ts_periodicity = 1; |
| 301 config_->ts_layer_id[0] = 0; | 314 config_->ts_layer_id[0] = 0; |
| 302 } else if (num_temporal_layers_ == 2) { | 315 } else if (num_temporal_layers_ == 2) { |
| 303 gof_.SetGofInfoVP9(kTemporalStructureMode2); | 316 gof_.SetGofInfoVP9(kTemporalStructureMode2); |
| 304 config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101; | 317 config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101; |
| 305 config_->ts_number_layers = 2; | 318 config_->ts_number_layers = 2; |
| (...skipping 47 matching lines...) | |
| 353 int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) { | 366 int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) { |
| 354 config_->ss_number_layers = num_spatial_layers_; | 367 config_->ss_number_layers = num_spatial_layers_; |
| 355 | 368 |
| 356 int scaling_factor_num = 256; | 369 int scaling_factor_num = 256; |
| 357 for (int i = num_spatial_layers_ - 1; i >= 0; --i) { | 370 for (int i = num_spatial_layers_ - 1; i >= 0; --i) { |
| 358 svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer; | 371 svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer; |
| 359 svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer; | 372 svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer; |
| 360 // 1:2 scaling in each dimension. | 373 // 1:2 scaling in each dimension. |
| 361 svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num; | 374 svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num; |
| 362 svc_internal_.svc_params.scaling_factor_den[i] = 256; | 375 svc_internal_.svc_params.scaling_factor_den[i] = 256; |
| 363 scaling_factor_num /= 2; | 376 if (!is_flexible_mode_) |
**sprang_webrtc** (2015/09/15 15:41:22): This seems very specific. This only works because…

**philipel** (2015/09/16 09:35:54): Yes, this assumption is completely wrong. It shoul…
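The thread above flags that halving `scaling_factor_num` per layer bakes in a 1:2 ratio. As a minimal sketch of the more general derivation the reply hints at, assuming a hypothetical `SetScalingFactors` helper with explicit ratio parameters (not code from this CL):

```cpp
#include "vpx/vp8cx.h"  // vpx_svc_extra_cfg_t (scaling_factor_num/den).

// Sketch only: fill per-layer scaling factors for an arbitrary inter-layer
// downscale ratio instead of assuming each layer is half the one above it.
void SetScalingFactors(vpx_svc_extra_cfg_t* svc_params,
                       int num_spatial_layers,
                       int ratio_num,    // e.g. 1 for 1:2 scaling
                       int ratio_den) {  // e.g. 2 for 1:2 scaling
  int num = 256;  // Top spatial layer runs at full resolution (256/256).
  for (int i = num_spatial_layers - 1; i >= 0; --i) {
    svc_params->scaling_factor_num[i] = num;
    svc_params->scaling_factor_den[i] = 256;
    num = num * ratio_num / ratio_den;  // Each lower layer is scaled down.
  }
}
```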
| 377 scaling_factor_num /= 2; | |
| 364 } | 378 } |
| 365 | 379 |
| 366 if (!SetSvcRates()) { | 380 if (!SetSvcRates()) { |
| 367 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 381 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 368 } | 382 } |
| 369 | 383 |
| 370 if (vpx_codec_enc_init(encoder_, vpx_codec_vp9_cx(), config_, 0)) { | 384 if (vpx_codec_enc_init(encoder_, vpx_codec_vp9_cx(), config_, 0)) { |
| 371 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 385 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 372 } | 386 } |
| 373 vpx_codec_control(encoder_, VP8E_SET_CPUUSED, cpu_speed_); | 387 vpx_codec_control(encoder_, VP8E_SET_CPUUSED, cpu_speed_); |
| (...skipping 79 matching lines...) | |
| 453 | 467 |
| 454 // Image in vpx_image_t format. | 468 // Image in vpx_image_t format. |
| 455 // Input image is const. VPX's raw image is not defined as const. | 469 // Input image is const. VPX's raw image is not defined as const. |
| 456 raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane)); | 470 raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane)); |
| 457 raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane)); | 471 raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane)); |
| 458 raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane)); | 472 raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane)); |
| 459 raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane); | 473 raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane); |
| 460 raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane); | 474 raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane); |
| 461 raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane); | 475 raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane); |
| 462 | 476 |
| 463 int flags = 0; | 477 vpx_enc_frame_flags_t flags = 0; |
| 464 bool send_keyframe = (frame_type == kKeyFrame); | 478 bool send_keyframe = (frame_type == kKeyFrame); |
| 465 if (send_keyframe) { | 479 if (send_keyframe) { |
| 466 // Key frame request from caller. | 480 // Key frame request from caller. |
| 467 flags = VPX_EFLAG_FORCE_KF; | 481 flags = VPX_EFLAG_FORCE_KF; |
| 468 } | 482 } |
| 483 | |
| 484 if (is_flexible_mode_) { | |
| 485 SuperFrameRefSettings settings; | |
| 486 vpx_svc_ref_frame_config enc_layer_conf; | |
| 487 if (codec_.mode == kRealtimeVideo) { | |
| 488 // Real time video not yet implemented in flexible mode. | |
| 489 CHECK(false); | |
**sprang_webrtc** (2015/09/15 15:41:22): RTC_NOTREACHED();

**philipel** (2015/09/16 09:35:54): Done.
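A sketch of the fix as applied after the review, assuming `RTC_NOTREACHED()` from `webrtc/base/checks.h` (already included at the top of this file), the project's idiom for marking a code path that must never execute:

```cpp
if (codec_.mode == kRealtimeVideo) {
  // Real-time video is not yet implemented in flexible mode.
  RTC_NOTREACHED();
} else {
  settings =
      spatial_layer_->SfSettings(input_image.timestamp(), send_keyframe);
}
```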
| 490 } else { | |
| 491 settings = | |
| 492 spatial_layer_->SfSettings(input_image.timestamp(), send_keyframe); | |
| 493 } | |
| 494 enc_layer_conf = GenerateRefsAndFlags(settings); | |
| 495 vpx_codec_control(encoder_, VP9E_SET_FIRST_SPATIAL_LAYER, | |
| 496 settings.start_layer); | |
| 497 vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf); | |
| 498 } | |
| 499 | |
| 469 assert(codec_.maxFramerate > 0); | 500 assert(codec_.maxFramerate > 0); |
| 470 uint32_t duration = 90000 / codec_.maxFramerate; | 501 uint32_t duration = 90000 / codec_.maxFramerate; |
| 471 if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags, | 502 if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags, |
| 472 VPX_DL_REALTIME)) { | 503 VPX_DL_REALTIME)) { |
| 473 return WEBRTC_VIDEO_CODEC_ERROR; | 504 return WEBRTC_VIDEO_CODEC_ERROR; |
| 474 } | 505 } |
| 475 timestamp_ += duration; | 506 timestamp_ += duration; |
| 476 | 507 |
| 477 return WEBRTC_VIDEO_CODEC_OK; | 508 return WEBRTC_VIDEO_CODEC_OK; |
| 478 } | 509 } |
| 479 | 510 |
| 480 void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, | 511 void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, |
| 481 const vpx_codec_cx_pkt& pkt, | 512 const vpx_codec_cx_pkt& pkt, |
| 482 uint32_t timestamp) { | 513 uint32_t timestamp) { |
| 483 assert(codec_specific != NULL); | 514 assert(codec_specific != NULL); |
| 484 codec_specific->codecType = kVideoCodecVP9; | 515 codec_specific->codecType = kVideoCodecVP9; |
| 485 CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9); | 516 CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9); |
| 486 // TODO(asapersson): Set correct values. | 517 // TODO(asapersson): Set correct values. |
| 487 vp9_info->inter_pic_predicted = | 518 vp9_info->inter_pic_predicted = |
| 488 (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true; | 519 (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true; |
| 489 vp9_info->flexible_mode = codec_.codecSpecific.VP9.flexibleMode; | 520 vp9_info->flexible_mode = codec_.codecSpecific.VP9.flexibleMode; |
| 490 vp9_info->ss_data_available = ((pkt.data.frame.flags & VPX_FRAME_IS_KEY) && | 521 vp9_info->ss_data_available = ((pkt.data.frame.flags & VPX_FRAME_IS_KEY) && |
| 491 !codec_.codecSpecific.VP9.flexibleMode) | 522 !codec_.codecSpecific.VP9.flexibleMode) |
| 492 ? true | 523 ? true |
| 493 : false; | 524 : false; |
| 494 if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) { | 525 if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) |
| 495 gof_idx_ = 0; | 526 frames_since_kf_ = 0; |
| 496 } | |
| 497 | 527 |
| 498 vpx_svc_layer_id_t layer_id = {0}; | 528 vpx_svc_layer_id_t layer_id = {0}; |
| 499 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); | 529 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); |
| 500 | 530 |
| 501 assert(num_temporal_layers_ > 0); | 531 assert(num_temporal_layers_ > 0); |
| 502 assert(num_spatial_layers_ > 0); | 532 assert(num_spatial_layers_ > 0); |
| 503 if (num_temporal_layers_ == 1) { | 533 if (num_temporal_layers_ == 1) { |
| 504 assert(layer_id.temporal_layer_id == 0); | 534 assert(layer_id.temporal_layer_id == 0); |
| 505 vp9_info->temporal_idx = kNoTemporalIdx; | 535 vp9_info->temporal_idx = kNoTemporalIdx; |
| 506 } else { | 536 } else { |
| 507 vp9_info->temporal_idx = layer_id.temporal_layer_id; | 537 vp9_info->temporal_idx = layer_id.temporal_layer_id; |
| 508 } | 538 } |
| 509 if (num_spatial_layers_ == 1) { | 539 if (num_spatial_layers_ == 1) { |
| 510 assert(layer_id.spatial_layer_id == 0); | 540 assert(layer_id.spatial_layer_id == 0); |
| 511 vp9_info->spatial_idx = kNoSpatialIdx; | 541 vp9_info->spatial_idx = kNoSpatialIdx; |
| 512 } else { | 542 } else { |
| 513 vp9_info->spatial_idx = layer_id.spatial_layer_id; | 543 vp9_info->spatial_idx = layer_id.spatial_layer_id; |
| 514 } | 544 } |
| 515 if (layer_id.spatial_layer_id != 0) { | 545 if (layer_id.spatial_layer_id != 0) { |
| 516 vp9_info->ss_data_available = false; | 546 vp9_info->ss_data_available = false; |
| 517 } | 547 } |
| 518 | 548 |
| 519 if (vp9_info->flexible_mode) { | |
| 520 vp9_info->gof_idx = kNoGofIdx; | |
| 521 } else { | |
| 522 vp9_info->gof_idx = | |
| 523 static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof); | |
| 524 } | |
| 525 | |
| 526 // TODO(asapersson): this info has to be obtained from the encoder. | 549 // TODO(asapersson): this info has to be obtained from the encoder. |
| 527 vp9_info->temporal_up_switch = true; | 550 vp9_info->temporal_up_switch = true; |
| 528 | 551 |
| 529 if (layer_id.spatial_layer_id == 0) { | 552 if (layer_id.spatial_layer_id == spatial_layer_->CurrentLayer()) { |
| 530 picture_id_ = (picture_id_ + 1) & 0x7FFF; | 553 picture_id_ = (picture_id_ + 1) & 0x7FFF; |
| 531 // TODO(asapersson): this info has to be obtained from the encoder. | 554 // TODO(asapersson): this info has to be obtained from the encoder. |
| 532 vp9_info->inter_layer_predicted = false; | 555 vp9_info->inter_layer_predicted = false; |
| 533 } else { | 556 } else { |
| 534 // TODO(asapersson): this info has to be obtained from the encoder. | 557 // TODO(asapersson): this info has to be obtained from the encoder. |
| 535 vp9_info->inter_layer_predicted = true; | 558 vp9_info->inter_layer_predicted = true; |
| 536 } | 559 } |
| 537 | 560 |
| 538 vp9_info->picture_id = picture_id_; | 561 vp9_info->picture_id = picture_id_; |
| 539 | 562 |
| 540 if (!vp9_info->flexible_mode) { | 563 if (!vp9_info->flexible_mode) { |
| 541 if (layer_id.temporal_layer_id == 0 && layer_id.spatial_layer_id == 0) { | 564 if (layer_id.temporal_layer_id == 0 && layer_id.spatial_layer_id == 0) { |
| 542 tl0_pic_idx_++; | 565 tl0_pic_idx_++; |
| 543 } | 566 } |
| 544 vp9_info->tl0_pic_idx = tl0_pic_idx_; | 567 vp9_info->tl0_pic_idx = tl0_pic_idx_; |
| 545 } | 568 } |
| 546 | 569 |
| 547 // Always populate this, so that the packetizer can properly set the marker | 570 // Always populate this, so that the packetizer can properly set the marker |
| 548 // bit. | 571 // bit. |
| 549 vp9_info->num_spatial_layers = num_spatial_layers_; | 572 vp9_info->num_spatial_layers = num_spatial_layers_; |
| 573 | |
| 574 vp9_info->num_ref_pics = 0; | |
| 575 if (vp9_info->flexible_mode) { | |
| 576 vp9_info->gof_idx = kNoGofIdx; | |
| 577 if (!(pkt.data.frame.flags & VPX_FRAME_IS_KEY)) { | |
| 578 vp9_info->num_ref_pics = num_ref_pics_[layer_id.spatial_layer_id]; | |
| 579 for (int i = 0; i < num_ref_pics_[layer_id.spatial_layer_id]; ++i) { | |
| 580 vp9_info->p_diff[i] = p_diff_[layer_id.spatial_layer_id][i]; | |
| 581 } | |
| 582 } | |
| 583 } else { | |
| 584 vp9_info->gof_idx = | |
| 585 static_cast<uint8_t>(frames_since_kf_ % gof_.num_frames_in_gof); | |
| 586 } | |
| 587 ++frames_since_kf_; | |
| 588 | |
| 550 if (vp9_info->ss_data_available) { | 589 if (vp9_info->ss_data_available) { |
| 551 vp9_info->spatial_layer_resolution_present = true; | 590 vp9_info->spatial_layer_resolution_present = true; |
| 552 for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) { | 591 for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) { |
| 553 vp9_info->width[i] = codec_.width * | 592 vp9_info->width[i] = codec_.width * |
| 554 svc_internal_.svc_params.scaling_factor_num[i] / | 593 svc_internal_.svc_params.scaling_factor_num[i] / |
| 555 svc_internal_.svc_params.scaling_factor_den[i]; | 594 svc_internal_.svc_params.scaling_factor_den[i]; |
| 556 vp9_info->height[i] = codec_.height * | 595 vp9_info->height[i] = codec_.height * |
| 557 svc_internal_.svc_params.scaling_factor_num[i] / | 596 svc_internal_.svc_params.scaling_factor_num[i] / |
| 558 svc_internal_.svc_params.scaling_factor_den[i]; | 597 svc_internal_.svc_params.scaling_factor_den[i]; |
| 559 } | 598 } |
| (...skipping 15 matching lines...) | |
| 575 | 614 |
| 576 assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT); | 615 assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT); |
| 577 memcpy(&encoded_image_._buffer[encoded_image_._length], pkt->data.frame.buf, | 616 memcpy(&encoded_image_._buffer[encoded_image_._length], pkt->data.frame.buf, |
| 578 pkt->data.frame.sz); | 617 pkt->data.frame.sz); |
| 579 frag_info.fragmentationOffset[part_idx] = encoded_image_._length; | 618 frag_info.fragmentationOffset[part_idx] = encoded_image_._length; |
| 580 frag_info.fragmentationLength[part_idx] = | 619 frag_info.fragmentationLength[part_idx] = |
| 581 static_cast<uint32_t>(pkt->data.frame.sz); | 620 static_cast<uint32_t>(pkt->data.frame.sz); |
| 582 frag_info.fragmentationPlType[part_idx] = 0; | 621 frag_info.fragmentationPlType[part_idx] = 0; |
| 583 frag_info.fragmentationTimeDiff[part_idx] = 0; | 622 frag_info.fragmentationTimeDiff[part_idx] = 0; |
| 584 encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz); | 623 encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz); |
| 624 | |
| 625 vpx_svc_layer_id_t layer_id = {0}; | |
| 626 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); | |
| 627 if (is_flexible_mode_ && codec_.mode == kScreensharing) | |
| 628 spatial_layer_->LayerFrameEncoded(encoded_image_._length, | |
| 629 layer_id.spatial_layer_id); | |
| 630 | |
| 585 assert(encoded_image_._length <= encoded_image_._size); | 631 assert(encoded_image_._length <= encoded_image_._size); |
| 586 | 632 |
| 587 // End of frame. | 633 // End of frame. |
| 588 // Check if encoded frame is a key frame. | 634 // Check if encoded frame is a key frame. |
| 589 if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) { | 635 if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) { |
| 590 encoded_image_._frameType = kKeyFrame; | 636 encoded_image_._frameType = kKeyFrame; |
| 591 } | 637 } |
| 592 PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp()); | 638 PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp()); |
| 593 | 639 |
| 594 if (encoded_image_._length > 0) { | 640 if (encoded_image_._length > 0) { |
| 595 TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length); | 641 TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length); |
| 596 encoded_image_._timeStamp = input_image_->timestamp(); | 642 encoded_image_._timeStamp = input_image_->timestamp(); |
| 597 encoded_image_.capture_time_ms_ = input_image_->render_time_ms(); | 643 encoded_image_.capture_time_ms_ = input_image_->render_time_ms(); |
| 598 encoded_image_._encodedHeight = raw_->d_h; | 644 encoded_image_._encodedHeight = raw_->d_h; |
| 599 encoded_image_._encodedWidth = raw_->d_w; | 645 encoded_image_._encodedWidth = raw_->d_w; |
| 600 encoded_complete_callback_->Encoded(encoded_image_, &codec_specific, | 646 encoded_complete_callback_->Encoded(encoded_image_, &codec_specific, |
| 601 &frag_info); | 647 &frag_info); |
| 602 } | 648 } |
| 603 return WEBRTC_VIDEO_CODEC_OK; | 649 return WEBRTC_VIDEO_CODEC_OK; |
| 604 } | 650 } |
| 605 | 651 |
| 652 vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags( | |
| 653 const SuperFrameRefSettings& settings) { | |
| 654 static const vpx_enc_frame_flags_t all_flags = | |
| 655 VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_LAST | | |
| 656 VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF; | |
| 657 vpx_svc_ref_frame_config sf_conf; | |
| 658 if (settings.is_keyframe) { | |
| 659 // Used later on to make sure we don't make any invalid references. | |
| 660 memset(buf_upd_at_frame_, -1, sizeof(buf_upd_at_frame_)); | |
| 661 for (int l = settings.start_layer; l <= settings.stop_layer; ++l) { | |
**sprang_webrtc** (2015/09/15 15:41:22): More descriptive names please! layer instead of l?

**philipel** (2015/09/16 09:35:53): Done.
| 662 buf_upd_at_frame_[settings.layer[l].upd_buf] = frames_encoded_; | |
| 663 sf_conf.lst_fb_idx[l] = settings.layer[l].upd_buf; | |
| 664 } | |
| 665 } else { | |
| 666 for (int l = settings.start_layer; l <= settings.stop_layer; ++l) { | |
| 667 vpx_enc_frame_flags_t layer_flags = all_flags; | |
| 668 num_ref_pics_[l] = 0; | |
| 669 int8_t refs[3] = {settings.layer[l].ref_buf1, settings.layer[l].ref_buf2, | |
| 670 settings.layer[l].ref_buf3}; | |
| 671 | |
| 672 for (unsigned int r = 0; r < kMaxVp9RefPics; ++r) { | |
**sprang_webrtc** (2015/09/15 15:41:22): name

**philipel** (2015/09/16 09:35:54): Done.
| 673 if (refs[r] == -1) | |
| 674 continue; | |
| 675 | |
| 676 DCHECK_GE(refs[r], 0); | |
| 677 DCHECK_LE(refs[r], 7); | |
| 678 switch (num_ref_pics_[l]) { | |
| 679 case 0: { | |
| 680 sf_conf.lst_fb_idx[l] = refs[r]; | |
| 681 layer_flags ^= VP8_EFLAG_NO_REF_LAST; | |
**sprang_webrtc** (2015/09/15 15:41:22): Not sure I follow why you xor this flag. Comment?

**philipel** (2015/09/16 09:35:54): If we want to reference the LAST buffer then we sh…
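Unpacking the (truncated) reply: `layer_flags` starts as `all_flags`, with every NO_REF/NO_UPD bit set, so XOR-ing a bit that is known to be set clears it and re-enables exactly that reference. A self-contained sketch of the same bit logic (the function name is illustrative):

```cpp
#include "vpx/vp8cx.h"        // VP8_EFLAG_* bits (reused by the VP9 encoder).
#include "vpx/vpx_encoder.h"  // vpx_enc_frame_flags_t.

vpx_enc_frame_flags_t ExampleLayerFlags() {
  // Start with every reference and buffer update disabled.
  vpx_enc_frame_flags_t layer_flags =
      VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_LAST |
      VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF;
  // XOR clears the set bit: LAST may now be referenced.
  layer_flags ^= VP8_EFLAG_NO_REF_LAST;
  // Equivalent, without relying on the bit being set beforehand:
  layer_flags &= ~VP8_EFLAG_NO_REF_GF;  // GOLDEN may now be referenced.
  return layer_flags;
}
```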
| 682 break; | |
| 683 } | |
| 684 case 1: { | |
| 685 sf_conf.gld_fb_idx[l] = refs[r]; | |
| 686 layer_flags ^= VP8_EFLAG_NO_REF_GF; | |
| 687 break; | |
| 688 } | |
| 689 case 2: { | |
| 690 sf_conf.alt_fb_idx[l] = refs[r]; | |
| 691 layer_flags ^= VP8_EFLAG_NO_REF_ARF; | |
| 692 break; | |
| 693 } | |
| 694 } | |
| 695 // Make sure we don't reference a buffer that hasn't been | |
| 696 // used at all or hasn't been used since a keyframe. | |
| 697 DCHECK_NE(buf_upd_at_frame_[refs[r]], -1); | |
| 698 | |
| 699 p_diff_[l][num_ref_pics_[l]] = | |
| 700 frames_encoded_ - buf_upd_at_frame_[refs[r]]; | |
| 701 num_ref_pics_[l]++; | |
| 702 } | |
| 703 | |
| 704 if (settings.layer[l].upd_buf != -1) { | |
| 705 for (unsigned int r = 0; r < kMaxVp9RefPics; ++r) { | |
| 706 if (settings.layer[l].upd_buf == refs[r]) { | |
| 707 switch (r) { | |
| 708 case 0: { | |
| 709 layer_flags ^= VP8_EFLAG_NO_UPD_LAST; | |
| 710 break; | |
| 711 } | |
| 712 case 1: { | |
| 713 layer_flags ^= VP8_EFLAG_NO_UPD_GF; | |
| 714 break; | |
| 715 } | |
| 716 case 2: { | |
| 717 layer_flags ^= VP8_EFLAG_NO_UPD_ARF; | |
| 718 break; | |
| 719 } | |
| 720 } | |
| 721 goto done; | |
**sprang_webrtc** (2015/09/15 15:41:22): Please no goto's! Use a descriptively named temp…

**philipel** (2015/09/16 09:35:54): Done.
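A sketch of the structured replacement the reviewer asks for, swapping `goto done;` for a descriptively named flag (names such as `layer_idx` and `update_buffer_slot_used` are illustrative; the landed patch set may differ):

```cpp
// If the buffer to update is already one of the three references, reuse that
// slot's update flag; otherwise update it via ALT_REF, which requires a free
// reference slot.
bool update_buffer_slot_used = false;
for (unsigned int i = 0; i < kMaxVp9RefPics; ++i) {
  if (settings.layer[layer_idx].upd_buf == refs[i]) {
    switch (i) {
      case 0: layer_flags ^= VP8_EFLAG_NO_UPD_LAST; break;
      case 1: layer_flags ^= VP8_EFLAG_NO_UPD_GF; break;
      case 2: layer_flags ^= VP8_EFLAG_NO_UPD_ARF; break;
    }
    update_buffer_slot_used = true;
    break;
  }
}
if (!update_buffer_slot_used) {
  // With three references in use, the updated buffer must match one of them.
  CHECK_LT(num_ref_pics_[layer_idx], kMaxVp9RefPics);
  sf_conf.alt_fb_idx[layer_idx] = settings.layer[layer_idx].upd_buf;
  layer_flags ^= VP8_EFLAG_NO_UPD_ARF;
}
buf_upd_at_frame_[settings.layer[layer_idx].upd_buf] = frames_encoded_;
sf_conf.frame_flags[layer_idx] = layer_flags;
```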
| 722 } | |
| 723 } | |
| 724 // If we have three references and a buffer is specified to be updated, | |
| 725 // then that buffer must be the same as one of the three references. | |
| 726 CHECK_LT(num_ref_pics_[l], kMaxVp9RefPics); | |
| 727 | |
| 728 sf_conf.alt_fb_idx[l] = settings.layer[l].upd_buf; | |
| 729 layer_flags ^= VP8_EFLAG_NO_UPD_ARF; | |
| 730 | |
| 731 done: | |
| 732 buf_upd_at_frame_[settings.layer[l].upd_buf] = frames_encoded_; | |
| 733 sf_conf.frame_flags[l] = layer_flags; | |
| 734 } | |
| 735 } | |
| 736 } | |
| 737 frames_encoded_++; | |
| 738 return sf_conf; | |
| 739 } | |
| 740 | |
| 606 int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) { | 741 int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) { |
| 607 return WEBRTC_VIDEO_CODEC_OK; | 742 return WEBRTC_VIDEO_CODEC_OK; |
| 608 } | 743 } |
| 609 | 744 |
| 610 int VP9EncoderImpl::RegisterEncodeCompleteCallback( | 745 int VP9EncoderImpl::RegisterEncodeCompleteCallback( |
| 611 EncodedImageCallback* callback) { | 746 EncodedImageCallback* callback) { |
| 612 encoded_complete_callback_ = callback; | 747 encoded_complete_callback_ = callback; |
| 613 return WEBRTC_VIDEO_CODEC_OK; | 748 return WEBRTC_VIDEO_CODEC_OK; |
| 614 } | 749 } |
| 615 | 750 |
| (...skipping 164 matching lines...) | |
| 780 decoder_ = NULL; | 915 decoder_ = NULL; |
| 781 } | 916 } |
| 782 // Releases buffers from the pool. Any buffers not in use are deleted. Buffers | 917 // Releases buffers from the pool. Any buffers not in use are deleted. Buffers |
| 783 // still referenced externally are deleted once fully released, not returning | 918 // still referenced externally are deleted once fully released, not returning |
| 784 // to the pool. | 919 // to the pool. |
| 785 frame_buffer_pool_.ClearPool(); | 920 frame_buffer_pool_.ClearPool(); |
| 786 inited_ = false; | 921 inited_ = false; |
| 787 return WEBRTC_VIDEO_CODEC_OK; | 922 return WEBRTC_VIDEO_CODEC_OK; |
| 788 } | 923 } |
| 789 } // namespace webrtc | 924 } // namespace webrtc |