| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 * | 9 * |
| 10 */ | 10 */ |
| (...skipping 414 matching lines...) |
| 425 const std::vector<FrameType>* frame_types) { | 425 const std::vector<FrameType>* frame_types) { |
| 426 if (!inited_) { | 426 if (!inited_) { |
| 427 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 427 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 428 } | 428 } |
| 429 if (input_image.IsZeroSize()) { | 429 if (input_image.IsZeroSize()) { |
| 430 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 430 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 431 } | 431 } |
| 432 if (encoded_complete_callback_ == NULL) { | 432 if (encoded_complete_callback_ == NULL) { |
| 433 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 433 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 434 } | 434 } |
| 435 FrameType frame_type = kDeltaFrame; | 435 FrameType frame_type = kVideoFrameDelta; |
| 436 // We only support one stream at the moment. | 436 // We only support one stream at the moment. |
| 437 if (frame_types && frame_types->size() > 0) { | 437 if (frame_types && frame_types->size() > 0) { |
| 438 frame_type = (*frame_types)[0]; | 438 frame_type = (*frame_types)[0]; |
| 439 } | 439 } |
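
The only substantive change in this hunk is the spelling of the FrameType enumerators: kKeyFrame and kDeltaFrame become kVideoFrameKey and kVideoFrameDelta, making it explicit that the enum is shared with audio frame types. For context, a sketch of the enum's shape after the rename (the audio members and numeric values are recalled from common_types.h and should be treated as assumptions, not part of this CL):

    // Shared FrameType enum after the rename (sketch).
    enum FrameType {
      kEmptyFrame = 0,
      kAudioFrameSpeech = 1,
      kAudioFrameCN = 2,
      kVideoFrameKey = 3,    // previously kKeyFrame
      kVideoFrameDelta = 4,  // previously kDeltaFrame
    };
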
| 440 RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w)); | 440 RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w)); |
| 441 RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h)); | 441 RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h)); |
| 442 | 442 |
| 443 // Set input image for use in the callback. | 443 // Set input image for use in the callback. |
| 444 // This was necessary since you need some information from input_image. | 444 // This was necessary since you need some information from input_image. |
| 445 // You can save only the necessary information (such as timestamp) instead of | 445 // You can save only the necessary information (such as timestamp) instead of |
| 446 // doing this. | 446 // doing this. |
| 447 input_image_ = &input_image; | 447 input_image_ = &input_image; |
| 448 | 448 |
| 449 // Image in vpx_image_t format. | 449 // Image in vpx_image_t format. |
| 450 // Input image is const. VPX's raw image is not defined as const. | 450 // Input image is const. VPX's raw image is not defined as const. |
| 451 raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane)); | 451 raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane)); |
| 452 raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane)); | 452 raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane)); |
| 453 raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane)); | 453 raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane)); |
| 454 raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane); | 454 raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane); |
| 455 raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane); | 455 raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane); |
| 456 raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane); | 456 raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane); |
| 457 | 457 |
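
Hooking the input frame into libvpx here is zero-copy: only the plane pointers and strides are redirected at the caller's I420 buffer, so that buffer must stay alive until vpx_codec_encode() returns. A self-contained sketch of the same idea (all parameter names are illustrative assumptions):

    #include <stdint.h>
    #include "vpx/vpx_image.h"

    // Zero-copy hookup of an existing I420 frame into a vpx_image_t.
    void WrapI420(vpx_image_t* raw, uint8_t* y, uint8_t* u, uint8_t* v,
                  int y_stride, int uv_stride, int width, int height) {
      // vpx_img_wrap() fills in the geometry; passing a valid pointer as
      // img_data keeps libvpx from allocating. The plane pointers and
      // strides are then redirected at the caller's memory.
      vpx_img_wrap(raw, VPX_IMG_FMT_I420, width, height, /*stride_align=*/1, y);
      raw->planes[VPX_PLANE_Y] = y;
      raw->planes[VPX_PLANE_U] = u;
      raw->planes[VPX_PLANE_V] = v;
      raw->stride[VPX_PLANE_Y] = y_stride;
      raw->stride[VPX_PLANE_U] = uv_stride;
      raw->stride[VPX_PLANE_V] = uv_stride;
    }
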
| 458 int flags = 0; | 458 int flags = 0; |
| 459 bool send_keyframe = (frame_type == kKeyFrame); | 459 bool send_keyframe = (frame_type == kVideoFrameKey); |
| 460 if (send_keyframe) { | 460 if (send_keyframe) { |
| 461 // Key frame request from caller. | 461 // Key frame request from caller. |
| 462 flags = VPX_EFLAG_FORCE_KF; | 462 flags = VPX_EFLAG_FORCE_KF; |
| 463 } | 463 } |
| 464 assert(codec_.maxFramerate > 0); | 464 assert(codec_.maxFramerate > 0); |
| 465 uint32_t duration = 90000 / codec_.maxFramerate; | 465 uint32_t duration = 90000 / codec_.maxFramerate; |
| 466 if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags, | 466 if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags, |
| 467 VPX_DL_REALTIME)) { | 467 VPX_DL_REALTIME)) { |
| 468 return WEBRTC_VIDEO_CODEC_ERROR; | 468 return WEBRTC_VIDEO_CODEC_ERROR; |
| 469 } | 469 } |
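
Two details in this hunk are easy to miss: the encoder runs on the 90 kHz RTP video clock, so each frame spans 90000 / maxFramerate ticks (3000 at 30 fps, integer division), and a caller-requested key frame is expressed solely through the VPX_EFLAG_FORCE_KF encode flag. A condensed sketch of the call sequence (variable names are assumptions):

    // 90 kHz RTP clock: at 30 fps each frame spans 90000 / 30 = 3000 ticks.
    const uint32_t duration = 90000 / max_framerate;
    const vpx_enc_frame_flags_t flags =
        want_key_frame ? VPX_EFLAG_FORCE_KF : 0;
    if (vpx_codec_encode(encoder, raw, timestamp, duration, flags,
                         VPX_DL_REALTIME) != VPX_CODEC_OK) {
      return WEBRTC_VIDEO_CODEC_ERROR;  // libvpx rejected the frame
    }
    timestamp += duration;  // presentation time of the next frame
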
| (...skipping 83 matching lines...) |
| 553 svc_internal_.svc_params.scaling_factor_den[i]; | 553 svc_internal_.svc_params.scaling_factor_den[i]; |
| 554 } | 554 } |
| 555 if (!vp9_info->flexible_mode) { | 555 if (!vp9_info->flexible_mode) { |
| 556 vp9_info->gof.CopyGofInfoVP9(gof_); | 556 vp9_info->gof.CopyGofInfoVP9(gof_); |
| 557 } | 557 } |
| 558 } | 558 } |
| 559 } | 559 } |
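
When flexible mode is off, the VP9 payload descriptor carries a predefined group-of-frames (GOF) structure so the receiver can infer inter-frame dependencies without per-frame reference indices; gof_ is configured once at init time and copied into each codec-specific header here. Roughly (a sketch; the mode enumerator is recalled from this file's init path and is an assumption):

    // Fixed temporal dependency pattern chosen at InitEncode() time, e.g.
    // two temporal layers alternating T0/T1. It is copied into each frame's
    // VP9 header via CopyGofInfoVP9() until the structure changes.
    GofInfoVP9 gof;
    gof.SetGofInfoVP9(kTemporalStructureMode2);  // T0, T1, T0, T1, ...
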
| 560 | 560 |
| 561 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { | 561 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { |
| 562 encoded_image_._length = 0; | 562 encoded_image_._length = 0; |
| 563 encoded_image_._frameType = kDeltaFrame; | 563 encoded_image_._frameType = kVideoFrameDelta; |
| 564 RTPFragmentationHeader frag_info; | 564 RTPFragmentationHeader frag_info; |
| 565 // Note: no data partitioning in VP9, so 1 partition only. We keep this | 565 // Note: no data partitioning in VP9, so 1 partition only. We keep this |
| 566 // fragmentation data for now, until VP9 packetizer is implemented. | 566 // fragmentation data for now, until VP9 packetizer is implemented. |
| 567 frag_info.VerifyAndAllocateFragmentationHeader(1); | 567 frag_info.VerifyAndAllocateFragmentationHeader(1); |
| 568 int part_idx = 0; | 568 int part_idx = 0; |
| 569 CodecSpecificInfo codec_specific; | 569 CodecSpecificInfo codec_specific; |
| 570 | 570 |
| 571 assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT); | 571 assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT); |
| 572 memcpy(&encoded_image_._buffer[encoded_image_._length], pkt->data.frame.buf, | 572 memcpy(&encoded_image_._buffer[encoded_image_._length], pkt->data.frame.buf, |
| 573 pkt->data.frame.sz); | 573 pkt->data.frame.sz); |
| 574 frag_info.fragmentationOffset[part_idx] = encoded_image_._length; | 574 frag_info.fragmentationOffset[part_idx] = encoded_image_._length; |
| 575 frag_info.fragmentationLength[part_idx] = | 575 frag_info.fragmentationLength[part_idx] = |
| 576 static_cast<uint32_t>(pkt->data.frame.sz); | 576 static_cast<uint32_t>(pkt->data.frame.sz); |
| 577 frag_info.fragmentationPlType[part_idx] = 0; | 577 frag_info.fragmentationPlType[part_idx] = 0; |
| 578 frag_info.fragmentationTimeDiff[part_idx] = 0; | 578 frag_info.fragmentationTimeDiff[part_idx] = 0; |
| 579 encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz); | 579 encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz); |
| 580 assert(encoded_image_._length <= encoded_image_._size); | 580 assert(encoded_image_._length <= encoded_image_._size); |
| 581 | 581 |
| 582 // End of frame. | 582 // End of frame. |
| 583 // Check if encoded frame is a key frame. | 583 // Check if encoded frame is a key frame. |
| 584 if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) { | 584 if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) { |
| 585 encoded_image_._frameType = kKeyFrame; | 585 encoded_image_._frameType = kVideoFrameKey; |
| 586 } | 586 } |
| 587 PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp()); | 587 PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp()); |
| 588 | 588 |
| 589 if (encoded_image_._length > 0) { | 589 if (encoded_image_._length > 0) { |
| 590 TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length); | 590 TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length); |
| 591 encoded_image_._timeStamp = input_image_->timestamp(); | 591 encoded_image_._timeStamp = input_image_->timestamp(); |
| 592 encoded_image_.capture_time_ms_ = input_image_->render_time_ms(); | 592 encoded_image_.capture_time_ms_ = input_image_->render_time_ms(); |
| 593 encoded_image_._encodedHeight = raw_->d_h; | 593 encoded_image_._encodedHeight = raw_->d_h; |
| 594 encoded_image_._encodedWidth = raw_->d_w; | 594 encoded_image_._encodedWidth = raw_->d_w; |
| 595 encoded_complete_callback_->Encoded(encoded_image_, &codec_specific, | 595 encoded_complete_callback_->Encoded(encoded_image_, &codec_specific, |
| (...skipping 85 matching lines...) |
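
For context, GetEncodedLayerFrame() consumes exactly one compressed-frame packet; after each vpx_codec_encode() call the output queue is drained with libvpx's iterator API. A condensed sketch of that loop (not the literal elided code):

    // Drain every pending output packet; only compressed-frame packets are
    // forwarded, other kinds (e.g. stats) are ignored in this sketch.
    vpx_codec_iter_t iter = NULL;
    const vpx_codec_cx_pkt_t* pkt;
    while ((pkt = vpx_codec_get_cx_data(encoder, &iter)) != NULL) {
      if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
        GetEncodedLayerFrame(pkt);
      }
    }
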
| 681 const CodecSpecificInfo* codec_specific_info, | 681 const CodecSpecificInfo* codec_specific_info, |
| 682 int64_t /*render_time_ms*/) { | 682 int64_t /*render_time_ms*/) { |
| 683 if (!inited_) { | 683 if (!inited_) { |
| 684 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 684 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 685 } | 685 } |
| 686 if (decode_complete_callback_ == NULL) { | 686 if (decode_complete_callback_ == NULL) { |
| 687 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 687 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 688 } | 688 } |
| 689 // Always start with a complete key frame. | 689 // Always start with a complete key frame. |
| 690 if (key_frame_required_) { | 690 if (key_frame_required_) { |
| 691 if (input_image._frameType != kKeyFrame) | 691 if (input_image._frameType != kVideoFrameKey) |
| 692 return WEBRTC_VIDEO_CODEC_ERROR; | 692 return WEBRTC_VIDEO_CODEC_ERROR; |
| 693 // We have a key frame - is it complete? | 693 // We have a key frame - is it complete? |
| 694 if (input_image._completeFrame) { | 694 if (input_image._completeFrame) { |
| 695 key_frame_required_ = false; | 695 key_frame_required_ = false; |
| 696 } else { | 696 } else { |
| 697 return WEBRTC_VIDEO_CODEC_ERROR; | 697 return WEBRTC_VIDEO_CODEC_ERROR; |
| 698 } | 698 } |
| 699 } | 699 } |
| 700 vpx_codec_iter_t iter = NULL; | 700 vpx_codec_iter_t iter = NULL; |
| 701 vpx_image_t* img; | 701 vpx_image_t* img; |
| (...skipping 73 matching lines...) |
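
Past the key-frame gate, decoding follows the standard libvpx pattern: one vpx_codec_decode() call per compressed frame, then an iterator drain of decoded images. A condensed sketch (error paths trimmed; the hand-off function is hypothetical):

    vpx_codec_iter_t iter = NULL;
    // Deadline 0 lets the decoder take as long as it needs.
    if (vpx_codec_decode(decoder, buffer, buffer_size, /*user_priv=*/NULL,
                         /*deadline=*/0) != VPX_CODEC_OK) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    // VP9 yields at most one displayable image per compressed frame.
    if (vpx_image_t* img = vpx_codec_get_frame(decoder, &iter)) {
      DeliverDecodedFrame(img);  // hypothetical hand-off to the callback
    }
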
| 775 decoder_ = NULL; | 775 decoder_ = NULL; |
| 776 } | 776 } |
| 777 // Releases buffers from the pool. Any buffers not in use are deleted. Buffers | 777 // Releases buffers from the pool. Any buffers not in use are deleted. Buffers |
| 778 // still referenced externally are deleted once fully released, not returning | 778 // still referenced externally are deleted once fully released, not returning |
| 779 // to the pool. | 779 // to the pool. |
| 780 frame_buffer_pool_.ClearPool(); | 780 frame_buffer_pool_.ClearPool(); |
| 781 inited_ = false; | 781 inited_ = false; |
| 782 return WEBRTC_VIDEO_CODEC_OK; | 782 return WEBRTC_VIDEO_CODEC_OK; |
| 783 } | 783 } |
| 784 } // namespace webrtc | 784 } // namespace webrtc |
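
The deferred-free behavior described in the comment above comes from libvpx's external frame-buffer API: the pool registers get/release callbacks with the decoder, and a pooled buffer is only truly freed once libvpx invokes the release hook. A sketch of the registration (callback names are assumptions):

    #include "vpx/vpx_frame_buffer.h"

    // Registered once after vpx_codec_dec_init(); from then on the decoder
    // borrows buffers from the pool and returns them via the release hook.
    vpx_codec_set_frame_buffer_functions(decoder, &PoolGetFrameBuffer,
                                         &PoolReleaseFrameBuffer, &pool);
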