OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
| 11 #include "modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h" |
| 12 |
| 13 #include "common_video/include/video_frame.h" |
| 14 #include "common_video/include/video_frame_buffer.h" |
| 15 #include "common_video/libyuv/include/webrtc_libyuv.h" |
| 16 #include "modules/include/module_common_types.h" |
| 17 #include "rtc_base/keep_ref_until_done.h" |
| 18 #include "rtc_base/logging.h" |
| 19 |
| 20 namespace webrtc { |
| 21 |
| 22 class StereoEncoderAdapter::AdapterEncodedImageCallback |
| 23 : public webrtc::EncodedImageCallback { |
| 24 public: |
| 25 AdapterEncodedImageCallback(webrtc::StereoEncoderAdapter* adapter, |
| 26 StereoCodecStream stream_idx) |
| 27 : adapter_(adapter), stream_idx_(stream_idx) {} |
| 28 |
| 29 EncodedImageCallback::Result OnEncodedImage( |
| 30 const EncodedImage& encoded_image, |
| 31 const CodecSpecificInfo* codec_specific_info, |
| 32 const RTPFragmentationHeader* fragmentation) override { |
| 33 if (!adapter_) |
| 34 return Result(Result::OK); |
| 35 return adapter_->OnEncodedImage(stream_idx_, encoded_image, |
| 36 codec_specific_info, fragmentation); |
| 37 } |
| 38 |
| 39 private: |
| 40 StereoEncoderAdapter* adapter_; |
| 41 const StereoCodecStream stream_idx_; |
| 42 }; |
| 43 |
| 44 struct StereoEncoderAdapter::EncodedImageData { |
| 45 explicit EncodedImageData(StereoCodecStream stream_idx) |
| 46 : stream_idx_(stream_idx) { |
| 47 RTC_DCHECK_EQ(kAXXStream, stream_idx); |
| 48 encodedImage_._length = 0; |
| 49 } |
| 50 EncodedImageData(StereoCodecStream stream_idx, |
| 51 const EncodedImage& encodedImage, |
| 52 const CodecSpecificInfo* codecSpecificInfo, |
| 53 const RTPFragmentationHeader* fragmentation) |
| 54 : stream_idx_(stream_idx), |
| 55 encodedImage_(encodedImage), |
| 56 codecSpecificInfo_(*codecSpecificInfo) { |
| 57 fragmentation_.CopyFrom(*fragmentation); |
| 58 } |
| 59 const StereoCodecStream stream_idx_; |
| 60 EncodedImage encodedImage_; |
| 61 const CodecSpecificInfo codecSpecificInfo_; |
| 62 RTPFragmentationHeader fragmentation_; |
| 63 |
| 64 private: |
| 65 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EncodedImageData); |
| 66 }; |
| 67 |
// Creates an adapter that obtains its per-stream encoders from |factory|.
// The encode-complete callback stays unset until explicitly registered.
StereoEncoderAdapter::StereoEncoderAdapter(VideoEncoderFactoryEx* factory)
    : factory_(factory), encoded_complete_callback_(nullptr) {}
| 70 |
// Releases and destroys all wrapped encoders via Release().
StereoEncoderAdapter::~StereoEncoderAdapter() {
  Release();
}
| 74 |
| 75 int StereoEncoderAdapter::InitEncode(const VideoCodec* inst, |
| 76 int number_of_cores, |
| 77 size_t max_payload_size) { |
| 78 const size_t buffer_size = |
| 79 CalcBufferSize(VideoType::kI420, inst->width, inst->height); |
| 80 stereo_dummy_planes_.resize(buffer_size); |
| 81 // It is more expensive to encode 0x00, so use 0x80 instead. |
| 82 std::fill(stereo_dummy_planes_.begin(), stereo_dummy_planes_.end(), 0x80); |
| 83 |
| 84 for (size_t i = 0; i < kStereoCodecStreams; ++i) { |
| 85 VideoEncoder* encoder = factory_->Create(); |
| 86 const int rv = encoder->InitEncode(inst, number_of_cores, max_payload_size); |
| 87 if (rv) |
| 88 return rv; |
| 89 encoders_.push_back(encoder); |
| 90 adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback( |
| 91 this, static_cast<StereoCodecStream>(i))); |
| 92 encoder->RegisterEncodeCompleteCallback(adapter_callbacks_.back().get()); |
| 93 } |
| 94 return WEBRTC_VIDEO_CODEC_OK; |
| 95 } |
| 96 |
| 97 int StereoEncoderAdapter::Encode(const VideoFrame& input_image, |
| 98 const CodecSpecificInfo* codec_specific_info, |
| 99 const std::vector<FrameType>* frame_types) { |
| 100 if (!encoded_complete_callback_) { |
| 101 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 102 } |
| 103 |
| 104 // Encode AXX |
| 105 rtc::scoped_refptr<I420BufferInterface> yuva_buffer = |
| 106 input_image.video_frame_buffer()->ToI420(); |
| 107 if (yuva_buffer->HasAlpha()) { |
| 108 rtc::scoped_refptr<WrappedI420Buffer> alpha_buffer( |
| 109 new rtc::RefCountedObject<webrtc::WrappedI420Buffer>( |
| 110 input_image.width(), input_image.height(), yuva_buffer->DataA(), |
| 111 yuva_buffer->StrideA(), stereo_dummy_planes_.data(), |
| 112 yuva_buffer->StrideU(), stereo_dummy_planes_.data(), |
| 113 yuva_buffer->StrideV(), |
| 114 rtc::KeepRefUntilDone(input_image.video_frame_buffer()))); |
| 115 VideoFrame alpha_image(alpha_buffer, input_image.timestamp(), |
| 116 input_image.render_time_ms(), |
| 117 input_image.rotation()); |
| 118 encoders_[kAXXStream]->Encode(alpha_image, codec_specific_info, |
| 119 frame_types); |
| 120 frame_count_.emplace(input_image.timestamp(), 2); |
| 121 } else { |
| 122 RTC_DCHECK(frame_count_.find(input_image.timestamp()) == |
| 123 frame_count_.end()); |
| 124 frame_count_.emplace(input_image.timestamp(), 1); |
| 125 } |
| 126 |
| 127 // Encode YUV |
| 128 int rv = encoders_[kYUVStream]->Encode(input_image, codec_specific_info, |
| 129 frame_types); |
| 130 return rv; |
| 131 } |
| 132 |
// Stores the sink that will receive the combined stereo output.
// Must be called before Encode(); Encode() returns UNINITIALIZED otherwise.
int StereoEncoderAdapter::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  encoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
| 138 |
| 139 int StereoEncoderAdapter::SetChannelParameters(uint32_t packet_loss, |
| 140 int64_t rtt) { |
| 141 for (auto encoder : encoders_) { |
| 142 const int rv = encoder->SetChannelParameters(packet_loss, rtt); |
| 143 if (rv) |
| 144 return rv; |
| 145 } |
| 146 return WEBRTC_VIDEO_CODEC_OK; |
| 147 } |
| 148 |
| 149 int StereoEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate, |
| 150 uint32_t new_framerate) { |
| 151 for (auto encoder : encoders_) { |
| 152 const int rv = encoder->SetRateAllocation(bitrate, new_framerate); |
| 153 if (rv) |
| 154 return rv; |
| 155 } |
| 156 return WEBRTC_VIDEO_CODEC_OK; |
| 157 } |
| 158 |
| 159 int StereoEncoderAdapter::Release() { |
| 160 for (auto encoder : encoders_) { |
| 161 const int rv = encoder->Release(); |
| 162 if (rv) |
| 163 return rv; |
| 164 factory_->Destroy(encoder); |
| 165 } |
| 166 encoders_.clear(); |
| 167 adapter_callbacks_.clear(); |
| 168 return WEBRTC_VIDEO_CODEC_OK; |
| 169 } |
| 170 |
| 171 EncodedImageCallback::Result StereoEncoderAdapter::OnEncodedImage( |
| 172 StereoCodecStream stream_idx, |
| 173 const EncodedImage& encodedImage, |
| 174 const CodecSpecificInfo* codecSpecificInfo, |
| 175 const RTPFragmentationHeader* fragmentation) { |
| 176 const auto& frame_count_object = |
| 177 frame_count_.find(encodedImage._timeStamp); |
| 178 |
| 179 // If the timestamp has already be deleted, this means the frame |
| 180 // arrives later than its future frame, but we still send it out |
| 181 // not to break the frame dependence chain on the receiver side. |
| 182 int frame_count = frame_count_object != frame_count_.end() |
| 183 ? frame_count_object->second |
| 184 : kStereoCodecStreams; |
| 185 |
| 186 frame_count_.erase(frame_count_.begin(),frame_count_object); |
| 187 |
| 188 CodecSpecificInfo* yuv_codec = |
| 189 const_cast<CodecSpecificInfo*>(codecSpecificInfo); |
| 190 yuv_codec->codecType = kVideoCodecStereo; |
| 191 yuv_codec->codec_name = "stereo-vp9"; |
| 192 yuv_codec->stereoInfo.stereoCodecType = kVideoCodecVP9; |
| 193 yuv_codec->stereoInfo.frameIndex = stream_idx; |
| 194 yuv_codec->stereoInfo.frameCount = frame_count; |
| 195 yuv_codec->stereoInfo.pictureIndex = ++picture_index_; |
| 196 encoded_complete_callback_->OnEncodedImage(encodedImage, yuv_codec, |
| 197 fragmentation); |
| 198 return EncodedImageCallback::Result(EncodedImageCallback::Result::OK); |
| 199 } |
| 200 |
| 201 } // namespace webrtc |
OLD | NEW |