| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/video_coding/generic_encoder.h" | 11 #include "webrtc/modules/video_coding/generic_encoder.h" |
| 12 | 12 |
| 13 #include <vector> | 13 #include <vector> |
| 14 | 14 |
| 15 #include "webrtc/base/checks.h" | 15 #include "webrtc/base/checks.h" |
| 16 #include "webrtc/base/logging.h" | 16 #include "webrtc/base/logging.h" |
| 17 #include "webrtc/base/trace_event.h" | 17 #include "webrtc/base/trace_event.h" |
| 18 #include "webrtc/engine_configurations.h" | 18 #include "webrtc/engine_configurations.h" |
| 19 #include "webrtc/modules/video_coding/encoded_frame.h" | 19 #include "webrtc/modules/video_coding/encoded_frame.h" |
| 20 #include "webrtc/modules/video_coding/media_optimization.h" | 20 #include "webrtc/modules/video_coding/media_optimization.h" |
| 21 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 21 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
| 22 | 22 |
| 23 namespace webrtc { | 23 namespace webrtc { |
| 24 namespace { |
| 25 // Map information from info into rtp. If no relevant information is found |
| 26 // in info, rtp is left unchanged. |
| 27 void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) { |
| 28 RTC_DCHECK(info); |
| 29 switch (info->codecType) { |
| 30 case kVideoCodecVP8: { |
| 31 rtp->codec = kRtpVideoVp8; |
| 32 rtp->codecHeader.VP8.InitRTPVideoHeaderVP8(); |
| 33 rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId; |
| 34 rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference; |
| 35 rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx; |
| 36 rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync; |
| 37 rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx; |
| 38 rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx; |
| 39 rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx; |
| 40 return; |
| 41 } |
| 42 case kVideoCodecVP9: { |
| 43 rtp->codec = kRtpVideoVp9; |
| 44 rtp->codecHeader.VP9.InitRTPVideoHeaderVP9(); |
| 45 rtp->codecHeader.VP9.inter_pic_predicted = |
| 46 info->codecSpecific.VP9.inter_pic_predicted; |
| 47 rtp->codecHeader.VP9.flexible_mode = |
| 48 info->codecSpecific.VP9.flexible_mode; |
| 49 rtp->codecHeader.VP9.ss_data_available = |
| 50 info->codecSpecific.VP9.ss_data_available; |
| 51 rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id; |
| 52 rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx; |
| 53 rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx; |
| 54 rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx; |
| 55 rtp->codecHeader.VP9.temporal_up_switch = |
| 56 info->codecSpecific.VP9.temporal_up_switch; |
| 57 rtp->codecHeader.VP9.inter_layer_predicted = |
| 58 info->codecSpecific.VP9.inter_layer_predicted; |
| 59 rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx; |
| 60 rtp->codecHeader.VP9.num_spatial_layers = |
| 61 info->codecSpecific.VP9.num_spatial_layers; |
| 62 |
| 63 if (info->codecSpecific.VP9.ss_data_available) { |
| 64 rtp->codecHeader.VP9.spatial_layer_resolution_present = |
| 65 info->codecSpecific.VP9.spatial_layer_resolution_present; |
| 66 if (info->codecSpecific.VP9.spatial_layer_resolution_present) { |
| 67 for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers; |
| 68 ++i) { |
| 69 rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i]; |
| 70 rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i]; |
| 71 } |
| 72 } |
| 73 rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof); |
| 74 } |
| 75 |
| 76 rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics; |
| 77 for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i) |
| 78 rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i]; |
| 79 return; |
| 80 } |
| 81 case kVideoCodecH264: |
| 82 rtp->codec = kRtpVideoH264; |
| 83 return; |
| 84 case kVideoCodecGeneric: |
| 85 rtp->codec = kRtpVideoGeneric; |
| 86 rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx; |
| 87 return; |
| 88 default: |
| 89 return; |
| 90 } |
| 91 } |
| 92 } // namespace |
| 93 |
| 24 VCMGenericEncoder::VCMGenericEncoder( | 94 VCMGenericEncoder::VCMGenericEncoder( |
| 25 VideoEncoder* encoder, | 95 VideoEncoder* encoder, |
| 26 VideoEncoderRateObserver* rate_observer, | 96 VideoEncoderRateObserver* rate_observer, |
| 27 VCMEncodedFrameCallback* encoded_frame_callback, | 97 VCMEncodedFrameCallback* encoded_frame_callback, |
| 28 bool internal_source) | 98 bool internal_source) |
| 29 : encoder_(encoder), | 99 : encoder_(encoder), |
| 30 rate_observer_(rate_observer), | 100 rate_observer_(rate_observer), |
| 31 vcm_encoded_frame_callback_(encoded_frame_callback), | 101 vcm_encoded_frame_callback_(encoded_frame_callback), |
| 32 internal_source_(internal_source), | 102 internal_source_(internal_source), |
| 33 encoder_params_({0, 0, 0, 0}), | 103 encoder_params_({0, 0, 0, 0}), |
| (...skipping 105 matching lines...) |
| 139 } | 209 } |
| 140 | 210 |
| 141 int VCMGenericEncoder::GetTargetFramerate() { | 211 int VCMGenericEncoder::GetTargetFramerate() { |
| 142 return encoder_->GetTargetFramerate(); | 212 return encoder_->GetTargetFramerate(); |
| 143 } | 213 } |
| 144 | 214 |
| 145 VCMEncodedFrameCallback::VCMEncodedFrameCallback( | 215 VCMEncodedFrameCallback::VCMEncodedFrameCallback( |
| 146 EncodedImageCallback* post_encode_callback) | 216 EncodedImageCallback* post_encode_callback) |
| 147 : send_callback_(), | 217 : send_callback_(), |
| 148 media_opt_(nullptr), | 218 media_opt_(nullptr), |
| 219 payload_type_(0), |
| 149 internal_source_(false), | 220 internal_source_(false), |
| 150 post_encode_callback_(post_encode_callback) {} | 221 post_encode_callback_(post_encode_callback) {} |
| 151 | 222 |
| 152 VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {} | 223 VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {} |
| 153 | 224 |
| 154 int32_t VCMEncodedFrameCallback::SetTransportCallback( | 225 int32_t VCMEncodedFrameCallback::SetTransportCallback( |
| 155 VCMPacketizationCallback* transport) { | 226 VCMPacketizationCallback* transport) { |
| 156 send_callback_ = transport; | 227 send_callback_ = transport; |
| 157 return VCM_OK; | 228 return VCM_OK; |
| 158 } | 229 } |
| 159 | 230 |
| 160 int32_t VCMEncodedFrameCallback::Encoded( | 231 int32_t VCMEncodedFrameCallback::Encoded( |
| 161 const EncodedImage& encoded_image, | 232 const EncodedImage& encoded_image, |
| 162 const CodecSpecificInfo* codec_specific, | 233 const CodecSpecificInfo* codec_specific, |
| 163 const RTPFragmentationHeader* fragmentation_header) { | 234 const RTPFragmentationHeader* fragmentation_header) { |
| 164 TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded", | 235 TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded", |
| 165 "timestamp", encoded_image._timeStamp); | 236 "timestamp", encoded_image._timeStamp); |
| 166 int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific, | 237 post_encode_callback_->Encoded(encoded_image, nullptr, nullptr); |
| 167 fragmentation_header); | 238 |
| 239 if (send_callback_ == nullptr) |
| 240 return VCM_UNINITIALIZED; |
| 241 |
| 242 RTPVideoHeader rtp_video_header; |
| 243 memset(&rtp_video_header, 0, sizeof(RTPVideoHeader)); |
| 244 if (codec_specific) |
| 245 CopyCodecSpecific(codec_specific, &rtp_video_header); |
| 246 rtp_video_header.rotation = encoded_image.rotation_; |
| 247 |
| 248 int32_t ret_val = send_callback_->SendData( |
| 249 payload_type_, encoded_image, fragmentation_header, &rtp_video_header); |
| 168 if (ret_val < 0) | 250 if (ret_val < 0) |
| 169 return ret_val; | 251 return ret_val; |
| 170 | 252 |
| 171 if (media_opt_) { | 253 if (media_opt_) { |
| 172 media_opt_->UpdateWithEncodedData(encoded_image); | 254 media_opt_->UpdateWithEncodedData(encoded_image); |
| 173 if (internal_source_) | 255 if (internal_source_) |
| 174 return media_opt_->DropFrame(); // Signal to encoder to drop next frame. | 256 return media_opt_->DropFrame(); // Signal to encoder to drop next frame. |
| 175 } | 257 } |
| 176 return VCM_OK; | 258 return VCM_OK; |
| 177 } | 259 } |
| 178 | 260 |
| 179 void VCMEncodedFrameCallback::SetMediaOpt( | 261 void VCMEncodedFrameCallback::SetMediaOpt( |
| 180 media_optimization::MediaOptimization* mediaOpt) { | 262 media_optimization::MediaOptimization* mediaOpt) { |
| 181 media_opt_ = mediaOpt; | 263 media_opt_ = mediaOpt; |
| 182 } | 264 } |
| 183 | 265 |
| 184 void VCMEncodedFrameCallback::SignalLastEncoderImplementationUsed( | 266 void VCMEncodedFrameCallback::SignalLastEncoderImplementationUsed( |
| 185 const char* implementation_name) { | 267 const char* implementation_name) { |
| 186 if (send_callback_) | 268 if (send_callback_) |
| 187 send_callback_->OnEncoderImplementationName(implementation_name); | 269 send_callback_->OnEncoderImplementationName(implementation_name); |
| 188 } | 270 } |
| 189 } // namespace webrtc | 271 } // namespace webrtc |
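Note on the behavioural change above: VCMEncodedFrameCallback::Encoded() no longer forwards the fragmentation header to the post-encode callback; it now builds the RTPVideoHeader itself (via the new CopyCodecSpecific() helper) and hands the frame to the registered VCMPacketizationCallback via SendData(). The sketch below is a minimal test double illustrating what such a callback would now receive. It is illustrative only: the SendData() signature is inferred from the call site in this patch, the include path, the class name FakePacketizer, and its recorded fields are assumptions, and any other pure-virtual members of the interface are omitted.

// Sketch of a minimal VCMPacketizationCallback test double. Signature and
// include path are assumptions inferred from the call sites in this patch.
#include "webrtc/modules/video_coding/include/video_coding_defines.h"  // path assumed

class FakePacketizer : public webrtc::VCMPacketizationCallback {
 public:
  int32_t SendData(uint8_t payload_type,
                   const webrtc::EncodedImage& encoded_image,
                   const webrtc::RTPFragmentationHeader* fragmentation,
                   const webrtc::RTPVideoHeader* rtp_video_header) override {
    // Encoded() now fills in the codec-specific header and the rotation
    // before calling SendData(), so they can be inspected here.
    last_payload_type_ = payload_type;
    if (rtp_video_header) {
      last_codec_ = rtp_video_header->codec;
      last_rotation_ = rtp_video_header->rotation;
    }
    ++frames_sent_;
    return 0;
  }

  void OnEncoderImplementationName(const char* implementation_name) override {}

  uint8_t last_payload_type_ = 0;
  webrtc::RtpVideoCodecTypes last_codec_ = webrtc::kRtpVideoNone;
  webrtc::VideoRotation last_rotation_ = webrtc::kVideoRotation_0;
  int frames_sent_ = 0;
};

A double like this makes it easy to assert, for example, that a VP8 frame arrives with rtp_video_header->codec == kRtpVideoVp8 and with the rotation propagated from the EncodedImage.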