| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 198 matching lines...) |
| 209 timing_frames_info_[simulcast_svc_idx].encode_start_time_ms[capture_time_ms] = | 209 timing_frames_info_[simulcast_svc_idx].encode_start_time_ms[capture_time_ms] = |
| 210 rtc::TimeMillis(); | 210 rtc::TimeMillis(); |
| 211 } | 211 } |
| 212 | 212 |
| 213 EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage( | 213 EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage( |
| 214 const EncodedImage& encoded_image, | 214 const EncodedImage& encoded_image, |
| 215 const CodecSpecificInfo* codec_specific, | 215 const CodecSpecificInfo* codec_specific, |
| 216 const RTPFragmentationHeader* fragmentation_header) { | 216 const RTPFragmentationHeader* fragmentation_header) { |
| 217 TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded", | 217 TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded", |
| 218 "timestamp", encoded_image._timeStamp); | 218 "timestamp", encoded_image._timeStamp); |
| 219 bool is_timing_frame = false; |
| 220 size_t outlier_frame_size = 0; |
| 221 int64_t encode_start_ms = -1; |
| 219 size_t simulcast_svc_idx = 0; | 222 size_t simulcast_svc_idx = 0; |
| 220 if (codec_specific->codecType == kVideoCodecVP9) { | 223 if (codec_specific->codecType == kVideoCodecVP9) { |
| 221 if (codec_specific->codecSpecific.VP9.num_spatial_layers > 1) | 224 if (codec_specific->codecSpecific.VP9.num_spatial_layers > 1) |
| 222 simulcast_svc_idx = codec_specific->codecSpecific.VP9.spatial_idx; | 225 simulcast_svc_idx = codec_specific->codecSpecific.VP9.spatial_idx; |
| 223 } else if (codec_specific->codecType == kVideoCodecVP8) { | 226 } else if (codec_specific->codecType == kVideoCodecVP8) { |
| 224 simulcast_svc_idx = codec_specific->codecSpecific.VP8.simulcastIdx; | 227 simulcast_svc_idx = codec_specific->codecSpecific.VP8.simulcastIdx; |
| 225 } else if (codec_specific->codecType == kVideoCodecGeneric) { | 228 } else if (codec_specific->codecType == kVideoCodecGeneric) { |
| 226 simulcast_svc_idx = codec_specific->codecSpecific.generic.simulcast_idx; | 229 simulcast_svc_idx = codec_specific->codecSpecific.generic.simulcast_idx; |
| 227 } else if (codec_specific->codecType == kVideoCodecH264) { | 230 } else if (codec_specific->codecType == kVideoCodecH264) { |
| 228 // TODO(ilnik): When h264 simulcast is landed, extract simulcast idx here. | 231 // TODO(ilnik): When h264 simulcast is landed, extract simulcast idx here. |
| 229 } | 232 } |
| 230 | 233 |
| 231 // Larger than the current frame size, so it will not trigger a timing frame as | |
| 232 // an outlier. If the encoder didn't call OnFramerateChanged, it will be used. | |
| 233 size_t outlier_frame_size = encoded_image._length + 1; | |
| 234 // Default best guess. If the encoder didn't call OnEncodeStarted, it will be used. | |
| 235 int64_t encode_start_ms = encoded_image.capture_time_ms_; | |
| 236 bool is_timing_frame = false; | |
| 237 { | 234 { |
| 238 rtc::CritScope crit(&timing_params_lock_); | 235 rtc::CritScope crit(&timing_params_lock_); |
| 236 // TODO(ilnik): Workaround for hardware encoders, which do not call |
| 237 // |OnEncodeStarted| correctly. Once fixed, remove conditional check. |
| 238 if (simulcast_svc_idx < timing_frames_info_.size()) { |
| 239 RTC_CHECK_LT(simulcast_svc_idx, timing_frames_info_.size()); |
| 239 | 240 |
| 240 // Encoders with internal sources do not call OnEncodeStarted and | |
| 241 // OnFrameRateChanged. |timing_frames_info_| may not be filled here. | |
| 242 if (simulcast_svc_idx < timing_frames_info_.size()) { | |
| 243 auto encode_start_map = | 241 auto encode_start_map = |
| 244 &timing_frames_info_[simulcast_svc_idx].encode_start_time_ms; | 242 &timing_frames_info_[simulcast_svc_idx].encode_start_time_ms; |
| 245 auto it = encode_start_map->find(encoded_image.capture_time_ms_); | 243 auto it = encode_start_map->find(encoded_image.capture_time_ms_); |
| 246 if (it != encode_start_map->end()) { | 244 if (it != encode_start_map->end()) { |
| 247 encode_start_ms = it->second; | 245 encode_start_ms = it->second; |
| 248 // Assuming all encoders do not reorder frames within a single stream, | 246 // Assuming all encoders do not reorder frames within a single stream, |
| 249 // there may be some dropped frames with smaller timestamps. These | 247 // there may be some dropped frames with smaller timestamps. These |
| 250 // should be purged. | 248 // should be purged. |
| 251 encode_start_map->erase(encode_start_map->begin(), it); | 249 encode_start_map->erase(encode_start_map->begin(), it); |
| 252 encode_start_map->erase(it); | 250 encode_start_map->erase(it); |
| 253 } else { | 251 } else { |
| 254 // Encoder has an internal source: free our records of any frames just | 252 // Some chromium remoting unittests use the generic encoder incorrectly. |
| 255 // in case, to free memory. | 253 // If timestamps do not match, purge them all. |
| 256 encode_start_map->clear(); | 254 encode_start_map->erase(encode_start_map->begin(), |
| 255 encode_start_map->end()); |
| 257 } | 256 } |
| 258 | 257 |
| 259 size_t target_bitrate = | 258 int64_t timing_frame_delay_ms = |
| 260 timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec; | 259 encoded_image.capture_time_ms_ - last_timing_frame_time_ms_; |
| 261 if (framerate_ > 0 && target_bitrate > 0) { | 260 if (last_timing_frame_time_ms_ == -1 || |
| 262 // framerate and target bitrate were reported by encoder. | 261 timing_frame_delay_ms >= timing_frames_thresholds_.delay_ms || |
| 263 size_t average_frame_size = target_bitrate / framerate_; | 262 timing_frame_delay_ms == 0) { |
| 263 is_timing_frame = true; |
| 264 last_timing_frame_time_ms_ = encoded_image.capture_time_ms_; |
| 265 } |
| 266 // TODO(ilnik): Once OnFramerateChanged is called correctly by hardware |
| 267 // encoders, remove the conditional check below. |
| 268 if (framerate_ > 0) { |
| 269 RTC_CHECK_GT(framerate_, 0); |
| 270 size_t average_frame_size = |
| 271 timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec |
| 272 / framerate_; |
| 264 outlier_frame_size = average_frame_size * | 273 outlier_frame_size = average_frame_size * |
| 265 timing_frames_thresholds_.outlier_ratio_percent / | 274 timing_frames_thresholds_.outlier_ratio_percent / |
| 266 100; | 275 100; |
| 276 } else { |
| 277 outlier_frame_size = encoded_image._length + 1; |
| 267 } | 278 } |
| 268 } | 279 } else { |
| 269 | 280 // We don't have any information prior to encode start, thus we can't |
| 270 // Check if it's time to send a timing frame. | 281 // reliably detect outliers. Set outlier size to anything larger than |
| 271 int64_t timing_frame_delay_ms = | 282 // the current frame size. |
| 272 encoded_image.capture_time_ms_ - last_timing_frame_time_ms_; | 283 outlier_frame_size = encoded_image._length + 1; |
| 273 // Trigger the threshold if it's the first frame, too long has passed since the | |
| 274 // last timing frame, or we already sent a timing frame on a different simulcast | |
| 275 // stream with the same capture time. | |
| 276 if (last_timing_frame_time_ms_ == -1 || | |
| 277 timing_frame_delay_ms >= timing_frames_thresholds_.delay_ms || | |
| 278 timing_frame_delay_ms == 0) { | |
| 279 is_timing_frame = true; | |
| 280 last_timing_frame_time_ms_ = encoded_image.capture_time_ms_; | |
| 281 } | |
| 282 | |
| 283 // Outliers trigger timing frames, but do not affect scheduled timing | |
| 284 // frames. | |
| 285 if (encoded_image._length >= outlier_frame_size) { | |
| 286 is_timing_frame = true; | |
| 287 } | 284 } |
| 288 } | 285 } |
| 289 | 286 |
| 290 if (is_timing_frame) { | 287 if (encoded_image._length >= outlier_frame_size) { |
| 288 is_timing_frame = true; |
| 289 } |
| 290 if (encode_start_ms >= 0 && is_timing_frame) { |
| 291 encoded_image.SetEncodeTime(encode_start_ms, rtc::TimeMillis()); | 291 encoded_image.SetEncodeTime(encode_start_ms, rtc::TimeMillis()); |
| 292 } | 292 } |
| 293 | 293 |
| 294 Result result = post_encode_callback_->OnEncodedImage( | 294 Result result = post_encode_callback_->OnEncodedImage( |
| 295 encoded_image, codec_specific, fragmentation_header); | 295 encoded_image, codec_specific, fragmentation_header); |
| 296 if (result.error != Result::OK) | 296 if (result.error != Result::OK) |
| 297 return result; | 297 return result; |
| 298 | 298 |
| 299 if (media_opt_) { | 299 if (media_opt_) { |
| 300 media_opt_->UpdateWithEncodedData(encoded_image); | 300 media_opt_->UpdateWithEncodedData(encoded_image); |
| 301 if (internal_source_) { | 301 if (internal_source_) { |
| 302 // Signal to encoder to drop next frame. | 302 // Signal to encoder to drop next frame. |
| 303 result.drop_next_frame = media_opt_->DropFrame(); | 303 result.drop_next_frame = media_opt_->DropFrame(); |
| 304 } | 304 } |
| 305 } | 305 } |
| 306 return result; | 306 return result; |
| 307 } | 307 } |
| 308 | 308 |
| 309 } // namespace webrtc | 309 } // namespace webrtc |
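For reference, a minimal standalone sketch of the timing-frame decision implemented by the new code above, assuming the thresholds and encoder-reported stats are available as plain values; the names below (TimingThresholds, ShouldMarkTimingFrame) are illustrative only, not part of the WebRTC API. A frame is flagged either on schedule (first frame, a long gap since the last timing frame, or the same capture time as a frame already flagged on another stream) or because its encoded size is an outlier relative to the average size implied by the reported target bitrate and framerate.

    #include <cstddef>
    #include <cstdint>

    struct TimingThresholds {
      int64_t delay_ms;              // Max gap between scheduled timing frames.
      size_t outlier_ratio_percent;  // E.g. 500 => 5x the average size is an outlier.
    };

    bool ShouldMarkTimingFrame(int64_t capture_time_ms,
                               int64_t last_timing_frame_time_ms,  // -1 if none yet.
                               size_t encoded_length,
                               size_t target_bitrate_bytes_per_sec,
                               uint32_t framerate_fps,
                               const TimingThresholds& thresholds) {
      // Scheduled trigger: first frame, too long since the last timing frame, or
      // the same capture time as a frame already marked on another stream.
      const int64_t delay_ms = capture_time_ms - last_timing_frame_time_ms;
      const bool scheduled = last_timing_frame_time_ms == -1 ||
                             delay_ms >= thresholds.delay_ms || delay_ms == 0;

      // Outlier trigger: frame much larger than the average size implied by the
      // reported target bitrate and framerate. Without those reports, outliers
      // cannot be detected, so the threshold stays above the current frame size.
      size_t outlier_size = encoded_length + 1;
      if (framerate_fps > 0 && target_bitrate_bytes_per_sec > 0) {
        const size_t average_size = target_bitrate_bytes_per_sec / framerate_fps;
        outlier_size = average_size * thresholds.outlier_ratio_percent / 100;
      }
      return scheduled || encoded_length >= outlier_size;
    }

Note that in the actual callback only the scheduled trigger advances last_timing_frame_time_ms_, so an outlier frame does not postpone the next scheduled timing frame.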