OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
52 if (!size || data_size == CapturedFrame::kUnknownDataSize) { | 52 if (!size || data_size == CapturedFrame::kUnknownDataSize) { |
53 return false; | 53 return false; |
54 } | 54 } |
55 *size = data_size; | 55 *size = data_size; |
56 return true; | 56 return true; |
57 } | 57 } |
58 | 58 |
59 ///////////////////////////////////////////////////////////////////// | 59 ///////////////////////////////////////////////////////////////////// |
60 // Implementation of class VideoCapturer | 60 // Implementation of class VideoCapturer |
61 ///////////////////////////////////////////////////////////////////// | 61 ///////////////////////////////////////////////////////////////////// |
62 VideoCapturer::VideoCapturer() : apply_rotation_(false) { | 62 VideoCapturer::VideoCapturer() |
63 : apply_rotation_(false), frames_seen_(0), offset_us_(0) { | |
63 thread_checker_.DetachFromThread(); | 64 thread_checker_.DetachFromThread(); |
64 Construct(); | 65 Construct(); |
65 } | 66 } |
66 | 67 |
67 void VideoCapturer::Construct() { | 68 void VideoCapturer::Construct() { |
68 enable_camera_list_ = false; | 69 enable_camera_list_ = false; |
69 capture_state_ = CS_STOPPED; | 70 capture_state_ = CS_STOPPED; |
70 SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured); | 71 SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured); |
71 scaled_width_ = 0; | 72 scaled_width_ = 0; |
72 scaled_height_ = 0; | 73 scaled_height_ = 0; |
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
207 if (frame_factory_) { | 208 if (frame_factory_) { |
208 frame_factory_->SetApplyRotation(apply_rotation_); | 209 frame_factory_->SetApplyRotation(apply_rotation_); |
209 } | 210 } |
210 | 211 |
211 if (video_adapter()) { | 212 if (video_adapter()) { |
212 video_adapter()->OnResolutionRequest(wants.max_pixel_count, | 213 video_adapter()->OnResolutionRequest(wants.max_pixel_count, |
213 wants.max_pixel_count_step_up); | 214 wants.max_pixel_count_step_up); |
214 } | 215 } |
215 } | 216 } |
216 | 217 |
218 void VideoCapturer::UpdateOffset(int64_t capture_time_us) { | |
219 // Estimate the offset between system monotonic time and the capture | |
220 // time from the camera. The camera is assumed to provide more | |
221 // accurate timestamps than we can do here. But the camera may use | |
222 // its own free-running clock with a large offset and a small drift | |
223 // compared to the system clock. So the model is basically | |
224 // | |
225 // y_k = c_0 + c_1 x_k + v_k | |
226 // | |
227 // where x_k is the camera timestamp, believed to be accurate in its | |
228 // own scale. y_k is our reading of the system clock. v_k is the | |
229 // measurement noise, i.e., the delay from frame capture until we | |
230 // get here and read the clock. | |
231 // | |
232 // It's possible to do (weighted) least-squares estimation of both | |
233 // c_0 and c_1. Then we get the constants as c_1 = Cov(x,y) / | |
234 // Var(x), and c_0 = mean(y) - c_1 mean(x). Substituting this c_0, | |
235 // we can rearrange the model as | |
236 // | |
237 // y_k = mean(y) + (x_k - mean(x)) + (c_1 - 1) (x_k - mean(x)) + v_k | |
238 // | |
239 // Now if we use a weighted average which gradually forgets old | |
240 // values, x_k - mean(x) is bounded, of the same order as the time | |
241 // constant (and close to constant for a steady frame rate). In | |
242 // addition, the frequency error |c_1 - 1| should be small. Cameras | |
243 // with a frequency error up to 3000 ppm (3 ms drift per second) | |
244 // have been observed, but frequency errors below 100 ppm could be | |
245 // expected of any cheap crystal. | |
246 // | |
247 // Bottom line is that we ignore the c_1 term, and use only the estimator | |
248 // | |
249 // x_k + mean(y-x) | |
250 // | |
251 // where mean is plain averaging for initial samples, followed by | |
252 // exponential averaging. | |
253 | |
254 // TODO(nisse): Don't read the clock here, instead let the caller | |
255 // pass in the current system time? Useful for testing, or if the | |
256 // application reads the system clock earlier. | |
257 int64_t diff_us = rtc::TimeMicros() - capture_time_us; | |
258 | |
259 // We also try to detect if the camera timestamp actually is using | |
260 // the system monotonic clock (a common case, for cameras without | |
261 // builtin timestamping). In this case, we aim to keep the | |
262 // timestamps as is, i.e., set the offset to zero. In case the | |
263 // camera clock is drifting, and by chance the offset is crossing | |
264 // zero, this hack will only cause a small dent in the otherwise | |
265 // linear offset curve, temporarily forcing it closer to zero. | |
266 static const int64_t kDelayLimit = 50 * rtc::kNumMicrosecsPerMillisec; | |
stefan-webrtc
2016/05/27 00:49:10
Maybe just kDelayLimitUs = 50000? Easier to read,
nisse-webrtc
2016/05/27 09:47:33
Done.
| |
267 if (diff_us > 0 && diff_us < kDelayLimit) | |
268 diff_us = 0; | |
269 | |
270 // TODO(nisse): Do we need to detect jumps in the camera clock? | |
271 // E.g., if the camera is somehow reset mid-stream? We could check | |
272 // if abs(diff_us - offset_us) > 500ms or so, and in this case reset | |
273 // frames_seen_ to zero. | |
274 static const unsigned kWindowSize = 100; | |
275 if (frames_seen_ < kWindowSize) | |
276 frames_seen_++; | |
stefan-webrtc
2016/05/27 00:49:10
++frames_seen_
nisse-webrtc
2016/05/27 09:47:33
Done.
| |
277 | |
278 offset_us_ += diff_us / frames_seen_; | |
qiangchen
2016/05/26 20:01:35
Can you double check the correctness of the math b
stefan-webrtc
2016/05/27 00:49:10
That's what I would have expected too.
nisse-webrtc
2016/05/27 09:47:32
Fixed.
qiangchen
2016/05/27 15:43:50
Acknowledged.
| |
279 } | |
280 | |
217 bool VideoCapturer::AdaptFrame(int width, | 281 bool VideoCapturer::AdaptFrame(int width, |
218 int height, | 282 int height, |
219 // TODO(nisse): Switch to us unit. | 283 // TODO(nisse): Switch to us unit. In |
284 // progress in different cl. | |
220 int64_t capture_time_ns, | 285 int64_t capture_time_ns, |
221 int* out_width, | 286 int* out_width, |
222 int* out_height, | 287 int* out_height, |
223 int* crop_width, | 288 int* crop_width, |
224 int* crop_height, | 289 int* crop_height, |
225 int* crop_x, | 290 int* crop_x, |
226 int* crop_y) { | 291 int* crop_y, |
292 int64_t* time_us) { | |
293 int64_t capture_time_us = capture_time_ns / rtc::kNumNanosecsPerMicrosec; | |
294 UpdateOffset(capture_time_us); | |
295 | |
227 if (!broadcaster_.frame_wanted()) { | 296 if (!broadcaster_.frame_wanted()) { |
228 return false; | 297 return false; |
229 } | 298 } |
230 | 299 |
231 if (enable_video_adapter_ && !IsScreencast()) { | 300 if (enable_video_adapter_ && !IsScreencast()) { |
232 if (!video_adapter_.AdaptFrameResolution( | 301 if (!video_adapter_.AdaptFrameResolution( |
233 width, height, capture_time_ns, | 302 width, height, capture_time_ns, |
234 crop_width, crop_height, out_width, out_height)) { | 303 crop_width, crop_height, out_width, out_height)) { |
235 // VideoAdapter dropped the frame. | 304 // VideoAdapter dropped the frame. |
236 return false; | 305 return false; |
237 } | 306 } |
238 *crop_x = (width - *crop_width) / 2; | 307 *crop_x = (width - *crop_width) / 2; |
239 *crop_y = (height - *crop_height) / 2; | 308 *crop_y = (height - *crop_height) / 2; |
240 } else { | 309 } else { |
241 *out_width = width; | 310 *out_width = width; |
242 *out_height = height; | 311 *out_height = height; |
243 *crop_width = width; | 312 *crop_width = width; |
244 *crop_height = height; | 313 *crop_height = height; |
245 *crop_x = 0; | 314 *crop_x = 0; |
246 *crop_y = 0; | 315 *crop_y = 0; |
247 } | 316 } |
317 *time_us = capture_time_us + offset_us_; | |
248 return true; | 318 return true; |
249 } | 319 } |
250 | 320 |
251 void VideoCapturer::OnFrameCaptured(VideoCapturer*, | 321 void VideoCapturer::OnFrameCaptured(VideoCapturer*, |
252 const CapturedFrame* captured_frame) { | 322 const CapturedFrame* captured_frame) { |
253 int out_width; | 323 int out_width; |
254 int out_height; | 324 int out_height; |
255 int crop_width; | 325 int crop_width; |
256 int crop_height; | 326 int crop_height; |
257 int crop_x; | 327 int crop_x; |
258 int crop_y; | 328 int crop_y; |
329 int64_t time_us; | |
259 | 330 |
260 if (!AdaptFrame(captured_frame->width, captured_frame->height, | 331 if (!AdaptFrame(captured_frame->width, captured_frame->height, |
261 captured_frame->time_stamp, | 332 captured_frame->time_stamp, |
262 &out_width, &out_height, | 333 &out_width, &out_height, |
263 &crop_width, &crop_height, &crop_x, &crop_y)) { | 334 &crop_width, &crop_height, &crop_x, &crop_y, &time_us)) { |
264 return; | 335 return; |
265 } | 336 } |
266 | 337 |
267 if (!frame_factory_) { | 338 if (!frame_factory_) { |
268 LOG(LS_ERROR) << "No video frame factory."; | 339 LOG(LS_ERROR) << "No video frame factory."; |
269 return; | 340 return; |
270 } | 341 } |
271 | 342 |
343 frame_factory_->SetTimestampOffset(offset_us_); | |
344 | |
272 // TODO(nisse): Reorganize frame factory methods. crop_x and crop_y | 345 // TODO(nisse): Reorganize frame factory methods. crop_x and crop_y |
273 // are ignored for now. | 346 // are ignored for now. |
274 std::unique_ptr<VideoFrame> adapted_frame(frame_factory_->CreateAliasedFrame( | 347 std::unique_ptr<VideoFrame> adapted_frame(frame_factory_->CreateAliasedFrame( |
275 captured_frame, crop_width, crop_height, out_width, out_height)); | 348 captured_frame, crop_width, crop_height, out_width, out_height)); |
276 | 349 |
277 if (!adapted_frame) { | 350 if (!adapted_frame) { |
278 // TODO(fbarchard): LOG more information about captured frame attributes. | 351 // TODO(fbarchard): LOG more information about captured frame attributes. |
279 LOG(LS_ERROR) << "Couldn't convert to I420! " | 352 LOG(LS_ERROR) << "Couldn't convert to I420! " |
280 << "From " << ToString(captured_frame) << " To " | 353 << "From " << ToString(captured_frame) << " To " |
281 << out_width << " x " << out_height; | 354 << out_width << " x " << out_height; |
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
433 void VideoCapturer::UpdateInputSize(int width, int height) { | 506 void VideoCapturer::UpdateInputSize(int width, int height) { |
434 // Update stats protected from fetches from different thread. | 507 // Update stats protected from fetches from different thread. |
435 rtc::CritScope cs(&frame_stats_crit_); | 508 rtc::CritScope cs(&frame_stats_crit_); |
436 | 509 |
437 input_size_valid_ = true; | 510 input_size_valid_ = true; |
438 input_width_ = width; | 511 input_width_ = width; |
439 input_height_ = height; | 512 input_height_ = height; |
440 } | 513 } |
441 | 514 |
442 } // namespace cricket | 515 } // namespace cricket |
OLD | NEW |