| Index: webrtc/media/base/videocapturer_unittest.cc |
| diff --git a/webrtc/media/base/videocapturer_unittest.cc b/webrtc/media/base/videocapturer_unittest.cc |
| index 25230b5118a80517ff861d5bb473019f58528332..3002dc625bc1fb71877341c53e3c93478b51fdec 100644 |
| --- a/webrtc/media/base/videocapturer_unittest.cc |
| +++ b/webrtc/media/base/videocapturer_unittest.cc |
| @@ -15,6 +15,7 @@ |
| #include "webrtc/base/gunit.h" |
| #include "webrtc/base/logging.h" |
| +#include "webrtc/base/random.h" |
| #include "webrtc/base/thread.h" |
| #include "webrtc/media/base/fakevideocapturer.h" |
| #include "webrtc/media/base/fakevideorenderer.h" |
| @@ -781,3 +782,159 @@ TEST_F(VideoCapturerTest, BlacklistAllFormats) { |
| ASSERT_EQ(1u, capturer_->GetSupportedFormats()->size()); |
| EXPECT_EQ(vga_format.height, capturer_->GetSupportedFormats()->at(0).height); |
| } |
| + |
| +namespace { |
| +// Computes the difference x_k - mean(x), where x_k is the linear sequence |
| +// x_k = k, and the "mean" is a plain mean for the first |window_size| samples, |
| +// followed by exponential averaging with weight 1/|window_size| for each new |
| +// sample. This is needed to predict the effect of camera clock drift on the |
| +// timestamp translation. See the comment on VideoCapturer::UpdateOffset for |
| +// more context. |
| +double MeanTimeDifference(int nsamples, int window_size) { |
| + if (nsamples <= window_size) { |
| + // Plain averaging. |
| + return nsamples / 2.0; |
| + } else { |
| + // Exponential convergence towards |
| + // interval_error * (window_size - 1) |
| + double alpha = 1.0 - 1.0 / window_size; |
| + |
| + return ((window_size - 1) - |
| + (window_size / 2.0 - 1) * pow(alpha, nsamples - window_size)); |
| + } |
| +} |
| + |
| +} // Anonymous namespace |
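A quick sanity check of the closed form in MeanTimeDifference: for nsamples <= window_size the plain mean of 0..k is k/2, giving the first branch; once exponential averaging starts, the difference d_k = x_k - mean_k satisfies d_k = alpha * (d_{k-1} + 1), whose fixed point is alpha / (1 - alpha) = window_size - 1, which is the limit mentioned in the comment. The sketch below is editorial, not part of the CL, and uses only the standard library; it simulates the averaging directly and compares against the closed form.

```cpp
// Editorial sketch: simulate the averaging that MeanTimeDifference()
// models and compare the result against its closed form. Not part of
// the CL; standard library only.
#include <cmath>
#include <cstdio>

int main() {
  const int window_size = 100;
  const double alpha = 1.0 - 1.0 / window_size;
  double mean = 0.0;  // Running mean of the sequence x_k = k.
  for (int k = 0; k <= 2 * window_size; ++k) {
    if (k <= window_size) {
      // Plain mean of x_0 .. x_k.
      mean += (k - mean) / (k + 1);
    } else {
      // Exponential averaging with weight 1/window_size per new sample.
      mean += (k - mean) / window_size;
    }
    // Closed form, as in MeanTimeDifference() above.
    const double closed_form =
        k <= window_size
            ? k / 2.0
            : (window_size - 1) -
                  (window_size / 2.0 - 1) * std::pow(alpha, k - window_size);
    std::printf("k=%3d  simulated=%7.3f  closed form=%7.3f\n", k, k - mean,
                closed_form);
  }
  return 0;
}
```

The simulated and closed-form values agree, and both converge toward window_size - 1 = 99 as k grows.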
| + |
| +TEST_F(VideoCapturerTest, AttenuateTimestampJitterPosDrift) { |
| + const int kWidth = 800; |
| + const int kHeight = 400; |
| + |
| + const double rel_freq_error = 0.003; |
| + const int64_t epoch = 10000; |
| + const int64_t jitter_us = 5000; |
| + const int64_t interval_us = 33333; // 30 FPS |
| + const int64_t interval_error_us = interval_us * rel_freq_error; |
| + const int window_size = 100; |
| + const int nframes = 2 * window_size; |
|
sprang_webrtc 2016/06/17 13:10:52
Think also these should have kConstantVariable for
nisse-webrtc 2016/06/17 13:43:01
CamelCaseEvenForLocalVariables? Sure, I can do tha
sprang_webrtc 2016/06/17 14:35:38
Makes it clear which ones are just named constant
nisse-webrtc 2016/06/20 11:47:49
Done.
|
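For reference, a purely illustrative rendering of the rename agreed on in the thread above; the names in the landed patch set may differ (kWidth and kHeight already follow the convention and are omitted):

```cpp
// Illustrative only: the local constants above, renamed to the
// kConstantName style discussed in the review. Assumed names, not
// necessarily what landed.
#include <stdint.h>

const double kRelFreqError = 0.003;
const int64_t kEpoch = 10000;
const int64_t kJitterUs = 5000;
const int64_t kIntervalUs = 33333;  // 30 FPS
const int64_t kIntervalErrorUs = kIntervalUs * kRelFreqError;
const int kWindowSize = 100;
const int kNumFrames = 2 * kWindowSize;
```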
| + |
| + const int64_t system_start_us = rtc::TimeMicros(); |
| + webrtc::Random random(17); |
| + |
| + int64_t prev_translated_time_us = system_start_us; |
| + |
| + for (int i = 0; i < nframes; i++) { |
| + // Camera time subject to drift. |
| + int64_t camera_time_us = epoch + i * (interval_us + interval_error_us); |
| + int64_t system_time_us = system_start_us + i * interval_us; |
| + // And system time readings are subject to jitter. |
| + int64_t system_measured_us = system_time_us + random.Rand(jitter_us); |
| + |
| + int out_width; |
| + int out_height; |
| + int crop_width; |
| + int crop_height; |
| + int crop_x; |
| + int crop_y; |
| + int64_t translated_time_us; |
| + |
| + EXPECT_TRUE(capturer_->AdaptFrame(kWidth, kHeight, |
| + camera_time_us, system_measured_us, |
| + &out_width, &out_height, |
| + &crop_width, &crop_height, |
| + &crop_x, &crop_y, &translated_time_us)); |
| + |
| + EXPECT_LE(translated_time_us, system_measured_us); |
| + EXPECT_GE(translated_time_us, prev_translated_time_us); |
| + |
| + // The relative frequency error contributes to the expected error |
| + // by a factor which is the difference between the current time |
| + // and the average of earlier sample times. |
| + int64_t expected_error_us = |
| + jitter_us / 2 + |
| + rel_freq_error * interval_us * MeanTimeDifference(i, window_size); |
| + |
| + int64_t bias_us = capturer_->clip_bias_us(); |
| + EXPECT_GE(bias_us, 0); |
| + |
| + if (i == 0) { |
| + EXPECT_EQ(translated_time_us, system_measured_us); |
| + } else { |
| + EXPECT_NEAR(translated_time_us + bias_us, |
| + system_time_us + expected_error_us, |
| + 2.0 * jitter_us / sqrt(std::max(i, window_size))); |
| + } |
| + // Initially, the bias applied by the capturer depends mainly on |
| + // the measurement noise. It is expected to roughly cancel the |
| + // expected error from the clock drift as the latter grows. The |
| + // numbers for this test were selected after some trial and error. |
| + if (i < 10) { |
| + EXPECT_LE(bias_us, jitter_us / 2); |
| + } else { |
| + EXPECT_NEAR(bias_us, expected_error_us, 1500); |
| + } |
| + prev_translated_time_us = translated_time_us; |
| + } |
| +} |
| + |
| +TEST_F(VideoCapturerTest, AttenuateTimestampJitterNegDrift) { |
|
sprang_webrtc 2016/06/17 13:10:52
A lot of code duplication here. Could you extract
nisse-webrtc 2016/06/17 13:43:01
Makes some sense, I can give it a try. In general
sprang_webrtc 2016/06/17 14:35:38
Agree, some code duplication in tests are fine. In
nisse-webrtc 2016/06/20 11:47:49
Done.
|
| + const int kWidth = 800; |
| + const int kHeight = 400; |
| + |
| + const double rel_freq_error = -0.003; |
| + const int64_t epoch = 10000; |
| + const int64_t jitter_us = 5000; |
| + const int64_t interval_us = 33333; // 30 FPS |
| + const int64_t interval_error_us = interval_us * rel_freq_error; |
| + const int window_size = 100; |
| + const int nframes = 2 * window_size; |
| + |
| + const int64_t system_start_us = rtc::TimeMicros(); |
| + webrtc::Random random(17); |
| + |
| + int64_t prev_translated_time_us = system_start_us; |
| + |
| + for (int i = 0; i < nframes; i++) { |
| + // Camera time subject to drift. |
| + int64_t camera_time_us = epoch + i * (interval_us + interval_error_us); |
| + int64_t system_time_us = system_start_us + i * interval_us; |
| + // And system time readings are subject to jitter. |
| + int64_t system_measured_us = system_time_us + random.Rand(jitter_us); |
| + |
| + int out_width; |
| + int out_height; |
| + int crop_width; |
| + int crop_height; |
| + int crop_x; |
| + int crop_y; |
| + int64_t translated_time_us; |
| + |
| + EXPECT_TRUE(capturer_->AdaptFrame(kWidth, kHeight, |
| + camera_time_us, system_measured_us, |
| + &out_width, &out_height, |
| + &crop_width, &crop_height, |
| + &crop_x, &crop_y, &translated_time_us)); |
| + |
| + EXPECT_LE(translated_time_us, system_measured_us); |
| + EXPECT_GE(translated_time_us, prev_translated_time_us); |
| + |
| + // The relative frequency error contributes to the expected error |
| + // by a factor which is the difference between the current time |
| + // and the average of earlier sample times. |
| + int64_t expected_error_us = |
| + jitter_us / 2 + |
| + rel_freq_error * interval_us * MeanTimeDifference(i, window_size); |
| + |
| + int64_t bias_us = capturer_->clip_bias_us(); |
| + EXPECT_GE(bias_us, 0); |
| + |
| + if (i == 0) { |
| + EXPECT_EQ(translated_time_us, system_measured_us); |
| + } else { |
| + EXPECT_NEAR(translated_time_us + bias_us, |
| + system_time_us + expected_error_us, |
| + 2.0 * jitter_us / sqrt(std::max(i, window_size))); |
| + } |
| + EXPECT_LE(bias_us, jitter_us / 2); |
| + prev_translated_time_us = translated_time_us; |
| + } |
| +} |
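Following up on the duplication thread above: the two tests differ only in the sign of rel_freq_error and in how the clip bias is checked, so the shared loop can move into a fixture helper. The sketch below is purely illustrative of that extraction; the helper name, its parameterization, and the exact form in the landed patch set are assumptions, and it reuses MeanTimeDifference and the capturer_ member exactly as in the diff.

```cpp
// Hypothetical fixture helper sketching the extraction discussed in the
// review thread; the landed CL may organize this differently. Assumes it
// is declared as a member of VideoCapturerTest so it can use capturer_,
// and that MeanTimeDifference() from the anonymous namespace is visible.
void VideoCapturerTest::TestAttenuateTimestampJitter(double rel_freq_error) {
  const int kWidth = 800;
  const int kHeight = 400;
  const int64_t kEpoch = 10000;
  const int64_t kJitterUs = 5000;
  const int64_t kIntervalUs = 33333;  // 30 FPS
  const int64_t kIntervalErrorUs = kIntervalUs * rel_freq_error;
  const int kWindowSize = 100;
  const int kNumFrames = 2 * kWindowSize;

  const int64_t system_start_us = rtc::TimeMicros();
  webrtc::Random random(17);
  int64_t prev_translated_time_us = system_start_us;

  for (int i = 0; i < kNumFrames; i++) {
    // Camera time is subject to drift; system time readings to jitter.
    int64_t camera_time_us = kEpoch + i * (kIntervalUs + kIntervalErrorUs);
    int64_t system_time_us = system_start_us + i * kIntervalUs;
    int64_t system_measured_us = system_time_us + random.Rand(kJitterUs);

    int out_width, out_height, crop_width, crop_height, crop_x, crop_y;
    int64_t translated_time_us;
    EXPECT_TRUE(capturer_->AdaptFrame(kWidth, kHeight, camera_time_us,
                                      system_measured_us, &out_width,
                                      &out_height, &crop_width, &crop_height,
                                      &crop_x, &crop_y, &translated_time_us));

    EXPECT_LE(translated_time_us, system_measured_us);
    EXPECT_GE(translated_time_us, prev_translated_time_us);

    int64_t expected_error_us =
        kJitterUs / 2 +
        rel_freq_error * kIntervalUs * MeanTimeDifference(i, kWindowSize);
    int64_t bias_us = capturer_->clip_bias_us();
    EXPECT_GE(bias_us, 0);

    if (i == 0) {
      EXPECT_EQ(translated_time_us, system_measured_us);
    } else {
      EXPECT_NEAR(translated_time_us + bias_us,
                  system_time_us + expected_error_us,
                  2.0 * kJitterUs / sqrt(std::max(i, kWindowSize)));
    }
    // Drift-specific bias checks, carried over from the two original tests.
    if (rel_freq_error > 0 && i >= 10) {
      EXPECT_NEAR(bias_us, expected_error_us, 1500);
    } else {
      EXPECT_LE(bias_us, kJitterUs / 2);
    }
    prev_translated_time_us = translated_time_us;
  }
}

// The two tests would then reduce to:
//   TEST_F(VideoCapturerTest, AttenuateTimestampJitterPosDrift) {
//     TestAttenuateTimestampJitter(0.003);
//   }
//   TEST_F(VideoCapturerTest, AttenuateTimestampJitterNegDrift) {
//     TestAttenuateTimestampJitter(-0.003);
//   }
```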