OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2008 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2008 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <stdio.h> | 11 #include <stdio.h> |
12 | 12 |
13 #include <memory> | 13 #include <memory> |
14 #include <vector> | 14 #include <vector> |
15 | 15 |
16 #include "webrtc/base/gunit.h" | 16 #include "webrtc/base/gunit.h" |
17 #include "webrtc/base/logging.h" | 17 #include "webrtc/base/logging.h" |
18 #include "webrtc/base/random.h" | |
18 #include "webrtc/base/thread.h" | 19 #include "webrtc/base/thread.h" |
19 #include "webrtc/media/base/fakevideocapturer.h" | 20 #include "webrtc/media/base/fakevideocapturer.h" |
20 #include "webrtc/media/base/fakevideorenderer.h" | 21 #include "webrtc/media/base/fakevideorenderer.h" |
21 #include "webrtc/media/base/testutils.h" | 22 #include "webrtc/media/base/testutils.h" |
22 #include "webrtc/media/base/videocapturer.h" | 23 #include "webrtc/media/base/videocapturer.h" |
23 | 24 |
24 using cricket::FakeVideoCapturer; | 25 using cricket::FakeVideoCapturer; |
25 | 26 |
26 namespace { | 27 namespace { |
27 | 28 |
(...skipping 746 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
774 capturer_->set_enable_camera_list(true); | 775 capturer_->set_enable_camera_list(true); |
775 capturer_->ConstrainSupportedFormats(vga_format); | 776 capturer_->ConstrainSupportedFormats(vga_format); |
776 EXPECT_EQ(2u, capturer_->GetSupportedFormats()->size()); | 777 EXPECT_EQ(2u, capturer_->GetSupportedFormats()->size()); |
777 // To make sure it's not just the camera list being broken, add in VGA and | 778 // To make sure it's not just the camera list being broken, add in VGA and |
778 // try again. This time, only the VGA format should be there. | 779 // try again. This time, only the VGA format should be there. |
779 supported_formats.push_back(vga_format); | 780 supported_formats.push_back(vga_format); |
780 capturer_->ResetSupportedFormats(supported_formats); | 781 capturer_->ResetSupportedFormats(supported_formats); |
781 ASSERT_EQ(1u, capturer_->GetSupportedFormats()->size()); | 782 ASSERT_EQ(1u, capturer_->GetSupportedFormats()->size()); |
782 EXPECT_EQ(vga_format.height, capturer_->GetSupportedFormats()->at(0).height); | 783 EXPECT_EQ(vga_format.height, capturer_->GetSupportedFormats()->at(0).height); |
783 } | 784 } |
785 | |
786 namespace { | |
787 // Computes the difference x_k - mean(x), when x_k is the linear sequence x_k = | |
788 // k, and the "mean" is plain mean for the first |window_size| samples, followed | |
789 // by exponential averaging with weight 1/|window_size| for each new sample. | |
790 // This is needed to predict the effect of camera clock drift on the timestamp | |
791 // translation. See the comment on VideoCapturer::UpdateOffset for more context. | |
792 double MeanTimeDifference(int nsamples, int window_size) { | |
793 if (nsamples <= window_size) { | |
794 // Plain averaging. | |
795 return nsamples / 2.0; | |
796 } else { | |
797 // Exponential convergence towards | |
798 // interval_error * (window_size - 1) | |
799 double alpha = 1.0 - 1.0 / window_size; | |
800 | |
801 return ((window_size - 1) - | |
802 (window_size / 2.0 - 1) * pow(alpha, nsamples - window_size)); | |
803 } | |
804 } | |
805 | |
806 } // Anonymous namespace | |
807 | |
808 TEST_F(VideoCapturerTest, AttenuateTimestampJitterPosDrift) { | |
809 const int kWidth = 800; | |
810 const int kHeight = 400; | |
811 | |
812 const double rel_freq_error = 0.003; | |
813 const int64_t epoch = 10000; | |
814 const int64_t jitter_us = 5000; | |
815 const int64_t interval_us = 33333; // 30 FPS | |
816 const int64_t interval_error_us = interval_us * rel_freq_error; | |
817 const int window_size = 100; | |
818 const int nframes = 2 * window_size; | |
sprang_webrtc
2016/06/17 13:10:52
Think also these should have kConstantVariable for
nisse-webrtc
2016/06/17 13:43:01
CamelCaseEvenForLocalVariables? Sure, I can do tha
sprang_webrtc
2016/06/17 14:35:38
Makes it clear which ones are just named constant
nisse-webrtc
2016/06/20 11:47:49
Done.
| |
819 | |
820 const int64_t system_start_us = rtc::TimeMicros(); | |
821 webrtc::Random random(17); | |
822 | |
823 int64_t prev_translated_time_us = system_start_us; | |
824 | |
825 for (int i = 0; i < nframes; i++) { | |
826 // Camera time subject to drift. | |
827 int64_t camera_time_us = epoch + i * (interval_us + interval_error_us); | |
828 int64_t system_time_us = system_start_us + i * interval_us; | |
829 // And system time readings are subject to jitter. | |
830 int64_t system_measured_us = system_time_us + random.Rand(jitter_us); | |
831 | |
832 int out_width; | |
833 int out_height; | |
834 int crop_width; | |
835 int crop_height; | |
836 int crop_x; | |
837 int crop_y; | |
838 int64_t translated_time_us; | |
839 | |
840 EXPECT_TRUE(capturer_->AdaptFrame(kWidth, kHeight, | |
841 camera_time_us, system_measured_us, | |
842 &out_width, &out_height, | |
843 &crop_width, &crop_height, | |
844 &crop_x, &crop_y, &translated_time_us)); | |
845 | |
846 EXPECT_LE(translated_time_us, system_measured_us); | |
847 EXPECT_GE(translated_time_us, prev_translated_time_us); | |
848 | |
849 // The relative frequency error contributes to the expected error | |
850 // by a factor which is the difference between the current time | |
851 // and the average of earlier sample times. | |
852 int64_t expected_error_us = | |
853 jitter_us / 2 + | |
854 rel_freq_error * interval_us * MeanTimeDifference(i, window_size); | |
855 | |
856 int64_t bias_us = capturer_->clip_bias_us(); | |
857 EXPECT_GE(bias_us, 0); | |
858 | |
859 if (i == 0) { | |
860 EXPECT_EQ(translated_time_us, system_measured_us); | |
861 } else { | |
862 EXPECT_NEAR(translated_time_us + bias_us, | |
863 system_time_us + expected_error_us, | |
864 2.0 * jitter_us / sqrt(std::max(i, window_size))); | |
865 } | |
866 // Initially, the bias applied by the capturer depends mainly on | |
867 // the measurement noise. It is expected to roughly cancel the | |
868 // expected error from the clock drift, as this grows. The numbers | |
869 // for this test were selected after some trial and error. | |
870 if (i < 10) { | |
871 EXPECT_LE(bias_us, jitter_us / 2); | |
872 } else { | |
873 EXPECT_NEAR(bias_us, expected_error_us, 1500); | |
874 } | |
875 prev_translated_time_us = translated_time_us; | |
876 } | |
877 } | |
878 | |
879 TEST_F(VideoCapturerTest, AttenuateTimestampJitterNegDrift) { | |
sprang_webrtc
2016/06/17 13:10:52
A lot of code duplication here. Could you extract
nisse-webrtc
2016/06/17 13:43:01
Makes some sense, I can give it a try.
In general
sprang_webrtc
2016/06/17 14:35:38
Agree, some code duplication in tests are fine. In
nisse-webrtc
2016/06/20 11:47:49
Done.
| |
880 const int kWidth = 800; | |
881 const int kHeight = 400; | |
882 | |
883 const double rel_freq_error = -0.003; | |
884 const int64_t epoch = 10000; | |
885 const int64_t jitter_us = 5000; | |
886 const int64_t interval_us = 33333; // 30 FPS | |
887 const int64_t interval_error_us = interval_us * rel_freq_error; | |
888 const int window_size = 100; | |
889 const int nframes = 2 * window_size; | |
890 | |
891 const int64_t system_start_us = rtc::TimeMicros(); | |
892 webrtc::Random random(17); | |
893 | |
894 int64_t prev_translated_time_us = system_start_us; | |
895 | |
896 for (int i = 0; i < nframes; i++) { | |
897 // Camera time subject to drift. | |
898 int64_t camera_time_us = epoch + i * (interval_us + interval_error_us); | |
899 int64_t system_time_us = system_start_us + i * interval_us; | |
900 // And system time readings are subject to jitter. | |
901 int64_t system_measured_us = system_time_us + random.Rand(jitter_us); | |
902 | |
903 int out_width; | |
904 int out_height; | |
905 int crop_width; | |
906 int crop_height; | |
907 int crop_x; | |
908 int crop_y; | |
909 int64_t translated_time_us; | |
910 | |
911 EXPECT_TRUE(capturer_->AdaptFrame(kWidth, kHeight, | |
912 camera_time_us, system_measured_us, | |
913 &out_width, &out_height, | |
914 &crop_width, &crop_height, | |
915 &crop_x, &crop_y, &translated_time_us)); | |
916 | |
917 EXPECT_LE(translated_time_us, system_measured_us); | |
918 EXPECT_GE(translated_time_us, prev_translated_time_us); | |
919 | |
920 // The relative frequency error contributes to the expected error | |
921 // by a factor which is the difference between the current time | |
922 // and the average of earlier sample times. | |
923 int64_t expected_error_us = | |
924 jitter_us / 2 + | |
925 rel_freq_error * interval_us * MeanTimeDifference(i, window_size); | |
926 | |
927 int64_t bias_us = capturer_->clip_bias_us(); | |
928 EXPECT_GE(bias_us, 0); | |
929 | |
930 if (i == 0) { | |
931 EXPECT_EQ(translated_time_us, system_measured_us); | |
932 } else { | |
933 EXPECT_NEAR(translated_time_us + bias_us, | |
934 system_time_us + expected_error_us, | |
935 2.0 * jitter_us / sqrt(std::max(i, window_size))); | |
936 } | |
937 EXPECT_LE(bias_us, jitter_us / 2); | |
938 prev_translated_time_us = translated_time_us; | |
939 } | |
940 } | |
OLD | NEW |