Index: talk/app/webrtc/objc/avfoundationvideocapturer.mm
diff --git a/talk/app/webrtc/objc/avfoundationvideocapturer.mm b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
index c47e36dc40930d784756d7364569e19bd6401f38..b4d7ee2443e083f0eed876629ea1115f07cc2c7c 100644
--- a/talk/app/webrtc/objc/avfoundationvideocapturer.mm
+++ b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
@@ -306,7 +306,7 @@ static dispatch_queue_t kBackgroundQueue = nil;
namespace webrtc {
AVFoundationVideoCapturer::AVFoundationVideoCapturer()
-    : _capturer(nil), _startThread(nullptr), _startTime(0) {
+    : _capturer(nil), _startThread(nullptr) {
// Set our supported formats. This matches kDefaultPreset.
std::vector<cricket::VideoFormat> supportedFormats;
supportedFormats.push_back(cricket::VideoFormat(kDefaultFormat));
@@ -344,7 +344,6 @@ cricket::CaptureState AVFoundationVideoCapturer::Start(
// to spin up, and this call returns async.
// TODO(tkchin): make this better.
[_capturer startCaptureAsync];
-  _startTime = rtc::TimeNanos();
SetCaptureState(cricket::CaptureState::CS_RUNNING);
return cricket::CaptureState::CS_STARTING;
@@ -424,7 +423,6 @@ void AVFoundationVideoCapturer::CaptureSampleBuffer(
frame.pixel_height = 1;
frame.fourcc = static_cast<uint32>(cricket::FOURCC_NV12);
frame.time_stamp = currentTime;
-  frame.elapsed_time = currentTime - _startTime;
frame.data = yPlaneAddress;
frame.data_size = frameSize;