OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 // Implementation file of class VideoCapturer. | 11 // Implementation file of class VideoCapturer. |
12 | 12 |
13 #include "webrtc/media/base/videocapturer.h" | 13 #include "webrtc/media/base/videocapturer.h" |
14 | 14 |
15 #include <algorithm> | 15 #include <algorithm> |
16 | 16 |
17 #include "libyuv/scale_argb.h" | 17 #include "libyuv/scale_argb.h" |
18 #include "webrtc/base/common.h" | 18 #include "webrtc/base/common.h" |
19 #include "webrtc/base/logging.h" | 19 #include "webrtc/base/logging.h" |
20 #include "webrtc/base/systeminfo.h" | 20 #include "webrtc/base/systeminfo.h" |
21 #include "webrtc/media/base/videoframefactory.h" | 21 #include "webrtc/media/base/videoframefactory.h" |
| 22 |
| 23 #if defined(HAVE_WEBRTC_VIDEO) |
22 #include "webrtc/media/engine/webrtcvideoframe.h" | 24 #include "webrtc/media/engine/webrtcvideoframe.h" |
23 #include "webrtc/media/engine/webrtcvideoframefactory.h" | 25 #include "webrtc/media/engine/webrtcvideoframefactory.h" |
| 26 #endif // HAVE_WEBRTC_VIDEO |
24 | 27 |
25 namespace cricket { | 28 namespace cricket { |
26 | 29 |
27 namespace { | 30 namespace { |
28 | 31 |
| 32 // TODO(thorcarpenter): This is a BIG hack to flush the system with black |
| 33 // frames. Frontends should coordinate to update the video state of a muted |
 | 34 // user. When all frontends do this, consider removing the black frame business. |
| 35 const int kNumBlackFramesOnMute = 30; |
| 36 |
| 37 // MessageHandler constants. |
| 38 enum { |
| 39 MSG_DO_PAUSE = 0, |
| 40 MSG_DO_UNPAUSE, |
| 41 MSG_STATE_CHANGE |
| 42 }; |
| 43 |
29 static const int64_t kMaxDistance = ~(static_cast<int64_t>(1) << 63); | 44 static const int64_t kMaxDistance = ~(static_cast<int64_t>(1) << 63); |
30 #ifdef WEBRTC_LINUX | 45 #ifdef WEBRTC_LINUX |
31 static const int kYU12Penalty = 16; // Needs to be higher than MJPG index. | 46 static const int kYU12Penalty = 16; // Needs to be higher than MJPG index. |
32 #endif | 47 #endif |
33 static const int kDefaultScreencastFps = 5; | 48 static const int kDefaultScreencastFps = 5; |
| 49 typedef rtc::TypedMessageData<CaptureState> StateChangeParams; |
34 | 50 |
35 // Limit stats data collections to ~20 seconds of 30fps data before dropping | 51 // Limit stats data collections to ~20 seconds of 30fps data before dropping |
36 // old data in case stats aren't reset for long periods of time. | 52 // old data in case stats aren't reset for long periods of time. |
37 static const size_t kMaxAccumulatorSize = 600; | 53 static const size_t kMaxAccumulatorSize = 600; |
38 | 54 |
39 } // namespace | 55 } // namespace |
40 | 56 |
41 ///////////////////////////////////////////////////////////////////// | 57 ///////////////////////////////////////////////////////////////////// |
42 // Implementation of struct CapturedFrame | 58 // Implementation of struct CapturedFrame |
43 ///////////////////////////////////////////////////////////////////// | 59 ///////////////////////////////////////////////////////////////////// |
(...skipping 14 matching lines...) Expand all Loading... |
58 return false; | 74 return false; |
59 } | 75 } |
60 *size = data_size; | 76 *size = data_size; |
61 return true; | 77 return true; |
62 } | 78 } |
63 | 79 |
64 ///////////////////////////////////////////////////////////////////// | 80 ///////////////////////////////////////////////////////////////////// |
65 // Implementation of class VideoCapturer | 81 // Implementation of class VideoCapturer |
66 ///////////////////////////////////////////////////////////////////// | 82 ///////////////////////////////////////////////////////////////////// |
67 VideoCapturer::VideoCapturer() | 83 VideoCapturer::VideoCapturer() |
68 : adapt_frame_drops_data_(kMaxAccumulatorSize), | 84 : thread_(rtc::Thread::Current()), |
| 85 adapt_frame_drops_data_(kMaxAccumulatorSize), |
69 frame_time_data_(kMaxAccumulatorSize), | 86 frame_time_data_(kMaxAccumulatorSize), |
70 apply_rotation_(true) { | 87 apply_rotation_(true) { |
71 thread_checker_.DetachFromThread(); | 88 Construct(); |
| 89 } |
| 90 |
| 91 VideoCapturer::VideoCapturer(rtc::Thread* thread) |
| 92 : thread_(thread), |
| 93 adapt_frame_drops_data_(kMaxAccumulatorSize), |
| 94 frame_time_data_(kMaxAccumulatorSize), |
| 95 apply_rotation_(true) { |
72 Construct(); | 96 Construct(); |
73 } | 97 } |
74 | 98 |
75 void VideoCapturer::Construct() { | 99 void VideoCapturer::Construct() { |
76 ratio_w_ = 0; | 100 ClearAspectRatio(); |
77 ratio_h_ = 0; | |
78 enable_camera_list_ = false; | 101 enable_camera_list_ = false; |
79 square_pixel_aspect_ratio_ = false; | 102 square_pixel_aspect_ratio_ = false; |
80 capture_state_ = CS_STOPPED; | 103 capture_state_ = CS_STOPPED; |
81 SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured); | 104 SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured); |
82 // TODO(perkj) SignalVideoFrame is used directly by Chrome remoting. | 105 // TODO(perkj) SignalVideoFrame is used directly by Chrome remoting. |
83 // Before that is refactored, SignalVideoFrame must forward frames to the | 106 // Before that is refactored, SignalVideoFrame must forward frames to the |
84 // |VideoBroadcaster|; | 107 // |VideoBroadcaster|; |
85 SignalVideoFrame.connect(this, &VideoCapturer::OnFrame); | 108 SignalVideoFrame.connect(this, &VideoCapturer::OnFrame); |
86 scaled_width_ = 0; | 109 scaled_width_ = 0; |
87 scaled_height_ = 0; | 110 scaled_height_ = 0; |
| 111 muted_ = false; |
| 112 black_frame_count_down_ = kNumBlackFramesOnMute; |
88 enable_video_adapter_ = true; | 113 enable_video_adapter_ = true; |
89 adapt_frame_drops_ = 0; | 114 adapt_frame_drops_ = 0; |
90 previous_frame_time_ = 0.0; | 115 previous_frame_time_ = 0.0; |
| 116 #ifdef HAVE_WEBRTC_VIDEO |
91 // There are lots of video capturers out there that don't call | 117 // There are lots of video capturers out there that don't call |
92 // set_frame_factory. We can either go change all of them, or we | 118 // set_frame_factory. We can either go change all of them, or we |
93 // can set this default. | 119 // can set this default. |
94 // TODO(pthatcher): Remove this hack and require the frame factory | 120 // TODO(pthatcher): Remove this hack and require the frame factory |
95 // to be passed in the constructor. | 121 // to be passed in the constructor. |
96 set_frame_factory(new WebRtcVideoFrameFactory()); | 122 set_frame_factory(new WebRtcVideoFrameFactory()); |
| 123 #endif |
97 } | 124 } |
98 | 125 |
99 const std::vector<VideoFormat>* VideoCapturer::GetSupportedFormats() const { | 126 const std::vector<VideoFormat>* VideoCapturer::GetSupportedFormats() const { |
100 return &filtered_supported_formats_; | 127 return &filtered_supported_formats_; |
101 } | 128 } |
102 | 129 |
103 bool VideoCapturer::StartCapturing(const VideoFormat& capture_format) { | 130 bool VideoCapturer::StartCapturing(const VideoFormat& capture_format) { |
104 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
105 previous_frame_time_ = frame_length_time_reporter_.TimerNow(); | 131 previous_frame_time_ = frame_length_time_reporter_.TimerNow(); |
106 CaptureState result = Start(capture_format); | 132 CaptureState result = Start(capture_format); |
107 const bool success = (result == CS_RUNNING) || (result == CS_STARTING); | 133 const bool success = (result == CS_RUNNING) || (result == CS_STARTING); |
108 if (!success) { | 134 if (!success) { |
109 return false; | 135 return false; |
110 } | 136 } |
111 if (result == CS_RUNNING) { | 137 if (result == CS_RUNNING) { |
112 SetCaptureState(result); | 138 SetCaptureState(result); |
113 } | 139 } |
114 return true; | 140 return true; |
115 } | 141 } |
116 | 142 |
| 143 void VideoCapturer::UpdateAspectRatio(int ratio_w, int ratio_h) { |
| 144 if (ratio_w == 0 || ratio_h == 0) { |
| 145 LOG(LS_WARNING) << "UpdateAspectRatio ignored invalid ratio: " |
| 146 << ratio_w << "x" << ratio_h; |
| 147 return; |
| 148 } |
| 149 ratio_w_ = ratio_w; |
| 150 ratio_h_ = ratio_h; |
| 151 } |
| 152 |
| 153 void VideoCapturer::ClearAspectRatio() { |
| 154 ratio_w_ = 0; |
| 155 ratio_h_ = 0; |
| 156 } |
| 157 |
| 158 // Override this to have more control of how your device is started/stopped. |
| 159 bool VideoCapturer::Pause(bool pause) { |
| 160 if (pause) { |
| 161 if (capture_state() == CS_PAUSED) { |
| 162 return true; |
| 163 } |
| 164 bool is_running = capture_state() == CS_STARTING || |
| 165 capture_state() == CS_RUNNING; |
| 166 if (!is_running) { |
| 167 LOG(LS_ERROR) << "Cannot pause a stopped camera."; |
| 168 return false; |
| 169 } |
| 170 LOG(LS_INFO) << "Pausing a camera."; |
| 171 rtc::scoped_ptr<VideoFormat> capture_format_when_paused( |
| 172 capture_format_ ? new VideoFormat(*capture_format_) : NULL); |
| 173 Stop(); |
| 174 SetCaptureState(CS_PAUSED); |
| 175 // If you override this function be sure to restore the capture format |
| 176 // after calling Stop(). |
| 177 SetCaptureFormat(capture_format_when_paused.get()); |
| 178 } else { // Unpause. |
| 179 if (capture_state() != CS_PAUSED) { |
| 180 LOG(LS_WARNING) << "Cannot unpause a camera that hasn't been paused."; |
| 181 return false; |
| 182 } |
| 183 if (!capture_format_) { |
| 184 LOG(LS_ERROR) << "Missing capture_format_, cannot unpause a camera."; |
| 185 return false; |
| 186 } |
| 187 if (muted_) { |
| 188 LOG(LS_WARNING) << "Camera cannot be unpaused while muted."; |
| 189 return false; |
| 190 } |
| 191 LOG(LS_INFO) << "Unpausing a camera."; |
| 192 if (!Start(*capture_format_)) { |
| 193 LOG(LS_ERROR) << "Camera failed to start when unpausing."; |
| 194 return false; |
| 195 } |
| 196 } |
| 197 return true; |
| 198 } |
| 199 |
| 200 bool VideoCapturer::Restart(const VideoFormat& capture_format) { |
| 201 if (!IsRunning()) { |
| 202 return StartCapturing(capture_format); |
| 203 } |
| 204 |
| 205 if (GetCaptureFormat() != NULL && *GetCaptureFormat() == capture_format) { |
 | 206 // The requested format is the same; nothing to do. |
| 207 return true; |
| 208 } |
| 209 |
| 210 Stop(); |
| 211 return StartCapturing(capture_format); |
| 212 } |
| 213 |
| 214 bool VideoCapturer::MuteToBlackThenPause(bool muted) { |
| 215 if (muted == IsMuted()) { |
| 216 return true; |
| 217 } |
| 218 |
| 219 LOG(LS_INFO) << (muted ? "Muting" : "Unmuting") << " this video capturer."; |
| 220 muted_ = muted; // Do this before calling Pause(). |
| 221 if (muted) { |
| 222 // Reset black frame count down. |
| 223 black_frame_count_down_ = kNumBlackFramesOnMute; |
 | 224 // Following frames will be overwritten with black, then the camera will be |
| 225 // paused. |
| 226 return true; |
| 227 } |
| 228 // Start the camera. |
| 229 thread_->Clear(this, MSG_DO_PAUSE); |
| 230 return Pause(false); |
| 231 } |
| 232 |
117 void VideoCapturer::SetSupportedFormats( | 233 void VideoCapturer::SetSupportedFormats( |
118 const std::vector<VideoFormat>& formats) { | 234 const std::vector<VideoFormat>& formats) { |
119 // This method is OK to call during initialization on a separate thread. | |
120 RTC_DCHECK(capture_state_ == CS_STOPPED || | |
121 thread_checker_.CalledOnValidThread()); | |
122 supported_formats_ = formats; | 235 supported_formats_ = formats; |
123 UpdateFilteredSupportedFormats(); | 236 UpdateFilteredSupportedFormats(); |
124 } | 237 } |
125 | 238 |
126 bool VideoCapturer::GetBestCaptureFormat(const VideoFormat& format, | 239 bool VideoCapturer::GetBestCaptureFormat(const VideoFormat& format, |
127 VideoFormat* best_format) { | 240 VideoFormat* best_format) { |
128 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
129 // TODO(fbarchard): Directly support max_format. | 241 // TODO(fbarchard): Directly support max_format. |
130 UpdateFilteredSupportedFormats(); | 242 UpdateFilteredSupportedFormats(); |
131 const std::vector<VideoFormat>* supported_formats = GetSupportedFormats(); | 243 const std::vector<VideoFormat>* supported_formats = GetSupportedFormats(); |
132 | 244 |
133 if (supported_formats->empty()) { | 245 if (supported_formats->empty()) { |
134 return false; | 246 return false; |
135 } | 247 } |
136 LOG(LS_INFO) << " Capture Requested " << format.ToString(); | 248 LOG(LS_INFO) << " Capture Requested " << format.ToString(); |
137 int64_t best_distance = kMaxDistance; | 249 int64_t best_distance = kMaxDistance; |
138 std::vector<VideoFormat>::const_iterator best = supported_formats->end(); | 250 std::vector<VideoFormat>::const_iterator best = supported_formats->end(); |
(...skipping 18 matching lines...) Expand all Loading... |
157 best_format->height = best->height; | 269 best_format->height = best->height; |
158 best_format->fourcc = best->fourcc; | 270 best_format->fourcc = best->fourcc; |
159 best_format->interval = best->interval; | 271 best_format->interval = best->interval; |
160 LOG(LS_INFO) << " Best " << best_format->ToString() << " Interval " | 272 LOG(LS_INFO) << " Best " << best_format->ToString() << " Interval " |
161 << best_format->interval << " distance " << best_distance; | 273 << best_format->interval << " distance " << best_distance; |
162 } | 274 } |
163 return true; | 275 return true; |
164 } | 276 } |
165 | 277 |
166 void VideoCapturer::ConstrainSupportedFormats(const VideoFormat& max_format) { | 278 void VideoCapturer::ConstrainSupportedFormats(const VideoFormat& max_format) { |
167 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
168 max_format_.reset(new VideoFormat(max_format)); | 279 max_format_.reset(new VideoFormat(max_format)); |
169 LOG(LS_VERBOSE) << " ConstrainSupportedFormats " << max_format.ToString(); | 280 LOG(LS_VERBOSE) << " ConstrainSupportedFormats " << max_format.ToString(); |
170 UpdateFilteredSupportedFormats(); | 281 UpdateFilteredSupportedFormats(); |
171 } | 282 } |
172 | 283 |
173 std::string VideoCapturer::ToString(const CapturedFrame* captured_frame) const { | 284 std::string VideoCapturer::ToString(const CapturedFrame* captured_frame) const { |
174 std::string fourcc_name = GetFourccName(captured_frame->fourcc) + " "; | 285 std::string fourcc_name = GetFourccName(captured_frame->fourcc) + " "; |
175 for (std::string::const_iterator i = fourcc_name.begin(); | 286 for (std::string::const_iterator i = fourcc_name.begin(); |
176 i < fourcc_name.end(); ++i) { | 287 i < fourcc_name.end(); ++i) { |
177 // Test character is printable; Avoid isprint() which asserts on negatives. | 288 // Test character is printable; Avoid isprint() which asserts on negatives. |
(...skipping 23 matching lines...) Expand all Loading... |
201 GetVariableSnapshot(adapt_frame_drops_data_, adapt_drops_stats); | 312 GetVariableSnapshot(adapt_frame_drops_data_, adapt_drops_stats); |
202 GetVariableSnapshot(frame_time_data_, frame_time_stats); | 313 GetVariableSnapshot(frame_time_data_, frame_time_stats); |
203 *last_captured_frame_format = last_captured_frame_format_; | 314 *last_captured_frame_format = last_captured_frame_format_; |
204 | 315 |
205 adapt_frame_drops_data_.Reset(); | 316 adapt_frame_drops_data_.Reset(); |
206 frame_time_data_.Reset(); | 317 frame_time_data_.Reset(); |
207 } | 318 } |
208 | 319 |
209 void VideoCapturer::RemoveSink( | 320 void VideoCapturer::RemoveSink( |
210 rtc::VideoSinkInterface<cricket::VideoFrame>* sink) { | 321 rtc::VideoSinkInterface<cricket::VideoFrame>* sink) { |
211 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
212 broadcaster_.RemoveSink(sink); | 322 broadcaster_.RemoveSink(sink); |
213 } | 323 } |
214 | 324 |
215 void VideoCapturer::AddOrUpdateSink( | 325 void VideoCapturer::AddOrUpdateSink( |
216 rtc::VideoSinkInterface<cricket::VideoFrame>* sink, | 326 rtc::VideoSinkInterface<cricket::VideoFrame>* sink, |
217 const rtc::VideoSinkWants& wants) { | 327 const rtc::VideoSinkWants& wants) { |
218 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
219 broadcaster_.AddOrUpdateSink(sink, wants); | 328 broadcaster_.AddOrUpdateSink(sink, wants); |
220 OnSinkWantsChanged(broadcaster_.wants()); | 329 OnSinkWantsChanged(broadcaster_.wants()); |
221 } | 330 } |
222 | 331 |
223 void VideoCapturer::OnSinkWantsChanged(const rtc::VideoSinkWants& wants) { | 332 void VideoCapturer::OnSinkWantsChanged(const rtc::VideoSinkWants& wants) { |
224 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
225 apply_rotation_ = wants.rotation_applied; | 333 apply_rotation_ = wants.rotation_applied; |
226 if (frame_factory_) { | 334 if (frame_factory_) { |
227 frame_factory_->SetApplyRotation(apply_rotation_); | 335 frame_factory_->SetApplyRotation(apply_rotation_); |
228 } | 336 } |
229 } | 337 } |
230 | 338 |
231 void VideoCapturer::OnFrameCaptured(VideoCapturer*, | 339 void VideoCapturer::OnFrameCaptured(VideoCapturer*, |
232 const CapturedFrame* captured_frame) { | 340 const CapturedFrame* captured_frame) { |
| 341 if (muted_) { |
| 342 if (black_frame_count_down_ == 0) { |
| 343 thread_->Post(this, MSG_DO_PAUSE, NULL); |
| 344 } else { |
| 345 --black_frame_count_down_; |
| 346 } |
| 347 } |
| 348 |
233 if (!broadcaster_.frame_wanted()) { | 349 if (!broadcaster_.frame_wanted()) { |
234 return; | 350 return; |
235 } | 351 } |
236 | 352 |
237 // Use a temporary buffer to scale | 353 // Use a temporary buffer to scale |
238 rtc::scoped_ptr<uint8_t[]> scale_buffer; | 354 rtc::scoped_ptr<uint8_t[]> scale_buffer; |
| 355 |
239 if (IsScreencast()) { | 356 if (IsScreencast()) { |
240 int scaled_width, scaled_height; | 357 int scaled_width, scaled_height; |
241 int desired_screencast_fps = | 358 int desired_screencast_fps = capture_format_.get() ? |
242 capture_format_.get() | 359 VideoFormat::IntervalToFps(capture_format_->interval) : |
243 ? VideoFormat::IntervalToFps(capture_format_->interval) | 360 kDefaultScreencastFps; |
244 : kDefaultScreencastFps; | |
245 ComputeScale(captured_frame->width, captured_frame->height, | 361 ComputeScale(captured_frame->width, captured_frame->height, |
246 desired_screencast_fps, &scaled_width, &scaled_height); | 362 desired_screencast_fps, &scaled_width, &scaled_height); |
247 | 363 |
248 if (FOURCC_ARGB == captured_frame->fourcc && | 364 if (FOURCC_ARGB == captured_frame->fourcc && |
249 (scaled_width != captured_frame->width || | 365 (scaled_width != captured_frame->width || |
250 scaled_height != captured_frame->height)) { | 366 scaled_height != captured_frame->height)) { |
251 if (scaled_width != scaled_width_ || scaled_height != scaled_height_) { | 367 if (scaled_width != scaled_width_ || scaled_height != scaled_height_) { |
252 LOG(LS_INFO) << "Scaling Screencast from " << captured_frame->width | 368 LOG(LS_INFO) << "Scaling Screencast from " |
253 << "x" << captured_frame->height << " to " << scaled_width | 369 << captured_frame->width << "x" |
254 << "x" << scaled_height; | 370 << captured_frame->height << " to " |
| 371 << scaled_width << "x" << scaled_height; |
255 scaled_width_ = scaled_width; | 372 scaled_width_ = scaled_width; |
256 scaled_height_ = scaled_height; | 373 scaled_height_ = scaled_height; |
257 } | 374 } |
258 CapturedFrame* modified_frame = | 375 CapturedFrame* modified_frame = |
259 const_cast<CapturedFrame*>(captured_frame); | 376 const_cast<CapturedFrame*>(captured_frame); |
260 const int modified_frame_size = scaled_width * scaled_height * 4; | 377 const int modified_frame_size = scaled_width * scaled_height * 4; |
261 scale_buffer.reset(new uint8_t[modified_frame_size]); | 378 scale_buffer.reset(new uint8_t[modified_frame_size]); |
262 // Compute new width such that width * height is less than maximum but | 379 // Compute new width such that width * height is less than maximum but |
263 // maintains original captured frame aspect ratio. | 380 // maintains original captured frame aspect ratio. |
264 // Round down width to multiple of 4 so odd width won't round up beyond | 381 // Round down width to multiple of 4 so odd width won't round up beyond |
265 // maximum, and so chroma channel is even width to simplify spatial | 382 // maximum, and so chroma channel is even width to simplify spatial |
266 // resampling. | 383 // resampling. |
267 libyuv::ARGBScale(reinterpret_cast<const uint8_t*>(captured_frame->data), | 384 libyuv::ARGBScale(reinterpret_cast<const uint8_t*>(captured_frame->data), |
268 captured_frame->width * 4, captured_frame->width, | 385 captured_frame->width * 4, captured_frame->width, |
269 captured_frame->height, scale_buffer.get(), | 386 captured_frame->height, scale_buffer.get(), |
270 scaled_width * 4, scaled_width, scaled_height, | 387 scaled_width * 4, scaled_width, scaled_height, |
271 libyuv::kFilterBilinear); | 388 libyuv::kFilterBilinear); |
272 modified_frame->width = scaled_width; | 389 modified_frame->width = scaled_width; |
273 modified_frame->height = scaled_height; | 390 modified_frame->height = scaled_height; |
274 modified_frame->data_size = scaled_width * 4 * scaled_height; | 391 modified_frame->data_size = scaled_width * 4 * scaled_height; |
275 modified_frame->data = scale_buffer.get(); | 392 modified_frame->data = scale_buffer.get(); |
276 } | 393 } |
277 } | 394 } |
278 | 395 |
279 const int kYuy2Bpp = 2; | 396 const int kYuy2Bpp = 2; |
280 const int kArgbBpp = 4; | 397 const int kArgbBpp = 4; |
281 // TODO(fbarchard): Make a helper function to adjust pixels to square. | 398 // TODO(fbarchard): Make a helper function to adjust pixels to square. |
282 // TODO(fbarchard): Hook up experiment to scaling. | 399 // TODO(fbarchard): Hook up experiment to scaling. |
| 400 // TODO(fbarchard): Avoid scale and convert if muted. |
283 // Temporary buffer is scoped here so it will persist until i420_frame.Init() | 401 // Temporary buffer is scoped here so it will persist until i420_frame.Init() |
284 // makes a copy of the frame, converting to I420. | 402 // makes a copy of the frame, converting to I420. |
285 rtc::scoped_ptr<uint8_t[]> temp_buffer; | 403 rtc::scoped_ptr<uint8_t[]> temp_buffer; |
286 // YUY2 can be scaled vertically using an ARGB scaler. Aspect ratio is only | 404 // YUY2 can be scaled vertically using an ARGB scaler. Aspect ratio is only |
287 // a problem on OSX. OSX always converts webcams to YUY2 or UYVY. | 405 // a problem on OSX. OSX always converts webcams to YUY2 or UYVY. |
288 bool can_scale = | 406 bool can_scale = |
289 FOURCC_YUY2 == CanonicalFourCC(captured_frame->fourcc) || | 407 FOURCC_YUY2 == CanonicalFourCC(captured_frame->fourcc) || |
290 FOURCC_UYVY == CanonicalFourCC(captured_frame->fourcc); | 408 FOURCC_UYVY == CanonicalFourCC(captured_frame->fourcc); |
291 | 409 |
292 // If pixels are not square, optionally use vertical scaling to make them | 410 // If pixels are not square, optionally use vertical scaling to make them |
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
400 adapted_width, adapted_height)); | 518 adapted_width, adapted_height)); |
401 | 519 |
402 if (!adapted_frame) { | 520 if (!adapted_frame) { |
403 // TODO(fbarchard): LOG more information about captured frame attributes. | 521 // TODO(fbarchard): LOG more information about captured frame attributes. |
404 LOG(LS_ERROR) << "Couldn't convert to I420! " | 522 LOG(LS_ERROR) << "Couldn't convert to I420! " |
405 << "From " << ToString(captured_frame) << " To " | 523 << "From " << ToString(captured_frame) << " To " |
406 << cropped_width << " x " << cropped_height; | 524 << cropped_width << " x " << cropped_height; |
407 return; | 525 return; |
408 } | 526 } |
409 | 527 |
| 528 if (muted_) { |
| 529 // TODO(pthatcher): Use frame_factory_->CreateBlackFrame() instead. |
| 530 adapted_frame->SetToBlack(); |
| 531 } |
410 SignalVideoFrame(this, adapted_frame.get()); | 532 SignalVideoFrame(this, adapted_frame.get()); |
411 UpdateStats(captured_frame); | 533 UpdateStats(captured_frame); |
412 } | 534 } |
413 | 535 |
414 void VideoCapturer::OnFrame(VideoCapturer* capturer, const VideoFrame* frame) { | 536 void VideoCapturer::OnFrame(VideoCapturer* capturer, const VideoFrame* frame) { |
415 broadcaster_.OnFrame(*frame); | 537 broadcaster_.OnFrame(*frame); |
416 } | 538 } |
417 | 539 |
418 void VideoCapturer::SetCaptureState(CaptureState state) { | 540 void VideoCapturer::SetCaptureState(CaptureState state) { |
419 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
420 if (state == capture_state_) { | 541 if (state == capture_state_) { |
421 // Don't trigger a state changed callback if the state hasn't changed. | 542 // Don't trigger a state changed callback if the state hasn't changed. |
422 return; | 543 return; |
423 } | 544 } |
| 545 StateChangeParams* state_params = new StateChangeParams(state); |
424 capture_state_ = state; | 546 capture_state_ = state; |
425 SignalStateChange(this, capture_state_); | 547 thread_->Post(this, MSG_STATE_CHANGE, state_params); |
| 548 } |
| 549 |
| 550 void VideoCapturer::OnMessage(rtc::Message* message) { |
| 551 switch (message->message_id) { |
| 552 case MSG_STATE_CHANGE: { |
| 553 rtc::scoped_ptr<StateChangeParams> p( |
| 554 static_cast<StateChangeParams*>(message->pdata)); |
| 555 SignalStateChange(this, p->data()); |
| 556 break; |
| 557 } |
| 558 case MSG_DO_PAUSE: { |
| 559 Pause(true); |
| 560 break; |
| 561 } |
| 562 case MSG_DO_UNPAUSE: { |
| 563 Pause(false); |
| 564 break; |
| 565 } |
| 566 default: { |
| 567 ASSERT(false); |
| 568 } |
| 569 } |
426 } | 570 } |
427 | 571 |
428 // Get the distance between the supported and desired formats. | 572 // Get the distance between the supported and desired formats. |
429 // Prioritization is done according to this algorithm: | 573 // Prioritization is done according to this algorithm: |
430 // 1) Width closeness. If not same, we prefer wider. | 574 // 1) Width closeness. If not same, we prefer wider. |
431 // 2) Height closeness. If not same, we prefer higher. | 575 // 2) Height closeness. If not same, we prefer higher. |
432 // 3) Framerate closeness. If not same, we prefer faster. | 576 // 3) Framerate closeness. If not same, we prefer faster. |
433 // 4) Compression. If desired format has a specific fourcc, we need exact match; | 577 // 4) Compression. If desired format has a specific fourcc, we need exact match; |
434 // otherwise, we use preference. | 578 // otherwise, we use preference. |
435 int64_t VideoCapturer::GetFormatDistance(const VideoFormat& desired, | 579 int64_t VideoCapturer::GetFormatDistance(const VideoFormat& desired, |
436 const VideoFormat& supported) { | 580 const VideoFormat& supported) { |
437 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
438 int64_t distance = kMaxDistance; | 581 int64_t distance = kMaxDistance; |
439 | 582 |
440 // Check fourcc. | 583 // Check fourcc. |
441 uint32_t supported_fourcc = CanonicalFourCC(supported.fourcc); | 584 uint32_t supported_fourcc = CanonicalFourCC(supported.fourcc); |
442 int64_t delta_fourcc = kMaxDistance; | 585 int64_t delta_fourcc = kMaxDistance; |
443 if (FOURCC_ANY == desired.fourcc) { | 586 if (FOURCC_ANY == desired.fourcc) { |
444 // Any fourcc is OK for the desired. Use preference to find best fourcc. | 587 // Any fourcc is OK for the desired. Use preference to find best fourcc. |
445 std::vector<uint32_t> preferred_fourccs; | 588 std::vector<uint32_t> preferred_fourccs; |
446 if (!GetPreferredFourccs(&preferred_fourccs)) { | 589 if (!GetPreferredFourccs(&preferred_fourccs)) { |
447 return distance; | 590 return distance; |
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
538 } | 681 } |
539 if (filtered_supported_formats_.empty()) { | 682 if (filtered_supported_formats_.empty()) { |
540 // The device only captures at resolutions higher than |max_format_| this | 683 // The device only captures at resolutions higher than |max_format_| this |
541 // indicates that |max_format_| should be ignored as it is better to capture | 684 // indicates that |max_format_| should be ignored as it is better to capture |
542 // at too high a resolution than to not capture at all. | 685 // at too high a resolution than to not capture at all. |
543 filtered_supported_formats_ = supported_formats_; | 686 filtered_supported_formats_ = supported_formats_; |
544 } | 687 } |
545 } | 688 } |
546 | 689 |
547 bool VideoCapturer::ShouldFilterFormat(const VideoFormat& format) const { | 690 bool VideoCapturer::ShouldFilterFormat(const VideoFormat& format) const { |
548 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
549 if (!enable_camera_list_) { | 691 if (!enable_camera_list_) { |
550 return false; | 692 return false; |
551 } | 693 } |
552 return format.width > max_format_->width || | 694 return format.width > max_format_->width || |
553 format.height > max_format_->height; | 695 format.height > max_format_->height; |
554 } | 696 } |
555 | 697 |
556 void VideoCapturer::UpdateStats(const CapturedFrame* captured_frame) { | 698 void VideoCapturer::UpdateStats(const CapturedFrame* captured_frame) { |
557 // Update stats protected from fetches from different thread. | 699 // Update stats protected from fetches from different thread. |
558 rtc::CritScope cs(&frame_stats_crit_); | 700 rtc::CritScope cs(&frame_stats_crit_); |
(...skipping 17 matching lines...) Expand all Loading... |
576 void VideoCapturer::GetVariableSnapshot( | 718 void VideoCapturer::GetVariableSnapshot( |
577 const rtc::RollingAccumulator<T>& data, | 719 const rtc::RollingAccumulator<T>& data, |
578 VariableInfo<T>* stats) { | 720 VariableInfo<T>* stats) { |
579 stats->max_val = data.ComputeMax(); | 721 stats->max_val = data.ComputeMax(); |
580 stats->mean = data.ComputeMean(); | 722 stats->mean = data.ComputeMean(); |
581 stats->min_val = data.ComputeMin(); | 723 stats->min_val = data.ComputeMin(); |
582 stats->variance = data.ComputeVariance(); | 724 stats->variance = data.ComputeVariance(); |
583 } | 725 } |
584 | 726 |
585 } // namespace cricket | 727 } // namespace cricket |
OLD | NEW |