OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 94 matching lines...) | |
105 VP8Encoder* VP8Encoder::Create() { | 105 VP8Encoder* VP8Encoder::Create() { |
106 return new VP8EncoderImpl(); | 106 return new VP8EncoderImpl(); |
107 } | 107 } |
108 | 108 |
109 VP8Decoder* VP8Decoder::Create() { | 109 VP8Decoder* VP8Decoder::Create() { |
110 return new VP8DecoderImpl(); | 110 return new VP8DecoderImpl(); |
111 } | 111 } |
112 | 112 |
113 VP8EncoderImpl::VP8EncoderImpl() | 113 VP8EncoderImpl::VP8EncoderImpl() |
114 : encoded_complete_callback_(nullptr), | 114 : encoded_complete_callback_(nullptr), |
115 rate_allocator_(new SimulcastRateAllocator(codec_)), | |
116 inited_(false), | 115 inited_(false), |
117 timestamp_(0), | 116 timestamp_(0), |
118 feedback_mode_(false), | 117 feedback_mode_(false), |
119 qp_max_(56), // Setting for max quantizer. | 118 qp_max_(56), // Setting for max quantizer. |
120 cpu_speed_default_(-6), | 119 cpu_speed_default_(-6), |
121 rc_max_intra_target_(0), | 120 rc_max_intra_target_(0), |
122 token_partitions_(VP8_ONE_TOKENPARTITION), | 121 token_partitions_(VP8_ONE_TOKENPARTITION), |
123 down_scale_requested_(false), | 122 down_scale_requested_(false), |
124 down_scale_bitrate_(0), | 123 down_scale_bitrate_(0), |
125 key_frame_request_(kMaxSimulcastStreams, false), | 124 key_frame_request_(kMaxSimulcastStreams, false), |
(...skipping 40 matching lines...) | |
166 raw_images_.pop_back(); | 165 raw_images_.pop_back(); |
167 } | 166 } |
168 while (!temporal_layers_.empty()) { | 167 while (!temporal_layers_.empty()) { |
169 delete temporal_layers_.back(); | 168 delete temporal_layers_.back(); |
170 temporal_layers_.pop_back(); | 169 temporal_layers_.pop_back(); |
171 } | 170 } |
172 inited_ = false; | 171 inited_ = false; |
173 return ret_val; | 172 return ret_val; |
174 } | 173 } |
175 | 174 |
176 int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit, | 175 int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate, |
177 uint32_t new_framerate) { | 176 uint32_t new_framerate) { |
178 if (!inited_) { | 177 if (!inited_) |
179 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 178 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
180 } | 179 |
181 if (encoders_[0].err) { | 180 if (encoders_[0].err) |
182 return WEBRTC_VIDEO_CODEC_ERROR; | 181 return WEBRTC_VIDEO_CODEC_ERROR; |
183 } | 182 |
184 if (new_framerate < 1) { | 183 if (new_framerate < 1) |
184 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
185 | |
186 // At this point, bitrate allocation should already match codec settings. | |
187 if (codec_.maxBitrate > 0 && bitrate.get_sum_kbps() > codec_.maxBitrate) | |
stefan-webrtc
2016/11/02 10:26:35
Should we DCHECK instead if it should match at this point?
sprang_webrtc
2016/11/02 13:28:33
Yes, that is probably a good idea.
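A minimal sketch of the DCHECK variant discussed here, assuming the allocation is indeed guaranteed to match the codec settings by the time SetRateAllocation() is called. The RTC_DCHECK macros are the ones this patch already uses elsewhere; the exact placement and form of the checks are illustrative, not part of the CL.

```cpp
// Illustrative only: assert instead of returning an error, if the contract
// "allocation already matches codec settings" is expected to hold here.
RTC_DCHECK(codec_.maxBitrate == 0 ||
           bitrate.get_sum_kbps() <= codec_.maxBitrate);
RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.minBitrate);
```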
| |
188 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
189 | |
190 if (bitrate.get_sum_kbps() < codec_.minBitrate) | |
191 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
192 | |
193 if (codec_.numberOfSimulcastStreams > 0 && | |
194 bitrate.get_sum_kbps() < codec_.simulcastStream[0].minBitrate) { | |
185 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 195 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
186 } | 196 } |
187 if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) { | 197 |
188 new_bitrate_kbit = codec_.maxBitrate; | |
189 } | |
190 if (new_bitrate_kbit < codec_.minBitrate) { | |
191 new_bitrate_kbit = codec_.minBitrate; | |
192 } | |
193 if (codec_.numberOfSimulcastStreams > 0 && | |
194 new_bitrate_kbit < codec_.simulcastStream[0].minBitrate) { | |
195 new_bitrate_kbit = codec_.simulcastStream[0].minBitrate; | |
196 } | |
197 codec_.maxFramerate = new_framerate; | 198 codec_.maxFramerate = new_framerate; |
198 | 199 |
199 if (encoders_.size() == 1) { | 200 if (encoders_.size() == 1) { |
200 // 1:1. | 201 // 1:1. |
201 // Calculate a rough limit for when to trigger a potential down scale. | 202 // Calculate a rough limit for when to trigger a potential down scale. |
202 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; | 203 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; |
203 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work | 204 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work |
204 // around the current limitations. | 205 // around the current limitations. |
205 // Only trigger keyframes if we are allowed to scale down. | 206 // Only trigger keyframes if we are allowed to scale down. |
206 if (configurations_[0].rc_resize_allowed) { | 207 if (configurations_[0].rc_resize_allowed) { |
207 if (!down_scale_requested_) { | 208 if (!down_scale_requested_) { |
208 if (k_pixels_per_frame > new_bitrate_kbit) { | 209 if (k_pixels_per_frame > bitrate.get_sum_kbps()) { |
209 down_scale_requested_ = true; | 210 down_scale_requested_ = true; |
210 down_scale_bitrate_ = new_bitrate_kbit; | 211 down_scale_bitrate_ = bitrate.get_sum_kbps(); |
211 key_frame_request_[0] = true; | 212 key_frame_request_[0] = true; |
212 } | 213 } |
213 } else { | 214 } else { |
214 if (new_bitrate_kbit > (2 * down_scale_bitrate_) || | 215 if (bitrate.get_sum_kbps() > (2 * down_scale_bitrate_) || |
215 new_bitrate_kbit < (down_scale_bitrate_ / 2)) { | 216 bitrate.get_sum_kbps() < (down_scale_bitrate_ / 2)) { |
216 down_scale_requested_ = false; | 217 down_scale_requested_ = false; |
217 } | 218 } |
218 } | 219 } |
219 } | 220 } |
220 } else { | 221 } else { |
221 // If we have more than 1 stream, reduce the qp_max for the low resolution | 222 // If we have more than 1 stream, reduce the qp_max for the low resolution |
222 // stream if frame rate is not too low. The trade-off with lower qp_max is | 223 // stream if frame rate is not too low. The trade-off with lower qp_max is |
223 // possibly more dropped frames, so we only do this if the frame rate is | 224 // possibly more dropped frames, so we only do this if the frame rate is |
224 // above some threshold (base temporal layer is down to 1/4 for 3 layers). | 225 // above some threshold (base temporal layer is down to 1/4 for 3 layers). |
225 // We may want to condition this on bitrate later. | 226 // We may want to condition this on bitrate later. |
226 if (new_framerate > 20) { | 227 if (new_framerate > 20) { |
227 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; | 228 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; |
228 } else { | 229 } else { |
229 // Go back to default value set in InitEncode. | 230 // Go back to default value set in InitEncode. |
230 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; | 231 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; |
231 } | 232 } |
232 } | 233 } |
233 | 234 |
234 std::vector<uint32_t> stream_bitrates = | |
235 rate_allocator_->GetAllocation(new_bitrate_kbit); | |
236 size_t stream_idx = encoders_.size() - 1; | 235 size_t stream_idx = encoders_.size() - 1; |
237 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { | 236 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { |
237 unsigned int target_bitrate_kbps = | |
238 bitrate.GetSpatialLayerSum(stream_idx) / 1000; | |
239 | |
238 if (encoders_.size() > 1) | 240 if (encoders_.size() > 1) |
239 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 241 SetStreamState(target_bitrate_kbps > 0, stream_idx); |
240 | 242 |
241 unsigned int target_bitrate = stream_bitrates[stream_idx]; | 243 configurations_[i].rc_target_bitrate = target_bitrate_kbps; |
242 unsigned int max_bitrate = codec_.maxBitrate; | 244 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]); |
243 int framerate = new_framerate; | 245 |
244 // TODO(holmer): This is a temporary hack for screensharing, where we | |
245 // interpret the startBitrate as the encoder target bitrate. This is | |
246 // to allow for a different max bitrate, so if the codec can't meet | |
247 // the target we still allow it to overshoot up to the max before dropping | |
248 // frames. This hack should be improved. | |
249 if (codec_.targetBitrate > 0 && | |
250 (codec_.VP8()->numberOfTemporalLayers == 2 || | |
251 codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { | |
252 int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); | |
253 max_bitrate = std::min(codec_.maxBitrate, target_bitrate); | |
254 target_bitrate = tl0_bitrate; | |
255 } | |
256 configurations_[i].rc_target_bitrate = target_bitrate; | |
257 temporal_layers_[stream_idx]->ConfigureBitrates( | |
258 target_bitrate, max_bitrate, framerate, &configurations_[i]); | |
259 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { | 246 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { |
260 return WEBRTC_VIDEO_CODEC_ERROR; | 247 return WEBRTC_VIDEO_CODEC_ERROR; |
261 } | 248 } |
262 } | 249 } |
263 quality_scaler_.ReportFramerate(new_framerate); | 250 quality_scaler_.ReportFramerate(new_framerate); |
264 return WEBRTC_VIDEO_CODEC_OK; | 251 return WEBRTC_VIDEO_CODEC_OK; |
265 } | 252 } |
266 | 253 |
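For context on the interface change above (SetRates() taking an aggregate kbps value replaced by SetRateAllocation() taking a BitrateAllocation), here is a hedged caller-side sketch. SimulcastRateAllocator(codec, nullptr) and GetAllocation(bits_per_second, framerate) are used exactly as this patch uses them in InitEncode(); the helper name, the assumption that SetRateAllocation() is reachable through the public encoder interface, and the error handling are illustrative.

```cpp
// Hedged sketch, not part of this CL. Assumes the usual WebRTC headers for
// VideoCodec, VP8Encoder, SimulcastRateAllocator and BitrateAllocation.
int UpdateVp8Rates(webrtc::VP8Encoder* encoder,
                   const webrtc::VideoCodec& codec,
                   uint32_t target_bitrate_bps,
                   uint32_t framerate_fps) {
  // The allocator splits the aggregate target across simulcast streams and
  // temporal layers. Note the units: GetAllocation() takes bits per second
  // (InitEncode() below multiplies the kbps startBitrate by 1000).
  webrtc::SimulcastRateAllocator allocator(codec, nullptr);
  webrtc::BitrateAllocation allocation =
      allocator.GetAllocation(target_bitrate_bps, framerate_fps);
  // The encoder now consumes the whole per-stream/per-layer allocation instead
  // of a single aggregate kbps value, so it no longer needs rate_allocator_.
  return encoder->SetRateAllocation(allocation, framerate_fps);
}
```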
267 void VP8EncoderImpl::OnDroppedFrame() { | 254 void VP8EncoderImpl::OnDroppedFrame() { |
268 if (quality_scaler_enabled_) | 255 if (quality_scaler_enabled_) |
269 quality_scaler_.ReportDroppedFrame(); | 256 quality_scaler_.ReportDroppedFrame(); |
270 } | 257 } |
271 | 258 |
272 const char* VP8EncoderImpl::ImplementationName() const { | 259 const char* VP8EncoderImpl::ImplementationName() const { |
273 return "libvpx"; | 260 return "libvpx"; |
274 } | 261 } |
275 | 262 |
276 void VP8EncoderImpl::SetStreamState(bool send_stream, | 263 void VP8EncoderImpl::SetStreamState(bool send_stream, |
277 int stream_idx) { | 264 int stream_idx) { |
278 if (send_stream && !send_stream_[stream_idx]) { | 265 if (send_stream && !send_stream_[stream_idx]) { |
279 // Need a key frame if we have not sent this stream before. | 266 // Need a key frame if we have not sent this stream before. |
280 key_frame_request_[stream_idx] = true; | 267 key_frame_request_[stream_idx] = true; |
281 } | 268 } |
282 send_stream_[stream_idx] = send_stream; | 269 send_stream_[stream_idx] = send_stream; |
283 } | 270 } |
284 | 271 |
285 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, | 272 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, |
286 int num_temporal_layers, | 273 int num_temporal_layers, |
287 const VideoCodec& codec) { | 274 const VideoCodec& codec) { |
288 TemporalLayersFactory default_factory; | 275 RTC_DCHECK(codec.codecSpecific.VP8.tl_factory != nullptr); |
289 const TemporalLayersFactory* tl_factory = codec.VP8().tl_factory; | 276 const TemporalLayersFactory* tl_factory = codec.codecSpecific.VP8.tl_factory; |
290 if (!tl_factory) | |
291 tl_factory = &default_factory; | |
292 if (num_streams == 1) { | 277 if (num_streams == 1) { |
293 if (codec.mode == kScreensharing) { | 278 temporal_layers_.push_back( |
294 // Special mode when screensharing on a single stream. | 279 tl_factory->Create(0, num_temporal_layers, rand())); |
295 temporal_layers_.push_back(new ScreenshareLayers( | |
296 num_temporal_layers, rand(), webrtc::Clock::GetRealTimeClock())); | |
297 } else { | |
298 temporal_layers_.push_back( | |
299 tl_factory->Create(num_temporal_layers, rand())); | |
300 } | |
301 } else { | 280 } else { |
302 for (int i = 0; i < num_streams; ++i) { | 281 for (int i = 0; i < num_streams; ++i) { |
303 // TODO(andresp): crash if layers is invalid. | 282 RTC_CHECK_GT(num_temporal_layers, 0); |
304 int layers = codec.simulcastStream[i].numberOfTemporalLayers; | 283 int layers = std::max(static_cast<uint8_t>(1), |
305 if (layers < 1) | 284 codec.simulcastStream[i].numberOfTemporalLayers); |
306 layers = 1; | 285 temporal_layers_.push_back(tl_factory->Create(i, layers, rand())); |
307 temporal_layers_.push_back(tl_factory->Create(layers, rand())); | |
308 } | 286 } |
309 } | 287 } |
310 } | 288 } |
311 | 289 |
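Since the default-factory fallback is gone and SetupTemporalLayers() now RTC_DCHECKs the factory pointer, callers are expected to install a TemporalLayersFactory before InitEncode(). A hedged sketch, with the factory type and the Create(stream_index, num_temporal_layers, seed) signature taken from this patch and the surrounding setup assumed:

```cpp
// Illustrative only: install a temporal-layers factory on the codec settings
// so the RTC_DCHECK in SetupTemporalLayers() is satisfied.
webrtc::VideoCodec codec;                  // remaining fields filled elsewhere
webrtc::TemporalLayersFactory tl_factory;  // default factory, as before
codec.codecSpecific.VP8.tl_factory = &tl_factory;
// encoder->InitEncode(&codec, number_of_cores, max_payload_size);
```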
312 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, | 290 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, |
313 int number_of_cores, | 291 int number_of_cores, |
314 size_t /*maxPayloadSize */) { | 292 size_t /*maxPayloadSize */) { |
315 if (inst == NULL) { | 293 if (inst == NULL) { |
316 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 294 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
317 } | 295 } |
(...skipping 24 matching lines...) | |
342 int number_of_streams = NumberOfStreams(*inst); | 320 int number_of_streams = NumberOfStreams(*inst); |
343 bool doing_simulcast = (number_of_streams > 1); | 321 bool doing_simulcast = (number_of_streams > 1); |
344 | 322 |
345 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { | 323 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { |
346 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 324 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
347 } | 325 } |
348 | 326 |
349 int num_temporal_layers = | 327 int num_temporal_layers = |
350 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers | 328 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers |
351 : inst->VP8().numberOfTemporalLayers; | 329 : inst->VP8().numberOfTemporalLayers; |
330 RTC_DCHECK_GT(num_temporal_layers, 0); | |
352 | 331 |
353 // TODO(andresp): crash if num temporal layers is bananas. | |
354 if (num_temporal_layers < 1) | |
355 num_temporal_layers = 1; | |
356 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); | 332 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); |
357 | 333 |
358 feedback_mode_ = inst->VP8().feedbackModeOn; | 334 feedback_mode_ = inst->VP8().feedbackModeOn; |
359 | |
360 timestamp_ = 0; | 335 timestamp_ = 0; |
361 codec_ = *inst; | 336 codec_ = *inst; |
362 rate_allocator_.reset(new SimulcastRateAllocator(codec_)); | |
363 | 337 |
364 // Code expects simulcastStream resolutions to be correct, make sure they are | 338 // Code expects simulcastStream resolutions to be correct, make sure they are |
365 // filled even when there are no simulcast layers. | 339 // filled even when there are no simulcast layers. |
366 if (codec_.numberOfSimulcastStreams == 0) { | 340 if (codec_.numberOfSimulcastStreams == 0) { |
367 codec_.simulcastStream[0].width = codec_.width; | 341 codec_.simulcastStream[0].width = codec_.width; |
368 codec_.simulcastStream[0].height = codec_.height; | 342 codec_.simulcastStream[0].height = codec_.height; |
369 } | 343 } |
370 | 344 |
371 picture_id_.resize(number_of_streams); | 345 picture_id_.resize(number_of_streams); |
372 last_key_frame_picture_id_.resize(number_of_streams); | 346 last_key_frame_picture_id_.resize(number_of_streams); |
(...skipping 131 matching lines...) | |
504 // TODO(fbarchard): Consider number of Simulcast layers. | 478 // TODO(fbarchard): Consider number of Simulcast layers. |
505 configurations_[0].g_threads = NumberOfThreads( | 479 configurations_[0].g_threads = NumberOfThreads( |
506 configurations_[0].g_w, configurations_[0].g_h, number_of_cores); | 480 configurations_[0].g_w, configurations_[0].g_h, number_of_cores); |
507 | 481 |
508 // Creating a wrapper to the image - setting image data to NULL. | 482 // Creating a wrapper to the image - setting image data to NULL. |
509 // Actual pointer will be set in encode. Setting align to 1, as it | 483 // Actual pointer will be set in encode. Setting align to 1, as it |
510 // is meaningless (no memory allocation is done here). | 484 // is meaningless (no memory allocation is done here). |
511 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, | 485 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, |
512 NULL); | 486 NULL); |
513 | 487 |
514 if (encoders_.size() == 1) { | 488 // Note the order we use is different from webm, we have lowest resolution |
515 configurations_[0].rc_target_bitrate = inst->startBitrate; | 489 // at position 0 and they have highest resolution at position 0. |
516 temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate, | 490 int stream_idx = encoders_.size() - 1; |
517 inst->maxFramerate, | 491 SimulcastRateAllocator init_allocator(codec_, nullptr); |
518 &configurations_[0]); | 492 BitrateAllocation allocation = init_allocator.GetAllocation( |
519 } else { | 493 inst->startBitrate * 1000, inst->maxFramerate); |
520 // Note the order we use is different from webm, we have lowest resolution | 494 std::vector<uint32_t> stream_bitrates; |
521 // at position 0 and they have highest resolution at position 0. | 495 for (int i = 0; i == 0 || i < inst->numberOfSimulcastStreams; ++i) { |
522 int stream_idx = encoders_.size() - 1; | 496 uint32_t bitrate = allocation.GetSpatialLayerSum(i) / 1000; |
523 std::vector<uint32_t> stream_bitrates = | 497 stream_bitrates.push_back(bitrate); |
524 rate_allocator_->GetAllocation(inst->startBitrate); | 498 } |
499 | |
500 configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; | |
501 temporal_layers_[stream_idx]->OnRatesUpdated( | |
502 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate); | |
503 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[0]); | |
504 --stream_idx; | |
505 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) { | |
506 memcpy(&configurations_[i], &configurations_[0], | |
507 sizeof(configurations_[0])); | |
508 | |
509 configurations_[i].g_w = inst->simulcastStream[stream_idx].width; | |
510 configurations_[i].g_h = inst->simulcastStream[stream_idx].height; | |
511 | |
512 // Use 1 thread for lower resolutions. | |
513 configurations_[i].g_threads = 1; | |
514 | |
515 // Setting alignment to 32 - as that ensures at least 16 for all | |
516 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for | |
517 // the y plane, but only half of it to the u and v planes. | |
518 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, | |
519 inst->simulcastStream[stream_idx].width, | |
520 inst->simulcastStream[stream_idx].height, kVp832ByteAlign); | |
525 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 521 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); |
526 configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; | 522 configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx]; |
527 temporal_layers_[stream_idx]->ConfigureBitrates( | 523 temporal_layers_[stream_idx]->OnRatesUpdated( |
528 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, | 524 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate); |
529 &configurations_[0]); | 525 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]); |
530 --stream_idx; | |
531 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) { | |
532 memcpy(&configurations_[i], &configurations_[0], | |
533 sizeof(configurations_[0])); | |
534 | |
535 configurations_[i].g_w = inst->simulcastStream[stream_idx].width; | |
536 configurations_[i].g_h = inst->simulcastStream[stream_idx].height; | |
537 | |
538 // Use 1 thread for lower resolutions. | |
539 configurations_[i].g_threads = 1; | |
540 | |
541 // Setting alignment to 32 - as that ensures at least 16 for all | |
542 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for | |
543 // the y plane, but only half of it to the u and v planes. | |
544 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, | |
545 inst->simulcastStream[stream_idx].width, | |
546 inst->simulcastStream[stream_idx].height, kVp832ByteAlign); | |
547 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | |
548 configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx]; | |
549 temporal_layers_[stream_idx]->ConfigureBitrates( | |
550 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, | |
551 &configurations_[i]); | |
552 } | |
553 } | 526 } |
554 | 527 |
555 rps_.Init(); | 528 rps_.Init(); |
556 quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width, | 529 quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width, |
557 codec_.height, codec_.maxFramerate); | 530 codec_.height, codec_.maxFramerate); |
558 | 531 |
559 // Only apply scaling to improve for single-layer streams. The scaling metrics | 532 // Only apply scaling to improve for single-layer streams. The scaling metrics |
560 // use frame drops as a signal and are only applicable when we drop frames. | 533 // use frame drops as a signal and are only applicable when we drop frames. |
561 quality_scaler_enabled_ = encoders_.size() == 1 && | 534 quality_scaler_enabled_ = encoders_.size() == 1 && |
562 configurations_[0].rc_dropframe_thresh > 0 && | 535 configurations_[0].rc_dropframe_thresh > 0 && |
(...skipping 752 matching lines...) | |
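One note on the InitEncode() hunk above, before the decoder code resumes below: the loop fills configurations_ in libvpx order while indexing simulcastStream[] in WebRTC order. A small hedged helper spells out the mapping; the function does not exist in the patch and is purely illustrative.

```cpp
// Illustrative only: WebRTC orders simulcastStream[] lowest resolution first,
// while configurations_/encoders_ are ordered highest resolution first, so
// configuration i corresponds to simulcast stream (num_encoders - 1 - i).
size_t SimulcastIndexForConfig(size_t config_index, size_t num_encoders) {
  return num_encoders - 1 - config_index;
}
```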
1315 return -1; | 1288 return -1; |
1316 } | 1289 } |
1317 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != | 1290 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != |
1318 VPX_CODEC_OK) { | 1291 VPX_CODEC_OK) { |
1319 return -1; | 1292 return -1; |
1320 } | 1293 } |
1321 return 0; | 1294 return 0; |
1322 } | 1295 } |
1323 | 1296 |
1324 } // namespace webrtc | 1297 } // namespace webrtc |