OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 94 matching lines...) |
105 VP8Encoder* VP8Encoder::Create() { | 105 VP8Encoder* VP8Encoder::Create() { |
106 return new VP8EncoderImpl(); | 106 return new VP8EncoderImpl(); |
107 } | 107 } |
108 | 108 |
109 VP8Decoder* VP8Decoder::Create() { | 109 VP8Decoder* VP8Decoder::Create() { |
110 return new VP8DecoderImpl(); | 110 return new VP8DecoderImpl(); |
111 } | 111 } |
112 | 112 |
113 VP8EncoderImpl::VP8EncoderImpl() | 113 VP8EncoderImpl::VP8EncoderImpl() |
114 : encoded_complete_callback_(nullptr), | 114 : encoded_complete_callback_(nullptr), |
115 rate_allocator_(new SimulcastRateAllocator(codec_)), | |
116 inited_(false), | 115 inited_(false), |
117 timestamp_(0), | 116 timestamp_(0), |
118 feedback_mode_(false), | 117 feedback_mode_(false), |
119 qp_max_(56), // Setting for max quantizer. | 118 qp_max_(56), // Setting for max quantizer. |
120 cpu_speed_default_(-6), | 119 cpu_speed_default_(-6), |
121 number_of_cores_(0), | 120 number_of_cores_(0), |
122 rc_max_intra_target_(0), | 121 rc_max_intra_target_(0), |
123 token_partitions_(VP8_ONE_TOKENPARTITION), | 122 token_partitions_(VP8_ONE_TOKENPARTITION), |
124 down_scale_requested_(false), | 123 down_scale_requested_(false), |
125 down_scale_bitrate_(0), | 124 down_scale_bitrate_(0), |
(...skipping 41 matching lines...) |
167 raw_images_.pop_back(); | 166 raw_images_.pop_back(); |
168 } | 167 } |
169 while (!temporal_layers_.empty()) { | 168 while (!temporal_layers_.empty()) { |
170 delete temporal_layers_.back(); | 169 delete temporal_layers_.back(); |
171 temporal_layers_.pop_back(); | 170 temporal_layers_.pop_back(); |
172 } | 171 } |
173 inited_ = false; | 172 inited_ = false; |
174 return ret_val; | 173 return ret_val; |
175 } | 174 } |
176 | 175 |
177 int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit, | 176 int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate, |
178 uint32_t new_framerate) { | 177 uint32_t new_framerate) { |
179 if (!inited_) { | 178 if (!inited_) |
180 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 179 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 180 |
| 181 if (encoders_[0].err) |
| 182 return WEBRTC_VIDEO_CODEC_ERROR; |
| 183 |
| 184 if (new_framerate < 1) |
| 185 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 186 |
| 187 if (bitrate.get_sum_bps() == 0) { |
| 188 // Encoder paused, turn off all encoding. |
| 189     const int num_streams = static_cast<int>(encoders_.size()); |
| 190 for (int i = 0; i < num_streams; ++i) |
| 191 SetStreamState(false, i); |
| 192 return WEBRTC_VIDEO_CODEC_OK; |
181 } | 193 } |
182 if (encoders_[0].err) { | 194 |
183 return WEBRTC_VIDEO_CODEC_ERROR; | 195 // At this point, bitrate allocation should already match codec settings. |
184 } | 196 if (codec_.maxBitrate > 0) |
185 if (new_framerate < 1) { | 197 RTC_DCHECK_LE(bitrate.get_sum_kbps(), codec_.maxBitrate); |
186 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 198 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.minBitrate); |
187 } | 199 if (codec_.numberOfSimulcastStreams > 0) |
188 if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) { | 200 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.simulcastStream[0].minBitrate); |
189 new_bitrate_kbit = codec_.maxBitrate; | 201 |
190 } | |
191 if (new_bitrate_kbit < codec_.minBitrate) { | |
192 new_bitrate_kbit = codec_.minBitrate; | |
193 } | |
194 if (codec_.numberOfSimulcastStreams > 0 && | |
195 new_bitrate_kbit < codec_.simulcastStream[0].minBitrate) { | |
196 new_bitrate_kbit = codec_.simulcastStream[0].minBitrate; | |
197 } | |
198 codec_.maxFramerate = new_framerate; | 202 codec_.maxFramerate = new_framerate; |
199 | 203 |
200 if (encoders_.size() == 1) { | 204 if (encoders_.size() == 1) { |
201 // 1:1. | 205 // 1:1. |
202     // Calculate a rough limit for when to trigger a potential down scale. | 206     // Calculate a rough limit for when to trigger a potential down scale. |
203 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; | 207 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; |
204 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work | 208 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work |
205 // around the current limitations. | 209 // around the current limitations. |
206 // Only trigger keyframes if we are allowed to scale down. | 210 // Only trigger keyframes if we are allowed to scale down. |
207 if (configurations_[0].rc_resize_allowed) { | 211 if (configurations_[0].rc_resize_allowed) { |
208 if (!down_scale_requested_) { | 212 if (!down_scale_requested_) { |
209 if (k_pixels_per_frame > new_bitrate_kbit) { | 213 if (k_pixels_per_frame > bitrate.get_sum_kbps()) { |
210 down_scale_requested_ = true; | 214 down_scale_requested_ = true; |
211 down_scale_bitrate_ = new_bitrate_kbit; | 215 down_scale_bitrate_ = bitrate.get_sum_kbps(); |
212 key_frame_request_[0] = true; | 216 key_frame_request_[0] = true; |
213 } | 217 } |
214 } else { | 218 } else { |
215 if (new_bitrate_kbit > (2 * down_scale_bitrate_) || | 219 if (bitrate.get_sum_kbps() > (2 * down_scale_bitrate_) || |
216 new_bitrate_kbit < (down_scale_bitrate_ / 2)) { | 220 bitrate.get_sum_kbps() < (down_scale_bitrate_ / 2)) { |
217 down_scale_requested_ = false; | 221 down_scale_requested_ = false; |
218 } | 222 } |
219 } | 223 } |
220 } | 224 } |
221 } else { | 225 } else { |
222 // If we have more than 1 stream, reduce the qp_max for the low resolution | 226 // If we have more than 1 stream, reduce the qp_max for the low resolution |
223 // stream if frame rate is not too low. The trade-off with lower qp_max is | 227 // stream if frame rate is not too low. The trade-off with lower qp_max is |
224 // possibly more dropped frames, so we only do this if the frame rate is | 228 // possibly more dropped frames, so we only do this if the frame rate is |
225 // above some threshold (base temporal layer is down to 1/4 for 3 layers). | 229 // above some threshold (base temporal layer is down to 1/4 for 3 layers). |
226 // We may want to condition this on bitrate later. | 230 // We may want to condition this on bitrate later. |
227 if (new_framerate > 20) { | 231 if (new_framerate > 20) { |
228 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; | 232 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; |
229 } else { | 233 } else { |
230 // Go back to default value set in InitEncode. | 234 // Go back to default value set in InitEncode. |
231 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; | 235 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; |
232 } | 236 } |
233 } | 237 } |
234 | 238 |
235 std::vector<uint32_t> stream_bitrates = | |
236 rate_allocator_->GetAllocation(new_bitrate_kbit); | |
237 size_t stream_idx = encoders_.size() - 1; | 239 size_t stream_idx = encoders_.size() - 1; |
238 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { | 240 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { |
239 if (encoders_.size() > 1) | 241 unsigned int target_bitrate_kbps = |
240 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 242 bitrate.GetSpatialLayerSum(stream_idx) / 1000; |
241 | 243 |
242 unsigned int target_bitrate = stream_bitrates[stream_idx]; | 244 bool send_stream = target_bitrate_kbps > 0; |
243 unsigned int max_bitrate = codec_.maxBitrate; | 245 if (send_stream || encoders_.size() > 1) |
244 int framerate = new_framerate; | 246 SetStreamState(send_stream, stream_idx); |
245 // TODO(holmer): This is a temporary hack for screensharing, where we | 247 |
246 // interpret the startBitrate as the encoder target bitrate. This is | 248 configurations_[i].rc_target_bitrate = target_bitrate_kbps; |
247 // to allow for a different max bitrate, so if the codec can't meet | 249 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]); |
248 // the target we still allow it to overshoot up to the max before dropping | 250 |
249 // frames. This hack should be improved. | |
250 if (codec_.targetBitrate > 0 && | |
251 (codec_.VP8()->numberOfTemporalLayers == 2 || | |
252 codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { | |
253 int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); | |
254 max_bitrate = std::min(codec_.maxBitrate, target_bitrate); | |
255 target_bitrate = tl0_bitrate; | |
256 } | |
257 configurations_[i].rc_target_bitrate = target_bitrate; | |
258 temporal_layers_[stream_idx]->ConfigureBitrates( | |
259 target_bitrate, max_bitrate, framerate, &configurations_[i]); | |
260 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { | 251 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { |
261 return WEBRTC_VIDEO_CODEC_ERROR; | 252 return WEBRTC_VIDEO_CODEC_ERROR; |
262 } | 253 } |
263 } | 254 } |
264 quality_scaler_.ReportFramerate(new_framerate); | 255 quality_scaler_.ReportFramerate(new_framerate); |
265 return WEBRTC_VIDEO_CODEC_OK; | 256 return WEBRTC_VIDEO_CODEC_OK; |
266 } | 257 } |
267 | 258 |
268 void VP8EncoderImpl::OnDroppedFrame() { | 259 void VP8EncoderImpl::OnDroppedFrame() { |
269 if (quality_scaler_enabled_) | 260 if (quality_scaler_enabled_) |
270 quality_scaler_.ReportDroppedFrame(); | 261 quality_scaler_.ReportDroppedFrame(); |
271 } | 262 } |
272 | 263 |
273 const char* VP8EncoderImpl::ImplementationName() const { | 264 const char* VP8EncoderImpl::ImplementationName() const { |
274 return "libvpx"; | 265 return "libvpx"; |
275 } | 266 } |
276 | 267 |
277 void VP8EncoderImpl::SetStreamState(bool send_stream, | 268 void VP8EncoderImpl::SetStreamState(bool send_stream, |
278 int stream_idx) { | 269 int stream_idx) { |
279 if (send_stream && !send_stream_[stream_idx]) { | 270 if (send_stream && !send_stream_[stream_idx]) { |
280 // Need a key frame if we have not sent this stream before. | 271 // Need a key frame if we have not sent this stream before. |
281 key_frame_request_[stream_idx] = true; | 272 key_frame_request_[stream_idx] = true; |
282 } | 273 } |
283 send_stream_[stream_idx] = send_stream; | 274 send_stream_[stream_idx] = send_stream; |
284 } | 275 } |
285 | 276 |
286 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, | 277 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, |
287 int num_temporal_layers, | 278 int num_temporal_layers, |
288 const VideoCodec& codec) { | 279 const VideoCodec& codec) { |
289 TemporalLayersFactory default_factory; | 280 RTC_DCHECK(codec.codecSpecific.VP8.tl_factory != nullptr); |
290 const TemporalLayersFactory* tl_factory = codec.VP8().tl_factory; | 281 const TemporalLayersFactory* tl_factory = codec.codecSpecific.VP8.tl_factory; |
291 if (!tl_factory) | |
292 tl_factory = &default_factory; | |
293 if (num_streams == 1) { | 282 if (num_streams == 1) { |
294 if (codec.mode == kScreensharing) { | 283 temporal_layers_.push_back( |
295 // Special mode when screensharing on a single stream. | 284 tl_factory->Create(0, num_temporal_layers, rand())); |
296 temporal_layers_.push_back(new ScreenshareLayers( | |
297 num_temporal_layers, rand(), webrtc::Clock::GetRealTimeClock())); | |
298 } else { | |
299 temporal_layers_.push_back( | |
300 tl_factory->Create(num_temporal_layers, rand())); | |
301 } | |
302 } else { | 285 } else { |
303 for (int i = 0; i < num_streams; ++i) { | 286 for (int i = 0; i < num_streams; ++i) { |
304 // TODO(andresp): crash if layers is invalid. | 287 RTC_CHECK_GT(num_temporal_layers, 0); |
305 int layers = codec.simulcastStream[i].numberOfTemporalLayers; | 288 int layers = std::max(static_cast<uint8_t>(1), |
306 if (layers < 1) | 289 codec.simulcastStream[i].numberOfTemporalLayers); |
307 layers = 1; | 290 temporal_layers_.push_back(tl_factory->Create(i, layers, rand())); |
308 temporal_layers_.push_back(tl_factory->Create(layers, rand())); | |
309 } | 291 } |
310 } | 292 } |
311 } | 293 } |
312 | 294 |
313 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, | 295 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, |
314 int number_of_cores, | 296 int number_of_cores, |
315 size_t /*maxPayloadSize */) { | 297 size_t /*maxPayloadSize */) { |
316 if (inst == NULL) { | 298 if (inst == NULL) { |
317 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 299 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
318 } | 300 } |
(...skipping 24 matching lines...) |
343 int number_of_streams = NumberOfStreams(*inst); | 325 int number_of_streams = NumberOfStreams(*inst); |
344 bool doing_simulcast = (number_of_streams > 1); | 326 bool doing_simulcast = (number_of_streams > 1); |
345 | 327 |
346 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { | 328 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { |
347 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 329 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
348 } | 330 } |
349 | 331 |
350 int num_temporal_layers = | 332 int num_temporal_layers = |
351 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers | 333 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers |
352 : inst->VP8().numberOfTemporalLayers; | 334 : inst->VP8().numberOfTemporalLayers; |
| 335 RTC_DCHECK_GT(num_temporal_layers, 0); |
353 | 336 |
354 // TODO(andresp): crash if num temporal layers is bananas. | |
355 if (num_temporal_layers < 1) | |
356 num_temporal_layers = 1; | |
357 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); | 337 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); |
358 | 338 |
359 feedback_mode_ = inst->VP8().feedbackModeOn; | 339 feedback_mode_ = inst->VP8().feedbackModeOn; |
360 | 340 |
361 number_of_cores_ = number_of_cores; | 341 number_of_cores_ = number_of_cores; |
362 timestamp_ = 0; | 342 timestamp_ = 0; |
363 codec_ = *inst; | 343 codec_ = *inst; |
364 rate_allocator_.reset(new SimulcastRateAllocator(codec_)); | |
365 | 344 |
366 // Code expects simulcastStream resolutions to be correct, make sure they are | 345 // Code expects simulcastStream resolutions to be correct, make sure they are |
367 // filled even when there are no simulcast layers. | 346 // filled even when there are no simulcast layers. |
368 if (codec_.numberOfSimulcastStreams == 0) { | 347 if (codec_.numberOfSimulcastStreams == 0) { |
369 codec_.simulcastStream[0].width = codec_.width; | 348 codec_.simulcastStream[0].width = codec_.width; |
370 codec_.simulcastStream[0].height = codec_.height; | 349 codec_.simulcastStream[0].height = codec_.height; |
371 } | 350 } |
372 | 351 |
373 picture_id_.resize(number_of_streams); | 352 picture_id_.resize(number_of_streams); |
374 last_key_frame_picture_id_.resize(number_of_streams); | 353 last_key_frame_picture_id_.resize(number_of_streams); |
(...skipping 131 matching lines...) |
506 // TODO(fbarchard): Consider number of Simulcast layers. | 485 // TODO(fbarchard): Consider number of Simulcast layers. |
507 configurations_[0].g_threads = NumberOfThreads( | 486 configurations_[0].g_threads = NumberOfThreads( |
508 configurations_[0].g_w, configurations_[0].g_h, number_of_cores); | 487 configurations_[0].g_w, configurations_[0].g_h, number_of_cores); |
509 | 488 |
510 // Creating a wrapper to the image - setting image data to NULL. | 489 // Creating a wrapper to the image - setting image data to NULL. |
511 // Actual pointer will be set in encode. Setting align to 1, as it | 490 // Actual pointer will be set in encode. Setting align to 1, as it |
512 // is meaningless (no memory allocation is done here). | 491 // is meaningless (no memory allocation is done here). |
513 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, | 492 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, |
514 NULL); | 493 NULL); |
515 | 494 |
516 if (encoders_.size() == 1) { | 495 // Note the order we use is different from webm, we have lowest resolution |
517 configurations_[0].rc_target_bitrate = inst->startBitrate; | 496 // at position 0 and they have highest resolution at position 0. |
518 temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate, | 497 int stream_idx = encoders_.size() - 1; |
519 inst->maxFramerate, | 498 SimulcastRateAllocator init_allocator(codec_, nullptr); |
520 &configurations_[0]); | 499 BitrateAllocation allocation = init_allocator.GetAllocation( |
521 } else { | 500 inst->startBitrate * 1000, inst->maxFramerate); |
522 // Note the order we use is different from webm, we have lowest resolution | 501 std::vector<uint32_t> stream_bitrates; |
523 // at position 0 and they have highest resolution at position 0. | 502 for (int i = 0; i == 0 || i < inst->numberOfSimulcastStreams; ++i) { |
524 int stream_idx = encoders_.size() - 1; | 503 uint32_t bitrate = allocation.GetSpatialLayerSum(i) / 1000; |
525 std::vector<uint32_t> stream_bitrates = | 504 stream_bitrates.push_back(bitrate); |
526 rate_allocator_->GetAllocation(inst->startBitrate); | 505 } |
| 506 |
| 507 configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; |
| 508 temporal_layers_[stream_idx]->OnRatesUpdated( |
| 509 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate); |
| 510 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[0]); |
| 511 --stream_idx; |
| 512 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) { |
| 513 memcpy(&configurations_[i], &configurations_[0], |
| 514 sizeof(configurations_[0])); |
| 515 |
| 516 configurations_[i].g_w = inst->simulcastStream[stream_idx].width; |
| 517 configurations_[i].g_h = inst->simulcastStream[stream_idx].height; |
| 518 |
| 519 // Use 1 thread for lower resolutions. |
| 520 configurations_[i].g_threads = 1; |
| 521 |
| 522 // Setting alignment to 32 - as that ensures at least 16 for all |
| 523 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for |
| 524 // the y plane, but only half of it to the u and v planes. |
| 525 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, |
| 526 inst->simulcastStream[stream_idx].width, |
| 527 inst->simulcastStream[stream_idx].height, kVp832ByteAlign); |
527 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 528 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); |
528 configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; | 529 configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx]; |
529 temporal_layers_[stream_idx]->ConfigureBitrates( | 530 temporal_layers_[stream_idx]->OnRatesUpdated( |
530 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, | 531 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate); |
531 &configurations_[0]); | 532 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]); |
532 --stream_idx; | |
533 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) { | |
534 memcpy(&configurations_[i], &configurations_[0], | |
535 sizeof(configurations_[0])); | |
536 | |
537 configurations_[i].g_w = inst->simulcastStream[stream_idx].width; | |
538 configurations_[i].g_h = inst->simulcastStream[stream_idx].height; | |
539 | |
540 // Use 1 thread for lower resolutions. | |
541 configurations_[i].g_threads = 1; | |
542 | |
543 // Setting alignment to 32 - as that ensures at least 16 for all | |
544 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for | |
545 // the y plane, but only half of it to the u and v planes. | |
546 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, | |
547 inst->simulcastStream[stream_idx].width, | |
548 inst->simulcastStream[stream_idx].height, kVp832ByteAlign); | |
549 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | |
550 configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx]; | |
551 temporal_layers_[stream_idx]->ConfigureBitrates( | |
552 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, | |
553 &configurations_[i]); | |
554 } | |
555 } | 533 } |
556 | 534 |
557 rps_.Init(); | 535 rps_.Init(); |
558 quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width, | 536 quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width, |
559 codec_.height, codec_.maxFramerate); | 537 codec_.height, codec_.maxFramerate); |
560 | 538 |
561   // Only apply the quality scaler for single-layer streams. The scaling metrics | 539   // Only apply the quality scaler for single-layer streams. The scaling metrics |
562   // use frame drops as a signal and are only applicable when we drop frames. | 540   // use frame drops as a signal and are only applicable when we drop frames. |
563 quality_scaler_enabled_ = encoders_.size() == 1 && | 541 quality_scaler_enabled_ = encoders_.size() == 1 && |
564 configurations_[0].rc_dropframe_thresh > 0 && | 542 configurations_[0].rc_dropframe_thresh > 0 && |
(...skipping 761 matching lines...) |
1326 return -1; | 1304 return -1; |
1327 } | 1305 } |
1328 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != | 1306 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != |
1329 VPX_CODEC_OK) { | 1307 VPX_CODEC_OK) { |
1330 return -1; | 1308 return -1; |
1331 } | 1309 } |
1332 return 0; | 1310 return 0; |
1333 } | 1311 } |
1334 | 1312 |
1335 } // namespace webrtc | 1313 } // namespace webrtc |
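
As a quick illustration of the API change in this CL (SetRates(new_bitrate_kbit, new_framerate) becoming SetRateAllocation(const BitrateAllocation&, new_framerate)), here is a minimal, self-contained sketch of the per-stream loop. It is hypothetical: MockAllocation stands in for webrtc::BitrateAllocation and only mimics the two accessors the diff uses (GetSpatialLayerSum() and get_sum_bps()); the sketch stops at computing per-encoder kbps targets and send/no-send decisions rather than driving libvpx.

// Hypothetical sketch, not WebRTC code: MockAllocation is a stand-in for
// webrtc::BitrateAllocation and only models the two accessors used in the diff.
#include <cstdint>
#include <cstdio>
#include <vector>

struct MockAllocation {
  std::vector<uint32_t> layer_bps;  // Per-spatial-layer budget, in bps.
  uint32_t GetSpatialLayerSum(size_t idx) const { return layer_bps[idx]; }
  uint32_t get_sum_bps() const {
    uint32_t sum = 0;
    for (uint32_t bps : layer_bps)
      sum += bps;
    return sum;
  }
};

int main() {
  // Three simulcast streams, lowest resolution first; the middle stream gets
  // no budget, so the real encoder would turn it off via SetStreamState().
  MockAllocation allocation{{150000, 0, 600000}};

  if (allocation.get_sum_bps() == 0) {
    // Mirrors the "encoder paused" branch: all streams off.
    std::printf("paused\n");
    return 0;
  }

  const size_t num_encoders = allocation.layer_bps.size();
  // Spatial layers are indexed lowest-resolution-first while the encoders
  // follow the webm ordering (highest resolution first), so stream_idx counts
  // down while the encoder index i counts up, as in the diff.
  size_t stream_idx = num_encoders - 1;
  for (size_t i = 0; i < num_encoders; ++i, --stream_idx) {
    unsigned int target_bitrate_kbps =
        allocation.GetSpatialLayerSum(stream_idx) / 1000;
    bool send_stream = target_bitrate_kbps > 0;
    // In the real code these values feed SetStreamState() and
    // configurations_[i].rc_target_bitrate before vpx_codec_enc_config_set().
    std::printf("encoder %zu <- stream %zu: %u kbps, send=%d\n", i, stream_idx,
                target_bitrate_kbps, send_stream ? 1 : 0);
  }
  return 0;
}

The division by 1000 reflects that BitrateAllocation carries bits per second while libvpx's rc_target_bitrate is expressed in kilobits per second.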