OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 95 matching lines...)
106 VP8Encoder* VP8Encoder::Create() { | 106 VP8Encoder* VP8Encoder::Create() { |
107 return new VP8EncoderImpl(); | 107 return new VP8EncoderImpl(); |
108 } | 108 } |
109 | 109 |
110 VP8Decoder* VP8Decoder::Create() { | 110 VP8Decoder* VP8Decoder::Create() { |
111 return new VP8DecoderImpl(); | 111 return new VP8DecoderImpl(); |
112 } | 112 } |
113 | 113 |
114 VP8EncoderImpl::VP8EncoderImpl() | 114 VP8EncoderImpl::VP8EncoderImpl() |
115 : encoded_complete_callback_(nullptr), | 115 : encoded_complete_callback_(nullptr), |
116 rate_allocator_(new SimulcastRateAllocator(codec_)), | |
117 inited_(false), | 116 inited_(false), |
118 timestamp_(0), | 117 timestamp_(0), |
119 feedback_mode_(false), | 118 feedback_mode_(false), |
120 qp_max_(56), // Setting for max quantizer. | 119 qp_max_(56), // Setting for max quantizer. |
121 cpu_speed_default_(-6), | 120 cpu_speed_default_(-6), |
122 number_of_cores_(0), | 121 number_of_cores_(0), |
123 rc_max_intra_target_(0), | 122 rc_max_intra_target_(0), |
124 token_partitions_(VP8_ONE_TOKENPARTITION), | 123 token_partitions_(VP8_ONE_TOKENPARTITION), |
125 down_scale_requested_(false), | 124 down_scale_requested_(false), |
126 down_scale_bitrate_(0), | 125 down_scale_bitrate_(0), |
(...skipping 41 matching lines...)
168 raw_images_.pop_back(); | 167 raw_images_.pop_back(); |
169 } | 168 } |
170 while (!temporal_layers_.empty()) { | 169 while (!temporal_layers_.empty()) { |
171 delete temporal_layers_.back(); | 170 delete temporal_layers_.back(); |
172 temporal_layers_.pop_back(); | 171 temporal_layers_.pop_back(); |
173 } | 172 } |
174 inited_ = false; | 173 inited_ = false; |
175 return ret_val; | 174 return ret_val; |
176 } | 175 } |
177 | 176 |
178 int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit, | 177 int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate, |
179 uint32_t new_framerate) { | 178 uint32_t new_framerate) { |
180 if (!inited_) { | 179 if (!inited_) |
181 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 180 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| 181 |
| 182 if (encoders_[0].err) |
| 183 return WEBRTC_VIDEO_CODEC_ERROR; |
| 184 |
| 185 if (new_framerate < 1) |
| 186 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
| 187 |
| 188 if (bitrate.get_sum_bps() == 0) { |
| 189 // Encoder paused, turn off all encoding. |
| 190 const int num_streams = static_cast<int>(encoders_.size()); |
| 191 for (int i = 0; i < num_streams; ++i) |
| 192 SetStreamState(false, i); |
| 193 return WEBRTC_VIDEO_CODEC_OK; |
182 } | 194 } |
183 if (encoders_[0].err) { | 195 |
184 return WEBRTC_VIDEO_CODEC_ERROR; | 196 // At this point, bitrate allocation should already match codec settings. |
185 } | 197 if (codec_.maxBitrate > 0) |
186 if (new_framerate < 1) { | 198 RTC_DCHECK_LE(bitrate.get_sum_kbps(), codec_.maxBitrate); |
187 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 199 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.minBitrate); |
188 } | 200 if (codec_.numberOfSimulcastStreams > 0) |
189 if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) { | 201 RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.simulcastStream[0].minBitrate); |
190 new_bitrate_kbit = codec_.maxBitrate; | 202 |
191 } | |
192 if (new_bitrate_kbit < codec_.minBitrate) { | |
193 new_bitrate_kbit = codec_.minBitrate; | |
194 } | |
195 if (codec_.numberOfSimulcastStreams > 0 && | |
196 new_bitrate_kbit < codec_.simulcastStream[0].minBitrate) { | |
197 new_bitrate_kbit = codec_.simulcastStream[0].minBitrate; | |
198 } | |
199 codec_.maxFramerate = new_framerate; | 203 codec_.maxFramerate = new_framerate; |
200 | 204 |
201 if (encoders_.size() == 1) { | 205 if (encoders_.size() == 1) { |
202 // 1:1. | 206 // 1:1. |
203 // Calculate a rough limit for when to trigger a potential down scale. | 207 // Calculate a rough limit for when to trigger a potential down scale. |
204 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; | 208 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; |
205 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work | 209 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work |
206 // around the current limitations. | 210 // around the current limitations. |
207 // Only trigger keyframes if we are allowed to scale down. | 211 // Only trigger keyframes if we are allowed to scale down. |
208 if (configurations_[0].rc_resize_allowed) { | 212 if (configurations_[0].rc_resize_allowed) { |
209 if (!down_scale_requested_) { | 213 if (!down_scale_requested_) { |
210 if (k_pixels_per_frame > new_bitrate_kbit) { | 214 if (k_pixels_per_frame > bitrate.get_sum_kbps()) { |
211 down_scale_requested_ = true; | 215 down_scale_requested_ = true; |
212 down_scale_bitrate_ = new_bitrate_kbit; | 216 down_scale_bitrate_ = bitrate.get_sum_kbps(); |
213 key_frame_request_[0] = true; | 217 key_frame_request_[0] = true; |
214 } | 218 } |
215 } else { | 219 } else { |
216 if (new_bitrate_kbit > (2 * down_scale_bitrate_) || | 220 if (bitrate.get_sum_kbps() > (2 * down_scale_bitrate_) || |
217 new_bitrate_kbit < (down_scale_bitrate_ / 2)) { | 221 bitrate.get_sum_kbps() < (down_scale_bitrate_ / 2)) { |
218 down_scale_requested_ = false; | 222 down_scale_requested_ = false; |
219 } | 223 } |
220 } | 224 } |
221 } | 225 } |
222 } else { | 226 } else { |
223 // If we have more than 1 stream, reduce the qp_max for the low resolution | 227 // If we have more than 1 stream, reduce the qp_max for the low resolution |
224 // stream if frame rate is not too low. The trade-off with lower qp_max is | 228 // stream if frame rate is not too low. The trade-off with lower qp_max is |
225 // possibly more dropped frames, so we only do this if the frame rate is | 229 // possibly more dropped frames, so we only do this if the frame rate is |
226 // above some threshold (base temporal layer is down to 1/4 for 3 layers). | 230 // above some threshold (base temporal layer is down to 1/4 for 3 layers). |
227 // We may want to condition this on bitrate later. | 231 // We may want to condition this on bitrate later. |
228 if (new_framerate > 20) { | 232 if (new_framerate > 20) { |
229 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; | 233 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; |
230 } else { | 234 } else { |
231 // Go back to default value set in InitEncode. | 235 // Go back to default value set in InitEncode. |
232 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; | 236 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; |
233 } | 237 } |
234 } | 238 } |
235 | 239 |
236 std::vector<uint32_t> stream_bitrates = | |
237 rate_allocator_->GetAllocation(new_bitrate_kbit); | |
238 size_t stream_idx = encoders_.size() - 1; | 240 size_t stream_idx = encoders_.size() - 1; |
239 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { | 241 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) { |
240 if (encoders_.size() > 1) | 242 unsigned int target_bitrate_kbps = |
241 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 243 bitrate.GetSpatialLayerSum(stream_idx) / 1000; |
242 | 244 |
243 unsigned int target_bitrate = stream_bitrates[stream_idx]; | 245 bool send_stream = target_bitrate_kbps > 0; |
244 unsigned int max_bitrate = codec_.maxBitrate; | 246 if (send_stream || encoders_.size() > 1) |
245 int framerate = new_framerate; | 247 SetStreamState(send_stream, stream_idx); |
246 // TODO(holmer): This is a temporary hack for screensharing, where we | 248 |
247 // interpret the startBitrate as the encoder target bitrate. This is | 249 configurations_[i].rc_target_bitrate = target_bitrate_kbps; |
248 // to allow for a different max bitrate, so if the codec can't meet | 250 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]); |
249 // the target we still allow it to overshoot up to the max before dropping | 251 |
250 // frames. This hack should be improved. | |
251 if (codec_.targetBitrate > 0 && | |
252 (codec_.VP8()->numberOfTemporalLayers == 2 || | |
253 codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { | |
254 int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); | |
255 max_bitrate = std::min(codec_.maxBitrate, target_bitrate); | |
256 target_bitrate = tl0_bitrate; | |
257 } | |
258 configurations_[i].rc_target_bitrate = target_bitrate; | |
259 temporal_layers_[stream_idx]->ConfigureBitrates( | |
260 target_bitrate, max_bitrate, framerate, &configurations_[i]); | |
261 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { | 252 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { |
262 return WEBRTC_VIDEO_CODEC_ERROR; | 253 return WEBRTC_VIDEO_CODEC_ERROR; |
263 } | 254 } |
264 } | 255 } |
265 quality_scaler_.ReportFramerate(new_framerate); | 256 quality_scaler_.ReportFramerate(new_framerate); |
266 return WEBRTC_VIDEO_CODEC_OK; | 257 return WEBRTC_VIDEO_CODEC_OK; |
267 } | 258 } |
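For context, a minimal sketch of how a caller would drive the new entry point (the old SetRates() took a single kbps value plus a frame rate). The getters used in this change (get_sum_bps(), get_sum_kbps(), GetSpatialLayerSum()) appear above; the SetBitrate(spatial, temporal, bps) setter is assumed here purely for illustration:

    // Hypothetical call site: per-stream, per-temporal-layer rates in bps.
    webrtc::BitrateAllocation allocation;
    allocation.SetBitrate(0, 0, 300000);  // stream 0, TL0: 300 kbps (assumed setter)
    allocation.SetBitrate(1, 0, 600000);  // stream 1, TL0: 600 kbps
    int ret = encoder->SetRateAllocation(allocation, /*new_framerate=*/30);
    if (ret != WEBRTC_VIDEO_CODEC_OK) {
      // Encoder not initialized, libvpx error, or framerate < 1.
    }

A zero total (allocation.get_sum_bps() == 0) now pauses encoding by disabling every stream, instead of being clamped up to codec_.minBitrate as the old SetRates() path did.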
268 | 259 |
269 void VP8EncoderImpl::OnDroppedFrame() { | 260 void VP8EncoderImpl::OnDroppedFrame() { |
270 if (quality_scaler_enabled_) | 261 if (quality_scaler_enabled_) |
271 quality_scaler_.ReportDroppedFrame(); | 262 quality_scaler_.ReportDroppedFrame(); |
272 } | 263 } |
273 | 264 |
274 const char* VP8EncoderImpl::ImplementationName() const { | 265 const char* VP8EncoderImpl::ImplementationName() const { |
275 return "libvpx"; | 266 return "libvpx"; |
276 } | 267 } |
277 | 268 |
278 void VP8EncoderImpl::SetStreamState(bool send_stream, | 269 void VP8EncoderImpl::SetStreamState(bool send_stream, |
279 int stream_idx) { | 270 int stream_idx) { |
280 if (send_stream && !send_stream_[stream_idx]) { | 271 if (send_stream && !send_stream_[stream_idx]) { |
281 // Need a key frame if we have not sent this stream before. | 272 // Need a key frame if we have not sent this stream before. |
282 key_frame_request_[stream_idx] = true; | 273 key_frame_request_[stream_idx] = true; |
283 } | 274 } |
284 send_stream_[stream_idx] = send_stream; | 275 send_stream_[stream_idx] = send_stream; |
285 } | 276 } |
286 | 277 |
287 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, | 278 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, |
288 int num_temporal_layers, | 279 int num_temporal_layers, |
289 const VideoCodec& codec) { | 280 const VideoCodec& codec) { |
290 TemporalLayersFactory default_factory; | 281 RTC_DCHECK(codec.codecSpecific.VP8.tl_factory != nullptr); |
291 const TemporalLayersFactory* tl_factory = codec.VP8().tl_factory; | 282 const TemporalLayersFactory* tl_factory = codec.codecSpecific.VP8.tl_factory; |
292 if (!tl_factory) | |
293 tl_factory = &default_factory; | |
294 if (num_streams == 1) { | 283 if (num_streams == 1) { |
295 if (codec.mode == kScreensharing) { | 284 temporal_layers_.push_back( |
296 // Special mode when screensharing on a single stream. | 285 tl_factory->Create(0, num_temporal_layers, rand())); |
297 temporal_layers_.push_back(new ScreenshareLayers( | |
298 num_temporal_layers, rand(), webrtc::Clock::GetRealTimeClock())); | |
299 } else { | |
300 temporal_layers_.push_back( | |
301 tl_factory->Create(num_temporal_layers, rand())); | |
302 } | |
303 } else { | 286 } else { |
304 for (int i = 0; i < num_streams; ++i) { | 287 for (int i = 0; i < num_streams; ++i) { |
305 // TODO(andresp): crash if layers is invalid. | 288 RTC_CHECK_GT(num_temporal_layers, 0); |
306 int layers = codec.simulcastStream[i].numberOfTemporalLayers; | 289 int layers = std::max(static_cast<uint8_t>(1), |
307 if (layers < 1) | 290 codec.simulcastStream[i].numberOfTemporalLayers); |
308 layers = 1; | 291 temporal_layers_.push_back(tl_factory->Create(i, layers, rand())); |
309 temporal_layers_.push_back(tl_factory->Create(layers, rand())); | |
310 } | 292 } |
311 } | 293 } |
312 } | 294 } |
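Note that the built-in default factory (and the in-encoder ScreenshareLayers special case) is gone, so the caller must now supply a factory through the codec settings before InitEncode(). A hedged sketch of the expected setup, assuming TemporalLayersFactory is constructed and owned by the caller:

    // The factory must outlive the encoder; the encoder only keeps a pointer.
    webrtc::TemporalLayersFactory tl_factory;
    webrtc::VideoCodec codec_settings;
    codec_settings.codecType = webrtc::kVideoCodecVP8;
    // ... width, height, bitrates, simulcast streams as before ...
    codec_settings.codecSpecific.VP8.tl_factory = &tl_factory;
    encoder->InitEncode(&codec_settings, /*number_of_cores=*/4,
                        /*max_payload_size=*/1200);

Passing a null tl_factory now trips the RTC_DCHECK above rather than silently falling back to a default factory.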
313 | 295 |
314 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, | 296 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, |
315 int number_of_cores, | 297 int number_of_cores, |
316 size_t /*maxPayloadSize */) { | 298 size_t /*maxPayloadSize */) { |
317 if (inst == NULL) { | 299 if (inst == NULL) { |
318 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 300 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
319 } | 301 } |
(...skipping 24 matching lines...)
344 int number_of_streams = NumberOfStreams(*inst); | 326 int number_of_streams = NumberOfStreams(*inst); |
345 bool doing_simulcast = (number_of_streams > 1); | 327 bool doing_simulcast = (number_of_streams > 1); |
346 | 328 |
347 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { | 329 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { |
348 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 330 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; |
349 } | 331 } |
350 | 332 |
351 int num_temporal_layers = | 333 int num_temporal_layers = |
352 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers | 334 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers |
353 : inst->VP8().numberOfTemporalLayers; | 335 : inst->VP8().numberOfTemporalLayers; |
| 336 RTC_DCHECK_GT(num_temporal_layers, 0); |
354 | 337 |
355 // TODO(andresp): crash if num temporal layers is bananas. | |
356 if (num_temporal_layers < 1) | |
357 num_temporal_layers = 1; | |
358 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); | 338 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); |
359 | 339 |
360 feedback_mode_ = inst->VP8().feedbackModeOn; | 340 feedback_mode_ = inst->VP8().feedbackModeOn; |
361 | 341 |
362 number_of_cores_ = number_of_cores; | 342 number_of_cores_ = number_of_cores; |
363 timestamp_ = 0; | 343 timestamp_ = 0; |
364 codec_ = *inst; | 344 codec_ = *inst; |
365 rate_allocator_.reset(new SimulcastRateAllocator(codec_)); | |
366 | 345 |
367 // Code expects simulcastStream resolutions to be correct, make sure they are | 346 // Code expects simulcastStream resolutions to be correct, make sure they are |
368 // filled even when there are no simulcast layers. | 347 // filled even when there are no simulcast layers. |
369 if (codec_.numberOfSimulcastStreams == 0) { | 348 if (codec_.numberOfSimulcastStreams == 0) { |
370 codec_.simulcastStream[0].width = codec_.width; | 349 codec_.simulcastStream[0].width = codec_.width; |
371 codec_.simulcastStream[0].height = codec_.height; | 350 codec_.simulcastStream[0].height = codec_.height; |
372 } | 351 } |
373 | 352 |
374 picture_id_.resize(number_of_streams); | 353 picture_id_.resize(number_of_streams); |
375 last_key_frame_picture_id_.resize(number_of_streams); | 354 last_key_frame_picture_id_.resize(number_of_streams); |
(...skipping 131 matching lines...)
507 // TODO(fbarchard): Consider number of Simulcast layers. | 486 // TODO(fbarchard): Consider number of Simulcast layers. |
508 configurations_[0].g_threads = NumberOfThreads( | 487 configurations_[0].g_threads = NumberOfThreads( |
509 configurations_[0].g_w, configurations_[0].g_h, number_of_cores); | 488 configurations_[0].g_w, configurations_[0].g_h, number_of_cores); |
510 | 489 |
511 // Creating a wrapper to the image - setting image data to NULL. | 490 // Creating a wrapper to the image - setting image data to NULL. |
512 // Actual pointer will be set in encode. Setting align to 1, as it | 491 // Actual pointer will be set in encode. Setting align to 1, as it |
513 // is meaningless (no memory allocation is done here). | 492 // is meaningless (no memory allocation is done here). |
514 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, | 493 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, |
515 NULL); | 494 NULL); |
516 | 495 |
517 if (encoders_.size() == 1) { | 496 // Note the order we use is different from webm, we have lowest resolution |
518 configurations_[0].rc_target_bitrate = inst->startBitrate; | 497 // at position 0 and they have highest resolution at position 0. |
519 temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate, | 498 int stream_idx = encoders_.size() - 1; |
520 inst->maxFramerate, | 499 SimulcastRateAllocator init_allocator(codec_, nullptr); |
521 &configurations_[0]); | 500 BitrateAllocation allocation = init_allocator.GetAllocation( |
522 } else { | 501 inst->startBitrate * 1000, inst->maxFramerate); |
523 // Note the order we use is different from webm, we have lowest resolution | 502 std::vector<uint32_t> stream_bitrates; |
524 // at position 0 and they have highest resolution at position 0. | 503 for (int i = 0; i == 0 || i < inst->numberOfSimulcastStreams; ++i) { |
525 int stream_idx = encoders_.size() - 1; | 504 uint32_t bitrate = allocation.GetSpatialLayerSum(i) / 1000; |
526 std::vector<uint32_t> stream_bitrates = | 505 stream_bitrates.push_back(bitrate); |
527 rate_allocator_->GetAllocation(inst->startBitrate); | 506 } |
| 507 |
| 508 configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; |
| 509 temporal_layers_[stream_idx]->OnRatesUpdated( |
| 510 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate); |
| 511 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[0]); |
| 512 --stream_idx; |
| 513 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) { |
| 514 memcpy(&configurations_[i], &configurations_[0], |
| 515 sizeof(configurations_[0])); |
| 516 |
| 517 configurations_[i].g_w = inst->simulcastStream[stream_idx].width; |
| 518 configurations_[i].g_h = inst->simulcastStream[stream_idx].height; |
| 519 |
| 520 // Use 1 thread for lower resolutions. |
| 521 configurations_[i].g_threads = 1; |
| 522 |
| 523 // Setting alignment to 32 - as that ensures at least 16 for all |
| 524 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for |
| 525 // the y plane, but only half of it to the u and v planes. |
| 526 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, |
| 527 inst->simulcastStream[stream_idx].width, |
| 528 inst->simulcastStream[stream_idx].height, kVp832ByteAlign); |
528 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 529 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); |
529 configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; | 530 configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx]; |
530 temporal_layers_[stream_idx]->ConfigureBitrates( | 531 temporal_layers_[stream_idx]->OnRatesUpdated( |
531 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, | 532 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate); |
532 &configurations_[0]); | 533 temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]); |
533 --stream_idx; | |
534 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) { | |
535 memcpy(&configurations_[i], &configurations_[0], | |
536 sizeof(configurations_[0])); | |
537 | |
538 configurations_[i].g_w = inst->simulcastStream[stream_idx].width; | |
539 configurations_[i].g_h = inst->simulcastStream[stream_idx].height; | |
540 | |
541 // Use 1 thread for lower resolutions. | |
542 configurations_[i].g_threads = 1; | |
543 | |
544 // Setting alignment to 32 - as that ensures at least 16 for all | |
545 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for | |
546 // the y plane, but only half of it to the u and v planes. | |
547 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, | |
548 inst->simulcastStream[stream_idx].width, | |
549 inst->simulcastStream[stream_idx].height, kVp832ByteAlign); | |
550 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | |
551 configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx]; | |
552 temporal_layers_[stream_idx]->ConfigureBitrates( | |
553 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, | |
554 &configurations_[i]); | |
555 } | |
556 } | 534 } |
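To make the index juggling above explicit: configurations_[]/encoders_[] keep the highest resolution at index 0, while codec_.simulcastStream[] keeps the lowest at index 0, so the loop pairs them via stream_idx = encoders_.size() - 1 - i. A tiny helper, purely illustrative:

    // Illustrative only: configurations_[i]/encoders_[i] encode
    // codec_.simulcastStream[num_encoders - 1 - i].
    size_t StreamIndexForEncoder(size_t i, size_t num_encoders) {
      return num_encoders - 1 - i;
    }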
557 | 535 |
558 rps_.Init(); | 536 rps_.Init(); |
559 quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width, | 537 quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width, |
560 codec_.height, codec_.maxFramerate); | 538 codec_.height, codec_.maxFramerate); |
561 | 539 |
562 // Only apply scaling for single-layer streams. The scaling metrics use | 540 // Only apply scaling for single-layer streams. The scaling metrics use |
563 // frame drops as a signal and are only applicable when we drop frames. | 541 // frame drops as a signal and are only applicable when we drop frames. |
564 quality_scaler_enabled_ = encoders_.size() == 1 && | 542 quality_scaler_enabled_ = encoders_.size() == 1 && |
565 configurations_[0].rc_dropframe_thresh > 0 && | 543 configurations_[0].rc_dropframe_thresh > 0 && |
(...skipping 768 matching lines...)
1334 return -1; | 1312 return -1; |
1335 } | 1313 } |
1336 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != | 1314 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != |
1337 VPX_CODEC_OK) { | 1315 VPX_CODEC_OK) { |
1338 return -1; | 1316 return -1; |
1339 } | 1317 } |
1340 return 0; | 1318 return 0; |
1341 } | 1319 } |
1342 | 1320 |
1343 } // namespace webrtc | 1321 } // namespace webrtc |