Chromium Code Reviews

Unified Diff: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc

Issue 2434073003: Extract bitrate allocation of spatial/temporal layers out of codec impl. (Closed)
Patch Set: Fixed sign mismatch (created 4 years, 1 month ago)
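This patch set replaces VP8EncoderImpl::SetRates(bitrate_kbps, framerate) with SetRateAllocation(), which takes a per-layer BitrateAllocation computed outside the codec implementation. A minimal caller-side sketch of the new flow, assuming the encoder is reachable through the VideoEncoder interface; the helper name is hypothetical, headers are omitted, and the SimulcastRateAllocator construction with a null second argument mirrors the InitEncode() usage in this patch:

// Hypothetical helper: compute the per-layer allocation outside the codec
// implementation and hand the result to the encoder.
int UpdateEncoderRates(webrtc::VideoEncoder* encoder,
                       const webrtc::VideoCodec& codec,
                       uint32_t target_bitrate_bps,
                       uint32_t framerate_fps) {
  // The allocator splits the total target across simulcast/temporal layers.
  webrtc::SimulcastRateAllocator allocator(codec, nullptr);
  webrtc::BitrateAllocation allocation =
      allocator.GetAllocation(target_bitrate_bps, framerate_fps);
  // The codec no longer clamps or splits the bitrate itself; it validates the
  // allocation against its min/max settings and applies it per stream.
  return encoder->SetRateAllocation(allocation, framerate_fps);
}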
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -1,10 +1,10 @@
 /*
  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS. All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
@@ -105,21 +105,20 @@
 VP8Encoder* VP8Encoder::Create() {
   return new VP8EncoderImpl();
 }
 
 VP8Decoder* VP8Decoder::Create() {
   return new VP8DecoderImpl();
 }
 
 VP8EncoderImpl::VP8EncoderImpl()
     : encoded_complete_callback_(nullptr),
-      rate_allocator_(new SimulcastRateAllocator(codec_)),
       inited_(false),
       timestamp_(0),
       feedback_mode_(false),
       qp_max_(56),  // Setting for max quantizer.
       cpu_speed_default_(-6),
       rc_max_intra_target_(0),
       token_partitions_(VP8_ONE_TOKENPARTITION),
       down_scale_requested_(false),
       down_scale_bitrate_(0),
       key_frame_request_(kMaxSimulcastStreams, false),
@@ -166,152 +165,131 @@
     raw_images_.pop_back();
   }
   while (!temporal_layers_.empty()) {
     delete temporal_layers_.back();
     temporal_layers_.pop_back();
   }
   inited_ = false;
   return ret_val;
 }
 
-int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
-                             uint32_t new_framerate) {
-  if (!inited_) {
+int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate,
+                                      uint32_t new_framerate) {
+  if (!inited_)
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
-  }
-  if (encoders_[0].err) {
+
+  if (encoders_[0].err)
     return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-  if (new_framerate < 1) {
+
+  if (new_framerate < 1)
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+
+  // At this point, bitrate allocation should already match codec settings.
+  if (codec_.maxBitrate > 0 && bitrate.get_sum_kbps() > codec_.maxBitrate)
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+
+  if (bitrate.get_sum_kbps() < codec_.minBitrate)
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+
+  if (codec_.numberOfSimulcastStreams > 0 &&
+      bitrate.get_sum_kbps() < codec_.simulcastStream[0].minBitrate) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
-  if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) {
-    new_bitrate_kbit = codec_.maxBitrate;
-  }
-  if (new_bitrate_kbit < codec_.minBitrate) {
-    new_bitrate_kbit = codec_.minBitrate;
-  }
-  if (codec_.numberOfSimulcastStreams > 0 &&
-      new_bitrate_kbit < codec_.simulcastStream[0].minBitrate) {
-    new_bitrate_kbit = codec_.simulcastStream[0].minBitrate;
-  }
+
   codec_.maxFramerate = new_framerate;
 
   if (encoders_.size() == 1) {
     // 1:1.
     // Calculate a rough limit for when to trigger a potental down scale.
     uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000;
     // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work
     // around the current limitations.
     // Only trigger keyframes if we are allowed to scale down.
     if (configurations_[0].rc_resize_allowed) {
       if (!down_scale_requested_) {
-        if (k_pixels_per_frame > new_bitrate_kbit) {
+        if (k_pixels_per_frame > bitrate.get_sum_kbps()) {
           down_scale_requested_ = true;
-          down_scale_bitrate_ = new_bitrate_kbit;
+          down_scale_bitrate_ = bitrate.get_sum_kbps();
           key_frame_request_[0] = true;
         }
       } else {
-        if (new_bitrate_kbit > (2 * down_scale_bitrate_) ||
-            new_bitrate_kbit < (down_scale_bitrate_ / 2)) {
+        if (bitrate.get_sum_kbps() > (2 * down_scale_bitrate_) ||
+            bitrate.get_sum_kbps() < (down_scale_bitrate_ / 2)) {
           down_scale_requested_ = false;
         }
       }
     }
   } else {
     // If we have more than 1 stream, reduce the qp_max for the low resolution
     // stream if frame rate is not too low. The trade-off with lower qp_max is
     // possibly more dropped frames, so we only do this if the frame rate is
     // above some threshold (base temporal layer is down to 1/4 for 3 layers).
     // We may want to condition this on bitrate later.
     if (new_framerate > 20) {
       configurations_[encoders_.size() - 1].rc_max_quantizer = 45;
     } else {
       // Go back to default value set in InitEncode.
       configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_;
     }
   }
 
-  std::vector<uint32_t> stream_bitrates =
-      rate_allocator_->GetAllocation(new_bitrate_kbit);
   size_t stream_idx = encoders_.size() - 1;
   for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
+    unsigned int target_bitrate_kbps =
+        bitrate.GetSpatialLayerSum(stream_idx) / 1000;
+
     if (encoders_.size() > 1)
-      SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
+      SetStreamState(target_bitrate_kbps > 0, stream_idx);
 
-    unsigned int target_bitrate = stream_bitrates[stream_idx];
-    unsigned int max_bitrate = codec_.maxBitrate;
-    int framerate = new_framerate;
-    // TODO(holmer): This is a temporary hack for screensharing, where we
-    // interpret the startBitrate as the encoder target bitrate. This is
-    // to allow for a different max bitrate, so if the codec can't meet
-    // the target we still allow it to overshoot up to the max before dropping
-    // frames. This hack should be improved.
-    if (codec_.targetBitrate > 0 &&
-        (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 ||
-         codec_.simulcastStream[0].numberOfTemporalLayers == 2)) {
-      int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate);
-      max_bitrate = std::min(codec_.maxBitrate, target_bitrate);
-      target_bitrate = tl0_bitrate;
-    }
-    configurations_[i].rc_target_bitrate = target_bitrate;
-    temporal_layers_[stream_idx]->ConfigureBitrates(
-        target_bitrate, max_bitrate, framerate, &configurations_[i]);
+    configurations_[i].rc_target_bitrate = target_bitrate_kbps;
+    temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]);
+
     if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
   }
   quality_scaler_.ReportFramerate(new_framerate);
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 void VP8EncoderImpl::OnDroppedFrame() {
   if (quality_scaler_enabled_)
     quality_scaler_.ReportDroppedFrame();
 }
 
 const char* VP8EncoderImpl::ImplementationName() const {
   return "libvpx";
 }
 
 void VP8EncoderImpl::SetStreamState(bool send_stream,
                                     int stream_idx) {
   if (send_stream && !send_stream_[stream_idx]) {
     // Need a key frame if we have not sent this stream before.
     key_frame_request_[stream_idx] = true;
   }
   send_stream_[stream_idx] = send_stream;
 }
 
 void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
                                          int num_temporal_layers,
                                          const VideoCodec& codec) {
-  TemporalLayersFactory default_factory;
+  RTC_DCHECK(codec.codecSpecific.VP8.tl_factory != nullptr);
   const TemporalLayersFactory* tl_factory = codec.codecSpecific.VP8.tl_factory;
-  if (!tl_factory)
-    tl_factory = &default_factory;
   if (num_streams == 1) {
-    if (codec.mode == kScreensharing) {
-      // Special mode when screensharing on a single stream.
-      temporal_layers_.push_back(new ScreenshareLayers(
-          num_temporal_layers, rand(), webrtc::Clock::GetRealTimeClock()));
-    } else {
-      temporal_layers_.push_back(
-          tl_factory->Create(num_temporal_layers, rand()));
-    }
+    temporal_layers_.push_back(
+        tl_factory->Create(0, num_temporal_layers, rand()));
   } else {
     for (int i = 0; i < num_streams; ++i) {
-      // TODO(andresp): crash if layers is invalid.
-      int layers = codec.simulcastStream[i].numberOfTemporalLayers;
-      if (layers < 1)
-        layers = 1;
-      temporal_layers_.push_back(tl_factory->Create(layers, rand()));
+      RTC_CHECK_GT(num_temporal_layers, 0);
+      int layers = std::max(static_cast<uint8_t>(1),
                            codec.simulcastStream[i].numberOfTemporalLayers);
+      temporal_layers_.push_back(tl_factory->Create(i, layers, rand()));
     }
   }
 }
 
 int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
                                int number_of_cores,
                                size_t /*maxPayloadSize */) {
   if (inst == NULL) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
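The per-stream loop in SetRateAllocation() above reads BitrateAllocation::GetSpatialLayerSum() for each stream and get_sum_kbps() for the parameter checks. A small illustrative sketch of how those sums relate, assuming BitrateAllocation also exposes a SetBitrate(spatial_idx, temporal_idx, bitrate_bps) setter (only the two getters appear in this diff; the numbers are made up):

// Illustrative only: fill a two-stream allocation and read back the sums.
void IllustrateAllocation() {
  webrtc::BitrateAllocation allocation;
  allocation.SetBitrate(0, 0, 150000);  // stream 0, temporal layer 0: 150 kbps
  allocation.SetBitrate(0, 1, 100000);  // stream 0, temporal layer 1: 100 kbps
  allocation.SetBitrate(1, 0, 500000);  // stream 1, single layer: 500 kbps

  // Per-stream sum, as used for rc_target_bitrate: (150000 + 100000) / 1000.
  uint32_t stream0_kbps = allocation.GetSpatialLayerSum(0) / 1000;  // 250
  // Sum over every layer, as used for the min/max checks: 750.
  uint32_t total_kbps = allocation.get_sum_kbps();
  (void)stream0_kbps;
  (void)total_kbps;
}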
@@ -344,31 +322,27 @@
   int number_of_streams = NumberOfStreams(*inst);
   bool doing_simulcast = (number_of_streams > 1);
 
   if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
 
   int num_temporal_layers =
       doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
                       : inst->codecSpecific.VP8.numberOfTemporalLayers;
+  RTC_DCHECK_GT(num_temporal_layers, 0);
 
-  // TODO(andresp): crash if num temporal layers is bananas.
-  if (num_temporal_layers < 1)
-    num_temporal_layers = 1;
   SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
 
   feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
-
   timestamp_ = 0;
   codec_ = *inst;
-  rate_allocator_.reset(new SimulcastRateAllocator(codec_));
 
   // Code expects simulcastStream resolutions to be correct, make sure they are
   // filled even when there are no simulcast layers.
   if (codec_.numberOfSimulcastStreams == 0) {
     codec_.simulcastStream[0].width = codec_.width;
     codec_.simulcastStream[0].height = codec_.height;
   }
 
   picture_id_.resize(number_of_streams);
   last_key_frame_picture_id_.resize(number_of_streams);
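With the default-factory fallback removed, SetupTemporalLayers() now RTC_DCHECKs that codecSpecific.VP8.tl_factory is set, so callers must install a TemporalLayersFactory on the codec settings before InitEncode(). A minimal sketch using only names visible in this diff; the helper is hypothetical and error handling is omitted:

// Sketch: install a temporal-layers factory before initializing the encoder.
// The factory must outlive the encoder's use of it.
int InitVp8Encoder(webrtc::VideoEncoder* encoder,
                   webrtc::VideoCodec codec,  // copied so the factory can be set
                   webrtc::TemporalLayersFactory* tl_factory,
                   int number_of_cores,
                   size_t max_payload_size) {
  codec.codecSpecific.VP8.tl_factory = tl_factory;  // required by the DCHECK above
  return encoder->InitEncode(&codec, number_of_cores, max_payload_size);
}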
@@ -507,59 +481,58 @@
   // TODO(fbarchard): Consider number of Simulcast layers.
   configurations_[0].g_threads = NumberOfThreads(
       configurations_[0].g_w, configurations_[0].g_h, number_of_cores);
 
   // Creating a wrapper to the image - setting image data to NULL.
   // Actual pointer will be set in encode. Setting align to 1, as it
   // is meaningless (no memory allocation is done here).
   vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
                NULL);
 
-  if (encoders_.size() == 1) {
-    configurations_[0].rc_target_bitrate = inst->startBitrate;
-    temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
-                                           inst->maxFramerate,
-                                           &configurations_[0]);
-  } else {
-    // Note the order we use is different from webm, we have lowest resolution
-    // at position 0 and they have highest resolution at position 0.
-    int stream_idx = encoders_.size() - 1;
-    std::vector<uint32_t> stream_bitrates =
-        rate_allocator_->GetAllocation(inst->startBitrate);
+  // Note the order we use is different from webm, we have lowest resolution
+  // at position 0 and they have highest resolution at position 0.
+  int stream_idx = encoders_.size() - 1;
+  SimulcastRateAllocator init_allocator(codec_, nullptr);
+  BitrateAllocation allocation = init_allocator.GetAllocation(
+      inst->startBitrate * 1000, inst->maxFramerate);
+  std::vector<uint32_t> stream_bitrates;
+  for (int i = 0; i == 0 || i < inst->numberOfSimulcastStreams; ++i) {
+    uint32_t bitrate = allocation.GetSpatialLayerSum(i) / 1000;
+    stream_bitrates.push_back(bitrate);
+  }
+
+  configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx];
+  temporal_layers_[stream_idx]->OnRatesUpdated(
+      stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate);
+  temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[0]);
+  --stream_idx;
+  for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) {
+    memcpy(&configurations_[i], &configurations_[0],
+           sizeof(configurations_[0]));
+
+    configurations_[i].g_w = inst->simulcastStream[stream_idx].width;
+    configurations_[i].g_h = inst->simulcastStream[stream_idx].height;
+
+    // Use 1 thread for lower resolutions.
+    configurations_[i].g_threads = 1;
+
+    // Setting alignment to 32 - as that ensures at least 16 for all
+    // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for
+    // the y plane, but only half of it to the u and v planes.
+    vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420,
+                  inst->simulcastStream[stream_idx].width,
+                  inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
     SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
-    configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx];
-    temporal_layers_[stream_idx]->ConfigureBitrates(
-        stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate,
-        &configurations_[0]);
-    --stream_idx;
-    for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) {
-      memcpy(&configurations_[i], &configurations_[0],
-             sizeof(configurations_[0]));
-
-      configurations_[i].g_w = inst->simulcastStream[stream_idx].width;
-      configurations_[i].g_h = inst->simulcastStream[stream_idx].height;
-
-      // Use 1 thread for lower resolutions.
-      configurations_[i].g_threads = 1;
-
-      // Setting alignment to 32 - as that ensures at least 16 for all
-      // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for
-      // the y plane, but only half of it to the u and v planes.
-      vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420,
-                    inst->simulcastStream[stream_idx].width,
-                    inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
-      SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
-      configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx];
-      temporal_layers_[stream_idx]->ConfigureBitrates(
-          stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate,
-          &configurations_[i]);
-    }
+    configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx];
+    temporal_layers_[stream_idx]->OnRatesUpdated(
+        stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate);
+    temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]);
   }
 
   rps_.Init();
   quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width,
                        codec_.height, codec_.maxFramerate);
 
   // Only apply scaling to improve for single-layer streams. The scaling metrics
   // use frame drops as a signal and is only applicable when we drop frames.
   quality_scaler_enabled_ = encoders_.size() == 1 &&
                            configurations_[0].rc_dropframe_thresh > 0 &&
@@ -1319,10 +1292,10 @@
     return -1;
   }
   if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
       VPX_CODEC_OK) {
     return -1;
   }
   return 0;
 }
 
 }  // namespace webrtc
