Chromium Code Reviews

Unified Diff: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc

Issue 1541803002: Lint fix for webrtc/modules/video_coding PART 1! (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 5 years ago
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/video_coding/codecs/vp8/vp8_impl.h"
 
 #include <stdlib.h>
 #include <string.h>
 #include <time.h>
 #include <algorithm>
 
 // NOTE(ajm): Path provided by gyp.
 #include "libyuv/scale.h"  // NOLINT
 #include "libyuv/convert.h"  // NOLINT
 
 #include "webrtc/base/checks.h"
 #include "webrtc/base/trace_event.h"
 #include "webrtc/common.h"
 #include "webrtc/common_types.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/modules/include/module_common_types.h"
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
(...skipping 31 matching lines...)
 
 std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
                                        int bitrate_to_allocate_kbps) {
   if (codec.numberOfSimulcastStreams <= 1) {
     return std::vector<int>(1, bitrate_to_allocate_kbps);
   }
 
   std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
   // Allocate min -> target bitrates as long as we have bitrate to spend.
   size_t last_active_stream = 0;
-  for (size_t i = 0;
-       i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
-       bitrate_to_allocate_kbps >=
-           static_cast<int>(codec.simulcastStream[i].minBitrate);
+  for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
+                     bitrate_to_allocate_kbps >=
+                         static_cast<int>(codec.simulcastStream[i].minBitrate);
        ++i) {
     last_active_stream = i;
     int allocated_bitrate_kbps =
         std::min(static_cast<int>(codec.simulcastStream[i].targetBitrate),
                  bitrate_to_allocate_kbps);
     bitrates_kbps[i] = allocated_bitrate_kbps;
     bitrate_to_allocate_kbps -= allocated_bitrate_kbps;
   }
 
   // Spend additional bits on the highest-quality active layer, up to max
(...skipping 40 matching lines...)
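For context, the min-to-target allocation above (together with the "spend the remainder on the top active layer" step hidden in the skipped lines) can be read as this standalone sketch. The two-stream values and the simplified Stream struct are hypothetical, not the real webrtc types:

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical simplified stand-in for SimulcastStream; rates in kbps.
struct Stream { int min_kbps; int target_kbps; int max_kbps; };

int main() {
  std::vector<Stream> streams = {{50, 150, 200}, {150, 500, 700}};
  int remaining_kbps = 600;  // plays the role of bitrate_to_allocate_kbps
  std::vector<int> alloc(streams.size(), 0);
  size_t last_active = 0;
  // Give each stream (lowest resolution first) up to its target rate.
  for (size_t i = 0;
       i < streams.size() && remaining_kbps >= streams[i].min_kbps; ++i) {
    last_active = i;
    alloc[i] = std::min(streams[i].target_kbps, remaining_kbps);
    remaining_kbps -= alloc[i];
  }
  // Remainder goes to the highest active layer, capped at its max.
  alloc[last_active] = std::min(alloc[last_active] + remaining_kbps,
                                streams[last_active].max_kbps);
  for (int kbps : alloc)
    std::printf("%d kbps\n", kbps);  // prints "150 kbps" then "450 kbps"
}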
   }
   for (int i = 0; i < num_streams; ++i) {
     if (codec.width * codec.simulcastStream[i].height !=
         codec.height * codec.simulcastStream[i].width) {
       return false;
     }
   }
   return true;
 }
 
-int NumStreamsDisabled(std::vector<bool>& streams) {
+int NumStreamsDisabled(const std::vector<bool>& streams) {
   int num_disabled = 0;
   for (bool stream : streams) {
     if (!stream)
       ++num_disabled;
   }
   return num_disabled;
 }
 }  // namespace
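The cross-multiplication in ValidSimulcastResolutions above checks that every simulcast stream keeps the codec's aspect ratio using only integer math: a stream passes when codec.width * stream.height == codec.height * stream.width. A quick sanity check with assumed resolutions:

#include <cassert>

int main() {
  // 1280x720 codec vs. 640x360 stream: both 16:9, cross products equal.
  assert(1280 * 360 == 720 * 640);  // 460800 == 460800
  // 1280x720 codec vs. 640x480 stream: 16:9 vs. 4:3, rejected.
  assert(1280 * 480 != 720 * 640);  // 614400 != 460800
}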
 
 const float kTl1MaxTimeToDropFrames = 20.0f;
(...skipping 30 matching lines...)
 
 VP8EncoderImpl::~VP8EncoderImpl() {
   Release();
 }
 
 int VP8EncoderImpl::Release() {
   int ret_val = WEBRTC_VIDEO_CODEC_OK;
 
   while (!encoded_images_.empty()) {
     EncodedImage& image = encoded_images_.back();
-    delete [] image._buffer;
+    delete[] image._buffer;
     encoded_images_.pop_back();
   }
   while (!encoders_.empty()) {
     vpx_codec_ctx_t& encoder = encoders_.back();
     if (vpx_codec_destroy(&encoder)) {
       ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
     }
     encoders_.pop_back();
   }
   configurations_.clear();
(...skipping 85 matching lines...)
     // the target we still allow it to overshoot up to the max before dropping
     // frames. This hack should be improved.
     if (codec_.targetBitrate > 0 &&
         (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 ||
          codec_.simulcastStream[0].numberOfTemporalLayers == 2)) {
       int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate);
       max_bitrate = std::min(codec_.maxBitrate, target_bitrate);
       target_bitrate = tl0_bitrate;
     }
     configurations_[i].rc_target_bitrate = target_bitrate;
-    temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate,
-                                                    max_bitrate,
-                                                    framerate,
-                                                    &configurations_[i]);
+    temporal_layers_[stream_idx]->ConfigureBitrates(
+        target_bitrate, max_bitrate, framerate, &configurations_[i]);
     if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
   }
   quality_scaler_.ReportFramerate(new_framerate);
   return WEBRTC_VIDEO_CODEC_OK;
 }
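The two-temporal-layer branch in SetRates() above re-targets the rate controller at the TL0 (base layer) bitrate while leaving headroom up to the stream's full allocation, so overshoot lands in the enhancement layer. A worked sketch with assumed numbers (kbps), mirroring the locals above:

#include <algorithm>
#include <cassert>

int main() {
  unsigned int target_bitrate = 1000;  // this stream's allocation (assumed)
  unsigned int codec_target = 600;     // codec_.targetBitrate, i.e. TL0 rate
  unsigned int codec_max = 1200;       // codec_.maxBitrate
  unsigned int tl0_bitrate = std::min(codec_target, target_bitrate);
  unsigned int max_bitrate = std::min(codec_max, target_bitrate);
  target_bitrate = tl0_bitrate;
  assert(target_bitrate == 600);  // encoder aims at the TL0 rate...
  assert(max_bitrate == 1000);    // ...but may overshoot up to the allocation
}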
 
 const char* VP8EncoderImpl::ImplementationName() const {
   return "libvpx";
 }
 
 void VP8EncoderImpl::SetStreamState(bool send_stream,
                                     int stream_idx) {
   if (send_stream && !send_stream_[stream_idx]) {
     // Need a key frame if we have not sent this stream before.
     key_frame_request_[stream_idx] = true;
   }
   send_stream_[stream_idx] = send_stream;
 }
 
 void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
                                          int num_temporal_layers,
                                          const VideoCodec& codec) {
   const Config default_options;
   const TemporalLayers::Factory& tl_factory =
       (codec.extra_options ? codec.extra_options : &default_options)
           ->Get<TemporalLayers::Factory>();
   if (num_streams == 1) {
     if (codec.mode == kScreensharing) {
       // Special mode when screensharing on a single stream.
       temporal_layers_.push_back(
           new ScreenshareLayers(num_temporal_layers, rand()));
     } else {
       temporal_layers_.push_back(
           tl_factory.Create(num_temporal_layers, rand()));
     }
   } else {
     for (int i = 0; i < num_streams; ++i) {
       // TODO(andresp): crash if layers is invalid.
       int layers = codec.simulcastStream[i].numberOfTemporalLayers;
-      if (layers < 1) layers = 1;
+      if (layers < 1)
+        layers = 1;
       temporal_layers_.push_back(tl_factory.Create(layers, rand()));
     }
   }
 }
 
 int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
                                int number_of_cores,
                                size_t /*maxPayloadSize */) {
   if (inst == NULL) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   if (inst->maxFramerate < 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   // allow zero to represent an unspecified maxBitRate
   if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
(...skipping 16 matching lines...)
     return retVal;
   }
 
   int number_of_streams = NumberOfStreams(*inst);
   bool doing_simulcast = (number_of_streams > 1);
 
   if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
 
-  int num_temporal_layers = doing_simulcast ?
-      inst->simulcastStream[0].numberOfTemporalLayers :
-      inst->codecSpecific.VP8.numberOfTemporalLayers;
+  int num_temporal_layers =
+      doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
+                      : inst->codecSpecific.VP8.numberOfTemporalLayers;
 
   // TODO(andresp): crash if num temporal layers is bananas.
-  if (num_temporal_layers < 1) num_temporal_layers = 1;
+  if (num_temporal_layers < 1)
+    num_temporal_layers = 1;
   SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
 
   feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
 
   timestamp_ = 0;
   codec_ = *inst;
 
   // Code expects simulcastStream resolutions to be correct, make sure they are
   // filled even when there are no simulcast layers.
   if (codec_.numberOfSimulcastStreams == 0) {
     codec_.simulcastStream[0].width = codec_.width;
     codec_.simulcastStream[0].height = codec_.height;
   }
 
   picture_id_.resize(number_of_streams);
   last_key_frame_picture_id_.resize(number_of_streams);
   encoded_images_.resize(number_of_streams);
   encoders_.resize(number_of_streams);
   configurations_.resize(number_of_streams);
   downsampling_factors_.resize(number_of_streams);
   raw_images_.resize(number_of_streams);
   send_stream_.resize(number_of_streams);
   send_stream_[0] = true;  // For non-simulcast case.
   cpu_speed_.resize(number_of_streams);
   std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
 
   int idx = number_of_streams - 1;
   for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
     int gcd = GCD(inst->simulcastStream[idx].width,
-                  inst->simulcastStream[idx-1].width);
+                  inst->simulcastStream[idx - 1].width);
     downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
     downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
     send_stream_[i] = false;
   }
   if (number_of_streams > 1) {
     send_stream_[number_of_streams - 1] = false;
     downsampling_factors_[number_of_streams - 1].num = 1;
     downsampling_factors_[number_of_streams - 1].den = 1;
   }
   for (int i = 0; i < number_of_streams; ++i) {
     // Random start, 16 bits is enough.
-    picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;
+    picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;  // NOLINT
     last_key_frame_picture_id_[i] = -1;
     // allocate memory for encoded image
     if (encoded_images_[i]._buffer != NULL) {
-      delete [] encoded_images_[i]._buffer;
+      delete[] encoded_images_[i]._buffer;
     }
-    encoded_images_[i]._size = CalcBufferSize(kI420,
-                                              codec_.width, codec_.height);
+    encoded_images_[i]._size =
+        CalcBufferSize(kI420, codec_.width, codec_.height);
     encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
     encoded_images_[i]._completeFrame = true;
   }
   // populate encoder configuration with default values
-  if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(),
-                                   &configurations_[0], 0)) {
+  if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0],
+                                   0)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
   // setting the time base of the codec
   configurations_[0].g_timebase.num = 1;
   configurations_[0].g_timebase.den = 90000;
   configurations_[0].g_lag_in_frames = 0;  // 0- no frame lagging
 
   // Set the error resilience mode according to user settings.
   switch (inst->codecSpecific.VP8.resilience) {
     case kResilienceOff:
       // TODO(marpan): We should keep error resilience off for this mode,
       // independent of temporal layer settings, and make sure we set
       // |codecSpecific.VP8.resilience| = |kResilientStream| at higher level
       // code if we want to get error resilience on.
       configurations_[0].g_error_resilient = 1;
       break;
     case kResilientStream:
       configurations_[0].g_error_resilient = 1;  // TODO(holmer): Replace with
       // VPX_ERROR_RESILIENT_DEFAULT when we
       // drop support for libvpx 9.6.0.
       break;
     case kResilientFrames:
 #ifdef INDEPENDENT_PARTITIONS
-      configurations_[0]-g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT |
-                                             VPX_ERROR_RESILIENT_PARTITIONS;
+      configurations_[0].g_error_resilient =
+          VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
       break;
 #else
       return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;  // Not supported
 #endif
   }
 
   // rate control settings
   configurations_[0].rc_dropframe_thresh =
       inst->codecSpecific.VP8.frameDroppingOn ? 30 : 0;
   configurations_[0].rc_end_usage = VPX_CBR;
(...skipping 55 matching lines...)
   for (int i = 1; i < number_of_streams; ++i) {
     cpu_speed_[i] =
         SetCpuSpeed(inst->simulcastStream[number_of_streams - 1 - i].width,
                     inst->simulcastStream[number_of_streams - 1 - i].height);
   }
   configurations_[0].g_w = inst->width;
   configurations_[0].g_h = inst->height;
 
   // Determine number of threads based on the image size and #cores.
   // TODO(fbarchard): Consider number of Simulcast layers.
-  configurations_[0].g_threads = NumberOfThreads(configurations_[0].g_w,
-                                                 configurations_[0].g_h,
-                                                 number_of_cores);
+  configurations_[0].g_threads = NumberOfThreads(
+      configurations_[0].g_w, configurations_[0].g_h, number_of_cores);
 
   // Creating a wrapper to the image - setting image data to NULL.
   // Actual pointer will be set in encode. Setting align to 1, as it
   // is meaningless (no memory allocation is done here).
-  vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height,
-               1, NULL);
+  vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
+               NULL);
 
   if (encoders_.size() == 1) {
     configurations_[0].rc_target_bitrate = inst->startBitrate;
-    temporal_layers_[0]->ConfigureBitrates(inst->startBitrate,
-                                           inst->maxBitrate,
-                                           inst->maxFramerate,
-                                           &configurations_[0]);
+    temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
+                                           inst->maxFramerate,
+                                           &configurations_[0]);
   } else {
     // Note the order we use is different from webm, we have lowest resolution
     // at position 0 and they have highest resolution at position 0.
     int stream_idx = encoders_.size() - 1;
     std::vector<int> stream_bitrates =
         GetStreamBitratesKbps(codec_, inst->startBitrate);
     SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
     configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx];
(...skipping 71 matching lines...)
     // 1 thread for VGA or less.
     return 1;
   }
 }
 
 int VP8EncoderImpl::InitAndSetControlSettings() {
   vpx_codec_flags_t flags = 0;
   flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
 
   if (encoders_.size() > 1) {
-    int error = vpx_codec_enc_init_multi(&encoders_[0],
-                                         vpx_codec_vp8_cx(),
-                                         &configurations_[0],
-                                         encoders_.size(),
-                                         flags,
-                                         &downsampling_factors_[0]);
+    int error = vpx_codec_enc_init_multi(&encoders_[0], vpx_codec_vp8_cx(),
+                                         &configurations_[0], encoders_.size(),
+                                         flags, &downsampling_factors_[0]);
     if (error) {
       return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
     }
   } else {
-    if (vpx_codec_enc_init(&encoders_[0],
-                           vpx_codec_vp8_cx(),
-                           &configurations_[0],
-                           flags)) {
+    if (vpx_codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
+                           &configurations_[0], flags)) {
       return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
     }
   }
   // Enable denoising for the highest resolution stream, and for
   // the second highest resolution if we are doing more than 2
   // spatial layers/streams.
   // TODO(holmer): Investigate possibility of adding a libvpx API
   // for getting the denoised frame from the encoder and using that
   // when encoding lower resolution streams. Would it work with the
   // multi-res encoding feature?
   denoiserState denoiser_state = kDenoiserOnYOnly;
 #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
   denoiser_state = kDenoiserOnYOnly;
 #else
   denoiser_state = kDenoiserOnAdaptive;
 #endif
-  vpx_codec_control(&encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
-                    codec_.codecSpecific.VP8.denoisingOn ?
-                    denoiser_state : kDenoiserOff);
+  vpx_codec_control(
+      &encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
+      codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
   if (encoders_.size() > 2) {
-    vpx_codec_control(&encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
-                      codec_.codecSpecific.VP8.denoisingOn ?
-                      denoiser_state : kDenoiserOff);
+    vpx_codec_control(
+        &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
+        codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
   }
   for (size_t i = 0; i < encoders_.size(); ++i) {
     // Allow more screen content to be detected as static.
     vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD,
                       codec_.mode == kScreensharing ? 300 : 1);
     vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]);
     vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS,
                       static_cast<vp8e_token_partitions>(token_partitions_));
     vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
                       rc_max_intra_target_);
(...skipping 12 matching lines...)
   // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
   // This value is presented as a percentage of perFrameBw:
   // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
   // The target in % is as follows:
 
   float scalePar = 0.5;
   uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10;
 
   // Don't go below 3 times the per frame bandwidth.
   const uint32_t minIntraTh = 300;
-  return (targetPct < minIntraTh) ? minIntraTh: targetPct;
+  return (targetPct < minIntraTh) ? minIntraTh : targetPct;
 }
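Plugging assumed numbers into the formula above shows the scale of the cap: targetPct is a percentage of the per-frame bandwidth, so a key frame may be several times the size of an average frame. With a 600 ms optimal buffer (a value chosen here purely for illustration) at 30 fps:

#include <cassert>
#include <cstdint>

int main() {
  const float scalePar = 0.5f;
  uint32_t optimalBuffersize = 600;  // ms, assumed example value
  uint32_t maxFramerate = 30;
  uint32_t targetPct = optimalBuffersize * scalePar * maxFramerate / 10;
  assert(targetPct == 900);  // key frame may use up to 9x per-frame bandwidth
  // The floor of 300 (3x per-frame bandwidth) matters at low framerates:
  uint32_t lowFps = optimalBuffersize * scalePar * 5 / 10;  // = 150
  const uint32_t minIntraTh = 300;
  assert(((lowFps < minIntraTh) ? minIntraTh : lowFps) == 300);
}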
 
 int VP8EncoderImpl::Encode(const VideoFrame& frame,
                            const CodecSpecificInfo* codec_specific_info,
                            const std::vector<FrameType>* frame_types) {
   if (!inited_)
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   if (frame.IsZeroSize())
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   if (encoded_complete_callback_ == NULL)
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
 
   if (quality_scaler_enabled_)
     quality_scaler_.OnEncodeFrame(frame);
   const VideoFrame& input_image =
       quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame;
 
   if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||
                                   input_image.height() != codec_.height)) {
     int ret = UpdateCodecFrameSize(input_image);
     if (ret < 0)
       return ret;
   }
 
   // Since we are extracting raw pointers from |input_image| to
   // |raw_images_[0]|, the resolution of these frames must match. Note that
   // |input_image| might be scaled from |frame|. In that case, the resolution of
   // |raw_images_[0]| should have been updated in UpdateCodecFrameSize.
   RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
   RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
 
   // Image in vpx_image_t format.
   // Input image is const. VP8's raw image is not defined as const.
   raw_images_[0].planes[VPX_PLANE_Y] =
       const_cast<uint8_t*>(input_image.buffer(kYPlane));
   raw_images_[0].planes[VPX_PLANE_U] =
       const_cast<uint8_t*>(input_image.buffer(kUPlane));
   raw_images_[0].planes[VPX_PLANE_V] =
       const_cast<uint8_t*>(input_image.buffer(kVPlane));
 
   raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
   raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
   raw_images_[0].stride[VPX_PLANE_V] = input_image.stride(kVPlane);
 
   for (size_t i = 1; i < encoders_.size(); ++i) {
     // Scale the image down a number of times by downsampling factor
     libyuv::I420Scale(
-        raw_images_[i-1].planes[VPX_PLANE_Y],
-        raw_images_[i-1].stride[VPX_PLANE_Y],
-        raw_images_[i-1].planes[VPX_PLANE_U],
-        raw_images_[i-1].stride[VPX_PLANE_U],
-        raw_images_[i-1].planes[VPX_PLANE_V],
-        raw_images_[i-1].stride[VPX_PLANE_V],
-        raw_images_[i-1].d_w, raw_images_[i-1].d_h,
-        raw_images_[i].planes[VPX_PLANE_Y], raw_images_[i].stride[VPX_PLANE_Y],
-        raw_images_[i].planes[VPX_PLANE_U], raw_images_[i].stride[VPX_PLANE_U],
-        raw_images_[i].planes[VPX_PLANE_V], raw_images_[i].stride[VPX_PLANE_V],
-        raw_images_[i].d_w, raw_images_[i].d_h, libyuv::kFilterBilinear);
+        raw_images_[i - 1].planes[VPX_PLANE_Y],
+        raw_images_[i - 1].stride[VPX_PLANE_Y],
+        raw_images_[i - 1].planes[VPX_PLANE_U],
+        raw_images_[i - 1].stride[VPX_PLANE_U],
+        raw_images_[i - 1].planes[VPX_PLANE_V],
+        raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
+        raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
+        raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
+        raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
+        raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
+        raw_images_[i].d_h, libyuv::kFilterBilinear);
   }
   vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
   for (size_t i = 0; i < encoders_.size(); ++i) {
     int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp());
     if (ret < 0) {
       // Drop this frame.
       return WEBRTC_VIDEO_CODEC_OK;
     }
     flags[i] = ret;
   }
(...skipping 14 matching lines...)
       }
     }
   }
   // The flag modification below (due to forced key frame, RPS, etc.) for now
   // will be the same for all encoders/spatial layers.
   // TODO(marpan/holmer): Allow for key frame request to be set per encoder.
   bool only_predict_from_key_frame = false;
   if (send_key_frame) {
     // Adapt the size of the key frame when in screenshare with 1 temporal
     // layer.
-    if (encoders_.size() == 1 && codec_.mode == kScreensharing
-        && codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
+    if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
+        codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
       const uint32_t forceKeyFrameIntraTh = 100;
       vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
                         forceKeyFrameIntraTh);
     }
     // Key frame request from caller.
     // Will update both golden and alt-ref.
     for (size_t i = 0; i < encoders_.size(); ++i) {
       flags[i] = VPX_EFLAG_FORCE_KF;
     }
     std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
   } else if (codec_specific_info &&
              codec_specific_info->codecType == kVideoCodecVP8) {
     if (feedback_mode_) {
       // Handle RPSI and SLI messages and set up the appropriate encode flags.
       bool sendRefresh = false;
       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
-        rps_.ReceivedRPSI(
-            codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
+        rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
       }
       if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
         sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
       }
       for (size_t i = 0; i < encoders_.size(); ++i) {
         flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh,
                                     input_image.timestamp());
       }
     } else {
       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
(...skipping 31 matching lines...)
     // change isn't stored in configurations_ so change will be discarded at
     // the next update.
     vpx_codec_enc_cfg_t temp_config;
     memcpy(&temp_config, &configurations_[i], sizeof(vpx_codec_enc_cfg_t));
     if (temporal_layers_[stream_idx]->UpdateConfiguration(&temp_config)) {
       if (vpx_codec_enc_config_set(&encoders_[i], &temp_config))
         return WEBRTC_VIDEO_CODEC_ERROR;
     }
 
     vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]);
-    vpx_codec_control(&encoders_[i],
-                      VP8E_SET_TEMPORAL_LAYER_ID,
-                      temporal_layers_[stream_idx]->CurrentLayerId());
+    vpx_codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
+                      temporal_layers_[stream_idx]->CurrentLayerId());
   }
   // TODO(holmer): Ideally the duration should be the timestamp diff of this
   // frame and the next frame to be encoded, which we don't have. Instead we
   // would like to use the duration of the previous frame. Unfortunately the
   // rate control seems to be off with that setup. Using the average input
   // frame rate to calculate an average duration for now.
   assert(codec_.maxFramerate > 0);
   uint32_t duration = 90000 / codec_.maxFramerate;
 
   // Note we must pass 0 for |flags| field in encode call below since they are
   // set above in |vpx_codec_control| function for each encoder/spatial layer.
   int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_,
                                duration, 0, VPX_DL_REALTIME);
   // Reset specific intra frame thresholds, following the key frame.
   if (send_key_frame) {
     vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
                       rc_max_intra_target_);
   }
   if (error)
     return WEBRTC_VIDEO_CODEC_ERROR;
   timestamp_ += duration;
   return GetEncodedPartitions(input_image, only_predict_from_key_frame);
 }
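The duration passed to vpx_codec_encode() above is expressed in the 90 kHz RTP video clock configured in InitEncode (g_timebase = 1/90000), so the average-framerate approximation is simply 90000 / fps ticks per frame. A tiny check with assumed framerates:

#include <cassert>

int main() {
  assert(90000 / 30 == 3000);  // 30 fps -> 3000 ticks per frame
  assert(90000 / 15 == 6000);  // 15 fps -> 6000 ticks per frame
  assert(90000 / 29 == 3103);  // integer division truncates: 29 * 3103 = 89987
}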
 
 // TODO(pbos): Make sure this works properly for >1 encoders.
 int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
   codec_.width = input_image.width();
   codec_.height = input_image.height();
   if (codec_.numberOfSimulcastStreams <= 1) {
     // For now scaling is only used for single-layer streams.
     codec_.simulcastStream[0].width = input_image.width();
     codec_.simulcastStream[0].height = input_image.height();
   }
   // Update the cpu_speed setting for resolution change.
-  vpx_codec_control(&(encoders_[0]),
-                    VP8E_SET_CPUUSED,
-                    SetCpuSpeed(codec_.width, codec_.height));
+  vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED,
+                    SetCpuSpeed(codec_.width, codec_.height));
   raw_images_[0].w = codec_.width;
   raw_images_[0].h = codec_.height;
   raw_images_[0].d_w = codec_.width;
   raw_images_[0].d_h = codec_.height;
   vpx_img_set_rect(&raw_images_[0], 0, 0, codec_.width, codec_.height);
 
   // Update encoder context for new frame size.
   // Change of frame size will automatically trigger a key frame.
   configurations_[0].g_w = codec_.width;
(...skipping 12 matching lines...)
                                            bool only_predicting_from_key_frame) {
   assert(codec_specific != NULL);
   codec_specific->codecType = kVideoCodecVP8;
   CodecSpecificInfoVP8* vp8Info = &(codec_specific->codecSpecific.VP8);
   vp8Info->pictureId = picture_id_[stream_idx];
   if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
     last_key_frame_picture_id_[stream_idx] = picture_id_[stream_idx];
   }
   vp8Info->simulcastIdx = stream_idx;
   vp8Info->keyIdx = kNoKeyIdx;  // TODO(hlundin) populate this
-  vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ?
-      true : false;
+  vp8Info->nonReference =
+      (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? true : false;
   bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
                                only_predicting_from_key_frame;
-  temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
-                                                      vp8Info,
-                                                      timestamp);
+  temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
+                                                      vp8Info, timestamp);
   // Prepare next.
   picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
 }
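The & 0x7FFF in the increment above keeps pictureId within 15 bits, matching the 15-bit PictureID field of the RTP VP8 payload descriptor; InitEncode seeds it the same way with rand() & 0x7FFF. A minimal wraparound check:

#include <cassert>
#include <cstdint>

int main() {
  uint16_t picture_id = 0x7FFF;            // largest 15-bit value
  picture_id = (picture_id + 1) & 0x7FFF;  // the update used above
  assert(picture_id == 0);                 // wraps to 0 instead of setting bit 15
}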
 
 int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
                                          bool only_predicting_from_key_frame) {
   int bw_resolutions_disabled =
       (encoders_.size() > 1) ? NumStreamsDisabled(send_stream_) : -1;
 
   int stream_idx = static_cast<int>(encoders_.size()) - 1;
   int result = WEBRTC_VIDEO_CODEC_OK;
   for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
     int part_idx = 0;
     encoded_images_[encoder_idx]._length = 0;
     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
     RTPFragmentationHeader frag_info;
     // token_partitions_ is number of bits used.
-    frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
-                                                   + 1);
+    frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) +
+                                                   1);
     CodecSpecificInfo codec_specific;
-    const vpx_codec_cx_pkt_t *pkt = NULL;
-    while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx],
-                                        &iter)) != NULL) {
+    const vpx_codec_cx_pkt_t* pkt = NULL;
+    while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+           NULL) {
       switch (pkt->kind) {
         case VPX_CODEC_CX_FRAME_PKT: {
           uint32_t length = encoded_images_[encoder_idx]._length;
           memcpy(&encoded_images_[encoder_idx]._buffer[length],
-                 pkt->data.frame.buf,
-                 pkt->data.frame.sz);
+                 pkt->data.frame.buf, pkt->data.frame.sz);
           frag_info.fragmentationOffset[part_idx] = length;
           frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
           frag_info.fragmentationPlType[part_idx] = 0;  // not known here
           frag_info.fragmentationTimeDiff[part_idx] = 0;
           encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
           assert(length <= encoded_images_[encoder_idx]._size);
           ++part_idx;
           break;
         }
         default:
           break;
       }
(...skipping 56 matching lines...)
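The fragmentation header allocated in GetEncodedPartitions above is sized from token_partitions_, which is a bit count: a VP8 frame arrives as one first partition plus 2^bits token partitions, hence (1 << token_partitions_) + 1 fragments. With the vp8e_token_partitions values from libvpx's vp8cx.h:

#include <cassert>

int main() {
  // VP8_ONE_TOKENPARTITION = 0 ... VP8_EIGHT_TOKENPARTITION = 3 in vp8cx.h.
  assert((1 << 0) + 1 == 2);  // 1 token partition + the first partition
  assert((1 << 3) + 1 == 9);  // 8 token partitions + the first partition
}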
   rps_.SetRtt(rtt);
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 int VP8EncoderImpl::RegisterEncodeCompleteCallback(
     EncodedImageCallback* callback) {
   encoded_complete_callback_ = callback;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-
 VP8DecoderImpl::VP8DecoderImpl()
     : decode_complete_callback_(NULL),
       inited_(false),
       feedback_mode_(false),
       decoder_(NULL),
       last_keyframe_(),
       image_format_(VPX_IMG_FMT_NONE),
       ref_frame_(NULL),
       propagation_cnt_(-1),
       last_frame_width_(0),
       last_frame_height_(0),
-      key_frame_required_(true) {
-}
+      key_frame_required_(true) {}
 
 VP8DecoderImpl::~VP8DecoderImpl() {
   inited_ = true;  // in order to do the actual release
   Release();
 }
 
 int VP8DecoderImpl::Reset() {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
   InitDecode(&codec_, 1);
   propagation_cnt_ = -1;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
-                               int number_of_cores) {
+int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
   int ret_val = Release();
   if (ret_val < 0) {
     return ret_val;
   }
   if (decoder_ == NULL) {
     decoder_ = new vpx_codec_ctx_t;
   }
   if (inst && inst->codecType == kVideoCodecVP8) {
     feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
   }
   vpx_codec_dec_cfg_t cfg;
   // Setting number of threads to a constant value (1)
   cfg.threads = 1;
   cfg.h = cfg.w = 0;  // set after decode
 
   vpx_codec_flags_t flags = 0;
 #if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
   flags = VPX_CODEC_USE_POSTPROC;
 #ifdef INDEPENDENT_PARTITIONS
   flags |= VPX_CODEC_USE_INPUT_PARTITION;
 #endif
 #endif
 
   if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) {
     return WEBRTC_VIDEO_CODEC_MEMORY;
   }
 
   // Save VideoCodec instance for later; mainly for duplicating the decoder.
   if (&codec_ != inst)
     codec_ = *inst;
   propagation_cnt_ = -1;
 
   inited_ = true;
 
   // Always start with a complete key frame.
   key_frame_required_ = true;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 int VP8DecoderImpl::Decode(const EncodedImage& input_image,
                            bool missing_frames,
                            const RTPFragmentationHeader* fragmentation,
                            const CodecSpecificInfo* codec_specific_info,
                            int64_t /*render_time_ms*/) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
   if (decode_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
   if (input_image._buffer == NULL && input_image._length > 0) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0)
       propagation_cnt_ = 0;
(...skipping 30 matching lines...)
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
   }
   // Restrict error propagation using key frame requests. Disabled when
   // the feedback mode is enabled (RPS).
   // Reset on a key frame refresh.
   if (!feedback_mode_) {
     if (input_image._frameType == kVideoFrameKey &&
         input_image._completeFrame) {
       propagation_cnt_ = -1;
       // Start count on first loss.
     } else if ((!input_image._completeFrame || missing_frames) &&
                propagation_cnt_ == -1) {
       propagation_cnt_ = 0;
     }
     if (propagation_cnt_ >= 0) {
       propagation_cnt_++;
     }
   }
 
   vpx_codec_iter_t iter = NULL;
   vpx_image_t* img;
   int ret;
(...skipping 31 matching lines...)
       propagation_cnt_ = 0;
     }
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
 #endif
 
   // Store encoded frame if key frame. (Used in Copy method.)
   if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
     const uint32_t bytes_to_copy = input_image._length;
     if (last_keyframe_._size < bytes_to_copy) {
-      delete [] last_keyframe_._buffer;
+      delete[] last_keyframe_._buffer;
       last_keyframe_._buffer = NULL;
       last_keyframe_._size = 0;
     }
     uint8_t* temp_buffer = last_keyframe_._buffer;  // Save buffer ptr.
     uint32_t temp_size = last_keyframe_._size;      // Save size.
     last_keyframe_ = input_image;                   // Shallow copy.
     last_keyframe_._buffer = temp_buffer;           // Restore buffer ptr.
     last_keyframe_._size = temp_size;               // Restore buffer size.
     if (!last_keyframe_._buffer) {
       // Allocate memory.
       last_keyframe_._size = bytes_to_copy;
       last_keyframe_._buffer = new uint8_t[last_keyframe_._size];
     }
     // Copy encoded frame.
     memcpy(last_keyframe_._buffer, input_image._buffer, bytes_to_copy);
     last_keyframe_._length = bytes_to_copy;
   }
 
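The save/restore sequence above deep-copies the key frame while reusing the previously allocated buffer whenever it is still large enough. A minimal standalone sketch of that pattern, with a hypothetical Frame struct standing in for EncodedImage (field names are illustrative, not the real class):

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for EncodedImage; only the three fields the
// pattern needs.
struct Frame {
  uint8_t* buffer = nullptr;  // Owned storage.
  uint32_t size = 0;          // Capacity of |buffer|.
  uint32_t length = 0;        // Bytes actually used.
};

void StoreKeyframe(Frame* dst, const Frame& src) {
  if (dst->size < src.length) {
    // Existing capacity is too small: drop the old buffer.
    delete[] dst->buffer;
    dst->buffer = nullptr;
    dst->size = 0;
  }
  if (dst->buffer == nullptr) {
    // (Re)allocate exactly the needed capacity.
    dst->size = src.length;
    dst->buffer = new uint8_t[dst->size];
  }
  // Deep copy of the payload; in the real code the other metadata fields
  // are taken over by the shallow assignment before this point.
  memcpy(dst->buffer, src.buffer, src.length);
  dst->length = src.length;
}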
(...skipping 29 matching lines...)
       if (propagation_cnt_ > 0)
         propagation_cnt_ = 0;
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
     int16_t picture_id = -1;
     if (codec_specific_info) {
       picture_id = codec_specific_info->codecSpecific.VP8.pictureId;
     }
     if (picture_id > -1) {
       if (((reference_updates & VP8_GOLD_FRAME) ||
-          (reference_updates & VP8_ALTR_FRAME)) && !corrupted) {
+           (reference_updates & VP8_ALTR_FRAME)) &&
+          !corrupted) {
         decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
       }
       decode_complete_callback_->ReceivedDecodedFrame(picture_id);
     }
     if (corrupted) {
       // We can decode, but with artifacts.
       return WEBRTC_VIDEO_CODEC_REQUEST_SLI;
     }
   }
   // Check against the error propagation threshold.
   if (propagation_cnt_ > kVp8ErrorPropagationTh) {
     // Reset to avoid requesting key frames too often.
     propagation_cnt_ = 0;
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
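In words: propagation_cnt_ is idle at -1 after a clean key frame, starts at the first loss, increments on every subsequent frame, and forces a key frame request (via the WEBRTC_VIDEO_CODEC_ERROR return) once it crosses kVp8ErrorPropagationTh. A standalone sketch of that policy, assuming a threshold of 30 (the constant is defined earlier in this file; the exact value here is an assumption) and with illustrative names:

#include <cstdint>

namespace {

constexpr int kVp8ErrorPropagationTh = 30;  // Assumed value for the sketch.

class PropagationTracker {
 public:
  // Returns true when enough damaged frames have accumulated that a
  // key frame should be requested.
  bool OnFrame(bool is_keyframe, bool complete, bool missing_frames) {
    if (is_keyframe && complete) {
      count_ = -1;  // Healthy refresh: stop counting.
    } else if ((!complete || missing_frames) && count_ == -1) {
      count_ = 0;  // First loss: start counting.
    }
    if (count_ >= 0)
      ++count_;
    if (count_ > kVp8ErrorPropagationTh) {
      count_ = 0;  // Reset so key frames are not requested too often.
      return true;
    }
    return false;
  }

 private:
  int count_ = -1;  // -1 means "no loss since the last key frame".
};

}  // namespace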
 int VP8DecoderImpl::DecodePartitions(
     const EncodedImage& input_image,
     const RTPFragmentationHeader* fragmentation) {
   for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
-    const uint8_t* partition = input_image._buffer +
-        fragmentation->fragmentationOffset[i];
-    const uint32_t partition_length =
-        fragmentation->fragmentationLength[i];
-    if (vpx_codec_decode(decoder_,
-                         partition,
-                         partition_length,
-                         0,
-                         VPX_DL_REALTIME)) {
+    const uint8_t* partition =
+        input_image._buffer + fragmentation->fragmentationOffset[i];
+    const uint32_t partition_length = fragmentation->fragmentationLength[i];
+    if (vpx_codec_decode(decoder_, partition, partition_length, 0,
+                         VPX_DL_REALTIME)) {
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
   }
   // Signal end of frame data. If there was no frame data this will trigger
   // a full frame concealment.
   if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME))
     return WEBRTC_VIDEO_CODEC_ERROR;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
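For context, a hedged usage sketch of the same partition-by-partition feeding. The libvpx calls (vpx_codec_decode with a NULL payload as the end-of-frame marker) match the real API; the FragmentInfo struct is a hypothetical stand-in for RTPFragmentationHeader:

#include <cstddef>
#include <cstdint>
#include <vector>

#include "vpx/vpx_decoder.h"
#include "vpx/vpx_encoder.h"  // For VPX_DL_REALTIME.

// Hypothetical stand-in for RTPFragmentationHeader.
struct FragmentInfo {
  std::vector<uint32_t> offset;  // Byte offset of each partition.
  std::vector<uint32_t> length;  // Byte length of each partition.
};

bool DecodeByPartition(vpx_codec_ctx_t* decoder,
                       const uint8_t* buffer,
                       const FragmentInfo& frag) {
  for (size_t i = 0; i < frag.offset.size(); ++i) {
    // Feed one partition at a time; a nonzero return is a decode error.
    if (vpx_codec_decode(decoder, buffer + frag.offset[i], frag.length[i],
                         NULL, VPX_DL_REALTIME)) {
      return false;
    }
  }
  // A NULL payload marks end of frame; if no data was fed beforehand,
  // this triggers full-frame concealment inside the decoder.
  return vpx_codec_decode(decoder, NULL, 0, NULL, VPX_DL_REALTIME) == 0;
}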
 int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
                                 uint32_t timestamp,
                                 int64_t ntp_time_ms) {
   if (img == NULL) {
     // Decoder OK and NULL image => no show frame.
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
   }
   last_frame_width_ = img->d_w;
   last_frame_height_ = img->d_h;
   // Allocate memory for decoded image.
   VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h),
                            timestamp, 0, kVideoRotation_0);
-  libyuv::I420Copy(
-      img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
-      img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
-      img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
-      decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
-      decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
-      decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
-      img->d_w, img->d_h);
+  libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+                   img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+                   img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+                   decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
+                   decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
+                   decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
+                   img->d_w, img->d_h);
   decoded_image.set_ntp_time_ms(ntp_time_ms);
   int ret = decode_complete_callback_->Decoded(decoded_image);
   if (ret != 0)
     return ret;
 
   // Remember the image format for later.
   image_format_ = img->fmt;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
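The I420Copy call is per-plane copying with stride handling: libvpx hands back planes whose strides can exceed the display width, and the copy normalizes them into the frame buffer. A minimal sketch of the same plumbing into one contiguous I420 allocation; the libyuv and libvpx types are real, the contiguous destination layout is an assumption made for illustration:

#include <cstdint>
#include <vector>

#include "libyuv/planar_functions.h"  // libyuv::I420Copy
#include "vpx/vpx_image.h"

void CopyToContiguousI420(const vpx_image_t* img, std::vector<uint8_t>* out) {
  const int w = static_cast<int>(img->d_w);
  const int h = static_cast<int>(img->d_h);
  const int cw = (w + 1) / 2;  // Chroma width for 4:2:0.
  const int ch = (h + 1) / 2;  // Chroma height for 4:2:0.
  out->resize(w * h + 2 * cw * ch);
  uint8_t* dst_y = out->data();
  uint8_t* dst_u = dst_y + w * h;   // U plane follows Y.
  uint8_t* dst_v = dst_u + cw * ch;  // V plane follows U.
  // Destination strides equal the plane widths since the buffer is packed.
  libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
                   img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
                   img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
                   dst_y, w, dst_u, cw, dst_v, cw, w, h);
}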
 int VP8DecoderImpl::RegisterDecodeCompleteCallback(
     DecodedImageCallback* callback) {
   decode_complete_callback_ = callback;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 int VP8DecoderImpl::Release() {
   if (last_keyframe_._buffer != NULL) {
-    delete [] last_keyframe_._buffer;
+    delete[] last_keyframe_._buffer;
     last_keyframe_._buffer = NULL;
   }
   if (decoder_ != NULL) {
     if (vpx_codec_destroy(decoder_)) {
       return WEBRTC_VIDEO_CODEC_MEMORY;
     }
     delete decoder_;
     decoder_ = NULL;
   }
   if (ref_frame_ != NULL) {
     vpx_img_free(&ref_frame_->img);
     delete ref_frame_;
     ref_frame_ = NULL;
   }
   buffer_pool_.Release();
   inited_ = false;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 const char* VP8DecoderImpl::ImplementationName() const {
   return "libvpx";
 }
 
 int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) {
   // The type of frame to copy should be set in ref_frame_->frame_type
   // before the call to this function.
-  if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_)
-      != VPX_CODEC_OK) {
+  if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) !=
+      VPX_CODEC_OK) {
     return -1;
   }
-  if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_)
-      != VPX_CODEC_OK) {
+  if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
+      VPX_CODEC_OK) {
     return -1;
   }
   return 0;
 }
 
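A hedged sketch of the handshake CopyReference performs, with the precondition from the comment made explicit: pick the reference to move in vpx_ref_frame_t::frame_type, read it out of the source decoder, then install it into the destination. The control IDs and types come from vpx/vp8.h; the free-standing function and its setup assumption (a ref->img already allocated at the decoded frame size) are illustrative:

#include "vpx/vp8dx.h"  // Pulls in vpx/vp8.h for VP8_COPY_REFERENCE etc.
#include "vpx/vpx_decoder.h"

// Copies one reference frame (here: the golden frame) from one decoder
// instance to another. |ref->img| is assumed to be pre-allocated (e.g.
// with vpx_img_alloc()) at the decoded frame size.
int CopyGoldenReference(vpx_codec_ctx_t* from,
                        vpx_codec_ctx_t* to,
                        vpx_ref_frame_t* ref) {
  ref->frame_type = VP8_GOLD_FRAME;  // Must be set before the copy.
  if (vpx_codec_control(from, VP8_COPY_REFERENCE, ref) != VPX_CODEC_OK)
    return -1;
  if (vpx_codec_control(to, VP8_SET_REFERENCE, ref) != VPX_CODEC_OK)
    return -1;
  return 0;
}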
 }  // namespace webrtc