| OLD | NEW | 
|---|---|
| 1 /* | 1 /* | 
| 2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 
| 3  * | 3  * | 
| 4  *  Use of this source code is governed by a BSD-style license | 4  *  Use of this source code is governed by a BSD-style license | 
| 5  *  that can be found in the LICENSE file in the root of the source | 5  *  that can be found in the LICENSE file in the root of the source | 
| 6  *  tree. An additional intellectual property rights grant can be found | 6  *  tree. An additional intellectual property rights grant can be found | 
| 7  *  in the file PATENTS.  All contributing project authors may | 7  *  in the file PATENTS.  All contributing project authors may | 
| 8  *  be found in the AUTHORS file in the root of the source tree. | 8  *  be found in the AUTHORS file in the root of the source tree. | 
| 9  */ | 9  */ | 
| 10 | 10 | 
| 11 #include "webrtc/modules/video_coding/codecs/vp8/vp8_impl.h" | 11 #include "webrtc/modules/video_coding/codecs/vp8/vp8_impl.h" | 
| 12 | 12 | 
| 13 #include <stdlib.h> | 13 #include <stdlib.h> | 
| 14 #include <string.h> | 14 #include <string.h> | 
| 15 #include <time.h> | 15 #include <time.h> | 
| 16 #include <algorithm> | 16 #include <algorithm> | 
| 17 | 17 | 
| 18 // NOTE(ajm): Path provided by gyp. | 18 // NOTE(ajm): Path provided by gyp. | 
| 19 #include "libyuv/scale.h"  // NOLINT | 19 #include "libyuv/scale.h"    // NOLINT | 
| 20 #include "libyuv/convert.h"  // NOLINT | 20 #include "libyuv/convert.h"  // NOLINT | 
| 21 | 21 | 
| 22 #include "webrtc/base/checks.h" | 22 #include "webrtc/base/checks.h" | 
| 23 #include "webrtc/base/trace_event.h" | 23 #include "webrtc/base/trace_event.h" | 
| 24 #include "webrtc/common.h" | 24 #include "webrtc/common.h" | 
| 25 #include "webrtc/common_types.h" | 25 #include "webrtc/common_types.h" | 
| 26 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" | 26 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" | 
| 27 #include "webrtc/modules/include/module_common_types.h" | 27 #include "webrtc/modules/include/module_common_types.h" | 
| 28 #include "webrtc/modules/video_coding/include/video_codec_interface.h" | 28 #include "webrtc/modules/video_coding/include/video_codec_interface.h" | 
| 29 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h" | 29 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h" | 
| (...skipping 31 matching lines...) |
| 61 | 61 | 
| 62 std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec, | 62 std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec, | 
| 63                                        int bitrate_to_allocate_kbps) { | 63                                        int bitrate_to_allocate_kbps) { | 
| 64   if (codec.numberOfSimulcastStreams <= 1) { | 64   if (codec.numberOfSimulcastStreams <= 1) { | 
| 65     return std::vector<int>(1, bitrate_to_allocate_kbps); | 65     return std::vector<int>(1, bitrate_to_allocate_kbps); | 
| 66   } | 66   } | 
| 67 | 67 | 
| 68   std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams); | 68   std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams); | 
| 69   // Allocate min -> target bitrates as long as we have bitrate to spend. | 69   // Allocate min -> target bitrates as long as we have bitrate to spend. | 
| 70   size_t last_active_stream = 0; | 70   size_t last_active_stream = 0; | 
| 71   for (size_t i = 0; | 71   for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) && | 
| 72        i < static_cast<size_t>(codec.numberOfSimulcastStreams) && | 72                      bitrate_to_allocate_kbps >= | 
| 73            bitrate_to_allocate_kbps >= | 73                          static_cast<int>(codec.simulcastStream[i].minBitrate); | 
| 74                static_cast<int>(codec.simulcastStream[i].minBitrate); |  | 
| 75        ++i) { | 74        ++i) { | 
| 76     last_active_stream = i; | 75     last_active_stream = i; | 
| 77     int allocated_bitrate_kbps = | 76     int allocated_bitrate_kbps = | 
| 78         std::min(static_cast<int>(codec.simulcastStream[i].targetBitrate), | 77         std::min(static_cast<int>(codec.simulcastStream[i].targetBitrate), | 
| 79                  bitrate_to_allocate_kbps); | 78                  bitrate_to_allocate_kbps); | 
| 80     bitrates_kbps[i] = allocated_bitrate_kbps; | 79     bitrates_kbps[i] = allocated_bitrate_kbps; | 
| 81     bitrate_to_allocate_kbps -= allocated_bitrate_kbps; | 80     bitrate_to_allocate_kbps -= allocated_bitrate_kbps; | 
| 82   } | 81   } | 
| 83 | 82 | 
| 84   // Spend additional bits on the highest-quality active layer, up to max | 83   // Spend additional bits on the highest-quality active layer, up to max | 
| (...skipping 40 matching lines...) |
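
The allocation above runs in two phases: each stream in turn receives up to its target bitrate while the remaining budget still covers that stream's minimum, and (in the elided lines) the leftover is spent on the highest active layer up to its max. A minimal standalone sketch of that scheme, using a hypothetical `Stream` struct in place of the real `VideoCodec`/`simulcastStream` fields:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical, simplified stand-in for codec.simulcastStream[i].
struct Stream {
  int min_bitrate_kbps;
  int target_bitrate_kbps;
  int max_bitrate_kbps;
};

std::vector<int> AllocateKbps(const std::vector<Stream>& streams,
                              int available_kbps) {
  std::vector<int> alloc(streams.size(), 0);
  if (streams.empty())
    return alloc;
  std::size_t last_active = 0;
  // Phase 1: min -> target per stream, while the budget covers the minimum.
  for (std::size_t i = 0;
       i < streams.size() && available_kbps >= streams[i].min_bitrate_kbps;
       ++i) {
    last_active = i;
    alloc[i] = std::min(streams[i].target_bitrate_kbps, available_kbps);
    available_kbps -= alloc[i];
  }
  // Phase 2: spend the remainder on the highest active layer, up to its max.
  alloc[last_active] = std::min(alloc[last_active] + available_kbps,
                                streams[last_active].max_bitrate_kbps);
  return alloc;
}
```
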
| 125   } | 124   } | 
| 126   for (int i = 0; i < num_streams; ++i) { | 125   for (int i = 0; i < num_streams; ++i) { | 
| 127     if (codec.width * codec.simulcastStream[i].height != | 126     if (codec.width * codec.simulcastStream[i].height != | 
| 128         codec.height * codec.simulcastStream[i].width) { | 127         codec.height * codec.simulcastStream[i].width) { | 
| 129       return false; | 128       return false; | 
| 130     } | 129     } | 
| 131   } | 130   } | 
| 132   return true; | 131   return true; | 
| 133 } | 132 } | 
| 134 | 133 | 
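
The resolution check above compares aspect ratios by cross-multiplying (`codec.width * stream.height == codec.height * stream.width`), which stays in exact integer arithmetic instead of comparing floating-point quotients. A tiny illustration:

```cpp
#include <cassert>

// True when w1:h1 and w2:h2 describe the same aspect ratio;
// cross-multiplication avoids float division and its rounding issues.
bool SameAspectRatio(int w1, int h1, int w2, int h2) {
  return w1 * h2 == h1 * w2;
}

int main() {
  assert(SameAspectRatio(1280, 720, 640, 360));   // both 16:9
  assert(!SameAspectRatio(1280, 720, 640, 480));  // 16:9 vs. 4:3
}
```
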
| 135 int NumStreamsDisabled(std::vector<bool>& streams) { | 134 int NumStreamsDisabled(const std::vector<bool>& streams) { | 
| 136   int num_disabled = 0; | 135   int num_disabled = 0; | 
| 137   for (bool stream : streams) { | 136   for (bool stream : streams) { | 
| 138     if (!stream) | 137     if (!stream) | 
| 139       ++num_disabled; | 138       ++num_disabled; | 
| 140   } | 139   } | 
| 141   return num_disabled; | 140   return num_disabled; | 
| 142 } | 141 } | 
| 143 }  // namespace | 142 }  // namespace | 
| 144 | 143 | 
| 145 const float kTl1MaxTimeToDropFrames = 20.0f; | 144 const float kTl1MaxTimeToDropFrames = 20.0f; | 
| (...skipping 30 matching lines...) |
| 176 | 175 | 
| 177 VP8EncoderImpl::~VP8EncoderImpl() { | 176 VP8EncoderImpl::~VP8EncoderImpl() { | 
| 178   Release(); | 177   Release(); | 
| 179 } | 178 } | 
| 180 | 179 | 
| 181 int VP8EncoderImpl::Release() { | 180 int VP8EncoderImpl::Release() { | 
| 182   int ret_val = WEBRTC_VIDEO_CODEC_OK; | 181   int ret_val = WEBRTC_VIDEO_CODEC_OK; | 
| 183 | 182 | 
| 184   while (!encoded_images_.empty()) { | 183   while (!encoded_images_.empty()) { | 
| 185     EncodedImage& image = encoded_images_.back(); | 184     EncodedImage& image = encoded_images_.back(); | 
| 186     delete [] image._buffer; | 185     delete[] image._buffer; | 
| 187     encoded_images_.pop_back(); | 186     encoded_images_.pop_back(); | 
| 188   } | 187   } | 
| 189   while (!encoders_.empty()) { | 188   while (!encoders_.empty()) { | 
| 190     vpx_codec_ctx_t& encoder = encoders_.back(); | 189     vpx_codec_ctx_t& encoder = encoders_.back(); | 
| 191     if (vpx_codec_destroy(&encoder)) { | 190     if (vpx_codec_destroy(&encoder)) { | 
| 192       ret_val = WEBRTC_VIDEO_CODEC_MEMORY; | 191       ret_val = WEBRTC_VIDEO_CODEC_MEMORY; | 
| 193     } | 192     } | 
| 194     encoders_.pop_back(); | 193     encoders_.pop_back(); | 
| 195   } | 194   } | 
| 196   configurations_.clear(); | 195   configurations_.clear(); | 
| (...skipping 85 matching lines...) |
| 282     // the target we still allow it to overshoot up to the max before dropping | 281     // the target we still allow it to overshoot up to the max before dropping | 
| 283     // frames. This hack should be improved. | 282     // frames. This hack should be improved. | 
| 284     if (codec_.targetBitrate > 0 && | 283     if (codec_.targetBitrate > 0 && | 
| 285         (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 || | 284         (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 || | 
| 286          codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { | 285          codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { | 
| 287       int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); | 286       int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); | 
| 288       max_bitrate = std::min(codec_.maxBitrate, target_bitrate); | 287       max_bitrate = std::min(codec_.maxBitrate, target_bitrate); | 
| 289       target_bitrate = tl0_bitrate; | 288       target_bitrate = tl0_bitrate; | 
| 290     } | 289     } | 
| 291     configurations_[i].rc_target_bitrate = target_bitrate; | 290     configurations_[i].rc_target_bitrate = target_bitrate; | 
| 292     temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate, | 291     temporal_layers_[stream_idx]->ConfigureBitrates( | 
| 293                                                     max_bitrate, | 292         target_bitrate, max_bitrate, framerate, &configurations_[i]); | 
| 294                                                     framerate, |  | 
| 295                                                     &configurations_[i]); |  | 
| 296     if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { | 293     if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { | 
| 297       return WEBRTC_VIDEO_CODEC_ERROR; | 294       return WEBRTC_VIDEO_CODEC_ERROR; | 
| 298     } | 295     } | 
| 299   } | 296   } | 
| 300   quality_scaler_.ReportFramerate(new_framerate); | 297   quality_scaler_.ReportFramerate(new_framerate); | 
| 301   return WEBRTC_VIDEO_CODEC_OK; | 298   return WEBRTC_VIDEO_CODEC_OK; | 
| 302 } | 299 } | 
| 303 | 300 | 
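
To make the two-temporal-layer branch above concrete, here is a worked example with assumed numbers (600 kbps TL0 target, 1500 kbps codec max, 1000 kbps allocated to the stream); the rate controller is driven at the TL0 bitrate while the capped max leaves overshoot headroom for TL1:

```cpp
#include <algorithm>
#include <cstdio>

int main() {
  // Assumed values, for illustration only.
  int target_bitrate = 1000;     // kbps allocated to this stream
  const int codec_target = 600;  // stands in for codec_.targetBitrate (TL0)
  const int codec_max = 1500;    // stands in for codec_.maxBitrate

  int tl0_bitrate = std::min(codec_target, target_bitrate);  // 600 kbps
  int max_bitrate = std::min(codec_max, target_bitrate);     // 1000 kbps
  target_bitrate = tl0_bitrate;

  // rc_target_bitrate becomes 600 kbps; frames may overshoot up to
  // 1000 kbps before the encoder starts dropping, leaving room for TL1.
  std::printf("target %d kbps, max %d kbps\n", target_bitrate, max_bitrate);
}
```
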
| 304 void VP8EncoderImpl::SetStreamState(bool send_stream, | 301 void VP8EncoderImpl::SetStreamState(bool send_stream, int stream_idx) { | 
| 305                                             int stream_idx) { |  | 
| 306   if (send_stream && !send_stream_[stream_idx]) { | 302   if (send_stream && !send_stream_[stream_idx]) { | 
| 307     // Need a key frame if we have not sent this stream before. | 303     // Need a key frame if we have not sent this stream before. | 
| 308     key_frame_request_[stream_idx] = true; | 304     key_frame_request_[stream_idx] = true; | 
| 309   } | 305   } | 
| 310   send_stream_[stream_idx] = send_stream; | 306   send_stream_[stream_idx] = send_stream; | 
| 311 } | 307 } | 
| 312 | 308 | 
| 313 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, | 309 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, | 
| 314                                                  int num_temporal_layers, | 310                                          int num_temporal_layers, | 
| 315                                                  const VideoCodec& codec) { | 311                                          const VideoCodec& codec) { | 
| 316   const Config default_options; | 312   const Config default_options; | 
| 317   const TemporalLayers::Factory& tl_factory = | 313   const TemporalLayers::Factory& tl_factory = | 
| 318       (codec.extra_options ? codec.extra_options : &default_options) | 314       (codec.extra_options ? codec.extra_options : &default_options) | 
| 319           ->Get<TemporalLayers::Factory>(); | 315           ->Get<TemporalLayers::Factory>(); | 
| 320   if (num_streams == 1) { | 316   if (num_streams == 1) { | 
| 321     if (codec.mode == kScreensharing) { | 317     if (codec.mode == kScreensharing) { | 
| 322       // Special mode when screensharing on a single stream. | 318       // Special mode when screensharing on a single stream. | 
| 323       temporal_layers_.push_back( | 319       temporal_layers_.push_back( | 
| 324           new ScreenshareLayers(num_temporal_layers, rand())); | 320           new ScreenshareLayers(num_temporal_layers, rand())); | 
| 325     } else { | 321     } else { | 
| 326       temporal_layers_.push_back( | 322       temporal_layers_.push_back( | 
| 327           tl_factory.Create(num_temporal_layers, rand())); | 323           tl_factory.Create(num_temporal_layers, rand())); | 
| 328     } | 324     } | 
| 329   } else { | 325   } else { | 
| 330     for (int i = 0; i < num_streams; ++i) { | 326     for (int i = 0; i < num_streams; ++i) { | 
| 331       // TODO(andresp): crash if layers is invalid. | 327       // TODO(andresp): crash if layers is invalid. | 
| 332       int layers = codec.simulcastStream[i].numberOfTemporalLayers; | 328       int layers = codec.simulcastStream[i].numberOfTemporalLayers; | 
| 333       if (layers < 1) layers = 1; | 329       if (layers < 1) | 
|  | 330         layers = 1; | 
| 334       temporal_layers_.push_back(tl_factory.Create(layers, rand())); | 331       temporal_layers_.push_back(tl_factory.Create(layers, rand())); | 
| 335     } | 332     } | 
| 336   } | 333   } | 
| 337 } | 334 } | 
| 338 | 335 | 
| 339 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, | 336 int VP8EncoderImpl::InitEncode(const VideoCodec* inst, | 
| 340                                        int number_of_cores, | 337                                int number_of_cores, | 
| 341                                        size_t /*maxPayloadSize */) { | 338                                size_t /*maxPayloadSize */) { | 
| 342   if (inst == NULL) { | 339   if (inst == NULL) { | 
| 343     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 340     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 
| 344   } | 341   } | 
| 345   if (inst->maxFramerate < 1) { | 342   if (inst->maxFramerate < 1) { | 
| 346     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 343     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 
| 347   } | 344   } | 
| 348   // allow zero to represent an unspecified maxBitRate | 345   // allow zero to represent an unspecified maxBitRate | 
| 349   if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) { | 346   if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) { | 
| 350     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 347     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 
| 351   } | 348   } | 
| (...skipping 16 matching lines...) |
| 368     return retVal; | 365     return retVal; | 
| 369   } | 366   } | 
| 370 | 367 | 
| 371   int number_of_streams = NumberOfStreams(*inst); | 368   int number_of_streams = NumberOfStreams(*inst); | 
| 372   bool doing_simulcast = (number_of_streams > 1); | 369   bool doing_simulcast = (number_of_streams > 1); | 
| 373 | 370 | 
| 374   if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { | 371   if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { | 
| 375     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 372     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 
| 376   } | 373   } | 
| 377 | 374 | 
| 378   int num_temporal_layers = doing_simulcast ? | 375   int num_temporal_layers = | 
| 379       inst->simulcastStream[0].numberOfTemporalLayers : | 376       doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers | 
| 380       inst->codecSpecific.VP8.numberOfTemporalLayers; | 377                       : inst->codecSpecific.VP8.numberOfTemporalLayers; | 
| 381 | 378 | 
| 382   // TODO(andresp): crash if num temporal layers is bananas. | 379   // TODO(andresp): crash if num temporal layers is bananas. | 
| 383   if (num_temporal_layers < 1) num_temporal_layers = 1; | 380   if (num_temporal_layers < 1) | 
|  | 381     num_temporal_layers = 1; | 
| 384   SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); | 382   SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); | 
| 385 | 383 | 
| 386   feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn; | 384   feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn; | 
| 387 | 385 | 
| 388   timestamp_ = 0; | 386   timestamp_ = 0; | 
| 389   codec_ = *inst; | 387   codec_ = *inst; | 
| 390 | 388 | 
| 391   // Code expects simulcastStream resolutions to be correct, make sure they are | 389   // Code expects simulcastStream resolutions to be correct, make sure they are | 
| 392   // filled even when there are no simulcast layers. | 390   // filled even when there are no simulcast layers. | 
| 393   if (codec_.numberOfSimulcastStreams == 0) { | 391   if (codec_.numberOfSimulcastStreams == 0) { | 
| 394     codec_.simulcastStream[0].width = codec_.width; | 392     codec_.simulcastStream[0].width = codec_.width; | 
| 395     codec_.simulcastStream[0].height = codec_.height; | 393     codec_.simulcastStream[0].height = codec_.height; | 
| 396   } | 394   } | 
| 397 | 395 | 
| 398   picture_id_.resize(number_of_streams); | 396   picture_id_.resize(number_of_streams); | 
| 399   last_key_frame_picture_id_.resize(number_of_streams); | 397   last_key_frame_picture_id_.resize(number_of_streams); | 
| 400   encoded_images_.resize(number_of_streams); | 398   encoded_images_.resize(number_of_streams); | 
| 401   encoders_.resize(number_of_streams); | 399   encoders_.resize(number_of_streams); | 
| 402   configurations_.resize(number_of_streams); | 400   configurations_.resize(number_of_streams); | 
| 403   downsampling_factors_.resize(number_of_streams); | 401   downsampling_factors_.resize(number_of_streams); | 
| 404   raw_images_.resize(number_of_streams); | 402   raw_images_.resize(number_of_streams); | 
| 405   send_stream_.resize(number_of_streams); | 403   send_stream_.resize(number_of_streams); | 
| 406   send_stream_[0] = true;  // For non-simulcast case. | 404   send_stream_[0] = true;  // For non-simulcast case. | 
| 407   cpu_speed_.resize(number_of_streams); | 405   cpu_speed_.resize(number_of_streams); | 
| 408   std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); | 406   std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); | 
| 409 | 407 | 
| 410   int idx = number_of_streams - 1; | 408   int idx = number_of_streams - 1; | 
| 411   for (int i = 0; i < (number_of_streams - 1); ++i, --idx) { | 409   for (int i = 0; i < (number_of_streams - 1); ++i, --idx) { | 
| 412     int gcd = GCD(inst->simulcastStream[idx].width, | 410     int gcd = GCD(inst->simulcastStream[idx].width, | 
| 413                   inst->simulcastStream[idx-1].width); | 411                   inst->simulcastStream[idx - 1].width); | 
| 414     downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd; | 412     downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd; | 
| 415     downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd; | 413     downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd; | 
| 416     send_stream_[i] = false; | 414     send_stream_[i] = false; | 
| 417   } | 415   } | 
| 418   if (number_of_streams > 1) { | 416   if (number_of_streams > 1) { | 
| 419     send_stream_[number_of_streams - 1] = false; | 417     send_stream_[number_of_streams - 1] = false; | 
| 420     downsampling_factors_[number_of_streams - 1].num = 1; | 418     downsampling_factors_[number_of_streams - 1].num = 1; | 
| 421     downsampling_factors_[number_of_streams - 1].den = 1; | 419     downsampling_factors_[number_of_streams - 1].den = 1; | 
| 422   } | 420   } | 
| 423   for (int i = 0; i < number_of_streams; ++i) { | 421   for (int i = 0; i < number_of_streams; ++i) { | 
| 424     // Random start, 16 bits is enough. | 422     // Random start, 16 bits is enough. | 
| 425     picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; | 423     picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;  // NOLINT | 
| 426     last_key_frame_picture_id_[i] = -1; | 424     last_key_frame_picture_id_[i] = -1; | 
| 427     // allocate memory for encoded image | 425     // allocate memory for encoded image | 
| 428     if (encoded_images_[i]._buffer != NULL) { | 426     if (encoded_images_[i]._buffer != NULL) { | 
| 429       delete [] encoded_images_[i]._buffer; | 427       delete[] encoded_images_[i]._buffer; | 
| 430     } | 428     } | 
| 431     encoded_images_[i]._size = CalcBufferSize(kI420, | 429     encoded_images_[i]._size = | 
| 432                                               codec_.width, codec_.height); | 430         CalcBufferSize(kI420, codec_.width, codec_.height); | 
| 433     encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size]; | 431     encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size]; | 
| 434     encoded_images_[i]._completeFrame = true; | 432     encoded_images_[i]._completeFrame = true; | 
| 435   } | 433   } | 
| 436   // populate encoder configuration with default values | 434   // populate encoder configuration with default values | 
| 437   if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), | 435   if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0], | 
| 438                                    &configurations_[0], 0)) { | 436                                    0)) { | 
| 439     return WEBRTC_VIDEO_CODEC_ERROR; | 437     return WEBRTC_VIDEO_CODEC_ERROR; | 
| 440   } | 438   } | 
| 441   // setting the time base of the codec | 439   // setting the time base of the codec | 
| 442   configurations_[0].g_timebase.num = 1; | 440   configurations_[0].g_timebase.num = 1; | 
| 443   configurations_[0].g_timebase.den = 90000; | 441   configurations_[0].g_timebase.den = 90000; | 
| 444   configurations_[0].g_lag_in_frames = 0;  // 0- no frame lagging | 442   configurations_[0].g_lag_in_frames = 0;  // 0- no frame lagging | 
| 445 | 443 | 
| 446   // Set the error resilience mode according to user settings. | 444   // Set the error resilience mode according to user settings. | 
| 447   switch (inst->codecSpecific.VP8.resilience) { | 445   switch (inst->codecSpecific.VP8.resilience) { | 
| 448     case kResilienceOff: | 446     case kResilienceOff: | 
| 449       // TODO(marpan): We should keep error resilience off for this mode, | 447       // TODO(marpan): We should keep error resilience off for this mode, | 
| 450       // independent of temporal layer settings, and make sure we set | 448       // independent of temporal layer settings, and make sure we set | 
| 451       // |codecSpecific.VP8.resilience| = |kResilientStream| at higher level | 449       // |codecSpecific.VP8.resilience| = |kResilientStream| at higher level | 
| 452       // code if we want to get error resilience on. | 450       // code if we want to get error resilience on. | 
| 453       configurations_[0].g_error_resilient = 1; | 451       configurations_[0].g_error_resilient = 1; | 
| 454       break; | 452       break; | 
| 455     case kResilientStream: | 453     case kResilientStream: | 
| 456       configurations_[0].g_error_resilient = 1;  // TODO(holmer): Replace with | 454       configurations_[0].g_error_resilient = 1;  // TODO(holmer): Replace with | 
| 457       // VPX_ERROR_RESILIENT_DEFAULT when we | 455       // VPX_ERROR_RESILIENT_DEFAULT when we | 
| 458       // drop support for libvpx 9.6.0. | 456       // drop support for libvpx 9.6.0. | 
| 459       break; | 457       break; | 
| 460     case kResilientFrames: | 458     case kResilientFrames: | 
| 461 #ifdef INDEPENDENT_PARTITIONS | 459 #ifdef INDEPENDENT_PARTITIONS | 
| 462       configurations_[0]-g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT | | 460       configurations_[0].g_error_resilient = | 
| 463       VPX_ERROR_RESILIENT_PARTITIONS; | 461           VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS; | 
| 464       break; | 462       break; | 
| 465 #else | 463 #else | 
| 466       return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;  // Not supported | 464       return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;  // Not supported | 
| 467 #endif | 465 #endif | 
| 468   } | 466   } | 
| 469 | 467 | 
| 470   // rate control settings | 468   // rate control settings | 
| 471   configurations_[0].rc_dropframe_thresh = | 469   configurations_[0].rc_dropframe_thresh = | 
| 472       inst->codecSpecific.VP8.frameDroppingOn ? 30 : 0; | 470       inst->codecSpecific.VP8.frameDroppingOn ? 30 : 0; | 
| 473   configurations_[0].rc_end_usage = VPX_CBR; | 471   configurations_[0].rc_end_usage = VPX_CBR; | 
| (...skipping 55 matching lines...) |
| 529   for (int i = 1; i < number_of_streams; ++i) { | 527   for (int i = 1; i < number_of_streams; ++i) { | 
| 530     cpu_speed_[i] = | 528     cpu_speed_[i] = | 
| 531         SetCpuSpeed(inst->simulcastStream[number_of_streams - 1 - i].width, | 529         SetCpuSpeed(inst->simulcastStream[number_of_streams - 1 - i].width, | 
| 532                     inst->simulcastStream[number_of_streams - 1 - i].height); | 530                     inst->simulcastStream[number_of_streams - 1 - i].height); | 
| 533   } | 531   } | 
| 534   configurations_[0].g_w = inst->width; | 532   configurations_[0].g_w = inst->width; | 
| 535   configurations_[0].g_h = inst->height; | 533   configurations_[0].g_h = inst->height; | 
| 536 | 534 | 
| 537   // Determine number of threads based on the image size and #cores. | 535   // Determine number of threads based on the image size and #cores. | 
| 538   // TODO(fbarchard): Consider number of Simulcast layers. | 536   // TODO(fbarchard): Consider number of Simulcast layers. | 
| 539   configurations_[0].g_threads = NumberOfThreads(configurations_[0].g_w, | 537   configurations_[0].g_threads = NumberOfThreads( | 
| 540                                                  configurations_[0].g_h, | 538       configurations_[0].g_w, configurations_[0].g_h, number_of_cores); | 
| 541                                                  number_of_cores); |  | 
| 542 | 539 | 
| 543   // Creating a wrapper to the image - setting image data to NULL. | 540   // Creating a wrapper to the image - setting image data to NULL. | 
| 544   // Actual pointer will be set in encode. Setting align to 1, as it | 541   // Actual pointer will be set in encode. Setting align to 1, as it | 
| 545   // is meaningless (no memory allocation is done here). | 542   // is meaningless (no memory allocation is done here). | 
| 546   vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, | 543   vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, | 
| 547                1, NULL); | 544                NULL); | 
| 548 | 545 | 
| 549   if (encoders_.size() == 1) { | 546   if (encoders_.size() == 1) { | 
| 550     configurations_[0].rc_target_bitrate = inst->startBitrate; | 547     configurations_[0].rc_target_bitrate = inst->startBitrate; | 
| 551     temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, | 548     temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate, | 
| 552                                            inst->maxBitrate, |  | 
| 553                                            inst->maxFramerate, | 549                                            inst->maxFramerate, | 
| 554                                            &configurations_[0]); | 550                                            &configurations_[0]); | 
| 555   } else { | 551   } else { | 
| 556     // Note the order we use is different from webm; we have lowest resolution | 552     // Note the order we use is different from webm; we have lowest resolution | 
| 557     // at position 0 and they have highest resolution at position 0. | 553     // at position 0 and they have highest resolution at position 0. | 
| 558     int stream_idx = encoders_.size() - 1; | 554     int stream_idx = encoders_.size() - 1; | 
| 559     std::vector<int> stream_bitrates = | 555     std::vector<int> stream_bitrates = | 
| 560         GetStreamBitratesKbps(codec_, inst->startBitrate); | 556         GetStreamBitratesKbps(codec_, inst->startBitrate); | 
| 561     SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 557     SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); | 
| 562     configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; | 558     configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx]; | 
| (...skipping 71 matching lines...) |
| 634     // 1 thread for VGA or less. | 630     // 1 thread for VGA or less. | 
| 635     return 1; | 631     return 1; | 
| 636   } | 632   } | 
| 637 } | 633 } | 
| 638 | 634 | 
| 639 int VP8EncoderImpl::InitAndSetControlSettings() { | 635 int VP8EncoderImpl::InitAndSetControlSettings() { | 
| 640   vpx_codec_flags_t flags = 0; | 636   vpx_codec_flags_t flags = 0; | 
| 641   flags |= VPX_CODEC_USE_OUTPUT_PARTITION; | 637   flags |= VPX_CODEC_USE_OUTPUT_PARTITION; | 
| 642 | 638 | 
| 643   if (encoders_.size() > 1) { | 639   if (encoders_.size() > 1) { | 
| 644     int error = vpx_codec_enc_init_multi(&encoders_[0], | 640     int error = vpx_codec_enc_init_multi(&encoders_[0], vpx_codec_vp8_cx(), | 
| 645                                  vpx_codec_vp8_cx(), | 641                                          &configurations_[0], encoders_.size(), | 
| 646                                  &configurations_[0], | 642                                          flags, &downsampling_factors_[0]); | 
| 647                                  encoders_.size(), |  | 
| 648                                  flags, |  | 
| 649                                  &downsampling_factors_[0]); |  | 
| 650     if (error) { | 643     if (error) { | 
| 651       return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 644       return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 
| 652     } | 645     } | 
| 653   } else { | 646   } else { | 
| 654     if (vpx_codec_enc_init(&encoders_[0], | 647     if (vpx_codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(), | 
| 655                            vpx_codec_vp8_cx(), | 648                            &configurations_[0], flags)) { | 
| 656                            &configurations_[0], |  | 
| 657                            flags)) { |  | 
| 658       return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 649       return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 
| 659     } | 650     } | 
| 660   } | 651   } | 
| 661   // Enable denoising for the highest resolution stream, and for | 652   // Enable denoising for the highest resolution stream, and for | 
| 662   // the second highest resolution if we are doing more than 2 | 653   // the second highest resolution if we are doing more than 2 | 
| 663   // spatial layers/streams. | 654   // spatial layers/streams. | 
| 664   // TODO(holmer): Investigate possibility of adding a libvpx API | 655   // TODO(holmer): Investigate possibility of adding a libvpx API | 
| 665   // for getting the denoised frame from the encoder and using that | 656   // for getting the denoised frame from the encoder and using that | 
| 666   // when encoding lower resolution streams. Would it work with the | 657   // when encoding lower resolution streams. Would it work with the | 
| 667   // multi-res encoding feature? | 658   // multi-res encoding feature? | 
| 668   denoiserState denoiser_state = kDenoiserOnYOnly; | 659   denoiserState denoiser_state = kDenoiserOnYOnly; | 
| 669 #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) | 660 #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) | 
| 670   denoiser_state = kDenoiserOnYOnly; | 661   denoiser_state = kDenoiserOnYOnly; | 
| 671 #else | 662 #else | 
| 672   denoiser_state = kDenoiserOnAdaptive; | 663   denoiser_state = kDenoiserOnAdaptive; | 
| 673 #endif | 664 #endif | 
| 674   vpx_codec_control(&encoders_[0], VP8E_SET_NOISE_SENSITIVITY, | 665   vpx_codec_control( | 
| 675                     codec_.codecSpecific.VP8.denoisingOn ? | 666       &encoders_[0], VP8E_SET_NOISE_SENSITIVITY, | 
| 676                     denoiser_state : kDenoiserOff); | 667       codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff); | 
| 677   if (encoders_.size() > 2) { | 668   if (encoders_.size() > 2) { | 
| 678     vpx_codec_control(&encoders_[1], VP8E_SET_NOISE_SENSITIVITY, | 669     vpx_codec_control( | 
| 679                       codec_.codecSpecific.VP8.denoisingOn ? | 670         &encoders_[1], VP8E_SET_NOISE_SENSITIVITY, | 
| 680                       denoiser_state : kDenoiserOff); | 671         codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff); | 
| 681   } | 672   } | 
| 682   for (size_t i = 0; i < encoders_.size(); ++i) { | 673   for (size_t i = 0; i < encoders_.size(); ++i) { | 
| 683     // Allow more screen content to be detected as static. | 674     // Allow more screen content to be detected as static. | 
| 684     vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD, | 675     vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD, | 
| 685                       codec_.mode == kScreensharing ? 300 : 1); | 676                       codec_.mode == kScreensharing ? 300 : 1); | 
| 686     vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]); | 677     vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]); | 
| 687     vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS, | 678     vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS, | 
| 688                       static_cast<vp8e_token_partitions>(token_partitions_)); | 679                       static_cast<vp8e_token_partitions>(token_partitions_)); | 
| 689     vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 680     vpx_codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 
| 690                       rc_max_intra_target_); | 681                       rc_max_intra_target_); | 
| (...skipping 12 matching lines...) |
| 703   // Max target size = scalePar * optimalBufferSize * targetBR[Kbps]. | 694   // Max target size = scalePar * optimalBufferSize * targetBR[Kbps]. | 
| 704   // This value is presented in percentage of perFrameBw: | 695   // This value is presented in percentage of perFrameBw: | 
| 705   // perFrameBw = targetBR[Kbps] * 1000 / frameRate. | 696   // perFrameBw = targetBR[Kbps] * 1000 / frameRate. | 
| 706   // The target in % is as follows: | 697   // The target in % is as follows: | 
| 707 | 698 | 
| 708   float scalePar = 0.5; | 699   float scalePar = 0.5; | 
| 709   uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10; | 700   uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10; | 
| 710 | 701 | 
| 711   // Don't go below 3 times the per frame bandwidth. | 702   // Don't go below 3 times the per frame bandwidth. | 
| 712   const uint32_t minIntraTh = 300; | 703   const uint32_t minIntraTh = 300; | 
| 713   return (targetPct < minIntraTh) ? minIntraTh: targetPct; | 704   return (targetPct < minIntraTh) ? minIntraTh : targetPct; | 
| 714 } | 705 } | 
| 715 | 706 | 
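
A worked instance of the percentage computation above, under the assumption that `optimalBuffersize` is in milliseconds: with a 600 ms buffer, `scalePar` = 0.5 and 30 fps, the cap is 600 * 0.5 * 30 / 10 = 900, i.e. a key frame may spend up to nine times the average per-frame bandwidth, comfortably above the 300% floor:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mirrors the computation above (buffer size in ms is an assumption).
uint32_t MaxIntraTargetPct(int optimal_buffer_size_ms, float scale_par,
                           int max_framerate) {
  uint32_t target_pct =
      optimal_buffer_size_ms * scale_par * max_framerate / 10;
  const uint32_t min_intra_th = 300;  // never below 3x per-frame bandwidth
  return std::max(target_pct, min_intra_th);
}

int main() {
  std::printf("%u\n", MaxIntraTargetPct(600, 0.5f, 30));  // prints 900
}
```
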
| 716 int VP8EncoderImpl::Encode(const VideoFrame& frame, | 707 int VP8EncoderImpl::Encode(const VideoFrame& frame, | 
| 717                            const CodecSpecificInfo* codec_specific_info, | 708                            const CodecSpecificInfo* codec_specific_info, | 
| 718                            const std::vector<FrameType>* frame_types) { | 709                            const std::vector<FrameType>* frame_types) { | 
| 719   if (!inited_) | 710   if (!inited_) | 
| 720     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 711     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 
| 721   if (frame.IsZeroSize()) | 712   if (frame.IsZeroSize()) | 
| 722     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 713     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | 
| 723   if (encoded_complete_callback_ == NULL) | 714   if (encoded_complete_callback_ == NULL) | 
| 724     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 715     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 
| 725 | 716 | 
| 726   if (quality_scaler_enabled_) | 717   if (quality_scaler_enabled_) | 
| 727     quality_scaler_.OnEncodeFrame(frame); | 718     quality_scaler_.OnEncodeFrame(frame); | 
| 728   const VideoFrame& input_image = | 719   const VideoFrame& input_image = | 
| 729       quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame; | 720       quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame; | 
| 730 | 721 | 
| 731   if (quality_scaler_enabled_ && (input_image.width() != codec_.width || | 722   if (quality_scaler_enabled_ && (input_image.width() != codec_.width || | 
| 732       input_image.height() != codec_.height)) { | 723                                   input_image.height() != codec_.height)) { | 
| 733     int ret = UpdateCodecFrameSize(input_image); | 724     int ret = UpdateCodecFrameSize(input_image); | 
| 734     if (ret < 0) | 725     if (ret < 0) | 
| 735       return ret; | 726       return ret; | 
| 736   } | 727   } | 
| 737 | 728 | 
| 738   // Since we are extracting raw pointers from |input_image| to | 729   // Since we are extracting raw pointers from |input_image| to | 
| 739   // |raw_images_[0]|, the resolution of these frames must match. Note that | 730   // |raw_images_[0]|, the resolution of these frames must match. Note that | 
| 740   // |input_image| might be scaled from |frame|. In that case, the resolution of | 731   // |input_image| might be scaled from |frame|. In that case, the resolution of | 
| 741   // |raw_images_[0]| should have been updated in UpdateCodecFrameSize. | 732   // |raw_images_[0]| should have been updated in UpdateCodecFrameSize. | 
| 742   RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w)); | 733   RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w)); | 
| 743   RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h)); | 734   RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h)); | 
| 744 | 735 | 
| 745   // Image in vpx_image_t format. | 736   // Image in vpx_image_t format. | 
| 746   // Input image is const. VP8's raw image is not defined as const. | 737   // Input image is const. VP8's raw image is not defined as const. | 
| 747   raw_images_[0].planes[VPX_PLANE_Y] = | 738   raw_images_[0].planes[VPX_PLANE_Y] = | 
| 748      const_cast<uint8_t*>(input_image.buffer(kYPlane)); | 739       const_cast<uint8_t*>(input_image.buffer(kYPlane)); | 
| 749   raw_images_[0].planes[VPX_PLANE_U] = | 740   raw_images_[0].planes[VPX_PLANE_U] = | 
| 750      const_cast<uint8_t*>(input_image.buffer(kUPlane)); | 741       const_cast<uint8_t*>(input_image.buffer(kUPlane)); | 
| 751   raw_images_[0].planes[VPX_PLANE_V] = | 742   raw_images_[0].planes[VPX_PLANE_V] = | 
| 752      const_cast<uint8_t*>(input_image.buffer(kVPlane)); | 743       const_cast<uint8_t*>(input_image.buffer(kVPlane)); | 
| 753 | 744 | 
| 754   raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane); | 745   raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane); | 
| 755   raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane); | 746   raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane); | 
| 756   raw_images_[0].stride[VPX_PLANE_V] = input_image.stride(kVPlane); | 747   raw_images_[0].stride[VPX_PLANE_V] = input_image.stride(kVPlane); | 
| 757 | 748 | 
| 758   for (size_t i = 1; i < encoders_.size(); ++i) { | 749   for (size_t i = 1; i < encoders_.size(); ++i) { | 
| 759     // Scale the image down a number of times by downsampling factor | 750     // Scale the image down a number of times by downsampling factor | 
| 760     libyuv::I420Scale( | 751     libyuv::I420Scale( | 
| 761         raw_images_[i-1].planes[VPX_PLANE_Y], | 752         raw_images_[i - 1].planes[VPX_PLANE_Y], | 
| 762         raw_images_[i-1].stride[VPX_PLANE_Y], | 753         raw_images_[i - 1].stride[VPX_PLANE_Y], | 
| 763         raw_images_[i-1].planes[VPX_PLANE_U], | 754         raw_images_[i - 1].planes[VPX_PLANE_U], | 
| 764         raw_images_[i-1].stride[VPX_PLANE_U], | 755         raw_images_[i - 1].stride[VPX_PLANE_U], | 
| 765         raw_images_[i-1].planes[VPX_PLANE_V], | 756         raw_images_[i - 1].planes[VPX_PLANE_V], | 
| 766         raw_images_[i-1].stride[VPX_PLANE_V], | 757         raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w, | 
| 767         raw_images_[i-1].d_w, raw_images_[i-1].d_h, | 758         raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y], | 
| 768         raw_images_[i].planes[VPX_PLANE_Y], raw_images_[i].stride[VPX_PLANE_Y], | 759         raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U], | 
| 769         raw_images_[i].planes[VPX_PLANE_U], raw_images_[i].stride[VPX_PLANE_U], | 760         raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V], | 
| 770         raw_images_[i].planes[VPX_PLANE_V], raw_images_[i].stride[VPX_PLANE_V], | 761         raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w, | 
| 771         raw_images_[i].d_w, raw_images_[i].d_h, libyuv::kFilterBilinear); | 762         raw_images_[i].d_h, libyuv::kFilterBilinear); | 
| 772   } | 763   } | 
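
Note the chaining in the loop above: each lower simulcast layer is scaled from the previous (next-larger) layer rather than from the full-resolution input, so every `I420Scale` call reads the smallest sufficient source, matching the pairwise `downsampling_factors_` computed from consecutive stream widths in `InitEncode`.
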
| 773   vpx_enc_frame_flags_t flags[kMaxSimulcastStreams]; | 764   vpx_enc_frame_flags_t flags[kMaxSimulcastStreams]; | 
| 774   for (size_t i = 0; i < encoders_.size(); ++i) { | 765   for (size_t i = 0; i < encoders_.size(); ++i) { | 
| 775     int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp()); | 766     int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp()); | 
| 776     if (ret < 0) { | 767     if (ret < 0) { | 
| 777       // Drop this frame. | 768       // Drop this frame. | 
| 778       return WEBRTC_VIDEO_CODEC_OK; | 769       return WEBRTC_VIDEO_CODEC_OK; | 
| 779     } | 770     } | 
| 780     flags[i] = ret; | 771     flags[i] = ret; | 
| 781   } | 772   } | 
| (...skipping 14 matching lines...) |
| 796       } | 787       } | 
| 797     } | 788     } | 
| 798   } | 789   } | 
| 799   // The flag modification below (due to forced key frame, RPS, etc.,) for now | 790   // The flag modification below (due to forced key frame, RPS, etc.,) for now | 
| 800   // will be the same for all encoders/spatial layers. | 791   // will be the same for all encoders/spatial layers. | 
| 801   // TODO(marpan/holmer): Allow for key frame request to be set per encoder. | 792   // TODO(marpan/holmer): Allow for key frame request to be set per encoder. | 
| 802   bool only_predict_from_key_frame = false; | 793   bool only_predict_from_key_frame = false; | 
| 803   if (send_key_frame) { | 794   if (send_key_frame) { | 
| 804     // Adapt the size of the key frame when in screenshare with 1 temporal | 795     // Adapt the size of the key frame when in screenshare with 1 temporal | 
| 805     // layer. | 796     // layer. | 
| 806     if (encoders_.size() == 1 && codec_.mode == kScreensharing | 797     if (encoders_.size() == 1 && codec_.mode == kScreensharing && | 
| 807         && codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) { | 798         codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) { | 
| 808       const uint32_t forceKeyFrameIntraTh = 100; | 799       const uint32_t forceKeyFrameIntraTh = 100; | 
| 809       vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 800       vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 
| 810                         forceKeyFrameIntraTh); | 801                         forceKeyFrameIntraTh); | 
| 811     } | 802     } | 
| 812     // Key frame request from caller. | 803     // Key frame request from caller. | 
| 813     // Will update both golden and alt-ref. | 804     // Will update both golden and alt-ref. | 
| 814     for (size_t i = 0; i < encoders_.size(); ++i) { | 805     for (size_t i = 0; i < encoders_.size(); ++i) { | 
| 815       flags[i] = VPX_EFLAG_FORCE_KF; | 806       flags[i] = VPX_EFLAG_FORCE_KF; | 
| 816     } | 807     } | 
| 817     std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); | 808     std::fill(key_frame_request_.begin(), key_frame_request_.end(), false); | 
| 818   } else if (codec_specific_info && | 809   } else if (codec_specific_info && | 
| 819       codec_specific_info->codecType == kVideoCodecVP8) { | 810              codec_specific_info->codecType == kVideoCodecVP8) { | 
| 820     if (feedback_mode_) { | 811     if (feedback_mode_) { | 
| 821       // Handle RPSI and SLI messages and set up the appropriate encode flags. | 812       // Handle RPSI and SLI messages and set up the appropriate encode flags. | 
| 822       bool sendRefresh = false; | 813       bool sendRefresh = false; | 
| 823       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { | 814       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { | 
| 824         rps_.ReceivedRPSI( | 815         rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI); | 
| 825             codec_specific_info->codecSpecific.VP8.pictureIdRPSI); |  | 
| 826       } | 816       } | 
| 827       if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) { | 817       if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) { | 
| 828         sendRefresh = rps_.ReceivedSLI(input_image.timestamp()); | 818         sendRefresh = rps_.ReceivedSLI(input_image.timestamp()); | 
| 829       } | 819       } | 
| 830       for (size_t i = 0; i < encoders_.size(); ++i) { | 820       for (size_t i = 0; i < encoders_.size(); ++i) { | 
| 831         flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh, | 821         flags[i] = rps_.EncodeFlags(picture_id_[i], sendRefresh, | 
| 832                                     input_image.timestamp()); | 822                                     input_image.timestamp()); | 
| 833       } | 823       } | 
| 834     } else { | 824     } else { | 
| 835       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { | 825       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { | 
| (...skipping 31 matching lines...) |
| 867     // change isn't stored in configurations_ so change will be discarded at | 857     // change isn't stored in configurations_ so change will be discarded at | 
| 868     // the next update. | 858     // the next update. | 
| 869     vpx_codec_enc_cfg_t temp_config; | 859     vpx_codec_enc_cfg_t temp_config; | 
| 870     memcpy(&temp_config, &configurations_[i], sizeof(vpx_codec_enc_cfg_t)); | 860     memcpy(&temp_config, &configurations_[i], sizeof(vpx_codec_enc_cfg_t)); | 
| 871     if (temporal_layers_[stream_idx]->UpdateConfiguration(&temp_config)) { | 861     if (temporal_layers_[stream_idx]->UpdateConfiguration(&temp_config)) { | 
| 872       if (vpx_codec_enc_config_set(&encoders_[i], &temp_config)) | 862       if (vpx_codec_enc_config_set(&encoders_[i], &temp_config)) | 
| 873         return WEBRTC_VIDEO_CODEC_ERROR; | 863         return WEBRTC_VIDEO_CODEC_ERROR; | 
| 874     } | 864     } | 
| 875 | 865 | 
| 876     vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]); | 866     vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]); | 
| 877     vpx_codec_control(&encoders_[i], | 867     vpx_codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID, | 
| 878                       VP8E_SET_TEMPORAL_LAYER_ID, |  | 
| 879                       temporal_layers_[stream_idx]->CurrentLayerId()); | 868                       temporal_layers_[stream_idx]->CurrentLayerId()); | 
| 880   } | 869   } | 
| 881   // TODO(holmer): Ideally the duration should be the timestamp diff of this | 870   // TODO(holmer): Ideally the duration should be the timestamp diff of this | 
| 882   // frame and the next frame to be encoded, which we don't have. Instead we | 871   // frame and the next frame to be encoded, which we don't have. Instead we | 
| 883   // would like to use the duration of the previous frame. Unfortunately the | 872   // would like to use the duration of the previous frame. Unfortunately the | 
| 884   // rate control seems to be off with that setup. Using the average input | 873   // rate control seems to be off with that setup. Using the average input | 
| 885   // frame rate to calculate an average duration for now. | 874   // frame rate to calculate an average duration for now. | 
| 886   assert(codec_.maxFramerate > 0); | 875   assert(codec_.maxFramerate > 0); | 
| 887   uint32_t duration = 90000 / codec_.maxFramerate; | 876   uint32_t duration = 90000 / codec_.maxFramerate; | 
| 888 | 877 | 
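
The duration above is expressed in the 90 kHz RTP video clock, so at e.g. 30 fps each frame advances the timestamp by 90000 / 30 = 3000 ticks; as the TODO notes, this is an average derived from `maxFramerate`, not the true gap to the next frame.
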
| 889   // Note we must pass 0 for |flags| field in encode call below since they are | 878   // Note we must pass 0 for |flags| field in encode call below since they are | 
| 890   // set above in |vpx_codec_control| function for each encoder/spatial layer. | 879   // set above in |vpx_codec_control| function for each encoder/spatial layer. | 
| 891   int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_, | 880   int error = vpx_codec_encode(&encoders_[0], &raw_images_[0], timestamp_, | 
| 892                                duration, 0, VPX_DL_REALTIME); | 881                                duration, 0, VPX_DL_REALTIME); | 
| 893   // Reset specific intra frame thresholds, following the key frame. | 882   // Reset specific intra frame thresholds, following the key frame. | 
| 894   if (send_key_frame) { | 883   if (send_key_frame) { | 
| 895     vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 884     vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, | 
| 896         rc_max_intra_target_); | 885                       rc_max_intra_target_); | 
| 897   } | 886   } | 
| 898   if (error) | 887   if (error) | 
| 899     return WEBRTC_VIDEO_CODEC_ERROR; | 888     return WEBRTC_VIDEO_CODEC_ERROR; | 
| 900   timestamp_ += duration; | 889   timestamp_ += duration; | 
| 901   return GetEncodedPartitions(input_image, only_predict_from_key_frame); | 890   return GetEncodedPartitions(input_image, only_predict_from_key_frame); | 
| 902 } | 891 } | 
| 903 | 892 | 
| 904 // TODO(pbos): Make sure this works properly for >1 encoders. | 893 // TODO(pbos): Make sure this works properly for >1 encoders. | 
| 905 int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) { | 894 int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) { | 
| 906   codec_.width = input_image.width(); | 895   codec_.width = input_image.width(); | 
| 907   codec_.height = input_image.height(); | 896   codec_.height = input_image.height(); | 
| 908   if (codec_.numberOfSimulcastStreams <= 1) { | 897   if (codec_.numberOfSimulcastStreams <= 1) { | 
| 909     // For now scaling is only used for single-layer streams. | 898     // For now scaling is only used for single-layer streams. | 
| 910     codec_.simulcastStream[0].width = input_image.width(); | 899     codec_.simulcastStream[0].width = input_image.width(); | 
| 911     codec_.simulcastStream[0].height = input_image.height(); | 900     codec_.simulcastStream[0].height = input_image.height(); | 
| 912   } | 901   } | 
| 913   // Update the cpu_speed setting for resolution change. | 902   // Update the cpu_speed setting for resolution change. | 
| 914   vpx_codec_control(&(encoders_[0]), | 903   vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED, | 
| 915                     VP8E_SET_CPUUSED, |  | 
| 916                     SetCpuSpeed(codec_.width, codec_.height)); | 904                     SetCpuSpeed(codec_.width, codec_.height)); | 
| 917   raw_images_[0].w = codec_.width; | 905   raw_images_[0].w = codec_.width; | 
| 918   raw_images_[0].h = codec_.height; | 906   raw_images_[0].h = codec_.height; | 
| 919   raw_images_[0].d_w = codec_.width; | 907   raw_images_[0].d_w = codec_.width; | 
| 920   raw_images_[0].d_h = codec_.height; | 908   raw_images_[0].d_h = codec_.height; | 
| 921   vpx_img_set_rect(&raw_images_[0], 0, 0, codec_.width, codec_.height); | 909   vpx_img_set_rect(&raw_images_[0], 0, 0, codec_.width, codec_.height); | 
| 922 | 910 | 
| 923   // Update encoder context for new frame size. | 911   // Update encoder context for new frame size. | 
| 924   // Change of frame size will automatically trigger a key frame. | 912   // Change of frame size will automatically trigger a key frame. | 
| 925   configurations_[0].g_w = codec_.width; | 913   configurations_[0].g_w = codec_.width; | 
| (...skipping 12 matching lines...) |
| 938     bool only_predicting_from_key_frame) { | 926     bool only_predicting_from_key_frame) { | 
| 939   assert(codec_specific != NULL); | 927   assert(codec_specific != NULL); | 
| 940   codec_specific->codecType = kVideoCodecVP8; | 928   codec_specific->codecType = kVideoCodecVP8; | 
| 941   CodecSpecificInfoVP8* vp8Info = &(codec_specific->codecSpecific.VP8); | 929   CodecSpecificInfoVP8* vp8Info = &(codec_specific->codecSpecific.VP8); | 
| 942   vp8Info->pictureId = picture_id_[stream_idx]; | 930   vp8Info->pictureId = picture_id_[stream_idx]; | 
| 943   if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) { | 931   if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) { | 
| 944     last_key_frame_picture_id_[stream_idx] = picture_id_[stream_idx]; | 932     last_key_frame_picture_id_[stream_idx] = picture_id_[stream_idx]; | 
| 945   } | 933   } | 
| 946   vp8Info->simulcastIdx = stream_idx; | 934   vp8Info->simulcastIdx = stream_idx; | 
| 947   vp8Info->keyIdx = kNoKeyIdx;  // TODO(hlundin) populate this | 935   vp8Info->keyIdx = kNoKeyIdx;  // TODO(hlundin) populate this | 
| 948   vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? | 936   vp8Info->nonReference = | 
| 949       true : false; | 937       (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? true : false; | 
| 950   bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) || | 938   bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) || | 
| 951                                 only_predicting_from_key_frame; | 939                                only_predicting_from_key_frame; | 
| 952   temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point, | 940   temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point, | 
| 953                                                       vp8Info, | 941                                                       vp8Info, timestamp); | 
| 954                                                       timestamp); |  | 
| 955   // Prepare next. | 942   // Prepare next. | 
| 956   picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF; | 943   picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF; | 
| 957 } | 944 } | 
| 958 | 945 | 
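
The masking above implements the 15-bit PictureID of the VP8 RTP payload descriptor: the ID starts at a random 15-bit value (see `InitEncode`) and `(id + 1) & 0x7FFF` wraps it modulo 2^15. A minimal sketch:

```cpp
#include <cstdint>
#include <cstdio>

// 15-bit PictureID increment with wrap-around, as in the code above.
uint16_t NextPictureId(uint16_t id) {
  return (id + 1) & 0x7FFF;
}

int main() {
  std::printf("%u\n", static_cast<unsigned>(NextPictureId(0x7FFE)));  // 32767
  std::printf("%u\n", static_cast<unsigned>(NextPictureId(0x7FFF)));  // wraps to 0
}
```
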
| 959 int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image, | 946 int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image, | 
| 960                                          bool only_predicting_from_key_frame) { | 947                                          bool only_predicting_from_key_frame) { | 
| 961   int bw_resolutions_disabled = | 948   int bw_resolutions_disabled = | 
| 962       (encoders_.size() > 1) ? NumStreamsDisabled(send_stream_) : -1; | 949       (encoders_.size() > 1) ? NumStreamsDisabled(send_stream_) : -1; | 
| 963 | 950 | 
| 964   int stream_idx = static_cast<int>(encoders_.size()) - 1; | 951   int stream_idx = static_cast<int>(encoders_.size()) - 1; | 
| 965   int result = WEBRTC_VIDEO_CODEC_OK; | 952   int result = WEBRTC_VIDEO_CODEC_OK; | 
| 966   for (size_t encoder_idx = 0; encoder_idx < encoders_.size(); | 953   for (size_t encoder_idx = 0; encoder_idx < encoders_.size(); | 
| 967       ++encoder_idx, --stream_idx) { | 954        ++encoder_idx, --stream_idx) { | 
| 968     vpx_codec_iter_t iter = NULL; | 955     vpx_codec_iter_t iter = NULL; | 
| 969     int part_idx = 0; | 956     int part_idx = 0; | 
| 970     encoded_images_[encoder_idx]._length = 0; | 957     encoded_images_[encoder_idx]._length = 0; | 
| 971     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta; | 958     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta; | 
| 972     RTPFragmentationHeader frag_info; | 959     RTPFragmentationHeader frag_info; | 
| 973     // token_partitions_ is number of bits used. | 960     // token_partitions_ is number of bits used. | 
| 974     frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) | 961     frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) + | 
| 975                                                    + 1); | 962                                                    1); | 
| 976     CodecSpecificInfo codec_specific; | 963     CodecSpecificInfo codec_specific; | 
| 977     const vpx_codec_cx_pkt_t *pkt = NULL; | 964     const vpx_codec_cx_pkt_t* pkt = NULL; | 
| 978     while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], | 965     while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) != | 
| 979                                         &iter)) != NULL) { | 966            NULL) { | 
| 980       switch (pkt->kind) { | 967       switch (pkt->kind) { | 
| 981         case VPX_CODEC_CX_FRAME_PKT: { | 968         case VPX_CODEC_CX_FRAME_PKT: { | 
| 982           uint32_t length = encoded_images_[encoder_idx]._length; | 969           uint32_t length = encoded_images_[encoder_idx]._length; | 
| 983           memcpy(&encoded_images_[encoder_idx]._buffer[length], | 970           memcpy(&encoded_images_[encoder_idx]._buffer[length], | 
| 984                  pkt->data.frame.buf, | 971                  pkt->data.frame.buf, pkt->data.frame.sz); | 
| 985                  pkt->data.frame.sz); |  | 
| 986           frag_info.fragmentationOffset[part_idx] = length; | 972           frag_info.fragmentationOffset[part_idx] = length; | 
| 987           frag_info.fragmentationLength[part_idx] =  pkt->data.frame.sz; | 973           frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz; | 
| 988           frag_info.fragmentationPlType[part_idx] = 0;  // not known here | 974           frag_info.fragmentationPlType[part_idx] = 0;  // not known here | 
| 989           frag_info.fragmentationTimeDiff[part_idx] = 0; | 975           frag_info.fragmentationTimeDiff[part_idx] = 0; | 
| 990           encoded_images_[encoder_idx]._length += pkt->data.frame.sz; | 976           encoded_images_[encoder_idx]._length += pkt->data.frame.sz; | 
| 991           assert(length <= encoded_images_[encoder_idx]._size); | 977           assert(length <= encoded_images_[encoder_idx]._size); | 
| 992           ++part_idx; | 978           ++part_idx; | 
| 993           break; | 979           break; | 
| 994         } | 980         } | 
| 995         default: | 981         default: | 
| 996           break; | 982           break; | 
| 997       } | 983       } | 
| (...skipping 56 matching lines...) |
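Note: VerifyAndAllocateFragmentationHeader above is sized as 2^token_partitions_ token partitions plus one for the first (mode and motion vector) partition, because VP8 stores the token partition count as a power-of-two exponent. Illustrative arithmetic only; the value 3 is assumed:

// token_partitions is an exponent, in the style of VP8E_SET_TOKEN_PARTITIONS.
int token_partitions = 3;                         // 2^3 = 8 token partitions
int num_fragments = (1 << token_partitions) + 1;  // plus the first partition: 9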
| 1054   rps_.SetRtt(rtt); | 1040   rps_.SetRtt(rtt); | 
| 1055   return WEBRTC_VIDEO_CODEC_OK; | 1041   return WEBRTC_VIDEO_CODEC_OK; | 
| 1056 } | 1042 } | 
| 1057 | 1043 | 
| 1058 int VP8EncoderImpl::RegisterEncodeCompleteCallback( | 1044 int VP8EncoderImpl::RegisterEncodeCompleteCallback( | 
| 1059     EncodedImageCallback* callback) { | 1045     EncodedImageCallback* callback) { | 
| 1060   encoded_complete_callback_ = callback; | 1046   encoded_complete_callback_ = callback; | 
| 1061   return WEBRTC_VIDEO_CODEC_OK; | 1047   return WEBRTC_VIDEO_CODEC_OK; | 
| 1062 } | 1048 } | 
| 1063 | 1049 | 
| 1064 |  | 
| 1065 VP8DecoderImpl::VP8DecoderImpl() | 1050 VP8DecoderImpl::VP8DecoderImpl() | 
| 1066     : decode_complete_callback_(NULL), | 1051     : decode_complete_callback_(NULL), | 
| 1067       inited_(false), | 1052       inited_(false), | 
| 1068       feedback_mode_(false), | 1053       feedback_mode_(false), | 
| 1069       decoder_(NULL), | 1054       decoder_(NULL), | 
| 1070       last_keyframe_(), | 1055       last_keyframe_(), | 
| 1071       image_format_(VPX_IMG_FMT_NONE), | 1056       image_format_(VPX_IMG_FMT_NONE), | 
| 1072       ref_frame_(NULL), | 1057       ref_frame_(NULL), | 
| 1073       propagation_cnt_(-1), | 1058       propagation_cnt_(-1), | 
| 1074       last_frame_width_(0), | 1059       last_frame_width_(0), | 
| 1075       last_frame_height_(0), | 1060       last_frame_height_(0), | 
| 1076       key_frame_required_(true) { | 1061       key_frame_required_(true) {} | 
| 1077 } |  | 
| 1078 | 1062 | 
| 1079 VP8DecoderImpl::~VP8DecoderImpl() { | 1063 VP8DecoderImpl::~VP8DecoderImpl() { | 
| 1080   inited_ = true;  // Mark as inited so Release() performs the actual teardown. | 1064   inited_ = true;  // Mark as inited so Release() performs the actual teardown. |
| 1081   Release(); | 1065   Release(); | 
| 1082 } | 1066 } | 
| 1083 | 1067 | 
| 1084 int VP8DecoderImpl::Reset() { | 1068 int VP8DecoderImpl::Reset() { | 
| 1085   if (!inited_) { | 1069   if (!inited_) { | 
| 1086     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 1070     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 
| 1087   } | 1071   } | 
| 1088   InitDecode(&codec_, 1); | 1072   InitDecode(&codec_, 1); | 
| 1089   propagation_cnt_ = -1; | 1073   propagation_cnt_ = -1; | 
| 1090   return WEBRTC_VIDEO_CODEC_OK; | 1074   return WEBRTC_VIDEO_CODEC_OK; | 
| 1091 } | 1075 } | 
| 1092 | 1076 | 
| 1093 int VP8DecoderImpl::InitDecode(const VideoCodec* inst, | 1077 int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) { | 
| 1094                                        int number_of_cores) { |  | 
| 1095   int ret_val = Release(); | 1078   int ret_val = Release(); | 
| 1096   if (ret_val < 0) { | 1079   if (ret_val < 0) { | 
| 1097     return ret_val; | 1080     return ret_val; | 
| 1098   } | 1081   } | 
| 1099   if (decoder_ == NULL) { | 1082   if (decoder_ == NULL) { | 
| 1100     decoder_ = new vpx_codec_ctx_t; | 1083     decoder_ = new vpx_codec_ctx_t; | 
| 1101   } | 1084   } | 
| 1102   if (inst && inst->codecType == kVideoCodecVP8) { | 1085   if (inst && inst->codecType == kVideoCodecVP8) { | 
| 1103     feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn; | 1086     feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn; | 
| 1104   } | 1087   } | 
| 1105   vpx_codec_dec_cfg_t  cfg; | 1088   vpx_codec_dec_cfg_t cfg; | 
| 1106   // Setting number of threads to a constant value (1) | 1089   // Setting number of threads to a constant value (1) | 
| 1107   cfg.threads = 1; | 1090   cfg.threads = 1; | 
| 1108   cfg.h = cfg.w = 0;  // set after decode | 1091   cfg.h = cfg.w = 0;  // set after decode | 
| 1109 | 1092 | 
| 1110 vpx_codec_flags_t flags = 0; | 1093   vpx_codec_flags_t flags = 0; | 
| 1111 #if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) | 1094 #if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) | 
| 1112   flags = VPX_CODEC_USE_POSTPROC; | 1095   flags = VPX_CODEC_USE_POSTPROC; | 
| 1113 #ifdef INDEPENDENT_PARTITIONS | 1096 #ifdef INDEPENDENT_PARTITIONS | 
| 1114   flags |= VPX_CODEC_USE_INPUT_PARTITION; | 1097   flags |= VPX_CODEC_USE_INPUT_PARTITION; | 
| 1115 #endif | 1098 #endif | 
| 1116 #endif | 1099 #endif | 
| 1117 | 1100 | 
| 1118   if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) { | 1101   if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) { | 
| 1119     return WEBRTC_VIDEO_CODEC_MEMORY; | 1102     return WEBRTC_VIDEO_CODEC_MEMORY; | 
| 1120   } | 1103   } | 
| 1121 | 1104 | 
| 1122   // Save VideoCodec instance for later; mainly for duplicating the decoder. | 1105   // Save VideoCodec instance for later; mainly for duplicating the decoder. | 
| 1123   if (&codec_ != inst) | 1106   if (&codec_ != inst) | 
| 1124     codec_ = *inst; | 1107     codec_ = *inst; | 
| 1125   propagation_cnt_ = -1; | 1108   propagation_cnt_ = -1; | 
| 1126 | 1109 | 
| 1127   inited_ = true; | 1110   inited_ = true; | 
| 1128 | 1111 | 
| 1129   // Always start with a complete key frame. | 1112   // Always start with a complete key frame. | 
| 1130   key_frame_required_ = true; | 1113   key_frame_required_ = true; | 
| 1131   return WEBRTC_VIDEO_CODEC_OK; | 1114   return WEBRTC_VIDEO_CODEC_OK; | 
| 1132 } | 1115 } | 
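Note: for reference, a self-contained sketch of the same libvpx decoder bring-up; error handling is condensed, and the postproc flag is applied only off ARM, matching the guards above:

#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"

// Returns true on success; mirrors the InitDecode() configuration above.
bool InitVp8Decoder(vpx_codec_ctx_t* ctx) {
  vpx_codec_dec_cfg_t cfg;
  cfg.threads = 1;    // single decode thread
  cfg.w = cfg.h = 0;  // frame size is read from the bitstream
  vpx_codec_flags_t flags = 0;
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
  flags = VPX_CODEC_USE_POSTPROC;  // enable deblocking postprocessing
#endif
  return vpx_codec_dec_init(ctx, vpx_codec_vp8_dx(), &cfg, flags) ==
         VPX_CODEC_OK;
}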
| 1133 | 1116 | 
| 1134 int VP8DecoderImpl::Decode(const EncodedImage& input_image, | 1117 int VP8DecoderImpl::Decode(const EncodedImage& input_image, | 
| 1135                                    bool missing_frames, | 1118                            bool missing_frames, | 
| 1136                                    const RTPFragmentationHeader* fragmentation, | 1119                            const RTPFragmentationHeader* fragmentation, | 
| 1137                                    const CodecSpecificInfo* codec_specific_info, | 1120                            const CodecSpecificInfo* codec_specific_info, | 
| 1138                                    int64_t /*render_time_ms*/) { | 1121                            int64_t /*render_time_ms*/) { | 
| 1139   if (!inited_) { | 1122   if (!inited_) { | 
| 1140     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 1123     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 
| 1141   } | 1124   } | 
| 1142   if (decode_complete_callback_ == NULL) { | 1125   if (decode_complete_callback_ == NULL) { | 
| 1143     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 1126     return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | 
| 1144   } | 1127   } | 
| 1145   if (input_image._buffer == NULL && input_image._length > 0) { | 1128   if (input_image._buffer == NULL && input_image._length > 0) { | 
| 1146     // Reset to avoid requesting key frames too often. | 1129     // Reset to avoid requesting key frames too often. | 
| 1147     if (propagation_cnt_ > 0) | 1130     if (propagation_cnt_ > 0) | 
| 1148       propagation_cnt_ = 0; | 1131       propagation_cnt_ = 0; | 
| (...skipping 30 matching lines...) |
| 1179       return WEBRTC_VIDEO_CODEC_ERROR; | 1162       return WEBRTC_VIDEO_CODEC_ERROR; | 
| 1180     } | 1163     } | 
| 1181   } | 1164   } | 
| 1182   // Restrict error propagation using key frame requests. Disabled when | 1165   // Restrict error propagation using key frame requests. Disabled when | 
| 1183   // the feedback mode is enabled (RPS). | 1166   // the feedback mode is enabled (RPS). | 
| 1184   // Reset on a key frame refresh. | 1167   // Reset on a key frame refresh. | 
| 1185   if (!feedback_mode_) { | 1168   if (!feedback_mode_) { | 
| 1186     if (input_image._frameType == kVideoFrameKey && | 1169     if (input_image._frameType == kVideoFrameKey && | 
| 1187         input_image._completeFrame) { | 1170         input_image._completeFrame) { | 
| 1188       propagation_cnt_ = -1; | 1171       propagation_cnt_ = -1; | 
| 1189     // Start count on first loss. | 1172       // Start count on first loss. | 
| 1190     } else if ((!input_image._completeFrame || missing_frames) && | 1173     } else if ((!input_image._completeFrame || missing_frames) && | 
| 1191         propagation_cnt_ == -1) { | 1174                propagation_cnt_ == -1) { | 
| 1192       propagation_cnt_ = 0; | 1175       propagation_cnt_ = 0; | 
| 1193     } | 1176     } | 
| 1194     if (propagation_cnt_ >= 0) { | 1177     if (propagation_cnt_ >= 0) { | 
| 1195       propagation_cnt_++; | 1178       propagation_cnt_++; | 
| 1196     } | 1179     } | 
| 1197   } | 1180   } | 
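Note: propagation_cnt_ is a small three-state device: -1 means no outstanding loss, zero or greater counts frames decoded since the first loss, and a complete key frame resets it to -1. A hedged distillation of the branch above (function name hypothetical):

// Update the loss-propagation count for one incoming frame.
void UpdatePropagationCount(int* propagation_cnt,
                            bool complete_key_frame,
                            bool loss_detected) {
  if (complete_key_frame)
    *propagation_cnt = -1;  // Key frame refresh: stop counting.
  else if (loss_detected && *propagation_cnt == -1)
    *propagation_cnt = 0;  // First loss: start counting.
  if (*propagation_cnt >= 0)
    ++*propagation_cnt;  // One more frame decoded since the loss.
}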
| 1198 | 1181 | 
| 1199   vpx_codec_iter_t iter = NULL; | 1182   vpx_codec_iter_t iter = NULL; | 
| 1200   vpx_image_t* img; | 1183   vpx_image_t* img; | 
| 1201   int ret; | 1184   int ret; | 
| (...skipping 31 matching lines...) |
| 1233       propagation_cnt_ = 0; | 1216       propagation_cnt_ = 0; | 
| 1234     } | 1217     } | 
| 1235     return WEBRTC_VIDEO_CODEC_ERROR; | 1218     return WEBRTC_VIDEO_CODEC_ERROR; | 
| 1236   } | 1219   } | 
| 1237 #endif | 1220 #endif | 
| 1238 | 1221 | 
| 1239   // Store encoded frame if key frame. (Used in Copy method.) | 1222   // Store encoded frame if key frame. (Used in Copy method.) | 
| 1240   if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) { | 1223   if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) { | 
| 1241     const uint32_t bytes_to_copy = input_image._length; | 1224     const uint32_t bytes_to_copy = input_image._length; | 
| 1242     if (last_keyframe_._size < bytes_to_copy) { | 1225     if (last_keyframe_._size < bytes_to_copy) { | 
| 1243       delete [] last_keyframe_._buffer; | 1226       delete[] last_keyframe_._buffer; | 
| 1244       last_keyframe_._buffer = NULL; | 1227       last_keyframe_._buffer = NULL; | 
| 1245       last_keyframe_._size = 0; | 1228       last_keyframe_._size = 0; | 
| 1246     } | 1229     } | 
| 1247     uint8_t* temp_buffer = last_keyframe_._buffer;  // Save buffer ptr. | 1230     uint8_t* temp_buffer = last_keyframe_._buffer;  // Save buffer ptr. | 
| 1248     uint32_t temp_size = last_keyframe_._size;  // Save size. | 1231     uint32_t temp_size = last_keyframe_._size;      // Save size. | 
| 1249     last_keyframe_ = input_image;  // Shallow copy. | 1232     last_keyframe_ = input_image;                   // Shallow copy. | 
| 1250     last_keyframe_._buffer = temp_buffer;  // Restore buffer ptr. | 1233     last_keyframe_._buffer = temp_buffer;           // Restore buffer ptr. | 
| 1251     last_keyframe_._size = temp_size;  // Restore buffer size. | 1234     last_keyframe_._size = temp_size;               // Restore buffer size. | 
| 1252     if (!last_keyframe_._buffer) { | 1235     if (!last_keyframe_._buffer) { | 
| 1253       // Allocate memory. | 1236       // Allocate memory. | 
| 1254       last_keyframe_._size = bytes_to_copy; | 1237       last_keyframe_._size = bytes_to_copy; | 
| 1255       last_keyframe_._buffer = new uint8_t[last_keyframe_._size]; | 1238       last_keyframe_._buffer = new uint8_t[last_keyframe_._size]; | 
| 1256     } | 1239     } | 
| 1257     // Copy encoded frame. | 1240     // Copy encoded frame. | 
| 1258     memcpy(last_keyframe_._buffer, input_image._buffer, bytes_to_copy); | 1241     memcpy(last_keyframe_._buffer, input_image._buffer, bytes_to_copy); | 
| 1259     last_keyframe_._length = bytes_to_copy; | 1242     last_keyframe_._length = bytes_to_copy; | 
| 1260   } | 1243   } | 
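Note: the block above shallow-copies input_image into last_keyframe_ to capture its metadata while saving and restoring the cached buffer pointer, so buffer ownership never transfers; the buffer is reallocated only when too small. The idiom, sketched against a hypothetical minimal struct:

#include <cstdint>
#include <cstring>

struct CachedImage {  // stand-in for the relevant EncodedImage fields
  uint8_t* buffer = nullptr;
  uint32_t size = 0;    // allocated capacity
  uint32_t length = 0;  // valid payload bytes
};

void CacheKeyFrame(CachedImage* cache, const CachedImage& in) {
  if (cache->size < in.length) {  // existing buffer too small: drop it
    delete[] cache->buffer;
    cache->buffer = nullptr;
    cache->size = 0;
  }
  uint8_t* saved_buffer = cache->buffer;  // save owned buffer
  uint32_t saved_size = cache->size;
  *cache = in;                     // shallow copy of the metadata
  cache->buffer = saved_buffer;    // restore ownership
  cache->size = saved_size;
  if (cache->buffer == nullptr) {  // allocate on first use or after a drop
    cache->size = in.length;
    cache->buffer = new uint8_t[cache->size];
  }
  std::memcpy(cache->buffer, in.buffer, in.length);
  cache->length = in.length;
}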
| 1261 | 1244 | 
| (...skipping 29 matching lines...) |
| 1291       if (propagation_cnt_ > 0) | 1274       if (propagation_cnt_ > 0) | 
| 1292         propagation_cnt_ = 0; | 1275         propagation_cnt_ = 0; | 
| 1293       return WEBRTC_VIDEO_CODEC_ERROR; | 1276       return WEBRTC_VIDEO_CODEC_ERROR; | 
| 1294     } | 1277     } | 
| 1295     int16_t picture_id = -1; | 1278     int16_t picture_id = -1; | 
| 1296     if (codec_specific_info) { | 1279     if (codec_specific_info) { | 
| 1297       picture_id = codec_specific_info->codecSpecific.VP8.pictureId; | 1280       picture_id = codec_specific_info->codecSpecific.VP8.pictureId; | 
| 1298     } | 1281     } | 
| 1299     if (picture_id > -1) { | 1282     if (picture_id > -1) { | 
| 1300       if (((reference_updates & VP8_GOLD_FRAME) || | 1283       if (((reference_updates & VP8_GOLD_FRAME) || | 
| 1301           (reference_updates & VP8_ALTR_FRAME)) && !corrupted) { | 1284            (reference_updates & VP8_ALTR_FRAME)) && | 
|  | 1285           !corrupted) { | 
| 1302         decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id); | 1286         decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id); | 
| 1303       } | 1287       } | 
| 1304       decode_complete_callback_->ReceivedDecodedFrame(picture_id); | 1288       decode_complete_callback_->ReceivedDecodedFrame(picture_id); | 
| 1305     } | 1289     } | 
| 1306     if (corrupted) { | 1290     if (corrupted) { | 
| 1307       // we can decode but with artifacts | 1291       // we can decode but with artifacts | 
| 1308       return WEBRTC_VIDEO_CODEC_REQUEST_SLI; | 1292       return WEBRTC_VIDEO_CODEC_REQUEST_SLI; | 
| 1309     } | 1293     } | 
| 1310   } | 1294   } | 
| 1311   // Check against the error propagation threshold. | 1295   // Check against the error propagation threshold. |
| 1312   if (propagation_cnt_ > kVp8ErrorPropagationTh) { | 1296   if (propagation_cnt_ > kVp8ErrorPropagationTh) { | 
| 1313     // Reset to avoid requesting key frames too often. | 1297     // Reset to avoid requesting key frames too often. | 
| 1314     propagation_cnt_ = 0; | 1298     propagation_cnt_ = 0; | 
| 1315     return WEBRTC_VIDEO_CODEC_ERROR; | 1299     return WEBRTC_VIDEO_CODEC_ERROR; | 
| 1316   } | 1300   } | 
| 1317   return WEBRTC_VIDEO_CODEC_OK; | 1301   return WEBRTC_VIDEO_CODEC_OK; | 
| 1318 } | 1302 } | 
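Note: the feedback path above uses two libvpx decoder controls: VP8D_GET_LAST_REF_UPDATES reports which reference buffers the last decode refreshed, and VP8D_GET_FRAME_CORRUPTED reports whether the frame was reconstructed over missing data. Standalone usage, assuming an initialized vpx_codec_ctx_t* decoder:

#include "vpx/vp8dx.h"

int ref_updates = 0;
int corrupted = 0;
vpx_codec_control(decoder, VP8D_GET_LAST_REF_UPDATES, &ref_updates);
vpx_codec_control(decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted);
// Golden and alt-ref updates matter because those buffers persist across frames.
bool long_term_ref_updated =
    (ref_updates & (VP8_GOLD_FRAME | VP8_ALTR_FRAME)) != 0;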
| 1319 | 1303 | 
| 1320 int VP8DecoderImpl::DecodePartitions( | 1304 int VP8DecoderImpl::DecodePartitions( | 
| 1321     const EncodedImage& input_image, | 1305     const EncodedImage& input_image, | 
| 1322     const RTPFragmentationHeader* fragmentation) { | 1306     const RTPFragmentationHeader* fragmentation) { | 
| 1323   for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) { | 1307   for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) { | 
| 1324     const uint8_t* partition = input_image._buffer + | 1308     const uint8_t* partition = | 
| 1325         fragmentation->fragmentationOffset[i]; | 1309         input_image._buffer + fragmentation->fragmentationOffset[i]; | 
| 1326     const uint32_t partition_length = | 1310     const uint32_t partition_length = fragmentation->fragmentationLength[i]; | 
| 1327         fragmentation->fragmentationLength[i]; | 1311     if (vpx_codec_decode(decoder_, partition, partition_length, 0, | 
| 1328     if (vpx_codec_decode(decoder_, |  | 
| 1329                          partition, |  | 
| 1330                          partition_length, |  | 
| 1331                          0, |  | 
| 1332                          VPX_DL_REALTIME)) { | 1312                          VPX_DL_REALTIME)) { | 
| 1333       return WEBRTC_VIDEO_CODEC_ERROR; | 1313       return WEBRTC_VIDEO_CODEC_ERROR; | 
| 1334     } | 1314     } | 
| 1335   } | 1315   } | 
| 1336   // Signal end of frame data. If there was no frame data this will trigger | 1316   // Signal end of frame data. If there was no frame data this will trigger | 
| 1337   // a full frame concealment. | 1317   // a full frame concealment. | 
| 1338   if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME)) | 1318   if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME)) | 
| 1339     return WEBRTC_VIDEO_CODEC_ERROR; | 1319     return WEBRTC_VIDEO_CODEC_ERROR; | 
| 1340   return WEBRTC_VIDEO_CODEC_OK; | 1320   return WEBRTC_VIDEO_CODEC_OK; | 
| 1341 } | 1321 } | 
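Note: with VPX_CODEC_USE_INPUT_PARTITION each vpx_codec_decode() call feeds one partition, and the final call with NULL data marks end of frame so decoding, or concealment when data is missing, can complete. The non-partitioned path submits the whole frame in a single call; a sketch under the same error convention:

// Whole-frame decode, used when partition input is not enabled.
if (vpx_codec_decode(decoder_, input_image._buffer,
                     static_cast<unsigned int>(input_image._length), 0,
                     VPX_DL_REALTIME)) {
  return WEBRTC_VIDEO_CODEC_ERROR;
}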
| 1342 | 1322 | 
| 1343 int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img, | 1323 int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img, | 
| 1344                                         uint32_t timestamp, | 1324                                 uint32_t timestamp, | 
| 1345                                         int64_t ntp_time_ms) { | 1325                                 int64_t ntp_time_ms) { | 
| 1346   if (img == NULL) { | 1326   if (img == NULL) { | 
| 1347     // Decoder OK and NULL image => No show frame | 1327     // Decoder OK and NULL image => No show frame | 
| 1348     return WEBRTC_VIDEO_CODEC_NO_OUTPUT; | 1328     return WEBRTC_VIDEO_CODEC_NO_OUTPUT; | 
| 1349   } | 1329   } | 
| 1350   last_frame_width_ = img->d_w; | 1330   last_frame_width_ = img->d_w; | 
| 1351   last_frame_height_ = img->d_h; | 1331   last_frame_height_ = img->d_h; | 
| 1352   // Allocate memory for decoded image. | 1332   // Allocate memory for decoded image. | 
| 1353   VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h), | 1333   VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h), | 
| 1354                            timestamp, 0, kVideoRotation_0); | 1334                            timestamp, 0, kVideoRotation_0); | 
| 1355   libyuv::I420Copy( | 1335   libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], | 
| 1356       img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], | 1336                    img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], | 
| 1357       img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], | 1337                    img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], | 
| 1358       img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], | 1338                    decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane), | 
| 1359       decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane), | 1339                    decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane), | 
| 1360       decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane), | 1340                    decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane), | 
| 1361       decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane), | 1341                    img->d_w, img->d_h); | 
| 1362       img->d_w, img->d_h); |  | 
| 1363   decoded_image.set_ntp_time_ms(ntp_time_ms); | 1342   decoded_image.set_ntp_time_ms(ntp_time_ms); | 
| 1364   int ret = decode_complete_callback_->Decoded(decoded_image); | 1343   int ret = decode_complete_callback_->Decoded(decoded_image); | 
| 1365   if (ret != 0) | 1344   if (ret != 0) | 
| 1366     return ret; | 1345     return ret; | 
| 1367 | 1346 | 
| 1368   // Remember image format for later | 1347   // Remember image format for later | 
| 1369   image_format_ = img->fmt; | 1348   image_format_ = img->fmt; | 
| 1370   return WEBRTC_VIDEO_CODEC_OK; | 1349   return WEBRTC_VIDEO_CODEC_OK; | 
| 1371 } | 1350 } | 
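Note: I420Copy copies the three planes while honoring each side's row stride. For an I420 frame of d_w by d_h pixels, the chroma planes are subsampled by two in each dimension; the rounded-up sizes, for the record:

// I420 chroma plane dimensions (rounding up for odd frame sizes).
int chroma_width = (img->d_w + 1) / 2;
int chroma_height = (img->d_h + 1) / 2;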
| 1372 | 1351 | 
| 1373 int VP8DecoderImpl::RegisterDecodeCompleteCallback( | 1352 int VP8DecoderImpl::RegisterDecodeCompleteCallback( | 
| 1374     DecodedImageCallback* callback) { | 1353     DecodedImageCallback* callback) { | 
| 1375   decode_complete_callback_ = callback; | 1354   decode_complete_callback_ = callback; | 
| 1376   return WEBRTC_VIDEO_CODEC_OK; | 1355   return WEBRTC_VIDEO_CODEC_OK; | 
| 1377 } | 1356 } | 
| 1378 | 1357 | 
| 1379 int VP8DecoderImpl::Release() { | 1358 int VP8DecoderImpl::Release() { | 
| 1380   if (last_keyframe_._buffer != NULL) { | 1359   if (last_keyframe_._buffer != NULL) { | 
| 1381     delete [] last_keyframe_._buffer; | 1360     delete[] last_keyframe_._buffer; | 
| 1382     last_keyframe_._buffer = NULL; | 1361     last_keyframe_._buffer = NULL; | 
| 1383   } | 1362   } | 
| 1384   if (decoder_ != NULL) { | 1363   if (decoder_ != NULL) { | 
| 1385     if (vpx_codec_destroy(decoder_)) { | 1364     if (vpx_codec_destroy(decoder_)) { | 
| 1386       return WEBRTC_VIDEO_CODEC_MEMORY; | 1365       return WEBRTC_VIDEO_CODEC_MEMORY; | 
| 1387     } | 1366     } | 
| 1388     delete decoder_; | 1367     delete decoder_; | 
| 1389     decoder_ = NULL; | 1368     decoder_ = NULL; | 
| 1390   } | 1369   } | 
| 1391   if (ref_frame_ != NULL) { | 1370   if (ref_frame_ != NULL) { | 
| 1392     vpx_img_free(&ref_frame_->img); | 1371     vpx_img_free(&ref_frame_->img); | 
| 1393     delete ref_frame_; | 1372     delete ref_frame_; | 
| 1394     ref_frame_ = NULL; | 1373     ref_frame_ = NULL; | 
| 1395   } | 1374   } | 
| 1396   buffer_pool_.Release(); | 1375   buffer_pool_.Release(); | 
| 1397   inited_ = false; | 1376   inited_ = false; | 
| 1398   return WEBRTC_VIDEO_CODEC_OK; | 1377   return WEBRTC_VIDEO_CODEC_OK; | 
| 1399 } | 1378 } | 
| 1400 | 1379 | 
| 1401 int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) { | 1380 int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) { | 
| 1402   // The type of frame to copy should be set in ref_frame_->frame_type | 1381   // The type of frame to copy should be set in ref_frame_->frame_type | 
| 1403   // before the call to this function. | 1382   // before the call to this function. | 
| 1404   if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) | 1383   if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) != | 
| 1405       != VPX_CODEC_OK) { | 1384       VPX_CODEC_OK) { | 
| 1406     return -1; | 1385     return -1; | 
| 1407   } | 1386   } | 
| 1408   if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) | 1387   if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != | 
| 1409       != VPX_CODEC_OK) { | 1388       VPX_CODEC_OK) { | 
| 1410     return -1; | 1389     return -1; | 
| 1411   } | 1390   } | 
| 1412   return 0; | 1391   return 0; | 
| 1413 } | 1392 } | 
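Note: this pair of controls moves one reference buffer between decoder instances. The caller must set ref_frame_->frame_type and have ref_frame_->img allocated to the frame size beforehand (handled elsewhere in this file). Bare-bones shape of the call pair, with context names assumed:

vpx_ref_frame_t ref;
ref.frame_type = VP8_LAST_FRAME;  // which reference to transfer
// ref.img must already be allocated (e.g. via vpx_img_alloc) to frame size.
if (vpx_codec_control(src_decoder, VP8_COPY_REFERENCE, &ref) != VPX_CODEC_OK)
  return -1;
if (vpx_codec_control(dst_decoder, VP8_SET_REFERENCE, &ref) != VPX_CODEC_OK)
  return -1;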
| 1414 | 1393 | 
| 1415 }  // namespace webrtc | 1394 }  // namespace webrtc | 