| Index: webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
 | 
| diff --git a/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
 | 
| index 18eccb25a5d4c1950935b54a060b03ce8006250b..6da71c9bae757a46807b3ef4a5f18775f4dbeb30 100644
 | 
| --- a/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
 | 
| +++ b/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
 | 
| @@ -11,8 +11,10 @@
 | 
|  
 | 
|  #include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h"
 | 
|  
 | 
| +#include <algorithm>
 | 
|  #include <limits>
 | 
|  
 | 
| +#include "webrtc/common_video/libyuv/include/scaler.h"
 | 
|  #include "third_party/openh264/src/codec/api/svc/codec_api.h"
 | 
|  #include "third_party/openh264/src/codec/api/svc/codec_app_def.h"
 | 
|  #include "third_party/openh264/src/codec/api/svc/codec_def.h"
 | 
| @@ -21,6 +23,7 @@
 | 
|  #include "webrtc/base/logging.h"
 | 
|  #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 | 
|  #include "webrtc/system_wrappers/include/metrics.h"
 | 
| +#include "webrtc/video_frame.h"
 | 
|  
 | 
|  namespace webrtc {
 | 
|  
 | 
| @@ -50,6 +53,78 @@ int NumberOfThreads(int width, int height, int number_of_cores) {
 | 
|    return 1;
 | 
|  }
 | 
|  
 | 
| +std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
 | 
| +                                       int bitrate_to_allocate_kbps) {
 | 
| +  if (codec.numberOfSimulcastStreams <= 1) {
 | 
| +    return std::vector<int>(1, bitrate_to_allocate_kbps);
 | 
| +  }
 | 
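| +  // Worked example (hypothetical numbers): given two streams with
 | 
| +  // (min, target, max) = (100, 300, 600) and (200, 500, 1000) kbps and
 | 
| +  // 900 kbps to allocate, the result is {300, 600}: each stream first
 | 
| +  // gets its target (300 + 500), and the remaining 100 kbps tops up the
 | 
| +  // highest active stream, bounded by its max bitrate.
 | 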
| +
 | 
| +  std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
 | 
| +  // Allocate min -> target bitrates as long as we have bitrate to spend.
 | 
| +  size_t last_active_stream = 0;
 | 
| +  for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
 | 
| +                     bitrate_to_allocate_kbps >=
 | 
| +                         static_cast<int>(codec.simulcastStream[i].minBitrate);
 | 
| +       ++i) {
 | 
| +    last_active_stream = i;
 | 
| +    int allocated_bitrate_kbps =
 | 
| +        std::min(static_cast<int>(codec.simulcastStream[i].targetBitrate),
 | 
| +                 bitrate_to_allocate_kbps);
 | 
| +    bitrates_kbps[i] = allocated_bitrate_kbps;
 | 
| +    bitrate_to_allocate_kbps -= allocated_bitrate_kbps;
 | 
| +  }
 | 
| +
 | 
| +  // Spend additional bits on the highest-quality active layer, up to max
 | 
| +  // bitrate.
 | 
| +  // TODO(pbos): Consider spending additional bits on last_active_stream-1 down
 | 
| +  // to 0 and not just the top layer when we have additional bitrate to spend.
 | 
| +  int allocated_bitrate_kbps = std::min(
 | 
| +      static_cast<int>(codec.simulcastStream[last_active_stream].maxBitrate -
 | 
| +                       bitrates_kbps[last_active_stream]),
 | 
| +      bitrate_to_allocate_kbps);
 | 
| +  bitrates_kbps[last_active_stream] += allocated_bitrate_kbps;
 | 
| +  bitrate_to_allocate_kbps -= allocated_bitrate_kbps;
 | 
| +
 | 
| +  // Make sure we can always send something. Suspending below min bitrate is
 | 
| +  // controlled outside the codec implementation and is not overridden by this.
 | 
| +  if (bitrates_kbps[0] < static_cast<int>(codec.simulcastStream[0].minBitrate))
 | 
| +    bitrates_kbps[0] = static_cast<int>(codec.simulcastStream[0].minBitrate);
 | 
| +
 | 
| +  return bitrates_kbps;
 | 
| +}
 | 
| +
 | 
| +uint32_t SumStreamMaxBitrate(int streams, const VideoCodec& codec) {
 | 
| +  uint32_t bitrate_sum = 0;
 | 
| +  for (int i = 0; i < streams; ++i) {
 | 
| +    bitrate_sum += codec.simulcastStream[i].maxBitrate;
 | 
| +  }
 | 
| +  return bitrate_sum;
 | 
| +}
 | 
| +
 | 
| +int NumberOfStreams(const VideoCodec& codec) {
 | 
| +  int streams =
 | 
| +      codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
 | 
| +  uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
 | 
| +  if (simulcast_max_bitrate == 0) {
 | 
| +    streams = 1;
 | 
| +  }
 | 
| +  return streams;
 | 
| +}
 | 
| +
 | 
| +bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
 | 
| +  if (codec.width != codec.simulcastStream[num_streams - 1].width ||
 | 
| +      codec.height != codec.simulcastStream[num_streams - 1].height) {
 | 
| +    return false;
 | 
| +  }
 | 
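| +  // All layers must have the same aspect ratio as the full resolution.
 | 
| +  // Comparing cross products avoids integer division and rounding errors.
 | 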
| +  for (int i = 0; i < num_streams; ++i) {
 | 
| +    if (codec.width * codec.simulcastStream[i].height !=
 | 
| +        codec.height * codec.simulcastStream[i].width) {
 | 
| +      return false;
 | 
| +    }
 | 
| +  }
 | 
| +  return true;
 | 
| +}
 | 
| +
 | 
|  }  // namespace
 | 
|  
 | 
|  static FrameType EVideoFrameType_to_FrameType(EVideoFrameType type) {
 | 
| @@ -148,16 +223,29 @@ static void RtpFragmentize(EncodedImage* encoded_image,
 | 
|  }
 | 
|  
 | 
|  H264EncoderImpl::H264EncoderImpl()
 | 
| -    : openh264_encoder_(nullptr),
 | 
| -      encoded_image_callback_(nullptr),
 | 
| +    : encoded_image_callback_(nullptr),
 | 
|        has_reported_init_(false),
 | 
| -      has_reported_error_(false) {
 | 
| +      has_reported_error_(false),
 | 
| +      key_frame_request_(kMaxSimulcastStreams, false) {
 | 
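| +  // Reserve up front so the resize() calls in InitEncode() stay within
 | 
| +  // capacity for any supported stream count.
 | 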
| +  encoded_images_.reserve(kMaxSimulcastStreams);
 | 
| +  encoded_image_buffers_.reserve(kMaxSimulcastStreams);
 | 
| +  send_streams_.reserve(kMaxSimulcastStreams);
 | 
| +  encoders_.reserve(kMaxSimulcastStreams);
 | 
| +  scaled_input_frames_.reserve(kMaxSimulcastStreams);
 | 
|  }
 | 
|  
 | 
|  H264EncoderImpl::~H264EncoderImpl() {
 | 
|    Release();
 | 
|  }
 | 
|  
 | 
| +void H264EncoderImpl::SetStreamState(bool send_stream, int stream_idx) {
 | 
| +  if (send_stream && !send_streams_[stream_idx]) {
 | 
| +    // Need a key frame if we have not sent this stream before.
 | 
| +    key_frame_request_[stream_idx] = true;
 | 
| +  }
 | 
| +  send_streams_[stream_idx] = send_stream;
 | 
| +}
 | 
| +
 | 
|  int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
 | 
|                                      int32_t number_of_cores,
 | 
|                                      size_t /*max_payload_size*/) {
 | 
| @@ -181,23 +269,27 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
 | 
|      ReportError();
 | 
|      return release_ret;
 | 
|    }
 | 
| -  RTC_DCHECK(!openh264_encoder_);
 | 
|  
 | 
| -  // Create encoder.
 | 
| -  if (WelsCreateSVCEncoder(&openh264_encoder_) != 0) {
 | 
| -    // Failed to create encoder.
 | 
| -    LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
 | 
| -    RTC_DCHECK(!openh264_encoder_);
 | 
| -    ReportError();
 | 
| -    return WEBRTC_VIDEO_CODEC_ERROR;
 | 
| +  int number_of_streams = NumberOfStreams(*codec_settings);
 | 
| +  bool doing_simulcast = (number_of_streams > 1);
 | 
| +
 | 
| +  if (doing_simulcast &&
 | 
| +      !ValidSimulcastResolutions(*codec_settings, number_of_streams)) {
 | 
| +    ReportError();
 | 
| +    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
 | 
|    }
 | 
| -  RTC_DCHECK(openh264_encoder_);
 | 
| -  if (kOpenH264EncoderDetailedLogging) {
 | 
| -    int trace_level = WELS_LOG_DETAIL;
 | 
| -    openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL,
 | 
| -                                 &trace_level);
 | 
| +  // Take the local copy of the settings first; the fix-up below must be
 | 
| +  // applied to |codec_settings_| after this copy or it would be wiped out
 | 
| +  // by it.
 | 
| +  codec_settings_ = *codec_settings;
 | 
| +  // Code expects simulcastStream resolutions to be correct, make sure they
 | 
| +  // are filled even when there are no simulcast layers.
 | 
| +  if (codec_settings_.numberOfSimulcastStreams == 0) {
 | 
| +    codec_settings_.simulcastStream[0].width = codec_settings_.width;
 | 
| +    codec_settings_.simulcastStream[0].height = codec_settings_.height;
 | 
|    }
 | 
| -  // else WELS_LOG_DEFAULT is used by default.
 | 
| +
 | 
| +  encoded_images_.resize(number_of_streams);
 | 
| +  encoded_image_buffers_.resize(number_of_streams);
 | 
| +  encoders_.resize(number_of_streams);
 | 
| +  scaled_input_frames_.resize(number_of_streams);
 | 
| +  key_frame_request_.resize(number_of_streams);
 | 
| +  std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
 | 
| +  // |send_streams_| is only reserved in the constructor; size it here so
 | 
| +  // SetStreamState() and Encode() can index it per stream. Streams send by
 | 
| +  // default until SetRates() pauses them.
 | 
| +  send_streams_.assign(number_of_streams, true);
 | 
|  
 | 
| -  codec_settings_ = *codec_settings;
 | 
|    if (codec_settings_.targetBitrate == 0)
 | 
| @@ -208,86 +300,127 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
 | 
|    // memset(&p, 0, sizeof(SEncParamBase)) used in Initialize, and SEncParamExt
 | 
|    // which is a superset of SEncParamBase (cleared with GetDefaultParams) used
 | 
|    // in InitializeExt.
 | 
| -  SEncParamExt init_params;
 | 
| -  openh264_encoder_->GetDefaultParams(&init_params);
 | 
| -  if (codec_settings_.mode == kRealtimeVideo) {
 | 
| -    init_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
 | 
| -  } else if (codec_settings_.mode == kScreensharing) {
 | 
| -    init_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
 | 
| -  } else {
 | 
| -    ReportError();
 | 
| -    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
 | 
| -  }
 | 
| -  init_params.iPicWidth = codec_settings_.width;
 | 
| -  init_params.iPicHeight = codec_settings_.height;
 | 
| -  // |init_params| uses bit/s, |codec_settings_| uses kbit/s.
 | 
| -  init_params.iTargetBitrate = codec_settings_.targetBitrate * 1000;
 | 
| -  init_params.iMaxBitrate = codec_settings_.maxBitrate * 1000;
 | 
| -  // Rate Control mode
 | 
| -  init_params.iRCMode = RC_BITRATE_MODE;
 | 
| -  init_params.fMaxFrameRate = static_cast<float>(codec_settings_.maxFramerate);
 | 
| -
 | 
| -  // The following parameters are extension parameters (they're in SEncParamExt,
 | 
| -  // not in SEncParamBase).
 | 
| -  init_params.bEnableFrameSkip =
 | 
| -      codec_settings_.codecSpecific.H264.frameDroppingOn;
 | 
| -  // |uiIntraPeriod|    - multiple of GOP size
 | 
| -  // |keyFrameInterval| - number of frames
 | 
| -  init_params.uiIntraPeriod =
 | 
| -      codec_settings_.codecSpecific.H264.keyFrameInterval;
 | 
| -  init_params.uiMaxNalSize = 0;
 | 
| -  // Threading model: use auto.
 | 
| -  //  0: auto (dynamic imp. internal encoder)
 | 
| -  //  1: single thread (default value)
 | 
| -  // >1: number of threads
 | 
| -  init_params.iMultipleThreadIdc = NumberOfThreads(init_params.iPicWidth,
 | 
| -                                                   init_params.iPicHeight,
 | 
| -                                                   number_of_cores);
 | 
| -  // The base spatial layer 0 is the only one we use.
 | 
| -  init_params.sSpatialLayers[0].iVideoWidth        = init_params.iPicWidth;
 | 
| -  init_params.sSpatialLayers[0].iVideoHeight       = init_params.iPicHeight;
 | 
| -  init_params.sSpatialLayers[0].fFrameRate         = init_params.fMaxFrameRate;
 | 
| -  init_params.sSpatialLayers[0].iSpatialBitrate    = init_params.iTargetBitrate;
 | 
| -  init_params.sSpatialLayers[0].iMaxSpatialBitrate = init_params.iMaxBitrate;
 | 
| -  // Slice num according to number of threads.
 | 
| -  init_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE;
 | 
| -
 | 
| -  // Initialize.
 | 
| -  if (openh264_encoder_->InitializeExt(&init_params) != 0) {
 | 
| -    LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
 | 
| -    Release();
 | 
| -    ReportError();
 | 
| -    return WEBRTC_VIDEO_CODEC_ERROR;
 | 
| +
 | 
| +  for (int i = 0; i < number_of_streams; ++i) {
 | 
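| +    // Each simulcast stream gets its own standalone OpenH264 encoder
 | 
| +    // instance; only spatial layer 0 of each instance is used (see below).
 | 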
| +    // Create encoder.
 | 
| +    if (WelsCreateSVCEncoder(&encoders_[i]) != 0) {
 | 
| +      // Failed to create encoder.
 | 
| +      LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
 | 
| +      RTC_DCHECK(!encoders_[i]);
 | 
| +      ReportError();
 | 
| +      return WEBRTC_VIDEO_CODEC_ERROR;
 | 
| +    }
 | 
| +
 | 
| +    RTC_DCHECK(encoders_[i]);
 | 
| +    if (kOpenH264EncoderDetailedLogging) {
 | 
| +      int trace_level = WELS_LOG_DETAIL;
 | 
| +      encoders_[i]->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
 | 
| +    }
 | 
| +    SEncParamExt init_params;
 | 
| +    memset(&init_params, 0, sizeof(SEncParamExt));
 | 
| +    encoders_[i]->GetDefaultParams(&init_params);
 | 
| +    if (codec_settings_.mode == kRealtimeVideo) {
 | 
| +      init_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
 | 
| +    } else if (codec_settings_.mode == kScreensharing) {
 | 
| +      init_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
 | 
| +    } else {
 | 
| +      Release();
 | 
| +      ReportError();
 | 
| +      return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
 | 
| +    }
 | 
| +    init_params.iPicWidth = codec_settings_.simulcastStream[i].width;
 | 
| +    init_params.iPicHeight = codec_settings_.simulcastStream[i].height;
 | 
| +    // |init_params| uses bit/s, |codec_settings_| uses kbit/s.
 | 
| +    init_params.iTargetBitrate = codec_settings_.startBitrate * 1000;
 | 
| +    init_params.iMaxBitrate = codec_settings_.maxBitrate * 1000;
 | 
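| +    // These are the overall rates; the per-stream allocation is applied in
 | 
| +    // SetRates() via GetStreamBitratesKbps().
 | 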
| +    // Rate Control mode
 | 
| +    init_params.iRCMode = RC_BITRATE_MODE;
 | 
| +    init_params.fMaxFrameRate =
 | 
| +        static_cast<float>(codec_settings_.maxFramerate);
 | 
| +
 | 
| +    // The following parameters are extension parameters
 | 
| +    // (they're in SEncParamExt, not in SEncParamBase).
 | 
| +    init_params.bEnableFrameSkip =
 | 
| +        codec_settings_.codecSpecific.H264.frameDroppingOn;
 | 
| +    // |uiIntraPeriod|    - multiple of GOP size
 | 
| +    // |keyFrameInterval| - number of frames
 | 
| +    init_params.uiIntraPeriod =
 | 
| +        codec_settings_.codecSpecific.H264.keyFrameInterval;
 | 
| +    init_params.uiMaxNalSize = 0;
 | 
| +    init_params.iComplexityMode = ECOMPLEXITY_MODE::LOW_COMPLEXITY;
 | 
| +
 | 
| +    // Threading model: use auto.
 | 
| +    //  0: auto (dynamic imp. internal encoder)
 | 
| +    //  1: single thread (default value)
 | 
| +    // >1: number of threads
 | 
| +    init_params.iMultipleThreadIdc = NumberOfThreads(
 | 
| +        init_params.iPicWidth, init_params.iPicHeight, number_of_cores);
 | 
| +    // The base spatial layer 0 is the only one we use.
 | 
| +    init_params.sSpatialLayers[0].iVideoWidth = init_params.iPicWidth;
 | 
| +    init_params.sSpatialLayers[0].iVideoHeight = init_params.iPicHeight;
 | 
| +    init_params.sSpatialLayers[0].fFrameRate = init_params.fMaxFrameRate;
 | 
| +    init_params.sSpatialLayers[0].iSpatialBitrate = init_params.iTargetBitrate;
 | 
| +    init_params.sSpatialLayers[0].iMaxSpatialBitrate = init_params.iMaxBitrate;
 | 
| +
 | 
| +    // Slice num according to number of threads.
 | 
| +    init_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE;
 | 
| +    // Initialize.
 | 
| +    if (encoders_[i]->InitializeExt(&init_params) != 0) {
 | 
| +      LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
 | 
| +      Release();
 | 
| +      ReportError();
 | 
| +      return WEBRTC_VIDEO_CODEC_ERROR;
 | 
| +    }
 | 
| +    int video_format = EVideoFormatType::videoFormatI420;
 | 
| +    encoders_[i]->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
 | 
| +    // Initialize encoded image. Default buffer size: size of unencoded data.
 | 
| +    // Any previous buffer is owned by |encoded_image_buffers_[i]| and is
 | 
| +    // released when that pointer is reset below; a manual delete[] here
 | 
| +    // would double-free it.
 | 
| +    encoded_images_[i]._size =
 | 
| +        CalcBufferSize(kI420, codec_settings_.simulcastStream[i].width,
 | 
| +                       codec_settings_.simulcastStream[i].height);
 | 
| +    encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
 | 
| +    encoded_image_buffers_[i].reset(encoded_images_[i]._buffer);
 | 
| +    encoded_images_[i]._completeFrame = true;
 | 
| +    encoded_images_[i]._encodedWidth = 0;
 | 
| +    encoded_images_[i]._encodedHeight = 0;
 | 
| +    encoded_images_[i]._length = 0;
 | 
| +
 | 
| +    // Initialize scaled input frames. The frames were default-constructed
 | 
| +    // by the resize() above; allocate their buffers at the layer resolution.
 | 
| +    scaled_input_frames_[i].CreateEmptyFrame(
 | 
| +        codec_settings_.simulcastStream[i].width,
 | 
| +        codec_settings_.simulcastStream[i].height,
 | 
| +        CalculateYStrideSize(codec_settings_.simulcastStream[i].width,
 | 
| +                             codec_settings_.simulcastStream[i].height),
 | 
| +        CalculateUVStrideSize(codec_settings_.simulcastStream[i].width,
 | 
| +                              codec_settings_.simulcastStream[i].height),
 | 
| +        CalculateUVStrideSize(codec_settings_.simulcastStream[i].width,
 | 
| +                              codec_settings_.simulcastStream[i].height));
 | 
|    }
 | 
| -  int video_format = EVideoFormatType::videoFormatI420;
 | 
| -  openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT,
 | 
| -                               &video_format);
 | 
| -
 | 
| -  // Initialize encoded image. Default buffer size: size of unencoded data.
 | 
| -  encoded_image_._size = CalcBufferSize(
 | 
| -      kI420, codec_settings_.width, codec_settings_.height);
 | 
| -  encoded_image_._buffer = new uint8_t[encoded_image_._size];
 | 
| -  encoded_image_buffer_.reset(encoded_image_._buffer);
 | 
| -  encoded_image_._completeFrame = true;
 | 
| -  encoded_image_._encodedWidth = 0;
 | 
| -  encoded_image_._encodedHeight = 0;
 | 
| -  encoded_image_._length = 0;
 | 
|    return WEBRTC_VIDEO_CODEC_OK;
 | 
|  }
 | 
|  
 | 
|  int32_t H264EncoderImpl::Release() {
 | 
| -  if (openh264_encoder_) {
 | 
| -    int uninit_ret = openh264_encoder_->Uninitialize();
 | 
| -    if (uninit_ret != 0) {
 | 
| -      LOG(LS_WARNING) << "OpenH264 encoder's Uninitialize() returned "
 | 
| -                      << "unsuccessful: " << uninit_ret;
 | 
| +  while (!encoders_.empty()) {
 | 
| +    ISVCEncoder* openh264_encoder = encoders_.back();
 | 
| +    // Pop the per-stream state unconditionally (not only for non-null
 | 
| +    // encoders) so that a null entry cannot make this loop spin forever.
 | 
| +    encoders_.pop_back();
 | 
| +    // |_buffer| points into memory owned by |encoded_image_buffers_|; clear
 | 
| +    // the raw pointer before the owning buffer is destroyed.
 | 
| +    encoded_images_.back()._buffer = nullptr;
 | 
| +    encoded_images_.pop_back();
 | 
| +    encoded_image_buffers_.pop_back();
 | 
| +    if (openh264_encoder) {
 | 
| +      int uninit_ret = openh264_encoder->Uninitialize();
 | 
| +      if (uninit_ret != 0) {
 | 
| +        LOG(LS_WARNING) << "OpenH264 encoder's Uninitialize() returned "
 | 
| +                        << "unsuccessful: " << uninit_ret;
 | 
| +      }
 | 
| +      WelsDestroySVCEncoder(openh264_encoder);
 | 
|      }
 | 
| -    WelsDestroySVCEncoder(openh264_encoder_);
 | 
| -    openh264_encoder_ = nullptr;
 | 
| -  }
 | 
| -  if (encoded_image_._buffer != nullptr) {
 | 
| -    encoded_image_._buffer = nullptr;
 | 
| -    encoded_image_buffer_.reset();
 | 
|    }
 | 
|    return WEBRTC_VIDEO_CODEC_OK;
 | 
|  }
 | 
| @@ -302,21 +435,45 @@ int32_t H264EncoderImpl::SetRates(uint32_t bitrate, uint32_t framerate) {
 | 
|    if (bitrate <= 0 || framerate <= 0) {
 | 
|      return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
 | 
|    }
 | 
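| +  // Clamp the requested bitrate to the codec's configured range before
 | 
| +  // distributing it across the simulcast streams.
 | 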
| +  if (codec_settings_.maxBitrate > 0 && bitrate > codec_settings_.maxBitrate) {
 | 
| +    bitrate = codec_settings_.maxBitrate;
 | 
| +  }
 | 
| +  if (bitrate < codec_settings_.minBitrate) {
 | 
| +    bitrate = codec_settings_.minBitrate;
 | 
| +  }
 | 
| +  if (codec_settings_.numberOfSimulcastStreams > 0 &&
 | 
| +      bitrate < codec_settings_.simulcastStream[0].minBitrate) {
 | 
| +    bitrate = codec_settings_.simulcastStream[0].minBitrate;
 | 
| +  }
 | 
|    codec_settings_.targetBitrate = bitrate;
 | 
|    codec_settings_.maxFramerate = framerate;
 | 
|  
 | 
| -  SBitrateInfo target_bitrate;
 | 
| -  memset(&target_bitrate, 0, sizeof(SBitrateInfo));
 | 
| -  target_bitrate.iLayer = SPATIAL_LAYER_ALL,
 | 
| -  target_bitrate.iBitrate = codec_settings_.targetBitrate * 1000;
 | 
| -  openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE,
 | 
| -                               &target_bitrate);
 | 
| -  float max_framerate = static_cast<float>(codec_settings_.maxFramerate);
 | 
| -  openh264_encoder_->SetOption(ENCODER_OPTION_FRAME_RATE,
 | 
| -                               &max_framerate);
 | 
| +  std::vector<int> stream_bitrates =
 | 
| +      GetStreamBitratesKbps(codec_settings_, bitrate);
 | 
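| +  // A stream allocated zero bitrate is paused; SetStreamState() requests a
 | 
| +  // key frame when a paused stream is re-activated.
 | 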
| +  for (size_t i = 0; i < encoders_.size(); ++i) {
 | 
| +    SetStreamState(stream_bitrates[i] > 0, i);
 | 
| +    if (send_streams_[i]) {
 | 
| +      SBitrateInfo target_bitrate;
 | 
| +      memset(&target_bitrate, 0, sizeof(SBitrateInfo));
 | 
| +      target_bitrate.iLayer = SPATIAL_LAYER_ALL;
 | 
| +      target_bitrate.iBitrate = stream_bitrates[i] * 1000;  // bps
 | 
| +      encoders_[i]->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
 | 
| +      float max_framerate = static_cast<float>(framerate);
 | 
| +      encoders_[i]->SetOption(ENCODER_OPTION_FRAME_RATE, &max_framerate);
 | 
| +    }
 | 
| +  }
 | 
|    return WEBRTC_VIDEO_CODEC_OK;
 | 
|  }
 | 
|  
 | 
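| +// Scales |input_frame| to |output_frame|'s resolution using bilinear
 | 
| +// filtering. A fresh Scaler is configured on every call, which keeps this
 | 
| +// helper stateless at the cost of re-initializing the scaler per frame.
 | 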
| +void H264EncoderImpl::Scale(const VideoFrame& input_frame,
 | 
| +                            VideoFrame* output_frame) {
 | 
| +  Scaler scaler;
 | 
| +  scaler.Set(input_frame.width(), input_frame.height(), output_frame->width(),
 | 
| +             output_frame->height(), webrtc::kI420, webrtc::kI420,
 | 
| +             webrtc::kScaleBilinear);
 | 
| +  scaler.Scale(input_frame, output_frame);
 | 
| +}
 | 
| +
 | 
|  int32_t H264EncoderImpl::Encode(
 | 
|      const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info,
 | 
|      const std::vector<FrameType>* frame_types) {
 | 
| @@ -334,88 +491,111 @@ int32_t H264EncoderImpl::Encode(
 | 
|      ReportError();
 | 
|      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
 | 
|    }
 | 
| -  if (frame.width()  != codec_settings_.width ||
 | 
| -      frame.height() != codec_settings_.height) {
 | 
| -    LOG(LS_WARNING) << "Encoder initialized for " << codec_settings_.width
 | 
| -                    << "x" << codec_settings_.height << " but trying to encode "
 | 
| -                    << frame.width() << "x" << frame.height() << " frame.";
 | 
| -    ReportError();
 | 
| -    return WEBRTC_VIDEO_CODEC_ERR_SIZE;
 | 
| -  }
 | 
|  
 | 
| -  bool force_key_frame = false;
 | 
| -  if (frame_types != nullptr) {
 | 
| -    // We only support a single stream.
 | 
| -    RTC_DCHECK_EQ(frame_types->size(), static_cast<size_t>(1));
 | 
| -    // Skip frame?
 | 
| -    if ((*frame_types)[0] == kEmptyFrame) {
 | 
| -      return WEBRTC_VIDEO_CODEC_OK;
 | 
| +  std::vector<bool> force_key_frame(encoders_.size(), false);
 | 
| +  for (size_t i = 0; i < key_frame_request_.size() && i < send_streams_.size();
 | 
| +       ++i) {
 | 
| +    if (key_frame_request_[i] && send_streams_[i]) {
 | 
| +      force_key_frame[i] = true;
 | 
|      }
 | 
| -    // Force key frame?
 | 
| -    force_key_frame = (*frame_types)[0] == kVideoFrameKey;
 | 
|    }
 | 
| -  if (force_key_frame) {
 | 
| -    // API doc says ForceIntraFrame(false) does nothing, but calling this
 | 
| -    // function forces a key frame regardless of the |bIDR| argument's value.
 | 
| -    // (If every frame is a key frame we get lag/delays.)
 | 
| -    openh264_encoder_->ForceIntraFrame(true);
 | 
| +  if (frame_types) {
 | 
| +    for (size_t i = 0;
 | 
| +         i < frame_types->size() && i < force_key_frame.size(); ++i) {
 | 
| +      if ((*frame_types)[i] == kVideoFrameKey) {
 | 
| +        force_key_frame[i] = true;
 | 
| +      }
 | 
| +    }
 | 
|    }
 | 
|  
 | 
| -  // EncodeFrame input.
 | 
| -  SSourcePicture picture;
 | 
| -  memset(&picture, 0, sizeof(SSourcePicture));
 | 
| -  picture.iPicWidth = frame.width();
 | 
| -  picture.iPicHeight = frame.height();
 | 
| -  picture.iColorFormat = EVideoFormatType::videoFormatI420;
 | 
| -  picture.uiTimeStamp = frame.ntp_time_ms();
 | 
| -  picture.iStride[0] = frame.stride(kYPlane);
 | 
| -  picture.iStride[1] = frame.stride(kUPlane);
 | 
| -  picture.iStride[2] = frame.stride(kVPlane);
 | 
| -  picture.pData[0] = const_cast<uint8_t*>(frame.buffer(kYPlane));
 | 
| -  picture.pData[1] = const_cast<uint8_t*>(frame.buffer(kUPlane));
 | 
| -  picture.pData[2] = const_cast<uint8_t*>(frame.buffer(kVPlane));
 | 
| -
 | 
| -  // EncodeFrame output.
 | 
| -  SFrameBSInfo info;
 | 
| -  memset(&info, 0, sizeof(SFrameBSInfo));
 | 
| -
 | 
| -  // Encode!
 | 
| -  int enc_ret = openh264_encoder_->EncodeFrame(&picture, &info);
 | 
| -  if (enc_ret != 0) {
 | 
| -    LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
 | 
| -                  << enc_ret << ".";
 | 
| -    ReportError();
 | 
| -    return WEBRTC_VIDEO_CODEC_ERROR;
 | 
| -  }
 | 
| +  for (size_t i = 0; i < encoders_.size(); ++i) {
 | 
| +    if (!send_streams_[i] ||
 | 
| +        (frame_types && i < frame_types->size() &&
 | 
| +         (*frame_types)[i] == kEmptyFrame)) {
 | 
| +      continue;
 | 
| +    }
 | 
| +    // Scale the input to this stream's configured encode resolution.
 | 
| +    Scale(frame, &scaled_input_frames_[i]);
 | 
| +
 | 
| +    if (scaled_input_frames_[i].width() !=
 | 
| +            codec_settings_.simulcastStream[i].width ||
 | 
| +        scaled_input_frames_[i].height() !=
 | 
| +            codec_settings_.simulcastStream[i].height) {
 | 
| +      LOG(LS_ERROR) << "Encoder initialized for "
 | 
| +                    << codec_settings_.simulcastStream[i].width << "x"
 | 
| +                    << codec_settings_.simulcastStream[i].height
 | 
| +                    << " but trying to encode "
 | 
| +                    << scaled_input_frames_[i].width() << "x"
 | 
| +                    << scaled_input_frames_[i].height() << " frame.";
 | 
| +      ReportError();
 | 
| +      return WEBRTC_VIDEO_CODEC_ERR_SIZE;
 | 
| +    }
 | 
| +
 | 
| +    SSourcePicture picture;
 | 
| +    memset(&picture, 0, sizeof(SSourcePicture));
 | 
| +    picture.iPicWidth = scaled_input_frames_[i].width();
 | 
| +    picture.iPicHeight = scaled_input_frames_[i].height();
 | 
| +    picture.iColorFormat = EVideoFormatType::videoFormatI420;
 | 
| +    picture.uiTimeStamp = frame.ntp_time_ms();
 | 
| +    picture.iStride[0] = scaled_input_frames_[i].stride(kYPlane);
 | 
| +    picture.iStride[1] = scaled_input_frames_[i].stride(kUPlane);
 | 
| +    picture.iStride[2] = scaled_input_frames_[i].stride(kVPlane);
 | 
| +    picture.pData[0] =
 | 
| +        const_cast<uint8_t*>(scaled_input_frames_[i].buffer(kYPlane));
 | 
| +    picture.pData[1] =
 | 
| +        const_cast<uint8_t*>(scaled_input_frames_[i].buffer(kUPlane));
 | 
| +    picture.pData[2] =
 | 
| +        const_cast<uint8_t*>(scaled_input_frames_[i].buffer(kVPlane));
 | 
| +    if (force_key_frame[i]) {
 | 
| +      // API doc says ForceIntraFrame(false) does nothing, but calling this
 | 
| +      // function forces a key frame regardless of the |bIDR| argument's value.
 | 
| +      // (If every frame is a key frame we get lag/delays.)
 | 
| +      encoders_[i]->ForceIntraFrame(true);
 | 
| +      // Only clear the request for this stream; other streams may still be
 | 
| +      // waiting for their key frames.
 | 
| +      key_frame_request_[i] = false;
 | 
| +    }
 | 
| +    // EncodeFrame output.
 | 
| +    SFrameBSInfo info;
 | 
| +    memset(&info, 0, sizeof(SFrameBSInfo));
 | 
| +    int enc_ret = encoders_[i]->EncodeFrame(&picture, &info);
 | 
| +    if (enc_ret != 0) {
 | 
| +      LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
 | 
| +                    << enc_ret << ".";
 | 
| +      ReportError();
 | 
| +      return WEBRTC_VIDEO_CODEC_ERROR;
 | 
| +    }
 | 
|  
 | 
| -  encoded_image_._encodedWidth = frame.width();
 | 
| -  encoded_image_._encodedHeight = frame.height();
 | 
| -  encoded_image_._timeStamp = frame.timestamp();
 | 
| -  encoded_image_.ntp_time_ms_ = frame.ntp_time_ms();
 | 
| -  encoded_image_.capture_time_ms_ = frame.render_time_ms();
 | 
| -  encoded_image_.rotation_ = frame.rotation();
 | 
| -  encoded_image_._frameType = EVideoFrameType_to_FrameType(info.eFrameType);
 | 
| -
 | 
| -  // Split encoded image up into fragments. This also updates |encoded_image_|.
 | 
| -  RTPFragmentationHeader frag_header;
 | 
| -  RtpFragmentize(&encoded_image_, &encoded_image_buffer_, frame, &info,
 | 
| -                 &frag_header);
 | 
| -
 | 
| -  // Encoder can skip frames to save bandwidth in which case
 | 
| -  // |encoded_image_._length| == 0.
 | 
| -  if (encoded_image_._length > 0) {
 | 
| -    // Deliver encoded image.
 | 
| -    CodecSpecificInfo codec_specific;
 | 
| -    codec_specific.codecType = kVideoCodecH264;
 | 
| -    encoded_image_callback_->Encoded(encoded_image_,
 | 
| -                                     &codec_specific,
 | 
| -                                     &frag_header);
 | 
| +    encoded_images_[i]._encodedWidth = codec_settings_.simulcastStream[i].width;
 | 
| +    encoded_images_[i]._encodedHeight =
 | 
| +        codec_settings_.simulcastStream[i].height;
 | 
| +    encoded_images_[i]._timeStamp = frame.timestamp();
 | 
| +    encoded_images_[i].ntp_time_ms_ = frame.ntp_time_ms();
 | 
| +    encoded_images_[i].capture_time_ms_ = frame.render_time_ms();
 | 
| +    // Preserve the input frame's rotation, as the pre-simulcast code did.
 | 
| +    encoded_images_[i].rotation_ = frame.rotation();
 | 
| +    encoded_images_[i]._frameType =
 | 
| +        EVideoFrameType_to_FrameType(info.eFrameType);
 | 
| +    // Split encoded image up into fragments. This also updates
 | 
| +    // |encoded_images_[i]|.
 | 
| +    RTPFragmentationHeader frag_header;
 | 
| +    RtpFragmentize(&encoded_images_[i], &encoded_image_buffers_[i], frame,
 | 
| +                   &info, &frag_header);
 | 
| +    if (encoded_images_[i]._length > 0) {
 | 
| +      // Deliver encoded image.
 | 
| +      CodecSpecificInfo codec_specific;
 | 
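| +      // Tag the image with its stream index so that downstream code can
 | 
| +      // route it to the matching RTP stream.
 | 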
| +      CodecSpecificInfoH264* h264_info = &(codec_specific.codecSpecific.H264);
 | 
| +      h264_info->simulcastIdx = i;
 | 
| +      codec_specific.codecType = kVideoCodecH264;
 | 
| +      encoded_image_callback_->Encoded(encoded_images_[i], &codec_specific,
 | 
| +                                       &frag_header);
 | 
| +    }
 | 
|    }
 | 
|    return WEBRTC_VIDEO_CODEC_OK;
 | 
|  }
 | 
|  
 | 
|  bool H264EncoderImpl::IsInitialized() const {
 | 
| -  return openh264_encoder_ != nullptr;
 | 
| +  // No encoders at all means InitEncode() has not (successfully) run yet.
 | 
| +  if (encoders_.empty()) {
 | 
| +    return false;
 | 
| +  }
 | 
| +  for (ISVCEncoder* openh264_encoder : encoders_) {
 | 
| +    if (openh264_encoder == nullptr) {
 | 
| +      return false;
 | 
| +    }
 | 
| +  }
 | 
| +  return true;
 | 
|  }
 | 
|  
 | 
|  void H264EncoderImpl::ReportInit() {
 | 
| 
 |