Chromium Code Reviews

Side by Side Diff: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc

Issue 1913073002: Extract common simulcast logic from VP8 wrapper and simulcast adapter (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Address comments, added tests | Created 4 years, 7 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 41 matching lines...)
52 int GCD(int a, int b) { 52 int GCD(int a, int b) {
53 int c = a % b; 53 int c = a % b;
54 while (c != 0) { 54 while (c != 0) {
55 a = b; 55 a = b;
56 b = c; 56 b = c;
57 c = a % b; 57 c = a % b;
58 } 58 }
59 return b; 59 return b;
60 } 60 }
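As a quick worked example of how this helper is used: for adjacent simulcast widths of 1280 and 640, GCD(1280, 640) = 640, so the downsampling factors computed in InitEncode below come out as num = 2, den = 1 (a 2:1 scaling step between the two layers).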
61 61
62 std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
63 int bitrate_to_allocate_kbps) {
64 if (codec.numberOfSimulcastStreams <= 1) {
65 return std::vector<int>(1, bitrate_to_allocate_kbps);
66 }
67
68 std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
69 // Allocate min -> target bitrates as long as we have bitrate to spend.
70 size_t last_active_stream = 0;
71 for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
72 bitrate_to_allocate_kbps >=
73 static_cast<int>(codec.simulcastStream[i].minBitrate);
74 ++i) {
75 last_active_stream = i;
76 int allocated_bitrate_kbps =
77 std::min(static_cast<int>(codec.simulcastStream[i].targetBitrate),
78 bitrate_to_allocate_kbps);
79 bitrates_kbps[i] = allocated_bitrate_kbps;
80 bitrate_to_allocate_kbps -= allocated_bitrate_kbps;
81 }
82
83 // Spend additional bits on the highest-quality active layer, up to max
84 // bitrate.
85 // TODO(pbos): Consider spending additional bits on last_active_stream-1 down
86 // to 0 and not just the top layer when we have additional bitrate to spend.
87 int allocated_bitrate_kbps = std::min(
88 static_cast<int>(codec.simulcastStream[last_active_stream].maxBitrate -
89 bitrates_kbps[last_active_stream]),
90 bitrate_to_allocate_kbps);
91 bitrates_kbps[last_active_stream] += allocated_bitrate_kbps;
92 bitrate_to_allocate_kbps -= allocated_bitrate_kbps;
93
94 // Make sure we can always send something. Suspending below min bitrate is
 95 // controlled outside the codec implementation and is not overridden by this.
96 if (bitrates_kbps[0] < static_cast<int>(codec.simulcastStream[0].minBitrate))
97 bitrates_kbps[0] = static_cast<int>(codec.simulcastStream[0].minBitrate);
98
99 return bitrates_kbps;
100 }
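
To make the allocation above concrete, here is a small standalone C++ sketch (not part of this CL) that mirrors the multi-stream path of the removed GetStreamBitratesKbps; the {min, target, max} rates in main() are made up for illustration:

    // Standalone sketch, not part of the CL; mirrors the allocation logic above.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Stream { int min_kbps, target_kbps, max_kbps; };

    std::vector<int> AllocateKbps(const std::vector<Stream>& streams, int budget_kbps) {
      std::vector<int> out(streams.size(), 0);
      size_t last_active = 0;
      // Pass 1: give each stream up to its target while the budget still covers its minimum.
      for (size_t i = 0; i < streams.size() && budget_kbps >= streams[i].min_kbps; ++i) {
        last_active = i;
        out[i] = std::min(streams[i].target_kbps, budget_kbps);
        budget_kbps -= out[i];
      }
      // Pass 2: any remainder goes to the highest active stream, capped at its max bitrate.
      out[last_active] += std::min(streams[last_active].max_kbps - out[last_active], budget_kbps);
      // The base stream always gets at least its minimum so something can be sent.
      out[0] = std::max(out[0], streams[0].min_kbps);
      return out;
    }

    int main() {
      // Hypothetical 3-stream config, {min, target, max} in kbps.
      std::vector<Stream> streams = {{50, 150, 200}, {150, 500, 700}, {600, 1700, 2500}};
      for (int kbps : AllocateKbps(streams, 900))
        std::printf("%d ", kbps);  // Prints "150 700 0": stream 2 never reaches its 600 kbps
      return 0;                    // minimum, so the leftover tops up stream 1 to its max.
    }
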
101
102 uint32_t SumStreamMaxBitrate(int streams, const VideoCodec& codec) {
103 uint32_t bitrate_sum = 0;
104 for (int i = 0; i < streams; ++i) {
105 bitrate_sum += codec.simulcastStream[i].maxBitrate;
106 }
107 return bitrate_sum;
108 }
109
110 int NumberOfStreams(const VideoCodec& codec) {
111 int streams =
112 codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
113 uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
114 if (simulcast_max_bitrate == 0) {
115 streams = 1;
116 }
117 return streams;
118 }
119
120 bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) { 62 bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
121 if (codec.width != codec.simulcastStream[num_streams - 1].width || 63 if (codec.width != codec.simulcastStream[num_streams - 1].width ||
122 codec.height != codec.simulcastStream[num_streams - 1].height) { 64 codec.height != codec.simulcastStream[num_streams - 1].height) {
123 return false; 65 return false;
124 } 66 }
125 for (int i = 0; i < num_streams; ++i) { 67 for (int i = 0; i < num_streams; ++i) {
126 if (codec.width * codec.simulcastStream[i].height != 68 if (codec.width * codec.simulcastStream[i].height !=
127 codec.height * codec.simulcastStream[i].width) { 69 codec.height * codec.simulcastStream[i].width) {
128 return false; 70 return false;
129 } 71 }
130 } 72 }
131 return true; 73 return true;
132 } 74 }
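For example, with codec_ set to 1280x720, simulcast streams of 320x180, 640x360, and 1280x720 pass this check (the top stream matches the codec resolution and every stream keeps the 16:9 aspect ratio via the cross-multiplication test), whereas a 640x480 stream would fail because 1280 * 480 != 720 * 640.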
133 75
134 int NumStreamsDisabled(const std::vector<bool>& streams) { 76 VideoCodec DefaultCodec() {
135 int num_disabled = 0; 77 VideoCodec codec;
136 for (bool stream : streams) { 78 memset(&codec, 0, sizeof(VideoCodec));
137 if (!stream) 79 codec.codecType = kVideoCodecVP8;
138 ++num_disabled; 80 return codec;
139 }
140 return num_disabled;
141 } 81 }
82
142 } // namespace 83 } // namespace
143 84
144 VP8Encoder* VP8Encoder::Create() { 85 VP8Encoder* VP8Encoder::Create() {
145 return new VP8EncoderImpl(); 86 return new VP8EncoderImpl();
146 } 87 }
147 88
148 VP8Decoder* VP8Decoder::Create() { 89 VP8Decoder* VP8Decoder::Create() {
149 return new VP8DecoderImpl(); 90 return new VP8DecoderImpl();
150 } 91 }
151 92
152 const float kTl1MaxTimeToDropFrames = 20.0f;
153
154 VP8EncoderImpl::VP8EncoderImpl() 93 VP8EncoderImpl::VP8EncoderImpl()
155 : encoded_complete_callback_(NULL), 94 : encoded_complete_callback_(NULL),
156 inited_(false), 95 inited_(false),
157 timestamp_(0), 96 timestamp_(0),
158 feedback_mode_(false), 97 feedback_mode_(false),
159 qp_max_(56), // Setting for max quantizer. 98 qp_max_(56), // Setting for max quantizer.
160 cpu_speed_default_(-6), 99 cpu_speed_default_(-6),
161 rc_max_intra_target_(0), 100 rc_max_intra_target_(0),
162 token_partitions_(VP8_ONE_TOKENPARTITION), 101 token_partitions_(VP8_ONE_TOKENPARTITION),
102 simulcast_state_(DefaultCodec()),
163 down_scale_requested_(false), 103 down_scale_requested_(false),
164 down_scale_bitrate_(0), 104 down_scale_bitrate_(0),
165 tl0_frame_dropper_(),
166 tl1_frame_dropper_(kTl1MaxTimeToDropFrames),
167 key_frame_request_(kMaxSimulcastStreams, false),
168 quality_scaler_enabled_(false) { 105 quality_scaler_enabled_(false) {
169 uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp()); 106 uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
170 srand(seed); 107 srand(seed);
171 108
172 picture_id_.reserve(kMaxSimulcastStreams); 109 picture_id_.reserve(kMaxSimulcastStreams);
173 last_key_frame_picture_id_.reserve(kMaxSimulcastStreams); 110 last_key_frame_picture_id_.reserve(kMaxSimulcastStreams);
174 temporal_layers_.reserve(kMaxSimulcastStreams); 111 temporal_layers_.reserve(kMaxSimulcastStreams);
175 raw_images_.reserve(kMaxSimulcastStreams); 112 raw_images_.reserve(kMaxSimulcastStreams);
176 encoded_images_.reserve(kMaxSimulcastStreams); 113 encoded_images_.reserve(kMaxSimulcastStreams);
177 send_stream_.reserve(kMaxSimulcastStreams);
178 cpu_speed_.assign(kMaxSimulcastStreams, -6); // Set default to -6. 114 cpu_speed_.assign(kMaxSimulcastStreams, -6); // Set default to -6.
179 encoders_.reserve(kMaxSimulcastStreams); 115 encoders_.reserve(kMaxSimulcastStreams);
180 configurations_.reserve(kMaxSimulcastStreams); 116 configurations_.reserve(kMaxSimulcastStreams);
181 downsampling_factors_.reserve(kMaxSimulcastStreams); 117 downsampling_factors_.reserve(kMaxSimulcastStreams);
182 } 118 }
183 119
184 VP8EncoderImpl::~VP8EncoderImpl() { 120 VP8EncoderImpl::~VP8EncoderImpl() {
185 Release(); 121 Release();
186 } 122 }
187 123
188 int VP8EncoderImpl::Release() { 124 int VP8EncoderImpl::Release() {
189 int ret_val = WEBRTC_VIDEO_CODEC_OK; 125 int ret_val = WEBRTC_VIDEO_CODEC_OK;
190 126
191 while (!encoded_images_.empty()) { 127 while (!encoded_images_.empty()) {
192 EncodedImage& image = encoded_images_.back(); 128 EncodedImage& image = encoded_images_.back();
193 delete[] image._buffer; 129 delete[] image._buffer;
194 encoded_images_.pop_back(); 130 encoded_images_.pop_back();
195 } 131 }
196 while (!encoders_.empty()) { 132 while (!encoders_.empty()) {
197 vpx_codec_ctx_t& encoder = encoders_.back(); 133 vpx_codec_ctx_t& encoder = encoders_.back();
198 if (vpx_codec_destroy(&encoder)) { 134 if (vpx_codec_destroy(&encoder)) {
199 ret_val = WEBRTC_VIDEO_CODEC_MEMORY; 135 ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
200 } 136 }
201 encoders_.pop_back(); 137 encoders_.pop_back();
202 } 138 }
203 configurations_.clear(); 139 configurations_.clear();
204 send_stream_.clear(); 140 simulcast_state_ = SimulcastState(DefaultCodec());
205 cpu_speed_.clear(); 141 cpu_speed_.clear();
206 while (!raw_images_.empty()) { 142 while (!raw_images_.empty()) {
207 vpx_img_free(&raw_images_.back()); 143 vpx_img_free(&raw_images_.back());
208 raw_images_.pop_back(); 144 raw_images_.pop_back();
209 } 145 }
210 while (!temporal_layers_.empty()) { 146 while (!temporal_layers_.empty()) {
211 delete temporal_layers_.back(); 147 delete temporal_layers_.back();
212 temporal_layers_.pop_back(); 148 temporal_layers_.pop_back();
213 } 149 }
214 inited_ = false; 150 inited_ = false;
(...skipping 28 matching lines...)
243 // Calculate a rough limit for when to trigger a potential down scale. 179 // Calculate a rough limit for when to trigger a potential down scale.
244 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000; 180 uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000;
245 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work 181 // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work
246 // around the current limitations. 182 // around the current limitations.
247 // Only trigger keyframes if we are allowed to scale down. 183 // Only trigger keyframes if we are allowed to scale down.
248 if (configurations_[0].rc_resize_allowed) { 184 if (configurations_[0].rc_resize_allowed) {
249 if (!down_scale_requested_) { 185 if (!down_scale_requested_) {
250 if (k_pixels_per_frame > new_bitrate_kbit) { 186 if (k_pixels_per_frame > new_bitrate_kbit) {
251 down_scale_requested_ = true; 187 down_scale_requested_ = true;
252 down_scale_bitrate_ = new_bitrate_kbit; 188 down_scale_bitrate_ = new_bitrate_kbit;
253 key_frame_request_[0] = true; 189 simulcast_state_.RequestKeyFrame(0);
254 } 190 }
255 } else { 191 } else {
256 if (new_bitrate_kbit > (2 * down_scale_bitrate_) || 192 if (new_bitrate_kbit > (2 * down_scale_bitrate_) ||
257 new_bitrate_kbit < (down_scale_bitrate_ / 2)) { 193 new_bitrate_kbit < (down_scale_bitrate_ / 2)) {
258 down_scale_requested_ = false; 194 down_scale_requested_ = false;
259 } 195 }
260 } 196 }
261 } 197 }
262 } else { 198 } else {
263 // If we have more than 1 stream, reduce the qp_max for the low resolution 199 // If we have more than 1 stream, reduce the qp_max for the low resolution
264 // stream if frame rate is not too low. The trade-off with lower qp_max is 200 // stream if frame rate is not too low. The trade-off with lower qp_max is
265 // possibly more dropped frames, so we only do this if the frame rate is 201 // possibly more dropped frames, so we only do this if the frame rate is
266 // above some threshold (base temporal layer is down to 1/4 for 3 layers). 202 // above some threshold (base temporal layer is down to 1/4 for 3 layers).
267 // We may want to condition this on bitrate later. 203 // We may want to condition this on bitrate later.
268 if (new_framerate > 20) { 204 if (new_framerate > 20) {
269 configurations_[encoders_.size() - 1].rc_max_quantizer = 45; 205 configurations_[encoders_.size() - 1].rc_max_quantizer = 45;
270 } else { 206 } else {
271 // Go back to default value set in InitEncode. 207 // Go back to default value set in InitEncode.
272 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_; 208 configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_;
273 } 209 }
274 } 210 }
275 211
276 std::vector<int> stream_bitrates = 212 simulcast_state_.AllocateBitrate(new_bitrate_kbit * 1000);
277 GetStreamBitratesKbps(codec_, new_bitrate_kbit); 213 for (const SimulcastState::Stream& stream : simulcast_state_.Streams()) {
278 size_t stream_idx = encoders_.size() - 1; 214 unsigned int target_bitrate = stream.allocated_rate_bps / 1000;
279 for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
280 if (encoders_.size() > 1)
281 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
282
283 unsigned int target_bitrate = stream_bitrates[stream_idx];
284 unsigned int max_bitrate = codec_.maxBitrate; 215 unsigned int max_bitrate = codec_.maxBitrate;
285 int framerate = new_framerate;
286 // TODO(holmer): This is a temporary hack for screensharing, where we 216 // TODO(holmer): This is a temporary hack for screensharing, where we
287 // interpret the startBitrate as the encoder target bitrate. This is 217 // interpret the startBitrate as the encoder target bitrate. This is
288 // to allow for a different max bitrate, so if the codec can't meet 218 // to allow for a different max bitrate, so if the codec can't meet
289 // the target we still allow it to overshoot up to the max before dropping 219 // the target we still allow it to overshoot up to the max before dropping
290 // frames. This hack should be improved. 220 // frames. This hack should be improved.
291 if (codec_.targetBitrate > 0 && 221 if (codec_.targetBitrate > 0 && simulcast_state_.NumStreams() == 1 &&
292 (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 || 222 (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 ||
293 codec_.simulcastStream[0].numberOfTemporalLayers == 2)) { 223 codec_.simulcastStream[0].numberOfTemporalLayers == 2)) {
294 int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate); 224 int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate);
295 max_bitrate = std::min(codec_.maxBitrate, target_bitrate); 225 max_bitrate = std::min(codec_.maxBitrate, target_bitrate);
296 target_bitrate = tl0_bitrate; 226 target_bitrate = tl0_bitrate;
297 } 227 }
228 int i = simulcast_state_.NumStreams() - stream.idx - 1;
298 configurations_[i].rc_target_bitrate = target_bitrate; 229 configurations_[i].rc_target_bitrate = target_bitrate;
299 temporal_layers_[stream_idx]->ConfigureBitrates( 230 temporal_layers_[stream.idx]->ConfigureBitrates(
300 target_bitrate, max_bitrate, framerate, &configurations_[i]); 231 target_bitrate, max_bitrate, new_framerate, &configurations_[i]);
301 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) { 232 if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
302 return WEBRTC_VIDEO_CODEC_ERROR; 233 return WEBRTC_VIDEO_CODEC_ERROR;
303 } 234 }
304 } 235 }
305 quality_scaler_.ReportFramerate(new_framerate); 236 quality_scaler_.ReportFramerate(new_framerate);
306 return WEBRTC_VIDEO_CODEC_OK; 237 return WEBRTC_VIDEO_CODEC_OK;
307 } 238 }
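
For readers who do not have the rest of this CL open: the simulcast_state_ calls used above and below suggest roughly the interface sketched here. This is only inferred from the call sites in this file; the actual SimulcastState class is defined elsewhere in the CL and may differ.

    // Sketch inferred from call sites in vp8_impl.cc; not the real header.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct VideoCodec;  // The real definition comes from WebRTC's common types.

    class SimulcastState {
     public:
      struct Stream {
        size_t idx;                   // Simulcast index, lowest resolution first.
        uint32_t allocated_rate_bps;  // Share of the total bitrate, in bps.
      };

      explicit SimulcastState(const VideoCodec& codec);

      size_t NumStreams() const;         // Number of configured streams (>= 1).
      size_t NumSendingStreams() const;  // Streams currently marked as sending.

      void AllocateBitrate(uint32_t total_bps);         // Split total_bps across streams.
      uint32_t AllocatedRate(size_t stream_idx) const;  // Per-stream share, in bps.
      const std::vector<Stream>& Streams() const;       // Iterated over in SetRates().

      void SetSending(size_t stream_idx, bool sending);
      bool IsSending(size_t stream_idx) const;
      void RequestKeyFrame(size_t stream_idx);
      bool GetAndResetKeyFrameRequest(size_t stream_idx);
    };

Note that Stream::idx follows simulcast order (lowest resolution first) while configurations_ and encoders_ keep the highest resolution at index 0, which is why SetRates() maps i = NumStreams() - stream.idx - 1 before writing rc_target_bitrate.
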
308 239
309 const char* VP8EncoderImpl::ImplementationName() const { 240 const char* VP8EncoderImpl::ImplementationName() const {
310 return "libvpx"; 241 return "libvpx";
311 } 242 }
312 243
313 void VP8EncoderImpl::SetStreamState(bool send_stream,
314 int stream_idx) {
315 if (send_stream && !send_stream_[stream_idx]) {
316 // Need a key frame if we have not sent this stream before.
317 key_frame_request_[stream_idx] = true;
318 }
319 send_stream_[stream_idx] = send_stream;
320 }
321
322 void VP8EncoderImpl::SetupTemporalLayers(int num_streams, 244 void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
323 int num_temporal_layers, 245 int num_temporal_layers,
324 const VideoCodec& codec) { 246 const VideoCodec& codec) {
325 TemporalLayersFactory default_factory; 247 TemporalLayersFactory default_factory;
326 const TemporalLayersFactory* tl_factory = codec.codecSpecific.VP8.tl_factory; 248 const TemporalLayersFactory* tl_factory = codec.codecSpecific.VP8.tl_factory;
327 if (!tl_factory) 249 if (!tl_factory)
328 tl_factory = &default_factory; 250 tl_factory = &default_factory;
329 if (num_streams == 1) { 251 if (num_streams == 1) {
330 if (codec.mode == kScreensharing) { 252 if (codec.mode == kScreensharing) {
331 // Special mode when screensharing on a single stream. 253 // Special mode when screensharing on a single stream.
(...skipping 39 matching lines...)
371 } 293 }
372 if (inst->codecSpecific.VP8.automaticResizeOn && 294 if (inst->codecSpecific.VP8.automaticResizeOn &&
373 inst->numberOfSimulcastStreams > 1) { 295 inst->numberOfSimulcastStreams > 1) {
374 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; 296 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
375 } 297 }
376 int retVal = Release(); 298 int retVal = Release();
377 if (retVal < 0) { 299 if (retVal < 0) {
378 return retVal; 300 return retVal;
379 } 301 }
380 302
381 int number_of_streams = NumberOfStreams(*inst); 303 std::unique_ptr<SimulcastState> new_simulcast_state(
304 new SimulcastState(*inst));
305
306 int number_of_streams = new_simulcast_state->NumStreams();
382 bool doing_simulcast = (number_of_streams > 1); 307 bool doing_simulcast = (number_of_streams > 1);
383 308
384 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) { 309 if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) {
385 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; 310 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
386 } 311 }
387 312
388 int num_temporal_layers = 313 int num_temporal_layers =
389 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers 314 doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
390 : inst->codecSpecific.VP8.numberOfTemporalLayers; 315 : inst->codecSpecific.VP8.numberOfTemporalLayers;
391 316
392 // TODO(andresp): crash if num temporal layers is bananas. 317 // TODO(andresp): crash if num temporal layers is bananas.
393 if (num_temporal_layers < 1) 318 if (num_temporal_layers < 1)
394 num_temporal_layers = 1; 319 num_temporal_layers = 1;
395 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst); 320 SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
396 321
397 feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn; 322 feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
398 323
399 timestamp_ = 0; 324 timestamp_ = 0;
400 codec_ = *inst; 325 codec_ = *inst;
326 simulcast_state_ = *new_simulcast_state;
401 327
402 // Code expects simulcastStream resolutions to be correct, make sure they are 328 // Code expects simulcastStream resolutions to be correct, make sure they are
403 // filled even when there are no simulcast layers. 329 // filled even when there are no simulcast layers.
404 if (codec_.numberOfSimulcastStreams == 0) { 330 if (codec_.numberOfSimulcastStreams == 0) {
405 codec_.simulcastStream[0].width = codec_.width; 331 codec_.simulcastStream[0].width = codec_.width;
406 codec_.simulcastStream[0].height = codec_.height; 332 codec_.simulcastStream[0].height = codec_.height;
407 } 333 }
408 334
409 picture_id_.resize(number_of_streams); 335 picture_id_.resize(number_of_streams);
410 last_key_frame_picture_id_.resize(number_of_streams); 336 last_key_frame_picture_id_.resize(number_of_streams);
411 encoded_images_.resize(number_of_streams); 337 encoded_images_.resize(number_of_streams);
412 encoders_.resize(number_of_streams); 338 encoders_.resize(number_of_streams);
413 configurations_.resize(number_of_streams); 339 configurations_.resize(number_of_streams);
414 downsampling_factors_.resize(number_of_streams); 340 downsampling_factors_.resize(number_of_streams);
415 raw_images_.resize(number_of_streams); 341 raw_images_.resize(number_of_streams);
416 send_stream_.resize(number_of_streams);
417 send_stream_[0] = true; // For non-simulcast case.
418 cpu_speed_.resize(number_of_streams); 342 cpu_speed_.resize(number_of_streams);
419 std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
420 343
421 int idx = number_of_streams - 1; 344 int idx = number_of_streams - 1;
422 for (int i = 0; i < (number_of_streams - 1); ++i, --idx) { 345 for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
423 int gcd = GCD(inst->simulcastStream[idx].width, 346 int gcd = GCD(inst->simulcastStream[idx].width,
424 inst->simulcastStream[idx - 1].width); 347 inst->simulcastStream[idx - 1].width);
425 downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd; 348 downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
426 downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd; 349 downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
427 send_stream_[i] = false; 350 simulcast_state_.SetSending(i, false);
428 } 351 }
429 if (number_of_streams > 1) { 352 if (number_of_streams > 1) {
430 send_stream_[number_of_streams - 1] = false; 353 simulcast_state_.SetSending(number_of_streams - 1, false);
431 downsampling_factors_[number_of_streams - 1].num = 1; 354 downsampling_factors_[number_of_streams - 1].num = 1;
432 downsampling_factors_[number_of_streams - 1].den = 1; 355 downsampling_factors_[number_of_streams - 1].den = 1;
433 } 356 }
434 for (int i = 0; i < number_of_streams; ++i) { 357 for (int i = 0; i < number_of_streams; ++i) {
435 // Random start, 16 bits is enough. 358 // Random start, 16 bits is enough.
436 picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT 359 picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
437 last_key_frame_picture_id_[i] = -1; 360 last_key_frame_picture_id_[i] = -1;
438 // allocate memory for encoded image 361 // allocate memory for encoded image
439 if (encoded_images_[i]._buffer != NULL) { 362 if (encoded_images_[i]._buffer != NULL) {
440 delete[] encoded_images_[i]._buffer; 363 delete[] encoded_images_[i]._buffer;
(...skipping 114 matching lines...)
555 // is meaningless (no memory allocation is done here). 478 // is meaningless (no memory allocation is done here).
556 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1, 479 vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
557 NULL); 480 NULL);
558 481
559 if (encoders_.size() == 1) { 482 if (encoders_.size() == 1) {
560 configurations_[0].rc_target_bitrate = inst->startBitrate; 483 configurations_[0].rc_target_bitrate = inst->startBitrate;
561 temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate, 484 temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
562 inst->maxFramerate, 485 inst->maxFramerate,
563 &configurations_[0]); 486 &configurations_[0]);
564 } else { 487 } else {
488 simulcast_state_.AllocateBitrate(inst->startBitrate * 1000);
565 // Note the order we use is different from webm, we have lowest resolution 489 // Note the order we use is different from webm, we have lowest resolution
566 // at position 0 and they have highest resolution at position 0. 490 // at position 0 and they have highest resolution at position 0.
567 int stream_idx = encoders_.size() - 1; 491 int stream_idx = encoders_.size() - 1;
568 std::vector<int> stream_bitrates = 492 int stream_bitrate_kbps = simulcast_state_.AllocatedRate(stream_idx) / 1000;
569 GetStreamBitratesKbps(codec_, inst->startBitrate); 493 configurations_[0].rc_target_bitrate = stream_bitrate_kbps;
570 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
571 configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx];
572 temporal_layers_[stream_idx]->ConfigureBitrates( 494 temporal_layers_[stream_idx]->ConfigureBitrates(
573 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, 495 stream_bitrate_kbps, inst->maxBitrate, inst->maxFramerate,
574 &configurations_[0]); 496 &configurations_[0]);
575 --stream_idx; 497 --stream_idx;
576 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) { 498 for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) {
577 memcpy(&configurations_[i], &configurations_[0], 499 memcpy(&configurations_[i], &configurations_[0],
578 sizeof(configurations_[0])); 500 sizeof(configurations_[0]));
579 501
580 configurations_[i].g_w = inst->simulcastStream[stream_idx].width; 502 configurations_[i].g_w = inst->simulcastStream[stream_idx].width;
581 configurations_[i].g_h = inst->simulcastStream[stream_idx].height; 503 configurations_[i].g_h = inst->simulcastStream[stream_idx].height;
582 504
583 // Use 1 thread for lower resolutions. 505 // Use 1 thread for lower resolutions.
584 configurations_[i].g_threads = 1; 506 configurations_[i].g_threads = 1;
585 507
586 // Setting alignment to 32 - as that ensures at least 16 for all 508 // Setting alignment to 32 - as that ensures at least 16 for all
587 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for 509 // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for
588 // the y plane, but only half of it to the u and v planes. 510 // the y plane, but only half of it to the u and v planes.
589 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, 511 vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420,
590 inst->simulcastStream[stream_idx].width, 512 inst->simulcastStream[stream_idx].width,
591 inst->simulcastStream[stream_idx].height, kVp832ByteAlign); 513 inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
592 SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); 514 stream_bitrate_kbps = simulcast_state_.AllocatedRate(stream_idx) / 1000;
593 configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx]; 515 configurations_[i].rc_target_bitrate = stream_bitrate_kbps;
594 temporal_layers_[stream_idx]->ConfigureBitrates( 516 temporal_layers_[stream_idx]->ConfigureBitrates(
595 stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate, 517 stream_bitrate_kbps, inst->maxBitrate, inst->maxFramerate,
596 &configurations_[i]); 518 &configurations_[i]);
597 } 519 }
598 } 520 }
599 521
600 rps_.Init(); 522 rps_.Init();
601 // QP thresholds are chosen to be high enough to be hit in practice when 523 // QP thresholds are chosen to be high enough to be hit in practice when
602 // quality is good, but also low enough to not cause a flip-flop behavior 524 // quality is good, but also low enough to not cause a flip-flop behavior
603 // (e.g. going up in resolution shouldn't give so bad quality that we should 525 // (e.g. going up in resolution shouldn't give so bad quality that we should
604 // go back down). 526 // go back down).
605 const int kLowQpThreshold = 29; 527 const int kLowQpThreshold = 29;
(...skipping 174 matching lines...)
780 vpx_enc_frame_flags_t flags[kMaxSimulcastStreams]; 702 vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
781 for (size_t i = 0; i < encoders_.size(); ++i) { 703 for (size_t i = 0; i < encoders_.size(); ++i) {
782 int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp()); 704 int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp());
783 if (ret < 0) { 705 if (ret < 0) {
784 // Drop this frame. 706 // Drop this frame.
785 return WEBRTC_VIDEO_CODEC_OK; 707 return WEBRTC_VIDEO_CODEC_OK;
786 } 708 }
787 flags[i] = ret; 709 flags[i] = ret;
788 } 710 }
789 bool send_key_frame = false; 711 bool send_key_frame = false;
790 for (size_t i = 0; i < key_frame_request_.size() && i < send_stream_.size(); 712 size_t num_entries = std::min(frame_types ? frame_types->size() : 0,
791 ++i) { 713 simulcast_state_.NumStreams());
792 if (key_frame_request_[i] && send_stream_[i]) { 714 for (size_t i = 0; i < num_entries; ++i) {
793 send_key_frame = true; 715 send_key_frame |= simulcast_state_.GetAndResetKeyFrameRequest(i) ||
794 break; 716 (frame_types && (*frame_types)[i] == kVideoFrameKey);
795 }
796 } 717 }
797 if (!send_key_frame && frame_types) { 718
798 for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
799 ++i) {
800 if ((*frame_types)[i] == kVideoFrameKey && send_stream_[i]) {
801 send_key_frame = true;
802 break;
803 }
804 }
805 }
806 // The flag modification below (due to forced key frame, RPS, etc.,) for now 719 // The flag modification below (due to forced key frame, RPS, etc.,) for now
807 // will be the same for all encoders/spatial layers. 720 // will be the same for all encoders/spatial layers.
808 // TODO(marpan/holmer): Allow for key frame request to be set per encoder. 721 // TODO(marpan/holmer): Allow for key frame request to be set per encoder.
809 bool only_predict_from_key_frame = false; 722 bool only_predict_from_key_frame = false;
810 if (send_key_frame) { 723 if (send_key_frame) {
811 // Adapt the size of the key frame when in screenshare with 1 temporal 724 // Adapt the size of the key frame when in screenshare with 1 temporal
812 // layer. 725 // layer.
813 if (encoders_.size() == 1 && codec_.mode == kScreensharing && 726 if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
814 codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) { 727 codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
815 const uint32_t forceKeyFrameIntraTh = 100; 728 const uint32_t forceKeyFrameIntraTh = 100;
816 vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT, 729 vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
817 forceKeyFrameIntraTh); 730 forceKeyFrameIntraTh);
818 } 731 }
819 // Key frame request from caller. 732 // Key frame request from caller.
820 // Will update both golden and alt-ref. 733 // Will update both golden and alt-ref.
821 for (size_t i = 0; i < encoders_.size(); ++i) { 734 for (size_t i = 0; i < encoders_.size(); ++i) {
822 flags[i] = VPX_EFLAG_FORCE_KF; 735 flags[i] = VPX_EFLAG_FORCE_KF;
823 } 736 }
824 std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
825 } else if (codec_specific_info && 737 } else if (codec_specific_info &&
826 codec_specific_info->codecType == kVideoCodecVP8) { 738 codec_specific_info->codecType == kVideoCodecVP8) {
827 if (feedback_mode_) { 739 if (feedback_mode_) {
828 // Handle RPSI and SLI messages and set up the appropriate encode flags. 740 // Handle RPSI and SLI messages and set up the appropriate encode flags.
829 bool sendRefresh = false; 741 bool sendRefresh = false;
830 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) { 742 if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
831 rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI); 743 rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
832 } 744 }
833 if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) { 745 if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
834 sendRefresh = rps_.ReceivedSLI(input_image.timestamp()); 746 sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
(...skipping 119 matching lines...)
954 bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) || 866 bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
955 only_predicting_from_key_frame; 867 only_predicting_from_key_frame;
956 temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point, 868 temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
957 vp8Info, timestamp); 869 vp8Info, timestamp);
958 // Prepare next. 870 // Prepare next.
959 picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF; 871 picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
960 } 872 }
961 873
962 int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image, 874 int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
963 bool only_predicting_from_key_frame) { 875 bool only_predicting_from_key_frame) {
964 int bw_resolutions_disabled = 876 int bw_resolutions_disabled = (!encoders_.empty())
965 (encoders_.size() > 1) ? NumStreamsDisabled(send_stream_) : -1; 877 ? (simulcast_state_.NumStreams() -
878 simulcast_state_.NumSendingStreams())
879 : -1;
966 880
967 int stream_idx = static_cast<int>(encoders_.size()) - 1; 881 int stream_idx = static_cast<int>(encoders_.size()) - 1;
968 int result = WEBRTC_VIDEO_CODEC_OK; 882 int result = WEBRTC_VIDEO_CODEC_OK;
969 for (size_t encoder_idx = 0; encoder_idx < encoders_.size(); 883 for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
970 ++encoder_idx, --stream_idx) { 884 ++encoder_idx, --stream_idx) {
971 vpx_codec_iter_t iter = NULL; 885 vpx_codec_iter_t iter = NULL;
972 int part_idx = 0; 886 int part_idx = 0;
973 encoded_images_[encoder_idx]._length = 0; 887 encoded_images_[encoder_idx]._length = 0;
974 encoded_images_[encoder_idx]._frameType = kVideoFrameDelta; 888 encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
975 RTPFragmentationHeader frag_info; 889 RTPFragmentationHeader frag_info;
(...skipping 45 matching lines...)
1021 encoded_images_[encoder_idx]._timeStamp = input_image.timestamp(); 935 encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
1022 encoded_images_[encoder_idx].capture_time_ms_ = 936 encoded_images_[encoder_idx].capture_time_ms_ =
1023 input_image.render_time_ms(); 937 input_image.render_time_ms();
1024 encoded_images_[encoder_idx].rotation_ = input_image.rotation(); 938 encoded_images_[encoder_idx].rotation_ = input_image.rotation();
1025 939
1026 int qp = -1; 940 int qp = -1;
1027 vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp); 941 vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
1028 temporal_layers_[stream_idx]->FrameEncoded( 942 temporal_layers_[stream_idx]->FrameEncoded(
1029 encoded_images_[encoder_idx]._length, 943 encoded_images_[encoder_idx]._length,
1030 encoded_images_[encoder_idx]._timeStamp, qp); 944 encoded_images_[encoder_idx]._timeStamp, qp);
1031 if (send_stream_[stream_idx]) { 945 if (simulcast_state_.IsSending(stream_idx)) {
1032 if (encoded_images_[encoder_idx]._length > 0) { 946 if (encoded_images_[encoder_idx]._length > 0) {
1033 TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx, 947 TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
1034 encoded_images_[encoder_idx]._length); 948 encoded_images_[encoder_idx]._length);
1035 encoded_images_[encoder_idx]._encodedHeight = 949 encoded_images_[encoder_idx]._encodedHeight =
1036 codec_.simulcastStream[stream_idx].height; 950 codec_.simulcastStream[stream_idx].height;
1037 encoded_images_[encoder_idx]._encodedWidth = 951 encoded_images_[encoder_idx]._encodedWidth =
1038 codec_.simulcastStream[stream_idx].width; 952 codec_.simulcastStream[stream_idx].width;
1039 encoded_images_[encoder_idx] 953 encoded_images_[encoder_idx]
1040 .adapt_reason_.quality_resolution_downscales = 954 .adapt_reason_.quality_resolution_downscales =
1041 quality_scaler_enabled_ ? quality_scaler_.downscale_shift() : -1; 955 quality_scaler_enabled_ ? quality_scaler_.downscale_shift() : -1;
1042 // Report once per frame (lowest stream always sent). 956 // Report once per frame (lowest stream always sent).
1043 encoded_images_[encoder_idx].adapt_reason_.bw_resolutions_disabled = 957 encoded_images_[encoder_idx].adapt_reason_.bw_resolutions_disabled =
1044 (stream_idx == 0) ? bw_resolutions_disabled : -1; 958 (stream_idx == 0) ? bw_resolutions_disabled : -1;
1045 int qp_128 = -1; 959 int qp_128 = -1;
1046 vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER, 960 vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER,
1047 &qp_128); 961 &qp_128);
1048 encoded_images_[encoder_idx].qp_ = qp_128; 962 encoded_images_[encoder_idx].qp_ = qp_128;
1049 encoded_complete_callback_->Encoded(encoded_images_[encoder_idx], 963 encoded_complete_callback_->Encoded(encoded_images_[encoder_idx],
1050 &codec_specific, &frag_info); 964 &codec_specific, &frag_info);
1051 } else if (codec_.mode == kScreensharing) { 965 } else if (codec_.mode == kScreensharing) {
1052 result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT; 966 result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
1053 } 967 }
1054 } 968 }
1055 } 969 }
1056 if (encoders_.size() == 1 && send_stream_[0]) { 970 if (encoders_.size() == 1 && simulcast_state_.IsSending(0)) {
1057 if (encoded_images_[0]._length > 0) { 971 if (encoded_images_[0]._length > 0) {
1058 int qp_128; 972 int qp_128;
1059 vpx_codec_control(&encoders_[0], VP8E_GET_LAST_QUANTIZER, &qp_128); 973 vpx_codec_control(&encoders_[0], VP8E_GET_LAST_QUANTIZER, &qp_128);
1060 quality_scaler_.ReportQP(qp_128); 974 quality_scaler_.ReportQP(qp_128);
1061 } else { 975 } else {
1062 quality_scaler_.ReportDroppedFrame(); 976 quality_scaler_.ReportDroppedFrame();
1063 } 977 }
1064 } 978 }
1065 return result; 979 return result;
1066 } 980 }
(...skipping 344 matching lines...)
1411 return -1; 1325 return -1;
1412 } 1326 }
1413 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) != 1327 if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
1414 VPX_CODEC_OK) { 1328 VPX_CODEC_OK) {
1415 return -1; 1329 return -1;
1416 } 1330 }
1417 return 0; 1331 return 0;
1418 } 1332 }
1419 1333
1420 } // namespace webrtc 1334 } // namespace webrtc