Chromium Code Reviews

Unified diff: webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc

Issue 1913073002: Extract common simulcast logic from VP8 wrapper and simulcast adapter (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Bug fix (created 4 years, 7 months ago)
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(... skipping 10 unchanged lines ...)

 #include "webrtc/base/checks.h"
 #include "webrtc/base/trace_event.h"
 #include "webrtc/common_types.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/modules/include/module_common_types.h"
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
 #include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
 #include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
+#include "webrtc/modules/video_coding/utility/simulcast_state.h"
 #include "webrtc/system_wrappers/include/clock.h"
 #include "webrtc/system_wrappers/include/tick_util.h"

 namespace webrtc {
 namespace {

 enum { kVp8ErrorPropagationTh = 30 };
 enum { kVp832ByteAlign = 32 };

 // VP8 denoiser states.
(... skipping 11 unchanged lines ...)
 int GCD(int a, int b) {
   int c = a % b;
   while (c != 0) {
     a = b;
     b = c;
     c = a % b;
   }
   return b;
 }

-std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
-                                       int bitrate_to_allocate_kbps) {
-  if (codec.numberOfSimulcastStreams <= 1) {
-    return std::vector<int>(1, bitrate_to_allocate_kbps);
-  }
-
-  std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
-  // Allocate min -> target bitrates as long as we have bitrate to spend.
-  size_t last_active_stream = 0;
-  for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
-                     bitrate_to_allocate_kbps >=
-                         static_cast<int>(codec.simulcastStream[i].minBitrate);
-       ++i) {
-    last_active_stream = i;
-    int allocated_bitrate_kbps =
-        std::min(static_cast<int>(codec.simulcastStream[i].targetBitrate),
-                 bitrate_to_allocate_kbps);
-    bitrates_kbps[i] = allocated_bitrate_kbps;
-    bitrate_to_allocate_kbps -= allocated_bitrate_kbps;
-  }
-
-  // Spend additional bits on the highest-quality active layer, up to max
-  // bitrate.
-  // TODO(pbos): Consider spending additional bits on last_active_stream-1 down
-  // to 0 and not just the top layer when we have additional bitrate to spend.
-  int allocated_bitrate_kbps = std::min(
-      static_cast<int>(codec.simulcastStream[last_active_stream].maxBitrate -
-                       bitrates_kbps[last_active_stream]),
-      bitrate_to_allocate_kbps);
-  bitrates_kbps[last_active_stream] += allocated_bitrate_kbps;
-  bitrate_to_allocate_kbps -= allocated_bitrate_kbps;
-
-  // Make sure we can always send something. Suspending below min bitrate is
-  // controlled outside the codec implementation and is not overriden by this.
-  if (bitrates_kbps[0] < static_cast<int>(codec.simulcastStream[0].minBitrate))
-    bitrates_kbps[0] = static_cast<int>(codec.simulcastStream[0].minBitrate);
-
-  return bitrates_kbps;
-}
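
The removed helper above is the allocation policy this CL moves out of the VP8 wrapper: fill streams from lowest to highest up to their target bitrate while the remaining budget still covers the next stream's minimum, give any leftover to the highest active stream capped at its max, and never let stream 0 fall below its minimum. As a reference for reviewing the extracted class, here is a minimal standalone sketch of that same policy with a worked example; StreamConfig and AllocateKbps are illustrative names, not part of this change.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical, simplified stand-in for the SimulcastStream fields the removed
// helper read (minBitrate/targetBitrate/maxBitrate, all in kbps).
struct StreamConfig {
  int min_kbps;
  int target_kbps;
  int max_kbps;
};

// Mirrors the removed GetStreamBitratesKbps() policy.
std::vector<int> AllocateKbps(const std::vector<StreamConfig>& streams,
                              int budget_kbps) {
  std::vector<int> out(streams.size(), 0);
  size_t last_active = 0;
  // 1) Walk streams low-to-high, granting up to target while the remaining
  //    budget still covers the next stream's minimum.
  for (size_t i = 0;
       i < streams.size() && budget_kbps >= streams[i].min_kbps; ++i) {
    last_active = i;
    out[i] = std::min(streams[i].target_kbps, budget_kbps);
    budget_kbps -= out[i];
  }
  // 2) Give any leftover to the highest active stream, capped at its max.
  int top_up = std::min(streams[last_active].max_kbps - out[last_active],
                        budget_kbps);
  out[last_active] += top_up;
  // 3) Never let the base stream fall below its minimum.
  if (out[0] < streams[0].min_kbps)
    out[0] = streams[0].min_kbps;
  return out;
}

int main() {
  // Three simulcast layers; 900 kbps available.
  std::vector<StreamConfig> streams = {
      {50, 150, 200}, {150, 500, 700}, {600, 1700, 2500}};
  std::vector<int> kbps = AllocateKbps(streams, 900);
  for (size_t i = 0; i < kbps.size(); ++i)
    std::printf("stream %zu: %d kbps\n", i, kbps[i]);
  // Prints 150 / 700 / 0: the top layer stays disabled until the budget
  // covers its 600 kbps minimum.
  return 0;
}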
-
-uint32_t SumStreamMaxBitrate(int streams, const VideoCodec& codec) {
-  uint32_t bitrate_sum = 0;
-  for (int i = 0; i < streams; ++i) {
-    bitrate_sum += codec.simulcastStream[i].maxBitrate;
-  }
-  return bitrate_sum;
-}
-
-int NumberOfStreams(const VideoCodec& codec) {
-  int streams =
-      codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
-  uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
-  if (simulcast_max_bitrate == 0) {
-    streams = 1;
-  }
-  return streams;
-}
-
 bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
   if (codec.width != codec.simulcastStream[num_streams - 1].width ||
       codec.height != codec.simulcastStream[num_streams - 1].height) {
     return false;
   }
   for (int i = 0; i < num_streams; ++i) {
     if (codec.width * codec.simulcastStream[i].height !=
         codec.height * codec.simulcastStream[i].width) {
       return false;
     }
   }
   return true;
 }
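
The kept ValidSimulcastResolutions() check above stays integer-only by cross-multiplying instead of comparing width/height ratios in floating point. A small self-contained illustration (Layer and SameAspectRatio are made-up names for the sketch, not WebRTC types):

#include <cassert>

// Minimal stand-in for the two fields the check reads; the real
// VideoCodec/SimulcastStream structs carry many more members.
struct Layer { int width; int height; };

// Same test as in the code above: codec_w / codec_h == layer.w / layer.h,
// evaluated exactly with integer cross-multiplication.
bool SameAspectRatio(int codec_w, int codec_h, const Layer& layer) {
  return codec_w * layer.height == codec_h * layer.width;
}

int main() {
  assert(SameAspectRatio(1280, 720, {640, 360}));   // 16:9 vs 16:9 -> ok
  assert(SameAspectRatio(1280, 720, {320, 180}));   // still 16:9 -> ok
  assert(!SameAspectRatio(1280, 720, {640, 480}));  // 16:9 vs 4:3 -> rejected
  return 0;
}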

-int NumStreamsDisabled(const std::vector<bool>& streams) {
-  int num_disabled = 0;
-  for (bool stream : streams) {
-    if (!stream)
-      ++num_disabled;
-  }
-  return num_disabled;
-}
 }  // namespace

 VP8Encoder* VP8Encoder::Create() {
   return new VP8EncoderImpl();
 }

 VP8Decoder* VP8Decoder::Create() {
   return new VP8DecoderImpl();
 }

-const float kTl1MaxTimeToDropFrames = 20.0f;
-
 VP8EncoderImpl::VP8EncoderImpl()
     : encoded_complete_callback_(NULL),
       inited_(false),
       timestamp_(0),
       feedback_mode_(false),
       qp_max_(56),  // Setting for max quantizer.
       cpu_speed_default_(-6),
       rc_max_intra_target_(0),
       token_partitions_(VP8_ONE_TOKENPARTITION),
       down_scale_requested_(false),
       down_scale_bitrate_(0),
-      tl0_frame_dropper_(),
-      tl1_frame_dropper_(kTl1MaxTimeToDropFrames),
-      key_frame_request_(kMaxSimulcastStreams, false),
       quality_scaler_enabled_(false) {
   uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
   srand(seed);

   picture_id_.reserve(kMaxSimulcastStreams);
   last_key_frame_picture_id_.reserve(kMaxSimulcastStreams);
   temporal_layers_.reserve(kMaxSimulcastStreams);
   raw_images_.reserve(kMaxSimulcastStreams);
   encoded_images_.reserve(kMaxSimulcastStreams);
-  send_stream_.reserve(kMaxSimulcastStreams);
   cpu_speed_.assign(kMaxSimulcastStreams, -6);  // Set default to -6.
   encoders_.reserve(kMaxSimulcastStreams);
   configurations_.reserve(kMaxSimulcastStreams);
   downsampling_factors_.reserve(kMaxSimulcastStreams);
 }

 VP8EncoderImpl::~VP8EncoderImpl() {
   Release();
 }

 int VP8EncoderImpl::Release() {
   int ret_val = WEBRTC_VIDEO_CODEC_OK;

   while (!encoded_images_.empty()) {
     EncodedImage& image = encoded_images_.back();
     delete[] image._buffer;
     encoded_images_.pop_back();
   }
   while (!encoders_.empty()) {
     vpx_codec_ctx_t& encoder = encoders_.back();
     if (vpx_codec_destroy(&encoder)) {
       ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
     }
     encoders_.pop_back();
   }
   configurations_.clear();
-  send_stream_.clear();
+  simulcast_state_.reset();
   cpu_speed_.clear();
   while (!raw_images_.empty()) {
     vpx_img_free(&raw_images_.back());
     raw_images_.pop_back();
   }
   while (!temporal_layers_.empty()) {
     delete temporal_layers_.back();
     temporal_layers_.pop_back();
   }
   inited_ = false;
(... skipping 28 unchanged lines ...)
     // Calculate a rough limit for when to trigger a potental down scale.
     uint32_t k_pixels_per_frame = codec_.width * codec_.height / 1000;
     // TODO(pwestin): we currently lack CAMA, this is a temporary fix to work
     // around the current limitations.
     // Only trigger keyframes if we are allowed to scale down.
     if (configurations_[0].rc_resize_allowed) {
       if (!down_scale_requested_) {
         if (k_pixels_per_frame > new_bitrate_kbit) {
           down_scale_requested_ = true;
           down_scale_bitrate_ = new_bitrate_kbit;
-          key_frame_request_[0] = true;
+          simulcast_state_->RequestKeyFrame(0);
         }
       } else {
         if (new_bitrate_kbit > (2 * down_scale_bitrate_) ||
             new_bitrate_kbit < (down_scale_bitrate_ / 2)) {
           down_scale_requested_ = false;
         }
       }
     }
   } else {
     // If we have more than 1 stream, reduce the qp_max for the low resolution
     // stream if frame rate is not too low. The trade-off with lower qp_max is
     // possibly more dropped frames, so we only do this if the frame rate is
     // above some threshold (base temporal layer is down to 1/4 for 3 layers).
     // We may want to condition this on bitrate later.
     if (new_framerate > 20) {
       configurations_[encoders_.size() - 1].rc_max_quantizer = 45;
     } else {
       // Go back to default value set in InitEncode.
       configurations_[encoders_.size() - 1].rc_max_quantizer = qp_max_;
     }
   }

-  std::vector<int> stream_bitrates =
-      GetStreamBitratesKbps(codec_, new_bitrate_kbit);
-  size_t stream_idx = encoders_.size() - 1;
-  for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
-    if (encoders_.size() > 1)
-      SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
-
-    unsigned int target_bitrate = stream_bitrates[stream_idx];
+  simulcast_state_->AllocateBitrate(new_bitrate_kbit);
+  for (const SimulcastState::Stream& stream : *simulcast_state_) {
+    unsigned int target_bitrate = stream.allocated_rate_kbps;
     unsigned int max_bitrate = codec_.maxBitrate;
-    int framerate = new_framerate;
     // TODO(holmer): This is a temporary hack for screensharing, where we
     // interpret the startBitrate as the encoder target bitrate. This is
     // to allow for a different max bitrate, so if the codec can't meet
     // the target we still allow it to overshoot up to the max before dropping
     // frames. This hack should be improved.
-    if (codec_.targetBitrate > 0 &&
+    if (codec_.targetBitrate > 0 && simulcast_state_->NumStreams() == 1 &&
         (codec_.codecSpecific.VP8.numberOfTemporalLayers == 2 ||
          codec_.simulcastStream[0].numberOfTemporalLayers == 2)) {
       int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate);
       max_bitrate = std::min(codec_.maxBitrate, target_bitrate);
       target_bitrate = tl0_bitrate;
     }
+    int i = simulcast_state_->NumStreams() - stream.idx - 1;
     configurations_[i].rc_target_bitrate = target_bitrate;
-    temporal_layers_[stream_idx]->ConfigureBitrates(
-        target_bitrate, max_bitrate, framerate, &configurations_[i]);
+    temporal_layers_[stream.idx]->ConfigureBitrates(
+        target_bitrate, max_bitrate, new_framerate, &configurations_[i]);
     if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
   }
   quality_scaler_.ReportFramerate(new_framerate);
   return WEBRTC_VIDEO_CODEC_OK;
 }
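
In the rewritten SetRates() loop above, streams are iterated lowest-resolution-first while configurations_ and encoders_ remain ordered highest-resolution-first, hence the index reversal i = NumStreams() - stream.idx - 1. The sketch below mocks the interface the call sites appear to rely on; the real SimulcastState lives in webrtc/modules/video_coding/utility/simulcast_state.h, which is not shown in this diff, so the member names and the placeholder even bitrate split here are assumptions, not its implementation.

#include <cstddef>
#include <cstdio>
#include <vector>

// Rough mock of the surface inferred from the call sites above.
class MockSimulcastState {
 public:
  struct Stream {
    size_t idx;               // 0 = lowest resolution stream.
    int allocated_rate_kbps;  // Filled in by AllocateBitrate().
  };

  explicit MockSimulcastState(size_t num_streams) {
    for (size_t i = 0; i < num_streams; ++i)
      streams_.push_back({i, 0});
  }
  size_t NumStreams() const { return streams_.size(); }
  void AllocateBitrate(int total_kbps) {
    // Placeholder even split; the real class presumably reuses the
    // min/target/max policy of the removed GetStreamBitratesKbps().
    for (Stream& s : streams_)
      s.allocated_rate_kbps = total_kbps / static_cast<int>(streams_.size());
  }
  std::vector<Stream>::const_iterator begin() const { return streams_.begin(); }
  std::vector<Stream>::const_iterator end() const { return streams_.end(); }

 private:
  std::vector<Stream> streams_;
};

int main() {
  // Streams are indexed lowest-resolution-first, but the encoder configs in
  // VP8EncoderImpl are ordered highest-resolution-first, hence the reversal.
  MockSimulcastState state(3);
  state.AllocateBitrate(900);
  for (const MockSimulcastState::Stream& stream : state) {
    size_t encoder_idx = state.NumStreams() - stream.idx - 1;
    std::printf("stream %zu -> configurations_[%zu], %d kbps\n",
                stream.idx, encoder_idx, stream.allocated_rate_kbps);
  }
  return 0;
}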

 const char* VP8EncoderImpl::ImplementationName() const {
   return "libvpx";
 }

-void VP8EncoderImpl::SetStreamState(bool send_stream,
-                                    int stream_idx) {
-  if (send_stream && !send_stream_[stream_idx]) {
-    // Need a key frame if we have not sent this stream before.
-    key_frame_request_[stream_idx] = true;
-  }
-  send_stream_[stream_idx] = send_stream;
-}
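
The removed SetStreamState() above encoded one rule worth checking in the new class: enabling a previously disabled stream must trigger a key frame, because the receiver has no reference frames for that stream yet. The tiny illustration below shows that rule in isolation; SendState is a made-up stand-in, not SimulcastState, whose SetSending() is only presumed to keep the same behavior.

#include <cassert>
#include <cstddef>
#include <vector>

// Model of the rule: a not-sending -> sending transition sets a pending
// key-frame request for that stream.
struct SendState {
  std::vector<bool> sending;
  std::vector<bool> key_frame_request;

  explicit SendState(size_t streams)
      : sending(streams, false), key_frame_request(streams, false) {}

  void SetSending(size_t idx, bool send) {
    if (send && !sending[idx])
      key_frame_request[idx] = true;  // First frame of a re-enabled stream.
    sending[idx] = send;
  }
};

int main() {
  SendState state(2);
  state.SetSending(1, true);           // Newly enabled -> key frame needed.
  assert(state.key_frame_request[1]);
  state.key_frame_request[1] = false;  // Pretend the key frame was sent.
  state.SetSending(1, true);           // Already sending -> no new request.
  assert(!state.key_frame_request[1]);
  return 0;
}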
-
 void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
                                          int num_temporal_layers,
                                          const VideoCodec& codec) {
   TemporalLayersFactory default_factory;
   const TemporalLayersFactory* tl_factory = codec.codecSpecific.VP8.tl_factory;
   if (!tl_factory)
     tl_factory = &default_factory;
   if (num_streams == 1) {
     if (codec.mode == kScreensharing) {
       // Special mode when screensharing on a single stream.
(... skipping 39 unchanged lines ...)
   }
   if (inst->codecSpecific.VP8.automaticResizeOn &&
       inst->numberOfSimulcastStreams > 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   int retVal = Release();
   if (retVal < 0) {
     return retVal;
   }

-  int number_of_streams = NumberOfStreams(*inst);
+  std::unique_ptr<SimulcastState> new_simulcast_state(
+      new SimulcastState(*inst));
+
+  int number_of_streams = new_simulcast_state->NumStreams();
   bool doing_simulcast = (number_of_streams > 1);

   if (doing_simulcast && !ValidSimulcastResolutions(*inst, number_of_streams)) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }

   int num_temporal_layers =
       doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
                       : inst->codecSpecific.VP8.numberOfTemporalLayers;

   // TODO(andresp): crash if num temporal layers is bananas.
   if (num_temporal_layers < 1)
     num_temporal_layers = 1;
   SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);

   feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;

   timestamp_ = 0;
   codec_ = *inst;
+  simulcast_state_ = std::move(new_simulcast_state);

   // Code expects simulcastStream resolutions to be correct, make sure they are
   // filled even when there are no simulcast layers.
   if (codec_.numberOfSimulcastStreams == 0) {
     codec_.simulcastStream[0].width = codec_.width;
     codec_.simulcastStream[0].height = codec_.height;
   }

   picture_id_.resize(number_of_streams);
   last_key_frame_picture_id_.resize(number_of_streams);
   encoded_images_.resize(number_of_streams);
   encoders_.resize(number_of_streams);
   configurations_.resize(number_of_streams);
   downsampling_factors_.resize(number_of_streams);
   raw_images_.resize(number_of_streams);
-  send_stream_.resize(number_of_streams);
-  send_stream_[0] = true;  // For non-simulcast case.
   cpu_speed_.resize(number_of_streams);
-  std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);

   int idx = number_of_streams - 1;
   for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
     int gcd = GCD(inst->simulcastStream[idx].width,
                   inst->simulcastStream[idx - 1].width);
     downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
     downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
-    send_stream_[i] = false;
+    simulcast_state_->SetSending(i, false);
   }
   if (number_of_streams > 1) {
-    send_stream_[number_of_streams - 1] = false;
+    simulcast_state_->SetSending(number_of_streams - 1, false);
     downsampling_factors_[number_of_streams - 1].num = 1;
     downsampling_factors_[number_of_streams - 1].den = 1;
   }
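
The loop above derives per-stream downsampling factors as reduced fractions of consecutive simulcast widths, using the GCD helper kept earlier in this file. A worked example for a typical 320/640/1280 configuration (the widths and stream count are illustrative):

#include <cstdio>

// Same Euclidean GCD as the anonymous-namespace helper kept by this CL.
int GCD(int a, int b) {
  int c = a % b;
  while (c != 0) {
    a = b;
    b = c;
    c = a % b;
  }
  return b;
}

int main() {
  // simulcastStream[] is ordered lowest resolution first. For each encoder
  // (highest resolution first), InitEncode() stores the ratio to the next
  // lower stream as a reduced fraction num/den.
  const int widths[] = {320, 640, 1280};
  const int num_streams = 3;
  for (int i = 0, idx = num_streams - 1; i < num_streams - 1; ++i, --idx) {
    int gcd = GCD(widths[idx], widths[idx - 1]);
    std::printf("downsampling_factors_[%d] = %d/%d\n", i, widths[idx] / gcd,
                widths[idx - 1] / gcd);
  }
  // Prints 2/1 twice: each lower stream is half the width of the one above.
  // The last (lowest resolution) entry is then set to 1/1 in InitEncode().
  return 0;
}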
   for (int i = 0; i < number_of_streams; ++i) {
     // Random start, 16 bits is enough.
     picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;  // NOLINT
     last_key_frame_picture_id_[i] = -1;
     // allocate memory for encoded image
     if (encoded_images_[i]._buffer != NULL) {
       delete[] encoded_images_[i]._buffer;
(... skipping 114 unchanged lines ...)
   // is meaningless (no memory allocation is done here).
   vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
                NULL);

   if (encoders_.size() == 1) {
     configurations_[0].rc_target_bitrate = inst->startBitrate;
     temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
                                            inst->maxFramerate,
                                            &configurations_[0]);
   } else {
+    simulcast_state_->AllocateBitrate(inst->startBitrate);
     // Note the order we use is different from webm, we have lowest resolution
     // at position 0 and they have highest resolution at position 0.
     int stream_idx = encoders_.size() - 1;
-    std::vector<int> stream_bitrates =
-        GetStreamBitratesKbps(codec_, inst->startBitrate);
-    SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
-    configurations_[0].rc_target_bitrate = stream_bitrates[stream_idx];
+    int stream_bitrate_kbps = simulcast_state_->AllocatedRate(stream_idx);
+    configurations_[0].rc_target_bitrate = stream_bitrate_kbps;
     temporal_layers_[stream_idx]->ConfigureBitrates(
-        stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate,
+        stream_bitrate_kbps, inst->maxBitrate, inst->maxFramerate,
         &configurations_[0]);
     --stream_idx;
     for (size_t i = 1; i < encoders_.size(); ++i, --stream_idx) {
       memcpy(&configurations_[i], &configurations_[0],
              sizeof(configurations_[0]));

       configurations_[i].g_w = inst->simulcastStream[stream_idx].width;
       configurations_[i].g_h = inst->simulcastStream[stream_idx].height;

       // Use 1 thread for lower resolutions.
       configurations_[i].g_threads = 1;

       // Setting alignment to 32 - as that ensures at least 16 for all
       // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for
       // the y plane, but only half of it to the u and v planes.
       vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420,
                     inst->simulcastStream[stream_idx].width,
                     inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
-      SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
-      configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx];
+      stream_bitrate_kbps = simulcast_state_->AllocatedRate(stream_idx);
+      configurations_[i].rc_target_bitrate = stream_bitrate_kbps;
       temporal_layers_[stream_idx]->ConfigureBitrates(
-          stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate,
+          stream_bitrate_kbps, inst->maxBitrate, inst->maxFramerate,
           &configurations_[i]);
     }
   }

   rps_.Init();
   // Disable both high-QP limits and framedropping. Both are handled by libvpx
   // internally.
   // QP thresholds are chosen to be high enough to be hit in practice when
   // quality is good, but also low enough to not cause a flip-flop behavior
   // (e.g. going up in resolution shouldn't give so bad quality that we should
(... skipping 176 unchanged lines ...)
   vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
   for (size_t i = 0; i < encoders_.size(); ++i) {
     int ret = temporal_layers_[i]->EncodeFlags(input_image.timestamp());
     if (ret < 0) {
       // Drop this frame.
       return WEBRTC_VIDEO_CODEC_OK;
     }
     flags[i] = ret;
   }
   bool send_key_frame = false;
-  for (size_t i = 0; i < key_frame_request_.size() && i < send_stream_.size();
-       ++i) {
-    if (key_frame_request_[i] && send_stream_[i]) {
-      send_key_frame = true;
-      break;
-    }
+  size_t num_entries = std::min(frame_types ? frame_types->size() : 0,
+                                simulcast_state_->NumStreams());
+  for (size_t i = 0; i < num_entries; ++i) {
+    send_key_frame |= simulcast_state_->GetAndResetKeyFrameRequest(i) ||
+                      (frame_types && (*frame_types)[i] == kVideoFrameKey);
   }
-  if (!send_key_frame && frame_types) {
-    for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
-         ++i) {
-      if ((*frame_types)[i] == kVideoFrameKey && send_stream_[i]) {
-        send_key_frame = true;
-        break;
-      }
-    }
-  }
+
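
The rewritten scan above folds the internal per-stream key-frame requests and the caller's frame_types into a single pass; from the call site, GetAndResetKeyFrameRequest() is assumed to clear the pending flag as it reads it, and unlike the old loop the sending state is no longer consulted. A self-contained sketch of the same fold with stand-in types (WantsKeyFrame and KeyFrameRequests are illustrative, not WebRTC code):

#include <algorithm>
#include <cstddef>
#include <vector>

enum FrameType { kVideoFrameDelta, kVideoFrameKey };

// Stand-in for the per-stream pending-key-frame flags that the diff moves
// into SimulcastState.
struct KeyFrameRequests {
  std::vector<bool> pending;
  bool GetAndReset(size_t idx) {
    bool value = pending[idx];
    pending[idx] = false;
    return value;
  }
};

// Mirrors the rewritten scan: OR internal per-stream requests with key frames
// requested by the caller via frame_types, over at most NumStreams entries.
bool WantsKeyFrame(KeyFrameRequests* requests,
                   const std::vector<FrameType>* frame_types,
                   size_t num_streams) {
  size_t num_entries =
      std::min(frame_types ? frame_types->size() : 0, num_streams);
  bool send_key_frame = false;
  for (size_t i = 0; i < num_entries; ++i) {
    send_key_frame |= requests->GetAndReset(i) ||
                      (frame_types && (*frame_types)[i] == kVideoFrameKey);
  }
  return send_key_frame;
}

int main() {
  KeyFrameRequests requests{{false, true, false}};
  std::vector<FrameType> frame_types(3, kVideoFrameDelta);
  return WantsKeyFrame(&requests, &frame_types, 3) ? 0 : 1;  // Stream 1 asks.
}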
   // The flag modification below (due to forced key frame, RPS, etc.,) for now
   // will be the same for all encoders/spatial layers.
   // TODO(marpan/holmer): Allow for key frame request to be set per encoder.
   bool only_predict_from_key_frame = false;
   if (send_key_frame) {
     // Adapt the size of the key frame when in screenshare with 1 temporal
     // layer.
     if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
         codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
       const uint32_t forceKeyFrameIntraTh = 100;
       vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
                         forceKeyFrameIntraTh);
     }
     // Key frame request from caller.
     // Will update both golden and alt-ref.
     for (size_t i = 0; i < encoders_.size(); ++i) {
       flags[i] = VPX_EFLAG_FORCE_KF;
     }
-    std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
   } else if (codec_specific_info &&
              codec_specific_info->codecType == kVideoCodecVP8) {
     if (feedback_mode_) {
       // Handle RPSI and SLI messages and set up the appropriate encode flags.
       bool sendRefresh = false;
       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
         rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
       }
       if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
         sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
(... skipping 120 unchanged lines ...)
                                only_predicting_from_key_frame;
   temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
                                                       vp8Info, timestamp);
   // Prepare next.
   picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
 }

 int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
                                          bool only_predicting_from_key_frame) {
   int bw_resolutions_disabled =
-      (encoders_.size() > 1) ? NumStreamsDisabled(send_stream_) : -1;
+      (encoders_.size() > 1) ? simulcast_state_->NumSendingStreams() : -1;

pbos-webrtc (2016/04/29 21:23:27): This is not NumStreamsDisabled? Why are tests not...
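
To make the reviewer's point concrete: the old code fed a count of disabled streams into bw_resolutions_disabled, while the new call site passes NumSendingStreams(), which counts enabled ones. The sketch below reuses the removed NumStreamsDisabled() from this diff next to an assumed NumSendingStreams() of the same shape; a like-for-like replacement would presumably be NumStreams() - NumSendingStreams().

#include <cassert>
#include <vector>

// Old helper (removed above): counts streams that are NOT sending.
int NumStreamsDisabled(const std::vector<bool>& streams) {
  int num_disabled = 0;
  for (bool stream : streams) {
    if (!stream)
      ++num_disabled;
  }
  return num_disabled;
}

// What the new call site feeds in instead: a count of streams that ARE
// sending (free-function form assumed here for illustration only).
int NumSendingStreams(const std::vector<bool>& streams) {
  int num_sending = 0;
  for (bool stream : streams) {
    if (stream)
      ++num_sending;
  }
  return num_sending;
}

int main() {
  // Three simulcast streams, the top one switched off by the bitrate
  // allocator: the two counts differ, which is the reviewer's point.
  std::vector<bool> sending = {true, true, false};
  assert(NumStreamsDisabled(sending) == 1);
  assert(NumSendingStreams(sending) == 2);
  assert(static_cast<int>(sending.size()) - NumSendingStreams(sending) ==
         NumStreamsDisabled(sending));
  return 0;
}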

   int stream_idx = static_cast<int>(encoders_.size()) - 1;
   int result = WEBRTC_VIDEO_CODEC_OK;
   for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
     int part_idx = 0;
     encoded_images_[encoder_idx]._length = 0;
     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
     RTPFragmentationHeader frag_info;
(... skipping 45 unchanged lines ...)
     encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
     encoded_images_[encoder_idx].capture_time_ms_ =
         input_image.render_time_ms();
     encoded_images_[encoder_idx].rotation_ = input_image.rotation();

     int qp = -1;
     vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
     temporal_layers_[stream_idx]->FrameEncoded(
         encoded_images_[encoder_idx]._length,
         encoded_images_[encoder_idx]._timeStamp, qp);
-    if (send_stream_[stream_idx]) {
+    if (simulcast_state_->IsSending(stream_idx)) {
       if (encoded_images_[encoder_idx]._length > 0) {
         TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
                           encoded_images_[encoder_idx]._length);
         encoded_images_[encoder_idx]._encodedHeight =
             codec_.simulcastStream[stream_idx].height;
         encoded_images_[encoder_idx]._encodedWidth =
             codec_.simulcastStream[stream_idx].width;
         encoded_images_[encoder_idx]
             .adapt_reason_.quality_resolution_downscales =
             quality_scaler_enabled_ ? quality_scaler_.downscale_shift() : -1;
         // Report once per frame (lowest stream always sent).
         encoded_images_[encoder_idx].adapt_reason_.bw_resolutions_disabled =
             (stream_idx == 0) ? bw_resolutions_disabled : -1;
         int qp_128 = -1;
         vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER,
                           &qp_128);
         encoded_images_[encoder_idx].qp_ = qp_128;
         encoded_complete_callback_->Encoded(encoded_images_[encoder_idx],
                                             &codec_specific, &frag_info);
       } else if (codec_.mode == kScreensharing) {
         result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
       }
     }
   }
-  if (encoders_.size() == 1 && send_stream_[0]) {
+  if (encoders_.size() == 1 && simulcast_state_->IsSending(0)) {
     if (encoded_images_[0]._length > 0) {
       int qp_128;
       vpx_codec_control(&encoders_[0], VP8E_GET_LAST_QUANTIZER, &qp_128);
       quality_scaler_.ReportQP(qp_128);
     } else {
       quality_scaler_.ReportDroppedFrame();
     }
   }
   return result;
 }
(... skipping 344 unchanged lines ...)
     return -1;
   }
   if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
       VPX_CODEC_OK) {
     return -1;
   }
   return 0;
 }

 }  // namespace webrtc