OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 #include "webrtc/modules/video_coding/main/source/jitter_buffer.h" | |
11 | |
12 #include <assert.h> | |
13 | |
14 #include <algorithm> | |
15 #include <utility> | |
16 | |
17 #include "webrtc/base/checks.h" | |
18 #include "webrtc/base/logging.h" | |
19 #include "webrtc/base/trace_event.h" | |
20 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" | |
21 #include "webrtc/modules/video_coding/main/interface/video_coding.h" | |
22 #include "webrtc/modules/video_coding/main/source/frame_buffer.h" | |
23 #include "webrtc/modules/video_coding/main/source/inter_frame_delay.h" | |
24 #include "webrtc/modules/video_coding/main/source/internal_defines.h" | |
25 #include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h" | |
26 #include "webrtc/modules/video_coding/main/source/jitter_estimator.h" | |
27 #include "webrtc/modules/video_coding/main/source/packet.h" | |
28 #include "webrtc/system_wrappers/include/clock.h" | |
29 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | |
30 #include "webrtc/system_wrappers/include/event_wrapper.h" | |
31 #include "webrtc/system_wrappers/include/metrics.h" | |
32 | |
33 namespace webrtc { | |
34 | |
35 // Interval for updating SS data. | |
36 static const uint32_t kSsCleanupIntervalSec = 60; | |
37 | |
38 // Use this rtt if no value has been reported. | |
39 static const int64_t kDefaultRtt = 200; | |
40 | |
41 typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair; | |
42 | |
43 bool IsKeyFrame(FrameListPair pair) { | |
44 return pair.second->FrameType() == kVideoFrameKey; | |
45 } | |
46 | |
47 bool HasNonEmptyState(FrameListPair pair) { | |
48 return pair.second->GetState() != kStateEmpty; | |
49 } | |
50 | |
51 void FrameList::InsertFrame(VCMFrameBuffer* frame) { | |
52 insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame)); | |
53 } | |
54 | |
55 VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) { | |
56 FrameList::iterator it = find(timestamp); | |
57 if (it == end()) | |
58 return NULL; | |
59 VCMFrameBuffer* frame = it->second; | |
60 erase(it); | |
61 return frame; | |
62 } | |
63 | |
// Returns the frame with the oldest timestamp.
// Precondition: the list is non-empty — begin() is dereferenced
// unconditionally, so calling this on an empty list is undefined behavior.
VCMFrameBuffer* FrameList::Front() const {
  return begin()->second;
}
67 | |
// Returns the frame with the newest timestamp.
// Precondition: the list is non-empty — rbegin() is dereferenced
// unconditionally, so calling this on an empty list is undefined behavior.
VCMFrameBuffer* FrameList::Back() const {
  return rbegin()->second;
}
71 | |
// Drops frames from the front of the list until a key frame is found or the
// list is exhausted. Dropped frames are reset and handed to |free_frames|.
// On return *key_frame_it points at the key frame, or at end() if none was
// found. Returns the number of frames dropped.
int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
                                          UnorderedFrameList* free_frames) {
  int drop_count = 0;
  FrameList::iterator it = begin();
  while (!empty()) {
    // Throw at least one frame.
    it->second->Reset();
    free_frames->push_back(it->second);
    // Post-increment advances |it| before the old node is erased, keeping it
    // valid for the key-frame check below.
    erase(it++);
    ++drop_count;
    if (it != end() && it->second->FrameType() == kVideoFrameKey) {
      *key_frame_it = it;
      return drop_count;
    }
  }
  *key_frame_it = end();
  return drop_count;
}
90 | |
// Removes frames from the front of the list that are either older than the
// last decoded frame or empty (and therefore cannot be decoded). Removal
// stops at the first frame that is neither. Removed frames go to
// |free_frames|; empty frames may also advance |decoding_state|.
void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
                                        UnorderedFrameList* free_frames) {
  while (!empty()) {
    VCMFrameBuffer* oldest_frame = Front();
    bool remove_frame = false;
    // Keep the very last empty frame (size() > 1 guard) so the buffer never
    // drains completely on empty input.
    if (oldest_frame->GetState() == kStateEmpty && size() > 1) {
      // This frame is empty, try to update the last decoded state and drop it
      // if successful.
      remove_frame = decoding_state->UpdateEmptyFrame(oldest_frame);
    } else {
      remove_frame = decoding_state->IsOldFrame(oldest_frame);
    }
    if (!remove_frame) {
      break;
    }
    free_frames->push_back(oldest_frame);
    TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
                         oldest_frame->TimeStamp());
    erase(begin());
  }
}
112 | |
113 void FrameList::Reset(UnorderedFrameList* free_frames) { | |
114 while (!empty()) { | |
115 begin()->second->Reset(); | |
116 free_frames->push_back(begin()->second); | |
117 erase(begin()); | |
118 } | |
119 } | |
120 | |
121 bool Vp9SsMap::Insert(const VCMPacket& packet) { | |
122 if (!packet.codecSpecificHeader.codecHeader.VP9.ss_data_available) | |
123 return false; | |
124 | |
125 ss_map_[packet.timestamp] = packet.codecSpecificHeader.codecHeader.VP9.gof; | |
126 return true; | |
127 } | |
128 | |
// Discards all stored scalability-structure entries.
void Vp9SsMap::Reset() {
  ss_map_.clear();
}
132 | |
// Locates the SS entry that applies to |timestamp|: the entry with the
// newest key that is not newer than |timestamp|, using wraparound-aware
// comparison. The whole map is scanned (last match wins) because std::map's
// key order does not account for RTP timestamp wraparound, so a plain
// upper_bound cannot be used. Returns true and sets *it_out on a match.
bool Vp9SsMap::Find(uint32_t timestamp, SsMap::iterator* it_out) {
  bool found = false;
  for (SsMap::iterator it = ss_map_.begin(); it != ss_map_.end(); ++it) {
    if (it->first == timestamp || IsNewerTimestamp(timestamp, it->first)) {
      *it_out = it;
      found = true;
    }
  }
  return found;
}
143 | |
// Periodically prunes entries older than |timestamp|. All entries strictly
// before the newest applicable one are erased, and that remaining entry is
// re-keyed to |timestamp| so future cleanup intervals are measured from it.
void Vp9SsMap::RemoveOld(uint32_t timestamp) {
  if (!TimeForCleanup(timestamp))
    return;

  SsMap::iterator it;
  if (!Find(timestamp, &it))
    return;

  // Erase everything up to, but not including, the entry in use.
  ss_map_.erase(ss_map_.begin(), it);
  AdvanceFront(timestamp);
}
155 | |
156 bool Vp9SsMap::TimeForCleanup(uint32_t timestamp) const { | |
157 if (ss_map_.empty() || !IsNewerTimestamp(timestamp, ss_map_.begin()->first)) | |
158 return false; | |
159 | |
160 uint32_t diff = timestamp - ss_map_.begin()->first; | |
161 return diff / kVideoPayloadTypeFrequency >= kSsCleanupIntervalSec; | |
162 } | |
163 | |
// Re-keys the oldest entry to |timestamp|: the front GOF is removed and
// re-inserted under the new timestamp, keeping its data while resetting the
// age used by TimeForCleanup().
void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
  RTC_DCHECK(!ss_map_.empty());
  GofInfoVP9 gof = ss_map_.begin()->second;
  ss_map_.erase(ss_map_.begin());
  ss_map_[timestamp] = gof;
}
170 | |
// TODO(asapersson): Update according to updates in RTP payload profile.
// Fills in the per-packet VP9 header fields (temporal layer, up-switch flag,
// reference picture diffs) from the stored scalability structure the packet
// belongs to, indexed by its gof_idx. Returns false when no update was
// possible (no gof_idx, or the matching SS has not been received yet).
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
  uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
  if (gof_idx == kNoGofIdx)
    return false;  // No update needed.

  SsMap::iterator it;
  if (!Find(packet->timestamp, &it))
    return false;  // Corresponding SS not yet received.

  // An out-of-range index means the SS this packet refers to is not the one
  // we found (e.g. a newer SS has not arrived yet).
  if (gof_idx >= it->second.num_frames_in_gof)
    return false;  // Assume corresponding SS not yet received.

  RTPVideoHeaderVP9* vp9 = &packet->codecSpecificHeader.codecHeader.VP9;
  vp9->temporal_idx = it->second.temporal_idx[gof_idx];
  vp9->temporal_up_switch = it->second.temporal_up_switch[gof_idx];

  // TODO(asapersson): Set vp9.ref_picture_id[i] and add usage.
  vp9->num_ref_pics = it->second.num_ref_pics[gof_idx];
  for (uint8_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
    vp9->pid_diff[i] = it->second.pid_diff[gof_idx][i];
  }
  return true;
}
195 | |
196 void Vp9SsMap::UpdateFrames(FrameList* frames) { | |
197 for (const auto& frame_it : *frames) { | |
198 uint8_t gof_idx = | |
199 frame_it.second->CodecSpecific()->codecSpecific.VP9.gof_idx; | |
200 if (gof_idx == kNoGofIdx) { | |
201 continue; | |
202 } | |
203 SsMap::iterator ss_it; | |
204 if (Find(frame_it.second->TimeStamp(), &ss_it)) { | |
205 if (gof_idx >= ss_it->second.num_frames_in_gof) { | |
206 continue; // Assume corresponding SS not yet received. | |
207 } | |
208 frame_it.second->SetGofInfo(ss_it->second, gof_idx); | |
209 } | |
210 } | |
211 } | |
212 | |
// Constructs the jitter buffer in the stopped state, taking ownership of
// |event| (used to signal waiting decoder threads) and pre-allocating
// kStartNumberOfFrames frame buffers on the free list. |clock| is borrowed
// and must outlive this object.
VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
                                 rtc::scoped_ptr<EventWrapper> event)
    : clock_(clock),
      running_(false),
      crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      frame_event_(event.Pass()),
      max_number_of_frames_(kStartNumberOfFrames),
      free_frames_(),
      decodable_frames_(),
      incomplete_frames_(),
      last_decoded_state_(),
      first_packet_since_reset_(true),
      stats_callback_(NULL),
      incoming_frame_rate_(0),
      incoming_frame_count_(0),
      time_last_incoming_frame_count_(0),
      incoming_bit_count_(0),
      incoming_bit_rate_(0),
      num_consecutive_old_packets_(0),
      num_packets_(0),
      num_duplicated_packets_(0),
      num_discarded_packets_(0),
      time_first_packet_ms_(0),
      jitter_estimate_(clock),
      inter_frame_delay_(clock_->TimeInMilliseconds()),
      rtt_ms_(kDefaultRtt),
      nack_mode_(kNoNack),
      low_rtt_nack_threshold_ms_(-1),
      high_rtt_nack_threshold_ms_(-1),
      missing_sequence_numbers_(SequenceNumberLessThan()),
      max_nack_list_size_(0),
      max_packet_age_to_nack_(0),
      max_incomplete_time_ms_(0),
      decode_error_mode_(kNoErrors),
      average_packets_per_frame_(0.0f),
      frame_counter_(0) {
  // Pre-allocate the initial pool of frame buffers; they are owned by this
  // object and deleted in the destructor.
  for (int i = 0; i < kStartNumberOfFrames; i++)
    free_frames_.push_back(new VCMFrameBuffer());
}
252 | |
253 VCMJitterBuffer::~VCMJitterBuffer() { | |
254 Stop(); | |
255 for (UnorderedFrameList::iterator it = free_frames_.begin(); | |
256 it != free_frames_.end(); ++it) { | |
257 delete *it; | |
258 } | |
259 for (FrameList::iterator it = incomplete_frames_.begin(); | |
260 it != incomplete_frames_.end(); ++it) { | |
261 delete it->second; | |
262 } | |
263 for (FrameList::iterator it = decodable_frames_.begin(); | |
264 it != decodable_frames_.end(); ++it) { | |
265 delete it->second; | |
266 } | |
267 delete crit_sect_; | |
268 } | |
269 | |
270 void VCMJitterBuffer::UpdateHistograms() { | |
271 if (num_packets_ <= 0 || !running_) { | |
272 return; | |
273 } | |
274 int64_t elapsed_sec = | |
275 (clock_->TimeInMilliseconds() - time_first_packet_ms_) / 1000; | |
276 if (elapsed_sec < metrics::kMinRunTimeInSeconds) { | |
277 return; | |
278 } | |
279 | |
280 RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DiscardedPacketsInPercent", | |
281 num_discarded_packets_ * 100 / num_packets_); | |
282 RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DuplicatedPacketsInPercent", | |
283 num_duplicated_packets_ * 100 / num_packets_); | |
284 | |
285 int total_frames = | |
286 receive_statistics_.key_frames + receive_statistics_.delta_frames; | |
287 if (total_frames > 0) { | |
288 RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.CompleteFramesReceivedPerSecond", | |
289 static_cast<int>((total_frames / elapsed_sec) + 0.5f)); | |
290 RTC_HISTOGRAM_COUNTS_1000( | |
291 "WebRTC.Video.KeyFramesReceivedInPermille", | |
292 static_cast<int>( | |
293 (receive_statistics_.key_frames * 1000.0f / total_frames) + 0.5f)); | |
294 } | |
295 } | |
296 | |
// Starts (or restarts) the jitter buffer: clears rate/packet counters and
// resets the decoding state, but keeps any previously configured NACK
// settings and frame pool.
void VCMJitterBuffer::Start() {
  CriticalSectionScoped cs(crit_sect_);
  running_ = true;
  incoming_frame_count_ = 0;
  incoming_frame_rate_ = 0;
  incoming_bit_count_ = 0;
  incoming_bit_rate_ = 0;
  time_last_incoming_frame_count_ = clock_->TimeInMilliseconds();
  receive_statistics_ = FrameCounts();

  num_consecutive_old_packets_ = 0;
  num_packets_ = 0;
  num_duplicated_packets_ = 0;
  num_discarded_packets_ = 0;
  time_first_packet_ms_ = 0;

  // Start in a non-signaled state.
  waiting_for_completion_.frame_size = 0;
  waiting_for_completion_.timestamp = 0;
  waiting_for_completion_.latest_packet_time = -1;
  first_packet_since_reset_ = true;
  rtt_ms_ = kDefaultRtt;
  last_decoded_state_.Reset();
}
321 | |
// Stops the jitter buffer: records histograms, moves all buffered frames
// back to the free list, and wakes any thread blocked in
// NextCompleteTimestamp(). Uses manual Enter()/Leave() so the event can be
// signaled after the lock is released.
void VCMJitterBuffer::Stop() {
  crit_sect_->Enter();
  UpdateHistograms();
  running_ = false;
  last_decoded_state_.Reset();

  // Make sure all frames are free and reset.
  for (FrameList::iterator it = decodable_frames_.begin();
       it != decodable_frames_.end(); ++it) {
    free_frames_.push_back(it->second);
  }
  for (FrameList::iterator it = incomplete_frames_.begin();
       it != incomplete_frames_.end(); ++it) {
    free_frames_.push_back(it->second);
  }
  for (UnorderedFrameList::iterator it = free_frames_.begin();
       it != free_frames_.end(); ++it) {
    (*it)->Reset();
  }
  decodable_frames_.clear();
  incomplete_frames_.clear();
  crit_sect_->Leave();
  // Make sure we wake up any threads waiting on these events.
  frame_event_->Set();
}
347 | |
348 bool VCMJitterBuffer::Running() const { | |
349 CriticalSectionScoped cs(crit_sect_); | |
350 return running_; | |
351 } | |
352 | |
// Flushes all buffered frames back to the free list and resets the decoding,
// jitter, delay and NACK state, as if no packets had been received since the
// buffer was started. Packet/rate counters are intentionally kept.
void VCMJitterBuffer::Flush() {
  CriticalSectionScoped cs(crit_sect_);
  decodable_frames_.Reset(&free_frames_);
  incomplete_frames_.Reset(&free_frames_);
  last_decoded_state_.Reset();  // TODO(mikhal): sync reset.
  num_consecutive_old_packets_ = 0;
  // Also reset the jitter and delay estimates
  jitter_estimate_.Reset();
  inter_frame_delay_.Reset(clock_->TimeInMilliseconds());
  waiting_for_completion_.frame_size = 0;
  waiting_for_completion_.timestamp = 0;
  waiting_for_completion_.latest_packet_time = -1;
  first_packet_since_reset_ = true;
  missing_sequence_numbers_.clear();
}
368 | |
369 // Get received key and delta frames | |
370 FrameCounts VCMJitterBuffer::FrameStatistics() const { | |
371 CriticalSectionScoped cs(crit_sect_); | |
372 return receive_statistics_; | |
373 } | |
374 | |
375 int VCMJitterBuffer::num_packets() const { | |
376 CriticalSectionScoped cs(crit_sect_); | |
377 return num_packets_; | |
378 } | |
379 | |
380 int VCMJitterBuffer::num_duplicated_packets() const { | |
381 CriticalSectionScoped cs(crit_sect_); | |
382 return num_duplicated_packets_; | |
383 } | |
384 | |
385 int VCMJitterBuffer::num_discarded_packets() const { | |
386 CriticalSectionScoped cs(crit_sect_); | |
387 return num_discarded_packets_; | |
388 } | |
389 | |
// Calculate framerate and bitrate.
// Computes smoothed incoming frame rate (fps) and bit rate (bps) over the
// interval since the previous call, then resets the interval counters.
// Rates are only recomputed at most once per second; within a second the
// previously computed values are returned unchanged.
void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate,
                                             unsigned int* bitrate) {
  assert(framerate);
  assert(bitrate);
  CriticalSectionScoped cs(crit_sect_);
  const int64_t now = clock_->TimeInMilliseconds();
  int64_t diff = now - time_last_incoming_frame_count_;
  if (diff < 1000 && incoming_frame_rate_ > 0 && incoming_bit_rate_ > 0) {
    // Make sure we report something even though less than
    // 1 second has passed since last update.
    *framerate = incoming_frame_rate_;
    *bitrate = incoming_bit_rate_;
  } else if (incoming_frame_count_ != 0) {
    // We have received frame(s) since last call to this function

    // Prepare calculations
    if (diff <= 0) {
      diff = 1;
    }
    // we add 0.5f for rounding
    float rate = 0.5f + ((incoming_frame_count_ * 1000.0f) / diff);
    if (rate < 1.0f) {
      rate = 1.0f;
    }

    // Calculate frame rate
    // Let r be rate.
    // r(0) = 1000*framecount/delta_time.
    // (I.e. frames per second since last calculation.)
    // frame_rate = r(0)/2 + r(-1)/2
    // (I.e. fr/s average this and the previous calculation.)
    *framerate = (incoming_frame_rate_ + static_cast<unsigned int>(rate)) / 2;
    incoming_frame_rate_ = static_cast<unsigned int>(rate);

    // Calculate bit rate
    if (incoming_bit_count_ == 0) {
      *bitrate = 0;
    } else {
      // bits * 1000 / ms == bps; written as 10 * (100 * bits / ms) to reduce
      // intermediate overflow risk.
      *bitrate = 10 * ((100 * incoming_bit_count_) /
                       static_cast<unsigned int>(diff));
    }
    incoming_bit_rate_ = *bitrate;

    // Reset count
    incoming_frame_count_ = 0;
    incoming_bit_count_ = 0;
    time_last_incoming_frame_count_ = now;

  } else {
    // No frames since last call
    time_last_incoming_frame_count_ = clock_->TimeInMilliseconds();
    *framerate = 0;
    *bitrate = 0;
    incoming_frame_rate_ = 0;
    incoming_bit_rate_ = 0;
  }
}
448 | |
// Answers the question:
// Will the packet sequence be complete if the next frame is grabbed for
// decoding right now? That is, have we lost a frame between the last decoded
// frame and the next, or is the next
// frame missing one or more packets?
bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
  CriticalSectionScoped cs(crit_sect_);
  // Finding oldest frame ready for decoder, check sequence number and size
  CleanUpOldOrEmptyFrames();
  if (!decodable_frames_.empty()) {
    if (decodable_frames_.Front()->GetState() == kStateComplete) {
      return true;
    }
  } else if (incomplete_frames_.size() <= 1) {
    // Frame not ready to be decoded.
    return true;
  }
  return false;
}
468 | |
// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
// complete frame, |max_wait_time_ms| decided by caller.
// On success *timestamp is set to the timestamp of the oldest complete
// decodable frame and true is returned; returns false on timeout or when the
// buffer is stopped. Uses manual Enter()/Leave() because the lock must be
// dropped while blocking on |frame_event_|.
bool VCMJitterBuffer::NextCompleteTimestamp(
    uint32_t max_wait_time_ms, uint32_t* timestamp) {
  crit_sect_->Enter();
  if (!running_) {
    crit_sect_->Leave();
    return false;
  }
  CleanUpOldOrEmptyFrames();

  if (decodable_frames_.empty() ||
      decodable_frames_.Front()->GetState() != kStateComplete) {
    const int64_t end_wait_time_ms = clock_->TimeInMilliseconds() +
        max_wait_time_ms;
    int64_t wait_time_ms = max_wait_time_ms;
    while (wait_time_ms > 0) {
      // Release the lock while blocking so InsertPacket() can make progress
      // and signal |frame_event_|.
      crit_sect_->Leave();
      const EventTypeWrapper ret =
        frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
      crit_sect_->Enter();
      if (ret == kEventSignaled) {
        // Are we shutting down the jitter buffer?
        if (!running_) {
          crit_sect_->Leave();
          return false;
        }
        // Finding oldest frame ready for decoder.
        CleanUpOldOrEmptyFrames();
        if (decodable_frames_.empty() ||
            decodable_frames_.Front()->GetState() != kStateComplete) {
          // Spurious wakeup for our purposes; keep waiting for the remainder
          // of the deadline.
          wait_time_ms = end_wait_time_ms - clock_->TimeInMilliseconds();
        } else {
          break;
        }
      } else {
        break;
      }
    }
  }
  if (decodable_frames_.empty() ||
      decodable_frames_.Front()->GetState() != kStateComplete) {
    crit_sect_->Leave();
    return false;
  }
  *timestamp = decodable_frames_.Front()->TimeStamp();
  crit_sect_->Leave();
  return true;
}
518 | |
519 bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) { | |
520 CriticalSectionScoped cs(crit_sect_); | |
521 if (!running_) { | |
522 return false; | |
523 } | |
524 if (decode_error_mode_ == kNoErrors) { | |
525 // No point to continue, as we are not decoding with errors. | |
526 return false; | |
527 } | |
528 | |
529 CleanUpOldOrEmptyFrames(); | |
530 | |
531 if (decodable_frames_.empty()) { | |
532 return false; | |
533 } | |
534 VCMFrameBuffer* oldest_frame = decodable_frames_.Front(); | |
535 // If we have exactly one frame in the buffer, release it only if it is | |
536 // complete. We know decodable_frames_ is not empty due to the previous | |
537 // check. | |
538 if (decodable_frames_.size() == 1 && incomplete_frames_.empty() | |
539 && oldest_frame->GetState() != kStateComplete) { | |
540 return false; | |
541 } | |
542 | |
543 *timestamp = oldest_frame->TimeStamp(); | |
544 return true; | |
545 } | |
546 | |
// Extracts the frame with |timestamp| from the decodable (or, failing that,
// incomplete) list, updates the jitter estimate and the last-decoded state,
// and prepares the frame for decoding. Returns NULL if the frame is not
// buffered or the jitter buffer is stopped. Ownership of the returned frame
// passes to the caller until ReleaseFrame() is called.
VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
  CriticalSectionScoped cs(crit_sect_);
  if (!running_) {
    return NULL;
  }
  // Extract the frame with the desired timestamp.
  VCMFrameBuffer* frame = decodable_frames_.PopFrame(timestamp);
  bool continuous = true;
  if (!frame) {
    frame = incomplete_frames_.PopFrame(timestamp);
    if (frame)
      continuous = last_decoded_state_.ContinuousFrame(frame);
    else
      return NULL;
  }
  TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", timestamp, "Extract");
  // Frame pulled out from jitter buffer, update the jitter estimate.
  const bool retransmitted = (frame->GetNackCount() > 0);
  if (retransmitted) {
    jitter_estimate_.FrameNacked();
  } else if (frame->Length() > 0) {
    // Ignore retransmitted and empty frames.
    if (waiting_for_completion_.latest_packet_time >= 0) {
      // A previously extracted incomplete frame is still pending; fold its
      // (incomplete) sample into the estimate now.
      UpdateJitterEstimate(waiting_for_completion_, true);
    }
    if (frame->GetState() == kStateComplete) {
      UpdateJitterEstimate(*frame, false);
    } else {
      // Wait for this one to get complete.
      waiting_for_completion_.frame_size = frame->Length();
      waiting_for_completion_.latest_packet_time =
          frame->LatestPacketTimeMs();
      waiting_for_completion_.timestamp = frame->TimeStamp();
    }
  }

  // The state must be changed to decoding before cleaning up zero sized
  // frames to avoid empty frames being cleaned up and then given to the
  // decoder. Propagates the missing_frame bit.
  frame->PrepareForDecode(continuous);

  // We have a frame - update the last decoded state and nack list.
  last_decoded_state_.SetState(frame);
  DropPacketsFromNackList(last_decoded_state_.sequence_num());

  if ((*frame).IsSessionComplete())
    UpdateAveragePacketsPerFrame(frame->NumPackets());

  return frame;
}
597 | |
598 // Release frame when done with decoding. Should never be used to release | |
599 // frames from within the jitter buffer. | |
600 void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) { | |
601 CriticalSectionScoped cs(crit_sect_); | |
602 VCMFrameBuffer* frame_buffer = static_cast<VCMFrameBuffer*>(frame); | |
603 if (frame_buffer) { | |
604 free_frames_.push_back(frame_buffer); | |
605 } | |
606 } | |
607 | |
// Gets frame to use for this timestamp. If no match, get empty frame.
// On return *frame is the buffer to insert the packet into and *frame_list
// points at the list it was popped from (NULL for a fresh/empty frame).
// Returns kFlushIndicator when the frame pool is exhausted and recycling
// did not reach a key frame, in which case *frame is returned to the free
// list and must not be used.
VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
                                             VCMFrameBuffer** frame,
                                             FrameList** frame_list) {
  *frame = incomplete_frames_.PopFrame(packet.timestamp);
  if (*frame != NULL) {
    *frame_list = &incomplete_frames_;
    return kNoError;
  }
  *frame = decodable_frames_.PopFrame(packet.timestamp);
  if (*frame != NULL) {
    *frame_list = &decodable_frames_;
    return kNoError;
  }

  *frame_list = NULL;
  // No match, return empty frame.
  *frame = GetEmptyFrame();
  if (*frame == NULL) {
    // No free frame! Try to reclaim some...
    LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
    bool found_key_frame = RecycleFramesUntilKeyFrame();
    *frame = GetEmptyFrame();
    // Recycling must have freed at least one frame.
    assert(*frame);
    if (!found_key_frame) {
      free_frames_.push_back(*frame);
      return kFlushIndicator;
    }
  }
  (*frame)->Reset();
  return kNoError;
}
640 | |
641 int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame, | |
642 bool* retransmitted) const { | |
643 assert(retransmitted); | |
644 CriticalSectionScoped cs(crit_sect_); | |
645 const VCMFrameBuffer* frame_buffer = | |
646 static_cast<const VCMFrameBuffer*>(frame); | |
647 *retransmitted = (frame_buffer->GetNackCount() > 0); | |
648 return frame_buffer->LatestPacketTimeMs(); | |
649 } | |
650 | |
// Inserts |packet| into the frame it belongs to, updating NACK state, jitter
// bookkeeping and the decodable/incomplete frame lists. Sets *retransmitted
// when the frame has been NACKed (only on the decodable/complete paths).
// Returns the resulting frame-buffer state, kOldPacket for packets older
// than the last decoded frame, or kFlushIndicator when the buffer must be
// flushed.
VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
                                                 bool* retransmitted) {
  CriticalSectionScoped cs(crit_sect_);

  ++num_packets_;
  if (num_packets_ == 1) {
    time_first_packet_ms_ = clock_->TimeInMilliseconds();
  }
  // Does this packet belong to an old frame?
  if (last_decoded_state_.IsOldPacket(&packet)) {
    // Account only for media packets.
    if (packet.sizeBytes > 0) {
      num_discarded_packets_++;
      num_consecutive_old_packets_++;
      if (stats_callback_ != NULL)
        stats_callback_->OnDiscardedPacketsUpdated(num_discarded_packets_);
    }
    // Update last decoded sequence number if the packet arrived late and
    // belongs to a frame with a timestamp equal to the last decoded
    // timestamp.
    last_decoded_state_.UpdateOldPacket(&packet);
    DropPacketsFromNackList(last_decoded_state_.sequence_num());

    // Also see if this old packet made more incomplete frames continuous.
    FindAndInsertContinuousFramesWithState(last_decoded_state_);

    if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
      LOG(LS_WARNING)
          << num_consecutive_old_packets_
          << " consecutive old packets received. Flushing the jitter buffer.";
      Flush();
      return kFlushIndicator;
    }
    return kOldPacket;
  }

  num_consecutive_old_packets_ = 0;

  VCMFrameBuffer* frame;
  FrameList* frame_list;
  const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
  if (error != kNoError)
    return error;

  int64_t now_ms = clock_->TimeInMilliseconds();
  // We are keeping track of the first and latest seq numbers, and
  // the number of wraps to be able to calculate how many packets we expect.
  if (first_packet_since_reset_) {
    // Now it's time to start estimating jitter
    // reset the delay estimate.
    inter_frame_delay_.Reset(now_ms);
  }

  // Empty packets may bias the jitter estimate (lacking size component),
  // therefore don't let empty packet trigger the following updates:
  if (packet.frameType != kEmptyFrame) {
    if (waiting_for_completion_.timestamp == packet.timestamp) {
      // This can get bad if we have a lot of duplicate packets,
      // we will then count some packet multiple times.
      waiting_for_completion_.frame_size += packet.sizeBytes;
      waiting_for_completion_.latest_packet_time = now_ms;
    } else if (waiting_for_completion_.latest_packet_time >= 0 &&
               waiting_for_completion_.latest_packet_time + 2000 <= now_ms) {
      // A packet should never be more than two seconds late
      UpdateJitterEstimate(waiting_for_completion_, true);
      waiting_for_completion_.latest_packet_time = -1;
      waiting_for_completion_.frame_size = 0;
      waiting_for_completion_.timestamp = 0;
    }
  }

  VCMFrameBufferStateEnum previous_state = frame->GetState();
  // Insert packet.
  FrameData frame_data;
  frame_data.rtt_ms = rtt_ms_;
  frame_data.rolling_average_packets_per_frame = average_packets_per_frame_;
  VCMFrameBufferEnum buffer_state =
      frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);

  if (previous_state != kStateComplete) {
    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
                             "timestamp", frame->TimeStamp());
  }

  // Positive buffer states mean the packet was accepted into the frame.
  if (buffer_state > 0) {
    incoming_bit_count_ += packet.sizeBytes << 3;
    if (first_packet_since_reset_) {
      latest_received_sequence_number_ = packet.seqNum;
      first_packet_since_reset_ = false;
    } else {
      if (IsPacketRetransmitted(packet)) {
        frame->IncrementNackCount();
      }
      // A failed NACK-list update means too many packets are missing; only a
      // key frame can recover, so request a flush otherwise.
      if (!UpdateNackList(packet.seqNum) &&
          packet.frameType != kVideoFrameKey) {
        buffer_state = kFlushIndicator;
      }

      latest_received_sequence_number_ = LatestSequenceNumber(
          latest_received_sequence_number_, packet.seqNum);
    }
  }

  // Is the frame already in the decodable list?
  bool continuous = IsContinuous(*frame);
  switch (buffer_state) {
    case kGeneralError:
    case kTimeStampError:
    case kSizeError: {
      // The frame is unusable; recycle its buffer.
      free_frames_.push_back(frame);
      break;
    }
    case kCompleteSession: {
      if (previous_state != kStateDecodable &&
          previous_state != kStateComplete) {
        CountFrame(*frame);
        if (continuous) {
          // Signal that we have a complete session.
          frame_event_->Set();
        }
      }
      FALLTHROUGH();
    }
    // Note: There is no break here - continuing to kDecodableSession.
    case kDecodableSession: {
      *retransmitted = (frame->GetNackCount() > 0);
      if (continuous) {
        decodable_frames_.InsertFrame(frame);
        // This frame may have made later incomplete frames continuous too.
        FindAndInsertContinuousFrames(*frame);
      } else {
        incomplete_frames_.InsertFrame(frame);
      }
      break;
    }
    case kIncomplete: {
      if (frame->GetState() == kStateEmpty &&
          last_decoded_state_.UpdateEmptyFrame(frame)) {
        free_frames_.push_back(frame);
        return kNoError;
      } else {
        incomplete_frames_.InsertFrame(frame);
      }
      break;
    }
    case kNoError:
    case kOutOfBoundsPacket:
    case kDuplicatePacket: {
      // Put back the frame where it came from.
      if (frame_list != NULL) {
        frame_list->InsertFrame(frame);
      } else {
        free_frames_.push_back(frame);
      }
      // NOTE(review): this counter is also bumped for kNoError and
      // kOutOfBoundsPacket, not just kDuplicatePacket — confirm this is
      // intentional before relying on the duplicated-packets metric.
      ++num_duplicated_packets_;
      break;
    }
    case kFlushIndicator:
      free_frames_.push_back(frame);
      return kFlushIndicator;
    default: assert(false);
  }
  return buffer_state;
}
814 | |
815 bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame, | |
816 const VCMDecodingState& decoding_state) const { | |
817 if (decode_error_mode_ == kWithErrors) | |
818 return true; | |
819 // Is this frame (complete or decodable) and continuous? | |
820 // kStateDecodable will never be set when decode_error_mode_ is false | |
821 // as SessionInfo determines this state based on the error mode (and frame | |
822 // completeness). | |
823 return (frame.GetState() == kStateComplete || | |
824 frame.GetState() == kStateDecodable) && | |
825 decoding_state.ContinuousFrame(&frame); | |
826 } | |
827 | |
// Returns true when |frame| is continuous with the last decoded frame,
// either directly or through the chain of already-decodable frames older
// than it (each decodable frame advances a scratch decoding state).
bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
  if (IsContinuousInState(frame, last_decoded_state_)) {
    return true;
  }
  VCMDecodingState decoding_state;
  decoding_state.CopyFrom(last_decoded_state_);
  for (FrameList::const_iterator it = decodable_frames_.begin();
       it != decodable_frames_.end(); ++it) {
    VCMFrameBuffer* decodable_frame = it->second;
    // Only frames older than |frame| can contribute to its continuity.
    if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
      break;
    }
    decoding_state.SetState(decodable_frame);
    if (IsContinuousInState(frame, decoding_state)) {
      return true;
    }
  }
  return false;
}
847 | |
// Moves incomplete frames that become continuous once |new_frame| is
// decodable into the decodable list, starting from the decoding state that
// would result after decoding |new_frame|.
void VCMJitterBuffer::FindAndInsertContinuousFrames(
    const VCMFrameBuffer& new_frame) {
  VCMDecodingState decoding_state;
  decoding_state.CopyFrom(last_decoded_state_);
  decoding_state.SetState(&new_frame);
  FindAndInsertContinuousFramesWithState(decoding_state);
}
855 | |
// Walks the incomplete-frame list (oldest first) and moves every frame that
// is continuous given |original_decoded_state| — advanced by each frame
// moved — into the decodable list.
void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
    const VCMDecodingState& original_decoded_state) {
  // Copy original_decoded_state so we can move the state forward with each
  // decodable frame we find.
  VCMDecodingState decoding_state;
  decoding_state.CopyFrom(original_decoded_state);

  // When temporal layers are available, we search for a complete or decodable
  // frame until we hit one of the following:
  // 1. Continuous base or sync layer.
  // 2. The end of the list was reached.
  for (FrameList::iterator it = incomplete_frames_.begin();
       it != incomplete_frames_.end();) {
    VCMFrameBuffer* frame = it->second;
    // Skip frames older than the starting decoding state.
    if (IsNewerTimestamp(original_decoded_state.time_stamp(),
                         frame->TimeStamp())) {
      ++it;
      continue;
    }
    if (IsContinuousInState(*frame, decoding_state)) {
      decodable_frames_.InsertFrame(frame);
      // Post-increment keeps |it| valid across the erase.
      incomplete_frames_.erase(it++);
      decoding_state.SetState(frame);
    } else if (frame->TemporalId() <= 0) {
      // A non-continuous base layer blocks everything after it.
      break;
    } else {
      ++it;
    }
  }
}
886 | |
887 uint32_t VCMJitterBuffer::EstimatedJitterMs() { | |
888 CriticalSectionScoped cs(crit_sect_); | |
889 // Compute RTT multiplier for estimation. | |
890 // low_rtt_nackThresholdMs_ == -1 means no FEC. | |
891 double rtt_mult = 1.0f; | |
892 if (low_rtt_nack_threshold_ms_ >= 0 && | |
893 rtt_ms_ >= low_rtt_nack_threshold_ms_) { | |
894 // For RTTs above low_rtt_nack_threshold_ms_ we don't apply extra delay | |
895 // when waiting for retransmissions. | |
896 rtt_mult = 0.0f; | |
897 } | |
898 return jitter_estimate_.GetJitterEstimate(rtt_mult); | |
899 } | |
900 | |
901 void VCMJitterBuffer::UpdateRtt(int64_t rtt_ms) { | |
902 CriticalSectionScoped cs(crit_sect_); | |
903 rtt_ms_ = rtt_ms; | |
904 jitter_estimate_.UpdateRtt(rtt_ms); | |
905 } | |
906 | |
907 void VCMJitterBuffer::SetNackMode(VCMNackMode mode, | |
908 int64_t low_rtt_nack_threshold_ms, | |
909 int64_t high_rtt_nack_threshold_ms) { | |
910 CriticalSectionScoped cs(crit_sect_); | |
911 nack_mode_ = mode; | |
912 if (mode == kNoNack) { | |
913 missing_sequence_numbers_.clear(); | |
914 } | |
915 assert(low_rtt_nack_threshold_ms >= -1 && high_rtt_nack_threshold_ms >= -1); | |
916 assert(high_rtt_nack_threshold_ms == -1 || | |
917 low_rtt_nack_threshold_ms <= high_rtt_nack_threshold_ms); | |
918 assert(low_rtt_nack_threshold_ms > -1 || high_rtt_nack_threshold_ms == -1); | |
919 low_rtt_nack_threshold_ms_ = low_rtt_nack_threshold_ms; | |
920 high_rtt_nack_threshold_ms_ = high_rtt_nack_threshold_ms; | |
921 // Don't set a high start rtt if high_rtt_nack_threshold_ms_ is used, to not | |
922 // disable NACK in |kNack| mode. | |
923 if (rtt_ms_ == kDefaultRtt && high_rtt_nack_threshold_ms_ != -1) { | |
924 rtt_ms_ = 0; | |
925 } | |
926 if (!WaitForRetransmissions()) { | |
927 jitter_estimate_.ResetNackCount(); | |
928 } | |
929 } | |
930 | |
931 void VCMJitterBuffer::SetNackSettings(size_t max_nack_list_size, | |
932 int max_packet_age_to_nack, | |
933 int max_incomplete_time_ms) { | |
934 CriticalSectionScoped cs(crit_sect_); | |
935 assert(max_packet_age_to_nack >= 0); | |
936 assert(max_incomplete_time_ms_ >= 0); | |
937 max_nack_list_size_ = max_nack_list_size; | |
938 max_packet_age_to_nack_ = max_packet_age_to_nack; | |
939 max_incomplete_time_ms_ = max_incomplete_time_ms; | |
940 } | |
941 | |
// Returns the currently configured NACK mode. Thread-safe accessor.
VCMNackMode VCMJitterBuffer::nack_mode() const {
  CriticalSectionScoped cs(crit_sect_);
  return nack_mode_;
}
946 | |
947 int VCMJitterBuffer::NonContinuousOrIncompleteDuration() { | |
948 if (incomplete_frames_.empty()) { | |
949 return 0; | |
950 } | |
951 uint32_t start_timestamp = incomplete_frames_.Front()->TimeStamp(); | |
952 if (!decodable_frames_.empty()) { | |
953 start_timestamp = decodable_frames_.Back()->TimeStamp(); | |
954 } | |
955 return incomplete_frames_.Back()->TimeStamp() - start_timestamp; | |
956 } | |
957 | |
958 uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber( | |
959 const VCMFrameBuffer& frame) const { | |
960 assert(frame.GetLowSeqNum() >= 0); | |
961 if (frame.HaveFirstPacket()) | |
962 return frame.GetLowSeqNum(); | |
963 | |
964 // This estimate is not accurate if more than one packet with lower sequence | |
965 // number is lost. | |
966 return frame.GetLowSeqNum() - 1; | |
967 } | |
968 | |
// Builds the list of sequence numbers to NACK. Sets |*request_key_frame|
// instead when retransmission cannot help: no decodable key frame exists,
// the NACK list has grown too large, or too long a stretch of frames is
// non-decodable.
std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
  CriticalSectionScoped cs(crit_sect_);
  *request_key_frame = false;
  if (nack_mode_ == kNoNack) {
    // NACK disabled: nothing to request.
    return std::vector<uint16_t>();
  }
  if (last_decoded_state_.in_initial_state()) {
    // Nothing has been decoded yet; decoding can only start from a key frame.
    VCMFrameBuffer* next_frame = NextFrame();
    const bool first_frame_is_key = next_frame &&
        next_frame->FrameType() == kVideoFrameKey &&
        next_frame->HaveFirstPacket();
    if (!first_frame_is_key) {
      // Only consider requesting a key frame if some non-empty frame has
      // actually been received on either list.
      bool have_non_empty_frame = decodable_frames_.end() != find_if(
          decodable_frames_.begin(), decodable_frames_.end(),
          HasNonEmptyState);
      if (!have_non_empty_frame) {
        have_non_empty_frame = incomplete_frames_.end() != find_if(
            incomplete_frames_.begin(), incomplete_frames_.end(),
            HasNonEmptyState);
      }
      bool found_key_frame = RecycleFramesUntilKeyFrame();
      if (!found_key_frame) {
        *request_key_frame = have_non_empty_frame;
        return std::vector<uint16_t>();
      }
    }
  }
  if (TooLargeNackList()) {
    // If trimming the list failed to reach a key frame, ask for one.
    *request_key_frame = !HandleTooLargeNackList();
  }
  if (max_incomplete_time_ms_ > 0) {
    int non_continuous_incomplete_duration =
        NonContinuousOrIncompleteDuration();
    // Durations are compared in RTP ticks: 90 ticks per millisecond.
    if (non_continuous_incomplete_duration > 90 * max_incomplete_time_ms_) {
      LOG_F(LS_WARNING) << "Too long non-decodable duration: "
                        << non_continuous_incomplete_duration << " > "
                        << 90 * max_incomplete_time_ms_;
      // Look backwards for the most recent (possibly incomplete) key frame.
      FrameList::reverse_iterator rit = find_if(incomplete_frames_.rbegin(),
          incomplete_frames_.rend(), IsKeyFrame);
      if (rit == incomplete_frames_.rend()) {
        // Request a key frame if we don't have one already.
        *request_key_frame = true;
        return std::vector<uint16_t>();
      } else {
        // Skip to the last key frame. If it's incomplete we will start
        // NACKing it.
        // Note that the estimated low sequence number is correct for VP8
        // streams because only the first packet of a key frame is marked.
        last_decoded_state_.Reset();
        DropPacketsFromNackList(EstimatedLowSequenceNumber(*rit->second));
      }
    }
  }
  std::vector<uint16_t> nack_list(missing_sequence_numbers_.begin(),
                                  missing_sequence_numbers_.end());
  return nack_list;
}
1026 | |
// Sets the policy for whether frames containing errors may be decoded.
// Thread-safe setter.
void VCMJitterBuffer::SetDecodeErrorMode(VCMDecodeErrorMode error_mode) {
  CriticalSectionScoped cs(crit_sect_);
  decode_error_mode_ = error_mode;
}
1031 | |
1032 VCMFrameBuffer* VCMJitterBuffer::NextFrame() const { | |
1033 if (!decodable_frames_.empty()) | |
1034 return decodable_frames_.Front(); | |
1035 if (!incomplete_frames_.empty()) | |
1036 return incomplete_frames_.Front(); | |
1037 return NULL; | |
1038 } | |
1039 | |
// Adds every sequence number between the latest received packet and
// |sequence_number| to the NACK list, or removes |sequence_number| when it
// fills an existing gap. Returns false when the NACK list can no longer be
// managed and a key frame should be requested instead.
bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
  if (nack_mode_ == kNoNack) {
    return true;
  }
  // Make sure we don't add packets which are already too old to be decoded.
  if (!last_decoded_state_.in_initial_state()) {
    latest_received_sequence_number_ = LatestSequenceNumber(
        latest_received_sequence_number_,
        last_decoded_state_.sequence_num());
  }
  if (IsNewerSequenceNumber(sequence_number,
                            latest_received_sequence_number_)) {
    // Push any missing sequence numbers to the NACK list.
    // The loop condition handles 16-bit sequence-number wraparound.
    for (uint16_t i = latest_received_sequence_number_ + 1;
         IsNewerSequenceNumber(sequence_number, i); ++i) {
      // Hinted insert: new entries are in increasing order, so appending at
      // end() is the common case.
      missing_sequence_numbers_.insert(missing_sequence_numbers_.end(), i);
      TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "AddNack",
                           "seqnum", i);
    }
    if (TooLargeNackList() && !HandleTooLargeNackList()) {
      LOG(LS_WARNING) << "Requesting key frame due to too large NACK list.";
      return false;
    }
    if (MissingTooOldPacket(sequence_number) &&
        !HandleTooOldPackets(sequence_number)) {
      LOG(LS_WARNING) << "Requesting key frame due to missing too old packets";
      return false;
    }
  } else {
    // An out-of-order packet arrived: it fills a gap, so it is no longer
    // missing (erase is a no-op if it was never on the list).
    missing_sequence_numbers_.erase(sequence_number);
    TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "RemoveNack",
                         "seqnum", sequence_number);
  }
  return true;
}
1075 | |
1076 bool VCMJitterBuffer::TooLargeNackList() const { | |
1077 return missing_sequence_numbers_.size() > max_nack_list_size_; | |
1078 } | |
1079 | |
1080 bool VCMJitterBuffer::HandleTooLargeNackList() { | |
1081 // Recycle frames until the NACK list is small enough. It is likely cheaper to | |
1082 // request a key frame than to retransmit this many missing packets. | |
1083 LOG_F(LS_WARNING) << "NACK list has grown too large: " | |
1084 << missing_sequence_numbers_.size() << " > " | |
1085 << max_nack_list_size_; | |
1086 bool key_frame_found = false; | |
1087 while (TooLargeNackList()) { | |
1088 key_frame_found = RecycleFramesUntilKeyFrame(); | |
1089 } | |
1090 return key_frame_found; | |
1091 } | |
1092 | |
1093 bool VCMJitterBuffer::MissingTooOldPacket( | |
1094 uint16_t latest_sequence_number) const { | |
1095 if (missing_sequence_numbers_.empty()) { | |
1096 return false; | |
1097 } | |
1098 const uint16_t age_of_oldest_missing_packet = latest_sequence_number - | |
1099 *missing_sequence_numbers_.begin(); | |
1100 // Recycle frames if the NACK list contains too old sequence numbers as | |
1101 // the packets may have already been dropped by the sender. | |
1102 return age_of_oldest_missing_packet > max_packet_age_to_nack_; | |
1103 } | |
1104 | |
1105 bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) { | |
1106 bool key_frame_found = false; | |
1107 const uint16_t age_of_oldest_missing_packet = latest_sequence_number - | |
1108 *missing_sequence_numbers_.begin(); | |
1109 LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: " | |
1110 << age_of_oldest_missing_packet << " > " | |
1111 << max_packet_age_to_nack_; | |
1112 while (MissingTooOldPacket(latest_sequence_number)) { | |
1113 key_frame_found = RecycleFramesUntilKeyFrame(); | |
1114 } | |
1115 return key_frame_found; | |
1116 } | |
1117 | |
1118 void VCMJitterBuffer::DropPacketsFromNackList( | |
1119 uint16_t last_decoded_sequence_number) { | |
1120 // Erase all sequence numbers from the NACK list which we won't need any | |
1121 // longer. | |
1122 missing_sequence_numbers_.erase(missing_sequence_numbers_.begin(), | |
1123 missing_sequence_numbers_.upper_bound( | |
1124 last_decoded_sequence_number)); | |
1125 } | |
1126 | |
// Returns the RTP timestamp recorded for the last decoded frame.
// Thread-safe accessor.
int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
  CriticalSectionScoped cs(crit_sect_);
  return last_decoded_state_.time_stamp();
}
1131 | |
1132 void VCMJitterBuffer::RenderBufferSize(uint32_t* timestamp_start, | |
1133 uint32_t* timestamp_end) { | |
1134 CriticalSectionScoped cs(crit_sect_); | |
1135 CleanUpOldOrEmptyFrames(); | |
1136 *timestamp_start = 0; | |
1137 *timestamp_end = 0; | |
1138 if (decodable_frames_.empty()) { | |
1139 return; | |
1140 } | |
1141 *timestamp_start = decodable_frames_.Front()->TimeStamp(); | |
1142 *timestamp_end = decodable_frames_.Back()->TimeStamp(); | |
1143 } | |
1144 | |
// Registers the callback notified of frame-count statistics updates
// (pass NULL to unregister). Thread-safe setter.
void VCMJitterBuffer::RegisterStatsCallback(
    VCMReceiveStatisticsCallback* callback) {
  CriticalSectionScoped cs(crit_sect_);
  stats_callback_ = callback;
}
1150 | |
1151 VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() { | |
1152 if (free_frames_.empty()) { | |
1153 if (!TryToIncreaseJitterBufferSize()) { | |
1154 return NULL; | |
1155 } | |
1156 } | |
1157 VCMFrameBuffer* frame = free_frames_.front(); | |
1158 free_frames_.pop_front(); | |
1159 return frame; | |
1160 } | |
1161 | |
1162 bool VCMJitterBuffer::TryToIncreaseJitterBufferSize() { | |
1163 if (max_number_of_frames_ >= kMaxNumberOfFrames) | |
1164 return false; | |
1165 free_frames_.push_back(new VCMFrameBuffer()); | |
1166 ++max_number_of_frames_; | |
1167 TRACE_COUNTER1("webrtc", "JBMaxFrames", max_number_of_frames_); | |
1168 return true; | |
1169 } | |
1170 | |
// Recycle oldest frames up to a key frame, used if jitter buffer is completely
// full. Returns true if a key frame remains available to resume from.
bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
  // First release incomplete frames, and only release decodable frames if there
  // are no incomplete ones.
  FrameList::iterator key_frame_it;
  bool key_frame_found = false;
  int dropped_frames = 0;
  dropped_frames += incomplete_frames_.RecycleFramesUntilKeyFrame(
      &key_frame_it, &free_frames_);
  key_frame_found = key_frame_it != incomplete_frames_.end();
  if (dropped_frames == 0) {
    // Nothing incomplete was dropped; recycle from the decodable list.
    dropped_frames += decodable_frames_.RecycleFramesUntilKeyFrame(
        &key_frame_it, &free_frames_);
    key_frame_found = key_frame_it != decodable_frames_.end();
  }
  TRACE_EVENT_INSTANT0("webrtc", "JB::RecycleFramesUntilKeyFrame");
  if (key_frame_found) {
    LOG(LS_INFO) << "Found key frame while dropping frames.";
    // Reset last decoded state to make sure the next frame decoded is a key
    // frame, and start NACKing from here.
    last_decoded_state_.Reset();
    DropPacketsFromNackList(EstimatedLowSequenceNumber(*key_frame_it->second));
  } else if (decodable_frames_.empty()) {
    // All frames dropped. Reset the decoding state and clear missing sequence
    // numbers as we're starting fresh.
    last_decoded_state_.Reset();
    missing_sequence_numbers_.clear();
  }
  return key_frame_found;
}
1202 | |
1203 // Must be called under the critical section |crit_sect_|. | |
1204 void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) { | |
1205 incoming_frame_count_++; | |
1206 | |
1207 if (frame.FrameType() == kVideoFrameKey) { | |
1208 TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", | |
1209 frame.TimeStamp(), "KeyComplete"); | |
1210 } else { | |
1211 TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", | |
1212 frame.TimeStamp(), "DeltaComplete"); | |
1213 } | |
1214 | |
1215 // Update receive statistics. We count all layers, thus when you use layers | |
1216 // adding all key and delta frames might differ from frame count. | |
1217 if (frame.IsSessionComplete()) { | |
1218 if (frame.FrameType() == kVideoFrameKey) { | |
1219 ++receive_statistics_.key_frames; | |
1220 } else { | |
1221 ++receive_statistics_.delta_frames; | |
1222 } | |
1223 if (stats_callback_ != NULL) | |
1224 stats_callback_->OnFrameCountsUpdated(receive_statistics_); | |
1225 } | |
1226 } | |
1227 | |
1228 void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) { | |
1229 if (frame_counter_ > kFastConvergeThreshold) { | |
1230 average_packets_per_frame_ = average_packets_per_frame_ | |
1231 * (1 - kNormalConvergeMultiplier) | |
1232 + current_number_packets * kNormalConvergeMultiplier; | |
1233 } else if (frame_counter_ > 0) { | |
1234 average_packets_per_frame_ = average_packets_per_frame_ | |
1235 * (1 - kFastConvergeMultiplier) | |
1236 + current_number_packets * kFastConvergeMultiplier; | |
1237 frame_counter_++; | |
1238 } else { | |
1239 average_packets_per_frame_ = current_number_packets; | |
1240 frame_counter_++; | |
1241 } | |
1242 } | |
1243 | |
// Must be called under the critical section |crit_sect_|.
// Releases frames that the decoding state marks as old or empty back to
// |free_frames_|, then prunes NACK entries that are no longer needed.
void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
  decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
                                            &free_frames_);
  incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
                                             &free_frames_);
  if (!last_decoded_state_.in_initial_state()) {
    // Packets at or before the last decoded frame cannot help decoding.
    DropPacketsFromNackList(last_decoded_state_.sequence_num());
  }
}
1254 | |
1255 // Must be called from within |crit_sect_|. | |
1256 bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const { | |
1257 return missing_sequence_numbers_.find(packet.seqNum) != | |
1258 missing_sequence_numbers_.end(); | |
1259 } | |
1260 | |
1261 // Must be called under the critical section |crit_sect_|. Should never be | |
1262 // called with retransmitted frames, they must be filtered out before this | |
1263 // function is called. | |
1264 void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample, | |
1265 bool incomplete_frame) { | |
1266 if (sample.latest_packet_time == -1) { | |
1267 return; | |
1268 } | |
1269 UpdateJitterEstimate(sample.latest_packet_time, sample.timestamp, | |
1270 sample.frame_size, incomplete_frame); | |
1271 } | |
1272 | |
1273 // Must be called under the critical section crit_sect_. Should never be | |
1274 // called with retransmitted frames, they must be filtered out before this | |
1275 // function is called. | |
1276 void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame, | |
1277 bool incomplete_frame) { | |
1278 if (frame.LatestPacketTimeMs() == -1) { | |
1279 return; | |
1280 } | |
1281 // No retransmitted frames should be a part of the jitter | |
1282 // estimate. | |
1283 UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(), | |
1284 frame.Length(), incomplete_frame); | |
1285 } | |
1286 | |
1287 // Must be called under the critical section |crit_sect_|. Should never be | |
1288 // called with retransmitted frames, they must be filtered out before this | |
1289 // function is called. | |
1290 void VCMJitterBuffer::UpdateJitterEstimate( | |
1291 int64_t latest_packet_time_ms, | |
1292 uint32_t timestamp, | |
1293 unsigned int frame_size, | |
1294 bool incomplete_frame) { | |
1295 if (latest_packet_time_ms == -1) { | |
1296 return; | |
1297 } | |
1298 int64_t frame_delay; | |
1299 bool not_reordered = inter_frame_delay_.CalculateDelay(timestamp, | |
1300 &frame_delay, | |
1301 latest_packet_time_ms); | |
1302 // Filter out frames which have been reordered in time by the network | |
1303 if (not_reordered) { | |
1304 // Update the jitter estimate with the new samples | |
1305 jitter_estimate_.UpdateEstimate(frame_delay, frame_size, incomplete_frame); | |
1306 } | |
1307 } | |
1308 | |
1309 bool VCMJitterBuffer::WaitForRetransmissions() { | |
1310 if (nack_mode_ == kNoNack) { | |
1311 // NACK disabled -> don't wait for retransmissions. | |
1312 return false; | |
1313 } | |
1314 // Evaluate if the RTT is higher than |high_rtt_nack_threshold_ms_|, and in | |
1315 // that case we don't wait for retransmissions. | |
1316 if (high_rtt_nack_threshold_ms_ >= 0 && | |
1317 rtt_ms_ >= high_rtt_nack_threshold_ms_) { | |
1318 return false; | |
1319 } | |
1320 return true; | |
1321 } | |
1322 } // namespace webrtc | |
OLD | NEW |