OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/video_coding/packet_buffer.h" | 11 #include "webrtc/modules/video_coding/packet_buffer.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <limits> | 14 #include <limits> |
15 | 15 |
16 #include "webrtc/base/checks.h" | 16 #include "webrtc/base/checks.h" |
17 #include "webrtc/base/logging.h" | |
17 #include "webrtc/modules/video_coding/frame_object.h" | 18 #include "webrtc/modules/video_coding/frame_object.h" |
18 | 19 |
19 namespace webrtc { | 20 namespace webrtc { |
20 namespace video_coding { | 21 namespace video_coding { |
21 | 22 |
22 PacketBuffer::PacketBuffer(size_t start_buffer_size, | 23 PacketBuffer::PacketBuffer(size_t start_buffer_size, |
23 size_t max_buffer_size, | 24 size_t max_buffer_size, |
24 OnCompleteFrameCallback* frame_callback) | 25 OnCompleteFrameCallback* frame_callback) |
25 : size_(start_buffer_size), | 26 : size_(start_buffer_size), |
26 max_size_(max_buffer_size), | 27 max_size_(max_buffer_size), |
27 first_seq_num_(0), | 28 first_seq_num_(0), |
28 last_seq_num_(0), | 29 last_seq_num_(0), |
29 first_packet_received_(false), | 30 first_packet_received_(false), |
30 data_buffer_(start_buffer_size), | 31 data_buffer_(start_buffer_size), |
31 sequence_buffer_(start_buffer_size), | 32 sequence_buffer_(start_buffer_size), |
32 frame_callback_(frame_callback), | 33 frame_callback_(frame_callback), |
33 last_picture_id_(-1), | 34 last_picture_id_(-1), |
34 last_unwrap_(-1) { | 35 last_unwrap_(-1), |
36 current_ss_idx_(0) { | |
35 RTC_DCHECK_LE(start_buffer_size, max_buffer_size); | 37 RTC_DCHECK_LE(start_buffer_size, max_buffer_size); |
36 // Buffer size must always be a power of 2. | 38 // Buffer size must always be a power of 2. |
37 RTC_DCHECK((start_buffer_size & (start_buffer_size - 1)) == 0); | 39 RTC_DCHECK((start_buffer_size & (start_buffer_size - 1)) == 0); |
38 RTC_DCHECK((max_buffer_size & (max_buffer_size - 1)) == 0); | 40 RTC_DCHECK((max_buffer_size & (max_buffer_size - 1)) == 0); |
39 } | 41 } |
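
Both DCHECKs above rely on the single-bit test: a power of two has exactly one bit set, so clearing the lowest set bit with x & (x - 1) must leave zero. A minimal standalone sketch of that invariant (IsPowerOfTwo() is a hypothetical helper, not part of PacketBuffer), which also shows why a power-of-two size_ lets the seq_num % size_ indexing reduce to a bit mask:

// Standalone sketch of the power-of-two invariant checked above; the
// IsPowerOfTwo() helper is hypothetical and not part of PacketBuffer.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool IsPowerOfTwo(size_t x) {
  // A power of two has exactly one bit set, so clearing the lowest set bit
  // must leave zero. Zero itself is rejected here.
  return x != 0 && (x & (x - 1)) == 0;
}

int main() {
  const size_t start_buffer_size = 512;
  std::printf("%d\n", IsPowerOfTwo(start_buffer_size));      // 1
  std::printf("%d\n", IsPowerOfTwo(start_buffer_size - 1));  // 0
  // With a power-of-two size_, the modulo used for indexing is a bit mask:
  const uint16_t seq_num = 0xFFFF;
  std::printf("%u == %u\n",
              static_cast<unsigned>(seq_num % start_buffer_size),
              static_cast<unsigned>(seq_num & (start_buffer_size - 1)));
  return 0;
}
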
40 | 42 |
41 bool PacketBuffer::InsertPacket(const VCMPacket& packet) { | 43 bool PacketBuffer::InsertPacket(const VCMPacket& packet) { |
42 rtc::CritScope lock(&crit_); | 44 rtc::CritScope lock(&crit_); |
43 uint16_t seq_num = packet.seqNum; | 45 uint16_t seq_num = packet.seqNum; |
44 size_t index = seq_num % size_; | 46 size_t index = seq_num % size_; |
(...skipping 165 matching lines...) |
210 case kVideoCodecULPFEC : | 212 case kVideoCodecULPFEC : |
211 case kVideoCodecRED : | 213 case kVideoCodecRED : |
212 case kVideoCodecUnknown : { | 214 case kVideoCodecUnknown : { |
213 RTC_NOTREACHED(); | 215 RTC_NOTREACHED(); |
214 } | 216 } |
215 case kVideoCodecVP8 : { | 217 case kVideoCodecVP8 : { |
216 ManageFrameVp8(std::move(frame)); | 218 ManageFrameVp8(std::move(frame)); |
217 break; | 219 break; |
218 } | 220 } |
219 case kVideoCodecVP9 : { | 221 case kVideoCodecVP9 : { |
220 // TODO(philipel): ManageFrameVp9(std::move(frame)); | 222 ManageFrameVp9(std::move(frame)); |
221 break; | 223 break; |
222 } | 224 } |
223 case kVideoCodecH264 : | 225 case kVideoCodecH264 : |
224 case kVideoCodecI420 : | 226 case kVideoCodecI420 : |
225 case kVideoCodecGeneric : | 227 case kVideoCodecGeneric : |
226 default : { | 228 default : { |
227 ManageFrameGeneric(std::move(frame)); | 229 ManageFrameGeneric(std::move(frame)); |
228 } | 230 } |
229 } | 231 } |
230 } | 232 } |
(...skipping 81 matching lines...) |
312 frame->picture_id = codec_header.pictureId % kPicIdLength; | 314 frame->picture_id = codec_header.pictureId % kPicIdLength; |
313 | 315 |
314 if (last_unwrap_ == -1) | 316 if (last_unwrap_ == -1) |
315 last_unwrap_ = codec_header.pictureId; | 317 last_unwrap_ = codec_header.pictureId; |
316 | 318 |
317 if (last_picture_id_ == -1) | 319 if (last_picture_id_ == -1) |
318 last_picture_id_ = frame->picture_id; | 320 last_picture_id_ = frame->picture_id; |
319 | 321 |
320 // Find if there has been a gap in fully received frames and save the picture | 322 // Find if there has been a gap in fully received frames and save the picture |
321 // id of those frames in |not_yet_received_frames_|. | 323 // id of those frames in |not_yet_received_frames_|. |
322 if (AheadOf<uint8_t, kPicIdLength>(frame->picture_id, last_picture_id_)) { | 324 if (AheadOf<uint16_t, kPicIdLength>(frame->picture_id, last_picture_id_)) { |
323 last_picture_id_ = Add<kPicIdLength>(last_picture_id_, 1); | 325 last_picture_id_ = Add<kPicIdLength>(last_picture_id_, 1); |
324 while (last_picture_id_ != frame->picture_id) { | 326 while (last_picture_id_ != frame->picture_id) { |
325 not_yet_received_frames_.insert(last_picture_id_); | 327 not_yet_received_frames_.insert(last_picture_id_); |
326 last_picture_id_ = Add<kPicIdLength>(last_picture_id_, 1); | 328 last_picture_id_ = Add<kPicIdLength>(last_picture_id_, 1); |
327 } | 329 } |
328 } | 330 } |
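
The gap-detection loop above works entirely in wrapped picture-id arithmetic. A minimal sketch of the same idea, assuming a 15-bit picture-id space (kPicIdLength = 1 << 15) and using simplified stand-ins for the Add<>, ForwardDiff<> and AheadOf<> helpers used in this file (not the real templates):

// Simplified stand-ins for the modular picture-id helpers, plus the
// gap-detection loop from ManageFrameVp8, as a self-contained example.
#include <cstdint>
#include <set>

constexpr uint16_t kPicIdLength = 1 << 15;

uint16_t Add(uint16_t a, uint16_t b) { return (a + b) % kPicIdLength; }

uint16_t ForwardDiff(uint16_t from, uint16_t to) {
  return (to >= from) ? to - from : kPicIdLength - from + to;
}

// |a| is "ahead of" |b| if the forward distance from b to a is the short way.
bool AheadOf(uint16_t a, uint16_t b) {
  return a != b && ForwardDiff(b, a) < kPicIdLength / 2;
}

int main() {
  std::set<uint16_t> not_yet_received_frames;
  uint16_t last_picture_id = kPicIdLength - 2;  // About to wrap.
  const uint16_t new_picture_id = 3;            // Frames in between are missing.
  if (AheadOf(new_picture_id, last_picture_id)) {
    last_picture_id = Add(last_picture_id, 1);
    while (last_picture_id != new_picture_id) {
      not_yet_received_frames.insert(last_picture_id);  // 32767, 0, 1, 2
      last_picture_id = Add(last_picture_id, 1);
    }
  }
  return 0;
}
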
329 | 331 |
330 // Clean up info for base layers that are too old. | 332 // Clean up info for base layers that are too old. |
331 uint8_t old_tl0_pic_idx = codec_header.tl0PicIdx - kMaxLayerInfo; | 333 uint8_t old_tl0_pic_idx = codec_header.tl0PicIdx - kMaxLayerInfo; |
332 auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx); | 334 auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx); |
(...skipping 49 matching lines...) |
382 // Find all references for this frame. | 384 // Find all references for this frame. |
383 frame->num_references = 0; | 385 frame->num_references = 0; |
384 for (uint8_t layer = 0; layer <= codec_header.temporalIdx; ++layer) { | 386 for (uint8_t layer = 0; layer <= codec_header.temporalIdx; ++layer) { |
385 RTC_DCHECK_NE(-1, layer_info_it->second[layer]); | 387 RTC_DCHECK_NE(-1, layer_info_it->second[layer]); |
386 | 388 |
387 // If we have not yet received a frame between this frame and the referenced | 389 // If we have not yet received a frame between this frame and the referenced |
388 // frame then we have to wait for that frame to be completed first. | 390 // frame then we have to wait for that frame to be completed first. |
389 auto not_received_frame_it = | 391 auto not_received_frame_it = |
390 not_yet_received_frames_.upper_bound(layer_info_it->second[layer]); | 392 not_yet_received_frames_.upper_bound(layer_info_it->second[layer]); |
391 if (not_received_frame_it != not_yet_received_frames_.end() && | 393 if (not_received_frame_it != not_yet_received_frames_.end() && |
392 AheadOf<uint8_t, kPicIdLength>(frame->picture_id, | 394 AheadOf<uint16_t, kPicIdLength>(frame->picture_id, |
393 *not_received_frame_it)) { | 395 *not_received_frame_it)) { |
394 stashed_frames_.emplace(std::move(frame)); | 396 stashed_frames_.emplace(std::move(frame)); |
395 return; | 397 return; |
396 } | 398 } |
397 | 399 |
398 ++frame->num_references; | 400 ++frame->num_references; |
399 frame->references[layer] = layer_info_it->second[layer]; | 401 frame->references[layer] = layer_info_it->second[layer]; |
400 } | 402 } |
401 | 403 |
402 CompletedFrameVp8(std::move(frame)); | 404 CompletedFrameVp8(std::move(frame)); |
(...skipping 18 matching lines...) |
421 // update. | 423 // update. |
422 break; | 424 break; |
423 } | 425 } |
424 | 426 |
425 layer_info_it->second[codec_header.temporalIdx] = frame->picture_id; | 427 layer_info_it->second[codec_header.temporalIdx] = frame->picture_id; |
426 ++tl0_pic_idx; | 428 ++tl0_pic_idx; |
427 layer_info_it = layer_info_.find(tl0_pic_idx); | 429 layer_info_it = layer_info_.find(tl0_pic_idx); |
428 } | 430 } |
429 not_yet_received_frames_.erase(frame->picture_id); | 431 not_yet_received_frames_.erase(frame->picture_id); |
430 | 432 |
431 for (size_t r = 0; r < frame->num_references; ++r) | 433 for (size_t i = 0; i < frame->num_references; ++i) |
432 frame->references[r] = UnwrapPictureId(frame->references[r]); | 434 frame->references[i] = UnwrapPictureId(frame->references[i]); |
433 frame->picture_id = UnwrapPictureId(frame->picture_id); | 435 frame->picture_id = UnwrapPictureId(frame->picture_id); |
434 | 436 |
435 frame_callback_->OnCompleteFrame(std::move(frame)); | 437 frame_callback_->OnCompleteFrame(std::move(frame)); |
436 RetryStashedFrames(); | 438 RetryStashedFrames(); |
437 } | 439 } |
438 | 440 |
441 void PacketBuffer::ManageFrameVp9(std::unique_ptr<RtpFrameObject> frame) { | |
442 size_t index = frame->first_seq_num() % size_; | |
443 const VCMPacket& packet = data_buffer_[index]; | |
444 const RTPVideoHeaderVP9& codec_header = | |
445 packet.codecSpecificHeader.codecHeader.VP9; | |
446 | |
447 if (codec_header.picture_id == kNoPictureId) { | |
448 ManageFrameGeneric(std::move(frame)); | |
449 return; | |
450 } | |
451 | |
452 frame->spatial_layer = codec_header.spatial_idx; | |
453 frame->inter_layer_predicted = codec_header.inter_layer_predicted; | |
454 frame->picture_id = codec_header.picture_id % kPicIdLength; | |
455 | |
456 if (last_unwrap_ == -1) | |
457 last_unwrap_ = codec_header.picture_id; | |
458 | |
459 if (last_picture_id_ == -1) | |
460 last_picture_id_ = frame->picture_id; | |
461 | |
462 if (codec_header.flexible_mode) { | |
463 frame->num_references = codec_header.num_ref_pics; | |
464 for (size_t i = 0; i < frame->num_references; ++i) { | |
465 frame->references[i] = Subtract<1 << 16>(frame->picture_id, | |
466 codec_header.pid_diff[i]); | |
467 } | |
468 | |
469 CompletedFrameVp9(std::move(frame)); | |
470 return; | |
471 } | |
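
In flexible mode the reference structure arrives explicitly with each frame as picture-id deltas (pid_diff), so the references are just modular subtractions from the current picture id. A rough sketch with a hypothetical pared-down header struct; note the CL itself subtracts modulo 1 << 16, while this sketch stays in the 15-bit picture-id space for simplicity:

// Minimal sketch of flexible-mode reference resolution with a hypothetical
// header struct; the real code reads RTPVideoHeaderVP9 and fills an
// RtpFrameObject.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr uint16_t kPicIdLength = 1 << 15;

uint16_t Subtract(uint16_t a, uint16_t b) {
  return (a >= b) ? a - b : kPicIdLength - (b - a);
}

struct FlexibleModeHeader {  // Hypothetical, pared down.
  size_t num_ref_pics;
  uint16_t pid_diff[3];      // Deltas back to the referenced frames.
};

int main() {
  const uint16_t picture_id = 1;  // Just after a wrap.
  const FlexibleModeHeader hdr = {2, {1, 3}};
  for (size_t i = 0; i < hdr.num_ref_pics; ++i) {
    // 1 - 1 = 0; 1 - 3 wraps around to 32766.
    std::printf("ref[%zu] = %u\n", i, Subtract(picture_id, hdr.pid_diff[i]));
  }
  return 0;
}
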
472 | |
473 if (codec_header.ss_data_available) { | |
474 // Scalability structures can only be sent with tl0 frames. | |
475 if (codec_header.temporal_idx != 0) { | |
476 LOG(LS_WARNING) << "Received scalability structure on a non base layer" | |
477 " frame. Scalability structure ignored."; | |
478 } else { | |
479 current_ss_idx_ = Add<kMaxGofSaved>(current_ss_idx_, 1); | |
480 scalability_structures_[current_ss_idx_] = codec_header.gof; | |
stefan-webrtc
2016/05/04 09:09:50
If you do
auto inserted = scalability_structures_
philipel
2016/05/04 09:52:26
The member |scalability_structures_| is an std::ar
stefan-webrtc
2016/05/06 11:18:55
Ah, thanks
| |
481 scalability_structures_[current_ss_idx_].pid_start = frame->picture_id; | |
482 | |
483 auto pid_and_gof = std::make_pair(frame->picture_id, | |
484 &scalability_structures_[current_ss_idx_]); | |
stefan-webrtc
2016/05/04 09:09:50
git cl format
philipel
2016/05/04 09:52:26
Done.
| |
485 gof_info_.insert(std::make_pair(codec_header.tl0_pic_idx, pid_and_gof)); | |
486 } | |
487 } | |
488 | |
489 // Clean up info for base layers that are too old. | |
490 uint8_t old_tl0_pic_idx = codec_header.tl0_pic_idx - kMaxGofSaved; | |
491 auto clean_gof_info_to = gof_info_.lower_bound(old_tl0_pic_idx); | |
492 gof_info_.erase(gof_info_.begin(), clean_gof_info_to); | |
493 | |
494 if (packet.frameType == kVideoFrameKey) { | |
495 // When using GOF all keyframes must include the scalability structure. | |
496 if (!codec_header.ss_data_available) | |
497 LOG(LS_WARNING) << "Received keyframe without scalability structure"; | |
498 | |
499 frame->num_references = 0; | |
500 GofInfoVP9* gof = gof_info_.find(codec_header.tl0_pic_idx)->second.second; | |
stefan-webrtc
2016/05/04 09:09:50
It would be nice to clean this up and rename it Go
philipel
2016/05/04 09:52:26
Not sure what you mean.
stefan-webrtc
2016/05/06 11:18:55
I was refering to GofInfoVP9 -> GofInfoVp9.
Feel
| |
501 FrameReceivedVp9(frame->picture_id, *gof); | |
502 CompletedFrameVp9(std::move(frame)); | |
503 return; | |
504 } | |
505 | |
506 auto gof_info_it = gof_info_.find((codec_header.temporal_idx == 0 && | |
507 !codec_header.ss_data_available) | |
508 ? codec_header.tl0_pic_idx - 1 | |
509 : codec_header.tl0_pic_idx); | |
510 | |
511 // Gof info for this frame is not available yet, stash this frame. | |
512 if (gof_info_it == gof_info_.end()) { | |
513 stashed_frames_.emplace(std::move(frame)); | |
514 return; | |
515 } | |
516 | |
517 GofInfoVP9* gof = gof_info_it->second.second; | |
518 uint16_t picture_id_tl0 = gof_info_it->second.first; | |
519 | |
520 FrameReceivedVp9(frame->picture_id, *gof); | |
521 | |
522 // Make sure we don't miss any frame that could potentially have the | |
523 // up switch flag set. | |
524 if (MissingRequiredFrameVp9(frame->picture_id, *gof)) { | |
525 stashed_frames_.emplace(std::move(frame)); | |
526 return; | |
527 } | |
528 | |
529 if (codec_header.temporal_up_switch) { | |
530 auto pid_tidx = std::make_pair(frame->picture_id, | |
531 codec_header.temporal_idx); | |
532 up_switch_.insert(pid_tidx); | |
533 } | |
534 | |
535 // If this is a base layer frame that contains a scalability structure | |
536 // then gof info has already been inserted earlier, so we only want to | |
537 // insert if we haven't done so already. | |
538 if (codec_header.temporal_idx == 0 && | |
539 !codec_header.ss_data_available) { | |
540 auto pid_and_gof = std::make_pair(frame->picture_id, gof); | |
541 gof_info_.insert(std::make_pair(codec_header.tl0_pic_idx, pid_and_gof)); | |
542 } | |
543 | |
544 | |
stefan-webrtc
2016/05/04 09:09:50
Remove empty line
philipel
2016/05/04 09:52:26
Done.
| |
545 // Clean out old info about up switch frames. | |
546 uint16_t old_picture_id = Subtract<kPicIdLength>(last_picture_id_, 50); | |
547 auto up_switch_erase_to = up_switch_.lower_bound(old_picture_id); | |
548 up_switch_.erase(up_switch_.begin(), up_switch_erase_to); | |
549 | |
550 RTC_DCHECK((AheadOrAt<uint16_t, kPicIdLength>(frame->picture_id, | |
551 picture_id_tl0))); | |
552 | |
553 uint8_t diff = ForwardDiff<uint16_t, kPicIdLength>(gof->pid_start, | |
stefan-webrtc
2016/05/04 09:09:50
This is a bit confusing to me. Why is the diff sto
philipel
2016/05/04 09:52:26
You are right that an uint8_t is to small, but the
| |
554 frame->picture_id); | |
555 uint8_t gof_idx = diff % gof->num_frames_in_gof; | |
stefan-webrtc
2016/05/04 09:09:50
Can we make gof_idx a size_t? There's no point in
philipel
2016/05/04 09:52:26
Done.
| |
556 | |
557 // Populate references according to the scalability structure. | |
558 frame->num_references = gof->num_ref_pics[gof_idx]; | |
559 for (size_t i = 0; i < frame->num_references; ++i) { | |
560 frame->references[i] = Subtract<kPicIdLength>(frame->picture_id, | |
561 gof->pid_diff[gof_idx][i]); | |
562 | |
563 // If this is a reference to a frame earlier than the last up switch point, | |
564 // then ignore this reference. | |
565 if (UpSwitchInIntervalVp9(frame->picture_id, | |
566 codec_header.temporal_idx, | |
stefan-webrtc
2016/05/04 09:09:50
This doesn't look correctly formatted.
philipel
2016/05/04 09:52:26
Formated.
| |
567 frame->references[i])) { | |
568 --frame->num_references; | |
569 } | |
570 } | |
571 | |
572 CompletedFrameVp9(std::move(frame)); | |
573 } | |
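
In non-flexible (GOF) mode the references come from the frame's position inside the group of frames: the forward distance from the picture id that carried the scalability structure, taken modulo the GOF length, selects a row of pid_diff entries. A pared-down sketch with a hypothetical MiniGof stand-in for GofInfoVP9:

// Pared-down sketch of GOF-based reference lookup; MiniGof is a hypothetical
// stand-in for GofInfoVP9 and keeps only the fields used here.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr uint16_t kPicIdLength = 1 << 15;

uint16_t ForwardDiff(uint16_t from, uint16_t to) {
  return (to >= from) ? to - from : kPicIdLength - from + to;
}
uint16_t Subtract(uint16_t a, uint16_t b) {
  return (a >= b) ? a - b : kPicIdLength - (b - a);
}

struct MiniGof {
  size_t num_frames_in_gof;
  size_t num_ref_pics[4];
  uint16_t pid_diff[4][2];
  uint16_t pid_start;  // Picture id of the frame that carried the structure.
};

int main() {
  // A two-frame GOF: frames at even offsets reference 2 back, odd offsets 1.
  const MiniGof gof = {2, {1, 1}, {{2}, {1}}, 100};
  const uint16_t picture_id = 103;
  const size_t gof_idx =
      ForwardDiff(gof.pid_start, picture_id) % gof.num_frames_in_gof;
  for (size_t i = 0; i < gof.num_ref_pics[gof_idx]; ++i)
    std::printf("ref = %u\n", Subtract(picture_id, gof.pid_diff[gof_idx][i]));
  return 0;  // Prints "ref = 102".
}
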
574 | |
575 bool PacketBuffer::MissingRequiredFrameVp9(uint16_t picture_id, | |
576 const GofInfoVP9& gof) { | |
577 uint8_t diff = ForwardDiff<uint16_t, kPicIdLength>(gof.pid_start, picture_id); | |
578 uint8_t gof_idx = diff % gof.num_frames_in_gof; | |
579 uint8_t temporal_idx = gof.temporal_idx[gof_idx]; | |
stefan-webrtc
2016/05/04 09:09:50
Should we use int for all of these instead? Just t
philipel
2016/05/04 09:52:26
Changed to size_t.
| |
580 | |
581 // For every reference this frame has, check if there is a frame missing in | |
582 // the intervall (|ref_pid|, |picture_id|) in any of the lower temporal | |
stefan-webrtc
2016/05/04 09:09:50
interval
philipel
2016/05/04 09:52:26
Done.
| |
583 // layers. If so, we are missing a required frame. | |
584 uint8_t num_references = gof.num_ref_pics[gof_idx]; | |
585 for (size_t i = 0; i < num_references; ++i) { | |
586 uint16_t ref_pid = Subtract<kPicIdLength>(picture_id, | |
587 gof.pid_diff[gof_idx][i]); | |
588 for (size_t l = 0; l < temporal_idx; ++l) { | |
589 auto missing_frame_it = missing_frames_for_layer_[l].lower_bound(ref_pid); | |
590 if (missing_frame_it != missing_frames_for_layer_[l].end() && | |
591 AheadOf<uint16_t, kPicIdLength>(picture_id, *missing_frame_it)) { | |
592 return true; | |
593 } | |
594 } | |
595 } | |
596 return false; | |
597 } | |
598 | |
599 void PacketBuffer::FrameReceivedVp9(uint16_t picture_id, | |
600 const GofInfoVP9& gof) { | |
601 RTC_DCHECK_NE(-1, last_picture_id_); | |
602 | |
603 // If there is a gap, find which temporal layers the missing framess | |
stefan-webrtc
2016/05/04 09:09:50
frames
philipel
2016/05/04 09:52:26
Done.
| |
604 // belongs to and add the frame as missing for that temporal layer. | |
stefan-webrtc
2016/05/04 09:09:50
belong to
philipel
2016/05/04 09:52:26
Done.
| |
605 // Otherwise, remove this frame from the set of missing frames. | |
606 if (AheadOf<uint16_t, kPicIdLength>(picture_id, last_picture_id_)) { | |
607 uint8_t diff = ForwardDiff<uint16_t, kPicIdLength>(gof.pid_start, | |
608 last_picture_id_); | |
609 uint8_t gof_idx = diff % gof.num_frames_in_gof; | |
610 | |
611 last_picture_id_ = Add<kPicIdLength>(last_picture_id_, 1); | |
612 while (last_picture_id_ != picture_id) { | |
613 ++gof_idx; | |
614 RTC_DCHECK_NE(0ul, gof_idx % gof.num_frames_in_gof); | |
615 uint8_t temporal_idx = gof.temporal_idx[gof_idx]; | |
616 missing_frames_for_layer_[temporal_idx].insert(last_picture_id_); | |
617 last_picture_id_ = Add<kPicIdLength>(last_picture_id_, 1); | |
618 } | |
619 } else { | |
620 uint8_t diff = ForwardDiff<uint16_t, kPicIdLength>(gof.pid_start, | |
621 picture_id); | |
622 uint8_t gof_idx = diff % gof.num_frames_in_gof; | |
623 uint8_t temporal_idx = gof.temporal_idx[gof_idx]; | |
624 missing_frames_for_layer_[temporal_idx].erase(picture_id); | |
625 } | |
626 } | |
627 | |
628 bool PacketBuffer::UpSwitchInIntervalVp9(uint16_t picture_id, | |
629 uint8_t temporal_idx, | |
630 uint16_t pid_ref) { | |
631 auto up_switch_it = up_switch_.upper_bound(pid_ref); | |
632 while (up_switch_it != up_switch_.end() && | |
stefan-webrtc
2016/05/04 09:09:50
I'd use a for loop here instead
philipel
2016/05/04 09:52:26
Done.
| |
633 AheadOf<uint16_t, kPicIdLength>(picture_id, up_switch_it->first)) { | |
634 if (up_switch_it->second < temporal_idx) | |
635 return true; | |
636 ++up_switch_it; | |
637 } | |
638 | |
639 return false; | |
640 } | |
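
The check above walks the up_switch_ map (picture id -> temporal index of frames that carried the up-switch flag) across the interval (pid_ref, picture_id). A standalone sketch of the same walk, written in the for-loop shape stefan-webrtc asks for, again assuming a 15-bit picture-id space and simplified modular helpers:

// Sketch of the up-switch interval check over a plain std::map.
#include <cstdint>
#include <cstdio>
#include <map>

constexpr uint16_t kPicIdLength = 1 << 15;

uint16_t ForwardDiff(uint16_t from, uint16_t to) {
  return (to >= from) ? to - from : kPicIdLength - from + to;
}
bool AheadOf(uint16_t a, uint16_t b) {
  return a != b && ForwardDiff(b, a) < kPicIdLength / 2;
}

bool UpSwitchInInterval(const std::map<uint16_t, uint8_t>& up_switch,
                        uint16_t picture_id, uint8_t temporal_idx,
                        uint16_t pid_ref) {
  // Walk the up-switch frames strictly after the referenced frame and before
  // this frame; any of them in a lower temporal layer breaks the reference.
  for (auto it = up_switch.upper_bound(pid_ref);
       it != up_switch.end() && AheadOf(picture_id, it->first); ++it) {
    if (it->second < temporal_idx)
      return true;
  }
  return false;
}

int main() {
  std::map<uint16_t, uint8_t> up_switch = {{10, 0}};  // Up-switch at pid 10, TL0.
  std::printf("%d\n", UpSwitchInInterval(up_switch, 12, 2, 8));  // 1: drop ref 8.
  return 0;
}
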
641 | |
642 void PacketBuffer::CompletedFrameVp9(std::unique_ptr<RtpFrameObject> frame) { | |
643 for (size_t i = 0; i < frame->num_references; ++i) | |
644 frame->references[i] = UnwrapPictureId(frame->references[i]); | |
645 frame->picture_id = UnwrapPictureId(frame->picture_id); | |
646 | |
647 frame_callback_->OnCompleteFrame(std::move(frame)); | |
648 RetryStashedFrames(); | |
649 } | |
650 | |
439 uint16_t PacketBuffer::UnwrapPictureId(uint16_t picture_id) { | 651 uint16_t PacketBuffer::UnwrapPictureId(uint16_t picture_id) { |
440 if (last_unwrap_ == -1) | 652 RTC_DCHECK_NE(-1, last_unwrap_); |
441 last_unwrap_ = picture_id; | |
442 | 653 |
443 uint16_t unwrap_truncated = last_unwrap_ % kPicIdLength; | 654 uint16_t unwrap_truncated = last_unwrap_ % kPicIdLength; |
444 uint16_t diff = MinDiff<uint8_t, kPicIdLength>(unwrap_truncated, picture_id); | 655 uint16_t diff = MinDiff<uint16_t, kPicIdLength>(unwrap_truncated, picture_id); |
445 | 656 |
446 if (AheadOf<uint8_t, kPicIdLength>(picture_id, unwrap_truncated)) | 657 if (AheadOf<uint16_t, kPicIdLength>(picture_id, unwrap_truncated)) |
447 last_unwrap_ = Add<1 << 16>(last_unwrap_, diff); | 658 last_unwrap_ = Add<1 << 16>(last_unwrap_, diff); |
448 else | 659 else |
449 last_unwrap_ = Subtract<1 << 16>(last_unwrap_, diff); | 660 last_unwrap_ = Subtract<1 << 16>(last_unwrap_, diff); |
450 | 661 |
451 return last_unwrap_; | 662 return last_unwrap_; |
452 } | 663 } |
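
UnwrapPictureId folds each wrapped 15-bit picture id into the wider running counter last_unwrap_ (kept modulo 1 << 16) by moving it the shortest modular distance forward or backward. A standalone sketch under those assumptions, with simplified stand-ins for MinDiff<> and AheadOf<>:

// Standalone sketch of the unwrap step: the 15-bit picture id is folded into
// a wider running counter by the shortest modular distance.
#include <cstdint>
#include <cstdio>

constexpr uint16_t kPicIdLength = 1 << 15;

uint16_t ForwardDiff(uint16_t from, uint16_t to) {
  return (to >= from) ? to - from : kPicIdLength - from + to;
}
bool AheadOf(uint16_t a, uint16_t b) {
  return a != b && ForwardDiff(b, a) < kPicIdLength / 2;
}
uint16_t MinDiff(uint16_t a, uint16_t b) {
  const uint16_t d1 = ForwardDiff(a, b);
  const uint16_t d2 = ForwardDiff(b, a);
  return d1 < d2 ? d1 : d2;
}

uint16_t UnwrapPictureId(uint16_t picture_id, uint16_t* last_unwrap) {
  const uint16_t truncated = *last_unwrap % kPicIdLength;
  const uint16_t diff = MinDiff(truncated, picture_id);
  if (AheadOf(picture_id, truncated))
    *last_unwrap = static_cast<uint16_t>(*last_unwrap + diff);  // Wraps mod 1 << 16.
  else
    *last_unwrap = static_cast<uint16_t>(*last_unwrap - diff);
  return *last_unwrap;
}

int main() {
  uint16_t last_unwrap = 32766;  // Near the 15-bit wrap point.
  std::printf("%u\n", UnwrapPictureId(3, &last_unwrap));  // 32771, not 3.
  return 0;
}
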
453 | 664 |
454 void PacketBuffer::Flush() { | 665 void PacketBuffer::Flush() { |
455 rtc::CritScope lock(&crit_); | 666 rtc::CritScope lock(&crit_); |
456 for (size_t i = 0; i < size_; ++i) | 667 for (size_t i = 0; i < size_; ++i) |
457 sequence_buffer_[i].used = false; | 668 sequence_buffer_[i].used = false; |
458 | 669 |
459 last_seq_num_gop_.clear(); | 670 last_seq_num_gop_.clear(); |
460 while (!stashed_frames_.empty()) | 671 while (!stashed_frames_.empty()) |
461 stashed_frames_.pop(); | 672 stashed_frames_.pop(); |
462 not_yet_received_frames_.clear(); | 673 not_yet_received_frames_.clear(); |
463 | 674 |
464 first_packet_received_ = false; | 675 first_packet_received_ = false; |
465 } | 676 } |
466 | 677 |
467 } // namespace video_coding | 678 } // namespace video_coding |
468 } // namespace webrtc | 679 } // namespace webrtc |