OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
| 11 #include "webrtc/modules/video_coding/frame_buffer2.h" |
| 12 |
| 13 #include <algorithm> |
| 14 |
| 15 #include "webrtc/base/checks.h" |
| 16 #include "webrtc/modules/video_coding/frame_object.h" |
| 17 #include "webrtc/modules/video_coding/jitter_estimator.h" |
| 18 #include "webrtc/modules/video_coding/sequence_number_util.h" |
| 19 #include "webrtc/modules/video_coding/timing.h" |
| 20 #include "webrtc/system_wrappers/include/clock.h" |
| 21 |
| 22 namespace webrtc { |
| 23 namespace video_coding { |
| 24 |
| 25 namespace { |
| 26 // The maximum age of decoded frames tracked by the frame buffer, relative |
| 27 // to |newest_picture_id_|. |
| 28 constexpr int kMaxFrameAge = 4096; |
| 29 |
| 30 // The maximum number of decoded frames being tracked by the frame buffer. |
| 31 constexpr int kMaxNumHistoryFrames = 256; |
| 32 |
| 33 // The maximum number of spatial layers. |
| 34 constexpr int kMaxSpatialLayers = 5; |
| 35 } // namespace |
| 36 |
| 37 bool FrameBuffer::FrameComp::operator()(const FrameKey& f1, |
| 38 const FrameKey& f2) const { |
| 39 // first = picture id |
| 40 // second = spatial layer |
| 41 if (f1.first == f2.first) |
| 42 return f1.second < f2.second; |
| 43 return AheadOf(f2.first, f1.first); |
| 44 } |
| 45 |
| 46 FrameBuffer::FrameBuffer(Clock* clock, |
| 47 VCMJitterEstimator* jitter_estimator, |
| 48 const VCMTiming* timing) |
| 49 : clock_(clock), |
| 50 frame_inserted_event_(false, false), |
| 51 jitter_estimator_(jitter_estimator), |
| 52 timing_(timing), |
| 53 newest_picture_id_(-1) {} |
| 54 |
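| // Waits at most |max_wait_time_ms| for a continuous frame to become |
| // available and returns it, or an empty unique_ptr on timeout. The |
| // returned frame and all older frames are removed from |frames_|, and |
| // its key is added to |decoded_frames_|. |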
| 55 std::unique_ptr<FrameObject> FrameBuffer::NextFrame(int64_t max_wait_time_ms) { |
| 56 int64_t latest_return_time = clock_->TimeInMilliseconds() + max_wait_time_ms; |
| 57 while (true) { |
| 58 int64_t now = clock_->TimeInMilliseconds(); |
| 59 int64_t wait_ms = max_wait_time_ms; |
| 60 |
| 61 crit_.Enter(); |
| 62 frame_inserted_event_.Reset(); |
| 63 auto next_frame = frames_.end(); |
| 64 for (auto frame_it = frames_.begin(); frame_it != frames_.end(); |
| 65 ++frame_it) { |
| 66 const FrameObject& frame = *frame_it->second; |
| 67 if (IsContinuous(frame)) { |
| 68 next_frame = frame_it; |
| 69 int64_t render_time = timing_->RenderTimeMs(frame.timestamp, now); |
| 70 wait_ms = timing_->MaxWaitingTime(render_time, now); |
| 71 |
| 72 // This makes the frame buffer prefer high framerate over high |
| 73 // resolution when the decoder cannot keep up and the stream has |
| 74 // multiple spatial and temporal layers. |
| 75 if (wait_ms == 0) |
| 76 continue; |
| 77 |
| 78 break; |
| 79 } |
| 80 } |
| 81 crit_.Leave(); |
| 82 |
| 83 // If the timeout occurs, return. Otherwise a new frame has been inserted |
| 84 // and the best frame to decode next will be selected again. |
| 85 wait_ms = std::min<int64_t>(wait_ms, latest_return_time - now); |
| 86 wait_ms = std::max<int64_t>(wait_ms, 0); |
| 87 if (!frame_inserted_event_.Wait(wait_ms)) { |
| 88 crit_.Enter(); |
| 89 if (next_frame != frames_.end()) { |
| 90 // TODO(philipel): update jitter estimator with correct values. |
| 91 jitter_estimator_->UpdateEstimate(100, 100); |
| 92 |
| 93 decoded_frames_.insert(next_frame->first); |
| 94 std::unique_ptr<FrameObject> frame = std::move(next_frame->second); |
| 95 frames_.erase(frames_.begin(), ++next_frame); |
| 96 crit_.Leave(); |
| 97 return frame; |
| 98 } else { |
| 99 crit_.Leave(); |
| 100 return std::unique_ptr<FrameObject>(); |
| 101 } |
| 102 } |
| 103 } |
| 104 } |
| 105 |
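| // Inserts |frame| into the buffer, updates |newest_picture_id_|, prunes |
| // old entries from |decoded_frames_|, and signals |frame_inserted_event_| |
| // so that a pending NextFrame() call can re-evaluate. |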
| 106 void FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) { |
| 107 rtc::CritScope lock(&crit_); |
| 108 if (newest_picture_id_ == -1) |
| 109 newest_picture_id_ = frame->picture_id; |
| 110 |
| 111 if (AheadOf<uint16_t>(frame->picture_id, newest_picture_id_)) |
| 112 newest_picture_id_ = frame->picture_id; |
| 113 |
| 114 // Remove the oldest decoded frames until at most |kMaxNumHistoryFrames| |
| 115 // remain. |
| 116 while (decoded_frames_.size() > kMaxNumHistoryFrames) |
| 117 decoded_frames_.erase(decoded_frames_.begin()); |
| 117 |
| 118 // Remove decoded frames that are older than |kMaxFrameAge| relative to |
| 119 // |newest_picture_id_|. |
| 119 uint16_t old_picture_id = Subtract<1 << 16>(newest_picture_id_, kMaxFrameAge); |
| 120 auto old_decoded_it = |
| 121 decoded_frames_.lower_bound(FrameKey(old_picture_id, kMaxSpatialLayers)); |
| 122 decoded_frames_.erase(decoded_frames_.begin(), old_decoded_it); |
| 123 |
| 124 FrameKey key(frame->picture_id, frame->spatial_layer); |
| 125 frames_[key] = std::move(frame); |
| 126 frame_inserted_event_.Set(); |
| 127 } |
| 128 |
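| // A frame is continuous if every frame it depends on (its referenced |
| // frames and, for inter-layer prediction, the frame with the same picture |
| // id in the spatial layer below) has already been decoded. |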
| 129 bool FrameBuffer::IsContinuous(const FrameObject& frame) const { |
| 130 // If a frame with an earlier picture id than the last decoded frame's |
| 131 // picture id was inserted, then that frame arrived too late. |
| 132 if (!decoded_frames_.empty() && |
| 133 AheadOf(decoded_frames_.rbegin()->first, frame.picture_id)) { |
| 134 return false; |
| 135 } |
| 136 |
| 137 for (size_t r = 0; r < frame.num_references; ++r) { |
| 138 FrameKey ref_key(frame.references[r], frame.spatial_layer); |
| 139 if (decoded_frames_.find(ref_key) == decoded_frames_.end()) |
| 140 return false; |
| 141 } |
| 142 |
| 143 if (frame.inter_layer_predicted) { |
| 144 RTC_DCHECK_GT(frame.spatial_layer, 0); |
| 145 FrameKey ref_key(frame.picture_id, frame.spatial_layer - 1); |
| 146 if (decoded_frames_.find(ref_key) == decoded_frames_.end()) |
| 147 return false; |
| 148 } |
| 149 |
| 150 return true; |
| 151 } |
| 152 |
| 153 } // namespace video_coding |
| 154 } // namespace webrtc |