Chromium Code Reviews

Unified Diff: webrtc/modules/video_coding/frame_buffer2.cc

Issue 2322263002: Frame continuity is now tested as soon as a frame is inserted into the FrameBuffer. (Closed)
Patch Set: Feedback (created 4 years, 2 months ago)
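The one-line issue description above is the core of the change: whether a frame is continuous (every frame it references has been received and is itself continuous) is now computed when the frame is inserted and then propagated to already-buffered dependents, rather than being re-derived inside NextFrame(). The snippet below is a minimal standalone sketch of that propagation idea only; SimpleFrame, Insert() and dependents are illustrative names, not part of the WebRTC API, and the real FrameBuffer keys frames by (picture_id, spatial_layer) and tracks decodability separately, which this sketch leaves out.

// continuity_sketch.cc -- standalone illustration, not WebRTC code.
#include <cstdint>
#include <iostream>
#include <map>
#include <queue>
#include <vector>

struct SimpleFrame {
  int num_missing_continuous = 0;    // references that are not yet continuous
  bool continuous = false;
  std::vector<uint16_t> dependents;  // frames that reference this frame
};

// Insert |id| with its |references|; if every reference is already continuous,
// mark it continuous and propagate to dependents with a BFS, the same idea as
// FrameBuffer::PropagateContinuity in this patch.
void Insert(std::map<uint16_t, SimpleFrame>* frames,
            uint16_t id,
            const std::vector<uint16_t>& references) {
  SimpleFrame& frame = (*frames)[id];
  frame.num_missing_continuous = 0;
  for (uint16_t ref : references) {
    SimpleFrame& ref_frame = (*frames)[ref];  // placeholder if not yet received
    ref_frame.dependents.push_back(id);
    if (!ref_frame.continuous)
      ++frame.num_missing_continuous;
  }
  if (frame.num_missing_continuous > 0)
    return;

  frame.continuous = true;
  std::queue<uint16_t> bfs;
  bfs.push(id);
  while (!bfs.empty()) {
    uint16_t current = bfs.front();
    bfs.pop();
    for (uint16_t dep : (*frames)[current].dependents) {
      SimpleFrame& dep_frame = (*frames)[dep];
      if (--dep_frame.num_missing_continuous == 0 && !dep_frame.continuous) {
        dep_frame.continuous = true;
        bfs.push(dep);
      }
    }
  }
}

int main() {
  std::map<uint16_t, SimpleFrame> frames;
  Insert(&frames, 2, {1});  // arrives before its reference: not continuous yet
  Insert(&frames, 3, {2});  // still blocked behind frame 1
  Insert(&frames, 1, {});   // keyframe arrives: 1, 2 and 3 become continuous
  for (const auto& kv : frames)
    std::cout << kv.first
              << (kv.second.continuous ? ": continuous\n" : ": not continuous\n");
}

Inserting frame 1 last shows why the propagation step is needed: frames 2 and 3 were already buffered and only become continuous once their transitive references are satisfied.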
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/video_coding/frame_buffer2.h"

#include <algorithm>
+#include <cstring>
+#include <queue>

#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_coding/frame_object.h"
+#include "webrtc/base/logging.h"
#include "webrtc/modules/video_coding/jitter_estimator.h"
-#include "webrtc/modules/video_coding/sequence_number_util.h"
#include "webrtc/modules/video_coding/timing.h"
#include "webrtc/system_wrappers/include/clock.h"

namespace webrtc {
namespace video_coding {

namespace {
-// The maximum age of decoded frames tracked by frame buffer, compared to
-// |newest_picture_id_|.
-constexpr int kMaxFrameAge = 4096;
+// Max number of frames the buffer will hold.
+constexpr int kMaxFramesBuffered = 600;

-// The maximum number of decoded frames being tracked by the frame buffer.
-constexpr int kMaxNumHistoryFrames = 256;
+// Max number of decoded frame info that will be saved.
+constexpr int kMaxFramesHistory = 20;
}  // namespace

-bool FrameBuffer::FrameComp::operator()(const FrameKey& f1,
-                                        const FrameKey& f2) const {
-  // first = picture id
-  // second = spatial layer
-  if (f1.first == f2.first)
-    return f1.second < f2.second;
-  return AheadOf(f2.first, f1.first);
-}
-
FrameBuffer::FrameBuffer(Clock* clock,
                         VCMJitterEstimator* jitter_estimator,
                         VCMTiming* timing)
    : clock_(clock),
-      frame_inserted_event_(false, false),
+      new_countinuous_frame_event_(false, false),
      jitter_estimator_(jitter_estimator),
      timing_(timing),
      inter_frame_delay_(clock_->TimeInMilliseconds()),
-      newest_picture_id_(-1),
+      last_decoded_frame_it_(frames_.end()),
+      last_continuous_frame_it_(frames_.end()),
+      num_frames_history_(0),
+      num_frames_buffered_(0),
      stopped_(false),
      protection_mode_(kProtectionNack) {}

FrameBuffer::ReturnReason FrameBuffer::NextFrame(
    int64_t max_wait_time_ms,
    std::unique_ptr<FrameObject>* frame_out) {
  int64_t latest_return_time = clock_->TimeInMilliseconds() + max_wait_time_ms;
-  int64_t now = clock_->TimeInMilliseconds();
  int64_t wait_ms = max_wait_time_ms;
-  while (true) {
-    std::map<FrameKey, std::unique_ptr<FrameObject>, FrameComp>::iterator
-        next_frame_it;
+  FrameMap::iterator next_frame_it;
+
+  do {
+    int64_t now_ms = clock_->TimeInMilliseconds();
    {
      rtc::CritScope lock(&crit_);
-      frame_inserted_event_.Reset();
+      new_countinuous_frame_event_.Reset();
      if (stopped_)
        return kStopped;

-      now = clock_->TimeInMilliseconds();
      wait_ms = max_wait_time_ms;
+
+      // Need to hold |crit_| in order to use |frames_|, therefore we
+      // set it here in the loop instead of outside the loop in order to not
+      // acquire the lock unnecesserily.
      next_frame_it = frames_.end();
-      for (auto frame_it = frames_.begin(); frame_it != frames_.end();
-           ++frame_it) {
-        const FrameObject& frame = *frame_it->second;
-        if (IsContinuous(frame)) {
-          next_frame_it = frame_it;
-          int64_t render_time =
-              next_frame_it->second->RenderTime() == -1
-                  ? timing_->RenderTimeMs(frame.timestamp, now)
-                  : next_frame_it->second->RenderTime();
-          wait_ms = timing_->MaxWaitingTime(render_time, now);
-          frame_it->second->SetRenderTime(render_time);

-          // This will cause the frame buffer to prefer high framerate rather
-          // than high resolution in the case of the decoder not decoding fast
-          // enough and the stream has multiple spatial and temporal layers.
-          if (wait_ms == 0)
-            continue;
+      // |frame_it| points to the first frame after the
+      // |last_decoded_frame_it_|.
+      auto frame_it = frames_.end();
+      if (last_decoded_frame_it_ == frames_.end()) {
+        frame_it = frames_.begin();
+      } else {
+        frame_it = last_decoded_frame_it_;
+        ++frame_it;
+      }

-          break;
-        }
+      // |continuous_end_it| points to the first frame after the
+      // |last_continuous_frame_it_|.
+      auto continuous_end_it = last_continuous_frame_it_;
+      if (continuous_end_it != frames_.end())
+        ++continuous_end_it;
+
+      for (; frame_it != continuous_end_it; ++frame_it) {
+        if (frame_it->second.num_missing_decodable > 0)
+          continue;
+
+        FrameObject* frame = frame_it->second.frame.get();
+        next_frame_it = frame_it;
+        if (frame->RenderTime() == -1)
+          frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
+        wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms);
+
+        // This will cause the frame buffer to prefer high framerate rather
+        // than high resolution in the case of the decoder not decoding fast
+        // enough and the stream has multiple spatial and temporal layers.
+        if (wait_ms == 0)
+          continue;
+
+        break;
      }
+    }  // rtc::Critscope lock(&crit_);
+
+    wait_ms = std::min<int64_t>(wait_ms, latest_return_time - now_ms);
+    wait_ms = std::max<int64_t>(wait_ms, 0);
+  } while (new_countinuous_frame_event_.Wait(wait_ms));
+
+  rtc::CritScope lock(&crit_);
+  if (next_frame_it != frames_.end()) {
+    std::unique_ptr<FrameObject> frame = std::move(next_frame_it->second.frame);
+    int64_t received_time = frame->ReceivedTime();
+    uint32_t timestamp = frame->Timestamp();
+
+    int64_t frame_delay;
+    if (inter_frame_delay_.CalculateDelay(timestamp, &frame_delay,
+                                          received_time)) {
+      jitter_estimator_->UpdateEstimate(frame_delay, frame->size);
    }
+    float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0;
+    timing_->SetJitterDelay(jitter_estimator_->GetJitterEstimate(rtt_mult));
+    timing_->UpdateCurrentDelay(frame->RenderTime(),
+                                clock_->TimeInMilliseconds());

-    wait_ms = std::min<int64_t>(wait_ms, latest_return_time - now);
-    wait_ms = std::max<int64_t>(wait_ms, 0);
-    // If the timeout occurs, return. Otherwise a new frame has been inserted
-    // and the best frame to decode next will be selected again.
-    if (!frame_inserted_event_.Wait(wait_ms)) {
-      rtc::CritScope lock(&crit_);
-      if (next_frame_it != frames_.end()) {
-        int64_t received_timestamp = next_frame_it->second->ReceivedTime();
-        uint32_t timestamp = next_frame_it->second->Timestamp();
-
-        int64_t frame_delay;
-        if (inter_frame_delay_.CalculateDelay(timestamp, &frame_delay,
-                                              received_timestamp)) {
-          jitter_estimator_->UpdateEstimate(frame_delay,
-                                            next_frame_it->second->size);
-        }
-        float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0;
-        timing_->SetJitterDelay(jitter_estimator_->GetJitterEstimate(rtt_mult));
-        timing_->UpdateCurrentDelay(next_frame_it->second->RenderTime(),
-                                    clock_->TimeInMilliseconds());
-
-        decoded_frames_.insert(next_frame_it->first);
-        std::unique_ptr<FrameObject> frame = std::move(next_frame_it->second);
-        frames_.erase(frames_.begin(), ++next_frame_it);
-        *frame_out = std::move(frame);
-        return kFrameFound;
-      } else {
-        return kTimeout;
-      }
-    }
+    PropagateDecodability(next_frame_it->second);
+    AdvanceLastDecodedFrame(next_frame_it);
+    *frame_out = std::move(frame);
+    return kFrameFound;
+  } else {
+    return kTimeout;
  }
}

void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) {
  rtc::CritScope lock(&crit_);
  protection_mode_ = mode;
}

void FrameBuffer::Start() {
  rtc::CritScope lock(&crit_);
  stopped_ = false;
}

void FrameBuffer::Stop() {
  rtc::CritScope lock(&crit_);
  stopped_ = true;
-  frame_inserted_event_.Set();
+  new_countinuous_frame_event_.Set();
}

-void FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
+int FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
  rtc::CritScope lock(&crit_);
-  // If |newest_picture_id_| is -1 then this is the first frame we received.
-  if (newest_picture_id_ == -1)
-    newest_picture_id_ = frame->picture_id;
+  FrameKey key(frame->picture_id, frame->spatial_layer);
+  int last_continuous_picture_id =
+      last_continuous_frame_it_ == frames_.end()
+          ? -1
+          : last_continuous_frame_it_->first.picture_id;

-  if (AheadOf<uint16_t>(frame->picture_id, newest_picture_id_))
-    newest_picture_id_ = frame->picture_id;
+  if (num_frames_buffered_ >= kMaxFramesBuffered) {
+    LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" << key.picture_id
+                    << ":" << static_cast<int>(key.spatial_layer)
+                    << ") could not be inserted due to the frame "
+                    << "buffer being full, dropping frame.";
+    return last_continuous_picture_id;
+  }

-  // Remove frames as long as we have too many, |kMaxNumHistoryFrames|.
-  while (decoded_frames_.size() > kMaxNumHistoryFrames)
-    decoded_frames_.erase(decoded_frames_.begin());
+  if (frame->inter_layer_predicted && frame->spatial_layer == 0) {
+    LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" << key.picture_id
+                    << ":" << static_cast<int>(key.spatial_layer)
+                    << ") is marked as inter layer predicted, dropping frame.";
+    return last_continuous_picture_id;
+  }

-  // Remove frames that are too old.
-  uint16_t old_picture_id = Subtract<1 << 16>(newest_picture_id_, kMaxFrameAge);
-  auto old_decoded_it =
-      decoded_frames_.lower_bound(FrameKey(old_picture_id, 0));
-  decoded_frames_.erase(decoded_frames_.begin(), old_decoded_it);
+  if (last_decoded_frame_it_ != frames_.end() &&
+      key < last_decoded_frame_it_->first) {
+    LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" << key.picture_id
+                    << ":" << static_cast<int>(key.spatial_layer)
+                    << ") inserted after frame ("
+                    << last_decoded_frame_it_->first.picture_id << ":"
+                    << static_cast<int>(
+                           last_decoded_frame_it_->first.spatial_layer)
+                    << ") was handed off for decoding, dropping frame.";
+    return last_continuous_picture_id;
+  }

-  FrameKey key(frame->picture_id, frame->spatial_layer);
-  frames_[key] = std::move(frame);
-  frame_inserted_event_.Set();
+  auto info = frames_.insert(std::make_pair(key, FrameInfo())).first;
+
+  if (!UpdateFrameInfoWithIncomingFrame(*frame, info)) {
+    frames_.erase(info);
+    return last_continuous_picture_id;
+  }
+
+  info->second.frame = std::move(frame);
+  ++num_frames_buffered_;
+
+  if (info->second.num_missing_continuous == 0) {
+    info->second.continuous = true;
+    PropagateContinuity(info);
+    last_continuous_picture_id = last_continuous_frame_it_->first.picture_id;
+
+    // Since we now have new continuous frames there might be a better frame
+    // to return from NextFrame. Signal that thread so that it again can choose
+    // which frame to return.
+    new_countinuous_frame_event_.Set();
+  }
+
+  return last_continuous_picture_id;
}

-bool FrameBuffer::IsContinuous(const FrameObject& frame) const {
-  // If a frame with an earlier picture id was inserted compared to the last
-  // decoded frames picture id then that frame arrived too late.
-  if (!decoded_frames_.empty() &&
-      AheadOf(decoded_frames_.rbegin()->first, frame.picture_id)) {
-    return false;
+void FrameBuffer::PropagateContinuity(FrameMap::iterator start) {
+  RTC_DCHECK(start->second.continuous);
+  if (last_continuous_frame_it_ == frames_.end())
+    last_continuous_frame_it_ = start;
+
+  std::queue<FrameMap::iterator> continuous_frames;
+  continuous_frames.push(start);
+
+  // A simple BFS to traverse continuous frames.
+  while (!continuous_frames.empty()) {
+    auto frame = continuous_frames.front();
+    continuous_frames.pop();
+
+    if (last_continuous_frame_it_->first < frame->first)
+      last_continuous_frame_it_ = frame;
+
+    // Loop through all dependent frames, and if that frame no longer has
+    // any unfulfilled dependencies then that frame is continuous as well.
+    for (size_t d = 0; d < frame->second.num_dependent_frames; ++d) {
+      auto frame_ref = frames_.find(frame->second.dependent_frames[d]);
+      --frame_ref->second.num_missing_continuous;
+
+      if (frame_ref->second.num_missing_continuous == 0) {
+        frame_ref->second.continuous = true;
+        continuous_frames.push(frame_ref);
+      }
+    }
+  }
+}
+
+void FrameBuffer::PropagateDecodability(const FrameInfo& info) {
+  for (size_t d = 0; d < info.num_dependent_frames; ++d) {
+    auto ref_info = frames_.find(info.dependent_frames[d]);
+    RTC_DCHECK_GT(ref_info->second.num_missing_decodable, 0U);
+    --ref_info->second.num_missing_decodable;
+  }
+}
+
+void FrameBuffer::AdvanceLastDecodedFrame(FrameMap::iterator decoded) {
+  if (last_decoded_frame_it_ == frames_.end()) {
+    last_decoded_frame_it_ = frames_.begin();
+  } else {
+    RTC_DCHECK(last_decoded_frame_it_->first < decoded->first);
+    ++last_decoded_frame_it_;
+  }
+  --num_frames_buffered_;
+  ++num_frames_history_;
+
+  // First, delete non-decoded frames from the history.
+  while (last_decoded_frame_it_ != decoded) {
+    if (last_decoded_frame_it_->second.frame)
+      --num_frames_buffered_;
+    last_decoded_frame_it_ = frames_.erase(last_decoded_frame_it_);
  }

-  // Have we decoded all frames that this frame depend on?
-  for (size_t r = 0; r < frame.num_references; ++r) {
-    FrameKey ref_key(frame.references[r], frame.spatial_layer);
-    if (decoded_frames_.find(ref_key) == decoded_frames_.end())
-      return false;
+  // Then remove old history if we have too much history saved.
+  if (num_frames_history_ > kMaxFramesHistory) {
+    frames_.erase(frames_.begin());
+    --num_frames_history_;
+  }
+}
+
+bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const FrameObject& frame,
+                                                   FrameMap::iterator info) {
+  FrameKey key(frame.picture_id, frame.spatial_layer);
+  info->second.num_missing_continuous = frame.num_references;
+  info->second.num_missing_decodable = frame.num_references;
+
+  RTC_DCHECK(last_decoded_frame_it_ == frames_.end() ||
+             last_decoded_frame_it_->first < info->first);
+
+  // Check how many dependencies that have already been fulfilled.
+  for (size_t i = 0; i < frame.num_references; ++i) {
+    FrameKey ref_key(frame.references[i], frame.spatial_layer);
+    auto ref_info = frames_.find(ref_key);
+
+    // Does |frame| depend on a frame earlier than the last decoded frame?
+    if (last_decoded_frame_it_ != frames_.end() &&
+        ref_key <= last_decoded_frame_it_->first) {
+      if (ref_info == frames_.end()) {
+        LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
+                        << key.picture_id << ":"
+                        << static_cast<int>(key.spatial_layer)
+                        << " depends on a non-decoded frame more previous than "
+                        << "the last decoded frame, dropping frame.";
+        return false;
+      }
+
+      --info->second.num_missing_continuous;
+      --info->second.num_missing_decodable;
+    } else {
+      if (ref_info == frames_.end())
+        ref_info = frames_.insert(std::make_pair(ref_key, FrameInfo())).first;
+
+      if (ref_info->second.continuous)
+        --info->second.num_missing_continuous;
+
+      // Add backwards reference so |frame| can be updated when new
+      // frames are inserted or decoded.
+      ref_info->second.dependent_frames[ref_info->second.num_dependent_frames] =
+          key;
+      ++ref_info->second.num_dependent_frames;
+    }
  }

-  // If this is a layer frame, have we decoded the lower layer of this
-  // super frame.
+  // Check if we have the lower spatial layer frame.
  if (frame.inter_layer_predicted) {
-    RTC_DCHECK_GT(frame.spatial_layer, 0);
+    ++info->second.num_missing_continuous;
+    ++info->second.num_missing_decodable;
+
    FrameKey ref_key(frame.picture_id, frame.spatial_layer - 1);
-    if (decoded_frames_.find(ref_key) == decoded_frames_.end())
-      return false;
+    // Gets or create the FrameInfo for the referenced frame.
stefan-webrtc 2016/09/27 14:11:40: Get or create
+    auto ref_info = frames_.insert(std::make_pair(ref_key, FrameInfo())).first;
+    if (ref_info->second.continuous)
+      --info->second.num_missing_continuous;
+
+    if (ref_info == last_decoded_frame_it_) {
+      --info->second.num_missing_decodable;
+    } else {
+      ref_info->second.dependent_frames[ref_info->second.num_dependent_frames] =
+          key;
+      ++ref_info->second.num_dependent_frames;
+    }
  }

  return true;
}

}  // namespace video_coding
}  // namespace webrtc
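For reviewers looking at the new int return value of InsertFrame() and the renamed event, here is a rough picture of how a caller is expected to drive the buffer, sketched against only the signatures visible in this patch: NextFrame() blocks on the decoder thread until a decodable frame is ready, the wait times out, or the buffer is stopped, while InsertFrame() on the packet thread reports how far the stream is now continuous. DecodeFrame(), RequestKeyFrame() and FrameContinuous() are hypothetical placeholders, not functions from this CL.

#include <memory>

#include "webrtc/modules/video_coding/frame_buffer2.h"
#include "webrtc/modules/video_coding/frame_object.h"

using webrtc::video_coding::FrameBuffer;
using webrtc::video_coding::FrameObject;

// Hypothetical hooks, declared only so the sketch is self-contained.
void DecodeFrame(std::unique_ptr<FrameObject> frame);
void RequestKeyFrame();
void FrameContinuous(int picture_id);

// Decoder-thread side: keep asking the buffer for the next decodable frame.
void DecodeLoop(FrameBuffer* frame_buffer) {
  constexpr int64_t kMaxWaitTimeMs = 3000;  // arbitrary value for this sketch
  while (true) {
    std::unique_ptr<FrameObject> frame;
    switch (frame_buffer->NextFrame(kMaxWaitTimeMs, &frame)) {
      case FrameBuffer::kFrameFound:
        DecodeFrame(std::move(frame));
        break;
      case FrameBuffer::kTimeout:
        RequestKeyFrame();  // nothing became decodable within the wait budget
        break;
      case FrameBuffer::kStopped:
        return;  // Stop() was called
    }
  }
}

// Packet-thread side: InsertFrame() now returns the picture id of the last
// continuous frame (-1 if there is none yet) instead of returning void.
void OnCompleteFrame(FrameBuffer* frame_buffer,
                     std::unique_ptr<FrameObject> frame) {
  int last_continuous_picture_id =
      frame_buffer->InsertFrame(std::move(frame));
  if (last_continuous_picture_id != -1)
    FrameContinuous(last_continuous_picture_id);  // e.g. clean up NACK state
}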