OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/video_coding/frame_buffer2.h" | 11 #include "webrtc/modules/video_coding/frame_buffer2.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <cstring> | |
15 #include <queue> | |
14 | 16 |
15 #include "webrtc/base/checks.h" | 17 #include "webrtc/base/checks.h" |
16 #include "webrtc/modules/video_coding/frame_object.h" | 18 #include "webrtc/base/logging.h" |
17 #include "webrtc/modules/video_coding/jitter_estimator.h" | 19 #include "webrtc/modules/video_coding/jitter_estimator.h" |
18 #include "webrtc/modules/video_coding/sequence_number_util.h" | |
19 #include "webrtc/modules/video_coding/timing.h" | 20 #include "webrtc/modules/video_coding/timing.h" |
20 #include "webrtc/system_wrappers/include/clock.h" | 21 #include "webrtc/system_wrappers/include/clock.h" |
21 | 22 |
22 namespace webrtc { | 23 namespace webrtc { |
23 namespace video_coding { | 24 namespace video_coding { |
24 | 25 |
25 namespace { | 26 namespace { |
26 // The maximum age of decoded frames tracked by frame buffer, compared to | 27 // Max number of frames the buffer will hold. |
27 // |newest_picture_id_|. | 28 constexpr int kMaxFramesBuffered = 1000; |
> stefan-webrtc (2016/09/21 14:50:14): Old jitter buffer had this limited to 300.
> philipel (2016/09/22 11:18:38): I think that is a bit low, at 60 fps that is 5 sec
28 constexpr int kMaxFrameAge = 4096; | |
29 | 29 |
30 // The maximum number of decoded frames being tracked by the frame buffer. | 30 // Max number of decoded frame info that will be saved. |
31 constexpr int kMaxNumHistoryFrames = 256; | 31 constexpr int kMaxFramesHistory = 20; |
32 } // namespace | 32 } // namespace |
33 | 33 |
34 bool FrameBuffer::FrameComp::operator()(const FrameKey& f1, | |
35 const FrameKey& f2) const { | |
36 // first = picture id | |
37 // second = spatial layer | |
38 if (f1.first == f2.first) | |
39 return f1.second < f2.second; | |
40 return AheadOf(f2.first, f1.first); | |
41 } | |
42 | |
43 FrameBuffer::FrameBuffer(Clock* clock, | 34 FrameBuffer::FrameBuffer(Clock* clock, |
44 VCMJitterEstimator* jitter_estimator, | 35 VCMJitterEstimator* jitter_estimator, |
45 VCMTiming* timing) | 36 VCMTiming* timing) |
46 : clock_(clock), | 37 : clock_(clock), |
47 frame_inserted_event_(false, false), | 38 new_countinuous_frame_event_(false, false), |
48 jitter_estimator_(jitter_estimator), | 39 jitter_estimator_(jitter_estimator), |
49 timing_(timing), | 40 timing_(timing), |
50 inter_frame_delay_(clock_->TimeInMilliseconds()), | 41 inter_frame_delay_(clock_->TimeInMilliseconds()), |
51 newest_picture_id_(-1), | 42 last_decoded_frame_it_(frames_.end()), |
43 last_continuous_frame_it_(frames_.end()), | |
44 num_frames_history_(0), | |
45 num_frames_buffered_(0), | |
52 stopped_(false), | 46 stopped_(false), |
53 protection_mode_(kProtectionNack) {} | 47 protection_mode_(kProtectionNack) {} |
54 | 48 |
55 FrameBuffer::ReturnReason FrameBuffer::NextFrame( | 49 FrameBuffer::ReturnReason FrameBuffer::NextFrame( |
56 int64_t max_wait_time_ms, | 50 int64_t max_wait_time_ms, |
57 std::unique_ptr<FrameObject>* frame_out) { | 51 std::unique_ptr<FrameObject>* frame_out) { |
58 int64_t latest_return_time = clock_->TimeInMilliseconds() + max_wait_time_ms; | 52 int64_t latest_return_time = clock_->TimeInMilliseconds() + max_wait_time_ms; |
59 int64_t now = clock_->TimeInMilliseconds(); | |
60 int64_t wait_ms = max_wait_time_ms; | 53 int64_t wait_ms = max_wait_time_ms; |
61 while (true) { | 54 FrameMap::iterator next_frame_it; |
62 std::map<FrameKey, std::unique_ptr<FrameObject>, FrameComp>::iterator | 55 |
63 next_frame_it; | 56 do { |
57 int64_t now_ms = clock_->TimeInMilliseconds(); | |
64 { | 58 { |
65 rtc::CritScope lock(&crit_); | 59 rtc::CritScope lock(&crit_); |
66 frame_inserted_event_.Reset(); | 60 new_countinuous_frame_event_.Reset(); |
67 if (stopped_) | 61 if (stopped_) |
68 return kStopped; | 62 return kStopped; |
69 | 63 |
70 now = clock_->TimeInMilliseconds(); | |
71 wait_ms = max_wait_time_ms; | 64 wait_ms = max_wait_time_ms; |
65 | |
66 // Need to hold |crit_| in order to use |frames_|, therefore we | |
67 // set it here in the loop instead of outside the loop in order to not | |
68 // acquire the lock unnecesserily. | |
72 next_frame_it = frames_.end(); | 69 next_frame_it = frames_.end(); |
73 for (auto frame_it = frames_.begin(); frame_it != frames_.end(); | |
74 ++frame_it) { | |
75 const FrameObject& frame = *frame_it->second; | |
76 if (IsContinuous(frame)) { | |
77 next_frame_it = frame_it; | |
78 int64_t render_time = | |
79 next_frame_it->second->RenderTime() == -1 | |
80 ? timing_->RenderTimeMs(frame.timestamp, now) | |
81 : next_frame_it->second->RenderTime(); | |
82 wait_ms = timing_->MaxWaitingTime(render_time, now); | |
83 frame_it->second->SetRenderTime(render_time); | |
84 | 70 |
85 // This will cause the frame buffer to prefer high framerate rather | 71 // |frame_it| points to the first frame after the |
86 // than high resolution in the case of the decoder not decoding fast | 72 // |last_decoded_frame_it_|. |
87 // enough and the stream has multiple spatial and temporal layers. | 73 auto frame_it = frames_.end(); |
88 if (wait_ms == 0) | 74 if (last_decoded_frame_it_ == frames_.end()) { |
89 continue; | 75 frame_it = frames_.begin(); |
76 } else { | |
77 frame_it = last_decoded_frame_it_; | |
78 ++frame_it; | |
79 } | |
90 | 80 |
91 break; | 81 // |continuous_end_it| point to the first frame after the |
> stefan-webrtc (2016/09/21 14:50:13): points
> philipel (2016/09/22 11:18:38): Done.
92 } | 82 // |last_continuous_frame_it_|. |
83 auto continuous_end_it = last_continuous_frame_it_; | |
84 if (continuous_end_it != frames_.end()) | |
85 ++continuous_end_it; | |
86 | |
87 for (; frame_it != continuous_end_it; ++frame_it) { | |
88 if (frame_it->second.num_missing_decodable > 0) | |
89 continue; | |
90 | |
91 FrameObject* frame = frame_it->second.frame.get(); | |
92 next_frame_it = frame_it; | |
93 int64_t render_time = | |
> danilchap (2016/09/20 13:45:25): may be shorten this block a bit: if (frame->Render
> philipel (2016/09/22 11:18:38): Done.
94 frame->RenderTime() == -1 | |
95 ? timing_->RenderTimeMs(frame->timestamp, now_ms) | |
96 : frame->RenderTime(); | |
97 wait_ms = timing_->MaxWaitingTime(render_time, now_ms); | |
98 frame->SetRenderTime(render_time); | |
99 | |
100 // This will cause the frame buffer to prefer high framerate rather | |
101 // than high resolution in the case of the decoder not decoding fast | |
102 // enough and the stream has multiple spatial and temporal layers. | |
103 if (wait_ms == 0) | |
104 continue; | |
105 | |
106 break; | |
93 } | 107 } |
108 } // rtc::Critscope lock(&crit_); | |
109 | |
110 wait_ms = std::min<int64_t>(wait_ms, latest_return_time - now_ms); | |
111 wait_ms = std::max<int64_t>(wait_ms, 0); | |
112 } while (new_countinuous_frame_event_.Wait(wait_ms)); | |
113 | |
114 rtc::CritScope lock(&crit_); | |
115 if (next_frame_it != frames_.end()) { | |
116 std::unique_ptr<FrameObject> frame = std::move(next_frame_it->second.frame); | |
117 int64_t received_timestamp = frame->ReceivedTime(); | |
> stefan-webrtc (2016/09/21 14:50:14): received_time instead of received_timestamp? The w
> philipel (2016/09/22 11:18:38): Done.
118 uint32_t timestamp = frame->Timestamp(); | |
119 | |
120 int64_t frame_delay; | |
121 if (inter_frame_delay_.CalculateDelay(timestamp, &frame_delay, | |
122 received_timestamp)) { | |
123 jitter_estimator_->UpdateEstimate(frame_delay, frame->size); | |
94 } | 124 } |
125 float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0; | |
126 timing_->SetJitterDelay(jitter_estimator_->GetJitterEstimate(rtt_mult)); | |
127 timing_->UpdateCurrentDelay(frame->RenderTime(), | |
128 clock_->TimeInMilliseconds()); | |
95 | 129 |
96 wait_ms = std::min<int64_t>(wait_ms, latest_return_time - now); | 130 PropagateDecodability(next_frame_it->second); |
97 wait_ms = std::max<int64_t>(wait_ms, 0); | 131 AdvanceLastDecodedFrame(next_frame_it); |
98 // If the timeout occurs, return. Otherwise a new frame has been inserted | 132 *frame_out = std::move(frame); |
99 // and the best frame to decode next will be selected again. | 133 return kFrameFound; |
100 if (!frame_inserted_event_.Wait(wait_ms)) { | 134 } else { |
101 rtc::CritScope lock(&crit_); | 135 return kTimeout; |
102 if (next_frame_it != frames_.end()) { | |
103 int64_t received_timestamp = next_frame_it->second->ReceivedTime(); | |
104 uint32_t timestamp = next_frame_it->second->Timestamp(); | |
105 | |
106 int64_t frame_delay; | |
107 if (inter_frame_delay_.CalculateDelay(timestamp, &frame_delay, | |
108 received_timestamp)) { | |
109 jitter_estimator_->UpdateEstimate(frame_delay, | |
110 next_frame_it->second->size); | |
111 } | |
112 float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0; | |
113 timing_->SetJitterDelay(jitter_estimator_->GetJitterEstimate(rtt_mult)); | |
114 timing_->UpdateCurrentDelay(next_frame_it->second->RenderTime(), | |
115 clock_->TimeInMilliseconds()); | |
116 | |
117 decoded_frames_.insert(next_frame_it->first); | |
118 std::unique_ptr<FrameObject> frame = std::move(next_frame_it->second); | |
119 frames_.erase(frames_.begin(), ++next_frame_it); | |
120 *frame_out = std::move(frame); | |
121 return kFrameFound; | |
122 } else { | |
123 return kTimeout; | |
124 } | |
125 } | |
126 } | 136 } |
127 } | 137 } |
128 | 138 |
129 void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) { | 139 void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) { |
130 rtc::CritScope lock(&crit_); | 140 rtc::CritScope lock(&crit_); |
131 protection_mode_ = mode; | 141 protection_mode_ = mode; |
132 } | 142 } |
133 | 143 |
134 void FrameBuffer::Start() { | 144 void FrameBuffer::Start() { |
135 rtc::CritScope lock(&crit_); | 145 rtc::CritScope lock(&crit_); |
136 stopped_ = false; | 146 stopped_ = false; |
137 } | 147 } |
138 | 148 |
139 void FrameBuffer::Stop() { | 149 void FrameBuffer::Stop() { |
140 rtc::CritScope lock(&crit_); | 150 rtc::CritScope lock(&crit_); |
141 stopped_ = true; | 151 stopped_ = true; |
142 frame_inserted_event_.Set(); | 152 new_countinuous_frame_event_.Set(); |
143 } | 153 } |
144 | 154 |
145 void FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) { | 155 int FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) { |
146 rtc::CritScope lock(&crit_); | 156 rtc::CritScope lock(&crit_); |
147 // If |newest_picture_id_| is -1 then this is the first frame we received. | 157 FrameKey key(frame->picture_id, frame->spatial_layer); |
148 if (newest_picture_id_ == -1) | 158 int last_continuous_picture_id = |
149 newest_picture_id_ = frame->picture_id; | 159 last_continuous_frame_it_ == frames_.end() |
160 ? -1 | |
161 : last_continuous_frame_it_->first.picture_id; | |
150 | 162 |
151 if (AheadOf<uint16_t>(frame->picture_id, newest_picture_id_)) | 163 if (num_frames_buffered_ >= kMaxFramesBuffered) { |
152 newest_picture_id_ = frame->picture_id; | 164 LOG(LS_INFO) << "Frame with (picture_id:spatial_id) (" << key.picture_id |
> stefan-webrtc (2016/09/21 14:50:13): Should these logs be warnings?
> philipel (2016/09/22 11:18:38): I think warnings are better since we don't expect
165 << ":" << int(key.spatial_layer) | |
> stefan-webrtc (2016/09/21 14:50:14): static_cast<int>() here and below.
> philipel (2016/09/22 11:18:38): Done.
166 << ") could not be inserted due to the frame " | |
167 << "buffer being full, dropping frame."; | |
168 return last_continuous_picture_id; | |
169 } | |
153 | 170 |
154 // Remove frames as long as we have too many, |kMaxNumHistoryFrames|. | 171 if (frame->inter_layer_predicted && frame->spatial_layer == 0) { |
155 while (decoded_frames_.size() > kMaxNumHistoryFrames) | 172 LOG(LS_INFO) << "Frame with (picture_id:spatial_id) (" << key.picture_id |
156 decoded_frames_.erase(decoded_frames_.begin()); | 173 << ":" << int(key.spatial_layer) |
174 << ") is marked as inter layer predicted, dropping frame."; | |
175 return last_continuous_picture_id; | |
176 } | |
157 | 177 |
158 // Remove frames that are too old. | 178 if (last_decoded_frame_it_ != frames_.end() && |
159 uint16_t old_picture_id = Subtract<1 << 16>(newest_picture_id_, kMaxFrameAge); | 179 key < last_decoded_frame_it_->first) { |
160 auto old_decoded_it = | 180 LOG(LS_INFO) << "Frame with (picture_id:spatial_id) (" << key.picture_id |
161 decoded_frames_.lower_bound(FrameKey(old_picture_id, 0)); | 181 << ":" << int(key.spatial_layer) << ") inserted after frame (" |
162 decoded_frames_.erase(decoded_frames_.begin(), old_decoded_it); | 182 << last_decoded_frame_it_->first.picture_id << ":" |
183 << int(last_decoded_frame_it_->first.spatial_layer) | |
184 << ") was handed off for decoding, dropping frame."; | |
185 return last_continuous_picture_id; | |
186 } | |
163 | 187 |
164 FrameKey key(frame->picture_id, frame->spatial_layer); | 188 auto info = frames_.insert(std::make_pair(key, FrameInfo())).first; |
165 frames_[key] = std::move(frame); | 189 |
166 frame_inserted_event_.Set(); | 190 if (!UpdateFrameInfoWithIncomingFrame(*frame, info)) { |
191 frames_.erase(info); | |
192 return last_continuous_picture_id; | |
193 } | |
194 | |
195 info->second.frame = std::move(frame); | |
196 ++num_frames_buffered_; | |
197 | |
198 if (info->second.num_missing_continuous == 0) { | |
199 info->second.continuous = true; | |
200 PropagateContinuity(info); | |
> stefan-webrtc (2016/09/21 14:50:13): I think it would be easier to read the code if you
> philipel (2016/09/22 11:18:38): If a new frame became continuous then |last_contin
201 last_continuous_picture_id = last_continuous_frame_it_->first.picture_id; | |
202 | |
203 // Since we now have new continuous frames there might be a better frame | |
204 // to return from NextFrame. Signal that thread so that it again can choose | |
205 // which frame to return. | |
206 new_countinuous_frame_event_.Set(); | |
207 } | |
208 | |
209 return last_continuous_picture_id; | |
167 } | 210 } |
168 | 211 |
169 bool FrameBuffer::IsContinuous(const FrameObject& frame) const { | 212 void FrameBuffer::PropagateContinuity(FrameMap::iterator start) { |
170 // If a frame with an earlier picture id was inserted compared to the last | 213 RTC_DCHECK(start->second.continuous); |
171 // decoded frames picture id then that frame arrived too late. | 214 if (last_continuous_frame_it_ == frames_.end()) |
172 if (!decoded_frames_.empty() && | 215 last_continuous_frame_it_ = start; |
173 AheadOf(decoded_frames_.rbegin()->first, frame.picture_id)) { | 216 |
174 return false; | 217 std::queue<FrameMap::iterator> continuous_frames; |
218 continuous_frames.push(start); | |
219 | |
220 // A simple BFS to traverse continuous frames. | |
221 while (!continuous_frames.empty()) { | |
222 auto frame = continuous_frames.front(); | |
223 continuous_frames.pop(); | |
224 | |
225 if (last_continuous_frame_it_->first < frame->first) | |
226 last_continuous_frame_it_ = frame; | |
227 | |
228 // Loop through all dependent frames, and if that frame no longer has | |
229 // any unfulfilled dependencies then that frame is continuous as well. | |
230 for (size_t d = 0; d < frame->second.num_dependent_frames; ++d) { | |
231 auto frame_ref = frames_.find(frame->second.dependent_frames[d]); | |
232 --frame_ref->second.num_missing_continuous; | |
233 | |
234 if (frame_ref->second.num_missing_continuous == 0) { | |
235 frame_ref->second.continuous = true; | |
236 continuous_frames.push(frame_ref); | |
237 } | |
238 } | |
239 } | |
240 } | |
241 | |
242 void FrameBuffer::PropagateDecodability(const FrameInfo& info) { | |
243 for (size_t d = 0; d < info.num_dependent_frames; ++d) { | |
244 auto ref_info = frames_.find(info.dependent_frames[d]); | |
245 RTC_DCHECK_GT(ref_info->second.num_missing_decodable, 0U); | |
246 --ref_info->second.num_missing_decodable; | |
247 } | |
248 } | |
249 | |
250 void FrameBuffer::AdvanceLastDecodedFrame(FrameMap::iterator decoded) { | |
251 if (last_decoded_frame_it_ == frames_.end()) { | |
252 last_decoded_frame_it_ = frames_.begin(); | |
253 } else { | |
254 RTC_DCHECK(last_decoded_frame_it_->first < decoded->first); | |
255 ++last_decoded_frame_it_; | |
256 } | |
257 --num_frames_buffered_; | |
258 ++num_frames_history_; | |
259 | |
260 // First, delete non-decoded frames from the history. | |
261 while (last_decoded_frame_it_ != decoded) { | |
262 if (last_decoded_frame_it_->second.frame) | |
263 --num_frames_buffered_; | |
264 last_decoded_frame_it_ = frames_.erase(last_decoded_frame_it_); | |
175 } | 265 } |
176 | 266 |
177 // Have we decoded all frames that this frame depend on? | 267 // Then remove old history if we have too much history saved. |
268 if (num_frames_history_ > kMaxFramesHistory) { | |
269 frames_.erase(frames_.begin()); | |
270 --num_frames_history_; | |
271 } | |
272 } | |
273 | |
274 bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const FrameObject& frame, | |
275 FrameMap::iterator info) { | |
276 FrameKey key(frame.picture_id, frame.spatial_layer); | |
277 info->second.num_missing_continuous = frame.num_references; | |
278 info->second.num_missing_decodable = frame.num_references; | |
279 | |
280 RTC_DCHECK(last_decoded_frame_it_ == frames_.end() || | |
281 last_decoded_frame_it_->first < info->first); | |
282 | |
283 // Check how many dependencies that has already been fulfilled. | |
> stefan-webrtc (2016/09/21 14:50:14): have already
> philipel (2016/09/22 11:18:38): Done.
178 for (size_t r = 0; r < frame.num_references; ++r) { | 284 for (size_t r = 0; r < frame.num_references; ++r) { |
> stefan-webrtc (2016/09/21 14:50:13): use i instead of r.
> philipel (2016/09/22 11:18:38): Done.
179 FrameKey ref_key(frame.references[r], frame.spatial_layer); | 285 FrameKey ref_key(frame.references[r], frame.spatial_layer); |
180 if (decoded_frames_.find(ref_key) == decoded_frames_.end()) | 286 auto ref_info = frames_.find(ref_key); |
181 return false; | 287 |
288 // Does |frame| depend on a frame earlier than the last decoded frame? | |
289 if (last_decoded_frame_it_ != frames_.end() && | |
290 ref_key <= last_decoded_frame_it_->first) { | |
291 // Does |frame| depend on a frame that was never decoded? | |
292 if (ref_info == frames_.end()) { | |
293 LOG(LS_INFO) << "Frame with (picture_id:spatial_id) (" << key.picture_id | |
294 << ":" << int(key.spatial_layer) | |
295 << " depends on a non-decoded frame more previous than " | |
296 << "the last decoded frame, dropping frame."; | |
297 return false; | |
298 } | |
299 | |
300 --info->second.num_missing_continuous; | |
> stefan-webrtc (2016/09/21 14:50:14): Does this mean the frame has already been decoded?
> philipel (2016/09/22 11:18:38): Yes, since this |ref_key| refers to a frame more p
301 --info->second.num_missing_decodable; | |
302 | |
> stefan-webrtc (2016/09/21 14:50:13): Remove empty line
> philipel (2016/09/22 11:18:38): Done.
303 } else { | |
304 if (ref_info == frames_.end()) | |
305 ref_info = frames_.insert(std::make_pair(ref_key, FrameInfo())).first; | |
306 | |
307 if (ref_info->second.continuous) | |
308 --info->second.num_missing_continuous; | |
309 | |
310 // Add backwards reference so |frame| can be updated when new | |
311 // frames are inserted or decoded. | |
312 ref_info->second.dependent_frames[ref_info->second.num_dependent_frames] = | |
313 key; | |
314 ++ref_info->second.num_dependent_frames; | |
315 } | |
182 } | 316 } |
183 | 317 |
184 // If this is a layer frame, have we decoded the lower layer of this | 318 // Check if we have the lower spatial layer frame. |
185 // super frame. | |
186 if (frame.inter_layer_predicted) { | 319 if (frame.inter_layer_predicted) { |
187 RTC_DCHECK_GT(frame.spatial_layer, 0); | 320 ++info->second.num_missing_continuous; |
321 ++info->second.num_missing_decodable; | |
322 | |
188 FrameKey ref_key(frame.picture_id, frame.spatial_layer - 1); | 323 FrameKey ref_key(frame.picture_id, frame.spatial_layer - 1); |
189 if (decoded_frames_.find(ref_key) == decoded_frames_.end()) | 324 auto ref_info = frames_.insert(std::make_pair(ref_key, FrameInfo())).first; |
> stefan-webrtc (2016/09/21 14:50:13): When doing things like this I think it would be go
> philipel (2016/09/22 11:18:38): Done.
190 return false; | 325 if (ref_info->second.continuous) |
326 --info->second.num_missing_continuous; | |
327 | |
328 if (ref_info == last_decoded_frame_it_) { | |
329 --info->second.num_missing_decodable; | |
330 } else { | |
331 ref_info->second.dependent_frames[ref_info->second.num_dependent_frames] = | |
332 key; | |
333 ++ref_info->second.num_dependent_frames; | |
334 } | |
191 } | 335 } |
192 | 336 |
193 return true; | 337 return true; |
194 } | 338 } |
195 | 339 |
196 } // namespace video_coding | 340 } // namespace video_coding |
197 } // namespace webrtc | 341 } // namespace webrtc |
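
(A side note on the kMaxFramesBuffered thread above: the old limit of 300 frames is 5 seconds of video at 60 fps, so the new limit of 1000 frames is roughly 16.7 seconds at that rate.)

The new frame_buffer2.cc leans on declarations from frame_buffer2.h that are not part of this diff: FrameKey, FrameInfo, and FrameMap. As a reading aid only, here is a minimal sketch of the shapes the new .cc appears to assume. The field names are taken from their uses above, and the wrap-aware ordering mirrors the old FrameComp in the left column; the array bound, exact integer types, and member defaults are assumptions, not the real header.

```cpp
// Hypothetical sketch of the frame_buffer2.h pieces referenced above -- not
// the real header, just enough structure to follow the new .cc code.
#include <cstdint>
#include <map>
#include <memory>

namespace webrtc {
namespace video_coding {

class FrameObject;  // Defined in frame_object.h.

struct FrameKey {
  FrameKey() : picture_id(0), spatial_layer(0) {}
  FrameKey(uint16_t picture_id, uint8_t spatial_layer)
      : picture_id(picture_id), spatial_layer(spatial_layer) {}

  // Ordering must be wrap-aware on |picture_id|, like the old FrameComp in
  // the left column: equal picture ids compare by spatial layer, otherwise
  // the newer id (modulo 2^16) sorts last. This is an approximation of
  // AheadOf<uint16_t>() from sequence_number_util.h.
  bool operator<(const FrameKey& rhs) const {
    if (picture_id == rhs.picture_id)
      return spatial_layer < rhs.spatial_layer;
    return static_cast<uint16_t>(rhs.picture_id - picture_id) < 0x8000;
  }
  bool operator<=(const FrameKey& rhs) const { return !(rhs < *this); }

  uint16_t picture_id;
  uint8_t spatial_layer;
};

struct FrameInfo {
  // Unfulfilled dependencies of this frame. The frame is continuous when
  // |num_missing_continuous| reaches 0 and decodable when
  // |num_missing_decodable| reaches 0.
  size_t num_missing_continuous = 0;
  size_t num_missing_decodable = 0;

  // Backward references: frames that depend on this frame, filled in by
  // UpdateFrameInfoWithIncomingFrame(). The fixed bound of 8 is an assumption.
  FrameKey dependent_frames[8];
  size_t num_dependent_frames = 0;

  bool continuous = false;
  std::unique_ptr<FrameObject> frame;
};

using FrameMap = std::map<FrameKey, FrameInfo>;

}  // namespace video_coding
}  // namespace webrtc
```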
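And a rough sketch of how the two-thread contract of InsertFrame()/NextFrame() is meant to be exercised, based only on the signatures and return values visible in this diff: one thread inserts complete frames, another blocks in NextFrame() until a continuous frame is ready. The names OnCompleteFrame and DecoderThreadLoop, and the 200 ms wait, are illustrative, not part of the API.

```cpp
#include <memory>
#include <utility>

#include "webrtc/modules/video_coding/frame_buffer2.h"

namespace webrtc {
namespace video_coding {

// Called on the thread that assembles complete frames from RTP packets.
void OnCompleteFrame(FrameBuffer* buffer, std::unique_ptr<FrameObject> frame) {
  // InsertFrame() returns the picture id of the last continuous frame
  // (-1 if there is none yet); the diff above shows how it is computed but
  // not how callers are expected to use it.
  int last_continuous_picture_id = buffer->InsertFrame(std::move(frame));
  (void)last_continuous_picture_id;
}

// Run on the decoder thread.
void DecoderThreadLoop(FrameBuffer* buffer) {
  while (true) {
    std::unique_ptr<FrameObject> frame;
    // Blocks for up to 200 ms (arbitrary) waiting for a continuous frame.
    FrameBuffer::ReturnReason reason = buffer->NextFrame(200, &frame);
    if (reason == FrameBuffer::kStopped)
      return;    // Stop() was called on another thread.
    if (reason == FrameBuffer::kTimeout)
      continue;  // Nothing became available within the wait time.
    // reason == FrameBuffer::kFrameFound: hand |frame| to the decoder.
  }
}

}  // namespace video_coding
}  // namespace webrtc
```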