1 /*
2  * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
3  *
4  * Use of this source code is governed by a BSD-style license
5  * that can be found in the LICENSE file in the root of the source
6  * tree. An additional intellectual property rights grant can be found
7  * in the file PATENTS. All contributing project authors may
8  * be found in the AUTHORS file in the root of the source tree.
9  */
10
11 #include <algorithm>
12
13 #include "webrtc/modules/video_coding/frame_buffer2.h"
danilchap
2016/05/13 13:31:04
this include goes first, before c++ header
philipel
2016/05/16 12:10:21
Done.
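For context, danilchap is asking for the file's own header to be included before the C++ standard header, per the usual WebRTC include ordering. A minimal sketch of the reordered includes, assuming nothing else in the block changes, would be:

    #include "webrtc/modules/video_coding/frame_buffer2.h"

    #include <algorithm>

    #include "webrtc/base/checks.h"
    #include "webrtc/modules/video_coding/frame_object.h"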
14
15 #include "webrtc/base/checks.h"
16 #include "webrtc/modules/video_coding/frame_object.h"
17
18 namespace webrtc {
19 namespace video_coding {
20
21 FrameBuffer::FrameBuffer(Clock* clock,
22                          VCMJitterEstimator* jitter_estimator,
23                          VCMTiming* timing) :
24     clock_(clock),
25     frame_inserted_event_(false, false),
26     jitter_estimator_(jitter_estimator),
27     timing_(timing),
28     newest_picture_id_(-1) {}
29
30
31 std::unique_ptr<FrameObject> FrameBuffer::NextFrame(int64_t max_wait_time_ms) {
32   // Instead of using an iterator to point to the selected frame, we save
33   // the key since |frames_| may be updated after we select the frame.
34   auto frame_key = std::make_pair(-1, -1);
danilchap
2016/05/13 13:31:04
frame_key has type std::pair<int, int>, but used i
philipel
2016/05/16 12:10:21
I no longer use a pair to keep track of which fram
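Philipel's reply is cut off, but the gist is that the follow-up patch tracks the selected frame with an iterator into |frames_| rather than a (picture_id, spatial_layer) pair. A rough, hypothetical sketch of that approach (names here are illustrative, not the actual patch):

    // Hypothetical: remember the chosen frame by iterator instead of by key.
    auto next_frame_it = frames_.end();
    for (auto it = frames_.begin(); it != frames_.end(); ++it) {
      if (IsContinuous(*it->second)) {
        next_frame_it = it;
        break;
      }
    }
    // next_frame_it != frames_.end() then means a continuous frame was found.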
35   int64_t wait_ms = max_wait_time_ms;
36   while (true) {
37     crit_.Enter();
danilchap
2016/05/13 13:31:04
may be use
{
rtc::CritScope lock(&crit_);
...
philipel
2016/05/16 12:10:21
After the change to use an iterator instead of a f
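rtc::CritScope is WebRTC's RAII lock guard, so the manual crit_.Enter()/crit_.Leave() pairs can be replaced by a nested scope that releases the lock automatically. A sketch of the pattern danilchap is suggesting (not the code actually landed):

    while (true) {
      {
        rtc::CritScope lock(&crit_);  // Lock released automatically at the end of this scope.
        frame_inserted_event_.Reset();
        // ... scan |frames_| for the next continuous frame ...
      }
      // Waiting on |frame_inserted_event_| then happens outside the critical section.
    }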
38     frame_inserted_event_.Reset();
39     for (auto& kvp : frames_) {
danilchap
2016/05/13 13:31:04
const auto&
though may be explicit type (const std
40       if (IsContinuous(*(kvp.second.get()))) {
danilchap
2016/05/13 13:31:04
IsContinuous(*kvp.second)
Or add
const FrameObjec
philipel
2016/05/16 12:10:21
Added an additional line.
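The "additional line" is presumably a named const reference that avoids the *(kvp.second.get()) dereference, combined with the const auto& loop variable asked for above; a sketch under that assumption:

    for (const auto& kvp : frames_) {
      const FrameObject& frame = *kvp.second;  // unique_ptr dereferences directly; no .get() needed.
      if (IsContinuous(frame)) {
        // ... select this frame ...
      }
    }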
41         frame_key = kvp.first;
42         int64_t render_time = timing_->RenderTimeMs(kvp.second->timestamp,
43                                                     clock_->TimeInMilliseconds());
danilchap
2016/05/13 13:31:04
May be you want to cache current time,
or do you e
philipel
2016/05/16 12:10:21
Done.
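Caching the clock read, as suggested, also guarantees that RenderTimeMs() and MaxWaitingTime() see the same timestamp; a sketch (the local name now_ms is illustrative):

    int64_t now_ms = clock_->TimeInMilliseconds();
    int64_t render_time = timing_->RenderTimeMs(kvp.second->timestamp, now_ms);
    wait_ms = timing_->MaxWaitingTime(render_time, now_ms);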
44         wait_ms = timing_->MaxWaitingTime(render_time,
45                                           clock_->TimeInMilliseconds());
46
47         // This will cause the frame buffer to prefer high framerate rather
48         // than high resolution in the case where the decoder is not decoding
49         // fast enough and the stream has multiple spatial and temporal layers.
50         if (wait_ms == 0)
51           continue;
52
53         break;
54       }
55     }
56     crit_.Leave();
57
58     // If the timeout occurs, return. Otherwise a new frame has been inserted
59     // and the best frame to decode next will be selected again.
60     wait_ms = std::min(wait_ms, max_wait_time_ms);
61     if (!frame_inserted_event_.Wait(wait_ms)) {
62       if (frame_key.first != -1) {
63         crit_.Enter();
danilchap
2016/05/13 13:31:04
rtc::CritScope lock(&crit_);
philipel
2016/05/16 12:10:21
Don't want to use two different styles within the
64
65         // TODO(philipel): update jitter estimator with correct values.
66         jitter_estimator_->UpdateEstimate(100, 100);
67
68         auto frame_it = frames_.find(frame_key);
danilchap
2016/05/13 13:31:04
since you already silently expect frame_it != fram
philipel
2016/05/16 12:10:21
Done.
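danilchap's comment is cut off, but the concern is that frames_.find(frame_key) is silently assumed to succeed. One way to make that expectation explicit (this may or may not match the eventual patch) is:

    auto frame_it = frames_.find(frame_key);
    RTC_DCHECK(frame_it != frames_.end());
    std::unique_ptr<FrameObject> frame = std::move(frame_it->second);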
69         std::unique_ptr<FrameObject> frame = std::move(frame_it->second);
70         frames_.erase(frames_.begin(), ++frame_it);
71         decoded_frames_[frame->picture_id][frame->spatial_layer] = true;
72         crit_.Leave();
73         return frame;
74       } else {
75         break;
76       }
77     }
78   }
79   return std::unique_ptr<FrameObject>();
80 }
81
82 void FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
83   rtc::CritScope lock(&crit_);
84   if (newest_picture_id_ == -1)
85     newest_picture_id_ = frame->picture_id;
86
87   if (AheadOf<uint16_t>(frame->picture_id, newest_picture_id_))
88     newest_picture_id_ = frame->picture_id;
89
90   // Remove frames as long as we have too many, |kMaxStoredFrames|, or if
91   // they are too old, |kMaxFrameAge|.
92   while (!decoded_frames_.empty() &&
93          (decoded_frames_.size() > kMaxStoredFrames ||
94           ForwardDiff<uint16_t>(decoded_frames_.begin()->first,
95                                 newest_picture_id_) > kMaxFrameAge)) {
96     decoded_frames_.erase(decoded_frames_.begin());
97   }
98
99   auto frame_key = std::make_pair(frame->picture_id, frame->spatial_layer);
100   RTC_DCHECK(frames_.find(frame_key) == frames_.end());
101   frames_.insert(std::make_pair(frame_key, std::move(frame)));
102   frame_inserted_event_.Set();
103 }
104
105 bool FrameBuffer::IsContinuous(const FrameObject& frame) const {
106   for (size_t r = 0; r < frame.num_references; ++r) {
107     auto decoded_frame_it = decoded_frames_.find(frame.references[r]);
108     if (decoded_frame_it == decoded_frames_.end() ||
109         !decoded_frame_it->second[frame.spatial_layer]) {
110       return false;
111     }
112   }
113
114   if (frame.inter_layer_predicted) {
115     RTC_DCHECK_GT(frame.spatial_layer, 0);
116     auto decoded_frame_it = decoded_frames_.find(frame.picture_id);
117     if (decoded_frame_it == decoded_frames_.end() ||
118         !decoded_frame_it->second[frame.spatial_layer - 1]) {
119       return false;
120     }
121   }
122
123   return true;
124 }
125
126
127 }  // namespace video_coding
128 }  // namespace webrtc