Index: webrtc/modules/video_coding/frame_buffer2.cc |
diff --git a/webrtc/modules/video_coding/frame_buffer2.cc b/webrtc/modules/video_coding/frame_buffer2.cc |
index 766e6a05d722ddc2811f47763d9a1f49711093ec..21dc14e40bfa90f0aad53f37a50039c62b380962 100644 |
--- a/webrtc/modules/video_coding/frame_buffer2.cc |
+++ b/webrtc/modules/video_coding/frame_buffer2.cc |
@@ -11,11 +11,12 @@ |
#include "webrtc/modules/video_coding/frame_buffer2.h" |
#include <algorithm> |
+#include <cstring> |
+#include <queue> |
#include "webrtc/base/checks.h" |
-#include "webrtc/modules/video_coding/frame_object.h" |
+#include "webrtc/base/logging.h" |
#include "webrtc/modules/video_coding/jitter_estimator.h" |
-#include "webrtc/modules/video_coding/sequence_number_util.h" |
#include "webrtc/modules/video_coding/timing.h" |
#include "webrtc/system_wrappers/include/clock.h" |
@@ -25,21 +26,12 @@ namespace video_coding { |
namespace { |
// The maximum age of decoded frames tracked by frame buffer, compared to |
// |newest_picture_id_|. |
-constexpr int kMaxFrameAge = 4096; |
+constexpr int kMaxHistoryFrameAge = 4096; |
// The maximum number of decoded frames being tracked by the frame buffer. |
constexpr int kMaxNumHistoryFrames = 256; |
} // namespace |
-bool FrameBuffer::FrameComp::operator()(const FrameKey& f1, |
- const FrameKey& f2) const { |
- // first = picture id |
- // second = spatial layer |
- if (f1.first == f2.first) |
- return f1.second < f2.second; |
- return AheadOf(f2.first, f1.first); |
-} |
- |
FrameBuffer::FrameBuffer(Clock* clock, |
VCMJitterEstimator* jitter_estimator, |
VCMTiming* timing) |
@@ -49,6 +41,8 @@ FrameBuffer::FrameBuffer(Clock* clock, |
timing_(timing), |
inter_frame_delay_(clock_->TimeInMilliseconds()), |
newest_picture_id_(-1), |
+ last_continuous_picture_id_(-1), |
+ last_decoded_picture_id_(-1), |
stopped_(false), |
protection_mode_(kProtectionNack) {} |
@@ -58,9 +52,9 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame( |
int64_t latest_return_time = clock_->TimeInMilliseconds() + max_wait_time_ms; |
int64_t now = clock_->TimeInMilliseconds(); |
int64_t wait_ms = max_wait_time_ms; |
+ decltype(frames_.end()) next_frame_it; |
danilchap 2016/09/12 12:20:14:
maybe add
using Frames = std::map<FrameKey, Frame
philipel 2016/09/16 14:01:20:
Done.
|
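For illustration, the kind of alias the reviewer is suggesting, which lets the iterator above be spelled without decltype. A minimal sketch only: the alias name and its placement in frame_buffer2.h are assumptions, as is the map ordering (FrameKey is assumed to provide operator< now that the FrameComp functor is removed in this CL).

    using FrameMap = std::map<FrameKey, std::unique_ptr<FrameObject>>;

    FrameMap frames_;                  // Member declaration in the header.
    FrameMap::iterator next_frame_it;  // Replaces the decltype(frames_.end()) spelling.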
+ |
while (true) { |
- std::map<FrameKey, std::unique_ptr<FrameObject>, FrameComp>::iterator |
- next_frame_it; |
{ |
rtc::CritScope lock(&crit_); |
frame_inserted_event_.Reset(); |
@@ -70,17 +64,19 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame( |
now = clock_->TimeInMilliseconds(); |
wait_ms = max_wait_time_ms; |
next_frame_it = frames_.end(); |
- for (auto frame_it = frames_.begin(); frame_it != frames_.end(); |
- ++frame_it) { |
- const FrameObject& frame = *frame_it->second; |
- if (IsContinuous(frame)) { |
- next_frame_it = frame_it; |
+ for (auto it = frames_.begin(); it != frames_.end(); ++it) { |
+ const FrameKey& frame_key = it->first; |
+ const FrameInfo& frame_info = frame_history_.find(frame_key)->second; |
danilchap 2016/09/12 12:20:14:
frame_history_ can throw out frames when there a
philipel 2016/09/16 14:01:20:
Done, rewrote FrameBuffer to only have one map for
|
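The hazard being pointed out: frame_history_ is pruned independently of frames_, so the unchecked find() above can return end() and the dereference is then undefined behavior. Under this patch set's two-map layout a defensive form would look roughly like the sketch below; the skip-and-continue policy is an assumption here, and the later rewrite mentioned in the reply (a single map) removes the problem altogether.

    auto info_it = frame_history_.find(frame_key);
    if (info_it == frame_history_.end())
      continue;  // Bookkeeping for this frame was already pruned.
    const FrameInfo& frame_info = info_it->second;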
+ FrameObject* frame = it->second.get(); |
+ |
+ if (frame_info.num_missing_decodable == 0) { |
+ next_frame_it = it; |
int64_t render_time = |
- next_frame_it->second->RenderTime() == -1 |
- ? timing_->RenderTimeMs(frame.timestamp, now) |
- : next_frame_it->second->RenderTime(); |
+ frame->RenderTime() == -1 |
+ ? timing_->RenderTimeMs(frame->timestamp, now) |
+ : frame->RenderTime(); |
wait_ms = timing_->MaxWaitingTime(render_time, now); |
- frame_it->second->SetRenderTime(render_time); |
+ frame->SetRenderTime(render_time); |
// This will cause the frame buffer to prefer high framerate rather |
// than high resolution in the case of the decoder not decoding fast |
@@ -114,8 +110,9 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame( |
timing_->UpdateCurrentDelay(next_frame_it->second->RenderTime(), |
clock_->TimeInMilliseconds()); |
- decoded_frames_.insert(next_frame_it->first); |
+ PropagateDecodability(next_frame_it->first); |
std::unique_ptr<FrameObject> frame = std::move(next_frame_it->second); |
+ last_decoded_picture_id_ = frame->picture_id; |
frames_.erase(frames_.begin(), ++next_frame_it); |
*frame_out = std::move(frame); |
return kFrameFound; |
@@ -142,55 +139,117 @@ void FrameBuffer::Stop() { |
frame_inserted_event_.Set(); |
} |
-void FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) { |
+int FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) { |
rtc::CritScope lock(&crit_); |
- // If |newest_picture_id_| is -1 then this is the first frame we received. |
- if (newest_picture_id_ == -1) |
- newest_picture_id_ = frame->picture_id; |
+ if (frame->inter_layer_predicted && frame->spatial_layer == 0) { |
+ LOG(LS_INFO) << "Spatial layer 0 frame with picture id " |
+ << frame->picture_id |
+ << " is marked as inter layer predicted, dropping frame."; |
+ return last_continuous_picture_id_; |
+ } |
- if (AheadOf<uint16_t>(frame->picture_id, newest_picture_id_)) |
- newest_picture_id_ = frame->picture_id; |
+ if (last_decoded_picture_id_ != -1 && |
+ AheadOf<uint16_t>(last_decoded_picture_id_, frame->picture_id)) { |
+ LOG(LS_INFO) << "Frame with picture id " << frame->picture_id |
+ << " inserted after frame with picture id " |
+ << last_decoded_picture_id_ |
+ << " was handed off for decoding, dropping frame."; |
+ return last_continuous_picture_id_; |
+ } |
- // Remove frames as long as we have too many, |kMaxNumHistoryFrames|. |
- while (decoded_frames_.size() > kMaxNumHistoryFrames) |
- decoded_frames_.erase(decoded_frames_.begin()); |
+ if (newest_picture_id_ == -1 || |
+ AheadOf<uint16_t>(frame->picture_id, newest_picture_id_)) { |
+ newest_picture_id_ = frame->picture_id; |
+ } |
- // Remove frames that are too old. |
- uint16_t old_picture_id = Subtract<1 << 16>(newest_picture_id_, kMaxFrameAge); |
- auto old_decoded_it = |
- decoded_frames_.lower_bound(FrameKey(old_picture_id, 0)); |
- decoded_frames_.erase(decoded_frames_.begin(), old_decoded_it); |
+ // Remove frames while we have too many or if they are too old. |
+ while (frame_history_.size() > kMaxNumHistoryFrames || |
danilchap 2016/09/12 12:20:14:
probably cleaner to have two loops like before. Th
philipel 2016/09/16 14:01:20:
Clear logic moved to new function ClearOldInfo.
|
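A sketch of what the ClearOldInfo() helper mentioned in the reply might look like with the two eviction conditions split into separate loops again. The function name comes from the reply; the body is a reconstruction from the code in this hunk, not the later patch set.

    void FrameBuffer::ClearOldInfo() {
      // Drop history entries that have fallen outside the age window.
      while (!frame_history_.empty() &&
             AheadOf<uint16_t>(newest_picture_id_ - kMaxHistoryFrameAge,
                               frame_history_.begin()->first.picture_id)) {
        frame_history_.erase(frame_history_.begin());
      }
      // Then cap the total number of tracked entries.
      while (frame_history_.size() > kMaxNumHistoryFrames)
        frame_history_.erase(frame_history_.begin());
    }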
+ (!frame_history_.empty() && |
+ AheadOf<uint16_t>(newest_picture_id_ - kMaxHistoryFrameAge, |
+ frame_history_.begin()->first.picture_id))) { |
+ frame_history_.erase(frame_history_.begin()); |
+ } |
FrameKey key(frame->picture_id, frame->spatial_layer); |
- frames_[key] = std::move(frame); |
- frame_inserted_event_.Set(); |
-} |
+ auto info = frame_history_.insert(std::make_pair(key, FrameInfo())).first; |
danilchap 2016/09/12 12:20:14:
since you ignore .second anyway, may be cleaner
philipel 2016/09/16 14:01:20:
I can't rely on the pointers being stable since I
|
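What the reviewer appears to be suggesting: since the bool returned by insert() is ignored, operator[] can default-construct the entry and hand back a reference directly. A sketch only; the reply above is cut off, so whether this fits the author's eventual rewrite is not shown here.

    FrameInfo& info = frame_history_[key];
    info.num_missing_continuous = frame->num_references;
    info.num_missing_decodable = frame->num_references;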
+ info->second.num_missing_continuous = frame->num_references; |
+ info->second.num_missing_decodable = frame->num_references; |
+ |
+ // Check how many dependencies have already been fulfilled. |
+ for (size_t r = 0; r < frame->num_references; ++r) { |
+ FrameKey ref_key(frame->references[r], frame->spatial_layer); |
+ auto ref_info = |
+ frame_history_.insert(std::make_pair(ref_key, FrameInfo())).first; |
+ if (ref_info->second.continuous) { |
+ --info->second.num_missing_continuous; |
+ if (ref_info->second.decoded) { |
danilchap 2016/09/12 12:20:14:
do you distinguish decoded (returned by NextFrame)
philipel 2016/09/16 14:01:20:
Since a FrameInfo can be inserted without the corr
|
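For context, the distinction the question is about, written out as the per-entry state this .cc file relies on. frame_buffer2.h is not part of this excerpt, so the definition below is a reconstruction from the member names used in this hunk, not the reviewed header; the dependent_frames capacity in particular is a guess.

    struct FrameInfo {
      // Set in InsertFrame()/PropagateContinuity() once the frame and all of
      // its references have been received. A placeholder entry created for a
      // reference that has not arrived yet keeps this false.
      bool continuous = false;
      // Set in PropagateDecodability() only after NextFrame() has handed the
      // frame off for decoding; this is the flag that lets a reference count
      // toward a dependent frame's num_missing_decodable.
      bool decoded = false;
      size_t num_missing_continuous = 0;
      size_t num_missing_decodable = 0;
      FrameKey dependent_frames[8];  // Capacity is a guess.
      size_t num_dependent_frames = 0;
    };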
+ --info->second.num_missing_decodable; |
+ continue; |
+ } |
+ } |
-bool FrameBuffer::IsContinuous(const FrameObject& frame) const { |
- // If a frame with an earlier picture id was inserted compared to the last |
- // decoded frames picture id then that frame arrived too late. |
- if (!decoded_frames_.empty() && |
- AheadOf(decoded_frames_.rbegin()->first, frame.picture_id)) { |
- return false; |
+ // If the frame isn't continuous, isn't decodable, or both, then we |
+ // add a backwards reference so this frame can be updated when new frames |
+ // are inserted or decoded. |
+ ref_info->second.dependent_frames[ref_info->second.num_dependent_frames] = |
+ key; |
+ ++ref_info->second.num_dependent_frames; |
} |
- // Have we decoded all frames that this frame depend on? |
- for (size_t r = 0; r < frame.num_references; ++r) { |
- FrameKey ref_key(frame.references[r], frame.spatial_layer); |
- if (decoded_frames_.find(ref_key) == decoded_frames_.end()) |
- return false; |
+ frames_[key] = std::move(frame); |
+ |
+ if (info->second.num_missing_continuous == 0) { |
+ info->second.continuous = true; |
+ PropagateContinuity(key); |
+ |
+ // Since we now have new continuous frames there might be a better frame |
+ // to return from NextFrame. Signal that thread so that it can again choose |
+ // which frame to return. |
+ frame_inserted_event_.Set(); |
} |
- // If this is a layer frame, have we decoded the lower layer of this |
- // super frame. |
- if (frame.inter_layer_predicted) { |
- RTC_DCHECK_GT(frame.spatial_layer, 0); |
- FrameKey ref_key(frame.picture_id, frame.spatial_layer - 1); |
- if (decoded_frames_.find(ref_key) == decoded_frames_.end()) |
- return false; |
+ return last_continuous_picture_id_; |
+} |
+ |
+void FrameBuffer::PropagateContinuity(const FrameKey& key) { |
+ std::queue<decltype(frame_history_.end())> continuous_frames_; |
+ |
+ auto start_frame = frame_history_.find(key); |
+ RTC_DCHECK(start_frame->second.continuous); |
+ continuous_frames_.push(start_frame); |
+ |
+ while (!continuous_frames_.empty()) { |
+ auto frame = continuous_frames_.front(); |
+ continuous_frames_.pop(); |
+ |
+ if (last_continuous_picture_id_ == -1 || |
+ AheadOf<uint16_t>(frame->first.picture_id, |
+ last_continuous_picture_id_)) { |
+ last_continuous_picture_id_ = frame->first.picture_id; |
+ } |
+ |
+ // Loop through all dependent frames, and if that frame no longer has |
+ // any unfulfilled dependencies then that frame is continuous as well. |
+ for (size_t d = 0; d < frame->second.num_dependent_frames; ++d) { |
+ auto frame_ref = frame_history_.find(frame->second.dependent_frames[d]); |
+ --frame_ref->second.num_missing_continuous; |
+ |
+ if (frame_ref->second.num_missing_continuous == 0) { |
+ frame_ref->second.continuous = true; |
+ continuous_frames_.push(frame_ref); |
+ } |
+ } |
} |
+} |
- return true; |
+void FrameBuffer::PropagateDecodability(const FrameKey& key) { |
+ auto info = frame_history_.find(key); |
+ info->second.decoded = true; |
+ |
+ for (size_t d = 0; d < info->second.num_dependent_frames; ++d) { |
+ auto ref_info = frame_history_.find(info->second.dependent_frames[d]); |
danilchap 2016/09/12 12:20:14:
You sure info->second.dependent_frames[d] is in th
|
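The question above is left unanswered in this excerpt. One way to make the assumption explicit while it is open, sketched below; whether a missing entry should be a DCHECK failure or silently skipped is exactly the open question, so both are shown.

    for (size_t d = 0; d < info->second.num_dependent_frames; ++d) {
      auto ref_info = frame_history_.find(info->second.dependent_frames[d]);
      RTC_DCHECK(ref_info != frame_history_.end());
      if (ref_info == frame_history_.end())
        continue;  // Dependent entry was already pruned from the history.
      --ref_info->second.num_missing_decodable;
    }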
+ --ref_info->second.num_missing_decodable; |
+ } |
} |
} // namespace video_coding |
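Finally, a standalone toy program (not WebRTC code, and not part of this CL) that isolates the bookkeeping scheme the patch introduces: each frame records how many of its references are still missing, every reference keeps back pointers to its dependents, and completing a frame walks those back pointers breadth-first, in the same spirit as PropagateContinuity() above.

    #include <cstdio>
    #include <map>
    #include <queue>
    #include <vector>

    struct Info {
      int num_missing = 0;          // References not yet fulfilled.
      std::vector<int> dependents;  // Frames that reference this one.
      bool continuous = false;
    };

    int main() {
      // Frame 2 references frame 1, frame 3 references frame 2: 1 <- 2 <- 3.
      std::map<int, Info> history;
      history[2].num_missing = 1;
      history[1].dependents.push_back(2);
      history[3].num_missing = 1;
      history[2].dependents.push_back(3);

      // Frame 1 arrives with no missing references: propagate continuity.
      std::queue<int> ready;
      history[1].continuous = true;
      ready.push(1);
      while (!ready.empty()) {
        int id = ready.front();
        ready.pop();
        for (int dep : history[id].dependents) {
          if (--history[dep].num_missing == 0) {
            history[dep].continuous = true;
            ready.push(dep);
          }
        }
      }

      std::printf("frame 3 continuous: %d\n", history[3].continuous);  // Prints 1.
      return 0;
    }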