Chromium Code Reviews
| Index: webrtc/modules/video_coding/frame_buffer2.cc |
| diff --git a/webrtc/modules/video_coding/frame_buffer2.cc b/webrtc/modules/video_coding/frame_buffer2.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..3e13cd6e9faad9c7d5e8db5efe149c699933e14d |
| --- /dev/null |
| +++ b/webrtc/modules/video_coding/frame_buffer2.cc |
| @@ -0,0 +1,128 @@ |
| +/* |
| + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
| + * |
| + * Use of this source code is governed by a BSD-style license |
| + * that can be found in the LICENSE file in the root of the source |
| + * tree. An additional intellectual property rights grant can be found |
| + * in the file PATENTS. All contributing project authors may |
| + * be found in the AUTHORS file in the root of the source tree. |
| + */ |
| + |
| +#include <algorithm> |
| + |
| +#include "webrtc/modules/video_coding/frame_buffer2.h" |
|
danilchap
2016/05/13 13:31:04
this include goes first, before c++ header
philipel
2016/05/16 12:10:21
Done.
|
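For context, the ordering the reviewer asks for is the standard Chromium/WebRTC C++ style: the header belonging to this .cc file comes first, then C system headers, then C++ standard library headers, then other project headers. A minimal sketch of the top of the file after that reordering (the exact include set in the later patchset may differ):

  #include "webrtc/modules/video_coding/frame_buffer2.h"

  #include <algorithm>

  #include "webrtc/base/checks.h"
  #include "webrtc/modules/video_coding/frame_object.h"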
| + |
| +#include "webrtc/base/checks.h" |
| +#include "webrtc/modules/video_coding/frame_object.h" |
| + |
| +namespace webrtc { |
| +namespace video_coding { |
| + |
| +FrameBuffer::FrameBuffer(Clock* clock, |
| + VCMJitterEstimator* jitter_estimator, |
| + VCMTiming* timing) : |
| + clock_(clock), |
| + frame_inserted_event_(false, false), |
| + jitter_estimator_(jitter_estimator), |
| + timing_(timing), |
| + newest_picture_id_(-1) {} |
| + |
| + |
| +std::unique_ptr<FrameObject> FrameBuffer::NextFrame(int64_t max_wait_time_ms) { |
| + // Instead of using an iterator to point to the selected frame we save |
| + // the key since |frames_| may be updated after we select the frame. |
| + auto frame_key = std::make_pair(-1, -1); |
|
danilchap
2016/05/13 13:31:04
frame_key has type std::pair<int, int>, but used i
philipel
2016/05/16 12:10:21
I no longer use a pair to keep track of which fram
|
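philipel's reply refers to a later patchset that is not shown here. As a rough illustration only, tracking the selected frame with a map iterator instead of a saved key could look like the sketch below; assuming |frames_| is an ordered std::map (the range erase later in this function suggests it is), its iterators stay valid across later insertions as long as the pointed-to entry is not erased:

  auto next_frame_it = frames_.end();
  for (auto it = frames_.begin(); it != frames_.end(); ++it) {
    if (IsContinuous(*it->second)) {
      next_frame_it = it;
      break;
    }
  }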
| + int64_t wait_ms = max_wait_time_ms; |
| + while (true) { |
| + crit_.Enter(); |
|
danilchap
2016/05/13 13:31:04
maybe use
{
rtc::CritScope lock(&crit_);
...
philipel
2016/05/16 12:10:21
After the change to use an iterator instead of a f
|
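The rtc::CritScope suggestion is the usual RAII locking pattern in this code base: the critical section is entered in the constructor and left automatically when the scope ends, so an early return cannot leave |crit_| held. A sketch of the shape being suggested (not the code that eventually landed):

  while (true) {
    {
      rtc::CritScope lock(&crit_);
      frame_inserted_event_.Reset();
      // ... pick the next continuous frame under the lock ...
    }  // |crit_| is released here, before blocking on the event.
    if (!frame_inserted_event_.Wait(wait_ms)) {
      // ... timeout: return the selected frame or give up ...
    }
  }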
| + frame_inserted_event_.Reset(); |
| + for (auto& kvp : frames_) { |
|
danilchap
2016/05/13 13:31:04
const auto&
though may be explicit type (const std
|
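Since the loop body only reads through the pair, the reviewer's first option is simply:

  for (const auto& kvp : frames_) {

(the truncated second half of the comment appears to suggest spelling out the map's value type explicitly instead of auto).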
| + if (IsContinuous(*(kvp.second.get()))) { |
|
danilchap
2016/05/13 13:31:04
IsContinuous(*kvp.second)
Or add
const FrameObjec
philipel
2016/05/16 12:10:21
Added an additional line.
|
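Reading the truncated suggestion together with philipel's reply, the "additional line" is presumably a named reference to the dereferenced frame, roughly:

  const FrameObject& frame = *kvp.second;
  if (IsContinuous(frame)) {

(illustrative only; the later patchset is not shown here).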
| + frame_key = kvp.first; |
| + int64_t render_time = timing_->RenderTimeMs(kvp.second->timestamp, |
| + clock_->TimeInMilliseconds()); |
|
danilchap
2016/05/13 13:31:04
Maybe you want to cache the current time,
or do you e
philipel
2016/05/16 12:10:21
Done.
|
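Caching the clock read means both timing_ calls observe the same instant instead of two slightly different ones. A sketch of the reviewer's suggestion, using only calls already present in this patch:

  int64_t now_ms = clock_->TimeInMilliseconds();
  int64_t render_time = timing_->RenderTimeMs(kvp.second->timestamp, now_ms);
  wait_ms = timing_->MaxWaitingTime(render_time, now_ms);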
| + wait_ms = timing_->MaxWaitingTime(render_time, |
| + clock_->TimeInMilliseconds()); |
| + |
| + // This will cause the frame buffer to prefer high framerate rather |
| + // than high resolution when the decoder is not decoding fast enough |
| + // and the stream has multiple spatial and temporal layers. |
| + if (wait_ms == 0) |
| + continue; |
| + |
| + break; |
| + } |
| + } |
| + crit_.Leave(); |
| + |
| + // If the timeout occurs, return. Otherwise a new frame has been inserted |
| + // and the best frame to decode next will be selected again. |
| + wait_ms = std::min(wait_ms, max_wait_time_ms); |
| + if (!frame_inserted_event_.Wait(wait_ms)) { |
| + if (frame_key.first != -1) { |
| + crit_.Enter(); |
|
danilchap
2016/05/13 13:31:04
rtc::CritScope lock(&crit_);
philipel
2016/05/16 12:10:21
Don't want to use two different styles within the
|
| + |
| + // TODO(philipel): update jitter estimator with correct values. |
| + jitter_estimator_->UpdateEstimate(100, 100); |
| + |
| + auto frame_it = frames_.find(frame_key); |
|
danilchap
2016/05/13 13:31:04
since you already silently expect frame_it != fram
philipel
2016/05/16 12:10:21
Done.
|
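Since the code below already assumes the lookup succeeds, the reviewer is presumably asking for that assumption to be made explicit, for example:

  auto frame_it = frames_.find(frame_key);
  RTC_DCHECK(frame_it != frames_.end());

(illustrative; the truncated comment may equally be suggesting that an earlier lookup result be reused so no second find() is needed).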
| + std::unique_ptr<FrameObject> frame = std::move(frame_it->second); |
| + frames_.erase(frames_.begin(), ++frame_it); |
| + decoded_frames_[frame->picture_id][frame->spatial_layer] = true; |
| + crit_.Leave(); |
| + return frame; |
| + } else { |
| + break; |
| + } |
| + } |
| + } |
| + return std::unique_ptr<FrameObject>(); |
| +} |
| + |
| +void FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) { |
| + rtc::CritScope lock(&crit_); |
| + if (newest_picture_id_ == -1) |
| + newest_picture_id_ = frame->picture_id; |
| + |
| + if (AheadOf<uint16_t>(frame->picture_id, newest_picture_id_)) |
| + newest_picture_id_ = frame->picture_id; |
| + |
| + // Remove frames as long as we have too many, |kMaxStoredFrames|, or if |
| + // they are too old, |kMaxFrameAge|. |
| + while (!decoded_frames_.empty() && |
| + (decoded_frames_.size() > kMaxStoredFrames || |
| + ForwardDiff<uint16_t>(decoded_frames_.begin()->first, |
| + newest_picture_id_) > kMaxFrameAge)) { |
| + decoded_frames_.erase(decoded_frames_.begin()); |
| + } |
| + |
| + auto frame_key = std::make_pair(frame->picture_id, frame->spatial_layer); |
| + RTC_DCHECK(frames_.find(frame_key) == frames_.end()); |
| + frames_.insert(std::make_pair(frame_key, std::move(frame))); |
| + frame_inserted_event_.Set(); |
| +} |
| + |
| +bool FrameBuffer::IsContinuous(const FrameObject& frame) const { |
| + for (size_t r = 0; r < frame.num_references; ++r) { |
| + auto decoded_frame_it = decoded_frames_.find(frame.references[r]); |
| + if (decoded_frame_it == decoded_frames_.end() || |
| + !decoded_frame_it->second[frame.spatial_layer]) { |
| + return false; |
| + } |
| + } |
| + |
| + if (frame.inter_layer_predicted) { |
| + RTC_DCHECK_GT(frame.spatial_layer, 0); |
| + auto decoded_frame_it = decoded_frames_.find(frame.picture_id); |
| + if (decoded_frame_it == decoded_frames_.end() || |
| + !decoded_frame_it->second[frame.spatial_layer - 1]) { |
| + return false; |
| + } |
| + } |
| + |
| + return true; |
| +} |
| + |
| + |
| +} // namespace video_coding |
| +} // namespace webrtc |