Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(162)

Unified Diff: webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc

Issue 1198853004: Add statistics gathering for packet loss. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
diff --git a/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc b/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
new file mode 100644
index 0000000000000000000000000000000000000000..392977f0aa5a584b5f5bd20caf496b7272b9edd1
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/packet_loss_stats.h"
+
// Upper bound on the total number of lost-packet sequence numbers retained
// across lost_packets_buffer_ and lost_packets_wrapped_buffer_; once exceeded,
// the oldest contiguous run is pruned into the historic counts.
// size_t so comparisons against std::set::size() are not signed/unsigned.
static const size_t kBufferSize = 100;
noahric 2015/06/26 00:21:19 Comment or name to point out that this is the size
bcornell 2015/06/30 19:47:45 Done.
+
+namespace webrtc {
+
+PacketLossStats::PacketLossStats()
+ : single_loss_historic_count_(0),
+ multiple_loss_historic_event_count_(0),
+ multiple_loss_historic_packet_count_(0) {
+}
+
+void PacketLossStats::AddLostPacket(uint16_t sequence_number) {
+ // Detect sequence number wrap around.
noahric 2015/06/26 00:21:19 There's another way of handling this where you hav
bcornell 2015/06/30 19:47:45 Indeed, I'm not sure how to handle that approach i
+ if (sequence_number < 0x4000 && !lost_packets_buffer_.empty()
noahric 2015/06/26 00:21:19 If you lose every ~200 packets or so, this wraparo
bcornell 2015/06/30 19:47:45 True, this did not handle extremely sparse loss ve
+ && *(lost_packets_buffer_.begin()) > 0xC000) {
noahric 2015/06/26 00:21:19 && at the end of the previous line (you can proba
bcornell 2015/06/30 19:47:45 Done.
+ // The buffer contains large numbers and this is a small number.
+ lost_packets_wrapped_buffer_.insert(sequence_number);
+ } else {
+ lost_packets_buffer_.insert(sequence_number);
+ }
+ if (lost_packets_wrapped_buffer_.size() + lost_packets_buffer_.size()
+ > kBufferSize) {
+ PruneBuffer();
+ }
+}
+
+int PacketLossStats::GetSingleLossCount() const {
+ int single_loss_count, unused1, unused2;
+ ComputeLossCounts(&single_loss_count, &unused1, &unused2);
+ return single_loss_count;
+}
+
+int PacketLossStats::GetMultipleLossEventCount() const {
+ int event_count, unused1, unused2;
+ ComputeLossCounts(&unused1, &event_count, &unused2);
+ return event_count;
+}
+
+int PacketLossStats::GetMultipleLossPacketCount() const {
+ int packet_count, unused1, unused2;
+ ComputeLossCounts(&unused1, &unused2, &packet_count);
+ return packet_count;
+}
+
+void PacketLossStats::ComputeLossCounts(
+ int* out_single_loss_count,
+ int* out_multiple_loss_event_count,
+ int* out_multiple_loss_packet_count) const {
+ *out_single_loss_count = single_loss_historic_count_;
+ *out_multiple_loss_event_count = multiple_loss_historic_event_count_;
+ *out_multiple_loss_packet_count = multiple_loss_historic_packet_count_;
+ if (lost_packets_buffer_.empty()) {
+ return;
noahric 2015/06/26 00:21:19 Consider DCHECK to ensure lost_packets_wrapped_buf
bcornell 2015/06/30 19:47:45 Done.
+ }
+ uint16_t last_num = 0;
+ int sequential_count = 0;
+ const std::set<uint16_t>* buffer = &lost_packets_buffer_;
+ for (int i = 0; i < 2; ++i) {
noahric 2015/06/26 00:21:19 C++11 ftw: for (auto buffer : { &lost_packets_buf
bcornell 2015/06/30 19:47:45 Done.
+ for (auto it = buffer->begin(); it != buffer->end(); ++it) {
+ uint16_t next_num = *it;
noahric 2015/06/26 00:21:19 I'd say current_num, though I bet it wouldn't fit
bcornell 2015/06/30 19:47:45 Done.
+ if (sequential_count > 0 && next_num != ((last_num + 1) & 0xFFFF)) {
+ if (sequential_count == 1) {
+ (*out_single_loss_count)++;
+ } else if (sequential_count > 1) {
+ (*out_multiple_loss_event_count)++;
+ *out_multiple_loss_packet_count += sequential_count;
+ }
+ sequential_count = 0;
+ }
+ sequential_count++;
+ last_num = next_num;
+ }
+ buffer = &lost_packets_wrapped_buffer_;
+ }
+ if (sequential_count == 1) {
+ (*out_single_loss_count)++;
+ } else if (sequential_count > 1) {
+ (*out_multiple_loss_event_count)++;
+ *out_multiple_loss_packet_count += sequential_count;
+ }
noahric 2015/06/26 00:21:19 Is there a reasonable way to do some more thorough
bcornell 2015/06/30 19:47:45 Well, the point of the buffer is to allow reasonab
+}
+
+void PacketLossStats::PruneBuffer() {
+ // Remove the oldest lost packet and any contiguous packets and move them
+ // into the historic counts.
+ auto it = lost_packets_buffer_.begin();
+ uint16_t last_removed = *it;
+ int remove_count = 1;
+ it = lost_packets_buffer_.erase(it);
+ while (!lost_packets_buffer_.empty() && *it == last_removed + 1) {
+ last_removed++;
+ remove_count++;
+ it = lost_packets_buffer_.erase(it);
+ }
+ // Continue counting if it is wrap around by swapping in the wrapped buffer
+ // and letting our value wrap as well.
+ if (lost_packets_buffer_.empty()) {
+ lost_packets_buffer_.swap(lost_packets_wrapped_buffer_);
+ it = lost_packets_buffer_.begin();
+ while (!lost_packets_buffer_.empty()
noahric 2015/06/26 00:21:19 Too bad you can't easily combine this with the blo
bcornell 2015/06/30 19:47:45 Don't tell me what I can't combine :-p
+ && *it == ((last_removed + 1) & 0xFFFF)) {
+ last_removed = *it;
+ remove_count++;
+ it = lost_packets_buffer_.erase(it);
+ }
+ }
+ if (remove_count > 1) {
+ multiple_loss_historic_event_count_++;
+ multiple_loss_historic_packet_count_ += remove_count;
+ } else {
+ single_loss_historic_count_++;
+ }
+}
+
+} // namespace webrtc

Powered by Google App Engine
This is Rietveld 408576698