Chromium Code Reviews

Side by Side Diff: webrtc/tools/event_log_visualizer/analyzer.cc

Issue 2743933004: Unify the FillAudioEncoderTimeSeries with existing processing functions. (Closed)
Patch Set: Created 3 years, 9 months ago
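
This patch replaces the template-parameterized extractor classes (PacketSizeBytes, SequenceNumberDiff, NetworkDelayDiff, Accumulated) and the Pointwise/Pairwise/MovingAverage templates with free functions (ProcessPoints, ProcessPairs, AccumulatePoints, AccumulatePairs, MovingAverage) that take an rtc::FunctionView callback returning rtc::Optional, so the audio-encoder graphs can drop the special-purpose FillAudioEncoderTimeSeries helper. A minimal sketch of the before/after pattern, using std::function and stand-in types rather than the real WebRTC ones:

#include <cstdint>
#include <functional>
#include <vector>

struct DataPoint { uint64_t timestamp; float value; };  // stand-in type
struct Point { float x, y; };                           // stand-in type

// Before: the extraction step is a template parameter, so every new plot
// needs a dedicated functor class with DataType/ResultType typedefs.
template <typename Extractor>
void Pointwise(const std::vector<DataPoint>& data, std::vector<Point>* out) {
  Extractor extract;
  for (const DataPoint& d : data)
    out->push_back({static_cast<float>(d.timestamp) / 1000000, extract(d)});
}

// After: the extraction step is an ordinary callback, so call sites pass a
// lambda inline. (The real code uses rtc::FunctionView and rtc::Optional;
// std::function approximates them in this sketch.)
void ProcessPoints(std::function<float(const DataPoint&)> get_y,
                   const std::vector<DataPoint>& data,
                   std::vector<Point>* out) {
  for (const DataPoint& d : data)
    out->push_back({static_cast<float>(d.timestamp) / 1000000, get_y(d)});
}

Passing the callback as a plain argument means one instantiation per data type instead of one per extractor, and a lambda can decline to produce a point by returning an empty optional.
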
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 115 matching lines...)
126 default_map.Register<AbsoluteSendTime>( 126 default_map.Register<AbsoluteSendTime>(
127 webrtc::RtpExtension::kAbsSendTimeDefaultId); 127 webrtc::RtpExtension::kAbsSendTimeDefaultId);
128 return default_map; 128 return default_map;
129 } 129 }
130 130
131 constexpr float kLeftMargin = 0.01f; 131 constexpr float kLeftMargin = 0.01f;
132 constexpr float kRightMargin = 0.02f; 132 constexpr float kRightMargin = 0.02f;
133 constexpr float kBottomMargin = 0.02f; 133 constexpr float kBottomMargin = 0.02f;
134 constexpr float kTopMargin = 0.05f; 134 constexpr float kTopMargin = 0.05f;
135 135
136 class PacketSizeBytes { 136 rtc::Optional<double> NetworkDelayDiff_AbsSendTime(
137 public: 137 const LoggedRtpPacket& old_packet,
138 using DataType = LoggedRtpPacket; 138 const LoggedRtpPacket& new_packet) {
139 using ResultType = size_t; 139 if (old_packet.header.extension.hasAbsoluteSendTime &&
140 size_t operator()(const LoggedRtpPacket& packet) { 140 new_packet.header.extension.hasAbsoluteSendTime) {
141 return packet.total_length; 141 int64_t send_time_diff = WrappingDifference(
142 } 142 new_packet.header.extension.absoluteSendTime,
143 }; 143 old_packet.header.extension.absoluteSendTime, 1ul << 24);
144 144 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;
145 class SequenceNumberDiff { 145 double delay_change_us =
146 public: 146 recv_time_diff - AbsSendTimeToMicroseconds(send_time_diff);
147 using DataType = LoggedRtpPacket; 147 return rtc::Optional<double>(delay_change_us / 1000);
148 using ResultType = int64_t; 148 } else {
149 int64_t operator()(const LoggedRtpPacket& old_packet, 149 return rtc::Optional<double>();
150 const LoggedRtpPacket& new_packet) {
151 return WrappingDifference(new_packet.header.sequenceNumber,
152 old_packet.header.sequenceNumber, 1ul << 16);
153 }
154 };
155
156 class NetworkDelayDiff {
157 public:
158 class AbsSendTime {
159 public:
160 using DataType = LoggedRtpPacket;
161 using ResultType = double;
162 double operator()(const LoggedRtpPacket& old_packet,
163 const LoggedRtpPacket& new_packet) {
164 if (old_packet.header.extension.hasAbsoluteSendTime &&
165 new_packet.header.extension.hasAbsoluteSendTime) {
166 int64_t send_time_diff = WrappingDifference(
167 new_packet.header.extension.absoluteSendTime,
168 old_packet.header.extension.absoluteSendTime, 1ul << 24);
169 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;
170 return static_cast<double>(recv_time_diff -
171 AbsSendTimeToMicroseconds(send_time_diff)) /
172 1000;
173 } else {
174 return 0;
175 }
176 }
177 };
178
179 class CaptureTime {
180 public:
181 using DataType = LoggedRtpPacket;
182 using ResultType = double;
183 double operator()(const LoggedRtpPacket& old_packet,
184 const LoggedRtpPacket& new_packet) {
185 int64_t send_time_diff = WrappingDifference(
186 new_packet.header.timestamp, old_packet.header.timestamp, 1ull << 32);
187 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;
188
189 const double kVideoSampleRate = 90000;
190 // TODO(terelius): We treat all streams as video for now, even though
191 // audio might be sampled at e.g. 16kHz, because it is really difficult to
192 // figure out the true sampling rate of a stream. The effect is that the
193 // delay will be scaled incorrectly for non-video streams.
194
195 double delay_change =
196 static_cast<double>(recv_time_diff) / 1000 -
197 static_cast<double>(send_time_diff) / kVideoSampleRate * 1000;
198 if (delay_change < -10000 || 10000 < delay_change) {
199 LOG(LS_WARNING) << "Very large delay change. Timestamps correct?";
200 LOG(LS_WARNING) << "Old capture time " << old_packet.header.timestamp
201 << ", received time " << old_packet.timestamp;
202 LOG(LS_WARNING) << "New capture time " << new_packet.header.timestamp
203 << ", received time " << new_packet.timestamp;
204 LOG(LS_WARNING) << "Receive time difference " << recv_time_diff << " = "
205 << static_cast<double>(recv_time_diff) / 1000000 << "s";
206 LOG(LS_WARNING) << "Send time difference " << send_time_diff << " = "
207 << static_cast<double>(send_time_diff) /
208 kVideoSampleRate
209 << "s";
210 }
211 return delay_change;
212 }
213 };
214 };
215
216 template <typename Extractor>
217 class Accumulated {
218 public:
219 using DataType = typename Extractor::DataType;
220 using ResultType = typename Extractor::ResultType;
221 ResultType operator()(const DataType& old_packet,
222 const DataType& new_packet) {
223 sum += extract(old_packet, new_packet);
224 return sum;
225 }
226
227 private:
228 Extractor extract;
229 ResultType sum = 0;
230 };
231
232 // For each element in data, use |Extractor| to extract a y-coordinate and
233 // store the result in a TimeSeries.
234 template <typename Extractor>
235 void Pointwise(const std::vector<typename Extractor::DataType>& data,
236 uint64_t begin_time,
237 TimeSeries* result) {
238 Extractor extract;
239 for (size_t i = 0; i < data.size(); i++) {
240 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
241 float y = extract(data[i]);
242 result->points.emplace_back(x, y);
243 } 150 }
244 } 151 }
245 152
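
The abs-send-time header extension is a 24-bit counter (6.18 fixed point seconds, so it wraps roughly every 64 seconds), which is why the difference above is taken modulo 1 << 24. WrappingDifference itself is defined earlier in this file, in the skipped region; the sketch below is an illustrative re-implementation of its assumed contract — the signed difference mapped into (-modulus/2, modulus/2] — not the WebRTC original:

#include <cstdint>

// Assumption: WrappingDifference(later, earlier, m) returns
// (later - earlier) interpreted modulo m, mapped into (-m/2, m/2].
int64_t WrappingDifferenceSketch(uint32_t later, uint32_t earlier,
                                 int64_t modulus) {
  int64_t diff = static_cast<int64_t>(later) - static_cast<int64_t>(earlier);
  diff %= modulus;
  if (2 * diff <= -modulus) diff += modulus;  // crossed a wrap going forward
  if (2 * diff > modulus) diff -= modulus;    // crossed a wrap going backward
  return diff;
}

// Example: a 24-bit counter that wrapped between two packets.
// WrappingDifferenceSketch(5, (1 << 24) - 3, int64_t{1} << 24) == 8,
// i.e. the send time advanced by 8 ticks, not by -16777208.
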
246 // For each pair of adjacent elements in |data|, use |Extractor| to extract a 153 rtc::Optional<double> NetworkDelayDiff_CaptureTime(
247 // y-coordinate and store the result in a TimeSeries. Note that the x-coordinate 154 const LoggedRtpPacket& old_packet,
248 // will be the time of the second element in the pair. 155 const LoggedRtpPacket& new_packet) {
249 template <typename Extractor> 156 int64_t send_time_diff = WrappingDifference(
250 void Pairwise(const std::vector<typename Extractor::DataType>& data, 157 new_packet.header.timestamp, old_packet.header.timestamp, 1ull << 32);
251 uint64_t begin_time, 158 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp;
252 TimeSeries* result) { 159
253 Extractor extract; 160 const double kVideoSampleRate = 90000;
254 for (size_t i = 1; i < data.size(); i++) { 161 // TODO(terelius): We treat all streams as video for now, even though
162 // audio might be sampled at e.g. 16kHz, because it is really difficult to
163 // figure out the true sampling rate of a stream. The effect is that the
164 // delay will be scaled incorrectly for non-video streams.
165
166 double delay_change =
167 static_cast<double>(recv_time_diff) / 1000 -
168 static_cast<double>(send_time_diff) / kVideoSampleRate * 1000;
169 if (delay_change < -10000 || 10000 < delay_change) {
170 LOG(LS_WARNING) << "Very large delay change. Timestamps correct?";
171 LOG(LS_WARNING) << "Old capture time " << old_packet.header.timestamp
172 << ", received time " << old_packet.timestamp;
173 LOG(LS_WARNING) << "New capture time " << new_packet.header.timestamp
174 << ", received time " << new_packet.timestamp;
175 LOG(LS_WARNING) << "Receive time difference " << recv_time_diff << " = "
176 << static_cast<double>(recv_time_diff) / 1000000 << "s";
177 LOG(LS_WARNING) << "Send time difference " << send_time_diff << " = "
178 << static_cast<double>(send_time_diff) / kVideoSampleRate
179 << "s";
180 }
181 return rtc::Optional<double>(delay_change);
182 }
183
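A worked example of the unit conversions in the formula above, with made-up numbers (RTP capture timestamps tick at the assumed 90 kHz video clock; receive timestamps are in microseconds):

#include <cstdint>

double DelayChangeExampleMs() {
  // Hypothetical numbers, only to make the units concrete.
  int64_t send_time_diff = 2700;   // 90 kHz ticks: 2700 / 90000 s = 30 ms
  int64_t recv_time_diff = 33000;  // microseconds: 33 ms between arrivals
  return recv_time_diff / 1000.0                // 33 ms receive spacing
         - send_time_diff / 90000.0 * 1000.0;   // - 30 ms send spacing
}
// Returns 3.0: the second packet spent 3 ms longer in flight than the first.
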
184 // For each element in data, use |get_y()| to extract a y-coordinate and
185 // store the result in a TimeSeries.
186 template <typename DataType>
187 void ProcessPoints(
188 rtc::FunctionView<rtc::Optional<float>(const DataType&)> get_y,
189 const std::vector<DataType>& data,
190 uint64_t begin_time,
191 TimeSeries* result) {
192 for (size_t i = 0; i < data.size(); i++) {
255 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000; 193 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
256 float y = extract(data[i - 1], data[i]); 194 rtc::Optional<float> y = get_y(data[i]);
257 result->points.emplace_back(x, y); 195 if (y)
196 result->points.emplace_back(x, *y);
258 } 197 }
259 } 198 }
260 199
200 // For each pair of adjacent elements in |data|, use |get_y| to extract a
201 // y-coordinate and store the result in a TimeSeries. Note that the x-coordinate
202 // will be the time of the second element in the pair.
203 template <typename DataType, typename ResultType>
204 void ProcessPairs(
205 rtc::FunctionView<rtc::Optional<ResultType>(const DataType&,
206 const DataType&)> get_y,
207 const std::vector<DataType>& data,
208 uint64_t begin_time,
209 TimeSeries* result) {
210 for (size_t i = 1; i < data.size(); i++) {
211 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
212 rtc::Optional<ResultType> y = get_y(data[i - 1], data[i]);
213 if (y)
214 result->points.emplace_back(x, static_cast<float>(*y));
215 }
216 }
217
218 // For each element in data, use |extract()| to extract a y-coordinate and
219 // store the cumulative sum of the extracted values in a TimeSeries.
220 template <typename DataType, typename ResultType>
221 void AccumulatePoints(
222 rtc::FunctionView<rtc::Optional<ResultType>(const DataType&)> extract,
223 const std::vector<DataType>& data,
224 uint64_t begin_time,
225 TimeSeries* result) {
226 ResultType sum = 0;
227 for (size_t i = 0; i < data.size(); i++) {
228 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
229 rtc::Optional<ResultType> y = extract(data[i]);
230 if (y) {
231 sum += *y;
232 result->points.emplace_back(x, static_cast<float>(sum));
233 }
234 }
235 }
236
237 // For each pair of adjacent elements in |data|, use |extract()| to extract a
238 // y-coordinate and store the cumulative sum in a TimeSeries. Note that the
239 // x-coordinate will be the time of the second element in the pair.
240 template <typename DataType, typename ResultType>
241 void AccumulatePairs(
242 rtc::FunctionView<rtc::Optional<ResultType>(const DataType&,
243 const DataType&)> extract,
244 const std::vector<DataType>& data,
245 uint64_t begin_time,
246 TimeSeries* result) {
247 ResultType sum = 0;
248 for (size_t i = 1; i < data.size(); i++) {
249 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000;
250 rtc::Optional<ResultType> y = extract(data[i - 1], data[i]);
251 if (y)
252 sum += *y;
253 result->points.emplace_back(x, static_cast<float>(sum));
254 }
255 }
256
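The Accumulate* variants are prefix sums of the extracted values; feeding the per-pair delay deltas from NetworkDelayDiff_CaptureTime through AccumulatePairs, for example, reconstructs the accumulated latency change (up to the unknown offset of the first packet). A tiny standalone illustration with hypothetical deltas:

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical per-pair delay deltas in milliseconds.
  std::vector<double> deltas = {3.0, -1.5, 0.5};
  double sum = 0;
  for (double d : deltas) {
    sum += d;  // running prefix sum, as in AccumulatePairs
    std::printf("accumulated delay change: %+.1f ms\n", sum);
  }
  // Prints +3.0, +1.5, +2.0.
}
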
261 // Calculates a moving average of |data| and stores the result in a TimeSeries. 257 // Calculates a moving average of |data| and stores the result in a TimeSeries.
262 // A data point is generated every |step| microseconds from |begin_time| 258 // A data point is generated every |step| microseconds from |begin_time|
263 // to |end_time|. The value of each data point is the average of the data 259 // to |end_time|. The value of each data point is the average of the data
264 during the preceding |window_duration_us| microseconds. 260 during the preceding |window_duration_us| microseconds.
265 template <typename Extractor> 261 template <typename DataType, typename ResultType>
266 void MovingAverage(const std::vector<typename Extractor::DataType>& data, 262 void MovingAverage(
267 uint64_t begin_time, 263 rtc::FunctionView<rtc::Optional<ResultType>(const DataType&)> extract,
268 uint64_t end_time, 264 const std::vector<DataType>& data,
269 uint64_t window_duration_us, 265 uint64_t begin_time,
270 uint64_t step, 266 uint64_t end_time,
271 float y_scaling, 267 uint64_t window_duration_us,
272 webrtc::plotting::TimeSeries* result) { 268 uint64_t step,
269 webrtc::plotting::TimeSeries* result) {
273 size_t window_index_begin = 0; 270 size_t window_index_begin = 0;
274 size_t window_index_end = 0; 271 size_t window_index_end = 0;
275 typename Extractor::ResultType sum_in_window = 0; 272 ResultType sum_in_window = 0;
276 Extractor extract;
277 273
278 for (uint64_t t = begin_time; t < end_time + step; t += step) { 274 for (uint64_t t = begin_time; t < end_time + step; t += step) {
279 while (window_index_end < data.size() && 275 while (window_index_end < data.size() &&
280 data[window_index_end].timestamp < t) { 276 data[window_index_end].timestamp < t) {
281 sum_in_window += extract(data[window_index_end]); 277 rtc::Optional<ResultType> value = extract(data[window_index_end]);
278 if (value)
279 sum_in_window += *value;
282 ++window_index_end; 280 ++window_index_end;
283 } 281 }
284 while (window_index_begin < data.size() && 282 while (window_index_begin < data.size() &&
285 data[window_index_begin].timestamp < t - window_duration_us) { 283 data[window_index_begin].timestamp < t - window_duration_us) {
286 sum_in_window -= extract(data[window_index_begin]); 284 rtc::Optional<ResultType> value = extract(data[window_index_begin]);
285 if (value)
286 sum_in_window -= *value;
287 ++window_index_begin; 287 ++window_index_begin;
288 } 288 }
289 float window_duration_s = static_cast<float>(window_duration_us) / 1000000; 289 float window_duration_s = static_cast<float>(window_duration_us) / 1000000;
290 float x = static_cast<float>(t - begin_time) / 1000000; 290 float x = static_cast<float>(t - begin_time) / 1000000;
291 float y = sum_in_window / window_duration_s * y_scaling; 291 float y = sum_in_window / window_duration_s;
292 result->points.emplace_back(x, y); 292 result->points.emplace_back(x, y);
293 } 293 }
294 } 294 }
295 295
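
Note that the y_scaling parameter is gone: any scaling now lives in the extractor (the per-stream bitrate graph below returns kilobits per packet from its lambda). The two window cursors each sweep the data once, so building the series is linear in the number of packets plus steps, and each point is sum_in_window / window_duration_s, which makes the units come out as a rate. A quick sanity check with assumed numbers:

#include <cstdio>

int main() {
  // Assumed: 26 packets of 1200 bytes each inside a 250 ms window.
  double kilobits_per_packet = 1200 * 8.0 / 1000.0;  // 9.6 kbit
  double sum_in_window = 26 * kilobits_per_packet;   // 249.6 kbit
  double window_duration_s = 0.25;
  std::printf("%.1f kbps\n", sum_in_window / window_duration_s);  // 998.4
}
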
296 } // namespace 296 } // namespace
297 297
298 EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log) 298 EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
299 : parsed_log_(log), window_duration_(250000), step_(10000) { 299 : parsed_log_(log), window_duration_(250000), step_(10000) {
300 uint64_t first_timestamp = std::numeric_limits<uint64_t>::max(); 300 uint64_t first_timestamp = std::numeric_limits<uint64_t>::max();
301 uint64_t last_timestamp = std::numeric_limits<uint64_t>::min(); 301 uint64_t last_timestamp = std::numeric_limits<uint64_t>::min();
(...skipping 253 matching lines...)
555 name << "RTX "; 555 name << "RTX ";
556 if (stream_id.GetDirection() == kIncomingPacket) { 556 if (stream_id.GetDirection() == kIncomingPacket) {
557 name << "(In) "; 557 name << "(In) ";
558 } else { 558 } else {
559 name << "(Out) "; 559 name << "(Out) ";
560 } 560 }
561 name << SsrcToString(stream_id.GetSsrc()); 561 name << SsrcToString(stream_id.GetSsrc());
562 return name.str(); 562 return name.str();
563 } 563 }
564 564
565 void EventLogAnalyzer::FillAudioEncoderTimeSeries(
566 Plot* plot,
567 rtc::FunctionView<rtc::Optional<float>(
568 const AudioNetworkAdaptationEvent& ana_event)> get_y) const {
569 plot->series_list_.push_back(TimeSeries());
570 plot->series_list_.back().style = LINE_DOT_GRAPH;
571 for (auto& ana_event : audio_network_adaptation_events_) {
572 rtc::Optional<float> y = get_y(ana_event);
573 if (y) {
574 float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
575 plot->series_list_.back().points.emplace_back(x, *y);
576 }
577 }
578 }
579
580 void EventLogAnalyzer::CreatePacketGraph(PacketDirection desired_direction, 565 void EventLogAnalyzer::CreatePacketGraph(PacketDirection desired_direction,
581 Plot* plot) { 566 Plot* plot) {
582 for (auto& kv : rtp_packets_) { 567 for (auto& kv : rtp_packets_) {
583 StreamId stream_id = kv.first; 568 StreamId stream_id = kv.first;
584 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; 569 const std::vector<LoggedRtpPacket>& packet_stream = kv.second;
585 // Filter on direction and SSRC. 570 // Filter on direction and SSRC.
586 if (stream_id.GetDirection() != desired_direction || 571 if (stream_id.GetDirection() != desired_direction ||
587 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { 572 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) {
588 continue; 573 continue;
589 } 574 }
590 575
591 TimeSeries time_series; 576 TimeSeries time_series;
592 time_series.label = GetStreamName(stream_id); 577 time_series.label = GetStreamName(stream_id);
593 time_series.style = BAR_GRAPH; 578 time_series.style = BAR_GRAPH;
594 Pointwise<PacketSizeBytes>(packet_stream, begin_time_, &time_series); 579 ProcessPoints<LoggedRtpPacket>(
580 [](const LoggedRtpPacket& packet) -> rtc::Optional<float> {
581 return rtc::Optional<float>(packet.total_length);
582 },
583 packet_stream, begin_time_, &time_series);
595 plot->series_list_.push_back(std::move(time_series)); 584 plot->series_list_.push_back(std::move(time_series));
596 } 585 }
597 586
598 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 587 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
599 plot->SetSuggestedYAxis(0, 1, "Packet size (bytes)", kBottomMargin, 588 plot->SetSuggestedYAxis(0, 1, "Packet size (bytes)", kBottomMargin,
600 kTopMargin); 589 kTopMargin);
601 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) { 590 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) {
602 plot->SetTitle("Incoming RTP packets"); 591 plot->SetTitle("Incoming RTP packets");
603 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) { 592 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) {
604 plot->SetTitle("Outgoing RTP packets"); 593 plot->SetTitle("Outgoing RTP packets");
(...skipping 125 matching lines...)
730 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; 719 const std::vector<LoggedRtpPacket>& packet_stream = kv.second;
731 // Filter on direction and SSRC. 720 // Filter on direction and SSRC.
732 if (stream_id.GetDirection() != kIncomingPacket || 721 if (stream_id.GetDirection() != kIncomingPacket ||
733 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { 722 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) {
734 continue; 723 continue;
735 } 724 }
736 725
737 TimeSeries time_series; 726 TimeSeries time_series;
738 time_series.label = GetStreamName(stream_id); 727 time_series.label = GetStreamName(stream_id);
739 time_series.style = BAR_GRAPH; 728 time_series.style = BAR_GRAPH;
740 Pairwise<SequenceNumberDiff>(packet_stream, begin_time_, &time_series); 729 ProcessPairs<LoggedRtpPacket, float>(
730 [](const LoggedRtpPacket& old_packet,
731 const LoggedRtpPacket& new_packet) {
732 int64_t diff =
733 WrappingDifference(new_packet.header.sequenceNumber,
734 old_packet.header.sequenceNumber, 1ul << 16);
735 return rtc::Optional<float>(diff);
736 },
737 packet_stream, begin_time_, &time_series);
741 plot->series_list_.push_back(std::move(time_series)); 738 plot->series_list_.push_back(std::move(time_series));
742 } 739 }
743 740
744 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 741 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
745 plot->SetSuggestedYAxis(0, 1, "Difference since last packet", kBottomMargin, 742 plot->SetSuggestedYAxis(0, 1, "Difference since last packet", kBottomMargin,
746 kTopMargin); 743 kTopMargin);
747 plot->SetTitle("Sequence number"); 744 plot->SetTitle("Sequence number");
748 } 745 }
749 746
750 void EventLogAnalyzer::CreateIncomingPacketLossGraph(Plot* plot) { 747 void EventLogAnalyzer::CreateIncomingPacketLossGraph(Plot* plot) {
(...skipping 63 matching lines...)
814 if (stream_id.GetDirection() != kIncomingPacket || 811 if (stream_id.GetDirection() != kIncomingPacket ||
815 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) || 812 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) ||
816 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) || 813 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) ||
817 IsRtxSsrc(stream_id)) { 814 IsRtxSsrc(stream_id)) {
818 continue; 815 continue;
819 } 816 }
820 817
821 TimeSeries capture_time_data; 818 TimeSeries capture_time_data;
822 capture_time_data.label = GetStreamName(stream_id) + " capture-time"; 819 capture_time_data.label = GetStreamName(stream_id) + " capture-time";
823 capture_time_data.style = BAR_GRAPH; 820 capture_time_data.style = BAR_GRAPH;
824 Pairwise<NetworkDelayDiff::CaptureTime>(packet_stream, begin_time_, 821 ProcessPairs<LoggedRtpPacket, double>(NetworkDelayDiff_CaptureTime,
825 &capture_time_data); 822 packet_stream, begin_time_,
823 &capture_time_data);
826 plot->series_list_.push_back(std::move(capture_time_data)); 824 plot->series_list_.push_back(std::move(capture_time_data));
827 825
828 TimeSeries send_time_data; 826 TimeSeries send_time_data;
829 send_time_data.label = GetStreamName(stream_id) + " abs-send-time"; 827 send_time_data.label = GetStreamName(stream_id) + " abs-send-time";
830 send_time_data.style = BAR_GRAPH; 828 send_time_data.style = BAR_GRAPH;
831 Pairwise<NetworkDelayDiff::AbsSendTime>(packet_stream, begin_time_, 829 ProcessPairs<LoggedRtpPacket, double>(NetworkDelayDiff_AbsSendTime,
832 &send_time_data); 830 packet_stream, begin_time_,
831 &send_time_data);
833 plot->series_list_.push_back(std::move(send_time_data)); 832 plot->series_list_.push_back(std::move(send_time_data));
834 } 833 }
835 834
836 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 835 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
837 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin, 836 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin,
838 kTopMargin); 837 kTopMargin);
839 plot->SetTitle("Network latency change between consecutive packets"); 838 plot->SetTitle("Network latency change between consecutive packets");
840 } 839 }
841 840
842 void EventLogAnalyzer::CreateAccumulatedDelayChangeGraph(Plot* plot) { 841 void EventLogAnalyzer::CreateAccumulatedDelayChangeGraph(Plot* plot) {
843 for (auto& kv : rtp_packets_) { 842 for (auto& kv : rtp_packets_) {
844 StreamId stream_id = kv.first; 843 StreamId stream_id = kv.first;
845 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; 844 const std::vector<LoggedRtpPacket>& packet_stream = kv.second;
846 // Filter on direction and SSRC. 845 // Filter on direction and SSRC.
847 if (stream_id.GetDirection() != kIncomingPacket || 846 if (stream_id.GetDirection() != kIncomingPacket ||
848 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) || 847 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) ||
849 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) || 848 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) ||
850 IsRtxSsrc(stream_id)) { 849 IsRtxSsrc(stream_id)) {
851 continue; 850 continue;
852 } 851 }
853 852
854 TimeSeries capture_time_data; 853 TimeSeries capture_time_data;
855 capture_time_data.label = GetStreamName(stream_id) + " capture-time"; 854 capture_time_data.label = GetStreamName(stream_id) + " capture-time";
856 capture_time_data.style = LINE_GRAPH; 855 capture_time_data.style = LINE_GRAPH;
857 Pairwise<Accumulated<NetworkDelayDiff::CaptureTime>>( 856 AccumulatePairs<LoggedRtpPacket, double>(NetworkDelayDiff_CaptureTime,
858 packet_stream, begin_time_, &capture_time_data); 857 packet_stream, begin_time_,
858 &capture_time_data);
859 plot->series_list_.push_back(std::move(capture_time_data)); 859 plot->series_list_.push_back(std::move(capture_time_data));
860 860
861 TimeSeries send_time_data; 861 TimeSeries send_time_data;
862 send_time_data.label = GetStreamName(stream_id) + " abs-send-time"; 862 send_time_data.label = GetStreamName(stream_id) + " abs-send-time";
863 send_time_data.style = LINE_GRAPH; 863 send_time_data.style = LINE_GRAPH;
864 Pairwise<Accumulated<NetworkDelayDiff::AbsSendTime>>( 864 AccumulatePairs<LoggedRtpPacket, double>(NetworkDelayDiff_AbsSendTime,
865 packet_stream, begin_time_, &send_time_data); 865 packet_stream, begin_time_,
866 &send_time_data);
866 plot->series_list_.push_back(std::move(send_time_data)); 867 plot->series_list_.push_back(std::move(send_time_data));
867 } 868 }
868 869
869 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 870 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
870 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin, 871 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin,
871 kTopMargin); 872 kTopMargin);
872 plot->SetTitle("Accumulated network latency change"); 873 plot->SetTitle("Accumulated network latency change");
873 } 874 }
874 875
875 // Plot the fraction of packets lost (as perceived by the loss-based BWE). 876 // Plot the fraction of packets lost (as perceived by the loss-based BWE).
(...skipping 103 matching lines...)
979 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; 980 const std::vector<LoggedRtpPacket>& packet_stream = kv.second;
980 // Filter on direction and SSRC. 981 // Filter on direction and SSRC.
981 if (stream_id.GetDirection() != desired_direction || 982 if (stream_id.GetDirection() != desired_direction ||
982 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { 983 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) {
983 continue; 984 continue;
984 } 985 }
985 986
986 TimeSeries time_series; 987 TimeSeries time_series;
987 time_series.label = GetStreamName(stream_id); 988 time_series.label = GetStreamName(stream_id);
988 time_series.style = LINE_GRAPH; 989 time_series.style = LINE_GRAPH;
989 double bytes_to_kilobits = 8.0 / 1000; 990 MovingAverage<LoggedRtpPacket, double>(
990 MovingAverage<PacketSizeBytes>(packet_stream, begin_time_, end_time_, 991 [](const LoggedRtpPacket& packet) {
991 window_duration_, step_, bytes_to_kilobits, 992 return rtc::Optional<double>(packet.total_length * 8.0 / 1000.0);
992 &time_series); 993 },
994 packet_stream, begin_time_, end_time_, window_duration_, step_,
995 &time_series);
993 plot->series_list_.push_back(std::move(time_series)); 996 plot->series_list_.push_back(std::move(time_series));
994 } 997 }
995 998
996 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 999 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
997 plot->SetSuggestedYAxis(0, 1, "Bitrate (kbps)", kBottomMargin, kTopMargin); 1000 plot->SetSuggestedYAxis(0, 1, "Bitrate (kbps)", kBottomMargin, kTopMargin);
998 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) { 1001 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) {
999 plot->SetTitle("Incoming bitrate per stream"); 1002 plot->SetTitle("Incoming bitrate per stream");
1000 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) { 1003 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) {
1001 plot->SetTitle("Outgoing bitrate per stream"); 1004 plot->SetTitle("Outgoing bitrate per stream");
1002 } 1005 }
(...skipping 313 matching lines...)
1316 } 1319 }
1317 } 1320 }
1318 } 1321 }
1319 1322
1320 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 1323 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
1321 plot->SetSuggestedYAxis(0, 1, "Timestamp (90kHz)", kBottomMargin, kTopMargin); 1324 plot->SetSuggestedYAxis(0, 1, "Timestamp (90kHz)", kBottomMargin, kTopMargin);
1322 plot->SetTitle("Timestamps"); 1325 plot->SetTitle("Timestamps");
1323 } 1326 }
1324 1327
1325 void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) { 1328 void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
1326 FillAudioEncoderTimeSeries( 1329 plot->series_list_.push_back(TimeSeries());
1327 plot, [](const AudioNetworkAdaptationEvent& ana_event) { 1330 plot->series_list_.back().style = LINE_DOT_GRAPH;
1331 plot->series_list_.back().label = "Audio encoder target bitrate";
1332 ProcessPoints<AudioNetworkAdaptationEvent>(
1333 [](const AudioNetworkAdaptationEvent& ana_event) -> rtc::Optional<float> {
1328 if (ana_event.config.bitrate_bps) 1334 if (ana_event.config.bitrate_bps)
1329 return rtc::Optional<float>( 1335 return rtc::Optional<float>(
1330 static_cast<float>(*ana_event.config.bitrate_bps)); 1336 static_cast<float>(*ana_event.config.bitrate_bps));
1331 return rtc::Optional<float>(); 1337 return rtc::Optional<float>();
1332 }); 1338 },
1333 plot->series_list_.back().label = "Audio encoder target bitrate"; 1339 audio_network_adaptation_events_, begin_time_,
1340 &plot->series_list_.back());
1334 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 1341 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
1335 plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin); 1342 plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
1336 plot->SetTitle("Reported audio encoder target bitrate"); 1343 plot->SetTitle("Reported audio encoder target bitrate");
1337 } 1344 }
1338 1345
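Each of the remaining audio-encoder graphs below repeats this shape: push a LINE_DOT_GRAPH TimeSeries, then call ProcessPoints with a lambda that returns an engaged rtc::Optional<float> only when the corresponding config field is present. A compact sketch of that gating pattern (std::optional and stand-in structs approximate rtc::Optional and the real event type):

#include <cstdint>
#include <optional>

struct Config { std::optional<int> bitrate_bps; };  // stand-in
struct AudioNetworkAdaptationEvent { uint64_t timestamp; Config config; };

std::optional<float> BitrateY(const AudioNetworkAdaptationEvent& e) {
  if (e.config.bitrate_bps)
    return static_cast<float>(*e.config.bitrate_bps);
  return std::nullopt;  // no bitrate in this event: ProcessPoints skips it
}
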
1339 void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) { 1346 void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
1340 FillAudioEncoderTimeSeries( 1347 plot->series_list_.push_back(TimeSeries());
1341 plot, [](const AudioNetworkAdaptationEvent& ana_event) { 1348 plot->series_list_.back().style = LINE_DOT_GRAPH;
1349 plot->series_list_.back().label = "Audio encoder frame length";
1350 ProcessPoints<AudioNetworkAdaptationEvent>(
1351 [](const AudioNetworkAdaptationEvent& ana_event) {
1342 if (ana_event.config.frame_length_ms) 1352 if (ana_event.config.frame_length_ms)
1343 return rtc::Optional<float>( 1353 return rtc::Optional<float>(
1344 static_cast<float>(*ana_event.config.frame_length_ms)); 1354 static_cast<float>(*ana_event.config.frame_length_ms));
1345 return rtc::Optional<float>(); 1355 return rtc::Optional<float>();
1346 }); 1356 },
1347 plot->series_list_.back().label = "Audio encoder frame length"; 1357 audio_network_adaptation_events_, begin_time_,
1358 &plot->series_list_.back());
1348 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 1359 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
1349 plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin); 1360 plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
1350 plot->SetTitle("Reported audio encoder frame length"); 1361 plot->SetTitle("Reported audio encoder frame length");
1351 } 1362 }
1352 1363
1353 void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph( 1364 void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
1354 Plot* plot) { 1365 Plot* plot) {
1355 FillAudioEncoderTimeSeries( 1366 plot->series_list_.push_back(TimeSeries());
1356 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { 1367 plot->series_list_.back().style = LINE_DOT_GRAPH;
1368 plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
1369 ProcessPoints<AudioNetworkAdaptationEvent>(
1370 [](const AudioNetworkAdaptationEvent& ana_event) {
1357 if (ana_event.config.uplink_packet_loss_fraction) 1371 if (ana_event.config.uplink_packet_loss_fraction)
1358 return rtc::Optional<float>(static_cast<float>( 1372 return rtc::Optional<float>(static_cast<float>(
1359 *ana_event.config.uplink_packet_loss_fraction)); 1373 *ana_event.config.uplink_packet_loss_fraction));
1360 return rtc::Optional<float>(); 1374 return rtc::Optional<float>();
1361 }); 1375 },
1362 plot->series_list_.back().label = "Audio encoder uplink packet loss fraction"; 1376 audio_network_adaptation_events_, begin_time_,
1377 &plot->series_list_.back());
1363 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 1378 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
1364 plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin, 1379 plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
1365 kTopMargin); 1380 kTopMargin);
1366 plot->SetTitle("Reported audio encoder lost packets"); 1381 plot->SetTitle("Reported audio encoder lost packets");
1367 } 1382 }
1368 1383
1369 void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) { 1384 void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
1370 FillAudioEncoderTimeSeries( 1385 plot->series_list_.push_back(TimeSeries());
1371 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { 1386 plot->series_list_.back().style = LINE_DOT_GRAPH;
1387 plot->series_list_.back().label = "Audio encoder FEC";
1388 ProcessPoints<AudioNetworkAdaptationEvent>(
1389 [](const AudioNetworkAdaptationEvent& ana_event) {
1372 if (ana_event.config.enable_fec) 1390 if (ana_event.config.enable_fec)
1373 return rtc::Optional<float>( 1391 return rtc::Optional<float>(
1374 static_cast<float>(*ana_event.config.enable_fec)); 1392 static_cast<float>(*ana_event.config.enable_fec));
1375 return rtc::Optional<float>(); 1393 return rtc::Optional<float>();
1376 }); 1394 },
1377 plot->series_list_.back().label = "Audio encoder FEC"; 1395 audio_network_adaptation_events_, begin_time_,
1396 &plot->series_list_.back());
1378 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 1397 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
1379 plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin); 1398 plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
1380 plot->SetTitle("Reported audio encoder FEC"); 1399 plot->SetTitle("Reported audio encoder FEC");
1381 } 1400 }
1382 1401
1383 void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) { 1402 void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
1384 FillAudioEncoderTimeSeries( 1403 plot->series_list_.push_back(TimeSeries());
1385 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { 1404 plot->series_list_.back().style = LINE_DOT_GRAPH;
1405 plot->series_list_.back().label = "Audio encoder DTX";
1406 ProcessPoints<AudioNetworkAdaptationEvent>(
1407 [](const AudioNetworkAdaptationEvent& ana_event) {
1386 if (ana_event.config.enable_dtx) 1408 if (ana_event.config.enable_dtx)
1387 return rtc::Optional<float>( 1409 return rtc::Optional<float>(
1388 static_cast<float>(*ana_event.config.enable_dtx)); 1410 static_cast<float>(*ana_event.config.enable_dtx));
1389 return rtc::Optional<float>(); 1411 return rtc::Optional<float>();
1390 }); 1412 },
1391 plot->series_list_.back().label = "Audio encoder DTX"; 1413 audio_network_adaptation_events_, begin_time_,
1414 &plot->series_list_.back());
1392 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 1415 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
1393 plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin); 1416 plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
1394 plot->SetTitle("Reported audio encoder DTX"); 1417 plot->SetTitle("Reported audio encoder DTX");
1395 } 1418 }
1396 1419
1397 void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) { 1420 void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
1398 FillAudioEncoderTimeSeries( 1421 plot->series_list_.push_back(TimeSeries());
1399 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { 1422 plot->series_list_.back().style = LINE_DOT_GRAPH;
1423 plot->series_list_.back().label = "Audio encoder number of channels";
1424 ProcessPoints<AudioNetworkAdaptationEvent>(
1425 [](const AudioNetworkAdaptationEvent& ana_event) {
1400 if (ana_event.config.num_channels) 1426 if (ana_event.config.num_channels)
1401 return rtc::Optional<float>( 1427 return rtc::Optional<float>(
1402 static_cast<float>(*ana_event.config.num_channels)); 1428 static_cast<float>(*ana_event.config.num_channels));
1403 return rtc::Optional<float>(); 1429 return rtc::Optional<float>();
1404 }); 1430 },
1405 plot->series_list_.back().label = "Audio encoder number of channels"; 1431 audio_network_adaptation_events_, begin_time_,
1432 &plot->series_list_.back());
1406 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); 1433 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
1407 plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", 1434 plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
1408 kBottomMargin, kTopMargin); 1435 kBottomMargin, kTopMargin);
1409 plot->SetTitle("Reported audio encoder number of channels"); 1436 plot->SetTitle("Reported audio encoder number of channels");
1410 } 1437 }
1411 } // namespace plotting 1438 } // namespace plotting
1412 } // namespace webrtc 1439 } // namespace webrtc