OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
106 default_map.Register<AbsoluteSendTime>( | 106 default_map.Register<AbsoluteSendTime>( |
107 webrtc::RtpExtension::kAbsSendTimeDefaultId); | 107 webrtc::RtpExtension::kAbsSendTimeDefaultId); |
108 return default_map; | 108 return default_map; |
109 } | 109 } |
110 | 110 |
111 constexpr float kLeftMargin = 0.01f; | 111 constexpr float kLeftMargin = 0.01f; |
112 constexpr float kRightMargin = 0.02f; | 112 constexpr float kRightMargin = 0.02f; |
113 constexpr float kBottomMargin = 0.02f; | 113 constexpr float kBottomMargin = 0.02f; |
114 constexpr float kTopMargin = 0.05f; | 114 constexpr float kTopMargin = 0.05f; |
115 | 115 |
116 class PacketSizeBytes { | 116 template <typename SampleDataType, typename ExtractedType> |
| 117 class Pointwise { |
117 public: | 118 public: |
118 using DataType = LoggedRtpPacket; | 119 using DataType = SampleDataType; |
119 using ResultType = size_t; | 120 using ResultType = ExtractedType; |
120 size_t operator()(const LoggedRtpPacket& packet) { | 121 using FunctionType = |
121 return packet.total_length; | 122 rtc::FunctionView<rtc::Optional<ResultType>(const DataType& data)>; |
| 123 |
| 124 explicit Pointwise(FunctionType get_sample) : get_sample_(get_sample) {} |
| 125 |
| 126 rtc::Optional<ResultType> operator()(DataType data) { |
| 127 return get_sample_(data); |
122 } | 128 } |
| 129 |
| 130 private: |
| 131 FunctionType get_sample_; |
123 }; | 132 }; |
124 | 133 |
125 class SequenceNumberDiff { | 134 template <typename SampleDataType, typename ExtractedType> |
| 135 class Pairwise { |
126 public: | 136 public: |
127 using DataType = LoggedRtpPacket; | 137 using DataType = SampleDataType; |
128 using ResultType = int64_t; | 138 using ResultType = ExtractedType; |
129 int64_t operator()(const LoggedRtpPacket& old_packet, | 139 using FunctionType = |
130 const LoggedRtpPacket& new_packet) { | 140 rtc::FunctionView<rtc::Optional<ResultType>(const DataType& last_sample, |
131 return WrappingDifference(new_packet.header.sequenceNumber, | 141 const DataType& sample)>; |
132 old_packet.header.sequenceNumber, 1ul << 16); | 142 explicit Pairwise(FunctionType get_samples) |
| 143 : get_samples_(get_samples) {} |
| 144 rtc::Optional<ResultType> operator()(DataType sample) { |
| 145 auto result = last_sample_ ? get_samples_(*last_sample_, sample) |
| 146 : rtc::Optional<ResultType>(); |
| 147 last_sample_ = rtc::Optional<DataType>(sample); |
| 148 return result; |
133 } | 149 } |
134 }; | |
135 | 150 |
136 class NetworkDelayDiff { | 151 private: |
137 public: | 152 FunctionType get_samples_; |
138 class AbsSendTime { | 153 rtc::Optional<DataType> last_sample_; |
139 public: | |
140 using DataType = LoggedRtpPacket; | |
141 using ResultType = double; | |
142 double operator()(const LoggedRtpPacket& old_packet, | |
143 const LoggedRtpPacket& new_packet) { | |
144 if (old_packet.header.extension.hasAbsoluteSendTime && | |
145 new_packet.header.extension.hasAbsoluteSendTime) { | |
146 int64_t send_time_diff = WrappingDifference( | |
147 new_packet.header.extension.absoluteSendTime, | |
148 old_packet.header.extension.absoluteSendTime, 1ul << 24); | |
149 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp; | |
150 return static_cast<double>(recv_time_diff - | |
151 AbsSendTimeToMicroseconds(send_time_diff)) / | |
152 1000; | |
153 } else { | |
154 return 0; | |
155 } | |
156 } | |
157 }; | |
158 | |
159 class CaptureTime { | |
160 public: | |
161 using DataType = LoggedRtpPacket; | |
162 using ResultType = double; | |
163 double operator()(const LoggedRtpPacket& old_packet, | |
164 const LoggedRtpPacket& new_packet) { | |
165 int64_t send_time_diff = WrappingDifference( | |
166 new_packet.header.timestamp, old_packet.header.timestamp, 1ull << 32); | |
167 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp; | |
168 | |
169 const double kVideoSampleRate = 90000; | |
170 // TODO(terelius): We treat all streams as video for now, even though | |
171 // audio might be sampled at e.g. 16kHz, because it is really difficult to | |
172 // figure out the true sampling rate of a stream. The effect is that the | |
173 // delay will be scaled incorrectly for non-video streams. | |
174 | |
175 double delay_change = | |
176 static_cast<double>(recv_time_diff) / 1000 - | |
177 static_cast<double>(send_time_diff) / kVideoSampleRate * 1000; | |
178 if (delay_change < -10000 || 10000 < delay_change) { | |
179 LOG(LS_WARNING) << "Very large delay change. Timestamps correct?"; | |
180 LOG(LS_WARNING) << "Old capture time " << old_packet.header.timestamp | |
181 << ", received time " << old_packet.timestamp; | |
182 LOG(LS_WARNING) << "New capture time " << new_packet.header.timestamp | |
183 << ", received time " << new_packet.timestamp; | |
184 LOG(LS_WARNING) << "Receive time difference " << recv_time_diff << " = " | |
185 << static_cast<double>(recv_time_diff) / 1000000 << "s"; | |
186 LOG(LS_WARNING) << "Send time difference " << send_time_diff << " = " | |
187 << static_cast<double>(send_time_diff) / | |
188 kVideoSampleRate | |
189 << "s"; | |
190 } | |
191 return delay_change; | |
192 } | |
193 }; | |
194 }; | 154 }; |
195 | 155 |
196 template <typename Extractor> | 156 template <typename Extractor> |
197 class Accumulated { | 157 class Accumulated { |
198 public: | 158 public: |
199 using DataType = typename Extractor::DataType; | 159 using DataType = typename Extractor::DataType; |
200 using ResultType = typename Extractor::ResultType; | 160 using ResultType = typename Extractor::ResultType; |
201 ResultType operator()(const DataType& old_packet, | 161 using FunctionType = typename Extractor::FunctionType; |
202 const DataType& new_packet) { | 162 explicit Accumulated(FunctionType get_sample) : extract(get_sample) {} |
203 sum += extract(old_packet, new_packet); | 163 rtc::Optional<ResultType> operator()(const DataType& sample) { |
204 return sum; | 164 auto result = extract(sample); |
| 165 if (result) |
| 166 sum += *result; |
| 167 return rtc::Optional<ResultType>(sum); |
205 } | 168 } |
206 | 169 |
207 private: | 170 private: |
208 Extractor extract; | 171 Extractor extract; |
209 ResultType sum = 0; | 172 ResultType sum = 0; |
210 }; | 173 }; |
211 | 174 |
| 175 rtc::Optional<double> NetworkDelayAbsSendTime( |
| 176 const LoggedRtpPacket& old_packet, |
| 177 const LoggedRtpPacket& new_packet) { |
| 178 if (old_packet.header.extension.hasAbsoluteSendTime && |
| 179 new_packet.header.extension.hasAbsoluteSendTime) { |
| 180 int64_t send_time_diff = WrappingDifference( |
| 181 new_packet.header.extension.absoluteSendTime, |
| 182 old_packet.header.extension.absoluteSendTime, 1ul << 24); |
| 183 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp; |
| 184 return rtc::Optional<double>( |
| 185 static_cast<double>(recv_time_diff - |
| 186 AbsSendTimeToMicroseconds(send_time_diff)) / |
| 187 1000); |
| 188 } else { |
| 189 return rtc::Optional<double>(0.0); |
| 190 } |
| 191 } |
| 192 |
| 193 rtc::Optional<double> NetworkDelayDiffCaptureTime( |
| 194 const LoggedRtpPacket& old_packet, |
| 195 const LoggedRtpPacket& new_packet) { |
| 196 int64_t send_time_diff = WrappingDifference( |
| 197 new_packet.header.timestamp, old_packet.header.timestamp, 1ull << 32); |
| 198 int64_t recv_time_diff = new_packet.timestamp - old_packet.timestamp; |
| 199 |
| 200 const double kVideoSampleRate = 90000; |
| 201 // TODO(terelius): We treat all streams as video for now, even though |
| 202 // audio might be sampled at e.g. 16kHz, because it is really difficult to |
| 203 // figure out the true sampling rate of a stream. The effect is that the |
| 204 // delay will be scaled incorrectly for non-video streams. |
| 205 |
| 206 double delay_change = |
| 207 static_cast<double>(recv_time_diff) / 1000 - |
| 208 static_cast<double>(send_time_diff) / kVideoSampleRate * 1000; |
| 209 if (delay_change < -10000 || 10000 < delay_change) { |
| 210 LOG(LS_WARNING) << "Very large delay change. Timestamps correct?"; |
| 211 LOG(LS_WARNING) << "Old capture time " << old_packet.header.timestamp |
| 212 << ", received time " << old_packet.timestamp; |
| 213 LOG(LS_WARNING) << "New capture time " << new_packet.header.timestamp |
| 214 << ", received time " << new_packet.timestamp; |
| 215 LOG(LS_WARNING) << "Receive time difference " << recv_time_diff << " = " |
| 216 << static_cast<double>(recv_time_diff) / 1000000 << "s"; |
| 217 LOG(LS_WARNING) << "Send time difference " << send_time_diff << " = " |
| 218 << static_cast<double>(send_time_diff) / kVideoSampleRate |
| 219 << "s"; |
| 220 } |
| 221 return rtc::Optional<double>(delay_change); |
| 222 } |
| 223 |
212 // For each element in data, use |Extractor| to extract a y-coordinate and | 224 // For each element in data, use |Extractor| to extract a y-coordinate and |
213 // store the result in a TimeSeries. | 225 // store the result in a TimeSeries. |
214 template <typename Extractor> | 226 template <typename Extractor> |
215 void Pointwise(const std::vector<typename Extractor::DataType>& data, | 227 void Process(const std::vector<typename Extractor::DataType>& data, |
216 uint64_t begin_time, | 228 uint64_t begin_time, |
217 TimeSeries* result) { | 229 TimeSeries* result, |
218 Extractor extract; | 230 typename Extractor::FunctionType get_sample) { |
219 for (size_t i = 0; i < data.size(); i++) { | 231 Extractor extract(get_sample); |
220 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000; | 232 for (auto& sample : data) { |
221 float y = extract(data[i]); | 233 float x = static_cast<float>(sample.timestamp - begin_time) / 1000000; |
222 result->points.emplace_back(x, y); | 234 auto y = extract(sample); |
| 235 if (y) |
| 236 result->points.emplace_back(x, *y); |
223 } | 237 } |
224 } | 238 } |
225 | 239 |
226 // For each pair of adjacent elements in |data|, use |Extractor| to extract a | |
227 // y-coordinate and store the result in a TimeSeries. Note that the x-coordinate | |
228 // will be the time of the second element in the pair. | |
229 template <typename Extractor> | |
230 void Pairwise(const std::vector<typename Extractor::DataType>& data, | |
231 uint64_t begin_time, | |
232 TimeSeries* result) { | |
233 Extractor extract; | |
234 for (size_t i = 1; i < data.size(); i++) { | |
235 float x = static_cast<float>(data[i].timestamp - begin_time) / 1000000; | |
236 float y = extract(data[i - 1], data[i]); | |
237 result->points.emplace_back(x, y); | |
238 } | |
239 } | |
240 | |
241 // Calculates a moving average of |data| and stores the result in a TimeSeries. | 240 // Calculates a moving average of |data| and stores the result in a TimeSeries. |
242 // A data point is generated every |step| microseconds from |begin_time| | 241 // A data point is generated every |step| microseconds from |begin_time| |
243 // to |end_time|. The value of each data point is the average of the data | 242 // to |end_time|. The value of each data point is the average of the data |
244 // during the preceding |window_duration_us| microseconds. | 243 // during the preceding |window_duration_us| microseconds. |
245 template <typename Extractor> | 244 template <typename Extractor> |
246 void MovingAverage(const std::vector<typename Extractor::DataType>& data, | 245 void MovingAverage(const std::vector<typename Extractor::DataType>& data, |
247 uint64_t begin_time, | 246 uint64_t begin_time, |
248 uint64_t end_time, | 247 uint64_t end_time, |
249 uint64_t window_duration_us, | 248 uint64_t window_duration_us, |
250 uint64_t step, | 249 uint64_t step, |
251 float y_scaling, | 250 float y_scaling, |
252 webrtc::plotting::TimeSeries* result) { | 251 TimeSeries* result, |
| 252 typename Extractor::FunctionType get_sample) { |
253 size_t window_index_begin = 0; | 253 size_t window_index_begin = 0; |
254 size_t window_index_end = 0; | 254 size_t window_index_end = 0; |
255 typename Extractor::ResultType sum_in_window = 0; | 255 typename Extractor::ResultType sum_in_window = 0; |
256 Extractor extract; | 256 Extractor extract(get_sample); |
257 | 257 |
258 for (uint64_t t = begin_time; t < end_time + step; t += step) { | 258 for (uint64_t t = begin_time; t < end_time + step; t += step) { |
259 while (window_index_end < data.size() && | 259 while (window_index_end < data.size() && |
260 data[window_index_end].timestamp < t) { | 260 data[window_index_end].timestamp < t) { |
261 sum_in_window += extract(data[window_index_end]); | 261 auto sample = extract(data[window_index_end]); |
262 ++window_index_end; | 262 ++window_index_end; |
| 263 if (sample) |
| 264 sum_in_window += *sample; |
263 } | 265 } |
264 while (window_index_begin < data.size() && | 266 while (window_index_begin < data.size() && |
265 data[window_index_begin].timestamp < t - window_duration_us) { | 267 data[window_index_begin].timestamp < t - window_duration_us) { |
266 sum_in_window -= extract(data[window_index_begin]); | 268 auto sample = extract(data[window_index_begin]); |
267 ++window_index_begin; | 269 ++window_index_begin; |
| 270 if (sample) |
| 271 sum_in_window -= *sample; |
268 } | 272 } |
269 float window_duration_s = static_cast<float>(window_duration_us) / 1000000; | 273 float window_duration_s = static_cast<float>(window_duration_us) / 1000000; |
270 float x = static_cast<float>(t - begin_time) / 1000000; | 274 float x = static_cast<float>(t - begin_time) / 1000000; |
271 float y = sum_in_window / window_duration_s * y_scaling; | 275 float y = sum_in_window / window_duration_s * y_scaling; |
272 result->points.emplace_back(x, y); | 276 result->points.emplace_back(x, y); |
273 } | 277 } |
274 } | 278 } |
275 | 279 |
276 } // namespace | 280 } // namespace |
277 | 281 |
(...skipping 251 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
529 name << "RTX "; | 533 name << "RTX "; |
530 if (stream_id.GetDirection() == kIncomingPacket) { | 534 if (stream_id.GetDirection() == kIncomingPacket) { |
531 name << "(In) "; | 535 name << "(In) "; |
532 } else { | 536 } else { |
533 name << "(Out) "; | 537 name << "(Out) "; |
534 } | 538 } |
535 name << SsrcToString(stream_id.GetSsrc()); | 539 name << SsrcToString(stream_id.GetSsrc()); |
536 return name.str(); | 540 return name.str(); |
537 } | 541 } |
538 | 542 |
539 void EventLogAnalyzer::FillAudioEncoderTimeSeries( | |
540 Plot* plot, | |
541 rtc::FunctionView<rtc::Optional<float>( | |
542 const AudioNetworkAdaptationEvent& ana_event)> get_y) const { | |
543 plot->series_list_.push_back(TimeSeries()); | |
544 plot->series_list_.back().style = LINE_DOT_GRAPH; | |
545 for (auto& ana_event : audio_network_adaptation_events_) { | |
546 rtc::Optional<float> y = get_y(ana_event); | |
547 if (y) { | |
548 float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000; | |
549 plot->series_list_.back().points.emplace_back(x, *y); | |
550 } | |
551 } | |
552 } | |
553 | |
554 void EventLogAnalyzer::CreatePacketGraph(PacketDirection desired_direction, | 543 void EventLogAnalyzer::CreatePacketGraph(PacketDirection desired_direction, |
555 Plot* plot) { | 544 Plot* plot) { |
556 for (auto& kv : rtp_packets_) { | 545 for (auto& kv : rtp_packets_) { |
557 StreamId stream_id = kv.first; | 546 StreamId stream_id = kv.first; |
558 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; | 547 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; |
559 // Filter on direction and SSRC. | 548 // Filter on direction and SSRC. |
560 if (stream_id.GetDirection() != desired_direction || | 549 if (stream_id.GetDirection() != desired_direction || |
561 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { | 550 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { |
562 continue; | 551 continue; |
563 } | 552 } |
564 | 553 |
565 TimeSeries time_series; | 554 TimeSeries time_series; |
566 time_series.label = GetStreamName(stream_id); | 555 time_series.label = GetStreamName(stream_id); |
567 time_series.style = BAR_GRAPH; | 556 time_series.style = BAR_GRAPH; |
568 Pointwise<PacketSizeBytes>(packet_stream, begin_time_, &time_series); | 557 |
| 558 Process<Pointwise<LoggedRtpPacket, float>>( |
| 559 packet_stream, begin_time_, &time_series, |
| 560 [](const LoggedRtpPacket& packet) { |
| 561 return rtc::Optional<float>(packet.total_length); |
| 562 }); |
569 plot->series_list_.push_back(std::move(time_series)); | 563 plot->series_list_.push_back(std::move(time_series)); |
570 } | 564 } |
571 | 565 |
572 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 566 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
573 plot->SetSuggestedYAxis(0, 1, "Packet size (bytes)", kBottomMargin, | 567 plot->SetSuggestedYAxis(0, 1, "Packet size (bytes)", kBottomMargin, |
574 kTopMargin); | 568 kTopMargin); |
575 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) { | 569 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) { |
576 plot->SetTitle("Incoming RTP packets"); | 570 plot->SetTitle("Incoming RTP packets"); |
577 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) { | 571 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) { |
578 plot->SetTitle("Outgoing RTP packets"); | 572 plot->SetTitle("Outgoing RTP packets"); |
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
704 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; | 698 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; |
705 // Filter on direction and SSRC. | 699 // Filter on direction and SSRC. |
706 if (stream_id.GetDirection() != kIncomingPacket || | 700 if (stream_id.GetDirection() != kIncomingPacket || |
707 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { | 701 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { |
708 continue; | 702 continue; |
709 } | 703 } |
710 | 704 |
711 TimeSeries time_series; | 705 TimeSeries time_series; |
712 time_series.label = GetStreamName(stream_id); | 706 time_series.label = GetStreamName(stream_id); |
713 time_series.style = BAR_GRAPH; | 707 time_series.style = BAR_GRAPH; |
714 Pairwise<SequenceNumberDiff>(packet_stream, begin_time_, &time_series); | 708 |
| 709 Process<Pairwise<LoggedRtpPacket, float>>( |
| 710 packet_stream, begin_time_, &time_series, |
| 711 [](const LoggedRtpPacket& last_sample, const LoggedRtpPacket& sample) { |
| 712 return rtc::Optional<float>(static_cast<float>(WrappingDifference( |
| 713 sample.header.sequenceNumber, last_sample.header.sequenceNumber, |
| 714 1ul << 16))); |
| 715 }); |
715 plot->series_list_.push_back(std::move(time_series)); | 716 plot->series_list_.push_back(std::move(time_series)); |
716 } | 717 } |
717 | 718 |
718 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 719 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
719 plot->SetSuggestedYAxis(0, 1, "Difference since last packet", kBottomMargin, | 720 plot->SetSuggestedYAxis(0, 1, "Difference since last packet", kBottomMargin, |
720 kTopMargin); | 721 kTopMargin); |
721 plot->SetTitle("Sequence number"); | 722 plot->SetTitle("Sequence number"); |
722 } | 723 } |
723 | 724 |
724 void EventLogAnalyzer::CreateIncomingPacketLossGraph(Plot* plot) { | 725 void EventLogAnalyzer::CreateIncomingPacketLossGraph(Plot* plot) { |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
788 if (stream_id.GetDirection() != kIncomingPacket || | 789 if (stream_id.GetDirection() != kIncomingPacket || |
789 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) || | 790 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) || |
790 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) || | 791 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) || |
791 IsRtxSsrc(stream_id)) { | 792 IsRtxSsrc(stream_id)) { |
792 continue; | 793 continue; |
793 } | 794 } |
794 | 795 |
795 TimeSeries capture_time_data; | 796 TimeSeries capture_time_data; |
796 capture_time_data.label = GetStreamName(stream_id) + " capture-time"; | 797 capture_time_data.label = GetStreamName(stream_id) + " capture-time"; |
797 capture_time_data.style = BAR_GRAPH; | 798 capture_time_data.style = BAR_GRAPH; |
798 Pairwise<NetworkDelayDiff::CaptureTime>(packet_stream, begin_time_, | 799 |
799 &capture_time_data); | 800 Process<Pairwise<LoggedRtpPacket, double>>( |
| 801 packet_stream, begin_time_, &capture_time_data, |
| 802 NetworkDelayDiffCaptureTime); |
800 plot->series_list_.push_back(std::move(capture_time_data)); | 803 plot->series_list_.push_back(std::move(capture_time_data)); |
801 | 804 |
802 TimeSeries send_time_data; | 805 TimeSeries send_time_data; |
803 send_time_data.label = GetStreamName(stream_id) + " abs-send-time"; | 806 send_time_data.label = GetStreamName(stream_id) + " abs-send-time"; |
804 send_time_data.style = BAR_GRAPH; | 807 send_time_data.style = BAR_GRAPH; |
805 Pairwise<NetworkDelayDiff::AbsSendTime>(packet_stream, begin_time_, | 808 |
806 &send_time_data); | 809 Process<Pairwise<LoggedRtpPacket, double>>( |
| 810 packet_stream, begin_time_, &send_time_data, NetworkDelayAbsSendTime); |
| 811 |
807 plot->series_list_.push_back(std::move(send_time_data)); | 812 plot->series_list_.push_back(std::move(send_time_data)); |
808 } | 813 } |
809 | 814 |
810 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 815 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
811 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin, | 816 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin, |
812 kTopMargin); | 817 kTopMargin); |
813 plot->SetTitle("Network latency change between consecutive packets"); | 818 plot->SetTitle("Network latency change between consecutive packets"); |
814 } | 819 } |
815 | 820 |
816 void EventLogAnalyzer::CreateAccumulatedDelayChangeGraph(Plot* plot) { | 821 void EventLogAnalyzer::CreateAccumulatedDelayChangeGraph(Plot* plot) { |
817 for (auto& kv : rtp_packets_) { | 822 for (auto& kv : rtp_packets_) { |
818 StreamId stream_id = kv.first; | 823 StreamId stream_id = kv.first; |
819 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; | 824 const std::vector<LoggedRtpPacket>& packet_stream = kv.second; |
820 // Filter on direction and SSRC. | 825 // Filter on direction and SSRC. |
821 if (stream_id.GetDirection() != kIncomingPacket || | 826 if (stream_id.GetDirection() != kIncomingPacket || |
822 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) || | 827 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_) || |
823 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) || | 828 IsAudioSsrc(stream_id) || !IsVideoSsrc(stream_id) || |
824 IsRtxSsrc(stream_id)) { | 829 IsRtxSsrc(stream_id)) { |
825 continue; | 830 continue; |
826 } | 831 } |
827 | 832 |
828 TimeSeries capture_time_data; | 833 TimeSeries capture_time_data; |
829 capture_time_data.label = GetStreamName(stream_id) + " capture-time"; | 834 capture_time_data.label = GetStreamName(stream_id) + " capture-time"; |
830 capture_time_data.style = LINE_GRAPH; | 835 capture_time_data.style = LINE_GRAPH; |
831 Pairwise<Accumulated<NetworkDelayDiff::CaptureTime>>( | 836 |
832 packet_stream, begin_time_, &capture_time_data); | 837 Process<Accumulated<Pairwise<LoggedRtpPacket, double>>>( |
| 838 packet_stream, begin_time_, &capture_time_data, |
| 839 NetworkDelayDiffCaptureTime); |
| 840 |
833 plot->series_list_.push_back(std::move(capture_time_data)); | 841 plot->series_list_.push_back(std::move(capture_time_data)); |
834 | 842 |
835 TimeSeries send_time_data; | 843 TimeSeries send_time_data; |
836 send_time_data.label = GetStreamName(stream_id) + " abs-send-time"; | 844 send_time_data.label = GetStreamName(stream_id) + " abs-send-time"; |
837 send_time_data.style = LINE_GRAPH; | 845 send_time_data.style = LINE_GRAPH; |
838 Pairwise<Accumulated<NetworkDelayDiff::AbsSendTime>>( | 846 Process<Accumulated<Pairwise<LoggedRtpPacket, double>>>( |
839 packet_stream, begin_time_, &send_time_data); | 847 packet_stream, begin_time_, &send_time_data, NetworkDelayAbsSendTime); |
840 plot->series_list_.push_back(std::move(send_time_data)); | 848 plot->series_list_.push_back(std::move(send_time_data)); |
841 } | 849 } |
842 | 850 |
843 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 851 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
844 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin, | 852 plot->SetSuggestedYAxis(0, 1, "Latency change (ms)", kBottomMargin, |
845 kTopMargin); | 853 kTopMargin); |
846 plot->SetTitle("Accumulated network latency change"); | 854 plot->SetTitle("Accumulated network latency change"); |
847 } | 855 } |
848 | 856 |
849 // Plot the fraction of packets lost (as perceived by the loss-based BWE). | 857 // Plot the fraction of packets lost (as perceived by the loss-based BWE). |
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
954 // Filter on direction and SSRC. | 962 // Filter on direction and SSRC. |
955 if (stream_id.GetDirection() != desired_direction || | 963 if (stream_id.GetDirection() != desired_direction || |
956 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { | 964 !MatchingSsrc(stream_id.GetSsrc(), desired_ssrc_)) { |
957 continue; | 965 continue; |
958 } | 966 } |
959 | 967 |
960 TimeSeries time_series; | 968 TimeSeries time_series; |
961 time_series.label = GetStreamName(stream_id); | 969 time_series.label = GetStreamName(stream_id); |
962 time_series.style = LINE_GRAPH; | 970 time_series.style = LINE_GRAPH; |
963 double bytes_to_kilobits = 8.0 / 1000; | 971 double bytes_to_kilobits = 8.0 / 1000; |
964 MovingAverage<PacketSizeBytes>(packet_stream, begin_time_, end_time_, | 972 |
965 window_duration_, step_, bytes_to_kilobits, | 973 MovingAverage<Pointwise<LoggedRtpPacket, float>>( |
966 &time_series); | 974 packet_stream, begin_time_, end_time_, window_duration_, step_, |
| 975 bytes_to_kilobits, &time_series, [](const LoggedRtpPacket& packet) { |
| 976 return rtc::Optional<float>(packet.total_length); |
| 977 }); |
967 plot->series_list_.push_back(std::move(time_series)); | 978 plot->series_list_.push_back(std::move(time_series)); |
968 } | 979 } |
969 | 980 |
970 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 981 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
971 plot->SetSuggestedYAxis(0, 1, "Bitrate (kbps)", kBottomMargin, kTopMargin); | 982 plot->SetSuggestedYAxis(0, 1, "Bitrate (kbps)", kBottomMargin, kTopMargin); |
972 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) { | 983 if (desired_direction == webrtc::PacketDirection::kIncomingPacket) { |
973 plot->SetTitle("Incoming bitrate per stream"); | 984 plot->SetTitle("Incoming bitrate per stream"); |
974 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) { | 985 } else if (desired_direction == webrtc::PacketDirection::kOutgoingPacket) { |
975 plot->SetTitle("Outgoing bitrate per stream"); | 986 plot->SetTitle("Outgoing bitrate per stream"); |
976 } | 987 } |
(...skipping 312 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1289 } | 1300 } |
1290 } | 1301 } |
1291 } | 1302 } |
1292 | 1303 |
1293 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1304 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1294 plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin); | 1305 plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin); |
1295 plot->SetTitle("Timestamps"); | 1306 plot->SetTitle("Timestamps"); |
1296 } | 1307 } |
1297 | 1308 |
1298 void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) { | 1309 void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) { |
1299 FillAudioEncoderTimeSeries( | 1310 plot->series_list_.push_back(TimeSeries()); |
1300 plot, [](const AudioNetworkAdaptationEvent& ana_event) { | 1311 plot->series_list_.back().style = LINE_DOT_GRAPH; |
| 1312 Process<Pointwise<AudioNetworkAdaptationEvent, float>>( |
| 1313 audio_network_adaptation_events_, begin_time_, &plot->series_list_.back(), |
| 1314 [](const AudioNetworkAdaptationEvent& ana_event) { |
1301 if (ana_event.config.bitrate_bps) | 1315 if (ana_event.config.bitrate_bps) |
1302 return rtc::Optional<float>( | 1316 return rtc::Optional<float>( |
1303 static_cast<float>(*ana_event.config.bitrate_bps)); | 1317 static_cast<float>(*ana_event.config.bitrate_bps)); |
1304 return rtc::Optional<float>(); | 1318 return rtc::Optional<float>(); |
1305 }); | 1319 }); |
1306 plot->series_list_.back().label = "Audio encoder target bitrate"; | 1320 plot->series_list_.back().label = "Audio encoder target bitrate"; |
1307 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1321 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1308 plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin); | 1322 plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin); |
1309 plot->SetTitle("Reported audio encoder target bitrate"); | 1323 plot->SetTitle("Reported audio encoder target bitrate"); |
1310 } | 1324 } |
1311 | 1325 |
1312 void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) { | 1326 void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) { |
1313 FillAudioEncoderTimeSeries( | 1327 plot->series_list_.push_back(TimeSeries()); |
1314 plot, [](const AudioNetworkAdaptationEvent& ana_event) { | 1328 plot->series_list_.back().style = LINE_DOT_GRAPH; |
| 1329 Process<Pointwise<AudioNetworkAdaptationEvent, float>>( |
| 1330 audio_network_adaptation_events_, begin_time_, &plot->series_list_.back(), |
| 1331 [](const AudioNetworkAdaptationEvent& ana_event) { |
1315 if (ana_event.config.frame_length_ms) | 1332 if (ana_event.config.frame_length_ms) |
1316 return rtc::Optional<float>( | 1333 return rtc::Optional<float>( |
1317 static_cast<float>(*ana_event.config.frame_length_ms)); | 1334 static_cast<float>(*ana_event.config.frame_length_ms)); |
1318 return rtc::Optional<float>(); | 1335 return rtc::Optional<float>(); |
1319 }); | 1336 }); |
1320 plot->series_list_.back().label = "Audio encoder frame length"; | 1337 plot->series_list_.back().label = "Audio encoder frame length"; |
1321 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1338 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1322 plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin); | 1339 plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin); |
1323 plot->SetTitle("Reported audio encoder frame length"); | 1340 plot->SetTitle("Reported audio encoder frame length"); |
1324 } | 1341 } |
1325 | 1342 |
1326 void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph( | 1343 void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph( |
1327 Plot* plot) { | 1344 Plot* plot) { |
1328 FillAudioEncoderTimeSeries( | 1345 plot->series_list_.push_back(TimeSeries()); |
1329 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { | 1346 plot->series_list_.back().style = LINE_DOT_GRAPH; |
| 1347 Process<Pointwise<AudioNetworkAdaptationEvent, float>>( |
| 1348 audio_network_adaptation_events_, begin_time_, &plot->series_list_.back(), |
| 1349 [&](const AudioNetworkAdaptationEvent& ana_event) { |
1330 if (ana_event.config.uplink_packet_loss_fraction) | 1350 if (ana_event.config.uplink_packet_loss_fraction) |
1331 return rtc::Optional<float>(static_cast<float>( | 1351 return rtc::Optional<float>(static_cast<float>( |
1332 *ana_event.config.uplink_packet_loss_fraction)); | 1352 *ana_event.config.uplink_packet_loss_fraction)); |
1333 return rtc::Optional<float>(); | 1353 return rtc::Optional<float>(); |
1334 }); | 1354 }); |
1335 plot->series_list_.back().label = "Audio encoder uplink packet loss fraction"; | 1355 plot->series_list_.back().label = "Audio encoder uplink packet loss fraction"; |
1336 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1356 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1337 plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin, | 1357 plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin, |
1338 kTopMargin); | 1358 kTopMargin); |
1339 plot->SetTitle("Reported audio encoder lost packets"); | 1359 plot->SetTitle("Reported audio encoder lost packets"); |
1340 } | 1360 } |
1341 | 1361 |
1342 void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) { | 1362 void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) { |
1343 FillAudioEncoderTimeSeries( | 1363 plot->series_list_.push_back(TimeSeries()); |
1344 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { | 1364 plot->series_list_.back().style = LINE_DOT_GRAPH; |
| 1365 Process<Pointwise<AudioNetworkAdaptationEvent, float>>( |
| 1366 audio_network_adaptation_events_, begin_time_, &plot->series_list_.back(), |
| 1367 [&](const AudioNetworkAdaptationEvent& ana_event) { |
1345 if (ana_event.config.enable_fec) | 1368 if (ana_event.config.enable_fec) |
1346 return rtc::Optional<float>( | 1369 return rtc::Optional<float>( |
1347 static_cast<float>(*ana_event.config.enable_fec)); | 1370 static_cast<float>(*ana_event.config.enable_fec)); |
1348 return rtc::Optional<float>(); | 1371 return rtc::Optional<float>(); |
1349 }); | 1372 }); |
1350 plot->series_list_.back().label = "Audio encoder FEC"; | 1373 plot->series_list_.back().label = "Audio encoder FEC"; |
1351 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1374 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1352 plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin); | 1375 plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin); |
1353 plot->SetTitle("Reported audio encoder FEC"); | 1376 plot->SetTitle("Reported audio encoder FEC"); |
1354 } | 1377 } |
1355 | 1378 |
1356 void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) { | 1379 void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) { |
1357 FillAudioEncoderTimeSeries( | 1380 plot->series_list_.push_back(TimeSeries()); |
1358 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { | 1381 plot->series_list_.back().style = LINE_DOT_GRAPH; |
| 1382 Process<Pointwise<AudioNetworkAdaptationEvent, float>>( |
| 1383 audio_network_adaptation_events_, begin_time_, &plot->series_list_.back(), |
| 1384 [&](const AudioNetworkAdaptationEvent& ana_event) { |
1359 if (ana_event.config.enable_dtx) | 1385 if (ana_event.config.enable_dtx) |
1360 return rtc::Optional<float>( | 1386 return rtc::Optional<float>( |
1361 static_cast<float>(*ana_event.config.enable_dtx)); | 1387 static_cast<float>(*ana_event.config.enable_dtx)); |
1362 return rtc::Optional<float>(); | 1388 return rtc::Optional<float>(); |
1363 }); | 1389 }); |
1364 plot->series_list_.back().label = "Audio encoder DTX"; | 1390 plot->series_list_.back().label = "Audio encoder DTX"; |
1365 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1391 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1366 plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin); | 1392 plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin); |
1367 plot->SetTitle("Reported audio encoder DTX"); | 1393 plot->SetTitle("Reported audio encoder DTX"); |
1368 } | 1394 } |
1369 | 1395 |
1370 void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) { | 1396 void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) { |
1371 FillAudioEncoderTimeSeries( | 1397 plot->series_list_.push_back(TimeSeries()); |
1372 plot, [&](const AudioNetworkAdaptationEvent& ana_event) { | 1398 plot->series_list_.back().style = LINE_DOT_GRAPH; |
| 1399 Process<Pointwise<AudioNetworkAdaptationEvent, float>>( |
| 1400 audio_network_adaptation_events_, begin_time_, &plot->series_list_.back(), |
| 1401 [&](const AudioNetworkAdaptationEvent& ana_event) { |
1373 if (ana_event.config.num_channels) | 1402 if (ana_event.config.num_channels) |
1374 return rtc::Optional<float>( | 1403 return rtc::Optional<float>( |
1375 static_cast<float>(*ana_event.config.num_channels)); | 1404 static_cast<float>(*ana_event.config.num_channels)); |
1376 return rtc::Optional<float>(); | 1405 return rtc::Optional<float>(); |
1377 }); | 1406 }); |
1378 plot->series_list_.back().label = "Audio encoder number of channels"; | 1407 plot->series_list_.back().label = "Audio encoder number of channels"; |
1379 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1408 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1380 plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", | 1409 plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", |
1381 kBottomMargin, kTopMargin); | 1410 kBottomMargin, kTopMargin); |
1382 plot->SetTitle("Reported audio encoder number of channels"); | 1411 plot->SetTitle("Reported audio encoder number of channels"); |
1383 } | 1412 } |
1384 } // namespace plotting | 1413 } // namespace plotting |
1385 } // namespace webrtc | 1414 } // namespace webrtc |
OLD | NEW |