Index: webrtc/tools/event_log_visualizer/analyzer.cc
diff --git a/webrtc/tools/event_log_visualizer/analyzer.cc b/webrtc/tools/event_log_visualizer/analyzer.cc
index 90eb5b3f12e35d49574962f3812fff771570d958..a09d6adae94d1a18554c108d44b7b0d1798537b8 100644
--- a/webrtc/tools/event_log_visualizer/analyzer.cc
+++ b/webrtc/tools/event_log_visualizer/analyzer.cc
@@ -108,6 +108,24 @@ webrtc::RtpHeaderExtensionMap GetDefaultHeaderExtensionMap() {
   return default_map;
 }
+void FillAudioEncoderTimeSeries(
+    Plot* plot,
+    const std::vector<AudioNetworkAdaptationEvent>&
+        audio_network_adaptation_events,
+    uint64_t begin_time,
+    std::function<rtc::Optional<float>(
+        const AudioNetworkAdaptationEvent& ana_event)> get_y) {
+  plot->series_list_.push_back(TimeSeries());
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+  for (auto& ana_event : audio_network_adaptation_events) {
+    rtc::Optional<float> y = get_y(ana_event);
+    if (y) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time) / 1000000;
+      plot->series_list_.back().points.emplace_back(x, *y);
+    }
+  }
+}
+
 constexpr float kLeftMargin = 0.01f;
 constexpr float kRightMargin = 0.02f;
 constexpr float kBottomMargin = 0.02f;

Review thread on FillAudioEncoderTimeSeries:
minyue-webrtc (2017/02/16 09:54:56): I don't mind making this a member method to avoid …
terelius (2017/02/16 10:02:27): The function "Pointwise" is quite similar to this …
michaelt (2017/02/16 10:24:22): The rtc::Optional in "FillAudioEncoderTimeSeries" …
michaelt (2017/02/16 10:29:42): On the other hand, it would be quite a work to cha…
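Picking up terelius's "Pointwise" remark and minyue-webrtc's member-method suggestion above: the helper is a free function that takes the event vector, the log's begin time, and a per-event extractor that may decline to produce a sample. Below is a minimal, self-contained sketch of that same pattern, not the WebRTC code itself; the names Event, TimeSeriesPoint, and ExtractSeries are hypothetical stand-ins, and std::optional stands in for rtc::Optional.

#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

// Hypothetical stand-in for one logged event with an optional sample value.
struct Event {
  uint64_t timestamp_us;       // microseconds since log start
  std::optional<float> value;  // stand-in for one encoder config field
};

// Hypothetical stand-in for one plotted point.
struct TimeSeriesPoint {
  float x;  // seconds since begin_time_us
  float y;
};

// Same shape as FillAudioEncoderTimeSeries: walk the events, let the
// caller-supplied extractor accept or skip each one, and convert the
// timestamp from microseconds to seconds for the x axis.
std::vector<TimeSeriesPoint> ExtractSeries(
    const std::vector<Event>& events,
    uint64_t begin_time_us,
    std::function<std::optional<float>(const Event&)> get_y) {
  std::vector<TimeSeriesPoint> points;
  for (const Event& e : events) {
    if (std::optional<float> y = get_y(e)) {
      float x = static_cast<float>(e.timestamp_us - begin_time_us) / 1000000;
      points.push_back({x, *y});
    }
  }
  return points;
}

Making the helper a member method, as suggested in the thread, would let the event vector and begin_time arguments come from the analyzer's member state instead of the parameter list.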
@@ -445,6 +463,10 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
         break;
       }
       case ParsedRtcEventLog::AUDIO_NETWORK_ADAPTATION_EVENT: {
+        AudioNetworkAdaptationEvent ana_event;
+        ana_event.timestamp = parsed_log_.GetTimestamp(i);
+        parsed_log_.GetAudioNetworkAdaptation(i, &ana_event.config);
+        audio_network_adaptation_events_.push_back(ana_event);
         break;
      }
      case ParsedRtcEventLog::BWE_PACKET_DELAY_EVENT: {
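AudioNetworkAdaptationEvent is declared in analyzer.h, which this diff does not show. The following is a hedged, self-contained approximation of its shape, inferred from the field accesses in the plotting functions below; the real code uses rtc::Optional rather than std::optional, and the exact config type name may differ.

#include <cstddef>
#include <cstdint>
#include <optional>

// Approximate shape only. Field names are taken from the accesses below
// (bitrate_bps, frame_length_ms, uplink_packet_loss_fraction, enable_fec,
// enable_dtx, num_channels); exact types in analyzer.h may differ.
struct EncoderRuntimeConfig {
  std::optional<int> bitrate_bps;
  std::optional<int> frame_length_ms;
  std::optional<float> uplink_packet_loss_fraction;
  std::optional<bool> enable_fec;
  std::optional<bool> enable_dtx;
  std::optional<std::size_t> num_channels;
};

struct AudioNetworkAdaptationEvent {
  uint64_t timestamp;           // from parsed_log_.GetTimestamp(i)
  EncoderRuntimeConfig config;  // from parsed_log_.GetAudioNetworkAdaptation(i, ...)
};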
@@ -1274,5 +1296,98 @@ void EventLogAnalyzer::CreateTimestampGraph(Plot* plot) {
   plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin);
   plot->SetTitle("Timestamps");
 }
+
+void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.bitrate_bps)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.bitrate_bps));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder target bitrate";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder target bitrate");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.frame_length_ms)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.frame_length_ms));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder frame length";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder frame length");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
+    Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.uplink_packet_loss_fraction)
+          return rtc::Optional<float>(static_cast<float>(
+              *ana_event.config.uplink_packet_loss_fraction));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder lost packets");
+}
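One unit detail in the function above: uplink_packet_loss_fraction is a fraction in [0, 1], while the suggested y axis (0 to 10) and its label read "Percent lost packets". If percent is the intended unit, the extractor would scale by 100, roughly as follows (a hypothetical variant, not part of the patch):

// Hypothetical: convert the loss fraction (0.0-1.0) to percent so the
// plotted values match the "Percent lost packets" axis label.
[](const AudioNetworkAdaptationEvent& ana_event) {
  if (ana_event.config.uplink_packet_loss_fraction)
    return rtc::Optional<float>(static_cast<float>(
        100 * *ana_event.config.uplink_packet_loss_fraction));
  return rtc::Optional<float>();
}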
+
+void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_fec)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.enable_fec));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder FEC";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder FEC");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_dtx)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.enable_dtx));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder DTX";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder DTX");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.num_channels)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.num_channels));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder number of channels";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
+                          kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder number of channels");
+}
 }  // namespace plotting
 }  // namespace webrtc
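For completeness, a sketch of how the new graphs might be driven from the visualizer's main program, following the PlotCollection pattern the tool already uses for its other Create*Graph calls. Treat names like collection, AppendNewPlot, and Draw as assumptions drawn from that pattern rather than guaranteed API:

// Illustrative wiring only (assumed driver-side pattern, not in this patch):
webrtc::plotting::EventLogAnalyzer analyzer(parsed_log);
analyzer.CreateAudioEncoderTargetBitrateGraph(collection->AppendNewPlot());
analyzer.CreateAudioEncoderFrameLengthGraph(collection->AppendNewPlot());
analyzer.CreateAudioEncoderUplinkPacketLossFractionGraph(
    collection->AppendNewPlot());
analyzer.CreateAudioEncoderEnableFecGraph(collection->AppendNewPlot());
analyzer.CreateAudioEncoderEnableDtxGraph(collection->AppendNewPlot());
analyzer.CreateAudioEncoderNumChannelsGraph(collection->AppendNewPlot());
collection->Draw();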