Index: webrtc/tools/event_log_visualizer/analyzer.cc
diff --git a/webrtc/tools/event_log_visualizer/analyzer.cc b/webrtc/tools/event_log_visualizer/analyzer.cc
index 90eb5b3f12e35d49574962f3812fff771570d958..d3cd67e5710c493bd416299e3d728f1eae289773 100644
--- a/webrtc/tools/event_log_visualizer/analyzer.cc
+++ b/webrtc/tools/event_log_visualizer/analyzer.cc
@@ -108,6 +108,20 @@ webrtc::RtpHeaderExtensionMap GetDefaultHeaderExtensionMap() {
   return default_map;
 }
+void FillAudioEncoderTimeSeries(
+    Plot* plot,
+    const std::vector<AudioNetworkAdaptationEvent>&
+        audio_network_adaptation_events,
+    std::function<void(Plot* plot,
minyue-webrtc 2017/02/15 15:33:44:
  can we make this function return y and put x= and
michaelt 2017/02/16 07:30:40:
  Yes
+                       const AudioNetworkAdaptationEvent& ana_event)>
+        add_point) {
+  plot->series_list_.push_back(TimeSeries());
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+  for (auto& ana_event : audio_network_adaptation_events) {
+    add_point(plot, ana_event);
+  }
+}
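Picking up the review thread above, a minimal sketch of the suggested refactor: the callback returns only the y value and the helper computes x once from the event timestamp. The rtc::Optional<float> return type and the explicit begin_time parameter are assumptions for illustration; in the class, the begin time is the member begin_time_.

// Sketch of the reviewer's suggestion (not part of this CL): the callback
// extracts y from the event, or returns an empty rtc::Optional when the
// corresponding config field is unset, and the helper computes
// x = (timestamp - begin_time) in seconds in one place.
void FillAudioEncoderTimeSeries(
    Plot* plot,
    const std::vector<AudioNetworkAdaptationEvent>&
        audio_network_adaptation_events,
    uint64_t begin_time,
    std::function<rtc::Optional<float>(const AudioNetworkAdaptationEvent&)>
        get_y) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  for (const auto& ana_event : audio_network_adaptation_events) {
    rtc::Optional<float> y = get_y(ana_event);
    if (y) {
      float x = static_cast<float>(ana_event.timestamp - begin_time) / 1000000;
      plot->series_list_.back().points.emplace_back(x, *y);
    }
  }
}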
+
 constexpr float kLeftMargin = 0.01f;
 constexpr float kRightMargin = 0.02f;
 constexpr float kBottomMargin = 0.02f;
@@ -445,6 +459,10 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
         break;
       }
       case ParsedRtcEventLog::AUDIO_NETWORK_ADAPTATION_EVENT: {
+        AudioNetworkAdaptationEvent ana_event;
+        ana_event.timestamp = parsed_log_.GetTimestamp(i);
+        parsed_log_.GetAudioNetworkAdaptation(i, &ana_event.config);
+        audio_network_adaptation_events_.push_back(ana_event);
         break;
       }
       case ParsedRtcEventLog::BWE_PACKET_DELAY_EVENT: {
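For reference, the event struct populated in this hunk is assumed to look roughly as follows; the real definition is added to analyzer.h, which is not shown in this excerpt. The graph code below dereferences the config fields with *, so they are presumably rtc::Optional values.

// Assumed shape, for illustration only; see analyzer.h for the actual
// definition.
struct AudioNetworkAdaptationEvent {
  uint64_t timestamp;  // Log timestamp in microseconds.
  AudioNetworkAdaptor::EncoderRuntimeConfig config;  // rtc::Optional fields.
};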
@@ -1274,5 +1292,111 @@ void EventLogAnalyzer::CreateTimestampGraph(Plot* plot) {
   plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin);
   plot->SetTitle("Timestamps");
 }
+
+void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.bitrate_bps) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.bitrate_bps);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder target bitrate";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder target bitrate");
+}
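For context, a hypothetical call site: like the existing Create*Graph methods, the new graphs would be driven from the visualizer's main program. The PythonPlotCollection, AppendNewPlot, and Draw names below follow the tool's plot API as assumed here and are not part of this CL.

// Hypothetical wiring, mirroring how the tool produces its other graphs.
webrtc::plotting::EventLogAnalyzer analyzer(parsed_log);
webrtc::plotting::PythonPlotCollection collection;
analyzer.CreateAudioEncoderTargetBitrateGraph(collection.AppendNewPlot());
collection.Draw();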
+
+void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.frame_length_ms) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.frame_length_ms);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder frame length";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder frame length");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
+    Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.uplink_packet_loss_fraction) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(
+              *ana_event.config.uplink_packet_loss_fraction);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder lost packets");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_fec) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.enable_fec);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder FEC";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder FEC");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_dtx) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.enable_dtx);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder DTX";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder DTX");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.num_channels) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.num_channels);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder number of channels";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
+                          kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder number of channels");
+}
 }  // namespace plotting
 }  // namespace webrtc
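With the return-y variant of FillAudioEncoderTimeSeries sketched earlier, each of the six graph functions would shrink to a single field extraction. For example, the body of CreateAudioEncoderTargetBitrateGraph could become (same rtc::Optional assumption as above):

// Sketch only: per-graph code reduced to extracting the y value; the x
// computation now lives in the helper.
FillAudioEncoderTimeSeries(
    plot, audio_network_adaptation_events_, begin_time_,
    [](const AudioNetworkAdaptationEvent& ana_event) {
      if (ana_event.config.bitrate_bps) {
        return rtc::Optional<float>(
            static_cast<float>(*ana_event.config.bitrate_bps));
      }
      return rtc::Optional<float>();
    });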