Index: webrtc/tools/event_log_visualizer/analyzer.cc
diff --git a/webrtc/tools/event_log_visualizer/analyzer.cc b/webrtc/tools/event_log_visualizer/analyzer.cc
index 90eb5b3f12e35d49574962f3812fff771570d958..adbf851a073e6c2722528891798e562cd10ef021 100644
--- a/webrtc/tools/event_log_visualizer/analyzer.cc
+++ b/webrtc/tools/event_log_visualizer/analyzer.cc
@@ -445,6 +445,10 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log)
         break;
       }
       case ParsedRtcEventLog::AUDIO_NETWORK_ADAPTATION_EVENT: {
+        AudioNetworkAdaptationEvent ana_event;
+        ana_event.timestamp = parsed_log_.GetTimestamp(i);
+        parsed_log_.GetAudioNetworkAdaptation(i, &ana_event.config);
+        audio_network_adaptation_events_.push_back(ana_event);
         break;
       }
      case ParsedRtcEventLog::BWE_PACKET_DELAY_EVENT: {
@@ -1274,5 +1278,113 @@ void EventLogAnalyzer::CreateTimestampGraph(Plot* plot) {
   plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin);
   plot->SetTitle("Timestamps");
 }
+
+void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
+  plot->series_list_.push_back(TimeSeries());
minyue-webrtc, 2017/02/15 09:00:22:
  Sorry, I said template, but maybe a utility function.
michaelt, 2017/02/15 10:30:59:
  Tried a solution with lambdas.
(A sketch of such a lambda-based helper follows this function.)
+  for (auto& ana_event : audio_network_adaptation_events_) {
+    if (ana_event.config.bitrate_bps) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+      float y = static_cast<float>(*ana_event.config.bitrate_bps);
+      plot->series_list_.back().points.emplace_back(x, y);
+    }
+  }
+  plot->series_list_.back().label = "Audio encoder target bitrate";
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder target bitrate");
+}
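A minimal sketch of the lambda-based helper discussed in the thread above, for
illustration only: the helper name FillAudioEncoderTimeSeries, the GetY template
parameter, the uint64_t begin_time argument, and the rtc::Optional<float> return
type of the lambda are assumptions made for this sketch, not code from this
patch. The idea is to factor out the loop that each CreateAudioEncoder*Graph
function below repeats; the caller passes a lambda that maps one
AudioNetworkAdaptationEvent to an optional y value.

// Hypothetical helper, not part of this CL (rtc::Optional<float> assumed).
// Appends one (time, value) point to the plot's last series for every event
// whose optional field is set.
template <typename GetY>
void FillAudioEncoderTimeSeries(
    Plot* plot,
    const std::vector<AudioNetworkAdaptationEvent>& events,
    uint64_t begin_time,
    GetY get_y) {
  plot->series_list_.push_back(TimeSeries());
  for (const auto& ana_event : events) {
    rtc::Optional<float> y = get_y(ana_event);
    if (y) {
      float x = static_cast<float>(ana_event.timestamp - begin_time) / 1000000;
      plot->series_list_.back().points.emplace_back(x, *y);
    }
  }
}

// Possible call site inside CreateAudioEncoderTargetBitrateGraph:
//   FillAudioEncoderTimeSeries(
//       plot, audio_network_adaptation_events_, begin_time_,
//       [](const AudioNetworkAdaptationEvent& ana_event) {
//         return ana_event.config.bitrate_bps
//                    ? rtc::Optional<float>(
//                          static_cast<float>(*ana_event.config.bitrate_bps))
//                    : rtc::Optional<float>();
//       });

With such a helper, each graph function would shrink to the lambda plus the
axis and title calls that differ per graph.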
+
+void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
+  plot->series_list_.push_back(TimeSeries());
+  for (auto& ana_event : audio_network_adaptation_events_) {
+    if (ana_event.config.frame_length_ms) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+      float y = static_cast<float>(*ana_event.config.frame_length_ms);
+      plot->series_list_.back().points.emplace_back(x, y);
+    }
+  }
+  plot->series_list_.back().label = "Audio encoder frame length";
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder frame length");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
+    Plot* plot) {
+  plot->series_list_.push_back(TimeSeries());
+  for (auto& ana_event : audio_network_adaptation_events_) {
+    if (ana_event.config.uplink_packet_loss_fraction) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+      float y =
+          static_cast<float>(*ana_event.config.uplink_packet_loss_fraction);
+      plot->series_list_.back().points.emplace_back(x, y);
+    }
+  }
+  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder lost packets");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
+  plot->series_list_.push_back(TimeSeries());
+  for (auto& ana_event : audio_network_adaptation_events_) {
+    if (ana_event.config.enable_fec) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+      float y = static_cast<float>(*ana_event.config.enable_fec);
+      plot->series_list_.back().points.emplace_back(x, y);
+    }
+  }
+  plot->series_list_.back().label = "Audio encoder FEC";
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder FEC");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
+  plot->series_list_.push_back(TimeSeries());
+  for (auto& ana_event : audio_network_adaptation_events_) {
+    if (ana_event.config.enable_dtx) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+      float y = static_cast<float>(*ana_event.config.enable_dtx);
+      plot->series_list_.back().points.emplace_back(x, y);
+    }
+  }
+  plot->series_list_.back().label = "Audio encoder DTX";
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder DTX");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
+  plot->series_list_.push_back(TimeSeries());
+  for (auto& ana_event : audio_network_adaptation_events_) {
+    if (ana_event.config.num_channels) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+      float y = static_cast<float>(*ana_event.config.num_channels);
+      plot->series_list_.back().points.emplace_back(x, y);
+    }
+  }
+  plot->series_list_.back().label = "Audio encoder number of channels";
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
+                          kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder number of channels");
+}
 } // namespace plotting
 } // namespace webrtc