Chromium Code Reviews

Unified Diff: webrtc/tools/event_log_visualizer/analyzer.cc

Issue 2695613005: Add ana config to event log visualiser (Closed)
Patch Set: Respond to comments (created 3 years, 10 months ago)
 /*
  * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 90 matching lines...)
 // TODO(ivoc): Remove this once this mapping is stored in the event log for
 // audio streams. Tracking bug: webrtc:6399
 webrtc::RtpHeaderExtensionMap GetDefaultHeaderExtensionMap() {
   webrtc::RtpHeaderExtensionMap default_map;
   default_map.Register<AudioLevel>(webrtc::RtpExtension::kAudioLevelDefaultId);
   default_map.Register<AbsoluteSendTime>(
       webrtc::RtpExtension::kAbsSendTimeDefaultId);
   return default_map;
 }

+void FillAudioEncoderTimeSeries(
    minyue-webrtc 2017/02/16 09:54:56: I don't mind making this a member method to avoid [...]
    terelius 2017/02/16 10:02:27: The function "Pointwise" is quite similar to this [...]
    michaelt 2017/02/16 10:24:22: The rtc::Optional in "FillAudioEncoderTimeSeries" [...]
    michaelt 2017/02/16 10:29:42: On the other hand it would be quite a work, to cha[...]
+    Plot* plot,
+    const std::vector<AudioNetworkAdaptationEvent>&
+        audio_network_adaptation_events,
+    uint64_t begin_time,
+    std::function<rtc::Optional<float>(
+        const AudioNetworkAdaptationEvent& ana_event)> get_y) {
+  plot->series_list_.push_back(TimeSeries());
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+  for (auto& ana_event : audio_network_adaptation_events) {
+    rtc::Optional<float> y = get_y(ana_event);
+    if (y) {
+      float x = static_cast<float>(ana_event.timestamp - begin_time) / 1000000;
+      plot->series_list_.back().points.emplace_back(x, *y);
+    }
+  }
+}
+
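The helper above is what the new CreateAudioEncoder*Graph functions further down call into: each plotted quantity only needs a short lambda that pulls one rtc::Optional<> member out of the logged config, and events where that member is unset simply produce no data point. As a sketch of the pattern, plotting an additional optional config field would look like this (the field name "foo" is hypothetical, purely for illustration):

    // Illustration only: "foo" stands in for any rtc::Optional<> member of the
    // logged runtime config; it is not a field added by this CL.
    FillAudioEncoderTimeSeries(
        plot, audio_network_adaptation_events_, begin_time_,
        [](const AudioNetworkAdaptationEvent& ana_event) {
          if (ana_event.config.foo)
            return rtc::Optional<float>(
                static_cast<float>(*ana_event.config.foo));
          return rtc::Optional<float>();  // unset: skip this event
        });
    plot->series_list_.back().label = "Hypothetical foo";
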
 constexpr float kLeftMargin = 0.01f;
 constexpr float kRightMargin = 0.02f;
 constexpr float kBottomMargin = 0.02f;
 constexpr float kTopMargin = 0.05f;

 class PacketSizeBytes {
  public:
   using DataType = LoggedRtpPacket;
   using ResultType = size_t;
   size_t operator()(const LoggedRtpPacket& packet) {
(...skipping 317 matching lines...)
       case ParsedRtcEventLog::BWE_PACKET_LOSS_EVENT: {
         BwePacketLossEvent bwe_update;
         bwe_update.timestamp = parsed_log_.GetTimestamp(i);
         parsed_log_.GetBwePacketLossEvent(i, &bwe_update.new_bitrate,
                                           &bwe_update.fraction_loss,
                                           &bwe_update.expected_packets);
         bwe_loss_updates_.push_back(bwe_update);
         break;
       }
       case ParsedRtcEventLog::AUDIO_NETWORK_ADAPTATION_EVENT: {
+        AudioNetworkAdaptationEvent ana_event;
+        ana_event.timestamp = parsed_log_.GetTimestamp(i);
+        parsed_log_.GetAudioNetworkAdaptation(i, &ana_event.config);
+        audio_network_adaptation_events_.push_back(ana_event);
         break;
       }
       case ParsedRtcEventLog::BWE_PACKET_DELAY_EVENT: {
         break;
       }
       case ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT: {
         break;
       }
       case ParsedRtcEventLog::UNKNOWN_EVENT: {
         break;
(...skipping 809 matching lines...)
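The AudioNetworkAdaptationEvent used in the new AUDIO_NETWORK_ADAPTATION_EVENT case above is declared in analyzer.h (the previous file in this CL, not shown in this view). Based on how it is populated and consumed in this file, it is presumably a small plain struct along these lines; the exact type of the config member is an assumption:

    // Assumed shape, inferred from usage in analyzer.cc; the real declaration
    // is in analyzer.h. The config member exposes the ANA runtime settings as
    // rtc::Optional<> fields (bitrate_bps, frame_length_ms,
    // uplink_packet_loss_fraction, enable_fec, enable_dtx, num_channels).
    struct AudioNetworkAdaptationEvent {
      uint64_t timestamp;  // log timestamp in microseconds; the plots divide
                           // by 1000000 to get seconds on the x axis
      AudioNetworkAdaptor::EncoderRuntimeConfig config;  // type name assumed
    };
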
         }
         plot->series_list_.push_back(std::move(timestamp_data));
       }
     }
   }

   plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
   plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin);
   plot->SetTitle("Timestamps");
 }
+
+void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.bitrate_bps)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.bitrate_bps));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder target bitrate";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder target bitrate");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.frame_length_ms)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.frame_length_ms));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder frame length";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder frame length");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
+    Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.uplink_packet_loss_fraction)
+          return rtc::Optional<float>(static_cast<float>(
+              *ana_event.config.uplink_packet_loss_fraction));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder lost packets");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_fec)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.enable_fec));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder FEC";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder FEC");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_dtx)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.enable_dtx));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder DTX";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder DTX");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_, begin_time_,
+      [&](const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.num_channels)
+          return rtc::Optional<float>(
+              static_cast<float>(*ana_event.config.num_channels));
+        return rtc::Optional<float>();
+      });
+  plot->series_list_.back().label = "Audio encoder number of channels";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
+                          kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder number of channels");
+}
 }  // namespace plotting
 }  // namespace webrtc
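
The new graphs are only useful once they are reachable from the command line; that wiring lives in webrtc/tools/event_log_visualizer/main.cc, the next file in this CL, which is not shown in this view. It presumably follows the pattern of the existing plots, roughly like the sketch below; the flag names and the PlotCollection plumbing are illustrative assumptions, and only the analyzer method names are taken from this file:

    // Illustrative sketch only; see main.cc in this CL for the real flags.
    if (FLAGS_plot_audio_encoder_bitrate) {        // hypothetical flag name
      analyzer.CreateAudioEncoderTargetBitrateGraph(collection->AppendNewPlot());
    }
    if (FLAGS_plot_audio_encoder_frame_length) {   // hypothetical flag name
      analyzer.CreateAudioEncoderFrameLengthGraph(collection->AppendNewPlot());
    }
    if (FLAGS_plot_audio_encoder_packet_loss) {    // hypothetical flag name
      analyzer.CreateAudioEncoderUplinkPacketLossFractionGraph(
          collection->AppendNewPlot());
    }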