Chromium Code Reviews
Diff: webrtc/tools/event_log_visualizer/analyzer.cc

Issue 2695613005: Add ana config to event log visualiser (Closed)
Patch Set: Respond to comments (created 3 years, 10 months ago)
 /*
  * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 90 matching lines...)
 // TODO(ivoc): Remove this once this mapping is stored in the event log for
 // audio streams. Tracking bug: webrtc:6399
 webrtc::RtpHeaderExtensionMap GetDefaultHeaderExtensionMap() {
   webrtc::RtpHeaderExtensionMap default_map;
   default_map.Register<AudioLevel>(webrtc::RtpExtension::kAudioLevelDefaultId);
   default_map.Register<AbsoluteSendTime>(
       webrtc::RtpExtension::kAbsSendTimeDefaultId);
   return default_map;
 }

+void FillAudioEncoderTimeSeries(
+    Plot* plot,
+    const std::vector<AudioNetworkAdaptationEvent>&
+        audio_network_adaptation_events,
+    std::function<void(Plot* plot,
      minyue-webrtc 2017/02/15 15:33:44: can we make this function return y and put x= and
      michaelt 2017/02/16 07:30:40: Yes
+                       const AudioNetworkAdaptationEvent& ana_event)>
+        add_point) {
+  plot->series_list_.push_back(TimeSeries());
+  plot->series_list_.back().style = LINE_DOT_GRAPH;
+  for (auto& ana_event : audio_network_adaptation_events) {
+    add_point(plot, ana_event);
+  }
+}
+
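The review thread above asks whether the per-event callback can simply return the y value, with the x computation and the emplace_back pulled into FillAudioEncoderTimeSeries itself (the comment is truncated, so this is one plausible reading). A minimal sketch of such a refactor, not part of this patch set; the begin_time parameter and the rtc::Optional-returning callback are assumptions:

// Hypothetical refactor along the lines of the review comment above; not part
// of the patch set. Assumes begin_time is passed in (it is a member of
// EventLogAnalyzer) and that rtc::Optional<float> is available here.
void FillAudioEncoderTimeSeries(
    Plot* plot,
    uint64_t begin_time,
    const std::vector<AudioNetworkAdaptationEvent>&
        audio_network_adaptation_events,
    std::function<rtc::Optional<float>(const AudioNetworkAdaptationEvent&)>
        get_y) {
  plot->series_list_.push_back(TimeSeries());
  plot->series_list_.back().style = LINE_DOT_GRAPH;
  for (const auto& ana_event : audio_network_adaptation_events) {
    rtc::Optional<float> y = get_y(ana_event);
    if (y) {
      // The x value (seconds since the start of the log) is computed in one
      // place instead of in every callback.
      float x = static_cast<float>(ana_event.timestamp - begin_time) / 1000000;
      plot->series_list_.back().points.emplace_back(x, *y);
    }
  }
}

Each CreateAudioEncoder*Graph function further down would then only need to extract its y value, roughly:

  FillAudioEncoderTimeSeries(
      plot, begin_time_, audio_network_adaptation_events_,
      [](const AudioNetworkAdaptationEvent& ana_event) {
        if (ana_event.config.bitrate_bps) {
          return rtc::Optional<float>(
              static_cast<float>(*ana_event.config.bitrate_bps));
        }
        return rtc::Optional<float>();
      });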
 constexpr float kLeftMargin = 0.01f;
 constexpr float kRightMargin = 0.02f;
 constexpr float kBottomMargin = 0.02f;
 constexpr float kTopMargin = 0.05f;

 class PacketSizeBytes {
  public:
   using DataType = LoggedRtpPacket;
   using ResultType = size_t;
   size_t operator()(const LoggedRtpPacket& packet) {
(...skipping 317 matching lines...)
       case ParsedRtcEventLog::BWE_PACKET_LOSS_EVENT: {
         BwePacketLossEvent bwe_update;
         bwe_update.timestamp = parsed_log_.GetTimestamp(i);
         parsed_log_.GetBwePacketLossEvent(i, &bwe_update.new_bitrate,
                                           &bwe_update.fraction_loss,
                                           &bwe_update.expected_packets);
         bwe_loss_updates_.push_back(bwe_update);
         break;
       }
       case ParsedRtcEventLog::AUDIO_NETWORK_ADAPTATION_EVENT: {
+        AudioNetworkAdaptationEvent ana_event;
+        ana_event.timestamp = parsed_log_.GetTimestamp(i);
+        parsed_log_.GetAudioNetworkAdaptation(i, &ana_event.config);
+        audio_network_adaptation_events_.push_back(ana_event);
         break;
       }
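The new case above relies on an AudioNetworkAdaptationEvent struct declared in analyzer.h (the previous file in this CL, not shown here). Inferred purely from how it is used in this file, the declaration presumably looks roughly like the sketch below; the exact field types and the config type name are assumptions:

// Hypothetical sketch; the real declaration lives in analyzer.h.
struct AudioNetworkAdaptationEvent {
  // Event timestamp from the parsed log, in microseconds.
  uint64_t timestamp;
  // Encoder runtime config with rtc::Optional fields: bitrate_bps,
  // frame_length_ms, uplink_packet_loss_fraction, enable_fec, enable_dtx,
  // num_channels (the type name here is an assumption).
  AudioNetworkAdaptor::EncoderRuntimeConfig config;
};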
       case ParsedRtcEventLog::BWE_PACKET_DELAY_EVENT: {
         break;
       }
       case ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT: {
         break;
       }
       case ParsedRtcEventLog::UNKNOWN_EVENT: {
         break;
(...skipping 809 matching lines...)
         }
         plot->series_list_.push_back(std::move(timestamp_data));
       }
     }
   }

   plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
   plot->SetSuggestedYAxis(0, 1, "Timestamp (90khz)", kBottomMargin, kTopMargin);
   plot->SetTitle("Timestamps");
 }
+
+void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.bitrate_bps) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.bitrate_bps);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder target bitrate";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder target bitrate");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.frame_length_ms) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.frame_length_ms);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder frame length";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder frame length");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderUplinkPacketLossFractionGraph(
+    Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.uplink_packet_loss_fraction) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y =
+              static_cast<float>(*ana_event.config.uplink_packet_loss_fraction);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder uplink packet loss fraction";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder lost packets");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_fec) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.enable_fec);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder FEC";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder FEC");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_dtx) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.enable_dtx);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder DTX";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder DTX");
+}
+
+void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
+  FillAudioEncoderTimeSeries(
+      plot, audio_network_adaptation_events_,
+      [&](Plot* plot, const AudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.num_channels) {
+          float x =
+              static_cast<float>(ana_event.timestamp - begin_time_) / 1000000;
+          float y = static_cast<float>(*ana_event.config.num_channels);
+          plot->series_list_.back().points.emplace_back(x, y);
+        }
+      });
+  plot->series_list_.back().label = "Audio encoder number of channels";
+  plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
+                          kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder number of channels");
+}
 }  // namespace plotting
 }  // namespace webrtc
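These graphs only become reachable once main.cc (the next file in this CL) registers them. The wiring sketched below is hypothetical: the flag names are invented for illustration, and the PlotCollection/AppendNewPlot pattern is assumed from how the visualizer registers its existing graphs.

// Hypothetical wiring in main.cc; flag names are illustrative only, and the
// collection object is assumed to follow the visualizer's existing
// PlotCollection/AppendNewPlot pattern.
if (FLAGS_plot_audio_encoder_bitrate) {
  analyzer.CreateAudioEncoderTargetBitrateGraph(collection->AppendNewPlot());
}
if (FLAGS_plot_audio_encoder_frame_length) {
  analyzer.CreateAudioEncoderFrameLengthGraph(collection->AppendNewPlot());
}
if (FLAGS_plot_audio_encoder_packet_loss) {
  analyzer.CreateAudioEncoderUplinkPacketLossFractionGraph(
      collection->AppendNewPlot());
}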