OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/tools/event_log_visualizer/analyzer.h" | 11 #include "webrtc/tools/event_log_visualizer/analyzer.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <limits> | 14 #include <limits> |
15 #include <map> | 15 #include <map> |
16 #include <sstream> | 16 #include <sstream> |
17 #include <string> | 17 #include <string> |
18 #include <utility> | 18 #include <utility> |
19 | 19 |
20 #include "webrtc/base/checks.h" | 20 #include "webrtc/base/checks.h" |
21 #include "webrtc/base/format_macros.h" | |
21 #include "webrtc/base/logging.h" | 22 #include "webrtc/base/logging.h" |
22 #include "webrtc/base/rate_statistics.h" | 23 #include "webrtc/base/rate_statistics.h" |
23 #include "webrtc/call/audio_receive_stream.h" | 24 #include "webrtc/call/audio_receive_stream.h" |
24 #include "webrtc/call/audio_send_stream.h" | 25 #include "webrtc/call/audio_send_stream.h" |
25 #include "webrtc/call/call.h" | 26 #include "webrtc/call/call.h" |
26 #include "webrtc/common_types.h" | 27 #include "webrtc/common_types.h" |
28 #include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h" | |
29 #include "webrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h" | |
30 #include "webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h" | |
31 #include "webrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h" | |
32 #include "webrtc/modules/audio_coding/neteq/tools/neteq_test.h" | |
33 #include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h" | |
27 #include "webrtc/modules/congestion_controller/include/congestion_controller.h" | 34 #include "webrtc/modules/congestion_controller/include/congestion_controller.h" |
28 #include "webrtc/modules/include/module_common_types.h" | 35 #include "webrtc/modules/include/module_common_types.h" |
29 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h" | 36 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h" |
30 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" | 37 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" |
31 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.h" | 38 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/common_header.h" |
32 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h" | 39 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h" |
33 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.h" | 40 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/sender_report.h" |
34 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" | 41 #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" |
35 #include "webrtc/modules/rtp_rtcp/source/rtp_header_extensions.h" | 42 #include "webrtc/modules/rtp_rtcp/source/rtp_header_extensions.h" |
36 #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h" | 43 #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h" |
(...skipping 1352 matching lines...) | |
1389 static_cast<float>(*ana_event.config.num_channels)); | 1396 static_cast<float>(*ana_event.config.num_channels)); |
1390 return rtc::Optional<float>(); | 1397 return rtc::Optional<float>(); |
1391 }, | 1398 }, |
1392 audio_network_adaptation_events_, begin_time_, &time_series); | 1399 audio_network_adaptation_events_, begin_time_, &time_series); |
1393 plot->AppendTimeSeries(std::move(time_series)); | 1400 plot->AppendTimeSeries(std::move(time_series)); |
1394 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | 1401 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); |
1395 plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", | 1402 plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", |
1396 kBottomMargin, kTopMargin); | 1403 kBottomMargin, kTopMargin); |
1397 plot->SetTitle("Reported audio encoder number of channels"); | 1404 plot->SetTitle("Reported audio encoder number of channels"); |
1398 } | 1405 } |
1406 | |
1407 class NetEqStreamInput : public test::NetEqInput { | |
1408 public: | |
1409 // Does not take any ownership, and all pointers must refer to valid objects | |
1410 // that outlive the one constructed. | |
1411 NetEqStreamInput(const std::vector<LoggedRtpPacket>* packet_stream, | |
1412 const std::vector<uint64_t>* output_events_us) | |
1413 : packet_stream_(*packet_stream), | |
1414 packet_stream_it_(packet_stream_.begin()), | |
1415 output_events_us_it_(output_events_us->begin()), | |
1416 output_events_us_end_(output_events_us->end()) { | |
1417 RTC_DCHECK(packet_stream); | |
1418 RTC_DCHECK(output_events_us); | |
1419 } | |
1420 | |
1421 rtc::Optional<int64_t> NextPacketTime() const override { | |
1422 if (packet_stream_it_ == packet_stream_.end()) { | |
1423 return rtc::Optional<int64_t>(); | |
1424 } | |
1425 // Convert from us to ms. | |
1426 return rtc::Optional<int64_t>(packet_stream_it_->timestamp / 1000); | |
1427 } | |
1428 | |
1429 rtc::Optional<int64_t> NextOutputEventTime() const override { | |
1430 if (output_events_us_it_ == output_events_us_end_) { | |
1431 return rtc::Optional<int64_t>(); | |
1432 } | |
1433 // Convert from us to ms. | |
1434 return rtc::Optional<int64_t>( | |
1435 rtc::checked_cast<int64_t>(*output_events_us_it_ / 1000)); | |
1436 } | |
1437 | |
1438 std::unique_ptr<PacketData> PopPacket() override { | |
1439 if (packet_stream_it_ == packet_stream_.end()) { | |
1440 return std::unique_ptr<PacketData>(); | |
1441 } | |
1442 std::unique_ptr<PacketData> packet_data(new PacketData()); | |
1443 packet_data->header = packet_stream_it_->header; | |
1444 // Convert from us to ms. | |
1445 packet_data->time_ms = packet_stream_it_->timestamp / 1000.0; | |
1446 | |
1447 // This is a header-only "dummy" packet. Set the payload to all zeros, with | |
1448 // length according to the virtual length. | |
1449 packet_data->payload.SetSize(packet_stream_it_->total_length); | |
1450 std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0); | |
1451 | |
1452 ++packet_stream_it_; | |
1453 return packet_data; | |
1454 } | |
1455 | |
1456 void AdvanceOutputEvent() override { | |
1457 if (output_events_us_it_ != output_events_us_end_) { | |
1458 ++output_events_us_it_; | |
1459 } | |
1460 } | |
1461 | |
1462 bool ended() const override { | |
1463 return packet_stream_it_ == packet_stream_.end() || | |
1464 output_events_us_it_ == output_events_us_end_; | |
1465 } | |
1466 | |
1467 rtc::Optional<RTPHeader> NextHeader() const override { | |
1468 if (packet_stream_it_ == packet_stream_.end()) { | |
1469 return rtc::Optional<RTPHeader>(); | |
1470 } | |
1471 return rtc::Optional<RTPHeader>(packet_stream_it_->header); | |
1472 } | |
1473 | |
1474 private: | |
1475 const std::vector<LoggedRtpPacket>& packet_stream_; | |
1476 std::vector<LoggedRtpPacket>::const_iterator packet_stream_it_; | |
1477 std::vector<uint64_t>::const_iterator output_events_us_it_; | |
1478 const std::vector<uint64_t>::const_iterator output_events_us_end_; | |
1479 }; | |
1480 | |
1481 namespace { | |
1482 // Creates a vector of all audio output events. | |
1483 void CreateOutputEventVector(const ParsedRtcEventLog& parsed_log, | |
terelius 2017/06/01 12:59:08: Would it make sense to split the audio playouts by
hlundin-webrtc 2017/06/08 09:54:54: Done.
1484 std::vector<uint64_t>* output_events_us) { | |
1485 rtc::Optional<uint32_t> ssrc; | |
1486 for (size_t i = 0; i < parsed_log.GetNumberOfEvents(); ++i) { | |
1487 if (parsed_log.GetEventType(i) == ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT) { | |
1488 uint32_t this_ssrc; | |
1489 parsed_log.GetAudioPlayout(i, &this_ssrc); | |
1490 if (!ssrc || *ssrc == 0) { | |
1491 ssrc = rtc::Optional<uint32_t>(this_ssrc); | |
1492 } else { | |
1493 RTC_DCHECK_EQ(this_ssrc, *ssrc) | |
1494 << "Audio output events from multiple SSRCs"; | |
1495 } | |
1496 output_events_us->push_back(parsed_log.GetTimestamp(i)); | |
1497 } else if (parsed_log.GetEventType(i) == ParsedRtcEventLog::LOG_END) { | |
1498 // End of first part of the log. The logging might restart after this, but | |
1499 // it makes the plot hard to interpret if we include subsequent parts. | |
1500 break; | |
1501 } | |
1502 } | |
1503 } | |
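The review thread above asks whether the audio playout events should be split (the comment is truncated; a split per SSRC is assumed here). A minimal sketch of that variant, using only the ParsedRtcEventLog accessors already called in CreateOutputEventVector, could collect the timestamps into one vector per SSRC. The helper name and map layout below are illustrative, not part of the change under review:

// Hypothetical per-SSRC variant of CreateOutputEventVector: collects the
// playout timestamps into one vector per SSRC instead of asserting that the
// log contains a single SSRC.
void CreateOutputEventVectorsPerSsrc(
    const ParsedRtcEventLog& parsed_log,
    std::map<uint32_t, std::vector<uint64_t>>* output_events_us_by_ssrc) {
  for (size_t i = 0; i < parsed_log.GetNumberOfEvents(); ++i) {
    if (parsed_log.GetEventType(i) == ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT) {
      uint32_t ssrc;
      parsed_log.GetAudioPlayout(i, &ssrc);
      (*output_events_us_by_ssrc)[ssrc].push_back(parsed_log.GetTimestamp(i));
    } else if (parsed_log.GetEventType(i) == ParsedRtcEventLog::LOG_END) {
      // Stop at the end of the first log segment, as the original function does.
      break;
    }
  }
}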
1504 | |
1505 // Creates a NetEq test object and all necessary input and output helpers. Runs | |
1506 // the test and returns the NetEqDelayAnalyzer object that was used to | |
1507 // instrument the test. | |
1508 std::unique_ptr<test::NetEqDelayAnalyzer> CreateNetEqTestAndRun( | |
1509 const std::vector<LoggedRtpPacket>* packet_stream, | |
1510 const std::vector<uint64_t>* output_events_us, | |
1511 const std::string& replacement_file_name, | |
1512 int file_sample_rate_hz) { | |
1513 std::unique_ptr<test::NetEqInput> input( | |
1514 new NetEqStreamInput(packet_stream, output_events_us)); | |
1515 | |
1516 constexpr int kReplacementPt = 127; | |
1517 std::set<uint8_t> cn_types; | |
1518 std::set<uint8_t> forbidden_types; | |
1519 input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt, | |
1520 cn_types, forbidden_types)); | |
1521 | |
1522 NetEq::Config config; | |
1523 config.max_packets_in_buffer = 200; | |
1524 config.enable_fast_accelerate = true; | |
1525 | |
1526 std::unique_ptr<test::VoidAudioSink> output(new test::VoidAudioSink()); | |
1527 | |
1528 test::NetEqTest::DecoderMap codecs; | |
1529 | |
1530 // Create a "replacement decoder" that produces the decoded audio by reading | |
1531 // from a file rather than from the encoded payloads. | |
1532 std::unique_ptr<test::ResampleInputAudioFile> replacement_file( | |
1533 new test::ResampleInputAudioFile(replacement_file_name, | |
1534 file_sample_rate_hz)); | |
1535 replacement_file->set_output_rate_hz(48000); | |
1536 std::unique_ptr<AudioDecoder> replacement_decoder( | |
1537 new test::FakeDecodeFromFile(std::move(replacement_file), 48000, false)); | |
1538 test::NetEqTest::ExtDecoderMap ext_codecs; | |
1539 ext_codecs[kReplacementPt] = {replacement_decoder.get(), | |
1540 NetEqDecoder::kDecoderArbitrary, | |
1541 "replacement codec"}; | |
1542 | |
1543 std::unique_ptr<test::NetEqDelayAnalyzer> delay_cb( | |
1544 new test::NetEqDelayAnalyzer); | |
1545 test::DefaultNetEqTestErrorCallback error_cb; | |
1546 test::NetEqTest::Callbacks callbacks; | |
1547 callbacks.error_callback = &error_cb; | |
1548 callbacks.post_insert_packet = delay_cb.get(); | |
1549 callbacks.get_audio_callback = delay_cb.get(); | |
1550 | |
1551 test::NetEqTest test(config, codecs, ext_codecs, std::move(input), | |
1552 std::move(output), callbacks); | |
1553 test.Run(); | |
1554 return delay_cb; | |
1555 } | |
1556 } // namespace | |
1557 | |
1558 // Plots the jitter buffer delay profile. This will plot only for the first | |
1559 // incoming audio SSRC. If the stream contains more than one incoming audio | |
1560 // SSRC, all but the first will be ignored. | |
1561 void EventLogAnalyzer::CreateAudioJitterBufferGraph( | |
1562 const std::string& replacement_file_name, | |
1563 int file_sample_rate_hz, | |
1564 Plot* plot) { | |
1565 const auto& incoming_audio_kv = std::find_if( | |
1566 rtp_packets_.begin(), rtp_packets_.end(), | |
1567 [this](std::pair<StreamId, std::vector<LoggedRtpPacket>> kv) { | |
1568 return kv.first.GetDirection() == kIncomingPacket && | |
1569 this->IsAudioSsrc(kv.first); | |
1570 }); | |
1571 if (incoming_audio_kv == rtp_packets_.end()) { | |
1572 // No incoming audio stream found. | |
1573 return; | |
1574 } | |
1575 | |
1576 std::vector<uint64_t> output_events_us; | |
1577 CreateOutputEventVector(parsed_log_, &output_events_us); | |
1578 | |
1579 auto delay_cb = | |
1580 CreateNetEqTestAndRun(&incoming_audio_kv->second, &output_events_us, | |
1581 replacement_file_name, file_sample_rate_hz); | |
1582 | |
1583 std::vector<float> send_times_s; | |
1584 std::vector<float> arrival_delay_ms; | |
1585 std::vector<float> corrected_arrival_delay_ms; | |
1586 std::vector<rtc::Optional<float>> playout_delay_ms; | |
1587 std::vector<rtc::Optional<float>> target_delay_ms; | |
1588 delay_cb->CreateGraphs(&send_times_s, &arrival_delay_ms, | |
1589 &corrected_arrival_delay_ms, &playout_delay_ms, | |
1590 &target_delay_ms); | |
1591 RTC_DCHECK_EQ(send_times_s.size(), arrival_delay_ms.size()); | |
1592 RTC_DCHECK_EQ(send_times_s.size(), corrected_arrival_delay_ms.size()); | |
1593 RTC_DCHECK_EQ(send_times_s.size(), playout_delay_ms.size()); | |
1594 RTC_DCHECK_EQ(send_times_s.size(), target_delay_ms.size()); | |
1595 | |
1596 std::map<StreamId, TimeSeries> time_series_packet_arrival; | |
1597 std::map<StreamId, TimeSeries> time_series_relative_packet_arrival; | |
1598 std::map<StreamId, TimeSeries> time_series_play_time; | |
1599 std::map<StreamId, TimeSeries> time_series_target_time; | |
1600 float min_y_axis = 0.f; | |
1601 float max_y_axis = 0.f; | |
1602 const StreamId stream_id = incoming_audio_kv->first; | |
1603 for (size_t i = 0; i < send_times_s.size(); ++i) { | |
1604 time_series_packet_arrival[stream_id].points.emplace_back( | |
1605 TimeSeriesPoint(send_times_s[i], arrival_delay_ms[i])); | |
1606 time_series_relative_packet_arrival[stream_id].points.emplace_back( | |
1607 TimeSeriesPoint(send_times_s[i], corrected_arrival_delay_ms[i])); | |
1608 min_y_axis = std::min(min_y_axis, corrected_arrival_delay_ms[i]); | |
1609 max_y_axis = std::max(max_y_axis, corrected_arrival_delay_ms[i]); | |
1610 if (playout_delay_ms[i]) { | |
1611 time_series_play_time[stream_id].points.emplace_back( | |
1612 TimeSeriesPoint(send_times_s[i], *playout_delay_ms[i])); | |
1613 min_y_axis = std::min(min_y_axis, *playout_delay_ms[i]); | |
1614 max_y_axis = std::max(max_y_axis, *playout_delay_ms[i]); | |
1615 } | |
1616 if (target_delay_ms[i]) { | |
1617 time_series_target_time[stream_id].points.emplace_back( | |
1618 TimeSeriesPoint(send_times_s[i], *target_delay_ms[i])); | |
1619 min_y_axis = std::min(min_y_axis, *target_delay_ms[i]); | |
1620 max_y_axis = std::max(max_y_axis, *target_delay_ms[i]); | |
1621 } | |
1622 } | |
1623 | |
1624 for (auto& series : time_series_relative_packet_arrival) { | |
1625 series.second.label = "Relative packet arrival delay"; | |
terelius 2017/06/01 12:59:08: If there are multiple series, you might want to la
hlundin-webrtc 2017/06/08 09:54:54: This code will only produce a single stream to plo
1626 series.second.style = LINE_GRAPH; | |
1627 plot->AppendTimeSeries(std::move(series.second)); | |
1628 } | |
1629 for (auto& series : time_series_play_time) { | |
1630 series.second.label = "Playout delay"; | |
1631 series.second.style = LINE_GRAPH; | |
1632 plot->AppendTimeSeries(std::move(series.second)); | |
1633 } | |
1634 for (auto& series : time_series_target_time) { | |
1635 series.second.label = "Target delay"; | |
1636 series.second.style = LINE_DOT_GRAPH; | |
1637 plot->AppendTimeSeries(std::move(series.second)); | |
1638 } | |
1639 | |
1640 plot->SetXAxis(0, call_duration_s_, "Time (s)", kLeftMargin, kRightMargin); | |
1641 plot->SetYAxis(min_y_axis, max_y_axis, "Relative delay (ms)", kBottomMargin, | |
1642 kTopMargin); | |
1643 plot->SetTitle("NetEq timing"); | |
1644 } | |
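As the review thread on the series labels notes, this graph carries exactly one incoming audio stream, so fixed labels suffice. If the plot were ever extended to several incoming audio SSRCs, the first labeling loop in CreateAudioJitterBufferGraph could tag each series with its SSRC. A sketch of that variant, assuming StreamId::GetSsrc() is available as elsewhere in this analyzer:

  // Hypothetical variant of the labeling loop, distinguishing multiple
  // incoming audio SSRCs. Assumes StreamId::GetSsrc() exists; <sstream> is
  // already included at the top of this file.
  for (auto& series : time_series_relative_packet_arrival) {
    std::stringstream label;
    label << "Relative packet arrival delay, SSRC " << series.first.GetSsrc();
    series.second.label = label.str();
    series.second.style = LINE_GRAPH;
    plot->AppendTimeSeries(std::move(series.second));
  }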
1399 } // namespace plotting | 1645 } // namespace plotting |
1400 } // namespace webrtc | 1646 } // namespace webrtc |