Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_coding/neteq/neteq_unittest.cc

Issue 2809153002: Change from WebRtcRTPHeader to RTPHeader in NetEq tests and tools (Closed)
Patch Set: Fixing neteq_rtp_fuzzer (created 3 years, 7 months ago)
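
In short, the patch drops the WebRtcRTPHeader wrapper from the NetEq tests: the helper functions now take a bare RTPHeader (which is presumably why webrtc/common_types.h is added to the includes), and packets read from file hand out their header via packet_->header() instead of going through a ConvertHeader() copy. Below is a condensed before/after sketch distilled from the diff rather than copied from it; payload, neteq_, and the literal field values are stand-ins for the fixture members, and the two snippets are alternatives, not meant to compile together.

    // Old pattern: fill in the nested header member, then pass it on.
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = 0;
    rtp_info.header.timestamp = 0;
    rtp_info.header.ssrc = 0x1234;     // Arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    neteq_->InsertPacket(rtp_info.header, payload, 0);

    // New pattern: RTPHeader is populated and passed directly,
    // so the .header indirection disappears everywhere.
    RTPHeader rtp_info;  // Declared in webrtc/common_types.h.
    rtp_info.sequenceNumber = 0;
    rtp_info.timestamp = 0;
    rtp_info.ssrc = 0x1234;     // Arbitrary SSRC.
    rtp_info.payloadType = 94;  // PCM16b WB codec.
    rtp_info.markerBit = 0;
    neteq_->InsertPacket(rtp_info, payload, 0);
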
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/modules/audio_coding/neteq/include/neteq.h" 11 #include "webrtc/modules/audio_coding/neteq/include/neteq.h"
12 12
13 #include <math.h> 13 #include <math.h>
14 #include <stdlib.h> 14 #include <stdlib.h>
15 #include <string.h> // memset 15 #include <string.h> // memset
16 16
17 #include <algorithm> 17 #include <algorithm>
18 #include <memory> 18 #include <memory>
19 #include <set> 19 #include <set>
20 #include <string> 20 #include <string>
21 #include <vector> 21 #include <vector>
22 22
23 #include "gflags/gflags.h" 23 #include "gflags/gflags.h"
24 #include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h" 24 #include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h"
25 #include "webrtc/base/ignore_wundef.h" 25 #include "webrtc/base/ignore_wundef.h"
26 #include "webrtc/base/protobuf_utils.h"
26 #include "webrtc/base/sha1digest.h" 27 #include "webrtc/base/sha1digest.h"
27 #include "webrtc/base/stringencode.h" 28 #include "webrtc/base/stringencode.h"
28 #include "webrtc/base/protobuf_utils.h" 29 #include "webrtc/common_types.h"
29 #include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h" 30 #include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
30 #include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h" 31 #include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
31 #include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h" 32 #include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
32 #include "webrtc/modules/include/module_common_types.h" 33 #include "webrtc/modules/include/module_common_types.h"
33 #include "webrtc/test/gtest.h" 34 #include "webrtc/test/gtest.h"
34 #include "webrtc/test/testsupport/fileutils.h" 35 #include "webrtc/test/testsupport/fileutils.h"
35 #include "webrtc/typedefs.h" 36 #include "webrtc/typedefs.h"
36 37
37 #ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT 38 #ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
38 RTC_PUSH_IGNORING_WUNDEF() 39 RTC_PUSH_IGNORING_WUNDEF()
(...skipping 204 matching lines...)
243 void Process(); 244 void Process();
244 245
245 void DecodeAndCompare(const std::string& rtp_file, 246 void DecodeAndCompare(const std::string& rtp_file,
246 const std::string& output_checksum, 247 const std::string& output_checksum,
247 const std::string& network_stats_checksum, 248 const std::string& network_stats_checksum,
248 const std::string& rtcp_stats_checksum, 249 const std::string& rtcp_stats_checksum,
249 bool gen_ref); 250 bool gen_ref);
250 251
251 static void PopulateRtpInfo(int frame_index, 252 static void PopulateRtpInfo(int frame_index,
252 int timestamp, 253 int timestamp,
253 WebRtcRTPHeader* rtp_info); 254 RTPHeader* rtp_info);
254 static void PopulateCng(int frame_index, 255 static void PopulateCng(int frame_index,
255 int timestamp, 256 int timestamp,
256 WebRtcRTPHeader* rtp_info, 257 RTPHeader* rtp_info,
257 uint8_t* payload, 258 uint8_t* payload,
258 size_t* payload_len); 259 size_t* payload_len);
259 260
260 void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp, 261 void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
261 const std::set<uint16_t>& drop_seq_numbers, 262 const std::set<uint16_t>& drop_seq_numbers,
262 bool expect_seq_no_wrap, bool expect_timestamp_wrap); 263 bool expect_seq_no_wrap, bool expect_timestamp_wrap);
263 264
264 void LongCngWithClockDrift(double drift_factor, 265 void LongCngWithClockDrift(double drift_factor,
265 double network_freeze_ms, 266 double network_freeze_ms,
266 bool pull_audio_during_freeze, 267 bool pull_audio_during_freeze,
(...skipping 44 matching lines...)
311 } 312 }
312 313
313 void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) { 314 void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
314 rtp_source_.reset(test::RtpFileSource::Create(rtp_file)); 315 rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
315 } 316 }
316 317
317 void NetEqDecodingTest::Process() { 318 void NetEqDecodingTest::Process() {
318 // Check if time to receive. 319 // Check if time to receive.
319 while (packet_ && sim_clock_ >= packet_->time_ms()) { 320 while (packet_ && sim_clock_ >= packet_->time_ms()) {
320 if (packet_->payload_length_bytes() > 0) { 321 if (packet_->payload_length_bytes() > 0) {
321 WebRtcRTPHeader rtp_header;
322 packet_->ConvertHeader(&rtp_header);
323 #ifndef WEBRTC_CODEC_ISAC 322 #ifndef WEBRTC_CODEC_ISAC
324 // Ignore payload type 104 (iSAC-swb) if ISAC is not supported. 323 // Ignore payload type 104 (iSAC-swb) if ISAC is not supported.
325 if (rtp_header.header.payloadType != 104) 324 if (packet_->header().payloadType != 104)
326 #endif 325 #endif
327 ASSERT_EQ(0, 326 ASSERT_EQ(0,
328 neteq_->InsertPacket( 327 neteq_->InsertPacket(
329 rtp_header.header, 328 packet_->header(),
330 rtc::ArrayView<const uint8_t>( 329 rtc::ArrayView<const uint8_t>(
331 packet_->payload(), packet_->payload_length_bytes()), 330 packet_->payload(), packet_->payload_length_bytes()),
332 static_cast<uint32_t>(packet_->time_ms() * 331 static_cast<uint32_t>(packet_->time_ms() *
333 (output_sample_rate_ / 1000)))); 332 (output_sample_rate_ / 1000))));
334 } 333 }
335 // Get next packet. 334 // Get next packet.
336 packet_ = rtp_source_->NextPacket(); 335 packet_ = rtp_source_->NextPacket();
337 } 336 }
338 337
339 // Get audio from NetEq. 338 // Get audio from NetEq.
(...skipping 62 matching lines...)
402 SCOPED_TRACE("Check output audio."); 401 SCOPED_TRACE("Check output audio.");
403 output.VerifyChecksum(output_checksum); 402 output.VerifyChecksum(output_checksum);
404 SCOPED_TRACE("Check network stats."); 403 SCOPED_TRACE("Check network stats.");
405 network_stats.VerifyChecksum(network_stats_checksum); 404 network_stats.VerifyChecksum(network_stats_checksum);
406 SCOPED_TRACE("Check rtcp stats."); 405 SCOPED_TRACE("Check rtcp stats.");
407 rtcp_stats.VerifyChecksum(rtcp_stats_checksum); 406 rtcp_stats.VerifyChecksum(rtcp_stats_checksum);
408 } 407 }
409 408
410 void NetEqDecodingTest::PopulateRtpInfo(int frame_index, 409 void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
411 int timestamp, 410 int timestamp,
412 WebRtcRTPHeader* rtp_info) { 411 RTPHeader* rtp_info) {
413 rtp_info->header.sequenceNumber = frame_index; 412 rtp_info->sequenceNumber = frame_index;
414 rtp_info->header.timestamp = timestamp; 413 rtp_info->timestamp = timestamp;
415 rtp_info->header.ssrc = 0x1234; // Just an arbitrary SSRC. 414 rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
416 rtp_info->header.payloadType = 94; // PCM16b WB codec. 415 rtp_info->payloadType = 94; // PCM16b WB codec.
417 rtp_info->header.markerBit = 0; 416 rtp_info->markerBit = 0;
418 } 417 }
419 418
420 void NetEqDecodingTest::PopulateCng(int frame_index, 419 void NetEqDecodingTest::PopulateCng(int frame_index,
421 int timestamp, 420 int timestamp,
422 WebRtcRTPHeader* rtp_info, 421 RTPHeader* rtp_info,
423 uint8_t* payload, 422 uint8_t* payload,
424 size_t* payload_len) { 423 size_t* payload_len) {
425 rtp_info->header.sequenceNumber = frame_index; 424 rtp_info->sequenceNumber = frame_index;
426 rtp_info->header.timestamp = timestamp; 425 rtp_info->timestamp = timestamp;
427 rtp_info->header.ssrc = 0x1234; // Just an arbitrary SSRC. 426 rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
428 rtp_info->header.payloadType = 98; // WB CNG. 427 rtp_info->payloadType = 98; // WB CNG.
429 rtp_info->header.markerBit = 0; 428 rtp_info->markerBit = 0;
430 payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen. 429 payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen.
431 *payload_len = 1; // Only noise level, no spectral parameters. 430 *payload_len = 1; // Only noise level, no spectral parameters.
432 } 431 }
433 432
434 #if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \ 433 #if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
435 (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \ 434 (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
436 defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722) && \ 435 defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722) && \
437 !defined(WEBRTC_ARCH_ARM64) 436 !defined(WEBRTC_ARCH_ARM64)
438 #define MAYBE_TestBitExactness TestBitExactness 437 #define MAYBE_TestBitExactness TestBitExactness
439 #else 438 #else
(...skipping 74 matching lines...)
514 } 513 }
515 }; 514 };
516 515
517 TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) { 516 TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
518 // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio. 517 // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
519 size_t num_frames = 30; 518 size_t num_frames = 30;
520 const size_t kSamples = 10 * 16; 519 const size_t kSamples = 10 * 16;
521 const size_t kPayloadBytes = kSamples * 2; 520 const size_t kPayloadBytes = kSamples * 2;
522 for (size_t i = 0; i < num_frames; ++i) { 521 for (size_t i = 0; i < num_frames; ++i) {
523 const uint8_t payload[kPayloadBytes] = {0}; 522 const uint8_t payload[kPayloadBytes] = {0};
524 WebRtcRTPHeader rtp_info; 523 RTPHeader rtp_info;
525 rtp_info.header.sequenceNumber = i; 524 rtp_info.sequenceNumber = i;
526 rtp_info.header.timestamp = i * kSamples; 525 rtp_info.timestamp = i * kSamples;
527 rtp_info.header.ssrc = 0x1234; // Just an arbitrary SSRC. 526 rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
528 rtp_info.header.payloadType = 94; // PCM16b WB codec. 527 rtp_info.payloadType = 94; // PCM16b WB codec.
529 rtp_info.header.markerBit = 0; 528 rtp_info.markerBit = 0;
530 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 529 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
531 } 530 }
532 // Pull out all data. 531 // Pull out all data.
533 for (size_t i = 0; i < num_frames; ++i) { 532 for (size_t i = 0; i < num_frames; ++i) {
534 bool muted; 533 bool muted;
535 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 534 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
536 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 535 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
537 } 536 }
538 537
539 NetEqNetworkStatistics stats; 538 NetEqNetworkStatistics stats;
540 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats)); 539 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
(...skipping 18 matching lines...)
559 const int kNumFrames = 3000; // Needed for convergence. 558 const int kNumFrames = 3000; // Needed for convergence.
560 int frame_index = 0; 559 int frame_index = 0;
561 const size_t kSamples = 10 * 16; 560 const size_t kSamples = 10 * 16;
562 const size_t kPayloadBytes = kSamples * 2; 561 const size_t kPayloadBytes = kSamples * 2;
563 while (frame_index < kNumFrames) { 562 while (frame_index < kNumFrames) {
564 // Insert one packet each time, except every 10th time where we insert two 563 // Insert one packet each time, except every 10th time where we insert two
565 // packets at once. This will create a negative clock-drift of approx. 10%. 564 // packets at once. This will create a negative clock-drift of approx. 10%.
566 int num_packets = (frame_index % 10 == 0 ? 2 : 1); 565 int num_packets = (frame_index % 10 == 0 ? 2 : 1);
567 for (int n = 0; n < num_packets; ++n) { 566 for (int n = 0; n < num_packets; ++n) {
568 uint8_t payload[kPayloadBytes] = {0}; 567 uint8_t payload[kPayloadBytes] = {0};
569 WebRtcRTPHeader rtp_info; 568 RTPHeader rtp_info;
570 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info); 569 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
571 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 570 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
572 ++frame_index; 571 ++frame_index;
573 } 572 }
574 573
575 // Pull out data once. 574 // Pull out data once.
576 bool muted; 575 bool muted;
577 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 576 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
578 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 577 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
579 } 578 }
580 579
581 NetEqNetworkStatistics network_stats; 580 NetEqNetworkStatistics network_stats;
582 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats)); 581 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
583 EXPECT_EQ(-103192, network_stats.clockdrift_ppm); 582 EXPECT_EQ(-103192, network_stats.clockdrift_ppm);
584 } 583 }
585 584
586 TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) { 585 TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
587 const int kNumFrames = 5000; // Needed for convergence. 586 const int kNumFrames = 5000; // Needed for convergence.
588 int frame_index = 0; 587 int frame_index = 0;
589 const size_t kSamples = 10 * 16; 588 const size_t kSamples = 10 * 16;
590 const size_t kPayloadBytes = kSamples * 2; 589 const size_t kPayloadBytes = kSamples * 2;
591 for (int i = 0; i < kNumFrames; ++i) { 590 for (int i = 0; i < kNumFrames; ++i) {
592 // Insert one packet each time, except every 10th time where we don't insert 591 // Insert one packet each time, except every 10th time where we don't insert
593 // any packet. This will create a positive clock-drift of approx. 11%. 592 // any packet. This will create a positive clock-drift of approx. 11%.
594 int num_packets = (i % 10 == 9 ? 0 : 1); 593 int num_packets = (i % 10 == 9 ? 0 : 1);
595 for (int n = 0; n < num_packets; ++n) { 594 for (int n = 0; n < num_packets; ++n) {
596 uint8_t payload[kPayloadBytes] = {0}; 595 uint8_t payload[kPayloadBytes] = {0};
597 WebRtcRTPHeader rtp_info; 596 RTPHeader rtp_info;
598 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info); 597 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
599 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 598 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
600 ++frame_index; 599 ++frame_index;
601 } 600 }
602 601
603 // Pull out data once. 602 // Pull out data once.
604 bool muted; 603 bool muted;
605 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 604 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
606 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 605 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
607 } 606 }
608 607
609 NetEqNetworkStatistics network_stats; 608 NetEqNetworkStatistics network_stats;
(...skipping 15 matching lines...)
625 double t_ms; 624 double t_ms;
626 bool muted; 625 bool muted;
627 626
628 // Insert speech for 5 seconds. 627 // Insert speech for 5 seconds.
629 const int kSpeechDurationMs = 5000; 628 const int kSpeechDurationMs = 5000;
630 for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) { 629 for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
631 // Each turn in this for loop is 10 ms. 630 // Each turn in this for loop is 10 ms.
632 while (next_input_time_ms <= t_ms) { 631 while (next_input_time_ms <= t_ms) {
633 // Insert one 30 ms speech frame. 632 // Insert one 30 ms speech frame.
634 uint8_t payload[kPayloadBytes] = {0}; 633 uint8_t payload[kPayloadBytes] = {0};
635 WebRtcRTPHeader rtp_info; 634 RTPHeader rtp_info;
636 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 635 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
637 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 636 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
638 ++seq_no; 637 ++seq_no;
639 timestamp += kSamples; 638 timestamp += kSamples;
640 next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor; 639 next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
641 } 640 }
642 // Pull out data once. 641 // Pull out data once.
643 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 642 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
644 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 643 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
645 } 644 }
646 645
647 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 646 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
648 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp(); 647 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
649 ASSERT_TRUE(playout_timestamp); 648 ASSERT_TRUE(playout_timestamp);
650 int32_t delay_before = timestamp - *playout_timestamp; 649 int32_t delay_before = timestamp - *playout_timestamp;
651 650
652 // Insert CNG for 1 minute (= 60000 ms). 651 // Insert CNG for 1 minute (= 60000 ms).
653 const int kCngPeriodMs = 100; 652 const int kCngPeriodMs = 100;
654 const int kCngPeriodSamples = kCngPeriodMs * 16; // Period in 16 kHz samples. 653 const int kCngPeriodSamples = kCngPeriodMs * 16; // Period in 16 kHz samples.
655 const int kCngDurationMs = 60000; 654 const int kCngDurationMs = 60000;
656 for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) { 655 for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
657 // Each turn in this for loop is 10 ms. 656 // Each turn in this for loop is 10 ms.
658 while (next_input_time_ms <= t_ms) { 657 while (next_input_time_ms <= t_ms) {
659 // Insert one CNG frame each 100 ms. 658 // Insert one CNG frame each 100 ms.
660 uint8_t payload[kPayloadBytes]; 659 uint8_t payload[kPayloadBytes];
661 size_t payload_len; 660 size_t payload_len;
662 WebRtcRTPHeader rtp_info; 661 RTPHeader rtp_info;
663 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 662 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
664 ASSERT_EQ(0, neteq_->InsertPacket( 663 ASSERT_EQ(0, neteq_->InsertPacket(
665 rtp_info.header, 664 rtp_info,
666 rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 665 rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
667 ++seq_no; 666 ++seq_no;
668 timestamp += kCngPeriodSamples; 667 timestamp += kCngPeriodSamples;
669 next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor; 668 next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
670 } 669 }
671 // Pull out data once. 670 // Pull out data once.
672 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 671 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
673 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 672 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
674 } 673 }
675 674
(...skipping 19 matching lines...)
695 pull_once = false; 694 pull_once = false;
696 // Pull out data once. 695 // Pull out data once.
697 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 696 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
698 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 697 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
699 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 698 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
700 t_ms += 10; 699 t_ms += 10;
701 } 700 }
702 // Insert one CNG frame each 100 ms. 701 // Insert one CNG frame each 100 ms.
703 uint8_t payload[kPayloadBytes]; 702 uint8_t payload[kPayloadBytes];
704 size_t payload_len; 703 size_t payload_len;
705 WebRtcRTPHeader rtp_info; 704 RTPHeader rtp_info;
706 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 705 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
707 ASSERT_EQ(0, neteq_->InsertPacket( 706 ASSERT_EQ(0, neteq_->InsertPacket(
708 rtp_info.header, 707 rtp_info,
709 rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 708 rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
710 ++seq_no; 709 ++seq_no;
711 timestamp += kCngPeriodSamples; 710 timestamp += kCngPeriodSamples;
712 next_input_time_ms += kCngPeriodMs * drift_factor; 711 next_input_time_ms += kCngPeriodMs * drift_factor;
713 } 712 }
714 } 713 }
715 714
716 // Insert speech again until output type is speech. 715 // Insert speech again until output type is speech.
717 double speech_restart_time_ms = t_ms; 716 double speech_restart_time_ms = t_ms;
718 while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) { 717 while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
719 // Each turn in this for loop is 10 ms. 718 // Each turn in this for loop is 10 ms.
720 while (next_input_time_ms <= t_ms) { 719 while (next_input_time_ms <= t_ms) {
721 // Insert one 30 ms speech frame. 720 // Insert one 30 ms speech frame.
722 uint8_t payload[kPayloadBytes] = {0}; 721 uint8_t payload[kPayloadBytes] = {0};
723 WebRtcRTPHeader rtp_info; 722 RTPHeader rtp_info;
724 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 723 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
725 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 724 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
726 ++seq_no; 725 ++seq_no;
727 timestamp += kSamples; 726 timestamp += kSamples;
728 next_input_time_ms += kFrameSizeMs * drift_factor; 727 next_input_time_ms += kFrameSizeMs * drift_factor;
729 } 728 }
730 // Pull out data once. 729 // Pull out data once.
731 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 730 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
732 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 731 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
733 // Increase clock. 732 // Increase clock.
734 t_ms += 10; 733 t_ms += 10;
735 } 734 }
(...skipping 88 matching lines...)
824 LongCngWithClockDrift(kDriftFactor, 823 LongCngWithClockDrift(kDriftFactor,
825 kNetworkFreezeTimeMs, 824 kNetworkFreezeTimeMs,
826 kGetAudioDuringFreezeRecovery, 825 kGetAudioDuringFreezeRecovery,
827 kDelayToleranceMs, 826 kDelayToleranceMs,
828 kMaxTimeToSpeechMs); 827 kMaxTimeToSpeechMs);
829 } 828 }
830 829
831 TEST_F(NetEqDecodingTest, UnknownPayloadType) { 830 TEST_F(NetEqDecodingTest, UnknownPayloadType) {
832 const size_t kPayloadBytes = 100; 831 const size_t kPayloadBytes = 100;
833 uint8_t payload[kPayloadBytes] = {0}; 832 uint8_t payload[kPayloadBytes] = {0};
834 WebRtcRTPHeader rtp_info; 833 RTPHeader rtp_info;
835 PopulateRtpInfo(0, 0, &rtp_info); 834 PopulateRtpInfo(0, 0, &rtp_info);
836 rtp_info.header.payloadType = 1; // Not registered as a decoder. 835 rtp_info.payloadType = 1; // Not registered as a decoder.
837 EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info.header, payload, 0)); 836 EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload, 0));
838 EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError()); 837 EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
839 } 838 }
840 839
841 #if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX) 840 #if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
842 #define MAYBE_DecoderError DecoderError 841 #define MAYBE_DecoderError DecoderError
843 #else 842 #else
844 #define MAYBE_DecoderError DISABLED_DecoderError 843 #define MAYBE_DecoderError DISABLED_DecoderError
845 #endif 844 #endif
846 845
847 TEST_F(NetEqDecodingTest, MAYBE_DecoderError) { 846 TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
848 const size_t kPayloadBytes = 100; 847 const size_t kPayloadBytes = 100;
849 uint8_t payload[kPayloadBytes] = {0}; 848 uint8_t payload[kPayloadBytes] = {0};
850 WebRtcRTPHeader rtp_info; 849 RTPHeader rtp_info;
851 PopulateRtpInfo(0, 0, &rtp_info); 850 PopulateRtpInfo(0, 0, &rtp_info);
852 rtp_info.header.payloadType = 103; // iSAC, but the payload is invalid. 851 rtp_info.payloadType = 103; // iSAC, but the payload is invalid.
853 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 852 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
854 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call 853 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
855 // to GetAudio. 854 // to GetAudio.
856 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { 855 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
857 out_frame_.data_[i] = 1; 856 out_frame_.data_[i] = 1;
858 } 857 }
859 bool muted; 858 bool muted;
860 EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted)); 859 EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
861 ASSERT_FALSE(muted); 860 ASSERT_FALSE(muted);
862 // Verify that there is a decoder error to check. 861 // Verify that there is a decoder error to check.
863 EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError()); 862 EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
(...skipping 73 matching lines...)
937 // We are using the same 32 kHz input file for all tests, regardless of 936 // We are using the same 32 kHz input file for all tests, regardless of
938 // |sampling_rate_hz|. The output may sound weird, but the test is still 937 // |sampling_rate_hz|. The output may sound weird, but the test is still
939 // valid. 938 // valid.
940 ASSERT_TRUE(input.Init( 939 ASSERT_TRUE(input.Init(
941 webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"), 940 webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
942 10 * sampling_rate_hz, // Max 10 seconds loop length. 941 10 * sampling_rate_hz, // Max 10 seconds loop length.
943 expected_samples_per_channel)); 942 expected_samples_per_channel));
944 943
945 // Payload of 10 ms of PCM16 32 kHz. 944 // Payload of 10 ms of PCM16 32 kHz.
946 uint8_t payload[kBlockSize32kHz * sizeof(int16_t)]; 945 uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
947 WebRtcRTPHeader rtp_info; 946 RTPHeader rtp_info;
948 PopulateRtpInfo(0, 0, &rtp_info); 947 PopulateRtpInfo(0, 0, &rtp_info);
949 rtp_info.header.payloadType = payload_type; 948 rtp_info.payloadType = payload_type;
950 949
951 uint32_t receive_timestamp = 0; 950 uint32_t receive_timestamp = 0;
952 bool muted; 951 bool muted;
953 for (int n = 0; n < 10; ++n) { // Insert few packets and get audio. 952 for (int n = 0; n < 10; ++n) { // Insert few packets and get audio.
954 auto block = input.GetNextBlock(); 953 auto block = input.GetNextBlock();
955 ASSERT_EQ(expected_samples_per_channel, block.size()); 954 ASSERT_EQ(expected_samples_per_channel, block.size());
956 size_t enc_len_bytes = 955 size_t enc_len_bytes =
957 WebRtcPcm16b_Encode(block.data(), block.size(), payload); 956 WebRtcPcm16b_Encode(block.data(), block.size(), payload);
958 ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2); 957 ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
959 958
960 ASSERT_EQ(0, neteq_->InsertPacket( 959 ASSERT_EQ(0, neteq_->InsertPacket(
961 rtp_info.header, 960 rtp_info,
962 rtc::ArrayView<const uint8_t>(payload, enc_len_bytes), 961 rtc::ArrayView<const uint8_t>(payload, enc_len_bytes),
963 receive_timestamp)); 962 receive_timestamp));
964 output.Reset(); 963 output.Reset();
965 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted)); 964 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
966 ASSERT_EQ(1u, output.num_channels_); 965 ASSERT_EQ(1u, output.num_channels_);
967 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_); 966 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
968 ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); 967 ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
969 968
970 // Next packet. 969 // Next packet.
971 rtp_info.header.timestamp += expected_samples_per_channel; 970 rtp_info.timestamp += expected_samples_per_channel;
972 rtp_info.header.sequenceNumber++; 971 rtp_info.sequenceNumber++;
973 receive_timestamp += expected_samples_per_channel; 972 receive_timestamp += expected_samples_per_channel;
974 } 973 }
975 974
976 output.Reset(); 975 output.Reset();
977 976
978 // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull 977 // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
979 // one frame without checking speech-type. This is the first frame pulled 978 // one frame without checking speech-type. This is the first frame pulled
980 // without inserting any packet, and might not be labeled as PLC. 979 // without inserting any packet, and might not be labeled as PLC.
981 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted)); 980 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
982 ASSERT_EQ(1u, output.num_channels_); 981 ASSERT_EQ(1u, output.num_channels_);
(...skipping 100 matching lines...)
1083 int packets_inserted = 0; 1082 int packets_inserted = 0;
1084 uint16_t last_seq_no; 1083 uint16_t last_seq_no;
1085 uint32_t last_timestamp; 1084 uint32_t last_timestamp;
1086 bool timestamp_wrapped = false; 1085 bool timestamp_wrapped = false;
1087 bool seq_no_wrapped = false; 1086 bool seq_no_wrapped = false;
1088 for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) { 1087 for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
1089 // Each turn in this for loop is 10 ms. 1088 // Each turn in this for loop is 10 ms.
1090 while (next_input_time_ms <= t_ms) { 1089 while (next_input_time_ms <= t_ms) {
1091 // Insert one 30 ms speech frame. 1090 // Insert one 30 ms speech frame.
1092 uint8_t payload[kPayloadBytes] = {0}; 1091 uint8_t payload[kPayloadBytes] = {0};
1093 WebRtcRTPHeader rtp_info; 1092 RTPHeader rtp_info;
1094 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 1093 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1095 if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) { 1094 if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
1096 // This sequence number was not in the set to drop. Insert it. 1095 // This sequence number was not in the set to drop. Insert it.
1097 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 1096 ASSERT_EQ(0,
1098 receive_timestamp)); 1097 neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
1099 ++packets_inserted; 1098 ++packets_inserted;
1100 } 1099 }
1101 NetEqNetworkStatistics network_stats; 1100 NetEqNetworkStatistics network_stats;
1102 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats)); 1101 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
1103 1102
1104 // Due to internal NetEq logic, preferred buffer-size is about 4 times the 1103 // Due to internal NetEq logic, preferred buffer-size is about 4 times the
1105 // packet size for first few packets. Therefore we refrain from checking 1104 // packet size for first few packets. Therefore we refrain from checking
1106 // the criteria. 1105 // the criteria.
1107 if (packets_inserted > 4) { 1106 if (packets_inserted > 4) {
1108 // Expect preferred and actual buffer size to be no more than 2 frames. 1107 // Expect preferred and actual buffer size to be no more than 2 frames.
(...skipping 63 matching lines...)
1172 const int kFrameSizeMs = 10; 1171 const int kFrameSizeMs = 10;
1173 const int kSampleRateKhz = 16; 1172 const int kSampleRateKhz = 16;
1174 const int kSamples = kFrameSizeMs * kSampleRateKhz; 1173 const int kSamples = kFrameSizeMs * kSampleRateKhz;
1175 const size_t kPayloadBytes = kSamples * 2; 1174 const size_t kPayloadBytes = kSamples * 2;
1176 1175
1177 const int algorithmic_delay_samples = std::max( 1176 const int algorithmic_delay_samples = std::max(
1178 algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8); 1177 algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
1179 // Insert three speech packets. Three are needed to get the frame length 1178 // Insert three speech packets. Three are needed to get the frame length
1180 // correct. 1179 // correct.
1181 uint8_t payload[kPayloadBytes] = {0}; 1180 uint8_t payload[kPayloadBytes] = {0};
1182 WebRtcRTPHeader rtp_info; 1181 RTPHeader rtp_info;
1183 bool muted; 1182 bool muted;
1184 for (int i = 0; i < 3; ++i) { 1183 for (int i = 0; i < 3; ++i) {
1185 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 1184 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1186 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 1185 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1187 ++seq_no; 1186 ++seq_no;
1188 timestamp += kSamples; 1187 timestamp += kSamples;
1189 1188
1190 // Pull audio once. 1189 // Pull audio once.
1191 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 1190 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1192 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1191 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1193 } 1192 }
1194 // Verify speech output. 1193 // Verify speech output.
1195 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 1194 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1196 1195
1197 // Insert same CNG packet twice. 1196 // Insert same CNG packet twice.
1198 const int kCngPeriodMs = 100; 1197 const int kCngPeriodMs = 100;
1199 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz; 1198 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
1200 size_t payload_len; 1199 size_t payload_len;
1201 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 1200 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
1202 // This is the first time this CNG packet is inserted. 1201 // This is the first time this CNG packet is inserted.
1203 ASSERT_EQ(0, neteq_->InsertPacket( 1202 ASSERT_EQ(
1204 rtp_info.header, 1203 0, neteq_->InsertPacket(
1205 rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 1204 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
1206 1205
1207 // Pull audio once and make sure CNG is played. 1206 // Pull audio once and make sure CNG is played.
1208 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 1207 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1209 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1208 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1210 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 1209 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1211 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG. 1210 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
1212 EXPECT_EQ(timestamp - algorithmic_delay_samples, 1211 EXPECT_EQ(timestamp - algorithmic_delay_samples,
1213 out_frame_.timestamp_ + out_frame_.samples_per_channel_); 1212 out_frame_.timestamp_ + out_frame_.samples_per_channel_);
1214 1213
1215 // Insert the same CNG packet again. Note that at this point it is old, since 1214 // Insert the same CNG packet again. Note that at this point it is old, since
1216 // we have already decoded the first copy of it. 1215 // we have already decoded the first copy of it.
1217 ASSERT_EQ(0, neteq_->InsertPacket( 1216 ASSERT_EQ(
1218 rtp_info.header, 1217 0, neteq_->InsertPacket(
1219 rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 1218 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
1220 1219
1221 // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since 1220 // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
1222 // we have already pulled out CNG once. 1221 // we have already pulled out CNG once.
1223 for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) { 1222 for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
1224 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 1223 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1225 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1224 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1226 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 1225 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1227 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG. 1226 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
1228 EXPECT_EQ(timestamp - algorithmic_delay_samples, 1227 EXPECT_EQ(timestamp - algorithmic_delay_samples,
1229 out_frame_.timestamp_ + out_frame_.samples_per_channel_); 1228 out_frame_.timestamp_ + out_frame_.samples_per_channel_);
1230 } 1229 }
1231 1230
1232 // Insert speech again. 1231 // Insert speech again.
1233 ++seq_no; 1232 ++seq_no;
1234 timestamp += kCngPeriodSamples; 1233 timestamp += kCngPeriodSamples;
1235 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 1234 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1236 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 1235 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1237 1236
1238 // Pull audio once and verify that the output is speech again. 1237 // Pull audio once and verify that the output is speech again.
1239 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 1238 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1240 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1239 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1241 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 1240 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1242 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp(); 1241 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
1243 ASSERT_TRUE(playout_timestamp); 1242 ASSERT_TRUE(playout_timestamp);
1244 EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples, 1243 EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
1245 *playout_timestamp); 1244 *playout_timestamp);
1246 } 1245 }
1247 1246
1248 rtc::Optional<uint32_t> NetEqDecodingTest::PlayoutTimestamp() { 1247 rtc::Optional<uint32_t> NetEqDecodingTest::PlayoutTimestamp() {
1249 return neteq_->GetPlayoutTimestamp(); 1248 return neteq_->GetPlayoutTimestamp();
1250 } 1249 }
1251 1250
1252 TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); } 1251 TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
1253 1252
1254 TEST_F(NetEqDecodingTest, CngFirst) { 1253 TEST_F(NetEqDecodingTest, CngFirst) {
1255 uint16_t seq_no = 0; 1254 uint16_t seq_no = 0;
1256 uint32_t timestamp = 0; 1255 uint32_t timestamp = 0;
1257 const int kFrameSizeMs = 10; 1256 const int kFrameSizeMs = 10;
1258 const int kSampleRateKhz = 16; 1257 const int kSampleRateKhz = 16;
1259 const int kSamples = kFrameSizeMs * kSampleRateKhz; 1258 const int kSamples = kFrameSizeMs * kSampleRateKhz;
1260 const int kPayloadBytes = kSamples * 2; 1259 const int kPayloadBytes = kSamples * 2;
1261 const int kCngPeriodMs = 100; 1260 const int kCngPeriodMs = 100;
1262 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz; 1261 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
1263 size_t payload_len; 1262 size_t payload_len;
1264 1263
1265 uint8_t payload[kPayloadBytes] = {0}; 1264 uint8_t payload[kPayloadBytes] = {0};
1266 WebRtcRTPHeader rtp_info; 1265 RTPHeader rtp_info;
1267 1266
1268 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 1267 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
1269 ASSERT_EQ(NetEq::kOK, 1268 ASSERT_EQ(
1270 neteq_->InsertPacket( 1269 NetEq::kOK,
1271 rtp_info.header, 1270 neteq_->InsertPacket(
1272 rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 1271 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
1273 ++seq_no; 1272 ++seq_no;
1274 timestamp += kCngPeriodSamples; 1273 timestamp += kCngPeriodSamples;
1275 1274
1276 // Pull audio once and make sure CNG is played. 1275 // Pull audio once and make sure CNG is played.
1277 bool muted; 1276 bool muted;
1278 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 1277 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1279 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1278 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1280 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 1279 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1281 1280
1282 // Insert some speech packets. 1281 // Insert some speech packets.
1283 const uint32_t first_speech_timestamp = timestamp; 1282 const uint32_t first_speech_timestamp = timestamp;
1284 int timeout_counter = 0; 1283 int timeout_counter = 0;
1285 do { 1284 do {
1286 ASSERT_LT(timeout_counter++, 20) << "Test timed out"; 1285 ASSERT_LT(timeout_counter++, 20) << "Test timed out";
1287 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 1286 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1288 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 1287 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1289 ++seq_no; 1288 ++seq_no;
1290 timestamp += kSamples; 1289 timestamp += kSamples;
1291 1290
1292 // Pull audio once. 1291 // Pull audio once.
1293 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 1292 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1294 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1293 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1295 } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp)); 1294 } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
1296 // Verify speech output. 1295 // Verify speech output.
1297 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 1296 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1298 } 1297 }
1299 1298
1300 class NetEqDecodingTestWithMutedState : public NetEqDecodingTest { 1299 class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
1301 public: 1300 public:
1302 NetEqDecodingTestWithMutedState() : NetEqDecodingTest() { 1301 NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
1303 config_.enable_muted_state = true; 1302 config_.enable_muted_state = true;
1304 } 1303 }
1305 1304
1306 protected: 1305 protected:
1307 static constexpr size_t kSamples = 10 * 16; 1306 static constexpr size_t kSamples = 10 * 16;
1308 static constexpr size_t kPayloadBytes = kSamples * 2; 1307 static constexpr size_t kPayloadBytes = kSamples * 2;
1309 1308
1310 void InsertPacket(uint32_t rtp_timestamp) { 1309 void InsertPacket(uint32_t rtp_timestamp) {
1311 uint8_t payload[kPayloadBytes] = {0}; 1310 uint8_t payload[kPayloadBytes] = {0};
1312 WebRtcRTPHeader rtp_info; 1311 RTPHeader rtp_info;
1313 PopulateRtpInfo(0, rtp_timestamp, &rtp_info); 1312 PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
1314 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 1313 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1315 } 1314 }
1316 1315
1317 void InsertCngPacket(uint32_t rtp_timestamp) { 1316 void InsertCngPacket(uint32_t rtp_timestamp) {
1318 uint8_t payload[kPayloadBytes] = {0}; 1317 uint8_t payload[kPayloadBytes] = {0};
1319 WebRtcRTPHeader rtp_info; 1318 RTPHeader rtp_info;
1320 size_t payload_len; 1319 size_t payload_len;
1321 PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len); 1320 PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
1322 EXPECT_EQ(NetEq::kOK, 1321 EXPECT_EQ(
1323 neteq_->InsertPacket( 1322 NetEq::kOK,
1324 rtp_info.header, 1323 neteq_->InsertPacket(
1325 rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 1324 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
1326 } 1325 }
1327 1326
1328 bool GetAudioReturnMuted() { 1327 bool GetAudioReturnMuted() {
1329 bool muted; 1328 bool muted;
1330 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 1329 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1331 return muted; 1330 return muted;
1332 } 1331 }
1333 1332
1334 void GetAudioUntilMuted() { 1333 void GetAudioUntilMuted() {
1335 while (!GetAudioReturnMuted()) { 1334 while (!GetAudioReturnMuted()) {
(...skipping 202 matching lines...)
1538 1537
1539 TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) { 1538 TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
1540 ASSERT_FALSE(config_.enable_muted_state); 1539 ASSERT_FALSE(config_.enable_muted_state);
1541 config2_.enable_muted_state = true; 1540 config2_.enable_muted_state = true;
1542 CreateSecondInstance(); 1541 CreateSecondInstance();
1543 1542
1544 // Insert one speech packet into both NetEqs. 1543 // Insert one speech packet into both NetEqs.
1545 const size_t kSamples = 10 * 16; 1544 const size_t kSamples = 10 * 16;
1546 const size_t kPayloadBytes = kSamples * 2; 1545 const size_t kPayloadBytes = kSamples * 2;
1547 uint8_t payload[kPayloadBytes] = {0}; 1546 uint8_t payload[kPayloadBytes] = {0};
1548 WebRtcRTPHeader rtp_info; 1547 RTPHeader rtp_info;
1549 PopulateRtpInfo(0, 0, &rtp_info); 1548 PopulateRtpInfo(0, 0, &rtp_info);
1550 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 1549 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1551 EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info.header, payload, 0)); 1550 EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
1552 1551
1553 AudioFrame out_frame1, out_frame2; 1552 AudioFrame out_frame1, out_frame2;
1554 bool muted; 1553 bool muted;
1555 for (int i = 0; i < 1000; ++i) { 1554 for (int i = 0; i < 1000; ++i) {
1556 std::ostringstream ss; 1555 std::ostringstream ss;
1557 ss << "i = " << i; 1556 ss << "i = " << i;
1558 SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure. 1557 SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
1559 EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted)); 1558 EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
1560 EXPECT_FALSE(muted); 1559 EXPECT_FALSE(muted);
1561 EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted)); 1560 EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
1562 if (muted) { 1561 if (muted) {
1563 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2)); 1562 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
1564 } else { 1563 } else {
1565 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2)); 1564 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
1566 } 1565 }
1567 } 1566 }
1568 EXPECT_TRUE(muted); 1567 EXPECT_TRUE(muted);
1569 1568
1570 // Insert new data. Timestamp is corrected for the time elapsed since the last 1569 // Insert new data. Timestamp is corrected for the time elapsed since the last
1571 // packet. 1570 // packet.
1572 PopulateRtpInfo(0, kSamples * 1000, &rtp_info); 1571 PopulateRtpInfo(0, kSamples * 1000, &rtp_info);
1573 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info.header, payload, 0)); 1572 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1574 EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info.header, payload, 0)); 1573 EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
1575 1574
1576 int counter = 0; 1575 int counter = 0;
1577 while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) { 1576 while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
1578 ASSERT_LT(counter++, 1000) << "Test timed out"; 1577 ASSERT_LT(counter++, 1000) << "Test timed out";
1579 std::ostringstream ss; 1578 std::ostringstream ss;
1580 ss << "counter = " << counter; 1579 ss << "counter = " << counter;
1581 SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure. 1580 SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
1582 EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted)); 1581 EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
1583 EXPECT_FALSE(muted); 1582 EXPECT_FALSE(muted);
1584 EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted)); 1583 EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
1585 if (muted) { 1584 if (muted) {
1586 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2)); 1585 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
1587 } else { 1586 } else {
1588 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2)); 1587 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
1589 } 1588 }
1590 } 1589 }
1591 EXPECT_FALSE(muted); 1590 EXPECT_FALSE(muted);
1592 } 1591 }
1593 1592
1594 } // namespace webrtc 1593 } // namespace webrtc