Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(486)

Side by Side Diff: webrtc/modules/audio_coding/neteq/neteq_unittest.cc

Issue 1965733002: NetEq: Implement muted output (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@muted-expand
Patch Set: Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after
112 digest->Update(&size, sizeof(size)); 112 digest->Update(&size, sizeof(size));
113 113
114 if (file) 114 if (file)
115 ASSERT_EQ(static_cast<size_t>(size), 115 ASSERT_EQ(static_cast<size_t>(size),
116 fwrite(message.data(), sizeof(char), size, file)); 116 fwrite(message.data(), sizeof(char), size, file));
117 digest->Update(message.data(), sizeof(char) * size); 117 digest->Update(message.data(), sizeof(char) * size);
118 } 118 }
119 119
120 #endif // WEBRTC_NETEQ_UNITTEST_BITEXACT 120 #endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
121 121
122 void LoadDecoders(webrtc::NetEq* neteq) {
123 // Load PCMu.
124 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCMu,
125 "pcmu", 0));
126 // Load PCMa.
127 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCMa,
128 "pcma", 8));
129 #ifdef WEBRTC_CODEC_ILBC
130 // Load iLBC.
131 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderILBC,
132 "ilbc", 102));
133 #endif
134 #if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
135 // Load iSAC.
136 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderISAC,
137 "isac", 103));
138 #endif
139 #ifdef WEBRTC_CODEC_ISAC
140 // Load iSAC SWB.
141 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderISACswb,
142 "isac-swb", 104));
143 #endif
144 #ifdef WEBRTC_CODEC_OPUS
145 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderOpus,
146 "opus", 111));
147 #endif
148 // Load PCM16B nb.
149 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCM16B,
150 "pcm16-nb", 93));
151 // Load PCM16B wb.
152 ASSERT_EQ(0, neteq->RegisterPayloadType(
153 webrtc::NetEqDecoder::kDecoderPCM16Bwb, "pcm16-wb", 94));
154 // Load PCM16B swb32.
155 ASSERT_EQ(
156 0, neteq->RegisterPayloadType(
157 webrtc::NetEqDecoder::kDecoderPCM16Bswb32kHz, "pcm16-swb32", 95));
158 // Load CNG 8 kHz.
159 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderCNGnb,
160 "cng-nb", 13));
161 // Load CNG 16 kHz.
162 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderCNGwb,
163 "cng-wb", 98));
164 }
122 } // namespace 165 } // namespace
123 166
124 namespace webrtc { 167 namespace webrtc {
125 168
126 class ResultSink { 169 class ResultSink {
127 public: 170 public:
128 explicit ResultSink(const std::string& output_file); 171 explicit ResultSink(const std::string& output_file);
129 ~ResultSink(); 172 ~ResultSink();
130 173
131 template<typename T, size_t n> void AddResult( 174 template<typename T, size_t n> void AddResult(
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after
206 static const size_t kBlockSize8kHz = kTimeStepMs * 8; 249 static const size_t kBlockSize8kHz = kTimeStepMs * 8;
207 static const size_t kBlockSize16kHz = kTimeStepMs * 16; 250 static const size_t kBlockSize16kHz = kTimeStepMs * 16;
208 static const size_t kBlockSize32kHz = kTimeStepMs * 32; 251 static const size_t kBlockSize32kHz = kTimeStepMs * 32;
209 static const size_t kBlockSize48kHz = kTimeStepMs * 48; 252 static const size_t kBlockSize48kHz = kTimeStepMs * 48;
210 static const int kInitSampleRateHz = 8000; 253 static const int kInitSampleRateHz = 8000;
211 254
212 NetEqDecodingTest(); 255 NetEqDecodingTest();
213 virtual void SetUp(); 256 virtual void SetUp();
214 virtual void TearDown(); 257 virtual void TearDown();
215 void SelectDecoders(NetEqDecoder* used_codec); 258 void SelectDecoders(NetEqDecoder* used_codec);
216 void LoadDecoders();
217 void OpenInputFile(const std::string &rtp_file); 259 void OpenInputFile(const std::string &rtp_file);
218 void Process(); 260 void Process();
219 261
220 void DecodeAndCompare(const std::string& rtp_file, 262 void DecodeAndCompare(const std::string& rtp_file,
221 const std::string& output_checksum, 263 const std::string& output_checksum,
222 const std::string& network_stats_checksum, 264 const std::string& network_stats_checksum,
223 const std::string& rtcp_stats_checksum, 265 const std::string& rtcp_stats_checksum,
224 bool gen_ref); 266 bool gen_ref);
225 267
226 static void PopulateRtpInfo(int frame_index, 268 static void PopulateRtpInfo(int frame_index,
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
271 algorithmic_delay_ms_(0) { 313 algorithmic_delay_ms_(0) {
272 config_.sample_rate_hz = kInitSampleRateHz; 314 config_.sample_rate_hz = kInitSampleRateHz;
273 } 315 }
274 316
275 void NetEqDecodingTest::SetUp() { 317 void NetEqDecodingTest::SetUp() {
276 neteq_ = NetEq::Create(config_); 318 neteq_ = NetEq::Create(config_);
277 NetEqNetworkStatistics stat; 319 NetEqNetworkStatistics stat;
278 ASSERT_EQ(0, neteq_->NetworkStatistics(&stat)); 320 ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
279 algorithmic_delay_ms_ = stat.current_buffer_size_ms; 321 algorithmic_delay_ms_ = stat.current_buffer_size_ms;
280 ASSERT_TRUE(neteq_); 322 ASSERT_TRUE(neteq_);
281 LoadDecoders(); 323 LoadDecoders(neteq_);
282 } 324 }
283 325
// Releases the NetEq instance created in SetUp(); NetEq::Create() returns a
// raw owning pointer, so the fixture must delete it explicitly.
void NetEqDecodingTest::TearDown() {
  delete neteq_;
}
287 329
// Member-function variant of decoder registration (the left/deleted side of
// this diff; the patch replaces it with the free function LoadDecoders(NetEq*)
// so that other fixtures can reuse it). Registers every test codec with the
// fixture's |neteq_| under the payload types used by the test vectors.
void NetEqDecodingTest::LoadDecoders() {
  // Load PCMu.
  ASSERT_EQ(0,
            neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMu, "pcmu", 0));
  // Load PCMa.
  ASSERT_EQ(0,
            neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMa, "pcma", 8));
#ifdef WEBRTC_CODEC_ILBC
  // Load iLBC.
  ASSERT_EQ(
      0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderILBC, "ilbc", 102));
#endif
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
  // Load iSAC.
  ASSERT_EQ(
      0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISAC, "isac", 103));
#endif
#ifdef WEBRTC_CODEC_ISAC
  // Load iSAC SWB. Only available in the floating-point iSAC build.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISACswb,
                                           "isac-swb", 104));
#endif
#ifdef WEBRTC_CODEC_OPUS
  ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderOpus,
                                           "opus", 111));
#endif
  // Load PCM16B nb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16B,
                                           "pcm16-nb", 93));
  // Load PCM16B wb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bwb,
                                           "pcm16-wb", 94));
  // Load PCM16B swb32.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bswb32kHz,
                                           "pcm16-swb32", 95));
  // Load CNG 8 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGnb,
                                           "cng-nb", 13));
  // Load CNG 16 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGwb,
                                           "cng-wb", 98));
}
330
331 void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) { 330 void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
332 rtp_source_.reset(test::RtpFileSource::Create(rtp_file)); 331 rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
333 } 332 }
334 333
335 void NetEqDecodingTest::Process() { 334 void NetEqDecodingTest::Process() {
336 // Check if time to receive. 335 // Check if time to receive.
337 while (packet_ && sim_clock_ >= packet_->time_ms()) { 336 while (packet_ && sim_clock_ >= packet_->time_ms()) {
338 if (packet_->payload_length_bytes() > 0) { 337 if (packet_->payload_length_bytes() > 0) {
339 WebRtcRTPHeader rtp_header; 338 WebRtcRTPHeader rtp_header;
340 packet_->ConvertHeader(&rtp_header); 339 packet_->ConvertHeader(&rtp_header);
341 #ifndef WEBRTC_CODEC_ISAC 340 #ifndef WEBRTC_CODEC_ISAC
342 // Ignore payload type 104 (iSAC-swb) if ISAC is not supported. 341 // Ignore payload type 104 (iSAC-swb) if ISAC is not supported.
343 if (rtp_header.header.payloadType != 104) 342 if (rtp_header.header.payloadType != 104)
344 #endif 343 #endif
345 ASSERT_EQ(0, neteq_->InsertPacket( 344 ASSERT_EQ(0, neteq_->InsertPacket(
346 rtp_header, 345 rtp_header,
347 rtc::ArrayView<const uint8_t>( 346 rtc::ArrayView<const uint8_t>(
348 packet_->payload(), packet_->payload_length_bytes()), 347 packet_->payload(), packet_->payload_length_bytes()),
349 static_cast<uint32_t>(packet_->time_ms() * 348 static_cast<uint32_t>(packet_->time_ms() *
350 (output_sample_rate_ / 1000)))); 349 (output_sample_rate_ / 1000))));
351 } 350 }
352 // Get next packet. 351 // Get next packet.
353 packet_.reset(rtp_source_->NextPacket()); 352 packet_.reset(rtp_source_->NextPacket());
354 } 353 }
355 354
356 // Get audio from NetEq. 355 // Get audio from NetEq.
357 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 356 bool muted_output;
357 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
358 ASSERT_FALSE(muted_output);
358 ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) || 359 ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) ||
359 (out_frame_.samples_per_channel_ == kBlockSize16kHz) || 360 (out_frame_.samples_per_channel_ == kBlockSize16kHz) ||
360 (out_frame_.samples_per_channel_ == kBlockSize32kHz) || 361 (out_frame_.samples_per_channel_ == kBlockSize32kHz) ||
361 (out_frame_.samples_per_channel_ == kBlockSize48kHz)); 362 (out_frame_.samples_per_channel_ == kBlockSize48kHz));
362 output_sample_rate_ = out_frame_.sample_rate_hz_; 363 output_sample_rate_ = out_frame_.sample_rate_hz_;
363 EXPECT_EQ(output_sample_rate_, neteq_->last_output_sample_rate_hz()); 364 EXPECT_EQ(output_sample_rate_, neteq_->last_output_sample_rate_hz());
364 365
365 // Increase time. 366 // Increase time.
366 sim_clock_ += kTimeStepMs; 367 sim_clock_ += kTimeStepMs;
367 } 368 }
(...skipping 170 matching lines...) Expand 10 before | Expand all | Expand 10 after
538 WebRtcRTPHeader rtp_info; 539 WebRtcRTPHeader rtp_info;
539 rtp_info.header.sequenceNumber = i; 540 rtp_info.header.sequenceNumber = i;
540 rtp_info.header.timestamp = i * kSamples; 541 rtp_info.header.timestamp = i * kSamples;
541 rtp_info.header.ssrc = 0x1234; // Just an arbitrary SSRC. 542 rtp_info.header.ssrc = 0x1234; // Just an arbitrary SSRC.
542 rtp_info.header.payloadType = 94; // PCM16b WB codec. 543 rtp_info.header.payloadType = 94; // PCM16b WB codec.
543 rtp_info.header.markerBit = 0; 544 rtp_info.header.markerBit = 0;
544 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 545 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
545 } 546 }
546 // Pull out all data. 547 // Pull out all data.
547 for (size_t i = 0; i < num_frames; ++i) { 548 for (size_t i = 0; i < num_frames; ++i) {
548 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 549 bool muted_output;
550 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
549 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 551 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
550 } 552 }
551 553
552 NetEqNetworkStatistics stats; 554 NetEqNetworkStatistics stats;
553 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats)); 555 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
554 // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms 556 // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
555 // spacing (per definition), we expect the delay to increase with 10 ms for 557 // spacing (per definition), we expect the delay to increase with 10 ms for
556 // each packet. Thus, we are calculating the statistics for a series from 10 558 // each packet. Thus, we are calculating the statistics for a series from 10
557 // to 300, in steps of 10 ms. 559 // to 300, in steps of 10 ms.
558 EXPECT_EQ(155, stats.mean_waiting_time_ms); 560 EXPECT_EQ(155, stats.mean_waiting_time_ms);
(...skipping 20 matching lines...) Expand all
579 int num_packets = (frame_index % 10 == 0 ? 2 : 1); 581 int num_packets = (frame_index % 10 == 0 ? 2 : 1);
580 for (int n = 0; n < num_packets; ++n) { 582 for (int n = 0; n < num_packets; ++n) {
581 uint8_t payload[kPayloadBytes] = {0}; 583 uint8_t payload[kPayloadBytes] = {0};
582 WebRtcRTPHeader rtp_info; 584 WebRtcRTPHeader rtp_info;
583 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info); 585 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
584 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 586 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
585 ++frame_index; 587 ++frame_index;
586 } 588 }
587 589
588 // Pull out data once. 590 // Pull out data once.
589 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 591 bool muted_output;
592 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
590 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 593 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
591 } 594 }
592 595
593 NetEqNetworkStatistics network_stats; 596 NetEqNetworkStatistics network_stats;
594 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats)); 597 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
595 EXPECT_EQ(-103196, network_stats.clockdrift_ppm); 598 EXPECT_EQ(-103196, network_stats.clockdrift_ppm);
596 } 599 }
597 600
// Verifies the clock-drift estimate when packets arrive ~11% too slowly:
// skipping every 10th insertion while still pulling audio every 10 ms
// simulates a sender clock running fast relative to the receiver.
TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
  const int kNumFrames = 5000;  // Needed for convergence.
  int frame_index = 0;
  const size_t kSamples = 10 * 16;  // 10 ms at 16 kHz.
  const size_t kPayloadBytes = kSamples * 2;  // PCM16: 2 bytes per sample.
  for (int i = 0; i < kNumFrames; ++i) {
    // Insert one packet each time, except every 10th time where we don't insert
    // any packet. This will create a positive clock-drift of approx. 11%.
    int num_packets = (i % 10 == 9 ? 0 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
      ++frame_index;
    }

    // Pull out data once.
    bool muted_output;
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  // ~11% drift expressed in parts per million (bit-exact regression value).
  EXPECT_EQ(110946, network_stats.clockdrift_ppm);
}
624 628
625 void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor, 629 void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
626 double network_freeze_ms, 630 double network_freeze_ms,
627 bool pull_audio_during_freeze, 631 bool pull_audio_during_freeze,
628 int delay_tolerance_ms, 632 int delay_tolerance_ms,
629 int max_time_to_speech_ms) { 633 int max_time_to_speech_ms) {
630 uint16_t seq_no = 0; 634 uint16_t seq_no = 0;
631 uint32_t timestamp = 0; 635 uint32_t timestamp = 0;
632 const int kFrameSizeMs = 30; 636 const int kFrameSizeMs = 30;
633 const size_t kSamples = kFrameSizeMs * 16; 637 const size_t kSamples = kFrameSizeMs * 16;
634 const size_t kPayloadBytes = kSamples * 2; 638 const size_t kPayloadBytes = kSamples * 2;
635 double next_input_time_ms = 0.0; 639 double next_input_time_ms = 0.0;
636 double t_ms; 640 double t_ms;
641 bool muted_output;
637 642
638 // Insert speech for 5 seconds. 643 // Insert speech for 5 seconds.
639 const int kSpeechDurationMs = 5000; 644 const int kSpeechDurationMs = 5000;
640 for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) { 645 for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
641 // Each turn in this for loop is 10 ms. 646 // Each turn in this for loop is 10 ms.
642 while (next_input_time_ms <= t_ms) { 647 while (next_input_time_ms <= t_ms) {
643 // Insert one 30 ms speech frame. 648 // Insert one 30 ms speech frame.
644 uint8_t payload[kPayloadBytes] = {0}; 649 uint8_t payload[kPayloadBytes] = {0};
645 WebRtcRTPHeader rtp_info; 650 WebRtcRTPHeader rtp_info;
646 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 651 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
647 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 652 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
648 ++seq_no; 653 ++seq_no;
649 timestamp += kSamples; 654 timestamp += kSamples;
650 next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor; 655 next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
651 } 656 }
652 // Pull out data once. 657 // Pull out data once.
653 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 658 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
654 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 659 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
655 } 660 }
656 661
657 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 662 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
658 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp(); 663 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
659 ASSERT_TRUE(playout_timestamp); 664 ASSERT_TRUE(playout_timestamp);
660 int32_t delay_before = timestamp - *playout_timestamp; 665 int32_t delay_before = timestamp - *playout_timestamp;
661 666
662 // Insert CNG for 1 minute (= 60000 ms). 667 // Insert CNG for 1 minute (= 60000 ms).
663 const int kCngPeriodMs = 100; 668 const int kCngPeriodMs = 100;
664 const int kCngPeriodSamples = kCngPeriodMs * 16; // Period in 16 kHz samples. 669 const int kCngPeriodSamples = kCngPeriodMs * 16; // Period in 16 kHz samples.
665 const int kCngDurationMs = 60000; 670 const int kCngDurationMs = 60000;
666 for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) { 671 for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
667 // Each turn in this for loop is 10 ms. 672 // Each turn in this for loop is 10 ms.
668 while (next_input_time_ms <= t_ms) { 673 while (next_input_time_ms <= t_ms) {
669 // Insert one CNG frame each 100 ms. 674 // Insert one CNG frame each 100 ms.
670 uint8_t payload[kPayloadBytes]; 675 uint8_t payload[kPayloadBytes];
671 size_t payload_len; 676 size_t payload_len;
672 WebRtcRTPHeader rtp_info; 677 WebRtcRTPHeader rtp_info;
673 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 678 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
674 ASSERT_EQ(0, neteq_->InsertPacket( 679 ASSERT_EQ(0, neteq_->InsertPacket(
675 rtp_info, 680 rtp_info,
676 rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 681 rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
677 ++seq_no; 682 ++seq_no;
678 timestamp += kCngPeriodSamples; 683 timestamp += kCngPeriodSamples;
679 next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor; 684 next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
680 } 685 }
681 // Pull out data once. 686 // Pull out data once.
682 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 687 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
683 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 688 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
684 } 689 }
685 690
686 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 691 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
687 692
688 if (network_freeze_ms > 0) { 693 if (network_freeze_ms > 0) {
689 // First keep pulling audio for |network_freeze_ms| without inserting 694 // First keep pulling audio for |network_freeze_ms| without inserting
690 // any data, then insert CNG data corresponding to |network_freeze_ms| 695 // any data, then insert CNG data corresponding to |network_freeze_ms|
691 // without pulling any output audio. 696 // without pulling any output audio.
692 const double loop_end_time = t_ms + network_freeze_ms; 697 const double loop_end_time = t_ms + network_freeze_ms;
693 for (; t_ms < loop_end_time; t_ms += 10) { 698 for (; t_ms < loop_end_time; t_ms += 10) {
694 // Pull out data once. 699 // Pull out data once.
695 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 700 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
696 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 701 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
697 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 702 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
698 } 703 }
699 bool pull_once = pull_audio_during_freeze; 704 bool pull_once = pull_audio_during_freeze;
700 // If |pull_once| is true, GetAudio will be called once half-way through 705 // If |pull_once| is true, GetAudio will be called once half-way through
701 // the network recovery period. 706 // the network recovery period.
702 double pull_time_ms = (t_ms + next_input_time_ms) / 2; 707 double pull_time_ms = (t_ms + next_input_time_ms) / 2;
703 while (next_input_time_ms <= t_ms) { 708 while (next_input_time_ms <= t_ms) {
704 if (pull_once && next_input_time_ms >= pull_time_ms) { 709 if (pull_once && next_input_time_ms >= pull_time_ms) {
705 pull_once = false; 710 pull_once = false;
706 // Pull out data once. 711 // Pull out data once.
707 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 712 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
708 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 713 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
709 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 714 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
710 t_ms += 10; 715 t_ms += 10;
711 } 716 }
712 // Insert one CNG frame each 100 ms. 717 // Insert one CNG frame each 100 ms.
713 uint8_t payload[kPayloadBytes]; 718 uint8_t payload[kPayloadBytes];
714 size_t payload_len; 719 size_t payload_len;
715 WebRtcRTPHeader rtp_info; 720 WebRtcRTPHeader rtp_info;
716 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 721 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
717 ASSERT_EQ(0, neteq_->InsertPacket( 722 ASSERT_EQ(0, neteq_->InsertPacket(
(...skipping 13 matching lines...) Expand all
731 // Insert one 30 ms speech frame. 736 // Insert one 30 ms speech frame.
732 uint8_t payload[kPayloadBytes] = {0}; 737 uint8_t payload[kPayloadBytes] = {0};
733 WebRtcRTPHeader rtp_info; 738 WebRtcRTPHeader rtp_info;
734 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 739 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
735 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 740 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
736 ++seq_no; 741 ++seq_no;
737 timestamp += kSamples; 742 timestamp += kSamples;
738 next_input_time_ms += kFrameSizeMs * drift_factor; 743 next_input_time_ms += kFrameSizeMs * drift_factor;
739 } 744 }
740 // Pull out data once. 745 // Pull out data once.
741 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 746 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
742 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 747 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
743 // Increase clock. 748 // Increase clock.
744 t_ms += 10; 749 t_ms += 10;
745 } 750 }
746 751
747 // Check that the speech starts again within reasonable time. 752 // Check that the speech starts again within reasonable time.
748 double time_until_speech_returns_ms = t_ms - speech_restart_time_ms; 753 double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
749 EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms); 754 EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
750 playout_timestamp = PlayoutTimestamp(); 755 playout_timestamp = PlayoutTimestamp();
751 ASSERT_TRUE(playout_timestamp); 756 ASSERT_TRUE(playout_timestamp);
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
859 uint8_t payload[kPayloadBytes] = {0}; 864 uint8_t payload[kPayloadBytes] = {0};
860 WebRtcRTPHeader rtp_info; 865 WebRtcRTPHeader rtp_info;
861 PopulateRtpInfo(0, 0, &rtp_info); 866 PopulateRtpInfo(0, 0, &rtp_info);
862 rtp_info.header.payloadType = 103; // iSAC, but the payload is invalid. 867 rtp_info.header.payloadType = 103; // iSAC, but the payload is invalid.
863 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 868 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
864 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call 869 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
865 // to GetAudio. 870 // to GetAudio.
866 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { 871 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
867 out_frame_.data_[i] = 1; 872 out_frame_.data_[i] = 1;
868 } 873 }
869 EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_)); 874 bool muted_output;
875 EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted_output));
876 ASSERT_FALSE(muted_output);
minyue-webrtc 2016/05/11 11:29:54 does this need to be assert? + all similar places
hlundin-webrtc 2016/05/12 07:44:40 I choose to use ASSERT in all places where the tes
minyue-webrtc 2016/05/12 10:56:06 True, and with this regards, many EXPECT can becom
870 // Verify that there is a decoder error to check. 877 // Verify that there is a decoder error to check.
871 EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError()); 878 EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
872 879
873 enum NetEqDecoderError { 880 enum NetEqDecoderError {
874 ISAC_LENGTH_MISMATCH = 6730, 881 ISAC_LENGTH_MISMATCH = 6730,
875 ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH = 6640 882 ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH = 6640
876 }; 883 };
877 #if defined(WEBRTC_CODEC_ISAC) 884 #if defined(WEBRTC_CODEC_ISAC)
878 EXPECT_EQ(ISAC_LENGTH_MISMATCH, neteq_->LastDecoderError()); 885 EXPECT_EQ(ISAC_LENGTH_MISMATCH, neteq_->LastDecoderError());
879 #elif defined(WEBRTC_CODEC_ISACFX) 886 #elif defined(WEBRTC_CODEC_ISACFX)
(...skipping 16 matching lines...) Expand all
896 EXPECT_EQ(1, out_frame_.data_[i]); 903 EXPECT_EQ(1, out_frame_.data_[i]);
897 } 904 }
898 } 905 }
899 906
900 TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) { 907 TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
901 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call 908 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
902 // to GetAudio. 909 // to GetAudio.
903 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { 910 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
904 out_frame_.data_[i] = 1; 911 out_frame_.data_[i] = 1;
905 } 912 }
906 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_)); 913 bool muted_output;
914 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
915 ASSERT_FALSE(muted_output);
907 // Verify that the first block of samples is set to 0. 916 // Verify that the first block of samples is set to 0.
908 static const int kExpectedOutputLength = 917 static const int kExpectedOutputLength =
909 kInitSampleRateHz / 100; // 10 ms at initial sample rate. 918 kInitSampleRateHz / 100; // 10 ms at initial sample rate.
910 for (int i = 0; i < kExpectedOutputLength; ++i) { 919 for (int i = 0; i < kExpectedOutputLength; ++i) {
911 std::ostringstream ss; 920 std::ostringstream ss;
912 ss << "i = " << i; 921 ss << "i = " << i;
913 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure. 922 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
914 EXPECT_EQ(0, out_frame_.data_[i]); 923 EXPECT_EQ(0, out_frame_.data_[i]);
915 } 924 }
916 // Verify that the sample rate did not change from the initial configuration. 925 // Verify that the sample rate did not change from the initial configuration.
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
948 10 * sampling_rate_hz, // Max 10 seconds loop length. 957 10 * sampling_rate_hz, // Max 10 seconds loop length.
949 expected_samples_per_channel)); 958 expected_samples_per_channel));
950 959
951 // Payload of 10 ms of PCM16 32 kHz. 960 // Payload of 10 ms of PCM16 32 kHz.
952 uint8_t payload[kBlockSize32kHz * sizeof(int16_t)]; 961 uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
953 WebRtcRTPHeader rtp_info; 962 WebRtcRTPHeader rtp_info;
954 PopulateRtpInfo(0, 0, &rtp_info); 963 PopulateRtpInfo(0, 0, &rtp_info);
955 rtp_info.header.payloadType = payload_type; 964 rtp_info.header.payloadType = payload_type;
956 965
957 uint32_t receive_timestamp = 0; 966 uint32_t receive_timestamp = 0;
967 bool muted_output;
958 for (int n = 0; n < 10; ++n) { // Insert few packets and get audio. 968 for (int n = 0; n < 10; ++n) { // Insert few packets and get audio.
959 auto block = input.GetNextBlock(); 969 auto block = input.GetNextBlock();
960 ASSERT_EQ(expected_samples_per_channel, block.size()); 970 ASSERT_EQ(expected_samples_per_channel, block.size());
961 size_t enc_len_bytes = 971 size_t enc_len_bytes =
962 WebRtcPcm16b_Encode(block.data(), block.size(), payload); 972 WebRtcPcm16b_Encode(block.data(), block.size(), payload);
963 ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2); 973 ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
964 974
965 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>( 975 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
966 payload, enc_len_bytes), 976 payload, enc_len_bytes),
967 receive_timestamp)); 977 receive_timestamp));
968 output.Reset(); 978 output.Reset();
969 ASSERT_EQ(0, neteq_->GetAudio(&output)); 979 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
970 ASSERT_EQ(1u, output.num_channels_); 980 ASSERT_EQ(1u, output.num_channels_);
971 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_); 981 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
972 ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); 982 ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
973 983
974 // Next packet. 984 // Next packet.
975 rtp_info.header.timestamp += expected_samples_per_channel; 985 rtp_info.header.timestamp += expected_samples_per_channel;
976 rtp_info.header.sequenceNumber++; 986 rtp_info.header.sequenceNumber++;
977 receive_timestamp += expected_samples_per_channel; 987 receive_timestamp += expected_samples_per_channel;
978 } 988 }
979 989
980 output.Reset(); 990 output.Reset();
981 991
982 // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull 992 // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
983 // one frame without checking speech-type. This is the first frame pulled 993 // one frame without checking speech-type. This is the first frame pulled
984 // without inserting any packet, and might not be labeled as PLC. 994 // without inserting any packet, and might not be labeled as PLC.
985 ASSERT_EQ(0, neteq_->GetAudio(&output)); 995 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
986 ASSERT_EQ(1u, output.num_channels_); 996 ASSERT_EQ(1u, output.num_channels_);
987 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_); 997 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
988 998
989 // To be able to test the fading of background noise we need at least to 999 // pull 611 frames.
990 // pull 611 frames. 1000 // pull 611 frames.
991 const int kFadingThreshold = 611; 1001 const int kFadingThreshold = 611;
992 1002
993 // Test several CNG-to-PLC packets for the expected behavior. The number 20 1003 // Test several CNG-to-PLC packets for the expected behavior. The number 20
994 // is arbitrary, but sufficiently large to test enough number of frames. 1004 // is arbitrary, but sufficiently large to test enough number of frames.
995 const int kNumPlcToCngTestFrames = 20; 1005 const int kNumPlcToCngTestFrames = 20;
996 bool plc_to_cng = false; 1006 bool plc_to_cng = false;
997 for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) { 1007 for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
998 output.Reset(); 1008 output.Reset();
999 memset(output.data_, 1, sizeof(output.data_)); // Set to non-zero. 1009 memset(output.data_, 1, sizeof(output.data_)); // Set to non-zero.
1000 ASSERT_EQ(0, neteq_->GetAudio(&output)); 1010 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
1011 ASSERT_FALSE(muted_output);
1001 ASSERT_EQ(1u, output.num_channels_); 1012 ASSERT_EQ(1u, output.num_channels_);
1002 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_); 1013 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
1003 if (output.speech_type_ == AudioFrame::kPLCCNG) { 1014 if (output.speech_type_ == AudioFrame::kPLCCNG) {
1004 plc_to_cng = true; 1015 plc_to_cng = true;
1005 double sum_squared = 0; 1016 double sum_squared = 0;
1006 for (size_t k = 0; 1017 for (size_t k = 0;
1007 k < output.num_channels_ * output.samples_per_channel_; ++k) 1018 k < output.num_channels_ * output.samples_per_channel_; ++k)
1008 sum_squared += output.data_[k] * output.data_[k]; 1019 sum_squared += output.data_[k] * output.data_[k];
1009 TestCondition(sum_squared, n > kFadingThreshold); 1020 TestCondition(sum_squared, n > kFadingThreshold);
1010 } else { 1021 } else {
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
1164 const size_t kPayloadBytes = kBlockSize16kHz * sizeof(int16_t); 1175 const size_t kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
1165 uint8_t payload[kPayloadBytes]; 1176 uint8_t payload[kPayloadBytes];
1166 AudioFrame output; 1177 AudioFrame output;
1167 int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1; 1178 int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
1168 for (size_t n = 0; n < kPayloadBytes; ++n) { 1179 for (size_t n = 0; n < kPayloadBytes; ++n) {
1169 payload[n] = (rand() & 0xF0) + 1; // Non-zero random sequence. 1180 payload[n] = (rand() & 0xF0) + 1; // Non-zero random sequence.
1170 } 1181 }
1171 // Insert some packets which decode to noise. We are not interested in 1182 // Insert some packets which decode to noise. We are not interested in
1172 // actual decoded values. 1183 // actual decoded values.
1173 uint32_t receive_timestamp = 0; 1184 uint32_t receive_timestamp = 0;
1185 bool muted_output;
1174 for (int n = 0; n < 100; ++n) { 1186 for (int n = 0; n < 100; ++n) {
1175 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp)); 1187 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
1176 ASSERT_EQ(0, neteq_->GetAudio(&output)); 1188 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
1177 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_); 1189 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
1178 ASSERT_EQ(1u, output.num_channels_); 1190 ASSERT_EQ(1u, output.num_channels_);
1179 1191
1180 rtp_info.header.sequenceNumber++; 1192 rtp_info.header.sequenceNumber++;
1181 rtp_info.header.timestamp += kBlockSize16kHz; 1193 rtp_info.header.timestamp += kBlockSize16kHz;
1182 receive_timestamp += kBlockSize16kHz; 1194 receive_timestamp += kBlockSize16kHz;
1183 } 1195 }
1184 const int kNumSyncPackets = 10; 1196 const int kNumSyncPackets = 10;
1185 1197
1186 // Make sure sufficient number of sync packets are inserted that we can 1198 // Make sure sufficient number of sync packets are inserted that we can
1187 // conduct a test. 1199 // conduct a test.
1188 ASSERT_GT(kNumSyncPackets, algorithmic_frame_delay); 1200 ASSERT_GT(kNumSyncPackets, algorithmic_frame_delay);
1189 // Insert sync-packets, the decoded sequence should be all-zero. 1201 // Insert sync-packets, the decoded sequence should be all-zero.
1190 for (int n = 0; n < kNumSyncPackets; ++n) { 1202 for (int n = 0; n < kNumSyncPackets; ++n) {
1191 ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp)); 1203 ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
1192 ASSERT_EQ(0, neteq_->GetAudio(&output)); 1204 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
1205 ASSERT_FALSE(muted_output);
1193 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_); 1206 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
1194 ASSERT_EQ(1u, output.num_channels_); 1207 ASSERT_EQ(1u, output.num_channels_);
1195 if (n > algorithmic_frame_delay) { 1208 if (n > algorithmic_frame_delay) {
1196 EXPECT_TRUE(IsAllZero( 1209 EXPECT_TRUE(IsAllZero(
1197 output.data_, output.samples_per_channel_ * output.num_channels_)); 1210 output.data_, output.samples_per_channel_ * output.num_channels_));
1198 } 1211 }
1199 rtp_info.header.sequenceNumber++; 1212 rtp_info.header.sequenceNumber++;
1200 rtp_info.header.timestamp += kBlockSize16kHz; 1213 rtp_info.header.timestamp += kBlockSize16kHz;
1201 receive_timestamp += kBlockSize16kHz; 1214 receive_timestamp += kBlockSize16kHz;
1202 } 1215 }
1203 1216
1204 // We insert regular packets; if sync packets are not correctly buffered then 1217 // We insert regular packets; if sync packets are not correctly buffered then
1205 // network statistics would show some packet loss. 1218 // network statistics would show some packet loss.
1206 for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) { 1219 for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) {
1207 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp)); 1220 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
1208 ASSERT_EQ(0, neteq_->GetAudio(&output)); 1221 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
1222 ASSERT_FALSE(muted_output);
1209 if (n >= algorithmic_frame_delay + 1) { 1223 if (n >= algorithmic_frame_delay + 1) {
1210 // Expect that this frame contains samples from regular RTP. 1224 // Expect that this frame contains samples from regular RTP.
1211 EXPECT_TRUE(IsAllNonZero( 1225 EXPECT_TRUE(IsAllNonZero(
1212 output.data_, output.samples_per_channel_ * output.num_channels_)); 1226 output.data_, output.samples_per_channel_ * output.num_channels_));
1213 } 1227 }
1214 rtp_info.header.sequenceNumber++; 1228 rtp_info.header.sequenceNumber++;
1215 rtp_info.header.timestamp += kBlockSize16kHz; 1229 rtp_info.header.timestamp += kBlockSize16kHz;
1216 receive_timestamp += kBlockSize16kHz; 1230 receive_timestamp += kBlockSize16kHz;
1217 } 1231 }
1218 NetEqNetworkStatistics network_stats; 1232 NetEqNetworkStatistics network_stats;
(...skipping 15 matching lines...) Expand all
1234 const size_t kPayloadBytes = kBlockSize16kHz * sizeof(int16_t); 1248 const size_t kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
1235 uint8_t payload[kPayloadBytes]; 1249 uint8_t payload[kPayloadBytes];
1236 AudioFrame output; 1250 AudioFrame output;
1237 for (size_t n = 0; n < kPayloadBytes; ++n) { 1251 for (size_t n = 0; n < kPayloadBytes; ++n) {
1238 payload[n] = (rand() & 0xF0) + 1; // Non-zero random sequence. 1252 payload[n] = (rand() & 0xF0) + 1; // Non-zero random sequence.
1239 } 1253 }
1240 // Insert some packets which decode to noise. We are not interested in 1254 // Insert some packets which decode to noise. We are not interested in
1241 // actual decoded values. 1255 // actual decoded values.
1242 uint32_t receive_timestamp = 0; 1256 uint32_t receive_timestamp = 0;
1243 int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1; 1257 int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
1258 bool muted_output;
1244 for (int n = 0; n < algorithmic_frame_delay; ++n) { 1259 for (int n = 0; n < algorithmic_frame_delay; ++n) {
1245 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp)); 1260 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
1246 ASSERT_EQ(0, neteq_->GetAudio(&output)); 1261 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
1247 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_); 1262 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
1248 ASSERT_EQ(1u, output.num_channels_); 1263 ASSERT_EQ(1u, output.num_channels_);
1249 rtp_info.header.sequenceNumber++; 1264 rtp_info.header.sequenceNumber++;
1250 rtp_info.header.timestamp += kBlockSize16kHz; 1265 rtp_info.header.timestamp += kBlockSize16kHz;
1251 receive_timestamp += kBlockSize16kHz; 1266 receive_timestamp += kBlockSize16kHz;
1252 } 1267 }
1253 const int kNumSyncPackets = 10; 1268 const int kNumSyncPackets = 10;
1254 1269
1255 WebRtcRTPHeader first_sync_packet_rtp_info; 1270 WebRtcRTPHeader first_sync_packet_rtp_info;
1256 memcpy(&first_sync_packet_rtp_info, &rtp_info, sizeof(rtp_info)); 1271 memcpy(&first_sync_packet_rtp_info, &rtp_info, sizeof(rtp_info));
(...skipping 16 matching lines...) Expand all
1273 // Insert. 1288 // Insert.
1274 for (int n = 0; n < kNumSyncPackets; ++n) { 1289 for (int n = 0; n < kNumSyncPackets; ++n) {
1275 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp)); 1290 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
1276 rtp_info.header.sequenceNumber++; 1291 rtp_info.header.sequenceNumber++;
1277 rtp_info.header.timestamp += kBlockSize16kHz; 1292 rtp_info.header.timestamp += kBlockSize16kHz;
1278 receive_timestamp += kBlockSize16kHz; 1293 receive_timestamp += kBlockSize16kHz;
1279 } 1294 }
1280 1295
1281 // Decode. 1296 // Decode.
1282 for (int n = 0; n < kNumSyncPackets; ++n) { 1297 for (int n = 0; n < kNumSyncPackets; ++n) {
1283 ASSERT_EQ(0, neteq_->GetAudio(&output)); 1298 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
1299 ASSERT_FALSE(muted_output);
1284 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_); 1300 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
1285 ASSERT_EQ(1u, output.num_channels_); 1301 ASSERT_EQ(1u, output.num_channels_);
1286 EXPECT_TRUE(IsAllNonZero( 1302 EXPECT_TRUE(IsAllNonZero(
1287 output.data_, output.samples_per_channel_ * output.num_channels_)); 1303 output.data_, output.samples_per_channel_ * output.num_channels_));
1288 } 1304 }
1289 } 1305 }
1290 1306
1291 void NetEqDecodingTest::WrapTest(uint16_t start_seq_no, 1307 void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
1292 uint32_t start_timestamp, 1308 uint32_t start_timestamp,
1293 const std::set<uint16_t>& drop_seq_numbers, 1309 const std::set<uint16_t>& drop_seq_numbers,
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
1340 ++seq_no; 1356 ++seq_no;
1341 timestamp += kSamples; 1357 timestamp += kSamples;
1342 receive_timestamp += kSamples; 1358 receive_timestamp += kSamples;
1343 next_input_time_ms += static_cast<double>(kFrameSizeMs); 1359 next_input_time_ms += static_cast<double>(kFrameSizeMs);
1344 1360
1345 seq_no_wrapped |= seq_no < last_seq_no; 1361 seq_no_wrapped |= seq_no < last_seq_no;
1346 timestamp_wrapped |= timestamp < last_timestamp; 1362 timestamp_wrapped |= timestamp < last_timestamp;
1347 } 1363 }
1348 // Pull out data once. 1364 // Pull out data once.
1349 AudioFrame output; 1365 AudioFrame output;
1350 ASSERT_EQ(0, neteq_->GetAudio(&output)); 1366 bool muted_output;
1367 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted_output));
1351 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_); 1368 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
1352 ASSERT_EQ(1u, output.num_channels_); 1369 ASSERT_EQ(1u, output.num_channels_);
1353 1370
1354 // Expect delay (in samples) to be less than 2 packets. 1371 // Expect delay (in samples) to be less than 2 packets.
1355 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp(); 1372 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
1356 ASSERT_TRUE(playout_timestamp); 1373 ASSERT_TRUE(playout_timestamp);
1357 EXPECT_LE(timestamp - *playout_timestamp, 1374 EXPECT_LE(timestamp - *playout_timestamp,
1358 static_cast<uint32_t>(kSamples * 2)); 1375 static_cast<uint32_t>(kSamples * 2));
1359 } 1376 }
1360 // Make sure we have actually tested wrap-around. 1377 // Make sure we have actually tested wrap-around.
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
1396 const int kSampleRateKhz = 16; 1413 const int kSampleRateKhz = 16;
1397 const int kSamples = kFrameSizeMs * kSampleRateKhz; 1414 const int kSamples = kFrameSizeMs * kSampleRateKhz;
1398 const size_t kPayloadBytes = kSamples * 2; 1415 const size_t kPayloadBytes = kSamples * 2;
1399 1416
1400 const int algorithmic_delay_samples = std::max( 1417 const int algorithmic_delay_samples = std::max(
1401 algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8); 1418 algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
1402 // Insert three speech packets. Three are needed to get the frame length 1419 // Insert three speech packets. Three are needed to get the frame length
1403 // correct. 1420 // correct.
1404 uint8_t payload[kPayloadBytes] = {0}; 1421 uint8_t payload[kPayloadBytes] = {0};
1405 WebRtcRTPHeader rtp_info; 1422 WebRtcRTPHeader rtp_info;
1423 bool muted_output;
1406 for (int i = 0; i < 3; ++i) { 1424 for (int i = 0; i < 3; ++i) {
1407 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 1425 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1408 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 1426 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1409 ++seq_no; 1427 ++seq_no;
1410 timestamp += kSamples; 1428 timestamp += kSamples;
1411 1429
1412 // Pull audio once. 1430 // Pull audio once.
1413 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 1431 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1414 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1432 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1415 } 1433 }
1416 // Verify speech output. 1434 // Verify speech output.
1417 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 1435 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1418 1436
1419 // Insert same CNG packet twice. 1437 // Insert same CNG packet twice.
1420 const int kCngPeriodMs = 100; 1438 const int kCngPeriodMs = 100;
1421 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz; 1439 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
1422 size_t payload_len; 1440 size_t payload_len;
1423 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 1441 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
1424 // This is the first time this CNG packet is inserted. 1442 // This is the first time this CNG packet is inserted.
1425 ASSERT_EQ( 1443 ASSERT_EQ(
1426 0, neteq_->InsertPacket( 1444 0, neteq_->InsertPacket(
1427 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 1445 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
1428 1446
1429 // Pull audio once and make sure CNG is played. 1447 // Pull audio once and make sure CNG is played.
1430 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 1448 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1431 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1449 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1432 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 1450 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1433 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG. 1451 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
1434 EXPECT_EQ(timestamp - algorithmic_delay_samples, 1452 EXPECT_EQ(timestamp - algorithmic_delay_samples,
1435 out_frame_.timestamp_ + out_frame_.samples_per_channel_); 1453 out_frame_.timestamp_ + out_frame_.samples_per_channel_);
1436 1454
1437 // Insert the same CNG packet again. Note that at this point it is old, since 1455 // Insert the same CNG packet again. Note that at this point it is old, since
1438 // we have already decoded the first copy of it. 1456 // we have already decoded the first copy of it.
1439 ASSERT_EQ( 1457 ASSERT_EQ(
1440 0, neteq_->InsertPacket( 1458 0, neteq_->InsertPacket(
1441 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 1459 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
1442 1460
1443 // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since 1461 // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
1444 // we have already pulled out CNG once. 1462 // we have already pulled out CNG once.
1445 for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) { 1463 for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
1446 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 1464 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1447 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1465 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1448 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 1466 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1449 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG. 1467 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
1450 EXPECT_EQ(timestamp - algorithmic_delay_samples, 1468 EXPECT_EQ(timestamp - algorithmic_delay_samples,
1451 out_frame_.timestamp_ + out_frame_.samples_per_channel_); 1469 out_frame_.timestamp_ + out_frame_.samples_per_channel_);
1452 } 1470 }
1453 1471
1454 // Insert speech again. 1472 // Insert speech again.
1455 ++seq_no; 1473 ++seq_no;
1456 timestamp += kCngPeriodSamples; 1474 timestamp += kCngPeriodSamples;
1457 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 1475 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1458 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 1476 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1459 1477
1460 // Pull audio once and verify that the output is speech again. 1478 // Pull audio once and verify that the output is speech again.
1461 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 1479 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1462 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1480 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1463 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 1481 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1464 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp(); 1482 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
1465 ASSERT_TRUE(playout_timestamp); 1483 ASSERT_TRUE(playout_timestamp);
1466 EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples, 1484 EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
1467 *playout_timestamp); 1485 *playout_timestamp);
1468 } 1486 }
1469 1487
1470 rtc::Optional<uint32_t> NetEqDecodingTest::PlayoutTimestamp() { 1488 rtc::Optional<uint32_t> NetEqDecodingTest::PlayoutTimestamp() {
1471 return neteq_->GetPlayoutTimestamp(); 1489 return neteq_->GetPlayoutTimestamp();
(...skipping 17 matching lines...) Expand all
1489 1507
1490 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len); 1508 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
1491 ASSERT_EQ( 1509 ASSERT_EQ(
1492 NetEq::kOK, 1510 NetEq::kOK,
1493 neteq_->InsertPacket( 1511 neteq_->InsertPacket(
1494 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0)); 1512 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
1495 ++seq_no; 1513 ++seq_no;
1496 timestamp += kCngPeriodSamples; 1514 timestamp += kCngPeriodSamples;
1497 1515
1498 // Pull audio once and make sure CNG is played. 1516 // Pull audio once and make sure CNG is played.
1499 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 1517 bool muted_output;
1518 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1500 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1519 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1501 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); 1520 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1502 1521
1503 // Insert some speech packets. 1522 // Insert some speech packets.
1504 for (int i = 0; i < 3; ++i) { 1523 for (int i = 0; i < 3; ++i) {
1505 PopulateRtpInfo(seq_no, timestamp, &rtp_info); 1524 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1506 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 1525 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1507 ++seq_no; 1526 ++seq_no;
1508 timestamp += kSamples; 1527 timestamp += kSamples;
1509 1528
1510 // Pull audio once. 1529 // Pull audio once.
1511 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_)); 1530 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1512 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_); 1531 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
1513 } 1532 }
1514 // Verify speech output. 1533 // Verify speech output.
1515 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); 1534 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1516 } 1535 }
1536
1537 class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
1538 public:
1539 NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
1540 config_.enable_muted_state = true;
1541 }
1542 };
1543
1544 // Verifies that NetEq goes in and out of muted state as expected.
1545 TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
1546 const size_t kSamples = 10 * 16;
1547 const size_t kPayloadBytes = kSamples * 2;
1548 // Insert one speech packet.
1549 uint8_t payload[kPayloadBytes] = {0};
1550 WebRtcRTPHeader rtp_info;
1551 PopulateRtpInfo(0, 0, &rtp_info);
1552 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1553
1554 // Pull out data once.
1555 bool muted_output;
1556 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1557 EXPECT_FALSE(muted_output);
1558
1559 // Pull data until expand starts.
1560 int counter = 0;
1561 while (out_frame_.speech_type_ != AudioFrame::kPLC) {
1562 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1563 EXPECT_FALSE(muted_output);
1564 ASSERT_LT(counter++, 1000) << "Test timed out";
1565 }
1566
1567 // Pull data until faded out.
1568 while (!muted_output) {
1569 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1570 ASSERT_LT(counter++, 1000) << "Test timed out";
1571 }
1572
1573 // Verify that output audio is not written during muted mode. Other parameters
minyue-webrtc 2016/05/11 11:29:54 is this important?
hlundin-webrtc 2016/05/12 07:44:40 Yes. One of the points with muted state is that we
minyue-webrtc 2016/05/12 10:56:07 But this is not sufficient in checking the efficie
hlundin-webrtc 2016/05/12 12:06:20 I updated the comment in neteq.h regarding this. S
1574 // should be correct, though.
1575 AudioFrame new_frame;
1576 for (auto& d : new_frame.data_) {
minyue-webrtc 2016/05/11 11:29:54 and how about memset and memcmp
hlundin-webrtc 2016/05/12 07:44:40 I cannot set a non-zero value to elements wider th
1577 d = 17;
1578 }
1579 EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted_output));
1580 EXPECT_TRUE(muted_output);
1581 for (auto d : new_frame.data_) {
1582 EXPECT_EQ(17, d);
1583 }
1584 EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
1585 new_frame.timestamp_);
1586 EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
1587 EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
1588 EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
1589 EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
1590 EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);
1591
1592 // Insert new data. Timestamp is corrected for the time elapsed since the last
1593 // packet.
1594 PopulateRtpInfo(0, 16 * 10 * counter, &rtp_info);
minyue-webrtc 2016/05/11 11:29:54 what if the timestamp is not 16 * 10 * counter
hlundin-webrtc 2016/05/12 07:44:40 I changed to kSamples to be consistent within the
1595 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1596
1597 while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
1598 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted_output));
1599 ASSERT_LT(counter++, 1000) << "Test timed out";
1600 }
1601 EXPECT_FALSE(muted_output);
1602 }
1603
1604 class NetEqDecodingTestTwoInstances : public NetEqDecodingTest {
1605 public:
1606 NetEqDecodingTestTwoInstances() : NetEqDecodingTest() {}
1607
1608 void SetUp() override {
1609 NetEqDecodingTest::SetUp();
1610 config2_ = config_;
1611 }
1612
1613 void CreateSecondInstance() {
1614 neteq2_.reset(NetEq::Create(config2_));
1615 ASSERT_TRUE(neteq2_);
1616 LoadDecoders(neteq2_.get());
1617 }
1618
1619 protected:
1620 std::unique_ptr<NetEq> neteq2_;
1621 NetEq::Config config2_;
1622 };
1623
1624 namespace {
1625 ::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
1626 const AudioFrame& b) {
1627 if (a.timestamp_ != b.timestamp_)
1628 return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
1629 << " != " << b.timestamp_ << ")";
1630 if (a.sample_rate_hz_ != b.sample_rate_hz_)
1631 return ::testing::AssertionFailure() << "sample_rate_hz_ diff ("
1632 << a.sample_rate_hz_
1633 << " != " << b.sample_rate_hz_ << ")";
1634 if (a.samples_per_channel_ != b.samples_per_channel_)
1635 return ::testing::AssertionFailure()
1636 << "samples_per_channel_ diff (" << a.samples_per_channel_
1637 << " != " << b.samples_per_channel_ << ")";
1638 if (a.num_channels_ != b.num_channels_)
1639 return ::testing::AssertionFailure() << "num_channels_ diff ("
1640 << a.num_channels_
1641 << " != " << b.num_channels_ << ")";
1642 if (a.speech_type_ != b.speech_type_)
1643 return ::testing::AssertionFailure() << "speech_type_ diff ("
1644 << a.speech_type_
1645 << " != " << b.speech_type_ << ")";
1646 if (a.vad_activity_ != b.vad_activity_)
1647 return ::testing::AssertionFailure() << "vad_activity_ diff ("
1648 << a.vad_activity_
1649 << " != " << b.vad_activity_ << ")";
1650 return ::testing::AssertionSuccess();
1651 }
1652
1653 ::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
1654 const AudioFrame& b) {
1655 ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
1656 if (!res)
1657 return res;
1658 if (memcmp(a.data_, b.data_, a.samples_per_channel_ * a.num_channels_ *
1659 sizeof(a.data_[0])) != 0) {
minyue-webrtc 2016/05/11 11:29:54 weird wrapping
hlundin-webrtc 2016/05/12 07:44:40 Better?
minyue-webrtc 2016/05/12 10:56:07 yes
1660 return ::testing::AssertionFailure() << "data_ diff";
1661 }
1662 return ::testing::AssertionSuccess();
1663 }
1664
1665 } // namespace
1666
1667 TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
1668 ASSERT_FALSE(config_.enable_muted_state);
1669 config2_.enable_muted_state = true;
1670 CreateSecondInstance();
1671
1672 // Insert one speech packet into both NetEqs.
1673 const size_t kSamples = 10 * 16;
1674 const size_t kPayloadBytes = kSamples * 2;
1675 uint8_t payload[kPayloadBytes] = {0};
1676 WebRtcRTPHeader rtp_info;
1677 PopulateRtpInfo(0, 0, &rtp_info);
1678 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1679 EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
1680
1681 AudioFrame out_frame1, out_frame2;
1682 bool muted_output;
1683 for (int i = 0; i < 1000; ++i) {
1684 std::ostringstream ss;
1685 ss << "i = " << i;
1686 SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
1687 EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted_output));
1688 EXPECT_FALSE(muted_output);
1689 EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted_output));
1690 if (muted_output) {
1691 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
1692 } else {
1693 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
1694 }
1695 }
1696 EXPECT_TRUE(muted_output);
1697
1698 // Insert new data. Timestamp is corrected for the time elapsed since the last
1699 // packet.
1700 PopulateRtpInfo(0, 16 * 10 * 1000, &rtp_info);
1701 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1702 EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
1703
1704 int counter = 0;
1705 while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
1706 ASSERT_LT(counter++, 1000) << "Test timed out";
1707 std::ostringstream ss;
1708 ss << "counter = " << counter;
1709 SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
1710 EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted_output));
1711 EXPECT_FALSE(muted_output);
1712 EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted_output));
1713 if (muted_output) {
1714 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
1715 } else {
1716 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
1717 }
1718 }
1719 EXPECT_FALSE(muted_output);
1720 }
1721
1517 } // namespace webrtc 1722 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698