Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_coding/neteq/neteq_unittest.cc

Issue 2750783004: Add mute state field to AudioFrame. (Closed)
Patch Set: Third round of comments (created 3 years, 9 months ago)
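This patch set continues migrating the NetEq unit tests from direct access to the AudioFrame::data_ member onto the accessor interface introduced together with the new mute state field: reads go through data() and writes through mutable_data(). Below is a minimal C++ sketch of that accessor pattern, not the real webrtc::AudioFrame; the class name, the Mute() helper, and the exact muting semantics are assumptions made for illustration, while data(), mutable_data(), kMaxDataSizeSamples, and kMaxDataSizeBytes are names the updated tests actually use.

// Illustrative sketch only. The real AudioFrame also carries timestamp_,
// sample_rate_hz_, num_channels_, samples_per_channel_, speech_type_,
// vad_activity_, etc., and its muting behavior may differ in detail.
#include <cstddef>
#include <cstdint>
#include <cstring>

class AudioFrameSketch {
 public:
  static constexpr size_t kMaxDataSizeSamples = 3840;  // capacity assumed here
  static constexpr size_t kMaxDataSizeBytes =
      kMaxDataSizeSamples * sizeof(int16_t);

  // Read-only view of the interleaved samples.
  const int16_t* data() const { return data_; }

  // Writable view; handing out a writable pointer clears the muted flag,
  // since the caller is about to fill the frame with real audio.
  int16_t* mutable_data() {
    muted_ = false;
    return data_;
  }

  // Hypothetical helper: mark the frame muted and zero its samples.
  void Mute() {
    std::memset(data_, 0, kMaxDataSizeBytes);
    muted_ = true;
  }

  bool muted() const { return muted_; }

 private:
  bool muted_ = false;
  int16_t data_[kMaxDataSizeSamples] = {0};
};

In the diff below the test code follows this split consistently: loops that write samples first take int16_t* p = out_frame_.mutable_data(), loops that only inspect samples take const int16_t* p = out_frame_.data(), and the old out_frame_.data_[i] indexing disappears.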
1 /* 1 /*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 135 matching lines...)
146 ASSERT_EQ(true, 146 ASSERT_EQ(true,
147 neteq->RegisterPayloadType(98, SdpAudioFormat("cn", 16000, 1))); 147 neteq->RegisterPayloadType(98, SdpAudioFormat("cn", 16000, 1)));
148 } 148 }
149 } // namespace 149 } // namespace
150 150
151 class ResultSink { 151 class ResultSink {
152 public: 152 public:
153 explicit ResultSink(const std::string& output_file); 153 explicit ResultSink(const std::string& output_file);
154 ~ResultSink(); 154 ~ResultSink();
155 155
156 template<typename T, size_t n> void AddResult( 156 template<typename T> void AddResult(const T* test_results, size_t length);
157 const T (&test_results)[n],
158 size_t length);
159 157
160 void AddResult(const NetEqNetworkStatistics& stats); 158 void AddResult(const NetEqNetworkStatistics& stats);
161 void AddResult(const RtcpStatistics& stats); 159 void AddResult(const RtcpStatistics& stats);
162 160
163 void VerifyChecksum(const std::string& ref_check_sum); 161 void VerifyChecksum(const std::string& ref_check_sum);
164 162
165 private: 163 private:
166 FILE* output_fp_; 164 FILE* output_fp_;
167 std::unique_ptr<rtc::MessageDigest> digest_; 165 std::unique_ptr<rtc::MessageDigest> digest_;
168 }; 166 };
169 167
170 ResultSink::ResultSink(const std::string &output_file) 168 ResultSink::ResultSink(const std::string &output_file)
171 : output_fp_(nullptr), 169 : output_fp_(nullptr),
172 digest_(new rtc::Sha1Digest()) { 170 digest_(new rtc::Sha1Digest()) {
173 if (!output_file.empty()) { 171 if (!output_file.empty()) {
174 output_fp_ = fopen(output_file.c_str(), "wb"); 172 output_fp_ = fopen(output_file.c_str(), "wb");
175 EXPECT_TRUE(output_fp_ != NULL); 173 EXPECT_TRUE(output_fp_ != NULL);
176 } 174 }
177 } 175 }
178 176
179 ResultSink::~ResultSink() { 177 ResultSink::~ResultSink() {
180 if (output_fp_) 178 if (output_fp_)
181 fclose(output_fp_); 179 fclose(output_fp_);
182 } 180 }
183 181
184 template<typename T, size_t n> 182 template<typename T>
185 void ResultSink::AddResult(const T (&test_results)[n], size_t length) { 183 void ResultSink::AddResult(const T* test_results, size_t length) {
186 if (output_fp_) { 184 if (output_fp_) {
187 ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_)); 185 ASSERT_EQ(length, fwrite(test_results, sizeof(T), length, output_fp_));
188 } 186 }
189 digest_->Update(&test_results, sizeof(T) * length); 187 digest_->Update(test_results, sizeof(T) * length);
190 } 188 }
191 189
192 void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) { 190 void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) {
193 #ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT 191 #ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
194 neteq_unittest::NetEqNetworkStatistics stats; 192 neteq_unittest::NetEqNetworkStatistics stats;
195 Convert(stats_raw, &stats); 193 Convert(stats_raw, &stats);
196 194
197 std::string stats_string; 195 std::string stats_string;
198 ASSERT_TRUE(stats.SerializeToString(&stats_string)); 196 ASSERT_TRUE(stats.SerializeToString(&stats_string));
199 AddMessage(output_fp_, digest_.get(), stats_string); 197 AddMessage(output_fp_, digest_.get(), stats_string);
(...skipping 170 matching lines...)
370 ResultSink rtcp_stats(rtcp_out_file); 368 ResultSink rtcp_stats(rtcp_out_file);
371 369
372 packet_ = rtp_source_->NextPacket(); 370 packet_ = rtp_source_->NextPacket();
373 int i = 0; 371 int i = 0;
374 while (packet_) { 372 while (packet_) {
375 std::ostringstream ss; 373 std::ostringstream ss;
376 ss << "Lap number " << i++ << " in DecodeAndCompare while loop"; 374 ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
377 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure. 375 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
378 ASSERT_NO_FATAL_FAILURE(Process()); 376 ASSERT_NO_FATAL_FAILURE(Process());
379 ASSERT_NO_FATAL_FAILURE(output.AddResult( 377 ASSERT_NO_FATAL_FAILURE(output.AddResult(
380 out_frame_.data_, out_frame_.samples_per_channel_)); 378 out_frame_.data(), out_frame_.samples_per_channel_));
381 379
382 // Query the network statistics API once per second 380 // Query the network statistics API once per second
383 if (sim_clock_ % 1000 == 0) { 381 if (sim_clock_ % 1000 == 0) {
384 // Process NetworkStatistics. 382 // Process NetworkStatistics.
385 NetEqNetworkStatistics current_network_stats; 383 NetEqNetworkStatistics current_network_stats;
386 ASSERT_EQ(0, neteq_->NetworkStatistics(&current_network_stats)); 384 ASSERT_EQ(0, neteq_->NetworkStatistics(&current_network_stats));
387 ASSERT_NO_FATAL_FAILURE(network_stats.AddResult(current_network_stats)); 385 ASSERT_NO_FATAL_FAILURE(network_stats.AddResult(current_network_stats));
388 386
389 // Compare with CurrentDelay, which should be identical. 387 // Compare with CurrentDelay, which should be identical.
390 EXPECT_EQ(current_network_stats.current_buffer_size_ms, 388 EXPECT_EQ(current_network_stats.current_buffer_size_ms,
(...skipping 452 matching lines...)
843 841
844 TEST_F(NetEqDecodingTest, MAYBE_DecoderError) { 842 TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
845 const size_t kPayloadBytes = 100; 843 const size_t kPayloadBytes = 100;
846 uint8_t payload[kPayloadBytes] = {0}; 844 uint8_t payload[kPayloadBytes] = {0};
847 WebRtcRTPHeader rtp_info; 845 WebRtcRTPHeader rtp_info;
848 PopulateRtpInfo(0, 0, &rtp_info); 846 PopulateRtpInfo(0, 0, &rtp_info);
849 rtp_info.header.payloadType = 103; // iSAC, but the payload is invalid. 847 rtp_info.header.payloadType = 103; // iSAC, but the payload is invalid.
850 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0)); 848 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
851 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call 849 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
852 // to GetAudio. 850 // to GetAudio.
851 int16_t* out_frame_data = out_frame_.mutable_data();
853 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { 852 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
854 out_frame_.data_[i] = 1; 853 out_frame_data[i] = 1;
855 } 854 }
856 bool muted; 855 bool muted;
857 EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted)); 856 EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
858 ASSERT_FALSE(muted); 857 ASSERT_FALSE(muted);
859 // Verify that there is a decoder error to check. 858 // Verify that there is a decoder error to check.
860 EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError()); 859 EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
861 860
862 enum NetEqDecoderError { 861 enum NetEqDecoderError {
863 ISAC_LENGTH_MISMATCH = 6730, 862 ISAC_LENGTH_MISMATCH = 6730,
864 ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH = 6640 863 ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH = 6640
865 }; 864 };
866 #if defined(WEBRTC_CODEC_ISAC) 865 #if defined(WEBRTC_CODEC_ISAC)
867 EXPECT_EQ(ISAC_LENGTH_MISMATCH, neteq_->LastDecoderError()); 866 EXPECT_EQ(ISAC_LENGTH_MISMATCH, neteq_->LastDecoderError());
868 #elif defined(WEBRTC_CODEC_ISACFX) 867 #elif defined(WEBRTC_CODEC_ISACFX)
869 EXPECT_EQ(ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH, neteq_->LastDecoderError()); 868 EXPECT_EQ(ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH, neteq_->LastDecoderError());
870 #endif 869 #endif
871 // Verify that the first 160 samples are set to 0, and that the remaining 870 // Verify that the first 160 samples are set to 0.
872 // samples are left unmodified.
873 static const int kExpectedOutputLength = 160; // 10 ms at 16 kHz sample rate. 871 static const int kExpectedOutputLength = 160; // 10 ms at 16 kHz sample rate.
872 const int16_t* const_out_frame_data = out_frame_.data();
874 for (int i = 0; i < kExpectedOutputLength; ++i) { 873 for (int i = 0; i < kExpectedOutputLength; ++i) {
875 std::ostringstream ss; 874 std::ostringstream ss;
876 ss << "i = " << i; 875 ss << "i = " << i;
877 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure. 876 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
878 EXPECT_EQ(0, out_frame_.data_[i]); 877 EXPECT_EQ(0, const_out_frame_data[i]);
879 }
880 for (size_t i = kExpectedOutputLength; i < AudioFrame::kMaxDataSizeSamples;
881 ++i) {
882 std::ostringstream ss;
883 ss << "i = " << i;
884 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
885 EXPECT_EQ(1, out_frame_.data_[i]);
886 } 878 }
887 } 879 }
888 880
889 TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) { 881 TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
890 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call 882 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
891 // to GetAudio. 883 // to GetAudio.
884 int16_t* out_frame_data = out_frame_.mutable_data();
892 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { 885 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
893 out_frame_.data_[i] = 1; 886 out_frame_data[i] = 1;
894 } 887 }
895 bool muted; 888 bool muted;
896 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); 889 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
897 ASSERT_FALSE(muted); 890 ASSERT_FALSE(muted);
898 // Verify that the first block of samples is set to 0. 891 // Verify that the first block of samples is set to 0.
899 static const int kExpectedOutputLength = 892 static const int kExpectedOutputLength =
900 kInitSampleRateHz / 100; // 10 ms at initial sample rate. 893 kInitSampleRateHz / 100; // 10 ms at initial sample rate.
894 const int16_t* const_out_frame_data = out_frame_.data();
901 for (int i = 0; i < kExpectedOutputLength; ++i) { 895 for (int i = 0; i < kExpectedOutputLength; ++i) {
902 std::ostringstream ss; 896 std::ostringstream ss;
903 ss << "i = " << i; 897 ss << "i = " << i;
904 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure. 898 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
905 EXPECT_EQ(0, out_frame_.data_[i]); 899 EXPECT_EQ(0, const_out_frame_data[i]);
906 } 900 }
907 // Verify that the sample rate did not change from the initial configuration. 901 // Verify that the sample rate did not change from the initial configuration.
908 EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz()); 902 EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
909 } 903 }
910 904
911 class NetEqBgnTest : public NetEqDecodingTest { 905 class NetEqBgnTest : public NetEqDecodingTest {
912 protected: 906 protected:
913 virtual void TestCondition(double sum_squared_noise, 907 virtual void TestCondition(double sum_squared_noise,
914 bool should_be_faded) = 0; 908 bool should_be_faded) = 0;
915 909
(...skipping 65 matching lines...)
981 // To be able to test the fading of background noise we need at lease to 975 // To be able to test the fading of background noise we need at lease to
982 // pull 611 frames. 976 // pull 611 frames.
983 const int kFadingThreshold = 611; 977 const int kFadingThreshold = 611;
984 978
985 // Test several CNG-to-PLC packet for the expected behavior. The number 20 979 // Test several CNG-to-PLC packet for the expected behavior. The number 20
986 // is arbitrary, but sufficiently large to test enough number of frames. 980 // is arbitrary, but sufficiently large to test enough number of frames.
987 const int kNumPlcToCngTestFrames = 20; 981 const int kNumPlcToCngTestFrames = 20;
988 bool plc_to_cng = false; 982 bool plc_to_cng = false;
989 for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) { 983 for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
990 output.Reset(); 984 output.Reset();
991 memset(output.data_, 1, sizeof(output.data_)); // Set to non-zero. 985 // Set to non-zero.
986 memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
992 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted)); 987 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
993 ASSERT_FALSE(muted); 988 ASSERT_FALSE(muted);
994 ASSERT_EQ(1u, output.num_channels_); 989 ASSERT_EQ(1u, output.num_channels_);
995 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_); 990 ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
996 if (output.speech_type_ == AudioFrame::kPLCCNG) { 991 if (output.speech_type_ == AudioFrame::kPLCCNG) {
997 plc_to_cng = true; 992 plc_to_cng = true;
998 double sum_squared = 0; 993 double sum_squared = 0;
994 const int16_t* output_data = output.data();
999 for (size_t k = 0; 995 for (size_t k = 0;
1000 k < output.num_channels_ * output.samples_per_channel_; ++k) 996 k < output.num_channels_ * output.samples_per_channel_; ++k)
1001 sum_squared += output.data_[k] * output.data_[k]; 997 sum_squared += output_data[k] * output_data[k];
1002 TestCondition(sum_squared, n > kFadingThreshold); 998 TestCondition(sum_squared, n > kFadingThreshold);
1003 } else { 999 } else {
1004 EXPECT_EQ(AudioFrame::kPLC, output.speech_type_); 1000 EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
1005 } 1001 }
1006 } 1002 }
1007 EXPECT_TRUE(plc_to_cng); // Just to be sure that PLC-to-CNG has occurred. 1003 EXPECT_TRUE(plc_to_cng); // Just to be sure that PLC-to-CNG has occurred.
1008 } 1004 }
1009 }; 1005 };
1010 1006
1011 class NetEqBgnTestOn : public NetEqBgnTest { 1007 class NetEqBgnTestOn : public NetEqBgnTest {
(...skipping 338 matching lines...)
1350 // Insert one speech packet. 1346 // Insert one speech packet.
1351 InsertPacket(0); 1347 InsertPacket(0);
1352 // Pull out audio once and expect it not to be muted. 1348 // Pull out audio once and expect it not to be muted.
1353 EXPECT_FALSE(GetAudioReturnMuted()); 1349 EXPECT_FALSE(GetAudioReturnMuted());
1354 // Pull data until faded out. 1350 // Pull data until faded out.
1355 GetAudioUntilMuted(); 1351 GetAudioUntilMuted();
1356 1352
1357 // Verify that output audio is not written during muted mode. Other parameters 1353 // Verify that output audio is not written during muted mode. Other parameters
1358 // should be correct, though. 1354 // should be correct, though.
1359 AudioFrame new_frame; 1355 AudioFrame new_frame;
1360 for (auto& d : new_frame.data_) { 1356 int16_t* frame_data = new_frame.mutable_data();
1361 d = 17; 1357 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
1358 frame_data[i] = 17;
1362 } 1359 }
1363 bool muted; 1360 bool muted;
1364 EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted)); 1361 EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
1365 EXPECT_TRUE(muted); 1362 EXPECT_TRUE(muted);
1366 for (auto d : new_frame.data_) { 1363 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
1367 EXPECT_EQ(17, d); 1364 EXPECT_EQ(17, frame_data[i]);
1368 } 1365 }
1369 EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_, 1366 EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
1370 new_frame.timestamp_); 1367 new_frame.timestamp_);
1371 EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_); 1368 EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
1372 EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_); 1369 EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
1373 EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_); 1370 EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
1374 EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_); 1371 EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
1375 EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_); 1372 EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);
1376 1373
1377 // Insert new data. Timestamp is corrected for the time elapsed since the last 1374 // Insert new data. Timestamp is corrected for the time elapsed since the last
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
1516 << " != " << b.vad_activity_ << ")"; 1513 << " != " << b.vad_activity_ << ")";
1517 return ::testing::AssertionSuccess(); 1514 return ::testing::AssertionSuccess();
1518 } 1515 }
1519 1516
1520 ::testing::AssertionResult AudioFramesEqual(const AudioFrame& a, 1517 ::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
1521 const AudioFrame& b) { 1518 const AudioFrame& b) {
1522 ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b); 1519 ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
1523 if (!res) 1520 if (!res)
1524 return res; 1521 return res;
1525 if (memcmp( 1522 if (memcmp(
1526 a.data_, b.data_, 1523 a.data(), b.data(),
1527 a.samples_per_channel_ * a.num_channels_ * sizeof(a.data_[0])) != 0) { 1524 a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) != 0) {
1528 return ::testing::AssertionFailure() << "data_ diff"; 1525 return ::testing::AssertionFailure() << "data_ diff";
1529 } 1526 }
1530 return ::testing::AssertionSuccess(); 1527 return ::testing::AssertionSuccess();
1531 } 1528 }
1532 1529
1533 } // namespace 1530 } // namespace
1534 1531
1535 TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) { 1532 TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
1536 ASSERT_FALSE(config_.enable_muted_state); 1533 ASSERT_FALSE(config_.enable_muted_state);
1537 config2_.enable_muted_state = true; 1534 config2_.enable_muted_state = true;
(...skipping 43 matching lines...)
1581 if (muted) { 1578 if (muted) {
1582 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2)); 1579 EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
1583 } else { 1580 } else {
1584 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2)); 1581 EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
1585 } 1582 }
1586 } 1583 }
1587 EXPECT_FALSE(muted); 1584 EXPECT_FALSE(muted);
1588 } 1585 }
1589 1586
1590 } // namespace webrtc 1587 } // namespace webrtc
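One further note on the ResultSink change near the top of the file: the template overload AddResult(const T (&test_results)[n], size_t length) could deduce the array bound n only because the old out_frame_.data_ was a real array member. Since data() returns a pointer, the bound is no longer deducible, which is why the signature becomes AddResult(const T* test_results, size_t length). A small self-contained sketch of that deduction issue follows, with illustrative names (AddResultOld and AddResultNew are not functions from this file):

#include <cstddef>
#include <cstdint>

// Pre-patch shape: deduces the array bound n from a true array argument.
template <typename T, size_t n>
void AddResultOld(const T (&test_results)[n], size_t length) {}

// Post-patch shape: accepts any pointer, e.g. what AudioFrame::data() returns.
template <typename T>
void AddResultNew(const T* test_results, size_t length) {}

int main() {
  int16_t raw_array[160] = {0};         // stands in for the old data_ member
  const int16_t* accessor = raw_array;  // stands in for the new data() value

  AddResultOld(raw_array, 160);    // OK: n deduced as 160
  AddResultNew(accessor, 160);     // OK: pointer overload
  // AddResultOld(accessor, 160);  // would not compile: n cannot be deduced
  //                               // from a pointer, hence the new signature.
  return 0;
}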