Index: webrtc/modules/audio_coding/neteq/neteq_unittest.cc
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 82529f7dfb335b2f30deccdd27cb34459b0fb6c7..31c756b2eb3174d4faf4aac6f3e53f553a075221 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -153,9 +153,7 @@ class ResultSink {
   explicit ResultSink(const std::string& output_file);
   ~ResultSink();
 
-  template<typename T, size_t n> void AddResult(
-      const T (&test_results)[n],
-      size_t length);
+  template<typename T> void AddResult(const T* test_results, size_t length);
 
   void AddResult(const NetEqNetworkStatistics& stats);
   void AddResult(const RtcpStatistics& stats);
@@ -181,12 +179,12 @@ ResultSink::~ResultSink() {
     fclose(output_fp_);
 }
 
-template<typename T, size_t n>
-void ResultSink::AddResult(const T (&test_results)[n], size_t length) {
+template<typename T>
+void ResultSink::AddResult(const T* test_results, size_t length) {
   if (output_fp_) {
-    ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_));
+    ASSERT_EQ(length, fwrite(test_results, sizeof(T), length, output_fp_));
   }
-  digest_->Update(&test_results, sizeof(T) * length);
+  digest_->Update(test_results, sizeof(T) * length);
 }
 
 void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) {
@@ -377,7 +375,7 @@ void NetEqDecodingTest::DecodeAndCompare(
     SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
     ASSERT_NO_FATAL_FAILURE(Process());
     ASSERT_NO_FATAL_FAILURE(output.AddResult(
-        out_frame_.data_, out_frame_.samples_per_channel_));
+        out_frame_.data(), out_frame_.samples_per_channel_));
 
     // Query the network statistics API once per second
     if (sim_clock_ % 1000 == 0) {
@@ -850,8 +848,9 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
   EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
   // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
   // to GetAudio.
+  int16_t* out_frame_data = out_frame_.mutable_data();
   for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
-    out_frame_.data_[i] = 1;
+    out_frame_data[i] = 1;
   }
   bool muted;
   EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
@@ -868,29 +867,23 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
 #elif defined(WEBRTC_CODEC_ISACFX)
   EXPECT_EQ(ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH, neteq_->LastDecoderError());
 #endif
-  // Verify that the first 160 samples are set to 0, and that the remaining
-  // samples are left unmodified.
+  // Verify that the first 160 samples are set to 0.
   static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
+  const int16_t* const_out_frame_data = out_frame_.data();
   for (int i = 0; i < kExpectedOutputLength; ++i) {
     std::ostringstream ss;
     ss << "i = " << i;
     SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
-    EXPECT_EQ(0, out_frame_.data_[i]);
-  }
-  for (size_t i = kExpectedOutputLength; i < AudioFrame::kMaxDataSizeSamples;
-       ++i) {
-    std::ostringstream ss;
-    ss << "i = " << i;
-    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
-    EXPECT_EQ(1, out_frame_.data_[i]);
+    EXPECT_EQ(0, const_out_frame_data[i]);
   }
 }
 
 TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
   // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
   // to GetAudio.
+  int16_t* out_frame_data = out_frame_.mutable_data();
   for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
-    out_frame_.data_[i] = 1;
+    out_frame_data[i] = 1;
   }
   bool muted;
   EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
@@ -898,11 +891,12 @@ TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
   // Verify that the first block of samples is set to 0.
   static const int kExpectedOutputLength =
       kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
+  const int16_t* const_out_frame_data = out_frame_.data();
   for (int i = 0; i < kExpectedOutputLength; ++i) {
     std::ostringstream ss;
     ss << "i = " << i;
     SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
-    EXPECT_EQ(0, out_frame_.data_[i]);
+    EXPECT_EQ(0, const_out_frame_data[i]);
   }
   // Verify that the sample rate did not change from the initial configuration.
   EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
@@ -988,7 +982,8 @@ class NetEqBgnTest : public NetEqDecodingTest {
     bool plc_to_cng = false;
     for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
       output.Reset();
-      memset(output.data_, 1, sizeof(output.data_));  // Set to non-zero.
+      // Set to non-zero.
+      memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
       ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
       ASSERT_FALSE(muted);
       ASSERT_EQ(1u, output.num_channels_);
@@ -996,9 +991,10 @@ class NetEqBgnTest : public NetEqDecodingTest {
       if (output.speech_type_ == AudioFrame::kPLCCNG) {
         plc_to_cng = true;
         double sum_squared = 0;
+        const int16_t* output_data = output.data();
         for (size_t k = 0;
              k < output.num_channels_ * output.samples_per_channel_; ++k)
-          sum_squared += output.data_[k] * output.data_[k];
+          sum_squared += output_data[k] * output_data[k];
         TestCondition(sum_squared, n > kFadingThreshold);
       } else {
         EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
@@ -1357,14 +1353,15 @@ TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
   // Verify that output audio is not written during muted mode. Other parameters
   // should be correct, though.
   AudioFrame new_frame;
-  for (auto& d : new_frame.data_) {
-    d = 17;
+  int16_t* frame_data = new_frame.mutable_data();
+  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
+    frame_data[i] = 17;
   }
   bool muted;
   EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
   EXPECT_TRUE(muted);
-  for (auto d : new_frame.data_) {
-    EXPECT_EQ(17, d);
+  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
+    EXPECT_EQ(17, frame_data[i]);
   }
   EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
             new_frame.timestamp_);
@@ -1523,8 +1520,8 @@ namespace {
   if (!res)
     return res;
   if (memcmp(
-          a.data_, b.data_,
-          a.samples_per_channel_ * a.num_channels_ * sizeof(a.data_[0])) != 0) {
+          a.data(), b.data(),
+          a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) != 0) {
     return ::testing::AssertionFailure() << "data_ diff";
   }
   return ::testing::AssertionSuccess();
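
Note (not part of the patch): every change above follows the same pattern. Writes that used to poke the public data_ array now take an int16_t* from mutable_data(), and read-only checks take a const int16_t* from data(). Below is a minimal sketch of that usage, assuming only the AudioFrame members already visible in this diff (data(), mutable_data(), kMaxDataSizeSamples, samples_per_channel_, num_channels_). The helper name and the include path are illustrative guesses, not code from the CL.

// Hypothetical helper, for illustration only; not part of the CL.
#include <cstddef>
#include <cstdint>

#include "webrtc/modules/include/module_common_types.h"  // AudioFrame (path assumed)

// Fills a frame with a constant through the non-const accessor, then
// computes the sample energy through the const accessor.
double FillAndMeasure(webrtc::AudioFrame* frame) {
  // Write access goes through mutable_data().
  int16_t* writable = frame->mutable_data();
  for (size_t i = 0; i < webrtc::AudioFrame::kMaxDataSizeSamples; ++i) {
    writable[i] = 17;
  }

  // Read-only access goes through data().
  const int16_t* readable = frame->data();
  double sum_squared = 0;
  for (size_t k = 0;
       k < frame->num_channels_ * frame->samples_per_channel_; ++k) {
    sum_squared += static_cast<double>(readable[k]) * readable[k];
  }
  return sum_squared;
}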