Chromium Code Reviews

Unified Diff: webrtc/audio/utility/audio_frame_operations_unittest.cc

Issue 2750783004: Add mute state field to AudioFrame. (Closed)
Patch Set: Update new usages of AudioFrame::data_ (created 3 years, 6 months ago)
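
For context on the diff below: the patch replaces direct access to the AudioFrame::data_ member with the data() and mutable_data() accessors, and the tests now also check the new muted() flag. The following standalone sketch mirrors that accessor contract with a simplified stand-in class (FakeAudioFrame and its members are illustrative only, not the real webrtc::AudioFrame):

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

// Simplified stand-in that mimics the accessor contract exercised by the
// tests in this diff: a default-constructed frame is muted, data() exposes
// a read-only (zeroed) buffer, and mutable_data() unmutes the frame and
// hands out a writable buffer. Not the real webrtc::AudioFrame.
class FakeAudioFrame {
 public:
  static constexpr size_t kMaxSamples = 320 * 2;

  const int16_t* data() const { return samples_.data(); }

  int16_t* mutable_data() {
    if (muted_) {
      samples_.fill(0);
      muted_ = false;
    }
    return samples_.data();
  }

  bool muted() const { return muted_; }

  size_t samples_per_channel_ = 320;
  size_t num_channels_ = 2;

 private:
  std::array<int16_t, kMaxSamples> samples_{};
  bool muted_ = true;
};

int main() {
  FakeAudioFrame frame;
  std::cout << "muted after construction: " << frame.muted() << "\n";  // 1

  // Writing through mutable_data() clears the mute flag, which the patched
  // SetFrameData() helpers in the diff rely on.
  int16_t* data = frame.mutable_data();
  data[0] = 1000;
  std::cout << "muted after write: " << frame.muted() << "\n";  // 0
  std::cout << "first sample: " << frame.data()[0] << "\n";     // 1000
  return 0;
}

As the AddingXToEmptyGivesX test in the diff shows, calling mutable_data() is also how a default-constructed frame gets unmuted before it is written to.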
Index: webrtc/audio/utility/audio_frame_operations_unittest.cc
diff --git a/webrtc/audio/utility/audio_frame_operations_unittest.cc b/webrtc/audio/utility/audio_frame_operations_unittest.cc
index 096ea38d9d52855d94802bf7d3186e8912da6ed2..e3e9804c6cfc547778dd827a01dec8fd412c3c44 100644
--- a/webrtc/audio/utility/audio_frame_operations_unittest.cc
+++ b/webrtc/audio/utility/audio_frame_operations_unittest.cc
@@ -32,24 +32,28 @@ void SetFrameData(int16_t ch1,
int16_t ch3,
int16_t ch4,
AudioFrame* frame) {
+ int16_t* frame_data = frame->mutable_data();
for (size_t i = 0; i < frame->samples_per_channel_ * 4; i += 4) {
- frame->data_[i] = ch1;
- frame->data_[i + 1] = ch2;
- frame->data_[i + 2] = ch3;
- frame->data_[i + 3] = ch4;
+ frame_data[i] = ch1;
+ frame_data[i + 1] = ch2;
+ frame_data[i + 2] = ch3;
+ frame_data[i + 3] = ch4;
}
}
void SetFrameData(int16_t left, int16_t right, AudioFrame* frame) {
+ int16_t* frame_data = frame->mutable_data();
for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
- frame->data_[i] = left;
- frame->data_[i + 1] = right;
+ frame_data[i] = left;
+ frame_data[i + 1] = right;
}
}
void SetFrameData(int16_t data, AudioFrame* frame) {
- for (size_t i = 0; i < frame->samples_per_channel_; i++) {
- frame->data_[i] = data;
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0;
+ i < frame->samples_per_channel_ * frame->num_channels_; i++) {
+ frame_data[i] = data;
}
}
@@ -57,10 +61,13 @@ void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
EXPECT_EQ(frame1.num_channels_, frame2.num_channels_);
EXPECT_EQ(frame1.samples_per_channel_,
frame2.samples_per_channel_);
+ const int16_t* frame1_data = frame1.data();
+ const int16_t* frame2_data = frame2.data();
for (size_t i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
i++) {
- EXPECT_EQ(frame1.data_[i], frame2.data_[i]);
+ EXPECT_EQ(frame1_data[i], frame2_data[i]);
}
+ EXPECT_EQ(frame1.muted(), frame2.muted());
}
void InitFrame(AudioFrame* frame, size_t channels, size_t samples_per_channel,
@@ -81,7 +88,7 @@ void InitFrame(AudioFrame* frame, size_t channels, size_t samples_per_channel,
int16_t GetChannelData(const AudioFrame& frame, size_t channel, size_t index) {
RTC_DCHECK_LT(channel, frame.num_channels_);
RTC_DCHECK_LT(index, frame.samples_per_channel_);
- return frame.data_[index * frame.num_channels_ + channel];
+ return frame.data()[index * frame.num_channels_ + channel];
}
void VerifyFrameDataBounds(const AudioFrame& frame, size_t channel, int16_t max,
@@ -114,6 +121,13 @@ TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
VerifyFramesAreEqual(stereo_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, MonoToStereoMuted) {
+ frame_.num_channels_ = 1;
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
TEST_F(AudioFrameOperationsTest, MonoToStereoBufferSucceeds) {
AudioFrame target_frame;
frame_.num_channels_ = 1;
@@ -122,8 +136,8 @@ TEST_F(AudioFrameOperationsTest, MonoToStereoBufferSucceeds) {
target_frame.num_channels_ = 2;
target_frame.samples_per_channel_ = frame_.samples_per_channel_;
- AudioFrameOperations::MonoToStereo(frame_.data_, frame_.samples_per_channel_,
- target_frame.data_);
+ AudioFrameOperations::MonoToStereo(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
AudioFrame stereo_frame;
stereo_frame.samples_per_channel_ = 320;
@@ -148,6 +162,12 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
VerifyFramesAreEqual(mono_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, StereoToMonoMuted) {
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
TEST_F(AudioFrameOperationsTest, StereoToMonoBufferSucceeds) {
AudioFrame target_frame;
SetFrameData(4, 2, &frame_);
@@ -155,8 +175,8 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoBufferSucceeds) {
target_frame.num_channels_ = 1;
target_frame.samples_per_channel_ = frame_.samples_per_channel_;
- AudioFrameOperations::StereoToMono(frame_.data_, frame_.samples_per_channel_,
- target_frame.data_);
+ AudioFrameOperations::StereoToMono(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
AudioFrame mono_frame;
mono_frame.samples_per_channel_ = 320;
@@ -196,6 +216,13 @@ TEST_F(AudioFrameOperationsTest, QuadToMonoSucceeds) {
VerifyFramesAreEqual(mono_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, QuadToMonoMuted) {
+ frame_.num_channels_ = 4;
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::QuadToMono(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
TEST_F(AudioFrameOperationsTest, QuadToMonoBufferSucceeds) {
AudioFrame target_frame;
frame_.num_channels_ = 4;
@@ -204,8 +231,8 @@ TEST_F(AudioFrameOperationsTest, QuadToMonoBufferSucceeds) {
target_frame.num_channels_ = 1;
target_frame.samples_per_channel_ = frame_.samples_per_channel_;
- AudioFrameOperations::QuadToMono(frame_.data_, frame_.samples_per_channel_,
- target_frame.data_);
+ AudioFrameOperations::QuadToMono(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
AudioFrame mono_frame;
mono_frame.samples_per_channel_ = 320;
mono_frame.num_channels_ = 1;
@@ -244,6 +271,13 @@ TEST_F(AudioFrameOperationsTest, QuadToStereoSucceeds) {
VerifyFramesAreEqual(stereo_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, QuadToStereoMuted) {
+ frame_.num_channels_ = 4;
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
TEST_F(AudioFrameOperationsTest, QuadToStereoBufferSucceeds) {
AudioFrame target_frame;
frame_.num_channels_ = 4;
@@ -252,8 +286,8 @@ TEST_F(AudioFrameOperationsTest, QuadToStereoBufferSucceeds) {
target_frame.num_channels_ = 2;
target_frame.samples_per_channel_ = frame_.samples_per_channel_;
- AudioFrameOperations::QuadToStereo(frame_.data_, frame_.samples_per_channel_,
- target_frame.data_);
+ AudioFrameOperations::QuadToStereo(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
AudioFrame stereo_frame;
stereo_frame.samples_per_channel_ = 320;
stereo_frame.num_channels_ = 2;
@@ -285,6 +319,12 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
VerifyFramesAreEqual(swapped_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::SwapStereoChannels(&frame_);
+ EXPECT_TRUE(frame_.muted());
+}
+
TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
frame_.num_channels_ = 1;
// Set data to "stereo", despite it being a mono frame.
@@ -313,9 +353,9 @@ TEST_F(AudioFrameOperationsTest, MuteEnabled) {
AudioFrameOperations::Mute(&frame_, true, true);
AudioFrame muted_frame;
- muted_frame.samples_per_channel_ = 320;
- muted_frame.num_channels_ = 2;
- SetFrameData(0, 0, &muted_frame);
+ muted_frame.samples_per_channel_ = frame_.samples_per_channel_;
+ muted_frame.num_channels_ = frame_.num_channels_;
+ ASSERT_TRUE(muted_frame.muted());
VerifyFramesAreEqual(muted_frame, frame_);
}
@@ -423,6 +463,36 @@ TEST_F(AudioFrameOperationsTest, MuteEndStereoShort) {
EXPECT_EQ(-999, GetChannelData(frame_, 1, 92));
}
+TEST_F(AudioFrameOperationsTest, MuteBeginAlreadyMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::Mute(&frame_, false, true);
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndAlreadyMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::Mute(&frame_, true, false);
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, ApplyHalfGainSucceeds) {
+ SetFrameData(2, &frame_);
+
+ AudioFrame half_gain_frame;
+ half_gain_frame.num_channels_ = frame_.num_channels_;
+ half_gain_frame.samples_per_channel_ = frame_.samples_per_channel_;
+ SetFrameData(1, &half_gain_frame);
+
+ AudioFrameOperations::ApplyHalfGain(&frame_);
+ VerifyFramesAreEqual(half_gain_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ApplyHalfGainMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::ApplyHalfGain(&frame_);
+ EXPECT_TRUE(frame_.muted());
+}
+
// TODO(andrew): should not allow negative scales.
TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
frame_.num_channels_ = 1;
@@ -459,6 +529,12 @@ TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
VerifyFramesAreEqual(scaled_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, ScaleMuted) {
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
// TODO(andrew): should fail with a negative scale.
TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, &frame_));
@@ -493,25 +569,61 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
VerifyFramesAreEqual(scaled_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, ScaleWithSatMuted) {
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, &frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
// When samples_per_channel_ is 0, the frame counts as empty and zero.
AudioFrame frame_to_add_to;
+ frame_to_add_to.mutable_data(); // Unmute the frame.
+ ASSERT_FALSE(frame_to_add_to.muted());
frame_to_add_to.samples_per_channel_ = 0;
frame_to_add_to.num_channels_ = frame_.num_channels_;
+ SetFrameData(1000, &frame_);
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingXToMutedGivesX) {
+ AudioFrame frame_to_add_to;
+ ASSERT_TRUE(frame_to_add_to.muted());
+ frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+
+ SetFrameData(1000, &frame_);
AudioFrameOperations::Add(frame_, &frame_to_add_to);
VerifyFramesAreEqual(frame_, frame_to_add_to);
}
+TEST_F(AudioFrameOperationsTest, AddingMutedToXGivesX) {
+ AudioFrame frame_to_add_to;
+ frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+ SetFrameData(1000, &frame_to_add_to);
+
+ AudioFrame frame_copy;
+ frame_copy.CopyFrom(frame_to_add_to);
+
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ VerifyFramesAreEqual(frame_copy, frame_to_add_to);
+}
+
TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) {
AudioFrame frame_to_add_to;
frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
frame_to_add_to.num_channels_ = frame_.num_channels_;
SetFrameData(1000, &frame_to_add_to);
+ SetFrameData(2000, &frame_);
AudioFrameOperations::Add(frame_, &frame_to_add_to);
- SetFrameData(frame_.data_[0] + 1000, &frame_);
+ SetFrameData(frame_.data()[0] + 1000, &frame_);
VerifyFramesAreEqual(frame_, frame_to_add_to);
}
+
} // namespace
} // namespace webrtc
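
A note on the new *Muted tests added above: they all follow the same three-step shape, namely assert that a freshly initialized frame is muted, apply the operation, and expect it to remain muted (so the operation can take a cheap early-out instead of touching sample memory). A minimal standalone sketch of that pattern, assuming a hypothetical frame type and half-gain operation (HypotheticalFrame and HalfGain are illustrative, not the WebRTC API):

#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative stand-in with a mute flag and a tiny sample buffer;
// not the real webrtc::AudioFrame.
struct HypotheticalFrame {
  bool muted = true;  // Default-constructed frames start out muted.
  std::array<int16_t, 8> samples{};
};

// Hypothetical half-gain operation written in the style the new tests
// expect: on a muted frame it returns early and leaves the flag set,
// otherwise it scales the sample data.
void HalfGain(HypotheticalFrame* frame) {
  if (frame->muted) return;  // Nothing audible to scale; stay muted.
  for (int16_t& s : frame->samples) s /= 2;
}

int main() {
  // Same shape as the ApplyHalfGainMuted test above: assert muted,
  // run the operation, expect it to remain muted.
  HypotheticalFrame frame;
  assert(frame.muted);
  HalfGain(&frame);
  std::cout << "still muted: " << frame.muted << "\n";  // 1

  // And the unmuted path, mirroring ApplyHalfGainSucceeds.
  frame.muted = false;
  frame.samples.fill(2);
  HalfGain(&frame);
  std::cout << "sample after half gain: " << frame.samples[0] << "\n";  // 1
  return 0;
}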