OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 41 matching lines...) | |
52 public: | 52 public: |
53 MockMixerAudioSource() | 53 MockMixerAudioSource() |
54 : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) { | 54 : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) { |
55 ON_CALL(*this, GetAudioFrameWithMuted(_)) | 55 ON_CALL(*this, GetAudioFrameWithMuted(_)) |
56 .WillByDefault( | 56 .WillByDefault( |
57 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted)); | 57 Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithMuted)); |
58 } | 58 } |
59 | 59 |
60 MOCK_METHOD1(GetAudioFrameWithMuted, AudioFrameWithMuted(int sample_rate_hz)); | 60 MOCK_METHOD1(GetAudioFrameWithMuted, AudioFrameWithMuted(int sample_rate_hz)); |
61 | 61 |
62 MOCK_METHOD0(ssrc, int()); | |
63 | |
62 AudioFrame* fake_frame() { return &fake_frame_; } | 64 AudioFrame* fake_frame() { return &fake_frame_; } |
63 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } | 65 AudioFrameInfo fake_info() { return fake_audio_frame_info_; } |
64 void set_fake_info(const AudioFrameInfo audio_frame_info) { | 66 void set_fake_info(const AudioFrameInfo audio_frame_info) { |
65 fake_audio_frame_info_ = audio_frame_info; | 67 fake_audio_frame_info_ = audio_frame_info; |
66 } | 68 } |
67 | 69 |
68 private: | 70 private: |
69 AudioFrame fake_frame_, fake_output_frame_; | 71 AudioFrame fake_frame_, fake_output_frame_; |
70 AudioFrameInfo fake_audio_frame_info_; | 72 AudioFrameInfo fake_audio_frame_info_; |
71 AudioFrameWithMuted FakeAudioFrameWithMuted(int sample_rate_hz) { | 73 AudioFrameWithMuted FakeAudioFrameWithMuted(int sample_rate_hz) { |
72 fake_output_frame_.CopyFrom(fake_frame_); | 74 fake_output_frame_.CopyFrom(fake_frame_); |
73 return { | 75 return { |
74 &fake_output_frame_, // audio_frame_pointer | 76 &fake_output_frame_, // audio_frame_pointer |
75 fake_info(), // audio_frame_info | 77 fake_info(), // audio_frame_info |
76 }; | 78 }; |
77 } | 79 } |
78 }; | 80 }; |
79 | 81 |
80 // Creates participants from |frames| and |frame_info| and adds them | 82 // Creates participants from |frames| and |frame_info| and adds them |
81 // to the mixer. Compares mixed status with |expected_status| | 83 // to the mixer. Compares mixed status with |expected_status| |
82 void MixAndCompare( | 84 void MixAndCompare( |
83 const std::vector<AudioFrame>& frames, | 85 const std::vector<AudioFrame>& frames, |
84 const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info, | 86 const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info, |
85 const std::vector<bool>& expected_status) { | 87 const std::vector<bool>& expected_status) { |
86 int num_audio_sources = frames.size(); | 88 int num_audio_sources = frames.size(); |
87 RTC_DCHECK(frames.size() == frame_info.size()); | 89 RTC_DCHECK(frames.size() == frame_info.size()); |
88 RTC_DCHECK(frame_info.size() == expected_status.size()); | 90 RTC_DCHECK(frame_info.size() == expected_status.size()); |
89 | 91 |
90 const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create()); | 92 const auto mixer = AudioMixerImpl::Create(); |
91 std::vector<MockMixerAudioSource> participants(num_audio_sources); | 93 std::vector<MockMixerAudioSource> participants(num_audio_sources); |
92 | 94 |
93 for (int i = 0; i < num_audio_sources; i++) { | 95 for (int i = 0; i < num_audio_sources; i++) { |
94 participants[i].fake_frame()->CopyFrom(frames[i]); | 96 participants[i].fake_frame()->CopyFrom(frames[i]); |
95 participants[i].set_fake_info(frame_info[i]); | 97 participants[i].set_fake_info(frame_info[i]); |
96 } | 98 } |
97 | 99 |
98 for (int i = 0; i < num_audio_sources; i++) { | 100 for (int i = 0; i < num_audio_sources; i++) { |
99 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 101 EXPECT_TRUE(mixer->AddSource(&participants[i])); |
the sun 2016/10/10 13:35:49: wow, look at that! it's so easy to understand now! | |
100 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(kDefaultSampleRateHz)) | 102 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(kDefaultSampleRateHz)) |
101 .Times(Exactly(1)); | 103 .Times(Exactly(1)); |
102 } | 104 } |
103 | 105 |
104 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 106 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
105 | 107 |
106 for (int i = 0; i < num_audio_sources; i++) { | 108 for (int i = 0; i < num_audio_sources; i++) { |
107 EXPECT_EQ(expected_status[i], | 109 EXPECT_EQ(expected_status[i], |
108 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) | 110 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
109 << "Mixed status of AudioSource #" << i << " wrong."; | 111 << "Mixed status of AudioSource #" << i << " wrong."; |
110 } | 112 } |
111 } | 113 } |
112 | 114 |
113 TEST(AudioMixer, LargestEnergyVadActiveMixed) { | 115 TEST(AudioMixer, LargestEnergyVadActiveMixed) { |
114 constexpr int kAudioSources = | 116 constexpr int kAudioSources = |
115 AudioMixer::kMaximumAmountOfMixedAudioSources + 3; | 117 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 3; |
116 | 118 |
117 const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create()); | 119 const auto mixer = AudioMixerImpl::Create(); |
118 | 120 |
119 MockMixerAudioSource participants[kAudioSources]; | 121 MockMixerAudioSource participants[kAudioSources]; |
120 | 122 |
121 for (int i = 0; i < kAudioSources; ++i) { | 123 for (int i = 0; i < kAudioSources; ++i) { |
122 ResetFrame(participants[i].fake_frame()); | 124 ResetFrame(participants[i].fake_frame()); |
123 | 125 |
124 // We set the 80-th sample value since the first 80 samples may be | 126 // We set the 80-th sample value since the first 80 samples may be |
125 // modified by a ramped-in window. | 127 // modified by a ramped-in window. |
126 participants[i].fake_frame()->data_[80] = i; | 128 participants[i].fake_frame()->data_[80] = i; |
127 | 129 |
128 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 130 EXPECT_TRUE(mixer->AddSource(&participants[i])); |
129 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_)).Times(Exactly(1)); | 131 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(_)).Times(Exactly(1)); |
130 } | 132 } |
131 | 133 |
132 // Last participant gives audio frame with passive VAD, although it has the | 134 // Last participant gives audio frame with passive VAD, although it has the |
133 // largest energy. | 135 // largest energy. |
134 participants[kAudioSources - 1].fake_frame()->vad_activity_ = | 136 participants[kAudioSources - 1].fake_frame()->vad_activity_ = |
135 AudioFrame::kVadPassive; | 137 AudioFrame::kVadPassive; |
136 | 138 |
137 AudioFrame audio_frame; | 139 AudioFrame audio_frame; |
138 mixer->Mix(kDefaultSampleRateHz, | 140 mixer->Mix(kDefaultSampleRateHz, |
139 1, // number of channels | 141 1, // number of channels |
140 &audio_frame); | 142 &audio_frame); |
141 | 143 |
142 for (int i = 0; i < kAudioSources; ++i) { | 144 for (int i = 0; i < kAudioSources; ++i) { |
143 bool is_mixed = | 145 bool is_mixed = |
144 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]); | 146 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]); |
145 if (i == kAudioSources - 1 || | 147 if (i == kAudioSources - 1 || |
146 i < kAudioSources - 1 - AudioMixer::kMaximumAmountOfMixedAudioSources) { | 148 i < kAudioSources - 1 - |
149 AudioMixerImpl::kMaximumAmountOfMixedAudioSources) { | |
147 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i | 150 EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i |
148 << " wrong."; | 151 << " wrong."; |
149 } else { | 152 } else { |
150 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i | 153 EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i |
151 << " wrong."; | 154 << " wrong."; |
152 } | 155 } |
153 } | 156 } |
154 } | 157 } |
155 | 158 |
156 TEST(AudioMixer, FrameNotModifiedForSingleParticipant) { | 159 TEST(AudioMixer, FrameNotModifiedForSingleParticipant) { |
157 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create()); | 160 const auto mixer = AudioMixerImpl::Create(); |
aleloi 2016/10/10 13:06:31: Since there is no AudioMixer::Create any longer. | |
the sun 2016/10/10 13:35:49: Acknowledged. | |
(See the API usage sketch after the diff.) | |
158 | 161 |
159 MockMixerAudioSource participant; | 162 MockMixerAudioSource participant; |
160 | 163 |
161 ResetFrame(participant.fake_frame()); | 164 ResetFrame(participant.fake_frame()); |
162 const int n_samples = participant.fake_frame()->samples_per_channel_; | 165 const int n_samples = participant.fake_frame()->samples_per_channel_; |
163 | 166 |
164 // Modify the frame so that it's not zero. | 167 // Modify the frame so that it's not zero. |
165 for (int j = 0; j < n_samples; j++) { | 168 for (int j = 0; j < n_samples; j++) { |
166 participant.fake_frame()->data_[j] = j; | 169 participant.fake_frame()->data_[j] = j; |
167 } | 170 } |
168 | 171 |
169 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 172 EXPECT_TRUE(mixer->AddSource(&participant)); |
170 EXPECT_CALL(participant, GetAudioFrameWithMuted(_)).Times(Exactly(2)); | 173 EXPECT_CALL(participant, GetAudioFrameWithMuted(_)).Times(Exactly(2)); |
171 | 174 |
172 AudioFrame audio_frame; | 175 AudioFrame audio_frame; |
173 // Two mix iteration to compare after the ramp-up step. | 176 // Two mix iteration to compare after the ramp-up step. |
174 for (int i = 0; i < 2; i++) { | 177 for (int i = 0; i < 2; i++) { |
175 mixer->Mix(kDefaultSampleRateHz, | 178 mixer->Mix(kDefaultSampleRateHz, |
176 1, // number of channels | 179 1, // number of channels |
177 &audio_frame); | 180 &audio_frame); |
178 } | 181 } |
179 | 182 |
180 EXPECT_EQ( | 183 EXPECT_EQ( |
181 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); | 184 0, memcmp(participant.fake_frame()->data_, audio_frame.data_, n_samples)); |
182 } | 185 } |
183 | 186 |
184 TEST(AudioMixer, ParticipantSampleRate) { | 187 TEST(AudioMixer, ParticipantSampleRate) { |
185 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create()); | 188 const auto mixer = AudioMixerImpl::Create(); |
186 | 189 |
187 MockMixerAudioSource participant; | 190 MockMixerAudioSource participant; |
188 ResetFrame(participant.fake_frame()); | 191 ResetFrame(participant.fake_frame()); |
189 | 192 |
190 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 193 EXPECT_TRUE(mixer->AddSource(&participant)); |
191 for (auto frequency : {8000, 16000, 32000, 48000}) { | 194 for (auto frequency : {8000, 16000, 32000, 48000}) { |
192 EXPECT_CALL(participant, GetAudioFrameWithMuted(frequency)) | 195 EXPECT_CALL(participant, GetAudioFrameWithMuted(frequency)) |
193 .Times(Exactly(1)); | 196 .Times(Exactly(1)); |
194 participant.fake_frame()->sample_rate_hz_ = frequency; | 197 participant.fake_frame()->sample_rate_hz_ = frequency; |
195 participant.fake_frame()->samples_per_channel_ = frequency / 100; | 198 participant.fake_frame()->samples_per_channel_ = frequency / 100; |
196 mixer->Mix(frequency, 1, &frame_for_mixing); | 199 mixer->Mix(frequency, 1, &frame_for_mixing); |
197 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); | 200 EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); |
198 } | 201 } |
199 } | 202 } |
200 | 203 |
201 TEST(AudioMixer, ParticipantNumberOfChannels) { | 204 TEST(AudioMixer, ParticipantNumberOfChannels) { |
202 const std::unique_ptr<AudioMixer> mixer(AudioMixer::Create()); | 205 const auto mixer = AudioMixerImpl::Create(); |
203 | 206 |
204 MockMixerAudioSource participant; | 207 MockMixerAudioSource participant; |
205 ResetFrame(participant.fake_frame()); | 208 ResetFrame(participant.fake_frame()); |
206 | 209 |
207 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participant, true)); | 210 EXPECT_TRUE(mixer->AddSource(&participant)); |
208 for (size_t number_of_channels : {1, 2}) { | 211 for (size_t number_of_channels : {1, 2}) { |
209 EXPECT_CALL(participant, GetAudioFrameWithMuted(kDefaultSampleRateHz)) | 212 EXPECT_CALL(participant, GetAudioFrameWithMuted(kDefaultSampleRateHz)) |
210 .Times(Exactly(1)); | 213 .Times(Exactly(1)); |
211 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); | 214 mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); |
212 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); | 215 EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); |
213 } | 216 } |
214 } | 217 } |
215 | 218 |
216 // Maximal amount of participants are mixed one iteration, then | 219 // Maximal amount of participants are mixed one iteration, then |
217 // another participant with higher energy is added. | 220 // another participant with higher energy is added. |
218 TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { | 221 TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { |
219 constexpr int kAudioSources = | 222 constexpr int kAudioSources = |
220 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 223 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
221 | 224 |
222 const std::unique_ptr<AudioMixerImpl> mixer(AudioMixerImpl::Create()); | 225 const auto mixer = AudioMixerImpl::Create(); |
223 MockMixerAudioSource participants[kAudioSources]; | 226 MockMixerAudioSource participants[kAudioSources]; |
224 | 227 |
225 for (int i = 0; i < kAudioSources; i++) { | 228 for (int i = 0; i < kAudioSources; i++) { |
226 ResetFrame(participants[i].fake_frame()); | 229 ResetFrame(participants[i].fake_frame()); |
227 // Set the participant audio energy to increase with the index | 230 // Set the participant audio energy to increase with the index |
228 // |i|. | 231 // |i|. |
229 participants[i].fake_frame()->data_[0] = 100 * i; | 232 participants[i].fake_frame()->data_[0] = 100 * i; |
230 } | 233 } |
231 | 234 |
232 // Add all participants but the loudest for mixing. | 235 // Add all participants but the loudest for mixing. |
233 for (int i = 0; i < kAudioSources - 1; i++) { | 236 for (int i = 0; i < kAudioSources - 1; i++) { |
234 EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true)); | 237 EXPECT_TRUE(mixer->AddSource(&participants[i])); |
235 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(kDefaultSampleRateHz)) | 238 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(kDefaultSampleRateHz)) |
236 .Times(Exactly(1)); | 239 .Times(Exactly(1)); |
237 } | 240 } |
238 | 241 |
239 // First mixer iteration | 242 // First mixer iteration |
240 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 243 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
241 | 244 |
242 // All participants but the loudest should have been mixed. | 245 // All participants but the loudest should have been mixed. |
243 for (int i = 0; i < kAudioSources - 1; i++) { | 246 for (int i = 0; i < kAudioSources - 1; i++) { |
244 EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) | 247 EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
245 << "Mixed status of AudioSource #" << i << " wrong."; | 248 << "Mixed status of AudioSource #" << i << " wrong."; |
246 } | 249 } |
247 | 250 |
248 // Add new participant with higher energy. | 251 // Add new participant with higher energy. |
249 EXPECT_EQ(0, | 252 EXPECT_TRUE(mixer->AddSource(&participants[kAudioSources - 1])); |
250 mixer->SetMixabilityStatus(&participants[kAudioSources - 1], true)); | |
251 for (int i = 0; i < kAudioSources; i++) { | 253 for (int i = 0; i < kAudioSources; i++) { |
252 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(kDefaultSampleRateHz)) | 254 EXPECT_CALL(participants[i], GetAudioFrameWithMuted(kDefaultSampleRateHz)) |
253 .Times(Exactly(1)); | 255 .Times(Exactly(1)); |
254 } | 256 } |
255 | 257 |
256 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 258 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
257 | 259 |
258 // The most quiet participant should not have been mixed. | 260 // The most quiet participant should not have been mixed. |
259 EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0])) | 261 EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0])) |
260 << "Mixed status of AudioSource #0 wrong."; | 262 << "Mixed status of AudioSource #0 wrong."; |
261 | 263 |
262 // The loudest participants should have been mixed. | 264 // The loudest participants should have been mixed. |
263 for (int i = 1; i < kAudioSources; i++) { | 265 for (int i = 1; i < kAudioSources; i++) { |
264 EXPECT_EQ(true, | 266 EXPECT_EQ(true, |
265 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) | 267 mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
266 << "Mixed status of AudioSource #" << i << " wrong."; | 268 << "Mixed status of AudioSource #" << i << " wrong."; |
267 } | 269 } |
268 } | 270 } |
269 | 271 |
270 // This test checks that the initialization and participant addition | 272 // This test checks that the initialization and participant addition |
271 // can be done on a different thread. | 273 // can be done on a different thread. |
272 TEST(AudioMixer, ConstructFromOtherThread) { | 274 TEST(AudioMixer, ConstructFromOtherThread) { |
273 std::unique_ptr<rtc::Thread> init_thread = rtc::Thread::Create(); | 275 std::unique_ptr<rtc::Thread> init_thread = rtc::Thread::Create(); |
274 std::unique_ptr<rtc::Thread> participant_thread = rtc::Thread::Create(); | 276 std::unique_ptr<rtc::Thread> participant_thread = rtc::Thread::Create(); |
275 init_thread->Start(); | 277 init_thread->Start(); |
276 std::unique_ptr<AudioMixer> mixer( | 278 const auto mixer = init_thread->Invoke<rtc::scoped_refptr<AudioMixer>>( |
277 init_thread->Invoke<std::unique_ptr<AudioMixer>>( | 279 RTC_FROM_HERE, &AudioMixerImpl::Create); |
278 RTC_FROM_HERE, &AudioMixer::Create)); | |
279 MockMixerAudioSource participant; | 280 MockMixerAudioSource participant; |
280 | 281 |
281 ResetFrame(participant.fake_frame()); | 282 ResetFrame(participant.fake_frame()); |
282 | 283 |
283 participant_thread->Start(); | 284 participant_thread->Start(); |
284 EXPECT_EQ(0, participant_thread->Invoke<int>( | 285 EXPECT_TRUE(participant_thread->Invoke<int>( |
285 RTC_FROM_HERE, rtc::Bind(&AudioMixer::SetMixabilityStatus, | 286 RTC_FROM_HERE, |
286 mixer.get(), &participant, true))); | 287 rtc::Bind(&AudioMixer::AddSource, mixer.get(), &participant))); |
287 | 288 |
288 EXPECT_CALL(participant, GetAudioFrameWithMuted(kDefaultSampleRateHz)) | 289 EXPECT_CALL(participant, GetAudioFrameWithMuted(kDefaultSampleRateHz)) |
289 .Times(Exactly(1)); | 290 .Times(Exactly(1)); |
290 | 291 |
291 // Do one mixer iteration | 292 // Do one mixer iteration |
292 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); | 293 mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
293 } | 294 } |
294 | 295 |
295 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { | 296 TEST(AudioMixer, MutedShouldMixAfterUnmuted) { |
296 constexpr int kAudioSources = | 297 constexpr int kAudioSources = |
297 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 298 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
298 | 299 |
299 std::vector<AudioFrame> frames(kAudioSources); | 300 std::vector<AudioFrame> frames(kAudioSources); |
300 for (auto& frame : frames) { | 301 for (auto& frame : frames) { |
301 ResetFrame(&frame); | 302 ResetFrame(&frame); |
302 } | 303 } |
303 | 304 |
304 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( | 305 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
305 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); | 306 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
306 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; | 307 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; |
307 std::vector<bool> expected_status(kAudioSources, true); | 308 std::vector<bool> expected_status(kAudioSources, true); |
308 expected_status[0] = false; | 309 expected_status[0] = false; |
309 | 310 |
310 MixAndCompare(frames, frame_info, expected_status); | 311 MixAndCompare(frames, frame_info, expected_status); |
311 } | 312 } |
312 | 313 |
313 TEST(AudioMixer, PassiveShouldMixAfterNormal) { | 314 TEST(AudioMixer, PassiveShouldMixAfterNormal) { |
314 constexpr int kAudioSources = | 315 constexpr int kAudioSources = |
315 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 316 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
316 | 317 |
317 std::vector<AudioFrame> frames(kAudioSources); | 318 std::vector<AudioFrame> frames(kAudioSources); |
318 for (auto& frame : frames) { | 319 for (auto& frame : frames) { |
319 ResetFrame(&frame); | 320 ResetFrame(&frame); |
320 } | 321 } |
321 | 322 |
322 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( | 323 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
323 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); | 324 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
324 frames[0].vad_activity_ = AudioFrame::kVadPassive; | 325 frames[0].vad_activity_ = AudioFrame::kVadPassive; |
325 std::vector<bool> expected_status(kAudioSources, true); | 326 std::vector<bool> expected_status(kAudioSources, true); |
326 expected_status[0] = false; | 327 expected_status[0] = false; |
327 | 328 |
328 MixAndCompare(frames, frame_info, expected_status); | 329 MixAndCompare(frames, frame_info, expected_status); |
329 } | 330 } |
330 | 331 |
331 TEST(AudioMixer, ActiveShouldMixBeforeLoud) { | 332 TEST(AudioMixer, ActiveShouldMixBeforeLoud) { |
332 constexpr int kAudioSources = | 333 constexpr int kAudioSources = |
333 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 334 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
334 | 335 |
335 std::vector<AudioFrame> frames(kAudioSources); | 336 std::vector<AudioFrame> frames(kAudioSources); |
336 for (auto& frame : frames) { | 337 for (auto& frame : frames) { |
337 ResetFrame(&frame); | 338 ResetFrame(&frame); |
338 } | 339 } |
339 | 340 |
340 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( | 341 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
341 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); | 342 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
342 frames[0].vad_activity_ = AudioFrame::kVadPassive; | 343 frames[0].vad_activity_ = AudioFrame::kVadPassive; |
343 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, | 344 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
344 std::numeric_limits<int16_t>::max()); | 345 std::numeric_limits<int16_t>::max()); |
345 std::vector<bool> expected_status(kAudioSources, true); | 346 std::vector<bool> expected_status(kAudioSources, true); |
346 expected_status[0] = false; | 347 expected_status[0] = false; |
347 | 348 |
348 MixAndCompare(frames, frame_info, expected_status); | 349 MixAndCompare(frames, frame_info, expected_status); |
349 } | 350 } |
350 | 351 |
351 TEST(AudioMixer, UnmutedShouldMixBeforeLoud) { | 352 TEST(AudioMixer, UnmutedShouldMixBeforeLoud) { |
352 constexpr int kAudioSources = | 353 constexpr int kAudioSources = |
353 AudioMixer::kMaximumAmountOfMixedAudioSources + 1; | 354 AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
354 | 355 |
355 std::vector<AudioFrame> frames(kAudioSources); | 356 std::vector<AudioFrame> frames(kAudioSources); |
356 for (auto& frame : frames) { | 357 for (auto& frame : frames) { |
357 ResetFrame(&frame); | 358 ResetFrame(&frame); |
358 } | 359 } |
359 | 360 |
360 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( | 361 std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
361 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); | 362 kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
362 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; | 363 frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; |
363 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, | 364 std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
364 std::numeric_limits<int16_t>::max()); | 365 std::numeric_limits<int16_t>::max()); |
365 std::vector<bool> expected_status(kAudioSources, true); | 366 std::vector<bool> expected_status(kAudioSources, true); |
366 expected_status[0] = false; | 367 expected_status[0] = false; |
367 | 368 |
368 MixAndCompare(frames, frame_info, expected_status); | 369 MixAndCompare(frames, frame_info, expected_status); |
369 } | 370 } |
370 } // namespace webrtc | 371 } // namespace webrtc |
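
The change set above migrates the tests from the removed `AudioMixer::Create()`/`SetMixabilityStatus()` API to `AudioMixerImpl::Create()`/`AddSource()`. Below is a minimal sketch of that usage pattern outside the test fixture; it assumes the 2016-era signatures visible in the diff, and the header path and the `MixOneFrame` helper are illustrative assumptions, not part of the CL.

```cpp
// Minimal sketch, not part of the CL: contrasts the old and new way to create
// a mixer and attach a source, mirroring the calls exercised by the tests.
#include "webrtc/modules/audio_mixer/audio_mixer_impl.h"  // assumed header path

void MixOneFrame(webrtc::AudioMixer::Source* source,
                 webrtc::AudioFrame* mixed_out) {
  // Old API (removed by this CL):
  //   std::unique_ptr<webrtc::AudioMixer> mixer(webrtc::AudioMixer::Create());
  //   int rc = mixer->SetMixabilityStatus(source, true);  // 0 meant success
  //
  // New API: the factory lives on AudioMixerImpl and returns a ref-counted
  // pointer; sources are attached with AddSource(), which the tests check
  // with EXPECT_TRUE.
  const auto mixer = webrtc::AudioMixerImpl::Create();
  if (mixer->AddSource(source)) {
    // Same Mix() signature as in the tests: sample rate, channel count, output.
    mixer->Mix(/*sample_rate_hz=*/48000, /*number_of_channels=*/1, mixed_out);
  }
}
```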