OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 | 14 |
15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" | 15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" |
16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" | 16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" |
17 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 17 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
18 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 18 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
20 #include "webrtc/system_wrappers/include/trace.h" | 20 #include "webrtc/system_wrappers/include/trace.h" |
21 | 21 |
22 namespace webrtc { | 22 namespace webrtc { |
23 namespace { | 23 namespace { |
24 | 24 |
25 struct ParticipantFrameStruct { | 25 struct AudioSourceWithFrame { |
26 ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m) | 26 AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m) |
27 : participant(p), audioFrame(a), muted(m) {} | 27 : audio_source(p), audio_frame(a), muted(m) {} |
28 MixerAudioSource* participant; | 28 MixerAudioSource* audio_source; |
29 AudioFrame* audioFrame; | 29 AudioFrame* audio_frame; |
30 bool muted; | 30 bool muted; |
31 }; | 31 }; |
32 | 32 |
33 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList; | 33 typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList; |
34 | 34 |
35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. |
36 // These effects are applied to |frame| itself prior to mixing. Assumes that | 36 // These effects are applied to |frame| itself prior to mixing. Assumes that |
37 // |mixed_frame| always has at least as many channels as |frame|. Supports | 37 // |mixed_frame| always has at least as many channels as |frame|. Supports |
38 // stereo at most. | 38 // stereo at most. |
39 // | 39 // |
40 // TODO(andrew): consider not modifying |frame| here. | 40 // TODO(andrew): consider not modifying |frame| here. |
41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
42 assert(mixed_frame->num_channels_ >= frame->num_channels_); | 42 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); |
43 if (use_limiter) { | 43 if (use_limiter) { |
44 // Divide by two to avoid saturation in the mixing. | 44 // Divide by two to avoid saturation in the mixing. |
45 // This is only meaningful if the limiter will be used. | 45 // This is only meaningful if the limiter will be used. |
46 *frame >>= 1; | 46 *frame >>= 1; |
47 } | 47 } |
48 if (mixed_frame->num_channels_ > frame->num_channels_) { | 48 if (mixed_frame->num_channels_ > frame->num_channels_) { |
49 // We only support mono-to-stereo. | 49 // We only support mono-to-stereo. |
50 assert(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1); | 50 RTC_DCHECK_EQ(mixed_frame->num_channels_, static_cast<size_t>(2)); |
| 51 RTC_DCHECK_EQ(frame->num_channels_, static_cast<size_t>(1)); |
51 AudioFrameOperations::MonoToStereo(frame); | 52 AudioFrameOperations::MonoToStereo(frame); |
52 } | 53 } |
53 | 54 |
54 *mixed_frame += *frame; | 55 *mixed_frame += *frame; |
55 } | 56 } |
56 | 57 |
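
A standalone sketch of the headroom arithmetic MixFrames() relies on (illustrative, not from this CL): each source frame is halved before summation so the running mix cannot saturate, and LimitMixedAudio() at the bottom of this file restores the level afterwards with *mixedAudio += *mixedAudio, using addition because left-shifting a negative value is undefined.

    #include <cstdint>

    // Two near-full-scale int16_t samples: a direct sum would overflow,
    // but halving each sample first keeps the result representable.
    int16_t MixWithHeadroom(int16_t a, int16_t b) {
      // Worst case: 32767/2 + 32767/2 = 32766, still within int16_t.
      return static_cast<int16_t>((a >> 1) + (b >> 1));
    }
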
57 // Return the max number of channels from a |list| composed of AudioFrames. | 58 // Return the max number of channels from a |list| composed of AudioFrames. |
58 size_t MaxNumChannels(const AudioFrameList* list) { | 59 size_t MaxNumChannels(const AudioFrameList* list) { |
59 size_t max_num_channels = 1; | 60 size_t max_num_channels = 1; |
60 for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end(); | 61 for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end(); |
(...skipping 43 matching lines...)
104 if (!mixer->Init()) { | 105 if (!mixer->Init()) { |
105 delete mixer; | 106 delete mixer; |
106 return NULL; | 107 return NULL; |
107 } | 108 } |
108 return mixer; | 109 return mixer; |
109 } | 110 } |
110 | 111 |
111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) | 112 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) |
112 : _id(id), | 113 : _id(id), |
113 _minimumMixingFreq(kLowestPossible), | 114 _minimumMixingFreq(kLowestPossible), |
114 _mixReceiver(NULL), | |
115 _outputFrequency(kDefaultFrequency), | 115 _outputFrequency(kDefaultFrequency), |
116 _sampleSize(0), | 116 _sampleSize(0), |
117 _audioFramePool(NULL), | 117 _audioFramePool(NULL), |
118 _participantList(), | 118 _audioSourceList(), |
119 _additionalParticipantList(), | 119 _additionalAudioSourceList(), |
120 _numMixedParticipants(0), | 120 _numMixedAudioSources(0), |
121 use_limiter_(true), | 121 use_limiter_(true), |
122 _timeStamp(0), | 122 _timeStamp(0), |
123 _timeScheduler(kProcessPeriodicityInMs), | 123 _timeScheduler(kProcessPeriodicityInMs), |
124 _processCalls(0) {} | 124 _processCalls(0) {} |
125 | 125 |
126 bool NewAudioConferenceMixerImpl::Init() { | 126 bool NewAudioConferenceMixerImpl::Init() { |
127 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 127 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
128 if (_crit.get() == NULL) | 128 if (_crit.get() == NULL) |
129 return false; | 129 return false; |
130 | 130 |
(...skipping 33 matching lines...)
164 return false; | 164 return false; |
165 | 165 |
166 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 166 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
167 return false; | 167 return false; |
168 | 168 |
169 return true; | 169 return true; |
170 } | 170 } |
171 | 171 |
172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { | 172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { |
173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | 173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); |
174 assert(_audioFramePool == NULL); | 174 RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr)); |
175 } | 175 } |
176 | 176 |
177 // Process should be called every kProcessPeriodicityInMs ms | 177 // Process should be called every kProcessPeriodicityInMs ms |
178 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { | 178 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { |
179 int64_t timeUntilNextProcess = 0; | 179 int64_t timeUntilNextProcess = 0; |
180 CriticalSectionScoped cs(_crit.get()); | 180 CriticalSectionScoped cs(_crit.get()); |
181 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { | 181 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { |
182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
183 "failed in TimeToNextUpdate() call"); | 183 "failed in TimeToNextUpdate() call"); |
184 // Sanity check | 184 // Sanity check |
185 assert(false); | 185 RTC_DCHECK(false); |
186 return -1; | 186 return -1; |
187 } | 187 } |
188 return timeUntilNextProcess; | 188 return timeUntilNextProcess; |
189 } | 189 } |
190 | 190 |
191 void NewAudioConferenceMixerImpl::Process() { | 191 void NewAudioConferenceMixerImpl::Process() { |
192 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; | 192 // TODO(aleloi) Remove this method. |
| 193 RTC_NOTREACHED(); |
| 194 } |
| 195 |
| 196 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
| 197 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; |
193 { | 198 { |
194 CriticalSectionScoped cs(_crit.get()); | 199 CriticalSectionScoped cs(_crit.get()); |
195 assert(_processCalls == 0); | 200 RTC_DCHECK_EQ(_processCalls, 0); |
196 _processCalls++; | 201 _processCalls++; |
197 | 202 |
198 // Let the scheduler know that we are running one iteration. | 203 // Let the scheduler know that we are running one iteration. |
199 _timeScheduler.UpdateScheduler(); | 204 _timeScheduler.UpdateScheduler(); |
200 } | 205 } |
201 | 206 |
202 AudioFrameList mixList; | 207 AudioFrameList mixList; |
203 AudioFrameList rampOutList; | 208 AudioFrameList rampOutList; |
204 AudioFrameList additionalFramesList; | 209 AudioFrameList additionalFramesList; |
205 std::map<int, MixerAudioSource*> mixedParticipantsMap; | 210 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
206 { | 211 { |
207 CriticalSectionScoped cs(_cbCrit.get()); | 212 CriticalSectionScoped cs(_cbCrit.get()); |
208 | 213 |
209 int32_t lowFreq = GetLowestMixingFrequency(); | 214 int32_t lowFreq = GetLowestMixingFrequency(); |
210 // SILK can run at 12 kHz and 24 kHz. These frequencies are not | 215 // SILK can run at 12 kHz and 24 kHz. These frequencies are not |
211 // supported so use the closest higher frequency to not lose any | 216 // supported so use the closest higher frequency to not lose any |
212 // information. | 217 // information. |
213 // TODO(henrike): this is probably more appropriate to do in | 218 // TODO(henrike): this is probably more appropriate to do in |
214 // GetLowestMixingFrequency(). | 219 // GetLowestMixingFrequency(). |
215 if (lowFreq == 12000) { | 220 if (lowFreq == 12000) { |
(...skipping 21 matching lines...)
237 if (OutputFrequency() != kSwbInHz) { | 242 if (OutputFrequency() != kSwbInHz) { |
238 SetOutputFrequency(kSwbInHz); | 243 SetOutputFrequency(kSwbInHz); |
239 } | 244 } |
240 break; | 245 break; |
241 case 48000: | 246 case 48000: |
242 if (OutputFrequency() != kFbInHz) { | 247 if (OutputFrequency() != kFbInHz) { |
243 SetOutputFrequency(kFbInHz); | 248 SetOutputFrequency(kFbInHz); |
244 } | 249 } |
245 break; | 250 break; |
246 default: | 251 default: |
247 assert(false); | 252 RTC_DCHECK(false); |
248 | 253 |
249 CriticalSectionScoped cs(_crit.get()); | 254 CriticalSectionScoped cs(_crit.get()); |
250 _processCalls--; | 255 _processCalls--; |
251 return; | 256 return; |
252 } | 257 } |
253 } | 258 } |
254 | 259 |
255 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, | 260 UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap, |
256 &remainingParticipantsAllowedToMix); | 261 &remainingAudioSourcesAllowedToMix); |
257 | 262 |
258 GetAdditionalAudio(&additionalFramesList); | 263 GetAdditionalAudio(&additionalFramesList); |
259 UpdateMixedStatus(mixedParticipantsMap); | 264 UpdateMixedStatus(mixedAudioSourcesMap); |
260 } | 265 } |
261 | 266 |
262 // Get an AudioFrame for mixing from the memory pool. | 267 // TODO(henrike): it might be better to decide the number of channels |
263 AudioFrame* mixedAudio = NULL; | 268 // with an API instead of dynamically. |
264 if (_audioFramePool->PopMemory(mixedAudio) == -1) { | 269 |
265 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 270 // Find the max channels over all mixing lists. |
266 "failed PopMemory() call"); | 271 const size_t num_mixed_channels = std::max( |
267 assert(false); | 272 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList), |
268 return; | 273 MaxNumChannels(&rampOutList))); |
269 } | 274 |
| 275 audio_frame_for_mixing->UpdateFrame( |
| 276 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, |
| 277 AudioFrame::kVadPassive, num_mixed_channels); |
| 278 |
| 279 _timeStamp += static_cast<uint32_t>(_sampleSize); |
| 280 |
| 281 use_limiter_ = _numMixedAudioSources > 1 && |
| 282 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
| 283 |
| 284 // We only use the limiter if it supports the output sample rate and |
| 285 // we're actually mixing multiple streams. |
| 286 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); |
270 | 287 |
271 { | 288 { |
272 CriticalSectionScoped cs(_crit.get()); | 289 CriticalSectionScoped cs(_crit.get()); |
| 290 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
| 291 MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList); |
273 | 292 |
274 // TODO(henrike): it might be better to decide the number of channels | 293 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
275 // with an API instead of dynamically. | |
276 | |
277 // Find the max channels over all mixing lists. | |
278 const size_t num_mixed_channels = | |
279 std::max(MaxNumChannels(&mixList), | |
280 std::max(MaxNumChannels(&additionalFramesList), | |
281 MaxNumChannels(&rampOutList))); | |
282 | |
283 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | |
284 AudioFrame::kNormalSpeech, AudioFrame::kVadPassive, | |
285 num_mixed_channels); | |
286 | |
287 _timeStamp += static_cast<uint32_t>(_sampleSize); | |
288 | |
289 // We only use the limiter if it supports the output sample rate and | |
290 // we're actually mixing multiple streams. | |
291 use_limiter_ = _numMixedParticipants > 1 && | |
292 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | |
293 | |
294 MixFromList(mixedAudio, mixList); | |
295 MixAnonomouslyFromList(mixedAudio, additionalFramesList); | |
296 MixAnonomouslyFromList(mixedAudio, rampOutList); | |
297 | |
298 if (mixedAudio->samples_per_channel_ == 0) { | |
299 // Nothing was mixed, set the audio samples to silence. | 294 // Nothing was mixed, set the audio samples to silence. |
300 mixedAudio->samples_per_channel_ = _sampleSize; | 295 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; |
301 mixedAudio->Mute(); | 296 audio_frame_for_mixing->Mute(); |
302 } else { | 297 } else { |
303 // Only call the limiter if we have something to mix. | 298 // Only call the limiter if we have something to mix. |
304 LimitMixedAudio(mixedAudio); | 299 LimitMixedAudio(audio_frame_for_mixing); |
305 } | 300 } |
306 } | 301 } |
307 | 302 |
308 { | |
309 CriticalSectionScoped cs(_cbCrit.get()); | |
310 if (_mixReceiver != NULL) { | |
311 const AudioFrame** dummy = NULL; | |
312 _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0); | |
313 } | |
314 } | |
315 | |
316 // Reclaim all outstanding memory. | |
317 _audioFramePool->PushMemory(mixedAudio); | |
318 ClearAudioFrameList(&mixList); | 303 ClearAudioFrameList(&mixList); |
319 ClearAudioFrameList(&rampOutList); | 304 ClearAudioFrameList(&rampOutList); |
320 ClearAudioFrameList(&additionalFramesList); | 305 ClearAudioFrameList(&additionalFramesList); |
321 { | 306 { |
322 CriticalSectionScoped cs(_crit.get()); | 307 CriticalSectionScoped cs(_crit.get()); |
323 _processCalls--; | 308 _processCalls--; |
324 } | 309 } |
325 return; | 310 return; |
326 } | 311 } |
327 | 312 |
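
The new Mix() above turns the mixer from push to pull: rather than registering an OldAudioMixerOutputReceiver (the Register/UnRegister pair deleted just below), the caller owns the output frame and polls. A hypothetical caller-side sketch; PlayOut() and |running| are stand-ins, and the 10 ms cadence is an assumption based on kProcessPeriodicityInMs:

    webrtc::NewAudioConferenceMixer* mixer =
        webrtc::NewAudioConferenceMixer::Create(/*id=*/0);
    webrtc::AudioFrame mixed_frame;
    while (running) {              // once per assumed 10 ms audio tick
      mixer->Mix(&mixed_frame);    // pull: mixer fills the caller-owned frame
      PlayOut(mixed_frame);        // stand-in for the audio device hand-off
    }
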
328 int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback( | |
329 OldAudioMixerOutputReceiver* mixReceiver) { | |
330 CriticalSectionScoped cs(_cbCrit.get()); | |
331 if (_mixReceiver != NULL) { | |
332 return -1; | |
333 } | |
334 _mixReceiver = mixReceiver; | |
335 return 0; | |
336 } | |
337 | |
338 int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { | |
339 CriticalSectionScoped cs(_cbCrit.get()); | |
340 if (_mixReceiver == NULL) { | |
341 return -1; | |
342 } | |
343 _mixReceiver = NULL; | |
344 return 0; | |
345 } | |
346 | |
347 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 313 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
348 const Frequency& frequency) { | 314 const Frequency& frequency) { |
349 CriticalSectionScoped cs(_crit.get()); | 315 CriticalSectionScoped cs(_crit.get()); |
350 | 316 |
351 _outputFrequency = frequency; | 317 _outputFrequency = frequency; |
352 _sampleSize = | 318 _sampleSize = |
353 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 319 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); |
354 | 320 |
355 return 0; | 321 return 0; |
356 } | 322 } |
357 | 323 |
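
The _sampleSize expression above is samples per channel per processing period. A worked instance, assuming kProcessPeriodicityInMs is 10 as the 10 ms cadence elsewhere in this file suggests:

    // 48000 Hz * 10 ms / 1000 = 480 samples per channel per Mix() call.
    static_assert(48000 * 10 / 1000 == 480, "10 ms of 48 kHz audio");
    static_assert(16000 * 10 / 1000 == 160, "10 ms of 16 kHz audio");
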
358 NewAudioConferenceMixer::Frequency | 324 NewAudioConferenceMixer::Frequency |
359 NewAudioConferenceMixerImpl::OutputFrequency() const { | 325 NewAudioConferenceMixerImpl::OutputFrequency() const { |
360 CriticalSectionScoped cs(_crit.get()); | 326 CriticalSectionScoped cs(_crit.get()); |
361 return _outputFrequency; | 327 return _outputFrequency; |
362 } | 328 } |
363 | 329 |
364 int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus( | 330 int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus( |
365 MixerAudioSource* participant, | 331 MixerAudioSource* audio_source, |
366 bool mixable) { | 332 bool mixable) { |
367 if (!mixable) { | 333 if (!mixable) { |
368 // Anonymous participants are in a separate list. Make sure that the | 334 // Anonymous audio sources are in a separate list. Make sure that the |
369 // participant is in the _participantList if it is being mixed. | 335 // audio source is in the _audioSourceList if it is being mixed. |
370 SetAnonymousMixabilityStatus(participant, false); | 336 SetAnonymousMixabilityStatus(audio_source, false); |
371 } | 337 } |
372 size_t numMixedParticipants; | 338 size_t numMixedAudioSources; |
373 { | 339 { |
374 CriticalSectionScoped cs(_cbCrit.get()); | 340 CriticalSectionScoped cs(_cbCrit.get()); |
375 const bool isMixed = IsParticipantInList(*participant, _participantList); | 341 const bool isMixed = IsAudioSourceInList(*audio_source, _audioSourceList); |
376 // API must be called with a new state. | 342 // API must be called with a new state. |
377 if (!(mixable ^ isMixed)) { | 343 if (!(mixable ^ isMixed)) { |
378 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 344 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
379 "Mixable is aready %s", isMixed ? "ON" : "off"); | 345 "Mixable is aready %s", isMixed ? "ON" : "off"); |
380 return -1; | 346 return -1; |
381 } | 347 } |
382 bool success = false; | 348 bool success = false; |
383 if (mixable) { | 349 if (mixable) { |
384 success = AddParticipantToList(participant, &_participantList); | 350 success = AddAudioSourceToList(audio_source, &_audioSourceList); |
385 } else { | 351 } else { |
386 success = RemoveParticipantFromList(participant, &_participantList); | 352 success = RemoveAudioSourceFromList(audio_source, &_audioSourceList); |
387 } | 353 } |
388 if (!success) { | 354 if (!success) { |
389 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 355 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
390 "failed to %s participant", mixable ? "add" : "remove"); | 356 "failed to %s audio_source", mixable ? "add" : "remove"); |
391 assert(false); | 357 RTC_DCHECK(false); |
392 return -1; | 358 return -1; |
393 } | 359 } |
394 | 360 |
395 size_t numMixedNonAnonymous = _participantList.size(); | 361 size_t numMixedNonAnonymous = _audioSourceList.size(); |
396 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) { | 362 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { |
397 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants; | 363 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; |
398 } | 364 } |
399 numMixedParticipants = | 365 numMixedAudioSources = |
400 numMixedNonAnonymous + _additionalParticipantList.size(); | 366 numMixedNonAnonymous + _additionalAudioSourceList.size(); |
401 } | 367 } |
402 // A MixerAudioSource was added or removed. Make sure the scratch | 368 // A MixerAudioSource was added or removed. Make sure the scratch |
403 // buffer is updated if necessary. | 369 // buffer is updated if necessary. |
404 // Note: The scratch buffer may only be updated in Process(). | 370 // Note: The scratch buffer may only be updated in Process(). |
405 CriticalSectionScoped cs(_crit.get()); | 371 CriticalSectionScoped cs(_crit.get()); |
406 _numMixedParticipants = numMixedParticipants; | 372 _numMixedAudioSources = numMixedAudioSources; |
407 return 0; | 373 return 0; |
408 } | 374 } |
409 | 375 |
410 bool NewAudioConferenceMixerImpl::MixabilityStatus( | 376 bool NewAudioConferenceMixerImpl::MixabilityStatus( |
411 const MixerAudioSource& participant) const { | 377 const MixerAudioSource& audio_source) const { |
412 CriticalSectionScoped cs(_cbCrit.get()); | 378 CriticalSectionScoped cs(_cbCrit.get()); |
413 return IsParticipantInList(participant, _participantList); | 379 return IsAudioSourceInList(audio_source, _audioSourceList); |
414 } | 380 } |
415 | 381 |
416 int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus( | 382 int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus( |
417 MixerAudioSource* participant, | 383 MixerAudioSource* audio_source, |
418 bool anonymous) { | 384 bool anonymous) { |
419 CriticalSectionScoped cs(_cbCrit.get()); | 385 CriticalSectionScoped cs(_cbCrit.get()); |
420 if (IsParticipantInList(*participant, _additionalParticipantList)) { | 386 if (IsAudioSourceInList(*audio_source, _additionalAudioSourceList)) { |
421 if (anonymous) { | 387 if (anonymous) { |
422 return 0; | 388 return 0; |
423 } | 389 } |
424 if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) { | 390 if (!RemoveAudioSourceFromList(audio_source, &_additionalAudioSourceList)) { |
425 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 391 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
426 "unable to remove participant from anonymous list"); | 392 "unable to remove audio_source from anonymous list"); |
427 assert(false); | 393 RTC_DCHECK(false); |
428 return -1; | 394 return -1; |
429 } | 395 } |
430 return AddParticipantToList(participant, &_participantList) ? 0 : -1; | 396 return AddAudioSourceToList(audio_source, &_audioSourceList) ? 0 : -1; |
431 } | 397 } |
432 if (!anonymous) { | 398 if (!anonymous) { |
433 return 0; | 399 return 0; |
434 } | 400 } |
435 const bool mixable = | 401 const bool mixable = |
436 RemoveParticipantFromList(participant, &_participantList); | 402 RemoveAudioSourceFromList(audio_source, &_audioSourceList); |
437 if (!mixable) { | 403 if (!mixable) { |
438 WEBRTC_TRACE( | 404 WEBRTC_TRACE( |
439 kTraceWarning, kTraceAudioMixerServer, _id, | 405 kTraceWarning, kTraceAudioMixerServer, _id, |
440 "participant must be registered before turning it into anonymous"); | 406 "audio_source must be registered before turning it into anonymous"); |
441 // Setting anonymous status is only possible if MixerAudioSource is | 407 // Setting anonymous status is only possible if MixerAudioSource is |
442 // already registered. | 408 // already registered. |
443 return -1; | 409 return -1; |
444 } | 410 } |
445 return AddParticipantToList(participant, &_additionalParticipantList) ? 0 | 411 return AddAudioSourceToList(audio_source, &_additionalAudioSourceList) ? 0 |
446 : -1; | 412 : -1; |
447 } | 413 } |
448 | 414 |
449 bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus( | 415 bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus( |
450 const MixerAudioSource& participant) const { | 416 const MixerAudioSource& audio_source) const { |
451 CriticalSectionScoped cs(_cbCrit.get()); | 417 CriticalSectionScoped cs(_cbCrit.get()); |
452 return IsParticipantInList(participant, _additionalParticipantList); | 418 return IsAudioSourceInList(audio_source, _additionalAudioSourceList); |
453 } | 419 } |
454 | 420 |
455 int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) { | 421 int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) { |
456 // Make sure that only allowed sampling frequencies are used. Use closest | 422 // Make sure that only allowed sampling frequencies are used. Use closest |
457 // higher sampling frequency to avoid losing information. | 423 // higher sampling frequency to avoid losing information. |
458 if (static_cast<int>(freq) == 12000) { | 424 if (static_cast<int>(freq) == 12000) { |
459 freq = kWbInHz; | 425 freq = kWbInHz; |
460 } else if (static_cast<int>(freq) == 24000) { | 426 } else if (static_cast<int>(freq) == 24000) { |
461 freq = kSwbInHz; | 427 freq = kSwbInHz; |
462 } | 428 } |
463 | 429 |
464 if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || | 430 if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || |
465 (freq == kLowestPossible)) { | 431 (freq == kLowestPossible)) { |
466 _minimumMixingFreq = freq; | 432 _minimumMixingFreq = freq; |
467 return 0; | 433 return 0; |
468 } else { | 434 } else { |
469 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 435 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
470 "SetMinimumMixingFrequency incorrect frequency: %i", freq); | 436 "SetMinimumMixingFrequency incorrect frequency: %i", freq); |
471 assert(false); | 437 RTC_DCHECK(false); |
472 return -1; | 438 return -1; |
473 } | 439 } |
474 } | 440 } |
475 | 441 |
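
Both this function and the switch in Mix() round the two SILK-only rates up to the nearest supported rate so no information is lost. An illustrative helper capturing the rule (the numeric values of kWbInHz and kSwbInHz are assumed to be 16000 and 32000):

    // Illustrative, not from the CL: map unsupported SILK rates to the
    // closest higher supported rate; supported rates pass through.
    int RoundUpToSupportedRateHz(int rate_hz) {
      if (rate_hz == 12000) return 16000;  // assumed value of kWbInHz
      if (rate_hz == 24000) return 32000;  // assumed value of kSwbInHz
      return rate_hz;                      // 8000, 16000, 32000, 48000
    }
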
476 // Check all AudioFrames that are to be mixed. The highest sampling frequency | 442 // Check all AudioFrames that are to be mixed. The highest sampling frequency |
477 // found is the lowest that can be used without losing information. | 443 // found is the lowest that can be used without losing information. |
478 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const { | 444 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const { |
479 const int participantListFrequency = | 445 const int audioSourceListFrequency = |
480 GetLowestMixingFrequencyFromList(_participantList); | 446 GetLowestMixingFrequencyFromList(_audioSourceList); |
481 const int anonymousListFrequency = | 447 const int anonymousListFrequency = |
482 GetLowestMixingFrequencyFromList(_additionalParticipantList); | 448 GetLowestMixingFrequencyFromList(_additionalAudioSourceList); |
483 const int highestFreq = (participantListFrequency > anonymousListFrequency) | 449 const int highestFreq = (audioSourceListFrequency > anonymousListFrequency) |
484 ? participantListFrequency | 450 ? audioSourceListFrequency |
485 : anonymousListFrequency; | 451 : anonymousListFrequency; |
486 // Check if the user specified a lowest mixing frequency. | 452 // Check if the user specified a lowest mixing frequency. |
487 if (_minimumMixingFreq != kLowestPossible) { | 453 if (_minimumMixingFreq != kLowestPossible) { |
488 if (_minimumMixingFreq > highestFreq) { | 454 if (_minimumMixingFreq > highestFreq) { |
489 return _minimumMixingFreq; | 455 return _minimumMixingFreq; |
490 } | 456 } |
491 } | 457 } |
492 return highestFreq; | 458 return highestFreq; |
493 } | 459 } |
494 | 460 |
495 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList( | 461 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList( |
496 const MixerAudioSourceList& mixList) const { | 462 const MixerAudioSourceList& mixList) const { |
497 int32_t highestFreq = 8000; | 463 int32_t highestFreq = 8000; |
498 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); | 464 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); |
499 iter != mixList.end(); ++iter) { | 465 iter != mixList.end(); ++iter) { |
500 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); | 466 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); |
501 if (neededFrequency > highestFreq) { | 467 if (neededFrequency > highestFreq) { |
502 highestFreq = neededFrequency; | 468 highestFreq = neededFrequency; |
503 } | 469 } |
504 } | 470 } |
505 return highestFreq; | 471 return highestFreq; |
506 } | 472 } |
507 | 473 |
508 void NewAudioConferenceMixerImpl::UpdateToMix( | 474 void NewAudioConferenceMixerImpl::UpdateToMix( |
509 AudioFrameList* mixList, | 475 AudioFrameList* mixList, |
510 AudioFrameList* rampOutList, | 476 AudioFrameList* rampOutList, |
511 std::map<int, MixerAudioSource*>* mixParticipantList, | 477 std::map<int, MixerAudioSource*>* mixAudioSourceList, |
512 size_t* maxAudioFrameCounter) const { | 478 size_t* maxAudioFrameCounter) const { |
513 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 479 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
514 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)", | 480 "UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)", |
515 *maxAudioFrameCounter); | 481 *maxAudioFrameCounter); |
516 const size_t mixListStartSize = mixList->size(); | 482 const size_t mixListStartSize = mixList->size(); |
517 AudioFrameList activeList; | 483 AudioFrameList activeList; |
518 // Struct needed by the passive lists to keep track of which AudioFrame | 484 // Struct needed by the passive lists to keep track of which AudioFrame |
519 // belongs to which MixerAudioSource. | 485 // belongs to which MixerAudioSource. |
520 ParticipantFrameStructList passiveWasNotMixedList; | 486 AudioSourceWithFrameList passiveWasNotMixedList; |
521 ParticipantFrameStructList passiveWasMixedList; | 487 AudioSourceWithFrameList passiveWasMixedList; |
522 for (MixerAudioSourceList::const_iterator participant = | 488 for (MixerAudioSourceList::const_iterator audio_source = |
523 _participantList.begin(); | 489 _audioSourceList.begin(); |
524 participant != _participantList.end(); ++participant) { | 490 audio_source != _audioSourceList.end(); ++audio_source) { |
525 // Stop keeping track of passive participants if there are already | 491 // Stop keeping track of passive audio sources if there are already |
526 // enough participants available (they won't be mixed anyway). | 492 // enough audio sources available (they won't be mixed anyway). |
527 bool mustAddToPassiveList = | 493 bool mustAddToPassiveList = |
528 (*maxAudioFrameCounter > | 494 (*maxAudioFrameCounter > |
529 (activeList.size() + passiveWasMixedList.size() + | 495 (activeList.size() + passiveWasMixedList.size() + |
530 passiveWasNotMixedList.size())); | 496 passiveWasNotMixedList.size())); |
531 | 497 |
532 bool wasMixed = false; | 498 bool wasMixed = false; |
533 wasMixed = (*participant)->_mixHistory->WasMixed(); | 499 wasMixed = (*audio_source)->_mixHistory->WasMixed(); |
534 AudioFrame* audioFrame = NULL; | 500 AudioFrame* audioFrame = NULL; |
535 if (_audioFramePool->PopMemory(audioFrame) == -1) { | 501 if (_audioFramePool->PopMemory(audioFrame) == -1) { |
536 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 502 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
537 "failed PopMemory() call"); | 503 "failed PopMemory() call"); |
538 assert(false); | 504 RTC_DCHECK(false); |
539 return; | 505 return; |
540 } | 506 } |
541 audioFrame->sample_rate_hz_ = _outputFrequency; | 507 audioFrame->sample_rate_hz_ = _outputFrequency; |
542 | 508 |
543 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 509 auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame); |
544 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 510 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
545 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 511 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
546 "failed to GetAudioFrameWithMuted() from participant"); | 512 "failed to GetAudioFrameWithMuted() from audio source"); |
547 _audioFramePool->PushMemory(audioFrame); | 513 _audioFramePool->PushMemory(audioFrame); |
548 continue; | 514 continue; |
549 } | 515 } |
550 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); | 516 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); |
551 if (_participantList.size() != 1) { | 517 if (_audioSourceList.size() != 1) { |
552 // TODO(wu): Issue 3390, add support for multiple participants case. | 518 // TODO(wu): Issue 3390, add support for multiple audio sources case. |
553 audioFrame->ntp_time_ms_ = -1; | 519 audioFrame->ntp_time_ms_ = -1; |
554 } | 520 } |
555 | 521 |
556 // TODO(henrike): this assert triggers in some test cases where SRTP is | 522 // TODO(henrike): this assert triggers in some test cases where SRTP is |
557 // used which prevents NetEQ from making a VAD. Temporarily disable this | 523 // used which prevents NetEQ from making a VAD. Temporarily disable this |
558 // assert until the problem is fixed on a higher level. | 524 // assert until the problem is fixed on a higher level. |
559 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown); | 525 // RTC_DCHECK_NE(audioFrame->vad_activity_, AudioFrame::kVadUnknown); |
560 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { | 526 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { |
561 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 527 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
562 "invalid VAD state from participant"); | 528 "invalid VAD state from audio source"); |
563 } | 529 } |
564 | 530 |
565 if (audioFrame->vad_activity_ == AudioFrame::kVadActive) { | 531 if (audioFrame->vad_activity_ == AudioFrame::kVadActive) { |
566 if (!wasMixed && !muted) { | 532 if (!wasMixed && !muted) { |
567 RampIn(*audioFrame); | 533 RampIn(*audioFrame); |
568 } | 534 } |
569 | 535 |
570 if (activeList.size() >= *maxAudioFrameCounter) { | 536 if (activeList.size() >= *maxAudioFrameCounter) { |
571 // There are already more active participants than should be | 537 // There are already more active audio sources than should be |
572 // mixed. Only keep the ones with the highest energy. | 538 // mixed. Only keep the ones with the highest energy. |
573 AudioFrameList::iterator replaceItem; | 539 AudioFrameList::iterator replaceItem; |
574 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame); | 540 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame); |
575 | 541 |
576 bool found_replace_item = false; | 542 bool found_replace_item = false; |
577 for (AudioFrameList::iterator iter = activeList.begin(); | 543 for (AudioFrameList::iterator iter = activeList.begin(); |
578 iter != activeList.end(); ++iter) { | 544 iter != activeList.end(); ++iter) { |
579 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); | 545 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); |
580 if (energy < lowestEnergy) { | 546 if (energy < lowestEnergy) { |
581 replaceItem = iter; | 547 replaceItem = iter; |
582 lowestEnergy = energy; | 548 lowestEnergy = energy; |
583 found_replace_item = true; | 549 found_replace_item = true; |
584 } | 550 } |
585 } | 551 } |
586 if (found_replace_item) { | 552 if (found_replace_item) { |
587 RTC_DCHECK(!muted); // Cannot replace with a muted frame. | 553 RTC_DCHECK(!muted); // Cannot replace with a muted frame. |
588 FrameAndMuteInfo replaceFrame = *replaceItem; | 554 FrameAndMuteInfo replaceFrame = *replaceItem; |
589 | 555 |
590 bool replaceWasMixed = false; | 556 bool replaceWasMixed = false; |
591 std::map<int, MixerAudioSource*>::const_iterator it = | 557 std::map<int, MixerAudioSource*>::const_iterator it = |
592 mixParticipantList->find(replaceFrame.frame->id_); | 558 mixAudioSourceList->find(replaceFrame.frame->id_); |
593 | 559 |
594 // When a frame is pushed to |activeList| it is also pushed | 560 // When a frame is pushed to |activeList| it is also pushed |
595 // to mixParticipantList with the frame's id. This means | 561 // to mixAudioSourceList with the frame's id. This means |
596 // that the Find call above should never fail. | 562 // that the Find call above should never fail. |
597 assert(it != mixParticipantList->end()); | 563 RTC_DCHECK(it != mixAudioSourceList->end()); |
598 replaceWasMixed = it->second->_mixHistory->WasMixed(); | 564 replaceWasMixed = it->second->_mixHistory->WasMixed(); |
599 | 565 |
600 mixParticipantList->erase(replaceFrame.frame->id_); | 566 mixAudioSourceList->erase(replaceFrame.frame->id_); |
601 activeList.erase(replaceItem); | 567 activeList.erase(replaceItem); |
602 | 568 |
603 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); | 569 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); |
604 (*mixParticipantList)[audioFrame->id_] = *participant; | 570 (*mixAudioSourceList)[audioFrame->id_] = *audio_source; |
605 assert(mixParticipantList->size() <= | 571 RTC_DCHECK_LE(mixAudioSourceList->size(), |
606 kMaximumAmountOfMixedParticipants); | 572 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); |
607 | 573 |
608 if (replaceWasMixed) { | 574 if (replaceWasMixed) { |
609 if (!replaceFrame.muted) { | 575 if (!replaceFrame.muted) { |
610 RampOut(*replaceFrame.frame); | 576 RampOut(*replaceFrame.frame); |
611 } | 577 } |
612 rampOutList->push_back(replaceFrame); | 578 rampOutList->push_back(replaceFrame); |
613 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 579 RTC_DCHECK_LE( |
| 580 rampOutList->size(), |
| 581 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); |
614 } else { | 582 } else { |
615 _audioFramePool->PushMemory(replaceFrame.frame); | 583 _audioFramePool->PushMemory(replaceFrame.frame); |
616 } | 584 } |
617 } else { | 585 } else { |
618 if (wasMixed) { | 586 if (wasMixed) { |
619 if (!muted) { | 587 if (!muted) { |
620 RampOut(*audioFrame); | 588 RampOut(*audioFrame); |
621 } | 589 } |
622 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); | 590 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); |
623 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); | 591 RTC_DCHECK_LE( |
| 592 rampOutList->size(), |
| 593 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); |
624 } else { | 594 } else { |
625 _audioFramePool->PushMemory(audioFrame); | 595 _audioFramePool->PushMemory(audioFrame); |
626 } | 596 } |
627 } | 597 } |
628 } else { | 598 } else { |
629 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); | 599 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); |
630 (*mixParticipantList)[audioFrame->id_] = *participant; | 600 (*mixAudioSourceList)[audioFrame->id_] = *audio_source; |
631 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 601 RTC_DCHECK_LE(mixAudioSourceList->size(), |
| 602 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); |
632 } | 603 } |
633 } else { | 604 } else { |
634 if (wasMixed) { | 605 if (wasMixed) { |
635 ParticipantFrameStruct* part_struct = | 606 AudioSourceWithFrame* part_struct = |
636 new ParticipantFrameStruct(*participant, audioFrame, muted); | 607 new AudioSourceWithFrame(*audio_source, audioFrame, muted); |
637 passiveWasMixedList.push_back(part_struct); | 608 passiveWasMixedList.push_back(part_struct); |
638 } else if (mustAddToPassiveList) { | 609 } else if (mustAddToPassiveList) { |
639 if (!muted) { | 610 if (!muted) { |
640 RampIn(*audioFrame); | 611 RampIn(*audioFrame); |
641 } | 612 } |
642 ParticipantFrameStruct* part_struct = | 613 AudioSourceWithFrame* part_struct = |
643 new ParticipantFrameStruct(*participant, audioFrame, muted); | 614 new AudioSourceWithFrame(*audio_source, audioFrame, muted); |
644 passiveWasNotMixedList.push_back(part_struct); | 615 passiveWasNotMixedList.push_back(part_struct); |
645 } else { | 616 } else { |
646 _audioFramePool->PushMemory(audioFrame); | 617 _audioFramePool->PushMemory(audioFrame); |
647 } | 618 } |
648 } | 619 } |
649 } | 620 } |
650 assert(activeList.size() <= *maxAudioFrameCounter); | 621 RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter); |
651 // At this point it is known which participants should be mixed. Transfer | 622 // At this point it is known which audio sources should be mixed. Transfer |
652 // this information to this function's output parameters. | 623 // this information to this function's output parameters. |
653 for (AudioFrameList::const_iterator iter = activeList.begin(); | 624 for (AudioFrameList::const_iterator iter = activeList.begin(); |
654 iter != activeList.end(); ++iter) { | 625 iter != activeList.end(); ++iter) { |
655 mixList->push_back(*iter); | 626 mixList->push_back(*iter); |
656 } | 627 } |
657 activeList.clear(); | 628 activeList.clear(); |
658 // Always mix a constant number of AudioFrames. If there aren't enough | 629 // Always mix a constant number of AudioFrames. If there aren't enough |
659 // active participants mix passive ones, starting with those that were | 630 // active audio sources mix passive ones, starting with those that were |
660 // mixed last iteration. | 631 // mixed last iteration. |
661 for (ParticipantFrameStructList::const_iterator iter = | 632 for (AudioSourceWithFrameList::const_iterator iter = |
662 passiveWasMixedList.begin(); | 633 passiveWasMixedList.begin(); |
663 iter != passiveWasMixedList.end(); ++iter) { | 634 iter != passiveWasMixedList.end(); ++iter) { |
664 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 635 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
665 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 636 mixList->push_back( |
666 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 637 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); |
667 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 638 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; |
| 639 RTC_DCHECK_LE(mixAudioSourceList->size(), |
| 640 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); |
668 } else { | 641 } else { |
669 _audioFramePool->PushMemory((*iter)->audioFrame); | 642 _audioFramePool->PushMemory((*iter)->audio_frame); |
670 } | 643 } |
671 delete *iter; | 644 delete *iter; |
672 } | 645 } |
673 // And finally the ones that have not been mixed for a while. | 646 // And finally the ones that have not been mixed for a while. |
674 for (ParticipantFrameStructList::const_iterator iter = | 647 for (AudioSourceWithFrameList::const_iterator iter = |
675 passiveWasNotMixedList.begin(); | 648 passiveWasNotMixedList.begin(); |
676 iter != passiveWasNotMixedList.end(); ++iter) { | 649 iter != passiveWasNotMixedList.end(); ++iter) { |
677 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 650 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
678 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); | 651 mixList->push_back( |
679 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; | 652 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); |
680 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); | 653 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; |
| 654 RTC_DCHECK_LE(mixAudioSourceList->size(), |
| 655 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); |
681 } else { | 656 } else { |
682 _audioFramePool->PushMemory((*iter)->audioFrame); | 657 _audioFramePool->PushMemory((*iter)->audio_frame); |
683 } | 658 } |
684 delete *iter; | 659 delete *iter; |
685 } | 660 } |
686 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); | 661 RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size()); |
687 *maxAudioFrameCounter += mixListStartSize - mixList->size(); | 662 *maxAudioFrameCounter += mixListStartSize - mixList->size(); |
688 } | 663 } |
689 | 664 |
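
UpdateToMix()'s active-list policy reduces to: once the cap is reached, a new kVadActive frame enters the mix only by evicting the current lowest-energy frame. A stripped-down sketch of that rule (ramping, muting and memory-pool handling omitted; CalculateEnergy() is the helper from audio_frame_manipulator.h):

    #include <algorithm>
    #include <list>

    // Keep at most |max_mixed| frames, preferring higher energy.
    void MaybeAdmit(std::list<AudioFrame*>* active, AudioFrame* candidate,
                    size_t max_mixed) {
      if (active->size() < max_mixed) {
        active->push_front(candidate);
        return;
      }
      if (active->empty()) return;  // max_mixed == 0: nothing may be mixed
      auto weakest = std::min_element(
          active->begin(), active->end(), [](AudioFrame* a, AudioFrame* b) {
            return CalculateEnergy(*a) < CalculateEnergy(*b);
          });
      if (CalculateEnergy(*candidate) > CalculateEnergy(**weakest)) {
        *weakest = candidate;  // evict the lowest-energy frame
      }
    }
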
690 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 665 void NewAudioConferenceMixerImpl::GetAdditionalAudio( |
691 AudioFrameList* additionalFramesList) const { | 666 AudioFrameList* additionalFramesList) const { |
692 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 667 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
693 "GetAdditionalAudio(additionalFramesList)"); | 668 "GetAdditionalAudio(additionalFramesList)"); |
694 // The GetAudioFrameWithMuted() callback may result in the participant being | 669 // The GetAudioFrameWithMuted() callback may result in the audio source being |
695 // removed from additionalParticipantList_. If that happens it will | 670 // removed from additionalAudioSourceList_. If that happens it will |
696 // invalidate any iterators. Create a copy of the participants list such | 671 // invalidate any iterators. Create a copy of the audio sources list such |
697 // that the list of participants can be traversed safely. | 672 // that the list of audio sources can be traversed safely. |
698 MixerAudioSourceList additionalParticipantList; | 673 MixerAudioSourceList additionalAudioSourceList; |
699 additionalParticipantList.insert(additionalParticipantList.begin(), | 674 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), |
700 _additionalParticipantList.begin(), | 675 _additionalAudioSourceList.begin(), |
701 _additionalParticipantList.end()); | 676 _additionalAudioSourceList.end()); |
702 | 677 |
703 for (MixerAudioSourceList::const_iterator participant = | 678 for (MixerAudioSourceList::const_iterator audio_source = |
704 additionalParticipantList.begin(); | 679 additionalAudioSourceList.begin(); |
705 participant != additionalParticipantList.end(); ++participant) { | 680 audio_source != additionalAudioSourceList.end(); ++audio_source) { |
706 AudioFrame* audioFrame = NULL; | 681 AudioFrame* audioFrame = NULL; |
707 if (_audioFramePool->PopMemory(audioFrame) == -1) { | 682 if (_audioFramePool->PopMemory(audioFrame) == -1) { |
708 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 683 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
709 "failed PopMemory() call"); | 684 "failed PopMemory() call"); |
710 assert(false); | 685 RTC_DCHECK(false); |
711 return; | 686 return; |
712 } | 687 } |
713 audioFrame->sample_rate_hz_ = _outputFrequency; | 688 audioFrame->sample_rate_hz_ = _outputFrequency; |
714 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 689 auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame); |
715 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 690 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
716 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 691 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
717 "failed to GetAudioFrameWithMuted() from participant"); | 692 "failed to GetAudioFrameWithMuted() from audio_source"); |
718 _audioFramePool->PushMemory(audioFrame); | 693 _audioFramePool->PushMemory(audioFrame); |
719 continue; | 694 continue; |
720 } | 695 } |
721 if (audioFrame->samples_per_channel_ == 0) { | 696 if (audioFrame->samples_per_channel_ == 0) { |
722 // Empty frame. Don't use it. | 697 // Empty frame. Don't use it. |
723 _audioFramePool->PushMemory(audioFrame); | 698 _audioFramePool->PushMemory(audioFrame); |
724 continue; | 699 continue; |
725 } | 700 } |
726 additionalFramesList->push_back(FrameAndMuteInfo( | 701 additionalFramesList->push_back(FrameAndMuteInfo( |
727 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 702 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
728 } | 703 } |
729 } | 704 } |
730 | 705 |
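
The snapshot copy above exists because GetAudioFrameWithMuted() can re-enter the mixer and remove the audio source, invalidating iterators into the live list. A stripped-down sketch of the pattern (DoWorkThatMayRemove() is a stand-in for the callback):

    // Iterate a snapshot so a re-entrant callback may safely mutate the
    // live _additionalAudioSourceList.
    MixerAudioSourceList snapshot(_additionalAudioSourceList.begin(),
                                  _additionalAudioSourceList.end());
    for (MixerAudioSource* source : snapshot) {
      // Removal from the live list cannot invalidate |snapshot|'s iterators.
      DoWorkThatMayRemove(source);  // stand-in for GetAudioFrameWithMuted()
    }
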
731 void NewAudioConferenceMixerImpl::UpdateMixedStatus( | 706 void NewAudioConferenceMixerImpl::UpdateMixedStatus( |
732 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { | 707 const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const { |
733 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 708 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
734 "UpdateMixedStatus(mixedParticipantsMap)"); | 709 "UpdateMixedStatus(mixedAudioSourcesMap)"); |
735 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); | 710 RTC_DCHECK_LE(mixedAudioSourcesMap.size(), |
| 711 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); |
736 | 712 |
737 // Loop through all participants. If they are in the mix map they | 713 // Loop through all audio sources. If they are in the mix map they |
738 // were mixed. | 714 // were mixed. |
739 for (MixerAudioSourceList::const_iterator participant = | 715 for (MixerAudioSourceList::const_iterator audio_source = |
740 _participantList.begin(); | 716 _audioSourceList.begin(); |
741 participant != _participantList.end(); ++participant) { | 717 audio_source != _audioSourceList.end(); ++audio_source) { |
742 bool isMixed = false; | 718 bool isMixed = false; |
743 for (std::map<int, MixerAudioSource*>::const_iterator it = | 719 for (std::map<int, MixerAudioSource*>::const_iterator it = |
744 mixedParticipantsMap.begin(); | 720 mixedAudioSourcesMap.begin(); |
745 it != mixedParticipantsMap.end(); ++it) { | 721 it != mixedAudioSourcesMap.end(); ++it) { |
746 if (it->second == *participant) { | 722 if (it->second == *audio_source) { |
747 isMixed = true; | 723 isMixed = true; |
748 break; | 724 break; |
749 } | 725 } |
750 } | 726 } |
751 (*participant)->_mixHistory->SetIsMixed(isMixed); | 727 (*audio_source)->_mixHistory->SetIsMixed(isMixed); |
752 } | 728 } |
753 } | 729 } |
754 | 730 |
755 void NewAudioConferenceMixerImpl::ClearAudioFrameList( | 731 void NewAudioConferenceMixerImpl::ClearAudioFrameList( |
756 AudioFrameList* audioFrameList) const { | 732 AudioFrameList* audioFrameList) const { |
757 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 733 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
758 "ClearAudioFrameList(audioFrameList)"); | 734 "ClearAudioFrameList(audioFrameList)"); |
759 for (AudioFrameList::iterator iter = audioFrameList->begin(); | 735 for (AudioFrameList::iterator iter = audioFrameList->begin(); |
760 iter != audioFrameList->end(); ++iter) { | 736 iter != audioFrameList->end(); ++iter) { |
761 _audioFramePool->PushMemory(iter->frame); | 737 _audioFramePool->PushMemory(iter->frame); |
762 } | 738 } |
763 audioFrameList->clear(); | 739 audioFrameList->clear(); |
764 } | 740 } |
765 | 741 |
766 bool NewAudioConferenceMixerImpl::IsParticipantInList( | 742 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( |
767 const MixerAudioSource& participant, | 743 const MixerAudioSource& audio_source, |
768 const MixerAudioSourceList& participantList) const { | 744 const MixerAudioSourceList& audioSourceList) const { |
769 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 745 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
770 "IsParticipantInList(participant,participantList)"); | 746 "IsAudioSourceInList(audio_source,audioSourceList)"); |
771 for (MixerAudioSourceList::const_iterator iter = participantList.begin(); | 747 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin(); |
772 iter != participantList.end(); ++iter) { | 748 iter != audioSourceList.end(); ++iter) { |
773 if (&participant == *iter) { | 749 if (&audio_source == *iter) { |
774 return true; | 750 return true; |
775 } | 751 } |
776 } | 752 } |
777 return false; | 753 return false; |
778 } | 754 } |
779 | 755 |
780 bool NewAudioConferenceMixerImpl::AddParticipantToList( | 756 bool NewAudioConferenceMixerImpl::AddAudioSourceToList( |
781 MixerAudioSource* participant, | 757 MixerAudioSource* audio_source, |
782 MixerAudioSourceList* participantList) const { | 758 MixerAudioSourceList* audioSourceList) const { |
783 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 759 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
784 "AddParticipantToList(participant, participantList)"); | 760 "AddAudioSourceToList(audio_source, audioSourceList)"); |
785 participantList->push_back(participant); | 761 audioSourceList->push_back(audio_source); |
786 // Make sure that the mixed status is correct for new MixerAudioSource. | 762 // Make sure that the mixed status is correct for new MixerAudioSource. |
787 participant->_mixHistory->ResetMixedStatus(); | 763 audio_source->_mixHistory->ResetMixedStatus(); |
788 return true; | 764 return true; |
789 } | 765 } |
790 | 766 |
791 bool NewAudioConferenceMixerImpl::RemoveParticipantFromList( | 767 bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList( |
792 MixerAudioSource* participant, | 768 MixerAudioSource* audio_source, |
793 MixerAudioSourceList* participantList) const { | 769 MixerAudioSourceList* audioSourceList) const { |
794 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 770 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
795 "RemoveParticipantFromList(participant, participantList)"); | 771 "RemoveAudioSourceFromList(audio_source, audioSourceList)"); |
796 for (MixerAudioSourceList::iterator iter = participantList->begin(); | 772 for (MixerAudioSourceList::iterator iter = audioSourceList->begin(); |
797 iter != participantList->end(); ++iter) { | 773 iter != audioSourceList->end(); ++iter) { |
798 if (*iter == participant) { | 774 if (*iter == audio_source) { |
799 participantList->erase(iter); | 775 audioSourceList->erase(iter); |
800 // Participant is no longer mixed, reset to default. | 776 // AudioSource is no longer mixed, reset to default. |
801 participant->_mixHistory->ResetMixedStatus(); | 777 audio_source->_mixHistory->ResetMixedStatus(); |
802 return true; | 778 return true; |
803 } | 779 } |
804 } | 780 } |
805 return false; | 781 return false; |
806 } | 782 } |
807 | 783 |
808 int32_t NewAudioConferenceMixerImpl::MixFromList( | 784 int32_t NewAudioConferenceMixerImpl::MixFromList( |
809 AudioFrame* mixedAudio, | 785 AudioFrame* mixedAudio, |
810 const AudioFrameList& audioFrameList) const { | 786 const AudioFrameList& audioFrameList, |
811 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 787 int32_t id, |
| 788 bool use_limiter) { |
| 789 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
812 "MixFromList(mixedAudio, audioFrameList)"); | 790 "MixFromList(mixedAudio, audioFrameList)"); |
813 if (audioFrameList.empty()) | 791 if (audioFrameList.empty()) |
814 return 0; | 792 return 0; |
815 | 793 |
816 uint32_t position = 0; | 794 uint32_t position = 0; |
817 | 795 |
818 if (_numMixedParticipants == 1) { | 796 if (audioFrameList.size() == 1) { |
819 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; | 797 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; |
820 mixedAudio->elapsed_time_ms_ = | 798 mixedAudio->elapsed_time_ms_ = |
821 audioFrameList.front().frame->elapsed_time_ms_; | 799 audioFrameList.front().frame->elapsed_time_ms_; |
822 } else { | 800 } else { |
823 // TODO(wu): Issue 3390. | 801 // TODO(wu): Issue 3390. |
824 // Audio frame timestamp is only supported in one channel case. | 802 // Audio frame timestamp is only supported in one channel case. |
825 mixedAudio->timestamp_ = 0; | 803 mixedAudio->timestamp_ = 0; |
826 mixedAudio->elapsed_time_ms_ = -1; | 804 mixedAudio->elapsed_time_ms_ = -1; |
827 } | 805 } |
828 | 806 |
829 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | 807 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); |
830 iter != audioFrameList.end(); ++iter) { | 808 iter != audioFrameList.end(); ++iter) { |
831 if (position >= kMaximumAmountOfMixedParticipants) { | 809 if (position >= kMaximumAmountOfMixedAudioSources) { |
832 WEBRTC_TRACE( | 810 WEBRTC_TRACE( |
833 kTraceMemory, kTraceAudioMixerServer, _id, | 811 kTraceMemory, kTraceAudioMixerServer, id, |
834 "Trying to mix more than max amount of mixed participants:%d!", | 812 "Trying to mix more than max amount of mixed audio sources:%d!", |
835 kMaximumAmountOfMixedParticipants); | 813 kMaximumAmountOfMixedAudioSources); |
836 // Assert and avoid crash | 814 // Assert and avoid crash |
837 assert(false); | 815 RTC_DCHECK(false); |
838 position = 0; | 816 position = 0; |
839 } | 817 } |
840 if (!iter->muted) { | 818 if (!iter->muted) { |
841 MixFrames(mixedAudio, iter->frame, use_limiter_); | 819 MixFrames(mixedAudio, iter->frame, use_limiter); |
842 } | 820 } |
843 | 821 |
844 position++; | 822 position++; |
845 } | 823 } |
846 | 824 |
847 return 0; | 825 return 0; |
848 } | 826 } |
849 | 827 |
850 // TODO(andrew): consolidate this function with MixFromList. | 828 // TODO(andrew): consolidate this function with MixFromList. |
851 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList( | 829 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList( |
(...skipping 21 matching lines...)
873 } | 851 } |
874 | 852 |
875 // Smoothly limit the mixed frame. | 853 // Smoothly limit the mixed frame. |
876 const int error = _limiter->ProcessStream(mixedAudio); | 854 const int error = _limiter->ProcessStream(mixedAudio); |
877 | 855 |
878 // And now we can safely restore the level. This procedure results in | 856 // And now we can safely restore the level. This procedure results in |
879 // some loss of resolution, deemed acceptable. | 857 // some loss of resolution, deemed acceptable. |
880 // | 858 // |
881 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS | 859 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS |
882 // and compression gain of 6 dB). However, in the transition frame when this | 860 // and compression gain of 6 dB). However, in the transition frame when this |
883 // is enabled (moving from one to two participants) it has the potential to | 861 // is enabled (moving from one to two audio sources) it has the potential to |
884 // create discontinuities in the mixed frame. | 862 // create discontinuities in the mixed frame. |
885 // | 863 // |
886 // Instead we double the frame (with addition since left-shifting a | 864 // Instead we double the frame (with addition since left-shifting a |
887 // negative value is undefined). | 865 // negative value is undefined). |
888 *mixedAudio += *mixedAudio; | 866 *mixedAudio += *mixedAudio; |
889 | 867 |
890 if (error != _limiter->kNoError) { | 868 if (error != _limiter->kNoError) { |
891 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 869 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
892 "Error from AudioProcessing: %d", error); | 870 "Error from AudioProcessing: %d", error); |
893 assert(false); | 871 RTC_DCHECK(false); |
894 return false; | 872 return false; |
895 } | 873 } |
896 return true; | 874 return true; |
897 } | 875 } |
898 } // namespace webrtc | 876 } // namespace webrtc |