OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
100 } | 100 } |
101 | 101 |
102 void MixHistory::ResetMixedStatus() { | 102 void MixHistory::ResetMixedStatus() { |
103 _isMixed = false; | 103 _isMixed = false; |
104 } | 104 } |
105 | 105 |
106 AudioConferenceMixer* AudioConferenceMixer::Create(int id) { | 106 AudioConferenceMixer* AudioConferenceMixer::Create(int id) { |
107 AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id); | 107 AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id); |
108 if(!mixer->Init()) { | 108 if(!mixer->Init()) { |
109 delete mixer; | 109 delete mixer; |
110 return NULL; | 110 return nullptr; |
111 } | 111 } |
112 return mixer; | 112 return mixer; |
113 } | 113 } |
114 | 114 |
115 AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id) | 115 AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id) |
116 : _id(id), | 116 : _id(id), |
117 _minimumMixingFreq(kLowestPossible), | 117 _minimumMixingFreq(kLowestPossible), |
118 _mixReceiver(NULL), | 118 _mixReceiver(nullptr), |
119 _outputFrequency(kDefaultFrequency), | 119 _outputFrequency(kDefaultFrequency), |
120 _sampleSize(0), | 120 _sampleSize(0), |
121 _audioFramePool(NULL), | 121 _audioFramePool(nullptr), |
122 _participantList(), | 122 _participantList(), |
123 _additionalParticipantList(), | 123 _additionalParticipantList(), |
124 _numMixedParticipants(0), | 124 _numMixedParticipants(0), |
125 use_limiter_(true), | 125 use_limiter_(true), |
126 _timeStamp(0), | 126 _timeStamp(0), |
127 _timeScheduler(kProcessPeriodicityInMs), | 127 _timeScheduler(kProcessPeriodicityInMs), |
128 _processCalls(0) {} | 128 _processCalls(0) {} |
129 | 129 |
130 bool AudioConferenceMixerImpl::Init() { | 130 bool AudioConferenceMixerImpl::Init() { |
131 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 131 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
132 if (_crit.get() == NULL) | 132 if (_crit.get() == nullptr) |
133 return false; | 133 return false; |
134 | 134 |
135 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 135 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
136 if(_cbCrit.get() == NULL) | 136 if (_cbCrit.get() == nullptr) |
137 return false; | 137 return false; |
138 | 138 |
139 Config config; | 139 Config config; |
140 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 140 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
141 _limiter.reset(AudioProcessing::Create(config)); | 141 _limiter.reset(AudioProcessing::Create(config)); |
142 if(!_limiter.get()) | 142 if(!_limiter.get()) |
143 return false; | 143 return false; |
144 | 144 |
145 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool, | 145 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool, |
146 DEFAULT_AUDIO_FRAME_POOLSIZE); | 146 DEFAULT_AUDIO_FRAME_POOLSIZE); |
147 if(_audioFramePool == NULL) | 147 if (_audioFramePool == nullptr) |
148 return false; | 148 return false; |
149 | 149 |
150 if(SetOutputFrequency(kDefaultFrequency) == -1) | 150 if(SetOutputFrequency(kDefaultFrequency) == -1) |
151 return false; | 151 return false; |
152 | 152 |
153 if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != | 153 if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) != |
154 _limiter->kNoError) | 154 _limiter->kNoError) |
155 return false; | 155 return false; |
156 | 156 |
157 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the | 157 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the |
158 // divide-by-2 but -7 is used instead to give a bit of headroom since the | 158 // divide-by-2 but -7 is used instead to give a bit of headroom since the |
159 // AGC is not a hard limiter. | 159 // AGC is not a hard limiter. |
160 if(_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError) | 160 if(_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError) |
161 return false; | 161 return false; |
162 | 162 |
163 if(_limiter->gain_control()->set_compression_gain_db(0) | 163 if(_limiter->gain_control()->set_compression_gain_db(0) |
164 != _limiter->kNoError) | 164 != _limiter->kNoError) |
165 return false; | 165 return false; |
166 | 166 |
167 if(_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) | 167 if(_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) |
168 return false; | 168 return false; |
169 | 169 |
170 if(_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 170 if(_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
171 return false; | 171 return false; |
172 | 172 |
173 return true; | 173 return true; |
174 } | 174 } |
175 | 175 |
176 AudioConferenceMixerImpl::~AudioConferenceMixerImpl() { | 176 AudioConferenceMixerImpl::~AudioConferenceMixerImpl() { |
177 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | 177 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); |
178 assert(_audioFramePool == NULL); | 178 assert(_audioFramePool == nullptr); |
179 } | 179 } |
180 | 180 |
181 // Process should be called every kProcessPeriodicityInMs ms | 181 // Process should be called every kProcessPeriodicityInMs ms |
182 int64_t AudioConferenceMixerImpl::TimeUntilNextProcess() { | 182 int64_t AudioConferenceMixerImpl::TimeUntilNextProcess() { |
183 int64_t timeUntilNextProcess = 0; | 183 int64_t timeUntilNextProcess = 0; |
184 CriticalSectionScoped cs(_crit.get()); | 184 CriticalSectionScoped cs(_crit.get()); |
185 if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { | 185 if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { |
186 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 186 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
187 "failed in TimeToNextUpdate() call"); | 187 "failed in TimeToNextUpdate() call"); |
188 // Sanity check | 188 // Sanity check |
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
258 } | 258 } |
259 | 259 |
260 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, | 260 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, |
261 &remainingParticipantsAllowedToMix); | 261 &remainingParticipantsAllowedToMix); |
262 | 262 |
263 GetAdditionalAudio(&additionalFramesList); | 263 GetAdditionalAudio(&additionalFramesList); |
264 UpdateMixedStatus(mixedParticipantsMap); | 264 UpdateMixedStatus(mixedParticipantsMap); |
265 } | 265 } |
266 | 266 |
267 // Get an AudioFrame for mixing from the memory pool. | 267 // Get an AudioFrame for mixing from the memory pool. |
268 AudioFrame* mixedAudio = NULL; | 268 AudioFrame* mixedAudio = nullptr; |
269 if(_audioFramePool->PopMemory(mixedAudio) == -1) { | 269 if (_audioFramePool->PopMemory(mixedAudio) == -1) { |
270 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 270 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
271 "failed PopMemory() call"); | 271 "failed PopMemory() call"); |
272 assert(false); | 272 assert(false); |
273 return; | 273 return; |
274 } | 274 } |
275 | 275 |
276 { | 276 { |
277 CriticalSectionScoped cs(_crit.get()); | 277 CriticalSectionScoped cs(_crit.get()); |
278 | 278 |
279 // TODO(henrike): it might be better to decide the number of channels | 279 // TODO(henrike): it might be better to decide the number of channels |
280 // with an API instead of dynamically. | 280 // with an API instead of dynamically. |
281 | 281 |
282 // Find the max channels over all mixing lists. | 282 // Find the max channels over all mixing lists. |
283 const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList), | 283 const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList), |
284 std::max(MaxNumChannels(&additionalFramesList), | 284 std::max(MaxNumChannels(&additionalFramesList), |
285 MaxNumChannels(&rampOutList))); | 285 MaxNumChannels(&rampOutList))); |
286 | 286 |
287 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | 287 mixedAudio->UpdateFrame(-1, _timeStamp, nullptr, 0, _outputFrequency, |
288 AudioFrame::kNormalSpeech, | 288 AudioFrame::kNormalSpeech, |
289 AudioFrame::kVadPassive, num_mixed_channels); | 289 AudioFrame::kVadPassive, num_mixed_channels); |
290 | 290 |
291 _timeStamp += static_cast<uint32_t>(_sampleSize); | 291 _timeStamp += static_cast<uint32_t>(_sampleSize); |
292 | 292 |
293 // We only use the limiter if it supports the output sample rate and | 293 // We only use the limiter if it supports the output sample rate and |
294 // we're actually mixing multiple streams. | 294 // we're actually mixing multiple streams. |
295 use_limiter_ = | 295 use_limiter_ = |
296 _numMixedParticipants > 1 && | 296 _numMixedParticipants > 1 && |
297 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | 297 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
298 | 298 |
299 MixFromList(mixedAudio, mixList); | 299 MixFromList(mixedAudio, mixList); |
300 MixAnonomouslyFromList(mixedAudio, additionalFramesList); | 300 MixAnonomouslyFromList(mixedAudio, additionalFramesList); |
301 MixAnonomouslyFromList(mixedAudio, rampOutList); | 301 MixAnonomouslyFromList(mixedAudio, rampOutList); |
302 | 302 |
303 if(mixedAudio->samples_per_channel_ == 0) { | 303 if(mixedAudio->samples_per_channel_ == 0) { |
304 // Nothing was mixed, set the audio samples to silence. | 304 // Nothing was mixed, set the audio samples to silence. |
305 mixedAudio->samples_per_channel_ = _sampleSize; | 305 mixedAudio->samples_per_channel_ = _sampleSize; |
306 AudioFrameOperations::Mute(mixedAudio); | 306 AudioFrameOperations::Mute(mixedAudio); |
307 } else { | 307 } else { |
308 // Only call the limiter if we have something to mix. | 308 // Only call the limiter if we have something to mix. |
309 LimitMixedAudio(mixedAudio); | 309 LimitMixedAudio(mixedAudio); |
310 } | 310 } |
311 } | 311 } |
312 | 312 |
313 { | 313 { |
314 CriticalSectionScoped cs(_cbCrit.get()); | 314 CriticalSectionScoped cs(_cbCrit.get()); |
315 if(_mixReceiver != NULL) { | 315 if (_mixReceiver != nullptr) { |
316 const AudioFrame** dummy = NULL; | 316 const AudioFrame** dummy = nullptr; |
317 _mixReceiver->NewMixedAudio( | 317 _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0); |
318 _id, | |
319 *mixedAudio, | |
320 dummy, | |
321 0); | |
322 } | 318 } |
323 } | 319 } |
324 | 320 |
325 // Reclaim all outstanding memory. | 321 // Reclaim all outstanding memory. |
326 _audioFramePool->PushMemory(mixedAudio); | 322 _audioFramePool->PushMemory(mixedAudio); |
327 ClearAudioFrameList(&mixList); | 323 ClearAudioFrameList(&mixList); |
328 ClearAudioFrameList(&rampOutList); | 324 ClearAudioFrameList(&rampOutList); |
329 ClearAudioFrameList(&additionalFramesList); | 325 ClearAudioFrameList(&additionalFramesList); |
330 { | 326 { |
331 CriticalSectionScoped cs(_crit.get()); | 327 CriticalSectionScoped cs(_crit.get()); |
332 _processCalls--; | 328 _processCalls--; |
333 } | 329 } |
334 return; | 330 return; |
335 } | 331 } |
336 | 332 |
337 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback( | 333 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback( |
338 AudioMixerOutputReceiver* mixReceiver) { | 334 AudioMixerOutputReceiver* mixReceiver) { |
339 CriticalSectionScoped cs(_cbCrit.get()); | 335 CriticalSectionScoped cs(_cbCrit.get()); |
340 if(_mixReceiver != NULL) { | 336 if (_mixReceiver != nullptr) { |
341 return -1; | 337 return -1; |
342 } | 338 } |
343 _mixReceiver = mixReceiver; | 339 _mixReceiver = mixReceiver; |
344 return 0; | 340 return 0; |
345 } | 341 } |
346 | 342 |
347 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { | 343 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { |
348 CriticalSectionScoped cs(_cbCrit.get()); | 344 CriticalSectionScoped cs(_cbCrit.get()); |
349 if(_mixReceiver == NULL) { | 345 if (_mixReceiver == nullptr) { |
350 return -1; | 346 return -1; |
351 } | 347 } |
352 _mixReceiver = NULL; | 348 _mixReceiver = nullptr; |
353 return 0; | 349 return 0; |
354 } | 350 } |
355 | 351 |
356 int32_t AudioConferenceMixerImpl::SetOutputFrequency( | 352 int32_t AudioConferenceMixerImpl::SetOutputFrequency( |
357 const Frequency& frequency) { | 353 const Frequency& frequency) { |
358 CriticalSectionScoped cs(_crit.get()); | 354 CriticalSectionScoped cs(_crit.get()); |
359 | 355 |
360 _outputFrequency = frequency; | 356 _outputFrequency = frequency; |
361 _sampleSize = | 357 _sampleSize = |
362 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000); | 358 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000); |
(...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
539 ++participant) { | 535 ++participant) { |
540 // Stop keeping track of passive participants if there are already | 536 // Stop keeping track of passive participants if there are already |
541 // enough participants available (they wont be mixed anyway). | 537 // enough participants available (they won't be mixed anyway). |
542 bool mustAddToPassiveList = (*maxAudioFrameCounter > | 538 bool mustAddToPassiveList = (*maxAudioFrameCounter > |
543 (activeList.size() + | 539 (activeList.size() + |
544 passiveWasMixedList.size() + | 540 passiveWasMixedList.size() + |
545 passiveWasNotMixedList.size())); | 541 passiveWasNotMixedList.size())); |
546 | 542 |
547 bool wasMixed = false; | 543 bool wasMixed = false; |
548 wasMixed = (*participant)->_mixHistory->WasMixed(); | 544 wasMixed = (*participant)->_mixHistory->WasMixed(); |
549 AudioFrame* audioFrame = NULL; | 545 AudioFrame* audioFrame = nullptr; |
550 if(_audioFramePool->PopMemory(audioFrame) == -1) { | 546 if (_audioFramePool->PopMemory(audioFrame) == -1) { |
551 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 547 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
552 "failed PopMemory() call"); | 548 "failed PopMemory() call"); |
553 assert(false); | 549 assert(false); |
554 return; | 550 return; |
555 } | 551 } |
556 audioFrame->sample_rate_hz_ = _outputFrequency; | 552 audioFrame->sample_rate_hz_ = _outputFrequency; |
557 | 553 |
558 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 554 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); |
559 if (ret == MixerParticipant::AudioFrameInfo::kError) { | 555 if (ret == MixerParticipant::AudioFrameInfo::kError) { |
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
727 // that the list of participants can be traversed safely. | 723 // that the list of participants can be traversed safely. |
728 MixerParticipantList additionalParticipantList; | 724 MixerParticipantList additionalParticipantList; |
729 additionalParticipantList.insert(additionalParticipantList.begin(), | 725 additionalParticipantList.insert(additionalParticipantList.begin(), |
730 _additionalParticipantList.begin(), | 726 _additionalParticipantList.begin(), |
731 _additionalParticipantList.end()); | 727 _additionalParticipantList.end()); |
732 | 728 |
733 for (MixerParticipantList::const_iterator participant = | 729 for (MixerParticipantList::const_iterator participant = |
734 additionalParticipantList.begin(); | 730 additionalParticipantList.begin(); |
735 participant != additionalParticipantList.end(); | 731 participant != additionalParticipantList.end(); |
736 ++participant) { | 732 ++participant) { |
737 AudioFrame* audioFrame = NULL; | 733 AudioFrame* audioFrame = nullptr; |
738 if(_audioFramePool->PopMemory(audioFrame) == -1) { | 734 if (_audioFramePool->PopMemory(audioFrame) == -1) { |
739 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 735 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
740 "failed PopMemory() call"); | 736 "failed PopMemory() call"); |
741 assert(false); | 737 assert(false); |
742 return; | 738 return; |
743 } | 739 } |
744 audioFrame->sample_rate_hz_ = _outputFrequency; | 740 audioFrame->sample_rate_hz_ = _outputFrequency; |
745 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); | 741 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); |
746 if (ret == MixerParticipant::AudioFrameInfo::kError) { | 742 if (ret == MixerParticipant::AudioFrameInfo::kError) { |
747 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 743 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
748 "failed to GetAudioFrameWithMuted() from participant"); | 744 "failed to GetAudioFrameWithMuted() from participant"); |
749 _audioFramePool->PushMemory(audioFrame); | 745 _audioFramePool->PushMemory(audioFrame); |
750 continue; | 746 continue; |
751 } | 747 } |
752 if(audioFrame->samples_per_channel_ == 0) { | 748 if(audioFrame->samples_per_channel_ == 0) { |
(...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
926 | 922 |
927 if(error != _limiter->kNoError) { | 923 if(error != _limiter->kNoError) { |
928 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 924 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
929 "Error from AudioProcessing: %d", error); | 925 "Error from AudioProcessing: %d", error); |
930 assert(false); | 926 assert(false); |
931 return false; | 927 return false; |
932 } | 928 } |
933 return true; | 929 return true; |
934 } | 930 } |
935 } // namespace webrtc | 931 } // namespace webrtc |
OLD | NEW |