OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 98 matching lines...)
109 : _id(id), | 109 : _id(id), |
110 _minimumMixingFreq(kLowestPossible), | 110 _minimumMixingFreq(kLowestPossible), |
111 _mixReceiver(NULL), | 111 _mixReceiver(NULL), |
112 _outputFrequency(kDefaultFrequency), | 112 _outputFrequency(kDefaultFrequency), |
113 _sampleSize(0), | 113 _sampleSize(0), |
114 _audioFramePool(NULL), | 114 _audioFramePool(NULL), |
115 _participantList(), | 115 _participantList(), |
116 _additionalParticipantList(), | 116 _additionalParticipantList(), |
117 _numMixedParticipants(0), | 117 _numMixedParticipants(0), |
118 use_limiter_(true), | 118 use_limiter_(true), |
119 _timeStamp(0), | 119 _timeStamp(0) {} |
120 _timeScheduler(kProcessPeriodicityInMs), | |
121 _processCalls(0) {} | |
122 | 120 |
123 bool NewAudioConferenceMixerImpl::Init() { | 121 bool NewAudioConferenceMixerImpl::Init() { |
124 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 122 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
125 if (_crit.get() == NULL) | 123 if (_crit.get() == NULL) |
126 return false; | 124 return false; |
127 | 125 |
128 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 126 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
129 if (_cbCrit.get() == NULL) | 127 if (_cbCrit.get() == NULL) |
130 return false; | 128 return false; |
131 | 129 |
(...skipping 32 matching lines...)
164 return false; | 162 return false; |
165 | 163 |
166 return true; | 164 return true; |
167 } | 165 } |
168 | 166 |
169 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { | 167 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { |
170 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | 168 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); |
171 assert(_audioFramePool == NULL); | 169 assert(_audioFramePool == NULL); |
172 } | 170 } |
173 | 171 |
174 // Process should be called every kProcessPeriodicityInMs ms | |
175 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { | |
176 int64_t timeUntilNextProcess = 0; | |
177 CriticalSectionScoped cs(_crit.get()); | |
178 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { | |
179 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | |
180 "failed in TimeToNextUpdate() call"); | |
181 // Sanity check | |
182 assert(false); | |
183 return -1; | |
184 } | |
185 return timeUntilNextProcess; | |
186 } | |
187 | |
188 void NewAudioConferenceMixerImpl::Process() { | |
189 RTC_NOTREACHED(); | |
190 } | |
191 | |
192 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 172 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
193 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; | 173 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; |
194 { | |
195 CriticalSectionScoped cs(_crit.get()); | |
196 assert(_processCalls == 0); | |
197 _processCalls++; | |
198 | |
199 // Let the scheduler know that we are running one iteration. | |
200 _timeScheduler.UpdateScheduler(); | |
201 } | |
202 | 174 |
203 AudioFrameList mixList; | 175 AudioFrameList mixList; |
204 AudioFrameList rampOutList; | 176 AudioFrameList rampOutList; |
205 AudioFrameList additionalFramesList; | 177 AudioFrameList additionalFramesList; |
206 std::map<int, MixerAudioSource*> mixedParticipantsMap; | 178 std::map<int, MixerAudioSource*> mixedParticipantsMap; |
207 { | 179 { |
208 CriticalSectionScoped cs(_cbCrit.get()); | 180 CriticalSectionScoped cs(_cbCrit.get()); |
209 | 181 |
210 int32_t lowFreq = GetLowestMixingFrequency(); | 182 int32_t lowFreq = GetLowestMixingFrequency(); |
211 // SILK can run in 12 kHz and 24 kHz. These frequencies are not | 183 // SILK can run in 12 kHz and 24 kHz. These frequencies are not |
212 // supported so use the closest higher frequency to not lose any | 184 // supported so use the closest higher frequency to not lose any |
213 // information. | 185 // information. |
214 // TODO(henrike): this is probably more appropriate to do in | 186 // TODO(henrike): this is probably more appropriate to do in |
215 // GetLowestMixingFrequency(). | 187 // GetLowestMixingFrequency(). |
216 if (lowFreq == 12000) { | 188 if (lowFreq == 12000) { |
217 lowFreq = 16000; | 189 lowFreq = 16000; |
218 } else if (lowFreq == 24000) { | 190 } else if (lowFreq == 24000) { |
219 lowFreq = 32000; | 191 lowFreq = 32000; |
220 } | 192 } |
221 if (lowFreq <= 0) { | 193 if (lowFreq <= 0) { |
222 CriticalSectionScoped cs(_crit.get()); | 194 CriticalSectionScoped cs(_crit.get()); |
223 _processCalls--; | |
224 return; | 195 return; |
225 } else { | 196 } else { |
226 switch (lowFreq) { | 197 switch (lowFreq) { |
227 case 8000: | 198 case 8000: |
228 if (OutputFrequency() != kNbInHz) { | 199 if (OutputFrequency() != kNbInHz) { |
229 SetOutputFrequency(kNbInHz); | 200 SetOutputFrequency(kNbInHz); |
230 } | 201 } |
231 break; | 202 break; |
232 case 16000: | 203 case 16000: |
233 if (OutputFrequency() != kWbInHz) { | 204 if (OutputFrequency() != kWbInHz) { |
234 SetOutputFrequency(kWbInHz); | 205 SetOutputFrequency(kWbInHz); |
235 } | 206 } |
236 break; | 207 break; |
237 case 32000: | 208 case 32000: |
238 if (OutputFrequency() != kSwbInHz) { | 209 if (OutputFrequency() != kSwbInHz) { |
239 SetOutputFrequency(kSwbInHz); | 210 SetOutputFrequency(kSwbInHz); |
240 } | 211 } |
241 break; | 212 break; |
242 case 48000: | 213 case 48000: |
243 if (OutputFrequency() != kFbInHz) { | 214 if (OutputFrequency() != kFbInHz) { |
244 SetOutputFrequency(kFbInHz); | 215 SetOutputFrequency(kFbInHz); |
245 } | 216 } |
246 break; | 217 break; |
247 default: | 218 default: |
248 assert(false); | 219 assert(false); |
249 | 220 |
250 CriticalSectionScoped cs(_crit.get()); | 221 CriticalSectionScoped cs(_crit.get()); |
251 _processCalls--; | |
252 return; | 222 return; |
253 } | 223 } |
254 } | 224 } |
255 | 225 |
256 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, | 226 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, |
257 &remainingParticipantsAllowedToMix); | 227 &remainingParticipantsAllowedToMix); |
258 | 228 |
259 GetAdditionalAudio(&additionalFramesList); | 229 GetAdditionalAudio(&additionalFramesList); |
260 UpdateMixedStatus(mixedParticipantsMap); | 230 UpdateMixedStatus(mixedParticipantsMap); |
261 } | 231 } |
(...skipping 39 matching lines...)
301 CriticalSectionScoped cs(_cbCrit.get()); | 271 CriticalSectionScoped cs(_cbCrit.get()); |
302 if (_mixReceiver != NULL) { | 272 if (_mixReceiver != NULL) { |
303 const AudioFrame** dummy = NULL; | 273 const AudioFrame** dummy = NULL; |
304 _mixReceiver->NewMixedAudio(_id, *audio_frame_for_mixing, dummy, 0); | 274 _mixReceiver->NewMixedAudio(_id, *audio_frame_for_mixing, dummy, 0); |
305 } | 275 } |
306 } | 276 } |
307 | 277 |
308 ClearAudioFrameList(&mixList); | 278 ClearAudioFrameList(&mixList); |
309 ClearAudioFrameList(&rampOutList); | 279 ClearAudioFrameList(&rampOutList); |
310 ClearAudioFrameList(&additionalFramesList); | 280 ClearAudioFrameList(&additionalFramesList); |
311 { | |
312 CriticalSectionScoped cs(_crit.get()); | |
313 _processCalls--; | |
314 } | |
315 return; | 281 return; |
316 } | 282 } |
317 | 283 |
318 int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback( | 284 int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback( |
319 AudioMixerOutputReceiver* mixReceiver) { | 285 AudioMixerOutputReceiver* mixReceiver) { |
320 CriticalSectionScoped cs(_cbCrit.get()); | 286 CriticalSectionScoped cs(_cbCrit.get()); |
321 if (_mixReceiver != NULL) { | 287 if (_mixReceiver != NULL) { |
322 return -1; | 288 return -1; |
323 } | 289 } |
324 _mixReceiver = mixReceiver; | 290 _mixReceiver = mixReceiver; |
(...skipping 554 matching lines...)
879 | 845 |
880 if (error != _limiter->kNoError) { | 846 if (error != _limiter->kNoError) { |
881 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 847 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
882 "Error from AudioProcessing: %d", error); | 848 "Error from AudioProcessing: %d", error); |
883 assert(false); | 849 assert(false); |
884 return false; | 850 return false; |
885 } | 851 } |
886 return true; | 852 return true; |
887 } | 853 } |
888 } // namespace webrtc | 854 } // namespace webrtc |
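
Review note: this change drops the TimeScheduler / TimeUntilNextProcess() / Process() plumbing, so the mixer is no longer driven by a ProcessThread; the owner now calls Mix() directly on its own cadence. A minimal sketch of the new call pattern, assuming a hypothetical caller-side helper (headers and mixer construction/participant registration are omitted and assumed to happen elsewhere):

    // Sketch only, not part of this CL. Assumes |mixer| points to an
    // initialized NewAudioConferenceMixerImpl with participants already added.
    void DriveMixerOnce(webrtc::NewAudioConferenceMixerImpl* mixer) {
      // The caller now owns the timing: invoke Mix() once per audio tick
      // instead of relying on TimeUntilNextProcess()/Process().
      webrtc::AudioFrame mixed_frame;
      mixer->Mix(&mixed_frame);
      // |mixed_frame| now holds the mixed output; a receiver registered via
      // RegisterMixedStreamCallback() is also notified through NewMixedAudio().
    }
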