OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 101 matching lines...)
112 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) | 112 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) |
113 : _id(id), | 113 : _id(id), |
114 _minimumMixingFreq(kLowestPossible), | 114 _minimumMixingFreq(kLowestPossible), |
115 _outputFrequency(kDefaultFrequency), | 115 _outputFrequency(kDefaultFrequency), |
116 _sampleSize(0), | 116 _sampleSize(0), |
117 _audioFramePool(NULL), | 117 _audioFramePool(NULL), |
118 audio_source_list_(), | 118 audio_source_list_(), |
119 additional_audio_source_list_(), | 119 additional_audio_source_list_(), |
120 num_mixed_audio_sources_(0), | 120 num_mixed_audio_sources_(0), |
121 use_limiter_(true), | 121 use_limiter_(true), |
122 _timeStamp(0), | 122 _timeStamp(0) { |
123 _timeScheduler(kProcessPeriodicityInMs), | 123 thread_checker_.DetachFromThread(); |
124 _processCalls(0) {} | 124 } |
125 | 125 |
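The constructor now detaches thread_checker_ so the checker binds to whichever thread makes the first Mix() call rather than to the construction thread. A minimal sketch of that detach-then-check pattern, assuming rtc::ThreadChecker from webrtc/base/thread_checker.h; the class and member names below are illustrative only, not part of this change:

    #include "webrtc/base/checks.h"
    #include "webrtc/base/thread_checker.h"

    class MixerLike {
     public:
      MixerLike() {
        // Construction may happen on a different thread than mixing;
        // unbind the checker so it re-attaches on the first Mix() call.
        thread_checker_.DetachFromThread();
      }

      void Mix() {
        // Every Mix() call after the first must come from the same thread.
        RTC_DCHECK(thread_checker_.CalledOnValidThread());
        // ... mixing work ...
      }

     private:
      rtc::ThreadChecker thread_checker_;
    };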
126 bool NewAudioConferenceMixerImpl::Init() { | 126 bool NewAudioConferenceMixerImpl::Init() { |
127 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 127 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
128 if (_crit.get() == NULL) | 128 if (_crit.get() == NULL) |
129 return false; | 129 return false; |
130 | 130 |
131 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 131 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
132 if (_cbCrit.get() == NULL) | 132 if (_cbCrit.get() == NULL) |
133 return false; | 133 return false; |
134 | 134 |
(...skipping 32 matching lines...)
167 return false; | 167 return false; |
168 | 168 |
169 return true; | 169 return true; |
170 } | 170 } |
171 | 171 |
172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { | 172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { |
173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | 173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); |
174 RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr)); | 174 RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr)); |
175 } | 175 } |
176 | 176 |
177 // Process should be called every kProcessPeriodicityInMs ms | |
178 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { | |
179 int64_t timeUntilNextProcess = 0; | |
180 CriticalSectionScoped cs(_crit.get()); | |
181 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { | |
182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | |
183 "failed in TimeToNextUpdate() call"); | |
184 // Sanity check | |
185 RTC_NOTREACHED(); | |
186 return -1; | |
187 } | |
188 return timeUntilNextProcess; | |
189 } | |
190 | |
191 void NewAudioConferenceMixerImpl::Process() { | |
192 // TODO(aleloi) Remove this method. | |
193 RTC_NOTREACHED(); | |
194 } | |
195 | |
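With TimeUntilNextProcess() and Process() removed, the mixer no longer takes part in the Module/ProcessThread scheduling; the owner is expected to drive Mix() itself on a fixed cadence from a single thread. A rough sketch of such a driver, assuming the mixer and AudioFrame headers are already included; the 10 ms period and the KeepRunning()/SleepMs() helpers are assumptions for illustration:

    void MixerDriverLoop(NewAudioConferenceMixerImpl* mixer) {
      AudioFrame mixed_frame;
      while (KeepRunning()) {        // hypothetical stop condition
        mixer->Mix(&mixed_frame);    // must always be called on this thread
        SleepMs(10);                 // one mixing interval
      }
    }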
196 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 177 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
197 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; | 178 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; |
198 { | 179 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
199 CriticalSectionScoped cs(_crit.get()); | |
200 RTC_DCHECK_EQ(_processCalls, 0); | |
201 _processCalls++; | |
202 | |
203 // Let the scheduler know that we are running one iteration. | |
204 _timeScheduler.UpdateScheduler(); | |
205 } | |
206 | |
207 AudioFrameList mixList; | 180 AudioFrameList mixList; |
208 AudioFrameList rampOutList; | 181 AudioFrameList rampOutList; |
209 AudioFrameList additionalFramesList; | 182 AudioFrameList additionalFramesList; |
210 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 183 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
211 { | 184 { |
212 CriticalSectionScoped cs(_cbCrit.get()); | 185 CriticalSectionScoped cs(_cbCrit.get()); |
213 | 186 |
214 int32_t lowFreq = GetLowestMixingFrequency(); | 187 int32_t lowFreq = GetLowestMixingFrequency(); |
215 // SILK can run at 12 kHz and 24 kHz. These rates are not | 188 // SILK can run at 12 kHz and 24 kHz. These rates are not |
216 // supported, so use the closest higher supported rate to avoid | 189 // supported, so use the closest higher supported rate to avoid |
217 // losing any information. | 190 // losing any information. |
218 // TODO(aleloi): this is probably more appropriate to do in | 191 // TODO(aleloi): this is probably more appropriate to do in |
219 // GetLowestMixingFrequency(). | 192 // GetLowestMixingFrequency(). |
220 if (lowFreq == 12000) { | 193 if (lowFreq == 12000) { |
221 lowFreq = 16000; | 194 lowFreq = 16000; |
222 } else if (lowFreq == 24000) { | 195 } else if (lowFreq == 24000) { |
223 lowFreq = 32000; | 196 lowFreq = 32000; |
224 } | 197 } |
225 if (lowFreq <= 0) { | 198 if (lowFreq <= 0) { |
226 CriticalSectionScoped cs(_crit.get()); | |
227 _processCalls--; | |
228 return; | 199 return; |
229 } else { | 200 } else { |
230 switch (lowFreq) { | 201 switch (lowFreq) { |
231 case 8000: | 202 case 8000: |
232 if (OutputFrequency() != kNbInHz) { | 203 if (OutputFrequency() != kNbInHz) { |
233 SetOutputFrequency(kNbInHz); | 204 SetOutputFrequency(kNbInHz); |
234 } | 205 } |
235 break; | 206 break; |
236 case 16000: | 207 case 16000: |
237 if (OutputFrequency() != kWbInHz) { | 208 if (OutputFrequency() != kWbInHz) { |
238 SetOutputFrequency(kWbInHz); | 209 SetOutputFrequency(kWbInHz); |
239 } | 210 } |
240 break; | 211 break; |
241 case 32000: | 212 case 32000: |
242 if (OutputFrequency() != kSwbInHz) { | 213 if (OutputFrequency() != kSwbInHz) { |
243 SetOutputFrequency(kSwbInHz); | 214 SetOutputFrequency(kSwbInHz); |
244 } | 215 } |
245 break; | 216 break; |
246 case 48000: | 217 case 48000: |
247 if (OutputFrequency() != kFbInHz) { | 218 if (OutputFrequency() != kFbInHz) { |
248 SetOutputFrequency(kFbInHz); | 219 SetOutputFrequency(kFbInHz); |
249 } | 220 } |
250 break; | 221 break; |
251 default: | 222 default: |
252 RTC_NOTREACHED(); | 223 RTC_NOTREACHED(); |
253 | |
254 CriticalSectionScoped cs(_crit.get()); | |
255 _processCalls--; | |
256 return; | 224 return; |
257 } | 225 } |
258 } | 226 } |
259 | 227 |
260 UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap, | 228 UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap, |
261 &remainingAudioSourcesAllowedToMix); | 229 &remainingAudioSourcesAllowedToMix); |
262 | 230 |
263 GetAdditionalAudio(&additionalFramesList); | 231 GetAdditionalAudio(&additionalFramesList); |
264 UpdateMixedStatus(mixedAudioSourcesMap); | 232 UpdateMixedStatus(mixedAudioSourcesMap); |
265 } | 233 } |
(...skipping 30 matching lines...)
296 audio_frame_for_mixing->Mute(); | 264 audio_frame_for_mixing->Mute(); |
297 } else { | 265 } else { |
298 // Only call the limiter if we have something to mix. | 266 // Only call the limiter if we have something to mix. |
299 LimitMixedAudio(audio_frame_for_mixing); | 267 LimitMixedAudio(audio_frame_for_mixing); |
300 } | 268 } |
301 } | 269 } |
302 | 270 |
303 ClearAudioFrameList(&mixList); | 271 ClearAudioFrameList(&mixList); |
304 ClearAudioFrameList(&rampOutList); | 272 ClearAudioFrameList(&rampOutList); |
305 ClearAudioFrameList(&additionalFramesList); | 273 ClearAudioFrameList(&additionalFramesList); |
306 { | |
307 CriticalSectionScoped cs(_crit.get()); | |
308 _processCalls--; | |
309 } | |
310 return; | 274 return; |
311 } | 275 } |
312 | 276 |
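The SILK rate rounding inside Mix() (12 kHz to 16 kHz, 24 kHz to 32 kHz) could, per the TODO above, live in GetLowestMixingFrequency() instead. A hypothetical helper expressing that mapping, not present in this change:

    // Round an unsupported codec rate up to the nearest rate the mixer
    // supports, so no information is lost (illustrative only).
    int RoundUpToSupportedMixingRate(int rate_hz) {
      if (rate_hz == 12000) return 16000;  // 12 kHz SILK -> wideband
      if (rate_hz == 24000) return 32000;  // 24 kHz SILK -> super-wideband
      return rate_hz;                      // 8/16/32/48 kHz pass through
    }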
313 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 277 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
314 const Frequency& frequency) { | 278 const Frequency& frequency) { |
315 CriticalSectionScoped cs(_crit.get()); | 279 CriticalSectionScoped cs(_crit.get()); |
316 | 280 |
317 _outputFrequency = frequency; | 281 _outputFrequency = frequency; |
318 _sampleSize = | 282 _sampleSize = |
319 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 283 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); |
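As a quick sanity check on the computation above: with kProcessPeriodicityInMs at the usual 10 ms, _sampleSize works out to 80, 160, 320, or 480 samples per channel for the 8, 16, 32, and 48 kHz output frequencies respectively (for example, 48000 * 10 / 1000 = 480).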
(...skipping 549 matching lines...)
869 | 833 |
870 if (error != _limiter->kNoError) { | 834 if (error != _limiter->kNoError) { |
871 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 835 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
872 "Error from AudioProcessing: %d", error); | 836 "Error from AudioProcessing: %d", error); |
873 RTC_NOTREACHED(); | 837 RTC_NOTREACHED(); |
874 return false; | 838 return false; |
875 } | 839 } |
876 return true; | 840 return true; |
877 } | 841 } |
878 } // namespace webrtc | 842 } // namespace webrtc |