| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 269 matching lines...) |
| 280 | 280 |
| 281 // Find the max channels over all mixing lists. | 281 // Find the max channels over all mixing lists. |
| 282 const int num_mixed_channels = std::max(MaxNumChannels(&mixList), | 282 const int num_mixed_channels = std::max(MaxNumChannels(&mixList), |
| 283 std::max(MaxNumChannels(&additionalFramesList), | 283 std::max(MaxNumChannels(&additionalFramesList), |
| 284 MaxNumChannels(&rampOutList))); | 284 MaxNumChannels(&rampOutList))); |
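For context, a minimal sketch of what the MaxNumChannels() helper used above is assumed to compute; the real helper operates on the mixer's internal AudioFrameList type, so the std::list<AudioFrame*> and the stripped-down AudioFrame below are illustrative stand-ins only:

```cpp
#include <algorithm>
#include <list>

// Hypothetical stand-in for webrtc::AudioFrame; only the field used here.
struct AudioFrame {
  int num_channels_ = 1;
};

// Assumed behaviour of a MaxNumChannels-style helper: the widest frame in a
// list decides the channel count of the mixed output; an empty list counts
// as mono.
int MaxNumChannels(const std::list<AudioFrame*>* frames) {
  int max_channels = 1;
  for (const AudioFrame* frame : *frames) {
    max_channels = std::max(max_channels, frame->num_channels_);
  }
  return max_channels;
}
```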
| 285 | 285 |
| 286 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | 286 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, |
| 287 AudioFrame::kNormalSpeech, | 287 AudioFrame::kNormalSpeech, |
| 288 AudioFrame::kVadPassive, num_mixed_channels); | 288 AudioFrame::kVadPassive, num_mixed_channels); |
| 289 | 289 |
| 290 _timeStamp += _sampleSize; | 290 _timeStamp += static_cast<uint32_t>(_sampleSize); |
| 291 | 291 |
| 292 // We only use the limiter if it supports the output sample rate and | 292 // We only use the limiter if it supports the output sample rate and |
| 293 // we're actually mixing multiple streams. | 293 // we're actually mixing multiple streams. |
| 294 use_limiter_ = _numMixedParticipants > 1 && | 294 use_limiter_ = _numMixedParticipants > 1 && |
| 295 _outputFrequency <= kAudioProcMaxNativeSampleRateHz; | 295 _outputFrequency <= kAudioProcMaxNativeSampleRateHz; |
| 296 | 296 |
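A compact restatement of the gating rule above, kept as a free-function sketch; the participant count type and the exact value of kAudioProcMaxNativeSampleRateHz are taken as given from audio_processing.h rather than restated here:

```cpp
#include <cstddef>

// Sketch of the use_limiter_ condition: limiting only pays off when several
// streams are summed (clipping risk) and only when AudioProcessing supports
// the mixer's output rate.
bool ShouldUseLimiter(size_t num_mixed_participants,
                      int output_frequency_hz,
                      int audio_proc_max_native_rate_hz) {
  return num_mixed_participants > 1 &&
         output_frequency_hz <= audio_proc_max_native_rate_hz;
}
```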
| 297 MixFromList(*mixedAudio, &mixList); | 297 MixFromList(*mixedAudio, &mixList); |
| 298 MixAnonomouslyFromList(*mixedAudio, &additionalFramesList); | 298 MixAnonomouslyFromList(*mixedAudio, &additionalFramesList); |
| 299 MixAnonomouslyFromList(*mixedAudio, &rampOutList); | 299 MixAnonomouslyFromList(*mixedAudio, &rampOutList); |
| 300 | 300 |
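The three Mix*FromList() calls accumulate the selected, anonymous, and ramped-out frames into mixedAudio. As a rough sketch of the per-sample operation assumed to sit underneath them (the real mixer also applies ramps and scaling, which this omits):

```cpp
#include <cstddef>
#include <cstdint>
#include <limits>

// Assumed frame layout: interleaved 16-bit PCM samples. Adds |source| into
// |mixed| with saturation, mirroring the core accumulate step of a
// MixFromList-style pass.
void MixAndSaturate(const int16_t* source, int16_t* mixed, size_t samples) {
  for (size_t i = 0; i < samples; ++i) {
    int32_t sum = static_cast<int32_t>(mixed[i]) + source[i];
    if (sum > std::numeric_limits<int16_t>::max()) {
      sum = std::numeric_limits<int16_t>::max();
    } else if (sum < std::numeric_limits<int16_t>::min()) {
      sum = std::numeric_limits<int16_t>::min();
    }
    mixed[i] = static_cast<int16_t>(sum);
  }
}
```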
| (...skipping 49 matching lines...) |
| 350 } | 350 } |
| 351 _mixReceiver = NULL; | 351 _mixReceiver = NULL; |
| 352 return 0; | 352 return 0; |
| 353 } | 353 } |
| 354 | 354 |
| 355 int32_t AudioConferenceMixerImpl::SetOutputFrequency( | 355 int32_t AudioConferenceMixerImpl::SetOutputFrequency( |
| 356 const Frequency frequency) { | 356 const Frequency frequency) { |
| 357 CriticalSectionScoped cs(_crit.get()); | 357 CriticalSectionScoped cs(_crit.get()); |
| 358 | 358 |
| 359 _outputFrequency = frequency; | 359 _outputFrequency = frequency; |
| 360 _sampleSize = (_outputFrequency*kProcessPeriodicityInMs) / 1000; | 360 _sampleSize = |
| | 361         static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000); |
| 361 | 362 |
| 362 return 0; | 363 return 0; |
| 363 } | 364 } |
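A worked example of the _sampleSize arithmetic above, assuming the 10 ms processing period that kProcessPeriodicityInMs is understood to hold upstream:

```cpp
#include <cstddef>

// Assumed processing period: the mixer's 10 ms tick.
constexpr int kProcessPeriodicityInMs = 10;

// Samples produced per channel per tick at a given output rate, matching the
// _sampleSize formula in SetOutputFrequency().
constexpr size_t SamplesPerTick(int frequency_hz) {
  return static_cast<size_t>((frequency_hz * kProcessPeriodicityInMs) / 1000);
}

static_assert(SamplesPerTick(16000) == 160, "16 kHz -> 160 samples per 10 ms");
static_assert(SamplesPerTick(32000) == 320, "32 kHz -> 320 samples per 10 ms");
static_assert(SamplesPerTick(48000) == 480, "48 kHz -> 480 samples per 10 ms");
```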
| 364 | 365 |
| 365 AudioConferenceMixer::Frequency | 366 AudioConferenceMixer::Frequency |
| 366 AudioConferenceMixerImpl::OutputFrequency() const { | 367 AudioConferenceMixerImpl::OutputFrequency() const { |
| 367 CriticalSectionScoped cs(_crit.get()); | 368 CriticalSectionScoped cs(_crit.get()); |
| 368 return _outputFrequency; | 369 return _outputFrequency; |
| 369 } | 370 } |
| 370 | 371 |
| (...skipping 551 matching lines...) |
| 922 | 923 |
| 923 if(error != _limiter->kNoError) { | 924 if(error != _limiter->kNoError) { |
| 924 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 925 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 925 "Error from AudioProcessing: %d", error); | 926 "Error from AudioProcessing: %d", error); |
| 926 assert(false); | 927 assert(false); |
| 927 return false; | 928 return false; |
| 928 } | 929 } |
| 929 return true; | 930 return true; |
| 930 } | 931 } |
| 931 } // namespace webrtc | 932 } // namespace webrtc |