OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 273 matching lines...)
284 MaxNumChannels(&rampOutList))); | 284 MaxNumChannels(&rampOutList))); |
285 | 285 |
286 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | 286 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, |
287 AudioFrame::kNormalSpeech, | 287 AudioFrame::kNormalSpeech, |
288 AudioFrame::kVadPassive, num_mixed_channels); | 288 AudioFrame::kVadPassive, num_mixed_channels); |
289 | 289 |
290 _timeStamp += static_cast<uint32_t>(_sampleSize); | 290 _timeStamp += static_cast<uint32_t>(_sampleSize); |
291 | 291 |
292 // We only use the limiter if it supports the output sample rate and | 292 // We only use the limiter if it supports the output sample rate and |
293 // we're actually mixing multiple streams. | 293 // we're actually mixing multiple streams. |
294 use_limiter_ = _numMixedParticipants > 1 && | 294 use_limiter_ = |
295 _outputFrequency <= kAudioProcMaxNativeSampleRateHz; | 295 _numMixedParticipants > 1 && |
| 296 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
296 | 297 |
297 MixFromList(*mixedAudio, &mixList); | 298 MixFromList(*mixedAudio, &mixList); |
298 MixAnonomouslyFromList(*mixedAudio, &additionalFramesList); | 299 MixAnonomouslyFromList(*mixedAudio, &additionalFramesList); |
299 MixAnonomouslyFromList(*mixedAudio, &rampOutList); | 300 MixAnonomouslyFromList(*mixedAudio, &rampOutList); |
300 | 301 |
301 if(mixedAudio->samples_per_channel_ == 0) { | 302 if(mixedAudio->samples_per_channel_ == 0) { |
302 // Nothing was mixed, set the audio samples to silence. | 303 // Nothing was mixed, set the audio samples to silence. |
303 mixedAudio->samples_per_channel_ = _sampleSize; | 304 mixedAudio->samples_per_channel_ = _sampleSize; |
304 mixedAudio->Mute(); | 305 mixedAudio->Mute(); |
305 } else { | 306 } else { |
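Reviewer note on the hunk above: the substantive change is that the limiter gate now reads the rate cap from AudioProcessing::kMaxNativeSampleRateHz instead of a local kAudioProcMaxNativeSampleRateHz constant. Below is a minimal sketch of that same condition pulled out into a standalone helper, for illustration only; ShouldUseLimiter() and its parameters are hypothetical and not part of the WebRTC source.

    // Sketch, not WebRTC code: isolates the use_limiter_ condition from the
    // hunk above. The caller would pass AudioProcessing::kMaxNativeSampleRateHz
    // (or an equivalent cap) as max_native_sample_rate_hz.
    namespace {

    bool ShouldUseLimiter(int num_mixed_participants,
                          int output_frequency_hz,
                          int max_native_sample_rate_hz) {
      // The limiter only matters when more than one stream is summed
      // (a single stream cannot clip from mixing), and it can only run
      // at sample rates the audio processing module natively supports.
      return num_mixed_participants > 1 &&
             output_frequency_hz <= max_native_sample_rate_hz;
    }

    }  // namespace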
(...skipping 617 matching lines...)
923 | 924 |
924 if(error != _limiter->kNoError) { | 925 if(error != _limiter->kNoError) { |
925 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 926 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
926 "Error from AudioProcessing: %d", error); | 927 "Error from AudioProcessing: %d", error); |
927 assert(false); | 928 assert(false); |
928 return false; | 929 return false; |
929 } | 930 } |
930 return true; | 931 return true; |
931 } | 932 } |
932 } // namespace webrtc | 933 } // namespace webrtc |
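For context on the tail of the second hunk: when the limiter reports anything other than kNoError, the code logs the error via WEBRTC_TRACE, trips an assert in debug builds, and returns false to the caller; otherwise it returns true. A self-contained sketch of that same error-handling shape follows, with hypothetical stand-in names (Limiter, RunLimiter); the real code calls into webrtc::AudioProcessing and logs through WEBRTC_TRACE.

    // Sketch, not WebRTC code: mirrors the check-log-assert-return pattern above.
    #include <cassert>
    #include <cstdio>

    struct Limiter {
      static const int kNoError = 0;
      int Process() { return kNoError; }  // placeholder for the real limiter call
    };

    bool RunLimiter(Limiter* limiter) {
      const int error = limiter->Process();
      if (error != Limiter::kNoError) {
        // Log the failing error code, halt debug builds, and report failure
        // to the caller in release builds.
        std::fprintf(stderr, "Error from AudioProcessing: %d\n", error);
        assert(false);
        return false;
      }
      return true;
    }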