Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" |
| 12 | 12 |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 #include <functional> | 14 #include <functional> |
| 15 | 15 |
| 16 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" | 16 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" |
| 17 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" | 17 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" |
| 18 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 18 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
| 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
| 20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
| 21 #include "webrtc/system_wrappers/include/trace.h" | 21 #include "webrtc/system_wrappers/include/trace.h" |
| 22 #include "webrtc/voice_engine/utility.h" | |
| 22 | 23 |
| 23 namespace webrtc { | 24 namespace webrtc { |
| 24 namespace { | 25 namespace { |
| 25 | 26 |
| 26 class SourceFrame { | 27 class SourceFrame { |
| 27 public: | 28 public: |
| 28 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 29 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) |
| 29 : audio_source_(p), | 30 : audio_source_(p), |
| 30 audio_frame_(a), | 31 audio_frame_(a), |
| 31 muted_(m), | 32 muted_(m), |
| (...skipping 150 matching lines...) | |
| 182 | 183 |
| 183 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) | 184 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) |
| 184 return false; | 185 return false; |
| 185 | 186 |
| 186 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 187 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
| 187 return false; | 188 return false; |
| 188 | 189 |
| 189 return true; | 190 return true; |
| 190 } | 191 } |
| 191 | 192 |
| 192 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 193 void NewAudioConferenceMixerImpl::Mix(int sample_rate, |
| 194 size_t number_of_channels, | |
| 195 void* audio_data) { | |
| 196 AudioFrame* audio_frame_for_mixing = | |
| 197 reinterpret_cast<AudioFrame*>(audio_data); | |
aleloi 2016/08/05 09:52:22: It's still an AudioFrame on the inside yet...
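To make the comment concrete: the buffer behind the new void* parameter is still an AudioFrame, since Mix() immediately reinterpret_casts it back. A minimal caller sketch, assuming a `mixer` pointing at this class and illustrative parameter values not taken from this CL:

```cpp
// Hypothetical caller of the new interface. The void* audio_data must in
// fact point at an AudioFrame, because Mix() casts it back before mixing.
AudioFrame mixed_frame;
mixer->Mix(48000 /* sample_rate */, 1 /* number_of_channels */,
           &mixed_frame /* audio_data, really an AudioFrame */);
```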
| 193 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; | 198 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; |
| 194 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 199 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 195 AudioFrameList mixList; | 200 AudioFrameList mixList; |
| 196 AudioFrameList additionalFramesList; | 201 AudioFrameList additionalFramesList; |
| 197 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 202 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
| 198 { | 203 { |
| 199 CriticalSectionScoped cs(_cbCrit.get()); | 204 CriticalSectionScoped cs(_cbCrit.get()); |
| 200 | 205 |
| 201 int32_t lowFreq = GetLowestMixingFrequency(); | 206 int32_t lowFreq = GetLowestMixingFrequency(); |
| 202 // SILK can run in 12 kHz and 24 kHz. These frequencies are not | 207 // SILK can run in 12 kHz and 24 kHz. These frequencies are not |
| (...skipping 26 matching lines...) | |
| 229 } | 234 } |
| 230 break; | 235 break; |
| 231 case 48000: | 236 case 48000: |
| 232 if (OutputFrequency() != kFbInHz) { | 237 if (OutputFrequency() != kFbInHz) { |
| 233 SetOutputFrequency(kFbInHz); | 238 SetOutputFrequency(kFbInHz); |
| 234 } | 239 } |
| 235 break; | 240 break; |
| 236 default: | 241 default: |
| 237 RTC_NOTREACHED(); | 242 RTC_NOTREACHED(); |
| 238 return; | 243 return; |
| 239 } | 244 } |
aleloi2 2016/08/06 10:13:49: Fingers have been itching to rewrite this for months...
| 240 } | 245 } |
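One shape the rewrite hinted at above could take is replacing the switch with a lookup that rounds the lowest mixing frequency up to the nearest supported rate. This is a sketch only: it assumes the Frequency enum also defines kNbInHz, kWbInHz and kSwbInHz alongside the kFbInHz visible above, and that rounding 12 kHz and 24 kHz upward matches the SILK handling described in the comment before the switch.

```cpp
#include <map>

// Illustrative helper, not part of this CL: map each supported rate to its
// Frequency value; lower_bound rounds unsupported rates (12 kHz, 24 kHz) up
// so no information is lost, and anything above 48 kHz trips the DCHECK,
// mirroring the RTC_NOTREACHED() in the default case above.
Frequency ClosestSupportedFrequency(int32_t low_freq) {
  static const std::map<int32_t, Frequency> kRateToFrequency = {
      {8000, kNbInHz}, {16000, kWbInHz}, {32000, kSwbInHz}, {48000, kFbInHz}};
  const auto it = kRateToFrequency.lower_bound(low_freq);
  RTC_DCHECK(it != kRateToFrequency.end());
  return it->second;
}
```

With such a helper, the block above would reduce to a single comparison against OutputFrequency() followed by SetOutputFrequency().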
| 241 | 246 |
| 242 mixList = UpdateToMix(remainingAudioSourcesAllowedToMix); | 247 mixList = UpdateToMix(remainingAudioSourcesAllowedToMix); |
| 243 remainingAudioSourcesAllowedToMix -= mixList.size(); | 248 remainingAudioSourcesAllowedToMix -= mixList.size(); |
| 244 GetAdditionalAudio(&additionalFramesList); | 249 GetAdditionalAudio(&additionalFramesList); |
| 245 } | 250 } |
| 246 | 251 |
| 247 // TODO(aleloi): it might be better to decide the number of channels | 252 // TODO(aleloi): it might be better to decide the number of channels |
| 248 // with an API instead of dynamically. | 253 // with an API instead of dynamically. |
| 249 | 254 |
| (...skipping 20 matching lines...) | |
| 270 | 275 |
| 271 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 276 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
| 272 // Nothing was mixed, set the audio samples to silence. | 277 // Nothing was mixed, set the audio samples to silence. |
| 273 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; | 278 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; |
| 274 audio_frame_for_mixing->Mute(); | 279 audio_frame_for_mixing->Mute(); |
| 275 } else { | 280 } else { |
| 276 // Only call the limiter if we have something to mix. | 281 // Only call the limiter if we have something to mix. |
| 277 LimitMixedAudio(audio_frame_for_mixing); | 282 LimitMixedAudio(audio_frame_for_mixing); |
| 278 } | 283 } |
| 279 } | 284 } |
| 285 | |
aleloi 2016/08/05 09:52:21: Resampling copied from OutputMixer. It would be nice...
| 286 if (audio_frame_for_mixing->num_channels_ != number_of_channels || | |
| 287 audio_frame_for_mixing->sample_rate_hz_ != sample_rate) { | |
| 288 audio_frame_for_mixing->num_channels_ = number_of_channels; | |
| 289 audio_frame_for_mixing->sample_rate_hz_ = sample_rate; | |
| 290 // TODO(andrew): Ideally the downmixing would occur much earlier, in | |
| 291 // AudioCodingModule. | |
| 292 voe::RemixAndResample(*audio_frame_for_mixing, &resampler_, | |
aleloi 2016/08/05 09:52:21: I *think* it's OK for RemixAndResample to put data...
| 293 audio_frame_for_mixing); | |
| 294 } | |
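If in-place use of RemixAndResample turned out not to be safe, a conservative variant would resample from a temporary copy so the source and destination frames never alias. A sketch under that assumption, relying on AudioFrame::CopyFrom and the same three-argument RemixAndResample call used above:

```cpp
// Sketch: copy the mixed frame first, then remix/resample from the copy into
// the caller-visible frame, so the resampler never reads what it writes.
AudioFrame frame_copy;
frame_copy.CopyFrom(*audio_frame_for_mixing);
audio_frame_for_mixing->num_channels_ = number_of_channels;
audio_frame_for_mixing->sample_rate_hz_ = sample_rate;
voe::RemixAndResample(frame_copy, &resampler_, audio_frame_for_mixing);
```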
| 280 return; | 295 return; |
| 281 } | 296 } |
| 282 | 297 |
| 283 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 298 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
| 284 const Frequency& frequency) { | 299 const Frequency& frequency) { |
| 285 CriticalSectionScoped cs(_crit.get()); | 300 CriticalSectionScoped cs(_crit.get()); |
| 286 | 301 |
| 287 _outputFrequency = frequency; | 302 _outputFrequency = frequency; |
| 288 _sampleSize = | 303 _sampleSize = |
| 289 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 304 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); |
| (...skipping 373 matching lines...) | |
| 663 | 678 |
| 664 if (error != _limiter->kNoError) { | 679 if (error != _limiter->kNoError) { |
| 665 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 680 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 666 "Error from AudioProcessing: %d", error); | 681 "Error from AudioProcessing: %d", error); |
| 667 RTC_NOTREACHED(); | 682 RTC_NOTREACHED(); |
| 668 return false; | 683 return false; |
| 669 } | 684 } |
| 670 return true; | 685 return true; |
| 671 } | 686 } |
| 672 } // namespace webrtc | 687 } // namespace webrtc |