| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license | |
| 5 * that can be found in the LICENSE file in the root of the source | |
| 6 * tree. An additional intellectual property rights grant can be found | |
| 7 * in the file PATENTS. All contributing project authors may | |
| 8 * be found in the AUTHORS file in the root of the source tree. | |
| 9 */ | |
| 10 | |
| 11 #include "webrtc/modules/audio_mixer/audio_mixer.h" | |
| 12 | |
| 13 #include "webrtc/base/format_macros.h" | |
| 14 #include "webrtc/modules/audio_processing/include/audio_processing.h" | |
| 15 #include "webrtc/modules/utility/include/audio_frame_operations.h" | |
| 16 #include "webrtc/system_wrappers/include/file_wrapper.h" | |
| 17 #include "webrtc/system_wrappers/include/trace.h" | |
| 18 #include "webrtc/voice_engine/include/voe_external_media.h" | |
| 19 #include "webrtc/voice_engine/statistics.h" | |
| 20 #include "webrtc/voice_engine/utility.h" | |
| 21 | |
| 22 namespace webrtc { | |
| 23 namespace voe { | |
| 24 | |
// FileCallback: periodic notification during file playout.
// Intentionally a no-op apart from the trace line.
void AudioMixer::PlayNotification(int32_t id, uint32_t durationMs) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::PlayNotification(id=%d, durationMs=%d)", id,
               durationMs);
  // Not implemented yet.
}
| 31 | |
// FileCallback: periodic notification during file recording.
// Intentionally a no-op apart from the trace line.
void AudioMixer::RecordNotification(int32_t id, uint32_t durationMs) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RecordNotification(id=%d, durationMs=%d)", id,
               durationMs);

  // Not implemented yet.
}
| 39 | |
// FileCallback: invoked when a playout file reaches its end.
// The mixer does not play files itself, so no action is required.
void AudioMixer::PlayFileEnded(int32_t id) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::PlayFileEnded(id=%d)", id);

  // Not needed for this class.
}
| 46 | |
// FileCallback: invoked by the file recorder when output-file recording
// has ended (e.g. the recorder shut itself down). Clears the recording
// flag under the file lock; the recorder object itself is released in
// StopRecordingPlayout() or the destructor.
void AudioMixer::RecordFileEnded(int32_t id) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RecordFileEnded(id=%d)", id);
  // The recorder was created with _instanceId as its id (see
  // StartRecordingPlayout), so the callback id must match.
  RTC_DCHECK_EQ(id, _instanceId);

  rtc::CritScope cs(&_fileCritSect);
  _outputFileRecording = false;
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RecordFileEnded() =>"
               "output file recorder module is shutdown");
}
| 58 | |
// Factory: allocates a new AudioMixer and hands ownership to the caller
// through |mixer|. Returns 0 on success, -1 on allocation failure.
// Callers release the object with AudioMixer::Destroy().
int32_t AudioMixer::Create(AudioMixer*& mixer, uint32_t instanceId) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
               "AudioMixer::Create(instanceId=%d)", instanceId);
  mixer = new AudioMixer(instanceId);
  // NOTE(review): plain operator new throws std::bad_alloc rather than
  // returning NULL on standard toolchains, so this branch is likely dead
  // unless the build runs with exceptions disabled -- confirm build flags.
  if (mixer == NULL) {
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
                 "AudioMixer::Create() unable to allocate memory for"
                 "mixer");
    return -1;
  }
  return 0;
}
| 71 | |
// Constructor. _mixerModule is a reference bound to a heap-allocated
// NewAudioConferenceMixer; it is owned by this object and released with
// `delete &_mixerModule` in the destructor. Panning defaults to unity
// (no scaling) and the mixing frequency starts at 8 kHz until the first
// mixed frame updates it.
AudioMixer::AudioMixer(uint32_t instanceId)
    : _mixerModule(*NewAudioConferenceMixer::Create(instanceId)),
      _audioLevel(),
      _instanceId(instanceId),
      _externalMediaCallbackPtr(NULL),
      _externalMedia(false),
      _panLeft(1.0f),
      _panRight(1.0f),
      _mixingFrequencyHz(8000),
      _outputFileRecording(false) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::AudioMixer() - ctor");
}
| 85 | |
| 86 void AudioMixer::Destroy(AudioMixer*& mixer) { | |
| 87 if (mixer) { | |
| 88 delete mixer; | |
| 89 mixer = NULL; | |
| 90 } | |
| 91 } | |
| 92 | |
// Destructor. Unregisters any external media callback, shuts down an
// active output-file recorder under the file lock, and frees the
// conference mixer that the constructor allocated (held by reference,
// hence the `delete &` form).
AudioMixer::~AudioMixer() {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::~AudioMixer() - dtor");
  if (_externalMedia) {
    DeRegisterExternalMediaProcessing();
  }
  {
    rtc::CritScope cs(&_fileCritSect);
    if (_outputFileRecorderPtr) {
      // Drop the callback first so RecordFileEnded() is not invoked on a
      // half-destroyed object, then stop the recording.
      _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
      _outputFileRecorderPtr->StopRecording();
    }
  }
  delete &_mixerModule;
}
| 108 | |
// Stores a pointer to the engine-wide statistics object (externally
// owned) used for SetLastError() reporting. Always returns 0.
int32_t AudioMixer::SetEngineInformation(voe::Statistics& engineStatistics) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::SetEngineInformation()");
  _engineStatisticsPtr = &engineStatistics;
  return 0;
}
| 115 | |
| 116 int32_t AudioMixer::SetAudioProcessingModule( | |
| 117 AudioProcessing* audioProcessingModule) { | |
| 118 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), | |
| 119 "AudioMixer::SetAudioProcessingModule(" | |
| 120 "audioProcessingModule=0x%x)", | |
| 121 audioProcessingModule); | |
| 122 _audioProcessingModulePtr = audioProcessingModule; | |
| 123 return 0; | |
| 124 } | |
| 125 | |
// Registers an external media-processing callback that will be invoked on
// the mixed signal in DoOperationsOnCombinedSignal(). The callback object
// is externally owned. Always returns 0.
int AudioMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess& proccess_object) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::RegisterExternalMediaProcessing()");

  rtc::CritScope cs(&_callbackCritSect);
  _externalMediaCallbackPtr = &proccess_object;
  _externalMedia = true;

  return 0;
}
| 137 | |
// Unregisters the external media-processing callback. Safe to call when
// no callback is registered. Always returns 0.
int AudioMixer::DeRegisterExternalMediaProcessing() {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::DeRegisterExternalMediaProcessing()");

  rtc::CritScope cs(&_callbackCritSect);
  _externalMedia = false;
  _externalMediaCallbackPtr = NULL;

  return 0;
}
| 148 | |
// Adds (mixable=true) or removes (mixable=false) an audio source from the
// underlying conference mixer. Forwards the mixer module's return code.
int32_t AudioMixer::SetMixabilityStatus(MixerAudioSource& audio_source,
                                        bool mixable) {
  return _mixerModule.SetMixabilityStatus(&audio_source, mixable);
}
| 153 | |
// Sets whether an audio source is mixed anonymously by the underlying
// conference mixer. Forwards the mixer module's return code.
int32_t AudioMixer::SetAnonymousMixabilityStatus(MixerAudioSource& audio_source,
                                                 bool mixable) {
  return _mixerModule.SetAnonymousMixabilityStatus(&audio_source, mixable);
}
| 158 | |
// Reports the current speech level of the mixed output via |level|.
// Always returns 0 (success).
int AudioMixer::GetSpeechOutputLevel(uint32_t& level) {
  int8_t currentLevel = _audioLevel.Level();
  // NOTE(review): a negative int8_t would wrap to a huge uint32_t here;
  // presumably AudioLevel::Level() is always non-negative -- confirm.
  level = static_cast<uint32_t>(currentLevel);
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
               "GetSpeechOutputLevel() => level=%u", level);
  return 0;
}
| 166 | |
// Reports the full-range (16-bit) speech level of the mixed output via
// |level|. Always returns 0 (success).
int AudioMixer::GetSpeechOutputLevelFullRange(uint32_t& level) {
  int16_t currentLevel = _audioLevel.LevelFullRange();
  // NOTE(review): as with GetSpeechOutputLevel(), a negative value would
  // wrap on this cast; LevelFullRange() is presumably non-negative.
  level = static_cast<uint32_t>(currentLevel);
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
               "GetSpeechOutputLevelFullRange() => level=%u", level);
  return 0;
}
| 174 | |
| 175 int AudioMixer::SetOutputVolumePan(float left, float right) { | |
| 176 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), | |
| 177 "AudioMixer::SetOutputVolumePan()"); | |
| 178 _panLeft = left; | |
| 179 _panRight = right; | |
| 180 return 0; | |
| 181 } | |
| 182 | |
| 183 int AudioMixer::GetOutputVolumePan(float& left, float& right) { | |
| 184 left = _panLeft; | |
| 185 right = _panRight; | |
| 186 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1), | |
| 187 "GetOutputVolumePan() => left=%2.1f, right=%2.1f", left, right); | |
| 188 return 0; | |
| 189 } | |
| 190 | |
// Starts recording the mixed playout signal to the file |fileName|.
// If |codecInst| is NULL a 16 kHz L16 PCM recording is made; L16/PCMU/PCMA
// are written as WAV, anything else as a compressed file. Returns 0 on
// success (or if already recording), -1 on error.
int AudioMixer::StartRecordingPlayout(const char* fileName,
                                      const CodecInst* codecInst) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::StartRecordingPlayout(fileName=%s)", fileName);

  // NOTE(review): _outputFileRecording is read here before _fileCritSect
  // is taken below -- presumably Start/Stop are only driven from a single
  // API thread; confirm the threading contract.
  if (_outputFileRecording) {
    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                 "StartRecordingPlayout() is already recording");
    return 0;
  }

  FileFormats format;
  const uint32_t notificationTime(0);  // 0 => no periodic notifications.
  // Fallback codec used when the caller passes NULL: 16 kHz mono L16.
  CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

  // Only mono or stereo recordings are supported.
  if ((codecInst != NULL) &&
      ((codecInst->channels < 1) || (codecInst->channels > 2))) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_ARGUMENT, kTraceError,
        "StartRecordingPlayout() invalid compression");
    return (-1);
  }
  if (codecInst == NULL) {
    format = kFileFormatPcm16kHzFile;
    codecInst = &dummyCodec;
  } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) {
    format = kFileFormatWavFile;
  } else {
    format = kFileFormatCompressedFile;
  }

  rtc::CritScope cs(&_fileCritSect);

  // Detach this object from any recorder left over from a previous session
  // so its callbacks can no longer reach us.
  if (_outputFileRecorderPtr) {
    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
  }

  _outputFileRecorderPtr =
      FileRecorder::CreateFileRecorder(_instanceId, (const FileFormats)format);
  if (_outputFileRecorderPtr == NULL) {
    _engineStatisticsPtr->SetLastError(
        VE_INVALID_ARGUMENT, kTraceError,
        "StartRecordingPlayout() fileRecorder format isnot correct");
    return -1;
  }

  if (_outputFileRecorderPtr->StartRecordingAudioFile(
          fileName, (const CodecInst&)*codecInst, notificationTime) != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_FILE, kTraceError,
        "StartRecordingAudioFile() failed to start file recording");
    // Roll back: release the recorder so a later Start can retry cleanly.
    _outputFileRecorderPtr->StopRecording();
    _outputFileRecorderPtr.reset();
    return -1;
  }
  _outputFileRecorderPtr->RegisterModuleFileCallback(this);
  _outputFileRecording = true;

  return 0;
}
| 253 | |
// Starts recording the mixed playout signal to a caller-supplied output
// stream. If |codecInst| is NULL a 16 kHz L16 PCM recording is made.
// Returns 0 on success (or if already recording), -1 on error.
// NOTE(review): unlike the file overload (which accepts 1-2 channels),
// this overload rejects everything but mono -- confirm whether stereo
// stream recording is intentionally unsupported.
int AudioMixer::StartRecordingPlayout(OutStream* stream,
                                      const CodecInst* codecInst) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "AudioMixer::StartRecordingPlayout()");

  // NOTE(review): _outputFileRecording is read before the lock is taken,
  // same caveat as in the file overload.
  if (_outputFileRecording) {
    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                 "StartRecordingPlayout() is already recording");
    return 0;
  }

  FileFormats format;
  const uint32_t notificationTime(0);  // 0 => no periodic notifications.
  // Fallback codec used when the caller passes NULL: 16 kHz mono L16.
  CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

  if (codecInst != NULL && codecInst->channels != 1) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_ARGUMENT, kTraceError,
        "StartRecordingPlayout() invalid compression");
    return (-1);
  }
  if (codecInst == NULL) {
    format = kFileFormatPcm16kHzFile;
    codecInst = &dummyCodec;
  } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) {
    format = kFileFormatWavFile;
  } else {
    format = kFileFormatCompressedFile;
  }

  rtc::CritScope cs(&_fileCritSect);

  // Detach this object from any recorder left over from a previous session
  // so its callbacks can no longer reach us.
  if (_outputFileRecorderPtr) {
    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
  }

  _outputFileRecorderPtr =
      FileRecorder::CreateFileRecorder(_instanceId, (const FileFormats)format);
  if (_outputFileRecorderPtr == NULL) {
    _engineStatisticsPtr->SetLastError(
        VE_INVALID_ARGUMENT, kTraceError,
        "StartRecordingPlayout() fileRecorder format isnot correct");
    return -1;
  }

  if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream, *codecInst,
                                                      notificationTime) != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_BAD_FILE, kTraceError,
        "StartRecordingAudioFile() failed to start file recording");
    // Roll back: release the recorder so a later Start can retry cleanly.
    _outputFileRecorderPtr->StopRecording();
    _outputFileRecorderPtr.reset();
    return -1;
  }

  _outputFileRecorderPtr->RegisterModuleFileCallback(this);
  _outputFileRecording = true;

  return 0;
}
| 316 | |
| 317 int AudioMixer::StopRecordingPlayout() { | |
| 318 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), | |
| 319 "AudioMixer::StopRecordingPlayout()"); | |
| 320 | |
| 321 if (!_outputFileRecording) { | |
| 322 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1), | |
| 323 "StopRecordingPlayout() file isnot recording"); | |
| 324 return -1; | |
| 325 } | |
| 326 | |
| 327 rtc::CritScope cs(&_fileCritSect); | |
| 328 | |
| 329 if (_outputFileRecorderPtr->StopRecording() != 0) { | |
| 330 _engineStatisticsPtr->SetLastError( | |
| 331 VE_STOP_RECORDING_FAILED, kTraceError, | |
| 332 "StopRecording(), could not stop recording"); | |
| 333 return -1; | |
| 334 } | |
| 335 _outputFileRecorderPtr->RegisterModuleFileCallback(NULL); | |
| 336 _outputFileRecorderPtr.reset(); | |
| 337 _outputFileRecording = false; | |
| 338 | |
| 339 return 0; | |
| 340 } | |
| 341 | |
// Produces the mixed output signal into |frame| at the requested sample
// rate and channel count, and feeds the file recorder if recording is
// active. Always returns 0.
int AudioMixer::GetMixedAudio(int sample_rate_hz,
                              size_t num_channels,
                              AudioFrame* frame) {
  WEBRTC_TRACE(
      kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
      "AudioMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%" PRIuS ")",
      sample_rate_hz, num_channels);

  // --- Record playout if enabled
  // NOTE(review): this records the member _audioFrame, not the |frame|
  // mixed below -- so the recorded data is whatever _audioFrame last held
  // (presumably filled via DoOperationsOnCombinedSignal on a previous
  // cycle). Confirm this ordering is intentional.
  {
    rtc::CritScope cs(&_fileCritSect);
    if (_outputFileRecording && _outputFileRecorderPtr)
      _outputFileRecorderPtr->RecordAudioToFile(_audioFrame);
  }

  _mixerModule.Mix(sample_rate_hz, num_channels, frame);

  return 0;
}
| 361 | |
// Post-processes the combined (mixed) signal held in _audioFrame:
// tracks mixing-frequency changes, applies left/right panning, optionally
// feeds the frame to the APM reverse stream, invokes the external media
// callback, and measures the output audio level. Always returns 0.
int32_t AudioMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm) {
  // Log (and remember) the mixing frequency whenever it changes.
  if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz) {
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "AudioMixer::DoOperationsOnCombinedSignal() => "
                 "mixing frequency = %d",
                 _audioFrame.sample_rate_hz_);
    _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
  }

  // Scale left and/or right channel(s) if balance is active
  if (_panLeft != 1.0 || _panRight != 1.0) {
    // Panning needs two channels; widen a mono frame first.
    if (_audioFrame.num_channels_ == 1) {
      AudioFrameOperations::MonoToStereo(&_audioFrame);
    } else {
      // Pure stereo mode (we are receiving a stereo signal).
    }

    RTC_DCHECK_EQ(_audioFrame.num_channels_, static_cast<size_t>(2));
    AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
  }

  // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
  // NOTE(review): _audioProcessingModulePtr is dereferenced without a NULL
  // check; callers presumably guarantee SetAudioProcessingModule() was
  // called with a valid pointer before passing feed_data_to_apm=true.
  if (feed_data_to_apm) {
    if (_audioProcessingModulePtr->ProcessReverseStream(&_audioFrame) != 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                   "AudioProcessingModule::ProcessReverseStream() => error");
      RTC_DCHECK(false);
    }
  }

  // --- External media processing
  {
    rtc::CritScope cs(&_callbackCritSect);
    if (_externalMedia) {
      const bool is_stereo = (_audioFrame.num_channels_ == 2);
      if (_externalMediaCallbackPtr) {
        // -1: no channel id; the callback sees the fully mixed signal.
        _externalMediaCallbackPtr->Process(
            -1, kPlaybackAllChannelsMixed,
            reinterpret_cast<int16_t*>(_audioFrame.data_),
            _audioFrame.samples_per_channel_, _audioFrame.sample_rate_hz_,
            is_stereo);
      }
    }
  }

  // --- Measure audio level (0-9) for the combined signal
  _audioLevel.ComputeLevel(_audioFrame);

  return 0;
}
| 412 } // namespace voe | |
| 413 } // namespace webrtc | |
| OLD | NEW |