| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 42 matching lines...) |
| 53 int MaxNumChannels(const AudioFrameList* list) { | 53 int MaxNumChannels(const AudioFrameList* list) { |
| 54 int max_num_channels = 1; | 54 int max_num_channels = 1; |
| 55 for (AudioFrameList::const_iterator iter = list->begin(); | 55 for (AudioFrameList::const_iterator iter = list->begin(); |
| 56 iter != list->end(); | 56 iter != list->end(); |
| 57 ++iter) { | 57 ++iter) { |
| 58 max_num_channels = std::max(max_num_channels, (*iter)->num_channels_); | 58 max_num_channels = std::max(max_num_channels, (*iter)->num_channels_); |
| 59 } | 59 } |
| 60 return max_num_channels; | 60 return max_num_channels; |
| 61 } | 61 } |
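A quick way to see what the helper above does (a hypothetical check, not part of this CL; it assumes AudioFrameList is the std::list of AudioFrame pointers this file already uses, and that num_channels_ is the public AudioFrame member shown):

```cpp
// Mirrors how Process() later picks the widest stream across the mix lists.
AudioFrame mono;
AudioFrame stereo;
mono.num_channels_ = 1;
stereo.num_channels_ = 2;

AudioFrameList frames;  // Assumed: std::list<AudioFrame*>.
frames.push_back(&mono);
frames.push_back(&stereo);

assert(MaxNumChannels(&frames) == 2);  // The stereo frame wins; default is 1.
```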
| 62 | 62 |
| 63 void SetParticipantStatistics(ParticipantStatistics* stats, | |
| 64 const AudioFrame& frame) { | |
| 65 stats->participant = frame.id_; | |
| 66 stats->level = 0; // TODO(andrew): to what should this be set? | |
| 67 } | |
| 68 | |
| 69 } // namespace | 63 } // namespace |
| 70 | 64 |
| 71 MixerParticipant::MixerParticipant() | 65 MixerParticipant::MixerParticipant() |
| 72 : _mixHistory(new MixHistory()) { | 66 : _mixHistory(new MixHistory()) { |
| 73 } | 67 } |
| 74 | 68 |
| 75 MixerParticipant::~MixerParticipant() { | 69 MixerParticipant::~MixerParticipant() { |
| 76 delete _mixHistory; | 70 delete _mixHistory; |
| 77 } | 71 } |
| 78 | 72 |
| (...skipping 31 matching lines...) |
| 110 AudioConferenceMixer* AudioConferenceMixer::Create(int id) { | 104 AudioConferenceMixer* AudioConferenceMixer::Create(int id) { |
| 111 AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id); | 105 AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id); |
| 112 if(!mixer->Init()) { | 106 if(!mixer->Init()) { |
| 113 delete mixer; | 107 delete mixer; |
| 114 return NULL; | 108 return NULL; |
| 115 } | 109 } |
| 116 return mixer; | 110 return mixer; |
| 117 } | 111 } |
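Caller-side usage of the factory is unchanged; a minimal sketch (assumed caller code, not part of this file; ownership of the returned pointer is assumed to lie with the caller, matching the NULL-on-failure contract above):

```cpp
// Hypothetical caller: create the mixer, bail out if Init() failed inside
// Create(), and release it when done.
bool CreateAndDestroyMixer() {
  AudioConferenceMixer* mixer = AudioConferenceMixer::Create(/*id=*/0);
  if (mixer == NULL) {
    return false;  // Create() returns NULL when Init() fails.
  }
  // ... register an output receiver, add participants, drive Process() ...
  delete mixer;
  return true;
}
```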
| 118 | 112 |
| 119 AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id) | 113 AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id) |
| 120 : _scratchParticipantsToMixAmount(0), | 114 : _id(id), |
| 121 _scratchMixedParticipants(), | |
| 122 _scratchVadPositiveParticipantsAmount(0), | |
| 123 _scratchVadPositiveParticipants(), | |
| 124 _id(id), | |
| 125 _minimumMixingFreq(kLowestPossible), | 115 _minimumMixingFreq(kLowestPossible), |
| 126 _mixReceiver(NULL), | 116 _mixReceiver(NULL), |
| 127 _mixerStatusCallback(NULL), | |
| 128 _amountOf10MsBetweenCallbacks(1), | |
| 129 _amountOf10MsUntilNextCallback(0), | |
| 130 _mixerStatusCb(false), | |
| 131 _outputFrequency(kDefaultFrequency), | 117 _outputFrequency(kDefaultFrequency), |
| 132 _sampleSize(0), | 118 _sampleSize(0), |
| 133 _audioFramePool(NULL), | 119 _audioFramePool(NULL), |
| 134 _participantList(), | 120 _participantList(), |
| 135 _additionalParticipantList(), | 121 _additionalParticipantList(), |
| 136 _numMixedParticipants(0), | 122 _numMixedParticipants(0), |
| 137 use_limiter_(true), | 123 use_limiter_(true), |
| 138 _timeStamp(0), | 124 _timeStamp(0), |
| 139 _timeScheduler(kProcessPeriodicityInMs), | 125 _timeScheduler(kProcessPeriodicityInMs), |
| 140 _mixedAudioLevel(), | |
| 141 _processCalls(0) {} | 126 _processCalls(0) {} |
| 142 | 127 |
| 143 bool AudioConferenceMixerImpl::Init() { | 128 bool AudioConferenceMixerImpl::Init() { |
| 144 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 129 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
| 145 if (_crit.get() == NULL) | 130 if (_crit.get() == NULL) |
| 146 return false; | 131 return false; |
| 147 | 132 |
| 148 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); | 133 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); |
| 149 if(_cbCrit.get() == NULL) | 134 if(_cbCrit.get() == NULL) |
| 150 return false; | 135 return false; |
| (...skipping 117 matching lines...) |
| 268 _processCalls--; | 253 _processCalls--; |
| 269 return -1; | 254 return -1; |
| 270 } | 255 } |
| 271 } | 256 } |
| 272 | 257 |
| 273 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, | 258 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, |
| 274 remainingParticipantsAllowedToMix); | 259 remainingParticipantsAllowedToMix); |
| 275 | 260 |
| 276 GetAdditionalAudio(&additionalFramesList); | 261 GetAdditionalAudio(&additionalFramesList); |
| 277 UpdateMixedStatus(mixedParticipantsMap); | 262 UpdateMixedStatus(mixedParticipantsMap); |
| 278 _scratchParticipantsToMixAmount = mixedParticipantsMap.size(); | |
| 279 } | 263 } |
| 280 | 264 |
| 281 // Get an AudioFrame for mixing from the memory pool. | 265 // Get an AudioFrame for mixing from the memory pool. |
| 282 AudioFrame* mixedAudio = NULL; | 266 AudioFrame* mixedAudio = NULL; |
| 283 if(_audioFramePool->PopMemory(mixedAudio) == -1) { | 267 if(_audioFramePool->PopMemory(mixedAudio) == -1) { |
| 284 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 268 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
| 285 "failed PopMemory() call"); | 269 "failed PopMemory() call"); |
| 286 assert(false); | 270 assert(false); |
| 287 return -1; | 271 return -1; |
| 288 } | 272 } |
| 289 | 273 |
| 290 bool timeForMixerCallback = false; | |
| 291 int retval = 0; | 274 int retval = 0; |
| 292 int32_t audioLevel = 0; | |
| 293 { | 275 { |
| 294 CriticalSectionScoped cs(_crit.get()); | 276 CriticalSectionScoped cs(_crit.get()); |
| 295 | 277 |
| 296 // TODO(henrike): it might be better to decide the number of channels | 278 // TODO(henrike): it might be better to decide the number of channels |
| 297 // with an API instead of dynamically. | 279 // with an API instead of dynamically. |
| 298 | 280 |
| 299 // Find the max channels over all mixing lists. | 281 // Find the max channels over all mixing lists. |
| 300 const int num_mixed_channels = std::max(MaxNumChannels(&mixList), | 282 const int num_mixed_channels = std::max(MaxNumChannels(&mixList), |
| 301 std::max(MaxNumChannels(&additionalFramesList), | 283 std::max(MaxNumChannels(&additionalFramesList), |
| 302 MaxNumChannels(&rampOutList))); | 284 MaxNumChannels(&rampOutList))); |
| (...skipping 15 matching lines...) |
| 318 | 300 |
| 319 if(mixedAudio->samples_per_channel_ == 0) { | 301 if(mixedAudio->samples_per_channel_ == 0) { |
| 320 // Nothing was mixed, set the audio samples to silence. | 302 // Nothing was mixed, set the audio samples to silence. |
| 321 mixedAudio->samples_per_channel_ = _sampleSize; | 303 mixedAudio->samples_per_channel_ = _sampleSize; |
| 322 mixedAudio->Mute(); | 304 mixedAudio->Mute(); |
| 323 } else { | 305 } else { |
| 324 // Only call the limiter if we have something to mix. | 306 // Only call the limiter if we have something to mix. |
| 325 if(!LimitMixedAudio(*mixedAudio)) | 307 if(!LimitMixedAudio(*mixedAudio)) |
| 326 retval = -1; | 308 retval = -1; |
| 327 } | 309 } |
| 328 | |
| 329 _mixedAudioLevel.ComputeLevel(mixedAudio->data_,_sampleSize); | |
| 330 audioLevel = _mixedAudioLevel.GetLevel(); | |
| 331 | |
| 332 if(_mixerStatusCb) { | |
| 333 _scratchVadPositiveParticipantsAmount = 0; | |
| 334 UpdateVADPositiveParticipants(&mixList); | |
| 335 if(_amountOf10MsUntilNextCallback-- == 0) { | |
| 336 _amountOf10MsUntilNextCallback = _amountOf10MsBetweenCallbacks; | |
| 337 timeForMixerCallback = true; | |
| 338 } | |
| 339 } | |
| 340 } | 310 } |
| 341 | 311 |
| 342 { | 312 { |
| 343 CriticalSectionScoped cs(_cbCrit.get()); | 313 CriticalSectionScoped cs(_cbCrit.get()); |
| 344 if(_mixReceiver != NULL) { | 314 if(_mixReceiver != NULL) { |
| 345 const AudioFrame** dummy = NULL; | 315 const AudioFrame** dummy = NULL; |
| 346 _mixReceiver->NewMixedAudio( | 316 _mixReceiver->NewMixedAudio( |
| 347 _id, | 317 _id, |
| 348 *mixedAudio, | 318 *mixedAudio, |
| 349 dummy, | 319 dummy, |
| 350 0); | 320 0); |
| 351 } | 321 } |
| 352 | |
| 353 if((_mixerStatusCallback != NULL) && | |
| 354 timeForMixerCallback) { | |
| 355 _mixerStatusCallback->MixedParticipants( | |
| 356 _id, | |
| 357 _scratchMixedParticipants, | |
| 358 static_cast<uint32_t>(_scratchParticipantsToMixAmount)); | |
| 359 | |
| 360 _mixerStatusCallback->VADPositiveParticipants( | |
| 361 _id, | |
| 362 _scratchVadPositiveParticipants, | |
| 363 _scratchVadPositiveParticipantsAmount); | |
| 364 _mixerStatusCallback->MixedAudioLevel(_id,audioLevel); | |
| 365 } | |
| 366 } | 322 } |
| 367 | 323 |
| 368 // Reclaim all outstanding memory. | 324 // Reclaim all outstanding memory. |
| 369 _audioFramePool->PushMemory(mixedAudio); | 325 _audioFramePool->PushMemory(mixedAudio); |
| 370 ClearAudioFrameList(&mixList); | 326 ClearAudioFrameList(&mixList); |
| 371 ClearAudioFrameList(&rampOutList); | 327 ClearAudioFrameList(&rampOutList); |
| 372 ClearAudioFrameList(&additionalFramesList); | 328 ClearAudioFrameList(&additionalFramesList); |
| 373 { | 329 { |
| 374 CriticalSectionScoped cs(_crit.get()); | 330 CriticalSectionScoped cs(_crit.get()); |
| 375 _processCalls--; | 331 _processCalls--; |
| (...skipping 29 matching lines...) |
| 405 | 361 |
| 406 return 0; | 362 return 0; |
| 407 } | 363 } |
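For context (not shown in this CL): the mixer is driven as a module, with Process() expected roughly every kProcessPeriodicityInMs (10 ms, per the constructor above). A rough hand-rolled driver could look like the sketch below; SleepMs and keep_running are placeholders for whatever scheduling the host already has (normally a process thread).

```cpp
// Hypothetical pump loop; the 10 ms period matches kProcessPeriodicityInMs.
void PumpMixer(AudioConferenceMixer* mixer, volatile bool* keep_running) {
  while (*keep_running) {
    mixer->Process();  // Mixes one block and delivers it through the
                       // registered AudioMixerOutputReceiver, if any.
    SleepMs(10);       // Placeholder for the host's timing source.
  }
}
```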
| 408 | 364 |
| 409 AudioConferenceMixer::Frequency | 365 AudioConferenceMixer::Frequency |
| 410 AudioConferenceMixerImpl::OutputFrequency() const { | 366 AudioConferenceMixerImpl::OutputFrequency() const { |
| 411 CriticalSectionScoped cs(_crit.get()); | 367 CriticalSectionScoped cs(_crit.get()); |
| 412 return _outputFrequency; | 368 return _outputFrequency; |
| 413 } | 369 } |
| 414 | 370 |
| 415 int32_t AudioConferenceMixerImpl::RegisterMixerStatusCallback( | |
| 416 AudioMixerStatusReceiver& mixerStatusCallback, | |
| 417 const uint32_t amountOf10MsBetweenCallbacks) { | |
| 418 if(amountOf10MsBetweenCallbacks == 0) { | |
| 419 WEBRTC_TRACE( | |
| 420 kTraceWarning, | |
| 421 kTraceAudioMixerServer, | |
| 422 _id, | |
| 423 "amountOf10MsBetweenCallbacks(%d) needs to be larger than 0"); | |
| 424 return -1; | |
| 425 } | |
| 426 { | |
| 427 CriticalSectionScoped cs(_cbCrit.get()); | |
| 428 if(_mixerStatusCallback != NULL) { | |
| 429 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | |
| 430 "Mixer status callback already registered"); | |
| 431 return -1; | |
| 432 } | |
| 433 _mixerStatusCallback = &mixerStatusCallback; | |
| 434 } | |
| 435 { | |
| 436 CriticalSectionScoped cs(_crit.get()); | |
| 437 _amountOf10MsBetweenCallbacks = amountOf10MsBetweenCallbacks; | |
| 438 _amountOf10MsUntilNextCallback = 0; | |
| 439 _mixerStatusCb = true; | |
| 440 } | |
| 441 return 0; | |
| 442 } | |
| 443 | |
| 444 int32_t AudioConferenceMixerImpl::UnRegisterMixerStatusCallback() { | |
| 445 { | |
| 446 CriticalSectionScoped cs(_crit.get()); | |
| 447 if(!_mixerStatusCb) | |
| 448 { | |
| 449 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | |
| 450 "Mixer status callback not registered"); | |
| 451 return -1; | |
| 452 } | |
| 453 _mixerStatusCb = false; | |
| 454 } | |
| 455 { | |
| 456 CriticalSectionScoped cs(_cbCrit.get()); | |
| 457 _mixerStatusCallback = NULL; | |
| 458 } | |
| 459 return 0; | |
| 460 } | |
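With the status callbacks gone, a consumer that still wants a coarse mixed-audio level can derive one from the frame it already receives. A sketch of such a receiver follows (hypothetical, not part of this CL; the NewMixedAudio signature is inferred from the call site in Process() above, and the average-absolute measure is only a stand-in for the removed MixedAudioLevel() value):

```cpp
// Hypothetical receiver that tracks a per-frame level locally instead of
// relying on the removed AudioMixerStatusReceiver callbacks.
class LevelTrackingReceiver : public AudioMixerOutputReceiver {
 public:
  LevelTrackingReceiver() : last_level_(0) {}

  virtual void NewMixedAudio(const int32_t id,
                             const AudioFrame& generalAudioFrame,
                             const AudioFrame** uniqueAudioFrames,
                             const uint32_t size) {
    const int num_samples = generalAudioFrame.samples_per_channel_ *
                            generalAudioFrame.num_channels_;
    int64_t sum = 0;
    for (int i = 0; i < num_samples; ++i) {
      const int sample = generalAudioFrame.data_[i];
      sum += (sample < 0) ? -sample : sample;  // Average absolute amplitude.
    }
    last_level_ = (num_samples > 0) ? static_cast<int>(sum / num_samples) : 0;
  }

  int last_level() const { return last_level_; }

 private:
  int last_level_;
};
```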
| 461 | |
| 462 int32_t AudioConferenceMixerImpl::SetMixabilityStatus( | 371 int32_t AudioConferenceMixerImpl::SetMixabilityStatus( |
| 463 MixerParticipant& participant, | 372 MixerParticipant& participant, |
| 464 bool mixable) { | 373 bool mixable) { |
| 465 if (!mixable) { | 374 if (!mixable) { |
| 466 // Anonymous participants are in a separate list. Make sure that the | 375 // Anonymous participants are in a separate list. Make sure that the |
| 467 // participant is in the _participantList if it is being mixed. | 376 // participant is in the _participantList if it is being mixed. |
| 468 SetAnonymousMixabilityStatus(participant, false); | 377 SetAnonymousMixabilityStatus(participant, false); |
| 469 } | 378 } |
| 470 size_t numMixedParticipants; | 379 size_t numMixedParticipants; |
| 471 { | 380 { |
| (...skipping 407 matching lines...) |
| 879 | 788 |
| 880 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants( | 789 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants( |
| 881 AudioFrameList* mixList) { | 790 AudioFrameList* mixList) { |
| 882 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 791 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 883 "UpdateVADPositiveParticipants(mixList)"); | 792 "UpdateVADPositiveParticipants(mixList)"); |
| 884 | 793 |
| 885 for (AudioFrameList::iterator iter = mixList->begin(); | 794 for (AudioFrameList::iterator iter = mixList->begin(); |
| 886 iter != mixList->end(); | 795 iter != mixList->end(); |
| 887 ++iter) { | 796 ++iter) { |
| 888 CalculateEnergy(**iter); | 797 CalculateEnergy(**iter); |
| 889 if((*iter)->vad_activity_ == AudioFrame::kVadActive) { | |
| 890 _scratchVadPositiveParticipants[ | |
| 891 _scratchVadPositiveParticipantsAmount].participant = | |
| 892 (*iter)->id_; | |
| 893 // TODO(andrew): to what should this be set? | |
| 894 _scratchVadPositiveParticipants[ | |
| 895 _scratchVadPositiveParticipantsAmount].level = 0; | |
| 896 _scratchVadPositiveParticipantsAmount++; | |
| 897 } | |
| 898 } | 798 } |
| 899 } | 799 } |
| 900 | 800 |
| 901 bool AudioConferenceMixerImpl::IsParticipantInList( | 801 bool AudioConferenceMixerImpl::IsParticipantInList( |
| 902 MixerParticipant& participant, | 802 MixerParticipant& participant, |
| 903 MixerParticipantList* participantList) const { | 803 MixerParticipantList* participantList) const { |
| 904 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 804 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 905 "IsParticipantInList(participant,participantList)"); | 805 "IsParticipantInList(participant,participantList)"); |
| 906 for (MixerParticipantList::const_iterator iter = participantList->begin(); | 806 for (MixerParticipantList::const_iterator iter = participantList->begin(); |
| 907 iter != participantList->end(); | 807 iter != participantList->end(); |
| (...skipping 62 matching lines...) |
| 970 kTraceAudioMixerServer, | 870 kTraceAudioMixerServer, |
| 971 _id, | 871 _id, |
| 972 "Trying to mix more than max amount of mixed participants:%d!", | 872 "Trying to mix more than max amount of mixed participants:%d!", |
| 973 kMaximumAmountOfMixedParticipants); | 873 kMaximumAmountOfMixedParticipants); |
| 974 // Assert and avoid crash | 874 // Assert and avoid crash |
| 975 assert(false); | 875 assert(false); |
| 976 position = 0; | 876 position = 0; |
| 977 } | 877 } |
| 978 MixFrames(&mixedAudio, (*iter), use_limiter_); | 878 MixFrames(&mixedAudio, (*iter), use_limiter_); |
| 979 | 879 |
| 980 SetParticipantStatistics(&_scratchMixedParticipants[position], | |
| 981 **iter); | |
| 982 | |
| 983 position++; | 880 position++; |
| 984 } | 881 } |
| 985 | 882 |
| 986 return 0; | 883 return 0; |
| 987 } | 884 } |
| 988 | 885 |
| 989 // TODO(andrew): consolidate this function with MixFromList. | 886 // TODO(andrew): consolidate this function with MixFromList. |
| 990 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList( | 887 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList( |
| 991 AudioFrame& mixedAudio, | 888 AudioFrame& mixedAudio, |
| 992 const AudioFrameList* audioFrameList) { | 889 const AudioFrameList* audioFrameList) { |
| (...skipping 32 matching lines...) |
| 1025 | 922 |
| 1026 if(error != _limiter->kNoError) { | 923 if(error != _limiter->kNoError) { |
| 1027 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 924 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 1028 "Error from AudioProcessing: %d", error); | 925 "Error from AudioProcessing: %d", error); |
| 1029 assert(false); | 926 assert(false); |
| 1030 return false; | 927 return false; |
| 1031 } | 928 } |
| 1032 return true; | 929 return true; |
| 1033 } | 930 } |
| 1034 } // namespace webrtc | 931 } // namespace webrtc |