Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 93 matching lines...) | |
| 104 if (!mixer->Init()) { | 104 if (!mixer->Init()) { |
| 105 delete mixer; | 105 delete mixer; |
| 106 return NULL; | 106 return NULL; |
| 107 } | 107 } |
| 108 return mixer; | 108 return mixer; |
| 109 } | 109 } |
| 110 | 110 |
| 111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) | 111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) |
| 112 : _id(id), | 112 : _id(id), |
| 113 _minimumMixingFreq(kLowestPossible), | 113 _minimumMixingFreq(kLowestPossible), |
| 114 _mixReceiver(NULL), | |
| 115 _outputFrequency(kDefaultFrequency), | 114 _outputFrequency(kDefaultFrequency), |
| 116 _sampleSize(0), | 115 _sampleSize(0), |
| 117 _audioFramePool(NULL), | 116 _audioFramePool(NULL), |
| 118 _participantList(), | 117 _participantList(), |
| 119 _additionalParticipantList(), | 118 _additionalParticipantList(), |
| 120 _numMixedParticipants(0), | 119 _numMixedParticipants(0), |
| 121 use_limiter_(true), | 120 use_limiter_(true), |
| 122 _timeStamp(0), | 121 _timeStamp(0), |
| 123 _timeScheduler(kProcessPeriodicityInMs), | 122 _timeScheduler(kProcessPeriodicityInMs), |
| 124 _processCalls(0) {} | 123 _processCalls(0) {} |
| (...skipping 57 matching lines...) | |
| 182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 181 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 183 "failed in TimeToNextUpdate() call"); | 182 "failed in TimeToNextUpdate() call"); |
| 184 // Sanity check | 183 // Sanity check |
| 185 assert(false); | 184 assert(false); |
| 186 return -1; | 185 return -1; |
| 187 } | 186 } |
| 188 return timeUntilNextProcess; | 187 return timeUntilNextProcess; |
| 189 } | 188 } |
| 190 | 189 |
| 191 void NewAudioConferenceMixerImpl::Process() { | 190 void NewAudioConferenceMixerImpl::Process() { |
| 191 RTC_NOTREACHED(); | |
tommi 2016/07/06 19:42:17: todo to remove?
aleloi 2016/07/07 09:20:16: Done. (There is also a dependent CL https://codere
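For context on this thread: the CL turns Process() into a stub and replaces the push-style callback path (RegisterMixedStreamCallback() feeding OldAudioMixerOutputReceiver::NewMixedAudio(), both deleted further down in this file) with the pull-style Mix(AudioFrame*) introduced just below. A minimal caller-side sketch of the new usage; the include paths and the fact that the caller holds a NewAudioConferenceMixerImpl pointer directly are assumptions, not taken from this review:

```cpp
// Sketch only: the call pattern follows from the new Mix() signature in this
// diff, but the include paths and the pointer type held by the caller are
// assumptions.
#include "webrtc/modules/audio_mixer/new_audio_conference_mixer_impl.h"  // assumed path
#include "webrtc/modules/include/module_common_types.h"                  // assumed, for AudioFrame

void PullMixedAudio(webrtc::NewAudioConferenceMixerImpl* mixer) {
  webrtc::AudioFrame mixed_frame;
  // The caller owns the output buffer and asks for a mix on demand, instead
  // of registering a receiver and waiting for NewMixedAudio() from Process().
  mixer->Mix(&mixed_frame);
}
```

This is also why the _audioFramePool PopMemory()/PushMemory() round-trip disappears later in the function: the output frame is now supplied by the caller rather than drawn from the mixer's pool.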
| 192 } | |
| 193 | |
| 194 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | |
| 192 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; | 195 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; |
| 193 { | 196 { |
| 194 CriticalSectionScoped cs(_crit.get()); | 197 CriticalSectionScoped cs(_crit.get()); |
| 195 assert(_processCalls == 0); | 198 assert(_processCalls == 0); |
tommi 2016/07/06 19:42:17: can you change all assert()'s in this file to RTC_
aleloi 2016/07/07 09:20:16: Done.
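The suggestion above is truncated after "RTC_". Assuming it refers to the RTC_DCHECK macros in webrtc/base/checks.h (an assumption; the review text is cut off), the change for the line under discussion would look roughly like this:

```cpp
// Sketch only; assumes the truncated "RTC_" means RTC_DCHECK/RTC_DCHECK_EQ
// from webrtc/base/checks.h.
#include "webrtc/base/checks.h"

void CheckNoReentrantProcessCall(int process_calls) {
  // Before: assert(process_calls == 0);
  // After: same debug-only check, routed through WebRTC's own checking
  // machinery, which also logs the failing condition.
  RTC_DCHECK_EQ(process_calls, 0);
}
```

Like assert(), RTC_DCHECK is normally compiled out of release builds, so the change mainly affects how failures are reported.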
| 196 _processCalls++; | 199 _processCalls++; |
| 197 | 200 |
| 198 // Let the scheduler know that we are running one iteration. | 201 // Let the scheduler know that we are running one iteration. |
| 199 _timeScheduler.UpdateScheduler(); | 202 _timeScheduler.UpdateScheduler(); |
| 200 } | 203 } |
| 201 | 204 |
| 202 AudioFrameList mixList; | 205 AudioFrameList mixList; |
| 203 AudioFrameList rampOutList; | 206 AudioFrameList rampOutList; |
| 204 AudioFrameList additionalFramesList; | 207 AudioFrameList additionalFramesList; |
| 205 std::map<int, MixerAudioSource*> mixedParticipantsMap; | 208 std::map<int, MixerAudioSource*> mixedParticipantsMap; |
| (...skipping 46 matching lines...) | |
| 252 } | 255 } |
| 253 } | 256 } |
| 254 | 257 |
| 255 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, | 258 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, |
| 256 &remainingParticipantsAllowedToMix); | 259 &remainingParticipantsAllowedToMix); |
| 257 | 260 |
| 258 GetAdditionalAudio(&additionalFramesList); | 261 GetAdditionalAudio(&additionalFramesList); |
| 259 UpdateMixedStatus(mixedParticipantsMap); | 262 UpdateMixedStatus(mixedParticipantsMap); |
| 260 } | 263 } |
| 261 | 264 |
| 262 // Get an AudioFrame for mixing from the memory pool. | |
| 263 AudioFrame* mixedAudio = NULL; | |
| 264 if (_audioFramePool->PopMemory(mixedAudio) == -1) { | |
| 265 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | |
| 266 "failed PopMemory() call"); | |
| 267 assert(false); | |
| 268 return; | |
| 269 } | |
| 270 | |
| 271 { | 265 { |
| 272 CriticalSectionScoped cs(_crit.get()); | 266 CriticalSectionScoped cs(_crit.get()); |
| 273 | 267 |
| 274 // TODO(henrike): it might be better to decide the number of channels | 268 // TODO(henrike): it might be better to decide the number of channels |
| 275 // with an API instead of dynamically. | 269 // with an API instead of dynamically. |
| 276 | 270 |
| 277 // Find the max channels over all mixing lists. | 271 // Find the max channels over all mixing lists. |
| 278 const size_t num_mixed_channels = | 272 const size_t num_mixed_channels = |
| 279 std::max(MaxNumChannels(&mixList), | 273 std::max(MaxNumChannels(&mixList), |
| 280 std::max(MaxNumChannels(&additionalFramesList), | 274 std::max(MaxNumChannels(&additionalFramesList), |
| 281 MaxNumChannels(&rampOutList))); | 275 MaxNumChannels(&rampOutList))); |
| 282 | 276 |
| 283 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | 277 audio_frame_for_mixing->UpdateFrame( |
tommi 2016/07/06 19:42:17: is there a way to avoid calling UpdateFrame() whil
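The question above is truncated, but it appears to ask whether UpdateFrame() could run without holding the critical section. A self-contained sketch of the general pattern that question points at: snapshot the shared members under the lock, then update the frame after releasing it. The names and types here are hypothetical stand-ins, not code from this file, and this is not what the CL ends up doing.

```cpp
// Illustrative only: snapshot-under-lock, update-outside-lock.
// MixerSketch and FrameLike are hypothetical stand-ins for the real classes.
#include <cstddef>
#include <mutex>

struct FrameLike {
  void Update(unsigned int timestamp, int frequency_hz) {
    timestamp_ = timestamp;
    frequency_hz_ = frequency_hz;
  }
  unsigned int timestamp_ = 0;
  int frequency_hz_ = 0;
};

class MixerSketch {
 public:
  void Mix(FrameLike* frame) {
    unsigned int timestamp_copy;
    int frequency_copy;
    {
      std::lock_guard<std::mutex> lock(mutex_);  // shared state touched only here
      timestamp_copy = timestamp_;
      frequency_copy = output_frequency_hz_;
      timestamp_ += static_cast<unsigned int>(sample_size_);
    }
    // Only local copies are used past this point, so the frame update can
    // happen outside the critical section.
    frame->Update(timestamp_copy, frequency_copy);
  }

 private:
  std::mutex mutex_;
  unsigned int timestamp_ = 0;
  int output_frequency_hz_ = 48000;
  std::size_t sample_size_ = 480;
};
```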
| 284 AudioFrame::kNormalSpeech, AudioFrame::kVadPassive, | 278 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, |
| 285 num_mixed_channels); | 279 AudioFrame::kVadPassive, num_mixed_channels); |
| 286 | 280 |
| 287 _timeStamp += static_cast<uint32_t>(_sampleSize); | 281 _timeStamp += static_cast<uint32_t>(_sampleSize); |
tommi 2016/07/06 19:42:17: _timeStamp is only used in this method, so possibl
aleloi 2016/07/07 09:20:16: _sampleSize and _outputFrequency are only changed
tommi 2016/07/08 12:24:17: Acknowledged.
| 288 | 282 |
| 289 // We only use the limiter if it supports the output sample rate and | 283 // We only use the limiter if it supports the output sample rate and |
| 290 // we're actually mixing multiple streams. | 284 // we're actually mixing multiple streams. |
| 291 use_limiter_ = _numMixedParticipants > 1 && | 285 use_limiter_ = _numMixedParticipants > 1 && |
| 292 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | 286 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
| 293 | 287 |
| 294 MixFromList(mixedAudio, mixList); | 288 MixFromList(audio_frame_for_mixing, mixList); |
| 295 MixAnonomouslyFromList(mixedAudio, additionalFramesList); | 289 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
| 296 MixAnonomouslyFromList(mixedAudio, rampOutList); | 290 MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList); |
| 297 | 291 |
| 298 if (mixedAudio->samples_per_channel_ == 0) { | 292 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
| 299 // Nothing was mixed, set the audio samples to silence. | 293 // Nothing was mixed, set the audio samples to silence. |
| 300 mixedAudio->samples_per_channel_ = _sampleSize; | 294 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; |
| 301 mixedAudio->Mute(); | 295 audio_frame_for_mixing->Mute(); |
| 302 } else { | 296 } else { |
| 303 // Only call the limiter if we have something to mix. | 297 // Only call the limiter if we have something to mix. |
| 304 LimitMixedAudio(mixedAudio); | 298 LimitMixedAudio(audio_frame_for_mixing); |
| 305 } | 299 } |
| 306 } | 300 } |
| 307 | 301 |
| 308 { | |
| 309 CriticalSectionScoped cs(_cbCrit.get()); | |
| 310 if (_mixReceiver != NULL) { | |
| 311 const AudioFrame** dummy = NULL; | |
| 312 _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0); | |
| 313 } | |
| 314 } | |
| 315 | |
| 316 // Reclaim all outstanding memory. | |
| 317 _audioFramePool->PushMemory(mixedAudio); | |
| 318 ClearAudioFrameList(&mixList); | 302 ClearAudioFrameList(&mixList); |
| 319 ClearAudioFrameList(&rampOutList); | 303 ClearAudioFrameList(&rampOutList); |
| 320 ClearAudioFrameList(&additionalFramesList); | 304 ClearAudioFrameList(&additionalFramesList); |
| 321 { | 305 { |
| 322 CriticalSectionScoped cs(_crit.get()); | 306 CriticalSectionScoped cs(_crit.get()); |
| 323 _processCalls--; | 307 _processCalls--; |
| 324 } | 308 } |
| 325 return; | 309 return; |
| 326 } | 310 } |
| 327 | 311 |
| 328 int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback( | |
| 329 OldAudioMixerOutputReceiver* mixReceiver) { | |
| 330 CriticalSectionScoped cs(_cbCrit.get()); | |
| 331 if (_mixReceiver != NULL) { | |
| 332 return -1; | |
| 333 } | |
| 334 _mixReceiver = mixReceiver; | |
| 335 return 0; | |
| 336 } | |
| 337 | |
| 338 int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { | |
| 339 CriticalSectionScoped cs(_cbCrit.get()); | |
| 340 if (_mixReceiver == NULL) { | |
| 341 return -1; | |
| 342 } | |
| 343 _mixReceiver = NULL; | |
| 344 return 0; | |
| 345 } | |
| 346 | |
| 347 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 312 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
| 348 const Frequency& frequency) { | 313 const Frequency& frequency) { |
| 349 CriticalSectionScoped cs(_crit.get()); | 314 CriticalSectionScoped cs(_crit.get()); |
| 350 | 315 |
| 351 _outputFrequency = frequency; | 316 _outputFrequency = frequency; |
| 352 _sampleSize = | 317 _sampleSize = |
| 353 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 318 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); |
| 354 | 319 |
| 355 return 0; | 320 return 0; |
| 356 } | 321 } |
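As a quick check of the _sampleSize formula above: with the mixer's 10 ms processing period, a 48000 Hz output rate yields 480 samples per channel per Mix() call. The value 10 for kProcessPeriodicityInMs is an assumption here, since the constant's definition sits outside this hunk.

```cpp
// Worked example of the _sampleSize computation; kProcessPeriodicityInMs = 10
// is assumed (its definition is not shown in this review).
#include <cstddef>
#include <cstdio>

int main() {
  const int kProcessPeriodicityInMs = 10;
  const int output_frequency_hz = 48000;
  const std::size_t sample_size = static_cast<std::size_t>(
      (output_frequency_hz * kProcessPeriodicityInMs) / 1000);
  std::printf("%zu samples per channel per period\n", sample_size);  // prints 480
  return 0;
}
```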
| (...skipping 451 matching lines...) | |
| 808 int32_t NewAudioConferenceMixerImpl::MixFromList( | 773 int32_t NewAudioConferenceMixerImpl::MixFromList( |
| 809 AudioFrame* mixedAudio, | 774 AudioFrame* mixedAudio, |
| 810 const AudioFrameList& audioFrameList) const { | 775 const AudioFrameList& audioFrameList) const { |
| 811 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 776 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 812 "MixFromList(mixedAudio, audioFrameList)"); | 777 "MixFromList(mixedAudio, audioFrameList)"); |
| 813 if (audioFrameList.empty()) | 778 if (audioFrameList.empty()) |
| 814 return 0; | 779 return 0; |
| 815 | 780 |
| 816 uint32_t position = 0; | 781 uint32_t position = 0; |
| 817 | 782 |
| 818 if (_numMixedParticipants == 1) { | 783 if (_numMixedParticipants == 1) { |
tommi 2016/07/06 19:42:17: btw, it looks like this variable is the only reaso
aleloi 2016/07/07 09:20:16: Thank you for the idea! I think more unnecessary l
| 819 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; | 784 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; |
| 820 mixedAudio->elapsed_time_ms_ = | 785 mixedAudio->elapsed_time_ms_ = |
| 821 audioFrameList.front().frame->elapsed_time_ms_; | 786 audioFrameList.front().frame->elapsed_time_ms_; |
| 822 } else { | 787 } else { |
| 823 // TODO(wu): Issue 3390. | 788 // TODO(wu): Issue 3390. |
| 824 // Audio frame timestamp is only supported in one channel case. | 789 // Audio frame timestamp is only supported in one channel case. |
| 825 mixedAudio->timestamp_ = 0; | 790 mixedAudio->timestamp_ = 0; |
| 826 mixedAudio->elapsed_time_ms_ = -1; | 791 mixedAudio->elapsed_time_ms_ = -1; |
| 827 } | 792 } |
| 828 | 793 |
| (...skipping 60 matching lines...) | |
| 889 | 854 |
| 890 if (error != _limiter->kNoError) { | 855 if (error != _limiter->kNoError) { |
| 891 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 856 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 892 "Error from AudioProcessing: %d", error); | 857 "Error from AudioProcessing: %d", error); |
| 893 assert(false); | 858 assert(false); |
| 894 return false; | 859 return false; |
| 895 } | 860 } |
| 896 return true; | 861 return true; |
| 897 } | 862 } |
| 898 } // namespace webrtc | 863 } // namespace webrtc |