Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 169 matching lines...) | |
| 180 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) | 180 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) |
| 181 return false; | 181 return false; |
| 182 | 182 |
| 183 return true; | 183 return true; |
| 184 } | 184 } |
| 185 | 185 |
| 186 void NewAudioConferenceMixerImpl::Mix(int sample_rate, | 186 void NewAudioConferenceMixerImpl::Mix(int sample_rate, |
| 187 size_t number_of_channels, | 187 size_t number_of_channels, |
| 188 AudioFrame* audio_frame_for_mixing) { | 188 AudioFrame* audio_frame_for_mixing) { |
| 189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
| 190 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; | |
| 191 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 190 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 192 AudioFrameList mixList; | 191 AudioFrameList mixList; |
| 193 AudioFrameList additionalFramesList; | 192 AudioFrameList additionalFramesList; |
| 194 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 193 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
| 195 { | 194 { |
| 196 CriticalSectionScoped cs(cb_crit_.get()); | 195 CriticalSectionScoped cs(cb_crit_.get()); |
| 197 Frequency mixing_frequency; | 196 Frequency mixing_frequency; |
| 198 | 197 |
| 199 switch (sample_rate) { | 198 switch (sample_rate) { |
| 200 case 8000: | 199 case 8000: |
| (...skipping 10 matching lines...) | |
| 211 break; | 210 break; |
| 212 default: | 211 default: |
| 213 RTC_NOTREACHED(); | 212 RTC_NOTREACHED(); |
| 214 return; | 213 return; |
| 215 } | 214 } |
| 216 | 215 |
| 217 if (OutputFrequency() != mixing_frequency) { | 216 if (OutputFrequency() != mixing_frequency) { |
| 218 SetOutputFrequency(mixing_frequency); | 217 SetOutputFrequency(mixing_frequency); |
| 219 } | 218 } |
| 220 | 219 |
| 221 mixList = UpdateToMix(remainingAudioSourcesAllowedToMix); | 220 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); |
| 222 remainingAudioSourcesAllowedToMix -= mixList.size(); | |
aleloi 2016/08/18 09:21:14: The changes above are a simplification of the hand…
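The reviewer's comment is cut off here, but the diff itself shows the caller no longer tracks `remainingAudioSourcesAllowedToMix` and instead passes `kMaximumAmountOfMixedAudioSources` straight to `UpdateToMix()`. `UpdateToMix()` is not part of this diff, so the following is only a minimal sketch of how a callee could enforce such a cap on its own; the `Candidate` type and the energy-based ranking are illustrative assumptions, not the actual WebRTC selection logic.

```cpp
// Sketch: the selection step itself caps the number of returned sources,
// so the caller needs no separate "remaining" bookkeeping.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Candidate {
  int id = 0;          // Hypothetical audio-source id.
  int64_t energy = 0;  // Hypothetical loudness metric used for ranking.
};

// Returns at most |max_mixed_sources| candidates, preferring the loudest.
std::vector<Candidate> UpdateToMixSketch(std::vector<Candidate> candidates,
                                         size_t max_mixed_sources) {
  std::sort(candidates.begin(), candidates.end(),
            [](const Candidate& a, const Candidate& b) {
              return a.energy > b.energy;
            });
  if (candidates.size() > max_mixed_sources) {
    candidates.resize(max_mixed_sources);
  }
  return candidates;  // Never larger than the cap.
}
```

Enforcing the cap at selection time keeps the size invariant intact everywhere downstream, which is presumably what makes the per-iteration guard in the mixing loop (removed further down) unnecessary.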
| 223 GetAdditionalAudio(&additionalFramesList); | 221 GetAdditionalAudio(&additionalFramesList); |
| 224 } | 222 } |
| 225 | 223 |
| 226 for (FrameAndMuteInfo& frame_and_mute : mixList) { | 224 for (FrameAndMuteInfo& frame_and_mute : mixList) { |
| 227 RemixFrame(frame_and_mute.frame, number_of_channels); | 225 RemixFrame(frame_and_mute.frame, number_of_channels); |
| 228 } | 226 } |
| 229 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { | 227 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { |
| 230 RemixFrame(frame_and_mute.frame, number_of_channels); | 228 RemixFrame(frame_and_mute.frame, number_of_channels); |
| 231 } | 229 } |
| 232 | 230 |
| (...skipping 286 matching lines...) | |
| 519 audioFrameList.front().frame->elapsed_time_ms_; | 517 audioFrameList.front().frame->elapsed_time_ms_; |
| 520 } else { | 518 } else { |
| 521 // TODO(wu): Issue 3390. | 519 // TODO(wu): Issue 3390. |
| 522 // Audio frame timestamp is only supported in one channel case. | 520 // Audio frame timestamp is only supported in one channel case. |
| 523 mixedAudio->timestamp_ = 0; | 521 mixedAudio->timestamp_ = 0; |
| 524 mixedAudio->elapsed_time_ms_ = -1; | 522 mixedAudio->elapsed_time_ms_ = -1; |
| 525 } | 523 } |
| 526 | 524 |
| 527 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); | 525 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); |
| 528 iter != audioFrameList.end(); ++iter) { | 526 iter != audioFrameList.end(); ++iter) { |
| 529 if (position >= kMaximumAmountOfMixedAudioSources) { | |
| 530 WEBRTC_TRACE( | |
| 531 kTraceMemory, kTraceAudioMixerServer, id, | |
| 532 "Trying to mix more than max amount of mixed audio sources:%d!", | |
| 533 kMaximumAmountOfMixedAudioSources); | |
| 534 // Assert and avoid crash | |
| 535 RTC_NOTREACHED(); | |
| 536 position = 0; | |
| 537 } | |
aleloi 2016/08/18 09:21:14: It may happen that audioFrameList.size() > kMaximumAmountOfMixedAudioSources…
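The removed lines above handled an oversize `audioFrameList` defensively inside the loop by tracing, hitting `RTC_NOTREACHED()`, and resetting `position`. As a hedged sketch (the `MixableFrame` type, the free function, and `kMaxMixedSources` constant are hypothetical stand-ins, not the real `MixFromList()` member), the same invariant can instead be asserted once before the loop, on the assumption that the selection step already guarantees the cap:

```cpp
// Minimal sketch, assuming the selection step already enforces the cap.
#include <cassert>
#include <cstddef>
#include <vector>

struct MixableFrame {
  bool muted = false;
  // Real code would carry sample data; omitted here.
};

constexpr size_t kMaxMixedSources = 3;  // Stand-in for kMaximumAmountOfMixedAudioSources.

void MixFromListSketch(const std::vector<MixableFrame>& frames) {
  // Check the invariant once up front instead of re-checking (and "fixing")
  // it on every iteration as the removed code did.
  assert(frames.size() <= kMaxMixedSources);
  for (const MixableFrame& frame : frames) {
    if (!frame.muted) {
      // MixFrames(mixed_audio, frame, use_limiter) would run here.
    }
  }
}
```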
| 538 if (!iter->muted) { | 527 if (!iter->muted) { |
| 539 MixFrames(mixedAudio, iter->frame, use_limiter); | 528 MixFrames(mixedAudio, iter->frame, use_limiter); |
| 540 } | 529 } |
| 541 | 530 |
| 542 position++; | 531 position++; |
| 543 } | 532 } |
| 544 | 533 |
| 545 return 0; | 534 return 0; |
| 546 } | 535 } |
| 547 | 536 |
| (...skipping 55 matching lines...) | |
| 603 } | 592 } |
| 604 | 593 |
| 605 uint32_t NewAudioConferenceMixerImpl::GetOutputAudioLevelFullRange() { | 594 uint32_t NewAudioConferenceMixerImpl::GetOutputAudioLevelFullRange() { |
| 606 int16_t current_level = audio_level_.LevelFullRange(); | 595 int16_t current_level = audio_level_.LevelFullRange(); |
| 607 uint32_t level = static_cast<uint32_t>(current_level); | 596 uint32_t level = static_cast<uint32_t>(current_level); |
| 608 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 597 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
| 609 "GetAudioOutputLevelFullRange() => level=%u", level); | 598 "GetAudioOutputLevelFullRange() => level=%u", level); |
| 610 return level; | 599 return level; |
| 611 } | 600 } |
| 612 } // namespace webrtc | 601 } // namespace webrtc |