OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 36 matching lines...) | |
47 muted_(m), | 47 muted_(m), |
48 energy_(energy), | 48 energy_(energy), |
49 was_mixed_before_(was_mixed_before) {} | 49 was_mixed_before_(was_mixed_before) {} |
50 | 50 |
51 // a.shouldMixBefore(b) is used to select mixer participants. | 51 // a.shouldMixBefore(b) is used to select mixer participants. |
52 bool shouldMixBefore(const SourceFrame& other) const { | 52 bool shouldMixBefore(const SourceFrame& other) const { |
53 if (muted_ != other.muted_) { | 53 if (muted_ != other.muted_) { |
54 return other.muted_; | 54 return other.muted_; |
55 } | 55 } |
56 | 56 |
57 auto our_activity = audio_frame_->vad_activity_; | 57 const auto our_activity = audio_frame_->vad_activity_; |
58 auto other_activity = other.audio_frame_->vad_activity_; | 58 const auto other_activity = other.audio_frame_->vad_activity_; |
59 | 59 |
60 if (our_activity != other_activity) { | 60 if (our_activity != other_activity) { |
61 return our_activity == AudioFrame::kVadActive; | 61 return our_activity == AudioFrame::kVadActive; |
62 } | 62 } |
63 | 63 |
64 return energy_ > other.energy_; | 64 return energy_ > other.energy_; |
65 } | 65 } |
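
Editor's note: the comparator above encodes the full selection priority — unmuted frames sort before muted ones, VAD-active before passive, and higher energy first within the same class. A minimal standalone sketch of that ordering, with simplified stand-ins for SourceFrame and AudioFrame (the names here are illustrative, not from this CL):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <vector>

    // Simplified stand-in for SourceFrame: only the fields the
    // comparator inspects.
    struct Frame {
      bool muted;
      bool vad_active;
      uint32_t energy;

      bool ShouldMixBefore(const Frame& other) const {
        if (muted != other.muted)
          return other.muted;          // Unmuted beats muted.
        if (vad_active != other.vad_active)
          return vad_active;           // Voice activity beats silence.
        return energy > other.energy;  // Otherwise louder beats quieter.
      }
    };

    int main() {
      std::vector<Frame> frames = {{true, false, 900},   // muted but loud
                                   {false, false, 500},  // passive
                                   {false, true, 100}};  // active but quiet
      // Same call style as the std::sort in GetNonAnonymousAudio() below.
      std::sort(frames.begin(), frames.end(),
                std::mem_fn(&Frame::ShouldMixBefore));
      // Prints: active, then passive, then muted.
      for (const auto& f : frames)
        std::printf("muted=%d active=%d energy=%u\n", f.muted, f.vad_active,
                    f.energy);
    }
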
66 | 66 |
67 MixerAudioSource* audio_source_; | 67 MixerAudioSource* audio_source_; |
68 AudioFrame* audio_frame_; | 68 AudioFrame* audio_frame_; |
(...skipping 12 matching lines...) | |
81 } | 81 } |
82 } | 82 } |
83 | 83 |
84 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { | 84 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
85 for (const auto& source_frame : mixed_sources_and_frames) { | 85 for (const auto& source_frame : mixed_sources_and_frames) { |
86 // Ramp in previously unmixed. | 86 // Ramp in previously unmixed. |
87 if (!source_frame.was_mixed_before_) { | 87 if (!source_frame.was_mixed_before_) { |
88 NewMixerRampIn(source_frame.audio_frame_); | 88 NewMixerRampIn(source_frame.audio_frame_); |
89 } | 89 } |
90 | 90 |
91 const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed(); | 91 const bool is_mixed = source_frame.audio_source_->mix_history_->IsMixed(); |
92 // Ramp out currently unmixed. | 92 // Ramp out currently unmixed. |
93 if (source_frame.was_mixed_before_ && !is_mixed) { | 93 if (source_frame.was_mixed_before_ && !is_mixed) { |
94 NewMixerRampOut(source_frame.audio_frame_); | 94 NewMixerRampOut(source_frame.audio_frame_); |
95 } | 95 } |
96 } | 96 } |
97 } | 97 } |
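
Editor's note: NewMixerRampIn() and NewMixerRampOut() are defined elsewhere in the mixer and are not shown in this diff. A plausible shape for them is a linear gain sweep across the 10 ms frame, so a source entering or leaving the mix fades instead of clicking; the sketch below assumes that linear sweep and is not the verbatim WebRTC implementation:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical linear ramp over one frame's samples. RampIn would be
    // LinearRamp(samples, count, 0.0f, 1.0f); RampOut the reverse.
    void LinearRamp(int16_t* samples, size_t count, float from_gain,
                    float to_gain) {
      if (count == 0) return;
      const float step = (to_gain - from_gain) / static_cast<float>(count);
      float gain = from_gain;
      for (size_t i = 0; i < count; ++i) {
        samples[i] = static_cast<int16_t>(samples[i] * gain);
        gain += step;
      }
    }
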
98 | 98 |
99 } // namespace | 99 } // namespace |
100 | 100 |
101 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {} | 101 MixerAudioSource::MixerAudioSource() : mix_history_(new NewMixHistory()) {} |
102 | 102 |
103 MixerAudioSource::~MixerAudioSource() { | 103 MixerAudioSource::~MixerAudioSource() { |
104 delete _mixHistory; | 104 delete mix_history_; |
ivoc 2016/09/07 11:33:45:
How about changing mix_history into a unique_ptr?

aleloi 2016/09/07 11:38:11:
Fixed in one of the dependent CLs https://coderevi
105 } | 105 } |
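
Editor's note: for reference, the unique_ptr change ivoc asks about (landed in a dependent CL, per aleloi's reply) would look roughly like this — holding the history in a std::unique_ptr drops the manual delete from the destructor. A sketch with a minimal stub for NewMixHistory so it stands alone:

    #include <memory>

    // Minimal stub so the sketch compiles standalone; the real
    // NewMixHistory is the class defined in this file.
    class NewMixHistory {
     public:
      bool IsMixed() const { return is_mixed_; }

     private:
      bool is_mixed_ = false;
    };

    class MixerAudioSource {
     public:
      MixerAudioSource() : mix_history_(new NewMixHistory()) {}
      ~MixerAudioSource() = default;  // unique_ptr deletes; no manual delete.

      bool IsMixed() const { return mix_history_->IsMixed(); }

     private:
      std::unique_ptr<NewMixHistory> mix_history_;
    };
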
106 | 106 |
107 bool MixerAudioSource::IsMixed() const { | 107 bool MixerAudioSource::IsMixed() const { |
108 return _mixHistory->IsMixed(); | 108 return mix_history_->IsMixed(); |
109 } | 109 } |
110 | 110 |
111 NewMixHistory::NewMixHistory() : is_mixed_(0) {} | 111 NewMixHistory::NewMixHistory() : is_mixed_(0) {} |
112 | 112 |
113 NewMixHistory::~NewMixHistory() {} | 113 NewMixHistory::~NewMixHistory() {} |
114 | 114 |
115 bool NewMixHistory::IsMixed() const { | 115 bool NewMixHistory::IsMixed() const { |
116 return is_mixed_; | 116 return is_mixed_; |
117 } | 117 } |
118 | 118 |
(...skipping 58 matching lines...) | |
177 | 177 |
178 return std::unique_ptr<AudioMixer>( | 178 return std::unique_ptr<AudioMixer>( |
179 new AudioMixerImpl(id, std::move(limiter))); | 179 new AudioMixerImpl(id, std::move(limiter))); |
180 } | 180 } |
181 | 181 |
182 void AudioMixerImpl::Mix(int sample_rate, | 182 void AudioMixerImpl::Mix(int sample_rate, |
183 size_t number_of_channels, | 183 size_t number_of_channels, |
184 AudioFrame* audio_frame_for_mixing) { | 184 AudioFrame* audio_frame_for_mixing) { |
185 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 185 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
186 RTC_DCHECK_RUN_ON(&thread_checker_); | 186 RTC_DCHECK_RUN_ON(&thread_checker_); |
187 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | |
188 | 187 |
189 if (sample_rate != kNbInHz && sample_rate != kWbInHz && | 188 if (sample_rate != kNbInHz && sample_rate != kWbInHz && |
190 sample_rate != kSwbInHz && sample_rate != kFbInHz) { | 189 sample_rate != kSwbInHz && sample_rate != kFbInHz) { |
191 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 190 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
192 "Invalid frequency: %d", sample_rate); | 191 "Invalid frequency: %d", sample_rate); |
193 RTC_NOTREACHED(); | 192 RTC_NOTREACHED(); |
194 return; | 193 return; |
195 } | 194 } |
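
Editor's note: the guard above admits exactly four rates. kNbInHz, kWbInHz, kSwbInHz and kFbInHz are assumed here to be the usual WebRTC band rates of 8, 16, 32 and 48 kHz; this standalone sketch hard-codes those values rather than using the real constants:

    #include <initializer_list>

    // Assumed band rates: narrowband 8 kHz, wideband 16 kHz,
    // super-wideband 32 kHz, fullband 48 kHz.
    bool IsSupportedRate(int sample_rate_hz) {
      for (const int rate : {8000, 16000, 32000, 48000}) {
        if (sample_rate_hz == rate) return true;
      }
      return false;
    }
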
196 | 195 |
197 if (OutputFrequency() != sample_rate) { | 196 if (OutputFrequency() != sample_rate) { |
(...skipping 11 matching lines...) | |
209 } | 208 } |
210 | 209 |
211 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), | 210 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), |
212 anonymous_mix_list.end()); | 211 anonymous_mix_list.end()); |
213 | 212 |
214 for (const auto& frame : mix_list) { | 213 for (const auto& frame : mix_list) { |
215 RemixFrame(frame, number_of_channels); | 214 RemixFrame(frame, number_of_channels); |
216 } | 215 } |
217 | 216 |
218 audio_frame_for_mixing->UpdateFrame( | 217 audio_frame_for_mixing->UpdateFrame( |
219 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, | 218 -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech, |
220 AudioFrame::kVadPassive, number_of_channels); | 219 AudioFrame::kVadPassive, number_of_channels); |
221 | 220 |
222 time_stamp_ += static_cast<uint32_t>(sample_size_); | 221 time_stamp_ += static_cast<uint32_t>(sample_size_); |
223 | 222 |
224 use_limiter_ = num_mixed_audio_sources > 1; | 223 use_limiter_ = num_mixed_audio_sources > 1; |
225 | 224 |
226 // We only use the limiter if we're actually mixing multiple streams. | 225 // We only use the limiter if we're actually mixing multiple streams. |
227 MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_); | 226 MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_); |
228 | 227 |
229 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 228 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
(...skipping 27 matching lines...) | |
257 | 256 |
258 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, | 257 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
259 bool mixable) { | 258 bool mixable) { |
260 if (!mixable) { | 259 if (!mixable) { |
261 // Anonymous audio sources are in a separate list. Make sure that the | 260 // Anonymous audio sources are in a separate list. Make sure that the |
262 // audio source is in the _audioSourceList if it is being mixed. | 261 // audio source is in audio_source_list_ if it is being mixed. |
263 SetAnonymousMixabilityStatus(audio_source, false); | 262 SetAnonymousMixabilityStatus(audio_source, false); |
264 } | 263 } |
265 { | 264 { |
266 rtc::CritScope lock(&crit_); | 265 rtc::CritScope lock(&crit_); |
267 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); | 266 const bool is_mixed = |
267 IsAudioSourceInList(*audio_source, audio_source_list_); | |
268 // API must be called with a new state. | 268 // API must be called with a new state. |
269 if (!(mixable ^ isMixed)) { | 269 if (!(mixable ^ is_mixed)) { |
270 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 270 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
271 "Mixable is aready %s", isMixed ? "ON" : "off"); | 271 "Mixable is aready %s", is_mixed ? "ON" : "off"); |
272 return -1; | 272 return -1; |
273 } | 273 } |
274 bool success = false; | 274 bool success = false; |
275 if (mixable) { | 275 if (mixable) { |
276 success = AddAudioSourceToList(audio_source, &audio_source_list_); | 276 success = AddAudioSourceToList(audio_source, &audio_source_list_); |
277 } else { | 277 } else { |
278 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); | 278 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); |
279 } | 279 } |
280 if (!success) { | 280 if (!success) { |
281 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 281 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
282 "failed to %s audio_source", mixable ? "add" : "remove"); | 282 "failed to %s audio_source", mixable ? "add" : "remove"); |
283 RTC_NOTREACHED(); | 283 RTC_NOTREACHED(); |
284 return -1; | 284 return -1; |
285 } | 285 } |
286 | 286 |
287 size_t numMixedNonAnonymous = audio_source_list_.size(); | 287 size_t num_mixed_non_anonymous = audio_source_list_.size(); |
288 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { | 288 if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) { |
289 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; | 289 num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources; |
290 } | 290 } |
291 num_mixed_audio_sources_ = | 291 num_mixed_audio_sources_ = |
292 numMixedNonAnonymous + additional_audio_source_list_.size(); | 292 num_mixed_non_anonymous + additional_audio_source_list_.size(); |
293 } | 293 } |
294 return 0; | 294 return 0; |
295 } | 295 } |
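
Editor's note: two details in SetMixabilityStatus() deserve spelling out. The !(mixable ^ is_mixed) guard rejects calls that would not change state: XOR of two bools is true exactly when they differ, so the negation fires when the caller requests the state the source is already in. And the cap on non-anonymous sources is a plain clamp that could equally be written with std::min. A sketch of both equivalences (the rewrite is illustrative, not part of the CL):

    #include <algorithm>
    #include <cstddef>

    // !(mixable ^ is_mixed) is true exactly when mixable == is_mixed,
    // i.e. when the call would be a no-op.
    bool IsRedundantCall(bool mixable, bool is_mixed) {
      return mixable == is_mixed;
    }

    // The if-clamp on the non-anonymous source count, as a std::min.
    size_t CountMixedSources(size_t non_anonymous, size_t anonymous,
                             size_t max_mixed) {
      return std::min(non_anonymous, max_mixed) + anonymous;
    }
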
296 | 296 |
297 bool AudioMixerImpl::MixabilityStatus( | 297 bool AudioMixerImpl::MixabilityStatus( |
298 const MixerAudioSource& audio_source) const { | 298 const MixerAudioSource& audio_source) const { |
299 rtc::CritScope lock(&crit_); | 299 rtc::CritScope lock(&crit_); |
300 return IsAudioSourceInList(audio_source, audio_source_list_); | 300 return IsAudioSourceInList(audio_source, audio_source_list_); |
301 } | 301 } |
302 | 302 |
(...skipping 36 matching lines...) | |
339 const MixerAudioSource& audio_source) const { | 339 const MixerAudioSource& audio_source) const { |
340 rtc::CritScope lock(&crit_); | 340 rtc::CritScope lock(&crit_); |
341 return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 341 return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
342 } | 342 } |
343 | 343 |
344 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const { | 344 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const { |
345 RTC_DCHECK_RUN_ON(&thread_checker_); | 345 RTC_DCHECK_RUN_ON(&thread_checker_); |
346 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 346 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
347 "GetNonAnonymousAudio()"); | 347 "GetNonAnonymousAudio()"); |
348 AudioFrameList result; | 348 AudioFrameList result; |
349 std::vector<SourceFrame> audioSourceMixingDataList; | 349 std::vector<SourceFrame> audio_source_mixing_data_list; |
350 std::vector<SourceFrame> ramp_list; | 350 std::vector<SourceFrame> ramp_list; |
351 | 351 |
352 // Get audio source audio and put it in the struct vector. | 352 // Get audio source audio and put it in the struct vector. |
353 for (MixerAudioSource* audio_source : audio_source_list_) { | 353 for (auto* const audio_source : audio_source_list_) { |
354 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 354 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( |
355 id_, static_cast<int>(output_frequency_)); | 355 id_, static_cast<int>(OutputFrequency())); |
356 | 356 |
357 auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 357 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
358 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 358 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
359 | 359 |
360 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | 360 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { |
361 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 361 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
362 "failed to GetAudioFrameWithMuted() from participant"); | 362 "failed to GetAudioFrameWithMuted() from participant"); |
363 continue; | 363 continue; |
364 } | 364 } |
365 audioSourceMixingDataList.emplace_back( | 365 audio_source_mixing_data_list.emplace_back( |
366 audio_source, audio_source_audio_frame, | 366 audio_source, audio_source_audio_frame, |
367 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, | 367 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, |
368 audio_source->_mixHistory->WasMixed()); | 368 audio_source->mix_history_->WasMixed()); |
369 } | 369 } |
370 | 370 |
371 // Sort frames by sorting function. | 371 // Sort frames by sorting function. |
372 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), | 372 std::sort(audio_source_mixing_data_list.begin(), |
373 audio_source_mixing_data_list.end(), | |
373 std::mem_fn(&SourceFrame::shouldMixBefore)); | 374 std::mem_fn(&SourceFrame::shouldMixBefore)); |
374 | 375 |
375 int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources; | 376 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; |
376 // Go through list in order and put things in mixList. | 377 |
377 for (SourceFrame& p : audioSourceMixingDataList) { | 378 // Go through list in order and put unmuted frames in result list. |
379 for (const SourceFrame& p : audio_source_mixing_data_list) { | |
378 // Filter muted. | 380 // Filter muted. |
379 if (p.muted_) { | 381 if (p.muted_) { |
380 p.audio_source_->_mixHistory->SetIsMixed(false); | 382 p.audio_source_->mix_history_->SetIsMixed(false); |
381 continue; | 383 continue; |
382 } | 384 } |
383 | 385 |
384 // Add frame to result vector for mixing. | 386 // Add frame to result vector for mixing. |
385 bool is_mixed = false; | 387 bool is_mixed = false; |
386 if (maxAudioFrameCounter > 0) { | 388 if (max_audio_frame_counter > 0) { |
387 --maxAudioFrameCounter; | 389 --max_audio_frame_counter; |
388 result.push_back(p.audio_frame_); | 390 result.push_back(p.audio_frame_); |
389 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, | 391 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, |
390 p.was_mixed_before_, -1); | 392 p.was_mixed_before_, -1); |
391 is_mixed = true; | 393 is_mixed = true; |
392 } | 394 } |
393 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); | 395 p.audio_source_->mix_history_->SetIsMixed(is_mixed); |
394 } | 396 } |
395 Ramp(ramp_list); | 397 Ramp(ramp_list); |
396 return result; | 398 return result; |
397 } | 399 } |
398 | 400 |
399 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { | 401 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { |
400 RTC_DCHECK_RUN_ON(&thread_checker_); | 402 RTC_DCHECK_RUN_ON(&thread_checker_); |
401 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 403 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
402 "GetAnonymousAudio()"); | 404 "GetAnonymousAudio()"); |
403 // The GetAudioFrameWithMuted() callback may result in the audio source being | 405 // The GetAudioFrameWithMuted() callback may result in the audio source being |
404 // removed from additionalAudioFramesList_. If that happens it will | 406 // removed from additional_audio_source_list_. If that happens it will |
405 // invalidate any iterators. Create a copy of the audio sources list such | 407 // invalidate any iterators. Create a copy of the audio sources list such |
406 // that the list of participants can be traversed safely. | 408 // that the list of participants can be traversed safely. |
407 std::vector<SourceFrame> ramp_list; | 409 std::vector<SourceFrame> ramp_list; |
408 MixerAudioSourceList additionalAudioSourceList; | 410 MixerAudioSourceList additional_audio_sources_list; |
409 AudioFrameList result; | 411 AudioFrameList result; |
410 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), | 412 additional_audio_sources_list.insert(additional_audio_sources_list.begin(), |
411 additional_audio_source_list_.begin(), | 413 additional_audio_source_list_.begin(), |
412 additional_audio_source_list_.end()); | 414 additional_audio_source_list_.end()); |
413 | 415 |
414 for (MixerAudioSourceList::const_iterator audio_source = | 416 for (const auto& audio_source : additional_audio_sources_list) { |
415 additionalAudioSourceList.begin(); | 417 const auto audio_frame_with_info = |
416 audio_source != additionalAudioSourceList.end(); ++audio_source) { | 418 audio_source->GetAudioFrameWithMuted(id_, OutputFrequency()); |
417 auto audio_frame_with_info = | 419 const auto ret = audio_frame_with_info.audio_frame_info; |
418 (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_); | |
419 auto ret = audio_frame_with_info.audio_frame_info; | |
420 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 420 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
421 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 421 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
422 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 422 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
423 "failed to GetAudioFrameWithMuted() from audio_source"); | 423 "failed to GetAudioFrameWithMuted() from audio_source"); |
424 continue; | 424 continue; |
425 } | 425 } |
426 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { | 426 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { |
427 result.push_back(audio_frame); | 427 result.push_back(audio_frame); |
428 ramp_list.emplace_back(*audio_source, audio_frame, false, | 428 ramp_list.emplace_back(audio_source, audio_frame, false, |
429 (*audio_source)->_mixHistory->IsMixed(), -1); | 429 audio_source->mix_history_->IsMixed(), 0); |
430 (*audio_source)->_mixHistory->SetIsMixed(true); | 430 audio_source->mix_history_->SetIsMixed(true); |
431 } | 431 } |
432 } | 432 } |
433 Ramp(ramp_list); | 433 Ramp(ramp_list); |
434 return result; | 434 return result; |
435 } | 435 } |
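
Editor's note: the defensive copy at the top of GetAnonymousAudio() exists because GetAudioFrameWithMuted() may call back into the mixer and remove a source from additional_audio_source_list_, invalidating iterators into the live list. A minimal sketch of the hazard and the snapshot fix, with illustrative types rather than the CL's:

    #include <algorithm>
    #include <vector>

    struct Source {
      bool done = false;
      // Stand-in for GetAudioFrameWithMuted(): the callback may remove
      // |this| from the live list.
      void Poll(std::vector<Source*>* live_list) {
        if (done) {
          live_list->erase(
              std::remove(live_list->begin(), live_list->end(), this),
              live_list->end());
        }
      }
    };

    void SafeTraverse(std::vector<Source*>* live_list) {
      // Iterating |*live_list| directly while Poll() erases from it would
      // invalidate the iterators. Copy first, then walk the snapshot.
      const std::vector<Source*> snapshot = *live_list;
      for (Source* source : snapshot) {
        source->Poll(live_list);  // May shrink the live list; the snapshot
                                  // is unaffected.
      }
    }
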
436 | 436 |
437 bool AudioMixerImpl::IsAudioSourceInList( | 437 bool AudioMixerImpl::IsAudioSourceInList( |
438 const MixerAudioSource& audio_source, | 438 const MixerAudioSource& audio_source, |
439 const MixerAudioSourceList& audioSourceList) const { | 439 const MixerAudioSourceList& audio_source_list) const { |
440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
441 "IsAudioSourceInList(audio_source,audioSourceList)"); | 441 "IsAudioSourceInList(audio_source,audio_source_list)"); |
442 return std::find(audioSourceList.begin(), audioSourceList.end(), | 442 return std::find(audio_source_list.begin(), audio_source_list.end(), |
443 &audio_source) != audioSourceList.end(); | 443 &audio_source) != audio_source_list.end(); |
444 } | 444 } |
445 | 445 |
446 bool AudioMixerImpl::AddAudioSourceToList( | 446 bool AudioMixerImpl::AddAudioSourceToList( |
447 MixerAudioSource* audio_source, | 447 MixerAudioSource* audio_source, |
448 MixerAudioSourceList* audioSourceList) const { | 448 MixerAudioSourceList* audio_source_list) const { |
449 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 449 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
450 "AddAudioSourceToList(audio_source, audioSourceList)"); | 450 "AddAudioSourceToList(audio_source, audio_source_list)"); |
451 audioSourceList->push_back(audio_source); | 451 audio_source_list->push_back(audio_source); |
452 // Make sure that the mixed status is correct for new MixerAudioSource. | 452 // Make sure that the mixed status is correct for new MixerAudioSource. |
453 audio_source->_mixHistory->ResetMixedStatus(); | 453 audio_source->mix_history_->ResetMixedStatus(); |
454 return true; | 454 return true; |
455 } | 455 } |
456 | 456 |
457 bool AudioMixerImpl::RemoveAudioSourceFromList( | 457 bool AudioMixerImpl::RemoveAudioSourceFromList( |
458 MixerAudioSource* audio_source, | 458 MixerAudioSource* audio_source, |
459 MixerAudioSourceList* audioSourceList) const { | 459 MixerAudioSourceList* audio_source_list) const { |
460 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 460 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
461 "RemoveAudioSourceFromList(audio_source, audioSourceList)"); | 461 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); |
462 auto iter = | 462 const auto iter = std::find(audio_source_list->begin(), |
463 std::find(audioSourceList->begin(), audioSourceList->end(), audio_source); | 463 audio_source_list->end(), audio_source); |
464 if (iter != audioSourceList->end()) { | 464 if (iter != audio_source_list->end()) { |
465 audioSourceList->erase(iter); | 465 audio_source_list->erase(iter); |
466 // AudioSource is no longer mixed, reset to default. | 466 // AudioSource is no longer mixed, reset to default. |
467 audio_source->_mixHistory->ResetMixedStatus(); | 467 audio_source->mix_history_->ResetMixedStatus(); |
468 return true; | 468 return true; |
469 } else { | 469 } else { |
470 return false; | 470 return false; |
471 } | 471 } |
472 } | 472 } |
473 | 473 |
474 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio, | 474 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixed_audio, |
475 const AudioFrameList& audioFrameList, | 475 const AudioFrameList& audio_frame_list, |
476 int32_t id, | 476 int32_t id, |
477 bool use_limiter) { | 477 bool use_limiter) { |
478 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | 478 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
479 "MixFromList(mixedAudio, audioFrameList)"); | 479 "MixFromList(mixed_audio, audio_frame_list)"); |
480 if (audioFrameList.empty()) | 480 if (audio_frame_list.empty()) |
481 return 0; | 481 return 0; |
482 | 482 |
483 uint32_t position = 0; | 483 if (audio_frame_list.size() == 1) { |
484 | 484 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; |
485 if (audioFrameList.size() == 1) { | 485 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; |
486 mixedAudio->timestamp_ = audioFrameList.front()->timestamp_; | |
487 mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_; | |
488 } else { | 486 } else { |
489 // TODO(wu): Issue 3390. | 487 // TODO(wu): Issue 3390. |
490 // Audio frame timestamp is only supported in one channel case. | 488 // Audio frame timestamp is only supported in the one-channel case. |
491 mixedAudio->timestamp_ = 0; | 489 mixed_audio->timestamp_ = 0; |
492 mixedAudio->elapsed_time_ms_ = -1; | 490 mixed_audio->elapsed_time_ms_ = -1; |
493 } | 491 } |
494 | 492 |
495 for (const auto& frame : audioFrameList) { | 493 for (const auto& frame : audio_frame_list) { |
496 RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_); | 494 RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_); |
497 RTC_DCHECK_EQ( | 495 RTC_DCHECK_EQ( |
498 frame->samples_per_channel_, | 496 frame->samples_per_channel_, |
499 static_cast<size_t>((mixedAudio->sample_rate_hz_ * kFrameDurationInMs) / | 497 static_cast<size_t>( |
500 1000)); | 498 (mixed_audio->sample_rate_hz_ * kFrameDurationInMs) / 1000)); |
501 | 499 |
502 // Mix |f.frame| into |mixedAudio|, with saturation protection. | 500 // Mix |frame| into |mixed_audio|, with saturation protection. |
503 // These effect is applied to |f.frame| itself prior to mixing. | 501 // This effect is applied to |frame| itself prior to mixing. |
504 if (use_limiter) { | 502 if (use_limiter) { |
505 // Divide by two to avoid saturation in the mixing. | 503 // Divide by two to avoid saturation in the mixing. |
506 // This is only meaningful if the limiter will be used. | 504 // This is only meaningful if the limiter will be used. |
507 *frame >>= 1; | 505 *frame >>= 1; |
508 } | 506 } |
509 RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_); | 507 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); |
510 *mixedAudio += *frame; | 508 *mixed_audio += *frame; |
511 position++; | |
512 } | 509 } |
513 return 0; | 510 return 0; |
514 } | 511 } |
515 | 512 |
516 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { | 513 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { |
517 RTC_DCHECK_RUN_ON(&thread_checker_); | 514 RTC_DCHECK_RUN_ON(&thread_checker_); |
518 if (!use_limiter_) { | 515 if (!use_limiter_) { |
519 return true; | 516 return true; |
520 } | 517 } |
521 | 518 |
522 // Smoothly limit the mixed frame. | 519 // Smoothly limit the mixed frame. |
523 const int error = limiter_->ProcessStream(mixedAudio); | 520 const int error = limiter_->ProcessStream(mixed_audio); |
524 | 521 |
525 // And now we can safely restore the level. This procedure results in | 522 // And now we can safely restore the level. This procedure results in |
526 // some loss of resolution, deemed acceptable. | 523 // some loss of resolution, deemed acceptable. |
527 // | 524 // |
528 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS | 525 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS |
529 // and compression gain of 6 dB). However, in the transition frame when this | 526 // and compression gain of 6 dB). However, in the transition frame when this |
530 // is enabled (moving from one to two audio sources) it has the potential to | 527 // is enabled (moving from one to two audio sources) it has the potential to |
531 // create discontinuities in the mixed frame. | 528 // create discontinuities in the mixed frame. |
532 // | 529 // |
533 // Instead we double the frame (with addition since left-shifting a | 530 // Instead we double the frame (with addition since left-shifting a |
534 // negative value is undefined). | 531 // negative value is undefined). |
535 *mixedAudio += *mixedAudio; | 532 *mixed_audio += *mixed_audio; |
536 | 533 |
537 if (error != limiter_->kNoError) { | 534 if (error != limiter_->kNoError) { |
538 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 535 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
539 "Error from AudioProcessing: %d", error); | 536 "Error from AudioProcessing: %d", error); |
540 RTC_NOTREACHED(); | 537 RTC_NOTREACHED(); |
541 return false; | 538 return false; |
542 } | 539 } |
543 return true; | 540 return true; |
544 } | 541 } |
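
Editor's note: MixFromList() and LimitMixedAudio() act as a pair. Each contributing frame is shifted right one bit before summing so the int16 accumulation cannot saturate, the limiter smooths the half-scale mix, and adding the frame to itself restores the level (addition rather than a left shift, since left-shifting a negative value is undefined). A standalone sketch of that arithmetic, with the real AGC limiter elided:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Halve -> (limit) -> double. Assumes |a| and |b| have the same length,
    // like two frames from one mixer tick.
    std::vector<int16_t> MixTwo(const std::vector<int16_t>& a,
                                const std::vector<int16_t>& b) {
      std::vector<int16_t> mixed(a.size());
      for (size_t i = 0; i < a.size(); ++i) {
        // Halving each input first means the sum always fits in int16_t.
        mixed[i] = static_cast<int16_t>((a[i] >> 1) + (b[i] >> 1));
      }
      // ... limiter_->ProcessStream(&mixed) runs here in the real mixer ...
      for (int16_t& s : mixed) {
        // Restore the level with a saturating add (AudioFrame::operator+=
        // also saturates); left-shifting a negative sample would be UB.
        const int32_t doubled = static_cast<int32_t>(s) + s;
        s = static_cast<int16_t>(
            std::min<int32_t>(32767, std::max<int32_t>(-32768, doubled)));
      }
      return mixed;
    }
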
545 | 542 |
546 int AudioMixerImpl::GetOutputAudioLevel() { | 543 int AudioMixerImpl::GetOutputAudioLevel() { |
547 RTC_DCHECK_RUN_ON(&thread_checker_); | 544 RTC_DCHECK_RUN_ON(&thread_checker_); |
548 const int level = audio_level_.Level(); | 545 const int level = audio_level_.Level(); |
549 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 546 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
550 "GetAudioOutputLevel() => level=%d", level); | 547 "GetAudioOutputLevel() => level=%d", level); |
551 return level; | 548 return level; |
552 } | 549 } |
553 | 550 |
554 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 551 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
555 RTC_DCHECK_RUN_ON(&thread_checker_); | 552 RTC_DCHECK_RUN_ON(&thread_checker_); |
556 const int level = audio_level_.LevelFullRange(); | 553 const int level = audio_level_.LevelFullRange(); |
557 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 554 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
558 "GetAudioOutputLevelFullRange() => level=%d", level); | 555 "GetAudioOutputLevelFullRange() => level=%d", level); |
559 return level; | 556 return level; |
560 } | 557 } |
561 } // namespace webrtc | 558 } // namespace webrtc |