OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | 14 #include <functional> |
15 #include <utility> | 15 #include <utility> |
16 | 16 |
17 #include "webrtc/base/logging.h" | 17 #include "webrtc/base/logging.h" |
18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
19 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
20 #include "webrtc/system_wrappers/include/trace.h" | 20 #include "webrtc/system_wrappers/include/trace.h" |
21 | 21 |
22 namespace webrtc { | 22 namespace webrtc { |
23 namespace { | 23 namespace { |
24 | 24 |
25 class SourceFrame { | 25 class SourceFrame { |
hlundin-webrtc
2016/10/07 15:03:10
I'm still inclined to say that this should be a struct.
aleloi
2016/10/10 10:58:17
I'm quite sure that also was the conclusion of our …
| |
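For context on the struct suggestion above, a plain-aggregate variant might look like the sketch below (illustrative only, not part of this CL):

```cpp
// Sketch of the struct variant the reviewers discuss: SourceFrame has
// no invariants beyond the two DCHECKed pointers, so it could be a
// plain aggregate with public fields and no trailing underscores.
struct SourceFrame {
  AudioMixerImpl::SourceStatus* source_status = nullptr;
  AudioFrame* audio_frame = nullptr;
  bool muted = true;
  uint32_t energy = 0;
};
```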
26 public: | 26 public: |
27 SourceFrame(AudioSourceWithMixStatus* audio_source, | 27 SourceFrame(AudioMixerImpl::SourceStatus* source_status, |
hlundin-webrtc
2016/10/07 15:03:10
This passing of non-const pointer makes me cringe
aleloi
2016/10/10 10:58:17
Acknowledged.
| |
28 AudioFrame* audio_frame, | 28 AudioFrame* audio_frame, |
29 bool muted) | 29 bool muted) |
30 : audio_source_(audio_source), audio_frame_(audio_frame), muted_(muted) { | 30 : source_status_(source_status), |
31 audio_frame_(audio_frame), | |
32 muted_(muted) { | |
33 RTC_DCHECK(source_status); | |
34 RTC_DCHECK(audio_frame); | |
31 if (!muted_) { | 35 if (!muted_) { |
32 energy_ = AudioMixerCalculateEnergy(*audio_frame); | 36 energy_ = AudioMixerCalculateEnergy(*audio_frame); |
33 } | 37 } |
34 } | 38 } |
35 | 39 |
36 SourceFrame(AudioSourceWithMixStatus* audio_source, | 40 SourceFrame(AudioMixerImpl::SourceStatus* source_status, |
37 AudioFrame* audio_frame, | 41 AudioFrame* audio_frame, |
38 bool muted, | 42 bool muted, |
39 uint32_t energy) | 43 uint32_t energy) |
40 : audio_source_(audio_source), | 44 : source_status_(source_status), |
41 audio_frame_(audio_frame), | 45 audio_frame_(audio_frame), |
42 muted_(muted), | 46 muted_(muted), |
43 energy_(energy) {} | 47 energy_(energy) { |
44 | 48 RTC_DCHECK(source_status); |
45 // a.ShouldMixBefore(b) is used to select mixer sources. | 49 RTC_DCHECK(audio_frame); |
46 bool ShouldMixBefore(const SourceFrame& other) const { | |
47 if (muted_ != other.muted_) { | |
48 return other.muted_; | |
49 } | |
50 | |
51 const auto our_activity = audio_frame_->vad_activity_; | |
52 const auto other_activity = other.audio_frame_->vad_activity_; | |
53 | |
54 if (our_activity != other_activity) { | |
55 return our_activity == AudioFrame::kVadActive; | |
56 } | |
57 | |
58 return energy_ > other.energy_; | |
59 } | 50 } |
60 | 51 |
61 AudioSourceWithMixStatus* audio_source_ = nullptr; | 52 AudioMixerImpl::SourceStatus* source_status_ = nullptr; |
62 AudioFrame* audio_frame_ = nullptr; | 53 AudioFrame* audio_frame_ = nullptr; |
63 bool muted_ = true; | 54 bool muted_ = true; |
64 uint32_t energy_ = 0; | 55 uint32_t energy_ = 0; |
65 }; | 56 }; |
66 | 57 |
58 // ShouldMixBefore(a, b) is used to select mixer sources. | |
59 bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) { | |
60 if (a.muted_ != b.muted_) { | |
61 return b.muted_; | |
62 } | |
67 | 63 |
68 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { | 64 const auto a_activity = a.audio_frame_->vad_activity_; |
65 const auto b_activity = b.audio_frame_->vad_activity_; | |
66 | |
67 if (a_activity != b_activity) { | |
68 return a_activity == AudioFrame::kVadActive; | |
69 } | |
70 | |
71 return a.energy_ > b.energy_; | |
72 } | |
73 | |
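The comparator above encodes a three-level priority: unmuted frames beat muted ones, VAD-active frames beat passive ones, and higher energy breaks the remaining ties. A self-contained analogue using hypothetical reduced types (not the CL's code) shows the ordering in isolation:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for SourceFrame, reduced to the three fields
// the comparison actually reads.
struct Candidate {
  bool muted;
  bool vad_active;
  uint32_t energy;
};

// Same priority as ShouldMixBefore: unmuted first, then VAD-active,
// then higher energy. This is a strict weak ordering, so it is safe
// to use with std::sort.
bool MixBefore(const Candidate& a, const Candidate& b) {
  if (a.muted != b.muted) return b.muted;
  if (a.vad_active != b.vad_active) return a.vad_active;
  return a.energy > b.energy;
}

int main() {
  std::vector<Candidate> frames = {
      {true, false, 900}, {false, false, 500}, {false, true, 100}};
  // After sorting, the unmuted VAD-active frame comes first and the
  // muted frame last, regardless of energy.
  std::sort(frames.begin(), frames.end(), MixBefore);
  return 0;
}
```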
74 void RampAndUpdateGain( | |
75 const std::vector<SourceFrame>& mixed_sources_and_frames) { | |
69 for (const auto& source_frame : mixed_sources_and_frames) { | 76 for (const auto& source_frame : mixed_sources_and_frames) { |
70 // Ramp in previously unmixed. | 77 float target_gain_ = source_frame.source_status_->is_mixed_ ? 1 : 0; |
hlundin-webrtc
2016/10/07 15:03:10
target_gain without underscore.
aleloi
2016/10/10 10:58:17
Oops!
| |
71 if (!source_frame.audio_source_->WasMixed()) { | 78 Ramp(source_frame.audio_frame_, source_frame.source_status_->gain_, |
72 NewMixerRampIn(source_frame.audio_frame_); | 79 target_gain_); |
73 } | 80 source_frame.source_status_->gain_ = target_gain_; |
74 | |
75 const bool is_mixed = source_frame.audio_source_->IsMixed(); | |
76 // Ramp out currently unmixed. | |
77 if (source_frame.audio_source_->WasMixed() && !is_mixed) { | |
78 NewMixerRampOut(source_frame.audio_frame_); | |
79 } | |
80 } | 81 } |
81 } | 82 } |
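RampAndUpdateGain moves each selected source's gain toward 1 and each deselected source's gain toward 0, storing the reached value in source_status_->gain_ for the next frame. The Ramp helper itself is declared in audio_frame_manipulator.h; assuming it interpolates linearly across the frame, a minimal sketch could look like this (an assumption about its behavior, not the actual helper):

```cpp
#include <cstddef>
#include <cstdint>

// Sketch: apply a gain that moves linearly from |start_gain| to
// |target_gain| across one frame of interleaved int16 samples.
void RampSketch(int16_t* samples,
                size_t num_samples,
                float start_gain,
                float target_gain) {
  if (num_samples == 0)
    return;
  const float increment = (target_gain - start_gain) / num_samples;
  float gain = start_gain;
  for (size_t i = 0; i < num_samples; ++i) {
    samples[i] = static_cast<int16_t>(samples[i] * gain);
    gain += increment;
  }
}
```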
82 | 83 |
83 // Mix the AudioFrames stored in audioFrameList into mixed_audio. | 84 // Mix the AudioFrames stored in audioFrameList into mixed_audio. |
84 int32_t MixFromList(AudioFrame* mixed_audio, | 85 int32_t MixFromList(AudioFrame* mixed_audio, |
85 const AudioFrameList& audio_frame_list, | 86 const AudioFrameList& audio_frame_list, |
86 int32_t id, | 87 int32_t id, |
87 bool use_limiter) { | 88 bool use_limiter) { |
88 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | 89 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
89 "MixFromList(mixed_audio, audio_frame_list)"); | 90 "MixFromList(mixed_audio, audio_frame_list)"); |
(...skipping 24 matching lines...) | |
114 // Divide by two to avoid saturation in the mixing. | 115 // Divide by two to avoid saturation in the mixing. |
115 // This is only meaningful if the limiter will be used. | 116 // This is only meaningful if the limiter will be used. |
116 *frame >>= 1; | 117 *frame >>= 1; |
117 } | 118 } |
118 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); | 119 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); |
119 *mixed_audio += *frame; | 120 *mixed_audio += *frame; |
120 } | 121 } |
121 return 0; | 122 return 0; |
122 } | 123 } |
123 | 124 |
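The `*frame >>= 1` in MixFromList is simple overflow protection: summing two near-full-scale 16-bit samples would exceed INT16_MAX, so each addend is halved first and the limiter later makes up the lost headroom. A tiny worked example (not from the CL):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Two near-full-scale 16-bit samples.
  const int16_t a = 30000;
  const int16_t b = 30000;
  // Naive mixing overflows: 30000 + 30000 = 60000 > INT16_MAX (32767).
  // Halving each addend first keeps the sum in range; the limiter then
  // restores the overall level.
  const int16_t mixed = static_cast<int16_t>((a >> 1) + (b >> 1));
  std::printf("mixed = %d\n", mixed);  // mixed = 30000
  return 0;
}
```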
124 MixerAudioSourceList::const_iterator FindSourceInList( | 125 AudioMixerImpl::MixerAudioSourceList::const_iterator FindSourceInList( |
125 AudioMixerImpl::Source const* audio_source, | 126 AudioMixerImpl::Source const* audio_source, |
126 MixerAudioSourceList const* audio_source_list) { | 127 AudioMixerImpl::MixerAudioSourceList const* audio_source_list) { |
127 return std::find_if(audio_source_list->begin(), audio_source_list->end(), | 128 return std::find_if(audio_source_list->begin(), audio_source_list->end(), |
128 [audio_source](const AudioSourceWithMixStatus& p) { | 129 [audio_source](const AudioMixerImpl::SourceStatus& p) { |
129 return p.audio_source() == audio_source; | 130 return p.audio_source_ == audio_source; |
130 }); | 131 }); |
131 } | 132 } |
132 | 133 |
133 MixerAudioSourceList::iterator FindSourceInList( | 134 AudioMixerImpl::MixerAudioSourceList::iterator FindSourceInList( |
134 AudioMixerImpl::Source const* audio_source, | 135 AudioMixerImpl::Source const* audio_source, |
135 MixerAudioSourceList* audio_source_list) { | 136 AudioMixerImpl::MixerAudioSourceList* audio_source_list) { |
136 return std::find_if(audio_source_list->begin(), audio_source_list->end(), | 137 return std::find_if(audio_source_list->begin(), audio_source_list->end(), |
137 [audio_source](const AudioSourceWithMixStatus& p) { | 138 [audio_source](const AudioMixerImpl::SourceStatus& p) { |
138 return p.audio_source() == audio_source; | 139 return p.audio_source_ == audio_source; |
139 }); | 140 }); |
140 } | 141 } |
141 | 142 |
142 } // namespace | 143 } // namespace |
143 | 144 |
144 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { | 145 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { |
145 return AudioMixerImpl::Create(id); | 146 return AudioMixerImpl::Create(id); |
146 } | 147 } |
147 | 148 |
148 AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter) | 149 AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter) |
(...skipping 206 matching lines...) | |
355 RTC_DCHECK_RUN_ON(&thread_checker_); | 356 RTC_DCHECK_RUN_ON(&thread_checker_); |
356 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 357 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
357 "GetNonAnonymousAudio()"); | 358 "GetNonAnonymousAudio()"); |
358 AudioFrameList result; | 359 AudioFrameList result; |
359 std::vector<SourceFrame> audio_source_mixing_data_list; | 360 std::vector<SourceFrame> audio_source_mixing_data_list; |
360 std::vector<SourceFrame> ramp_list; | 361 std::vector<SourceFrame> ramp_list; |
361 | 362 |
362 // Get audio source audio and put it in the struct vector. | 363 // Get audio source audio and put it in the struct vector. |
363 for (auto& source_and_status : audio_source_list_) { | 364 for (auto& source_and_status : audio_source_list_) { |
364 auto audio_frame_with_info = | 365 auto audio_frame_with_info = |
365 source_and_status.audio_source()->GetAudioFrameWithMuted( | 366 source_and_status.audio_source_->GetAudioFrameWithMuted( |
366 id_, static_cast<int>(OutputFrequency())); | 367 id_, static_cast<int>(OutputFrequency())); |
367 | 368 |
368 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 369 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
369 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 370 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
370 | 371 |
371 if (audio_frame_info == Source::AudioFrameInfo::kError) { | 372 if (audio_frame_info == Source::AudioFrameInfo::kError) { |
372 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 373 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
373 "failed to GetAudioFrameWithMuted() from source"); | 374 "failed to GetAudioFrameWithMuted() from source"); |
374 continue; | 375 continue; |
375 } | 376 } |
376 audio_source_mixing_data_list.emplace_back( | 377 audio_source_mixing_data_list.emplace_back( |
377 &source_and_status, audio_source_audio_frame, | 378 &source_and_status, audio_source_audio_frame, |
378 audio_frame_info == Source::AudioFrameInfo::kMuted); | 379 audio_frame_info == Source::AudioFrameInfo::kMuted); |
379 } | 380 } |
380 | 381 |
381 // Sort frames by sorting function. | 382 // Sort frames by sorting function. |
382 std::sort(audio_source_mixing_data_list.begin(), | 383 std::sort(audio_source_mixing_data_list.begin(), |
383 audio_source_mixing_data_list.end(), | 384 audio_source_mixing_data_list.end(), ShouldMixBefore); |
384 std::mem_fn(&SourceFrame::ShouldMixBefore)); | |
385 | 385 |
386 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; | 386 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; |
387 | 387 |
388 // Go through list in order and put unmuted frames in result list. | 388 // Go through list in order and put unmuted frames in result list. |
389 for (const auto& p : audio_source_mixing_data_list) { | 389 for (const auto& p : audio_source_mixing_data_list) { |
390 // Filter muted. | 390 // Filter muted. |
391 if (p.muted_) { | 391 if (p.muted_) { |
392 p.audio_source_->SetIsMixed(false); | 392 p.source_status_->is_mixed_ = false; |
393 continue; | 393 continue; |
394 } | 394 } |
395 | 395 |
396 // Add frame to result vector for mixing. | 396 // Add frame to result vector for mixing. |
397 bool is_mixed = false; | 397 bool is_mixed = false; |
398 if (max_audio_frame_counter > 0) { | 398 if (max_audio_frame_counter > 0) { |
399 --max_audio_frame_counter; | 399 --max_audio_frame_counter; |
400 result.push_back(p.audio_frame_); | 400 result.push_back(p.audio_frame_); |
401 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, -1); | 401 ramp_list.emplace_back(p.source_status_, p.audio_frame_, false, -1); |
402 is_mixed = true; | 402 is_mixed = true; |
403 } | 403 } |
404 p.audio_source_->SetIsMixed(is_mixed); | 404 p.source_status_->is_mixed_ = is_mixed; |
405 } | 405 } |
406 Ramp(ramp_list); | 406 RampAndUpdateGain(ramp_list); |
407 return result; | 407 return result; |
408 } | 408 } |
409 | 409 |
410 AudioFrameList AudioMixerImpl::GetAnonymousAudio() { | 410 AudioFrameList AudioMixerImpl::GetAnonymousAudio() { |
411 RTC_DCHECK_RUN_ON(&thread_checker_); | 411 RTC_DCHECK_RUN_ON(&thread_checker_); |
412 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 412 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
413 "GetAnonymousAudio()"); | 413 "GetAnonymousAudio()"); |
414 std::vector<SourceFrame> ramp_list; | 414 std::vector<SourceFrame> ramp_list; |
415 AudioFrameList result; | 415 AudioFrameList result; |
416 for (auto& source_and_status : additional_audio_source_list_) { | 416 for (auto& source_and_status : additional_audio_source_list_) { |
417 const auto audio_frame_with_info = | 417 const auto audio_frame_with_info = |
418 source_and_status.audio_source()->GetAudioFrameWithMuted( | 418 source_and_status.audio_source_->GetAudioFrameWithMuted( |
419 id_, OutputFrequency()); | 419 id_, OutputFrequency()); |
420 const auto ret = audio_frame_with_info.audio_frame_info; | 420 const auto ret = audio_frame_with_info.audio_frame_info; |
421 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 421 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
422 if (ret == Source::AudioFrameInfo::kError) { | 422 if (ret == Source::AudioFrameInfo::kError) { |
423 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 423 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
424 "failed to GetAudioFrameWithMuted() from audio_source"); | 424 "failed to GetAudioFrameWithMuted() from audio_source"); |
425 continue; | 425 continue; |
426 } | 426 } |
427 if (ret != Source::AudioFrameInfo::kMuted) { | 427 if (ret != Source::AudioFrameInfo::kMuted) { |
428 result.push_back(audio_frame); | 428 result.push_back(audio_frame); |
429 ramp_list.emplace_back(&source_and_status, audio_frame, false, 0); | 429 ramp_list.emplace_back(&source_and_status, audio_frame, false, 0); |
430 source_and_status.SetIsMixed(true); | 430 source_and_status.is_mixed_ = true; |
431 } | 431 } |
432 } | 432 } |
433 Ramp(ramp_list); | 433 RampAndUpdateGain(ramp_list); |
434 return result; | 434 return result; |
435 } | 435 } |
436 | 436 |
437 bool AudioMixerImpl::AddAudioSourceToList( | 437 bool AudioMixerImpl::AddAudioSourceToList( |
438 Source* audio_source, | 438 Source* audio_source, |
439 MixerAudioSourceList* audio_source_list) const { | 439 MixerAudioSourceList* audio_source_list) const { |
440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
441 "AddAudioSourceToList(audio_source, audio_source_list)"); | 441 "AddAudioSourceToList(audio_source, audio_source_list)"); |
442 audio_source_list->emplace_back(audio_source); | 442 audio_source_list->emplace_back(audio_source, false, 0); |
443 return true; | 443 return true; |
444 } | 444 } |
445 | 445 |
446 bool AudioMixerImpl::RemoveAudioSourceFromList( | 446 bool AudioMixerImpl::RemoveAudioSourceFromList( |
447 Source* audio_source, | 447 Source* audio_source, |
448 MixerAudioSourceList* audio_source_list) const { | 448 MixerAudioSourceList* audio_source_list) const { |
449 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 449 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
450 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); | 450 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); |
451 const auto iter = FindSourceInList(audio_source, audio_source_list); | 451 const auto iter = FindSourceInList(audio_source, audio_source_list); |
452 if (iter != audio_source_list->end()) { | 452 if (iter != audio_source_list->end()) { |
(...skipping 51 matching lines...) | |
504 } | 504 } |
505 | 505 |
506 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( | 506 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( |
507 AudioMixerImpl::Source* audio_source) { | 507 AudioMixerImpl::Source* audio_source) { |
508 RTC_DCHECK_RUN_ON(&thread_checker_); | 508 RTC_DCHECK_RUN_ON(&thread_checker_); |
509 rtc::CritScope lock(&crit_); | 509 rtc::CritScope lock(&crit_); |
510 | 510 |
511 const auto non_anonymous_iter = | 511 const auto non_anonymous_iter = |
512 FindSourceInList(audio_source, &audio_source_list_); | 512 FindSourceInList(audio_source, &audio_source_list_); |
513 if (non_anonymous_iter != audio_source_list_.end()) { | 513 if (non_anonymous_iter != audio_source_list_.end()) { |
514 return non_anonymous_iter->IsMixed(); | 514 return non_anonymous_iter->is_mixed_; |
515 } | 515 } |
516 | 516 |
517 const auto anonymous_iter = | 517 const auto anonymous_iter = |
518 FindSourceInList(audio_source, &additional_audio_source_list_); | 518 FindSourceInList(audio_source, &additional_audio_source_list_); |
519 if (anonymous_iter != audio_source_list_.end()) { | 519 if (anonymous_iter != additional_audio_source_list_.end()) { |
520 return anonymous_iter->IsMixed(); | 520 return anonymous_iter->is_mixed_; |
521 } | 521 } |
522 | 522 |
523 LOG(LS_ERROR) << "Audio source unknown"; | 523 LOG(LS_ERROR) << "Audio source unknown"; |
524 return false; | 524 return false; |
525 } | 525 } |
526 } // namespace webrtc | 526 } // namespace webrtc |