OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | 14 #include <functional> |
15 #include <utility> | 15 #include <utility> |
16 | 16 |
17 #include "webrtc/base/logging.h" | |
the sun (2016/10/06 09:50:31):
nit: belongs in different CL? I don't see you doin
aleloi (2016/10/06 12:15:57):
I do in the new methods GetSourceWithStatusForTest
17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
18 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
19 #include "webrtc/system_wrappers/include/trace.h" | 20 #include "webrtc/system_wrappers/include/trace.h" |
20 | 21 |
21 namespace webrtc { | 22 namespace webrtc { |
22 namespace { | 23 namespace { |
23 | 24 |
24 class SourceFrame { | 25 class SourceFrame { |
25 public: | 26 public: |
26 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 27 SourceFrame(AudioSourceWithMixStatus* audio_source, |
27 : audio_source_(p), | 28 AudioFrame* audio_frame, |
28 audio_frame_(a), | 29 bool muted) |
29 muted_(m), | 30 : audio_source_(audio_source), audio_frame_(audio_frame), muted_(muted) { |
30 was_mixed_before_(was_mixed_before) { | |
31 if (!muted_) { | 31 if (!muted_) { |
32 energy_ = NewMixerCalculateEnergy(*a); | 32 energy_ = AudioMixerCalculateEnergy(*audio_frame); |
33 } | 33 } |
34 } | 34 } |
35 | 35 |
36 SourceFrame(MixerAudioSource* p, | 36 SourceFrame(AudioSourceWithMixStatus* audio_source, |
37 AudioFrame* a, | 37 AudioFrame* audio_frame, |
38 bool m, | 38 bool muted, |
39 bool was_mixed_before, | |
40 uint32_t energy) | 39 uint32_t energy) |
41 : audio_source_(p), | 40 : audio_source_(audio_source), |
42 audio_frame_(a), | 41 audio_frame_(audio_frame), |
43 muted_(m), | 42 muted_(muted), |
44 energy_(energy), | 43 energy_(energy) {} |
45 was_mixed_before_(was_mixed_before) {} | |
46 | 44 |
47 // a.shouldMixBefore(b) is used to select mixer participants. | 45 // a.ShouldMixBefore(b) is used to select mixer sources. |
48 bool shouldMixBefore(const SourceFrame& other) const { | 46 bool ShouldMixBefore(const SourceFrame& other) const { |
49 if (muted_ != other.muted_) { | 47 if (muted_ != other.muted_) { |
50 return other.muted_; | 48 return other.muted_; |
51 } | 49 } |
52 | 50 |
53 const auto our_activity = audio_frame_->vad_activity_; | 51 const auto our_activity = audio_frame_->vad_activity_; |
54 const auto other_activity = other.audio_frame_->vad_activity_; | 52 const auto other_activity = other.audio_frame_->vad_activity_; |
55 | 53 |
56 if (our_activity != other_activity) { | 54 if (our_activity != other_activity) { |
57 return our_activity == AudioFrame::kVadActive; | 55 return our_activity == AudioFrame::kVadActive; |
58 } | 56 } |
59 | 57 |
60 return energy_ > other.energy_; | 58 return energy_ > other.energy_; |
61 } | 59 } |
62 | 60 |
63 MixerAudioSource* audio_source_; | 61 AudioSourceWithMixStatus* audio_source_; |
the sun (2016/10/06 09:50:31):
Provide default values, since you haven't removed
aleloi (2016/10/06 12:15:57):
Done.
64 AudioFrame* audio_frame_; | 62 AudioFrame* audio_frame_; |
65 bool muted_; | 63 bool muted_; |
66 uint32_t energy_; | 64 uint32_t energy_; |
67 bool was_mixed_before_; | |
68 }; | 65 }; |
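
The three-way priority encoded in ShouldMixBefore() — unmuted before muted, VAD-active before passive, then higher energy first — is what later drives the std::sort in GetNonAnonymousAudio(). A standalone sketch of that ordering, using simplified stand-in types rather than the real WebRTC classes:

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

enum class Vad { kActive, kPassive };

struct Frame {
  const char* name;
  bool muted;
  Vad vad;
  uint32_t energy;  // Only meaningful when !muted.

  bool ShouldMixBefore(const Frame& other) const {
    if (muted != other.muted) return other.muted;      // Unmuted first.
    if (vad != other.vad) return vad == Vad::kActive;  // VAD-active first.
    return energy > other.energy;                      // Then loudest first.
  }
};

int main() {
  std::vector<Frame> frames = {{"a", false, Vad::kPassive, 900},
                               {"b", true, Vad::kActive, 0},
                               {"c", false, Vad::kActive, 100},
                               {"d", false, Vad::kActive, 500}};
  // std::mem_fn adapts the two-argument member predicate into the binary
  // comparator std::sort expects, exactly as GetNonAnonymousAudio() does.
  std::sort(frames.begin(), frames.end(),
            std::mem_fn(&Frame::ShouldMixBefore));
  for (const auto& f : frames) std::cout << f.name << ' ';  // Prints: d c a b
  std::cout << '\n';
  return 0;
}
```
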
69 | 66 |
70 // Remixes a frame between stereo and mono. | 67 // Remixes a frame between stereo and mono. |
71 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { | 68 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { |
72 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 69 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
73 if (frame->num_channels_ == 1 && number_of_channels == 2) { | 70 if (frame->num_channels_ == 1 && number_of_channels == 2) { |
74 AudioFrameOperations::MonoToStereo(frame); | 71 AudioFrameOperations::MonoToStereo(frame); |
75 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { | 72 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { |
76 AudioFrameOperations::StereoToMono(frame); | 73 AudioFrameOperations::StereoToMono(frame); |
77 } | 74 } |
78 } | 75 } |
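
RemixFrame() only dispatches to AudioFrameOperations. For intuition, the usual arithmetic behind those two conversions looks roughly like this — a sketch, since the real MonoToStereo()/StereoToMono() may differ in details such as saturation handling:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Mono -> stereo: duplicate each sample into both channels.
std::vector<int16_t> MonoToStereo(const std::vector<int16_t>& mono) {
  std::vector<int16_t> stereo;
  stereo.reserve(mono.size() * 2);
  for (int16_t s : mono) {
    stereo.push_back(s);  // Left.
    stereo.push_back(s);  // Right.
  }
  return stereo;
}

// Stereo -> mono: average each interleaved L/R pair.
std::vector<int16_t> StereoToMono(const std::vector<int16_t>& stereo) {
  std::vector<int16_t> mono(stereo.size() / 2);
  for (size_t i = 0; i < mono.size(); ++i) {
    mono[i] = static_cast<int16_t>(
        (static_cast<int32_t>(stereo[2 * i]) + stereo[2 * i + 1]) / 2);
  }
  return mono;
}
```
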
79 | 76 |
80 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { | 77 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
81 for (const auto& source_frame : mixed_sources_and_frames) { | 78 for (const auto& source_frame : mixed_sources_and_frames) { |
82 // Ramp in previously unmixed. | 79 // Ramp in previously unmixed. |
83 if (!source_frame.was_mixed_before_) { | 80 if (!source_frame.audio_source_->WasMixed()) { |
84 NewMixerRampIn(source_frame.audio_frame_); | 81 NewMixerRampIn(source_frame.audio_frame_); |
85 } | 82 } |
86 | 83 |
87 const bool is_mixed = source_frame.audio_source_->IsMixed(); | 84 const bool is_mixed = source_frame.audio_source_->IsMixed(); |
88 // Ramp out currently unmixed. | 85 // Ramp out currently unmixed. |
89 if (source_frame.was_mixed_before_ && !is_mixed) { | 86 if (source_frame.audio_source_->WasMixed() && !is_mixed) { |
90 NewMixerRampOut(source_frame.audio_frame_); | 87 NewMixerRampOut(source_frame.audio_frame_); |
91 } | 88 } |
92 } | 89 } |
93 } | 90 } |
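
Ramp() fades sources in as they enter the mix and out as they leave, avoiding audible clicks at mix-membership changes. A minimal linear ramp of the kind NewMixerRampIn()/NewMixerRampOut() apply — the actual curve lives in audio_frame_manipulator.h and is not shown in this CL, so treat this as an illustration:

```cpp
#include <cstddef>
#include <cstdint>

// Apply a linearly interpolated gain from start_gain to end_gain
// across the buffer.
void LinearRamp(int16_t* samples, size_t count, float start_gain,
                float end_gain) {
  if (count == 0) return;
  const float step = (end_gain - start_gain) / static_cast<float>(count);
  float gain = start_gain;
  for (size_t i = 0; i < count; ++i) {
    samples[i] = static_cast<int16_t>(samples[i] * gain);
    gain += step;
  }
}

// Ramp in:  LinearRamp(data, n, 0.f, 1.f);
// Ramp out: LinearRamp(data, n, 1.f, 0.f);
```
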
94 | 91 |
95 // Mix the AudioFrames stored in audioFrameList into mixed_audio. | 92 // Mix the AudioFrames stored in audioFrameList into mixed_audio. |
96 int32_t MixFromList(AudioFrame* mixed_audio, | 93 int32_t MixFromList(AudioFrame* mixed_audio, |
97 const AudioFrameList& audio_frame_list, | 94 const AudioFrameList& audio_frame_list, |
98 int32_t id, | 95 int32_t id, |
99 bool use_limiter) { | 96 bool use_limiter) { |
(...skipping 46 matching lines...)
146 num_mixed_audio_sources_(0), | 143 num_mixed_audio_sources_(0), |
147 use_limiter_(true), | 144 use_limiter_(true), |
148 time_stamp_(0), | 145 time_stamp_(0), |
149 limiter_(std::move(limiter)) { | 146 limiter_(std::move(limiter)) { |
150 SetOutputFrequency(kDefaultFrequency); | 147 SetOutputFrequency(kDefaultFrequency); |
151 thread_checker_.DetachFromThread(); | 148 thread_checker_.DetachFromThread(); |
152 } | 149 } |
153 | 150 |
154 AudioMixerImpl::~AudioMixerImpl() {} | 151 AudioMixerImpl::~AudioMixerImpl() {} |
155 | 152 |
156 std::unique_ptr<AudioMixer> AudioMixerImpl::Create(int id) { | 153 std::unique_ptr<AudioMixerImpl> AudioMixerImpl::Create(int id) { |
157 Config config; | 154 Config config; |
158 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 155 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
159 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config)); | 156 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config)); |
160 if (!limiter.get()) | 157 if (!limiter.get()) |
161 return nullptr; | 158 return nullptr; |
162 | 159 |
163 if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) != | 160 if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) != |
164 limiter->kNoError) | 161 limiter->kNoError) |
165 return nullptr; | 162 return nullptr; |
166 | 163 |
167 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the | 164 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the |
168 // divide-by-2 but -7 is used instead to give a bit of headroom since the | 165 // divide-by-2 but -7 is used instead to give a bit of headroom since the |
169 // AGC is not a hard limiter. | 166 // AGC is not a hard limiter. |
170 if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError) | 167 if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError) |
171 return nullptr; | 168 return nullptr; |
172 | 169 |
173 if (limiter->gain_control()->set_compression_gain_db(0) != limiter->kNoError) | 170 if (limiter->gain_control()->set_compression_gain_db(0) != limiter->kNoError) |
174 return nullptr; | 171 return nullptr; |
175 | 172 |
176 if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError) | 173 if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError) |
177 return nullptr; | 174 return nullptr; |
178 | 175 |
179 if (limiter->gain_control()->Enable(true) != limiter->kNoError) | 176 if (limiter->gain_control()->Enable(true) != limiter->kNoError) |
180 return nullptr; | 177 return nullptr; |
181 | 178 |
182 return std::unique_ptr<AudioMixer>( | 179 return std::unique_ptr<AudioMixerImpl>( |
183 new AudioMixerImpl(id, std::move(limiter))); | 180 new AudioMixerImpl(id, std::move(limiter))); |
184 } | 181 } |
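
For reference, the headroom arithmetic behind the -7 dBFS target mentioned above: halving each of two mixed sources is an attenuation of 20·log10(1/2) ≈ -6.02 dB, so aiming one dB lower leaves a little room for the soft limiter to overshoot.

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // The divide-by-2 applied when mixing two sources, expressed in dB.
  std::printf("20*log10(0.5) = %.2f dB\n", 20.0 * std::log10(0.5));  // -6.02
  return 0;
}
```
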
185 | 182 |
186 void AudioMixerImpl::Mix(int sample_rate, | 183 void AudioMixerImpl::Mix(int sample_rate, |
187 size_t number_of_channels, | 184 size_t number_of_channels, |
188 AudioFrame* audio_frame_for_mixing) { | 185 AudioFrame* audio_frame_for_mixing) { |
189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 186 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
190 RTC_DCHECK_RUN_ON(&thread_checker_); | 187 RTC_DCHECK_RUN_ON(&thread_checker_); |
191 | 188 |
192 if (sample_rate != kNbInHz && sample_rate != kWbInHz && | 189 if (sample_rate != kNbInHz && sample_rate != kWbInHz && |
(...skipping 144 matching lines...)
337 ? 0 | 334 ? 0 |
338 : -1; | 335 : -1; |
339 } | 336 } |
340 | 337 |
341 bool AudioMixerImpl::AnonymousMixabilityStatus( | 338 bool AudioMixerImpl::AnonymousMixabilityStatus( |
342 const MixerAudioSource& audio_source) const { | 339 const MixerAudioSource& audio_source) const { |
343 rtc::CritScope lock(&crit_); | 340 rtc::CritScope lock(&crit_); |
344 return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 341 return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
345 } | 342 } |
346 | 343 |
347 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const { | 344 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() { |
348 RTC_DCHECK_RUN_ON(&thread_checker_); | 345 RTC_DCHECK_RUN_ON(&thread_checker_); |
349 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 346 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
350 "GetNonAnonymousAudio()"); | 347 "GetNonAnonymousAudio()"); |
351 AudioFrameList result; | 348 AudioFrameList result; |
352 std::vector<SourceFrame> audio_source_mixing_data_list; | 349 std::vector<SourceFrame> audio_source_mixing_data_list; |
353 std::vector<SourceFrame> ramp_list; | 350 std::vector<SourceFrame> ramp_list; |
354 | 351 |
355 // Get audio source audio and put it in the struct vector. | 352 // Get audio source audio and put it in the struct vector. |
356 for (auto* const audio_source : audio_source_list_) { | 353 for (auto& source_and_status : audio_source_list_) { |
357 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 354 auto audio_frame_with_info = |
358 id_, static_cast<int>(OutputFrequency())); | 355 source_and_status.audio_source()->GetAudioFrameWithMuted( |
356 id_, static_cast<int>(OutputFrequency())); | |
359 | 357 |
360 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 358 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
361 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 359 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
362 | 360 |
363 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | 361 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { |
364 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 362 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
365 "failed to GetAudioFrameWithMuted() from participant"); | 363 "failed to GetAudioFrameWithMuted() from source"); |
366 continue; | 364 continue; |
367 } | 365 } |
368 audio_source_mixing_data_list.emplace_back( | 366 audio_source_mixing_data_list.emplace_back( |
369 audio_source, audio_source_audio_frame, | 367 &source_and_status, audio_source_audio_frame, |
370 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, | 368 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted); |
371 audio_source->WasMixed()); | |
372 } | 369 } |
373 | 370 |
374 // Sort frames by sorting function. | 371 // Sort frames by sorting function. |
375 std::sort(audio_source_mixing_data_list.begin(), | 372 std::sort(audio_source_mixing_data_list.begin(), |
376 audio_source_mixing_data_list.end(), | 373 audio_source_mixing_data_list.end(), |
377 std::mem_fn(&SourceFrame::shouldMixBefore)); | 374 std::mem_fn(&SourceFrame::ShouldMixBefore)); |
378 | 375 |
379 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; | 376 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; |
380 | 377 |
381 // Go through list in order and put unmuted frames in result list. | 378 // Go through list in order and put unmuted frames in result list. |
382 for (const SourceFrame& p : audio_source_mixing_data_list) { | 379 for (const auto& p : audio_source_mixing_data_list) { |
383 // Filter muted. | 380 // Filter muted. |
384 if (p.muted_) { | 381 if (p.muted_) { |
385 p.audio_source_->SetIsMixed(false); | 382 p.audio_source_->SetIsMixed(false); |
386 continue; | 383 continue; |
387 } | 384 } |
388 | 385 |
389 // Add frame to result vector for mixing. | 386 // Add frame to result vector for mixing. |
390 bool is_mixed = false; | 387 bool is_mixed = false; |
391 if (max_audio_frame_counter > 0) { | 388 if (max_audio_frame_counter > 0) { |
392 --max_audio_frame_counter; | 389 --max_audio_frame_counter; |
393 result.push_back(p.audio_frame_); | 390 result.push_back(p.audio_frame_); |
394 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, | 391 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, -1); |
395 p.was_mixed_before_, -1); | |
396 is_mixed = true; | 392 is_mixed = true; |
397 } | 393 } |
398 p.audio_source_->SetIsMixed(is_mixed); | 394 p.audio_source_->SetIsMixed(is_mixed); |
399 } | 395 } |
400 Ramp(ramp_list); | 396 Ramp(ramp_list); |
401 return result; | 397 return result; |
402 } | 398 } |
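
The loop above implements a simple capped selection: the list is already sorted by mixing priority, so the first kMaximumAmountOfMixedAudioSources unmuted frames win slots, and every source's mix flag is updated either way. The same pattern in isolation, with stand-in types:

```cpp
#include <vector>

struct Candidate {
  bool muted = false;
  bool is_mixed = false;
};

// Walk the priority-sorted list once; unmuted candidates claim slots
// until the cap runs out, and every candidate's flag is set either way.
void SelectForMix(std::vector<Candidate>* sorted_candidates, int max_mixed) {
  for (auto& c : *sorted_candidates) {
    bool is_mixed = false;
    if (!c.muted && max_mixed > 0) {
      --max_mixed;
      is_mixed = true;
    }
    c.is_mixed = is_mixed;
  }
}
```
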
403 | 399 |
404 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { | 400 AudioFrameList AudioMixerImpl::GetAnonymousAudio() { |
405 RTC_DCHECK_RUN_ON(&thread_checker_); | 401 RTC_DCHECK_RUN_ON(&thread_checker_); |
406 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 402 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
407 "GetAnonymousAudio()"); | 403 "GetAnonymousAudio()"); |
408 // The GetAudioFrameWithMuted() callback may result in the audio source being | |
409 // removed from additionalAudioFramesList_. If that happens it will | |
410 // invalidate any iterators. Create a copy of the audio sources list such | |
411 // that the list of participants can be traversed safely. | |
412 std::vector<SourceFrame> ramp_list; | 404 std::vector<SourceFrame> ramp_list; |
413 MixerAudioSourceList additional_audio_sources_list; | |
414 AudioFrameList result; | 405 AudioFrameList result; |
415 additional_audio_sources_list.insert(additional_audio_sources_list.begin(), | 406 for (auto& source_and_status : additional_audio_source_list_) { |
416 additional_audio_source_list_.begin(), | |
417 additional_audio_source_list_.end()); | |
418 | |
419 for (const auto& audio_source : additional_audio_sources_list) { | |
420 const auto audio_frame_with_info = | 407 const auto audio_frame_with_info = |
421 audio_source->GetAudioFrameWithMuted(id_, OutputFrequency()); | 408 source_and_status.audio_source()->GetAudioFrameWithMuted( |
409 id_, OutputFrequency()); | |
422 const auto ret = audio_frame_with_info.audio_frame_info; | 410 const auto ret = audio_frame_with_info.audio_frame_info; |
423 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 411 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
424 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 412 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
425 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 413 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
426 "failed to GetAudioFrameWithMuted() from audio_source"); | 414 "failed to GetAudioFrameWithMuted() from audio_source"); |
427 continue; | 415 continue; |
428 } | 416 } |
429 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { | 417 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { |
430 result.push_back(audio_frame); | 418 result.push_back(audio_frame); |
431 ramp_list.emplace_back(audio_source, audio_frame, false, | 419 ramp_list.emplace_back(&source_and_status, audio_frame, false, 0); |
432 audio_source->IsMixed(), 0); | 420 source_and_status.SetIsMixed(true); |
433 audio_source->SetIsMixed(true); | |
434 } | 421 } |
435 } | 422 } |
436 Ramp(ramp_list); | 423 Ramp(ramp_list); |
437 return result; | 424 return result; |
438 } | 425 } |
439 | 426 |
440 bool AudioMixerImpl::IsAudioSourceInList( | 427 bool AudioMixerImpl::IsAudioSourceInList( |
the sun (2016/10/06 09:50:31):
Note, this type of function, which does not touch
aleloi
2016/10/06 12:15:57
Done.
441 const MixerAudioSource& audio_source, | 428 const MixerAudioSource& audio_source, |
442 const MixerAudioSourceList& audio_source_list) const { | 429 const MixerAudioSourceList& audio_source_list) const { |
443 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 430 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
444 "IsAudioSourceInList(audio_source,audio_source_list)"); | 431 "IsAudioSourceInList(audio_source,audio_source_list)"); |
445 return std::find(audio_source_list.begin(), audio_source_list.end(), | 432 for (const auto& source_and_mix : audio_source_list) { |
446 &audio_source) != audio_source_list.end(); | 433 if (source_and_mix.audio_source() == &audio_source) { |
434 return true; | |
435 } | |
436 } | |
437 return false; | |
447 } | 438 } |
448 | 439 |
449 bool AudioMixerImpl::AddAudioSourceToList( | 440 bool AudioMixerImpl::AddAudioSourceToList( |
450 MixerAudioSource* audio_source, | 441 MixerAudioSource* audio_source, |
451 MixerAudioSourceList* audio_source_list) const { | 442 MixerAudioSourceList* audio_source_list) const { |
452 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 443 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
453 "AddAudioSourceToList(audio_source, audio_source_list)"); | 444 "AddAudioSourceToList(audio_source, audio_source_list)"); |
454 audio_source_list->push_back(audio_source); | 445 audio_source_list->emplace_back(audio_source); |
455 // Make sure that the mixed status is correct for new MixerAudioSource. | |
456 audio_source->ResetMixedStatus(); | |
457 return true; | 446 return true; |
458 } | 447 } |
459 | 448 |
460 bool AudioMixerImpl::RemoveAudioSourceFromList( | 449 bool AudioMixerImpl::RemoveAudioSourceFromList( |
461 MixerAudioSource* audio_source, | 450 MixerAudioSource* audio_source, |
462 MixerAudioSourceList* audio_source_list) const { | 451 MixerAudioSourceList* audio_source_list) const { |
463 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 452 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
464 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); | 453 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); |
465 const auto iter = std::find(audio_source_list->begin(), | 454 const auto iter = |
466 audio_source_list->end(), audio_source); | 455 std::find_if(audio_source_list->begin(), audio_source_list->end(), |
the sun (2016/10/06 09:50:31):
Looks repeated from IsAudioSourceInList(), althoug
aleloi (2016/10/06 12:15:57):
Turns out I still end up with two identical versio
456 [audio_source](const AudioSourceWithMixStatus& p) { | |
457 return p.audio_source() == audio_source; | |
458 }); | |
467 if (iter != audio_source_list->end()) { | 459 if (iter != audio_source_list->end()) { |
468 audio_source_list->erase(iter); | 460 audio_source_list->erase(iter); |
469 // AudioSource is no longer mixed, reset to default. | |
470 audio_source->ResetMixedStatus(); | |
471 return true; | 461 return true; |
472 } else { | 462 } else { |
473 return false; | 463 return false; |
474 } | 464 } |
475 } | 465 } |
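
As the review thread notes, the find_if here repeats the lookup in IsAudioSourceInList(). One way to share it would be a small helper along these lines — hypothetical, not part of this CL, reusing the file's own MixerAudioSourceList and AudioSourceWithMixStatus types:

```cpp
// Hypothetical shared lookup that both IsAudioSourceInList() and
// RemoveAudioSourceFromList() could call.
MixerAudioSourceList::iterator FindSourceInList(
    const MixerAudioSource* audio_source,
    MixerAudioSourceList* audio_source_list) {
  return std::find_if(audio_source_list->begin(), audio_source_list->end(),
                      [audio_source](const AudioSourceWithMixStatus& p) {
                        return p.audio_source() == audio_source;
                      });
}
```
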
476 | 466 |
477 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { | 467 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { |
478 RTC_DCHECK_RUN_ON(&thread_checker_); | 468 RTC_DCHECK_RUN_ON(&thread_checker_); |
479 if (!use_limiter_) { | 469 if (!use_limiter_) { |
480 return true; | 470 return true; |
(...skipping 31 matching lines...)
512 return level; | 502 return level; |
513 } | 503 } |
514 | 504 |
515 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 505 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
516 RTC_DCHECK_RUN_ON(&thread_checker_); | 506 RTC_DCHECK_RUN_ON(&thread_checker_); |
517 const int level = audio_level_.LevelFullRange(); | 507 const int level = audio_level_.LevelFullRange(); |
518 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 508 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
519 "GetAudioOutputLevelFullRange() => level=%d", level); | 509 "GetAudioOutputLevelFullRange() => level=%d", level); |
520 return level; | 510 return level; |
521 } | 511 } |
512 | |
513 AudioSourceWithMixStatus* AudioMixerImpl::GetSourceWithStatusForTest( | |
514 MixerAudioSource* audio_source) { | |
515 RTC_DCHECK_RUN_ON(&thread_checker_); | |
516 rtc::CritScope lock(&crit_); | |
517 for (auto& source_and_mix : audio_source_list_) { | |
518 if (source_and_mix.audio_source() == audio_source) { | |
519 return &source_and_mix; | |
520 } | |
521 } | |
522 for (auto& source_and_mix : additional_audio_source_list_) { | |
523 if (source_and_mix.audio_source() == audio_source) { | |
524 return &source_and_mix; | |
525 } | |
526 } | |
527 | |
528 LOG_T_F(LS_ERROR) << "Audio source unknown"; | |
529 return nullptr; | |
530 } | |
531 | |
532 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( | |
533 MixerAudioSource* audio_source) { | |
534 RTC_DCHECK_RUN_ON(&thread_checker_); | |
535 rtc::CritScope lock(&crit_); | |
536 const auto* const ptr = GetSourceWithStatusForTest(audio_source); | |
537 if (ptr) { | |
538 return ptr->IsMixed(); | |
539 } else { | |
540 LOG_T_F(LS_ERROR) << "Audio source unknown"; | |
541 return false; | |
542 } | |
543 } | |
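
These test-only accessors let unit tests observe a source's mix status without reaching into mixer internals. A minimal sketch of a consuming test — MockMixerAudioSource is an assumption, modeled on the mock used in the existing mixer unittests:

```cpp
#include <memory>

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_mixer/audio_mixer_impl.h"

namespace webrtc {

TEST(AudioMixerTest, UnknownSourceReportsNotMixed) {
  const std::unique_ptr<AudioMixerImpl> mixer = AudioMixerImpl::Create(1);
  MockMixerAudioSource source;  // Assumption: mock from the mixer unittests.
  // A source that was never added is unknown to the mixer, so the
  // accessor logs an error and reports "not mixed".
  EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&source));
}

}  // namespace webrtc
```
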
522 } // namespace webrtc | 544 } // namespace webrtc |