OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | 14 #include <functional> |
15 #include <utility> | 15 #include <utility> |
16 | 16 |
17 #include "webrtc/base/logging.h" | |
17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
18 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
19 #include "webrtc/system_wrappers/include/trace.h" | 20 #include "webrtc/system_wrappers/include/trace.h" |
20 | 21 |
21 namespace webrtc { | 22 namespace webrtc { |
22 namespace { | 23 namespace { |
23 | 24 |
24 class SourceFrame { | 25 class SourceFrame { |
25 public: | 26 public: |
26 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 27 SourceFrame(AudioSourceWithMixStatus* p, AudioFrame* a, bool m) |
27 : audio_source_(p), | 28 : audio_source_(p), audio_frame_(a), muted_(m) { |
28 audio_frame_(a), | |
29 muted_(m), | |
30 was_mixed_before_(was_mixed_before) { | |
31 if (!muted_) { | 29 if (!muted_) { |
32 energy_ = NewMixerCalculateEnergy(*a); | 30 energy_ = NewMixerCalculateEnergy(*a); |
33 } | 31 } |
34 } | 32 } |
35 | 33 |
36 SourceFrame(MixerAudioSource* p, | 34 SourceFrame(AudioSourceWithMixStatus* p, |
37 AudioFrame* a, | 35 AudioFrame* a, |
38 bool m, | 36 bool m, |
39 bool was_mixed_before, | |
40 uint32_t energy) | 37 uint32_t energy) |
41 : audio_source_(p), | 38 : audio_source_(p), audio_frame_(a), muted_(m), energy_(energy) {} |
42 audio_frame_(a), | |
43 muted_(m), | |
44 energy_(energy), | |
45 was_mixed_before_(was_mixed_before) {} | |
46 | 39 |
47 // a.shouldMixBefore(b) is used to select mixer participants. | 40 // a.shouldMixBefore(b) is used to select mixer participants. |
48 bool shouldMixBefore(const SourceFrame& other) const { | 41 bool shouldMixBefore(const SourceFrame& other) const { |
49 if (muted_ != other.muted_) { | 42 if (muted_ != other.muted_) { |
50 return other.muted_; | 43 return other.muted_; |
51 } | 44 } |
52 | 45 |
53 const auto our_activity = audio_frame_->vad_activity_; | 46 const auto our_activity = audio_frame_->vad_activity_; |
54 const auto other_activity = other.audio_frame_->vad_activity_; | 47 const auto other_activity = other.audio_frame_->vad_activity_; |
55 | 48 |
56 if (our_activity != other_activity) { | 49 if (our_activity != other_activity) { |
57 return our_activity == AudioFrame::kVadActive; | 50 return our_activity == AudioFrame::kVadActive; |
58 } | 51 } |
59 | 52 |
60 return energy_ > other.energy_; | 53 return energy_ > other.energy_; |
61 } | 54 } |
62 | 55 |
63 MixerAudioSource* audio_source_; | 56 AudioSourceWithMixStatus* audio_source_; |
64 AudioFrame* audio_frame_; | 57 AudioFrame* audio_frame_; |
65 bool muted_; | 58 bool muted_; |
66 uint32_t energy_; | 59 uint32_t energy_; |
67 bool was_mixed_before_; | |
68 }; | 60 }; |
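shouldMixBefore establishes a three-level priority: unmuted frames sort before muted ones, voice-active frames before inactive ones, and within the same class the higher-energy frame wins. A minimal sketch of that ordering with std::sort, using a simplified stand-in type rather than the real SourceFrame:

#include <algorithm>
#include <cstdint>
#include <vector>

// Simplified stand-in for SourceFrame, for illustration only.
struct Candidate {
  bool muted;
  bool vad_active;
  uint32_t energy;

  bool ShouldMixBefore(const Candidate& other) const {
    if (muted != other.muted) return other.muted;           // unmuted first
    if (vad_active != other.vad_active) return vad_active;  // speech first
    return energy > other.energy;                           // loudest first
  }
};

int main() {
  std::vector<Candidate> frames = {
      {false, false, 900}, {true, true, 9999}, {false, true, 100}};
  std::sort(frames.begin(), frames.end(),
            [](const Candidate& a, const Candidate& b) {
              return a.ShouldMixBefore(b);
            });
  // Result: unmuted+active (energy 100) first, then unmuted+inactive
  // (energy 900), and the muted frame last despite its high energy.
  return 0;
}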
69 | 61 |
70 // Remixes a frame between stereo and mono. | 62 // Remixes a frame between stereo and mono. |
71 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { | 63 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { |
72 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 64 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
73 if (frame->num_channels_ == 1 && number_of_channels == 2) { | 65 if (frame->num_channels_ == 1 && number_of_channels == 2) { |
74 AudioFrameOperations::MonoToStereo(frame); | 66 AudioFrameOperations::MonoToStereo(frame); |
75 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { | 67 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { |
76 AudioFrameOperations::StereoToMono(frame); | 68 AudioFrameOperations::StereoToMono(frame); |
77 } | 69 } |
78 } | 70 } |
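RemixFrame only ever converts between mono and stereo. A sketch of the assumed sample-level behavior behind the AudioFrameOperations helpers (duplicate the mono channel upward, average the channel pair downward); the real implementations may differ:

#include <cstddef>
#include <cstdint>

// Assumed interleaved remix math; illustration only, not the actual
// AudioFrameOperations code.
void MonoToStereoSketch(const int16_t* mono, int16_t* stereo,
                        size_t samples_per_channel) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    stereo[2 * i] = mono[i];      // left = mono
    stereo[2 * i + 1] = mono[i];  // right = mono
  }
}

void StereoToMonoSketch(const int16_t* stereo, int16_t* mono,
                        size_t samples_per_channel) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    // Average the pair; integer promotion to int avoids overflow.
    mono[i] = static_cast<int16_t>((stereo[2 * i] + stereo[2 * i + 1]) / 2);
  }
}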
79 | 71 |
80 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { | 72 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
81 for (const auto& source_frame : mixed_sources_and_frames) { | 73 for (const auto& source_frame : mixed_sources_and_frames) { |
82 // Ramp in previously unmixed. | 74 // Ramp in previously unmixed. |
83 if (!source_frame.was_mixed_before_) { | 75 if (!source_frame.audio_source_->WasMixed()) { |
84 NewMixerRampIn(source_frame.audio_frame_); | 76 NewMixerRampIn(source_frame.audio_frame_); |
85 } | 77 } |
86 | 78 |
87 const bool is_mixed = source_frame.audio_source_->IsMixed(); | 79 const bool is_mixed = source_frame.audio_source_->IsMixed(); |
88 // Ramp out currently unmixed. | 80 // Ramp out currently unmixed. |
89 if (source_frame.was_mixed_before_ && !is_mixed) { | 81 if (source_frame.audio_source_->WasMixed() && !is_mixed) { |
90 NewMixerRampOut(source_frame.audio_frame_); | 82 NewMixerRampOut(source_frame.audio_frame_); |
91 } | 83 } |
92 } | 84 } |
93 } | 85 } |
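Ramping prevents an audible click when a source enters or leaves the mix. Assuming NewMixerRampIn and NewMixerRampOut apply a linear gain sweep across the frame (a guess at their contract, not the actual audio_frame_manipulator code), the core operation would look like:

#include <cstddef>
#include <cstdint>

// Linear gain sweep over one frame: start_gain at sample 0, approaching
// end_gain at the last sample. Ramp-in is (0 -> 1), ramp-out is (1 -> 0).
void LinearRampSketch(int16_t* samples, size_t count, float start_gain,
                      float end_gain) {
  if (count == 0) return;
  const float step = (end_gain - start_gain) / static_cast<float>(count);
  float gain = start_gain;
  for (size_t i = 0; i < count; ++i) {
    samples[i] = static_cast<int16_t>(samples[i] * gain);
    gain += step;
  }
}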
94 | 86 |
95 // Mix the AudioFrames stored in audio_frame_list into mixed_audio. | 87 // Mix the AudioFrames stored in audio_frame_list into mixed_audio. |
96 int32_t MixFromList(AudioFrame* mixed_audio, | 88 int32_t MixFromList(AudioFrame* mixed_audio, |
97 const AudioFrameList& audio_frame_list, | 89 const AudioFrameList& audio_frame_list, |
98 int32_t id, | 90 int32_t id, |
99 bool use_limiter) { | 91 bool use_limiter) { |
(...skipping 46 matching lines...) | |
146 num_mixed_audio_sources_(0), | 138 num_mixed_audio_sources_(0), |
147 use_limiter_(true), | 139 use_limiter_(true), |
148 time_stamp_(0), | 140 time_stamp_(0), |
149 limiter_(std::move(limiter)) { | 141 limiter_(std::move(limiter)) { |
150 SetOutputFrequency(kDefaultFrequency); | 142 SetOutputFrequency(kDefaultFrequency); |
151 thread_checker_.DetachFromThread(); | 143 thread_checker_.DetachFromThread(); |
152 } | 144 } |
153 | 145 |
154 AudioMixerImpl::~AudioMixerImpl() {} | 146 AudioMixerImpl::~AudioMixerImpl() {} |
155 | 147 |
156 std::unique_ptr<AudioMixer> AudioMixerImpl::Create(int id) { | 148 std::unique_ptr<AudioMixerImpl> AudioMixerImpl::Create(int id) { |
157 Config config; | 149 Config config; |
158 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 150 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
159 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config)); | 151 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config)); |
160 if (!limiter.get()) | 152 if (!limiter.get()) |
161 return nullptr; | 153 return nullptr; |
162 | 154 |
163 if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) != | 155 if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) != |
164 limiter->kNoError) | 156 limiter->kNoError) |
165 return nullptr; | 157 return nullptr; |
166 | 158 |
167 // We smoothly limit the mixed frame to -7 dBFS. -6 would correspond to the | 159 // We smoothly limit the mixed frame to -7 dBFS. -6 would correspond to the |
168 // divide-by-2 but -7 is used instead to give a bit of headroom since the | 160 // divide-by-2 but -7 is used instead to give a bit of headroom since the |
169 // AGC is not a hard limiter. | 161 // AGC is not a hard limiter. |
170 if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError) | 162 if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError) |
171 return nullptr; | 163 return nullptr; |
172 | 164 |
173 if (limiter->gain_control()->set_compression_gain_db(0) != limiter->kNoError) | 165 if (limiter->gain_control()->set_compression_gain_db(0) != limiter->kNoError) |
174 return nullptr; | 166 return nullptr; |
175 | 167 |
176 if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError) | 168 if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError) |
177 return nullptr; | 169 return nullptr; |
178 | 170 |
179 if (limiter->gain_control()->Enable(true) != limiter->kNoError) | 171 if (limiter->gain_control()->Enable(true) != limiter->kNoError) |
180 return nullptr; | 172 return nullptr; |
181 | 173 |
182 return std::unique_ptr<AudioMixer>( | 174 return std::unique_ptr<AudioMixerImpl>( |
183 new AudioMixerImpl(id, std::move(limiter))); | 175 new AudioMixerImpl(id, std::move(limiter))); |
184 } | 176 } |
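The -7 dBFS target follows from simple level arithmetic: mixing two sources with a protective divide-by-2 halves the amplitude, and halving amplitude costs about 6 dB; the extra decibel is margin because the AGC limiter has a soft knee. A quick check of the numbers:

#include <cmath>
#include <cstdio>

int main() {
  // Halving the amplitude (the divide-by-2 applied when two full-scale
  // sources are mixed) corresponds to roughly -6 dB:
  std::printf("20*log10(0.5) = %.2f dB\n", 20.0 * std::log10(0.5));  // -6.02
  // Targeting -7 dBFS instead of -6 leaves about 1 dB of headroom,
  // since the AGC-based limiter is not a brick-wall limiter.
  return 0;
}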
185 | 177 |
186 void AudioMixerImpl::Mix(int sample_rate, | 178 void AudioMixerImpl::Mix(int sample_rate, |
187 size_t number_of_channels, | 179 size_t number_of_channels, |
188 AudioFrame* audio_frame_for_mixing) { | 180 AudioFrame* audio_frame_for_mixing) { |
189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 181 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
190 RTC_DCHECK_RUN_ON(&thread_checker_); | 182 RTC_DCHECK_RUN_ON(&thread_checker_); |
191 | 183 |
192 if (sample_rate != kNbInHz && sample_rate != kWbInHz && | 184 if (sample_rate != kNbInHz && sample_rate != kWbInHz && |
(...skipping 144 matching lines...) | |
337 ? 0 | 329 ? 0 |
338 : -1; | 330 : -1; |
339 } | 331 } |
340 | 332 |
341 bool AudioMixerImpl::AnonymousMixabilityStatus( | 333 bool AudioMixerImpl::AnonymousMixabilityStatus( |
342 const MixerAudioSource& audio_source) const { | 334 const MixerAudioSource& audio_source) const { |
343 rtc::CritScope lock(&crit_); | 335 rtc::CritScope lock(&crit_); |
344 return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 336 return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
345 } | 337 } |
346 | 338 |
347 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const { | 339 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() { |
348 RTC_DCHECK_RUN_ON(&thread_checker_); | 340 RTC_DCHECK_RUN_ON(&thread_checker_); |
349 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 341 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
350 "GetNonAnonymousAudio()"); | 342 "GetNonAnonymousAudio()"); |
351 AudioFrameList result; | 343 AudioFrameList result; |
352 std::vector<SourceFrame> audio_source_mixing_data_list; | 344 std::vector<SourceFrame> audio_source_mixing_data_list; |
353 std::vector<SourceFrame> ramp_list; | 345 std::vector<SourceFrame> ramp_list; |
354 | 346 |
355 // Get audio source audio and put it in the struct vector. | 347 // Get audio source audio and put it in the struct vector. |
356 for (auto* const audio_source : audio_source_list_) { | 348 for (auto& source_and_status : audio_source_list_) { |
357 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 349 auto audio_frame_with_info = |
358 id_, static_cast<int>(OutputFrequency())); | 350 source_and_status.audio_source()->GetAudioFrameWithMuted( |
| 351 id_, static_cast<int>(OutputFrequency())); |
359 | 352 |
360 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 353 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
361 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 354 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
362 | 355 |
363 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | 356 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { |
364 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 357 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
365 "failed to GetAudioFrameWithMuted() from participant"); | 358 "failed to GetAudioFrameWithMuted() from participant"); |
366 continue; | 359 continue; |
367 } | 360 } |
368 audio_source_mixing_data_list.emplace_back( | 361 audio_source_mixing_data_list.emplace_back( |
369 audio_source, audio_source_audio_frame, | 362 &source_and_status, audio_source_audio_frame, |
370 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, | 363 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted); |
371 audio_source->WasMixed()); | |
372 } | 364 } |
373 | 365 |
374 // Sort frames by sorting function. | 366 // Sort frames by sorting function. |
375 std::sort(audio_source_mixing_data_list.begin(), | 367 std::sort(audio_source_mixing_data_list.begin(), |
376 audio_source_mixing_data_list.end(), | 368 audio_source_mixing_data_list.end(), |
377 std::mem_fn(&SourceFrame::shouldMixBefore)); | 369 std::mem_fn(&SourceFrame::shouldMixBefore)); |
378 | 370 |
379 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; | 371 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; |
380 | 372 |
381 // Go through list in order and put unmuted frames in result list. | 373 // Go through list in order and put unmuted frames in result list. |
382 for (const SourceFrame& p : audio_source_mixing_data_list) { | 374 for (const auto& p : audio_source_mixing_data_list) { |
383 // Filter muted. | 375 // Filter muted. |
384 if (p.muted_) { | 376 if (p.muted_) { |
385 p.audio_source_->SetIsMixed(false); | 377 p.audio_source_->SetIsMixed(false); |
386 continue; | 378 continue; |
387 } | 379 } |
388 | 380 |
389 // Add frame to result vector for mixing. | 381 // Add frame to result vector for mixing. |
390 bool is_mixed = false; | 382 bool is_mixed = false; |
391 if (max_audio_frame_counter > 0) { | 383 if (max_audio_frame_counter > 0) { |
392 --max_audio_frame_counter; | 384 --max_audio_frame_counter; |
393 result.push_back(p.audio_frame_); | 385 result.push_back(p.audio_frame_); |
394 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, | 386 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, -1); |
395 p.was_mixed_before_, -1); | |
396 is_mixed = true; | 387 is_mixed = true; |
397 } | 388 } |
398 p.audio_source_->SetIsMixed(is_mixed); | 389 p.audio_source_->SetIsMixed(is_mixed); |
399 } | 390 } |
400 Ramp(ramp_list); | 391 Ramp(ramp_list); |
401 return result; | 392 return result; |
402 } | 393 } |
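The function boils down to a bounded selection: sort all candidate frames, then admit at most kMaximumAmountOfMixedAudioSources unmuted ones and mark everything else as unmixed. The skeleton of that pattern, with hypothetical minimal types in place of the WebRTC classes:

#include <algorithm>
#include <vector>

struct Entry {
  bool muted = false;
  bool is_mixed = false;
  int priority = 0;  // stands in for the shouldMixBefore ordering
};

// Select at most max_mixed unmuted entries, highest priority first.
void SelectForMixing(std::vector<Entry>* entries, int max_mixed) {
  std::sort(entries->begin(), entries->end(),
            [](const Entry& a, const Entry& b) {
              if (a.muted != b.muted) return b.muted;
              return a.priority > b.priority;
            });
  for (Entry& e : *entries) {
    e.is_mixed = !e.muted && max_mixed > 0;
    if (e.is_mixed) --max_mixed;
  }
}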
403 | 394 |
404 AudioFrameList AudioMixerImpl::GetAnonymousAudio() const { | 395 AudioFrameList AudioMixerImpl::GetAnonymousAudio() { |
the sun
2016/10/04 20:36:09
Why do we need to treat "anonymous" audio differently?
aleloi
2016/10/05 15:18:19
Good point! The latest plan seems to be to remove the special treatment of anonymous sources.
| |
405 RTC_DCHECK_RUN_ON(&thread_checker_); | 396 RTC_DCHECK_RUN_ON(&thread_checker_); |
406 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 397 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
407 "GetAnonymousAudio()"); | 398 "GetAnonymousAudio()"); |
408 // The GetAudioFrameWithMuted() callback may result in the audio source being | |
409 // removed from additionalAudioFramesList_. If that happens it will | |
410 // invalidate any iterators. Create a copy of the audio sources list such | |
411 // that the list of participants can be traversed safely. | |
412 std::vector<SourceFrame> ramp_list; | 399 std::vector<SourceFrame> ramp_list; |
413 MixerAudioSourceList additional_audio_sources_list; | |
414 AudioFrameList result; | 400 AudioFrameList result; |
415 additional_audio_sources_list.insert(additional_audio_sources_list.begin(), | 401 for (auto& source_and_status : additional_audio_source_list_) { |
416 additional_audio_source_list_.begin(), | |
417 additional_audio_source_list_.end()); | |
418 | |
419 for (const auto& audio_source : additional_audio_sources_list) { | |
420 const auto audio_frame_with_info = | 402 const auto audio_frame_with_info = |
421 audio_source->GetAudioFrameWithMuted(id_, OutputFrequency()); | 403 source_and_status.audio_source()->GetAudioFrameWithMuted( |
| 404 id_, OutputFrequency()); |
422 const auto ret = audio_frame_with_info.audio_frame_info; | 405 const auto ret = audio_frame_with_info.audio_frame_info; |
423 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 406 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
424 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 407 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
425 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 408 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
426 "failed to GetAudioFrameWithMuted() from audio_source"); | 409 "failed to GetAudioFrameWithMuted() from audio_source"); |
427 continue; | 410 continue; |
428 } | 411 } |
429 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { | 412 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { |
430 result.push_back(audio_frame); | 413 result.push_back(audio_frame); |
431 ramp_list.emplace_back(audio_source, audio_frame, false, | 414 ramp_list.emplace_back(&source_and_status, audio_frame, false, 0); |
432 audio_source->IsMixed(), 0); | 415 source_and_status.SetIsMixed(true); |
433 audio_source->SetIsMixed(true); | |
434 } | 416 } |
435 } | 417 } |
436 Ramp(ramp_list); | 418 Ramp(ramp_list); |
437 return result; | 419 return result; |
438 } | 420 } |
439 | 421 |
440 bool AudioMixerImpl::IsAudioSourceInList( | 422 bool AudioMixerImpl::IsAudioSourceInList( |
441 const MixerAudioSource& audio_source, | 423 const MixerAudioSource& audio_source, |
442 const MixerAudioSourceList& audio_source_list) const { | 424 const MixerAudioSourceList& audio_source_list) const { |
443 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 425 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
444 "IsAudioSourceInList(audio_source,audio_source_list)"); | 426 "IsAudioSourceInList(audio_source,audio_source_list)"); |
445 return std::find(audio_source_list.begin(), audio_source_list.end(), | 427 return std::find_if(audio_source_list.begin(), audio_source_list.end(), |
ivoc
2016/10/04 20:39:29
Maybe it's not that important, but I think a for loop would be more readable here.
aleloi
2016/10/05 15:18:18
Probably. The loop contains about as many characters.
| |
446 &audio_source) != audio_source_list.end(); | 428 [&audio_source](const AudioSourceWithMixStatus& p) { |
429 return p.audio_source() == &audio_source; | |
430 }) != audio_source_list.end(); | |
447 } | 431 } |
448 | 432 |
449 bool AudioMixerImpl::AddAudioSourceToList( | 433 bool AudioMixerImpl::AddAudioSourceToList( |
450 MixerAudioSource* audio_source, | 434 MixerAudioSource* audio_source, |
451 MixerAudioSourceList* audio_source_list) const { | 435 MixerAudioSourceList* audio_source_list) const { |
452 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 436 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
453 "AddAudioSourceToList(audio_source, audio_source_list)"); | 437 "AddAudioSourceToList(audio_source, audio_source_list)"); |
454 audio_source_list->push_back(audio_source); | 438 audio_source_list->emplace_back(audio_source); |
455 // Make sure that the mixed status is correct for new MixerAudioSource. | |
456 audio_source->ResetMixedStatus(); | |
457 return true; | 439 return true; |
458 } | 440 } |
459 | 441 |
460 bool AudioMixerImpl::RemoveAudioSourceFromList( | 442 bool AudioMixerImpl::RemoveAudioSourceFromList( |
461 MixerAudioSource* audio_source, | 443 MixerAudioSource* audio_source, |
462 MixerAudioSourceList* audio_source_list) const { | 444 MixerAudioSourceList* audio_source_list) const { |
463 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, | 445 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
464 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); | 446 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); |
465 const auto iter = std::find(audio_source_list->begin(), | 447 const auto iter = |
466 audio_source_list->end(), audio_source); | 448 std::find_if(audio_source_list->begin(), audio_source_list->end(), |
| 449 [audio_source](const AudioSourceWithMixStatus& p) { |
| 450 return p.audio_source() == audio_source; |
| 451 }); |
467 if (iter != audio_source_list->end()) { | 452 if (iter != audio_source_list->end()) { |
468 audio_source_list->erase(iter); | 453 audio_source_list->erase(iter); |
469 // AudioSource is no longer mixed, reset to default. | |
470 audio_source->ResetMixedStatus(); | |
471 return true; | 454 return true; |
472 } else { | 455 } else { |
473 return false; | 456 return false; |
474 } | 457 } |
475 } | 458 } |
476 | 459 |
477 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { | 460 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { |
478 RTC_DCHECK_RUN_ON(&thread_checker_); | 461 RTC_DCHECK_RUN_ON(&thread_checker_); |
479 if (!use_limiter_) { | 462 if (!use_limiter_) { |
480 return true; | 463 return true; |
(...skipping 31 matching lines...) | |
512 return level; | 495 return level; |
513 } | 496 } |
514 | 497 |
515 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 498 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
516 RTC_DCHECK_RUN_ON(&thread_checker_); | 499 RTC_DCHECK_RUN_ON(&thread_checker_); |
517 const int level = audio_level_.LevelFullRange(); | 500 const int level = audio_level_.LevelFullRange(); |
518 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 501 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
519 "GetAudioOutputLevelFullRange() => level=%d", level); | 502 "GetAudioOutputLevelFullRange() => level=%d", level); |
520 return level; | 503 return level; |
521 } | 504 } |
| 505 |
| 506 AudioSourceWithMixStatus* AudioMixerImpl::GetSourceWithStatus( |
the sun
2016/10/04 20:36:09
If this is only used for unit testing, can you mark it as such?
aleloi
2016/10/05 15:18:19
I don't understand, how should I mark it?
The unit tests are the only callers.
the sun
2016/10/05 19:32:10
Well, you could either add a comment in the .h, or give it a ForTest suffix.
aleloi
2016/10/06 09:26:12
I looked around in the webrtc source. ForTest/ForTesting suffixes are both in use.
| |
| 507 MixerAudioSource* audio_source) { |
| 508 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 509 rtc::CritScope lock(&crit_); |
| 510 auto iter = std::find_if(audio_source_list_.begin(), audio_source_list_.end(), |
ivoc
2016/10/04 20:39:29
Again, I think this code would look simpler (and probably more readable) as a for loop.
aleloi
2016/10/05 15:18:18
I tried and it was indeed shorter :)
| |
| 511 [audio_source](const AudioSourceWithMixStatus& p) { |
| 512 return p.audio_source() == audio_source; |
| 513 }); |
| 514 if (iter != audio_source_list_.end()) { |
| 515 return &(*iter); |
| 516 } |
| 517 |
| 518 iter = std::find_if(additional_audio_source_list_.begin(), |
| 519 additional_audio_source_list_.end(), |
| 520 [audio_source](const AudioSourceWithMixStatus& p) { |
| 521 return p.audio_source() == audio_source; |
| 522 }); |
| 523 if (iter != additional_audio_source_list_.end()) { |
| 524 return &(*iter); |
| 525 } else { |
| 526 LOG_T_F(LS_ERROR) << "Audio source unknown"; |
| 527 return nullptr; |
| 528 } |
| 529 } |
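The review thread above leans on WebRTC's convention of flagging test-only entry points in the method name. A hypothetical declaration following that convention; the actual rename, if any, is not part of this diff:

// Hypothetical test-only accessor name following the ForTest
// convention discussed above; not part of this change.
AudioSourceWithMixStatus* GetSourceWithStatusForTest(
    MixerAudioSource* audio_source);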
| 530 |
| 531 bool AudioMixerImpl::GetAudioSourceMixabilityStatus( |
| 532 MixerAudioSource* audio_source) { |
| 533 RTC_DCHECK_RUN_ON(&thread_checker_); |
| 534 rtc::CritScope lock(&crit_); |
| 535 const auto* const ptr = GetSourceWithStatus(audio_source); |
| 536 if (ptr) { |
| 537 return ptr->IsMixed(); |
| 538 } else { |
| 539 LOG_T_F(LS_ERROR) << "Audio source unknown"; |
| 540 return false; |
| 541 } |
| 542 } |
522 } // namespace webrtc | 543 } // namespace webrtc |