Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2396803004: Moved MixerAudioSource and removed audio_mixer_defines.h. (Closed)
Patch Set: Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments.
Jump to:
View unified diff |
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <functional> 14 #include <functional>
15 #include <utility> 15 #include <utility>
16 16
17 #include "webrtc/base/logging.h" 17 #include "webrtc/base/logging.h"
18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" 18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
19 #include "webrtc/modules/utility/include/audio_frame_operations.h" 19 #include "webrtc/modules/utility/include/audio_frame_operations.h"
20 #include "webrtc/system_wrappers/include/trace.h" 20 #include "webrtc/system_wrappers/include/trace.h"
21 21
22 namespace webrtc { 22 namespace webrtc {
23 namespace { 23 namespace {
24 24
25 class SourceFrame { 25 class SourceFrame {
26 public: 26 public:
27 SourceFrame(AudioSourceWithMixStatus* audio_source, 27 SourceFrame(AudioMixerImpl::SourceStatus* source_status,
28 AudioFrame* audio_frame, 28 AudioFrame* audio_frame,
29 bool muted) 29 bool muted)
30 : audio_source_(audio_source), audio_frame_(audio_frame), muted_(muted) { 30 : source_status_(source_status),
31 audio_frame_(audio_frame),
32 muted_(muted) {
33 RTC_DCHECK(source_status);
34 RTC_DCHECK(audio_frame);
31 if (!muted_) { 35 if (!muted_) {
32 energy_ = AudioMixerCalculateEnergy(*audio_frame); 36 energy_ = AudioMixerCalculateEnergy(*audio_frame);
33 } 37 }
34 } 38 }
35 39
36 SourceFrame(AudioSourceWithMixStatus* audio_source, 40 SourceFrame(AudioMixerImpl::SourceStatus* source_status,
37 AudioFrame* audio_frame, 41 AudioFrame* audio_frame,
38 bool muted, 42 bool muted,
39 uint32_t energy) 43 uint32_t energy)
40 : audio_source_(audio_source), 44 : source_status_(source_status),
41 audio_frame_(audio_frame), 45 audio_frame_(audio_frame),
42 muted_(muted), 46 muted_(muted),
43 energy_(energy) {} 47 energy_(energy) {
44 48 RTC_DCHECK(source_status);
45 // a.ShouldMixBefore(b) is used to select mixer sources. 49 RTC_DCHECK(audio_frame);
aleloi 2016/10/07 12:59:21 Added checks.
46 bool ShouldMixBefore(const SourceFrame& other) const {
47 if (muted_ != other.muted_) {
48 return other.muted_;
49 }
50
51 const auto our_activity = audio_frame_->vad_activity_;
52 const auto other_activity = other.audio_frame_->vad_activity_;
53
54 if (our_activity != other_activity) {
55 return our_activity == AudioFrame::kVadActive;
56 }
57
58 return energy_ > other.energy_;
59 } 50 }
60 51
61 AudioSourceWithMixStatus* audio_source_ = nullptr; 52 AudioMixerImpl::SourceStatus* source_status_ = nullptr;
62 AudioFrame* audio_frame_ = nullptr; 53 AudioFrame* audio_frame_ = nullptr;
63 bool muted_ = true; 54 bool muted_ = true;
64 uint32_t energy_ = 0; 55 uint32_t energy_ = 0;
65 }; 56 };
66 57
67 // Remixes a frame between stereo and mono. 58 // ShouldMixBefore(a, b) is used to select mixer sources.
68 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { 59 bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) {
69 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); 60 if (a.muted_ != b.muted_) {
70 if (frame->num_channels_ == 1 && number_of_channels == 2) { 61 return b.muted_;
71 AudioFrameOperations::MonoToStereo(frame); 62 }
72 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { 63
73 AudioFrameOperations::StereoToMono(frame); 64 const auto a_activity = a.audio_frame_->vad_activity_;
aleloi 2016/10/07 12:59:21 Moved to audio_frame_operations
65 const auto b_activity = b.audio_frame_->vad_activity_;
66
67 if (a_activity != b_activity) {
68 return a_activity == AudioFrame::kVadActive;
69 }
70
71 return a.energy_ > b.energy_;
72 }
aleloi 2016/10/07 12:59:21 Moved ShouldMixBefore outside.
73
74 void RampAndUpdateGain(
75 const std::vector<SourceFrame>& mixed_sources_and_frames) {
76 for (const auto& source_frame : mixed_sources_and_frames) {
77 float target_gain_ = source_frame.source_status_->is_mixed_ ? 1 : 0;
78 Ramp(source_frame.audio_frame_, source_frame.source_status_->gain_,
79 target_gain_);
80 source_frame.source_status_->gain_ = target_gain_;
74 } 81 }
75 } 82 }
76 83
77 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
78 for (const auto& source_frame : mixed_sources_and_frames) {
79 // Ramp in previously unmixed.
80 if (!source_frame.audio_source_->WasMixed()) {
81 NewMixerRampIn(source_frame.audio_frame_);
aleloi 2016/10/07 12:59:21 Replaced with single 'Ramp' in audio_frame_operations.
82 }
83
84 const bool is_mixed = source_frame.audio_source_->IsMixed();
85 // Ramp out currently unmixed.
86 if (source_frame.audio_source_->WasMixed() && !is_mixed) {
87 NewMixerRampOut(source_frame.audio_frame_);
88 }
89 }
90 }
91
92 // Mix the AudioFrames stored in audioFrameList into mixed_audio. 84 // Mix the AudioFrames stored in audioFrameList into mixed_audio.
93 int32_t MixFromList(AudioFrame* mixed_audio, 85 int32_t MixFromList(AudioFrame* mixed_audio,
94 const AudioFrameList& audio_frame_list, 86 const AudioFrameList& audio_frame_list,
95 int32_t id, 87 int32_t id,
96 bool use_limiter) { 88 bool use_limiter) {
97 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, 89 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
98 "MixFromList(mixed_audio, audio_frame_list)"); 90 "MixFromList(mixed_audio, audio_frame_list)");
99 if (audio_frame_list.empty()) 91 if (audio_frame_list.empty())
100 return 0; 92 return 0;
101 93
(...skipping 21 matching lines...)
123 // Divide by two to avoid saturation in the mixing. 115 // Divide by two to avoid saturation in the mixing.
124 // This is only meaningful if the limiter will be used. 116 // This is only meaningful if the limiter will be used.
125 *frame >>= 1; 117 *frame >>= 1;
126 } 118 }
127 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); 119 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
128 *mixed_audio += *frame; 120 *mixed_audio += *frame;
129 } 121 }
130 return 0; 122 return 0;
131 } 123 }
132 124
133 MixerAudioSourceList::const_iterator FindSourceInList( 125 AudioMixerImpl::MixerAudioSourceList::const_iterator FindSourceInList(
134 MixerAudioSource const* audio_source, 126 AudioMixerImpl::Source const* audio_source,
135 MixerAudioSourceList const* audio_source_list) { 127 AudioMixerImpl::MixerAudioSourceList const* audio_source_list) {
136 return std::find_if(audio_source_list->begin(), audio_source_list->end(), 128 return std::find_if(audio_source_list->begin(), audio_source_list->end(),
137 [audio_source](const AudioSourceWithMixStatus& p) { 129 [audio_source](const AudioMixerImpl::SourceStatus& p) {
138 return p.audio_source() == audio_source; 130 return p.audio_source_ == audio_source;
139 }); 131 });
140 } 132 }
141 133
142 MixerAudioSourceList::iterator FindSourceInList( 134 AudioMixerImpl::MixerAudioSourceList::iterator FindSourceInList(
143 MixerAudioSource const* audio_source, 135 AudioMixerImpl::Source const* audio_source,
144 MixerAudioSourceList* audio_source_list) { 136 AudioMixerImpl::MixerAudioSourceList* audio_source_list) {
145 return std::find_if(audio_source_list->begin(), audio_source_list->end(), 137 return std::find_if(audio_source_list->begin(), audio_source_list->end(),
146 [audio_source](const AudioSourceWithMixStatus& p) { 138 [audio_source](const AudioMixerImpl::SourceStatus& p) {
147 return p.audio_source() == audio_source; 139 return p.audio_source_ == audio_source;
148 }); 140 });
149 } 141 }
150 142
151 } // namespace 143 } // namespace
152 144
153 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { 145 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) {
154 return AudioMixerImpl::Create(id); 146 return AudioMixerImpl::Create(id);
155 } 147 }
156 148
157 AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter) 149 AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter)
(...skipping 107 matching lines...)
265 sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000; 257 sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000;
266 258
267 return 0; 259 return 0;
268 } 260 }
269 261
270 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { 262 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
271 RTC_DCHECK_RUN_ON(&thread_checker_); 263 RTC_DCHECK_RUN_ON(&thread_checker_);
272 return output_frequency_; 264 return output_frequency_;
273 } 265 }
274 266
275 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, 267 int32_t AudioMixerImpl::SetMixabilityStatus(Source* audio_source,
276 bool mixable) { 268 bool mixable) {
277 if (!mixable) { 269 if (!mixable) {
278 // Anonymous audio sources are in a separate list. Make sure that the 270 // Anonymous audio sources are in a separate list. Make sure that the
279 // audio source is in the _audioSourceList if it is being mixed. 271 // audio source is in the _audioSourceList if it is being mixed.
280 SetAnonymousMixabilityStatus(audio_source, false); 272 SetAnonymousMixabilityStatus(audio_source, false);
281 } 273 }
282 { 274 {
283 rtc::CritScope lock(&crit_); 275 rtc::CritScope lock(&crit_);
284 const bool is_mixed = FindSourceInList(audio_source, &audio_source_list_) != 276 const bool is_mixed = FindSourceInList(audio_source, &audio_source_list_) !=
285 audio_source_list_.end(); 277 audio_source_list_.end();
(...skipping 19 matching lines...)
305 size_t num_mixed_non_anonymous = audio_source_list_.size(); 297 size_t num_mixed_non_anonymous = audio_source_list_.size();
306 if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) { 298 if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) {
307 num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources; 299 num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources;
308 } 300 }
309 num_mixed_audio_sources_ = 301 num_mixed_audio_sources_ =
310 num_mixed_non_anonymous + additional_audio_source_list_.size(); 302 num_mixed_non_anonymous + additional_audio_source_list_.size();
311 } 303 }
312 return 0; 304 return 0;
313 } 305 }
314 306
315 bool AudioMixerImpl::MixabilityStatus( 307 bool AudioMixerImpl::MixabilityStatus(const Source& audio_source) const {
316 const MixerAudioSource& audio_source) const {
317 rtc::CritScope lock(&crit_); 308 rtc::CritScope lock(&crit_);
318 return FindSourceInList(&audio_source, &audio_source_list_) != 309 return FindSourceInList(&audio_source, &audio_source_list_) !=
319 audio_source_list_.end(); 310 audio_source_list_.end();
320 } 311 }
321 312
322 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( 313 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(Source* audio_source,
323 MixerAudioSource* audio_source, 314 bool anonymous) {
324 bool anonymous) {
325 rtc::CritScope lock(&crit_); 315 rtc::CritScope lock(&crit_);
326 if (FindSourceInList(audio_source, &additional_audio_source_list_) != 316 if (FindSourceInList(audio_source, &additional_audio_source_list_) !=
327 additional_audio_source_list_.end()) { 317 additional_audio_source_list_.end()) {
328 if (anonymous) { 318 if (anonymous) {
329 return 0; 319 return 0;
330 } 320 }
331 if (!RemoveAudioSourceFromList(audio_source, 321 if (!RemoveAudioSourceFromList(audio_source,
332 &additional_audio_source_list_)) { 322 &additional_audio_source_list_)) {
333 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 323 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
334 "unable to remove audio_source from anonymous list"); 324 "unable to remove audio_source from anonymous list");
(...skipping 14 matching lines...)
349 // Setting anonymous status is only possible if MixerAudioSource is 339 // Setting anonymous status is only possible if MixerAudioSource is
350 // already registered. 340 // already registered.
351 return -1; 341 return -1;
352 } 342 }
353 return AddAudioSourceToList(audio_source, &additional_audio_source_list_) 343 return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
354 ? 0 344 ? 0
355 : -1; 345 : -1;
356 } 346 }
357 347
358 bool AudioMixerImpl::AnonymousMixabilityStatus( 348 bool AudioMixerImpl::AnonymousMixabilityStatus(
359 const MixerAudioSource& audio_source) const { 349 const Source& audio_source) const {
360 rtc::CritScope lock(&crit_); 350 rtc::CritScope lock(&crit_);
361 return FindSourceInList(&audio_source, &additional_audio_source_list_) != 351 return FindSourceInList(&audio_source, &additional_audio_source_list_) !=
362 additional_audio_source_list_.end(); 352 additional_audio_source_list_.end();
363 } 353 }
364 354
365 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() { 355 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
366 RTC_DCHECK_RUN_ON(&thread_checker_); 356 RTC_DCHECK_RUN_ON(&thread_checker_);
367 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 357 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
368 "GetNonAnonymousAudio()"); 358 "GetNonAnonymousAudio()");
369 AudioFrameList result; 359 AudioFrameList result;
370 std::vector<SourceFrame> audio_source_mixing_data_list; 360 std::vector<SourceFrame> audio_source_mixing_data_list;
371 std::vector<SourceFrame> ramp_list; 361 std::vector<SourceFrame> ramp_list;
372 362
373 // Get audio source audio and put it in the struct vector. 363 // Get audio source audio and put it in the struct vector.
374 for (auto& source_and_status : audio_source_list_) { 364 for (auto& source_and_status : audio_source_list_) {
375 auto audio_frame_with_info = 365 auto audio_frame_with_info =
376 source_and_status.audio_source()->GetAudioFrameWithMuted( 366 source_and_status.audio_source_->GetAudioFrameWithMuted(
377 id_, static_cast<int>(OutputFrequency())); 367 id_, static_cast<int>(OutputFrequency()));
378 368
379 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; 369 const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
380 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; 370 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
381 371
382 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { 372 if (audio_frame_info == Source::AudioFrameInfo::kError) {
383 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, 373 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
384 "failed to GetAudioFrameWithMuted() from source"); 374 "failed to GetAudioFrameWithMuted() from source");
385 continue; 375 continue;
386 } 376 }
387 audio_source_mixing_data_list.emplace_back( 377 audio_source_mixing_data_list.emplace_back(
388 &source_and_status, audio_source_audio_frame, 378 &source_and_status, audio_source_audio_frame,
389 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted); 379 audio_frame_info == Source::AudioFrameInfo::kMuted);
390 } 380 }
391 381
392 // Sort frames by sorting function. 382 // Sort frames by sorting function.
393 std::sort(audio_source_mixing_data_list.begin(), 383 std::sort(audio_source_mixing_data_list.begin(),
394 audio_source_mixing_data_list.end(), 384 audio_source_mixing_data_list.end(), ShouldMixBefore);
395 std::mem_fn(&SourceFrame::ShouldMixBefore));
396 385
397 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; 386 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;
398 387
399 // Go through list in order and put unmuted frames in result list. 388 // Go through list in order and put unmuted frames in result list.
400 for (const auto& p : audio_source_mixing_data_list) { 389 for (const auto& p : audio_source_mixing_data_list) {
401 // Filter muted. 390 // Filter muted.
402 if (p.muted_) { 391 if (p.muted_) {
403 p.audio_source_->SetIsMixed(false); 392 p.source_status_->is_mixed_ = false;
404 continue; 393 continue;
405 } 394 }
406 395
407 // Add frame to result vector for mixing. 396 // Add frame to result vector for mixing.
408 bool is_mixed = false; 397 bool is_mixed = false;
409 if (max_audio_frame_counter > 0) { 398 if (max_audio_frame_counter > 0) {
410 --max_audio_frame_counter; 399 --max_audio_frame_counter;
411 result.push_back(p.audio_frame_); 400 result.push_back(p.audio_frame_);
412 ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, -1); 401 ramp_list.emplace_back(p.source_status_, p.audio_frame_, false, -1);
413 is_mixed = true; 402 is_mixed = true;
414 } 403 }
415 p.audio_source_->SetIsMixed(is_mixed); 404 p.source_status_->is_mixed_ = is_mixed;
416 } 405 }
417 Ramp(ramp_list); 406 RampAndUpdateGain(ramp_list);
418 return result; 407 return result;
419 } 408 }
420 409
421 AudioFrameList AudioMixerImpl::GetAnonymousAudio() { 410 AudioFrameList AudioMixerImpl::GetAnonymousAudio() {
422 RTC_DCHECK_RUN_ON(&thread_checker_); 411 RTC_DCHECK_RUN_ON(&thread_checker_);
423 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 412 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
424 "GetAnonymousAudio()"); 413 "GetAnonymousAudio()");
425 std::vector<SourceFrame> ramp_list; 414 std::vector<SourceFrame> ramp_list;
426 AudioFrameList result; 415 AudioFrameList result;
427 for (auto& source_and_status : additional_audio_source_list_) { 416 for (auto& source_and_status : additional_audio_source_list_) {
428 const auto audio_frame_with_info = 417 const auto audio_frame_with_info =
429 source_and_status.audio_source()->GetAudioFrameWithMuted( 418 source_and_status.audio_source_->GetAudioFrameWithMuted(
430 id_, OutputFrequency()); 419 id_, OutputFrequency());
431 const auto ret = audio_frame_with_info.audio_frame_info; 420 const auto ret = audio_frame_with_info.audio_frame_info;
432 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; 421 AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
433 if (ret == MixerAudioSource::AudioFrameInfo::kError) { 422 if (ret == Source::AudioFrameInfo::kError) {
434 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, 423 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
435 "failed to GetAudioFrameWithMuted() from audio_source"); 424 "failed to GetAudioFrameWithMuted() from audio_source");
436 continue; 425 continue;
437 } 426 }
438 if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { 427 if (ret != Source::AudioFrameInfo::kMuted) {
439 result.push_back(audio_frame); 428 result.push_back(audio_frame);
440 ramp_list.emplace_back(&source_and_status, audio_frame, false, 0); 429 ramp_list.emplace_back(&source_and_status, audio_frame, false, 0);
441 source_and_status.SetIsMixed(true); 430 source_and_status.is_mixed_ = true;
442 } 431 }
443 } 432 }
444 Ramp(ramp_list); 433 RampAndUpdateGain(ramp_list);
445 return result; 434 return result;
446 } 435 }
447 436
448 bool AudioMixerImpl::AddAudioSourceToList( 437 bool AudioMixerImpl::AddAudioSourceToList(
449 MixerAudioSource* audio_source, 438 Source* audio_source,
450 MixerAudioSourceList* audio_source_list) const { 439 MixerAudioSourceList* audio_source_list) const {
451 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 440 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
452 "AddAudioSourceToList(audio_source, audio_source_list)"); 441 "AddAudioSourceToList(audio_source, audio_source_list)");
453 audio_source_list->emplace_back(audio_source); 442 audio_source_list->emplace_back(audio_source, false, 0);
454 return true; 443 return true;
455 } 444 }
456 445
457 bool AudioMixerImpl::RemoveAudioSourceFromList( 446 bool AudioMixerImpl::RemoveAudioSourceFromList(
458 MixerAudioSource* audio_source, 447 Source* audio_source,
459 MixerAudioSourceList* audio_source_list) const { 448 MixerAudioSourceList* audio_source_list) const {
460 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 449 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
461 "RemoveAudioSourceFromList(audio_source, audio_source_list)"); 450 "RemoveAudioSourceFromList(audio_source, audio_source_list)");
462 const auto iter = FindSourceInList(audio_source, audio_source_list); 451 const auto iter = FindSourceInList(audio_source, audio_source_list);
463 if (iter != audio_source_list->end()) { 452 if (iter != audio_source_list->end()) {
464 audio_source_list->erase(iter); 453 audio_source_list->erase(iter);
465 return true; 454 return true;
466 } else { 455 } else {
467 return false; 456 return false;
468 } 457 }
(...skipping 39 matching lines...)
508 497
509 int AudioMixerImpl::GetOutputAudioLevelFullRange() { 498 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
510 RTC_DCHECK_RUN_ON(&thread_checker_); 499 RTC_DCHECK_RUN_ON(&thread_checker_);
511 const int level = audio_level_.LevelFullRange(); 500 const int level = audio_level_.LevelFullRange();
512 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 501 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
513 "GetAudioOutputLevelFullRange() => level=%d", level); 502 "GetAudioOutputLevelFullRange() => level=%d", level);
514 return level; 503 return level;
515 } 504 }
516 505
517 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( 506 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
518 MixerAudioSource* audio_source) { 507 AudioMixerImpl::Source* audio_source) {
519 RTC_DCHECK_RUN_ON(&thread_checker_); 508 RTC_DCHECK_RUN_ON(&thread_checker_);
520 rtc::CritScope lock(&crit_); 509 rtc::CritScope lock(&crit_);
521 510
522 const auto non_anonymous_iter = 511 const auto non_anonymous_iter =
523 FindSourceInList(audio_source, &audio_source_list_); 512 FindSourceInList(audio_source, &audio_source_list_);
524 if (non_anonymous_iter != audio_source_list_.end()) { 513 if (non_anonymous_iter != audio_source_list_.end()) {
525 return non_anonymous_iter->IsMixed(); 514 return non_anonymous_iter->is_mixed_;
526 } 515 }
527 516
528 const auto anonymous_iter = 517 const auto anonymous_iter =
529 FindSourceInList(audio_source, &additional_audio_source_list_); 518 FindSourceInList(audio_source, &additional_audio_source_list_);
530 if (anonymous_iter != audio_source_list_.end()) { 519 if (anonymous_iter != audio_source_list_.end()) {
531 return anonymous_iter->IsMixed(); 520 return anonymous_iter->is_mixed_;
532 } 521 }
533 522
534 LOG(LS_ERROR) << "Audio source unknown"; 523 LOG(LS_ERROR) << "Audio source unknown";
535 return false; 524 return false;
536 } 525 }
537 } // namespace webrtc 526 } // namespace webrtc
OLDNEW

Powered by Google App Engine