Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2396803004: Moved MixerAudioSource and removed audio_mixer_defines.h. (Closed)
Patch Set: Renamed AudioFrameWithMuted to AudioFrameWithInfo and changed RemixFrame argument order. Created 4 years, 2 months ago
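
For orientation before the diff: the sketch below summarizes the renamed interface as this file now consumes it. The declarations themselves are not part of this diff, so the struct layout, the kNormal enumerator, and the forward declaration are assumptions inferred purely from the call sites further down; only the member names audio_frame / audio_frame_info, the enumerators kMuted / kError, and the swapped RemixFrame argument order are confirmed by the code in this file.

// Sketch only -- inferred from the call sites in audio_mixer_impl.cc, not
// copied from the real headers.
class AudioFrame;  // Defined elsewhere in WebRTC.

namespace sketch {

enum class AudioFrameInfo { kNormal, kMuted, kError };  // kNormal is assumed.

// GetAudioFrameWithMuted() became GetAudioFrameWithInfo(): instead of a bare
// frame-plus-muted-flag pair, sources now return the frame together with an
// info enum, accessed below as .audio_frame and .audio_frame_info.
struct AudioFrameWithInfo {
  AudioFrame* audio_frame;
  AudioFrameInfo audio_frame_info;
};

// RemixFrame() now takes the target channel count first and the frame second:
//   before: RemixFrame(frame, number_of_channels);
//   after:  RemixFrame(number_of_channels, frame);

}  // namespace sketch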
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 46 matching lines...)

     return energy_ > other.energy_;
   }

   AudioSourceWithMixStatus* audio_source_ = nullptr;
   AudioFrame* audio_frame_ = nullptr;
   bool muted_ = true;
   uint32_t energy_ = 0;
 };

-// Remixes a frame between stereo and mono.
-void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
-  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
-  if (frame->num_channels_ == 1 && number_of_channels == 2) {
-    AudioFrameOperations::MonoToStereo(frame);
-  } else if (frame->num_channels_ == 2 && number_of_channels == 1) {
-    AudioFrameOperations::StereoToMono(frame);
-  }
-}

 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
   for (const auto& source_frame : mixed_sources_and_frames) {
     // Ramp in previously unmixed.
     if (!source_frame.audio_source_->WasMixed()) {
       NewMixerRampIn(source_frame.audio_frame_);
     }

     const bool is_mixed = source_frame.audio_source_->IsMixed();
     // Ramp out currently unmixed.
(...skipping 38 matching lines...)
       // This is only meaningful if the limiter will be used.
       *frame >>= 1;
     }
     RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
     *mixed_audio += *frame;
   }
   return 0;
 }

 MixerAudioSourceList::const_iterator FindSourceInList(
-    MixerAudioSource const* audio_source,
+    AudioMixerImpl::Source const* audio_source,
     MixerAudioSourceList const* audio_source_list) {
   return std::find_if(audio_source_list->begin(), audio_source_list->end(),
                       [audio_source](const AudioSourceWithMixStatus& p) {
                         return p.audio_source() == audio_source;
                       });
 }

 MixerAudioSourceList::iterator FindSourceInList(
-    MixerAudioSource const* audio_source,
+    AudioMixerImpl::Source const* audio_source,
     MixerAudioSourceList* audio_source_list) {
   return std::find_if(audio_source_list->begin(), audio_source_list->end(),
                       [audio_source](const AudioSourceWithMixStatus& p) {
                         return p.audio_source() == audio_source;
                       });
 }

 }  // namespace

 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) {
(...skipping 69 matching lines...)
     rtc::CritScope lock(&crit_);
     mix_list = GetNonAnonymousAudio();
     anonymous_mix_list = GetAnonymousAudio();
     num_mixed_audio_sources = num_mixed_audio_sources_;
   }

   mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(),
                   anonymous_mix_list.end());

   for (const auto& frame : mix_list) {
-    RemixFrame(frame, number_of_channels);
+    RemixFrame(number_of_channels, frame);
   }

   audio_frame_for_mixing->UpdateFrame(
       -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech,
       AudioFrame::kVadPassive, number_of_channels);

   time_stamp_ += static_cast<uint32_t>(sample_size_);

   use_limiter_ = num_mixed_audio_sources > 1;

(...skipping 21 matching lines...)
   sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000;

   return 0;
 }

 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   return output_frequency_;
 }

-int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
+int32_t AudioMixerImpl::SetMixabilityStatus(Source* audio_source,
                                             bool mixable) {
   if (!mixable) {
     // Anonymous audio sources are in a separate list. Make sure that the
     // audio source is in the _audioSourceList if it is being mixed.
     SetAnonymousMixabilityStatus(audio_source, false);
   }
   {
     rtc::CritScope lock(&crit_);
     const bool is_mixed = FindSourceInList(audio_source, &audio_source_list_) !=
                           audio_source_list_.end();
(...skipping 19 matching lines...)
     size_t num_mixed_non_anonymous = audio_source_list_.size();
     if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) {
       num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources;
     }
     num_mixed_audio_sources_ =
         num_mixed_non_anonymous + additional_audio_source_list_.size();
   }
   return 0;
 }

-bool AudioMixerImpl::MixabilityStatus(
-    const MixerAudioSource& audio_source) const {
+bool AudioMixerImpl::MixabilityStatus(const Source& audio_source) const {
   rtc::CritScope lock(&crit_);
   return FindSourceInList(&audio_source, &audio_source_list_) !=
          audio_source_list_.end();
 }

-int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
-    MixerAudioSource* audio_source,
-    bool anonymous) {
+int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(Source* audio_source,
+                                                     bool anonymous) {
   rtc::CritScope lock(&crit_);
   if (FindSourceInList(audio_source, &additional_audio_source_list_) !=
       additional_audio_source_list_.end()) {
     if (anonymous) {
       return 0;
     }
     if (!RemoveAudioSourceFromList(audio_source,
                                    &additional_audio_source_list_)) {
       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                    "unable to remove audio_source from anonymous list");
(...skipping 14 matching lines...)
     // Setting anonymous status is only possible if MixerAudioSource is
     // already registered.
     return -1;
   }
   return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
              ? 0
              : -1;
 }

 bool AudioMixerImpl::AnonymousMixabilityStatus(
-    const MixerAudioSource& audio_source) const {
+    const Source& audio_source) const {
   rtc::CritScope lock(&crit_);
   return FindSourceInList(&audio_source, &additional_audio_source_list_) !=
          additional_audio_source_list_.end();
 }

 AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
                "GetNonAnonymousAudio()");
   AudioFrameList result;
   std::vector<SourceFrame> audio_source_mixing_data_list;
   std::vector<SourceFrame> ramp_list;

   // Get audio source audio and put it in the struct vector.
   for (auto& source_and_status : audio_source_list_) {
     auto audio_frame_with_info =
-        source_and_status.audio_source()->GetAudioFrameWithMuted(
+        source_and_status.audio_source()->GetAudioFrameWithInfo(
             id_, static_cast<int>(OutputFrequency()));

     const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
     AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;

-    if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
+    if (audio_frame_info == Source::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                    "failed to GetAudioFrameWithMuted() from source");
       continue;
     }
     audio_source_mixing_data_list.emplace_back(
         &source_and_status, audio_source_audio_frame,
-        audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted);
+        audio_frame_info == Source::AudioFrameInfo::kMuted);
   }

   // Sort frames by sorting function.
   std::sort(audio_source_mixing_data_list.begin(),
             audio_source_mixing_data_list.end(),
             std::mem_fn(&SourceFrame::ShouldMixBefore));

   int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;

   // Go through list in order and put unmuted frames in result list.
(...skipping 19 matching lines...)
 }

 AudioFrameList AudioMixerImpl::GetAnonymousAudio() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
                "GetAnonymousAudio()");
   std::vector<SourceFrame> ramp_list;
   AudioFrameList result;
   for (auto& source_and_status : additional_audio_source_list_) {
     const auto audio_frame_with_info =
-        source_and_status.audio_source()->GetAudioFrameWithMuted(
+        source_and_status.audio_source()->GetAudioFrameWithInfo(
             id_, OutputFrequency());
     const auto ret = audio_frame_with_info.audio_frame_info;
     AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
-    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
+    if (ret == Source::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                    "failed to GetAudioFrameWithMuted() from audio_source");
       continue;
     }
-    if (ret != MixerAudioSource::AudioFrameInfo::kMuted) {
+    if (ret != Source::AudioFrameInfo::kMuted) {
       result.push_back(audio_frame);
       ramp_list.emplace_back(&source_and_status, audio_frame, false, 0);
       source_and_status.SetIsMixed(true);
     }
   }
   Ramp(ramp_list);
   return result;
 }

 bool AudioMixerImpl::AddAudioSourceToList(
-    MixerAudioSource* audio_source,
+    Source* audio_source,
     MixerAudioSourceList* audio_source_list) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
                "AddAudioSourceToList(audio_source, audio_source_list)");
   audio_source_list->emplace_back(audio_source);
   return true;
 }

 bool AudioMixerImpl::RemoveAudioSourceFromList(
-    MixerAudioSource* audio_source,
+    Source* audio_source,
     MixerAudioSourceList* audio_source_list) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
                "RemoveAudioSourceFromList(audio_source, audio_source_list)");
   const auto iter = FindSourceInList(audio_source, audio_source_list);
   if (iter != audio_source_list->end()) {
     audio_source_list->erase(iter);
     return true;
   } else {
     return false;
   }
(...skipping 39 matching lines...)

 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   const int level = audio_level_.LevelFullRange();
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
                "GetAudioOutputLevelFullRange() => level=%d", level);
   return level;
 }

 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
-    MixerAudioSource* audio_source) {
+    AudioMixerImpl::Source* audio_source) {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   rtc::CritScope lock(&crit_);

   const auto non_anonymous_iter =
       FindSourceInList(audio_source, &audio_source_list_);
   if (non_anonymous_iter != audio_source_list_.end()) {
     return non_anonymous_iter->IsMixed();
   }

   const auto anonymous_iter =
       FindSourceInList(audio_source, &additional_audio_source_list_);
   if (anonymous_iter != audio_source_list_.end()) {
     return anonymous_iter->IsMixed();
   }

   LOG(LS_ERROR) << "Audio source unknown";
   return false;
 }
 }  // namespace webrtc
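
The RemixFrame() helper deleted from this file's anonymous namespace (see the hunk above) presumably lives on in a shared location; its destination is not shown in this diff. A minimal sketch of the relocated helper, assuming the body is unchanged apart from the new parameter order confirmed by the call site in the mixing loop, and that the same AudioFrame / AudioFrameOperations / RTC_DCHECK dependencies are available there:

// Sketch only: the new location and exact declaration are assumptions; only
// the argument order (channel count first, frame second) is confirmed above.
// Remixes a frame between stereo and mono.
void RemixFrame(size_t number_of_channels, AudioFrame* frame) {
  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
  if (frame->num_channels_ == 1 && number_of_channels == 2) {
    AudioFrameOperations::MonoToStereo(frame);
  } else if (frame->num_channels_ == 2 && number_of_channels == 1) {
    AudioFrameOperations::StereoToMono(frame);
  }
}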
