/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_mixer/new_audio_conference_mixer_impl.h"

#include <algorithm>
#include <functional>

#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
#include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"

namespace webrtc {
namespace {

class SourceFrame {
 public:
  SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before)
      : audio_source_(p),
        audio_frame_(a),
        muted_(m),
        was_mixed_before_(was_mixed_before) {
    if (!muted_) {
      energy_ = NewMixerCalculateEnergy(*a);
    }
  }

  // a.shouldMixBefore(b) is true when a should take precedence over b in the
  // selection of mixer participants: unmuted frames come before muted ones,
  // VAD-active frames before passive ones, and ties are broken by higher
  // energy.
  bool shouldMixBefore(const SourceFrame& other) const {
    if (muted_ != other.muted_) {
      return other.muted_;
    }

    auto our_activity = audio_frame_->vad_activity_;
    auto other_activity = other.audio_frame_->vad_activity_;

    if (our_activity != other_activity) {
      return our_activity == AudioFrame::kVadActive;
    }

    return energy_ > other.energy_;
  }

  MixerAudioSource* audio_source_;
  AudioFrame* audio_frame_;
  bool muted_;
  // Zero-initialized so that comparing two muted frames (whose energy is
  // never computed) stays well defined.
  uint32_t energy_ = 0;
  bool was_mixed_before_;
};
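
// Illustrative example of the resulting order: given candidates
// {muted, unmuted/kVadPassive, unmuted/kVadActive}, sorting with
//   std::sort(v.begin(), v.end(),
//             std::mem_fn(&SourceFrame::shouldMixBefore));
// (as done in UpdateToMix() below) yields
// unmuted/kVadActive, unmuted/kVadPassive, muted.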

// Remixes a frame between stereo and mono.
void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
  if (frame->num_channels_ == 1 && number_of_channels == 2) {
    AudioFrameOperations::MonoToStereo(frame);
  } else if (frame->num_channels_ == 2 && number_of_channels == 1) {
    AudioFrameOperations::StereoToMono(frame);
  }
}

// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
// These effects are applied to |frame| itself prior to mixing. Assumes that
// |mixed_frame| always has at least as many channels as |frame|. Supports
// stereo at most.
void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
  RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_);
  if (use_limiter) {
    // Divide by two to avoid saturation in the mixing.
    // This is only meaningful if the limiter will be used.
    *frame >>= 1;
  }
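  // Worked numbers for the halving above: scaling samples by 1/2 attenuates
  // the signal by 20 * log10(1/2), i.e. about -6.02 dB; the limiter
  // configured in Init() targets -7 dBFS, leaving roughly 1 dB of margin.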
  RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_);
  *mixed_frame += *frame;
}

}  // namespace

MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}

MixerAudioSource::~MixerAudioSource() {
  delete _mixHistory;
}

bool MixerAudioSource::IsMixed() const {
  return _mixHistory->IsMixed();
}

NewMixHistory::NewMixHistory() : is_mixed_(false) {}

NewMixHistory::~NewMixHistory() {}

bool NewMixHistory::IsMixed() const {
  return is_mixed_;
}

bool NewMixHistory::WasMixed() const {
  // "Was mixed" and "is mixed" describe the same state from different
  // perspectives; this alias expresses the perspective of
  // NewAudioConferenceMixerImpl, which asks about the previous mixing round.
  return IsMixed();
}

int32_t NewMixHistory::SetIsMixed(const bool mixed) {
  is_mixed_ = mixed;
  return 0;
}

void NewMixHistory::ResetMixedStatus() {
  is_mixed_ = false;
}

NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) {
  NewAudioConferenceMixerImpl* mixer = new NewAudioConferenceMixerImpl(id);
  if (!mixer->Init()) {
    delete mixer;
    return NULL;
  }
  return mixer;
}
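
// Minimal usage sketch (illustrative only; MySource stands for a
// hypothetical concrete MixerAudioSource implementation):
//
//   NewAudioConferenceMixer* mixer = NewAudioConferenceMixer::Create(0);
//   MySource source;
//   mixer->SetMixabilityStatus(&source, true);  // Register for mixing.
//   AudioFrame mixed;
//   mixer->Mix(48000, 2, &mixed);               // One mixing pass.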

NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
    : id_(id),
      output_frequency_(kDefaultFrequency),
      sample_size_(0),
      audio_source_list_(),
      additional_audio_source_list_(),
      num_mixed_audio_sources_(0),
      use_limiter_(true),
      time_stamp_(0) {
  thread_checker_.DetachFromThread();
}

NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {}

bool NewAudioConferenceMixerImpl::Init() {
  crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
  if (crit_.get() == NULL)
    return false;

  cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
  if (cb_crit_.get() == NULL)
    return false;

  Config config;
  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
  limiter_.reset(AudioProcessing::Create(config));
  if (!limiter_.get())
    return false;

  if (SetOutputFrequency(kDefaultFrequency) == -1)
    return false;

  if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) !=
      limiter_->kNoError)
    return false;

  // We smoothly limit the mixed frame to -7 dBFS. -6 dBFS would correspond
  // to the divide-by-2 in MixFrames(), but -7 is used instead to give a bit
  // of headroom since the AGC is not a hard limiter.
  if (limiter_->gain_control()->set_target_level_dbfs(7) != limiter_->kNoError)
    return false;

  if (limiter_->gain_control()->set_compression_gain_db(0) !=
      limiter_->kNoError)
    return false;

  if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError)
    return false;

  if (limiter_->gain_control()->Enable(true) != limiter_->kNoError)
    return false;

  return true;
}

void NewAudioConferenceMixerImpl::Mix(int sample_rate,
                                      size_t number_of_channels,
                                      AudioFrame* audio_frame_for_mixing) {
  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  AudioFrameList mixList;
  AudioFrameList additionalFramesList;
  {
    CriticalSectionScoped cs(cb_crit_.get());
    Frequency mixing_frequency;

    switch (sample_rate) {
      case 8000:
        mixing_frequency = kNbInHz;
        break;
      case 16000:
        mixing_frequency = kWbInHz;
        break;
      case 32000:
        mixing_frequency = kSwbInHz;
        break;
      case 48000:
        mixing_frequency = kFbInHz;
        break;
      default:
        RTC_NOTREACHED();
        return;
    }

    if (OutputFrequency() != mixing_frequency) {
      SetOutputFrequency(mixing_frequency);
    }

    mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources);
    GetAdditionalAudio(&additionalFramesList);
  }

  for (FrameAndMuteInfo& frame_and_mute : mixList) {
    RemixFrame(frame_and_mute.frame, number_of_channels);
  }
  for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) {
    RemixFrame(frame_and_mute.frame, number_of_channels);
  }

  audio_frame_for_mixing->UpdateFrame(
      -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
      AudioFrame::kVadPassive, number_of_channels);

  time_stamp_ += static_cast<uint32_t>(sample_size_);

  // Only use the limiter when we are actually mixing multiple streams;
  // a single stream needs no saturation protection.
  use_limiter_ = num_mixed_audio_sources_ > 1;

  MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_);

  {
    CriticalSectionScoped cs(crit_.get());
    MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);

    if (audio_frame_for_mixing->samples_per_channel_ == 0) {
      // Nothing was mixed; set the audio samples to silence.
      audio_frame_for_mixing->samples_per_channel_ = sample_size_;
      audio_frame_for_mixing->Mute();
    } else {
      // Only call the limiter if we have something to mix.
      LimitMixedAudio(audio_frame_for_mixing);
    }
  }

  // Pass the final result to the level indicator.
  audio_level_.ComputeLevel(*audio_frame_for_mixing);
}

int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
    const Frequency& frequency) {
  CriticalSectionScoped cs(crit_.get());

  output_frequency_ = frequency;
  sample_size_ =
      static_cast<size_t>((output_frequency_ * kProcessPeriodicityInMs) / 1000);

  return 0;
}
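
// Worked example, assuming the usual 10 ms process periodicity
// (kProcessPeriodicityInMs = 10): at output_frequency_ = 48000 Hz,
// sample_size_ = 48000 * 10 / 1000 = 480 samples per channel.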

NewAudioConferenceMixer::Frequency
NewAudioConferenceMixerImpl::OutputFrequency() const {
  CriticalSectionScoped cs(crit_.get());
  return output_frequency_;
}

int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
    MixerAudioSource* audio_source,
    bool mixable) {
  if (!mixable) {
    // Anonymous audio sources are in a separate list. Make sure that the
    // audio source is in audio_source_list_ if it is being mixed.
    SetAnonymousMixabilityStatus(audio_source, false);
  }
  size_t numMixedAudioSources;
  {
    CriticalSectionScoped cs(cb_crit_.get());
    const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
    // The API must be called with a changed state.
    if (!(mixable ^ isMixed)) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "Mixable is already %s", isMixed ? "on" : "off");
      return -1;
    }
    bool success = false;
    if (mixable) {
      success = AddAudioSourceToList(audio_source, &audio_source_list_);
    } else {
      success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
    }
    if (!success) {
      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                   "failed to %s audio_source", mixable ? "add" : "remove");
      RTC_NOTREACHED();
      return -1;
    }

    size_t numMixedNonAnonymous = audio_source_list_.size();
    if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
      numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
    }
    numMixedAudioSources =
        numMixedNonAnonymous + additional_audio_source_list_.size();
  }
  // A MixerAudioSource was added or removed. Make sure the scratch
  // buffer is updated if necessary.
  // Note: The scratch buffer may only be updated in Process().
  CriticalSectionScoped cs(crit_.get());
  num_mixed_audio_sources_ = numMixedAudioSources;
  return 0;
}

bool NewAudioConferenceMixerImpl::MixabilityStatus(
    const MixerAudioSource& audio_source) const {
  CriticalSectionScoped cs(cb_crit_.get());
  return IsAudioSourceInList(audio_source, audio_source_list_);
}
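
// Anonymous sources live in additional_audio_source_list_. They are mixed
// on top of the regular participants via GetAdditionalAudio() and
// MixAnonomouslyFromList(), and they do not compete for one of the
// kMaximumAmountOfMixedAudioSources slots handed out in UpdateToMix().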

int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
    MixerAudioSource* audio_source,
    bool anonymous) {
  CriticalSectionScoped cs(cb_crit_.get());
  if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
    if (anonymous) {
      return 0;
    }
    if (!RemoveAudioSourceFromList(audio_source,
                                   &additional_audio_source_list_)) {
      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                   "unable to remove audio_source from anonymous list");
      RTC_NOTREACHED();
      return -1;
    }
    return AddAudioSourceToList(audio_source, &audio_source_list_) ? 0 : -1;
  }
  if (!anonymous) {
    return 0;
  }
  const bool mixable =
      RemoveAudioSourceFromList(audio_source, &audio_source_list_);
  if (!mixable) {
    WEBRTC_TRACE(
        kTraceWarning, kTraceAudioMixerServer, id_,
        "audio_source must be registered before being made anonymous");
    // Setting anonymous status is only possible if the MixerAudioSource is
    // already registered.
    return -1;
  }
  return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
             ? 0
             : -1;
}

bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
    const MixerAudioSource& audio_source) const {
  CriticalSectionScoped cs(cb_crit_.get());
  return IsAudioSourceInList(audio_source, additional_audio_source_list_);
}
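
// Outline of the selection below: fetch a frame from every registered
// source, sort the candidates with SourceFrame::shouldMixBefore() (unmuted
// first, then VAD-active, then highest energy), hand out up to
// maxAudioFrameCounter slots in that order, ramp in sources that just gained
// a slot, and ramp out sources that were mixed last round but lost theirs.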

AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
    size_t maxAudioFrameCounter) const {
  AudioFrameList result;
  std::vector<SourceFrame> audioSourceMixingDataList;

  // Get each audio source's frame and collect the candidates.
  for (MixerAudioSource* audio_source : audio_source_list_) {
    auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
        id_, static_cast<int>(output_frequency_));

    auto audio_frame_info = audio_frame_with_info.audio_frame_info;
    AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;

    if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "failed to GetAudioFrameWithMuted() from participant");
      continue;
    }
    audioSourceMixingDataList.emplace_back(
        audio_source, audio_source_audio_frame,
        audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
        audio_source->_mixHistory->WasMixed());
  }

  // Sort the candidates by mixing priority.
  std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
            std::mem_fn(&SourceFrame::shouldMixBefore));

  // Go through the list in priority order and fill the result list.
  for (SourceFrame& p : audioSourceMixingDataList) {
    // Skip muted sources.
    if (p.muted_) {
      p.audio_source_->_mixHistory->SetIsMixed(false);
      continue;
    }

    // Add the frame to the result vector for mixing.
    bool is_mixed = false;
    if (maxAudioFrameCounter > 0) {
      --maxAudioFrameCounter;
      if (!p.was_mixed_before_) {
        NewMixerRampIn(p.audio_frame_);
      }
      result.emplace_back(p.audio_frame_, false);
      is_mixed = true;
    }

    // Ramp out sources that were mixed last round but did not get a slot
    // this round, so they fade out instead of being cut off.
    if (p.was_mixed_before_ && !is_mixed) {
      NewMixerRampOut(p.audio_frame_);
      result.emplace_back(p.audio_frame_, false);
    }

    p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
  }
  return result;
}

void NewAudioConferenceMixerImpl::GetAdditionalAudio(
    AudioFrameList* additionalFramesList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "GetAdditionalAudio(additionalFramesList)");
  // The GetAudioFrameWithMuted() callback may result in the audio source
  // being removed from additional_audio_source_list_. If that happens it
  // will invalidate any iterators. Create a copy of the audio source list so
  // that it can be traversed safely.
  MixerAudioSourceList additionalAudioSourceList;
  additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
                                   additional_audio_source_list_.begin(),
                                   additional_audio_source_list_.end());

  for (MixerAudioSourceList::const_iterator audio_source =
           additionalAudioSourceList.begin();
       audio_source != additionalAudioSourceList.end(); ++audio_source) {
    auto audio_frame_with_info =
        (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_);
    auto ret = audio_frame_with_info.audio_frame_info;
    AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                   "failed to GetAudioFrameWithMuted() from audio_source");
      continue;
    }
    if (audio_frame->samples_per_channel_ == 0) {
      // Empty frame. Don't use it.
      continue;
    }
    additionalFramesList->push_back(FrameAndMuteInfo(
        audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
  }
}

bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
    const MixerAudioSource& audio_source,
    const MixerAudioSourceList& audioSourceList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "IsAudioSourceInList(audio_source, audioSourceList)");
  return std::find(audioSourceList.begin(), audioSourceList.end(),
                   &audio_source) != audioSourceList.end();
}

bool NewAudioConferenceMixerImpl::AddAudioSourceToList(
    MixerAudioSource* audio_source,
    MixerAudioSourceList* audioSourceList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "AddAudioSourceToList(audio_source, audioSourceList)");
  audioSourceList->push_back(audio_source);
  // Make sure that the mixed status is correct for the new MixerAudioSource.
  audio_source->_mixHistory->ResetMixedStatus();
  return true;
}

bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList(
    MixerAudioSource* audio_source,
    MixerAudioSourceList* audioSourceList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "RemoveAudioSourceFromList(audio_source, audioSourceList)");
  auto iter =
      std::find(audioSourceList->begin(), audioSourceList->end(), audio_source);
  if (iter != audioSourceList->end()) {
    audioSourceList->erase(iter);
    // The audio source is no longer mixed; reset its status to the default.
    audio_source->_mixHistory->ResetMixedStatus();
    return true;
  } else {
    return false;
  }
}

int32_t NewAudioConferenceMixerImpl::MixFromList(
    AudioFrame* mixedAudio,
    const AudioFrameList& audioFrameList,
    int32_t id,
    bool use_limiter) {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
               "MixFromList(mixedAudio, audioFrameList)");
  if (audioFrameList.empty())
    return 0;

  if (audioFrameList.size() == 1) {
    mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
    mixedAudio->elapsed_time_ms_ =
        audioFrameList.front().frame->elapsed_time_ms_;
  } else {
    // TODO(wu): Issue 3390.
    // Audio frame timestamp is only supported in one channel case.
    mixedAudio->timestamp_ = 0;
    mixedAudio->elapsed_time_ms_ = -1;
  }

  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
       iter != audioFrameList.end(); ++iter) {
    if (!iter->muted) {
      MixFrames(mixedAudio, iter->frame, use_limiter);
    }
  }

  return 0;
}

// TODO(andrew): consolidate this function with MixFromList.
int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
    AudioFrame* mixedAudio,
    const AudioFrameList& audioFrameList) const {
  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
               "MixAnonomouslyFromList(mixedAudio, audioFrameList)");

  if (audioFrameList.empty())
    return 0;

  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
       iter != audioFrameList.end(); ++iter) {
    if (!iter->muted) {
      MixFrames(mixedAudio, iter->frame, use_limiter_);
    }
  }
  return 0;
}

bool NewAudioConferenceMixerImpl::LimitMixedAudio(
    AudioFrame* mixedAudio) const {
  if (!use_limiter_) {
    return true;
  }

  // Smoothly limit the mixed frame.
  const int error = limiter_->ProcessStream(mixedAudio);

  // And now we can safely restore the level. This procedure results in
  // some loss of resolution, deemed acceptable.
  //
  // It's possible to apply the gain in the AGC (with a target level of
  // 0 dBFS and compression gain of 6 dB). However, in the transition frame
  // when this is enabled (moving from one to two audio sources) it has the
  // potential to create discontinuities in the mixed frame.
  //
  // Instead we double the frame (with addition since left-shifting a
  // negative value is undefined).
  *mixedAudio += *mixedAudio;

  if (error != limiter_->kNoError) {
    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                 "Error from AudioProcessing: %d", error);
    RTC_NOTREACHED();
    return false;
  }
  return true;
}

int NewAudioConferenceMixerImpl::GetOutputAudioLevel() {
  const int level = audio_level_.Level();
  WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
               "GetAudioOutputLevel() => level=%d", level);
  return level;
}

int NewAudioConferenceMixerImpl::GetOutputAudioLevelFullRange() {
  const int level = audio_level_.LevelFullRange();
  WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
               "GetAudioOutputLevelFullRange() => level=%d", level);
  return level;
}
}  // namespace webrtc