| OLD | NEW |
|---|---|
| 1 /* | 1 /* | 
| 2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 
| 3  * | 3  * | 
| 4  *  Use of this source code is governed by a BSD-style license | 4  *  Use of this source code is governed by a BSD-style license | 
| 5  *  that can be found in the LICENSE file in the root of the source | 5  *  that can be found in the LICENSE file in the root of the source | 
| 6  *  tree. An additional intellectual property rights grant can be found | 6  *  tree. An additional intellectual property rights grant can be found | 
| 7  *  in the file PATENTS.  All contributing project authors may | 7  *  in the file PATENTS.  All contributing project authors may | 
| 8  *  be found in the AUTHORS file in the root of the source tree. | 8  *  be found in the AUTHORS file in the root of the source tree. | 
| 9  */ | 9  */ | 
| 10 | 10 | 
| 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 
| 12 | 12 | 
| 13 #include <algorithm> | 13 #include <algorithm> | 
| 14 #include <functional> | 14 #include <functional> | 
| 15 | 15 | 
|  | 16 #include "webrtc/base/thread_annotations.h" | 
| 16 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 
| 17 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" | 18 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" | 
| 18 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 19 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 
| 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 20 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 
| 20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 21 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 
| 21 #include "webrtc/system_wrappers/include/trace.h" | 22 #include "webrtc/system_wrappers/include/trace.h" | 
| 22 | 23 | 
| 23 namespace webrtc { | 24 namespace webrtc { | 
| 24 namespace { | 25 namespace { | 
| 25 | 26 | 
| (...skipping 114 matching lines...) | |
| 140   thread_checker_.DetachFromThread(); | 141   thread_checker_.DetachFromThread(); | 
| 141 } | 142 } | 
| 142 | 143 | 
| 143 AudioMixerImpl::~AudioMixerImpl() {} | 144 AudioMixerImpl::~AudioMixerImpl() {} | 
| 144 | 145 | 
| 145 bool AudioMixerImpl::Init() { | 146 bool AudioMixerImpl::Init() { | 
| 146   crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); | 147   crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); | 
| 147   if (crit_.get() == NULL) | 148   if (crit_.get() == NULL) | 
| 148     return false; | 149     return false; | 
| 149 | 150 | 
| 150   cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); |  | 
| 151   if (cb_crit_.get() == NULL) |  | 
| 152     return false; |  | 
| 153 |  | 
| 154   Config config; | 151   Config config; | 
| 155   config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 152   config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 
| 156   limiter_.reset(AudioProcessing::Create(config)); | 153   limiter_.reset(AudioProcessing::Create(config)); | 
| 157   if (!limiter_.get()) | 154   if (!limiter_.get()) | 
| 158     return false; | 155     return false; | 
| 159 | 156 | 
| 160   if (SetOutputFrequency(kDefaultFrequency) == -1) | 157   if (SetOutputFrequency(kDefaultFrequency) == -1) | 
| 161     return false; | 158     return false; | 
| 162 | 159 | 
| 163   if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) != | 160   if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) != | 
| (...skipping 17 matching lines...) | |
| 181     return false; | 178     return false; | 
| 182 | 179 | 
| 183   return true; | 180   return true; | 
| 184 } | 181 } | 
| 185 | 182 | 
| 186 void AudioMixerImpl::Mix(int sample_rate, | 183 void AudioMixerImpl::Mix(int sample_rate, | 
| 187                          size_t number_of_channels, | 184                          size_t number_of_channels, | 
| 188                          AudioFrame* audio_frame_for_mixing) { | 185                          AudioFrame* audio_frame_for_mixing) { | 
| 189   RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 186   RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 
| 190   RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 187   RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
|  | 188   std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 
|  | 189 | 
|  | 190   if (sample_rate != kNbInHz && sample_rate != kWbInHz && | 
|  | 191       sample_rate != kSwbInHz && sample_rate != kFbInHz) { | 
|  | 192     WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 
|  | 193                  "Invalid frequency: %d", sample_rate); | 
|  | 194     RTC_NOTREACHED(); | 
|  | 195     return; | 
|  | 196   } | 
|  | 197 | 
|  | 198   if (OutputFrequency() != sample_rate) { | 
|  | 199     SetOutputFrequency(static_cast<Frequency>(sample_rate)); | 
|  | 200   } | 
|  | 201 | 
| 191   AudioFrameList mixList; | 202   AudioFrameList mixList; | 
| 192   AudioFrameList additionalFramesList; | 203   AudioFrameList additionalFramesList; | 
| 193   std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |  | 
| 194   { | 204   { | 
| 195     CriticalSectionScoped cs(cb_crit_.get()); | 205     CriticalSectionScoped cs(crit_.get()); | 
| 196     Frequency mixing_frequency; |  | 
| 197 |  | 
| 198     switch (sample_rate) { |  | 
| 199       case 8000: |  | 
| 200         mixing_frequency = kNbInHz; |  | 
| 201         break; |  | 
| 202       case 16000: |  | 
| 203         mixing_frequency = kWbInHz; |  | 
| 204         break; |  | 
| 205       case 32000: |  | 
| 206         mixing_frequency = kSwbInHz; |  | 
| 207         break; |  | 
| 208       case 48000: |  | 
| 209         mixing_frequency = kFbInHz; |  | 
| 210         break; |  | 
| 211       default: |  | 
| 212         RTC_NOTREACHED(); |  | 
| 213         return; |  | 
| 214     } |  | 
| 215 |  | 
| 216     if (OutputFrequency() != mixing_frequency) { |  | 
| 217       SetOutputFrequency(mixing_frequency); |  | 
| 218     } |  | 
| 219 |  | 
| 220     mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); | 206     mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); | 
| 221     GetAdditionalAudio(&additionalFramesList); | 207     GetAdditionalAudio(&additionalFramesList); | 
| 222   } | 208   } | 
| 223 | 209 | 
| 224   for (FrameAndMuteInfo& frame_and_mute : mixList) { | 210   for (FrameAndMuteInfo& frame_and_mute : mixList) { | 
| 225     RemixFrame(frame_and_mute.frame, number_of_channels); | 211     RemixFrame(frame_and_mute.frame, number_of_channels); | 
| 226   } | 212   } | 
| 227   for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { | 213   for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { | 
| 228     RemixFrame(frame_and_mute.frame, number_of_channels); | 214     RemixFrame(frame_and_mute.frame, number_of_channels); | 
| 229   } | 215   } | 
| 230 | 216 | 
| 231   audio_frame_for_mixing->UpdateFrame( | 217   audio_frame_for_mixing->UpdateFrame( | 
| 232       -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, | 218       -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, | 
| 233       AudioFrame::kVadPassive, number_of_channels); | 219       AudioFrame::kVadPassive, number_of_channels); | 
| 234 | 220 | 
| 235   time_stamp_ += static_cast<uint32_t>(sample_size_); | 221   time_stamp_ += static_cast<uint32_t>(sample_size_); | 
| 236 | 222 | 
| 237   use_limiter_ = num_mixed_audio_sources_ > 1; | 223   use_limiter_ = num_mixed_audio_sources_ > 1; | 
| 238 | 224 | 
| 239   // We only use the limiter if it supports the output sample rate and | 225   // We only use the limiter if it supports the output sample rate and | 
| 240   // we're actually mixing multiple streams. | 226   // we're actually mixing multiple streams. | 
| 241   MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); | 227   MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); | 
| 242 | 228   MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | 
| 243   { | 229   if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 
| 244     CriticalSectionScoped cs(crit_.get()); | 230     // Nothing was mixed, set the audio samples to silence. | 
| 245     MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | 231     audio_frame_for_mixing->samples_per_channel_ = sample_size_; | 
| 246 | 232     audio_frame_for_mixing->Mute(); | 
| 247     if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 233   } else { | 
| 248       // Nothing was mixed, set the audio samples to silence. | 234     // Only call the limiter if we have something to mix. | 
| 249       audio_frame_for_mixing->samples_per_channel_ = sample_size_; | 235     LimitMixedAudio(audio_frame_for_mixing); | 
| 250       audio_frame_for_mixing->Mute(); |  | 
| 251     } else { |  | 
| 252       // Only call the limiter if we have something to mix. |  | 
| 253       LimitMixedAudio(audio_frame_for_mixing); |  | 
| 254     } |  | 
| 255   } | 236   } | 
| 256 | 237 | 
| 257   // Pass the final result to the level indicator. | 238   // Pass the final result to the level indicator. | 
| 258   audio_level_.ComputeLevel(*audio_frame_for_mixing); | 239   audio_level_.ComputeLevel(*audio_frame_for_mixing); | 
| 259 | 240 | 
| 260   return; | 241   return; | 
| 261 } | 242 } | 
| 262 | 243 | 
| 263 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { | 244 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { | 
| 264   CriticalSectionScoped cs(crit_.get()); | 245   RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| 265 |  | 
| 266   output_frequency_ = frequency; | 246   output_frequency_ = frequency; | 
| 267   sample_size_ = | 247   sample_size_ = | 
| 268       static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); | 248       static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); | 
| 269 | 249 | 
| 270   return 0; | 250   return 0; | 
| 271 } | 251 } | 
| 272 | 252 | 
| 273 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { | 253 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { | 
| 274   CriticalSectionScoped cs(crit_.get()); | 254   RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| 275   return output_frequency_; | 255   return output_frequency_; | 
| 276 } | 256 } | 
| 277 | 257 | 
| 278 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, | 258 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, | 
| 279                                             bool mixable) { | 259                                             bool mixable) { | 
| 280   if (!mixable) { | 260   if (!mixable) { | 
| 281     // Anonymous audio sources are in a separate list. Make sure that the | 261     // Anonymous audio sources are in a separate list. Make sure that the | 
| 282     // audio source is in the _audioSourceList if it is being mixed. | 262     // audio source is in the _audioSourceList if it is being mixed. | 
| 283     SetAnonymousMixabilityStatus(audio_source, false); | 263     SetAnonymousMixabilityStatus(audio_source, false); | 
| 284   } | 264   } | 
| 285   size_t numMixedAudioSources; |  | 
| 286   { | 265   { | 
| 287     CriticalSectionScoped cs(cb_crit_.get()); | 266     CriticalSectionScoped cs(crit_.get()); | 
| 288     const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); | 267     const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); | 
| 289     // API must be called with a new state. | 268     // API must be called with a new state. | 
| 290     if (!(mixable ^ isMixed)) { | 269     if (!(mixable ^ isMixed)) { | 
| 291       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 270       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, | 
| 292                    "Mixable is aready %s", isMixed ? "ON" : "off"); | 271                    "Mixable is aready %s", isMixed ? "ON" : "off"); | 
| 293       return -1; | 272       return -1; | 
| 294     } | 273     } | 
| 295     bool success = false; | 274     bool success = false; | 
| 296     if (mixable) { | 275     if (mixable) { | 
| 297       success = AddAudioSourceToList(audio_source, &audio_source_list_); | 276       success = AddAudioSourceToList(audio_source, &audio_source_list_); | 
| 298     } else { | 277     } else { | 
| 299       success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); | 278       success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); | 
| 300     } | 279     } | 
| 301     if (!success) { | 280     if (!success) { | 
| 302       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 281       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 
| 303                    "failed to %s audio_source", mixable ? "add" : "remove"); | 282                    "failed to %s audio_source", mixable ? "add" : "remove"); | 
| 304       RTC_NOTREACHED(); | 283       RTC_NOTREACHED(); | 
| 305       return -1; | 284       return -1; | 
| 306     } | 285     } | 
| 307 | 286 | 
| 308     size_t numMixedNonAnonymous = audio_source_list_.size(); | 287     size_t numMixedNonAnonymous = audio_source_list_.size(); | 
| 309     if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { | 288     if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { | 
| 310       numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; | 289       numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; | 
| 311     } | 290     } | 
| 312     numMixedAudioSources = | 291     num_mixed_audio_sources_ = | 
| 313         numMixedNonAnonymous + additional_audio_source_list_.size(); | 292         numMixedNonAnonymous + additional_audio_source_list_.size(); | 
| 314   } | 293   } | 
| 315   // A MixerAudioSource was added or removed. Make sure the scratch |  | 
| 316   // buffer is updated if necessary. |  | 
| 317   // Note: The scratch buffer may only be updated in Process(). |  | 
| 318   CriticalSectionScoped cs(crit_.get()); |  | 
| 319   num_mixed_audio_sources_ = numMixedAudioSources; |  | 
| 320   return 0; | 294   return 0; | 
| 321 } | 295 } | 
| 322 | 296 | 
| 323 bool AudioMixerImpl::MixabilityStatus( | 297 bool AudioMixerImpl::MixabilityStatus( | 
| 324     const MixerAudioSource& audio_source) const { | 298     const MixerAudioSource& audio_source) const { | 
| 325   CriticalSectionScoped cs(cb_crit_.get()); | 299   CriticalSectionScoped cs(crit_.get()); | 
| 326   return IsAudioSourceInList(audio_source, audio_source_list_); | 300   return IsAudioSourceInList(audio_source, audio_source_list_); | 
| 327 } | 301 } | 
| 328 | 302 | 
| 329 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( | 303 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( | 
| 330     MixerAudioSource* audio_source, | 304     MixerAudioSource* audio_source, | 
| 331     bool anonymous) { | 305     bool anonymous) { | 
| 332   CriticalSectionScoped cs(cb_crit_.get()); | 306   CriticalSectionScoped cs(crit_.get()); | 
| 333   if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { | 307   if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { | 
| 334     if (anonymous) { | 308     if (anonymous) { | 
| 335       return 0; | 309       return 0; | 
| 336     } | 310     } | 
| 337     if (!RemoveAudioSourceFromList(audio_source, | 311     if (!RemoveAudioSourceFromList(audio_source, | 
| 338                                    &additional_audio_source_list_)) { | 312                                    &additional_audio_source_list_)) { | 
| 339       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 313       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 
| 340                    "unable to remove audio_source from anonymous list"); | 314                    "unable to remove audio_source from anonymous list"); | 
| 341       RTC_NOTREACHED(); | 315       RTC_NOTREACHED(); | 
| 342       return -1; | 316       return -1; | 
| (...skipping 13 matching lines...) | |
| 356     // already registered. | 330     // already registered. | 
| 357     return -1; | 331     return -1; | 
| 358   } | 332   } | 
| 359   return AddAudioSourceToList(audio_source, &additional_audio_source_list_) | 333   return AddAudioSourceToList(audio_source, &additional_audio_source_list_) | 
| 360              ? 0 | 334              ? 0 | 
| 361              : -1; | 335              : -1; | 
| 362 } | 336 } | 
| 363 | 337 | 
| 364 bool AudioMixerImpl::AnonymousMixabilityStatus( | 338 bool AudioMixerImpl::AnonymousMixabilityStatus( | 
| 365     const MixerAudioSource& audio_source) const { | 339     const MixerAudioSource& audio_source) const { | 
| 366   CriticalSectionScoped cs(cb_crit_.get()); | 340   CriticalSectionScoped cs(crit_.get()); | 
| 367   return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 341   return IsAudioSourceInList(audio_source, additional_audio_source_list_); | 
| 368 } | 342 } | 
| 369 | 343 | 
| 370 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { | 344 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { | 
| 371   AudioFrameList result; | 345   AudioFrameList result; | 
| 372   std::vector<SourceFrame> audioSourceMixingDataList; | 346   std::vector<SourceFrame> audioSourceMixingDataList; | 
| 373 | 347 | 
| 374   // Get audio source audio and put it in the struct vector. | 348   // Get audio source audio and put it in the struct vector. | 
| 375   for (MixerAudioSource* audio_source : audio_source_list_) { | 349   for (MixerAudioSource* audio_source : audio_source_list_) { | 
| 376     auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 350     auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | 
| (...skipping 207 matching lines...) | |
| 584   return level; | 558   return level; | 
| 585 } | 559 } | 
| 586 | 560 | 
| 587 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 561 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 
| 588   const int level = audio_level_.LevelFullRange(); | 562   const int level = audio_level_.LevelFullRange(); | 
| 589   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 563   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 
| 590                "GetAudioOutputLevelFullRange() => level=%d", level); | 564                "GetAudioOutputLevelFullRange() => level=%d", level); | 
| 591   return level; | 565   return level; | 
| 592 } | 566 } | 
| 593 }  // namespace webrtc | 567 }  // namespace webrtc | 
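
The change above collapses the two locks (`crit_` and `cb_crit_`) into a single critical section and moves the sample-rate check and the output-frequency accessors onto the mixing thread (guarded by `thread_checker_` rather than a lock). Below is a minimal, hypothetical sketch of that single-lock pattern, not part of this CL: it uses `std::mutex` in place of WebRTC's `CriticalSectionWrapper`, and the class and method names (`MiniMixer`, `AddSource`, etc.) are illustrative, not the actual `AudioMixerImpl` API.

```cpp
// Illustrative sketch only: one mutex guards both source lists, mirroring the
// CL's removal of the separate cb_crit_ lock. Names here are hypothetical.
#include <algorithm>
#include <mutex>
#include <vector>

struct AudioSource {};  // Stand-in for MixerAudioSource.

class MiniMixer {
 public:
  // Called on the mixing thread only; takes the single lock just long enough
  // to snapshot the participant lists, then mixes outside the lock.
  void Mix() {
    std::vector<AudioSource*> to_mix;
    std::vector<AudioSource*> anonymous;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      to_mix = sources_;
      anonymous = anonymous_sources_;
    }
    // ... resample, ramp and sum the snapshotted frames here ...
  }

  // Called from the API thread; both lists are guarded by the same mutex, so
  // adding or removing a source can never interleave with the snapshot above.
  void AddSource(AudioSource* source, bool anonymous) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto& list = anonymous ? anonymous_sources_ : sources_;
    if (std::find(list.begin(), list.end(), source) == list.end())
      list.push_back(source);
  }

 private:
  std::mutex mutex_;
  std::vector<AudioSource*> sources_;
  std::vector<AudioSource*> anonymous_sources_;
};
```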