Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 22 matching lines...) | |
| 33 // These effects are applied to |frame| itself prior to mixing. Assumes that | 33 // These effects are applied to |frame| itself prior to mixing. Assumes that |
| 34 // |mixed_frame| always has at least as many channels as |frame|. Supports | 34 // |mixed_frame| always has at least as many channels as |frame|. Supports |
| 35 // stereo at most. | 35 // stereo at most. |
| 36 // | 36 // |
| 37 // TODO(andrew): consider not modifying |frame| here. | 37 // TODO(andrew): consider not modifying |frame| here. |
| 38 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 38 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
| 39 assert(mixed_frame->num_channels_ >= frame->num_channels_); | 39 assert(mixed_frame->num_channels_ >= frame->num_channels_); |
| 40 if (use_limiter) { | 40 if (use_limiter) { |
| 41 // Divide by two to avoid saturation in the mixing. | 41 // Divide by two to avoid saturation in the mixing. |
| 42 // This is only meaningful if the limiter will be used. | 42 // This is only meaningful if the limiter will be used. |
| 43 *frame >>= 1; | 43 AudioFrameOperations::ShiftDown(frame); |
> **the sun** (2016/10/19 09:23:16):
> Note, if this function was called DivideBy2(), App
>
> **aleloi** (2016/10/20 08:27:05):
> Done.
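
The comment at new lines 41 to 43 explains why the frame is halved before mixing: it gives the sum 6 dB of headroom so two near-full-scale inputs cannot clip before the limiter runs, and the thread above is only about what to call that helper. Below is a minimal, self-contained sketch of the idea, not the WebRTC API: `Samples`, `DivideBy2` (the name suggested in the thread), and the saturating `AddFrames` are illustrative stand-ins for `AudioFrame` and the `AudioFrameOperations` helpers touched by this CL, whose exact signatures and behavior are not reproduced here.

```cpp
// Illustrative only: stand-ins for AudioFrame data and the mixing helpers.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

using Samples = std::vector<int16_t>;

// Halve every sample. The old code expressed this as "*frame >>= 1";
// dividing by two has the same intent and is well defined for negatives.
void DivideBy2(Samples* samples) {
  for (int16_t& s : *samples)
    s = static_cast<int16_t>(s / 2);
}

// Add |src| into |mixed| with saturation, mimicking the mixing step.
void AddFrames(const Samples& src, Samples* mixed) {
  assert(src.size() == mixed->size());
  for (size_t i = 0; i < src.size(); ++i) {
    int32_t sum = static_cast<int32_t>((*mixed)[i]) + src[i];
    sum = std::min<int32_t>(INT16_MAX, std::max<int32_t>(INT16_MIN, sum));
    (*mixed)[i] = static_cast<int16_t>(sum);
  }
}

int main() {
  Samples a = {30000, -30000, 1000};      // near full scale
  Samples mixed = {30000, -30000, 1000};
  DivideBy2(&a);
  DivideBy2(&mixed);
  AddFrames(a, &mixed);
  for (int16_t s : mixed) std::cout << s << ' ';  // 30000 -30000 1000
  std::cout << '\n';
  return 0;
}
```

Without the halving (the non-limiter path in MixFrames), the same two inputs would saturate at the int16_t bounds instead of surviving the sum intact.
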
| 44 } | 44 } |
| 45 if (mixed_frame->num_channels_ > frame->num_channels_) { | 45 if (mixed_frame->num_channels_ > frame->num_channels_) { |
| 46 // We only support mono-to-stereo. | 46 // We only support mono-to-stereo. |
| 47 assert(mixed_frame->num_channels_ == 2 && | 47 assert(mixed_frame->num_channels_ == 2 && |
| 48 frame->num_channels_ == 1); | 48 frame->num_channels_ == 1); |
| 49 AudioFrameOperations::MonoToStereo(frame); | 49 AudioFrameOperations::MonoToStereo(frame); |
| 50 } | 50 } |
| 51 | 51 |
| 52 *mixed_frame += *frame; | 52 AudioFrameOperations::AddFrames(*frame, mixed_frame); |
| 53 } | 53 } |
| 54 | 54 |
| 55 // Return the max number of channels from a |list| composed of AudioFrames. | 55 // Return the max number of channels from a |list| composed of AudioFrames. |
| 56 size_t MaxNumChannels(const AudioFrameList* list) { | 56 size_t MaxNumChannels(const AudioFrameList* list) { |
| 57 size_t max_num_channels = 1; | 57 size_t max_num_channels = 1; |
| 58 for (AudioFrameList::const_iterator iter = list->begin(); | 58 for (AudioFrameList::const_iterator iter = list->begin(); |
| 59 iter != list->end(); | 59 iter != list->end(); |
| 60 ++iter) { | 60 ++iter) { |
| 61 max_num_channels = std::max(max_num_channels, (*iter).frame->num_channels_); | 61 max_num_channels = std::max(max_num_channels, (*iter).frame->num_channels_); |
| 62 } | 62 } |
| (...skipping 214 matching lines...) | |
| 277 CriticalSectionScoped cs(_crit.get()); | 277 CriticalSectionScoped cs(_crit.get()); |
| 278 | 278 |
| 279 // TODO(henrike): it might be better to decide the number of channels | 279 // TODO(henrike): it might be better to decide the number of channels |
| 280 // with an API instead of dynamically. | 280 // with an API instead of dynamically. |
| 281 | 281 |
| 282 // Find the max channels over all mixing lists. | 282 // Find the max channels over all mixing lists. |
| 283 const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList), | 283 const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList), |
| 284 std::max(MaxNumChannels(&additionalFramesList), | 284 std::max(MaxNumChannels(&additionalFramesList), |
| 285 MaxNumChannels(&rampOutList))); | 285 MaxNumChannels(&rampOutList))); |
| 286 | 286 |
| 287 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | 287 AudioFrameOperations::UpdateFrame( |
| 288 AudioFrame::kNormalSpeech, | 288 -1, _timeStamp, NULL, 0, _outputFrequency, |
| 289 AudioFrame::kVadPassive, num_mixed_channels); | 289 AudioFrame::kNormalSpeech, AudioFrame::kVadPassive, |
| | 290 num_mixed_channels, mixedAudio); |
| 290 | 291 |
| 291 _timeStamp += static_cast<uint32_t>(_sampleSize); | 292 _timeStamp += static_cast<uint32_t>(_sampleSize); |
| 292 | 293 |
| 293 // We only use the limiter if it supports the output sample rate and | 294 // We only use the limiter if it supports the output sample rate and |
| 294 // we're actually mixing multiple streams. | 295 // we're actually mixing multiple streams. |
| 295 use_limiter_ = | 296 use_limiter_ = |
| 296 _numMixedParticipants > 1 && | 297 _numMixedParticipants > 1 && |
| 297 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | 298 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
| 298 | 299 |
| 299 MixFromList(mixedAudio, mixList); | 300 MixFromList(mixedAudio, mixList); |
| 300 MixAnonomouslyFromList(mixedAudio, additionalFramesList); | 301 MixAnonomouslyFromList(mixedAudio, additionalFramesList); |
| 301 MixAnonomouslyFromList(mixedAudio, rampOutList); | 302 MixAnonomouslyFromList(mixedAudio, rampOutList); |
| 302 | 303 |
| 303 if(mixedAudio->samples_per_channel_ == 0) { | 304 if(mixedAudio->samples_per_channel_ == 0) { |
| 304 // Nothing was mixed, set the audio samples to silence. | 305 // Nothing was mixed, set the audio samples to silence. |
| 305 mixedAudio->samples_per_channel_ = _sampleSize; | 306 mixedAudio->samples_per_channel_ = _sampleSize; |
| 306 mixedAudio->Mute(); | 307 AudioFrameOperations::Mute(mixedAudio); |
| 307 } else { | 308 } else { |
| 308 // Only call the limiter if we have something to mix. | 309 // Only call the limiter if we have something to mix. |
| 309 LimitMixedAudio(mixedAudio); | 310 LimitMixedAudio(mixedAudio); |
| 310 } | 311 } |
| 311 } | 312 } |
| 312 | 313 |
| 313 { | 314 { |
| 314 CriticalSectionScoped cs(_cbCrit.get()); | 315 CriticalSectionScoped cs(_cbCrit.get()); |
| 315 if(_mixReceiver != NULL) { | 316 if(_mixReceiver != NULL) { |
| 316 const AudioFrame** dummy = NULL; | 317 const AudioFrame** dummy = NULL; |
| (...skipping 598 matching lines...) | |
| 915 // And now we can safely restore the level. This procedure results in | 916 // And now we can safely restore the level. This procedure results in |
| 916 // some loss of resolution, deemed acceptable. | 917 // some loss of resolution, deemed acceptable. |
| 917 // | 918 // |
| 918 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS | 919 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS |
| 919 // and compression gain of 6 dB). However, in the transition frame when this | 920 // and compression gain of 6 dB). However, in the transition frame when this |
| 920 // is enabled (moving from one to two participants) it has the potential to | 921 // is enabled (moving from one to two participants) it has the potential to |
| 921 // create discontinuities in the mixed frame. | 922 // create discontinuities in the mixed frame. |
| 922 // | 923 // |
| 923 // Instead we double the frame (with addition since left-shifting a | 924 // Instead we double the frame (with addition since left-shifting a |
| 924 // negative value is undefined). | 925 // negative value is undefined). |
| 925 *mixedAudio += *mixedAudio; | 926 AudioFrameOperations::AddFrames(*mixedAudio, mixedAudio); |
| 926 | 927 |
| 927 if(error != _limiter->kNoError) { | 928 if(error != _limiter->kNoError) { |
| 928 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 929 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 929 "Error from AudioProcessing: %d", error); | 930 "Error from AudioProcessing: %d", error); |
| 930 assert(false); | 931 assert(false); |
| 931 return false; | 932 return false; |
| 932 } | 933 } |
| 933 return true; | 934 return true; |
| 934 } | 935 } |
| 935 } // namespace webrtc | 936 } // namespace webrtc |
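
The comment around new lines 916 to 926 is the counterpart of the halving shown earlier: after the limiter runs, the mixer restores the level by adding the frame to itself rather than left-shifting it, because left-shifting a negative signed value was undefined behavior under the C++ standards in use at the time. A small standalone illustration of that point, again using a plain sample buffer rather than the real AudioFrame type, and assuming (as the original comment does) that the limited signal stays within half scale so doubling cannot overflow:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Samples after limiting; in this sketch peaks stay at or below half
  // scale, so doubling fits back into int16_t.
  std::vector<int16_t> mixed = {12000, -9000, 250};

  for (int16_t& s : mixed) {
    // s + s promotes to int and is defined for every int16_t value;
    // "s << 1" would be undefined for negative s under pre-C++20 rules.
    s = static_cast<int16_t>(s + s);
  }

  for (int16_t s : mixed) std::cout << s << ' ';  // 24000 -18000 500
  std::cout << '\n';
  return 0;
}
```
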