OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 55 matching lines...)
66 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { | 66 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { |
67 AudioFrameOperations::StereoToMono(frame); | 67 AudioFrameOperations::StereoToMono(frame); |
68 } | 68 } |
69 } | 69 } |
70 | 70 |
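For reference, the up/downmix conventions RemixFrame relies on are simple: duplicate the mono channel into both stereo channels, and average each stereo pair down to mono. A minimal standalone sketch, assuming interleaved int16_t samples (the helper names here are illustrative, not AudioFrameOperations' actual signatures):

    #include <cstdint>
    #include <vector>

    // Mono -> stereo: copy each sample into the left and right slots.
    std::vector<int16_t> UpmixMonoToStereo(const std::vector<int16_t>& mono) {
      std::vector<int16_t> stereo(mono.size() * 2);
      for (size_t i = 0; i < mono.size(); ++i) {
        stereo[2 * i] = mono[i];      // left
        stereo[2 * i + 1] = mono[i];  // right
      }
      return stereo;
    }

    // Stereo -> mono: average each pair in 32-bit to avoid overflow.
    std::vector<int16_t> DownmixStereoToMono(
        const std::vector<int16_t>& stereo) {
      std::vector<int16_t> mono(stereo.size() / 2);
      for (size_t i = 0; i < mono.size(); ++i) {
        mono[i] = static_cast<int16_t>(
            (static_cast<int32_t>(stereo[2 * i]) + stereo[2 * i + 1]) / 2);
      }
      return mono;
    }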
71 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 71 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. |
72 // These effects are applied to |frame| itself prior to mixing. Assumes that | 72 // These effects are applied to |frame| itself prior to mixing. Assumes that |
73 // |mixed_frame| always has at least as many channels as |frame|. Supports | 73 // |mixed_frame| always has at least as many channels as |frame|. Supports |
74 // stereo at most. | 74 // stereo at most. |
75 // | 75 // |
76 // TODO(andrew): consider not modifying |frame| here. | |
77 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 76 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
78 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); | 77 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); |
79 if (use_limiter) { | 78 if (use_limiter) { |
80 // Divide by two to avoid saturation in the mixing. | 79 // Divide by two to avoid saturation in the mixing. |
81 // This is only meaningful if the limiter will be used. | 80 // This is only meaningful if the limiter will be used. |
82 *frame >>= 1; | 81 *frame >>= 1; |
83 } | 82 } |
84 RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_); | 83 RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_); |
85 *mixed_frame += *frame; | 84 *mixed_frame += *frame; |
86 } | 85 } |
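The comment block above describes the headroom trick: when the limiter is active, each source is attenuated by 6 dB (>> 1) before summing, so two full-scale streams cannot overflow the 16-bit sample range, and the limiter stage later restores the level. A minimal sketch of that behavior on raw buffers, assuming 16-bit samples (MixInto and Saturate are illustrative names, not the mixer's API):

    #include <cstdint>
    #include <limits>

    // Clamp a 32-bit intermediate sum into the 16-bit sample range.
    int16_t Saturate(int32_t v) {
      if (v > std::numeric_limits<int16_t>::max())
        return std::numeric_limits<int16_t>::max();
      if (v < std::numeric_limits<int16_t>::min())
        return std::numeric_limits<int16_t>::min();
      return static_cast<int16_t>(v);
    }

    // Mix |src| into |mix| in place, halving each source first when a
    // limiter will run afterwards (mirrors the *frame >>= 1 above).
    void MixInto(int16_t* mix, const int16_t* src, size_t samples,
                 bool use_limiter) {
      for (size_t i = 0; i < samples; ++i) {
        int32_t s = src[i];
        if (use_limiter)
          s >>= 1;  // 6 dB of headroom for the sum
        mix[i] = Saturate(static_cast<int32_t>(mix[i]) + s);
      }
    }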
(...skipping 143 matching lines...)
230 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { | 229 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { |
231 RemixFrame(frame_and_mute.frame, number_of_channels); | 230 RemixFrame(frame_and_mute.frame, number_of_channels); |
232 } | 231 } |
233 | 232 |
234 audio_frame_for_mixing->UpdateFrame( | 233 audio_frame_for_mixing->UpdateFrame( |
235 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, | 234 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, |
236 AudioFrame::kVadPassive, number_of_channels); | 235 AudioFrame::kVadPassive, number_of_channels); |
237 | 236 |
238 time_stamp_ += static_cast<uint32_t>(sample_size_); | 237 time_stamp_ += static_cast<uint32_t>(sample_size_); |
239 | 238 |
240 use_limiter_ = num_mixed_audio_sources_ > 1 && | 239 use_limiter_ = num_mixed_audio_sources_ > 1; |
241 output_frequency_ <= AudioProcessing::kMaxNativeSampleRateHz; | |
242 | 240 |
243   // We only use the limiter if it supports the output sample rate and |  241   // We only use the limiter if we're actually mixing multiple |
244   // we're actually mixing multiple streams. |  242   // streams. |
245 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); | 243 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); |
246 | 244 |
247 { | 245 { |
248 CriticalSectionScoped cs(crit_.get()); | 246 CriticalSectionScoped cs(crit_.get()); |
249 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | 247 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
250 | 248 |
251 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 249 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
252 // Nothing was mixed, set the audio samples to silence. | 250 // Nothing was mixed, set the audio samples to silence. |
253 audio_frame_for_mixing->samples_per_channel_ = sample_size_; | 251 audio_frame_for_mixing->samples_per_channel_ = sample_size_; |
254 audio_frame_for_mixing->Mute(); | 252 audio_frame_for_mixing->Mute(); |
255 } else { | 253 } else { |
256 // Only call the limiter if we have something to mix. | 254 // Only call the limiter if we have something to mix. |
257 LimitMixedAudio(audio_frame_for_mixing); | 255 LimitMixedAudio(audio_frame_for_mixing); |
258 } | 256 } |
259 } | 257 } |
258 | |
259 // Pass the final result to the level indicator. | |
260 audio_level_.ComputeLevel(*audio_frame_for_mixing); | |
261 | |
260 return; | 262 return; |
261 } | 263 } |
262 | 264 |
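The audio_level_.ComputeLevel() call added above is what feeds the two level getters introduced at the bottom of this file. As a rough sketch of what such a meter typically does (an assumption about the general technique, not voe::AudioLevel's actual implementation): track the peak absolute sample of each frame and let it decay, so the indicator rises instantly on speech and falls back gradually afterwards:

    #include <algorithm>
    #include <cstdint>
    #include <cstdlib>

    // Hypothetical peak meter with decay, in the spirit of a level
    // indicator fed once per 10 ms frame.
    class PeakLevel {
     public:
      void ComputeLevel(const int16_t* samples, size_t count) {
        int peak = 0;
        for (size_t i = 0; i < count; ++i)
          peak = std::max(peak, std::abs(static_cast<int>(samples[i])));
        // Rise instantly, decay gradually across frames; never go below 0.
        level_ = std::max(std::min(peak, 32767), level_ - kDecayPerFrame);
      }
      int LevelFullRange() const { return level_; }  // 0..32767

     private:
      static constexpr int kDecayPerFrame = 200;  // illustrative decay rate
      int level_ = 0;
    };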
263 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 265 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
264 const Frequency& frequency) { | 266 const Frequency& frequency) { |
265 CriticalSectionScoped cs(crit_.get()); | 267 CriticalSectionScoped cs(crit_.get()); |
266 | 268 |
267 output_frequency_ = frequency; | 269 output_frequency_ = frequency; |
268 sample_size_ = | 270 sample_size_ = |
269 static_cast<size_t>((output_frequency_ * kProcessPeriodicityInMs) / 1000); | 271 static_cast<size_t>((output_frequency_ * kProcessPeriodicityInMs) / 1000); |
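For example, at a 48000 Hz output frequency with the usual 10 ms processing period (kProcessPeriodicityInMs), this works out to sample_size_ = 48000 * 10 / 1000 = 480 samples per channel per frame.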
(...skipping 314 matching lines...)
584 *mixedAudio += *mixedAudio; | 586 *mixedAudio += *mixedAudio; |
585 | 587 |
586 if (error != limiter_->kNoError) { | 588 if (error != limiter_->kNoError) { |
587 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 589 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
588 "Error from AudioProcessing: %d", error); | 590 "Error from AudioProcessing: %d", error); |
589 RTC_NOTREACHED(); | 591 RTC_NOTREACHED(); |
590 return false; | 592 return false; |
591 } | 593 } |
592 return true; | 594 return true; |
593 } | 595 } |
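Note the self-add a few lines up: every stream was halved in MixFrames before summing, so once the limiter (an AudioProcessing instance in this file) has processed the mix, *mixedAudio += *mixedAudio doubles it back to nominal level; AudioFrame's += saturates, so anything the limiter failed to contain clips rather than wraps. The gain bookkeeping, roughly (a sketch, not the actual call sequence):

    // mix = sum_i(frame_i >> 1)   (MixFrames: 6 dB headroom per source)
    // mix = Limiter(mix)          (AudioProcessing limiter)
    // mix = mix + mix             (restore 2x, saturating add)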
596 | |
597 int NewAudioConferenceMixerImpl::GetOutputAudioLevel() { | |
598 int level = audio_level_.Level(); | |
kwiberg-webrtc 2016/08/24 08:04:38:
It's a good habit to make local variables const if they aren't modified later.
aleloi 2016/08/24 08:15:16:
Done, thanks for the tip!
599 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | |
600 "GetAudioOutputLevel() => level=%d", level); | |
601 return level; | |
602 } | |
603 | |
604 int NewAudioConferenceMixerImpl::GetOutputAudioLevelFullRange() { | |
605 int level = audio_level_.LevelFullRange(); | |
606 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | |
607 "GetAudioOutputLevelFullRange() => level=%d", level); | |
608 return level; | |
609 } | |
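A hypothetical caller of the two new getters, e.g. for a UI meter. The 0..9 scale for GetOutputAudioLevel() versus 0..32767 for the full-range variant is an assumption carried over from the analogous VoiceEngine channel API, and the sketch assumes the mixer's header is included:

    #include <cstdio>

    // Hypothetical polling function; |mixer| is a live mixer instance.
    void PrintMixerLevel(webrtc::NewAudioConferenceMixerImpl* mixer) {
      const int coarse = mixer->GetOutputAudioLevel();         // assumed 0..9
      const int full = mixer->GetOutputAudioLevelFullRange();  // assumed 0..32767
      std::printf("output level: %d/9 (%d full range)\n", coarse, full);
    }

(The const locals follow the review comment above.)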
594 } // namespace webrtc | 610 } // namespace webrtc |