OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | 14 #include <functional> |
15 | 15 |
16 #include "webrtc/base/thread_annotations.h" | |
17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 16 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
18 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" | |
19 #include "webrtc/modules/audio_processing/include/audio_processing.h" | |
20 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 17 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
21 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | |
22 #include "webrtc/system_wrappers/include/trace.h" | 18 #include "webrtc/system_wrappers/include/trace.h" |
23 | 19 |
24 namespace webrtc { | 20 namespace webrtc { |
25 namespace { | 21 namespace { |
26 | 22 |
27 class SourceFrame { | 23 class SourceFrame { |
28 public: | 24 public: |
29 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 25 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) |
30 : audio_source_(p), | 26 : audio_source_(p), |
31 audio_frame_(a), | 27 audio_frame_(a), |
(...skipping 32 matching lines...) |
64 } | 60 } |
65 | 61 |
66 MixerAudioSource* audio_source_; | 62 MixerAudioSource* audio_source_; |
67 AudioFrame* audio_frame_; | 63 AudioFrame* audio_frame_; |
68 bool muted_; | 64 bool muted_; |
69 uint32_t energy_; | 65 uint32_t energy_; |
70 bool was_mixed_before_; | 66 bool was_mixed_before_; |
71 }; | 67 }; |
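The folded lines above compute and cache each source's per-frame energy, which is later used to rank sources for mixing. A minimal sketch of such an energy computation, assuming the usual interleaved int16 PCM layout of webrtc::AudioFrame; the function name and signature here are illustrative, not the actual helper from audio_frame_manipulator.h:

    #include <cstddef>
    #include <cstdint>

    // Sum of squared samples over the whole interleaved buffer. The sum can
    // wrap for loud, long frames, which is tolerable when the value is only
    // used to rank sources against each other.
    uint32_t CalculateEnergySketch(const int16_t* data,
                                   size_t samples_per_channel,
                                   size_t num_channels) {
      uint32_t energy = 0;
      const size_t total = samples_per_channel * num_channels;
      for (size_t i = 0; i < total; ++i) {
        energy += static_cast<uint32_t>(data[i] * data[i]);
      }
      return energy;
    }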
72 | 68 |
73 // Remixes a frame between stereo and mono. | 69 // Remixes a frame between stereo and mono. |
74 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { | 70 void RemixFrame(AudioFrame* frame, int number_of_channels) { |
75 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 71 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
76 if (frame->num_channels_ == 1 && number_of_channels == 2) { | 72 if (frame->num_channels_ == 1 && number_of_channels == 2) { |
77 AudioFrameOperations::MonoToStereo(frame); | 73 AudioFrameOperations::MonoToStereo(frame); |
78 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { | 74 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { |
79 AudioFrameOperations::StereoToMono(frame); | 75 AudioFrameOperations::StereoToMono(frame); |
80 } | 76 } |
81 } | 77 } |
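For reference, a minimal sketch of the two conversions RemixFrame dispatches to. The real AudioFrameOperations::MonoToStereo/StereoToMono work in place on an AudioFrame; these raw-buffer versions only illustrate the sample math and are an assumption about the exact arithmetic:

    #include <cstddef>
    #include <cstdint>

    // Mono -> stereo: duplicate each sample into both interleaved channels.
    void MonoToStereoSketch(const int16_t* mono, int16_t* stereo,
                            size_t samples_per_channel) {
      for (size_t i = 0; i < samples_per_channel; ++i) {
        stereo[2 * i] = mono[i];
        stereo[2 * i + 1] = mono[i];
      }
    }

    // Stereo -> mono: average the two channels of each sample pair.
    void StereoToMonoSketch(const int16_t* stereo, int16_t* mono,
                            size_t samples_per_channel) {
      for (size_t i = 0; i < samples_per_channel; ++i) {
        mono[i] = static_cast<int16_t>(
            (static_cast<int32_t>(stereo[2 * i]) + stereo[2 * i + 1]) / 2);
      }
    }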
82 | 78 |
83 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { | 79 void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
84 for (const auto& source_frame : mixed_sources_and_frames) { | 80 for (const auto& source_frame : mixed_sources_and_frames) { |
85 // Ramp in previously unmixed. | 81 // Ramp in previously unmixed. |
86 if (!source_frame.was_mixed_before_) { | 82 if (!source_frame.was_mixed_before_) { |
87 NewMixerRampIn(source_frame.audio_frame_); | 83 NewMixerRampIn(source_frame.audio_frame_); |
88 } | 84 } |
89 | 85 |
90 const bool is_mixed = source_frame.audio_source_->mix_history_->IsMixed(); | 86 const bool is_mixed = source_frame.audio_source_->mix_history_->IsMixed(); |
91 // Ramp out currently unmixed. | 87 // Ramp out currently unmixed. |
92 if (source_frame.was_mixed_before_ && !is_mixed) { | 88 if (source_frame.was_mixed_before_ && !is_mixed) { |
93 NewMixerRampOut(source_frame.audio_frame_); | 89 NewMixerRampOut(source_frame.audio_frame_); |
94 } | 90 } |
95 } | 91 } |
96 } | 92 } |
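NewMixerRampIn/NewMixerRampOut (from audio_frame_manipulator.h) fade a source in or out so it does not click when entering or leaving the mix. A hedged sketch of what a linear ramp-in could look like on a mono buffer; the actual gain curve used by the helper may differ:

    #include <cstddef>
    #include <cstdint>

    // Apply a linearly increasing gain (0 -> ~1) across a mono frame. An
    // interleaved stereo frame would apply the same gain to both samples of
    // each pair. Ramp-out is symmetric, with the gain running 1 -> 0.
    void LinearRampInSketch(int16_t* data, size_t samples) {
      for (size_t i = 0; i < samples; ++i) {
        const float gain = static_cast<float>(i) / samples;
        data[i] = static_cast<int16_t>(data[i] * gain);
      }
    }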
97 | 93 |
98 } // namespace | 94 // Mix the AudioFrames stored in audioFrameList into mixed_audio. |
95 int32_t MixFromList(AudioFrame* mixed_audio, | |
96 const AudioFrameList& audio_frame_list, | |
97 int32_t id, | |
98 bool use_limiter) { | |
99 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | |
100 "MixFromList(mixed_audio, audio_frame_list)"); | |
101 if (audio_frame_list.empty()) | |
102 return 0; | |
99 | 103 |
100 MixerAudioSource::MixerAudioSource() : mix_history_(new NewMixHistory()) {} | 104 if (audio_frame_list.size() == 1) { |
105 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; | |
106 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; | |
107 } else { | |
108 // TODO(wu): Issue 3390. | |
109 // Audio frame timestamp is only supported in the one-channel case. |
110 mixed_audio->timestamp_ = 0; | |
111 mixed_audio->elapsed_time_ms_ = -1; | |
112 } | |
101 | 113 |
102 MixerAudioSource::~MixerAudioSource() { | 114 for (const auto& frame : audio_frame_list) { |
103 delete mix_history_; | 115 RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_); |
104 } | 116 RTC_DCHECK_EQ( |
117 frame->samples_per_channel_, | |
118 static_cast<size_t>((mixed_audio->sample_rate_hz_ * | |
119 webrtc::AudioMixerImpl::kFrameDurationInMs) / | |
120 1000)); | |
105 | 121 |
106 bool MixerAudioSource::IsMixed() const { | 122 // Mix |frame| into |mixed_audio|, with saturation protection. |
107 return mix_history_->IsMixed(); | 123 // This effect is applied to |frame| itself prior to mixing. |
108 } | 124 if (use_limiter) { |
109 | 125 // Divide by two to avoid saturation in the mixing. |
110 NewMixHistory::NewMixHistory() : is_mixed_(0) {} | 126 // This is only meaningful if the limiter will be used. |
111 | 127 *frame >>= 1; |
112 NewMixHistory::~NewMixHistory() {} | 128 } |
113 | 129 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); |
114 bool NewMixHistory::IsMixed() const { | 130 *mixed_audio += *frame; |
115 return is_mixed_; | 131 } |
116 } | |
117 | |
118 bool NewMixHistory::WasMixed() const { | |
119 // Was mixed is the same as is mixed depending on perspective. This function | |
120 // is for the perspective of NewAudioConferenceMixerImpl. | |
121 return IsMixed(); | |
122 } | |
123 | |
124 int32_t NewMixHistory::SetIsMixed(const bool mixed) { | |
125 is_mixed_ = mixed; | |
126 return 0; | 132 return 0; |
127 } | 133 } |
128 | 134 |
129 void NewMixHistory::ResetMixedStatus() { | 135 } // namespace |
130 is_mixed_ = false; | |
131 } | |
132 | 136 |
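With MixFromList now file-local, a hedged usage sketch from inside this translation unit (frame setup elided; all frames must already agree on sample rate, channel count, and samples_per_channel, which the DCHECKs in the function enforce):

    AudioFrame mixed_audio;        // assumed pre-configured, e.g. 48 kHz mono
    AudioFrameList frames_to_mix;  // e.g. the result of GetNonAnonymousAudio()
    MixFromList(&mixed_audio, frames_to_mix, /*id=*/0, /*use_limiter=*/true);
    // With use_limiter == true each input is halved (*frame >>= 1) before the
    // += accumulation; LimitMixedAudio() later restores the overall level.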
133 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { | 137 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { |
134 AudioMixerImpl* mixer = new AudioMixerImpl(id); | 138 AudioMixerImpl* mixer = new AudioMixerImpl(id); |
ossu 2016/09/01 15:42:50: This is a bit strange. Not sure it's 100% related
aleloi 2016/09/02 11:52:34: Changed in upstream CL.
| |
135 if (!mixer->Init()) { | 139 if (!mixer->Init()) { |
136 delete mixer; | 140 delete mixer; |
137 return NULL; | 141 return std::unique_ptr<AudioMixer>(nullptr); |
138 } | 142 } |
139 return std::unique_ptr<AudioMixer>(mixer); | 143 return std::unique_ptr<AudioMixer>(mixer); |
140 } | 144 } |
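Caller-side sketch of the factory contract after this change; the surrounding function is hypothetical:

    void CreateMixerExample() {
      std::unique_ptr<AudioMixer> mixer = AudioMixer::Create(/*id=*/0);
      if (!mixer) {
        // Init() failed. Returning std::unique_ptr<AudioMixer>(nullptr)
        // instead of NULL keeps this boolean test valid and the type explicit.
        return;
      }
      // Use the mixer; it is destroyed automatically at end of scope.
    }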
141 | 145 |
142 AudioMixerImpl::AudioMixerImpl(int id) | 146 AudioMixerImpl::AudioMixerImpl(int id) |
143 : id_(id), | 147 : id_(id), |
144 output_frequency_(kDefaultFrequency), | 148 output_frequency_(kDefaultFrequency), |
145 sample_size_(0), | 149 sample_size_(0), |
146 audio_source_list_(), | 150 audio_source_list_(), |
147 additional_audio_source_list_(), | 151 additional_audio_source_list_(), |
(...skipping 36 matching lines...) |
184 if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError) | 188 if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError) |
185 return false; | 189 return false; |
186 | 190 |
187 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) | 191 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) |
188 return false; | 192 return false; |
189 | 193 |
190 return true; | 194 return true; |
191 } | 195 } |
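The folded part of Init() configures the AudioProcessing instance used as a limiter; only the final enable calls are visible above. A hedged sketch of what the hidden configuration typically looked like in this generation of the GainControl API (the exact calls are an assumption, not confirmed by the visible diff):

    if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) !=
        limiter_->kNoError)
      return false;

    // A 0 dB compression gain leaves levels untouched, so the gain control
    // acts purely as a saturation guard rather than as an AGC.
    if (limiter_->gain_control()->set_compression_gain_db(0) !=
        limiter_->kNoError)
      return false;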
192 | 196 |
193 void AudioMixerImpl::Mix(int sample_rate, | 197 void AudioMixerImpl::Mix(int sample_rate, |
194 size_t number_of_channels, | 198 int number_of_channels, |
195 AudioFrame* audio_frame_for_mixing) { | 199 AudioFrame* audio_frame_for_mixing) { |
196 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 200 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
197 RTC_DCHECK_RUN_ON(&thread_checker_); | 201 RTC_DCHECK_RUN_ON(&thread_checker_); |
198 | 202 |
199 if (sample_rate != kNbInHz && sample_rate != kWbInHz && | 203 if (sample_rate != kNbInHz && sample_rate != kWbInHz && |
200 sample_rate != kSwbInHz && sample_rate != kFbInHz) { | 204 sample_rate != kSwbInHz && sample_rate != kFbInHz) { |
201 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 205 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
202 "Invalid frequency: %d", sample_rate); | 206 "Invalid frequency: %d", sample_rate); |
203 RTC_NOTREACHED(); | 207 RTC_NOTREACHED(); |
204 return; | 208 return; |
205 } | 209 } |
206 | 210 |
207 if (OutputFrequency() != sample_rate) { | 211 if (OutputFrequency() != sample_rate) { |
208 SetOutputFrequency(static_cast<Frequency>(sample_rate)); | 212 SetOutputFrequency(static_cast<Frequency>(sample_rate)); |
209 } | 213 } |
210 | 214 |
211 AudioFrameList mix_list; | 215 AudioFrameList mix_list; |
212 AudioFrameList anonymous_mix_list; | 216 AudioFrameList anonymous_mix_list; |
213 int num_mixed_audio_sources; | 217 int num_mixed_audio_sources; |
214 { | 218 { |
215 CriticalSectionScoped cs(crit_.get()); | 219 CriticalSectionScoped cs(crit_.get()); |
216 mix_list = GetNonAnonymousAudio(); | 220 mix_list = GetNonAnonymousAudio(); |
217 anonymous_mix_list = GetAnonymousAudio(); | 221 anonymous_mix_list = GetAnonymousAudio(); |
218 num_mixed_audio_sources = static_cast<int>(num_mixed_audio_sources_); | 222 num_mixed_audio_sources = num_mixed_audio_sources_; |
219 } | 223 } |
220 | 224 |
221 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), | 225 mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), |
222 anonymous_mix_list.end()); | 226 anonymous_mix_list.end()); |
223 | 227 |
224 for (const auto& frame : mix_list) { | 228 for (const auto& frame : mix_list) { |
225 RemixFrame(frame, number_of_channels); | 229 RemixFrame(frame, number_of_channels); |
226 } | 230 } |
227 | 231 |
228 audio_frame_for_mixing->UpdateFrame( | 232 audio_frame_for_mixing->UpdateFrame( |
(...skipping 18 matching lines...) |
247 | 251 |
248 // Pass the final result to the level indicator. | 252 // Pass the final result to the level indicator. |
249 audio_level_.ComputeLevel(*audio_frame_for_mixing); | 253 audio_level_.ComputeLevel(*audio_frame_for_mixing); |
250 | 254 |
251 return; | 255 return; |
252 } | 256 } |
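A hedged caller sketch for the new Mix() signature: one 10 ms tick requesting 48 kHz stereo output. Per the RTC_DCHECK_RUN_ON above, Mix() must be called on the thread checked by thread_checker_; 'mixer' is assumed to come from AudioMixer::Create():

    AudioFrame mixed;
    mixer->Mix(/*sample_rate=*/48000, /*number_of_channels=*/2, &mixed);
    // 'mixed' now holds kFrameDurationInMs (10 ms) of audio: 480 samples
    // per channel at 48 kHz, remixed to two channels.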
253 | 257 |
254 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { | 258 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { |
255 RTC_DCHECK_RUN_ON(&thread_checker_); | 259 RTC_DCHECK_RUN_ON(&thread_checker_); |
256 output_frequency_ = frequency; | 260 output_frequency_ = frequency; |
257 sample_size_ = | 261 sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000; |
258 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); | |
259 | 262 |
260 return 0; | 263 return 0; |
261 } | 264 } |
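Worked example of the sample_size_ formula, with kFrameDurationInMs == 10:

    //  48000 Hz: (48000 * 10) / 1000 = 480 samples per channel per frame
    //  16000 Hz: (16000 * 10) / 1000 = 160 samples per channel per frame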
262 | 265 |
263 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { | 266 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { |
264 RTC_DCHECK_RUN_ON(&thread_checker_); | 267 RTC_DCHECK_RUN_ON(&thread_checker_); |
265 return output_frequency_; | 268 return output_frequency_; |
266 } | 269 } |
267 | 270 |
268 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, | 271 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
(...skipping 19 matching lines...) |
288 } else { | 291 } else { |
289 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); | 292 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); |
290 } | 293 } |
291 if (!success) { | 294 if (!success) { |
292 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 295 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
293 "failed to %s audio_source", mixable ? "add" : "remove"); | 296 "failed to %s audio_source", mixable ? "add" : "remove"); |
294 RTC_NOTREACHED(); | 297 RTC_NOTREACHED(); |
295 return -1; | 298 return -1; |
296 } | 299 } |
297 | 300 |
298 size_t num_mixed_non_anonymous = audio_source_list_.size(); | 301 int num_mixed_non_anonymous = static_cast<int>(audio_source_list_.size()); |
ossu 2016/09/01 15:42:50: Why not just keep this as size_t? According to the
aleloi 2016/09/02 11:52:34: Thanks! I didn't read it carefully enough. Changin
| |
299 if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) { | 302 if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) { |
300 num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources; | 303 num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources; |
301 } | 304 } |
302 num_mixed_audio_sources_ = | 305 num_mixed_audio_sources_ = |
303 num_mixed_non_anonymous + additional_audio_source_list_.size(); | 306 num_mixed_non_anonymous + |
307 static_cast<int>(additional_audio_source_list_.size()); | |
304 } | 308 } |
305 return 0; | 309 return 0; |
306 } | 310 } |
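An equivalent, slightly tighter form of the capping logic above, as a sketch. The casts assume kMaximumAmountOfMixedAudioSources is an integral constant (std::min needs both arguments to share a type); <algorithm> is already included at the top of this file:

    num_mixed_audio_sources_ =
        std::min(static_cast<int>(audio_source_list_.size()),
                 static_cast<int>(kMaximumAmountOfMixedAudioSources)) +
        static_cast<int>(additional_audio_source_list_.size());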
307 | 311 |
308 bool AudioMixerImpl::MixabilityStatus( | 312 bool AudioMixerImpl::MixabilityStatus( |
309 const MixerAudioSource& audio_source) const { | 313 const MixerAudioSource& audio_source) const { |
310 CriticalSectionScoped cs(crit_.get()); | 314 CriticalSectionScoped cs(crit_.get()); |
311 return IsAudioSourceInList(audio_source, audio_source_list_); | 315 return IsAudioSourceInList(audio_source, audio_source_list_); |
312 } | 316 } |
313 | 317 |
(...skipping 161 matching lines...) |
475 if (iter != audio_source_list->end()) { | 479 if (iter != audio_source_list->end()) { |
476 audio_source_list->erase(iter); | 480 audio_source_list->erase(iter); |
477 // AudioSource is no longer mixed, reset to default. | 481 // AudioSource is no longer mixed, reset to default. |
478 audio_source->mix_history_->ResetMixedStatus(); | 482 audio_source->mix_history_->ResetMixedStatus(); |
479 return true; | 483 return true; |
480 } else { | 484 } else { |
481 return false; | 485 return false; |
482 } | 486 } |
483 } | 487 } |
484 | 488 |
485 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixed_audio, | |
486 const AudioFrameList& audio_frame_list, | |
487 int32_t id, | |
488 bool use_limiter) { | |
489 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | |
490 "MixFromList(mixed_audio, audio_frame_list)"); | |
491 if (audio_frame_list.empty()) | |
492 return 0; | |
493 | |
494 if (audio_frame_list.size() == 1) { | |
495 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; | |
496 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; | |
497 } else { | |
498 // TODO(wu): Issue 3390. | |
499 // Audio frame timestamp is only supported in one channel case. | |
500 mixed_audio->timestamp_ = 0; | |
501 mixed_audio->elapsed_time_ms_ = -1; | |
502 } | |
503 | |
504 for (const auto& frame : audio_frame_list) { | |
505 RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_); | |
506 RTC_DCHECK_EQ( | |
507 frame->samples_per_channel_, | |
508 static_cast<size_t>( | |
509 (mixed_audio->sample_rate_hz_ * kFrameDurationInMs) / 1000)); | |
510 | |
511 // Mix |f.frame| into |mixed_audio|, with saturation protection. | |
512 // These effect is applied to |f.frame| itself prior to mixing. | |
513 if (use_limiter) { | |
514 // Divide by two to avoid saturation in the mixing. | |
515 // This is only meaningful if the limiter will be used. | |
516 *frame >>= 1; | |
517 } | |
518 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); | |
519 *mixed_audio += *frame; | |
520 } | |
521 return 0; | |
522 } | |
523 | |
524 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { | 489 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { |
525 RTC_DCHECK_RUN_ON(&thread_checker_); | 490 RTC_DCHECK_RUN_ON(&thread_checker_); |
526 if (!use_limiter_) { | 491 if (!use_limiter_) { |
527 return true; | 492 return true; |
528 } | 493 } |
529 | 494 |
530 // Smoothly limit the mixed frame. | 495 // Smoothly limit the mixed frame. |
531 const int error = limiter_->ProcessStream(mixed_audio); | 496 const int error = limiter_->ProcessStream(mixed_audio); |
532 | 497 |
533 // And now we can safely restore the level. This procedure results in | 498 // And now we can safely restore the level. This procedure results in |
(...skipping 26 matching lines...) |
560 } | 525 } |
561 | 526 |
562 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 527 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
563 RTC_DCHECK_RUN_ON(&thread_checker_); | 528 RTC_DCHECK_RUN_ON(&thread_checker_); |
564 const int level = audio_level_.LevelFullRange(); | 529 const int level = audio_level_.LevelFullRange(); |
565 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 530 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
566 "GetAudioOutputLevelFullRange() => level=%d", level); | 531 "GetAudioOutputLevelFullRange() => level=%d", level); |
567 return level; | 532 return level; |
568 } | 533 } |
569 } // namespace webrtc | 534 } // namespace webrtc |