OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | 14 #include <functional> |
15 #include <utility> | 15 #include <utility> |
16 | 16 |
17 #include "webrtc/base/logging.h" | 17 #include "webrtc/base/logging.h" |
18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
19 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
20 | 20 |
21 namespace webrtc { | 21 namespace webrtc { |
22 namespace { | 22 namespace { |
23 | 23 |
24 struct SourceFrame { | 24 struct SourceFrame { |
25 SourceFrame(AudioMixerImpl::SourceStatus* source_status, | 25 SourceFrame(AudioMixerImpl::SourceStatusWithFrame* source_status_with_frame, |
26 AudioFrame* audio_frame, | 26 AudioFrame* audio_frame, |
27 bool muted) | 27 bool muted) |
28 : source_status(source_status), audio_frame(audio_frame), muted(muted) { | 28 : source_status_with_frame(source_status_with_frame), |
29 RTC_DCHECK(source_status); | 29 audio_frame(audio_frame), |
30 muted(muted) { | |
31 RTC_DCHECK(source_status_with_frame); | |
30 RTC_DCHECK(audio_frame); | 32 RTC_DCHECK(audio_frame); |
31 if (!muted) { | 33 if (!muted) { |
32 energy = AudioMixerCalculateEnergy(*audio_frame); | 34 energy = AudioMixerCalculateEnergy(*audio_frame); |
33 } | 35 } |
34 } | 36 } |
35 | 37 |
36 SourceFrame(AudioMixerImpl::SourceStatus* source_status, | 38 SourceFrame(AudioMixerImpl::SourceStatusWithFrame* source_status_with_frame, |
37 AudioFrame* audio_frame, | 39 AudioFrame* audio_frame, |
38 bool muted, | 40 bool muted, |
39 uint32_t energy) | 41 uint32_t energy) |
40 : source_status(source_status), | 42 : source_status_with_frame(source_status_with_frame), |
41 audio_frame(audio_frame), | 43 audio_frame(audio_frame), |
42 muted(muted), | 44 muted(muted), |
43 energy(energy) { | 45 energy(energy) { |
44 RTC_DCHECK(source_status); | 46 RTC_DCHECK(source_status_with_frame); |
45 RTC_DCHECK(audio_frame); | 47 RTC_DCHECK(audio_frame); |
46 } | 48 } |
47 | 49 |
48 AudioMixerImpl::SourceStatus* source_status = nullptr; | 50 AudioMixerImpl::SourceStatusWithFrame* source_status_with_frame = nullptr; |
49 AudioFrame* audio_frame = nullptr; | 51 AudioFrame* audio_frame = nullptr; |
50 bool muted = true; | 52 bool muted = true; |
51 uint32_t energy = 0; | 53 uint32_t energy = 0; |
52 }; | 54 }; |
53 | 55 |
54 // ShouldMixBefore(a, b) is used to select mixer sources. | 56 // ShouldMixBefore(a, b) is used to select mixer sources. |
55 bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) { | 57 bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) { |
56 if (a.muted != b.muted) { | 58 if (a.muted != b.muted) { |
57 return b.muted; | 59 return b.muted; |
58 } | 60 } |
59 | 61 |
60 const auto a_activity = a.audio_frame->vad_activity_; | 62 const auto a_activity = a.audio_frame->vad_activity_; |
61 const auto b_activity = b.audio_frame->vad_activity_; | 63 const auto b_activity = b.audio_frame->vad_activity_; |
62 | 64 |
63 if (a_activity != b_activity) { | 65 if (a_activity != b_activity) { |
64 return a_activity == AudioFrame::kVadActive; | 66 return a_activity == AudioFrame::kVadActive; |
65 } | 67 } |
66 | 68 |
67 return a.energy > b.energy; | 69 return a.energy > b.energy; |
68 } | 70 } |
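
A stand-alone sketch of the ordering this comparator defines: unmuted frames sort before muted ones, voice-active frames before inactive ones, and higher-energy frames first. MiniFrame and MixBefore are invented names for illustration only, not WebRTC types:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct MiniFrame {
      bool muted;
      bool vad_active;
      uint32_t energy;
    };

    // Mirrors the three-step comparison in ShouldMixBefore.
    bool MixBefore(const MiniFrame& a, const MiniFrame& b) {
      if (a.muted != b.muted) return b.muted;                 // unmuted wins
      if (a.vad_active != b.vad_active) return a.vad_active;  // speech wins
      return a.energy > b.energy;                             // loudest wins
    }

    int main() {
      std::vector<MiniFrame> frames = {
          {false, false, 10}, {true, true, 99}, {false, true, 5}};
      std::sort(frames.begin(), frames.end(), MixBefore);
      // Result: {unmuted+VAD, energy 5}, {unmuted, energy 10}, {muted}.
    }
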
69 | 71 |
70 void RampAndUpdateGain( | 72 void RampAndUpdateGain( |
71 const std::vector<SourceFrame>& mixed_sources_and_frames) { | 73 const std::vector<SourceFrame>& mixed_sources_and_frames) { |
72 for (const auto& source_frame : mixed_sources_and_frames) { | 74 for (const auto& source_frame : mixed_sources_and_frames) { |
73 float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f; | 75 float target_gain = |
74 Ramp(source_frame.source_status->gain, target_gain, | 76 source_frame.source_status_with_frame->is_mixed ? 1.0f : 0.0f; |
77 Ramp(source_frame.source_status_with_frame->gain, target_gain, | |
75 source_frame.audio_frame); | 78 source_frame.audio_frame); |
76 source_frame.source_status->gain = target_gain; | 79 source_frame.source_status_with_frame->gain = target_gain; |
77 } | 80 } |
78 } | 81 } |
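
Ramp() itself lives in audio_frame_manipulator. A plausible minimal version, assuming a linear per-sample interpolation from the previous gain to the target; this is a sketch, not the actual WebRTC implementation, and RampGain is an invented name:

    #include <cstddef>
    #include <cstdint>

    // Fade each sample from gain `from` to gain `to` across the frame so
    // that sources entering or leaving the mix do not click.
    void RampGain(float from, float to, int16_t* samples, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        const float g =
            from + (to - from) * static_cast<float>(i) / static_cast<float>(n);
        samples[i] = static_cast<int16_t>(g * samples[i]);
      }
    }

    int main() {
      int16_t frame[160];
      for (auto& s : frame) s = 1000;
      RampGain(0.0f, 1.0f, frame, 160);  // fade-in: frame[0] == 0, frame[159] == 993
    }
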
79 | 82 |
80 // Mix the AudioFrames stored in audio_frame_list into mixed_audio. | 83 // Mix the AudioFrames stored in audio_frame_list into mixed_audio. |
81 int32_t MixFromList(AudioFrame* mixed_audio, | 84 int32_t MixFromList(AudioFrame* mixed_audio, |
82 const AudioFrameList& audio_frame_list, | 85 const AudioFrameList& audio_frame_list, |
83 bool use_limiter) { | 86 bool use_limiter) { |
84 if (audio_frame_list.empty()) { | 87 if (audio_frame_list.empty()) { |
85 return 0; | 88 return 0; |
86 } | 89 } |
(...skipping 22 matching lines...) | |
109 // Divide by two to avoid saturation in the mixing. | 112 // Divide by two to avoid saturation in the mixing. |
110 // This is only meaningful if the limiter will be used. | 113 // This is only meaningful if the limiter will be used. |
111 *frame >>= 1; | 114 *frame >>= 1; |
112 } | 115 } |
113 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); | 116 RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_); |
114 *mixed_audio += *frame; | 117 *mixed_audio += *frame; |
115 } | 118 } |
116 return 0; | 119 return 0; |
117 } | 120 } |
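
The `*frame >>= 1` above halves every sample before summation. A tiny stand-alone illustration of why: the sum of two loud int16 samples no longer fits in int16_t, while the sum of their halves does, and the limiter can restore the level afterwards:

    #include <cstdint>
    #include <iostream>

    int main() {
      const int16_t a = 30000, b = 30000;
      const int32_t full_sum = int32_t{a} + b;  // 60000, does not fit in int16_t (max 32767)
      const int16_t half_sum = static_cast<int16_t>((a >> 1) + (b >> 1));  // 30000, fits
      std::cout << full_sum << " vs " << half_sum << "\n";
    }
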
118 | 121 |
119 AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList( | 122 AudioMixerImpl::SourceStatusWithFrameList::const_iterator FindSourceInList( |
120 AudioMixerImpl::Source const* audio_source, | 123 AudioMixerImpl::Source const* audio_source, |
121 AudioMixerImpl::SourceStatusList const* audio_source_list) { | 124 AudioMixerImpl::SourceStatusWithFrameList const* audio_source_list) { |
122 return std::find_if(audio_source_list->begin(), audio_source_list->end(), | 125 return std::find_if( |
123 [audio_source](const AudioMixerImpl::SourceStatus& p) { | 126 audio_source_list->begin(), audio_source_list->end(), |
124 return p.audio_source == audio_source; | 127 [audio_source]( |
125 }); | 128 const std::unique_ptr<AudioMixerImpl::SourceStatusWithFrame>& p) { |
129 return p->audio_source == audio_source; | |
130 }); | |
126 } | 131 } |
127 | 132 |
128 // TODO(aleloi): remove non-const version when WEBRTC only supports modern STL. | 133 // TODO(aleloi): remove non-const version when WEBRTC only supports modern STL. |
129 AudioMixerImpl::SourceStatusList::iterator FindSourceInList( | 134 AudioMixerImpl::SourceStatusWithFrameList::iterator FindSourceInList( |
130 AudioMixerImpl::Source const* audio_source, | 135 AudioMixerImpl::Source const* audio_source, |
131 AudioMixerImpl::SourceStatusList* audio_source_list) { | 136 AudioMixerImpl::SourceStatusWithFrameList* audio_source_list) { |
132 return std::find_if(audio_source_list->begin(), audio_source_list->end(), | 137 return std::find_if( |
133 [audio_source](const AudioMixerImpl::SourceStatus& p) { | 138 audio_source_list->begin(), audio_source_list->end(), |
134 return p.audio_source == audio_source; | 139 [audio_source]( |
135 }); | 140 const std::unique_ptr<AudioMixerImpl::SourceStatusWithFrame>& p) { |
141 return p->audio_source == audio_source; | |
142 }); | |
136 } | 143 } |
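
The TODO exists because std::vector::erase() only gained a const_iterator overload in C++11, and not every standard-library build supported it at the time; with a modern STL the const version of FindSourceInList alone would suffice. A minimal demonstration of the modern form:

    #include <algorithm>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 3};
      auto cit = std::find(v.cbegin(), v.cend(), 2);  // const_iterator
      v.erase(cit);  // valid since C++11; older libraries required a mutable iterator
    }
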
137 | 144 |
138 } // namespace | 145 } // namespace |
139 | 146 |
140 AudioMixerImpl::AudioMixerImpl(std::unique_ptr<AudioProcessing> limiter) | 147 AudioMixerImpl::AudioMixerImpl(std::unique_ptr<AudioProcessing> limiter) |
141 : audio_source_list_(), | 148 : audio_source_list_(), |
142 use_limiter_(true), | 149 use_limiter_(true), |
143 time_stamp_(0), | 150 time_stamp_(0), |
144 limiter_(std::move(limiter)) { | 151 limiter_(std::move(limiter)) { |
145 SetOutputFrequency(kDefaultFrequency); | 152 SetOutputFrequency(kDefaultFrequency); |
(...skipping 92 matching lines...) | |
238 RTC_DCHECK_RUN_ON(&thread_checker_); | 245 RTC_DCHECK_RUN_ON(&thread_checker_); |
239 return output_frequency_; | 246 return output_frequency_; |
240 } | 247 } |
241 | 248 |
242 bool AudioMixerImpl::AddSource(Source* audio_source) { | 249 bool AudioMixerImpl::AddSource(Source* audio_source) { |
243 RTC_DCHECK(audio_source); | 250 RTC_DCHECK(audio_source); |
244 rtc::CritScope lock(&crit_); | 251 rtc::CritScope lock(&crit_); |
245 RTC_DCHECK(FindSourceInList(audio_source, &audio_source_list_) == | 252 RTC_DCHECK(FindSourceInList(audio_source, &audio_source_list_) == |
246 audio_source_list_.end()) | 253 audio_source_list_.end()) |
247 << "Source already added to mixer"; | 254 << "Source already added to mixer"; |
248 audio_source_list_.emplace_back(audio_source, false, 0); | 255 audio_source_list_.emplace_back( |
256 new SourceStatusWithFrame(audio_source, false, 0)); | |
249 return true; | 257 return true; |
250 } | 258 } |
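
The emplace_back(new ...) works because the list now holds std::unique_ptr elements, whose explicit pointer constructor is usable under emplace's direct-initialization; the raw new is presumably used because C++14's std::make_unique was not yet available to the project. A sketch of the pattern and its C++14 alternative, with Status as an invented stand-in type:

    #include <memory>
    #include <vector>

    struct Status {
      explicit Status(int id) : id(id) {}
      int id;
    };

    int main() {
      std::vector<std::unique_ptr<Status>> list;
      list.emplace_back(new Status(1));             // the pattern used in the CL
      list.push_back(std::make_unique<Status>(2));  // C++14 equivalent, avoids raw new
    }
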
251 | 259 |
252 bool AudioMixerImpl::RemoveSource(Source* audio_source) { | 260 bool AudioMixerImpl::RemoveSource(Source* audio_source) { |
253 RTC_DCHECK(audio_source); | 261 RTC_DCHECK(audio_source); |
254 rtc::CritScope lock(&crit_); | 262 rtc::CritScope lock(&crit_); |
255 const auto iter = FindSourceInList(audio_source, &audio_source_list_); | 263 const auto iter = FindSourceInList(audio_source, &audio_source_list_); |
256 RTC_DCHECK(iter != audio_source_list_.end()) << "Source not present in mixer"; | 264 RTC_DCHECK(iter != audio_source_list_.end()) << "Source not present in mixer"; |
257 audio_source_list_.erase(iter); | 265 audio_source_list_.erase(iter); |
258 return true; | 266 return true; |
259 } | 267 } |
260 | 268 |
261 AudioFrameList AudioMixerImpl::GetAudioFromSources() { | 269 AudioFrameList AudioMixerImpl::GetAudioFromSources() { |
262 RTC_DCHECK_RUN_ON(&thread_checker_); | 270 RTC_DCHECK_RUN_ON(&thread_checker_); |
263 AudioFrameList result; | 271 AudioFrameList result; |
264 std::vector<SourceFrame> audio_source_mixing_data_list; | 272 std::vector<SourceFrame> audio_source_mixing_data_list; |
265 std::vector<SourceFrame> ramp_list; | 273 std::vector<SourceFrame> ramp_list; |
266 | 274 |
267 // Get audio source audio and put it in the struct vector. | 275 // Get audio from the audio sources and put it in the SourceFrame vector. |
aleloi 2016/10/14 15:34:44: This comment doesn't have to do anything with the
kwiberg-webrtc 2016/10/14 19:59:32: Acknowledged.
268 for (auto& source_and_status : audio_source_list_) { | 276 for (auto& source_and_status : audio_source_list_) { |
269 auto audio_frame_with_info = | 277 AudioFrame* const audio_source_audio_frame = |
270 source_and_status.audio_source->GetAudioFrameWithInfo( | 278 &source_and_status->audio_frame; |
271 static_cast<int>(OutputFrequency())); | 279 const auto audio_frame_info = |
272 | 280 source_and_status->audio_source->GetAudioFrameWithInfo( |
273 const auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 281 OutputFrequency(), audio_source_audio_frame); |
274 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | |
275 | 282 |
276 if (audio_frame_info == Source::AudioFrameInfo::kError) { | 283 if (audio_frame_info == Source::AudioFrameInfo::kError) { |
277 LOG_F(LS_WARNING) << "failed to GetAudioFrameWithInfo() from source"; | 284 LOG_F(LS_WARNING) << "failed to GetAudioFrameWithInfo() from source"; |
278 continue; | 285 continue; |
279 } | 286 } |
280 audio_source_mixing_data_list.emplace_back( | 287 audio_source_mixing_data_list.emplace_back( |
281 &source_and_status, audio_source_audio_frame, | 288 // Using get() is OK here, since audio_source_mixing_data_list is |
289 // local and no SourceStatus pointers leak outside of | |
290 // this method (they are passed to Ramp, but are not leaked | |
291 // from there) | |
292 source_and_status.get(), audio_source_audio_frame, | |
282 audio_frame_info == Source::AudioFrameInfo::kMuted); | 293 audio_frame_info == Source::AudioFrameInfo::kMuted); |
283 } | 294 } |
284 | 295 |
285 // Sort the frames using ShouldMixBefore. | 296 // Sort the frames using ShouldMixBefore. |
286 std::sort(audio_source_mixing_data_list.begin(), | 297 std::sort(audio_source_mixing_data_list.begin(), |
287 audio_source_mixing_data_list.end(), ShouldMixBefore); | 298 audio_source_mixing_data_list.end(), ShouldMixBefore); |
288 | 299 |
289 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; | 300 int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources; |
290 | 301 |
291 // Go through list in order and put unmuted frames in result list. | 302 // Go through list in order and put unmuted frames in result list. |
292 for (const auto& p : audio_source_mixing_data_list) { | 303 for (const auto& p : audio_source_mixing_data_list) { |
293 // Filter muted. | 304 // Filter muted. |
294 if (p.muted) { | 305 if (p.muted) { |
295 p.source_status->is_mixed = false; | 306 p.source_status_with_frame->is_mixed = false; |
296 continue; | 307 continue; |
297 } | 308 } |
298 | 309 |
299 // Add frame to result vector for mixing. | 310 // Add frame to result vector for mixing. |
300 bool is_mixed = false; | 311 bool is_mixed = false; |
301 if (max_audio_frame_counter > 0) { | 312 if (max_audio_frame_counter > 0) { |
302 --max_audio_frame_counter; | 313 --max_audio_frame_counter; |
303 result.push_back(p.audio_frame); | 314 result.push_back(p.audio_frame); |
304 ramp_list.emplace_back(p.source_status, p.audio_frame, false, -1); | 315 ramp_list.emplace_back(p.source_status_with_frame, p.audio_frame, false, |
316 -1); | |
305 is_mixed = true; | 317 is_mixed = true; |
306 } | 318 } |
307 p.source_status->is_mixed = is_mixed; | 319 p.source_status_with_frame->is_mixed = is_mixed; |
308 } | 320 } |
309 RampAndUpdateGain(ramp_list); | 321 RampAndUpdateGain(ramp_list); |
310 return result; | 322 return result; |
311 } | 323 } |
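
Condensed, the selection in GetAudioFromSources is: fetch one frame per source, sort with ShouldMixBefore, then mix at most kMaximumAmountOfMixedAudioSources unmuted frames and record is_mixed on every source. A self-contained sketch of that cap-at-N step, with Candidate as an invented stand-in for SourceFrame:

    #include <algorithm>
    #include <vector>

    struct Candidate {
      int energy;
      bool muted;
      bool is_mixed;  // written back, like SourceStatusWithFrame::is_mixed
    };

    int main() {
      std::vector<Candidate> list = {{50, false, false},
                                     {90, false, false},
                                     {10, true, false},
                                     {70, false, false}};
      std::sort(list.begin(), list.end(),
                [](const Candidate& a, const Candidate& b) {
                  if (a.muted != b.muted) return b.muted;  // unmuted first
                  return a.energy > b.energy;              // then loudest first
                });
      int budget = 2;  // stands in for kMaximumAmountOfMixedAudioSources
      for (auto& c : list) {
        c.is_mixed = !c.muted && budget > 0;
        if (c.is_mixed) --budget;
      }
      // Only the two loudest unmuted candidates (90 and 70) end up mixed.
    }
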
312 | 324 |
313 | 325 |
314 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { | 326 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const { |
315 RTC_DCHECK_RUN_ON(&thread_checker_); | 327 RTC_DCHECK_RUN_ON(&thread_checker_); |
316 if (!use_limiter_) { | 328 if (!use_limiter_) { |
317 return true; | 329 return true; |
(...skipping 23 matching lines...) | |
341 } | 353 } |
342 | 354 |
343 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( | 355 bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( |
344 AudioMixerImpl::Source* audio_source) const { | 356 AudioMixerImpl::Source* audio_source) const { |
345 RTC_DCHECK_RUN_ON(&thread_checker_); | 357 RTC_DCHECK_RUN_ON(&thread_checker_); |
346 rtc::CritScope lock(&crit_); | 358 rtc::CritScope lock(&crit_); |
347 | 359 |
348 const auto non_anonymous_iter = | 360 const auto non_anonymous_iter = |
349 FindSourceInList(audio_source, &audio_source_list_); | 361 FindSourceInList(audio_source, &audio_source_list_); |
350 if (non_anonymous_iter != audio_source_list_.end()) { | 362 if (non_anonymous_iter != audio_source_list_.end()) { |
351 return non_anonymous_iter->is_mixed; | 363 return (*non_anonymous_iter)->is_mixed; |
352 } | 364 } |
353 | 365 |
354 LOG(LS_ERROR) << "Audio source unknown"; | 366 LOG(LS_ERROR) << "Audio source unknown"; |
355 return false; | 367 return false; |
356 } | 368 } |
357 } // namespace webrtc | 369 } // namespace webrtc |