Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2286343002: Less lock acquisitions for AudioMixer. (Closed)
Patch Set: Created 4 years, 3 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <functional> 14 #include <functional>
15 15
16 #include "webrtc/base/thread_annotations.h"
16 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" 17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
17 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" 18 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
18 #include "webrtc/modules/audio_processing/include/audio_processing.h" 19 #include "webrtc/modules/audio_processing/include/audio_processing.h"
19 #include "webrtc/modules/utility/include/audio_frame_operations.h" 20 #include "webrtc/modules/utility/include/audio_frame_operations.h"
20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" 21 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
21 #include "webrtc/system_wrappers/include/trace.h" 22 #include "webrtc/system_wrappers/include/trace.h"
22 23
23 namespace webrtc { 24 namespace webrtc {
24 namespace { 25 namespace {
25 26
(...skipping 114 matching lines...)
140 thread_checker_.DetachFromThread(); 141 thread_checker_.DetachFromThread();
141 } 142 }
142 143
143 AudioMixerImpl::~AudioMixerImpl() {} 144 AudioMixerImpl::~AudioMixerImpl() {}
144 145
145 bool AudioMixerImpl::Init() { 146 bool AudioMixerImpl::Init() {
146 crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); 147 crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
147 if (crit_.get() == NULL) 148 if (crit_.get() == NULL)
148 return false; 149 return false;
149 150
150 cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
151 if (cb_crit_.get() == NULL)
152 return false;
153
154 Config config; 151 Config config;
155 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); 152 config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
156 limiter_.reset(AudioProcessing::Create(config)); 153 limiter_.reset(AudioProcessing::Create(config));
157 if (!limiter_.get()) 154 if (!limiter_.get())
158 return false; 155 return false;
159 156
160 if (SetOutputFrequency(kDefaultFrequency) == -1) 157 if (SetOutputFrequency(kDefaultFrequency) == -1)
161 return false; 158 return false;
162 159
163 if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) != 160 if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) !=
(...skipping 17 matching lines...)
181 return false; 178 return false;
182 179
183 return true; 180 return true;
184 } 181 }
185 182
186 void AudioMixerImpl::Mix(int sample_rate, 183 void AudioMixerImpl::Mix(int sample_rate,
187 size_t number_of_channels, 184 size_t number_of_channels,
188 AudioFrame* audio_frame_for_mixing) { 185 AudioFrame* audio_frame_for_mixing) {
189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); 186 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
190 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 187 RTC_DCHECK(thread_checker_.CalledOnValidThread());
188 std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
189 Frequency mixing_frequency;
190
191 switch (sample_rate) {
ivoc 2016/08/31 09:22:45 It's not really related to the threading/locking issue ...
aleloi 2016/08/31 11:03:10 This is indeed simpler! Thanks!
192 case 8000:
193 mixing_frequency = kNbInHz;
194 break;
195 case 16000:
196 mixing_frequency = kWbInHz;
197 break;
198 case 32000:
199 mixing_frequency = kSwbInHz;
200 break;
201 case 48000:
202 mixing_frequency = kFbInHz;
203 break;
204 default:
205 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
206 "Invalid frequency: %d", sample_rate);
207 RTC_NOTREACHED();
208 return;
209 }
210
211 if (OutputFrequency() != mixing_frequency) {
212 SetOutputFrequency(mixing_frequency);
213 }
214
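An aside on ivoc's comment above: if the AudioMixer::Frequency enumerators (kNbInHz and friends) are defined as their sample rate in Hz — their names suggest so, but the header is not part of this diff — the switch could shrink to a checked cast. A minimal sketch of that idea, not necessarily the change ivoc had in mind:

  // Sketch only: assumes the Frequency enumerators equal their rate in Hz,
  // which this diff does not show.
  RTC_DCHECK(sample_rate == 8000 || sample_rate == 16000 ||
             sample_rate == 32000 || sample_rate == 48000);
  const Frequency mixing_frequency = static_cast<Frequency>(sample_rate);
  if (OutputFrequency() != mixing_frequency) {
    SetOutputFrequency(mixing_frequency);
  }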
191 AudioFrameList mixList; 215 AudioFrameList mixList;
192 AudioFrameList additionalFramesList; 216 AudioFrameList additionalFramesList;
193 std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
194 { 217 {
195 CriticalSectionScoped cs(cb_crit_.get()); 218 CriticalSectionScoped cs(crit_.get());
196 Frequency mixing_frequency;
197
198 switch (sample_rate) {
199 case 8000:
200 mixing_frequency = kNbInHz;
201 break;
202 case 16000:
203 mixing_frequency = kWbInHz;
204 break;
205 case 32000:
206 mixing_frequency = kSwbInHz;
207 break;
208 case 48000:
209 mixing_frequency = kFbInHz;
210 break;
211 default:
212 RTC_NOTREACHED();
213 return;
214 }
215
216 if (OutputFrequency() != mixing_frequency) {
217 SetOutputFrequency(mixing_frequency);
218 }
219
220 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); 219 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources);
221 GetAdditionalAudio(&additionalFramesList); 220 GetAdditionalAudio(&additionalFramesList);
222 } 221 }
223 222
224 for (FrameAndMuteInfo& frame_and_mute : mixList) { 223 for (FrameAndMuteInfo& frame_and_mute : mixList) {
225 RemixFrame(frame_and_mute.frame, number_of_channels); 224 RemixFrame(frame_and_mute.frame, number_of_channels);
226 } 225 }
227 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { 226 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) {
228 RemixFrame(frame_and_mute.frame, number_of_channels); 227 RemixFrame(frame_and_mute.frame, number_of_channels);
229 } 228 }
230 229
231 audio_frame_for_mixing->UpdateFrame( 230 audio_frame_for_mixing->UpdateFrame(
232 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, 231 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
233 AudioFrame::kVadPassive, number_of_channels); 232 AudioFrame::kVadPassive, number_of_channels);
234 233
235 time_stamp_ += static_cast<uint32_t>(sample_size_); 234 time_stamp_ += static_cast<uint32_t>(sample_size_);
236 235
237 use_limiter_ = num_mixed_audio_sources_ > 1; 236 use_limiter_ = num_mixed_audio_sources_ > 1;
238 237
239 // We only use the limiter if it supports the output sample rate and 238 // We only use the limiter if it supports the output sample rate and
240 // we're actually mixing multiple streams. 239 // we're actually mixing multiple streams.
241 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); 240 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_);
242 241
243 { 242 if (audio_frame_for_mixing->samples_per_channel_ == 0) {
244 CriticalSectionScoped cs(crit_.get()); 243 // Nothing was mixed, set the audio samples to silence.
245 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); 244 audio_frame_for_mixing->samples_per_channel_ = sample_size_;
kwiberg-webrtc 2016/08/31 09:10:36 Where did this line go?
aleloi 2016/08/31 11:03:10 It should still be here, but be removed in the next ...
246 245 audio_frame_for_mixing->Mute();
247 if (audio_frame_for_mixing->samples_per_channel_ == 0) { 246 } else {
248 // Nothing was mixed, set the audio samples to silence. 247 // Only call the limiter if we have something to mix.
249 audio_frame_for_mixing->samples_per_channel_ = sample_size_; 248 LimitMixedAudio(audio_frame_for_mixing);
250 audio_frame_for_mixing->Mute();
251 } else {
252 // Only call the limiter if we have something to mix.
253 LimitMixedAudio(audio_frame_for_mixing);
254 }
255 } 249 }
256 250
257 // Pass the final result to the level indicator. 251 // Pass the final result to the level indicator.
258 audio_level_.ComputeLevel(*audio_frame_for_mixing); 252 audio_level_.ComputeLevel(*audio_frame_for_mixing);
259 253
260 return; 254 return;
261 } 255 }
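The restructured Mix() above is the core of this CL: the state that needs synchronization is read in a single pass under one critical section, and everything after that snapshot (remixing, limiting, level computation) runs without holding the lock. Schematically, as an illustration of the pattern rather than a verbatim copy of the diff:

  AudioFrameList mix_list;
  AudioFrameList additional_frames_list;
  {
    // One lock acquisition per Mix() call instead of several, and no second
    // lock (the old cb_crit_) whose ordering with crit_ has to be maintained.
    CriticalSectionScoped cs(crit_.get());
    mix_list = UpdateToMix(kMaximumAmountOfMixedAudioSources);
    GetAdditionalAudio(&additional_frames_list);
  }  // Lock released; the mixing below only touches local copies.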
262 256
263 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { 257 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) {
264 CriticalSectionScoped cs(crit_.get()); 258 RTC_DCHECK(thread_checker_.CalledOnValidThread());
265
266 output_frequency_ = frequency; 259 output_frequency_ = frequency;
267 sample_size_ = 260 sample_size_ =
268 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); 261 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000);
269 262
270 return 0; 263 return 0;
271 } 264 }
272 265
273 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { 266 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
274 CriticalSectionScoped cs(crit_.get()); 267 RTC_DCHECK(thread_checker_.CalledOnValidThread());
275 return output_frequency_; 268 return output_frequency_;
276 } 269 }
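SetOutputFrequency() and OutputFrequency() now assert single-threaded use with a thread checker instead of taking crit_. The corresponding member declarations live in audio_mixer_impl.h, which is not shown here; a rough sketch of the pattern (names follow the .cc file, the rest is assumption):

  // In audio_mixer_impl.h (sketch):
  rtc::ThreadChecker thread_checker_;
  // Accessed only on the thread that calls Mix(), so no lock is needed; the
  // RTC_DCHECK(thread_checker_.CalledOnValidThread()) calls above document
  // and enforce that in debug builds.
  Frequency output_frequency_;
  size_t sample_size_;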
277 270
278 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, 271 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
279 bool mixable) { 272 bool mixable) {
280 if (!mixable) { 273 if (!mixable) {
281 // Anonymous audio sources are in a separate list. Make sure that the 274 // Anonymous audio sources are in a separate list. Make sure that the
282 // audio source is in the _audioSourceList if it is being mixed. 275 // audio source is in the _audioSourceList if it is being mixed.
283 SetAnonymousMixabilityStatus(audio_source, false); 276 SetAnonymousMixabilityStatus(audio_source, false);
284 } 277 }
285 size_t numMixedAudioSources;
286 { 278 {
287 CriticalSectionScoped cs(cb_crit_.get()); 279 CriticalSectionScoped cs(crit_.get());
288 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); 280 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
289 // API must be called with a new state. 281 // API must be called with a new state.
290 if (!(mixable ^ isMixed)) { 282 if (!(mixable ^ isMixed)) {
291 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, 283 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
292 "Mixable is aready %s", isMixed ? "ON" : "off"); 284 "Mixable is aready %s", isMixed ? "ON" : "off");
293 return -1; 285 return -1;
294 } 286 }
295 bool success = false; 287 bool success = false;
296 if (mixable) { 288 if (mixable) {
297 success = AddAudioSourceToList(audio_source, &audio_source_list_); 289 success = AddAudioSourceToList(audio_source, &audio_source_list_);
298 } else { 290 } else {
299 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); 291 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
300 } 292 }
301 if (!success) { 293 if (!success) {
302 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 294 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
303 "failed to %s audio_source", mixable ? "add" : "remove"); 295 "failed to %s audio_source", mixable ? "add" : "remove");
304 RTC_NOTREACHED(); 296 RTC_NOTREACHED();
305 return -1; 297 return -1;
306 } 298 }
307 299
308 size_t numMixedNonAnonymous = audio_source_list_.size(); 300 size_t numMixedNonAnonymous = audio_source_list_.size();
309 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { 301 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
310 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; 302 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
311 } 303 }
312 numMixedAudioSources = 304 num_mixed_audio_sources_ =
313 numMixedNonAnonymous + additional_audio_source_list_.size(); 305 numMixedNonAnonymous + additional_audio_source_list_.size();
314 } 306 }
315 // A MixerAudioSource was added or removed. Make sure the scratch
316 // buffer is updated if necessary.
317 // Note: The scratch buffer may only be updated in Process().
318 CriticalSectionScoped cs(crit_.get());
319 num_mixed_audio_sources_ = numMixedAudioSources;
320 return 0; 307 return 0;
321 } 308 }
322 309
323 bool AudioMixerImpl::MixabilityStatus( 310 bool AudioMixerImpl::MixabilityStatus(
324 const MixerAudioSource& audio_source) const { 311 const MixerAudioSource& audio_source) const {
325 CriticalSectionScoped cs(cb_crit_.get()); 312 CriticalSectionScoped cs(crit_.get());
326 return IsAudioSourceInList(audio_source, audio_source_list_); 313 return IsAudioSourceInList(audio_source, audio_source_list_);
327 } 314 }
328 315
329 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( 316 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
330 MixerAudioSource* audio_source, 317 MixerAudioSource* audio_source,
331 bool anonymous) { 318 bool anonymous) {
332 CriticalSectionScoped cs(cb_crit_.get()); 319 CriticalSectionScoped cs(crit_.get());
333 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { 320 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
334 if (anonymous) { 321 if (anonymous) {
335 return 0; 322 return 0;
336 } 323 }
337 if (!RemoveAudioSourceFromList(audio_source, 324 if (!RemoveAudioSourceFromList(audio_source,
338 &additional_audio_source_list_)) { 325 &additional_audio_source_list_)) {
339 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 326 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
340 "unable to remove audio_source from anonymous list"); 327 "unable to remove audio_source from anonymous list");
341 RTC_NOTREACHED(); 328 RTC_NOTREACHED();
342 return -1; 329 return -1;
(...skipping 13 matching lines...)
356 // already registered. 343 // already registered.
357 return -1; 344 return -1;
358 } 345 }
359 return AddAudioSourceToList(audio_source, &additional_audio_source_list_) 346 return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
360 ? 0 347 ? 0
361 : -1; 348 : -1;
362 } 349 }
363 350
364 bool AudioMixerImpl::AnonymousMixabilityStatus( 351 bool AudioMixerImpl::AnonymousMixabilityStatus(
365 const MixerAudioSource& audio_source) const { 352 const MixerAudioSource& audio_source) const {
366 CriticalSectionScoped cs(cb_crit_.get()); 353 CriticalSectionScoped cs(crit_.get());
367 return IsAudioSourceInList(audio_source, additional_audio_source_list_); 354 return IsAudioSourceInList(audio_source, additional_audio_source_list_);
368 } 355 }
369 356
370 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { 357 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const
358 EXCLUSIVE_LOCKS_REQUIRED(crit_) {
kwiberg-webrtc 2016/08/31 09:10:36 IIRC we always put these annotations in the .h file.
aleloi 2016/08/31 11:03:10 This makes sense! Thanks! I've moved the annotations.
371 AudioFrameList result; 359 AudioFrameList result;
372 std::vector<SourceFrame> audioSourceMixingDataList; 360 std::vector<SourceFrame> audioSourceMixingDataList;
373 361
374 // Get audio source audio and put it in the struct vector. 362 // Get audio source audio and put it in the struct vector.
375 for (MixerAudioSource* audio_source : audio_source_list_) { 363 for (MixerAudioSource* audio_source : audio_source_list_) {
376 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( 364 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
377 id_, static_cast<int>(output_frequency_)); 365 id_, static_cast<int>(output_frequency_));
378 366
379 auto audio_frame_info = audio_frame_with_info.audio_frame_info; 367 auto audio_frame_info = audio_frame_with_info.audio_frame_info;
380 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; 368 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
(...skipping 36 matching lines...)
417 if (p.was_mixed_before_ && !is_mixed) { 405 if (p.was_mixed_before_ && !is_mixed) {
418 NewMixerRampOut(p.audio_frame_); 406 NewMixerRampOut(p.audio_frame_);
419 result.emplace_back(p.audio_frame_, false); 407 result.emplace_back(p.audio_frame_, false);
420 } 408 }
421 409
422 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); 410 p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
423 } 411 }
424 return result; 412 return result;
425 } 413 }
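Per kwiberg's comments, lock annotations conventionally go on the declarations in audio_mixer_impl.h rather than on the definitions here, and that is where aleloi moved them. The header is not part of this diff, so the following is only a sketch of what those declarations might look like; the GUARDED_BY lines on the lists are an additional possibility, not something this CL shows:

  // In audio_mixer_impl.h (sketch):
  AudioFrameList UpdateToMix(size_t maxAudioFrameCounter) const
      EXCLUSIVE_LOCKS_REQUIRED(crit_);
  void GetAdditionalAudio(AudioFrameList* additionalFramesList) const
      EXCLUSIVE_LOCKS_REQUIRED(crit_);
  MixerAudioSourceList audio_source_list_ GUARDED_BY(crit_);
  MixerAudioSourceList additional_audio_source_list_ GUARDED_BY(crit_);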
426 414
427 void AudioMixerImpl::GetAdditionalAudio( 415 void AudioMixerImpl::GetAdditionalAudio(AudioFrameList* additionalFramesList)
428 AudioFrameList* additionalFramesList) const { 416 const EXCLUSIVE_LOCKS_REQUIRED(crit_) {
kwiberg-webrtc 2016/08/31 09:10:36 Move to .h file.
aleloi 2016/08/31 11:03:10 Done.
429 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 417 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
430 "GetAdditionalAudio(additionalFramesList)"); 418 "GetAdditionalAudio(additionalFramesList)");
431 // The GetAudioFrameWithMuted() callback may result in the audio source being 419 // The GetAudioFrameWithMuted() callback may result in the audio source being
432 // removed from additionalAudioFramesList_. If that happens it will 420 // removed from additionalAudioFramesList_. If that happens it will
433 // invalidate any iterators. Create a copy of the audio sources list such 421 // invalidate any iterators. Create a copy of the audio sources list such
434 // that the list of participants can be traversed safely. 422 // that the list of participants can be traversed safely.
435 MixerAudioSourceList additionalAudioSourceList; 423 MixerAudioSourceList additionalAudioSourceList;
436 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), 424 additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
437 additional_audio_source_list_.begin(), 425 additional_audio_source_list_.begin(),
438 additional_audio_source_list_.end()); 426 additional_audio_source_list_.end());
(...skipping 145 matching lines...)
584 return level; 572 return level;
585 } 573 }
586 574
587 int AudioMixerImpl::GetOutputAudioLevelFullRange() { 575 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
588 const int level = audio_level_.LevelFullRange(); 576 const int level = audio_level_.LevelFullRange();
589 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 577 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
590 "GetAudioOutputLevelFullRange() => level=%d", level); 578 "GetAudioOutputLevelFullRange() => level=%d", level);
591 return level; 579 return level;
592 } 580 }
593 } // namespace webrtc 581 } // namespace webrtc