Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2286343002: Less lock acquisitions for AudioMixer. (Closed)
Patch Set: Construction and initialization. Created 4 years, 3 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <functional> 14 #include <functional>
15 #include <utility>
15 16
17 #include "webrtc/base/thread_annotations.h"
16 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" 18 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
17 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h" 19 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
18 #include "webrtc/modules/audio_processing/include/audio_processing.h" 20 #include "webrtc/modules/audio_processing/include/audio_processing.h"
19 #include "webrtc/modules/utility/include/audio_frame_operations.h" 21 #include "webrtc/modules/utility/include/audio_frame_operations.h"
20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" 22 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
21 #include "webrtc/system_wrappers/include/trace.h" 23 #include "webrtc/system_wrappers/include/trace.h"
22 24
23 namespace webrtc { 25 namespace webrtc {
24 namespace { 26 namespace {
25 27
(...skipping 87 matching lines...)
113 int32_t NewMixHistory::SetIsMixed(const bool mixed) { 115 int32_t NewMixHistory::SetIsMixed(const bool mixed) {
114 is_mixed_ = mixed; 116 is_mixed_ = mixed;
115 return 0; 117 return 0;
116 } 118 }
117 119
118 void NewMixHistory::ResetMixedStatus() { 120 void NewMixHistory::ResetMixedStatus() {
119 is_mixed_ = false; 121 is_mixed_ = false;
120 } 122 }
121 123
122 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) { 124 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) {
123 AudioMixerImpl* mixer = new AudioMixerImpl(id); 125 return AudioMixerImpl::Create(id);
124 if (!mixer->Init()) {
125 delete mixer;
126 return NULL;
127 }
128 return std::unique_ptr<AudioMixer>(mixer);
129 } 126 }
130 127
131 AudioMixerImpl::AudioMixerImpl(int id) 128 AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter)
132 : id_(id), 129 : crit_(new CriticalSectionWrapper()),
aleloi 2016/09/02 08:51:21 Comments in critical_section_wrapper.h request to […]
kwiberg-webrtc 2016/09/02 09:00:26 Are you referring to this comment? // Legacy fa[…]
aleloi 2016/09/02 09:27:28 I did a 'git grep' for CriticalSection. The constr[…]
kwiberg-webrtc 2016/09/02 09:44:00 I'm guessing that although no one explicitly calls […]
133 output_frequency_(kDefaultFrequency), 130 id_(id),
134 sample_size_(0),
135 audio_source_list_(), 131 audio_source_list_(),
136 additional_audio_source_list_(), 132 additional_audio_source_list_(),
137 num_mixed_audio_sources_(0), 133 num_mixed_audio_sources_(0),
138 use_limiter_(true), 134 use_limiter_(true),
139 time_stamp_(0) { 135 time_stamp_(0),
136 limiter_(std::move(limiter)) {
137 SetOutputFrequency(kDefaultFrequency);
140 thread_checker_.DetachFromThread(); 138 thread_checker_.DetachFromThread();
141 } 139 }
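The thread above contrasts two ways of obtaining the lock object. A minimal editorial sketch of both styles (not part of the CL; assumes the factory in critical_section_wrapper.h is the legacy path quoted above):

#include <memory>
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"

// Old Init(): legacy factory, which may in principle return NULL and
// therefore needed the explicit check.
std::unique_ptr<webrtc::CriticalSectionWrapper> via_factory(
    webrtc::CriticalSectionWrapper::CreateCriticalSection());

// This patch: direct construction in the member initializer list, which
// cannot fail, so the corresponding NULL check in Init() disappears.
std::unique_ptr<webrtc::CriticalSectionWrapper> direct(
    new webrtc::CriticalSectionWrapper());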
142 140
143 AudioMixerImpl::~AudioMixerImpl() {} 141 AudioMixerImpl::~AudioMixerImpl() {}
144 142
145 bool AudioMixerImpl::Init() { 143 std::unique_ptr<AudioMixer> AudioMixerImpl::Create(int id) {
146 crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
147 if (crit_.get() == NULL)
148 return false;
149
150 cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
151 if (cb_crit_.get() == NULL)
152 return false;
153
154 Config config; 144 Config config;
155 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); 145 config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
156 limiter_.reset(AudioProcessing::Create(config)); 146 std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config));
157 if (!limiter_.get()) 147 if (!limiter.get())
158 return false; 148 return nullptr;
159 149
160 if (SetOutputFrequency(kDefaultFrequency) == -1) 150 if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
161 return false; 151 limiter->kNoError)
162 152 return nullptr;
163 if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) !=
164 limiter_->kNoError)
165 return false;
166 153
167 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the 154 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
168 // divide-by-2 but -7 is used instead to give a bit of headroom since the 155 // divide-by-2 but -7 is used instead to give a bit of headroom since the
169 // AGC is not a hard limiter. 156 // AGC is not a hard limiter.
170 if (limiter_->gain_control()->set_target_level_dbfs(7) != limiter_->kNoError) 157 if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError)
171 return false; 158 return nullptr;
172 159
173 if (limiter_->gain_control()->set_compression_gain_db(0) != 160 if (limiter->gain_control()->set_compression_gain_db(0) != limiter->kNoError)
174 limiter_->kNoError) 161 return nullptr;
175 return false;
176 162
177 if (limiter_->gain_control()->enable_limiter(true) != limiter_->kNoError) 163 if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError)
178 return false; 164 return nullptr;
179 165
180 if (limiter_->gain_control()->Enable(true) != limiter_->kNoError) 166 if (limiter->gain_control()->Enable(true) != limiter->kNoError)
181 return false; 167 return nullptr;
182 168
183 return true; 169 return std::unique_ptr<AudioMixer>(
170 new AudioMixerImpl(id, std::move(limiter)));
184 } 171 }
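A quick editorial note on the headroom comment above (not part of the CL): halving an amplitude corresponds to 20 * log10(0.5), roughly -6.02 dB, so a -6 dBFS target would exactly compensate the later doubling in LimitMixedAudio(), while the -7 dBFS target leaves about 1 dB of margin because the AGC is a soft limiter rather than a hard clip. A tiny check:

#include <cmath>
#include <cstdio>

int main() {
  // Gain change of a divide-by-2, in dB; prints roughly -6.02.
  std::printf("%.2f dB\n", 20.0 * std::log10(0.5));
}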
185 172
186 void AudioMixerImpl::Mix(int sample_rate, 173 void AudioMixerImpl::Mix(int sample_rate,
187 size_t number_of_channels, 174 size_t number_of_channels,
188 AudioFrame* audio_frame_for_mixing) { 175 AudioFrame* audio_frame_for_mixing) {
189 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); 176 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
190 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 177 RTC_DCHECK_RUN_ON(&thread_checker_);
178 std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
179
180 if (sample_rate != kNbInHz && sample_rate != kWbInHz &&
181 sample_rate != kSwbInHz && sample_rate != kFbInHz) {
182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
183 "Invalid frequency: %d", sample_rate);
184 RTC_NOTREACHED();
185 return;
186 }
187
188 if (OutputFrequency() != sample_rate) {
189 SetOutputFrequency(static_cast<Frequency>(sample_rate));
190 }
191
191 AudioFrameList mixList; 192 AudioFrameList mixList;
192 AudioFrameList additionalFramesList; 193 AudioFrameList additionalFramesList;
193 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; 194 int num_mixed_audio_sources;
194 { 195 {
195 CriticalSectionScoped cs(cb_crit_.get()); 196 CriticalSectionScoped cs(crit_.get());
196 Frequency mixing_frequency;
197
198 switch (sample_rate) {
199 case 8000:
200 mixing_frequency = kNbInHz;
201 break;
202 case 16000:
203 mixing_frequency = kWbInHz;
204 break;
205 case 32000:
206 mixing_frequency = kSwbInHz;
207 break;
208 case 48000:
209 mixing_frequency = kFbInHz;
210 break;
211 default:
212 RTC_NOTREACHED();
213 return;
214 }
215
216 if (OutputFrequency() != mixing_frequency) {
217 SetOutputFrequency(mixing_frequency);
218 }
219
220 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); 197 mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources);
221 GetAdditionalAudio(&additionalFramesList); 198 GetAdditionalAudio(&additionalFramesList);
199 num_mixed_audio_sources = static_cast<int>(num_mixed_audio_sources_);
222 } 200 }
223 201
224 for (FrameAndMuteInfo& frame_and_mute : mixList) { 202 for (FrameAndMuteInfo& frame_and_mute : mixList) {
225 RemixFrame(frame_and_mute.frame, number_of_channels); 203 RemixFrame(frame_and_mute.frame, number_of_channels);
226 } 204 }
227 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { 205 for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) {
228 RemixFrame(frame_and_mute.frame, number_of_channels); 206 RemixFrame(frame_and_mute.frame, number_of_channels);
229 } 207 }
230 208
231 audio_frame_for_mixing->UpdateFrame( 209 audio_frame_for_mixing->UpdateFrame(
232 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, 210 -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
233 AudioFrame::kVadPassive, number_of_channels); 211 AudioFrame::kVadPassive, number_of_channels);
234 212
235 time_stamp_ += static_cast<uint32_t>(sample_size_); 213 time_stamp_ += static_cast<uint32_t>(sample_size_);
236 214
237 use_limiter_ = num_mixed_audio_sources_ > 1; 215 use_limiter_ = num_mixed_audio_sources > 1;
238 216
239 // We only use the limiter if it supports the output sample rate and 217 // We only use the limiter if it supports the output sample rate and
240 // we're actually mixing multiple streams. 218 // we're actually mixing multiple streams.
241 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); 219 MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_);
242 220 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
243 { 221 if (audio_frame_for_mixing->samples_per_channel_ == 0) {
244 CriticalSectionScoped cs(crit_.get()); 222 // Nothing was mixed, set the audio samples to silence.
245 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); 223 audio_frame_for_mixing->samples_per_channel_ = sample_size_;
246 224 audio_frame_for_mixing->Mute();
247 if (audio_frame_for_mixing->samples_per_channel_ == 0) { 225 } else {
248 // Nothing was mixed, set the audio samples to silence. 226 // Only call the limiter if we have something to mix.
249 audio_frame_for_mixing->samples_per_channel_ = sample_size_; 227 LimitMixedAudio(audio_frame_for_mixing);
250 audio_frame_for_mixing->Mute();
251 } else {
252 // Only call the limiter if we have something to mix.
253 LimitMixedAudio(audio_frame_for_mixing);
254 }
255 } 228 }
256 229
257 // Pass the final result to the level indicator. 230 // Pass the final result to the level indicator.
258 audio_level_.ComputeLevel(*audio_frame_for_mixing); 231 audio_level_.ComputeLevel(*audio_frame_for_mixing);
259 232
260 return; 233 return;
261 } 234 }
262 235
263 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { 236 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) {
264 CriticalSectionScoped cs(crit_.get()); 237 RTC_DCHECK_RUN_ON(&thread_checker_);
265
266 output_frequency_ = frequency; 238 output_frequency_ = frequency;
267 sample_size_ = 239 sample_size_ =
268 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); 240 static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000);
269 241
270 return 0; 242 return 0;
271 } 243 }
272 244
273 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { 245 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
274 CriticalSectionScoped cs(crit_.get()); 246 RTC_DCHECK_RUN_ON(&thread_checker_);
275 return output_frequency_; 247 return output_frequency_;
276 } 248 }
277 249
278 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, 250 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
279 bool mixable) { 251 bool mixable) {
280 if (!mixable) { 252 if (!mixable) {
281 // Anonymous audio sources are in a separate list. Make sure that the 253 // Anonymous audio sources are in a separate list. Make sure that the
282 // audio source is in the _audioSourceList if it is being mixed. 254 // audio source is in the _audioSourceList if it is being mixed.
283 SetAnonymousMixabilityStatus(audio_source, false); 255 SetAnonymousMixabilityStatus(audio_source, false);
284 } 256 }
285 size_t numMixedAudioSources;
286 { 257 {
287 CriticalSectionScoped cs(cb_crit_.get()); 258 CriticalSectionScoped cs(crit_.get());
288 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); 259 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
289 // API must be called with a new state. 260 // API must be called with a new state.
290 if (!(mixable ^ isMixed)) { 261 if (!(mixable ^ isMixed)) {
291 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, 262 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
292 "Mixable is aready %s", isMixed ? "ON" : "off"); 263 "Mixable is aready %s", isMixed ? "ON" : "off");
293 return -1; 264 return -1;
294 } 265 }
295 bool success = false; 266 bool success = false;
296 if (mixable) { 267 if (mixable) {
297 success = AddAudioSourceToList(audio_source, &audio_source_list_); 268 success = AddAudioSourceToList(audio_source, &audio_source_list_);
298 } else { 269 } else {
299 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_); 270 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
300 } 271 }
301 if (!success) { 272 if (!success) {
302 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 273 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
303 "failed to %s audio_source", mixable ? "add" : "remove"); 274 "failed to %s audio_source", mixable ? "add" : "remove");
304 RTC_NOTREACHED(); 275 RTC_NOTREACHED();
305 return -1; 276 return -1;
306 } 277 }
307 278
308 size_t numMixedNonAnonymous = audio_source_list_.size(); 279 size_t numMixedNonAnonymous = audio_source_list_.size();
309 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { 280 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
310 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; 281 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
311 } 282 }
312 numMixedAudioSources = 283 num_mixed_audio_sources_ =
313 numMixedNonAnonymous + additional_audio_source_list_.size(); 284 numMixedNonAnonymous + additional_audio_source_list_.size();
314 } 285 }
315 // A MixerAudioSource was added or removed. Make sure the scratch
316 // buffer is updated if necessary.
317 // Note: The scratch buffer may only be updated in Process().
318 CriticalSectionScoped cs(crit_.get());
319 num_mixed_audio_sources_ = numMixedAudioSources;
320 return 0; 286 return 0;
321 } 287 }
322 288
323 bool AudioMixerImpl::MixabilityStatus( 289 bool AudioMixerImpl::MixabilityStatus(
324 const MixerAudioSource& audio_source) const { 290 const MixerAudioSource& audio_source) const {
325 CriticalSectionScoped cs(cb_crit_.get()); 291 CriticalSectionScoped cs(crit_.get());
326 return IsAudioSourceInList(audio_source, audio_source_list_); 292 return IsAudioSourceInList(audio_source, audio_source_list_);
327 } 293 }
328 294
329 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( 295 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
330 MixerAudioSource* audio_source, 296 MixerAudioSource* audio_source,
331 bool anonymous) { 297 bool anonymous) {
332 CriticalSectionScoped cs(cb_crit_.get()); 298 CriticalSectionScoped cs(crit_.get());
333 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { 299 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
334 if (anonymous) { 300 if (anonymous) {
335 return 0; 301 return 0;
336 } 302 }
337 if (!RemoveAudioSourceFromList(audio_source, 303 if (!RemoveAudioSourceFromList(audio_source,
338 &additional_audio_source_list_)) { 304 &additional_audio_source_list_)) {
339 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 305 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
340 "unable to remove audio_source from anonymous list"); 306 "unable to remove audio_source from anonymous list");
341 RTC_NOTREACHED(); 307 RTC_NOTREACHED();
342 return -1; 308 return -1;
(...skipping 13 matching lines...)
356 // already registered. 322 // already registered.
357 return -1; 323 return -1;
358 } 324 }
359 return AddAudioSourceToList(audio_source, &additional_audio_source_list_) 325 return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
360 ? 0 326 ? 0
361 : -1; 327 : -1;
362 } 328 }
363 329
364 bool AudioMixerImpl::AnonymousMixabilityStatus( 330 bool AudioMixerImpl::AnonymousMixabilityStatus(
365 const MixerAudioSource& audio_source) const { 331 const MixerAudioSource& audio_source) const {
366 CriticalSectionScoped cs(cb_crit_.get()); 332 CriticalSectionScoped cs(crit_.get());
367 return IsAudioSourceInList(audio_source, additional_audio_source_list_); 333 return IsAudioSourceInList(audio_source, additional_audio_source_list_);
368 } 334 }
369 335
370 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { 336 AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const {
337 RTC_DCHECK_RUN_ON(&thread_checker_);
371 AudioFrameList result; 338 AudioFrameList result;
372 std::vector<SourceFrame> audioSourceMixingDataList; 339 std::vector<SourceFrame> audioSourceMixingDataList;
373 340
374 // Get audio source audio and put it in the struct vector. 341 // Get audio source audio and put it in the struct vector.
375 for (MixerAudioSource* audio_source : audio_source_list_) { 342 for (MixerAudioSource* audio_source : audio_source_list_) {
376 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( 343 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
377 id_, static_cast<int>(output_frequency_)); 344 id_, static_cast<int>(output_frequency_));
378 345
379 auto audio_frame_info = audio_frame_with_info.audio_frame_info; 346 auto audio_frame_info = audio_frame_with_info.audio_frame_info;
380 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; 347 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
(...skipping 38 matching lines...)
419 result.emplace_back(p.audio_frame_, false); 386 result.emplace_back(p.audio_frame_, false);
420 } 387 }
421 388
422 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); 389 p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
423 } 390 }
424 return result; 391 return result;
425 } 392 }
426 393
427 void AudioMixerImpl::GetAdditionalAudio( 394 void AudioMixerImpl::GetAdditionalAudio(
428 AudioFrameList* additionalFramesList) const { 395 AudioFrameList* additionalFramesList) const {
396 RTC_DCHECK_RUN_ON(&thread_checker_);
429 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 397 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
430 "GetAdditionalAudio(additionalFramesList)"); 398 "GetAdditionalAudio(additionalFramesList)");
431 // The GetAudioFrameWithMuted() callback may result in the audio source being 399 // The GetAudioFrameWithMuted() callback may result in the audio source being
432 // removed from additionalAudioFramesList_. If that happens it will 400 // removed from additionalAudioFramesList_. If that happens it will
433 // invalidate any iterators. Create a copy of the audio sources list such 401 // invalidate any iterators. Create a copy of the audio sources list such
434 // that the list of participants can be traversed safely. 402 // that the list of participants can be traversed safely.
435 MixerAudioSourceList additionalAudioSourceList; 403 MixerAudioSourceList additionalAudioSourceList;
436 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), 404 additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
437 additional_audio_source_list_.begin(), 405 additional_audio_source_list_.begin(),
438 additional_audio_source_list_.end()); 406 additional_audio_source_list_.end());
(...skipping 87 matching lines...)
526 position++; 494 position++;
527 } 495 }
528 496
529 return 0; 497 return 0;
530 } 498 }
531 499
532 // TODO(andrew): consolidate this function with MixFromList. 500 // TODO(andrew): consolidate this function with MixFromList.
533 int32_t AudioMixerImpl::MixAnonomouslyFromList( 501 int32_t AudioMixerImpl::MixAnonomouslyFromList(
534 AudioFrame* mixedAudio, 502 AudioFrame* mixedAudio,
535 const AudioFrameList& audioFrameList) const { 503 const AudioFrameList& audioFrameList) const {
504 RTC_DCHECK_RUN_ON(&thread_checker_);
536 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, 505 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
537 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); 506 "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
538 507
539 if (audioFrameList.empty()) 508 if (audioFrameList.empty())
540 return 0; 509 return 0;
541 510
542 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); 511 for (AudioFrameList::const_iterator iter = audioFrameList.begin();
543 iter != audioFrameList.end(); ++iter) { 512 iter != audioFrameList.end(); ++iter) {
544 if (!iter->muted) { 513 if (!iter->muted) {
545 MixFrames(mixedAudio, iter->frame, use_limiter_); 514 MixFrames(mixedAudio, iter->frame, use_limiter_);
546 } 515 }
547 } 516 }
548 return 0; 517 return 0;
549 } 518 }
550 519
551 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { 520 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
521 RTC_DCHECK_RUN_ON(&thread_checker_);
552 if (!use_limiter_) { 522 if (!use_limiter_) {
553 return true; 523 return true;
554 } 524 }
555 525
556 // Smoothly limit the mixed frame. 526 // Smoothly limit the mixed frame.
557 const int error = limiter_->ProcessStream(mixedAudio); 527 const int error = limiter_->ProcessStream(mixedAudio);
558 528
559 // And now we can safely restore the level. This procedure results in 529 // And now we can safely restore the level. This procedure results in
560 // some loss of resolution, deemed acceptable. 530 // some loss of resolution, deemed acceptable.
561 // 531 //
562 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS 532 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
563 // and compression gain of 6 dB). However, in the transition frame when this 533 // and compression gain of 6 dB). However, in the transition frame when this
564 // is enabled (moving from one to two audio sources) it has the potential to 534 // is enabled (moving from one to two audio sources) it has the potential to
565 // create discontinuities in the mixed frame. 535 // create discontinuities in the mixed frame.
566 // 536 //
567 // Instead we double the frame (with addition since left-shifting a 537 // Instead we double the frame (with addition since left-shifting a
568 // negative value is undefined). 538 // negative value is undefined).
569 *mixedAudio += *mixedAudio; 539 *mixedAudio += *mixedAudio;
570 540
571 if (error != limiter_->kNoError) { 541 if (error != limiter_->kNoError) {
572 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, 542 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
573 "Error from AudioProcessing: %d", error); 543 "Error from AudioProcessing: %d", error);
574 RTC_NOTREACHED(); 544 RTC_NOTREACHED();
575 return false; 545 return false;
576 } 546 }
577 return true; 547 return true;
578 } 548 }
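A small editorial illustration (not part of the CL) of why the comment above doubles the frame with addition rather than a shift: left-shifting a negative signed value is undefined behavior in C++, while adding a value to itself is well defined as long as the result does not overflow.

#include <cstdint>

void DoubleSample(int16_t sample) {
  int16_t doubled = sample + sample;  // OK: e.g. sample == -1000 gives -2000.
  // int16_t bad = sample << 1;       // Undefined behavior when sample < 0.
  (void)doubled;
}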
579 549
580 int AudioMixerImpl::GetOutputAudioLevel() { 550 int AudioMixerImpl::GetOutputAudioLevel() {
551 RTC_DCHECK_RUN_ON(&thread_checker_);
581 const int level = audio_level_.Level(); 552 const int level = audio_level_.Level();
582 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 553 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
583 "GetAudioOutputLevel() => level=%d", level); 554 "GetAudioOutputLevel() => level=%d", level);
584 return level; 555 return level;
585 } 556 }
586 557
587 int AudioMixerImpl::GetOutputAudioLevelFullRange() { 558 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
559 RTC_DCHECK_RUN_ON(&thread_checker_);
588 const int level = audio_level_.LevelFullRange(); 560 const int level = audio_level_.LevelFullRange();
589 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, 561 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
590 "GetAudioOutputLevelFullRange() => level=%d", level); 562 "GetAudioOutputLevelFullRange() => level=%d", level);
591 return level; 563 return level;
592 } 564 }
593 } // namespace webrtc 565 } // namespace webrtc
