OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 19 matching lines...)
30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" | 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
31 #include "webrtc/modules/audio_processing/gain_control_impl.h" | 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" |
32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" | 32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" |
33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc er.h" | 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc er.h" |
34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" | 34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" |
35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" | 35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
36 #include "webrtc/modules/audio_processing/processing_component.h" | 36 #include "webrtc/modules/audio_processing/processing_component.h" |
37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" | 37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" | 38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
39 #include "webrtc/modules/interface/module_common_types.h" | 39 #include "webrtc/modules/interface/module_common_types.h" |
40 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" | |
41 #include "webrtc/system_wrappers/interface/file_wrapper.h" | 40 #include "webrtc/system_wrappers/interface/file_wrapper.h" |
42 #include "webrtc/system_wrappers/interface/logging.h" | 41 #include "webrtc/system_wrappers/interface/logging.h" |
43 #include "webrtc/system_wrappers/interface/metrics.h" | 42 #include "webrtc/system_wrappers/interface/metrics.h" |
44 | 43 |
45 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 44 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
46 // Files generated at build-time by the protobuf compiler. | 45 // Files generated at build-time by the protobuf compiler. |
47 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD | 46 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD |
48 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" | 47 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" |
49 #else | 48 #else |
50 #include "webrtc/audio_processing/debug.pb.h" | 49 #include "webrtc/audio_processing/debug.pb.h" |
(...skipping 132 matching lines...)
183 | 182 |
184 AudioProcessingImpl::AudioProcessingImpl(const Config& config, | 183 AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
185 Beamformer<float>* beamformer) | 184 Beamformer<float>* beamformer) |
186 : echo_cancellation_(NULL), | 185 : echo_cancellation_(NULL), |
187 echo_control_mobile_(NULL), | 186 echo_control_mobile_(NULL), |
188 gain_control_(NULL), | 187 gain_control_(NULL), |
189 high_pass_filter_(NULL), | 188 high_pass_filter_(NULL), |
190 level_estimator_(NULL), | 189 level_estimator_(NULL), |
191 noise_suppression_(NULL), | 190 noise_suppression_(NULL), |
192 voice_detection_(NULL), | 191 voice_detection_(NULL), |
193 crit_(CriticalSectionWrapper::CreateCriticalSection()), | |
194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 192 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
195 debug_file_(FileWrapper::Create()), | 193 debug_file_(FileWrapper::Create()), |
196 event_msg_(new audioproc::Event()), | 194 event_msg_(new audioproc::Event()), |
197 #endif | 195 #endif |
198 fwd_proc_format_(kSampleRate16kHz), | 196 fwd_proc_format_(kSampleRate16kHz), |
199 rev_proc_format_(kSampleRate16kHz, 1), | 197 rev_proc_format_(kSampleRate16kHz, 1), |
200 split_rate_(kSampleRate16kHz), | 198 split_rate_(kSampleRate16kHz), |
201 stream_delay_ms_(0), | 199 stream_delay_ms_(0), |
202 delay_offset_ms_(0), | 200 delay_offset_ms_(0), |
203 was_stream_delay_set_(false), | 201 was_stream_delay_set_(false), |
(...skipping 11 matching lines...)
215 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), | 213 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), |
216 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 214 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
217 transient_suppressor_enabled_(false), | 215 transient_suppressor_enabled_(false), |
218 #else | 216 #else |
219 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), | 217 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), |
220 #endif | 218 #endif |
221 beamformer_enabled_(config.Get<Beamforming>().enabled), | 219 beamformer_enabled_(config.Get<Beamforming>().enabled), |
222 beamformer_(beamformer), | 220 beamformer_(beamformer), |
223 array_geometry_(config.Get<Beamforming>().array_geometry), | 221 array_geometry_(config.Get<Beamforming>().array_geometry), |
224 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { | 222 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { |
225 echo_cancellation_ = | 223 |
226 new EchoCancellationImpl(this, crit_, &render_thread_, &capture_thread_); | 224 echo_cancellation_ = new EchoCancellationImpl( |
225 this, &crit_render_, &crit_capture_, &render_thread_, &capture_thread_); | |
227 component_list_.push_back(echo_cancellation_); | 226 component_list_.push_back(echo_cancellation_); |
228 | 227 |
229 echo_control_mobile_ = | 228 echo_control_mobile_ = new EchoControlMobileImpl( |
230 new EchoControlMobileImpl(this, crit_, &render_thread_, &capture_thread_); | 229 this, &crit_render_, &crit_capture_, &render_thread_, &capture_thread_); |
231 component_list_.push_back(echo_control_mobile_); | 230 component_list_.push_back(echo_control_mobile_); |
232 | 231 |
233 gain_control_ = | 232 gain_control_ = new GainControlImpl(this, &crit_capture_, &crit_capture_, |
234 new GainControlImpl(this, crit_, &render_thread_, &capture_thread_); | 233 &render_thread_, &capture_thread_); |
234 | |
235 component_list_.push_back(gain_control_); | 235 component_list_.push_back(gain_control_); |
236 | 236 |
237 high_pass_filter_ = new HighPassFilterImpl(this, crit_); | 237 high_pass_filter_ = new HighPassFilterImpl(this); |
238 component_list_.push_back(high_pass_filter_); | 238 component_list_.push_back(high_pass_filter_); |
239 | 239 |
240 level_estimator_ = new LevelEstimatorImpl(this, crit_); | 240 level_estimator_ = new LevelEstimatorImpl(this); |
241 component_list_.push_back(level_estimator_); | 241 component_list_.push_back(level_estimator_); |
242 | 242 |
243 noise_suppression_ = new NoiseSuppressionImpl(this, crit_, &capture_thread_); | 243 noise_suppression_ = |
244 new NoiseSuppressionImpl(this, &crit_capture_, &capture_thread_); | |
244 component_list_.push_back(noise_suppression_); | 245 component_list_.push_back(noise_suppression_); |
245 | 246 |
246 voice_detection_ = new VoiceDetectionImpl(this, crit_, &capture_thread_); | 247 voice_detection_ = |
248 new VoiceDetectionImpl(this, &crit_capture_, &capture_thread_); | |
247 component_list_.push_back(voice_detection_); | 249 component_list_.push_back(voice_detection_); |
248 | 250 |
249 gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); | 251 gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); |
250 | 252 |
251 SetExtraOptions(config); | 253 SetExtraOptions(config); |
252 | 254 |
253 render_thread_.DetachFromThread(); | 255 render_thread_.DetachFromThread(); |
254 capture_thread_.DetachFromThread(); | 256 capture_thread_.DetachFromThread(); |
255 } | 257 } |
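A minimal sketch of the thread-checker behavior the constructor relies on, assuming the rtc::ThreadChecker semantics from webrtc/base/thread_checker.h (the function below is illustrative and not part of this CL): after DetachFromThread(), a checker re-binds to the first thread that calls it again, which is why APM can be constructed on one thread and then driven from dedicated render and capture threads.

    #include "webrtc/base/thread_checker.h"

    // Illustrative only; nothing below is part of the CL.
    void ThreadCheckerRebindSketch() {
      rtc::ThreadChecker checker;   // Binds to the constructing thread.
      checker.DetachFromThread();   // Forget that binding.
      // The next check re-binds the checker to the calling thread; this is
      // what lets render_thread_/capture_thread_ end up owned by whichever
      // threads first call into the render/capture APIs.
      bool bound_here = checker.CalledOnValidThread();  // True: re-bound here.
      (void)bound_here;
    }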
256 | 258 |
257 AudioProcessingImpl::~AudioProcessingImpl() { | 259 AudioProcessingImpl::~AudioProcessingImpl() { |
258 { | 260 { |
259 CriticalSectionScoped crit_scoped(crit_); | 261 rtc::CritScope cs_render(&crit_render_); |
262 rtc::CritScope cs_capture(&crit_capture_); | |
260 // Depends on gain_control_ and gain_control_for_new_agc_. | 263 // Depends on gain_control_ and gain_control_for_new_agc_. |
261 agc_manager_.reset(); | 264 agc_manager_.reset(); |
262 // Depends on gain_control_. | 265 // Depends on gain_control_. |
263 gain_control_for_new_agc_.reset(); | 266 gain_control_for_new_agc_.reset(); |
264 while (!component_list_.empty()) { | 267 while (!component_list_.empty()) { |
265 ProcessingComponent* component = component_list_.front(); | 268 ProcessingComponent* component = component_list_.front(); |
266 component->Destroy(); | 269 component->Destroy(); |
267 delete component; | 270 delete component; |
268 component_list_.pop_front(); | 271 component_list_.pop_front(); |
269 } | 272 } |
270 | 273 |
271 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 274 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
272 if (debug_file_->Open()) { | 275 if (debug_file_->Open()) { |
273 debug_file_->CloseFile(); | 276 debug_file_->CloseFile(); |
274 } | 277 } |
275 #endif | 278 #endif |
276 } | 279 } |
277 delete crit_; | |
278 crit_ = NULL; | |
279 } | 280 } |
280 | 281 |
281 int AudioProcessingImpl::Initialize() { | 282 int AudioProcessingImpl::Initialize() { |
282 CriticalSectionScoped crit_scoped(crit_); | 283 // Run in a single-threaded manner during initialization. |
284 rtc::CritScope cs_render(&crit_render_); | |
285 rtc::CritScope cs_capture(&crit_capture_); | |
283 return InitializeLocked(); | 286 return InitializeLocked(); |
284 } | 287 } |
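For context, a hedged sketch of the locking model this CL moves to (the class below is illustrative, not the CL's code): the single crit_ is replaced by two rtc::CriticalSection members, and paths that must run single-threaded, such as Initialize(), take both locks in a fixed render-then-capture order so that no call path can acquire them in the opposite order and deadlock.

    #include "webrtc/base/criticalsection.h"

    class TwoLockSketch {
     public:
      int Initialize() {
        // Fixed acquisition order: render lock first, capture lock second.
        rtc::CritScope cs_render(&crit_render_);
        rtc::CritScope cs_capture(&crit_capture_);
        return InitializeLocked();
      }

     private:
      int InitializeLocked() { return 0; }  // Stand-in for the real work.
      rtc::CriticalSection crit_render_;
      rtc::CriticalSection crit_capture_;
    };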
285 | 288 |
286 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, | 289 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
287 int output_sample_rate_hz, | 290 int output_sample_rate_hz, |
288 int reverse_sample_rate_hz, | 291 int reverse_sample_rate_hz, |
289 ChannelLayout input_layout, | 292 ChannelLayout input_layout, |
290 ChannelLayout output_layout, | 293 ChannelLayout output_layout, |
291 ChannelLayout reverse_layout) { | 294 ChannelLayout reverse_layout) { |
295 // Run in a single-threaded manner during initialization. | |
296 rtc::CritScope cs_render(&crit_render_); | |
297 rtc::CritScope cs_capture(&crit_capture_); | |
298 | |
292 const ProcessingConfig processing_config = { | 299 const ProcessingConfig processing_config = { |
293 {{input_sample_rate_hz, | 300 {{input_sample_rate_hz, |
294 ChannelsFromLayout(input_layout), | 301 ChannelsFromLayout(input_layout), |
295 LayoutHasKeyboard(input_layout)}, | 302 LayoutHasKeyboard(input_layout)}, |
296 {output_sample_rate_hz, | 303 {output_sample_rate_hz, |
297 ChannelsFromLayout(output_layout), | 304 ChannelsFromLayout(output_layout), |
298 LayoutHasKeyboard(output_layout)}, | 305 LayoutHasKeyboard(output_layout)}, |
299 {reverse_sample_rate_hz, | 306 {reverse_sample_rate_hz, |
300 ChannelsFromLayout(reverse_layout), | 307 ChannelsFromLayout(reverse_layout), |
301 LayoutHasKeyboard(reverse_layout)}, | 308 LayoutHasKeyboard(reverse_layout)}, |
302 {reverse_sample_rate_hz, | 309 {reverse_sample_rate_hz, |
303 ChannelsFromLayout(reverse_layout), | 310 ChannelsFromLayout(reverse_layout), |
304 LayoutHasKeyboard(reverse_layout)}}}; | 311 LayoutHasKeyboard(reverse_layout)}}}; |
305 | 312 |
306 return Initialize(processing_config); | 313 return Initialize(processing_config); |
307 } | 314 } |
308 | 315 |
309 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 316 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
310 CriticalSectionScoped crit_scoped(crit_); | 317 // Run in a single-threaded manner during initialization. |
318 rtc::CritScope cs_render(&crit_render_); | |
319 rtc::CritScope cs_capture(&crit_capture_); | |
311 return InitializeLocked(processing_config); | 320 return InitializeLocked(processing_config); |
312 } | 321 } |
313 | 322 |
314 // Calls InitializeLocked() if any of the audio parameters have changed from | 323 // Calls InitializeLocked() if any of the audio parameters have changed from |
315 // their current values. | 324 // their current values (needs to be called while holding the crit_render_ |
316 int AudioProcessingImpl::MaybeInitializeLocked( | 325 // lock). |
326 int AudioProcessingImpl::MaybeInitialize( | |
317 const ProcessingConfig& processing_config) { | 327 const ProcessingConfig& processing_config) { |
328 RTC_DCHECK(crit_render_.IsLocked()); | |
kwiberg-webrtc 2015/10/27 13:15:56: Is this check useful? Doesn't it just check that *
peah-webrtc 2015/11/05 11:47:58: I added the check as a threadchecker instead which
318 if (processing_config == shared_state_.api_format_) { | 329 if (processing_config == shared_state_.api_format_) { |
319 return kNoError; | 330 return kNoError; |
320 } | 331 } |
332 | |
333 rtc::CritScope cs_capture(&crit_capture_); | |
321 return InitializeLocked(processing_config); | 334 return InitializeLocked(processing_config); |
322 } | 335 } |
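Following the exchange above, a hedged sketch of how a precondition like RTC_DCHECK(crit_render_.IsLocked()) can instead be expressed with a thread checker, which verifies who is calling rather than whether some thread holds the lock. All names below are stand-ins, not the CL's final code.

    #include "webrtc/base/checks.h"
    #include "webrtc/base/criticalsection.h"
    #include "webrtc/base/thread_checker.h"

    class MaybeInitializeSketch {
     public:
      int MaybeInitialize(int new_format) {
        // Assert the caller's thread identity instead of the lock state.
        RTC_DCHECK(render_thread_.CalledOnValidThread());
        if (new_format == api_format_) {
          return 0;  // Nothing changed; no reinitialization needed.
        }
        rtc::CritScope cs_capture(&crit_capture_);
        api_format_ = new_format;  // Stand-in for InitializeLocked().
        return 0;
      }

     private:
      rtc::ThreadChecker render_thread_;
      rtc::CriticalSection crit_capture_;
      int api_format_ = 0;
    };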
323 | 336 |
324 int AudioProcessingImpl::InitializeLocked() { | 337 int AudioProcessingImpl::InitializeLocked() { |
325 const int fwd_audio_buffer_channels = | 338 const int fwd_audio_buffer_channels = |
326 beamformer_enabled_ | 339 beamformer_enabled_ |
327 ? shared_state_.api_format_.input_stream().num_channels() | 340 ? shared_state_.api_format_.input_stream().num_channels() |
328 : shared_state_.api_format_.output_stream().num_channels(); | 341 : shared_state_.api_format_.output_stream().num_channels(); |
329 const int rev_audio_buffer_out_num_frames = | 342 const int rev_audio_buffer_out_num_frames = |
330 shared_state_.api_format_.reverse_output_stream().num_frames() == 0 | 343 shared_state_.api_format_.reverse_output_stream().num_frames() == 0 |
(...skipping 121 matching lines...)
452 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 465 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
453 split_rate_ = kSampleRate16kHz; | 466 split_rate_ = kSampleRate16kHz; |
454 } else { | 467 } else { |
455 split_rate_ = fwd_proc_format_.sample_rate_hz(); | 468 split_rate_ = fwd_proc_format_.sample_rate_hz(); |
456 } | 469 } |
457 | 470 |
458 return InitializeLocked(); | 471 return InitializeLocked(); |
459 } | 472 } |
460 | 473 |
461 void AudioProcessingImpl::SetExtraOptions(const Config& config) { | 474 void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
462 CriticalSectionScoped crit_scoped(crit_); | 475 // Run in a single-threaded manner when setting ehe extra options. |
kwiberg-webrtc 2015/10/27 13:15:57: the
peah-webrtc 2015/11/05 11:47:58: Done.
476 rtc::CritScope cs_render(&crit_render_); | |
477 rtc::CritScope cs_capture(&crit_capture_); | |
463 for (auto item : component_list_) { | 478 for (auto item : component_list_) { |
464 item->SetExtraOptions(config); | 479 item->SetExtraOptions(config); |
465 } | 480 } |
466 | 481 |
467 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { | 482 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { |
468 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; | 483 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; |
469 InitializeTransient(); | 484 InitializeTransient(); |
470 } | 485 } |
471 } | 486 } |
472 | 487 |
(...skipping 12 matching lines...)
485 | 500 |
486 int AudioProcessingImpl::num_input_channels() const { | 501 int AudioProcessingImpl::num_input_channels() const { |
487 return shared_state_.api_format_.input_stream().num_channels(); | 502 return shared_state_.api_format_.input_stream().num_channels(); |
488 } | 503 } |
489 | 504 |
490 int AudioProcessingImpl::num_output_channels() const { | 505 int AudioProcessingImpl::num_output_channels() const { |
491 return shared_state_.api_format_.output_stream().num_channels(); | 506 return shared_state_.api_format_.output_stream().num_channels(); |
492 } | 507 } |
493 | 508 |
494 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 509 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
495 CriticalSectionScoped lock(crit_); | 510 rtc::CritScope cs(&crit_capture_); |
496 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 511 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
497 output_will_be_muted_ = muted; | 512 output_will_be_muted_ = muted; |
498 if (agc_manager_.get()) { | 513 if (agc_manager_.get()) { |
499 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 514 agc_manager_->SetCaptureMuted(output_will_be_muted_); |
500 } | 515 } |
501 } | 516 } |
502 | 517 |
503 | 518 |
504 int AudioProcessingImpl::ProcessStream(const float* const* src, | 519 int AudioProcessingImpl::ProcessStream(const float* const* src, |
505 size_t samples_per_channel, | 520 size_t samples_per_channel, |
506 int input_sample_rate_hz, | 521 int input_sample_rate_hz, |
507 ChannelLayout input_layout, | 522 ChannelLayout input_layout, |
508 int output_sample_rate_hz, | 523 int output_sample_rate_hz, |
509 ChannelLayout output_layout, | 524 ChannelLayout output_layout, |
510 float* const* dest) { | 525 float* const* dest) { |
511 CriticalSectionScoped crit_scoped(crit_); | |
512 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 526 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
513 StreamConfig input_stream = shared_state_.api_format_.input_stream(); | 527 StreamConfig input_stream; |
528 StreamConfig output_stream; | |
529 { | |
530 // Access the shared_state_.api_format_.input_stream beneath the capture | |
531 // lock. | |
532 // The lock must be released as it is later required in the call | |
533 // to ProcessStream(,,,); | |
534 rtc::CritScope cs(&crit_capture_); | |
535 input_stream = shared_state_.api_format_.input_stream(); | |
536 output_stream = shared_state_.api_format_.output_stream(); | |
537 } | |
538 | |
514 input_stream.set_sample_rate_hz(input_sample_rate_hz); | 539 input_stream.set_sample_rate_hz(input_sample_rate_hz); |
515 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); | 540 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
516 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); | 541 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); |
517 | |
518 StreamConfig output_stream = shared_state_.api_format_.output_stream(); | |
519 output_stream.set_sample_rate_hz(output_sample_rate_hz); | 542 output_stream.set_sample_rate_hz(output_sample_rate_hz); |
520 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); | 543 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); |
521 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); | 544 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); |
522 | 545 |
523 if (samples_per_channel != input_stream.num_frames()) { | 546 if (samples_per_channel != input_stream.num_frames()) { |
524 return kBadDataLengthError; | 547 return kBadDataLengthError; |
525 } | 548 } |
526 return ProcessStream(src, input_stream, output_stream, dest); | 549 return ProcessStream(src, input_stream, output_stream, dest); |
527 } | 550 } |
528 | 551 |
529 int AudioProcessingImpl::ProcessStream(const float* const* src, | 552 int AudioProcessingImpl::ProcessStream(const float* const* src, |
530 const StreamConfig& input_config, | 553 const StreamConfig& input_config, |
531 const StreamConfig& output_config, | 554 const StreamConfig& output_config, |
532 float* const* dest) { | 555 float* const* dest) { |
533 CriticalSectionScoped crit_scoped(crit_); | |
534 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 556 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
557 { | |
558 // Acquire the capture lock in order to safely call the function | |
559 // that retrieves the render side data. This function accesses apm | |
560 // getters that need the capture lock held when being called. | |
561 rtc::CritScope cs_capture(&crit_capture_); | |
562 echo_cancellation_->ReadQueuedRenderData(); | |
563 echo_control_mobile_->ReadQueuedRenderData(); | |
564 gain_control_->ReadQueuedRenderData(); | |
565 } | |
535 if (!src || !dest) { | 566 if (!src || !dest) { |
536 return kNullPointerError; | 567 return kNullPointerError; |
537 } | 568 } |
538 | 569 |
539 echo_cancellation_->ReadQueuedRenderData(); | |
540 echo_control_mobile_->ReadQueuedRenderData(); | |
541 gain_control_->ReadQueuedRenderData(); | |
542 | |
543 ProcessingConfig processing_config = shared_state_.api_format_; | 570 ProcessingConfig processing_config = shared_state_.api_format_; |
544 processing_config.input_stream() = input_config; | 571 processing_config.input_stream() = input_config; |
545 processing_config.output_stream() = output_config; | 572 processing_config.output_stream() = output_config; |
546 | 573 |
547 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 574 { |
575 // Do conditional reinitialization. | |
576 rtc::CritScope cs_render(&crit_render_); | |
577 RETURN_ON_ERR(MaybeInitialize(processing_config)); | |
578 } | |
579 rtc::CritScope cs_capture(&crit_capture_); | |
580 | |
548 assert(processing_config.input_stream().num_frames() == | 581 assert(processing_config.input_stream().num_frames() == |
549 shared_state_.api_format_.input_stream().num_frames()); | 582 shared_state_.api_format_.input_stream().num_frames()); |
550 | 583 |
551 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 584 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
552 if (debug_file_->Open()) { | 585 if (debug_file_->Open()) { |
553 RETURN_ON_ERR(WriteConfigMessage(false)); | 586 RETURN_ON_ERR(WriteConfigMessage(false)); |
554 | 587 |
555 event_msg_->set_type(audioproc::Event::STREAM); | 588 event_msg_->set_type(audioproc::Event::STREAM); |
556 audioproc::Stream* msg = event_msg_->mutable_stream(); | 589 audioproc::Stream* msg = event_msg_->mutable_stream(); |
557 const size_t channel_size = | 590 const size_t channel_size = |
(...skipping 17 matching lines...)
575 i < shared_state_.api_format_.output_stream().num_channels(); ++i) | 608 i < shared_state_.api_format_.output_stream().num_channels(); ++i) |
576 msg->add_output_channel(dest[i], channel_size); | 609 msg->add_output_channel(dest[i], channel_size); |
577 RETURN_ON_ERR(WriteMessageToDebugFile()); | 610 RETURN_ON_ERR(WriteMessageToDebugFile()); |
578 } | 611 } |
579 #endif | 612 #endif |
580 | 613 |
581 return kNoError; | 614 return kNoError; |
582 } | 615 } |
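A condensed, hedged sketch of the lock scoping that the rewritten ProcessStream() above follows; the class and stubs are stand-ins, not the CL's code. Each step holds only the lock it needs, and the render lock is never requested while the capture lock is held.

    #include "webrtc/base/criticalsection.h"

    class ProcessStreamLockingSketch {
     public:
      int ProcessStream() {
        {
          // Step 1: drain queued render-side data under the capture lock only.
          rtc::CritScope cs_capture(&crit_capture_);
          ReadQueuedRenderData();
        }
        {
          // Step 2: conditional reinitialization runs under the render lock.
          // (In the CL, MaybeInitialize() also takes the capture lock, but
          // only when it actually reinitializes, preserving the
          // render-then-capture order; the stub below omits that.)
          rtc::CritScope cs_render(&crit_render_);
          int err = MaybeInitialize();
          if (err != 0) {
            return err;
          }
        }
        // Step 3: the capture-side processing itself under the capture lock.
        rtc::CritScope cs_capture(&crit_capture_);
        return ProcessCaptureStreamLocked();
      }

     private:
      void ReadQueuedRenderData() {}                  // Stub.
      int MaybeInitialize() { return 0; }             // Stub.
      int ProcessCaptureStreamLocked() { return 0; }  // Stub.
      rtc::CriticalSection crit_render_;
      rtc::CriticalSection crit_capture_;
    };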
583 | 616 |
584 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 617 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
585 CriticalSectionScoped crit_scoped(crit_); | |
586 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 618 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
587 echo_cancellation_->ReadQueuedRenderData(); | 619 { |
588 echo_control_mobile_->ReadQueuedRenderData(); | 620 // Acquire the capture lock in order to safely call the function |
589 gain_control_->ReadQueuedRenderData(); | 621 // that retrieves the render side data. This function accesses apm |
622 // getters that need the capture lock held when being called. | |
623 // The lock needs to be released as | |
624 // echo_control_mobile_->is_enabled() acquires this lock as well. | |
625 rtc::CritScope cs_capture(&crit_capture_); | |
626 echo_cancellation_->ReadQueuedRenderData(); | |
627 echo_control_mobile_->ReadQueuedRenderData(); | |
628 gain_control_->ReadQueuedRenderData(); | |
629 } | |
590 | 630 |
591 if (!frame) { | 631 if (!frame) { |
592 return kNullPointerError; | 632 return kNullPointerError; |
593 } | 633 } |
594 // Must be a native rate. | 634 // Must be a native rate. |
595 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 635 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
596 frame->sample_rate_hz_ != kSampleRate16kHz && | 636 frame->sample_rate_hz_ != kSampleRate16kHz && |
597 frame->sample_rate_hz_ != kSampleRate32kHz && | 637 frame->sample_rate_hz_ != kSampleRate32kHz && |
598 frame->sample_rate_hz_ != kSampleRate48kHz) { | 638 frame->sample_rate_hz_ != kSampleRate48kHz) { |
599 return kBadSampleRateError; | 639 return kBadSampleRateError; |
600 } | 640 } |
601 | 641 |
602 if (echo_control_mobile_->is_enabled() && | 642 if (echo_control_mobile_->is_enabled() && |
603 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { | 643 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { |
604 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; | 644 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; |
605 return kUnsupportedComponentError; | 645 return kUnsupportedComponentError; |
606 } | 646 } |
607 | 647 |
608 // TODO(ajm): The input and output rates and channels are currently | 648 ProcessingConfig processing_config; |
609 // constrained to be identical in the int16 interface. | 649 { |
610 ProcessingConfig processing_config = shared_state_.api_format_; | 650 // Acquire lock for the access of api_format. |
651 // The lock is released immediately due to the conditional | |
652 // reinitialization. | |
653 rtc::CritScope cs_capture(&crit_capture_); | |
654 // TODO(ajm): The input and output rates and channels are currently | |
655 // constrained to be identical in the int16 interface. | |
656 processing_config = shared_state_.api_format_; | |
657 } | |
611 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 658 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
612 processing_config.input_stream().set_num_channels(frame->num_channels_); | 659 processing_config.input_stream().set_num_channels(frame->num_channels_); |
613 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 660 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
614 processing_config.output_stream().set_num_channels(frame->num_channels_); | 661 processing_config.output_stream().set_num_channels(frame->num_channels_); |
615 | 662 |
616 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 663 { |
664 // Do conditional reinitialization. | |
665 rtc::CritScope cs_render(&crit_render_); | |
666 RETURN_ON_ERR(MaybeInitialize(processing_config)); | |
667 } | |
668 rtc::CritScope cs_capture(&crit_capture_); | |
617 if (frame->samples_per_channel_ != | 669 if (frame->samples_per_channel_ != |
618 shared_state_.api_format_.input_stream().num_frames()) { | 670 shared_state_.api_format_.input_stream().num_frames()) { |
619 return kBadDataLengthError; | 671 return kBadDataLengthError; |
620 } | 672 } |
621 | 673 |
622 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 674 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
623 if (debug_file_->Open()) { | 675 if (debug_file_->Open()) { |
624 event_msg_->set_type(audioproc::Event::STREAM); | 676 event_msg_->set_type(audioproc::Event::STREAM); |
625 audioproc::Stream* msg = event_msg_->mutable_stream(); | 677 audioproc::Stream* msg = event_msg_->mutable_stream(); |
626 const size_t data_size = | 678 const size_t data_size = |
(...skipping 95 matching lines...)
722 | 774 |
723 was_stream_delay_set_ = false; | 775 was_stream_delay_set_ = false; |
724 return kNoError; | 776 return kNoError; |
725 } | 777 } |
726 | 778 |
727 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, | 779 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
728 size_t samples_per_channel, | 780 size_t samples_per_channel, |
729 int rev_sample_rate_hz, | 781 int rev_sample_rate_hz, |
730 ChannelLayout layout) { | 782 ChannelLayout layout) { |
731 RTC_DCHECK(render_thread_.CalledOnValidThread()); | 783 RTC_DCHECK(render_thread_.CalledOnValidThread()); |
784 rtc::CritScope cs(&crit_render_); | |
732 const StreamConfig reverse_config = { | 785 const StreamConfig reverse_config = { |
733 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), | 786 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
734 }; | 787 }; |
735 if (samples_per_channel != reverse_config.num_frames()) { | 788 if (samples_per_channel != reverse_config.num_frames()) { |
736 return kBadDataLengthError; | 789 return kBadDataLengthError; |
737 } | 790 } |
738 return AnalyzeReverseStream(data, reverse_config, reverse_config); | 791 return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config); |
739 } | 792 } |
740 | 793 |
741 int AudioProcessingImpl::ProcessReverseStream( | 794 int AudioProcessingImpl::ProcessReverseStream( |
742 const float* const* src, | 795 const float* const* src, |
743 const StreamConfig& reverse_input_config, | 796 const StreamConfig& reverse_input_config, |
744 const StreamConfig& reverse_output_config, | 797 const StreamConfig& reverse_output_config, |
745 float* const* dest) { | 798 float* const* dest) { |
746 RTC_DCHECK(render_thread_.CalledOnValidThread()); | 799 RTC_DCHECK(render_thread_.CalledOnValidThread()); |
747 RETURN_ON_ERR( | 800 rtc::CritScope cs(&crit_render_); |
748 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); | 801 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
802 reverse_output_config)); | |
749 if (is_rev_processed()) { | 803 if (is_rev_processed()) { |
750 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), | 804 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), |
751 dest); | 805 dest); |
752 } else if (rev_conversion_needed()) { | 806 } else if (rev_conversion_needed()) { |
753 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, | 807 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, |
754 reverse_output_config.num_samples()); | 808 reverse_output_config.num_samples()); |
755 } else { | 809 } else { |
756 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | 810 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
757 reverse_input_config.num_channels(), dest); | 811 reverse_input_config.num_channels(), dest); |
758 } | 812 } |
759 | 813 |
760 return kNoError; | 814 return kNoError; |
761 } | 815 } |
762 | 816 |
763 int AudioProcessingImpl::AnalyzeReverseStream( | 817 int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
764 const float* const* src, | 818 const float* const* src, |
765 const StreamConfig& reverse_input_config, | 819 const StreamConfig& reverse_input_config, |
766 const StreamConfig& reverse_output_config) { | 820 const StreamConfig& reverse_output_config) { |
767 CriticalSectionScoped crit_scoped(crit_); | |
768 if (src == NULL) { | 821 if (src == NULL) { |
769 return kNullPointerError; | 822 return kNullPointerError; |
770 } | 823 } |
771 | 824 |
772 if (reverse_input_config.num_channels() <= 0) { | 825 if (reverse_input_config.num_channels() <= 0) { |
773 return kBadNumberChannelsError; | 826 return kBadNumberChannelsError; |
774 } | 827 } |
775 | 828 |
776 ProcessingConfig processing_config = shared_state_.api_format_; | 829 ProcessingConfig processing_config = shared_state_.api_format_; |
777 processing_config.reverse_input_stream() = reverse_input_config; | 830 processing_config.reverse_input_stream() = reverse_input_config; |
778 processing_config.reverse_output_stream() = reverse_output_config; | 831 processing_config.reverse_output_stream() = reverse_output_config; |
779 | 832 |
780 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 833 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
781 assert(reverse_input_config.num_frames() == | 834 assert(reverse_input_config.num_frames() == |
782 shared_state_.api_format_.reverse_input_stream().num_frames()); | 835 shared_state_.api_format_.reverse_input_stream().num_frames()); |
783 | 836 |
784 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 837 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
785 if (debug_file_->Open()) { | 838 if (debug_file_->Open()) { |
786 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 839 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
787 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 840 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
788 const size_t channel_size = | 841 const size_t channel_size = |
789 sizeof(float) * | 842 sizeof(float) * |
790 shared_state_.api_format_.reverse_input_stream().num_frames(); | 843 shared_state_.api_format_.reverse_input_stream().num_frames(); |
791 for (int i = 0; | 844 for (int i = 0; |
792 i < shared_state_.api_format_.reverse_input_stream().num_channels(); | 845 i < shared_state_.api_format_.reverse_input_stream().num_channels(); |
793 ++i) | 846 ++i) |
794 msg->add_channel(src[i], channel_size); | 847 msg->add_channel(src[i], channel_size); |
795 RETURN_ON_ERR(WriteMessageToDebugFile()); | 848 RETURN_ON_ERR(WriteMessageToDebugFile()); |
796 } | 849 } |
797 #endif | 850 #endif |
798 | 851 |
799 render_audio_->CopyFrom(src, | 852 render_audio_->CopyFrom(src, |
800 shared_state_.api_format_.reverse_input_stream()); | 853 shared_state_.api_format_.reverse_input_stream()); |
801 return ProcessReverseStreamLocked(); | 854 return ProcessReverseStreamLocked(); |
802 } | 855 } |
803 | 856 |
804 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 857 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
805 RTC_DCHECK(render_thread_.CalledOnValidThread()); | 858 RTC_DCHECK(render_thread_.CalledOnValidThread()); |
806 RETURN_ON_ERR(AnalyzeReverseStream(frame)); | 859 RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
860 rtc::CritScope cs(&crit_render_); | |
807 if (is_rev_processed()) { | 861 if (is_rev_processed()) { |
808 render_audio_->InterleaveTo(frame, true); | 862 render_audio_->InterleaveTo(frame, true); |
809 } | 863 } |
810 | 864 |
811 return kNoError; | 865 return kNoError; |
812 } | 866 } |
813 | 867 |
814 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | 868 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
815 RTC_DCHECK(render_thread_.CalledOnValidThread()); | 869 RTC_DCHECK(render_thread_.CalledOnValidThread()); |
816 CriticalSectionScoped crit_scoped(crit_); | 870 rtc::CritScope cs(&crit_render_); |
817 if (frame == NULL) { | 871 if (frame == NULL) { |
818 return kNullPointerError; | 872 return kNullPointerError; |
819 } | 873 } |
820 // Must be a native rate. | 874 // Must be a native rate. |
821 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 875 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
822 frame->sample_rate_hz_ != kSampleRate16kHz && | 876 frame->sample_rate_hz_ != kSampleRate16kHz && |
823 frame->sample_rate_hz_ != kSampleRate32kHz && | 877 frame->sample_rate_hz_ != kSampleRate32kHz && |
824 frame->sample_rate_hz_ != kSampleRate48kHz) { | 878 frame->sample_rate_hz_ != kSampleRate48kHz) { |
825 return kBadSampleRateError; | 879 return kBadSampleRateError; |
826 } | 880 } |
(...skipping 10 matching lines...)
837 ProcessingConfig processing_config = shared_state_.api_format_; | 891 ProcessingConfig processing_config = shared_state_.api_format_; |
838 processing_config.reverse_input_stream().set_sample_rate_hz( | 892 processing_config.reverse_input_stream().set_sample_rate_hz( |
839 frame->sample_rate_hz_); | 893 frame->sample_rate_hz_); |
840 processing_config.reverse_input_stream().set_num_channels( | 894 processing_config.reverse_input_stream().set_num_channels( |
841 frame->num_channels_); | 895 frame->num_channels_); |
842 processing_config.reverse_output_stream().set_sample_rate_hz( | 896 processing_config.reverse_output_stream().set_sample_rate_hz( |
843 frame->sample_rate_hz_); | 897 frame->sample_rate_hz_); |
844 processing_config.reverse_output_stream().set_num_channels( | 898 processing_config.reverse_output_stream().set_num_channels( |
845 frame->num_channels_); | 899 frame->num_channels_); |
846 | 900 |
847 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 901 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
848 if (frame->samples_per_channel_ != | 902 if (frame->samples_per_channel_ != |
849 shared_state_.api_format_.reverse_input_stream().num_frames()) { | 903 shared_state_.api_format_.reverse_input_stream().num_frames()) { |
850 return kBadDataLengthError; | 904 return kBadDataLengthError; |
851 } | 905 } |
852 | 906 |
853 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 907 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
854 if (debug_file_->Open()) { | 908 if (debug_file_->Open()) { |
855 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 909 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
856 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 910 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
857 const size_t data_size = | 911 const size_t data_size = |
858 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 912 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
859 msg->set_data(frame->data_, data_size); | 913 msg->set_data(frame->data_, data_size); |
860 RETURN_ON_ERR(WriteMessageToDebugFile()); | 914 RETURN_ON_ERR(WriteMessageToDebugFile()); |
861 } | 915 } |
862 #endif | 916 #endif |
863 render_audio_->DeinterleaveFrom(frame); | 917 render_audio_->DeinterleaveFrom(frame); |
864 return ProcessReverseStreamLocked(); | 918 return ProcessReverseStreamLocked(); |
865 } | 919 } |
866 | 920 |
867 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 921 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
868 AudioBuffer* ra = render_audio_.get(); // For brevity. | 922 AudioBuffer* ra = render_audio_.get(); // For brevity. |
869 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { | 923 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { |
870 ra->SplitIntoFrequencyBands(); | 924 ra->SplitIntoFrequencyBands(); |
871 } | 925 } |
872 | 926 |
873 if (intelligibility_enabled_) { | 927 if (intelligibility_enabled_) { |
928 // Currently run in single-threaded mode when the intelligibility | |
929 // enhancer is activated. | |
930 // TODO(peah): Fix to be properly multi-threaded. | |
931 rtc::CritScope cs(&crit_capture_); | |
874 intelligibility_enhancer_->ProcessRenderAudio( | 932 intelligibility_enhancer_->ProcessRenderAudio( |
875 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); | 933 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); |
876 } | 934 } |
877 | 935 |
878 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); | 936 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); |
879 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); | 937 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); |
880 if (!use_new_agc_) { | 938 if (!use_new_agc_) { |
881 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); | 939 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); |
882 } | 940 } |
883 | 941 |
884 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && | 942 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && |
885 is_rev_processed()) { | 943 is_rev_processed()) { |
886 ra->MergeFrequencyBands(); | 944 ra->MergeFrequencyBands(); |
887 } | 945 } |
888 | 946 |
889 return kNoError; | 947 return kNoError; |
890 } | 948 } |
891 | 949 |
892 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 950 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
893 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 951 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
952 rtc::CritScope cs(&crit_capture_); | |
894 Error retval = kNoError; | 953 Error retval = kNoError; |
895 was_stream_delay_set_ = true; | 954 was_stream_delay_set_ = true; |
896 delay += delay_offset_ms_; | 955 delay += delay_offset_ms_; |
897 | 956 |
898 if (delay < 0) { | 957 if (delay < 0) { |
899 delay = 0; | 958 delay = 0; |
900 retval = kBadStreamParameterWarning; | 959 retval = kBadStreamParameterWarning; |
901 } | 960 } |
902 | 961 |
903 // TODO(ajm): the max is rather arbitrarily chosen; investigate. | 962 // TODO(ajm): the max is rather arbitrarily chosen; investigate. |
904 if (delay > 500) { | 963 if (delay > 500) { |
905 delay = 500; | 964 delay = 500; |
906 retval = kBadStreamParameterWarning; | 965 retval = kBadStreamParameterWarning; |
907 } | 966 } |
908 | 967 |
909 stream_delay_ms_ = delay; | 968 stream_delay_ms_ = delay; |
910 return retval; | 969 return retval; |
911 } | 970 } |
912 | 971 |
913 int AudioProcessingImpl::stream_delay_ms() const { | 972 int AudioProcessingImpl::stream_delay_ms() const { |
914 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 973 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
974 rtc::CritScope cs(&crit_capture_); | |
915 return stream_delay_ms_; | 975 return stream_delay_ms_; |
916 } | 976 } |
917 | 977 |
918 bool AudioProcessingImpl::was_stream_delay_set() const { | 978 bool AudioProcessingImpl::was_stream_delay_set() const { |
919 return was_stream_delay_set_; | 979 return was_stream_delay_set_; |
920 } | 980 } |
921 | 981 |
922 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { | 982 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { |
923 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 983 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
984 rtc::CritScope cs(&crit_capture_); | |
924 key_pressed_ = key_pressed; | 985 key_pressed_ = key_pressed; |
925 } | 986 } |
926 | 987 |
927 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 988 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
928 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 989 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
929 CriticalSectionScoped crit_scoped(crit_); | 990 rtc::CritScope cs(&crit_capture_); |
930 delay_offset_ms_ = offset; | 991 delay_offset_ms_ = offset; |
931 } | 992 } |
932 | 993 |
933 int AudioProcessingImpl::delay_offset_ms() const { | 994 int AudioProcessingImpl::delay_offset_ms() const { |
934 RTC_DCHECK(capture_thread_.CalledOnValidThread()); | 995 RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
996 rtc::CritScope cs(&crit_capture_); | |
935 return delay_offset_ms_; | 997 return delay_offset_ms_; |
936 } | 998 } |
937 | 999 |
938 int AudioProcessingImpl::StartDebugRecording( | 1000 int AudioProcessingImpl::StartDebugRecording( |
939 const char filename[AudioProcessing::kMaxFilenameSize]) { | 1001 const char filename[AudioProcessing::kMaxFilenameSize]) { |
940 CriticalSectionScoped crit_scoped(crit_); | 1002 // Run in a single-threaded manner. |
1003 rtc::CritScope cs_render(&crit_render_); | |
1004 rtc::CritScope cs_capture(&crit_capture_); | |
1005 | |
941 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1006 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
942 | 1007 |
943 if (filename == NULL) { | 1008 if (filename == NULL) { |
944 return kNullPointerError; | 1009 return kNullPointerError; |
945 } | 1010 } |
946 | 1011 |
947 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1012 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
948 // Stop any ongoing recording. | 1013 // Stop any ongoing recording. |
949 if (debug_file_->Open()) { | 1014 if (debug_file_->Open()) { |
950 if (debug_file_->CloseFile() == -1) { | 1015 if (debug_file_->CloseFile() == -1) { |
951 return kFileError; | 1016 return kFileError; |
952 } | 1017 } |
953 } | 1018 } |
954 | 1019 |
955 if (debug_file_->OpenFile(filename, false) == -1) { | 1020 if (debug_file_->OpenFile(filename, false) == -1) { |
956 debug_file_->CloseFile(); | 1021 debug_file_->CloseFile(); |
957 return kFileError; | 1022 return kFileError; |
958 } | 1023 } |
959 | 1024 |
960 RETURN_ON_ERR(WriteConfigMessage(true)); | 1025 RETURN_ON_ERR(WriteConfigMessage(true)); |
961 RETURN_ON_ERR(WriteInitMessage()); | 1026 RETURN_ON_ERR(WriteInitMessage()); |
962 return kNoError; | 1027 return kNoError; |
963 #else | 1028 #else |
964 return kUnsupportedFunctionError; | 1029 return kUnsupportedFunctionError; |
965 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1030 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
966 } | 1031 } |
967 | 1032 |
968 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { | 1033 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
969 CriticalSectionScoped crit_scoped(crit_); | 1034 // Run in a single-threaded manner. |
1035 rtc::CritScope cs_render(&crit_render_); | |
1036 rtc::CritScope cs_capture(&crit_capture_); | |
970 | 1037 |
971 if (handle == NULL) { | 1038 if (handle == NULL) { |
972 return kNullPointerError; | 1039 return kNullPointerError; |
973 } | 1040 } |
974 | 1041 |
975 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1042 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
976 // Stop any ongoing recording. | 1043 // Stop any ongoing recording. |
977 if (debug_file_->Open()) { | 1044 if (debug_file_->Open()) { |
978 if (debug_file_->CloseFile() == -1) { | 1045 if (debug_file_->CloseFile() == -1) { |
979 return kFileError; | 1046 return kFileError; |
980 } | 1047 } |
981 } | 1048 } |
982 | 1049 |
983 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { | 1050 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { |
984 return kFileError; | 1051 return kFileError; |
985 } | 1052 } |
986 | 1053 |
987 RETURN_ON_ERR(WriteConfigMessage(true)); | 1054 RETURN_ON_ERR(WriteConfigMessage(true)); |
988 RETURN_ON_ERR(WriteInitMessage()); | 1055 RETURN_ON_ERR(WriteInitMessage()); |
989 return kNoError; | 1056 return kNoError; |
990 #else | 1057 #else |
991 return kUnsupportedFunctionError; | 1058 return kUnsupportedFunctionError; |
992 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1059 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
993 } | 1060 } |
994 | 1061 |
995 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | 1062 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
996 rtc::PlatformFile handle) { | 1063 rtc::PlatformFile handle) { |
1064 // Run in a single-threaded manner. | |
1065 rtc::CritScope cs_render(&crit_render_); | |
1066 rtc::CritScope cs_capture(&crit_capture_); | |
997 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1067 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
998 return StartDebugRecording(stream); | 1068 return StartDebugRecording(stream); |
999 } | 1069 } |
1000 | 1070 |
1001 int AudioProcessingImpl::StopDebugRecording() { | 1071 int AudioProcessingImpl::StopDebugRecording() { |
1002 CriticalSectionScoped crit_scoped(crit_); | 1072 // Run in a single-threaded manner. |
1073 rtc::CritScope cs_render(&crit_render_); | |
1074 rtc::CritScope cs_capture(&crit_capture_); | |
1003 | 1075 |
1004 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1076 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1005 // We just return if recording hasn't started. | 1077 // We just return if recording hasn't started. |
1006 if (debug_file_->Open()) { | 1078 if (debug_file_->Open()) { |
1007 if (debug_file_->CloseFile() == -1) { | 1079 if (debug_file_->CloseFile() == -1) { |
1008 return kFileError; | 1080 return kFileError; |
1009 } | 1081 } |
1010 } | 1082 } |
1011 return kNoError; | 1083 return kNoError; |
1012 #else | 1084 #else |
(...skipping 177 matching lines...)
1190 if (aec_system_delay_jumps_ == -1) { | 1262 if (aec_system_delay_jumps_ == -1) { |
1191 aec_system_delay_jumps_ = 0; // Activate counter if needed. | 1263 aec_system_delay_jumps_ = 0; // Activate counter if needed. |
1192 } | 1264 } |
1193 aec_system_delay_jumps_++; | 1265 aec_system_delay_jumps_++; |
1194 } | 1266 } |
1195 last_aec_system_delay_ms_ = aec_system_delay_ms; | 1267 last_aec_system_delay_ms_ = aec_system_delay_ms; |
1196 } | 1268 } |
1197 } | 1269 } |
1198 | 1270 |
1199 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { | 1271 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
1200 CriticalSectionScoped crit_scoped(crit_); | 1272 // Run in a single-threaded manner. |
1273 rtc::CritScope cs_render(&crit_render_); | |
1274 rtc::CritScope cs_capture(&crit_capture_); | |
1275 | |
1201 if (stream_delay_jumps_ > -1) { | 1276 if (stream_delay_jumps_ > -1) { |
1202 RTC_HISTOGRAM_ENUMERATION( | 1277 RTC_HISTOGRAM_ENUMERATION( |
1203 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", | 1278 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", |
1204 stream_delay_jumps_, 51); | 1279 stream_delay_jumps_, 51); |
1205 } | 1280 } |
1206 stream_delay_jumps_ = -1; | 1281 stream_delay_jumps_ = -1; |
1207 last_stream_delay_ms_ = 0; | 1282 last_stream_delay_ms_ = 0; |
1208 | 1283 |
1209 if (aec_system_delay_jumps_ > -1) { | 1284 if (aec_system_delay_jumps_ > -1) { |
1210 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1285 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
(...skipping 92 matching lines...)
1303 | 1378 |
1304 event_msg_->set_type(audioproc::Event::CONFIG); | 1379 event_msg_->set_type(audioproc::Event::CONFIG); |
1305 event_msg_->mutable_config()->CopyFrom(config); | 1380 event_msg_->mutable_config()->CopyFrom(config); |
1306 | 1381 |
1307 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1382 RETURN_ON_ERR(WriteMessageToDebugFile()); |
1308 return kNoError; | 1383 return kNoError; |
1309 } | 1384 } |
1310 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1385 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1311 | 1386 |
1312 } // namespace webrtc | 1387 } // namespace webrtc |