Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_processing/audio_processing_impl.cc

Issue 1424663003: Lock scheme #8: Introduced the new locking scheme (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@add_threadcheckers_CL
Patch Set: Merged changes from Lock scheme #9: WIP:Adding lock and thread annotations to the APM submodules in… Created 5 years ago
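For orientation: the scheme introduced here splits the old single crit_ lock into two rtc::CriticalSection members, crit_render_ and crit_capture_, which are acquired with rtc::CritScope on the render and capture call paths and acquired together (render first) when the APM is initialized or reconfigured. The snippet below is a minimal sketch of that pattern only; the class and method names are hypothetical, and just the lock members, the scoped-lock usage, and the acquisition order mirror the diff that follows.

// Hypothetical illustration; not part of the patch.
// Assumed header path for rtc::CriticalSection / rtc::CritScope.
#include "webrtc/base/criticalsection.h"

class TwoLockScheme {
 public:
  // Capture-side entry point: serialized against other capture calls.
  void ProcessCapture() {
    rtc::CritScope cs_capture(&crit_capture_);
    // ... touch capture-side state here ...
  }

  // Render-side entry point: serialized against other render calls.
  void ProcessRender() {
    rtc::CritScope cs_render(&crit_render_);
    // ... touch render-side state here ...
  }

  // Initialization/reconfiguration runs single-threaded by holding both
  // locks, render before capture, matching Initialize()/SetExtraOptions()
  // in the diff below.
  void Reconfigure() {
    rtc::CritScope cs_render(&crit_render_);
    rtc::CritScope cs_capture(&crit_capture_);
    // ... update shared formats/state here ...
  }

 private:
  rtc::CriticalSection crit_render_;
  rtc::CriticalSection crit_capture_;
};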
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 19 matching lines...)
30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
31 #include "webrtc/modules/audio_processing/gain_control_impl.h" 31 #include "webrtc/modules/audio_processing/gain_control_impl.h"
32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" 32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" 34 #include "webrtc/modules/audio_processing/level_estimator_impl.h"
35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" 35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h"
36 #include "webrtc/modules/audio_processing/processing_component.h" 36 #include "webrtc/modules/audio_processing/processing_component.h"
37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" 37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h"
38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" 38 #include "webrtc/modules/audio_processing/voice_detection_impl.h"
39 #include "webrtc/modules/include/module_common_types.h" 39 #include "webrtc/modules/include/module_common_types.h"
40 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
41 #include "webrtc/system_wrappers/include/file_wrapper.h" 40 #include "webrtc/system_wrappers/include/file_wrapper.h"
42 #include "webrtc/system_wrappers/include/logging.h" 41 #include "webrtc/system_wrappers/include/logging.h"
43 #include "webrtc/system_wrappers/include/metrics.h" 42 #include "webrtc/system_wrappers/include/metrics.h"
44 43
45 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 44 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
46 // Files generated at build-time by the protobuf compiler. 45 // Files generated at build-time by the protobuf compiler.
47 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD 46 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD
48 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" 47 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
49 #else 48 #else
50 #include "webrtc/audio_processing/debug.pb.h" 49 #include "webrtc/audio_processing/debug.pb.h"
(...skipping 17 matching lines...)
68 case AudioProcessing::kStereo: 67 case AudioProcessing::kStereo:
69 return false; 68 return false;
70 case AudioProcessing::kMonoAndKeyboard: 69 case AudioProcessing::kMonoAndKeyboard:
71 case AudioProcessing::kStereoAndKeyboard: 70 case AudioProcessing::kStereoAndKeyboard:
72 return true; 71 return true;
73 } 72 }
74 73
75 assert(false); 74 assert(false);
76 return false; 75 return false;
77 } 76 }
77 } // namespace
78 78
79 } // namespace 79 struct ApmPublicSubmodules {
80 ApmPublicSubmodules()
81 : echo_cancellation(nullptr),
82 echo_control_mobile(nullptr),
83 gain_control(nullptr),
84 high_pass_filter(nullptr),
85 level_estimator(nullptr),
86 noise_suppression(nullptr),
87 voice_detection(nullptr) {}
88 // Accessed externally of APM without any lock acquired.
89 EchoCancellationImpl* echo_cancellation;
90 EchoControlMobileImpl* echo_control_mobile;
91 GainControlImpl* gain_control;
92 HighPassFilterImpl* high_pass_filter;
93 LevelEstimatorImpl* level_estimator;
94 NoiseSuppressionImpl* noise_suppression;
95 VoiceDetectionImpl* voice_detection;
96 rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc;
97
98 // Accessed internally from both render and capture.
99 rtc::scoped_ptr<TransientSuppressor> transient_suppressor;
100 rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer;
101 };
102
103 struct ApmPrivateSubmodules {
104 explicit ApmPrivateSubmodules(Beamformer<float>* beamformer)
105 : beamformer(beamformer) {}
106 // Accessed internally from capture or during initialization
107 std::list<ProcessingComponent*> component_list;
108 rtc::scoped_ptr<Beamformer<float>> beamformer;
109 rtc::scoped_ptr<AgcManagerDirect> agc_manager;
110 };
80 111
81 // Throughout webrtc, it's assumed that success is represented by zero. 112 // Throughout webrtc, it's assumed that success is represented by zero.
82 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); 113 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
83 114
84 // This class has two main functionalities: 115 // This class has two main functionalities:
85 // 116 //
86 // 1) It is returned instead of the real GainControl after the new AGC has been 117 // 1) It is returned instead of the real GainControl after the new AGC has been
87 // enabled in order to prevent an outside user from overriding compression 118 // enabled in order to prevent an outside user from overriding compression
88 // settings. It doesn't do anything in its implementation, except for 119 // settings. It doesn't do anything in its implementation, except for
89 // delegating the const methods and Enable calls to the real GainControl, so 120 // delegating the const methods and Enable calls to the real GainControl, so
(...skipping 75 matching lines...)
165 196
166 AudioProcessing* AudioProcessing::Create(const Config& config) { 197 AudioProcessing* AudioProcessing::Create(const Config& config) {
167 return Create(config, nullptr); 198 return Create(config, nullptr);
168 } 199 }
169 200
170 AudioProcessing* AudioProcessing::Create(const Config& config, 201 AudioProcessing* AudioProcessing::Create(const Config& config,
171 Beamformer<float>* beamformer) { 202 Beamformer<float>* beamformer) {
172 AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer); 203 AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
173 if (apm->Initialize() != kNoError) { 204 if (apm->Initialize() != kNoError) {
174 delete apm; 205 delete apm;
175 apm = NULL; 206 apm = nullptr;
176 } 207 }
177 208
178 return apm; 209 return apm;
179 } 210 }
180 211
181 AudioProcessingImpl::AudioProcessingImpl(const Config& config) 212 AudioProcessingImpl::AudioProcessingImpl(const Config& config)
182 : AudioProcessingImpl(config, nullptr) {} 213 : AudioProcessingImpl(config, nullptr) {}
183 214
184 AudioProcessingImpl::AudioProcessingImpl(const Config& config, 215 AudioProcessingImpl::AudioProcessingImpl(const Config& config,
185 Beamformer<float>* beamformer) 216 Beamformer<float>* beamformer)
186 : echo_cancellation_(NULL), 217 : public_submodules_(new ApmPublicSubmodules()),
187 echo_control_mobile_(NULL), 218 private_submodules_(new ApmPrivateSubmodules(beamformer)),
188 gain_control_(NULL), 219 constants_(config.Get<ExperimentalAgc>().startup_min_volume,
189 high_pass_filter_(NULL), 220 config.Get<Beamforming>().array_geometry,
190 level_estimator_(NULL), 221 config.Get<Beamforming>().target_direction,
191 noise_suppression_(NULL), 222 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
192 voice_detection_(NULL), 223 false,
193 crit_(CriticalSectionWrapper::CreateCriticalSection()), 224 #else
194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 225 config.Get<ExperimentalAgc>().enabled,
195 debug_file_(FileWrapper::Create()),
196 event_msg_(new audioproc::Event()),
197 #endif 226 #endif
198 fwd_proc_format_(kSampleRate16kHz), 227 config.Get<Intelligibility>().enabled,
199 rev_proc_format_(kSampleRate16kHz, 1), 228 config.Get<Beamforming>().enabled),
200 split_rate_(kSampleRate16kHz), 229
201 stream_delay_ms_(0),
202 delay_offset_ms_(0),
203 was_stream_delay_set_(false),
204 last_stream_delay_ms_(0),
205 last_aec_system_delay_ms_(0),
206 stream_delay_jumps_(-1),
207 aec_system_delay_jumps_(-1),
208 output_will_be_muted_(false),
209 key_pressed_(false),
210 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) 230 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
211 use_new_agc_(false), 231 capture_(false)
212 #else 232 #else
213 use_new_agc_(config.Get<ExperimentalAgc>().enabled), 233 capture_(config.Get<ExperimentalNs>().enabled)
214 #endif 234 #endif
215 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), 235 {
216 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) 236 {
217 transient_suppressor_enabled_(false), 237 rtc::CritScope cs_render(&crit_render_);
218 #else 238 rtc::CritScope cs_capture(&crit_capture_);
219 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled),
220 #endif
221 beamformer_enabled_(config.Get<Beamforming>().enabled),
222 beamformer_(beamformer),
223 array_geometry_(config.Get<Beamforming>().array_geometry),
224 target_direction_(config.Get<Beamforming>().target_direction),
225 intelligibility_enabled_(config.Get<Intelligibility>().enabled) {
226 echo_cancellation_ = new EchoCancellationImpl(this, crit_);
227 component_list_.push_back(echo_cancellation_);
228 239
229 echo_control_mobile_ = new EchoControlMobileImpl(this, crit_); 240 public_submodules_->echo_cancellation = new EchoCancellationImpl(
230 component_list_.push_back(echo_control_mobile_); 241 this, &crit_render_, &crit_capture_);
242 public_submodules_->echo_control_mobile = new EchoControlMobileImpl(
243 this, &crit_render_, &crit_capture_);
244 public_submodules_->gain_control =
245 new GainControlImpl(this, &crit_capture_, &crit_capture_);
246 public_submodules_->high_pass_filter =
247 new HighPassFilterImpl(this, &crit_capture_);
248 public_submodules_->level_estimator =
249 new LevelEstimatorImpl(this, &crit_capture_);
250 public_submodules_->noise_suppression =
251 new NoiseSuppressionImpl(this, &crit_capture_);
252 public_submodules_->voice_detection =
253 new VoiceDetectionImpl(this, &crit_capture_);
254 public_submodules_->gain_control_for_new_agc.reset(
255 new GainControlForNewAgc(public_submodules_->gain_control));
231 256
232 gain_control_ = new GainControlImpl(this, crit_); 257 private_submodules_->component_list.push_back(
233 component_list_.push_back(gain_control_); 258 public_submodules_->echo_cancellation);
234 259 private_submodules_->component_list.push_back(
235 high_pass_filter_ = new HighPassFilterImpl(this, crit_); 260 public_submodules_->echo_control_mobile);
236 component_list_.push_back(high_pass_filter_); 261 private_submodules_->component_list.push_back(
237 262 public_submodules_->gain_control);
238 level_estimator_ = new LevelEstimatorImpl(this, crit_); 263 private_submodules_->component_list.push_back(
239 component_list_.push_back(level_estimator_); 264 public_submodules_->high_pass_filter);
240 265 private_submodules_->component_list.push_back(
241 noise_suppression_ = new NoiseSuppressionImpl(this, crit_); 266 public_submodules_->level_estimator);
242 component_list_.push_back(noise_suppression_); 267 private_submodules_->component_list.push_back(
243 268 public_submodules_->noise_suppression);
244 voice_detection_ = new VoiceDetectionImpl(this, crit_); 269 private_submodules_->component_list.push_back(
245 component_list_.push_back(voice_detection_); 270 public_submodules_->voice_detection);
246 271 }
247 gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_));
248 272
249 SetExtraOptions(config); 273 SetExtraOptions(config);
250 } 274 }
251 275
252 AudioProcessingImpl::~AudioProcessingImpl() { 276 AudioProcessingImpl::~AudioProcessingImpl() {
the sun 2015/11/27 12:25:52 CS not needed here anymore?
peah-webrtc 2015/11/27 12:57:51 The critical section was there in the old locking
the sun 2015/11/27 13:32:28 Yeah, sorry.
peah-webrtc 2015/11/27 13:50:22 Np :-)
253 { 277 // Depends on gain_control_ and
254 CriticalSectionScoped crit_scoped(crit_); 278 // public_submodules_->gain_control_for_new_agc.
255 // Depends on gain_control_ and gain_control_for_new_agc_. 279 private_submodules_->agc_manager.reset();
256 agc_manager_.reset(); 280 // Depends on gain_control_.
257 // Depends on gain_control_. 281 public_submodules_->gain_control_for_new_agc.reset();
258 gain_control_for_new_agc_.reset(); 282 while (!private_submodules_->component_list.empty()) {
259 while (!component_list_.empty()) { 283 ProcessingComponent* component =
260 ProcessingComponent* component = component_list_.front(); 284 private_submodules_->component_list.front();
261 component->Destroy(); 285 component->Destroy();
262 delete component; 286 delete component;
263 component_list_.pop_front(); 287 private_submodules_->component_list.pop_front();
264 } 288 }
265 289
266 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 290 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
267 if (debug_file_->Open()) { 291 if (debug_dump_.debug_file->Open()) {
268 debug_file_->CloseFile(); 292 debug_dump_.debug_file->CloseFile();
269 } 293 }
270 #endif 294 #endif
271 }
272 delete crit_;
273 crit_ = NULL;
274 } 295 }
275 296
276 int AudioProcessingImpl::Initialize() { 297 int AudioProcessingImpl::Initialize() {
277 CriticalSectionScoped crit_scoped(crit_); 298 // Run in a single-threaded manner during initialization.
299 rtc::CritScope cs_render(&crit_render_);
300 rtc::CritScope cs_capture(&crit_capture_);
278 return InitializeLocked(); 301 return InitializeLocked();
279 } 302 }
280 303
281 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, 304 int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
282 int output_sample_rate_hz, 305 int output_sample_rate_hz,
283 int reverse_sample_rate_hz, 306 int reverse_sample_rate_hz,
284 ChannelLayout input_layout, 307 ChannelLayout input_layout,
285 ChannelLayout output_layout, 308 ChannelLayout output_layout,
286 ChannelLayout reverse_layout) { 309 ChannelLayout reverse_layout) {
287 const ProcessingConfig processing_config = { 310 const ProcessingConfig processing_config = {
288 {{input_sample_rate_hz, 311 {{input_sample_rate_hz,
289 ChannelsFromLayout(input_layout), 312 ChannelsFromLayout(input_layout),
290 LayoutHasKeyboard(input_layout)}, 313 LayoutHasKeyboard(input_layout)},
291 {output_sample_rate_hz, 314 {output_sample_rate_hz,
292 ChannelsFromLayout(output_layout), 315 ChannelsFromLayout(output_layout),
293 LayoutHasKeyboard(output_layout)}, 316 LayoutHasKeyboard(output_layout)},
294 {reverse_sample_rate_hz, 317 {reverse_sample_rate_hz,
295 ChannelsFromLayout(reverse_layout), 318 ChannelsFromLayout(reverse_layout),
296 LayoutHasKeyboard(reverse_layout)}, 319 LayoutHasKeyboard(reverse_layout)},
297 {reverse_sample_rate_hz, 320 {reverse_sample_rate_hz,
298 ChannelsFromLayout(reverse_layout), 321 ChannelsFromLayout(reverse_layout),
299 LayoutHasKeyboard(reverse_layout)}}}; 322 LayoutHasKeyboard(reverse_layout)}}};
300 323
301 return Initialize(processing_config); 324 return Initialize(processing_config);
302 } 325 }
303 326
304 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { 327 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
305 CriticalSectionScoped crit_scoped(crit_); 328 // Run in a single-threaded manner during initialization.
329 rtc::CritScope cs_render(&crit_render_);
330 rtc::CritScope cs_capture(&crit_capture_);
306 return InitializeLocked(processing_config); 331 return InitializeLocked(processing_config);
307 } 332 }
308 333
309 int AudioProcessingImpl::MaybeInitializeLockedRender( 334 int AudioProcessingImpl::MaybeInitializeRender(
310 const ProcessingConfig& processing_config) { 335 const ProcessingConfig& processing_config) {
311 return MaybeInitializeLocked(processing_config); 336 return MaybeInitialize(processing_config);
312 } 337 }
313 338
314 int AudioProcessingImpl::MaybeInitializeLockedCapture( 339 int AudioProcessingImpl::MaybeInitializeCapture(
315 const ProcessingConfig& processing_config) { 340 const ProcessingConfig& processing_config) {
316 return MaybeInitializeLocked(processing_config); 341 return MaybeInitialize(processing_config);
317 } 342 }
318 343
319 // Calls InitializeLocked() if any of the audio parameters have changed from 344 // Calls InitializeLocked() if any of the audio parameters have changed from
320 // their current values. 345 // their current values (needs to be called while holding the crit_render_lock).
321 int AudioProcessingImpl::MaybeInitializeLocked( 346 int AudioProcessingImpl::MaybeInitialize(
322 const ProcessingConfig& processing_config) { 347 const ProcessingConfig& processing_config) {
323 if (processing_config == shared_state_.api_format_) { 348 // Called from both threads. Thread check is therefore not possible.
349 if (processing_config == formats_.api_format) {
324 return kNoError; 350 return kNoError;
325 } 351 }
352
353 rtc::CritScope cs_capture(&crit_capture_);
326 return InitializeLocked(processing_config); 354 return InitializeLocked(processing_config);
327 } 355 }
328 356
329 int AudioProcessingImpl::InitializeLocked() { 357 int AudioProcessingImpl::InitializeLocked() {
330 const int fwd_audio_buffer_channels = 358 const int fwd_audio_buffer_channels =
331 beamformer_enabled_ 359 constants_.beamformer_enabled
332 ? shared_state_.api_format_.input_stream().num_channels() 360 ? formats_.api_format.input_stream().num_channels()
333 : shared_state_.api_format_.output_stream().num_channels(); 361 : formats_.api_format.output_stream().num_channels();
334 const int rev_audio_buffer_out_num_frames = 362 const int rev_audio_buffer_out_num_frames =
335 shared_state_.api_format_.reverse_output_stream().num_frames() == 0 363 formats_.api_format.reverse_output_stream().num_frames() == 0
336 ? rev_proc_format_.num_frames() 364 ? formats_.rev_proc_format.num_frames()
337 : shared_state_.api_format_.reverse_output_stream().num_frames(); 365 : formats_.api_format.reverse_output_stream().num_frames();
338 if (shared_state_.api_format_.reverse_input_stream().num_channels() > 0) { 366 if (formats_.api_format.reverse_input_stream().num_channels() > 0) {
339 render_audio_.reset(new AudioBuffer( 367 render_.render_audio.reset(new AudioBuffer(
340 shared_state_.api_format_.reverse_input_stream().num_frames(), 368 formats_.api_format.reverse_input_stream().num_frames(),
341 shared_state_.api_format_.reverse_input_stream().num_channels(), 369 formats_.api_format.reverse_input_stream().num_channels(),
342 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), 370 formats_.rev_proc_format.num_frames(),
371 formats_.rev_proc_format.num_channels(),
343 rev_audio_buffer_out_num_frames)); 372 rev_audio_buffer_out_num_frames));
344 if (rev_conversion_needed()) { 373 if (rev_conversion_needed()) {
345 render_converter_ = AudioConverter::Create( 374 render_.render_converter = AudioConverter::Create(
346 shared_state_.api_format_.reverse_input_stream().num_channels(), 375 formats_.api_format.reverse_input_stream().num_channels(),
347 shared_state_.api_format_.reverse_input_stream().num_frames(), 376 formats_.api_format.reverse_input_stream().num_frames(),
348 shared_state_.api_format_.reverse_output_stream().num_channels(), 377 formats_.api_format.reverse_output_stream().num_channels(),
349 shared_state_.api_format_.reverse_output_stream().num_frames()); 378 formats_.api_format.reverse_output_stream().num_frames());
350 } else { 379 } else {
351 render_converter_.reset(nullptr); 380 render_.render_converter.reset(nullptr);
352 } 381 }
353 } else { 382 } else {
354 render_audio_.reset(nullptr); 383 render_.render_audio.reset(nullptr);
355 render_converter_.reset(nullptr); 384 render_.render_converter.reset(nullptr);
356 } 385 }
357 capture_audio_.reset( 386 capture_.capture_audio.reset(
358 new AudioBuffer(shared_state_.api_format_.input_stream().num_frames(), 387 new AudioBuffer(formats_.api_format.input_stream().num_frames(),
359 shared_state_.api_format_.input_stream().num_channels(), 388 formats_.api_format.input_stream().num_channels(),
360 fwd_proc_format_.num_frames(), fwd_audio_buffer_channels, 389 capture_nonlocked_.fwd_proc_format.num_frames(),
361 shared_state_.api_format_.output_stream().num_frames())); 390 fwd_audio_buffer_channels,
391 formats_.api_format.output_stream().num_frames()));
362 392
363 // Initialize all components. 393 // Initialize all components.
364 for (auto item : component_list_) { 394 for (auto item : private_submodules_->component_list) {
365 int err = item->Initialize(); 395 int err = item->Initialize();
366 if (err != kNoError) { 396 if (err != kNoError) {
367 return err; 397 return err;
368 } 398 }
369 } 399 }
370 400
371 InitializeExperimentalAgc(); 401 InitializeExperimentalAgc();
372 402
373 InitializeTransient(); 403 InitializeTransient();
374 404
375 InitializeBeamformer(); 405 InitializeBeamformer();
376 406
377 InitializeIntelligibility(); 407 InitializeIntelligibility();
378 408
379 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 409 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
380 if (debug_file_->Open()) { 410 if (debug_dump_.debug_file->Open()) {
381 int err = WriteInitMessage(); 411 int err = WriteInitMessage();
382 if (err != kNoError) { 412 if (err != kNoError) {
383 return err; 413 return err;
384 } 414 }
385 } 415 }
386 #endif 416 #endif
387 417
388 return kNoError; 418 return kNoError;
389 } 419 }
390 420
391 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { 421 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
392 // TODO(peah): Refactor to be allowed to verify using thread annotations.
393 for (const auto& stream : config.streams) { 422 for (const auto& stream : config.streams) {
394 if (stream.num_channels() < 0) { 423 if (stream.num_channels() < 0) {
395 return kBadNumberChannelsError; 424 return kBadNumberChannelsError;
396 } 425 }
397 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { 426 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
398 return kBadSampleRateError; 427 return kBadSampleRateError;
399 } 428 }
400 } 429 }
401 430
402 const int num_in_channels = config.input_stream().num_channels(); 431 const int num_in_channels = config.input_stream().num_channels();
403 const int num_out_channels = config.output_stream().num_channels(); 432 const int num_out_channels = config.output_stream().num_channels();
404 433
405 // Need at least one input channel. 434 // Need at least one input channel.
406 // Need either one output channel or as many outputs as there are inputs. 435 // Need either one output channel or as many outputs as there are inputs.
407 if (num_in_channels == 0 || 436 if (num_in_channels == 0 ||
408 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { 437 !(num_out_channels == 1 || num_out_channels == num_in_channels)) {
409 return kBadNumberChannelsError; 438 return kBadNumberChannelsError;
410 } 439 }
411 440
412 if (beamformer_enabled_ && 441 if (constants_.beamformer_enabled && (static_cast<size_t>(num_in_channels) !=
413 (static_cast<size_t>(num_in_channels) != array_geometry_.size() || 442 constants_.array_geometry.size() ||
the sun 2015/11/27 12:25:52 did git cl format do this? I think the old version
peah-webrtc 2015/11/27 12:57:51 Yes, I retried, and got the same result.
the sun 2015/11/27 13:32:28 Acknowledged.
414 num_out_channels > 1)) { 443 num_out_channels > 1)) {
415 return kBadNumberChannelsError; 444 return kBadNumberChannelsError;
416 } 445 }
417 446
418 shared_state_.api_format_ = config; 447 formats_.api_format = config;
419 448
420 // We process at the closest native rate >= min(input rate, output rate)... 449 // We process at the closest native rate >= min(input rate, output rate)...
421 const int min_proc_rate = 450 const int min_proc_rate =
422 std::min(shared_state_.api_format_.input_stream().sample_rate_hz(), 451 std::min(formats_.api_format.input_stream().sample_rate_hz(),
423 shared_state_.api_format_.output_stream().sample_rate_hz()); 452 formats_.api_format.output_stream().sample_rate_hz());
424 int fwd_proc_rate; 453 int fwd_proc_rate;
425 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { 454 for (size_t i = 0; i < kNumNativeSampleRates; ++i) {
426 fwd_proc_rate = kNativeSampleRatesHz[i]; 455 fwd_proc_rate = kNativeSampleRatesHz[i];
427 if (fwd_proc_rate >= min_proc_rate) { 456 if (fwd_proc_rate >= min_proc_rate) {
428 break; 457 break;
429 } 458 }
430 } 459 }
431 // ...with one exception. 460 // ...with one exception.
432 if (echo_control_mobile_->is_enabled() && 461 if (public_submodules_->echo_control_mobile->is_enabled() &&
433 min_proc_rate > kMaxAECMSampleRateHz) { 462 min_proc_rate > kMaxAECMSampleRateHz) {
434 fwd_proc_rate = kMaxAECMSampleRateHz; 463 fwd_proc_rate = kMaxAECMSampleRateHz;
435 } 464 }
436 465
437 fwd_proc_format_ = StreamConfig(fwd_proc_rate); 466 capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate);
438 467
439 // We normally process the reverse stream at 16 kHz. Unless... 468 // We normally process the reverse stream at 16 kHz. Unless...
440 int rev_proc_rate = kSampleRate16kHz; 469 int rev_proc_rate = kSampleRate16kHz;
441 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { 470 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) {
442 // ...the forward stream is at 8 kHz. 471 // ...the forward stream is at 8 kHz.
443 rev_proc_rate = kSampleRate8kHz; 472 rev_proc_rate = kSampleRate8kHz;
444 } else { 473 } else {
445 if (shared_state_.api_format_.reverse_input_stream().sample_rate_hz() == 474 if (formats_.api_format.reverse_input_stream().sample_rate_hz() ==
446 kSampleRate32kHz) { 475 kSampleRate32kHz) {
447 // ...or the input is at 32 kHz, in which case we use the splitting 476 // ...or the input is at 32 kHz, in which case we use the splitting
448 // filter rather than the resampler. 477 // filter rather than the resampler.
449 rev_proc_rate = kSampleRate32kHz; 478 rev_proc_rate = kSampleRate32kHz;
450 } 479 }
451 } 480 }
452 481
453 // Always downmix the reverse stream to mono for analysis. This has been 482 // Always downmix the reverse stream to mono for analysis. This has been
454 // demonstrated to work well for AEC in most practical scenarios. 483 // demonstrated to work well for AEC in most practical scenarios.
455 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); 484 formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1);
456 485
457 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || 486 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz ||
458 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { 487 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) {
459 split_rate_ = kSampleRate16kHz; 488 capture_nonlocked_.split_rate = kSampleRate16kHz;
460 } else { 489 } else {
461 split_rate_ = fwd_proc_format_.sample_rate_hz(); 490 capture_nonlocked_.split_rate =
491 capture_nonlocked_.fwd_proc_format.sample_rate_hz();
462 } 492 }
463 493
464 return InitializeLocked(); 494 return InitializeLocked();
465 } 495 }
466 496
467 void AudioProcessingImpl::SetExtraOptions(const Config& config) { 497 void AudioProcessingImpl::SetExtraOptions(const Config& config) {
468 CriticalSectionScoped crit_scoped(crit_); 498 // Run in a single-threaded manner when setting the extra options.
469 for (auto item : component_list_) { 499 rtc::CritScope cs_render(&crit_render_);
500 rtc::CritScope cs_capture(&crit_capture_);
501 for (auto item : private_submodules_->component_list) {
470 item->SetExtraOptions(config); 502 item->SetExtraOptions(config);
471 } 503 }
472 504
473 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { 505 if (capture_.transient_suppressor_enabled !=
474 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; 506 config.Get<ExperimentalNs>().enabled) {
507 capture_.transient_suppressor_enabled =
508 config.Get<ExperimentalNs>().enabled;
475 InitializeTransient(); 509 InitializeTransient();
476 } 510 }
477 } 511 }
478 512
479
480 int AudioProcessingImpl::proc_sample_rate_hz() const { 513 int AudioProcessingImpl::proc_sample_rate_hz() const {
481 // TODO(peah): Refactor to be allowed to verify using thread annotations. 514 // Used as callback from submodules, hence locking is not allowed.
482 return fwd_proc_format_.sample_rate_hz(); 515 return capture_nonlocked_.fwd_proc_format.sample_rate_hz();
483 } 516 }
484 517
485 int AudioProcessingImpl::proc_split_sample_rate_hz() const { 518 int AudioProcessingImpl::proc_split_sample_rate_hz() const {
486 // TODO(peah): Refactor to be allowed to verify using thread annotations. 519 // Used as callback from submodules, hence locking is not allowed.
487 520 return capture_nonlocked_.split_rate;
488 return split_rate_;
489 } 521 }
490 522
491 int AudioProcessingImpl::num_reverse_channels() const { 523 int AudioProcessingImpl::num_reverse_channels() const {
492 // TODO(peah): Refactor to be allowed to verify using thread annotations. 524 // Used as callback from submodules, hence locking is not allowed.
493 return rev_proc_format_.num_channels(); 525 return formats_.rev_proc_format.num_channels();
494 } 526 }
495 527
496 int AudioProcessingImpl::num_input_channels() const { 528 int AudioProcessingImpl::num_input_channels() const {
497 return shared_state_.api_format_.input_stream().num_channels(); 529 // Used as callback from submodules, hence locking is not allowed.
530 return formats_.api_format.input_stream().num_channels();
498 } 531 }
499 532
500 int AudioProcessingImpl::num_output_channels() const { 533 int AudioProcessingImpl::num_output_channels() const {
501 // TODO(peah): Refactor to be allowed to verify using thread annotations. 534 // Used as callback from submodules, hence locking is not allowed.
502 return shared_state_.api_format_.output_stream().num_channels(); 535 return formats_.api_format.output_stream().num_channels();
503 } 536 }
504 537
505 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { 538 void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
506 CriticalSectionScoped lock(crit_); 539 rtc::CritScope cs(&crit_capture_);
507 output_will_be_muted_ = muted; 540 capture_.output_will_be_muted = muted;
508 if (agc_manager_.get()) { 541 if (private_submodules_->agc_manager.get()) {
509 agc_manager_->SetCaptureMuted(output_will_be_muted_); 542 private_submodules_->agc_manager->SetCaptureMuted(
543 capture_.output_will_be_muted);
510 } 544 }
511 } 545 }
512 546
513 547
514 int AudioProcessingImpl::ProcessStream(const float* const* src, 548 int AudioProcessingImpl::ProcessStream(const float* const* src,
515 size_t samples_per_channel, 549 size_t samples_per_channel,
516 int input_sample_rate_hz, 550 int input_sample_rate_hz,
517 ChannelLayout input_layout, 551 ChannelLayout input_layout,
518 int output_sample_rate_hz, 552 int output_sample_rate_hz,
519 ChannelLayout output_layout, 553 ChannelLayout output_layout,
520 float* const* dest) { 554 float* const* dest) {
521 CriticalSectionScoped crit_scoped(crit_); 555 StreamConfig input_stream;
522 StreamConfig input_stream = shared_state_.api_format_.input_stream(); 556 StreamConfig output_stream;
557 {
558 // Access the formats_.api_format.input_stream beneath the capture lock.
559 // The lock must be released as it is later required in the call
560 // to ProcessStream(,,,);
561 rtc::CritScope cs(&crit_capture_);
562 input_stream = formats_.api_format.input_stream();
563 output_stream = formats_.api_format.output_stream();
564 }
565
523 input_stream.set_sample_rate_hz(input_sample_rate_hz); 566 input_stream.set_sample_rate_hz(input_sample_rate_hz);
524 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); 567 input_stream.set_num_channels(ChannelsFromLayout(input_layout));
525 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); 568 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout));
526
527 StreamConfig output_stream = shared_state_.api_format_.output_stream();
528 output_stream.set_sample_rate_hz(output_sample_rate_hz); 569 output_stream.set_sample_rate_hz(output_sample_rate_hz);
529 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); 570 output_stream.set_num_channels(ChannelsFromLayout(output_layout));
530 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); 571 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout));
531 572
532 if (samples_per_channel != input_stream.num_frames()) { 573 if (samples_per_channel != input_stream.num_frames()) {
533 return kBadDataLengthError; 574 return kBadDataLengthError;
534 } 575 }
535 return ProcessStream(src, input_stream, output_stream, dest); 576 return ProcessStream(src, input_stream, output_stream, dest);
536 } 577 }
537 578
538 int AudioProcessingImpl::ProcessStream(const float* const* src, 579 int AudioProcessingImpl::ProcessStream(const float* const* src,
539 const StreamConfig& input_config, 580 const StreamConfig& input_config,
540 const StreamConfig& output_config, 581 const StreamConfig& output_config,
541 float* const* dest) { 582 float* const* dest) {
542 CriticalSectionScoped crit_scoped(crit_); 583 {
584 // Acquire the capture lock in order to safely call the function
585 // that retrieves the render side data. This function accesses apm
586 // getters that need the capture lock held when being called.
587 rtc::CritScope cs_capture(&crit_capture_);
588 public_submodules_->echo_cancellation->ReadQueuedRenderData();
589 public_submodules_->echo_control_mobile->ReadQueuedRenderData();
590 public_submodules_->gain_control->ReadQueuedRenderData();
591 }
543 if (!src || !dest) { 592 if (!src || !dest) {
544 return kNullPointerError; 593 return kNullPointerError;
545 } 594 }
546 595
547 echo_cancellation_->ReadQueuedRenderData(); 596 ProcessingConfig processing_config = formats_.api_format;
548 echo_control_mobile_->ReadQueuedRenderData();
549 gain_control_->ReadQueuedRenderData();
550
551 ProcessingConfig processing_config = shared_state_.api_format_;
552 processing_config.input_stream() = input_config; 597 processing_config.input_stream() = input_config;
553 processing_config.output_stream() = output_config; 598 processing_config.output_stream() = output_config;
554 599
555 RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config)); 600 {
601 // Do conditional reinitialization.
602 rtc::CritScope cs_render(&crit_render_);
603 RETURN_ON_ERR(MaybeInitializeCapture(processing_config));
604 }
605 rtc::CritScope cs_capture(&crit_capture_);
556 assert(processing_config.input_stream().num_frames() == 606 assert(processing_config.input_stream().num_frames() ==
557 shared_state_.api_format_.input_stream().num_frames()); 607 formats_.api_format.input_stream().num_frames());
558 608
559 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 609 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
560 if (debug_file_->Open()) { 610 if (debug_dump_.debug_file->Open()) {
561 RETURN_ON_ERR(WriteConfigMessage(false)); 611 RETURN_ON_ERR(WriteConfigMessage(false));
562 612
563 event_msg_->set_type(audioproc::Event::STREAM); 613 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
564 audioproc::Stream* msg = event_msg_->mutable_stream(); 614 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
565 const size_t channel_size = 615 const size_t channel_size =
566 sizeof(float) * shared_state_.api_format_.input_stream().num_frames(); 616 sizeof(float) * formats_.api_format.input_stream().num_frames();
567 for (int i = 0; i < shared_state_.api_format_.input_stream().num_channels(); 617 for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i)
568 ++i)
569 msg->add_input_channel(src[i], channel_size); 618 msg->add_input_channel(src[i], channel_size);
570 } 619 }
571 #endif 620 #endif
572 621
573 capture_audio_->CopyFrom(src, shared_state_.api_format_.input_stream()); 622 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
574 RETURN_ON_ERR(ProcessStreamLocked()); 623 RETURN_ON_ERR(ProcessStreamLocked());
575 capture_audio_->CopyTo(shared_state_.api_format_.output_stream(), dest); 624 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
576 625
577 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 626 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
578 if (debug_file_->Open()) { 627 if (debug_dump_.debug_file->Open()) {
579 audioproc::Stream* msg = event_msg_->mutable_stream(); 628 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
580 const size_t channel_size = 629 const size_t channel_size =
581 sizeof(float) * shared_state_.api_format_.output_stream().num_frames(); 630 sizeof(float) * formats_.api_format.output_stream().num_frames();
582 for (int i = 0; 631 for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i)
583 i < shared_state_.api_format_.output_stream().num_channels(); ++i)
584 msg->add_output_channel(dest[i], channel_size); 632 msg->add_output_channel(dest[i], channel_size);
585 RETURN_ON_ERR(WriteMessageToDebugFile()); 633 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
634 &crit_debug_, &debug_dump_.capture));
586 } 635 }
587 #endif 636 #endif
588 637
589 return kNoError; 638 return kNoError;
590 } 639 }
591 640
592 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { 641 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
593 CriticalSectionScoped crit_scoped(crit_); 642 {
594 echo_cancellation_->ReadQueuedRenderData(); 643 // Acquire the capture lock in order to safely call the function
595 echo_control_mobile_->ReadQueuedRenderData(); 644 // that retrieves the render side data. This function accesses apm
596 gain_control_->ReadQueuedRenderData(); 645 // getters that need the capture lock held when being called.
646 // The lock needs to be released as
647 // public_submodules_->echo_control_mobile->is_enabled() acquires this lock
648 // as well.
649 rtc::CritScope cs_capture(&crit_capture_);
650 public_submodules_->echo_cancellation->ReadQueuedRenderData();
651 public_submodules_->echo_control_mobile->ReadQueuedRenderData();
652 public_submodules_->gain_control->ReadQueuedRenderData();
653 }
597 654
598 if (!frame) { 655 if (!frame) {
599 return kNullPointerError; 656 return kNullPointerError;
600 } 657 }
601 // Must be a native rate. 658 // Must be a native rate.
602 if (frame->sample_rate_hz_ != kSampleRate8kHz && 659 if (frame->sample_rate_hz_ != kSampleRate8kHz &&
603 frame->sample_rate_hz_ != kSampleRate16kHz && 660 frame->sample_rate_hz_ != kSampleRate16kHz &&
604 frame->sample_rate_hz_ != kSampleRate32kHz && 661 frame->sample_rate_hz_ != kSampleRate32kHz &&
605 frame->sample_rate_hz_ != kSampleRate48kHz) { 662 frame->sample_rate_hz_ != kSampleRate48kHz) {
606 return kBadSampleRateError; 663 return kBadSampleRateError;
607 } 664 }
608 665
609 if (echo_control_mobile_->is_enabled() && 666 if (public_submodules_->echo_control_mobile->is_enabled() &&
610 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { 667 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
611 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; 668 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
612 return kUnsupportedComponentError; 669 return kUnsupportedComponentError;
613 } 670 }
614 671
615 // TODO(ajm): The input and output rates and channels are currently 672 ProcessingConfig processing_config;
616 // constrained to be identical in the int16 interface. 673 {
617 ProcessingConfig processing_config = shared_state_.api_format_; 674 // Acquire lock for the access of api_format.
675 // The lock is released immediately due to the conditional
676 // reinitialization.
677 rtc::CritScope cs_capture(&crit_capture_);
678 // TODO(ajm): The input and output rates and channels are currently
679 // constrained to be identical in the int16 interface.
680 processing_config = formats_.api_format;
681 }
618 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); 682 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
619 processing_config.input_stream().set_num_channels(frame->num_channels_); 683 processing_config.input_stream().set_num_channels(frame->num_channels_);
620 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); 684 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
621 processing_config.output_stream().set_num_channels(frame->num_channels_); 685 processing_config.output_stream().set_num_channels(frame->num_channels_);
622 686
623 RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config)); 687 {
688 // Do conditional reinitialization.
689 rtc::CritScope cs_render(&crit_render_);
690 RETURN_ON_ERR(MaybeInitializeCapture(processing_config));
691 }
692 rtc::CritScope cs_capture(&crit_capture_);
624 if (frame->samples_per_channel_ != 693 if (frame->samples_per_channel_ !=
625 shared_state_.api_format_.input_stream().num_frames()) { 694 formats_.api_format.input_stream().num_frames()) {
626 return kBadDataLengthError; 695 return kBadDataLengthError;
627 } 696 }
628 697
629 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 698 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
630 if (debug_file_->Open()) { 699 if (debug_dump_.debug_file->Open()) {
631 event_msg_->set_type(audioproc::Event::STREAM); 700 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
632 audioproc::Stream* msg = event_msg_->mutable_stream(); 701 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
633 const size_t data_size = 702 const size_t data_size =
634 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 703 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
635 msg->set_input_data(frame->data_, data_size); 704 msg->set_input_data(frame->data_, data_size);
636 } 705 }
637 #endif 706 #endif
638 707
639 capture_audio_->DeinterleaveFrom(frame); 708 capture_.capture_audio->DeinterleaveFrom(frame);
640 RETURN_ON_ERR(ProcessStreamLocked()); 709 RETURN_ON_ERR(ProcessStreamLocked());
641 capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed())); 710 capture_.capture_audio->InterleaveTo(frame,
711 output_copy_needed(is_data_processed()));
642 712
643 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 713 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
644 if (debug_file_->Open()) { 714 if (debug_dump_.debug_file->Open()) {
645 audioproc::Stream* msg = event_msg_->mutable_stream(); 715 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
646 const size_t data_size = 716 const size_t data_size =
647 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 717 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
648 msg->set_output_data(frame->data_, data_size); 718 msg->set_output_data(frame->data_, data_size);
649 RETURN_ON_ERR(WriteMessageToDebugFile()); 719 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
720 &crit_debug_, &debug_dump_.capture));
650 } 721 }
651 #endif 722 #endif
652 723
653 return kNoError; 724 return kNoError;
654 } 725 }
655 726
656 int AudioProcessingImpl::ProcessStreamLocked() { 727 int AudioProcessingImpl::ProcessStreamLocked() {
657 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 728 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
658 if (debug_file_->Open()) { 729 if (debug_dump_.debug_file->Open()) {
659 audioproc::Stream* msg = event_msg_->mutable_stream(); 730 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
660 msg->set_delay(stream_delay_ms_); 731 msg->set_delay(capture_nonlocked_.stream_delay_ms);
661 msg->set_drift(echo_cancellation_->stream_drift_samples()); 732 msg->set_drift(
733 public_submodules_->echo_cancellation->stream_drift_samples());
662 msg->set_level(gain_control()->stream_analog_level()); 734 msg->set_level(gain_control()->stream_analog_level());
663 msg->set_keypress(key_pressed_); 735 msg->set_keypress(capture_.key_pressed);
664 } 736 }
665 #endif 737 #endif
666 738
667 MaybeUpdateHistograms(); 739 MaybeUpdateHistograms();
668 740
669 AudioBuffer* ca = capture_audio_.get(); // For brevity. 741 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity.
670 742
671 if (use_new_agc_ && gain_control_->is_enabled()) { 743 if (constants_.use_new_agc &&
672 agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(), 744 public_submodules_->gain_control->is_enabled()) {
673 fwd_proc_format_.num_frames()); 745 private_submodules_->agc_manager->AnalyzePreProcess(
746 ca->channels()[0], ca->num_channels(),
747 capture_nonlocked_.fwd_proc_format.num_frames());
674 } 748 }
675 749
676 bool data_processed = is_data_processed(); 750 bool data_processed = is_data_processed();
677 if (analysis_needed(data_processed)) { 751 if (analysis_needed(data_processed)) {
678 ca->SplitIntoFrequencyBands(); 752 ca->SplitIntoFrequencyBands();
679 } 753 }
680 754
681 if (intelligibility_enabled_) { 755 if (constants_.intelligibility_enabled) {
682 intelligibility_enhancer_->AnalyzeCaptureAudio( 756 public_submodules_->intelligibility_enhancer->AnalyzeCaptureAudio(
683 ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels()); 757 ca->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate,
758 ca->num_channels());
684 } 759 }
685 760
686 if (beamformer_enabled_) { 761 if (constants_.beamformer_enabled) {
687 beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); 762 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(),
763 ca->split_data_f());
688 ca->set_num_channels(1); 764 ca->set_num_channels(1);
689 } 765 }
690 766
691 RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); 767 RETURN_ON_ERR(public_submodules_->high_pass_filter->ProcessCaptureAudio(ca));
692 RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); 768 RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca));
693 RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); 769 RETURN_ON_ERR(public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca));
694 RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); 770 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessCaptureAudio(ca));
695 771
696 if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) { 772 if (public_submodules_->echo_control_mobile->is_enabled() &&
773 public_submodules_->noise_suppression->is_enabled()) {
697 ca->CopyLowPassToReference(); 774 ca->CopyLowPassToReference();
698 } 775 }
699 RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca)); 776 RETURN_ON_ERR(public_submodules_->noise_suppression->ProcessCaptureAudio(ca));
700 RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca)); 777 RETURN_ON_ERR(
701 RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca)); 778 public_submodules_->echo_control_mobile->ProcessCaptureAudio(ca));
779 RETURN_ON_ERR(public_submodules_->voice_detection->ProcessCaptureAudio(ca));
702 780
703 if (use_new_agc_ && gain_control_->is_enabled() && 781 if (constants_.use_new_agc &&
704 (!beamformer_enabled_ || beamformer_->is_target_present())) { 782 public_submodules_->gain_control->is_enabled() &&
705 agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz], 783 (!constants_.beamformer_enabled ||
706 ca->num_frames_per_band(), split_rate_); 784 private_submodules_->beamformer->is_target_present())) {
785 private_submodules_->agc_manager->Process(
786 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(),
787 capture_nonlocked_.split_rate);
707 } 788 }
708 RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca)); 789 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca));
709 790
710 if (synthesis_needed(data_processed)) { 791 if (synthesis_needed(data_processed)) {
711 ca->MergeFrequencyBands(); 792 ca->MergeFrequencyBands();
712 } 793 }
713 794
714 // TODO(aluebs): Investigate if the transient suppression placement should be 795 // TODO(aluebs): Investigate if the transient suppression placement should be
715 // before or after the AGC. 796 // before or after the AGC.
716 if (transient_suppressor_enabled_) { 797 if (capture_.transient_suppressor_enabled) {
717 float voice_probability = 798 float voice_probability =
718 agc_manager_.get() ? agc_manager_->voice_probability() : 1.f; 799 private_submodules_->agc_manager.get()
800 ? private_submodules_->agc_manager->voice_probability()
801 : 1.f;
719 802
720 transient_suppressor_->Suppress( 803 public_submodules_->transient_suppressor->Suppress(
721 ca->channels_f()[0], ca->num_frames(), ca->num_channels(), 804 ca->channels_f()[0], ca->num_frames(), ca->num_channels(),
722 ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(), 805 ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(),
723 ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability, 806 ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability,
724 key_pressed_); 807 capture_.key_pressed);
725 } 808 }
726 809
727 // The level estimator operates on the recombined data. 810 // The level estimator operates on the recombined data.
728 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); 811 RETURN_ON_ERR(public_submodules_->level_estimator->ProcessStream(ca));
729 812
730 was_stream_delay_set_ = false; 813 capture_.was_stream_delay_set = false;
731 return kNoError; 814 return kNoError;
732 } 815 }
733 816
734 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, 817 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
735 size_t samples_per_channel, 818 size_t samples_per_channel,
736 int rev_sample_rate_hz, 819 int rev_sample_rate_hz,
737 ChannelLayout layout) { 820 ChannelLayout layout) {
821 rtc::CritScope cs(&crit_render_);
738 const StreamConfig reverse_config = { 822 const StreamConfig reverse_config = {
739 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), 823 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout),
740 }; 824 };
741 if (samples_per_channel != reverse_config.num_frames()) { 825 if (samples_per_channel != reverse_config.num_frames()) {
742 return kBadDataLengthError; 826 return kBadDataLengthError;
743 } 827 }
744 return AnalyzeReverseStream(data, reverse_config, reverse_config); 828 return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config);
745 } 829 }
746 830
747 int AudioProcessingImpl::ProcessReverseStream( 831 int AudioProcessingImpl::ProcessReverseStream(
748 const float* const* src, 832 const float* const* src,
749 const StreamConfig& reverse_input_config, 833 const StreamConfig& reverse_input_config,
750 const StreamConfig& reverse_output_config, 834 const StreamConfig& reverse_output_config,
751 float* const* dest) { 835 float* const* dest) {
752 RETURN_ON_ERR( 836 rtc::CritScope cs(&crit_render_);
753 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); 837 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config,
838 reverse_output_config));
754 if (is_rev_processed()) { 839 if (is_rev_processed()) {
755 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), 840 render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(),
756 dest); 841 dest);
757 } else if (render_check_rev_conversion_needed()) { 842 } else if (render_check_rev_conversion_needed()) {
758 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, 843 render_.render_converter->Convert(src, reverse_input_config.num_samples(),
759 reverse_output_config.num_samples()); 844 dest,
845 reverse_output_config.num_samples());
760 } else { 846 } else {
761 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), 847 CopyAudioIfNeeded(src, reverse_input_config.num_frames(),
762 reverse_input_config.num_channels(), dest); 848 reverse_input_config.num_channels(), dest);
763 } 849 }
764 850
765 return kNoError; 851 return kNoError;
766 } 852 }
767 853
768 int AudioProcessingImpl::AnalyzeReverseStream( 854 int AudioProcessingImpl::AnalyzeReverseStreamLocked(
769 const float* const* src, 855 const float* const* src,
770 const StreamConfig& reverse_input_config, 856 const StreamConfig& reverse_input_config,
771 const StreamConfig& reverse_output_config) { 857 const StreamConfig& reverse_output_config) {
772 CriticalSectionScoped crit_scoped(crit_); 858 if (src == nullptr) {
773 if (src == NULL) {
774 return kNullPointerError; 859 return kNullPointerError;
775 } 860 }
776 861
777 if (reverse_input_config.num_channels() <= 0) { 862 if (reverse_input_config.num_channels() <= 0) {
778 return kBadNumberChannelsError; 863 return kBadNumberChannelsError;
779 } 864 }
780 865
781 ProcessingConfig processing_config = shared_state_.api_format_; 866 ProcessingConfig processing_config = formats_.api_format;
782 processing_config.reverse_input_stream() = reverse_input_config; 867 processing_config.reverse_input_stream() = reverse_input_config;
783 processing_config.reverse_output_stream() = reverse_output_config; 868 processing_config.reverse_output_stream() = reverse_output_config;
784 869
785 RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config)); 870 RETURN_ON_ERR(MaybeInitializeRender(processing_config));
786 assert(reverse_input_config.num_frames() == 871 assert(reverse_input_config.num_frames() ==
787 shared_state_.api_format_.reverse_input_stream().num_frames()); 872 formats_.api_format.reverse_input_stream().num_frames());
788 873
789 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 874 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
790 if (debug_file_->Open()) { 875 if (debug_dump_.debug_file->Open()) {
791 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); 876 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
792 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); 877 audioproc::ReverseStream* msg =
878 debug_dump_.render.event_msg->mutable_reverse_stream();
793 const size_t channel_size = 879 const size_t channel_size =
794 sizeof(float) * 880 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
795 shared_state_.api_format_.reverse_input_stream().num_frames();
796 for (int i = 0; 881 for (int i = 0;
797 i < shared_state_.api_format_.reverse_input_stream().num_channels(); 882 i < formats_.api_format.reverse_input_stream().num_channels(); ++i)
798 ++i)
799 msg->add_channel(src[i], channel_size); 883 msg->add_channel(src[i], channel_size);
800 RETURN_ON_ERR(WriteMessageToDebugFile()); 884 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
885 &crit_debug_, &debug_dump_.render));
801 } 886 }
802 #endif 887 #endif
803 888
804 render_audio_->CopyFrom(src, 889 render_.render_audio->CopyFrom(src,
805 shared_state_.api_format_.reverse_input_stream()); 890 formats_.api_format.reverse_input_stream());
806 return ProcessReverseStreamLocked(); 891 return ProcessReverseStreamLocked();
807 } 892 }
808 893
809 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { 894 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
810 RETURN_ON_ERR(AnalyzeReverseStream(frame)); 895 RETURN_ON_ERR(AnalyzeReverseStream(frame));
896 rtc::CritScope cs(&crit_render_);
811 if (is_rev_processed()) { 897 if (is_rev_processed()) {
812 render_audio_->InterleaveTo(frame, true); 898 render_.render_audio->InterleaveTo(frame, true);
813 } 899 }
814 900
815 return kNoError; 901 return kNoError;
816 } 902 }
817 903
818 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { 904 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
819 CriticalSectionScoped crit_scoped(crit_); 905 rtc::CritScope cs(&crit_render_);
820 if (frame == NULL) { 906 if (frame == nullptr) {
821 return kNullPointerError; 907 return kNullPointerError;
822 } 908 }
823 // Must be a native rate. 909 // Must be a native rate.
824 if (frame->sample_rate_hz_ != kSampleRate8kHz && 910 if (frame->sample_rate_hz_ != kSampleRate8kHz &&
825 frame->sample_rate_hz_ != kSampleRate16kHz && 911 frame->sample_rate_hz_ != kSampleRate16kHz &&
826 frame->sample_rate_hz_ != kSampleRate32kHz && 912 frame->sample_rate_hz_ != kSampleRate32kHz &&
827 frame->sample_rate_hz_ != kSampleRate48kHz) { 913 frame->sample_rate_hz_ != kSampleRate48kHz) {
828 return kBadSampleRateError; 914 return kBadSampleRateError;
829 } 915 }
830 // This interface does not tolerate different forward and reverse rates. 916 // This interface does not tolerate different forward and reverse rates.
831 if (frame->sample_rate_hz_ != 917 if (frame->sample_rate_hz_ !=
832 shared_state_.api_format_.input_stream().sample_rate_hz()) { 918 formats_.api_format.input_stream().sample_rate_hz()) {
833 return kBadSampleRateError; 919 return kBadSampleRateError;
834 } 920 }
835 921
836 if (frame->num_channels_ <= 0) { 922 if (frame->num_channels_ <= 0) {
837 return kBadNumberChannelsError; 923 return kBadNumberChannelsError;
838 } 924 }
839 925
840 ProcessingConfig processing_config = shared_state_.api_format_; 926 ProcessingConfig processing_config = formats_.api_format;
841 processing_config.reverse_input_stream().set_sample_rate_hz( 927 processing_config.reverse_input_stream().set_sample_rate_hz(
842 frame->sample_rate_hz_); 928 frame->sample_rate_hz_);
843 processing_config.reverse_input_stream().set_num_channels( 929 processing_config.reverse_input_stream().set_num_channels(
844 frame->num_channels_); 930 frame->num_channels_);
845 processing_config.reverse_output_stream().set_sample_rate_hz( 931 processing_config.reverse_output_stream().set_sample_rate_hz(
846 frame->sample_rate_hz_); 932 frame->sample_rate_hz_);
847 processing_config.reverse_output_stream().set_num_channels( 933 processing_config.reverse_output_stream().set_num_channels(
848 frame->num_channels_); 934 frame->num_channels_);
849 935
850 RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config)); 936 RETURN_ON_ERR(MaybeInitializeRender(processing_config));
851 if (frame->samples_per_channel_ != 937 if (frame->samples_per_channel_ !=
852 shared_state_.api_format_.reverse_input_stream().num_frames()) { 938 formats_.api_format.reverse_input_stream().num_frames()) {
853 return kBadDataLengthError; 939 return kBadDataLengthError;
854 } 940 }
855 941
856 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 942 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
857 if (debug_file_->Open()) { 943 if (debug_dump_.debug_file->Open()) {
858 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); 944 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
859 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); 945 audioproc::ReverseStream* msg =
946 debug_dump_.render.event_msg->mutable_reverse_stream();
860 const size_t data_size = 947 const size_t data_size =
861 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 948 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
862 msg->set_data(frame->data_, data_size); 949 msg->set_data(frame->data_, data_size);
863 RETURN_ON_ERR(WriteMessageToDebugFile()); 950 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
951 &crit_debug_, &debug_dump_.render));
864 } 952 }
865 #endif 953 #endif
866 render_audio_->DeinterleaveFrom(frame); 954 render_.render_audio->DeinterleaveFrom(frame);
867 return ProcessReverseStreamLocked(); 955 return ProcessReverseStreamLocked();
868 } 956 }
869 957
870 int AudioProcessingImpl::ProcessReverseStreamLocked() { 958 int AudioProcessingImpl::ProcessReverseStreamLocked() {
871 AudioBuffer* ra = render_audio_.get(); // For brevity. 959 AudioBuffer* ra = render_.render_audio.get(); // For brevity.
872 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { 960 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) {
873 ra->SplitIntoFrequencyBands(); 961 ra->SplitIntoFrequencyBands();
874 } 962 }
875 963
876 if (intelligibility_enabled_) { 964 if (constants_.intelligibility_enabled) {
877 intelligibility_enhancer_->ProcessRenderAudio( 965 // Currently run in single-threaded mode when the intelligibility
878 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); 966 // enhancer is activated.
967 // TODO(peah): Fix to be properly multi-threaded.
968 rtc::CritScope cs(&crit_capture_);
969 public_submodules_->intelligibility_enhancer->ProcessRenderAudio(
970 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate,
971 ra->num_channels());
879 } 972 }
880 973
881 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); 974 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra));
882 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); 975 RETURN_ON_ERR(
883 if (!use_new_agc_) { 976 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra));
884 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); 977 if (!constants_.use_new_agc) {
978 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra));
885 } 979 }
886 980
887 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && 981 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz &&
888 is_rev_processed()) { 982 is_rev_processed()) {
889 ra->MergeFrequencyBands(); 983 ra->MergeFrequencyBands();
890 } 984 }
891 985
892 return kNoError; 986 return kNoError;
893 } 987 }
894 988
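The intelligibility branch above is the one spot in the render path that needs capture-side state (capture_nonlocked_.split_rate), so crit_capture_ is taken inside the render call and the TODO notes that this effectively single-threads the section. A hedged, standalone illustration of that nesting, with std::mutex standing in for rtc::CriticalSection and hypothetical names, assuming the locks are always acquired in render-then-capture order:

#include <mutex>

class NestedLockSketch {
 public:
  void ProcessRenderNeedingCaptureState() {
    std::lock_guard<std::mutex> render_lock(render_mutex_);
    // The capture lock is taken while the render lock is still held. A single
    // acquisition order (render before capture) avoids a lock-order deadlock,
    // but it also serializes this section against the capture thread.
    std::lock_guard<std::mutex> capture_lock(capture_mutex_);
    const int rate = split_rate_;
    (void)rate;  // ... run the render-side processing with the snapshot.
  }

 private:
  std::mutex render_mutex_;
  std::mutex capture_mutex_;
  int split_rate_ = 16000;  // hypothetical capture-side field
};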
895 int AudioProcessingImpl::set_stream_delay_ms(int delay) { 989 int AudioProcessingImpl::set_stream_delay_ms(int delay) {
990 rtc::CritScope cs(&crit_capture_);
896 Error retval = kNoError; 991 Error retval = kNoError;
897 was_stream_delay_set_ = true; 992 capture_.was_stream_delay_set = true;
898 delay += delay_offset_ms_; 993 delay += capture_.delay_offset_ms;
899 994
900 if (delay < 0) { 995 if (delay < 0) {
901 delay = 0; 996 delay = 0;
902 retval = kBadStreamParameterWarning; 997 retval = kBadStreamParameterWarning;
903 } 998 }
904 999
905 // TODO(ajm): the max is rather arbitrarily chosen; investigate. 1000 // TODO(ajm): the max is rather arbitrarily chosen; investigate.
906 if (delay > 500) { 1001 if (delay > 500) {
907 delay = 500; 1002 delay = 500;
908 retval = kBadStreamParameterWarning; 1003 retval = kBadStreamParameterWarning;
909 } 1004 }
910 1005
911 stream_delay_ms_ = delay; 1006 capture_nonlocked_.stream_delay_ms = delay;
912 return retval; 1007 return retval;
913 } 1008 }
914 1009
915 int AudioProcessingImpl::stream_delay_ms() const { 1010 int AudioProcessingImpl::stream_delay_ms() const {
916 return stream_delay_ms_; 1011 // Used as callback from submodules, hence locking is not allowed.
1012 return capture_nonlocked_.stream_delay_ms;
917 } 1013 }
918 1014
919 bool AudioProcessingImpl::was_stream_delay_set() const { 1015 bool AudioProcessingImpl::was_stream_delay_set() const {
920 return was_stream_delay_set_; 1016 // Used as callback from submodules, hence locking is not allowed.
1017 return capture_.was_stream_delay_set;
921 } 1018 }
922 1019
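The two getters above keep their "locking is not allowed" comments because submodules call them back while the capture lock is already held by the processing thread; with a non-recursive lock, re-acquiring it there would deadlock. A small self-contained sketch of the situation (std::mutex for illustration, hypothetical class and names):

#include <mutex>

class CallbackGetterSketch {
 public:
  void ProcessCapture() {
    std::lock_guard<std::mutex> lock(capture_mutex_);
    stream_delay_ms_ = 50;
    RunSubmodule();  // the submodule calls stream_delay_ms() below
  }

  // Invoked from submodules while capture_mutex_ is already held by the same
  // thread; taking the (non-recursive) lock again here would deadlock, so the
  // getter reads the value without locking.
  int stream_delay_ms() const { return stream_delay_ms_; }

 private:
  void RunSubmodule() { (void)stream_delay_ms(); }

  std::mutex capture_mutex_;
  int stream_delay_ms_ = 0;
};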
923 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { 1020 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
924 key_pressed_ = key_pressed; 1021 rtc::CritScope cs(&crit_capture_);
1022 capture_.key_pressed = key_pressed;
925 } 1023 }
926 1024
927 void AudioProcessingImpl::set_delay_offset_ms(int offset) { 1025 void AudioProcessingImpl::set_delay_offset_ms(int offset) {
928 CriticalSectionScoped crit_scoped(crit_); 1026 rtc::CritScope cs(&crit_capture_);
929 delay_offset_ms_ = offset; 1027 capture_.delay_offset_ms = offset;
930 } 1028 }
931 1029
932 int AudioProcessingImpl::delay_offset_ms() const { 1030 int AudioProcessingImpl::delay_offset_ms() const {
933 return delay_offset_ms_; 1031 rtc::CritScope cs(&crit_capture_);
1032 return capture_.delay_offset_ms;
934 } 1033 }
935 1034
936 int AudioProcessingImpl::StartDebugRecording( 1035 int AudioProcessingImpl::StartDebugRecording(
937 const char filename[AudioProcessing::kMaxFilenameSize]) { 1036 const char filename[AudioProcessing::kMaxFilenameSize]) {
938 CriticalSectionScoped crit_scoped(crit_); 1037 // Run in a single-threaded manner.
1038 rtc::CritScope cs_render(&crit_render_);
1039 rtc::CritScope cs_capture(&crit_capture_);
939 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); 1040 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, "");
940 1041
941 if (filename == NULL) { 1042 if (filename == nullptr) {
942 return kNullPointerError; 1043 return kNullPointerError;
943 } 1044 }
944 1045
945 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1046 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
946 // Stop any ongoing recording. 1047 // Stop any ongoing recording.
947 if (debug_file_->Open()) { 1048 if (debug_dump_.debug_file->Open()) {
948 if (debug_file_->CloseFile() == -1) { 1049 if (debug_dump_.debug_file->CloseFile() == -1) {
949 return kFileError; 1050 return kFileError;
950 } 1051 }
951 } 1052 }
952 1053
953 if (debug_file_->OpenFile(filename, false) == -1) { 1054 if (debug_dump_.debug_file->OpenFile(filename, false) == -1) {
954 debug_file_->CloseFile(); 1055 debug_dump_.debug_file->CloseFile();
955 return kFileError; 1056 return kFileError;
956 } 1057 }
957 1058
958 RETURN_ON_ERR(WriteConfigMessage(true)); 1059 RETURN_ON_ERR(WriteConfigMessage(true));
959 RETURN_ON_ERR(WriteInitMessage()); 1060 RETURN_ON_ERR(WriteInitMessage());
960 return kNoError; 1061 return kNoError;
961 #else 1062 #else
962 return kUnsupportedFunctionError; 1063 return kUnsupportedFunctionError;
963 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP 1064 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
964 } 1065 }
965 1066
966 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { 1067 int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
967 CriticalSectionScoped crit_scoped(crit_); 1068 // Run in a single-threaded manner.
1069 rtc::CritScope cs_render(&crit_render_);
1070 rtc::CritScope cs_capture(&crit_capture_);
968 1071
969 if (handle == NULL) { 1072 if (handle == nullptr) {
970 return kNullPointerError; 1073 return kNullPointerError;
971 } 1074 }
972 1075
973 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1076 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
974 // Stop any ongoing recording. 1077 // Stop any ongoing recording.
975 if (debug_file_->Open()) { 1078 if (debug_dump_.debug_file->Open()) {
976 if (debug_file_->CloseFile() == -1) { 1079 if (debug_dump_.debug_file->CloseFile() == -1) {
977 return kFileError; 1080 return kFileError;
978 } 1081 }
979 } 1082 }
980 1083
981 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { 1084 if (debug_dump_.debug_file->OpenFromFileHandle(handle, true, false) == -1) {
982 return kFileError; 1085 return kFileError;
983 } 1086 }
984 1087
985 RETURN_ON_ERR(WriteConfigMessage(true)); 1088 RETURN_ON_ERR(WriteConfigMessage(true));
986 RETURN_ON_ERR(WriteInitMessage()); 1089 RETURN_ON_ERR(WriteInitMessage());
987 return kNoError; 1090 return kNoError;
988 #else 1091 #else
989 return kUnsupportedFunctionError; 1092 return kUnsupportedFunctionError;
990 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP 1093 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
991 } 1094 }
992 1095
993 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( 1096 int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
994 rtc::PlatformFile handle) { 1097 rtc::PlatformFile handle) {
1098 // Run in a single-threaded manner.
1099 rtc::CritScope cs_render(&crit_render_);
1100 rtc::CritScope cs_capture(&crit_capture_);
995 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); 1101 FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
996 return StartDebugRecording(stream); 1102 return StartDebugRecording(stream);
997 } 1103 }
998 1104
999 int AudioProcessingImpl::StopDebugRecording() { 1105 int AudioProcessingImpl::StopDebugRecording() {
1000 CriticalSectionScoped crit_scoped(crit_); 1106 // Run in a single-threaded manner.
1107 rtc::CritScope cs_render(&crit_render_);
1108 rtc::CritScope cs_capture(&crit_capture_);
1001 1109
1002 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1110 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1003 // We just return if recording hasn't started. 1111 // We just return if recording hasn't started.
1004 if (debug_file_->Open()) { 1112 if (debug_dump_.debug_file->Open()) {
1005 if (debug_file_->CloseFile() == -1) { 1113 if (debug_dump_.debug_file->CloseFile() == -1) {
1006 return kFileError; 1114 return kFileError;
1007 } 1115 }
1008 } 1116 }
1009 return kNoError; 1117 return kNoError;
1010 #else 1118 #else
1011 return kUnsupportedFunctionError; 1119 return kUnsupportedFunctionError;
1012 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP 1120 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
1013 } 1121 }
1014 1122
1015 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { 1123 EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
1016 return echo_cancellation_; 1124 // Adding a lock here has no effect as it allows any access to the submodule
1125 // from the returned pointer.
1126 return public_submodules_->echo_cancellation;
1017 } 1127 }
1018 1128
1019 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { 1129 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
1020 return echo_control_mobile_; 1130 // Adding a lock here has no effect as it allows any access to the submodule
1131 // from the returned pointer.
1132 return public_submodules_->echo_control_mobile;
1021 } 1133 }
1022 1134
1023 GainControl* AudioProcessingImpl::gain_control() const { 1135 GainControl* AudioProcessingImpl::gain_control() const {
1024 if (use_new_agc_) { 1136 // Adding a lock here has no effect as it allows any access to the submodule
1025 return gain_control_for_new_agc_.get(); 1137 // from the returned pointer.
1138 if (constants_.use_new_agc) {
1139 return public_submodules_->gain_control_for_new_agc.get();
1026 } 1140 }
1027 return gain_control_; 1141 return public_submodules_->gain_control;
1028 } 1142 }
1029 1143
1030 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { 1144 HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
1031 return high_pass_filter_; 1145 // Adding a lock here has no effect as it allows any access to the submodule
1146 // from the returned pointer.
1147 return public_submodules_->high_pass_filter;
1032 } 1148 }
1033 1149
1034 LevelEstimator* AudioProcessingImpl::level_estimator() const { 1150 LevelEstimator* AudioProcessingImpl::level_estimator() const {
1035 return level_estimator_; 1151 // Adding a lock here has no effect as it allows any access to the submodule
1152 // from the returned pointer.
1153 return public_submodules_->level_estimator;
1036 } 1154 }
1037 1155
1038 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { 1156 NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
1039 return noise_suppression_; 1157 // Adding a lock here has no effect as it allows any access to the submodule
1158 // from the returned pointer.
1159 return public_submodules_->noise_suppression;
1040 } 1160 }
1041 1161
1042 VoiceDetection* AudioProcessingImpl::voice_detection() const { 1162 VoiceDetection* AudioProcessingImpl::voice_detection() const {
1043 return voice_detection_; 1163 // Adding a lock here has no effect as it allows any access to the submodule
1164 // from the returned pointer.
1165 return public_submodules_->voice_detection;
1044 } 1166 }
1045 1167
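Each accessor above carries the same comment: a lock inside the getter would cover only the pointer read, not the calls the client later makes through that pointer, so any real protection has to live in the returned submodule itself. A hedged sketch of the distinction (hypothetical types, std::mutex for illustration):

#include <mutex>

class SubmoduleSketch {
 public:
  // The submodule guards its own state, so it stays safe regardless of how
  // the caller obtained the pointer.
  void Enable(bool enable) {
    std::lock_guard<std::mutex> lock(mutex_);
    enabled_ = enable;
  }
  bool enabled() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return enabled_;
  }

 private:
  mutable std::mutex mutex_;
  bool enabled_ = false;
};

class OwnerSketch {
 public:
  SubmoduleSketch* submodule() {
    // A lock here would protect only this return statement; the caller keeps
    // using the pointer after the lock is released.
    return &submodule_;
  }

 private:
  SubmoduleSketch submodule_;
};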
1046 bool AudioProcessingImpl::is_data_processed() const { 1168 bool AudioProcessingImpl::is_data_processed() const {
1047 if (beamformer_enabled_) { 1169 if (constants_.beamformer_enabled) {
1048 return true; 1170 return true;
1049 } 1171 }
1050 1172
1051 int enabled_count = 0; 1173 int enabled_count = 0;
1052 for (auto item : component_list_) { 1174 for (auto item : private_submodules_->component_list) {
1053 if (item->is_component_enabled()) { 1175 if (item->is_component_enabled()) {
1054 enabled_count++; 1176 enabled_count++;
1055 } 1177 }
1056 } 1178 }
1057 1179
1058 // Data is unchanged if no components are enabled, or if only level_estimator_ 1180 // Data is unchanged if no components are enabled, or if only
1059 // or voice_detection_ is enabled. 1181 // public_submodules_->level_estimator
1182 // or public_submodules_->voice_detection is enabled.
1060 if (enabled_count == 0) { 1183 if (enabled_count == 0) {
1061 return false; 1184 return false;
1062 } else if (enabled_count == 1) { 1185 } else if (enabled_count == 1) {
1063 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) { 1186 if (public_submodules_->level_estimator->is_enabled() ||
1187 public_submodules_->voice_detection->is_enabled()) {
1064 return false; 1188 return false;
1065 } 1189 }
1066 } else if (enabled_count == 2) { 1190 } else if (enabled_count == 2) {
1067 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { 1191 if (public_submodules_->level_estimator->is_enabled() &&
1192 public_submodules_->voice_detection->is_enabled()) {
1068 return false; 1193 return false;
1069 } 1194 }
1070 } 1195 }
1071 return true; 1196 return true;
1072 } 1197 }
1073 1198
1074 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { 1199 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
1075 // Check if we've upmixed or downmixed the audio. 1200 // Check if we've upmixed or downmixed the audio.
1076 return ((shared_state_.api_format_.output_stream().num_channels() != 1201 return ((formats_.api_format.output_stream().num_channels() !=
1077 shared_state_.api_format_.input_stream().num_channels()) || 1202 formats_.api_format.input_stream().num_channels()) ||
1078 is_data_processed || transient_suppressor_enabled_); 1203 is_data_processed || capture_.transient_suppressor_enabled);
1079 } 1204 }
1080 1205
1081 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { 1206 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
1082 return (is_data_processed && 1207 return (is_data_processed &&
1083 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || 1208 (capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
1084 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); 1209 kSampleRate32kHz ||
1210 capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
1211 kSampleRate48kHz));
1085 } 1212 }
1086 1213
1087 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { 1214 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
1088 if (!is_data_processed && !voice_detection_->is_enabled() && 1215 if (!is_data_processed &&
1089 !transient_suppressor_enabled_) { 1216 !public_submodules_->voice_detection->is_enabled() &&
1090 // Only level_estimator_ is enabled. 1217 !capture_.transient_suppressor_enabled) {
1218 // Only public_submodules_->level_estimator is enabled.
1091 return false; 1219 return false;
1092 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || 1220 } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
1093 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { 1221 kSampleRate32kHz ||
1094 // Something besides level_estimator_ is enabled, and we have super-wb. 1222 capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
1223 kSampleRate48kHz) {
1224 // Something besides public_submodules_->level_estimator is enabled, and we
1225 // have super-wb.
1095 return true; 1226 return true;
1096 } 1227 }
1097 return false; 1228 return false;
1098 } 1229 }
1099 1230
1100 bool AudioProcessingImpl::is_rev_processed() const { 1231 bool AudioProcessingImpl::is_rev_processed() const {
1101 return intelligibility_enabled_ && intelligibility_enhancer_->active(); 1232 return constants_.intelligibility_enabled &&
1233 public_submodules_->intelligibility_enhancer->active();
1102 } 1234 }
1103 1235
1104 bool AudioProcessingImpl::render_check_rev_conversion_needed() const { 1236 bool AudioProcessingImpl::render_check_rev_conversion_needed() const {
1105 return rev_conversion_needed(); 1237 return rev_conversion_needed();
1106 } 1238 }
1107 1239
1108 bool AudioProcessingImpl::rev_conversion_needed() const { 1240 bool AudioProcessingImpl::rev_conversion_needed() const {
1109 // TODO(peah): Refactor to be allowed to verify using thread annotations. 1241 return (formats_.api_format.reverse_input_stream() !=
1110 return (shared_state_.api_format_.reverse_input_stream() != 1242 formats_.api_format.reverse_output_stream());
1111 shared_state_.api_format_.reverse_output_stream());
1112 } 1243 }
1113 1244
1114 void AudioProcessingImpl::InitializeExperimentalAgc() { 1245 void AudioProcessingImpl::InitializeExperimentalAgc() {
1115 // TODO(peah): Refactor to be allowed to verify using thread annotations. 1246 if (constants_.use_new_agc) {
1116 if (use_new_agc_) { 1247 if (!private_submodules_->agc_manager.get()) {
1117 if (!agc_manager_.get()) { 1248 private_submodules_->agc_manager.reset(new AgcManagerDirect(
1118 agc_manager_.reset(new AgcManagerDirect(gain_control_, 1249 public_submodules_->gain_control,
1119 gain_control_for_new_agc_.get(), 1250 public_submodules_->gain_control_for_new_agc.get(),
1120 agc_startup_min_volume_)); 1251 constants_.agc_startup_min_volume));
1121 } 1252 }
1122 agc_manager_->Initialize(); 1253 private_submodules_->agc_manager->Initialize();
1123 agc_manager_->SetCaptureMuted(output_will_be_muted_); 1254 private_submodules_->agc_manager->SetCaptureMuted(
1255 capture_.output_will_be_muted);
1124 } 1256 }
1125 } 1257 }
1126 1258
1127 void AudioProcessingImpl::InitializeTransient() { 1259 void AudioProcessingImpl::InitializeTransient() {
1128 // TODO(peah): Refactor to be allowed to verify using thread annotations. 1260 if (capture_.transient_suppressor_enabled) {
1129 if (transient_suppressor_enabled_) { 1261 if (!public_submodules_->transient_suppressor.get()) {
1130 if (!transient_suppressor_.get()) { 1262 public_submodules_->transient_suppressor.reset(new TransientSuppressor());
1131 transient_suppressor_.reset(new TransientSuppressor());
1132 } 1263 }
1133 transient_suppressor_->Initialize( 1264 public_submodules_->transient_suppressor->Initialize(
1134 fwd_proc_format_.sample_rate_hz(), split_rate_, 1265 capture_nonlocked_.fwd_proc_format.sample_rate_hz(),
1135 shared_state_.api_format_.output_stream().num_channels()); 1266 capture_nonlocked_.split_rate,
1267 formats_.api_format.output_stream().num_channels());
1136 } 1268 }
1137 } 1269 }
1138 1270
1139 void AudioProcessingImpl::InitializeBeamformer() { 1271 void AudioProcessingImpl::InitializeBeamformer() {
1140 // TODO(peah): Refactor to be allowed to verify using thread annotations. 1272 if (constants_.beamformer_enabled) {
1141 if (beamformer_enabled_) { 1273 if (!private_submodules_->beamformer) {
1142 if (!beamformer_) { 1274 private_submodules_->beamformer.reset(new NonlinearBeamformer(
1143 beamformer_.reset( 1275 constants_.array_geometry, constants_.target_direction));
1144 new NonlinearBeamformer(array_geometry_, target_direction_));
1145 } 1276 }
1146 beamformer_->Initialize(kChunkSizeMs, split_rate_); 1277 private_submodules_->beamformer->Initialize(kChunkSizeMs,
1278 capture_nonlocked_.split_rate);
1147 } 1279 }
1148 } 1280 }
1149 1281
1150 void AudioProcessingImpl::InitializeIntelligibility() { 1282 void AudioProcessingImpl::InitializeIntelligibility() {
1151 // TODO(peah): Refactor to be allowed to verify using thread annotations. 1283 if (constants_.intelligibility_enabled) {
1152 if (intelligibility_enabled_) {
1153 IntelligibilityEnhancer::Config config; 1284 IntelligibilityEnhancer::Config config;
1154 config.sample_rate_hz = split_rate_; 1285 config.sample_rate_hz = capture_nonlocked_.split_rate;
1155 config.num_capture_channels = capture_audio_->num_channels(); 1286 config.num_capture_channels = capture_.capture_audio->num_channels();
1156 config.num_render_channels = render_audio_->num_channels(); 1287 config.num_render_channels = render_.render_audio->num_channels();
1157 intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); 1288 public_submodules_->intelligibility_enhancer.reset(
1289 new IntelligibilityEnhancer(config));
1158 } 1290 }
1159 } 1291 }
1160 1292
1161 void AudioProcessingImpl::MaybeUpdateHistograms() { 1293 void AudioProcessingImpl::MaybeUpdateHistograms() {
1162 static const int kMinDiffDelayMs = 60; 1294 static const int kMinDiffDelayMs = 60;
1163 1295
1164 if (echo_cancellation()->is_enabled()) { 1296 if (echo_cancellation()->is_enabled()) {
1165 // Activate delay_jumps_ counters if we know echo_cancellation is runnning. 1297 // Activate delay_jumps_ counters if we know echo_cancellation is runnning.
1166 // If a stream has echo we know that the echo_cancellation is in process. 1298 // If a stream has echo we know that the echo_cancellation is in process.
1167 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { 1299 if (capture_.stream_delay_jumps == -1 &&
1168 stream_delay_jumps_ = 0; 1300 echo_cancellation()->stream_has_echo()) {
1301 capture_.stream_delay_jumps = 0;
1169 } 1302 }
1170 if (aec_system_delay_jumps_ == -1 && 1303 if (capture_.aec_system_delay_jumps == -1 &&
1171 echo_cancellation()->stream_has_echo()) { 1304 echo_cancellation()->stream_has_echo()) {
1172 aec_system_delay_jumps_ = 0; 1305 capture_.aec_system_delay_jumps = 0;
1173 } 1306 }
1174 1307
1175 // Detect a jump in platform reported system delay and log the difference. 1308 // Detect a jump in platform reported system delay and log the difference.
1176 const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_; 1309 const int diff_stream_delay_ms =
1177 if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) { 1310 capture_nonlocked_.stream_delay_ms - capture_.last_stream_delay_ms;
1311 if (diff_stream_delay_ms > kMinDiffDelayMs &&
1312 capture_.last_stream_delay_ms != 0) {
1178 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump", 1313 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump",
1179 diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100); 1314 diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100);
1180 if (stream_delay_jumps_ == -1) { 1315 if (capture_.stream_delay_jumps == -1) {
1181 stream_delay_jumps_ = 0; // Activate counter if needed. 1316 capture_.stream_delay_jumps = 0; // Activate counter if needed.
1182 } 1317 }
1183 stream_delay_jumps_++; 1318 capture_.stream_delay_jumps++;
1184 } 1319 }
1185 last_stream_delay_ms_ = stream_delay_ms_; 1320 capture_.last_stream_delay_ms = capture_nonlocked_.stream_delay_ms;
1186 1321
1187 // Detect a jump in AEC system delay and log the difference. 1322 // Detect a jump in AEC system delay and log the difference.
1188 const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000); 1323 const int frames_per_ms =
1324 rtc::CheckedDivExact(capture_nonlocked_.split_rate, 1000);
1189 const int aec_system_delay_ms = 1325 const int aec_system_delay_ms =
1190 WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms; 1326 WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms;
1191 const int diff_aec_system_delay_ms = 1327 const int diff_aec_system_delay_ms =
1192 aec_system_delay_ms - last_aec_system_delay_ms_; 1328 aec_system_delay_ms - capture_.last_aec_system_delay_ms;
1193 if (diff_aec_system_delay_ms > kMinDiffDelayMs && 1329 if (diff_aec_system_delay_ms > kMinDiffDelayMs &&
1194 last_aec_system_delay_ms_ != 0) { 1330 capture_.last_aec_system_delay_ms != 0) {
1195 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump", 1331 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump",
1196 diff_aec_system_delay_ms, kMinDiffDelayMs, 1000, 1332 diff_aec_system_delay_ms, kMinDiffDelayMs, 1000,
1197 100); 1333 100);
1198 if (aec_system_delay_jumps_ == -1) { 1334 if (capture_.aec_system_delay_jumps == -1) {
1199 aec_system_delay_jumps_ = 0; // Activate counter if needed. 1335 capture_.aec_system_delay_jumps = 0; // Activate counter if needed.
1200 } 1336 }
1201 aec_system_delay_jumps_++; 1337 capture_.aec_system_delay_jumps++;
1202 } 1338 }
1203 last_aec_system_delay_ms_ = aec_system_delay_ms; 1339 capture_.last_aec_system_delay_ms = aec_system_delay_ms;
1204 } 1340 }
1205 } 1341 }
1206 1342
1207 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { 1343 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
1208 CriticalSectionScoped crit_scoped(crit_); 1344 // Run in a single-threaded manner.
1209 if (stream_delay_jumps_ > -1) { 1345 rtc::CritScope cs_render(&crit_render_);
1346 rtc::CritScope cs_capture(&crit_capture_);
1347
1348 if (capture_.stream_delay_jumps > -1) {
1210 RTC_HISTOGRAM_ENUMERATION( 1349 RTC_HISTOGRAM_ENUMERATION(
1211 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", 1350 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps",
1212 stream_delay_jumps_, 51); 1351 capture_.stream_delay_jumps, 51);
1213 } 1352 }
1214 stream_delay_jumps_ = -1; 1353 capture_.stream_delay_jumps = -1;
1215 last_stream_delay_ms_ = 0; 1354 capture_.last_stream_delay_ms = 0;
1216 1355
1217 if (aec_system_delay_jumps_ > -1) { 1356 if (capture_.aec_system_delay_jumps > -1) {
1218 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", 1357 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps",
1219 aec_system_delay_jumps_, 51); 1358 capture_.aec_system_delay_jumps, 51);
1220 } 1359 }
1221 aec_system_delay_jumps_ = -1; 1360 capture_.aec_system_delay_jumps = -1;
1222 last_aec_system_delay_ms_ = 0; 1361 capture_.last_aec_system_delay_ms = 0;
1223 } 1362 }
1224 1363
1225 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1364 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1226 int AudioProcessingImpl::WriteMessageToDebugFile() { 1365 int AudioProcessingImpl::WriteMessageToDebugFile(
1227 int32_t size = event_msg_->ByteSize(); 1366 FileWrapper* debug_file,
1367 rtc::CriticalSection* crit_debug,
1368 ApmDebugDumpThreadState* debug_state) {
1369 int32_t size = debug_state->event_msg->ByteSize();
1228 if (size <= 0) { 1370 if (size <= 0) {
1229 return kUnspecifiedError; 1371 return kUnspecifiedError;
1230 } 1372 }
1231 #if defined(WEBRTC_ARCH_BIG_ENDIAN) 1373 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
1232 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be 1374 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
1233 // pretty safe in assuming little-endian. 1375 // pretty safe in assuming little-endian.
1234 #endif 1376 #endif
1235 1377
1236 if (!event_msg_->SerializeToString(&event_str_)) { 1378 if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) {
1237 return kUnspecifiedError; 1379 return kUnspecifiedError;
1238 } 1380 }
1239 1381
1240 // Write message preceded by its size. 1382 {
1241 if (!debug_file_->Write(&size, sizeof(int32_t))) { 1383 // Ensure atomic writes of the message.
1242 return kFileError; 1384 rtc::CritScope cs_capture(crit_debug);
1243 } 1385 // Write message preceded by its size.
1244 if (!debug_file_->Write(event_str_.data(), event_str_.length())) { 1386 if (!debug_file->Write(&size, sizeof(int32_t))) {
1245 return kFileError; 1387 return kFileError;
1388 }
1389 if (!debug_file->Write(debug_state->event_str.data(),
1390 debug_state->event_str.length())) {
1391 return kFileError;
1392 }
1246 } 1393 }
1247 1394
1248 event_msg_->Clear(); 1395 debug_state->event_msg->Clear();
1249 1396
1250 return kNoError; 1397 return kNoError;
1251 } 1398 }
1252 1399
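WriteMessageToDebugFile() now takes the per-path message state and the shared debug lock as parameters: each thread serializes into its own buffer, and only the length-prefixed write happens under crit_debug_, so records from the render and capture threads cannot interleave mid-record. A minimal standalone sketch of that write pattern (plain FILE* and std::mutex; the protobuf handling is omitted):

#include <cstdint>
#include <cstdio>
#include <mutex>
#include <string>

// Write one record as a 4-byte size followed by the payload, holding the
// shared debug lock so concurrent writers cannot interleave their bytes.
bool WriteRecord(FILE* file, std::mutex* debug_mutex,
                 const std::string& payload) {
  const int32_t size = static_cast<int32_t>(payload.size());
  if (size <= 0) {
    return false;
  }
  std::lock_guard<std::mutex> lock(*debug_mutex);
  if (fwrite(&size, sizeof(size), 1, file) != 1) {
    return false;
  }
  return fwrite(payload.data(), 1, payload.size(), file) == payload.size();
}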
1253 int AudioProcessingImpl::WriteInitMessage() { 1400 int AudioProcessingImpl::WriteInitMessage() {
1254 // TODO(peah): Refactor to be allowed to verify using thread annotations. 1401 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT);
1255 event_msg_->set_type(audioproc::Event::INIT); 1402 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init();
1256 audioproc::Init* msg = event_msg_->mutable_init(); 1403 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz());
1257 msg->set_sample_rate( 1404
1258 shared_state_.api_format_.input_stream().sample_rate_hz());
1259 msg->set_num_input_channels( 1405 msg->set_num_input_channels(
1260 shared_state_.api_format_.input_stream().num_channels()); 1406 formats_.api_format.input_stream().num_channels());
1261 msg->set_num_output_channels( 1407 msg->set_num_output_channels(
1262 shared_state_.api_format_.output_stream().num_channels()); 1408 formats_.api_format.output_stream().num_channels());
1263 msg->set_num_reverse_channels( 1409 msg->set_num_reverse_channels(
1264 shared_state_.api_format_.reverse_input_stream().num_channels()); 1410 formats_.api_format.reverse_input_stream().num_channels());
1265 msg->set_reverse_sample_rate( 1411 msg->set_reverse_sample_rate(
1266 shared_state_.api_format_.reverse_input_stream().sample_rate_hz()); 1412 formats_.api_format.reverse_input_stream().sample_rate_hz());
1267 msg->set_output_sample_rate( 1413 msg->set_output_sample_rate(
1268 shared_state_.api_format_.output_stream().sample_rate_hz()); 1414 formats_.api_format.output_stream().sample_rate_hz());
1269 // TODO(ekmeyerson): Add reverse output fields to event_msg_. 1415 // TODO(ekmeyerson): Add reverse output fields to
1416 // debug_dump_.capture.event_msg.
1270 1417
1271 RETURN_ON_ERR(WriteMessageToDebugFile()); 1418 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
1419 &crit_debug_, &debug_dump_.capture));
1272 return kNoError; 1420 return kNoError;
1273 } 1421 }
1274 1422
1275 int AudioProcessingImpl::WriteConfigMessage(bool forced) { 1423 int AudioProcessingImpl::WriteConfigMessage(bool forced) {
1276 audioproc::Config config; 1424 audioproc::Config config;
1277 1425
1278 config.set_aec_enabled(echo_cancellation_->is_enabled()); 1426 config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled());
1279 config.set_aec_delay_agnostic_enabled( 1427 config.set_aec_delay_agnostic_enabled(
1280 echo_cancellation_->is_delay_agnostic_enabled()); 1428 public_submodules_->echo_cancellation->is_delay_agnostic_enabled());
1281 config.set_aec_drift_compensation_enabled( 1429 config.set_aec_drift_compensation_enabled(
1282 echo_cancellation_->is_drift_compensation_enabled()); 1430 public_submodules_->echo_cancellation->is_drift_compensation_enabled());
1283 config.set_aec_extended_filter_enabled( 1431 config.set_aec_extended_filter_enabled(
1284 echo_cancellation_->is_extended_filter_enabled()); 1432 public_submodules_->echo_cancellation->is_extended_filter_enabled());
1285 config.set_aec_suppression_level( 1433 config.set_aec_suppression_level(static_cast<int>(
1286 static_cast<int>(echo_cancellation_->suppression_level())); 1434 public_submodules_->echo_cancellation->suppression_level()));
1287 1435
1288 config.set_aecm_enabled(echo_control_mobile_->is_enabled()); 1436 config.set_aecm_enabled(
1437 public_submodules_->echo_control_mobile->is_enabled());
1289 config.set_aecm_comfort_noise_enabled( 1438 config.set_aecm_comfort_noise_enabled(
1290 echo_control_mobile_->is_comfort_noise_enabled()); 1439 public_submodules_->echo_control_mobile->is_comfort_noise_enabled());
1291 config.set_aecm_routing_mode( 1440 config.set_aecm_routing_mode(static_cast<int>(
1292 static_cast<int>(echo_control_mobile_->routing_mode())); 1441 public_submodules_->echo_control_mobile->routing_mode()));
1293 1442
1294 config.set_agc_enabled(gain_control_->is_enabled()); 1443 config.set_agc_enabled(public_submodules_->gain_control->is_enabled());
1295 config.set_agc_mode(static_cast<int>(gain_control_->mode())); 1444 config.set_agc_mode(
1296 config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled()); 1445 static_cast<int>(public_submodules_->gain_control->mode()));
1297 config.set_noise_robust_agc_enabled(use_new_agc_); 1446 config.set_agc_limiter_enabled(
1447 public_submodules_->gain_control->is_limiter_enabled());
1448 config.set_noise_robust_agc_enabled(constants_.use_new_agc);
1298 1449
1299 config.set_hpf_enabled(high_pass_filter_->is_enabled()); 1450 config.set_hpf_enabled(public_submodules_->high_pass_filter->is_enabled());
1300 1451
1301 config.set_ns_enabled(noise_suppression_->is_enabled()); 1452 config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled());
1302 config.set_ns_level(static_cast<int>(noise_suppression_->level())); 1453 config.set_ns_level(
1454 static_cast<int>(public_submodules_->noise_suppression->level()));
1303 1455
1304 config.set_transient_suppression_enabled(transient_suppressor_enabled_); 1456 config.set_transient_suppression_enabled(
1457 capture_.transient_suppressor_enabled);
1305 1458
1306 std::string serialized_config = config.SerializeAsString(); 1459 std::string serialized_config = config.SerializeAsString();
1307 if (!forced && last_serialized_config_ == serialized_config) { 1460 if (!forced &&
1461 debug_dump_.capture.last_serialized_config == serialized_config) {
1308 return kNoError; 1462 return kNoError;
1309 } 1463 }
1310 1464
1311 last_serialized_config_ = serialized_config; 1465 debug_dump_.capture.last_serialized_config = serialized_config;
1312 1466
1313 event_msg_->set_type(audioproc::Event::CONFIG); 1467 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG);
1314 event_msg_->mutable_config()->CopyFrom(config); 1468 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config);
1315 1469
1316 RETURN_ON_ERR(WriteMessageToDebugFile()); 1470 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
1471 &crit_debug_, &debug_dump_.capture));
1317 return kNoError; 1472 return kNoError;
1318 } 1473 }
1319 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP 1474 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
1320 1475
1321 } // namespace webrtc 1476 } // namespace webrtc