Chromium Code Reviews

Side by Side Diff: webrtc/voice_engine/transmit_mixer.cc

Issue 1607353002: Swap use of CriticalSectionWrapper with rtc::CriticalSection in voice_engine/ (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 4 years, 11 months ago
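
For context, the substance of the patch below is a one-for-one swap of the lock types from webrtc/system_wrappers to rtc: every CriticalSectionScoped cs(&_critSect); becomes rtc::CritScope cs(&_critSect);, and the lock members stop being heap-allocated CriticalSectionWrapper objects. A minimal sketch of the usage pattern, assuming the 2016-era include path webrtc/base/criticalsection.h for rtc::CriticalSection (the Counter class and its members are illustrative only, not TransmitMixer's real interface):

#include "webrtc/base/criticalsection.h"  // assumed location of rtc::CriticalSection

// Illustrative class; TransmitMixer's real members (_critSect, _callbackCritSect)
// are declared in transmit_mixer.h, which is not part of this diff.
class Counter {
 public:
  void Increment() {
    // Previously: CriticalSectionScoped cs(&crit_); with crit_ a CriticalSectionWrapper&.
    rtc::CritScope cs(&crit_);  // RAII: locks on construction, unlocks at scope exit.
    ++value_;
  }

  int value() {
    rtc::CritScope cs(&crit_);
    return value_;
  }

 private:
  rtc::CriticalSection crit_;  // plain member, no CreateCriticalSection()/delete needed
  int value_ = 0;
};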
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/voice_engine/transmit_mixer.h" 11 #include "webrtc/voice_engine/transmit_mixer.h"
12 12
13 #include "webrtc/base/format_macros.h" 13 #include "webrtc/base/format_macros.h"
14 #include "webrtc/base/logging.h" 14 #include "webrtc/base/logging.h"
15 #include "webrtc/modules/utility/include/audio_frame_operations.h" 15 #include "webrtc/modules/utility/include/audio_frame_operations.h"
16 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
17 #include "webrtc/system_wrappers/include/event_wrapper.h" 16 #include "webrtc/system_wrappers/include/event_wrapper.h"
18 #include "webrtc/system_wrappers/include/trace.h" 17 #include "webrtc/system_wrappers/include/trace.h"
19 #include "webrtc/voice_engine/channel.h" 18 #include "webrtc/voice_engine/channel.h"
20 #include "webrtc/voice_engine/channel_manager.h" 19 #include "webrtc/voice_engine/channel_manager.h"
21 #include "webrtc/voice_engine/include/voe_external_media.h" 20 #include "webrtc/voice_engine/include/voe_external_media.h"
22 #include "webrtc/voice_engine/statistics.h" 21 #include "webrtc/voice_engine/statistics.h"
23 #include "webrtc/voice_engine/utility.h" 22 #include "webrtc/voice_engine/utility.h"
24 #include "webrtc/voice_engine/voe_base_impl.h" 23 #include "webrtc/voice_engine/voe_base_impl.h"
25 24
26 namespace webrtc { 25 namespace webrtc {
27 namespace voe { 26 namespace voe {
28 27
29 // TODO(ajm): The thread safety of this is dubious... 28 // TODO(ajm): The thread safety of this is dubious...
30 void 29 void
31 TransmitMixer::OnPeriodicProcess() 30 TransmitMixer::OnPeriodicProcess()
32 { 31 {
33 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), 32 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
34 "TransmitMixer::OnPeriodicProcess()"); 33 "TransmitMixer::OnPeriodicProcess()");
35 34
36 #if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION) 35 #if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
37 bool send_typing_noise_warning = false; 36 bool send_typing_noise_warning = false;
38 bool typing_noise_detected = false; 37 bool typing_noise_detected = false;
39 { 38 {
40 CriticalSectionScoped cs(&_critSect); 39 rtc::CritScope cs(&_critSect);
the sun 2016/01/21 13:07:28 suuuper nit: "lock" or "cs"?
tommi 2016/01/21 15:29:23 both are used throughout the code, so here I guess
41 if (_typingNoiseWarningPending) { 40 if (_typingNoiseWarningPending) {
42 send_typing_noise_warning = true; 41 send_typing_noise_warning = true;
43 typing_noise_detected = _typingNoiseDetected; 42 typing_noise_detected = _typingNoiseDetected;
44 _typingNoiseWarningPending = false; 43 _typingNoiseWarningPending = false;
45 } 44 }
46 } 45 }
47 if (send_typing_noise_warning) { 46 if (send_typing_noise_warning) {
48 CriticalSectionScoped cs(&_callbackCritSect); 47 rtc::CritScope cs(&_callbackCritSect);
49 if (_voiceEngineObserverPtr) { 48 if (_voiceEngineObserverPtr) {
50 if (typing_noise_detected) { 49 if (typing_noise_detected) {
51 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 50 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
52 "TransmitMixer::OnPeriodicProcess() => " 51 "TransmitMixer::OnPeriodicProcess() => "
53 "CallbackOnError(VE_TYPING_NOISE_WARNING)"); 52 "CallbackOnError(VE_TYPING_NOISE_WARNING)");
54 _voiceEngineObserverPtr->CallbackOnError( 53 _voiceEngineObserverPtr->CallbackOnError(
55 -1, 54 -1,
56 VE_TYPING_NOISE_WARNING); 55 VE_TYPING_NOISE_WARNING);
57 } else { 56 } else {
58 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 57 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
59 "TransmitMixer::OnPeriodicProcess() => " 58 "TransmitMixer::OnPeriodicProcess() => "
60 "CallbackOnError(VE_TYPING_NOISE_OFF_WARNING)"); 59 "CallbackOnError(VE_TYPING_NOISE_OFF_WARNING)");
61 _voiceEngineObserverPtr->CallbackOnError( 60 _voiceEngineObserverPtr->CallbackOnError(
62 -1, 61 -1,
63 VE_TYPING_NOISE_OFF_WARNING); 62 VE_TYPING_NOISE_OFF_WARNING);
64 } 63 }
65 } 64 }
66 } 65 }
67 #endif 66 #endif
68 67
69 bool saturationWarning = false; 68 bool saturationWarning = false;
70 { 69 {
71 // Modify |_saturationWarning| under lock to avoid conflict with write op 70 // Modify |_saturationWarning| under lock to avoid conflict with write op
72 // in ProcessAudio and also ensure that we don't hold the lock during the 71 // in ProcessAudio and also ensure that we don't hold the lock during the
73 // callback. 72 // callback.
74 CriticalSectionScoped cs(&_critSect); 73 rtc::CritScope cs(&_critSect);
75 saturationWarning = _saturationWarning; 74 saturationWarning = _saturationWarning;
76 if (_saturationWarning) 75 if (_saturationWarning)
77 _saturationWarning = false; 76 _saturationWarning = false;
78 } 77 }
79 78
80 if (saturationWarning) 79 if (saturationWarning)
81 { 80 {
82 CriticalSectionScoped cs(&_callbackCritSect); 81 rtc::CritScope cs(&_callbackCritSect);
83 if (_voiceEngineObserverPtr) 82 if (_voiceEngineObserverPtr)
84 { 83 {
85 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 84 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
86 "TransmitMixer::OnPeriodicProcess() =>" 85 "TransmitMixer::OnPeriodicProcess() =>"
87 " CallbackOnError(VE_SATURATION_WARNING)"); 86 " CallbackOnError(VE_SATURATION_WARNING)");
88 _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING); 87 _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
89 } 88 }
90 } 89 }
91 } 90 }
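
Note on the function above (the pattern predates this patch and is preserved by it): the flags shared with the capture path are sampled and cleared under _critSect, and the observer callback is then fired under the separate _callbackCritSect using only the copied values, so no state lock is held while user code runs. A stripped-down sketch of that copy-under-lock, notify-outside-the-lock pattern, with hypothetical names (Notifier, warning_pending_) and the same assumed rtc::CriticalSection header:

#include "webrtc/base/criticalsection.h"  // assumed include path

class Observer {
 public:
  virtual ~Observer() {}
  virtual void OnWarning() = 0;
};

class Notifier {
 public:
  // Runs periodically on the process thread.
  void OnPeriodicProcess() {
    bool send_warning = false;
    {
      rtc::CritScope cs(&state_lock_);  // guards flags written on the capture thread
      send_warning = warning_pending_;
      warning_pending_ = false;         // consume the flag while still locked
    }                                   // state_lock_ released before any callback
    if (send_warning) {
      rtc::CritScope cs(&callback_lock_);
      if (observer_)
        observer_->OnWarning();         // callback runs without state_lock_ held
    }
  }

  // Called from another thread, e.g. the audio capture path.
  void ReportWarning() {
    rtc::CritScope cs(&state_lock_);
    warning_pending_ = true;
  }

 private:
  rtc::CriticalSection state_lock_;
  rtc::CriticalSection callback_lock_;
  bool warning_pending_ = false;
  Observer* observer_ = nullptr;
};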
92 91
(...skipping 18 matching lines...)
111 // Not implement yet 110 // Not implement yet
112 } 111 }
113 112
114 void TransmitMixer::PlayFileEnded(int32_t id) 113 void TransmitMixer::PlayFileEnded(int32_t id)
115 { 114 {
116 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), 115 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
117 "TransmitMixer::PlayFileEnded(id=%d)", id); 116 "TransmitMixer::PlayFileEnded(id=%d)", id);
118 117
119 assert(id == _filePlayerId); 118 assert(id == _filePlayerId);
120 119
121 CriticalSectionScoped cs(&_critSect); 120 rtc::CritScope cs(&_critSect);
122 121
123 _filePlaying = false; 122 _filePlaying = false;
124 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1), 123 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
125 "TransmitMixer::PlayFileEnded() =>" 124 "TransmitMixer::PlayFileEnded() =>"
126 "file player module is shutdown"); 125 "file player module is shutdown");
127 } 126 }
128 127
129 void 128 void
130 TransmitMixer::RecordFileEnded(int32_t id) 129 TransmitMixer::RecordFileEnded(int32_t id)
131 { 130 {
132 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1), 131 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
133 "TransmitMixer::RecordFileEnded(id=%d)", id); 132 "TransmitMixer::RecordFileEnded(id=%d)", id);
134 133
135 if (id == _fileRecorderId) 134 if (id == _fileRecorderId)
136 { 135 {
137 CriticalSectionScoped cs(&_critSect); 136 rtc::CritScope cs(&_critSect);
138 _fileRecording = false; 137 _fileRecording = false;
139 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1), 138 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
140 "TransmitMixer::RecordFileEnded() => fileRecorder module" 139 "TransmitMixer::RecordFileEnded() => fileRecorder module"
141 "is shutdown"); 140 "is shutdown");
142 } else if (id == _fileCallRecorderId) 141 } else if (id == _fileCallRecorderId)
143 { 142 {
144 CriticalSectionScoped cs(&_critSect); 143 rtc::CritScope cs(&_critSect);
145 _fileCallRecording = false; 144 _fileCallRecording = false;
146 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1), 145 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
147 "TransmitMixer::RecordFileEnded() => fileCallRecorder" 146 "TransmitMixer::RecordFileEnded() => fileCallRecorder"
148 "module is shutdown"); 147 "module is shutdown");
149 } 148 }
150 } 149 }
151 150
152 int32_t 151 int32_t
153 TransmitMixer::Create(TransmitMixer*& mixer, uint32_t instanceId) 152 TransmitMixer::Create(TransmitMixer*& mixer, uint32_t instanceId)
154 { 153 {
(...skipping 31 matching lines...)
186 _fileCallRecorderPtr(NULL), 185 _fileCallRecorderPtr(NULL),
187 // Avoid conflict with other channels by adding 1024 - 1026, 186 // Avoid conflict with other channels by adding 1024 - 1026,
188 // won't use as much as 1024 channels. 187 // won't use as much as 1024 channels.
189 _filePlayerId(instanceId + 1024), 188 _filePlayerId(instanceId + 1024),
190 _fileRecorderId(instanceId + 1025), 189 _fileRecorderId(instanceId + 1025),
191 _fileCallRecorderId(instanceId + 1026), 190 _fileCallRecorderId(instanceId + 1026),
192 _filePlaying(false), 191 _filePlaying(false),
193 _fileRecording(false), 192 _fileRecording(false),
194 _fileCallRecording(false), 193 _fileCallRecording(false),
195 _audioLevel(), 194 _audioLevel(),
196 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
197 _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
198 #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION 195 #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
199 _typingNoiseWarningPending(false), 196 _typingNoiseWarningPending(false),
200 _typingNoiseDetected(false), 197 _typingNoiseDetected(false),
201 #endif 198 #endif
202 _saturationWarning(false), 199 _saturationWarning(false),
203 _instanceId(instanceId), 200 _instanceId(instanceId),
204 _mixFileWithMicrophone(false), 201 _mixFileWithMicrophone(false),
205 _captureLevel(0), 202 _captureLevel(0),
206 external_postproc_ptr_(NULL), 203 external_postproc_ptr_(NULL),
207 external_preproc_ptr_(NULL), 204 external_preproc_ptr_(NULL),
(...skipping 11 matching lines...)
219 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1), 216 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
220 "TransmitMixer::~TransmitMixer() - dtor"); 217 "TransmitMixer::~TransmitMixer() - dtor");
221 _monitorModule.DeRegisterObserver(); 218 _monitorModule.DeRegisterObserver();
222 if (_processThreadPtr) 219 if (_processThreadPtr)
223 { 220 {
224 _processThreadPtr->DeRegisterModule(&_monitorModule); 221 _processThreadPtr->DeRegisterModule(&_monitorModule);
225 } 222 }
226 DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed); 223 DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
227 DeRegisterExternalMediaProcessing(kRecordingPreprocessing); 224 DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
228 { 225 {
229 CriticalSectionScoped cs(&_critSect); 226 rtc::CritScope cs(&_critSect);
230 if (_fileRecorderPtr) 227 if (_fileRecorderPtr)
231 { 228 {
232 _fileRecorderPtr->RegisterModuleFileCallback(NULL); 229 _fileRecorderPtr->RegisterModuleFileCallback(NULL);
233 _fileRecorderPtr->StopRecording(); 230 _fileRecorderPtr->StopRecording();
234 FileRecorder::DestroyFileRecorder(_fileRecorderPtr); 231 FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
235 _fileRecorderPtr = NULL; 232 _fileRecorderPtr = NULL;
236 } 233 }
237 if (_fileCallRecorderPtr) 234 if (_fileCallRecorderPtr)
238 { 235 {
239 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL); 236 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
240 _fileCallRecorderPtr->StopRecording(); 237 _fileCallRecorderPtr->StopRecording();
241 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr); 238 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
242 _fileCallRecorderPtr = NULL; 239 _fileCallRecorderPtr = NULL;
243 } 240 }
244 if (_filePlayerPtr) 241 if (_filePlayerPtr)
245 { 242 {
246 _filePlayerPtr->RegisterModuleFileCallback(NULL); 243 _filePlayerPtr->RegisterModuleFileCallback(NULL);
247 _filePlayerPtr->StopPlayingFile(); 244 _filePlayerPtr->StopPlayingFile();
248 FilePlayer::DestroyFilePlayer(_filePlayerPtr); 245 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
249 _filePlayerPtr = NULL; 246 _filePlayerPtr = NULL;
250 } 247 }
251 } 248 }
252 delete &_critSect;
253 delete &_callbackCritSect;
254 } 249 }
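
The delete &_critSect and delete &_callbackCritSect statements removed above were only needed because the wrappers were heap-allocated in the constructor's initializer list (those initializers are removed earlier in this patch). With rtc::CriticalSection the two locks can presumably become plain value members in transmit_mixer.h (that header is not shown in this diff), so construction and destruction happen automatically. A hedged before/after sketch of that ownership change:

#include "webrtc/base/criticalsection.h"                              // assumed path
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"  // old wrapper

// Before (sketch): references to heap-allocated wrappers, freed by hand.
class WithWrapperLocks {
 public:
  WithWrapperLocks()
      : _critSect(*webrtc::CriticalSectionWrapper::CreateCriticalSection()),
        _callbackCritSect(*webrtc::CriticalSectionWrapper::CreateCriticalSection()) {}
  ~WithWrapperLocks() {
    delete &_critSect;  // easy to forget and unusual to read
    delete &_callbackCritSect;
  }

 private:
  webrtc::CriticalSectionWrapper& _critSect;
  webrtc::CriticalSectionWrapper& _callbackCritSect;
};

// After (sketch): value members; no initializer-list entries, no manual delete.
class WithRtcLocks {
 private:
  rtc::CriticalSection _critSect;
  rtc::CriticalSection _callbackCritSect;
};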
255 250
256 int32_t 251 int32_t
257 TransmitMixer::SetEngineInformation(ProcessThread& processThread, 252 TransmitMixer::SetEngineInformation(ProcessThread& processThread,
258 Statistics& engineStatistics, 253 Statistics& engineStatistics,
259 ChannelManager& channelManager) 254 ChannelManager& channelManager)
260 { 255 {
261 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 256 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
262 "TransmitMixer::SetEngineInformation()"); 257 "TransmitMixer::SetEngineInformation()");
263 258
264 _processThreadPtr = &processThread; 259 _processThreadPtr = &processThread;
265 _engineStatisticsPtr = &engineStatistics; 260 _engineStatisticsPtr = &engineStatistics;
266 _channelManagerPtr = &channelManager; 261 _channelManagerPtr = &channelManager;
267 262
268 _processThreadPtr->RegisterModule(&_monitorModule); 263 _processThreadPtr->RegisterModule(&_monitorModule);
269 _monitorModule.RegisterObserver(*this); 264 _monitorModule.RegisterObserver(*this);
270 265
271 return 0; 266 return 0;
272 } 267 }
273 268
274 int32_t 269 int32_t
275 TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) 270 TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
276 { 271 {
277 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 272 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
278 "TransmitMixer::RegisterVoiceEngineObserver()"); 273 "TransmitMixer::RegisterVoiceEngineObserver()");
279 CriticalSectionScoped cs(&_callbackCritSect); 274 rtc::CritScope cs(&_callbackCritSect);
280 275
281 if (_voiceEngineObserverPtr) 276 if (_voiceEngineObserverPtr)
282 { 277 {
283 _engineStatisticsPtr->SetLastError( 278 _engineStatisticsPtr->SetLastError(
284 VE_INVALID_OPERATION, kTraceError, 279 VE_INVALID_OPERATION, kTraceError,
285 "RegisterVoiceEngineObserver() observer already enabled"); 280 "RegisterVoiceEngineObserver() observer already enabled");
286 return -1; 281 return -1;
287 } 282 }
288 _voiceEngineObserverPtr = &observer; 283 _voiceEngineObserverPtr = &observer;
289 return 0; 284 return 0;
(...skipping 43 matching lines...)
333 nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift, 328 nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift,
334 currentMicLevel); 329 currentMicLevel);
335 330
336 // --- Resample input audio and create/store the initial audio frame 331 // --- Resample input audio and create/store the initial audio frame
337 GenerateAudioFrame(static_cast<const int16_t*>(audioSamples), 332 GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
338 nSamples, 333 nSamples,
339 nChannels, 334 nChannels,
340 samplesPerSec); 335 samplesPerSec);
341 336
342 { 337 {
343 CriticalSectionScoped cs(&_callbackCritSect); 338 rtc::CritScope cs(&_callbackCritSect);
344 if (external_preproc_ptr_) { 339 if (external_preproc_ptr_) {
345 external_preproc_ptr_->Process(-1, kRecordingPreprocessing, 340 external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
346 _audioFrame.data_, 341 _audioFrame.data_,
347 _audioFrame.samples_per_channel_, 342 _audioFrame.samples_per_channel_,
348 _audioFrame.sample_rate_hz_, 343 _audioFrame.sample_rate_hz_,
349 _audioFrame.num_channels_ == 2); 344 _audioFrame.num_channels_ == 2);
350 } 345 }
351 } 346 }
352 347
353 // --- Near-end audio processing. 348 // --- Near-end audio processing.
(...skipping 27 matching lines...)
381 376
382 // --- Mix with file (does not affect the mixing frequency) 377 // --- Mix with file (does not affect the mixing frequency)
383 if (_filePlaying) 378 if (_filePlaying)
384 { 379 {
385 MixOrReplaceAudioWithFile(_audioFrame.sample_rate_hz_); 380 MixOrReplaceAudioWithFile(_audioFrame.sample_rate_hz_);
386 } 381 }
387 382
388 // --- Record to file 383 // --- Record to file
389 bool file_recording = false; 384 bool file_recording = false;
390 { 385 {
391 CriticalSectionScoped cs(&_critSect); 386 rtc::CritScope cs(&_critSect);
392 file_recording = _fileRecording; 387 file_recording = _fileRecording;
393 } 388 }
394 if (file_recording) 389 if (file_recording)
395 { 390 {
396 RecordAudioToFile(_audioFrame.sample_rate_hz_); 391 RecordAudioToFile(_audioFrame.sample_rate_hz_);
397 } 392 }
398 393
399 { 394 {
400 CriticalSectionScoped cs(&_callbackCritSect); 395 rtc::CritScope cs(&_callbackCritSect);
401 if (external_postproc_ptr_) { 396 if (external_postproc_ptr_) {
402 external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed, 397 external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
403 _audioFrame.data_, 398 _audioFrame.data_,
404 _audioFrame.samples_per_channel_, 399 _audioFrame.samples_per_channel_,
405 _audioFrame.sample_rate_hz_, 400 _audioFrame.sample_rate_hz_,
406 _audioFrame.num_channels_ == 2); 401 _audioFrame.num_channels_ == 2);
407 } 402 }
408 } 403 }
409 404
410 // --- Measure audio level of speech after all processing. 405 // --- Measure audio level of speech after all processing.
(...skipping 102 matching lines...)
513 format, volumeScaling, startPosition, stopPosition); 508 format, volumeScaling, startPosition, stopPosition);
514 509
515 if (_filePlaying) 510 if (_filePlaying)
516 { 511 {
517 _engineStatisticsPtr->SetLastError( 512 _engineStatisticsPtr->SetLastError(
518 VE_ALREADY_PLAYING, kTraceWarning, 513 VE_ALREADY_PLAYING, kTraceWarning,
519 "StartPlayingFileAsMicrophone() is already playing"); 514 "StartPlayingFileAsMicrophone() is already playing");
520 return 0; 515 return 0;
521 } 516 }
522 517
523 CriticalSectionScoped cs(&_critSect); 518 rtc::CritScope cs(&_critSect);
524 519
525 // Destroy the old instance 520 // Destroy the old instance
526 if (_filePlayerPtr) 521 if (_filePlayerPtr)
527 { 522 {
528 _filePlayerPtr->RegisterModuleFileCallback(NULL); 523 _filePlayerPtr->RegisterModuleFileCallback(NULL);
529 FilePlayer::DestroyFilePlayer(_filePlayerPtr); 524 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
530 _filePlayerPtr = NULL; 525 _filePlayerPtr = NULL;
531 } 526 }
532 527
533 // Dynamically create the instance 528 // Dynamically create the instance
(...skipping 56 matching lines...)
590 } 585 }
591 586
592 if (_filePlaying) 587 if (_filePlaying)
593 { 588 {
594 _engineStatisticsPtr->SetLastError( 589 _engineStatisticsPtr->SetLastError(
595 VE_ALREADY_PLAYING, kTraceWarning, 590 VE_ALREADY_PLAYING, kTraceWarning,
596 "StartPlayingFileAsMicrophone() is already playing"); 591 "StartPlayingFileAsMicrophone() is already playing");
597 return 0; 592 return 0;
598 } 593 }
599 594
600 CriticalSectionScoped cs(&_critSect); 595 rtc::CritScope cs(&_critSect);
601 596
602 // Destroy the old instance 597 // Destroy the old instance
603 if (_filePlayerPtr) 598 if (_filePlayerPtr)
604 { 599 {
605 _filePlayerPtr->RegisterModuleFileCallback(NULL); 600 _filePlayerPtr->RegisterModuleFileCallback(NULL);
606 FilePlayer::DestroyFilePlayer(_filePlayerPtr); 601 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
607 _filePlayerPtr = NULL; 602 _filePlayerPtr = NULL;
608 } 603 }
609 604
610 // Dynamically create the instance 605 // Dynamically create the instance
(...skipping 36 matching lines...)
647 int TransmitMixer::StopPlayingFileAsMicrophone() 642 int TransmitMixer::StopPlayingFileAsMicrophone()
648 { 643 {
649 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1), 644 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
650 "TransmitMixer::StopPlayingFileAsMicrophone()"); 645 "TransmitMixer::StopPlayingFileAsMicrophone()");
651 646
652 if (!_filePlaying) 647 if (!_filePlaying)
653 { 648 {
654 return 0; 649 return 0;
655 } 650 }
656 651
657 CriticalSectionScoped cs(&_critSect); 652 rtc::CritScope cs(&_critSect);
658 653
659 if (_filePlayerPtr->StopPlayingFile() != 0) 654 if (_filePlayerPtr->StopPlayingFile() != 0)
660 { 655 {
661 _engineStatisticsPtr->SetLastError( 656 _engineStatisticsPtr->SetLastError(
662 VE_CANNOT_STOP_PLAYOUT, kTraceError, 657 VE_CANNOT_STOP_PLAYOUT, kTraceError,
663 "StopPlayingFile() couldnot stop playing file"); 658 "StopPlayingFile() couldnot stop playing file");
664 return -1; 659 return -1;
665 } 660 }
666 661
667 _filePlayerPtr->RegisterModuleFileCallback(NULL); 662 _filePlayerPtr->RegisterModuleFileCallback(NULL);
(...skipping 11 matching lines...)
679 return _filePlaying; 674 return _filePlaying;
680 } 675 }
681 676
682 int TransmitMixer::StartRecordingMicrophone(const char* fileName, 677 int TransmitMixer::StartRecordingMicrophone(const char* fileName,
683 const CodecInst* codecInst) 678 const CodecInst* codecInst)
684 { 679 {
685 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 680 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
686 "TransmitMixer::StartRecordingMicrophone(fileName=%s)", 681 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
687 fileName); 682 fileName);
688 683
689 CriticalSectionScoped cs(&_critSect); 684 rtc::CritScope cs(&_critSect);
690 685
691 if (_fileRecording) 686 if (_fileRecording)
692 { 687 {
693 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), 688 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
694 "StartRecordingMicrophone() is already recording"); 689 "StartRecordingMicrophone() is already recording");
695 return 0; 690 return 0;
696 } 691 }
697 692
698 FileFormats format; 693 FileFormats format;
699 const uint32_t notificationTime(0); // Not supported in VoE 694 const uint32_t notificationTime(0); // Not supported in VoE
(...skipping 57 matching lines...)
757 752
758 return 0; 753 return 0;
759 } 754 }
760 755
761 int TransmitMixer::StartRecordingMicrophone(OutStream* stream, 756 int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
762 const CodecInst* codecInst) 757 const CodecInst* codecInst)
763 { 758 {
764 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 759 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
765 "TransmitMixer::StartRecordingMicrophone()"); 760 "TransmitMixer::StartRecordingMicrophone()");
766 761
767 CriticalSectionScoped cs(&_critSect); 762 rtc::CritScope cs(&_critSect);
768 763
769 if (_fileRecording) 764 if (_fileRecording)
770 { 765 {
771 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), 766 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
772 "StartRecordingMicrophone() is already recording"); 767 "StartRecordingMicrophone() is already recording");
773 return 0; 768 return 0;
774 } 769 }
775 770
776 FileFormats format; 771 FileFormats format;
777 const uint32_t notificationTime(0); // Not supported in VoE 772 const uint32_t notificationTime(0); // Not supported in VoE
(...skipping 56 matching lines...)
834 829
835 return 0; 830 return 0;
836 } 831 }
837 832
838 833
839 int TransmitMixer::StopRecordingMicrophone() 834 int TransmitMixer::StopRecordingMicrophone()
840 { 835 {
841 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 836 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
842 "TransmitMixer::StopRecordingMicrophone()"); 837 "TransmitMixer::StopRecordingMicrophone()");
843 838
844 CriticalSectionScoped cs(&_critSect); 839 rtc::CritScope cs(&_critSect);
845 840
846 if (!_fileRecording) 841 if (!_fileRecording)
847 { 842 {
848 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), 843 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
849 "StopRecordingMicrophone() isnot recording"); 844 "StopRecordingMicrophone() isnot recording");
850 return 0; 845 return 0;
851 } 846 }
852 847
853 if (_fileRecorderPtr->StopRecording() != 0) 848 if (_fileRecorderPtr->StopRecording() != 0)
854 { 849 {
(...skipping 41 matching lines...)
896 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) || 891 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
897 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) || 892 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
898 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0)) 893 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
899 { 894 {
900 format = kFileFormatWavFile; 895 format = kFileFormatWavFile;
901 } else 896 } else
902 { 897 {
903 format = kFileFormatCompressedFile; 898 format = kFileFormatCompressedFile;
904 } 899 }
905 900
906 CriticalSectionScoped cs(&_critSect); 901 rtc::CritScope cs(&_critSect);
907 902
908 // Destroy the old instance 903 // Destroy the old instance
909 if (_fileCallRecorderPtr) 904 if (_fileCallRecorderPtr)
910 { 905 {
911 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL); 906 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
912 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr); 907 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
913 _fileCallRecorderPtr = NULL; 908 _fileCallRecorderPtr = NULL;
914 } 909 }
915 910
916 _fileCallRecorderPtr 911 _fileCallRecorderPtr
(...skipping 57 matching lines...)
974 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) || 969 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
975 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) || 970 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
976 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0)) 971 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
977 { 972 {
978 format = kFileFormatWavFile; 973 format = kFileFormatWavFile;
979 } else 974 } else
980 { 975 {
981 format = kFileFormatCompressedFile; 976 format = kFileFormatCompressedFile;
982 } 977 }
983 978
984 CriticalSectionScoped cs(&_critSect); 979 rtc::CritScope cs(&_critSect);
985 980
986 // Destroy the old instance 981 // Destroy the old instance
987 if (_fileCallRecorderPtr) 982 if (_fileCallRecorderPtr)
988 { 983 {
989 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL); 984 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
990 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr); 985 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
991 _fileCallRecorderPtr = NULL; 986 _fileCallRecorderPtr = NULL;
992 } 987 }
993 988
994 _fileCallRecorderPtr = 989 _fileCallRecorderPtr =
(...skipping 30 matching lines...)
1025 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 1020 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1026 "TransmitMixer::StopRecordingCall()"); 1021 "TransmitMixer::StopRecordingCall()");
1027 1022
1028 if (!_fileCallRecording) 1023 if (!_fileCallRecording)
1029 { 1024 {
1030 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1), 1025 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
1031 "StopRecordingCall() file isnot recording"); 1026 "StopRecordingCall() file isnot recording");
1032 return -1; 1027 return -1;
1033 } 1028 }
1034 1029
1035 CriticalSectionScoped cs(&_critSect); 1030 rtc::CritScope cs(&_critSect);
1036 1031
1037 if (_fileCallRecorderPtr->StopRecording() != 0) 1032 if (_fileCallRecorderPtr->StopRecording() != 0)
1038 { 1033 {
1039 _engineStatisticsPtr->SetLastError( 1034 _engineStatisticsPtr->SetLastError(
1040 VE_STOP_RECORDING_FAILED, kTraceError, 1035 VE_STOP_RECORDING_FAILED, kTraceError,
1041 "StopRecording(), could not stop recording"); 1036 "StopRecording(), could not stop recording");
1042 return -1; 1037 return -1;
1043 } 1038 }
1044 1039
1045 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL); 1040 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
1046 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr); 1041 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
1047 _fileCallRecorderPtr = NULL; 1042 _fileCallRecorderPtr = NULL;
1048 _fileCallRecording = false; 1043 _fileCallRecording = false;
1049 1044
1050 return 0; 1045 return 0;
1051 } 1046 }
1052 1047
1053 void 1048 void
1054 TransmitMixer::SetMixWithMicStatus(bool mix) 1049 TransmitMixer::SetMixWithMicStatus(bool mix)
1055 { 1050 {
1056 _mixFileWithMicrophone = mix; 1051 _mixFileWithMicrophone = mix;
1057 } 1052 }
1058 1053
1059 int TransmitMixer::RegisterExternalMediaProcessing( 1054 int TransmitMixer::RegisterExternalMediaProcessing(
1060 VoEMediaProcess* object, 1055 VoEMediaProcess* object,
1061 ProcessingTypes type) { 1056 ProcessingTypes type) {
1062 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 1057 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1063 "TransmitMixer::RegisterExternalMediaProcessing()"); 1058 "TransmitMixer::RegisterExternalMediaProcessing()");
1064 1059
1065 CriticalSectionScoped cs(&_callbackCritSect); 1060 rtc::CritScope cs(&_callbackCritSect);
1066 if (!object) { 1061 if (!object) {
1067 return -1; 1062 return -1;
1068 } 1063 }
1069 1064
1070 // Store the callback object according to the processing type. 1065 // Store the callback object according to the processing type.
1071 if (type == kRecordingAllChannelsMixed) { 1066 if (type == kRecordingAllChannelsMixed) {
1072 external_postproc_ptr_ = object; 1067 external_postproc_ptr_ = object;
1073 } else if (type == kRecordingPreprocessing) { 1068 } else if (type == kRecordingPreprocessing) {
1074 external_preproc_ptr_ = object; 1069 external_preproc_ptr_ = object;
1075 } else { 1070 } else {
1076 return -1; 1071 return -1;
1077 } 1072 }
1078 return 0; 1073 return 0;
1079 } 1074 }
1080 1075
1081 int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) { 1076 int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
1082 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), 1077 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1083 "TransmitMixer::DeRegisterExternalMediaProcessing()"); 1078 "TransmitMixer::DeRegisterExternalMediaProcessing()");
1084 1079
1085 CriticalSectionScoped cs(&_callbackCritSect); 1080 rtc::CritScope cs(&_callbackCritSect);
1086 if (type == kRecordingAllChannelsMixed) { 1081 if (type == kRecordingAllChannelsMixed) {
1087 external_postproc_ptr_ = NULL; 1082 external_postproc_ptr_ = NULL;
1088 } else if (type == kRecordingPreprocessing) { 1083 } else if (type == kRecordingPreprocessing) {
1089 external_preproc_ptr_ = NULL; 1084 external_preproc_ptr_ = NULL;
1090 } else { 1085 } else {
1091 return -1; 1086 return -1;
1092 } 1087 }
1093 return 0; 1088 return 0;
1094 } 1089 }
1095 1090
(...skipping 24 matching lines...)
1120 return _audioLevel.LevelFullRange(); 1115 return _audioLevel.LevelFullRange();
1121 } 1116 }
1122 1117
1123 bool TransmitMixer::IsRecordingCall() 1118 bool TransmitMixer::IsRecordingCall()
1124 { 1119 {
1125 return _fileCallRecording; 1120 return _fileCallRecording;
1126 } 1121 }
1127 1122
1128 bool TransmitMixer::IsRecordingMic() 1123 bool TransmitMixer::IsRecordingMic()
1129 { 1124 {
1130 CriticalSectionScoped cs(&_critSect); 1125 rtc::CritScope cs(&_critSect);
1131 return _fileRecording; 1126 return _fileRecording;
1132 } 1127 }
1133 1128
1134 void TransmitMixer::GenerateAudioFrame(const int16_t* audio, 1129 void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
1135 size_t samples_per_channel, 1130 size_t samples_per_channel,
1136 size_t num_channels, 1131 size_t num_channels,
1137 int sample_rate_hz) { 1132 int sample_rate_hz) {
1138 int codec_rate; 1133 int codec_rate;
1139 size_t num_codec_channels; 1134 size_t num_codec_channels;
1140 GetSendCodecInfo(&codec_rate, &num_codec_channels); 1135 GetSendCodecInfo(&codec_rate, &num_codec_channels);
(...skipping 14 matching lines...)
1155 _audioFrame.sample_rate_hz_, AudioProcessing::kMaxAECMSampleRateHz); 1150 _audioFrame.sample_rate_hz_, AudioProcessing::kMaxAECMSampleRateHz);
1156 } 1151 }
1157 _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels); 1152 _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels);
1158 RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz, 1153 RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz,
1159 &resampler_, &_audioFrame); 1154 &resampler_, &_audioFrame);
1160 } 1155 }
1161 1156
1162 int32_t TransmitMixer::RecordAudioToFile( 1157 int32_t TransmitMixer::RecordAudioToFile(
1163 uint32_t mixingFrequency) 1158 uint32_t mixingFrequency)
1164 { 1159 {
1165 CriticalSectionScoped cs(&_critSect); 1160 rtc::CritScope cs(&_critSect);
1166 if (_fileRecorderPtr == NULL) 1161 if (_fileRecorderPtr == NULL)
1167 { 1162 {
1168 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), 1163 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1169 "TransmitMixer::RecordAudioToFile() filerecorder doesnot" 1164 "TransmitMixer::RecordAudioToFile() filerecorder doesnot"
1170 "exist"); 1165 "exist");
1171 return -1; 1166 return -1;
1172 } 1167 }
1173 1168
1174 if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0) 1169 if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
1175 { 1170 {
1176 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), 1171 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1177 "TransmitMixer::RecordAudioToFile() file recording" 1172 "TransmitMixer::RecordAudioToFile() file recording"
1178 "failed"); 1173 "failed");
1179 return -1; 1174 return -1;
1180 } 1175 }
1181 1176
1182 return 0; 1177 return 0;
1183 } 1178 }
1184 1179
1185 int32_t TransmitMixer::MixOrReplaceAudioWithFile( 1180 int32_t TransmitMixer::MixOrReplaceAudioWithFile(
1186 int mixingFrequency) 1181 int mixingFrequency)
1187 { 1182 {
1188 rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]); 1183 rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
1189 1184
1190 size_t fileSamples(0); 1185 size_t fileSamples(0);
1191 { 1186 {
1192 CriticalSectionScoped cs(&_critSect); 1187 rtc::CritScope cs(&_critSect);
1193 if (_filePlayerPtr == NULL) 1188 if (_filePlayerPtr == NULL)
1194 { 1189 {
1195 WEBRTC_TRACE(kTraceWarning, kTraceVoice, 1190 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1196 VoEId(_instanceId, -1), 1191 VoEId(_instanceId, -1),
1197 "TransmitMixer::MixOrReplaceAudioWithFile()" 1192 "TransmitMixer::MixOrReplaceAudioWithFile()"
1198 "fileplayer doesnot exist"); 1193 "fileplayer doesnot exist");
1199 return -1; 1194 return -1;
1200 } 1195 }
1201 1196
1202 if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(), 1197 if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
(...skipping 57 matching lines...)
1260 1255
1261 int err = audioproc_->ProcessStream(&_audioFrame); 1256 int err = audioproc_->ProcessStream(&_audioFrame);
1262 if (err != 0) { 1257 if (err != 0) {
1263 LOG(LS_ERROR) << "ProcessStream() error: " << err; 1258 LOG(LS_ERROR) << "ProcessStream() error: " << err;
1264 assert(false); 1259 assert(false);
1265 } 1260 }
1266 1261
1267 // Store new capture level. Only updated when analog AGC is enabled. 1262 // Store new capture level. Only updated when analog AGC is enabled.
1268 _captureLevel = agc->stream_analog_level(); 1263 _captureLevel = agc->stream_analog_level();
1269 1264
1270 CriticalSectionScoped cs(&_critSect); 1265 rtc::CritScope cs(&_critSect);
1271 // Triggers a callback in OnPeriodicProcess(). 1266 // Triggers a callback in OnPeriodicProcess().
1272 _saturationWarning |= agc->stream_is_saturated(); 1267 _saturationWarning |= agc->stream_is_saturated();
1273 } 1268 }
1274 1269
1275 #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION 1270 #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
1276 void TransmitMixer::TypingDetection(bool keyPressed) 1271 void TransmitMixer::TypingDetection(bool keyPressed)
1277 { 1272 {
1278 // We let the VAD determine if we're using this feature or not. 1273 // We let the VAD determine if we're using this feature or not.
1279 if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) { 1274 if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) {
1280 return; 1275 return;
1281 } 1276 }
1282 1277
1283 bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive; 1278 bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
1284 if (_typingDetection.Process(keyPressed, vadActive)) { 1279 if (_typingDetection.Process(keyPressed, vadActive)) {
1285 CriticalSectionScoped cs(&_critSect); 1280 rtc::CritScope cs(&_critSect);
1286 _typingNoiseWarningPending = true; 1281 _typingNoiseWarningPending = true;
1287 _typingNoiseDetected = true; 1282 _typingNoiseDetected = true;
1288 } else { 1283 } else {
1289 CriticalSectionScoped cs(&_critSect); 1284 rtc::CritScope cs(&_critSect);
1290 // If there is already a warning pending, do not change the state. 1285 // If there is already a warning pending, do not change the state.
1291 // Otherwise set a warning pending if last callback was for noise detected. 1286 // Otherwise set a warning pending if last callback was for noise detected.
1292 if (!_typingNoiseWarningPending && _typingNoiseDetected) { 1287 if (!_typingNoiseWarningPending && _typingNoiseDetected) {
1293 _typingNoiseWarningPending = true; 1288 _typingNoiseWarningPending = true;
1294 _typingNoiseDetected = false; 1289 _typingNoiseDetected = false;
1295 } 1290 }
1296 } 1291 }
1297 } 1292 }
1298 #endif 1293 #endif
1299 1294
(...skipping 33 matching lines...)
1333 void TransmitMixer::EnableStereoChannelSwapping(bool enable) { 1328 void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
1334 swap_stereo_channels_ = enable; 1329 swap_stereo_channels_ = enable;
1335 } 1330 }
1336 1331
1337 bool TransmitMixer::IsStereoChannelSwappingEnabled() { 1332 bool TransmitMixer::IsStereoChannelSwappingEnabled() {
1338 return swap_stereo_channels_; 1333 return swap_stereo_channels_;
1339 } 1334 }
1340 1335
1341 } // namespace voe 1336 } // namespace voe
1342 } // namespace webrtc 1337 } // namespace webrtc