| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 48 // take care of any buffering required to convert between native buffers and | 48 // take care of any buffering required to convert between native buffers and |
| 49 // buffers used by WebRTC. It is beneficial for the performance if the native | 49 // buffers used by WebRTC. It is beneficial for the performance if the native |
| 50 // size is as close to 10ms as possible since it results in "clean" callback | 50 // size is as close to 10ms as possible since it results in "clean" callback |
| 51 // sequence without bursts of callbacks back to back. | 51 // sequence without bursts of callbacks back to back. |
| 52 const double kPreferredIOBufferDuration = 0.01; | 52 const double kPreferredIOBufferDuration = 0.01; |
| 53 // Try to use mono to save resources. Also avoids channel format conversion | 53 // Try to use mono to save resources. Also avoids channel format conversion |
| 54 // in the I/O audio unit. Initial tests have shown that it is possible to use | 54 // in the I/O audio unit. Initial tests have shown that it is possible to use |
| 55 // mono natively for built-in microphones and for BT headsets but not for | 55 // mono natively for built-in microphones and for BT headsets but not for |
| 56 // wired headsets. Wired headsets only support stereo as native channel format | 56 // wired headsets. Wired headsets only support stereo as native channel format |
| 57 // but it is a low cost operation to do a format conversion to mono in the | 57 // but it is a low cost operation to do a format conversion to mono in the |
| 58 // audio unit. Hence, we will not hit a CHECK in | 58 // audio unit. Hence, we will not hit a RTC_CHECK in |
| 59 // VerifyAudioParametersForActiveAudioSession() for a mismatch between the | 59 // VerifyAudioParametersForActiveAudioSession() for a mismatch between the |
| 60 // preferred number of channels and the actual number of channels. | 60 // preferred number of channels and the actual number of channels. |
| 61 const int kPreferredNumberOfChannels = 1; | 61 const int kPreferredNumberOfChannels = 1; |
| 62 // Number of bytes per audio sample for 16-bit signed integer representation. | 62 // Number of bytes per audio sample for 16-bit signed integer representation. |
| 63 const UInt32 kBytesPerSample = 2; | 63 const UInt32 kBytesPerSample = 2; |
| 64 // Hardcoded delay estimates based on real measurements. | 64 // Hardcoded delay estimates based on real measurements. |
| 65 // TODO(henrika): these values are not used in combination with built-in AEC. | 65 // TODO(henrika): these values are not used in combination with built-in AEC. |
| 66 // Can most likely be removed. | 66 // Can most likely be removed. |
| 67 const UInt16 kFixedPlayoutDelayEstimate = 30; | 67 const UInt16 kFixedPlayoutDelayEstimate = 30; |
| 68 const UInt16 kFixedRecordDelayEstimate = 30; | 68 const UInt16 kFixedRecordDelayEstimate = 30; |
| 69 | 69 |
| 70 using ios::CheckAndLogError; | 70 using ios::CheckAndLogError; |
| 71 | 71 |
| 72 // Activates an audio session suitable for full duplex VoIP sessions when | 72 // Activates an audio session suitable for full duplex VoIP sessions when |
| 73 // |activate| is true. Also sets the preferred sample rate and IO buffer | 73 // |activate| is true. Also sets the preferred sample rate and IO buffer |
| 74 // duration. Deactivates an active audio session if |activate| is set to false. | 74 // duration. Deactivates an active audio session if |activate| is set to false. |
| 75 static void ActivateAudioSession(AVAudioSession* session, bool activate) { | 75 static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
| 76 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; | 76 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; |
| 77 @autoreleasepool { | 77 @autoreleasepool { |
| 78 NSError* error = nil; | 78 NSError* error = nil; |
| 79 BOOL success = NO; | 79 BOOL success = NO; |
| 80 // Deactivate the audio session and return if |activate| is false. | 80 // Deactivate the audio session and return if |activate| is false. |
| 81 if (!activate) { | 81 if (!activate) { |
| 82 success = [session setActive:NO error:&error]; | 82 success = [session setActive:NO error:&error]; |
| 83 DCHECK(CheckAndLogError(success, error)); | 83 RTC_DCHECK(CheckAndLogError(success, error)); |
| 84 return; | 84 return; |
| 85 } | 85 } |
| 86 // Use a category which supports simultaneous recording and playback. | 86 // Use a category which supports simultaneous recording and playback. |
| 87 // By default, using this category implies that our app’s audio is | 87 // By default, using this category implies that our app’s audio is |
| 88 // nonmixable, hence activating the session will interrupt any other | 88 // nonmixable, hence activating the session will interrupt any other |
| 89 // audio sessions which are also nonmixable. | 89 // audio sessions which are also nonmixable. |
| 90 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | 90 if (session.category != AVAudioSessionCategoryPlayAndRecord) { |
| 91 error = nil; | 91 error = nil; |
| 92 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord | 92 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
| 93 error:&error]; | 93 error:&error]; |
| 94 DCHECK(CheckAndLogError(success, error)); | 94 RTC_DCHECK(CheckAndLogError(success, error)); |
| 95 } | 95 } |
| 96 // Specify mode for two-way voice communication (e.g. VoIP). | 96 // Specify mode for two-way voice communication (e.g. VoIP). |
| 97 if (session.mode != AVAudioSessionModeVoiceChat) { | 97 if (session.mode != AVAudioSessionModeVoiceChat) { |
| 98 error = nil; | 98 error = nil; |
| 99 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; | 99 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; |
| 100 DCHECK(CheckAndLogError(success, error)); | 100 RTC_DCHECK(CheckAndLogError(success, error)); |
| 101 } | 101 } |
| 102 // Set the session's sample rate or the hardware sample rate. | 102 // Set the session's sample rate or the hardware sample rate. |
| 103 // It is essential that we use the same sample rate as stream format | 103 // It is essential that we use the same sample rate as stream format |
| 104 // to ensure that the I/O unit does not have to do sample rate conversion. | 104 // to ensure that the I/O unit does not have to do sample rate conversion. |
| 105 error = nil; | 105 error = nil; |
| 106 success = | 106 success = |
| 107 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; | 107 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; |
| 108 DCHECK(CheckAndLogError(success, error)); | 108 RTC_DCHECK(CheckAndLogError(success, error)); |
| 109 // Set the preferred audio I/O buffer duration, in seconds. | 109 // Set the preferred audio I/O buffer duration, in seconds. |
| 110 // TODO(henrika): add more comments here. | 110 // TODO(henrika): add more comments here. |
| 111 error = nil; | 111 error = nil; |
| 112 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration | 112 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration |
| 113 error:&error]; | 113 error:&error]; |
| 114 DCHECK(CheckAndLogError(success, error)); | 114 RTC_DCHECK(CheckAndLogError(success, error)); |
| 115 | 115 |
| 116 // TODO(henrika): add observers here... | 116 // TODO(henrika): add observers here... |
| 117 | 117 |
| 118 // Activate the audio session. Activation can fail if another active audio | 118 // Activate the audio session. Activation can fail if another active audio |
| 119 // session (e.g. phone call) has higher priority than ours. | 119 // session (e.g. phone call) has higher priority than ours. |
| 120 error = nil; | 120 error = nil; |
| 121 success = [session setActive:YES error:&error]; | 121 success = [session setActive:YES error:&error]; |
| 122 DCHECK(CheckAndLogError(success, error)); | 122 RTC_DCHECK(CheckAndLogError(success, error)); |
| 123 CHECK(session.isInputAvailable) << "No input path is available!"; | 123 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; |
| 124 // Ensure that category and mode are actually activated. | 124 // Ensure that category and mode are actually activated. |
| 125 DCHECK( | 125 RTC_DCHECK( |
| 126 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); | 126 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); |
| 127 DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); | 127 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); |
| 128 // Try to set the preferred number of hardware audio channels. These calls | 128 // Try to set the preferred number of hardware audio channels. These calls |
| 129 // must be done after setting the audio session’s category and mode and | 129 // must be done after setting the audio session’s category and mode and |
| 130 // activating the session. | 130 // activating the session. |
| 131 // We try to use mono in both directions to save resources and format | 131 // We try to use mono in both directions to save resources and format |
| 132 // conversions in the audio unit. Some devices only support stereo; | 132 // conversions in the audio unit. Some devices only support stereo; |
| 133 // e.g. wired headset on iPhone 6. | 133 // e.g. wired headset on iPhone 6. |
| 134 // TODO(henrika): add support for stereo if needed. | 134 // TODO(henrika): add support for stereo if needed. |
| 135 error = nil; | 135 error = nil; |
| 136 success = | 136 success = |
| 137 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels | 137 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels |
| 138 error:&error]; | 138 error:&error]; |
| 139 DCHECK(CheckAndLogError(success, error)); | 139 RTC_DCHECK(CheckAndLogError(success, error)); |
| 140 error = nil; | 140 error = nil; |
| 141 success = | 141 success = |
| 142 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels | 142 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels |
| 143 error:&error]; | 143 error:&error]; |
| 144 DCHECK(CheckAndLogError(success, error)); | 144 RTC_DCHECK(CheckAndLogError(success, error)); |
| 145 } | 145 } |
| 146 } | 146 } |
| 147 | 147 |
| 148 #if !defined(NDEBUG) | 148 #if !defined(NDEBUG) |
| 149 // Helper method for printing out an AudioStreamBasicDescription structure. | 149 // Helper method for printing out an AudioStreamBasicDescription structure. |
| 150 static void LogABSD(AudioStreamBasicDescription absd) { | 150 static void LogABSD(AudioStreamBasicDescription absd) { |
| 151 char formatIDString[5]; | 151 char formatIDString[5]; |
| 152 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); | 152 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); |
| 153 bcopy(&formatID, formatIDString, 4); | 153 bcopy(&formatID, formatIDString, 4); |
| 154 formatIDString[4] = '\0'; | 154 formatIDString[4] = '\0'; |
| (...skipping 28 matching lines...) Expand all Loading... |
| 183 _playing(0), | 183 _playing(0), |
| 184 _initialized(false), | 184 _initialized(false), |
| 185 _recIsInitialized(false), | 185 _recIsInitialized(false), |
| 186 _playIsInitialized(false), | 186 _playIsInitialized(false), |
| 187 _audioInterruptionObserver(nullptr) { | 187 _audioInterruptionObserver(nullptr) { |
| 188 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); | 188 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); |
| 189 } | 189 } |
| 190 | 190 |
| 191 AudioDeviceIOS::~AudioDeviceIOS() { | 191 AudioDeviceIOS::~AudioDeviceIOS() { |
| 192 LOGI() << "~dtor"; | 192 LOGI() << "~dtor"; |
| 193 DCHECK(_threadChecker.CalledOnValidThread()); | 193 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 194 Terminate(); | 194 Terminate(); |
| 195 } | 195 } |
| 196 | 196 |
| 197 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { | 197 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
| 198 LOGI() << "AttachAudioBuffer"; | 198 LOGI() << "AttachAudioBuffer"; |
| 199 DCHECK(audioBuffer); | 199 RTC_DCHECK(audioBuffer); |
| 200 DCHECK(_threadChecker.CalledOnValidThread()); | 200 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 201 _audioDeviceBuffer = audioBuffer; | 201 _audioDeviceBuffer = audioBuffer; |
| 202 } | 202 } |
| 203 | 203 |
| 204 int32_t AudioDeviceIOS::Init() { | 204 int32_t AudioDeviceIOS::Init() { |
| 205 LOGI() << "Init"; | 205 LOGI() << "Init"; |
| 206 DCHECK(_threadChecker.CalledOnValidThread()); | 206 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 207 if (_initialized) { | 207 if (_initialized) { |
| 208 return 0; | 208 return 0; |
| 209 } | 209 } |
| 210 #if !defined(NDEBUG) | 210 #if !defined(NDEBUG) |
| 211 LogDeviceInfo(); | 211 LogDeviceInfo(); |
| 212 #endif | 212 #endif |
| 213 // Store the preferred sample rate and preferred number of channels already | 213 // Store the preferred sample rate and preferred number of channels already |
| 214 // here. They have not been set and confirmed yet since ActivateAudioSession() | 214 // here. They have not been set and confirmed yet since ActivateAudioSession() |
| 215 // is not called until audio is about to start. However, it makes sense to | 215 // is not called until audio is about to start. However, it makes sense to |
| 216 // store the parameters now and then verify at a later stage. | 216 // store the parameters now and then verify at a later stage. |
| 217 _playoutParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels); | 217 _playoutParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels); |
| 218 _recordParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels); | 218 _recordParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels); |
| 219 // Ensure that the audio device buffer (ADB) knows about the internal audio | 219 // Ensure that the audio device buffer (ADB) knows about the internal audio |
| 220 // parameters. Note that, even if we are unable to get a mono audio session, | 220 // parameters. Note that, even if we are unable to get a mono audio session, |
| 221 // we will always tell the I/O audio unit to do a channel format conversion | 221 // we will always tell the I/O audio unit to do a channel format conversion |
| 222 // to guarantee mono on the "input side" of the audio unit. | 222 // to guarantee mono on the "input side" of the audio unit. |
| 223 UpdateAudioDeviceBuffer(); | 223 UpdateAudioDeviceBuffer(); |
| 224 _initialized = true; | 224 _initialized = true; |
| 225 return 0; | 225 return 0; |
| 226 } | 226 } |
| 227 | 227 |
| 228 int32_t AudioDeviceIOS::Terminate() { | 228 int32_t AudioDeviceIOS::Terminate() { |
| 229 LOGI() << "Terminate"; | 229 LOGI() << "Terminate"; |
| 230 DCHECK(_threadChecker.CalledOnValidThread()); | 230 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 231 if (!_initialized) { | 231 if (!_initialized) { |
| 232 return 0; | 232 return 0; |
| 233 } | 233 } |
| 234 ShutdownPlayOrRecord(); | 234 ShutdownPlayOrRecord(); |
| 235 _initialized = false; | 235 _initialized = false; |
| 236 return 0; | 236 return 0; |
| 237 } | 237 } |
| 238 | 238 |
| 239 int32_t AudioDeviceIOS::InitPlayout() { | 239 int32_t AudioDeviceIOS::InitPlayout() { |
| 240 LOGI() << "InitPlayout"; | 240 LOGI() << "InitPlayout"; |
| 241 DCHECK(_threadChecker.CalledOnValidThread()); | 241 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 242 DCHECK(_initialized); | 242 RTC_DCHECK(_initialized); |
| 243 DCHECK(!_playIsInitialized); | 243 RTC_DCHECK(!_playIsInitialized); |
| 244 DCHECK(!_playing); | 244 RTC_DCHECK(!_playing); |
| 245 if (!_recIsInitialized) { | 245 if (!_recIsInitialized) { |
| 246 if (!InitPlayOrRecord()) { | 246 if (!InitPlayOrRecord()) { |
| 247 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 247 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; |
| 248 return -1; | 248 return -1; |
| 249 } | 249 } |
| 250 } | 250 } |
| 251 _playIsInitialized = true; | 251 _playIsInitialized = true; |
| 252 return 0; | 252 return 0; |
| 253 } | 253 } |
| 254 | 254 |
| 255 int32_t AudioDeviceIOS::InitRecording() { | 255 int32_t AudioDeviceIOS::InitRecording() { |
| 256 LOGI() << "InitRecording"; | 256 LOGI() << "InitRecording"; |
| 257 DCHECK(_threadChecker.CalledOnValidThread()); | 257 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 258 DCHECK(_initialized); | 258 RTC_DCHECK(_initialized); |
| 259 DCHECK(!_recIsInitialized); | 259 RTC_DCHECK(!_recIsInitialized); |
| 260 DCHECK(!_recording); | 260 RTC_DCHECK(!_recording); |
| 261 if (!_playIsInitialized) { | 261 if (!_playIsInitialized) { |
| 262 if (!InitPlayOrRecord()) { | 262 if (!InitPlayOrRecord()) { |
| 263 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 263 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; |
| 264 return -1; | 264 return -1; |
| 265 } | 265 } |
| 266 } | 266 } |
| 267 _recIsInitialized = true; | 267 _recIsInitialized = true; |
| 268 return 0; | 268 return 0; |
| 269 } | 269 } |
| 270 | 270 |
| 271 int32_t AudioDeviceIOS::StartPlayout() { | 271 int32_t AudioDeviceIOS::StartPlayout() { |
| 272 LOGI() << "StartPlayout"; | 272 LOGI() << "StartPlayout"; |
| 273 DCHECK(_threadChecker.CalledOnValidThread()); | 273 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 274 DCHECK(_playIsInitialized); | 274 RTC_DCHECK(_playIsInitialized); |
| 275 DCHECK(!_playing); | 275 RTC_DCHECK(!_playing); |
| 276 _fineAudioBuffer->ResetPlayout(); | 276 _fineAudioBuffer->ResetPlayout(); |
| 277 if (!_recording) { | 277 if (!_recording) { |
| 278 OSStatus result = AudioOutputUnitStart(_vpioUnit); | 278 OSStatus result = AudioOutputUnitStart(_vpioUnit); |
| 279 if (result != noErr) { | 279 if (result != noErr) { |
| 280 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | 280 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; |
| 281 return -1; | 281 return -1; |
| 282 } | 282 } |
| 283 } | 283 } |
| 284 rtc::AtomicOps::ReleaseStore(&_playing, 1); | 284 rtc::AtomicOps::ReleaseStore(&_playing, 1); |
| 285 return 0; | 285 return 0; |
| 286 } | 286 } |
| 287 | 287 |
| 288 int32_t AudioDeviceIOS::StopPlayout() { | 288 int32_t AudioDeviceIOS::StopPlayout() { |
| 289 LOGI() << "StopPlayout"; | 289 LOGI() << "StopPlayout"; |
| 290 DCHECK(_threadChecker.CalledOnValidThread()); | 290 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 291 if (!_playIsInitialized || !_playing) { | 291 if (!_playIsInitialized || !_playing) { |
| 292 return 0; | 292 return 0; |
| 293 } | 293 } |
| 294 if (!_recording) { | 294 if (!_recording) { |
| 295 ShutdownPlayOrRecord(); | 295 ShutdownPlayOrRecord(); |
| 296 } | 296 } |
| 297 _playIsInitialized = false; | 297 _playIsInitialized = false; |
| 298 rtc::AtomicOps::ReleaseStore(&_playing, 0); | 298 rtc::AtomicOps::ReleaseStore(&_playing, 0); |
| 299 return 0; | 299 return 0; |
| 300 } | 300 } |
| 301 | 301 |
| 302 int32_t AudioDeviceIOS::StartRecording() { | 302 int32_t AudioDeviceIOS::StartRecording() { |
| 303 LOGI() << "StartRecording"; | 303 LOGI() << "StartRecording"; |
| 304 DCHECK(_threadChecker.CalledOnValidThread()); | 304 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 305 DCHECK(_recIsInitialized); | 305 RTC_DCHECK(_recIsInitialized); |
| 306 DCHECK(!_recording); | 306 RTC_DCHECK(!_recording); |
| 307 _fineAudioBuffer->ResetRecord(); | 307 _fineAudioBuffer->ResetRecord(); |
| 308 if (!_playing) { | 308 if (!_playing) { |
| 309 OSStatus result = AudioOutputUnitStart(_vpioUnit); | 309 OSStatus result = AudioOutputUnitStart(_vpioUnit); |
| 310 if (result != noErr) { | 310 if (result != noErr) { |
| 311 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | 311 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; |
| 312 return -1; | 312 return -1; |
| 313 } | 313 } |
| 314 } | 314 } |
| 315 rtc::AtomicOps::ReleaseStore(&_recording, 1); | 315 rtc::AtomicOps::ReleaseStore(&_recording, 1); |
| 316 return 0; | 316 return 0; |
| 317 } | 317 } |
| 318 | 318 |
| 319 int32_t AudioDeviceIOS::StopRecording() { | 319 int32_t AudioDeviceIOS::StopRecording() { |
| 320 LOGI() << "StopRecording"; | 320 LOGI() << "StopRecording"; |
| 321 DCHECK(_threadChecker.CalledOnValidThread()); | 321 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 322 if (!_recIsInitialized || !_recording) { | 322 if (!_recIsInitialized || !_recording) { |
| 323 return 0; | 323 return 0; |
| 324 } | 324 } |
| 325 if (!_playing) { | 325 if (!_playing) { |
| 326 ShutdownPlayOrRecord(); | 326 ShutdownPlayOrRecord(); |
| 327 } | 327 } |
| 328 _recIsInitialized = false; | 328 _recIsInitialized = false; |
| 329 rtc::AtomicOps::ReleaseStore(&_recording, 0); | 329 rtc::AtomicOps::ReleaseStore(&_recording, 0); |
| 330 return 0; | 330 return 0; |
| 331 } | 331 } |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 370 return 0; | 370 return 0; |
| 371 } | 371 } |
| 372 | 372 |
| 373 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { | 373 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { |
| 374 delayMS = kFixedRecordDelayEstimate; | 374 delayMS = kFixedRecordDelayEstimate; |
| 375 return 0; | 375 return 0; |
| 376 } | 376 } |
| 377 | 377 |
| 378 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const { | 378 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const { |
| 379 LOGI() << "GetPlayoutAudioParameters"; | 379 LOGI() << "GetPlayoutAudioParameters"; |
| 380 DCHECK(_playoutParameters.is_valid()); | 380 RTC_DCHECK(_playoutParameters.is_valid()); |
| 381 DCHECK(_threadChecker.CalledOnValidThread()); | 381 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 382 *params = _playoutParameters; | 382 *params = _playoutParameters; |
| 383 return 0; | 383 return 0; |
| 384 } | 384 } |
| 385 | 385 |
| 386 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { | 386 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { |
| 387 LOGI() << "GetRecordAudioParameters"; | 387 LOGI() << "GetRecordAudioParameters"; |
| 388 DCHECK(_recordParameters.is_valid()); | 388 RTC_DCHECK(_recordParameters.is_valid()); |
| 389 DCHECK(_threadChecker.CalledOnValidThread()); | 389 RTC_DCHECK(_threadChecker.CalledOnValidThread()); |
| 390 *params = _recordParameters; | 390 *params = _recordParameters; |
| 391 return 0; | 391 return 0; |
| 392 } | 392 } |
| 393 | 393 |
| 394 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { | 394 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { |
| 395 LOGI() << "UpdateAudioDevicebuffer"; | 395 LOGI() << "UpdateAudioDevicebuffer"; |
| 396 // AttachAudioBuffer() is called at construction by the main class but check | 396 // AttachAudioBuffer() is called at construction by the main class but check |
| 397 // just in case. | 397 // just in case. |
| 398 DCHECK(_audioDeviceBuffer) << "AttachAudioBuffer must be called first"; | 398 RTC_DCHECK(_audioDeviceBuffer) << "AttachAudioBuffer must be called first"; |
| 399 // Inform the audio device buffer (ADB) about the new audio format. | 399 // Inform the audio device buffer (ADB) about the new audio format. |
| 400 _audioDeviceBuffer->SetPlayoutSampleRate(_playoutParameters.sample_rate()); | 400 _audioDeviceBuffer->SetPlayoutSampleRate(_playoutParameters.sample_rate()); |
| 401 _audioDeviceBuffer->SetPlayoutChannels(_playoutParameters.channels()); | 401 _audioDeviceBuffer->SetPlayoutChannels(_playoutParameters.channels()); |
| 402 _audioDeviceBuffer->SetRecordingSampleRate(_recordParameters.sample_rate()); | 402 _audioDeviceBuffer->SetRecordingSampleRate(_recordParameters.sample_rate()); |
| 403 _audioDeviceBuffer->SetRecordingChannels(_recordParameters.channels()); | 403 _audioDeviceBuffer->SetRecordingChannels(_recordParameters.channels()); |
| 404 } | 404 } |
| 405 | 405 |
| 406 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { | 406 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { |
| 407 LOGI() << "SetupAudioBuffersForActiveAudioSession"; | 407 LOGI() << "SetupAudioBuffersForActiveAudioSession"; |
| 408 AVAudioSession* session = [AVAudioSession sharedInstance]; | 408 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| (...skipping 12 matching lines...) Expand all Loading... |
| 421 << "Failed to enable an audio session with the preferred sample rate!"; | 421 << "Failed to enable an audio session with the preferred sample rate!"; |
| 422 } | 422 } |
| 423 | 423 |
| 424 // At this stage, we also know the exact IO buffer duration and can add | 424 // At this stage, we also know the exact IO buffer duration and can add |
| 425 // that info to the existing audio parameters where it is converted into | 425 // that info to the existing audio parameters where it is converted into |
| 426 // number of audio frames. | 426 // number of audio frames. |
| 427 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. | 427 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. |
| 428 // Hence, 128 is the size we expect to see in upcoming render callbacks. | 428 // Hence, 128 is the size we expect to see in upcoming render callbacks. |
| 429 _playoutParameters.reset(session.sampleRate, _playoutParameters.channels(), | 429 _playoutParameters.reset(session.sampleRate, _playoutParameters.channels(), |
| 430 session.IOBufferDuration); | 430 session.IOBufferDuration); |
| 431 DCHECK(_playoutParameters.is_complete()); | 431 RTC_DCHECK(_playoutParameters.is_complete()); |
| 432 _recordParameters.reset(session.sampleRate, _recordParameters.channels(), | 432 _recordParameters.reset(session.sampleRate, _recordParameters.channels(), |
| 433 session.IOBufferDuration); | 433 session.IOBufferDuration); |
| 434 DCHECK(_recordParameters.is_complete()); | 434 RTC_DCHECK(_recordParameters.is_complete()); |
| 435 LOG(LS_INFO) << " frames per I/O buffer: " | 435 LOG(LS_INFO) << " frames per I/O buffer: " |
| 436 << _playoutParameters.frames_per_buffer(); | 436 << _playoutParameters.frames_per_buffer(); |
| 437 LOG(LS_INFO) << " bytes per I/O buffer: " | 437 LOG(LS_INFO) << " bytes per I/O buffer: " |
| 438 << _playoutParameters.GetBytesPerBuffer(); | 438 << _playoutParameters.GetBytesPerBuffer(); |
| 439 DCHECK_EQ(_playoutParameters.GetBytesPerBuffer(), | 439 RTC_DCHECK_EQ(_playoutParameters.GetBytesPerBuffer(), |
| 440 _recordParameters.GetBytesPerBuffer()); | 440 _recordParameters.GetBytesPerBuffer()); |
| 441 | 441 |
| 442 // Update the ADB parameters since the sample rate might have changed. | 442 // Update the ADB parameters since the sample rate might have changed. |
| 443 UpdateAudioDeviceBuffer(); | 443 UpdateAudioDeviceBuffer(); |
| 444 | 444 |
| 445 // Create a modified audio buffer class which allows us to ask for, | 445 // Create a modified audio buffer class which allows us to ask for, |
| 446 // or deliver, any number of samples (and not only multiple of 10ms) to match | 446 // or deliver, any number of samples (and not only multiple of 10ms) to match |
| 447 // the native audio unit buffer size. | 447 // the native audio unit buffer size. |
| 448 DCHECK(_audioDeviceBuffer); | 448 RTC_DCHECK(_audioDeviceBuffer); |
| 449 _fineAudioBuffer.reset(new FineAudioBuffer( | 449 _fineAudioBuffer.reset(new FineAudioBuffer( |
| 450 _audioDeviceBuffer, _playoutParameters.GetBytesPerBuffer(), | 450 _audioDeviceBuffer, _playoutParameters.GetBytesPerBuffer(), |
| 451 _playoutParameters.sample_rate())); | 451 _playoutParameters.sample_rate())); |
| 452 | 452 |
| 453 // The extra/temporary playoutbuffer must be of this size to avoid | 453 // The extra/temporary playoutbuffer must be of this size to avoid |
| 454 // unnecessary memcpy while caching data between successive callbacks. | 454 // unnecessary memcpy while caching data between successive callbacks. |
| 455 const int requiredPlayoutBufferSize = | 455 const int requiredPlayoutBufferSize = |
| 456 _fineAudioBuffer->RequiredPlayoutBufferSizeBytes(); | 456 _fineAudioBuffer->RequiredPlayoutBufferSizeBytes(); |
| 457 LOG(LS_INFO) << " required playout buffer size: " | 457 LOG(LS_INFO) << " required playout buffer size: " |
| 458 << requiredPlayoutBufferSize; | 458 << requiredPlayoutBufferSize; |
| 459 _playoutAudioBuffer.reset(new SInt8[requiredPlayoutBufferSize]); | 459 _playoutAudioBuffer.reset(new SInt8[requiredPlayoutBufferSize]); |
| 460 | 460 |
| 461 // Allocate AudioBuffers to be used as storage for the received audio. | 461 // Allocate AudioBuffers to be used as storage for the received audio. |
| 462 // The AudioBufferList structure works as a placeholder for the | 462 // The AudioBufferList structure works as a placeholder for the |
| 463 // AudioBuffer structure, which holds a pointer to the actual data buffer | 463 // AudioBuffer structure, which holds a pointer to the actual data buffer |
| 464 // in |_recordAudioBuffer|. Recorded audio will be rendered into this memory | 464 // in |_recordAudioBuffer|. Recorded audio will be rendered into this memory |
| 465 // at each input callback when calling AudioUnitRender(). | 465 // at each input callback when calling AudioUnitRender(). |
| 466 const int dataByteSize = _recordParameters.GetBytesPerBuffer(); | 466 const int dataByteSize = _recordParameters.GetBytesPerBuffer(); |
| 467 _recordAudioBuffer.reset(new SInt8[dataByteSize]); | 467 _recordAudioBuffer.reset(new SInt8[dataByteSize]); |
| 468 _audioRecordBufferList.mNumberBuffers = 1; | 468 _audioRecordBufferList.mNumberBuffers = 1; |
| 469 AudioBuffer* audioBuffer = &_audioRecordBufferList.mBuffers[0]; | 469 AudioBuffer* audioBuffer = &_audioRecordBufferList.mBuffers[0]; |
| 470 audioBuffer->mNumberChannels = _recordParameters.channels(); | 470 audioBuffer->mNumberChannels = _recordParameters.channels(); |
| 471 audioBuffer->mDataByteSize = dataByteSize; | 471 audioBuffer->mDataByteSize = dataByteSize; |
| 472 audioBuffer->mData = _recordAudioBuffer.get(); | 472 audioBuffer->mData = _recordAudioBuffer.get(); |
| 473 } | 473 } |
| 474 | 474 |
| 475 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { | 475 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
| 476 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; | 476 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; |
| 477 DCHECK(!_vpioUnit); | 477 RTC_DCHECK(!_vpioUnit); |
| 478 // Create an audio component description to identify the Voice-Processing | 478 // Create an audio component description to identify the Voice-Processing |
| 479 // I/O audio unit. | 479 // I/O audio unit. |
| 480 AudioComponentDescription vpioUnitDescription; | 480 AudioComponentDescription vpioUnitDescription; |
| 481 vpioUnitDescription.componentType = kAudioUnitType_Output; | 481 vpioUnitDescription.componentType = kAudioUnitType_Output; |
| 482 vpioUnitDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO; | 482 vpioUnitDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO; |
| 483 vpioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple; | 483 vpioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple; |
| 484 vpioUnitDescription.componentFlags = 0; | 484 vpioUnitDescription.componentFlags = 0; |
| 485 vpioUnitDescription.componentFlagsMask = 0; | 485 vpioUnitDescription.componentFlagsMask = 0; |
| 486 // Obtain an audio unit instance given the description. | 486 // Obtain an audio unit instance given the description. |
| 487 AudioComponent foundVpioUnitRef = | 487 AudioComponent foundVpioUnitRef = |
| (...skipping 24 matching lines...) Expand all Loading... |
| 512 sizeof(enableOutput)), | 512 sizeof(enableOutput)), |
| 513 "Failed to enable output on output scope of output element"); | 513 "Failed to enable output on output scope of output element"); |
| 514 | 514 |
| 515 // Set the application formats for input and output: | 515 // Set the application formats for input and output: |
| 516 // - use same format in both directions | 516 // - use same format in both directions |
| 517 // - avoid resampling in the I/O unit by using the hardware sample rate | 517 // - avoid resampling in the I/O unit by using the hardware sample rate |
| 518 // - linear PCM => noncompressed audio data format with one frame per packet | 518 // - linear PCM => noncompressed audio data format with one frame per packet |
| 519 // - no need to specify interleaving since only mono is supported | 519 // - no need to specify interleaving since only mono is supported |
| 520 AudioStreamBasicDescription applicationFormat = {0}; | 520 AudioStreamBasicDescription applicationFormat = {0}; |
| 521 UInt32 size = sizeof(applicationFormat); | 521 UInt32 size = sizeof(applicationFormat); |
| 522 DCHECK_EQ(_playoutParameters.sample_rate(), _recordParameters.sample_rate()); | 522 RTC_DCHECK_EQ(_playoutParameters.sample_rate(), |
| 523 DCHECK_EQ(1, kPreferredNumberOfChannels); | 523 _recordParameters.sample_rate()); |
| 524 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); |
| 524 applicationFormat.mSampleRate = _playoutParameters.sample_rate(); | 525 applicationFormat.mSampleRate = _playoutParameters.sample_rate(); |
| 525 applicationFormat.mFormatID = kAudioFormatLinearPCM; | 526 applicationFormat.mFormatID = kAudioFormatLinearPCM; |
| 526 applicationFormat.mFormatFlags = | 527 applicationFormat.mFormatFlags = |
| 527 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; | 528 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
| 528 applicationFormat.mBytesPerPacket = kBytesPerSample; | 529 applicationFormat.mBytesPerPacket = kBytesPerSample; |
| 529 applicationFormat.mFramesPerPacket = 1; // uncompressed | 530 applicationFormat.mFramesPerPacket = 1; // uncompressed |
| 530 applicationFormat.mBytesPerFrame = kBytesPerSample; | 531 applicationFormat.mBytesPerFrame = kBytesPerSample; |
| 531 applicationFormat.mChannelsPerFrame = kPreferredNumberOfChannels; | 532 applicationFormat.mChannelsPerFrame = kPreferredNumberOfChannels; |
| 532 applicationFormat.mBitsPerChannel = 8 * kBytesPerSample; | 533 applicationFormat.mBitsPerChannel = 8 * kBytesPerSample; |
| 533 #if !defined(NDEBUG) | 534 #if !defined(NDEBUG) |
| (...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 673 return true; | 674 return true; |
| 674 } | 675 } |
| 675 | 676 |
// Static AURenderCallback registered on the input side of the Voice-Processing
// I/O unit. Runs on the real-time audio capture thread whenever the hardware
// has delivered a new chunk of recorded audio. |inRefCon| carries the owning
// AudioDeviceIOS instance; all real work is delegated to the member method.
OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
    void* inRefCon,
    AudioUnitRenderActionFlags* ioActionFlags,
    const AudioTimeStamp* inTimeStamp,
    UInt32 inBusNumber,
    UInt32 inNumberFrames,
    AudioBufferList* ioData) {
  // Recording always arrives on the input element (bus 1), and no buffer
  // list is provided here; rendering into our own buffer happens later via
  // AudioUnitRender() in the member implementation.
  RTC_DCHECK_EQ(1u, inBusNumber);
  RTC_DCHECK(!ioData);
  AudioDeviceIOS* device = static_cast<AudioDeviceIOS*>(inRefCon);
  return device->OnRecordedDataIsAvailable(ioActionFlags, inTimeStamp,
                                           inBusNumber, inNumberFrames);
}
| 689 | 690 |
// Member-side handler for the input callback. Pulls the newly recorded audio
// out of the audio unit with AudioUnitRender() (rendering into the
// preallocated |_audioRecordBufferList|) and forwards it to the WebRTC audio
// device buffer via |_fineAudioBuffer|, which adapts between the native
// buffer size and WebRTC's 10ms chunks.
// Runs on the real-time capture thread; must stay lock-free and allocation
// free, hence the atomic load of |_recording| and the preallocated buffers.
OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
    AudioUnitRenderActionFlags* ioActionFlags,
    const AudioTimeStamp* inTimeStamp,
    UInt32 inBusNumber,
    UInt32 inNumberFrames) {
  RTC_DCHECK_EQ(_recordParameters.frames_per_buffer(), inNumberFrames);
  OSStatus result = noErr;
  // Simply return if recording is not enabled.
  if (!rtc::AtomicOps::AcquireLoad(&_recording))
    return result;
  // Obtain the recorded audio samples by initiating a rendering cycle.
  // Since it happens on the input bus, the |ioData| parameter is a reference
  // to the preallocated audio buffer list that the audio unit renders into.
  // TODO(henrika): should error handling be improved?
  AudioBufferList* ioData = &_audioRecordBufferList;
  result = AudioUnitRender(_vpioUnit, ioActionFlags, inTimeStamp, inBusNumber,
                           inNumberFrames, ioData);
  if (result != noErr) {
    // BUG FIX: the original message said "AudioOutputUnitStart failed", but
    // the call that failed here is AudioUnitRender(); log the right API name.
    LOG_F(LS_ERROR) << "AudioUnitRender failed: " << result;
    return result;
  }
  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
  // Use the FineAudioBuffer instance to convert between native buffer size
  // and the 10ms buffer size used by WebRTC.
  const UInt32 dataSizeInBytes = ioData->mBuffers[0].mDataByteSize;
  RTC_CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames);
  SInt8* data = static_cast<SInt8*>(ioData->mBuffers[0].mData);
  _fineAudioBuffer->DeliverRecordedData(data, dataSizeInBytes,
                                        kFixedPlayoutDelayEstimate,
                                        kFixedRecordDelayEstimate);
  return noErr;
}
| 722 | 723 |
// Static AURenderCallback registered on the output side of the
// Voice-Processing I/O unit. Runs on the real-time playback thread each time
// the audio unit needs more audio to play out. |inRefCon| carries the owning
// AudioDeviceIOS instance; the member method fills |ioData|.
OSStatus AudioDeviceIOS::GetPlayoutData(
    void* inRefCon,
    AudioUnitRenderActionFlags* ioActionFlags,
    const AudioTimeStamp* inTimeStamp,
    UInt32 inBusNumber,
    UInt32 inNumberFrames,
    AudioBufferList* ioData) {
  // Playout requests always arrive on the output element (bus 0) with a
  // valid destination buffer list to be filled in.
  RTC_DCHECK_EQ(0u, inBusNumber);
  RTC_DCHECK(ioData);
  AudioDeviceIOS* device = static_cast<AudioDeviceIOS*>(inRefCon);
  return device->OnGetPlayoutData(ioActionFlags, inNumberFrames, ioData);
}
| 736 | 737 |
// Member-side handler for the output callback: fills |ioData| with the next
// chunk of decoded 16-bit mono PCM pulled from WebRTC via |_fineAudioBuffer|,
// or with silence when playout is disabled. Runs on the real-time playback
// thread; must stay lock-free and allocation free.
OSStatus AudioDeviceIOS::OnGetPlayoutData(
    AudioUnitRenderActionFlags* ioActionFlags,
    UInt32 inNumberFrames,
    AudioBufferList* ioData) {
  // Verify 16-bit, noninterleaved mono PCM signal format.
  RTC_DCHECK_EQ(1u, ioData->mNumberBuffers);
  RTC_DCHECK_EQ(1u, ioData->mBuffers[0].mNumberChannels);
  // Locate the destination memory the audio unit expects us to fill.
  AudioBuffer* outputBuffer = &ioData->mBuffers[0];
  const UInt32 numBytes = outputBuffer->mDataByteSize;
  RTC_CHECK_EQ(numBytes / kBytesPerSample, inNumberFrames);
  SInt8* sink = static_cast<SInt8*>(outputBuffer->mData);
  if (!rtc::AtomicOps::AcquireLoad(&_playing)) {
    // Playout is not active: emit zeros and hint the unit that the output
    // is pure silence so it can optimize downstream processing.
    *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
    memset(sink, 0, numBytes);
    return noErr;
  }
  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
  // the native I/O audio unit) into the preallocated intermediate buffer,
  // then copy the result into the |ioData| destination.
  SInt8* scratch = _playoutAudioBuffer.get();
  _fineAudioBuffer->GetPlayoutData(scratch);
  memcpy(sink, scratch, numBytes);
  return noErr;
}
| 764 | 765 |
| 765 } // namespace webrtc | 766 } // namespace webrtc |
| OLD | NEW |