Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright 2016 The WebRTC Project Authors. All rights reserved. | 2 * Copyright 2016 The WebRTC Project Authors. All rights reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 168 state_ = kUninitialized; | 168 state_ = kUninitialized; |
| 169 return true; | 169 return true; |
| 170 } | 170 } |
| 171 | 171 |
| 172 VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const { | 172 VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const { |
| 173 return state_; | 173 return state_; |
| 174 } | 174 } |
| 175 | 175 |
| 176 bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) { | 176 bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) { |
| 177 RTC_DCHECK_GE(state_, kUninitialized); | 177 RTC_DCHECK_GE(state_, kUninitialized); |
| 178 RTCLog(@"Initializing audio unit."); | 178 RTCLog(@"Initializing audio unit with sample rate: %f", sample_rate); |
| 179 | 179 |
| 180 OSStatus result = noErr; | 180 OSStatus result = noErr; |
| 181 AudioStreamBasicDescription format = GetFormat(sample_rate); | 181 AudioStreamBasicDescription format = GetFormat(sample_rate); |
| 182 UInt32 size = sizeof(format); | 182 UInt32 size = sizeof(format); |
| 183 #if !defined(NDEBUG) | 183 #if !defined(NDEBUG) |
| 184 LogStreamDescription(format); | 184 LogStreamDescription(format); |
| 185 #endif | 185 #endif |
| 186 | 186 |
| 187 // Set the format on the output scope of the input element/bus. | 187 // Set the format on the output scope of the input element/bus. |
| 188 result = | 188 result = |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 221 ++failed_initalize_attempts; | 221 ++failed_initalize_attempts; |
| 222 if (failed_initalize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) { | 222 if (failed_initalize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) { |
| 223 // Max number of initialization attempts exceeded, hence abort. | 223 // Max number of initialization attempts exceeded, hence abort. |
| 224 RTCLogError(@"Too many initialization attempts."); | 224 RTCLogError(@"Too many initialization attempts."); |
| 225 return false; | 225 return false; |
| 226 } | 226 } |
| 227 RTCLog(@"Pause 100ms and try audio unit initialization again..."); | 227 RTCLog(@"Pause 100ms and try audio unit initialization again..."); |
| 228 [NSThread sleepForTimeInterval:0.1f]; | 228 [NSThread sleepForTimeInterval:0.1f]; |
| 229 result = AudioUnitInitialize(vpio_unit_); | 229 result = AudioUnitInitialize(vpio_unit_); |
| 230 } | 230 } |
| 231 RTCLog(@"Voice Processing I/O unit is now initialized."); | 231 if (result == noErr) { |
| 232 RTCLog(@"Voice Processing I/O unit is now initialized."); | |
| 233 } | |
| 232 state_ = kInitialized; | 234 state_ = kInitialized; |
| 233 return true; | 235 return true; |
| 234 } | 236 } |
| 235 | 237 |
| 236 bool VoiceProcessingAudioUnit::Start() { | 238 bool VoiceProcessingAudioUnit::Start() { |
| 237 RTC_DCHECK_GE(state_, kUninitialized); | 239 RTC_DCHECK_GE(state_, kUninitialized); |
| 238 RTCLog(@"Starting audio unit."); | 240 RTCLog(@"Starting audio unit."); |
| 239 | 241 |
| 240 OSStatus result = AudioOutputUnitStart(vpio_unit_); | 242 OSStatus result = |
|
henrika_webrtc
2016/05/04 12:33:07
Perhaps a comment regarding why we need this listener?
tkchin_webrtc
2016/05/05 23:23:28
Removing these changes for now. Will address BT issues in a follow-up.
henrika_webrtc
2016/05/06 11:22:17
Acknowledged.
| |
| 243 AudioUnitAddPropertyListener(vpio_unit_, kAudioUnitProperty_SampleRate, | |
| 244 OnSampleRateChange, this); | |
| 245 if (result != noErr) { | |
| 246 RTCLogError(@"Failed to add sample rate listener. Error=%ld.", | |
| 247 (long)result); | |
| 248 } | |
| 249 | |
| 250 result = AudioOutputUnitStart(vpio_unit_); | |
| 241 if (result != noErr) { | 251 if (result != noErr) { |
| 242 RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result); | 252 RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result); |
| 243 return false; | 253 return false; |
| 254 } else { | |
| 255 RTCLog(@"Started audio unit"); | |
| 244 } | 256 } |
| 245 state_ = kStarted; | 257 state_ = kStarted; |
| 246 return true; | 258 return true; |
| 247 } | 259 } |
| 248 | 260 |
| 249 bool VoiceProcessingAudioUnit::Stop() { | 261 bool VoiceProcessingAudioUnit::Stop() { |
| 250 RTC_DCHECK_GE(state_, kUninitialized); | 262 RTC_DCHECK_GE(state_, kUninitialized); |
| 251 RTCLog(@"Stopping audio unit."); | 263 RTCLog(@"Stopping audio unit."); |
| 252 | 264 |
| 253 OSStatus result = AudioOutputUnitStop(vpio_unit_); | 265 OSStatus result = AudioOutputUnitStop(vpio_unit_); |
| 254 if (result != noErr) { | 266 if (result != noErr) { |
| 255 RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result); | 267 RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result); |
| 256 return false; | 268 return false; |
| 269 } else { | |
| 270 RTCLog(@"Stopped audio unit"); | |
| 257 } | 271 } |
| 272 | |
| 273 result = | |
|
henrika_webrtc
2016/05/04 12:33:07
Do we need a check that it is disabled at close?
tkchin_webrtc
2016/05/05 23:23:28
Removing these changes for now. Will address BT issues in a follow-up.
henrika_webrtc
2016/05/06 11:22:16
Acknowledged.
| |
| 274 AudioUnitRemovePropertyListenerWithUserData(vpio_unit_, | |
| 275 kAudioUnitProperty_SampleRate, | |
| 276 OnSampleRateChange, this); | |
| 277 if (result != noErr) { | |
| 278 RTCLogError(@"Failed to remove sample rate listener. Error=%ld.", | |
| 279 (long)result); | |
| 280 } | |
| 281 | |
| 258 state_ = kInitialized; | 282 state_ = kInitialized; |
| 259 return true; | 283 return true; |
| 260 } | 284 } |
| 261 | 285 |
| 262 bool VoiceProcessingAudioUnit::Uninitialize() { | 286 bool VoiceProcessingAudioUnit::Uninitialize() { |
| 263 RTC_DCHECK_GE(state_, kUninitialized); | 287 RTC_DCHECK_GE(state_, kUninitialized); |
| 264 RTCLog(@"Unintializing audio unit."); | 288 RTCLog(@"Unintializing audio unit."); |
| 265 | 289 |
| 266 OSStatus result = AudioUnitUninitialize(vpio_unit_); | 290 OSStatus result = AudioUnitUninitialize(vpio_unit_); |
| 267 if (result != noErr) { | 291 if (result != noErr) { |
| 268 RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result); | 292 RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result); |
| 269 return false; | 293 return false; |
| 294 } else { | |
| 295 RTCLog(@"Uninitialized audio unit."); | |
| 270 } | 296 } |
| 271 return true; | 297 return true; |
| 272 } | 298 } |
| 273 | 299 |
| 274 OSStatus VoiceProcessingAudioUnit::Render(AudioUnitRenderActionFlags* flags, | 300 OSStatus VoiceProcessingAudioUnit::Render(AudioUnitRenderActionFlags* flags, |
| 275 const AudioTimeStamp* time_stamp, | 301 const AudioTimeStamp* time_stamp, |
| 276 UInt32 output_bus_number, | 302 UInt32 output_bus_number, |
| 277 UInt32 num_frames, | 303 UInt32 num_frames, |
| 278 AudioBufferList* io_data) { | 304 AudioBufferList* io_data) { |
| 279 RTC_DCHECK(vpio_unit_) << "Init() not called."; | 305 RTC_DCHECK(vpio_unit_) << "Init() not called."; |
| (...skipping 25 matching lines...) Expand all Loading... | |
| 305 const AudioTimeStamp* time_stamp, | 331 const AudioTimeStamp* time_stamp, |
| 306 UInt32 bus_number, | 332 UInt32 bus_number, |
| 307 UInt32 num_frames, | 333 UInt32 num_frames, |
| 308 AudioBufferList* io_data) { | 334 AudioBufferList* io_data) { |
| 309 VoiceProcessingAudioUnit* audio_unit = | 335 VoiceProcessingAudioUnit* audio_unit = |
| 310 static_cast<VoiceProcessingAudioUnit*>(in_ref_con); | 336 static_cast<VoiceProcessingAudioUnit*>(in_ref_con); |
| 311 return audio_unit->NotifyDeliverRecordedData(flags, time_stamp, bus_number, | 337 return audio_unit->NotifyDeliverRecordedData(flags, time_stamp, bus_number, |
| 312 num_frames, io_data); | 338 num_frames, io_data); |
| 313 } | 339 } |
| 314 | 340 |
| 341 void VoiceProcessingAudioUnit::OnSampleRateChange( | |
|
henrika_webrtc
2016/05/04 12:33:07
Can we now get this callback and the routing change at the same time?
tkchin_webrtc
2016/05/05 23:23:28
There shouldn't be conflicts - we are always using
henrika_webrtc
2016/05/06 11:22:17
Acknowledged.
| |
| 342 void* in_ref_con, | |
| 343 AudioUnit audio_unit, | |
| 344 AudioUnitPropertyID property_id, | |
| 345 AudioUnitScope scope, | |
| 346 AudioUnitElement element) { | |
| 347 VoiceProcessingAudioUnit* vp_unit = | |
| 348 static_cast<VoiceProcessingAudioUnit*>(in_ref_con); | |
| 349 vp_unit->NotifySampleRateChange(audio_unit, property_id, scope, element); | |
| 350 } | |
| 351 | |
| 315 OSStatus VoiceProcessingAudioUnit::NotifyGetPlayoutData( | 352 OSStatus VoiceProcessingAudioUnit::NotifyGetPlayoutData( |
| 316 AudioUnitRenderActionFlags* flags, | 353 AudioUnitRenderActionFlags* flags, |
| 317 const AudioTimeStamp* time_stamp, | 354 const AudioTimeStamp* time_stamp, |
| 318 UInt32 bus_number, | 355 UInt32 bus_number, |
| 319 UInt32 num_frames, | 356 UInt32 num_frames, |
| 320 AudioBufferList* io_data) { | 357 AudioBufferList* io_data) { |
| 321 return observer_->OnGetPlayoutData(flags, time_stamp, bus_number, num_frames, | 358 return observer_->OnGetPlayoutData(flags, time_stamp, bus_number, num_frames, |
| 322 io_data); | 359 io_data); |
| 323 } | 360 } |
| 324 | 361 |
| 325 OSStatus VoiceProcessingAudioUnit::NotifyDeliverRecordedData( | 362 OSStatus VoiceProcessingAudioUnit::NotifyDeliverRecordedData( |
| 326 AudioUnitRenderActionFlags* flags, | 363 AudioUnitRenderActionFlags* flags, |
| 327 const AudioTimeStamp* time_stamp, | 364 const AudioTimeStamp* time_stamp, |
| 328 UInt32 bus_number, | 365 UInt32 bus_number, |
| 329 UInt32 num_frames, | 366 UInt32 num_frames, |
| 330 AudioBufferList* io_data) { | 367 AudioBufferList* io_data) { |
| 331 return observer_->OnDeliverRecordedData(flags, time_stamp, bus_number, | 368 return observer_->OnDeliverRecordedData(flags, time_stamp, bus_number, |
| 332 num_frames, io_data); | 369 num_frames, io_data); |
| 333 } | 370 } |
| 334 | 371 |
| 372 void VoiceProcessingAudioUnit::NotifySampleRateChange( | |
| 373 AudioUnit audio_unit, | |
| 374 AudioUnitPropertyID property_id, | |
| 375 AudioUnitScope scope, | |
| 376 AudioUnitElement element) { | |
| 377 RTC_DCHECK_EQ(audio_unit, vpio_unit_); | |
| 378 RTC_DCHECK_EQ(property_id, kAudioUnitProperty_SampleRate); | |
| 379 // We only care about the output. | |
|
henrika_webrtc
2016/05/04 12:33:07
Have you really been able to trigger the callback on other scopes?
tkchin_webrtc
2016/05/05 23:23:28
Nope. But just in case.
henrika_webrtc
2016/05/06 11:22:17
Acknowledged.
| |
| 380 if (scope != kAudioUnitScope_Output || element != kOutputBus) { | |
| 381 return; | |
| 382 } | |
| 383 Float64 sample_rate = 0; | |
| 384 UInt32 sample_rate_size = sizeof(Float64); | |
| 385 OSStatus status = AudioUnitGetProperty(audio_unit, | |
| 386 kAudioUnitProperty_SampleRate, | |
| 387 kAudioUnitScope_Output, | |
| 388 kOutputBus, | |
| 389 &sample_rate, | |
| 390 &sample_rate_size); | |
| 391 if (status != noErr) { | |
| 392 RTCLogError(@"Failed to get sample rate. Error=%ld.", (long)status); | |
| 393 return; | |
| 394 } | |
| 395 observer_->OnSampleRateChange(sample_rate); | |
| 396 } | |
| 397 | |
| 335 AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat( | 398 AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat( |
| 336 Float64 sample_rate) const { | 399 Float64 sample_rate) const { |
| 337 // Set the application formats for input and output: | 400 // Set the application formats for input and output: |
| 338 // - use same format in both directions | 401 // - use same format in both directions |
| 339 // - avoid resampling in the I/O unit by using the hardware sample rate | 402 // - avoid resampling in the I/O unit by using the hardware sample rate |
| 340 // - linear PCM => noncompressed audio data format with one frame per packet | 403 // - linear PCM => noncompressed audio data format with one frame per packet |
| 341 // - no need to specify interleaving since only mono is supported | 404 // - no need to specify interleaving since only mono is supported |
| 342 AudioStreamBasicDescription format = {0}; | 405 AudioStreamBasicDescription format = {0}; |
| 343 RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels); | 406 RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels); |
| 344 format.mSampleRate = sample_rate; | 407 format.mSampleRate = sample_rate; |
| (...skipping 25 matching lines...) Expand all Loading... | |
| 370 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); | 433 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); |
| 371 if (result != noErr) { | 434 if (result != noErr) { |
| 372 RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.", | 435 RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.", |
| 373 (long)result); | 436 (long)result); |
| 374 } | 437 } |
| 375 vpio_unit_ = nullptr; | 438 vpio_unit_ = nullptr; |
| 376 } | 439 } |
| 377 } | 440 } |
| 378 | 441 |
| 379 } // namespace webrtc | 442 } // namespace webrtc |
| OLD | NEW |