| Index: webrtc/modules/audio_device/ios/voice_processing_audio_unit.h
 | 
| diff --git a/webrtc/modules/audio_device/ios/voice_processing_audio_unit.h b/webrtc/modules/audio_device/ios/voice_processing_audio_unit.h
 | 
| new file mode 100644
 | 
| index 0000000000000000000000000000000000000000..c1e5cafb4bbc9500c70c172b9a21064217ddb4c5
 | 
| --- /dev/null
 | 
| +++ b/webrtc/modules/audio_device/ios/voice_processing_audio_unit.h
 | 
| @@ -0,0 +1,124 @@
 | 
| +/*
 | 
| + *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
 | 
| + *
 | 
| + *  Use of this source code is governed by a BSD-style license
 | 
| + *  that can be found in the LICENSE file in the root of the source
 | 
| + *  tree. An additional intellectual property rights grant can be found
 | 
| + *  in the file PATENTS.  All contributing project authors may
 | 
| + *  be found in the AUTHORS file in the root of the source tree.
 | 
| + */
 | 
| +
 | 
| +#ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
 | 
| +#define WEBRTC_MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
 | 
| +
 | 
| +#include <AudioUnit/AudioUnit.h>
 | 
| +
 | 
| +namespace webrtc {
 | 
| +
 | 
| +class VoiceProcessingAudioUnitObserver {
 | 
| + public:
 | 
| +  // Callback function called on a real-time priority I/O thread from the audio
 | 
| +  // unit. This method is used to signal that recorded audio is available.
 | 
| +  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
 | 
| +                                         const AudioTimeStamp* time_stamp,
 | 
| +                                         UInt32 bus_number,
 | 
| +                                         UInt32 num_frames,
 | 
| +                                         AudioBufferList* io_data) = 0;
 | 
| +
 | 
| +  // Callback function called on a real-time priority I/O thread from the audio
 | 
| +  // unit. This method is used to provide audio samples to the audio unit.
 | 
| +  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
 | 
| +                                    const AudioTimeStamp* time_stamp,
 | 
| +                                    UInt32 bus_number,
 | 
| +                                    UInt32 num_frames,
 | 
| +                                    AudioBufferList* io_data) = 0;
 | 
| +
 | 
| + protected:
 | 
| +  ~VoiceProcessingAudioUnitObserver() {}
 | 
| +};
 | 
| +
 | 
| +// Convenience class to abstract away the management of a Voice Processing
 | 
| +// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
 | 
| +// as the Remote I/O unit (supports full-duplex, low-latency audio input and
| 
| +// output) and adds AEC for two-way communication. It also adds AGC,
 | 
| +// adjustment of voice-processing quality, and muting. Hence, ideal for
 | 
| +// VoIP applications.
 | 
| +class VoiceProcessingAudioUnit {
 | 
| + public:
 | 
| +  explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer);
 | 
| +  ~VoiceProcessingAudioUnit();
 | 
| +
 | 
| +  // TODO(tkchin): enum for state and state checking.
 | 
| +
 | 
| +  // Number of bytes per audio sample for 16-bit signed integer representation.
 | 
| +  static const UInt32 kBytesPerSample;
 | 
| +
 | 
| +  // Initializes this class by creating the underlying audio unit instance.
 | 
| +  // Creates a Voice-Processing I/O unit and configures it for full-duplex
 | 
| +  // audio. The stream format is selected to avoid internal resampling
 | 
| +  // and to match the 10ms callback rate for WebRTC as closely as possible.
 | 
| +  // Does not initialize the audio unit.
 | 
| +  bool Init();
 | 
| +
 | 
| +  // Initializes the underlying audio unit with the given sample rate.
 | 
| +  bool Initialize(Float64 sample_rate);
 | 
| +
 | 
| +  // Starts the underlying audio unit.
 | 
| +  bool Start();
 | 
| +
 | 
| +  // Stops the underlying audio unit.
 | 
| +  bool Stop();
 | 
| +
 | 
| +  // Uninitializes the underlying audio unit.
 | 
| +  bool Uninitialize();
 | 
| +
 | 
| +  // Calls render on the underlying audio unit.
 | 
| +  OSStatus Render(AudioUnitRenderActionFlags* flags,
 | 
| +                  const AudioTimeStamp* time_stamp,
 | 
| +                  UInt32 output_bus_number,
 | 
| +                  UInt32 num_frames,
 | 
| +                  AudioBufferList* io_data);
 | 
| +
 | 
| + private:
 | 
| +  // The C API used to set callbacks requires static functions. When these are
 | 
| +  // called, they will invoke the relevant instance method by casting
 | 
| +  // in_ref_con to VoiceProcessingAudioUnit*.
 | 
| +  static OSStatus OnGetPlayoutData(void* in_ref_con,
 | 
| +                                   AudioUnitRenderActionFlags* flags,
 | 
| +                                   const AudioTimeStamp* time_stamp,
 | 
| +                                   UInt32 bus_number,
 | 
| +                                   UInt32 num_frames,
 | 
| +                                   AudioBufferList* io_data);
 | 
| +  static OSStatus OnDeliverRecordedData(void* in_ref_con,
 | 
| +                                        AudioUnitRenderActionFlags* flags,
 | 
| +                                        const AudioTimeStamp* time_stamp,
 | 
| +                                        UInt32 bus_number,
 | 
| +                                        UInt32 num_frames,
 | 
| +                                        AudioBufferList* io_data);
 | 
| +
 | 
| +  // Notifies observer that samples are needed for playback.
 | 
| +  OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
 | 
| +                                const AudioTimeStamp* time_stamp,
 | 
| +                                UInt32 bus_number,
 | 
| +                                UInt32 num_frames,
 | 
| +                                AudioBufferList* io_data);
 | 
| +  // Notifies observer that recorded samples are available for render.
 | 
| +  OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
 | 
| +                                     const AudioTimeStamp* time_stamp,
 | 
| +                                     UInt32 bus_number,
 | 
| +                                     UInt32 num_frames,
 | 
| +                                     AudioBufferList* io_data);
 | 
| +
 | 
| +  // Returns the predetermined stream format for the given sample rate. See
| 
| +  // the implementation file for details on the format.
 | 
| +  AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;
 | 
| +
 | 
| +  // Deletes the underlying audio unit.
 | 
| +  void DisposeAudioUnit();
 | 
| +
 | 
| +  VoiceProcessingAudioUnitObserver* observer_;
 | 
| +  AudioUnit vpio_unit_;
 | 
| +};
 | 
| +}  // namespace webrtc
 | 
| +
 | 
| +#endif  // WEBRTC_MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
 | 
| 
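For context, a minimal usage sketch of the public API declared in the header above. The observer subclass, the include path, the 48 kHz sample rate, and the zero-fill playout body are illustrative assumptions, not part of this CL; the real call sites live in the iOS audio device implementation.

// Usage sketch only; the include path is assumed from the file location above.
#include <cstring>

#include "webrtc/modules/audio_device/ios/voice_processing_audio_unit.h"

namespace {

// Hypothetical observer. A real implementation would pull recorded audio with
// VoiceProcessingAudioUnit::Render() and feed WebRTC's audio device buffer.
class SketchObserver : public webrtc::VoiceProcessingAudioUnitObserver {
 public:
  OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                 const AudioTimeStamp* time_stamp,
                                 UInt32 bus_number,
                                 UInt32 num_frames,
                                 AudioBufferList* io_data) override {
    // Recorded audio is available on the real-time I/O thread.
    return noErr;
  }

  OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
                            const AudioTimeStamp* time_stamp,
                            UInt32 bus_number,
                            UInt32 num_frames,
                            AudioBufferList* io_data) override {
    // Supply playout samples; zero-fill keeps the sketch self-contained.
    for (UInt32 i = 0; i < io_data->mNumberBuffers; ++i) {
      std::memset(io_data->mBuffers[i].mData, 0,
                  io_data->mBuffers[i].mDataByteSize);
    }
    return noErr;
  }
};

// Lifecycle in the order the header declares it. Each call reports success as
// a bool; only the first is checked here to keep the sketch short.
void RunLifecycleSketch() {
  SketchObserver observer;
  webrtc::VoiceProcessingAudioUnit audio_unit(&observer);
  if (!audio_unit.Init()) return;   // Creates and configures the VPIO unit.
  audio_unit.Initialize(48000.0);   // Assumed hardware sample rate.
  audio_unit.Start();               // Callbacks now run on the I/O thread.
  // ... audio session active; observer callbacks fire every I/O cycle ...
  audio_unit.Stop();
  audio_unit.Uninitialize();
}

}  // namespace

The Init()/Initialize() split follows the header comments: Init() only creates and configures the unit, while Initialize() applies a stream format for a given sample rate, presumably so the format can be reapplied when the hardware rate changes without recreating the unit.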
 |
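The comment on the private static callbacks describes the usual Core Audio refCon trampoline. The sketch below illustrates only that pattern; whether the accompanying .mm is written exactly this way is an assumption, since the implementation file is not part of this header.

#include "webrtc/modules/audio_device/ios/voice_processing_audio_unit.h"

namespace webrtc {

// Trampoline sketch for one of the static callbacks declared above. Assumes
// the implementation registers it with an AURenderCallbackStruct whose
// inputProcRefCon points at the VoiceProcessingAudioUnit instance.
OSStatus VoiceProcessingAudioUnit::OnGetPlayoutData(
    void* in_ref_con,
    AudioUnitRenderActionFlags* flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 num_frames,
    AudioBufferList* io_data) {
  // Recover the instance stored in the callback's refCon and forward to the
  // matching non-static notifier, which in turn calls the observer.
  VoiceProcessingAudioUnit* audio_unit =
      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
  return audio_unit->NotifyGetPlayoutData(flags, time_stamp, bus_number,
                                          num_frames, io_data);
}

}  // namespace webrtc

That refCon registration is what makes the static_cast safe: the pointer handed back in in_ref_con is exactly the `this` that was stored when the callback was installed.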