OLD | NEW |
---|---|
(Empty) | |
1 /* | |
2 * Copyright 2016 The WebRTC Project Authors. All rights reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 | |
11 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_ | |
12 #define WEBRTC_MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_ | |
13 | |
14 #include <AudioUnit/AudioUnit.h> | |
15 | |
16 namespace webrtc { | |
17 | |
18 class VoiceProcessingAudioUnitObserver { | |
19 public: | |
20 // Callback function called on a real-time priority I/O thread from the audio | |
21 // unit. This method is used to signal that recorded audio is available. | |
22 virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags, | |
23 const AudioTimeStamp* time_stamp, | |
24 UInt32 bus_number, | |
25 UInt32 num_frames, | |
26 AudioBufferList* io_data) = 0; | |
27 | |
28 // Callback function called on a real-time priority I/O thread from the audio | |
29 // unit. This method is used to provide audio samples to the audio unit. | |
30 virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags, | |
31 const AudioTimeStamp* time_stamp, | |
32 UInt32 bus_number, | |
33 UInt32 num_frames, | |
34 AudioBufferList* io_data) = 0; | |
35 | |
36 protected: | |
37 ~VoiceProcessingAudioUnitObserver() {} | |
38 }; | |
39 | |
40 // Convenience class to abstract away the management of a Voice Processing | |
41 // I/O Audio Unit. The Voice-Processing I/O unit has the same characteristics | |
henrika_webrtc 2016/03/18 11:54:02
Nit, first Voice Processing and then Voice-Processing.
tkchin_webrtc 2016/03/18 17:34:35
Done.
42 // as the Remote I/O unit (supports full duplex low-latency audio input and | |
43 // output) and adds AEC for two-way duplex communication. It also adds AGC, | |
44 // adjustment of voice-processing quality, and muting. Hence, ideal for | |
45 // VoIP applications. | |
46 class VoiceProcessingAudioUnit { | |
47 public: | |
48 explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer); | |
49 ~VoiceProcessingAudioUnit(); | |
50 | |
51 // TODO(tkchin): enum for state and state checking. | |
52 | |
53 // Number of bytes per audio sample for 16-bit signed integer representation. | |
54 static const UInt32 kBytesPerSample; | |
55 | |
56 // Initializes this class by creating the underlying audio unit instance. | |
57 // Creates a Voice-Processing I/O unit and configures it for full-duplex | |
58 // audio. The stream format is selected to avoid internal resampling | |
59 // and to match WebRTC's 10 ms callback rate as closely as possible. | |
60 // Does not initialize the audio unit. | |
61 bool Init(); | |
62 | |
63 // Initializes the underlying audio unit with the given sample rate. | |
64 bool Initialize(Float64 sample_rate); | |
65 | |
66 // Starts the underlying audio unit. | |
67 bool Start(); | |
68 | |
69 // Stops the underlying audio unit. | |
70 bool Stop(); | |
71 | |
72 // Uninitializes the underlying audio unit. | |
73 bool Uninitialize(); | |
74 | |
75 // Calls render on the underlying audio unit. | |
76 OSStatus Render(AudioUnitRenderActionFlags* flags, | |
77 const AudioTimeStamp* time_stamp, | |
78 UInt32 output_bus_number, | |
79 UInt32 num_frames, | |
80 AudioBufferList* io_data); | |
81 | |
82 private: | |
83 // The C API used to set callbacks requires static functions. When these are | |
84 // called, they will invoke the relevant instance method by casting | |
85 // in_ref_con to VoiceProcessingAudioUnit*. | |
86 static OSStatus OnGetPlayoutData(void* in_ref_con, | |
87 AudioUnitRenderActionFlags* flags, | |
88 const AudioTimeStamp* time_stamp, | |
89 UInt32 bus_number, | |
90 UInt32 num_frames, | |
91 AudioBufferList* io_data); | |
92 static OSStatus OnDeliverRecordedData(void* in_ref_con, | |
93 AudioUnitRenderActionFlags* flags, | |
94 const AudioTimeStamp* time_stamp, | |
95 UInt32 bus_number, | |
96 UInt32 num_frames, | |
97 AudioBufferList* io_data); | |
98 | |
99 // Notifies observer that samples are needed for playback. | |
100 OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags, | |
101 const AudioTimeStamp* time_stamp, | |
102 UInt32 bus_number, | |
103 UInt32 num_frames, | |
104 AudioBufferList* io_data); | |
105 // Notifies observer that recorded samples are available for render. | |
106 OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags, | |
107 const AudioTimeStamp* time_stamp, | |
108 UInt32 bus_number, | |
109 UInt32 num_frames, | |
110 AudioBufferList* io_data); | |
111 | |
112 // Returns the predetermined format with a specific sample rate. See | |
113 // implementation file for details on format. | |
114 AudioStreamBasicDescription GetFormat(Float64 sample_rate) const; | |
115 | |
116 // Deletes the underlying audio unit. | |
117 void DisposeAudioUnit(); | |
118 | |
119 VoiceProcessingAudioUnitObserver* observer_; | |
120 AudioUnit vpio_unit_; | |
121 }; | |
122 } // namespace webrtc | |
123 | |
124 #endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_ | |
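For reference, a minimal sketch of an observer implementation for the interface declared above. The class name `ExampleAudioUnitObserver` is hypothetical, the include path is inferred from the header guard, and the callback bodies are placeholders; the real constraint is that both callbacks run on the real-time I/O thread.

```cpp
#include <AudioUnit/AudioUnit.h>

#include "webrtc/modules/audio_device/ios/voice_processing_audio_unit.h"  // Path assumed from the guard.

// Hypothetical observer: receives recorded audio and supplies playout audio.
class ExampleAudioUnitObserver : public webrtc::VoiceProcessingAudioUnitObserver {
 public:
  OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                 const AudioTimeStamp* time_stamp,
                                 UInt32 bus_number,
                                 UInt32 num_frames,
                                 AudioBufferList* io_data) override {
    // Recorded audio is available; hand it to the consumer here.
    // Must be real-time safe: no locks, allocations, or blocking calls.
    return noErr;
  }

  OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
                            const AudioTimeStamp* time_stamp,
                            UInt32 bus_number,
                            UInt32 num_frames,
                            AudioBufferList* io_data) override {
    // Fill io_data with num_frames of playout audio, or zero it for silence.
    return noErr;
  }
};
```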
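The class comment describes a create/initialize/start lifecycle split across Init(), Initialize(), Start(), Stop() and Uninitialize(). A sketch of the expected call order, assuming the observer sketched above and an illustrative 48 kHz sample rate (error handling reduced to early returns):

```cpp
void RunVoiceProcessingAudioUnit() {
  ExampleAudioUnitObserver observer;  // Must outlive the active audio unit.
  webrtc::VoiceProcessingAudioUnit audio_unit(&observer);

  if (!audio_unit.Init()) return;               // Create the VPIO unit, no AU init yet.
  if (!audio_unit.Initialize(48000.0)) return;  // Apply stream format, initialize.
  if (audio_unit.Start()) {                     // I/O begins; callbacks start firing
                                                // on the real-time I/O thread.
    // ... session is live ...
    audio_unit.Stop();
  }
  audio_unit.Uninitialize();                    // Mirror of Initialize().
}
```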
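The private section notes that the static C callbacks recover the instance by casting in_ref_con and then forward to the Notify* methods. A sketch of that trampoline pattern, based only on that comment and not on the implementation file; the kAudioUnitProperty_SetRenderCallback wiring mentioned in the comment below is an assumption about how in_ref_con gets registered.

```cpp
// Sketch of the static-to-member trampoline described in the header comment.
OSStatus VoiceProcessingAudioUnit::OnGetPlayoutData(
    void* in_ref_con,
    AudioUnitRenderActionFlags* flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 num_frames,
    AudioBufferList* io_data) {
  // in_ref_con would have been registered as the instance pointer, e.g. via
  // AudioUnitSetProperty(..., kAudioUnitProperty_SetRenderCallback, ...) with
  // an AURenderCallbackStruct of {&OnGetPlayoutData, this} (assumed wiring).
  auto* audio_unit = static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
  return audio_unit->NotifyGetPlayoutData(flags, time_stamp, bus_number,
                                          num_frames, io_data);
}
```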
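GetFormat() returns a predetermined AudioStreamBasicDescription whose exact settings live in the implementation file. A plausible sketch of such a format, assuming packed, interleaved, mono 16-bit signed-integer LPCM (consistent with kBytesPerSample above, but otherwise an assumption; the helper name is hypothetical):

```cpp
#include <AudioUnit/AudioUnit.h>

// Hypothetical helper illustrating what such a stream format could look like;
// the real GetFormat() in the implementation file is authoritative.
AudioStreamBasicDescription MakeInt16MonoFormat(Float64 sample_rate) {
  AudioStreamBasicDescription format = {};
  format.mSampleRate = sample_rate;
  format.mFormatID = kAudioFormatLinearPCM;
  format.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
  format.mBytesPerPacket = 2;   // kBytesPerSample: 2 bytes per 16-bit sample.
  format.mFramesPerPacket = 1;  // Uncompressed LPCM uses one frame per packet.
  format.mBytesPerFrame = 2;    // Mono: one sample per frame.
  format.mChannelsPerFrame = 1;
  format.mBitsPerChannel = 16;
  return format;
}
```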