OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 43 matching lines...)
54 private class AudioTrackThread extends Thread { | 54 private class AudioTrackThread extends Thread { |
55 private volatile boolean keepAlive = true; | 55 private volatile boolean keepAlive = true; |
56 | 56 |
57 public AudioTrackThread(String name) { | 57 public AudioTrackThread(String name) { |
58 super(name); | 58 super(name); |
59 } | 59 } |
60 | 60 |
61 @Override | 61 @Override |
62 public void run() { | 62 public void run() { |
63 Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO); | 63 Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO); |
64 Logd("AudioTrackThread" + WebRtcAudioUtils.getThreadInfo()); | 64 Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo()); |
65 | 65 |
66 try { | 66 try { |
67 // In MODE_STREAM mode we can optionally prime the output buffer by | 67 // In MODE_STREAM mode we can optionally prime the output buffer by |
68 // writing up to bufferSizeInBytes (from constructor) before starting. | 68 // writing up to bufferSizeInBytes (from constructor) before starting. |
69 // This priming will avoid an immediate underrun, but is not required. | 69 // This priming will avoid an immediate underrun, but is not required. |
70 // TODO(henrika): initial tests have shown that priming is not required. | 70 // TODO(henrika): initial tests have shown that priming is not required. |
71 audioTrack.play(); | 71 audioTrack.play(); |
72 assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING); | 72 assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING); |
73 } catch (IllegalStateException e) { | 73 } catch (IllegalStateException e) { |
74 Loge("AudioTrack.play failed: " + e.getMessage()); | 74 Logging.e(TAG, "AudioTrack.play failed: " + e.getMessage()); |
75 return; | 75 return; |
76 } | 76 } |
77 | 77 |
78 // Fixed size in bytes of each 10ms block of audio data that we ask for | 78 // Fixed size in bytes of each 10ms block of audio data that we ask for |
79 // using callbacks to the native WebRTC client. | 79 // using callbacks to the native WebRTC client. |
80 final int sizeInBytes = byteBuffer.capacity(); | 80 final int sizeInBytes = byteBuffer.capacity(); |
81 | 81 |
82 while (keepAlive) { | 82 while (keepAlive) { |
83 // Get 10ms of PCM data from the native WebRTC client. Audio data is | 83 // Get 10ms of PCM data from the native WebRTC client. Audio data is |
84 // written into the common ByteBuffer using the address that was | 84 // written into the common ByteBuffer using the address that was |
85 // cached at construction. | 85 // cached at construction. |
86 nativeGetPlayoutData(sizeInBytes, nativeAudioTrack); | 86 nativeGetPlayoutData(sizeInBytes, nativeAudioTrack); |
87 // Write data until all data has been written to the audio sink. | 87 // Write data until all data has been written to the audio sink. |
88 // Upon return, the buffer position will have been advanced to reflect | 88 // Upon return, the buffer position will have been advanced to reflect |
89 // the amount of data that was successfully written to the AudioTrack. | 89 // the amount of data that was successfully written to the AudioTrack. |
90 assertTrue(sizeInBytes <= byteBuffer.remaining()); | 90 assertTrue(sizeInBytes <= byteBuffer.remaining()); |
91 int bytesWritten = 0; | 91 int bytesWritten = 0; |
92 if (WebRtcAudioUtils.runningOnLollipopOrHigher()) { | 92 if (WebRtcAudioUtils.runningOnLollipopOrHigher()) { |
93 bytesWritten = audioTrack.write(byteBuffer, | 93 bytesWritten = audioTrack.write(byteBuffer, |
94 sizeInBytes, | 94 sizeInBytes, |
95 AudioTrack.WRITE_BLOCKING); | 95 AudioTrack.WRITE_BLOCKING); |
96 } else { | 96 } else { |
97 bytesWritten = audioTrack.write(byteBuffer.array(), | 97 bytesWritten = audioTrack.write(byteBuffer.array(), |
98 byteBuffer.arrayOffset(), | 98 byteBuffer.arrayOffset(), |
99 sizeInBytes); | 99 sizeInBytes); |
100 } | 100 } |
101 if (bytesWritten != sizeInBytes) { | 101 if (bytesWritten != sizeInBytes) { |
102 Loge("AudioTrack.write failed: " + bytesWritten); | 102 Logging.e(TAG, "AudioTrack.write failed: " + bytesWritten); |
103 if (bytesWritten == AudioTrack.ERROR_INVALID_OPERATION) { | 103 if (bytesWritten == AudioTrack.ERROR_INVALID_OPERATION) { |
104 keepAlive = false; | 104 keepAlive = false; |
105 } | 105 } |
106 } | 106 } |
107         // The byte buffer must be rewound since byteBuffer.position() is | 107         // The byte buffer must be rewound since byteBuffer.position() is |
108         // increased at each call to AudioTrack.write(). If we don't do this, | 108         // increased at each call to AudioTrack.write(). If we don't do this, |
109         // the next call to AudioTrack.write() will fail. | 109         // the next call to AudioTrack.write() will fail. |
110 byteBuffer.rewind(); | 110 byteBuffer.rewind(); |
111 | 111 |
112 // TODO(henrika): it is possible to create a delay estimate here by | 112 // TODO(henrika): it is possible to create a delay estimate here by |
113 // counting number of written frames and subtracting the result from | 113 // counting number of written frames and subtracting the result from |
114 // audioTrack.getPlaybackHeadPosition(). | 114 // audioTrack.getPlaybackHeadPosition(). |
115 } | 115 } |
116 | 116 |
117 try { | 117 try { |
118 audioTrack.stop(); | 118 audioTrack.stop(); |
119 } catch (IllegalStateException e) { | 119 } catch (IllegalStateException e) { |
120 Loge("AudioTrack.stop failed: " + e.getMessage()); | 120 Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage()); |
121 } | 121 } |
122 assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED); | 122 assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED); |
123 audioTrack.flush(); | 123 audioTrack.flush(); |
124 } | 124 } |
125 | 125 |
126 public void joinThread() { | 126 public void joinThread() { |
127 keepAlive = false; | 127 keepAlive = false; |
128 while (isAlive()) { | 128 while (isAlive()) { |
129 try { | 129 try { |
130 join(); | 130 join(); |
131 } catch (InterruptedException e) { | 131 } catch (InterruptedException e) { |
132 // Ignore. | 132 // Ignore. |
133 } | 133 } |
134 } | 134 } |
135 } | 135 } |
136 } | 136 } |
137 | 137 |
138 WebRtcAudioTrack(Context context, long nativeAudioTrack) { | 138 WebRtcAudioTrack(Context context, long nativeAudioTrack) { |
139 Logd("ctor" + WebRtcAudioUtils.getThreadInfo()); | 139 Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo()); |
140 this.context = context; | 140 this.context = context; |
141 this.nativeAudioTrack = nativeAudioTrack; | 141 this.nativeAudioTrack = nativeAudioTrack; |
142 audioManager = (AudioManager) context.getSystemService( | 142 audioManager = (AudioManager) context.getSystemService( |
143 Context.AUDIO_SERVICE); | 143 Context.AUDIO_SERVICE); |
144 if (DEBUG) { | 144 if (DEBUG) { |
145 WebRtcAudioUtils.logDeviceInfo(TAG); | 145 WebRtcAudioUtils.logDeviceInfo(TAG); |
146 } | 146 } |
147 } | 147 } |
148 | 148 |
149 private void initPlayout(int sampleRate, int channels) { | 149 private void initPlayout(int sampleRate, int channels) { |
150 Logd("initPlayout(sampleRate=" + sampleRate + ", channels=" + | 150 Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels=" |
151 channels + ")"); | 151 + channels + ")"); |
152 final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8); | 152 final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8); |
153     byteBuffer = ByteBuffer.allocateDirect( | 153     byteBuffer = ByteBuffer.allocateDirect( |
154 bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND)); | 154 bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND)); |
155 Logd("byteBuffer.capacity: " + byteBuffer.capacity()); | 155 Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity()); |
156 // Rather than passing the ByteBuffer with every callback (requiring | 156 // Rather than passing the ByteBuffer with every callback (requiring |
157 // the potentially expensive GetDirectBufferAddress) we simply have the | 157 // the potentially expensive GetDirectBufferAddress) we simply have the |
158     // native class cache the address to the memory once. | 158     // native class cache the address to the memory once. |
159 nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack); | 159 nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack); |
160 | 160 |
161 // Get the minimum buffer size required for the successful creation of an | 161 // Get the minimum buffer size required for the successful creation of an |
162     // AudioTrack object in MODE_STREAM mode. | 162     // AudioTrack object in MODE_STREAM mode. |
163     // Note that this size doesn't guarantee smooth playback under load. | 163     // Note that this size doesn't guarantee smooth playback under load. |
164 // TODO(henrika): should we extend the buffer size to avoid glitches? | 164 // TODO(henrika): should we extend the buffer size to avoid glitches? |
165 final int minBufferSizeInBytes = AudioTrack.getMinBufferSize( | 165 final int minBufferSizeInBytes = AudioTrack.getMinBufferSize( |
166 sampleRate, | 166 sampleRate, |
167 AudioFormat.CHANNEL_OUT_MONO, | 167 AudioFormat.CHANNEL_OUT_MONO, |
168 AudioFormat.ENCODING_PCM_16BIT); | 168 AudioFormat.ENCODING_PCM_16BIT); |
169 Logd("AudioTrack.getMinBufferSize: " + minBufferSizeInBytes); | 169 Logging.d(TAG, "AudioTrack.getMinBufferSize: " + minBufferSizeInBytes); |
170 assertTrue(audioTrack == null); | 170 assertTrue(audioTrack == null); |
171 | 171 |
172 // For the streaming mode, data must be written to the audio sink in | 172 // For the streaming mode, data must be written to the audio sink in |
173     // chunks of size (given by byteBuffer.capacity()) strictly less than | 173     // chunks of size (given by byteBuffer.capacity()) strictly less than |
174     // the total buffer size |minBufferSizeInBytes|. | 174     // the total buffer size |minBufferSizeInBytes|. |
175 assertTrue(byteBuffer.capacity() < minBufferSizeInBytes); | 175 assertTrue(byteBuffer.capacity() < minBufferSizeInBytes); |
176 try { | 176 try { |
177 // Create an AudioTrack object and initialize its associated audio buffer. | 177 // Create an AudioTrack object and initialize its associated audio buffer. |
178 // The size of this buffer determines how long an AudioTrack can play | 178 // The size of this buffer determines how long an AudioTrack can play |
179 // before running out of data. | 179 // before running out of data. |
180 audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL, | 180 audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL, |
181 sampleRate, | 181 sampleRate, |
182 AudioFormat.CHANNEL_OUT_MONO, | 182 AudioFormat.CHANNEL_OUT_MONO, |
183 AudioFormat.ENCODING_PCM_16BIT, | 183 AudioFormat.ENCODING_PCM_16BIT, |
184 minBufferSizeInBytes, | 184 minBufferSizeInBytes, |
185 AudioTrack.MODE_STREAM); | 185 AudioTrack.MODE_STREAM); |
186 } catch (IllegalArgumentException e) { | 186 } catch (IllegalArgumentException e) { |
187 Logd(e.getMessage()); | 187 Logging.d(TAG, e.getMessage()); |
188 return; | 188 return; |
189 } | 189 } |
190 assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED); | 190 assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED); |
191 assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED); | 191 assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED); |
192 assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL); | 192 assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL); |
193 } | 193 } |
194 | 194 |
195 private boolean startPlayout() { | 195 private boolean startPlayout() { |
196 Logd("startPlayout"); | 196 Logging.d(TAG, "startPlayout"); |
197 assertTrue(audioTrack != null); | 197 assertTrue(audioTrack != null); |
198 assertTrue(audioThread == null); | 198 assertTrue(audioThread == null); |
199 audioThread = new AudioTrackThread("AudioTrackJavaThread"); | 199 audioThread = new AudioTrackThread("AudioTrackJavaThread"); |
200 audioThread.start(); | 200 audioThread.start(); |
201 return true; | 201 return true; |
202 } | 202 } |
203 | 203 |
204 private boolean stopPlayout() { | 204 private boolean stopPlayout() { |
205 Logd("stopPlayout"); | 205 Logging.d(TAG, "stopPlayout"); |
206 assertTrue(audioThread != null); | 206 assertTrue(audioThread != null); |
207 audioThread.joinThread(); | 207 audioThread.joinThread(); |
208 audioThread = null; | 208 audioThread = null; |
209 if (audioTrack != null) { | 209 if (audioTrack != null) { |
210 audioTrack.release(); | 210 audioTrack.release(); |
211 audioTrack = null; | 211 audioTrack = null; |
212 } | 212 } |
213 return true; | 213 return true; |
214 } | 214 } |
215 | 215 |
216 /** Get max possible volume index for a phone call audio stream. */ | 216 /** Get max possible volume index for a phone call audio stream. */ |
217 private int getStreamMaxVolume() { | 217 private int getStreamMaxVolume() { |
218 Logd("getStreamMaxVolume"); | 218 Logging.d(TAG, "getStreamMaxVolume"); |
219 assertTrue(audioManager != null); | 219 assertTrue(audioManager != null); |
220 return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL); | 220 return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL); |
221 } | 221 } |
222 | 222 |
223 /** Set current volume level for a phone call audio stream. */ | 223 /** Set current volume level for a phone call audio stream. */ |
224 private boolean setStreamVolume(int volume) { | 224 private boolean setStreamVolume(int volume) { |
225 Logd("setStreamVolume(" + volume + ")"); | 225 Logging.d(TAG, "setStreamVolume(" + volume + ")"); |
226 assertTrue(audioManager != null); | 226 assertTrue(audioManager != null); |
227 if (WebRtcAudioUtils.runningOnLollipopOrHigher()) { | 227 if (WebRtcAudioUtils.runningOnLollipopOrHigher()) { |
228 if (audioManager.isVolumeFixed()) { | 228 if (audioManager.isVolumeFixed()) { |
229 Loge("The device implements a fixed volume policy."); | 229 Logging.e(TAG, "The device implements a fixed volume policy."); |
230 return false; | 230 return false; |
231 } | 231 } |
232 } | 232 } |
233 audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0); | 233 audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0); |
234 return true; | 234 return true; |
235 } | 235 } |
236 | 236 |
237 /** Get current volume level for a phone call audio stream. */ | 237 /** Get current volume level for a phone call audio stream. */ |
238 private int getStreamVolume() { | 238 private int getStreamVolume() { |
239 Logd("getStreamVolume"); | 239 Logging.d(TAG, "getStreamVolume"); |
240 assertTrue(audioManager != null); | 240 assertTrue(audioManager != null); |
241 return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL); | 241 return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL); |
242 } | 242 } |
243 | 243 |
244 /** Helper method which throws an exception when an assertion has failed. */ | 244 /** Helper method which throws an exception when an assertion has failed. */ |
245 private static void assertTrue(boolean condition) { | 245 private static void assertTrue(boolean condition) { |
246 if (!condition) { | 246 if (!condition) { |
247 throw new AssertionError("Expected condition to be true"); | 247 throw new AssertionError("Expected condition to be true"); |
248 } | 248 } |
249 } | 249 } |
250 | 250 |
251 private static void Logd(String msg) { | |
252 Logging.d(TAG, msg); | |
253 } | |
254 | |
255 private static void Loge(String msg) { | |
256 Logging.e(TAG, msg); | |
257 } | |
258 | |
259 private native void nativeCacheDirectBufferAddress( | 251 private native void nativeCacheDirectBufferAddress( |
260       ByteBuffer byteBuffer, long nativeAudioTrack); | 252       ByteBuffer byteBuffer, long nativeAudioTrack); |
261 | 253 |
262   private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack); | 254   private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack); |
263 } | 255 } |
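
For reference, the 10 ms buffer sizing in initPlayout() works out as below. A
minimal sketch, assuming BITS_PER_SAMPLE is 16 and BUFFERS_PER_SECOND is 100
(one buffer per 10 ms callback, as the comments in the CL state); the 48 kHz
mono figures are illustrative and not taken from this CL.

    // Capacity of the direct ByteBuffer allocated in initPlayout().
    final int sampleRate = 48000;                       // example rate only
    final int channels = 1;                             // CHANNEL_OUT_MONO
    final int bytesPerFrame = channels * (16 / 8);      // 2 bytes per mono frame
    final int capacity = bytesPerFrame * (sampleRate / 100);  // 2 * 480 = 960 bytes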
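
The TODO in AudioTrackThread.run() suggests deriving a playout delay estimate
by counting written frames and comparing against
AudioTrack.getPlaybackHeadPosition(). A hypothetical sketch of that idea only;
writtenFrames and estimatePlayoutDelayMillis() are invented names, not part of
this CL.

    // Frames handed to AudioTrack.write() so far; the write loop would add
    // bytesWritten / bytesPerFrame after each successful write.
    private long writtenFrames;

    private int estimatePlayoutDelayMillis(AudioTrack track, int sampleRate) {
      // getPlaybackHeadPosition() counts frames played since play() as a
      // wrapping signed int; mask to treat it as unsigned for a rough estimate.
      long playedFrames = track.getPlaybackHeadPosition() & 0xFFFFFFFFL;
      long pendingFrames = Math.max(0, writtenFrames - playedFrames);
      return (int) (pendingFrames * 1000 / sampleRate);
    }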