Index: webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
index 4ce35c66c3d52c0d4ceec3a81e67834c72b454f4..c2874316a5ddac0e6a0c03a78f810a918f420a38 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -77,7 +77,7 @@ public class WebRtcAudioTrack {
audioTrack.play();
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
} catch (IllegalStateException e) {
- Logging.e(TAG, "AudioTrack.play failed: " + e.getMessage());
+ Logging.e(TAG, "AudioTrack.play failed: " + e.getMessage());
return;
}
@@ -155,19 +155,16 @@ public class WebRtcAudioTrack {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioTrack = nativeAudioTrack;
- audioManager = (AudioManager) context.getSystemService(
- Context.AUDIO_SERVICE);
+ audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
}
private boolean initPlayout(int sampleRate, int channels) {
- Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels="
- + channels + ")");
+ Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels + ")");
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
- byteBuffer = ByteBuffer.allocateDirect(
- bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
+ byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity()); |
emptyBytes = new byte[byteBuffer.capacity()]; |
// Rather than passing the ByteBuffer with every callback (requiring |
@@ -180,9 +177,7 @@ public class WebRtcAudioTrack { |
// Note that this size doesn't guarantee a smooth playback under load. |
// TODO(henrika): should we extend the buffer size to avoid glitches? |
final int minBufferSizeInBytes = AudioTrack.getMinBufferSize( |
- sampleRate, |
- AudioFormat.CHANNEL_OUT_MONO, |
- AudioFormat.ENCODING_PCM_16BIT); |
+ sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT); |
Logging.d(TAG, "AudioTrack.getMinBufferSize: " + minBufferSizeInBytes); |
// For the streaming mode, data must be written to the audio sink in |
// chunks of size (given by byteBuffer.capacity()) less than or equal |
@@ -204,12 +199,9 @@ public class WebRtcAudioTrack { |
// Create an AudioTrack object and initialize its associated audio buffer. |
// The size of this buffer determines how long an AudioTrack can play |
// before running out of data. |
- audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL, |
- sampleRate, |
- AudioFormat.CHANNEL_OUT_MONO, |
- AudioFormat.ENCODING_PCM_16BIT, |
- minBufferSizeInBytes, |
- AudioTrack.MODE_STREAM); |
+ audioTrack = |
+ new AudioTrack(AudioManager.STREAM_VOICE_CALL, sampleRate, AudioFormat.CHANNEL_OUT_MONO, |
+ AudioFormat.ENCODING_PCM_16BIT, minBufferSizeInBytes, AudioTrack.MODE_STREAM); |
} catch (IllegalArgumentException e) { |
Logging.d(TAG, e.getMessage()); |
return false; |
@@ -290,8 +282,7 @@ public class WebRtcAudioTrack { |
} |
} |
- private native void nativeCacheDirectBufferAddress( |
- ByteBuffer byteBuffer, long nativeAudioRecord); |
+ private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioRecord); |
private native void nativeGetPlayoutData(int bytes, long nativeAudioRecord); |
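
Reviewer note: the reflowed initPlayout() lines above pack the playout buffer sizing onto single lines, which makes the arithmetic easy to skim past. A minimal standalone sketch of that sizing follows, assuming BITS_PER_SAMPLE = 16 and BUFFERS_PER_SECOND = 100 as defined elsewhere in this file (i.e. 10 ms buffers); the class name PlayoutBufferSizing and the main() harness are illustrative only, not part of the patch.

import java.nio.ByteBuffer;

public class PlayoutBufferSizing {
  // Constants mirroring WebRtcAudioTrack (values assumed from the file, not shown in this hunk).
  private static final int BITS_PER_SAMPLE = 16;
  private static final int BUFFERS_PER_SECOND = 100; // one buffer per 10 ms

  public static void main(String[] args) {
    int sampleRate = 48000; // Hz
    int channels = 1;       // mono, matching CHANNEL_OUT_MONO above
    // 16-bit mono PCM: 2 bytes per sample, one sample per frame.
    final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
    // One 10 ms buffer: 48000 / 100 = 480 frames, 480 * 2 = 960 bytes.
    ByteBuffer byteBuffer =
        ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
    System.out.println("byteBuffer.capacity: " + byteBuffer.capacity()); // prints 960
  }
}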
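
The second and third hunks reflow the AudioTrack setup without changing behavior: query the platform's minimum buffer size, then construct a streaming-mode track on the voice-call stream. A hedged sketch of that pattern, lifted out of its surrounding class, is below; the helper name createPlayoutTrack is invented for illustration, and the error check on getMinBufferSize is an addition in this sketch, not something the patch introduces.

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

final class PlayoutTrackFactory {
  static AudioTrack createPlayoutTrack(int sampleRate) {
    // Smallest buffer the platform will accept for this configuration. As the
    // original comment notes, this size does not guarantee glitch-free
    // playback under load.
    final int minBufferSizeInBytes = AudioTrack.getMinBufferSize(
        sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSizeInBytes < 0) {
      // getMinBufferSize returns ERROR or ERROR_BAD_VALUE on failure.
      throw new IllegalStateException("getMinBufferSize failed: " + minBufferSizeInBytes);
    }
    // MODE_STREAM: audio is written in chunks (each at most
    // byteBuffer.capacity() bytes) while the track plays; the buffer size
    // bounds how long playback can continue without a new write.
    return new AudioTrack(AudioManager.STREAM_VOICE_CALL, sampleRate,
        AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, minBufferSizeInBytes,
        AudioTrack.MODE_STREAM);
  }
}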