| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 10 matching lines...) |
| 21 | 21 |
| 22 namespace webrtc { | 22 namespace webrtc { |
| 23 | 23 |
| 24 static const int kHighDelayThresholdMs = 300; | 24 static const int kHighDelayThresholdMs = 300; |
| 25 static const int kLogHighDelayIntervalFrames = 500; // 5 seconds. | 25 static const int kLogHighDelayIntervalFrames = 500; // 5 seconds. |
| 26 | 26 |
| 27 // ---------------------------------------------------------------------------- | 27 // ---------------------------------------------------------------------------- |
| 28 // ctor | 28 // ctor |
| 29 // ---------------------------------------------------------------------------- | 29 // ---------------------------------------------------------------------------- |
| 30 | 30 |
| 31 AudioDeviceBuffer::AudioDeviceBuffer() : | 31 AudioDeviceBuffer::AudioDeviceBuffer() |
| 32 _id(-1), | 32 : _id(-1), |
| 33 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), | 33 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), |
| 34 _critSectCb(*CriticalSectionWrapper::CreateCriticalSection()), | 34 _critSectCb(*CriticalSectionWrapper::CreateCriticalSection()), |
| 35 _ptrCbAudioTransport(NULL), | 35 _ptrCbAudioTransport(NULL), |
| 36 _recSampleRate(0), | 36 _recSampleRate(0), |
| 37 _playSampleRate(0), | 37 _playSampleRate(0), |
| 38 _recChannels(0), | 38 _recChannels(0), |
| 39 _playChannels(0), | 39 _playChannels(0), |
| 40 _recChannel(AudioDeviceModule::kChannelBoth), | 40 _recChannel(AudioDeviceModule::kChannelBoth), |
| 41 _recBytesPerSample(0), | 41 _recBytesPerSample(0), |
| 42 _playBytesPerSample(0), | 42 _playBytesPerSample(0), |
| 43 _recSamples(0), | 43 _recSamples(0), |
| 44 _recSize(0), | 44 _recSize(0), |
| 45 _playSamples(0), | 45 _playSamples(0), |
| 46 _playSize(0), | 46 _playSize(0), |
| 47 _recFile(*FileWrapper::Create()), | 47 _recFile(*FileWrapper::Create()), |
| 48 _playFile(*FileWrapper::Create()), | 48 _playFile(*FileWrapper::Create()), |
| 49 _currentMicLevel(0), | 49 _currentMicLevel(0), |
| 50 _newMicLevel(0), | 50 _newMicLevel(0), |
| 51 _typingStatus(false), | 51 _typingStatus(false), |
| 52 _playDelayMS(0), | 52 _playDelayMS(0), |
| 53 _recDelayMS(0), | 53 _recDelayMS(0), |
| 54 _clockDrift(0), | 54 _clockDrift(0), |
| 55 // Set to the interval in order to log on the first occurrence. | 55 // Set to the interval in order to log on the first occurrence. |
| 56 high_delay_counter_(kLogHighDelayIntervalFrames) { | 56 high_delay_counter_(kLogHighDelayIntervalFrames) { |
| 57 // valid ID will be set later by SetId, use -1 for now | 57 // valid ID will be set later by SetId, use -1 for now |
| 58 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s created", __FUNCTION__); | 58 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s created", |
| 59 memset(_recBuffer, 0, kMaxBufferSizeBytes); | 59 __FUNCTION__); |
| 60 memset(_playBuffer, 0, kMaxBufferSizeBytes); | 60 memset(_recBuffer, 0, kMaxBufferSizeBytes); |
| 61 memset(_playBuffer, 0, kMaxBufferSizeBytes); |
| 61 } | 62 } |
| 62 | 63 |
| 63 // ---------------------------------------------------------------------------- | 64 // ---------------------------------------------------------------------------- |
| 64 // dtor | 65 // dtor |
| 65 // ---------------------------------------------------------------------------- | 66 // ---------------------------------------------------------------------------- |
| 66 | 67 |
| 67 AudioDeviceBuffer::~AudioDeviceBuffer() | 68 AudioDeviceBuffer::~AudioDeviceBuffer() { |
| 68 { | 69 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", |
| 69 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__); | 70 __FUNCTION__); |
| 70 { | 71 { |
| 71 CriticalSectionScoped lock(&_critSect); | 72 CriticalSectionScoped lock(&_critSect); |
| 72 | 73 |
| 73 _recFile.Flush(); | 74 _recFile.Flush(); |
| 74 _recFile.CloseFile(); | 75 _recFile.CloseFile(); |
| 75 delete &_recFile; | 76 delete &_recFile; |
| 76 | 77 |
| 77 _playFile.Flush(); | 78 _playFile.Flush(); |
| 78 _playFile.CloseFile(); | 79 _playFile.CloseFile(); |
| 79 delete &_playFile; | 80 delete &_playFile; |
| 80 } | 81 } |
| 81 | 82 |
| 82 delete &_critSect; | 83 delete &_critSect; |
| 83 delete &_critSectCb; | 84 delete &_critSectCb; |
| 84 } | 85 } |
| 85 | 86 |
| 86 // ---------------------------------------------------------------------------- | 87 // ---------------------------------------------------------------------------- |
| 87 // SetId | 88 // SetId |
| 88 // ---------------------------------------------------------------------------- | 89 // ---------------------------------------------------------------------------- |
| 89 | 90 |
| 90 void AudioDeviceBuffer::SetId(uint32_t id) | 91 void AudioDeviceBuffer::SetId(uint32_t id) { |
| 91 { | 92 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, |
| 92 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "AudioDeviceBuffer::SetId(id=%d)", id); | 93 "AudioDeviceBuffer::SetId(id=%d)", id); |
| 93 _id = id; | 94 _id = id; |
| 94 } | 95 } |
| 95 | 96 |
| 96 // ---------------------------------------------------------------------------- | 97 // ---------------------------------------------------------------------------- |
| 97 // RegisterAudioCallback | 98 // RegisterAudioCallback |
| 98 // ---------------------------------------------------------------------------- | 99 // ---------------------------------------------------------------------------- |
| 99 | 100 |
| 100 int32_t AudioDeviceBuffer::RegisterAudioCallback(AudioTransport* audioCallback) | 101 int32_t AudioDeviceBuffer::RegisterAudioCallback( |
| 101 { | 102 AudioTransport* audioCallback) { |
| 102 CriticalSectionScoped lock(&_critSectCb); | 103 CriticalSectionScoped lock(&_critSectCb); |
| 103 _ptrCbAudioTransport = audioCallback; | 104 _ptrCbAudioTransport = audioCallback; |
| 104 | 105 |
| 105 return 0; | 106 return 0; |
| 106 } | 107 } |
| 107 | 108 |
| 108 // ---------------------------------------------------------------------------- | 109 // ---------------------------------------------------------------------------- |
| 109 // InitPlayout | 110 // InitPlayout |
| 110 // ---------------------------------------------------------------------------- | 111 // ---------------------------------------------------------------------------- |
| 111 | 112 |
| 112 int32_t AudioDeviceBuffer::InitPlayout() | 113 int32_t AudioDeviceBuffer::InitPlayout() { |
| 113 { | 114 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); |
| 114 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 115 return 0; |
| 115 return 0; | |
| 116 } | 116 } |
| 117 | 117 |
| 118 // ---------------------------------------------------------------------------- | 118 // ---------------------------------------------------------------------------- |
| 119 // InitRecording | 119 // InitRecording |
| 120 // ---------------------------------------------------------------------------- | 120 // ---------------------------------------------------------------------------- |
| 121 | 121 |
| 122 int32_t AudioDeviceBuffer::InitRecording() | 122 int32_t AudioDeviceBuffer::InitRecording() { |
| 123 { | 123 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); |
| 124 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 124 return 0; |
| 125 return 0; | |
| 126 } | 125 } |
| 127 | 126 |
| 128 // ---------------------------------------------------------------------------- | 127 // ---------------------------------------------------------------------------- |
| 129 // SetRecordingSampleRate | 128 // SetRecordingSampleRate |
| 130 // ---------------------------------------------------------------------------- | 129 // ---------------------------------------------------------------------------- |
| 131 | 130 |
| 132 int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) | 131 int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) { |
| 133 { | 132 CriticalSectionScoped lock(&_critSect); |
| 134 CriticalSectionScoped lock(&_critSect); | 133 _recSampleRate = fsHz; |
| 135 _recSampleRate = fsHz; | 134 return 0; |
| 136 return 0; | |
| 137 } | 135 } |
| 138 | 136 |
| 139 // ---------------------------------------------------------------------------- | 137 // ---------------------------------------------------------------------------- |
| 140 // SetPlayoutSampleRate | 138 // SetPlayoutSampleRate |
| 141 // ---------------------------------------------------------------------------- | 139 // ---------------------------------------------------------------------------- |
| 142 | 140 |
| 143 int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) | 141 int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) { |
| 144 { | 142 CriticalSectionScoped lock(&_critSect); |
| 145 CriticalSectionScoped lock(&_critSect); | 143 _playSampleRate = fsHz; |
| 146 _playSampleRate = fsHz; | 144 return 0; |
| 147 return 0; | |
| 148 } | 145 } |
| 149 | 146 |
| 150 // ---------------------------------------------------------------------------- | 147 // ---------------------------------------------------------------------------- |
| 151 // RecordingSampleRate | 148 // RecordingSampleRate |
| 152 // ---------------------------------------------------------------------------- | 149 // ---------------------------------------------------------------------------- |
| 153 | 150 |
| 154 int32_t AudioDeviceBuffer::RecordingSampleRate() const | 151 int32_t AudioDeviceBuffer::RecordingSampleRate() const { |
| 155 { | 152 return _recSampleRate; |
| 156 return _recSampleRate; | |
| 157 } | 153 } |
| 158 | 154 |
| 159 // ---------------------------------------------------------------------------- | 155 // ---------------------------------------------------------------------------- |
| 160 // PlayoutSampleRate | 156 // PlayoutSampleRate |
| 161 // ---------------------------------------------------------------------------- | 157 // ---------------------------------------------------------------------------- |
| 162 | 158 |
| 163 int32_t AudioDeviceBuffer::PlayoutSampleRate() const | 159 int32_t AudioDeviceBuffer::PlayoutSampleRate() const { |
| 164 { | 160 return _playSampleRate; |
| 165 return _playSampleRate; | |
| 166 } | 161 } |
| 167 | 162 |
| 168 // ---------------------------------------------------------------------------- | 163 // ---------------------------------------------------------------------------- |
| 169 // SetRecordingChannels | 164 // SetRecordingChannels |
| 170 // ---------------------------------------------------------------------------- | 165 // ---------------------------------------------------------------------------- |
| 171 | 166 |
| 172 int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) | 167 int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) { |
| 173 { | 168 CriticalSectionScoped lock(&_critSect); |
| 174 CriticalSectionScoped lock(&_critSect); | 169 _recChannels = channels; |
| 175 _recChannels = channels; | 170 _recBytesPerSample = |
| 176 _recBytesPerSample = 2*channels; // 16 bits per sample in mono, 32 bits in stereo | 171 2 * channels; // 16 bits per sample in mono, 32 bits in stereo |
| 177 return 0; | 172 return 0; |
| 178 } | 173 } |
| 179 | 174 |
| 180 // ---------------------------------------------------------------------------- | 175 // ---------------------------------------------------------------------------- |
| 181 // SetPlayoutChannels | 176 // SetPlayoutChannels |
| 182 // ---------------------------------------------------------------------------- | 177 // ---------------------------------------------------------------------------- |
| 183 | 178 |
| 184 int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) | 179 int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) { |
| 185 { | 180 CriticalSectionScoped lock(&_critSect); |
| 186 CriticalSectionScoped lock(&_critSect); | 181 _playChannels = channels; |
| 187 _playChannels = channels; | 182 // 16 bits per sample in mono, 32 bits in stereo |
| 188 // 16 bits per sample in mono, 32 bits in stereo | 183 _playBytesPerSample = 2 * channels; |
| 189 _playBytesPerSample = 2*channels; | 184 return 0; |
| 190 return 0; | |
| 191 } | 185 } |
| 192 | 186 |
| 193 // ---------------------------------------------------------------------------- | 187 // ---------------------------------------------------------------------------- |
| 194 // SetRecordingChannel | 188 // SetRecordingChannel |
| 195 // | 189 // |
| 196 // Select which channel to use while recording. | 190 // Select which channel to use while recording. |
| 197 // This API requires that stereo is enabled. | 191 // This API requires that stereo is enabled. |
| 198 // | 192 // |
| 199 // Note that the nChannel parameter in RecordedDataIsAvailable will be | 193 // Note that the nChannel parameter in RecordedDataIsAvailable will be |
| 200 // set to 2 even for kChannelLeft and kChannelRight. However, nBytesPerSample | 194 // set to 2 even for kChannelLeft and kChannelRight. However, nBytesPerSample |
| 201 // will be 2 instead of 4 for these cases. | 195 // will be 2 instead of 4 for these cases. |
| 202 // ---------------------------------------------------------------------------- | 196 // ---------------------------------------------------------------------------- |
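A quick illustration of the convention documented above (not part of this change): even when only the left or right channel is kept, RecordedDataIsAvailable still reports two channels, but nBytesPerSample drops from 4 to 2, so a 10 ms frame is half the size. A minimal sketch assuming 16-bit samples; the helper name FrameBytes is hypothetical:

```cpp
#include <cstddef>

// Bytes in one 10 ms capture frame given the per-sample byte count used by
// the ADM: 4 for kChannelBoth (16-bit L+R), 2 for kChannelLeft/kChannelRight.
constexpr size_t FrameBytes(size_t sample_rate_hz, size_t bytes_per_sample) {
  return (sample_rate_hz / 100) * bytes_per_sample;  // samples per 10 ms * bytes
}

static_assert(FrameBytes(48000, 4) == 1920, "stereo (kChannelBoth)");
static_assert(FrameBytes(48000, 2) == 960, "mono-emulated (kChannelLeft/Right)");
```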
| 203 | 197 |
| 204 int32_t AudioDeviceBuffer::SetRecordingChannel(const AudioDeviceModule::ChannelType channel) | 198 int32_t AudioDeviceBuffer::SetRecordingChannel( |
| 205 { | 199 const AudioDeviceModule::ChannelType channel) { |
| 206 CriticalSectionScoped lock(&_critSect); | 200 CriticalSectionScoped lock(&_critSect); |
| 207 | 201 |
| 208 if (_recChannels == 1) | 202 if (_recChannels == 1) { |
| 209 { | 203 return -1; |
| 210 return -1; | 204 } |
| 211 } | |
| 212 | 205 |
| 213 if (channel == AudioDeviceModule::kChannelBoth) | 206 if (channel == AudioDeviceModule::kChannelBoth) { |
| 214 { | 207 // two bytes per channel |
| 215 // two bytes per channel | 208 _recBytesPerSample = 4; |
| 216 _recBytesPerSample = 4; | 209 } else { |
| 217 } | 210 // only utilize one out of two possible channels (left or right) |
| 218 else | 211 _recBytesPerSample = 2; |
| 219 { | 212 } |
| 220 // only utilize one out of two possible channels (left or right) | 213 _recChannel = channel; |
| 221 _recBytesPerSample = 2; | |
| 222 } | |
| 223 _recChannel = channel; | |
| 224 | 214 |
| 225 return 0; | 215 return 0; |
| 226 } | 216 } |
| 227 | 217 |
| 228 // ---------------------------------------------------------------------------- | 218 // ---------------------------------------------------------------------------- |
| 229 // RecordingChannel | 219 // RecordingChannel |
| 230 // ---------------------------------------------------------------------------- | 220 // ---------------------------------------------------------------------------- |
| 231 | 221 |
| 232 int32_t AudioDeviceBuffer::RecordingChannel(AudioDeviceModule::ChannelType& channel) const | 222 int32_t AudioDeviceBuffer::RecordingChannel( |
| 233 { | 223 AudioDeviceModule::ChannelType& channel) const { |
| 234 channel = _recChannel; | 224 channel = _recChannel; |
| 235 return 0; | 225 return 0; |
| 236 } | 226 } |
| 237 | 227 |
| 238 // ---------------------------------------------------------------------------- | 228 // ---------------------------------------------------------------------------- |
| 239 // RecordingChannels | 229 // RecordingChannels |
| 240 // ---------------------------------------------------------------------------- | 230 // ---------------------------------------------------------------------------- |
| 241 | 231 |
| 242 size_t AudioDeviceBuffer::RecordingChannels() const | 232 size_t AudioDeviceBuffer::RecordingChannels() const { |
| 243 { | 233 return _recChannels; |
| 244 return _recChannels; | |
| 245 } | 234 } |
| 246 | 235 |
| 247 // ---------------------------------------------------------------------------- | 236 // ---------------------------------------------------------------------------- |
| 248 // PlayoutChannels | 237 // PlayoutChannels |
| 249 // ---------------------------------------------------------------------------- | 238 // ---------------------------------------------------------------------------- |
| 250 | 239 |
| 251 size_t AudioDeviceBuffer::PlayoutChannels() const | 240 size_t AudioDeviceBuffer::PlayoutChannels() const { |
| 252 { | 241 return _playChannels; |
| 253 return _playChannels; | |
| 254 } | 242 } |
| 255 | 243 |
| 256 // ---------------------------------------------------------------------------- | 244 // ---------------------------------------------------------------------------- |
| 257 // SetCurrentMicLevel | 245 // SetCurrentMicLevel |
| 258 // ---------------------------------------------------------------------------- | 246 // ---------------------------------------------------------------------------- |
| 259 | 247 |
| 260 int32_t AudioDeviceBuffer::SetCurrentMicLevel(uint32_t level) | 248 int32_t AudioDeviceBuffer::SetCurrentMicLevel(uint32_t level) { |
| 261 { | 249 _currentMicLevel = level; |
| 262 _currentMicLevel = level; | 250 return 0; |
| 263 return 0; | |
| 264 } | 251 } |
| 265 | 252 |
| 266 int32_t AudioDeviceBuffer::SetTypingStatus(bool typingStatus) | 253 int32_t AudioDeviceBuffer::SetTypingStatus(bool typingStatus) { |
| 267 { | 254 _typingStatus = typingStatus; |
| 268 _typingStatus = typingStatus; | 255 return 0; |
| 269 return 0; | |
| 270 } | 256 } |
| 271 | 257 |
| 272 // ---------------------------------------------------------------------------- | 258 // ---------------------------------------------------------------------------- |
| 273 // NewMicLevel | 259 // NewMicLevel |
| 274 // ---------------------------------------------------------------------------- | 260 // ---------------------------------------------------------------------------- |
| 275 | 261 |
| 276 uint32_t AudioDeviceBuffer::NewMicLevel() const | 262 uint32_t AudioDeviceBuffer::NewMicLevel() const { |
| 277 { | 263 return _newMicLevel; |
| 278 return _newMicLevel; | |
| 279 } | 264 } |
| 280 | 265 |
| 281 // ---------------------------------------------------------------------------- | 266 // ---------------------------------------------------------------------------- |
| 282 // SetVQEData | 267 // SetVQEData |
| 283 // ---------------------------------------------------------------------------- | 268 // ---------------------------------------------------------------------------- |
| 284 | 269 |
| 285 void AudioDeviceBuffer::SetVQEData(int playDelayMs, int recDelayMs, | 270 void AudioDeviceBuffer::SetVQEData(int playDelayMs, |
| 271 int recDelayMs, |
| 286 int clockDrift) { | 272 int clockDrift) { |
| 287 if (high_delay_counter_ < kLogHighDelayIntervalFrames) { | 273 if (high_delay_counter_ < kLogHighDelayIntervalFrames) { |
| 288 ++high_delay_counter_; | 274 ++high_delay_counter_; |
| 289 } else { | 275 } else { |
| 290 if (playDelayMs + recDelayMs > kHighDelayThresholdMs) { | 276 if (playDelayMs + recDelayMs > kHighDelayThresholdMs) { |
| 291 high_delay_counter_ = 0; | 277 high_delay_counter_ = 0; |
| 292 LOG(LS_WARNING) << "High audio device delay reported (render=" | 278 LOG(LS_WARNING) << "High audio device delay reported (render=" |
| 293 << playDelayMs << " ms, capture=" << recDelayMs << " ms)"; | 279 << playDelayMs << " ms, capture=" << recDelayMs << " ms)"; |
| 294 } | 280 } |
| 295 } | 281 } |
| 296 | 282 |
| 297 _playDelayMS = playDelayMs; | 283 _playDelayMS = playDelayMs; |
| 298 _recDelayMS = recDelayMs; | 284 _recDelayMS = recDelayMs; |
| 299 _clockDrift = clockDrift; | 285 _clockDrift = clockDrift; |
| 300 } | 286 } |
| 301 | 287 |
| 302 // ---------------------------------------------------------------------------- | 288 // ---------------------------------------------------------------------------- |
| 303 // StartInputFileRecording | 289 // StartInputFileRecording |
| 304 // ---------------------------------------------------------------------------- | 290 // ---------------------------------------------------------------------------- |
| 305 | 291 |
| 306 int32_t AudioDeviceBuffer::StartInputFileRecording( | 292 int32_t AudioDeviceBuffer::StartInputFileRecording( |
| 307 const char fileName[kAdmMaxFileNameSize]) | 293 const char fileName[kAdmMaxFileNameSize]) { |
| 308 { | 294 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); |
| 309 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 310 | 295 |
| 311 CriticalSectionScoped lock(&_critSect); | 296 CriticalSectionScoped lock(&_critSect); |
| 312 | 297 |
| 313 _recFile.Flush(); | 298 _recFile.Flush(); |
| 314 _recFile.CloseFile(); | 299 _recFile.CloseFile(); |
| 315 | 300 |
| 316 return _recFile.OpenFile(fileName, false) ? 0 : -1; | 301 return _recFile.OpenFile(fileName, false) ? 0 : -1; |
| 317 } | 302 } |
| 318 | 303 |
| 319 // ---------------------------------------------------------------------------- | 304 // ---------------------------------------------------------------------------- |
| 320 // StopInputFileRecording | 305 // StopInputFileRecording |
| 321 // ---------------------------------------------------------------------------- | 306 // ---------------------------------------------------------------------------- |
| 322 | 307 |
| 323 int32_t AudioDeviceBuffer::StopInputFileRecording() | 308 int32_t AudioDeviceBuffer::StopInputFileRecording() { |
| 324 { | 309 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); |
| 325 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 326 | 310 |
| 327 CriticalSectionScoped lock(&_critSect); | 311 CriticalSectionScoped lock(&_critSect); |
| 328 | 312 |
| 329 _recFile.Flush(); | 313 _recFile.Flush(); |
| 330 _recFile.CloseFile(); | 314 _recFile.CloseFile(); |
| 331 | 315 |
| 332 return 0; | 316 return 0; |
| 333 } | 317 } |
| 334 | 318 |
| 335 // ---------------------------------------------------------------------------- | 319 // ---------------------------------------------------------------------------- |
| 336 // StartOutputFileRecording | 320 // StartOutputFileRecording |
| 337 // ---------------------------------------------------------------------------- | 321 // ---------------------------------------------------------------------------- |
| 338 | 322 |
| 339 int32_t AudioDeviceBuffer::StartOutputFileRecording( | 323 int32_t AudioDeviceBuffer::StartOutputFileRecording( |
| 340 const char fileName[kAdmMaxFileNameSize]) | 324 const char fileName[kAdmMaxFileNameSize]) { |
| 341 { | 325 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); |
| 342 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 343 | 326 |
| 344 CriticalSectionScoped lock(&_critSect); | 327 CriticalSectionScoped lock(&_critSect); |
| 345 | 328 |
| 346 _playFile.Flush(); | 329 _playFile.Flush(); |
| 347 _playFile.CloseFile(); | 330 _playFile.CloseFile(); |
| 348 | 331 |
| 349 return _playFile.OpenFile(fileName, false) ? 0 : -1; | 332 return _playFile.OpenFile(fileName, false) ? 0 : -1; |
| 350 } | 333 } |
| 351 | 334 |
| 352 // ---------------------------------------------------------------------------- | 335 // ---------------------------------------------------------------------------- |
| 353 // StopOutputFileRecording | 336 // StopOutputFileRecording |
| 354 // ---------------------------------------------------------------------------- | 337 // ---------------------------------------------------------------------------- |
| 355 | 338 |
| 356 int32_t AudioDeviceBuffer::StopOutputFileRecording() | 339 int32_t AudioDeviceBuffer::StopOutputFileRecording() { |
| 357 { | 340 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); |
| 358 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 359 | 341 |
| 360 CriticalSectionScoped lock(&_critSect); | 342 CriticalSectionScoped lock(&_critSect); |
| 361 | 343 |
| 362 _playFile.Flush(); | 344 _playFile.Flush(); |
| 363 _playFile.CloseFile(); | 345 _playFile.CloseFile(); |
| 364 | 346 |
| 365 return 0; | 347 return 0; |
| 366 } | 348 } |
| 367 | 349 |
| 368 // ---------------------------------------------------------------------------- | 350 // ---------------------------------------------------------------------------- |
| 369 // SetRecordedBuffer | 351 // SetRecordedBuffer |
| 370 // | 352 // |
| 371 // Store recorded audio buffer in local memory ready for the actual | 353 // Store recorded audio buffer in local memory ready for the actual |
| 372 // "delivery" using a callback. | 354 // "delivery" using a callback. |
| 373 // | 355 // |
| 374 // This method can also parse out left or right channel from a stereo | 356 // This method can also parse out left or right channel from a stereo |
| 375 // input signal, i.e., emulate mono. | 357 // input signal, i.e., emulate mono. |
| 376 // | 358 // |
| 377 // Examples: | 359 // Examples: |
| 378 // | 360 // |
| 379 // 16-bit,48kHz mono, 10ms => nSamples=480 => _recSize=2*480=960 bytes | 361 // 16-bit,48kHz mono, 10ms => nSamples=480 => _recSize=2*480=960 bytes |
| 380 // 16-bit,48kHz stereo,10ms => nSamples=480 => _recSize=4*480=1920 bytes | 362 // 16-bit,48kHz stereo,10ms => nSamples=480 => _recSize=4*480=1920 bytes |
| 381 // ---------------------------------------------------------------------------- | 363 // ---------------------------------------------------------------------------- |
| 382 | 364 |
| 383 int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer, | 365 int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer, |
| 384 size_t nSamples) | 366 size_t nSamples) { |
| 385 { | 367 CriticalSectionScoped lock(&_critSect); |
| 386 CriticalSectionScoped lock(&_critSect); | |
| 387 | 368 |
| 388 if (_recBytesPerSample == 0) | 369 if (_recBytesPerSample == 0) { |
| 389 { | 370 assert(false); |
| 390 assert(false); | 371 return -1; |
| 391 return -1; | 372 } |
| 373 |
| 374 _recSamples = nSamples; |
| 375 _recSize = _recBytesPerSample * nSamples; // {2,4}*nSamples |
| 376 if (_recSize > kMaxBufferSizeBytes) { |
| 377 assert(false); |
| 378 return -1; |
| 379 } |
| 380 |
| 381 if (_recChannel == AudioDeviceModule::kChannelBoth) { |
| 382 // (default) copy the complete input buffer to the local buffer |
| 383 memcpy(&_recBuffer[0], audioBuffer, _recSize); |
| 384 } else { |
| 385 int16_t* ptr16In = (int16_t*)audioBuffer; |
| 386 int16_t* ptr16Out = (int16_t*)&_recBuffer[0]; |
| 387 |
| 388 if (AudioDeviceModule::kChannelRight == _recChannel) { |
| 389 ptr16In++; |
| 392 } | 390 } |
| 393 | 391 |
| 394 _recSamples = nSamples; | 392 // extract left or right channel from input buffer to the local buffer |
| 395 _recSize = _recBytesPerSample*nSamples; // {2,4}*nSamples | 393 for (size_t i = 0; i < _recSamples; i++) { |
| 396 if (_recSize > kMaxBufferSizeBytes) | 394 *ptr16Out = *ptr16In; |
| 397 { | 395 ptr16Out++; |
| 398 assert(false); | 396 ptr16In++; |
| 399 return -1; | 397 ptr16In++; |
| 400 } | 398 } |
| 399 } |
| 401 | 400 |
| 402 if (_recChannel == AudioDeviceModule::kChannelBoth) | 401 if (_recFile.is_open()) { |
| 403 { | 402 // write to binary file in mono or stereo (interleaved) |
| 404 // (default) copy the complete input buffer to the local buffer | 403 _recFile.Write(&_recBuffer[0], _recSize); |
| 405 memcpy(&_recBuffer[0], audioBuffer, _recSize); | 404 } |
| 406 } | |
| 407 else | |
| 408 { | |
| 409 int16_t* ptr16In = (int16_t*)audioBuffer; | |
| 410 int16_t* ptr16Out = (int16_t*)&_recBuffer[0]; | |
| 411 | 405 |
| 412 if (AudioDeviceModule::kChannelRight == _recChannel) | 406 return 0; |
| 413 { | |
| 414 ptr16In++; | |
| 415 } | |
| 416 | |
| 417 // extract left or right channel from input buffer to the local buffer | |
| 418 for (size_t i = 0; i < _recSamples; i++) | |
| 419 { | |
| 420 *ptr16Out = *ptr16In; | |
| 421 ptr16Out++; | |
| 422 ptr16In++; | |
| 423 ptr16In++; | |
| 424 } | |
| 425 } | |
| 426 | |
| 427 if (_recFile.is_open()) { | |
| 428 // write to binary file in mono or stereo (interleaved) | |
| 429 _recFile.Write(&_recBuffer[0], _recSize); | |
| 430 } | |
| 431 | |
| 432 return 0; | |
| 433 } | 407 } |
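For reference, the mono-emulation branch above boils down to taking every other 16-bit sample from an interleaved stereo buffer. A minimal standalone sketch, assuming interleaved 16-bit L/R input; the function name and parameters are illustrative only, not part of this CL:

```cpp
#include <cstddef>
#include <cstdint>

// Copy one channel (left or right) out of an interleaved 16-bit stereo buffer.
void ExtractChannel(const int16_t* interleaved_in, size_t frames,
                    bool take_right, int16_t* mono_out) {
  const int16_t* in = interleaved_in + (take_right ? 1 : 0);
  for (size_t i = 0; i < frames; ++i) {
    mono_out[i] = *in;  // keep the selected channel
    in += 2;            // skip over the other channel
  }
}
```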
| 434 | 408 |
| 435 // ---------------------------------------------------------------------------- | 409 // ---------------------------------------------------------------------------- |
| 436 // DeliverRecordedData | 410 // DeliverRecordedData |
| 437 // ---------------------------------------------------------------------------- | 411 // ---------------------------------------------------------------------------- |
| 438 | 412 |
| 439 int32_t AudioDeviceBuffer::DeliverRecordedData() | 413 int32_t AudioDeviceBuffer::DeliverRecordedData() { |
| 440 { | 414 CriticalSectionScoped lock(&_critSectCb); |
| 441 CriticalSectionScoped lock(&_critSectCb); | |
| 442 | 415 |
| 443 // Ensure that user has initialized all essential members | 416 // Ensure that user has initialized all essential members |
| 444 if ((_recSampleRate == 0) || | 417 if ((_recSampleRate == 0) || (_recSamples == 0) || |
| 445 (_recSamples == 0) || | 418 (_recBytesPerSample == 0) || (_recChannels == 0)) { |
| 446 (_recBytesPerSample == 0) || | 419 assert(false); |
| 447 (_recChannels == 0)) | 420 return -1; |
| 448 { | 421 } |
| 449 assert(false); | |
| 450 return -1; | |
| 451 } | |
| 452 | 422 |
| 453 if (_ptrCbAudioTransport == NULL) | 423 if (_ptrCbAudioTransport == NULL) { |
| 454 { | 424 WEBRTC_TRACE( |
| 455 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to deliver recorded data (AudioTransport does not exist)"); | 425 kTraceWarning, kTraceAudioDevice, _id, |
| 456 return 0; | 426 "failed to deliver recorded data (AudioTransport does not exist)"); |
| 457 } | 427 return 0; |
| 428 } |
| 458 | 429 |
| 459 int32_t res(0); | 430 int32_t res(0); |
| 460 uint32_t newMicLevel(0); | 431 uint32_t newMicLevel(0); |
| 461 uint32_t totalDelayMS = _playDelayMS +_recDelayMS; | 432 uint32_t totalDelayMS = _playDelayMS + _recDelayMS; |
| 462 | 433 |
| 463 res = _ptrCbAudioTransport->RecordedDataIsAvailable(&_recBuffer[0], | 434 res = _ptrCbAudioTransport->RecordedDataIsAvailable( |
| 464 _recSamples, | 435 &_recBuffer[0], _recSamples, _recBytesPerSample, _recChannels, |
| 465 _recBytesPerSample, | 436 _recSampleRate, totalDelayMS, _clockDrift, _currentMicLevel, |
| 466 _recChannels, | 437 _typingStatus, newMicLevel); |
| 467 _recSampleRate, | 438 if (res != -1) { |
| 468 totalDelayMS, | 439 _newMicLevel = newMicLevel; |
| 469 _clockDrift, | 440 } |
| 470 _currentMicLevel, | |
| 471 _typingStatus, | |
| 472 newMicLevel); | |
| 473 if (res != -1) | |
| 474 { | |
| 475 _newMicLevel = newMicLevel; | |
| 476 } | |
| 477 | 441 |
| 478 return 0; | 442 return 0; |
| 479 } | 443 } |
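On the capture side, a platform-specific ADM typically pairs these two calls once per 10 ms frame. A sketch under assumed names (OnCapturedData, device_buffer, captured_10ms, frames_per_10ms are illustrative, not part of this CL):

```cpp
#include <cstddef>
// Assumes the AudioDeviceBuffer declaration from this module is visible.

void OnCapturedData(webrtc::AudioDeviceBuffer* device_buffer,
                    const void* captured_10ms, size_t frames_per_10ms) {
  // Hand the driver's 10 ms frame to the ADM buffer...
  if (device_buffer->SetRecordedBuffer(captured_10ms, frames_per_10ms) == 0) {
    // ...which then forwards it to AudioTransport::RecordedDataIsAvailable().
    device_buffer->DeliverRecordedData();
  }
}
```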
| 480 | 444 |
| 481 // ---------------------------------------------------------------------------- | 445 // ---------------------------------------------------------------------------- |
| 482 // RequestPlayoutData | 446 // RequestPlayoutData |
| 483 // ---------------------------------------------------------------------------- | 447 // ---------------------------------------------------------------------------- |
| 484 | 448 |
| 485 int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) | 449 int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) { |
| 486 { | 450 uint32_t playSampleRate = 0; |
| 487 uint32_t playSampleRate = 0; | 451 size_t playBytesPerSample = 0; |
| 488 size_t playBytesPerSample = 0; | 452 size_t playChannels = 0; |
| 489 size_t playChannels = 0; | 453 { |
| 490 { | 454 CriticalSectionScoped lock(&_critSect); |
| 491 CriticalSectionScoped lock(&_critSect); | |
| 492 | 455 |
| 493 // Store copies under lock and use copies hereafter to avoid race with | 456 // Store copies under lock and use copies hereafter to avoid race with |
| 494 // setter methods. | 457 // setter methods. |
| 495 playSampleRate = _playSampleRate; | 458 playSampleRate = _playSampleRate; |
| 496 playBytesPerSample = _playBytesPerSample; | 459 playBytesPerSample = _playBytesPerSample; |
| 497 playChannels = _playChannels; | 460 playChannels = _playChannels; |
| 498 | 461 |
| 499 // Ensure that user has initialized all essential members | 462 // Ensure that user has initialized all essential members |
| 500 if ((playBytesPerSample == 0) || | 463 if ((playBytesPerSample == 0) || (playChannels == 0) || |
| 501 (playChannels == 0) || | 464 (playSampleRate == 0)) { |
| 502 (playSampleRate == 0)) | 465 assert(false); |
| 503 { | 466 return -1; |
| 504 assert(false); | |
| 505 return -1; | |
| 506 } | |
| 507 | |
| 508 _playSamples = nSamples; | |
| 509 _playSize = playBytesPerSample * nSamples; // {2,4}*nSamples | |
| 510 if (_playSize > kMaxBufferSizeBytes) | |
| 511 { | |
| 512 assert(false); | |
| 513 return -1; | |
| 514 } | |
| 515 | |
| 516 if (nSamples != _playSamples) | |
| 517 { | |
| 518 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "invalid number of samples to be played out (%d)", nSamples); | 479 return -1; |
| 519 return -1; | |
| 520 } | |
| 521 } | 467 } |
| 522 | 468 |
| 523 size_t nSamplesOut(0); | 469 _playSamples = nSamples; |
| 524 | 470 _playSize = playBytesPerSample * nSamples; // {2,4}*nSamples |
| 525 CriticalSectionScoped lock(&_critSectCb); | 471 if (_playSize > kMaxBufferSizeBytes) { |
| 526 | 472 assert(false); |
| 527 if (_ptrCbAudioTransport == NULL) | 473 return -1; |
| 528 { | |
| 529 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to feed data to playout (AudioTransport does not exist)"); | 490 "failed to feed data to playout (AudioTransport does not exist)"); |
| 530 return 0; | |
| 531 } | 474 } |
| 532 | 475 |
| 533 if (_ptrCbAudioTransport) | 476 if (nSamples != _playSamples) { |
| 534 { | 477 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 535 uint32_t res(0); | 478 "invalid number of samples to be played out (%d)", nSamples); |
| 536 int64_t elapsed_time_ms = -1; | 479 return -1; |
| 537 int64_t ntp_time_ms = -1; | |
| 538 res = _ptrCbAudioTransport->NeedMorePlayData(_playSamples, | |
| 539 playBytesPerSample, | |
| 540 playChannels, | |
| 541 playSampleRate, | |
| 542 &_playBuffer[0], | |
| 543 nSamplesOut, | |
| 544 &elapsed_time_ms, | |
| 545 &ntp_time_ms); | |
| 546 if (res != 0) | |
| 547 { | |
| 548 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "NeedMorePlayData() failed"); | 502 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 549 } | |
| 550 } | 480 } |
| 481 } |
| 551 | 482 |
| 552 return static_cast<int32_t>(nSamplesOut); | 483 size_t nSamplesOut(0); |
| 484 |
| 485 CriticalSectionScoped lock(&_critSectCb); |
| 486 |
| 487 if (_ptrCbAudioTransport == NULL) { |
| 488 WEBRTC_TRACE( |
| 489 kTraceWarning, kTraceAudioDevice, _id, |
| 490 "failed to feed data to playout (AudioTransport does not exist)"); |
| 491 return 0; |
| 492 } |
| 493 |
| 494 if (_ptrCbAudioTransport) { |
| 495 uint32_t res(0); |
| 496 int64_t elapsed_time_ms = -1; |
| 497 int64_t ntp_time_ms = -1; |
| 498 res = _ptrCbAudioTransport->NeedMorePlayData( |
| 499 _playSamples, playBytesPerSample, playChannels, playSampleRate, |
| 500 &_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms); |
| 501 if (res != 0) { |
| 502 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 503 "NeedMorePlayData() failed"); |
| 504 } |
| 505 } |
| 506 |
| 507 return static_cast<int32_t>(nSamplesOut); |
| 553 } | 508 } |
| 554 | 509 |
| 555 // ---------------------------------------------------------------------------- | 510 // ---------------------------------------------------------------------------- |
| 556 // GetPlayoutData | 511 // GetPlayoutData |
| 557 // ---------------------------------------------------------------------------- | 512 // ---------------------------------------------------------------------------- |
| 558 | 513 |
| 559 int32_t AudioDeviceBuffer::GetPlayoutData(void* audioBuffer) | 514 int32_t AudioDeviceBuffer::GetPlayoutData(void* audioBuffer) { |
| 560 { | 515 CriticalSectionScoped lock(&_critSect); |
| 561 CriticalSectionScoped lock(&_critSect); | |
| 562 | 516 |
| 563 if (_playSize > kMaxBufferSizeBytes) | 517 if (_playSize > kMaxBufferSizeBytes) { |
| 564 { | 518 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, |
| 565 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, | 519 "_playSize %" PRIuS |
| 566 "_playSize %" PRIuS " exceeds kMaxBufferSizeBytes in " | 520 " exceeds kMaxBufferSizeBytes in " |
| 567 "AudioDeviceBuffer::GetPlayoutData", _playSize); | 521 "AudioDeviceBuffer::GetPlayoutData", |
| 568 assert(false); | 522 _playSize); |
| 569 return -1; | 523 assert(false); |
| 570 } | 524 return -1; |
| 525 } |
| 571 | 526 |
| 572 memcpy(audioBuffer, &_playBuffer[0], _playSize); | 527 memcpy(audioBuffer, &_playBuffer[0], _playSize); |
| 573 | 528 |
| 574 if (_playFile.is_open()) { | 529 if (_playFile.is_open()) { |
| 575 // write to binary file in mono or stereo (interleaved) | 530 // write to binary file in mono or stereo (interleaved) |
| 576 _playFile.Write(&_playBuffer[0], _playSize); | 531 _playFile.Write(&_playBuffer[0], _playSize); |
| 577 } | 532 } |
| 578 | 533 |
| 579 return static_cast<int32_t>(_playSamples); | 534 return static_cast<int32_t>(_playSamples); |
| 580 } | 535 } |
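On the playout side, a platform-specific ADM typically asks for the next 10 ms frame and then copies it out through this pair of calls. A sketch under assumed names (OnPlayoutNeeded, device_buffer, frames_per_10ms, render_buf are illustrative, not part of this CL):

```cpp
#include <cstddef>
#include <cstdint>
// Assumes the AudioDeviceBuffer declaration from this module is visible.

void OnPlayoutNeeded(webrtc::AudioDeviceBuffer* device_buffer,
                     size_t frames_per_10ms, void* render_buf) {
  // Ask AudioTransport (via NeedMorePlayData) to produce the next 10 ms...
  const int32_t samples = device_buffer->RequestPlayoutData(frames_per_10ms);
  if (samples > 0) {
    // ...then copy the rendered samples into the device's own buffer.
    device_buffer->GetPlayoutData(render_buf);
  }
}
```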
| 581 | 536 |
| 582 } // namespace webrtc | 537 } // namespace webrtc |