| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_device/audio_device_buffer.h" | 11 #include "webrtc/modules/audio_device/audio_device_buffer.h" |
| 12 | 12 |
| 13 #include <assert.h> | 13 #include "webrtc/base/checks.h" |
| 14 #include <string.h> | 14 #include "webrtc/base/logging.h" |
| 15 | |
| 16 #include "webrtc/base/format_macros.h" | 15 #include "webrtc/base/format_macros.h" |
| 17 #include "webrtc/modules/audio_device/audio_device_config.h" | 16 #include "webrtc/modules/audio_device/audio_device_config.h" |
| 18 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 17 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
| 19 #include "webrtc/system_wrappers/include/logging.h" | |
| 20 #include "webrtc/system_wrappers/include/trace.h" | |
| 21 | 18 |
| 22 namespace webrtc { | 19 namespace webrtc { |
| 23 | 20 |
| 24 static const int kHighDelayThresholdMs = 300; | 21 static const int kHighDelayThresholdMs = 300; |
| 25 static const int kLogHighDelayIntervalFrames = 500; // 5 seconds. | 22 static const int kLogHighDelayIntervalFrames = 500; // 5 seconds. |
| 26 | 23 |
| 27 // ---------------------------------------------------------------------------- | |
| 28 // ctor | |
| 29 // ---------------------------------------------------------------------------- | |
| 30 | |
| 31 AudioDeviceBuffer::AudioDeviceBuffer() | 24 AudioDeviceBuffer::AudioDeviceBuffer() |
| 32 : _id(-1), | 25 : _critSect(*CriticalSectionWrapper::CreateCriticalSection()), |
| 33 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), | |
| 34 _critSectCb(*CriticalSectionWrapper::CreateCriticalSection()), | 26 _critSectCb(*CriticalSectionWrapper::CreateCriticalSection()), |
| 35 _ptrCbAudioTransport(NULL), | 27 _ptrCbAudioTransport(nullptr), |
| 36 _recSampleRate(0), | 28 _recSampleRate(0), |
| 37 _playSampleRate(0), | 29 _playSampleRate(0), |
| 38 _recChannels(0), | 30 _recChannels(0), |
| 39 _playChannels(0), | 31 _playChannels(0), |
| 40 _recChannel(AudioDeviceModule::kChannelBoth), | 32 _recChannel(AudioDeviceModule::kChannelBoth), |
| 41 _recBytesPerSample(0), | 33 _recBytesPerSample(0), |
| 42 _playBytesPerSample(0), | 34 _playBytesPerSample(0), |
| 43 _recSamples(0), | 35 _recSamples(0), |
| 44 _recSize(0), | 36 _recSize(0), |
| 45 _playSamples(0), | 37 _playSamples(0), |
| 46 _playSize(0), | 38 _playSize(0), |
| 47 _recFile(*FileWrapper::Create()), | 39 _recFile(*FileWrapper::Create()), |
| 48 _playFile(*FileWrapper::Create()), | 40 _playFile(*FileWrapper::Create()), |
| 49 _currentMicLevel(0), | 41 _currentMicLevel(0), |
| 50 _newMicLevel(0), | 42 _newMicLevel(0), |
| 51 _typingStatus(false), | 43 _typingStatus(false), |
| 52 _playDelayMS(0), | 44 _playDelayMS(0), |
| 53 _recDelayMS(0), | 45 _recDelayMS(0), |
| 54 _clockDrift(0), | 46 _clockDrift(0), |
| 55 // Set to the interval in order to log on the first occurrence. | 47 // Set to the interval in order to log on the first occurrence. |
| 56 high_delay_counter_(kLogHighDelayIntervalFrames) { | 48 high_delay_counter_(kLogHighDelayIntervalFrames) { |
| 57 // valid ID will be set later by SetId, use -1 for now | 49 LOG(INFO) << "AudioDeviceBuffer::ctor"; |
| 58 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s created", | |
| 59 __FUNCTION__); | |
| 60 memset(_recBuffer, 0, kMaxBufferSizeBytes); | 50 memset(_recBuffer, 0, kMaxBufferSizeBytes); |
| 61 memset(_playBuffer, 0, kMaxBufferSizeBytes); | 51 memset(_playBuffer, 0, kMaxBufferSizeBytes); |
| 62 } | 52 } |
| 63 | 53 |
| 64 // ---------------------------------------------------------------------------- | |
| 65 // dtor | |
| 66 // ---------------------------------------------------------------------------- | |
| 67 | |
| 68 AudioDeviceBuffer::~AudioDeviceBuffer() { | 54 AudioDeviceBuffer::~AudioDeviceBuffer() { |
| 69 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", | 55 LOG(INFO) << "AudioDeviceBuffer::~dtor"; |
| 70 __FUNCTION__); | |
| 71 { | 56 { |
| 72 CriticalSectionScoped lock(&_critSect); | 57 CriticalSectionScoped lock(&_critSect); |
| 73 | 58 |
| 74 _recFile.Flush(); | 59 _recFile.Flush(); |
| 75 _recFile.CloseFile(); | 60 _recFile.CloseFile(); |
| 76 delete &_recFile; | 61 delete &_recFile; |
| 77 | 62 |
| 78 _playFile.Flush(); | 63 _playFile.Flush(); |
| 79 _playFile.CloseFile(); | 64 _playFile.CloseFile(); |
| 80 delete &_playFile; | 65 delete &_playFile; |
| 81 } | 66 } |
| 82 | 67 |
| 83 delete &_critSect; | 68 delete &_critSect; |
| 84 delete &_critSectCb; | 69 delete &_critSectCb; |
| 85 } | 70 } |
| 86 | 71 |
| 87 // ---------------------------------------------------------------------------- | |
| 88 // SetId | |
| 89 // ---------------------------------------------------------------------------- | |
| 90 | |
| 91 void AudioDeviceBuffer::SetId(uint32_t id) { | |
| 92 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, | |
| 93 "AudioDeviceBuffer::SetId(id=%d)", id); | |
| 94 _id = id; | |
| 95 } | |
| 96 | |
| 97 // ---------------------------------------------------------------------------- | |
| 98 // RegisterAudioCallback | |
| 99 // ---------------------------------------------------------------------------- | |
| 100 | |
| 101 int32_t AudioDeviceBuffer::RegisterAudioCallback( | 72 int32_t AudioDeviceBuffer::RegisterAudioCallback( |
| 102 AudioTransport* audioCallback) { | 73 AudioTransport* audioCallback) { |
| 74 LOG(INFO) << __FUNCTION__; |
| 103 CriticalSectionScoped lock(&_critSectCb); | 75 CriticalSectionScoped lock(&_critSectCb); |
| 104 _ptrCbAudioTransport = audioCallback; | 76 _ptrCbAudioTransport = audioCallback; |
| 105 | |
| 106 return 0; | 77 return 0; |
| 107 } | 78 } |
| 108 | 79 |
| 109 // ---------------------------------------------------------------------------- | |
| 110 // InitPlayout | |
| 111 // ---------------------------------------------------------------------------- | |
| 112 | |
| 113 int32_t AudioDeviceBuffer::InitPlayout() { | 80 int32_t AudioDeviceBuffer::InitPlayout() { |
| 114 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 81 LOG(INFO) << __FUNCTION__; |
| 115 return 0; | 82 return 0; |
| 116 } | 83 } |
| 117 | 84 |
| 118 // ---------------------------------------------------------------------------- | |
| 119 // InitRecording | |
| 120 // ---------------------------------------------------------------------------- | |
| 121 | |
| 122 int32_t AudioDeviceBuffer::InitRecording() { | 85 int32_t AudioDeviceBuffer::InitRecording() { |
| 123 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 86 LOG(INFO) << __FUNCTION__; |
| 124 return 0; | 87 return 0; |
| 125 } | 88 } |
| 126 | 89 |
| 127 // ---------------------------------------------------------------------------- | |
| 128 // SetRecordingSampleRate | |
| 129 // ---------------------------------------------------------------------------- | |
| 130 | |
| 131 int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) { | 90 int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) { |
| 91 LOG(INFO) << "SetRecordingSampleRate(" << fsHz << ")"; |
| 132 CriticalSectionScoped lock(&_critSect); | 92 CriticalSectionScoped lock(&_critSect); |
| 133 _recSampleRate = fsHz; | 93 _recSampleRate = fsHz; |
| 134 return 0; | 94 return 0; |
| 135 } | 95 } |
| 136 | 96 |
| 137 // ---------------------------------------------------------------------------- | |
| 138 // SetPlayoutSampleRate | |
| 139 // ---------------------------------------------------------------------------- | |
| 140 | |
| 141 int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) { | 97 int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) { |
| 98 LOG(INFO) << "SetPlayoutSampleRate(" << fsHz << ")"; |
| 142 CriticalSectionScoped lock(&_critSect); | 99 CriticalSectionScoped lock(&_critSect); |
| 143 _playSampleRate = fsHz; | 100 _playSampleRate = fsHz; |
| 144 return 0; | 101 return 0; |
| 145 } | 102 } |
| 146 | 103 |
| 147 // ---------------------------------------------------------------------------- | |
| 148 // RecordingSampleRate | |
| 149 // ---------------------------------------------------------------------------- | |
| 150 | |
| 151 int32_t AudioDeviceBuffer::RecordingSampleRate() const { | 104 int32_t AudioDeviceBuffer::RecordingSampleRate() const { |
| 152 return _recSampleRate; | 105 return _recSampleRate; |
| 153 } | 106 } |
| 154 | 107 |
| 155 // ---------------------------------------------------------------------------- | |
| 156 // PlayoutSampleRate | |
| 157 // ---------------------------------------------------------------------------- | |
| 158 | |
| 159 int32_t AudioDeviceBuffer::PlayoutSampleRate() const { | 108 int32_t AudioDeviceBuffer::PlayoutSampleRate() const { |
| 160 return _playSampleRate; | 109 return _playSampleRate; |
| 161 } | 110 } |
| 162 | 111 |
| 163 // ---------------------------------------------------------------------------- | |
| 164 // SetRecordingChannels | |
| 165 // ---------------------------------------------------------------------------- | |
| 166 | |
| 167 int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) { | 112 int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) { |
| 168 CriticalSectionScoped lock(&_critSect); | 113 CriticalSectionScoped lock(&_critSect); |
| 169 _recChannels = channels; | 114 _recChannels = channels; |
| 170 _recBytesPerSample = | 115 _recBytesPerSample = |
| 171 2 * channels; // 16 bits per sample in mono, 32 bits in stereo | 116 2 * channels; // 16 bits per sample in mono, 32 bits in stereo |
| 172 return 0; | 117 return 0; |
| 173 } | 118 } |
| 174 | 119 |
| 175 // ---------------------------------------------------------------------------- | |
| 176 // SetPlayoutChannels | |
| 177 // ---------------------------------------------------------------------------- | |
| 178 | |
| 179 int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) { | 120 int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) { |
| 180 CriticalSectionScoped lock(&_critSect); | 121 CriticalSectionScoped lock(&_critSect); |
| 181 _playChannels = channels; | 122 _playChannels = channels; |
| 182 // 16 bits per sample in mono, 32 bits in stereo | 123 // 16 bits per sample in mono, 32 bits in stereo |
| 183 _playBytesPerSample = 2 * channels; | 124 _playBytesPerSample = 2 * channels; |
| 184 return 0; | 125 return 0; |
| 185 } | 126 } |
| 186 | 127 |
| 187 // ---------------------------------------------------------------------------- | |
| 188 // SetRecordingChannel | |
| 189 // | |
| 190 // Select which channel to use while recording. | |
| 191 // This API requires that stereo is enabled. | |
| 192 // | |
| 193 // Note that, the nChannel parameter in RecordedDataIsAvailable will be | |
| 194 // set to 2 even for kChannelLeft and kChannelRight. However, nBytesPerSample | |
| 195 // will be 2 instead of 4 four these cases. | |
| 196 // ---------------------------------------------------------------------------- | |
| 197 | |
| 198 int32_t AudioDeviceBuffer::SetRecordingChannel( | 128 int32_t AudioDeviceBuffer::SetRecordingChannel( |
| 199 const AudioDeviceModule::ChannelType channel) { | 129 const AudioDeviceModule::ChannelType channel) { |
| 200 CriticalSectionScoped lock(&_critSect); | 130 CriticalSectionScoped lock(&_critSect); |
| 201 | 131 |
| 202 if (_recChannels == 1) { | 132 if (_recChannels == 1) { |
| 203 return -1; | 133 return -1; |
| 204 } | 134 } |
| 205 | 135 |
| 206 if (channel == AudioDeviceModule::kChannelBoth) { | 136 if (channel == AudioDeviceModule::kChannelBoth) { |
| 207 // two bytes per channel | 137 // two bytes per channel |
| 208 _recBytesPerSample = 4; | 138 _recBytesPerSample = 4; |
| 209 } else { | 139 } else { |
| 210 // only utilize one out of two possible channels (left or right) | 140 // only utilize one out of two possible channels (left or right) |
| 211 _recBytesPerSample = 2; | 141 _recBytesPerSample = 2; |
| 212 } | 142 } |
| 213 _recChannel = channel; | 143 _recChannel = channel; |
| 214 | 144 |
| 215 return 0; | 145 return 0; |
| 216 } | 146 } |
| 217 | 147 |
| 218 // ---------------------------------------------------------------------------- | |
| 219 // RecordingChannel | |
| 220 // ---------------------------------------------------------------------------- | |
| 221 | |
| 222 int32_t AudioDeviceBuffer::RecordingChannel( | 148 int32_t AudioDeviceBuffer::RecordingChannel( |
| 223 AudioDeviceModule::ChannelType& channel) const { | 149 AudioDeviceModule::ChannelType& channel) const { |
| 224 channel = _recChannel; | 150 channel = _recChannel; |
| 225 return 0; | 151 return 0; |
| 226 } | 152 } |
| 227 | 153 |
| 228 // ---------------------------------------------------------------------------- | |
| 229 // RecordingChannels | |
| 230 // ---------------------------------------------------------------------------- | |
| 231 | |
| 232 size_t AudioDeviceBuffer::RecordingChannels() const { | 154 size_t AudioDeviceBuffer::RecordingChannels() const { |
| 233 return _recChannels; | 155 return _recChannels; |
| 234 } | 156 } |
| 235 | 157 |
| 236 // ---------------------------------------------------------------------------- | |
| 237 // PlayoutChannels | |
| 238 // ---------------------------------------------------------------------------- | |
| 239 | |
| 240 size_t AudioDeviceBuffer::PlayoutChannels() const { | 158 size_t AudioDeviceBuffer::PlayoutChannels() const { |
| 241 return _playChannels; | 159 return _playChannels; |
| 242 } | 160 } |
| 243 | 161 |
| 244 // ---------------------------------------------------------------------------- | |
| 245 // SetCurrentMicLevel | |
| 246 // ---------------------------------------------------------------------------- | |
| 247 | |
| 248 int32_t AudioDeviceBuffer::SetCurrentMicLevel(uint32_t level) { | 162 int32_t AudioDeviceBuffer::SetCurrentMicLevel(uint32_t level) { |
| 249 _currentMicLevel = level; | 163 _currentMicLevel = level; |
| 250 return 0; | 164 return 0; |
| 251 } | 165 } |
| 252 | 166 |
| 253 int32_t AudioDeviceBuffer::SetTypingStatus(bool typingStatus) { | 167 int32_t AudioDeviceBuffer::SetTypingStatus(bool typingStatus) { |
| 254 _typingStatus = typingStatus; | 168 _typingStatus = typingStatus; |
| 255 return 0; | 169 return 0; |
| 256 } | 170 } |
| 257 | 171 |
| 258 // ---------------------------------------------------------------------------- | |
| 259 // NewMicLevel | |
| 260 // ---------------------------------------------------------------------------- | |
| 261 | |
| 262 uint32_t AudioDeviceBuffer::NewMicLevel() const { | 172 uint32_t AudioDeviceBuffer::NewMicLevel() const { |
| 263 return _newMicLevel; | 173 return _newMicLevel; |
| 264 } | 174 } |
| 265 | 175 |
| 266 // ---------------------------------------------------------------------------- | |
| 267 // SetVQEData | |
| 268 // ---------------------------------------------------------------------------- | |
| 269 | |
| 270 void AudioDeviceBuffer::SetVQEData(int playDelayMs, | 176 void AudioDeviceBuffer::SetVQEData(int playDelayMs, |
| 271 int recDelayMs, | 177 int recDelayMs, |
| 272 int clockDrift) { | 178 int clockDrift) { |
| 273 if (high_delay_counter_ < kLogHighDelayIntervalFrames) { | 179 if (high_delay_counter_ < kLogHighDelayIntervalFrames) { |
| 274 ++high_delay_counter_; | 180 ++high_delay_counter_; |
| 275 } else { | 181 } else { |
| 276 if (playDelayMs + recDelayMs > kHighDelayThresholdMs) { | 182 if (playDelayMs + recDelayMs > kHighDelayThresholdMs) { |
| 277 high_delay_counter_ = 0; | 183 high_delay_counter_ = 0; |
| 278 LOG(LS_WARNING) << "High audio device delay reported (render=" | 184 LOG(LS_WARNING) << "High audio device delay reported (render=" |
| 279 << playDelayMs << " ms, capture=" << recDelayMs << " ms)"; | 185 << playDelayMs << " ms, capture=" << recDelayMs << " ms)"; |
| 280 } | 186 } |
| 281 } | 187 } |
| 282 | 188 |
| 283 _playDelayMS = playDelayMs; | 189 _playDelayMS = playDelayMs; |
| 284 _recDelayMS = recDelayMs; | 190 _recDelayMS = recDelayMs; |
| 285 _clockDrift = clockDrift; | 191 _clockDrift = clockDrift; |
| 286 } | 192 } |
| 287 | 193 |
| 288 // ---------------------------------------------------------------------------- | |
| 289 // StartInputFileRecording | |
| 290 // ---------------------------------------------------------------------------- | |
| 291 | |
| 292 int32_t AudioDeviceBuffer::StartInputFileRecording( | 194 int32_t AudioDeviceBuffer::StartInputFileRecording( |
| 293 const char fileName[kAdmMaxFileNameSize]) { | 195 const char fileName[kAdmMaxFileNameSize]) { |
| 294 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 295 | |
| 296 CriticalSectionScoped lock(&_critSect); | 196 CriticalSectionScoped lock(&_critSect); |
| 297 | 197 |
| 298 _recFile.Flush(); | 198 _recFile.Flush(); |
| 299 _recFile.CloseFile(); | 199 _recFile.CloseFile(); |
| 300 | 200 |
| 301 return _recFile.OpenFile(fileName, false) ? 0 : -1; | 201 return _recFile.OpenFile(fileName, false) ? 0 : -1; |
| 302 } | 202 } |
| 303 | 203 |
| 304 // ---------------------------------------------------------------------------- | |
| 305 // StopInputFileRecording | |
| 306 // ---------------------------------------------------------------------------- | |
| 307 | |
| 308 int32_t AudioDeviceBuffer::StopInputFileRecording() { | 204 int32_t AudioDeviceBuffer::StopInputFileRecording() { |
| 309 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 310 | |
| 311 CriticalSectionScoped lock(&_critSect); | 205 CriticalSectionScoped lock(&_critSect); |
| 312 | 206 |
| 313 _recFile.Flush(); | 207 _recFile.Flush(); |
| 314 _recFile.CloseFile(); | 208 _recFile.CloseFile(); |
| 315 | 209 |
| 316 return 0; | 210 return 0; |
| 317 } | 211 } |
| 318 | 212 |
| 319 // ---------------------------------------------------------------------------- | |
| 320 // StartOutputFileRecording | |
| 321 // ---------------------------------------------------------------------------- | |
| 322 | |
| 323 int32_t AudioDeviceBuffer::StartOutputFileRecording( | 213 int32_t AudioDeviceBuffer::StartOutputFileRecording( |
| 324 const char fileName[kAdmMaxFileNameSize]) { | 214 const char fileName[kAdmMaxFileNameSize]) { |
| 325 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 326 | |
| 327 CriticalSectionScoped lock(&_critSect); | 215 CriticalSectionScoped lock(&_critSect); |
| 328 | 216 |
| 329 _playFile.Flush(); | 217 _playFile.Flush(); |
| 330 _playFile.CloseFile(); | 218 _playFile.CloseFile(); |
| 331 | 219 |
| 332 return _playFile.OpenFile(fileName, false) ? 0 : -1; | 220 return _playFile.OpenFile(fileName, false) ? 0 : -1; |
| 333 } | 221 } |
| 334 | 222 |
| 335 // ---------------------------------------------------------------------------- | |
| 336 // StopOutputFileRecording | |
| 337 // ---------------------------------------------------------------------------- | |
| 338 | |
| 339 int32_t AudioDeviceBuffer::StopOutputFileRecording() { | 223 int32_t AudioDeviceBuffer::StopOutputFileRecording() { |
| 340 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
| 341 | |
| 342 CriticalSectionScoped lock(&_critSect); | 224 CriticalSectionScoped lock(&_critSect); |
| 343 | 225 |
| 344 _playFile.Flush(); | 226 _playFile.Flush(); |
| 345 _playFile.CloseFile(); | 227 _playFile.CloseFile(); |
| 346 | 228 |
| 347 return 0; | 229 return 0; |
| 348 } | 230 } |
| 349 | 231 |
| 350 // ---------------------------------------------------------------------------- | |
| 351 // SetRecordedBuffer | |
| 352 // | |
| 353 // Store recorded audio buffer in local memory ready for the actual | |
| 354 // "delivery" using a callback. | |
| 355 // | |
| 356 // This method can also parse out left or right channel from a stereo | |
| 357 // input signal, i.e., emulate mono. | |
| 358 // | |
| 359 // Examples: | |
| 360 // | |
| 361 // 16-bit,48kHz mono, 10ms => nSamples=480 => _recSize=2*480=960 bytes | |
| 362 // 16-bit,48kHz stereo,10ms => nSamples=480 => _recSize=4*480=1920 bytes | |
| 363 // ---------------------------------------------------------------------------- | |
| 364 | |
| 365 int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer, | 232 int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer, |
| 366 size_t nSamples) { | 233 size_t nSamples) { |
| 367 CriticalSectionScoped lock(&_critSect); | 234 CriticalSectionScoped lock(&_critSect); |
| 368 | 235 |
| 369 if (_recBytesPerSample == 0) { | 236 if (_recBytesPerSample == 0) { |
| 370 assert(false); | 237 assert(false); |
| 371 return -1; | 238 return -1; |
| 372 } | 239 } |
| 373 | 240 |
| 374 _recSamples = nSamples; | 241 _recSamples = nSamples; |
| (...skipping 24 matching lines...) |
| 399 } | 266 } |
| 400 | 267 |
| 401 if (_recFile.is_open()) { | 268 if (_recFile.is_open()) { |
| 402 // write to binary file in mono or stereo (interleaved) | 269 // write to binary file in mono or stereo (interleaved) |
| 403 _recFile.Write(&_recBuffer[0], _recSize); | 270 _recFile.Write(&_recBuffer[0], _recSize); |
| 404 } | 271 } |
| 405 | 272 |
| 406 return 0; | 273 return 0; |
| 407 } | 274 } |
| 408 | 275 |
| 409 // ---------------------------------------------------------------------------- | |
| 410 // DeliverRecordedData | |
| 411 // ---------------------------------------------------------------------------- | |
| 412 | |
| 413 int32_t AudioDeviceBuffer::DeliverRecordedData() { | 276 int32_t AudioDeviceBuffer::DeliverRecordedData() { |
| 414 CriticalSectionScoped lock(&_critSectCb); | 277 CriticalSectionScoped lock(&_critSectCb); |
| 415 | |
| 416 // Ensure that user has initialized all essential members | 278 // Ensure that user has initialized all essential members |
| 417 if ((_recSampleRate == 0) || (_recSamples == 0) || | 279 if ((_recSampleRate == 0) || (_recSamples == 0) || |
| 418 (_recBytesPerSample == 0) || (_recChannels == 0)) { | 280 (_recBytesPerSample == 0) || (_recChannels == 0)) { |
| 419 assert(false); | 281 RTC_NOTREACHED(); |
| 420 return -1; | 282 return -1; |
| 421 } | 283 } |
| 422 | 284 |
| 423 if (_ptrCbAudioTransport == NULL) { | 285 if (!_ptrCbAudioTransport) { |
| 424 WEBRTC_TRACE( | 286 LOG(LS_WARNING) << "Invalid audio transport"; |
| 425 kTraceWarning, kTraceAudioDevice, _id, | |
| 426 "failed to deliver recorded data (AudioTransport does not exist)"); | |
| 427 return 0; | 287 return 0; |
| 428 } | 288 } |
| 429 | 289 |
| 430 int32_t res(0); | 290 int32_t res(0); |
| 431 uint32_t newMicLevel(0); | 291 uint32_t newMicLevel(0); |
| 432 uint32_t totalDelayMS = _playDelayMS + _recDelayMS; | 292 uint32_t totalDelayMS = _playDelayMS + _recDelayMS; |
| 433 | |
| 434 res = _ptrCbAudioTransport->RecordedDataIsAvailable( | 293 res = _ptrCbAudioTransport->RecordedDataIsAvailable( |
| 435 &_recBuffer[0], _recSamples, _recBytesPerSample, _recChannels, | 294 &_recBuffer[0], _recSamples, _recBytesPerSample, _recChannels, |
| 436 _recSampleRate, totalDelayMS, _clockDrift, _currentMicLevel, | 295 _recSampleRate, totalDelayMS, _clockDrift, _currentMicLevel, |
| 437 _typingStatus, newMicLevel); | 296 _typingStatus, newMicLevel); |
| 438 if (res != -1) { | 297 if (res != -1) { |
| 439 _newMicLevel = newMicLevel; | 298 _newMicLevel = newMicLevel; |
| 440 } | 299 } |
| 441 | 300 |
| 442 return 0; | 301 return 0; |
| 443 } | 302 } |
| 444 | 303 |
| 445 // ---------------------------------------------------------------------------- | |
| 446 // RequestPlayoutData | |
| 447 // ---------------------------------------------------------------------------- | |
| 448 | |
| 449 int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) { | 304 int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) { |
| 450 uint32_t playSampleRate = 0; | 305 uint32_t playSampleRate = 0; |
| 451 size_t playBytesPerSample = 0; | 306 size_t playBytesPerSample = 0; |
| 452 size_t playChannels = 0; | 307 size_t playChannels = 0; |
| 308 |
| 309 // TODO(henrika): Improve the bad locking model and make it clearer that only |
| 310 // 10 ms buffer sizes are supported in WebRTC. |
| 453 { | 311 { |
| 454 CriticalSectionScoped lock(&_critSect); | 312 CriticalSectionScoped lock(&_critSect); |
| 455 | 313 |
| 456 // Store copies under lock and use copies hereafter to avoid race with | 314 // Store copies under lock and use copies hereafter to avoid race with |
| 457 // setter methods. | 315 // setter methods. |
| 458 playSampleRate = _playSampleRate; | 316 playSampleRate = _playSampleRate; |
| 459 playBytesPerSample = _playBytesPerSample; | 317 playBytesPerSample = _playBytesPerSample; |
| 460 playChannels = _playChannels; | 318 playChannels = _playChannels; |
| 461 | 319 |
| 462 // Ensure that user has initialized all essential members | 320 // Ensure that user has initialized all essential members |
| 463 if ((playBytesPerSample == 0) || (playChannels == 0) || | 321 if ((playBytesPerSample == 0) || (playChannels == 0) || |
| 464 (playSampleRate == 0)) { | 322 (playSampleRate == 0)) { |
| 465 assert(false); | 323 RTC_NOTREACHED(); |
| 466 return -1; | 324 return -1; |
| 467 } | 325 } |
| 468 | 326 |
| 469 _playSamples = nSamples; | 327 _playSamples = nSamples; |
| 470 _playSize = playBytesPerSample * nSamples; // {2,4}*nSamples | 328 _playSize = playBytesPerSample * nSamples; // {2,4}*nSamples |
| 471 if (_playSize > kMaxBufferSizeBytes) { | 329 RTC_CHECK_LE(_playSize, kMaxBufferSizeBytes); |
| 472 assert(false); | 330 RTC_CHECK_EQ(nSamples, _playSamples); |
| 473 return -1; | |
| 474 } | |
| 475 | |
| 476 if (nSamples != _playSamples) { | |
| 477 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
| 478 "invalid number of samples to be played out (%d)", nSamples); | |
| 479 return -1; | |
| 480 } | |
| 481 } | 331 } |
| 482 | 332 |
| 483 size_t nSamplesOut(0); | 333 size_t nSamplesOut(0); |
| 484 | 334 |
| 485 CriticalSectionScoped lock(&_critSectCb); | 335 CriticalSectionScoped lock(&_critSectCb); |
| 486 | 336 |
| 487 if (_ptrCbAudioTransport == NULL) { | 337 // It is currently supported to start playout without a valid audio |
| 488 WEBRTC_TRACE( | 338 // transport object. Leads to warning and silence. |
| 489 kTraceWarning, kTraceAudioDevice, _id, | 339 if (!_ptrCbAudioTransport) { |
| 490 "failed to feed data to playout (AudioTransport does not exist)"); | 340 LOG(LS_WARNING) << "Invalid audio transport"; |
| 491 return 0; | 341 return 0; |
| 492 } | 342 } |
| 493 | 343 |
| 494 if (_ptrCbAudioTransport) { | 344 uint32_t res(0); |
| 495 uint32_t res(0); | 345 int64_t elapsed_time_ms = -1; |
| 496 int64_t elapsed_time_ms = -1; | 346 int64_t ntp_time_ms = -1; |
| 497 int64_t ntp_time_ms = -1; | 347 res = _ptrCbAudioTransport->NeedMorePlayData( |
| 498 res = _ptrCbAudioTransport->NeedMorePlayData( | 348 _playSamples, playBytesPerSample, playChannels, playSampleRate, |
| 499 _playSamples, playBytesPerSample, playChannels, playSampleRate, | 349 &_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms); |
| 500 &_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms); | 350 if (res != 0) { |
| 501 if (res != 0) { | 351 LOG(LS_ERROR) << "NeedMorePlayData() failed"; |
| 502 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 503 "NeedMorePlayData() failed"); | |
| 504 } | |
| 505 } | 352 } |
| 506 | 353 |
| 507 return static_cast<int32_t>(nSamplesOut); | 354 return static_cast<int32_t>(nSamplesOut); |
| 508 } | 355 } |
| 509 | 356 |
| 510 // ---------------------------------------------------------------------------- | |
| 511 // GetPlayoutData | |
| 512 // ---------------------------------------------------------------------------- | |
| 513 | |
| 514 int32_t AudioDeviceBuffer::GetPlayoutData(void* audioBuffer) { | 357 int32_t AudioDeviceBuffer::GetPlayoutData(void* audioBuffer) { |
| 515 CriticalSectionScoped lock(&_critSect); | 358 CriticalSectionScoped lock(&_critSect); |
| 516 | 359 RTC_CHECK_LE(_playSize, kMaxBufferSizeBytes); |
| 517 if (_playSize > kMaxBufferSizeBytes) { | |
| 518 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, | |
| 519 "_playSize %" PRIuS | |
| 520 " exceeds kMaxBufferSizeBytes in " | |
| 521 "AudioDeviceBuffer::GetPlayoutData", | |
| 522 _playSize); | |
| 523 assert(false); | |
| 524 return -1; | |
| 525 } | |
| 526 | 360 |
| 527 memcpy(audioBuffer, &_playBuffer[0], _playSize); | 361 memcpy(audioBuffer, &_playBuffer[0], _playSize); |
| 528 | 362 |
| 529 if (_playFile.is_open()) { | 363 if (_playFile.is_open()) { |
| 530 // write to binary file in mono or stereo (interleaved) | 364 // write to binary file in mono or stereo (interleaved) |
| 531 _playFile.Write(&_playBuffer[0], _playSize); | 365 _playFile.Write(&_playBuffer[0], _playSize); |
| 532 } | 366 } |
| 533 | 367 |
| 534 return static_cast<int32_t>(_playSamples); | 368 return static_cast<int32_t>(_playSamples); |
| 535 } | 369 } |
| 536 | 370 |
| 537 } // namespace webrtc | 371 } // namespace webrtc |
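For reference, a minimal sketch of the `webrtc/base` logging and check idioms the NEW side of this diff adopts (`LOG()` from `webrtc/base/logging.h`, `RTC_CHECK_LE`/`RTC_NOTREACHED` from `webrtc/base/checks.h`, replacing `WEBRTC_TRACE` and `assert`). The helper function below is hypothetical and not part of `AudioDeviceBuffer`.

```cpp
// Sketch only: illustrates the logging/check style used in the updated file.
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"

namespace {

// Hypothetical helper: returns the buffer size in bytes, or -1 on bad config.
int32_t ValidateRecordedSize(size_t bytes_per_sample,
                             size_t num_samples,
                             size_t max_size_bytes) {
  if (bytes_per_sample == 0) {
    // Replaces assert(false) in the old code.
    RTC_NOTREACHED();
    return -1;
  }
  const size_t size = bytes_per_sample * num_samples;
  // Replaces the manual overflow check + assert(false); aborts if violated.
  RTC_CHECK_LE(size, max_size_bytes);
  LOG(INFO) << "recorded size=" << size << " bytes";
  return static_cast<int32_t>(size);
}

}  // namespace
```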