| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/voice_engine/file_recorder.h" | 11 #include "webrtc/voice_engine/file_recorder.h" |
| 12 | 12 |
| 13 #include <list> | 13 #include <list> |
| 14 | 14 |
| 15 #include "webrtc/audio/utility/audio_frame_operations.h" |
| 15 #include "webrtc/base/platform_thread.h" | 16 #include "webrtc/base/platform_thread.h" |
| 16 #include "webrtc/common_audio/resampler/include/resampler.h" | 17 #include "webrtc/common_audio/resampler/include/resampler.h" |
| 17 #include "webrtc/common_types.h" | 18 #include "webrtc/common_types.h" |
| 18 #include "webrtc/modules/include/module_common_types.h" | 19 #include "webrtc/modules/include/module_common_types.h" |
| 19 #include "webrtc/modules/media_file/media_file.h" | 20 #include "webrtc/modules/media_file/media_file.h" |
| 20 #include "webrtc/modules/media_file/media_file_defines.h" | 21 #include "webrtc/modules/media_file/media_file_defines.h" |
| 21 #include "webrtc/system_wrappers/include/event_wrapper.h" | 22 #include "webrtc/system_wrappers/include/event_wrapper.h" |
| 22 #include "webrtc/system_wrappers/include/logging.h" | 23 #include "webrtc/system_wrappers/include/logging.h" |
| 23 #include "webrtc/typedefs.h" | 24 #include "webrtc/typedefs.h" |
| 24 #include "webrtc/voice_engine/coder.h" | 25 #include "webrtc/voice_engine/coder.h" |
| (...skipping 129 matching lines...) |
| 154 return -1; | 155 return -1; |
| 155 } | 156 } |
| 156 AudioFrame tempAudioFrame; | 157 AudioFrame tempAudioFrame; |
| 157 tempAudioFrame.samples_per_channel_ = 0; | 158 tempAudioFrame.samples_per_channel_ = 0; |
| 158 if (incomingAudioFrame.num_channels_ == 2 && !_moduleFile->IsStereo()) { | 159 if (incomingAudioFrame.num_channels_ == 2 && !_moduleFile->IsStereo()) { |
| 159 // Recording mono but incoming audio is (interleaved) stereo. | 160 // Recording mono but incoming audio is (interleaved) stereo. |
| 160 tempAudioFrame.num_channels_ = 1; | 161 tempAudioFrame.num_channels_ = 1; |
| 161 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; | 162 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; |
| 162 tempAudioFrame.samples_per_channel_ = | 163 tempAudioFrame.samples_per_channel_ = |
| 163 incomingAudioFrame.samples_per_channel_; | 164 incomingAudioFrame.samples_per_channel_; |
| 164 for (size_t i = 0; i < (incomingAudioFrame.samples_per_channel_); i++) { | 165 if (!incomingAudioFrame.muted()) { |
| 165 // Sample value is the average of left and right buffer rounded to | 166 AudioFrameOperations::StereoToMono( |
| 166 // closest integer value. Note samples can be either 1 or 2 byte. | 167 incomingAudioFrame.data(), incomingAudioFrame.samples_per_channel_, |
| 167 tempAudioFrame.data_[i] = ((incomingAudioFrame.data_[2 * i] + | 168 tempAudioFrame.mutable_data()); |
| 168 incomingAudioFrame.data_[(2 * i) + 1] + 1) >> | |
| 169 1); | |
| 170 } | 169 } |
| 171 } else if (incomingAudioFrame.num_channels_ == 1 && _moduleFile->IsStereo()) { | 170 } else if (incomingAudioFrame.num_channels_ == 1 && _moduleFile->IsStereo()) { |
| 172 // Recording stereo but incoming audio is mono. | 171 // Recording stereo but incoming audio is mono. |
| 173 tempAudioFrame.num_channels_ = 2; | 172 tempAudioFrame.num_channels_ = 2; |
| 174 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; | 173 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; |
| 175 tempAudioFrame.samples_per_channel_ = | 174 tempAudioFrame.samples_per_channel_ = |
| 176 incomingAudioFrame.samples_per_channel_; | 175 incomingAudioFrame.samples_per_channel_; |
| 177 for (size_t i = 0; i < (incomingAudioFrame.samples_per_channel_); i++) { | 176 if (!incomingAudioFrame.muted()) { |
| 178 // Duplicate sample to both channels | 177 AudioFrameOperations::MonoToStereo( |
| 179 tempAudioFrame.data_[2 * i] = incomingAudioFrame.data_[i]; | 178 incomingAudioFrame.data(), incomingAudioFrame.samples_per_channel_, |
| 180 tempAudioFrame.data_[2 * i + 1] = incomingAudioFrame.data_[i]; | 179 tempAudioFrame.mutable_data()); |
| 181 } | 180 } |
| 182 } | 181 } |
| 183 | 182 |
| 184 const AudioFrame* ptrAudioFrame = &incomingAudioFrame; | 183 const AudioFrame* ptrAudioFrame = &incomingAudioFrame; |
| 185 if (tempAudioFrame.samples_per_channel_ != 0) { | 184 if (tempAudioFrame.samples_per_channel_ != 0) { |
| 186 // If ptrAudioFrame is not empty it contains the audio to be recorded. | 185 // If ptrAudioFrame is not empty it contains the audio to be recorded. |
| 187 ptrAudioFrame = &tempAudioFrame; | 186 ptrAudioFrame = &tempAudioFrame; |
| 188 } | 187 } |
| 189 | 188 |
| 190 // Encode the audio data before writing to file. Don't encode if the codec | 189 // Encode the audio data before writing to file. Don't encode if the codec |
| 191 // is PCM. | 190 // is PCM. |
| 192 // NOTE: stereo recording is only supported for WAV files. | 191 // NOTE: stereo recording is only supported for WAV files. |
| 193 // TODO(hellner): WAV expect PCM in little endian byte order. Not | 192 // TODO(hellner): WAV expect PCM in little endian byte order. Not |
| 194 // "encoding" with PCM coder should be a problem for big endian systems. | 193 // "encoding" with PCM coder should be a problem for big endian systems. |
| 195 size_t encodedLenInBytes = 0; | 194 size_t encodedLenInBytes = 0; |
| 196 if (_fileFormat == kFileFormatPreencodedFile || | 195 if (_fileFormat == kFileFormatPreencodedFile || |
| 197 STR_CASE_CMP(codec_info_.plname, "L16") != 0) { | 196 STR_CASE_CMP(codec_info_.plname, "L16") != 0) { |
| 198 if (_audioEncoder.Encode(*ptrAudioFrame, _audioBuffer, | 197 if (_audioEncoder.Encode(*ptrAudioFrame, _audioBuffer, |
| 199 &encodedLenInBytes) == -1) { | 198 &encodedLenInBytes) == -1) { |
| 200 LOG(LS_WARNING) << "RecordAudioToFile() codec " << codec_info_.plname | 199 LOG(LS_WARNING) << "RecordAudioToFile() codec " << codec_info_.plname |
| 201 << " not supported or failed to encode stream."; | 200 << " not supported or failed to encode stream."; |
| 202 return -1; | 201 return -1; |
| 203 } | 202 } |
| 204 } else { | 203 } else { |
| 205 size_t outLen = 0; | 204 size_t outLen = 0; |
| 206 _audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_, | 205 _audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_, |
| 207 codec_info_.plfreq, | 206 codec_info_.plfreq, |
| 208 ptrAudioFrame->num_channels_); | 207 ptrAudioFrame->num_channels_); |
| | 208 // TODO(yujo): skip resample if frame is muted. |
| 209 _audioResampler.Push( | 209 _audioResampler.Push( |
| 210 ptrAudioFrame->data_, | 210 ptrAudioFrame->data(), |
| 211 ptrAudioFrame->samples_per_channel_ * ptrAudioFrame->num_channels_, | 211 ptrAudioFrame->samples_per_channel_ * ptrAudioFrame->num_channels_, |
| 212 reinterpret_cast<int16_t*>(_audioBuffer), MAX_AUDIO_BUFFER_IN_BYTES, | 212 reinterpret_cast<int16_t*>(_audioBuffer), MAX_AUDIO_BUFFER_IN_BYTES, |
| 213 outLen); | 213 outLen); |
| 214 encodedLenInBytes = outLen * sizeof(int16_t); | 214 encodedLenInBytes = outLen * sizeof(int16_t); |
| 215 } | 215 } |
| 216 | 216 |
| 217 // Codec may not be operating at a frame rate of 10 ms. Whenever enough | 217 // Codec may not be operating at a frame rate of 10 ms. Whenever enough |
| 218 // 10 ms chunks of data has been pushed to the encoder an encoded frame | 218 // 10 ms chunks of data has been pushed to the encoder an encoded frame |
| 219 // will be available. Wait until then. | 219 // will be available. Wait until then. |
| 220 if (encodedLenInBytes) { | 220 if (encodedLenInBytes) { |
| (...skipping 32 matching lines...) |
| 253 } // namespace | 253 } // namespace |
| 254 | 254 |
| 255 std::unique_ptr<FileRecorder> FileRecorder::CreateFileRecorder( | 255 std::unique_ptr<FileRecorder> FileRecorder::CreateFileRecorder( |
| 256 uint32_t instanceID, | 256 uint32_t instanceID, |
| 257 FileFormats fileFormat) { | 257 FileFormats fileFormat) { |
| 258 return std::unique_ptr<FileRecorder>( | 258 return std::unique_ptr<FileRecorder>( |
| 259 new FileRecorderImpl(instanceID, fileFormat)); | 259 new FileRecorderImpl(instanceID, fileFormat)); |
| 260 } | 260 } |
| 261 | 261 |
| 262 } // namespace webrtc | 262 } // namespace webrtc |
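
For context on the replaced lines: the hand-rolled loops that downmixed interleaved stereo to mono (rounded average of left and right) and upmixed mono to stereo (sample duplication) are swapped for the three-argument AudioFrameOperations helpers from the newly included audio_frame_operations.h. Below is a minimal sketch of the equivalent conversions; it mirrors the removed loops, not the library's actual implementation, and the function names are placeholders.

```cpp
#include <cstddef>
#include <cstdint>

// Sketch only: reproduces the behavior of the loops removed from
// RecordAudioToFile(). The real helpers are
// AudioFrameOperations::StereoToMono/MonoToStereo in
// webrtc/audio/utility/audio_frame_operations.h.
void StereoToMonoSketch(const int16_t* src_audio,
                        size_t samples_per_channel,
                        int16_t* dst_audio) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    // Average of the interleaved left and right samples, rounded to the
    // nearest integer (the "+ 1" before the shift implements the rounding).
    dst_audio[i] = static_cast<int16_t>(
        (src_audio[2 * i] + src_audio[2 * i + 1] + 1) >> 1);
  }
}

void MonoToStereoSketch(const int16_t* src_audio,
                        size_t samples_per_channel,
                        int16_t* dst_audio) {
  // dst_audio must hold 2 * samples_per_channel samples.
  for (size_t i = 0; i < samples_per_channel; ++i) {
    // Duplicate each mono sample into the left and right slots of the
    // interleaved stereo buffer.
    dst_audio[2 * i] = src_audio[i];
    dst_audio[2 * i + 1] = src_audio[i];
  }
}
```

The argument order matches the calls in the new lines: source samples, samples per channel, destination samples.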
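
The other visible change is that both conversions are now guarded by `!incomingAudioFrame.muted()`, and the payload is reached through `data()` / `mutable_data()` rather than the raw `data_` member. Below is a minimal sketch of that pattern, under the assumption (suggested by the guarded calls above, not stated in this diff) that a muted frame's `data()` reads as silence and `mutable_data()` materializes a writable buffer; `DownmixUnlessMuted` is a hypothetical helper, not part of the file.

```cpp
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/modules/include/module_common_types.h"

namespace webrtc {

// Sketch of the guarded-conversion pattern used above (assumptions noted in
// the text): when the incoming frame is muted, its samples read as silence,
// so the downmix is skipped and the destination frame is never written,
// avoiding the cost of materializing a payload that would hold only zeros.
void DownmixUnlessMuted(const AudioFrame& in, AudioFrame* out) {
  out->num_channels_ = 1;
  out->sample_rate_hz_ = in.sample_rate_hz_;
  out->samples_per_channel_ = in.samples_per_channel_;
  if (!in.muted()) {
    AudioFrameOperations::StereoToMono(in.data(), in.samples_per_channel_,
                                       out->mutable_data());
  }
}

}  // namespace webrtc
```

The TODO(yujo) added in the PCM branch points at the same idea for the resampler: a fully muted frame could skip the Push() as well.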
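
In the L16 branch nothing is actually encoded: the samples are only resampled to the codec's plfreq and written out unmodified as 16-bit samples, so the byte count is the resampled sample count times sizeof(int16_t). A worked example with hypothetical numbers (not taken from the source):

```cpp
#include <cstddef>
#include <cstdint>

int main() {
  // Hypothetical case: one 10 ms frame of 48 kHz stereo audio, recorded to a
  // 16 kHz L16 WAV target (plfreq = 16000).
  const size_t samples_per_channel_in = 480;                        // 48 kHz * 10 ms
  const size_t num_channels = 2;
  const size_t in_samples = samples_per_channel_in * num_channels;  // 960
  // Resampling 48 kHz -> 16 kHz shrinks the sample count by a factor of 3.
  const size_t out_len = in_samples / 3;                            // 320
  // Mirrors "encodedLenInBytes = outLen * sizeof(int16_t)" in the diff.
  const size_t encoded_len_in_bytes = out_len * sizeof(int16_t);    // 640
  return encoded_len_in_bytes == 640 ? 0 : 1;
}
```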