| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/voice_engine/file_recorder.h" | 11 #include "webrtc/voice_engine/file_recorder.h" |
| 12 | 12 |
| 13 #include <list> | 13 #include <list> |
| 14 | 14 |
| 15 #include "webrtc/audio/utility/audio_frame_operations.h" |
| 15 #include "webrtc/base/logging.h" | 16 #include "webrtc/base/logging.h" |
| 16 #include "webrtc/base/platform_thread.h" | 17 #include "webrtc/base/platform_thread.h" |
| 17 #include "webrtc/common_audio/resampler/include/resampler.h" | 18 #include "webrtc/common_audio/resampler/include/resampler.h" |
| 18 #include "webrtc/common_types.h" | 19 #include "webrtc/common_types.h" |
| 19 #include "webrtc/modules/include/module_common_types.h" | 20 #include "webrtc/modules/include/module_common_types.h" |
| 20 #include "webrtc/modules/media_file/media_file.h" | 21 #include "webrtc/modules/media_file/media_file.h" |
| 21 #include "webrtc/modules/media_file/media_file_defines.h" | 22 #include "webrtc/modules/media_file/media_file_defines.h" |
| 22 #include "webrtc/system_wrappers/include/event_wrapper.h" | 23 #include "webrtc/system_wrappers/include/event_wrapper.h" |
| 23 #include "webrtc/typedefs.h" | 24 #include "webrtc/typedefs.h" |
| 24 #include "webrtc/voice_engine/coder.h" | 25 #include "webrtc/voice_engine/coder.h" |
| (...skipping 127 matching lines...) | |
| 152 return -1; | 153 return -1; |
| 153 } | 154 } |
| 154 AudioFrame tempAudioFrame; | 155 AudioFrame tempAudioFrame; |
| 155 tempAudioFrame.samples_per_channel_ = 0; | 156 tempAudioFrame.samples_per_channel_ = 0; |
| 156 if (incomingAudioFrame.num_channels_ == 2 && !_moduleFile->IsStereo()) { | 157 if (incomingAudioFrame.num_channels_ == 2 && !_moduleFile->IsStereo()) { |
| 157 // Recording mono but incoming audio is (interleaved) stereo. | 158 // Recording mono but incoming audio is (interleaved) stereo. |
| 158 tempAudioFrame.num_channels_ = 1; | 159 tempAudioFrame.num_channels_ = 1; |
| 159 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; | 160 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; |
| 160 tempAudioFrame.samples_per_channel_ = | 161 tempAudioFrame.samples_per_channel_ = |
| 161 incomingAudioFrame.samples_per_channel_; | 162 incomingAudioFrame.samples_per_channel_; |
| 162 for (size_t i = 0; i < (incomingAudioFrame.samples_per_channel_); i++) { | 163 if (!incomingAudioFrame.muted()) { |
| 163 // Sample value is the average of left and right buffer rounded to | 164 AudioFrameOperations::StereoToMono( |
| 164 // closest integer value. Note samples can be either 1 or 2 byte. | 165 incomingAudioFrame.data(), incomingAudioFrame.samples_per_channel_, |
| 165 tempAudioFrame.data_[i] = ((incomingAudioFrame.data_[2 * i] + | 166 tempAudioFrame.mutable_data()); |
| 166 incomingAudioFrame.data_[(2 * i) + 1] + 1) >> | |
| 167 1); | |
| 168 } | 167 } |
| 169 } else if (incomingAudioFrame.num_channels_ == 1 && _moduleFile->IsStereo()) { | 168 } else if (incomingAudioFrame.num_channels_ == 1 && _moduleFile->IsStereo()) { |
| 170 // Recording stereo but incoming audio is mono. | 169 // Recording stereo but incoming audio is mono. |
| 171 tempAudioFrame.num_channels_ = 2; | 170 tempAudioFrame.num_channels_ = 2; |
| 172 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; | 171 tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_; |
| 173 tempAudioFrame.samples_per_channel_ = | 172 tempAudioFrame.samples_per_channel_ = |
| 174 incomingAudioFrame.samples_per_channel_; | 173 incomingAudioFrame.samples_per_channel_; |
| 175 for (size_t i = 0; i < (incomingAudioFrame.samples_per_channel_); i++) { | 174 if (!incomingAudioFrame.muted()) { |
| 176 // Duplicate sample to both channels | 175 AudioFrameOperations::MonoToStereo( |
| 177 tempAudioFrame.data_[2 * i] = incomingAudioFrame.data_[i]; | 176 incomingAudioFrame.data(), incomingAudioFrame.samples_per_channel_, |
| 178 tempAudioFrame.data_[2 * i + 1] = incomingAudioFrame.data_[i]; | 177 tempAudioFrame.mutable_data()); |
| 179 } | 178 } |
| 180 } | 179 } |
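
The new code replaces the hand-rolled conversion loops with `AudioFrameOperations::StereoToMono` and `AudioFrameOperations::MonoToStereo` from the newly included `audio_frame_operations.h`. A minimal sketch of what those helpers are expected to do, reconstructed from the removed loops (plain `int16_t` buffers assumed; this is not the actual helper implementation):

```cpp
#include <cstddef>
#include <cstdint>

// Stereo -> mono: each output sample is the average of the interleaved
// left/right pair, rounded to the nearest integer (matches the removed loop).
void StereoToMonoSketch(const int16_t* src, size_t samples_per_channel,
                        int16_t* dst) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    dst[i] = static_cast<int16_t>((src[2 * i] + src[2 * i + 1] + 1) >> 1);
  }
}

// Mono -> stereo: duplicate each sample into both interleaved channels
// (matches the removed loop).
void MonoToStereoSketch(const int16_t* src, size_t samples_per_channel,
                        int16_t* dst) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    dst[2 * i] = src[i];
    dst[2 * i + 1] = src[i];
  }
}
```

Note that the conversion is skipped entirely for muted frames; a muted `AudioFrame` reads back as silence, so the destination buffer does not need to be filled.
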
| 181 | 180 |
| 182 const AudioFrame* ptrAudioFrame = &incomingAudioFrame; | 181 const AudioFrame* ptrAudioFrame = &incomingAudioFrame; |
| 183 if (tempAudioFrame.samples_per_channel_ != 0) { | 182 if (tempAudioFrame.samples_per_channel_ != 0) { |
| 184 // If tempAudioFrame is not empty it contains the audio to be recorded. | 183 // If tempAudioFrame is not empty it contains the audio to be recorded. |
| 185 ptrAudioFrame = &tempAudioFrame; | 184 ptrAudioFrame = &tempAudioFrame; |
| 186 } | 185 } |
| 187 | 186 |
| 188 // Encode the audio data before writing to file. Don't encode if the codec | 187 // Encode the audio data before writing to file. Don't encode if the codec |
| 189 // is PCM. | 188 // is PCM. |
| 190 // NOTE: stereo recording is only supported for WAV files. | 189 // NOTE: stereo recording is only supported for WAV files. |
| 191 // TODO(hellner): WAV expects PCM in little endian byte order. Not | 190 // TODO(hellner): WAV expects PCM in little endian byte order. Not |
| 192 // "encoding" with the PCM coder may be a problem for big endian systems. | 191 // "encoding" with the PCM coder may be a problem for big endian systems. |
| 193 size_t encodedLenInBytes = 0; | 192 size_t encodedLenInBytes = 0; |
| 194 if (_fileFormat == kFileFormatPreencodedFile || | 193 if (_fileFormat == kFileFormatPreencodedFile || |
| 195 STR_CASE_CMP(codec_info_.plname, "L16") != 0) { | 194 STR_CASE_CMP(codec_info_.plname, "L16") != 0) { |
| 196 if (_audioEncoder.Encode(*ptrAudioFrame, _audioBuffer, | 195 if (_audioEncoder.Encode(*ptrAudioFrame, _audioBuffer, |
| 197 &encodedLenInBytes) == -1) { | 196 &encodedLenInBytes) == -1) { |
| 198 LOG(LS_WARNING) << "RecordAudioToFile() codec " << codec_info_.plname | 197 LOG(LS_WARNING) << "RecordAudioToFile() codec " << codec_info_.plname |
| 199 << " not supported or failed to encode stream."; | 198 << " not supported or failed to encode stream."; |
| 200 return -1; | 199 return -1; |
| 201 } | 200 } |
| 202 } else { | 201 } else { |
| 203 size_t outLen = 0; | 202 size_t outLen = 0; |
| 204 _audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_, | 203 _audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_, |
| 205 codec_info_.plfreq, | 204 codec_info_.plfreq, |
| 206 ptrAudioFrame->num_channels_); | 205 ptrAudioFrame->num_channels_); |
| | 206 // TODO(yujo): skip resample if frame is muted. |
| 207 _audioResampler.Push( | 207 _audioResampler.Push( |
| 208 ptrAudioFrame->data_, | 208 ptrAudioFrame->data(), |
| 209 ptrAudioFrame->samples_per_channel_ * ptrAudioFrame->num_channels_, | 209 ptrAudioFrame->samples_per_channel_ * ptrAudioFrame->num_channels_, |
| 210 reinterpret_cast<int16_t*>(_audioBuffer), MAX_AUDIO_BUFFER_IN_BYTES, | 210 reinterpret_cast<int16_t*>(_audioBuffer), MAX_AUDIO_BUFFER_IN_BYTES, |
| 211 outLen); | 211 outLen); |
| 212 encodedLenInBytes = outLen * sizeof(int16_t); | 212 encodedLenInBytes = outLen * sizeof(int16_t); |
| 213 } | 213 } |
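
In the L16 (raw PCM) branch above, the samples are only resampled and copied, so the byte count written to file is just the resampled sample count times two. A quick arithmetic check, assuming a hypothetical 16 kHz mono target (the real rate comes from `codec_info_.plfreq`):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical target rate; codec_info_.plfreq would hold the real value.
constexpr size_t kTargetRateHz = 16000;
constexpr size_t kSamplesPer10Ms = kTargetRateHz / 100;              // 160
constexpr size_t kBytesPer10Ms = kSamplesPer10Ms * sizeof(int16_t);  // 320
static_assert(kBytesPer10Ms == 320,
              "10 ms of 16 kHz mono L16 occupies 320 bytes");
```
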
| 214 | 214 |
| 215 // Codec may not be operating at a frame rate of 10 ms. Whenever enough | 215 // Codec may not be operating at a frame rate of 10 ms. Whenever enough |
| 216 // 10 ms chunks of data have been pushed to the encoder, an encoded frame | 216 // 10 ms chunks of data have been pushed to the encoder, an encoded frame |
| 217 // will be available. Wait until then. | 217 // will be available. Wait until then. |
| 218 if (encodedLenInBytes) { | 218 if (encodedLenInBytes) { |
| (...skipping 32 matching lines...) | |
| 251 } // namespace | 251 } // namespace |
| 252 | 252 |
| 253 std::unique_ptr<FileRecorder> FileRecorder::CreateFileRecorder( | 253 std::unique_ptr<FileRecorder> FileRecorder::CreateFileRecorder( |
| 254 uint32_t instanceID, | 254 uint32_t instanceID, |
| 255 FileFormats fileFormat) { | 255 FileFormats fileFormat) { |
| 256 return std::unique_ptr<FileRecorder>( | 256 return std::unique_ptr<FileRecorder>( |
| 257 new FileRecorderImpl(instanceID, fileFormat)); | 257 new FileRecorderImpl(instanceID, fileFormat)); |
| 258 } | 258 } |
| 259 | 259 |
| 260 } // namespace webrtc | 260 } // namespace webrtc |
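
For context, a minimal caller sketch for the factory shown above. `kFileFormatWavFile` is assumed to be one of the `FileFormats` values from `common_types.h`, and the instance id value is arbitrary:

```cpp
#include <memory>

#include "webrtc/common_types.h"
#include "webrtc/voice_engine/file_recorder.h"

int main() {
  // The instance id is only used for tracing; the value here is arbitrary.
  std::unique_ptr<webrtc::FileRecorder> recorder =
      webrtc::FileRecorder::CreateFileRecorder(/*instanceID=*/1,
                                               webrtc::kFileFormatWavFile);
  return recorder ? 0 : 1;
}
```
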