| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 307 matching lines...) |
| 318 RTC_HISTOGRAM_ENUMERATION( | 318 RTC_HISTOGRAM_ENUMERATION( |
| 319 "WebRTC.Audio.Encoder.CodecType", static_cast<int>(codec_type), | 319 "WebRTC.Audio.Encoder.CodecType", static_cast<int>(codec_type), |
| 320 static_cast<int>( | 320 static_cast<int>( |
| 321 webrtc::AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes)); | 321 webrtc::AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes)); |
| 322 } | 322 } |
| 323 | 323 |
| 324 // Stereo-to-mono downmixing can be done in place. | 324 // Stereo-to-mono downmixing can be done in place. |
| 325 int DownMix(const AudioFrame& frame, | 325 int DownMix(const AudioFrame& frame, |
| 326 size_t length_out_buff, | 326 size_t length_out_buff, |
| 327 int16_t* out_buff) { | 327 int16_t* out_buff) { |
| 328 if (length_out_buff < frame.samples_per_channel_) { | 328 RTC_DCHECK_EQ(frame.num_channels_, 2); |
| 329 return -1; | 329 RTC_DCHECK_GE(length_out_buff, frame.samples_per_channel_); |
| 330 |
| 331 if (!frame.muted()) { |
| 332 const int16_t* frame_data = frame.data(); |
| 333 for (size_t n = 0; n < frame.samples_per_channel_; ++n) { |
| 334 out_buff[n] = static_cast<int16_t>( |
| 335 (static_cast<int32_t>(frame_data[2 * n]) + |
| 336 static_cast<int32_t>(frame_data[2 * n + 1])) >> 1); |
| 337 } |
| 338 } else { |
| 339 memset(out_buff, 0, sizeof(int16_t) * frame.samples_per_channel_); |
| 330 } | 340 } |
| 331 for (size_t n = 0; n < frame.samples_per_channel_; ++n) | |
| 332 out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1; | |
| 333 return 0; | 341 return 0; |
| 334 } | 342 } |
| 335 | 343 |
| 336 // Mono-to-stereo upmixing can be done in place. | 344 // Mono-to-stereo upmixing can be done in place. |
| 337 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) { | 345 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) { |
| 338 if (length_out_buff < frame.samples_per_channel_) { | 346 RTC_DCHECK_EQ(frame.num_channels_, 1); |
| 339 return -1; | 347 RTC_DCHECK_GE(length_out_buff, 2 * frame.samples_per_channel_); |
| 340 } | 348 |
| 341 for (size_t n = frame.samples_per_channel_; n != 0; --n) { | 349 if (!frame.muted()) { |
| 342 size_t i = n - 1; | 350 const int16_t* frame_data = frame.data(); |
| 343 int16_t sample = frame.data_[i]; | 351 for (size_t n = frame.samples_per_channel_; n != 0; --n) { |
| 344 out_buff[2 * i + 1] = sample; | 352 size_t i = n - 1; |
| 345 out_buff[2 * i] = sample; | 353 int16_t sample = frame_data[i]; |
| 354 out_buff[2 * i + 1] = sample; |
| 355 out_buff[2 * i] = sample; |
| 356 } |
| 357 } else { |
| 358 memset(out_buff, 0, sizeof(int16_t) * 2 * frame.samples_per_channel_); |
| 346 } | 359 } |
| 347 return 0; | 360 return 0; |
| 348 } | 361 } |
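
Note on the mixing helpers above: DownMix averages each interleaved L/R pair, widening to int32_t so the sum cannot wrap before the >> 1, and both helpers are safe for in-place use because DownMix writes index n only after reading indices 2n and 2n+1 of the same iteration, while UpMix iterates backwards so positions 2i and 2i+1 are written only after sample i has been read. A minimal standalone sketch of the same arithmetic, for illustration only (not WebRTC code):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Stereo-to-mono: average each interleaved L/R pair. Widen to int32_t
    // first so the sum cannot overflow int16_t before the shift.
    std::vector<int16_t> DownMixToMono(const std::vector<int16_t>& interleaved) {
      std::vector<int16_t> mono(interleaved.size() / 2);
      for (size_t n = 0; n < mono.size(); ++n) {
        mono[n] = static_cast<int16_t>(
            (static_cast<int32_t>(interleaved[2 * n]) +
             static_cast<int32_t>(interleaved[2 * n + 1])) >> 1);
      }
      return mono;
    }

    // Mono-to-stereo: duplicate every sample into both channels.
    std::vector<int16_t> UpMixToStereo(const std::vector<int16_t>& mono) {
      std::vector<int16_t> stereo(2 * mono.size());
      for (size_t n = 0; n < mono.size(); ++n) {
        stereo[2 * n] = mono[n];
        stereo[2 * n + 1] = mono[n];
      }
      return stereo;
    }

    int main() {
      for (int16_t s : DownMixToMono({1000, 2000, -3000, 3000}))
        printf("%d ", s);  // Prints: 1500 0
      printf("\n");
      for (int16_t s : UpMixToStereo({42, -7}))
        printf("%d ", s);  // Prints: 42 42 -7 -7
      printf("\n");
      return 0;
    }
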
| 349 | 362 |
| 350 void ConvertEncodedInfoToFragmentationHeader( | 363 void ConvertEncodedInfoToFragmentationHeader( |
| 351 const AudioEncoder::EncodedInfo& info, | 364 const AudioEncoder::EncodedInfo& info, |
| 352 RTPFragmentationHeader* frag) { | 365 RTPFragmentationHeader* frag) { |
| 353 if (info.redundant.empty()) { | 366 if (info.redundant.empty()) { |
| 354 frag->fragmentationVectorSize = 0; | 367 frag->fragmentationVectorSize = 0; |
| 355 return; | 368 return; |
| (...skipping 362 matching lines...) |
| 718 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 731 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 719 return -1; | 732 return -1; |
| 720 } else { | 733 } else { |
| 721 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 734 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 722 return -1; | 735 return -1; |
| 723 } | 736 } |
| 724 } | 737 } |
| 725 | 738 |
| 726 // When adding data to the encoders, this pointer points to an audio | 739 // When adding data to the encoders, this pointer points to an audio |
| 727 // buffer with the correct number of channels. | 740 // buffer with the correct number of channels. |
| 728 const int16_t* ptr_audio = ptr_frame->data_; | 741 const int16_t* ptr_audio = ptr_frame->data(); |
| 729 | 742 |
| 730 // For pushing data to the primary encoder, point |ptr_audio| to the correct buffer. | 743 // For pushing data to the primary encoder, point |ptr_audio| to the correct buffer. |
| 731 if (!same_num_channels) | 744 if (!same_num_channels) |
| 732 ptr_audio = input_data->buffer; | 745 ptr_audio = input_data->buffer; |
| 733 | 746 |
| 747 // TODO(yujo): Skip encode of muted frames. |
| 734 input_data->input_timestamp = ptr_frame->timestamp_; | 748 input_data->input_timestamp = ptr_frame->timestamp_; |
| 735 input_data->audio = ptr_audio; | 749 input_data->audio = ptr_audio; |
| 736 input_data->length_per_channel = ptr_frame->samples_per_channel_; | 750 input_data->length_per_channel = ptr_frame->samples_per_channel_; |
| 737 input_data->audio_channel = current_num_channels; | 751 input_data->audio_channel = current_num_channels; |
| 738 | 752 |
| 739 return 0; | 753 return 0; |
| 740 } | 754 } |
| 741 | 755 |
| 742 // Perform resampling and down-mixing if required. We down-mix only if | 756 // Perform resampling and down-mixing if required. We down-mix only if |
| 743 // the encoder is mono and the input is stereo. In the case of dual-streaming, | 757 // the encoder is mono and the input is stereo. In the case of dual-streaming, |
| 744 // both encoders have to be mono for down-mixing to take place. | 758 // both encoders have to be mono for down-mixing to take place. |
| 745 // |*ptr_out| will point to the pre-processed audio frame. If no pre-processing | 759 // |*ptr_out| will point to the pre-processed audio frame. If no pre-processing |
| 746 // is required, |*ptr_out| points to |in_frame|. | 760 // is required, |*ptr_out| points to |in_frame|. |
| 761 // TODO(yujo): Make this more efficient for muted frames. |
| 747 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, | 762 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, |
| 748 const AudioFrame** ptr_out) { | 763 const AudioFrame** ptr_out) { |
| 749 const bool resample = | 764 const bool resample = |
| 750 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz(); | 765 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz(); |
| 751 | 766 |
| 752 // This variable is true if the primary codec and the secondary codec (if | 767 // This variable is true if the primary codec and the secondary codec (if |
| 753 // one exists) are both mono and the input is stereo. | 768 // one exists) are both mono and the input is stereo. |
| 754 // TODO(henrik.lundin): This condition should probably be | 769 // TODO(henrik.lundin): This condition should probably be |
| 755 // in_frame.num_channels_ > encoder_stack_->NumChannels() | 770 // in_frame.num_channels_ > encoder_stack_->NumChannels() |
| 756 const bool down_mix = | 771 const bool down_mix = |
| (...skipping 29 matching lines...) |
| 786 } | 801 } |
| 787 | 802 |
| 788 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 803 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 789 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 804 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 790 return 0; | 805 return 0; |
| 791 } | 806 } |
| 792 | 807 |
| 793 *ptr_out = &preprocess_frame_; | 808 *ptr_out = &preprocess_frame_; |
| 794 preprocess_frame_.num_channels_ = in_frame.num_channels_; | 809 preprocess_frame_.num_channels_ = in_frame.num_channels_; |
| 795 int16_t audio[WEBRTC_10MS_PCM_AUDIO]; | 810 int16_t audio[WEBRTC_10MS_PCM_AUDIO]; |
| 796 const int16_t* src_ptr_audio = in_frame.data_; | 811 const int16_t* src_ptr_audio = in_frame.data(); |
| 797 int16_t* dest_ptr_audio = preprocess_frame_.data_; | |
| 798 if (down_mix) { | 812 if (down_mix) { |
| 799 // If resampling is required, the output of the down-mix is written into a | 813 // If resampling is required, the output of the down-mix is written into a |
| 800 // local buffer; otherwise it is written to the output frame. | 814 // local buffer; otherwise it is written to the output frame. |
| 801 if (resample) | 815 int16_t* dest_ptr_audio = |
| 802 dest_ptr_audio = audio; | 816 resample ? audio : preprocess_frame_.mutable_data(); |
| 803 if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0) | 817 if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0) |
| 804 return -1; | 818 return -1; |
| 805 preprocess_frame_.num_channels_ = 1; | 819 preprocess_frame_.num_channels_ = 1; |
| 806 // Set the input of the resampler to the down-mixed signal. | 820 // Set the input of the resampler to the down-mixed signal. |
| 807 src_ptr_audio = audio; | 821 src_ptr_audio = audio; |
| 808 } | 822 } |
| 809 | 823 |
| 810 preprocess_frame_.timestamp_ = expected_codec_ts_; | 824 preprocess_frame_.timestamp_ = expected_codec_ts_; |
| 811 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; | 825 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; |
| 812 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; | 826 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; |
| 813 // Resample if required. | 827 // Resample if required. |
| 814 if (resample) { | 828 if (resample) { |
| 815 // The result of the resampler is written to the output frame. | 829 // The result of the resampler is written to the output frame. |
| 816 dest_ptr_audio = preprocess_frame_.data_; | 830 int16_t* dest_ptr_audio = preprocess_frame_.mutable_data(); |
| 817 | 831 |
| 818 int samples_per_channel = resampler_.Resample10Msec( | 832 int samples_per_channel = resampler_.Resample10Msec( |
| 819 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(), | 833 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(), |
| 820 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, | 834 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, |
| 821 dest_ptr_audio); | 835 dest_ptr_audio); |
| 822 | 836 |
| 823 if (samples_per_channel < 0) { | 837 if (samples_per_channel < 0) { |
| 824 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, | 838 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, |
| 825 "Cannot add 10 ms audio, resampling failed"); | 839 "Cannot add 10 ms audio, resampling failed"); |
| 826 return -1; | 840 return -1; |
| (...skipping 536 matching lines...) |
| 1363 // Checks the validity of the parameters of the given codec. | 1377 // Checks the validity of the parameters of the given codec. |
| 1364 bool AudioCodingModule::IsCodecValid(const CodecInst& codec) { | 1378 bool AudioCodingModule::IsCodecValid(const CodecInst& codec) { |
| 1365 bool valid = acm2::RentACodec::IsCodecValid(codec); | 1379 bool valid = acm2::RentACodec::IsCodecValid(codec); |
| 1366 if (!valid) | 1380 if (!valid) |
| 1367 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, | 1381 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, |
| 1368 "Invalid codec setting"); | 1382 "Invalid codec setting"); |
| 1369 return valid; | 1383 return valid; |
| 1370 } | 1384 } |
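
For context on how callers typically use this check, a hypothetical caller-side sketch; the include paths, payload type, and rate values are my illustrative assumptions for this era of the API, not taken from this CL:

    #include <cstring>

    #include "webrtc/common_types.h"  // CodecInst (assumed location).
    #include "webrtc/modules/audio_coding/include/audio_coding_module.h"

    bool ConfigureOpus() {
      webrtc::CodecInst codec;
      codec.pltype = 111;  // Dynamic payload type (illustrative).
      std::strncpy(codec.plname, "opus", sizeof(codec.plname));
      codec.plfreq = 48000;  // Opus uses a 48 kHz RTP clock.
      codec.pacsize = 960;   // 20 ms at 48 kHz.
      codec.channels = 2;
      codec.rate = 64000;    // Target bitrate in bits/s (illustrative).
      // Validate before applying the configuration anywhere.
      return webrtc::AudioCodingModule::IsCodecValid(codec);
    }
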
| 1371 | 1385 |
| 1372 } // namespace webrtc | 1386 } // namespace webrtc |
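
Closing note on the data_ -> data()/mutable_data() migration running through this CL: the point of the accessor split is that a muted AudioFrame never needs its payload touched on the read path. A simplified sketch of my reading of that pattern (not the actual AudioFrame implementation):

    #include <cstdint>
    #include <cstring>

    class FrameSketch {
     public:
      // Same bound as AudioFrame::kMaxDataSizeSamples.
      static constexpr size_t kMaxSamples = 3840;

      // Read path: a muted frame hands out a shared all-zero buffer, so
      // readers like DownMix/UpMix can also branch on muted() and skip
      // per-sample work entirely.
      const int16_t* data() const { return muted_ ? zero_buffer_ : data_; }

      // Write path: taking a mutable pointer un-mutes the frame, zeroing
      // the payload first so stale samples never leak out.
      int16_t* mutable_data() {
        if (muted_) {
          std::memset(data_, 0, sizeof(data_));
          muted_ = false;
        }
        return data_;
      }

      void Mute() { muted_ = true; }
      bool muted() const { return muted_; }

     private:
      static const int16_t zero_buffer_[kMaxSamples];
      int16_t data_[kMaxSamples] = {};
      bool muted_ = true;
    };

    const int16_t FrameSketch::zero_buffer_[FrameSketch::kMaxSamples] = {};
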