Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 317 matching lines...) | |
| 328 return (STR_CASE_CMP(codec.plname, "CN") == 0); | 328 return (STR_CASE_CMP(codec.plname, "CN") == 0); |
| 329 } | 329 } |
| 330 | 330 |
| 331 // Stereo-to-mono can be used as in-place. | 331 // Stereo-to-mono can be used as in-place. |
| 332 int DownMix(const AudioFrame& frame, | 332 int DownMix(const AudioFrame& frame, |
| 333 size_t length_out_buff, | 333 size_t length_out_buff, |
| 334 int16_t* out_buff) { | 334 int16_t* out_buff) { |
| 335 if (length_out_buff < frame.samples_per_channel_) { | 335 if (length_out_buff < frame.samples_per_channel_) { |
| 336 return -1; | 336 return -1; |
| 337 } | 337 } |
| 338 for (size_t n = 0; n < frame.samples_per_channel_; ++n) | 338 if (!frame.muted()) { |
| 339 out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1; | 339 const int16_t* frame_data = frame.data(); |
| 340 for (size_t n = 0; n < frame.samples_per_channel_; ++n) | |
| 341 out_buff[n] = (frame_data[2 * n] + frame_data[2 * n + 1]) >> 1; | |
| 342 } else { | |
| 343 memset(out_buff, 0, 2 * frame.samples_per_channel_); | |
| 344 } | |
| 340 return 0; | 345 return 0; |
| 341 } | 346 } |
| 342 | 347 |
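
The new muted branch above short-circuits the averaging; the non-muted path is unchanged. As a standalone sketch of that arithmetic (plain pointers instead of webrtc::AudioFrame, hypothetical helper name):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical standalone version of the stereo-to-mono averaging above.
// Interleaved stereo [L0, R0, L1, R1, ...] becomes mono by averaging each
// L/R pair; the int16_t operands promote to int, so the sum cannot overflow.
void DownMixInterleaved(const int16_t* stereo_in,
                        size_t samples_per_channel,
                        int16_t* mono_out) {
  for (size_t n = 0; n < samples_per_channel; ++n) {
    // Slot n is written only after slots 2n and 2n+1 are read, and 2n >= n,
    // so mono_out may alias stereo_in, the in-place use the comment before
    // DownMix promises.
    mono_out[n] = static_cast<int16_t>(
        (static_cast<int>(stereo_in[2 * n]) + stereo_in[2 * n + 1]) >> 1);
  }
}
```
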
| 343 // Mono-to-stereo can be used as in-place. | 348 // Mono-to-stereo can be used as in-place. |
| 344 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) { | 349 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) { |
|
hlundin-webrtc
2017/03/16 14:47:48
Add special case for frame.muted() here too, or dr…
yujo
2017/03/16 23:37:21
Done. Also noticed my bug above...
| |
| 345 if (length_out_buff < frame.samples_per_channel_) { | 350 if (length_out_buff < frame.samples_per_channel_) { |
| 346 return -1; | 351 return -1; |
| 347 } | 352 } |
| 353 const int16_t* frame_data = frame.data(); | |
| 348 for (size_t n = frame.samples_per_channel_; n != 0; --n) { | 354 for (size_t n = frame.samples_per_channel_; n != 0; --n) { |
| 349 size_t i = n - 1; | 355 size_t i = n - 1; |
| 350 int16_t sample = frame.data_[i]; | 356 int16_t sample = frame_data[i]; |
| 351 out_buff[2 * i + 1] = sample; | 357 out_buff[2 * i + 1] = sample; |
| 352 out_buff[2 * i] = sample; | 358 out_buff[2 * i] = sample; |
| 353 } | 359 } |
| 354 return 0; | 360 return 0; |
| 355 } | 361 } |
| 356 | 362 |
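
The reverse loop is what makes the in-place claim in the comment hold: duplication starts at the last sample, so slot i is read before slots 2i and 2i+1 are written. A standalone sketch (hypothetical name, plain pointers):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical standalone version of the mono-to-stereo expansion above.
// Mono [s0, s1, ...] becomes interleaved stereo [s0, s0, s1, s1, ...].
void UpMixInterleaved(const int16_t* mono_in,
                      size_t samples_per_channel,
                      int16_t* stereo_out) {
  // Iterate backwards: the writes to slots 2i and 2i+1 (both >= i) touch
  // only input slots that have already been read, so stereo_out may alias
  // mono_in.
  for (size_t n = samples_per_channel; n != 0; --n) {
    const size_t i = n - 1;
    const int16_t sample = mono_in[i];
    stereo_out[2 * i + 1] = sample;
    stereo_out[2 * i] = sample;
  }
}
```
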
| 357 void ConvertEncodedInfoToFragmentationHeader( | 363 void ConvertEncodedInfoToFragmentationHeader( |
| 358 const AudioEncoder::EncodedInfo& info, | 364 const AudioEncoder::EncodedInfo& info, |
| 359 RTPFragmentationHeader* frag) { | 365 RTPFragmentationHeader* frag) { |
| 360 if (info.redundant.empty()) { | 366 if (info.redundant.empty()) { |
| (...skipping 364 matching lines...) | |
| 725 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 731 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 726 return -1; | 732 return -1; |
| 727 } else { | 733 } else { |
| 728 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 734 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 729 return -1; | 735 return -1; |
| 730 } | 736 } |
| 731 } | 737 } |
| 732 | 738 |
| 733 // When adding data to encoders, this pointer points to an audio buffer | 739 // When adding data to encoders, this pointer points to an audio buffer |
| 734 // with the correct number of channels. | 740 // with the correct number of channels. |
| 735 const int16_t* ptr_audio = ptr_frame->data_; | 741 const int16_t* ptr_audio = ptr_frame->data(); |
| 736 | 742 |
| 737 // For pushing data to primary, point |ptr_audio| to the correct buffer. | 743 // For pushing data to primary, point |ptr_audio| to the correct buffer. |
| 738 if (!same_num_channels) | 744 if (!same_num_channels) |
| 739 ptr_audio = input_data->buffer; | 745 ptr_audio = input_data->buffer; |
| 740 | 746 |
| 747 // TODO(yujo): Skip encode of muted frames. | |
|
hlundin-webrtc
2017/03/16 14:47:48
See above: I don't think we will have muted input
yujo
2017/03/16 23:37:21
Muted input is actually my motivation for this whole…
hlundin-webrtc
2017/03/17 14:29:38
Oh! Then I see. I thought you were only in it to sh…
| |
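
The thread above captures the point of the CL: a frame can be marked muted instead of carrying a zeroed payload, and direct data_ reads become data() or mutable_data() calls. A simplified sketch of that accessor pattern (hypothetical class, not the real webrtc::AudioFrame):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical miniature of the muted-frame accessors.
class FrameSketch {
 public:
  static const size_t kMaxSamples = 3840;  // illustrative capacity

  // Read access: a muted frame serves a shared zero buffer, so the read
  // path needs no per-frame zero-fill.
  const int16_t* data() const {
    static const int16_t kZeros[kMaxSamples] = {};
    return muted_ ? kZeros : data_;
  }

  // Write access un-mutes: the buffer is zero-filled once so callers can
  // mix into or overwrite it.
  int16_t* mutable_data() {
    if (muted_) {
      std::memset(data_, 0, sizeof(data_));
      muted_ = false;
    }
    return data_;
  }

  bool muted() const { return muted_; }

 private:
  int16_t data_[kMaxSamples];
  bool muted_ = true;
};
```
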
| 741 input_data->input_timestamp = ptr_frame->timestamp_; | 748 input_data->input_timestamp = ptr_frame->timestamp_; |
| 742 input_data->audio = ptr_audio; | 749 input_data->audio = ptr_audio; |
| 743 input_data->length_per_channel = ptr_frame->samples_per_channel_; | 750 input_data->length_per_channel = ptr_frame->samples_per_channel_; |
| 744 input_data->audio_channel = current_num_channels; | 751 input_data->audio_channel = current_num_channels; |
| 745 | 752 |
| 746 return 0; | 753 return 0; |
| 747 } | 754 } |
| 748 | 755 |
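
Around the elided call sites, the remix decision reduces to a channel-count comparison, routing through the scratch buffer only on mismatch. An illustrative condensation (hypothetical free function; the real checks are inline, and only mono and stereo occur here):

```cpp
// Hypothetical condensation of the dispatch above, reusing the UpMix and
// DownMix helpers defined earlier in this file. On a channel-count match
// the frame's own samples go straight to the encoder; otherwise the
// remixed samples land in |scratch|.
int RemixForEncoder(const AudioFrame& frame,
                    size_t encoder_channels,
                    size_t scratch_len,
                    int16_t* scratch,
                    const int16_t** audio_out) {
  if (frame.num_channels_ == encoder_channels) {
    *audio_out = frame.data();
    return 0;
  }
  const int rc = (frame.num_channels_ < encoder_channels)
                     ? UpMix(frame, scratch_len, scratch)
                     : DownMix(frame, scratch_len, scratch);
  if (rc < 0)
    return -1;
  *audio_out = scratch;
  return 0;
}
```
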
| 749 // Perform a resampling and down-mix if required. We down-mix only if | 756 // Perform a resampling and down-mix if required. We down-mix only if |
| 750 // the encoder is mono and the input is stereo. In case of dual-streaming, both | 757 // the encoder is mono and the input is stereo. In case of dual-streaming, both |
| 751 // encoders have to be mono for the down-mix to take place. | 758 // encoders have to be mono for the down-mix to take place. |
| 752 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing | 759 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing |
| 753 // is required, |*ptr_out| points to |in_frame|. | 760 // is required, |*ptr_out| points to |in_frame|. |
| 761 // TODO(yujo): Make this more efficient for muted frames. | |
| 754 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, | 762 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, |
| 755 const AudioFrame** ptr_out) { | 763 const AudioFrame** ptr_out) { |
| 756 const bool resample = | 764 const bool resample = |
| 757 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz(); | 765 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz(); |
| 758 | 766 |
| 759 // This variable is true if the primary and secondary codec (if it exists) | 767 // This variable is true if the primary and secondary codec (if it exists) |
| 760 // are both mono and the input is stereo. | 768 // are both mono and the input is stereo. |
| 761 // TODO(henrik.lundin): This condition should probably be | 769 // TODO(henrik.lundin): This condition should probably be |
| 762 // in_frame.num_channels_ > encoder_stack_->NumChannels() | 770 // in_frame.num_channels_ > encoder_stack_->NumChannels() |
| 763 const bool down_mix = | 771 const bool down_mix = |
| (...skipping 29 matching lines...) | |
| 793 } | 801 } |
| 794 | 802 |
| 795 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 803 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 796 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 804 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 797 return 0; | 805 return 0; |
| 798 } | 806 } |
| 799 | 807 |
| 800 *ptr_out = &preprocess_frame_; | 808 *ptr_out = &preprocess_frame_; |
| 801 preprocess_frame_.num_channels_ = in_frame.num_channels_; | 809 preprocess_frame_.num_channels_ = in_frame.num_channels_; |
| 802 int16_t audio[WEBRTC_10MS_PCM_AUDIO]; | 810 int16_t audio[WEBRTC_10MS_PCM_AUDIO]; |
| 803 const int16_t* src_ptr_audio = in_frame.data_; | 811 const int16_t* src_ptr_audio = in_frame.data(); |
| 804 int16_t* dest_ptr_audio = preprocess_frame_.data_; | |
| 805 if (down_mix) { | 812 if (down_mix) { |
| 806 // If a resampling is required, the output of the down-mix is written into a | 813 // If a resampling is required, the output of the down-mix is written into a |
| 807 // local buffer; otherwise, it is written to the output frame. | 814 // local buffer; otherwise, it is written to the output frame. |
| 808 if (resample) | 815 int16_t* dest_ptr_audio = resample ? |
| 809 dest_ptr_audio = audio; | 816 audio : preprocess_frame_.mutable_data(); |
| 810 if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0) | 817 if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0) |
| 811 return -1; | 818 return -1; |
| 812 preprocess_frame_.num_channels_ = 1; | 819 preprocess_frame_.num_channels_ = 1; |
| 813 // Set the input of the resampler to the down-mixed signal. | 820 // Set the input of the resampler to the down-mixed signal. |
| 814 src_ptr_audio = audio; | 821 src_ptr_audio = audio; |
| 815 } | 822 } |
| 816 | 823 |
| 817 preprocess_frame_.timestamp_ = expected_codec_ts_; | 824 preprocess_frame_.timestamp_ = expected_codec_ts_; |
| 818 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; | 825 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; |
| 819 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; | 826 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; |
| 820 // Resample if it is required. | 827 // Resample if it is required. |
| 821 if (resample) { | 828 if (resample) { |
| 822 // The result of the resampler is written to the output frame. | 829 // The result of the resampler is written to the output frame. |
| 823 dest_ptr_audio = preprocess_frame_.data_; | 830 int16_t* dest_ptr_audio = preprocess_frame_.mutable_data(); |
| 824 | 831 |
| 825 int samples_per_channel = resampler_.Resample10Msec( | 832 int samples_per_channel = resampler_.Resample10Msec( |
| 826 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(), | 833 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(), |
| 827 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, | 834 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, |
| 828 dest_ptr_audio); | 835 dest_ptr_audio); |
| 829 | 836 |
| 830 if (samples_per_channel < 0) { | 837 if (samples_per_channel < 0) { |
| 831 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, | 838 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, |
| 832 "Cannot add 10 ms audio, resampling failed"); | 839 "Cannot add 10 ms audio, resampling failed"); |
| 833 return -1; | 840 return -1; |
| (...skipping 542 matching lines...) | |
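
The visible part of PreprocessToAddData encodes one routing rule worth spelling out: the down-mix output is only an intermediate when a resample follows, so its destination is picked up front. A restatement of that choice (hypothetical helper name):

```cpp
// Hypothetical restatement of the destination choice above: when the
// resampler runs afterwards it writes the final samples into
// preprocess_frame_, so the down-mix targets local scratch instead.
int16_t* DownMixDestination(bool resample_follows,
                            int16_t* local_scratch,
                            AudioFrame* preprocess_frame) {
  return resample_follows ? local_scratch
                          : preprocess_frame->mutable_data();
}
```
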
| 1376 // Checks the validity of the parameters of the given codec. | 1383 // Checks the validity of the parameters of the given codec. |
| 1377 bool AudioCodingModule::IsCodecValid(const CodecInst& codec) { | 1384 bool AudioCodingModule::IsCodecValid(const CodecInst& codec) { |
| 1378 bool valid = acm2::RentACodec::IsCodecValid(codec); | 1385 bool valid = acm2::RentACodec::IsCodecValid(codec); |
| 1379 if (!valid) | 1386 if (!valid) |
| 1380 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, | 1387 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, |
| 1381 "Invalid codec setting"); | 1388 "Invalid codec setting"); |
| 1382 return valid; | 1389 return valid; |
| 1383 } | 1390 } |
| 1384 | 1391 |
| 1385 } // namespace webrtc | 1392 } // namespace webrtc |