| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 314 matching lines...) | |
| 325 } | 325 } |
| 326 | 326 |
| 327 bool IsCodecCN(const CodecInst& codec) { | 327 bool IsCodecCN(const CodecInst& codec) { |
| 328 return (STR_CASE_CMP(codec.plname, "CN") == 0); | 328 return (STR_CASE_CMP(codec.plname, "CN") == 0); |
| 329 } | 329 } |
| 330 | 330 |
| 331 // Stereo-to-mono can be used as in-place. | 331 // Stereo-to-mono can be used as in-place. |
| 332 int DownMix(const AudioFrame& frame, | 332 int DownMix(const AudioFrame& frame, |
| 333 size_t length_out_buff, | 333 size_t length_out_buff, |
| 334 int16_t* out_buff) { | 334 int16_t* out_buff) { |
| 335 if (length_out_buff < frame.samples_per_channel_) { | 335 if (frame.num_channels_ != 2 || |
| 336 length_out_buff < frame.samples_per_channel_) { |
| 336 return -1; | 337 return -1; |
| 337 } | 338 } |
| 338 for (size_t n = 0; n < frame.samples_per_channel_; ++n) | 339 if (!frame.muted()) { |
| 339 out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1; | 340 const int16_t* frame_data = frame.data(); |
| 341 for (size_t n = 0; n < frame.samples_per_channel_; ++n) |
| 342 out_buff[n] = (frame_data[2 * n] + frame_data[2 * n + 1]) >> 1; |
| 343 } else { |
| 344 memset(out_buff, 0, sizeof(int16_t) * frame.samples_per_channel_); |
| 345 } |
| 340 return 0; | 346 return 0; |
| 341 } | 347 } |
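For reference, the averaging in DownMix halves each interleaved L/R pair into one mono sample; the int16_t operands promote to int, so the sum cannot overflow before the shift. A minimal standalone sketch of the same arithmetic (DownMixInterleaved is a hypothetical name, not the WebRTC API):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Averages each interleaved stereo pair into one mono sample.
void DownMixInterleaved(const int16_t* in, size_t samples_per_channel,
                        int16_t* out) {
  for (size_t n = 0; n < samples_per_channel; ++n)
    out[n] = (in[2 * n] + in[2 * n + 1]) >> 1;
}

int main() {
  const int16_t stereo[] = {100, 200, -50, 50};  // Two interleaved L/R pairs.
  int16_t mono[2];
  DownMixInterleaved(stereo, 2, mono);
  printf("%d %d\n", mono[0], mono[1]);  // Prints "150 0".
  return 0;
}
```

Note also that memset counts bytes, not samples, which is why the muted branch scales the sample count by sizeof(int16_t).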
| 342 | 348 |
| 343 // Mono-to-stereo can be used as in-place. | 349 // Mono-to-stereo can be used as in-place. |
| 344 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) { | 350 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) { |
| 345 if (length_out_buff < frame.samples_per_channel_) { | 351 if (frame.num_channels_ != 1 || |
| 352 length_out_buff < 2 * frame.samples_per_channel_) { |
| 346 return -1; | 353 return -1; |
| 347 } | 354 } |
| 348 for (size_t n = frame.samples_per_channel_; n != 0; --n) { | 355 if (!frame.muted()) { |
| 349 size_t i = n - 1; | 356 const int16_t* frame_data = frame.data(); |
| 350 int16_t sample = frame.data_[i]; | 357 for (size_t n = frame.samples_per_channel_; n != 0; --n) { |
| 351 out_buff[2 * i + 1] = sample; | 358 size_t i = n - 1; |
| 352 out_buff[2 * i] = sample; | 359 int16_t sample = frame_data[i]; |
| 360 out_buff[2 * i + 1] = sample; |
| 361 out_buff[2 * i] = sample; |
| 362 } |
| 363 } else { |
| 364 memset(out_buff, 0, sizeof(int16_t) * 2 * frame.samples_per_channel_); |
| 353 } | 365 } |
| 354 return 0; | 366 return 0; |
| 355 } | 367 } |
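The backward loop in UpMix is what makes the conversion safe in-place: the stereo pair for sample i is written only after every mono sample beyond i has already been read, so the output never clobbers unread input. A self-contained sketch of that idea (UpMixInPlace is a hypothetical name; the buffer is assumed to have room for 2 * samples_per_channel samples):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Duplicates mono samples into interleaved stereo, in place, back to front.
void UpMixInPlace(int16_t* buff, size_t samples_per_channel) {
  for (size_t n = samples_per_channel; n != 0; --n) {
    const size_t i = n - 1;
    const int16_t sample = buff[i];
    buff[2 * i + 1] = sample;
    buff[2 * i] = sample;
  }
}

int main() {
  int16_t buff[6] = {10, 20, 30, 0, 0, 0};  // Three mono samples, room for six.
  UpMixInPlace(buff, 3);
  for (int16_t s : buff) printf("%d ", s);  // Prints "10 10 20 20 30 30".
  return 0;
}
```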
| 356 | 368 |
| 357 void ConvertEncodedInfoToFragmentationHeader( | 369 void ConvertEncodedInfoToFragmentationHeader( |
| 358 const AudioEncoder::EncodedInfo& info, | 370 const AudioEncoder::EncodedInfo& info, |
| 359 RTPFragmentationHeader* frag) { | 371 RTPFragmentationHeader* frag) { |
| 360 if (info.redundant.empty()) { | 372 if (info.redundant.empty()) { |
| 361 frag->fragmentationVectorSize = 0; | 373 frag->fragmentationVectorSize = 0; |
| 362 return; | 374 return; |
| (...skipping 362 matching lines...) | |
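The rest of ConvertEncodedInfoToFragmentationHeader is elided above; conceptually, redundant encodings are laid out back to back in the payload, so each fragment's offset is the running sum of the preceding lengths. A sketch of that bookkeeping, using hypothetical plain structs in place of AudioEncoder::EncodedInfo and RTPFragmentationHeader:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

struct Redundant { size_t encoded_bytes; };  // Stand-in for a redundant block.
struct FragHeader {                          // Stand-in for the RTP header.
  std::vector<size_t> offset;
  std::vector<size_t> length;
};

// Fragments sit back to back, so offsets accumulate the preceding lengths.
void FillFragments(const std::vector<Redundant>& redundant, FragHeader* frag) {
  size_t offset = 0;
  for (const Redundant& r : redundant) {
    frag->offset.push_back(offset);
    frag->length.push_back(r.encoded_bytes);
    offset += r.encoded_bytes;
  }
}

int main() {
  FragHeader frag;
  FillFragments({{40}, {25}, {25}}, &frag);
  for (size_t i = 0; i < frag.offset.size(); ++i)
    printf("fragment %zu: offset=%zu length=%zu\n",
           i, frag.offset[i], frag.length[i]);
  return 0;
}
```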
| 725 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 737 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 726 return -1; | 738 return -1; |
| 727 } else { | 739 } else { |
| 728 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 740 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 729 return -1; | 741 return -1; |
| 730 } | 742 } |
| 731 } | 743 } |
| 732 | 744 |
| 733 // When adding data to encoders, this pointer points to an audio buffer | 745 // When adding data to encoders, this pointer points to an audio buffer |
| 734 // with the correct number of channels. | 746 // with the correct number of channels. |
| 735 const int16_t* ptr_audio = ptr_frame->data_; | 747 const int16_t* ptr_audio = ptr_frame->data(); |
| 736 | 748 |
| 737 // For pushing data to primary, point |ptr_audio| to the correct buffer. | 749 // For pushing data to primary, point |ptr_audio| to the correct buffer. |
| 738 if (!same_num_channels) | 750 if (!same_num_channels) |
| 739 ptr_audio = input_data->buffer; | 751 ptr_audio = input_data->buffer; |
| 740 | 752 |
| 753 // TODO(yujo): Skip encoding of muted frames. |
| 741 input_data->input_timestamp = ptr_frame->timestamp_; | 754 input_data->input_timestamp = ptr_frame->timestamp_; |
| 742 input_data->audio = ptr_audio; | 755 input_data->audio = ptr_audio; |
| 743 input_data->length_per_channel = ptr_frame->samples_per_channel_; | 756 input_data->length_per_channel = ptr_frame->samples_per_channel_; |
| 744 input_data->audio_channel = current_num_channels; | 757 input_data->audio_channel = current_num_channels; |
| 745 | 758 |
| 746 return 0; | 759 return 0; |
| 747 } | 760 } |
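The switch from the raw data_ member to data() in this function (and to mutable_data() in the preprocessing below) follows the AudioFrame muting API this change builds on: data() on a muted frame reads as silence without touching the backing buffer, while mutable_data() clears the buffer and unmutes. A condensed sketch of that pattern (MutedFrame is a hypothetical stand-in, not the real webrtc::AudioFrame):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

class MutedFrame {
 public:
  static const size_t kMaxSamples = 480;

  // Muted frames read as silence from a shared zero buffer; nothing is
  // allocated or cleared on the read path.
  const int16_t* data() const {
    static const int16_t kZeroes[kMaxSamples] = {0};
    return muted_ ? kZeroes : data_;
  }

  // Writing unmutes: the backing buffer is zeroed once so the caller starts
  // from silence.
  int16_t* mutable_data() {
    if (muted_) {
      memset(data_, 0, sizeof(data_));
      muted_ = false;
    }
    return data_;
  }

  bool muted() const { return muted_; }

 private:
  int16_t data_[kMaxSamples];
  bool muted_ = true;
};

int main() {
  MutedFrame frame;
  const bool silent = frame.muted() && frame.data()[0] == 0;
  frame.mutable_data()[0] = 123;  // First write unmutes the frame.
  return silent && !frame.muted() ? 0 : 1;
}
```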
| 748 | 761 |
| 749 // Perform resampling and down-mixing if required. We down-mix only if | 762 // Perform resampling and down-mixing if required. We down-mix only if |
| 750 // the encoder is mono and the input is stereo. In case of dual-streaming, | 763 // the encoder is mono and the input is stereo. In case of dual-streaming, |
| 751 // both encoders have to be mono for the down-mix to take place. | 764 // both encoders have to be mono for the down-mix to take place. |
| 752 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing | 765 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing |
| 753 // is required, |*ptr_out| points to |in_frame|. | 766 // is required, |*ptr_out| points to |in_frame|. |
| 767 // TODO(yujo): Make this more efficient for muted frames. |
| 754 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, | 768 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, |
| 755 const AudioFrame** ptr_out) { | 769 const AudioFrame** ptr_out) { |
| 756 const bool resample = | 770 const bool resample = |
| 757 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz(); | 771 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz(); |
| 758 | 772 |
| 759 // This variable is true if the primary codec and the secondary codec (if | 773 // This variable is true if the primary codec and the secondary codec (if |
| 760 // it exists) are both mono and the input is stereo. | 774 // it exists) are both mono and the input is stereo. |
| 761 // TODO(henrik.lundin): This condition should probably be | 775 // TODO(henrik.lundin): This condition should probably be |
| 762 // in_frame.num_channels_ > encoder_stack_->NumChannels() | 776 // in_frame.num_channels_ > encoder_stack_->NumChannels() |
| 763 const bool down_mix = | 777 const bool down_mix = |
| (...skipping 29 matching lines...) | |
| 793 } | 807 } |
| 794 | 808 |
| 795 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 809 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 796 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 810 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 797 return 0; | 811 return 0; |
| 798 } | 812 } |
| 799 | 813 |
| 800 *ptr_out = &preprocess_frame_; | 814 *ptr_out = &preprocess_frame_; |
| 801 preprocess_frame_.num_channels_ = in_frame.num_channels_; | 815 preprocess_frame_.num_channels_ = in_frame.num_channels_; |
| 802 int16_t audio[WEBRTC_10MS_PCM_AUDIO]; | 816 int16_t audio[WEBRTC_10MS_PCM_AUDIO]; |
| 803 const int16_t* src_ptr_audio = in_frame.data_; | 817 const int16_t* src_ptr_audio = in_frame.data(); |
| 804 int16_t* dest_ptr_audio = preprocess_frame_.data_; | |
| 805 if (down_mix) { | 818 if (down_mix) { |
| 806 // If resampling is required, the output of the down-mix is written into a | 819 // If resampling is required, the output of the down-mix is written into a |
| 807 // local buffer; otherwise, it is written to the output frame. | 820 // local buffer; otherwise, it is written to the output frame. |
| 808 if (resample) | 821 int16_t* dest_ptr_audio = resample ? |
| 809 dest_ptr_audio = audio; | 822 audio : preprocess_frame_.mutable_data(); |
| 810 if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0) | 823 if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0) |
| 811 return -1; | 824 return -1; |
| 812 preprocess_frame_.num_channels_ = 1; | 825 preprocess_frame_.num_channels_ = 1; |
| 814 // Set the input of the resampler to be the down-mixed signal. | 827 // Set the input of the resampler to be the down-mixed signal. |
| 814 src_ptr_audio = audio; | 827 src_ptr_audio = audio; |
| 815 } | 828 } |
| 816 | 829 |
| 817 preprocess_frame_.timestamp_ = expected_codec_ts_; | 830 preprocess_frame_.timestamp_ = expected_codec_ts_; |
| 818 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; | 831 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; |
| 819 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; | 832 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; |
| 820 // Resample if required. | 833 // Resample if required. |
| 821 if (resample) { | 834 if (resample) { |
| 822 // The result of the resampler is written to the output frame. | 835 // The result of the resampler is written to the output frame. |
| 823 dest_ptr_audio = preprocess_frame_.data_; | 836 int16_t* dest_ptr_audio = preprocess_frame_.mutable_data(); |
| 824 | 837 |
| 825 int samples_per_channel = resampler_.Resample10Msec( | 838 int samples_per_channel = resampler_.Resample10Msec( |
| 826 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(), | 839 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(), |
| 827 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, | 840 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, |
| 828 dest_ptr_audio); | 841 dest_ptr_audio); |
| 829 | 842 |
| 830 if (samples_per_channel < 0) { | 843 if (samples_per_channel < 0) { |
| 831 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, | 844 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, |
| 832 "Cannot add 10 ms audio, resampling failed"); | 845 "Cannot add 10 ms audio, resampling failed"); |
| 833 return -1; | 846 return -1; |
| (...skipping 542 matching lines...) | |
| 1376 // Checks the validity of the parameters of the given codec. | 1389 // Checks the validity of the parameters of the given codec. |
| 1377 bool AudioCodingModule::IsCodecValid(const CodecInst& codec) { | 1390 bool AudioCodingModule::IsCodecValid(const CodecInst& codec) { |
| 1378 bool valid = acm2::RentACodec::IsCodecValid(codec); | 1391 bool valid = acm2::RentACodec::IsCodecValid(codec); |
| 1379 if (!valid) | 1392 if (!valid) |
| 1380 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, | 1393 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, |
| 1381 "Invalid codec setting"); | 1394 "Invalid codec setting"); |
| 1382 return valid; | 1395 return valid; |
| 1383 } | 1396 } |
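A hedged usage sketch of the validity check, assuming the era's static lookup helpers on AudioCodingModule (the Codec() signature and header path are assumptions from the surrounding API, not shown in this diff):

```cpp
#include "webrtc/modules/audio_coding/include/audio_coding_module.h"

// Looks up Opus by name, then gates registration on IsCodecValid().
bool RegisterOpus(webrtc::AudioCodingModule* acm) {
  webrtc::CodecInst codec;
  if (webrtc::AudioCodingModule::Codec("opus", &codec, 48000, 2) != 0)
    return false;  // Unknown codec / unsupported rate or channel count.
  if (!webrtc::AudioCodingModule::IsCodecValid(codec))
    return false;  // Logs "Invalid codec setting" via WEBRTC_TRACE.
  return acm->RegisterSendCodec(codec) == 0;
}
```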
| 1384 | 1397 |
| 1385 } // namespace webrtc | 1398 } // namespace webrtc |