OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 3318 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3329 { | 3329 { |
3330 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), | 3330 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), |
3331 "Channel::Demultiplex()"); | 3331 "Channel::Demultiplex()"); |
3332 _audioFrame.CopyFrom(audioFrame); | 3332 _audioFrame.CopyFrom(audioFrame); |
3333 _audioFrame.id_ = _channelId; | 3333 _audioFrame.id_ = _channelId; |
3334 return 0; | 3334 return 0; |
3335 } | 3335 } |
3336 | 3336 |
3337 void Channel::Demultiplex(const int16_t* audio_data, | 3337 void Channel::Demultiplex(const int16_t* audio_data, |
3338 int sample_rate, | 3338 int sample_rate, |
3339 int number_of_frames, | 3339 size_t number_of_frames, |
3340 int number_of_channels) { | 3340 int number_of_channels) { |
pbos-webrtc
2015/07/14 08:15:10
Make number_of_channels size_t as well?
| |
3341 CodecInst codec; | 3341 CodecInst codec; |
3342 GetSendCodec(codec); | 3342 GetSendCodec(codec); |
3343 | 3343 |
3344 if (!mono_recording_audio_.get()) { | 3344 if (!mono_recording_audio_.get()) { |
3345 // Temporary space for DownConvertToCodecFormat. | 3345 // Temporary space for DownConvertToCodecFormat. |
3346 mono_recording_audio_.reset(new int16_t[kMaxMonoDataSizeSamples]); | 3346 mono_recording_audio_.reset(new int16_t[kMaxMonoDataSizeSamples]); |
3347 } | 3347 } |
3348 DownConvertToCodecFormat(audio_data, | 3348 DownConvertToCodecFormat(audio_data, |
3349 number_of_frames, | 3349 number_of_frames, |
3350 number_of_channels, | 3350 number_of_channels, |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3391 (int16_t*)_audioFrame.data_, | 3391 (int16_t*)_audioFrame.data_, |
3392 _audioFrame.samples_per_channel_, | 3392 _audioFrame.samples_per_channel_, |
3393 _audioFrame.sample_rate_hz_, | 3393 _audioFrame.sample_rate_hz_, |
3394 isStereo); | 3394 isStereo); |
3395 } | 3395 } |
3396 } | 3396 } |
3397 | 3397 |
3398 InsertInbandDtmfTone(); | 3398 InsertInbandDtmfTone(); |
3399 | 3399 |
3400 if (_includeAudioLevelIndication) { | 3400 if (_includeAudioLevelIndication) { |
3401 int length = _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; | 3401 size_t length = |
3402 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; | |
3402 if (is_muted) { | 3403 if (is_muted) { |
3403 rms_level_.ProcessMuted(length); | 3404 rms_level_.ProcessMuted(length); |
3404 } else { | 3405 } else { |
3405 rms_level_.Process(_audioFrame.data_, length); | 3406 rms_level_.Process(_audioFrame.data_, length); |
3406 } | 3407 } |
3407 } | 3408 } |
3408 | 3409 |
3409 return 0; | 3410 return 0; |
3410 } | 3411 } |
3411 | 3412 |
(...skipping 294 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3706 *rtp_receiver = rtp_receiver_.get(); | 3707 *rtp_receiver = rtp_receiver_.get(); |
3707 return 0; | 3708 return 0; |
3708 } | 3709 } |
3709 | 3710 |
3710 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use | 3711 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use |
3711 // a shared helper. | 3712 // a shared helper. |
3712 int32_t | 3713 int32_t |
3713 Channel::MixOrReplaceAudioWithFile(int mixingFrequency) | 3714 Channel::MixOrReplaceAudioWithFile(int mixingFrequency) |
3714 { | 3715 { |
3715 rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]); | 3716 rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]); |
3716 int fileSamples(0); | 3717 size_t fileSamples(0); |
3717 | 3718 |
3718 { | 3719 { |
3719 CriticalSectionScoped cs(&_fileCritSect); | 3720 CriticalSectionScoped cs(&_fileCritSect); |
3720 | 3721 |
3721 if (_inputFilePlayerPtr == NULL) | 3722 if (_inputFilePlayerPtr == NULL) |
3722 { | 3723 { |
3723 WEBRTC_TRACE(kTraceWarning, kTraceVoice, | 3724 WEBRTC_TRACE(kTraceWarning, kTraceVoice, |
3724 VoEId(_instanceId, _channelId), | 3725 VoEId(_instanceId, _channelId), |
3725 "Channel::MixOrReplaceAudioWithFile() fileplayer" | 3726 "Channel::MixOrReplaceAudioWithFile() fileplayer" |
3726 " doesnt exist"); | 3727 " doesnt exist"); |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3776 return 0; | 3777 return 0; |
3777 } | 3778 } |
3778 | 3779 |
3779 int32_t | 3780 int32_t |
3780 Channel::MixAudioWithFile(AudioFrame& audioFrame, | 3781 Channel::MixAudioWithFile(AudioFrame& audioFrame, |
3781 int mixingFrequency) | 3782 int mixingFrequency) |
3782 { | 3783 { |
3783 assert(mixingFrequency <= 48000); | 3784 assert(mixingFrequency <= 48000); |
3784 | 3785 |
3785 rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[960]); | 3786 rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[960]); |
3786 int fileSamples(0); | 3787 size_t fileSamples(0); |
3787 | 3788 |
3788 { | 3789 { |
3789 CriticalSectionScoped cs(&_fileCritSect); | 3790 CriticalSectionScoped cs(&_fileCritSect); |
3790 | 3791 |
3791 if (_outputFilePlayerPtr == NULL) | 3792 if (_outputFilePlayerPtr == NULL) |
3792 { | 3793 { |
3793 WEBRTC_TRACE(kTraceWarning, kTraceVoice, | 3794 WEBRTC_TRACE(kTraceWarning, kTraceVoice, |
3794 VoEId(_instanceId, _channelId), | 3795 VoEId(_instanceId, _channelId), |
3795 "Channel::MixAudioWithFile() file mixing failed"); | 3796 "Channel::MixAudioWithFile() file mixing failed"); |
3796 return -1; | 3797 return -1; |
(...skipping 17 matching lines...) Expand all Loading... | |
3814 // TODO(xians): Change the code when FilePlayer supports real stereo. | 3815 // TODO(xians): Change the code when FilePlayer supports real stereo. |
3815 MixWithSat(audioFrame.data_, | 3816 MixWithSat(audioFrame.data_, |
3816 audioFrame.num_channels_, | 3817 audioFrame.num_channels_, |
3817 fileBuffer.get(), | 3818 fileBuffer.get(), |
3818 1, | 3819 1, |
3819 fileSamples); | 3820 fileSamples); |
3820 } | 3821 } |
3821 else | 3822 else |
3822 { | 3823 { |
3823 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId), | 3824 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId), |
3824 "Channel::MixAudioWithFile() samples_per_channel_(%d) != " | 3825 "Channel::MixAudioWithFile() samples_per_channel_(%" PRIuS ") != " |
henrika_webrtc
2015/07/14 07:54:09
Have not used this macro before. What does it expand to?
| |
3825 "fileSamples(%d)", | 3826 "fileSamples(%" PRIuS ")", |
3826 audioFrame.samples_per_channel_, fileSamples); | 3827 audioFrame.samples_per_channel_, fileSamples); |
3827 return -1; | 3828 return -1; |
3828 } | 3829 } |
3829 | 3830 |
3830 return 0; | 3831 return 0; |
3831 } | 3832 } |
3832 | 3833 |
3833 int | 3834 int |
3834 Channel::InsertInbandDtmfTone() | 3835 Channel::InsertInbandDtmfTone() |
3835 { | 3836 { |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3875 // Get 10ms tone segment and set time since last tone to zero | 3876 // Get 10ms tone segment and set time since last tone to zero |
3876 if (_inbandDtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1) | 3877 if (_inbandDtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1) |
3877 { | 3878 { |
3878 WEBRTC_TRACE(kTraceWarning, kTraceVoice, | 3879 WEBRTC_TRACE(kTraceWarning, kTraceVoice, |
3879 VoEId(_instanceId, _channelId), | 3880 VoEId(_instanceId, _channelId), |
3880 "Channel::EncodeAndSend() inserting Dtmf failed"); | 3881 "Channel::EncodeAndSend() inserting Dtmf failed"); |
3881 return -1; | 3882 return -1; |
3882 } | 3883 } |
3883 | 3884 |
3884 // Replace mixed audio with DTMF tone. | 3885 // Replace mixed audio with DTMF tone. |
3885 for (int sample = 0; | 3886 for (size_t sample = 0; |
3886 sample < _audioFrame.samples_per_channel_; | 3887 sample < _audioFrame.samples_per_channel_; |
3887 sample++) | 3888 sample++) |
3888 { | 3889 { |
3889 for (int channel = 0; | 3890 for (int channel = 0; |
3890 channel < _audioFrame.num_channels_; | 3891 channel < _audioFrame.num_channels_; |
3891 channel++) | 3892 channel++) |
3892 { | 3893 { |
3893 const int index = sample * _audioFrame.num_channels_ + channel; | 3894 const size_t index = |
3895 sample * _audioFrame.num_channels_ + channel; | |
3894 _audioFrame.data_[index] = toneBuffer[sample]; | 3896 _audioFrame.data_[index] = toneBuffer[sample]; |
3895 } | 3897 } |
3896 } | 3898 } |
3897 | 3899 |
3898 assert(_audioFrame.samples_per_channel_ == toneSamples); | 3900 assert(_audioFrame.samples_per_channel_ == toneSamples); |
3899 } else | 3901 } else |
3900 { | 3902 { |
3901 // Add 10ms to "delay-since-last-tone" counter | 3903 // Add 10ms to "delay-since-last-tone" counter |
3902 _inbandDtmfGenerator.UpdateDelaySinceLastTone(); | 3904 _inbandDtmfGenerator.UpdateDelaySinceLastTone(); |
3903 } | 3905 } |
(...skipping 227 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4131 int64_t min_rtt = 0; | 4133 int64_t min_rtt = 0; |
4132 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) | 4134 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) |
4133 != 0) { | 4135 != 0) { |
4134 return 0; | 4136 return 0; |
4135 } | 4137 } |
4136 return rtt; | 4138 return rtt; |
4137 } | 4139 } |
4138 | 4140 |
4139 } // namespace voe | 4141 } // namespace voe |
4140 } // namespace webrtc | 4142 } // namespace webrtc |
OLD | NEW |