| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 642 matching lines...) |
| 653 ChannelState::State state = channel_state_.Get(); | 653 ChannelState::State state = channel_state_.Get(); |
| 654 | 654 |
| 655 { | 655 { |
| 656 // Pass the audio buffers to an optional sink callback, before applying | 656 // Pass the audio buffers to an optional sink callback, before applying |
| 657 // scaling/panning, as that applies to the mix operation. | 657 // scaling/panning, as that applies to the mix operation. |
| 658 // External recipients of the audio (e.g. via AudioTrack), will do their | 658 // External recipients of the audio (e.g. via AudioTrack), will do their |
| 659 // own mixing/dynamic processing. | 659 // own mixing/dynamic processing. |
| 660 rtc::CritScope cs(&_callbackCritSect); | 660 rtc::CritScope cs(&_callbackCritSect); |
| 661 if (audio_sink_) { | 661 if (audio_sink_) { |
| 662 AudioSinkInterface::Data data( | 662 AudioSinkInterface::Data data( |
| 663 &audioFrame->data_[0], audioFrame->samples_per_channel_, | 663 audioFrame->data(), audioFrame->samples_per_channel_, |
| 664 audioFrame->sample_rate_hz_, audioFrame->num_channels_, | 664 audioFrame->sample_rate_hz_, audioFrame->num_channels_, |
| 665 audioFrame->timestamp_); | 665 audioFrame->timestamp_); |
| 666 audio_sink_->OnData(data); | 666 audio_sink_->OnData(data); |
| 667 } | 667 } |
| 668 } | 668 } |
| 669 | 669 |
| 670 float output_gain = 1.0f; | 670 float output_gain = 1.0f; |
| 671 { | 671 { |
| 672 rtc::CritScope cs(&volume_settings_critsect_); | 672 rtc::CritScope cs(&volume_settings_critsect_); |
| 673 output_gain = _outputGain; | 673 output_gain = _outputGain; |
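
The hunk above migrates the sink callback from raw `data_[0]` access to the const `data()` accessor. Below is a minimal sketch of the accessor split this diff relies on; the class body, constant value, and `main()` usage are illustrative assumptions, not the actual WebRTC `AudioFrame` definition.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Sketch: an AudioFrame-like type that hides its sample buffer behind a
// const read accessor and an explicit mutable accessor.
class AudioFrameSketch {
 public:
  static const size_t kMaxDataSizeSamples = 3840;  // assumed capacity
  static const size_t kMaxDataSizeBytes =
      kMaxDataSizeSamples * sizeof(int16_t);

  // Read-only path, e.g. for handing samples to an audio sink.
  const int16_t* data() const { return data_; }
  // Write path, e.g. for in-place mixing or muting.
  int16_t* mutable_data() { return data_; }

  size_t samples_per_channel_ = 480;
  size_t num_channels_ = 1;

 private:
  int16_t data_[kMaxDataSizeSamples] = {};
};

int main() {
  AudioFrameSketch frame;
  frame.mutable_data()[0] = 100;             // producer writes samples
  const AudioFrameSketch& sink_view = frame;
  std::cout << sink_view.data()[0] << "\n";  // consumer reads via data()
  return 0;
}
```

Read-only consumers such as the sink callback go through `data()`, which keeps the write path (`mutable_data()`) easy to audit.
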
| (...skipping 2105 matching lines...) |
| 2779 if (channel_state_.Get().input_file_playing) { | 2779 if (channel_state_.Get().input_file_playing) { |
| 2780 MixOrReplaceAudioWithFile(audio_input); | 2780 MixOrReplaceAudioWithFile(audio_input); |
| 2781 } | 2781 } |
| 2782 | 2782 |
| 2783 bool is_muted = InputMute(); | 2783 bool is_muted = InputMute(); |
| 2784 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); | 2784 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); |
| 2785 | 2785 |
| 2786 if (_includeAudioLevelIndication) { | 2786 if (_includeAudioLevelIndication) { |
| 2787 size_t length = | 2787 size_t length = |
| 2788 audio_input->samples_per_channel_ * audio_input->num_channels_; | 2788 audio_input->samples_per_channel_ * audio_input->num_channels_; |
| 2789 RTC_CHECK_LE(length, sizeof(audio_input->data_)); | 2789 RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes); |
| 2790 if (is_muted && previous_frame_muted_) { | 2790 if (is_muted && previous_frame_muted_) { |
| 2791 rms_level_.AnalyzeMuted(length); | 2791 rms_level_.AnalyzeMuted(length); |
| 2792 } else { | 2792 } else { |
| 2793 rms_level_.Analyze( | 2793 rms_level_.Analyze( |
| 2794 rtc::ArrayView<const int16_t>(audio_input->data_, length)); | 2794 rtc::ArrayView<const int16_t>(audio_input->data(), length)); |
| 2795 } | 2795 } |
| 2796 } | 2796 } |
| 2797 previous_frame_muted_ = is_muted; | 2797 previous_frame_muted_ = is_muted; |
| 2798 | 2798 |
| 2799 // Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. | 2799 // Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. |
| 2800 | 2800 |
| 2801 // The ACM resamples internally. | 2801 // The ACM resamples internally. |
| 2802 audio_input->timestamp_ = _timeStamp; | 2802 audio_input->timestamp_ = _timeStamp; |
| 2803 // This call will trigger AudioPacketizationCallback::SendData if encoding | 2803 // This call will trigger AudioPacketizationCallback::SendData if encoding |
| 2804 // is done and payload is ready for packetization and transmission. | 2804 // is done and payload is ready for packetization and transmission. |
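
In the level-indication hunk, the bounds check can no longer use `sizeof(audio_input->data_)` once the buffer is private, so it switches to the public `AudioFrame::kMaxDataSizeBytes` constant (the same byte count, assuming `data_` is an array of `kMaxDataSizeSamples` 16-bit samples), and the samples reach `rms_level_` through a read-only `rtc::ArrayView` over `data()`. A self-contained sketch of that view-plus-bounds-check pattern, with a hypothetical `SampleView` standing in for `rtc::ArrayView<const int16_t>` and `assert` for `RTC_CHECK_LE`:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for rtc::ArrayView<const int16_t>: a non-owning,
// read-only view over interleaved samples.
struct SampleView {
  const int16_t* data;
  size_t size;
};

constexpr size_t kMaxDataSizeSamples = 3840;  // assumed frame capacity
constexpr size_t kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t);

// Builds the kind of view handed to the RMS analyzer in the hunk above.
SampleView MakeLevelView(const int16_t* samples,
                         size_t samples_per_channel,
                         size_t num_channels) {
  const size_t length = samples_per_channel * num_channels;
  // Mirrors RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes).
  assert(length <= kMaxDataSizeBytes);
  return SampleView{samples, length};
}
```
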
| (...skipping 139 matching lines...) |
| 2944 "Channel::MixOrReplaceAudioWithFile() file is ended"); | 2944 "Channel::MixOrReplaceAudioWithFile() file is ended"); |
| 2945 return 0; | 2945 return 0; |
| 2946 } | 2946 } |
| 2947 } | 2947 } |
| 2948 | 2948 |
| 2949 RTC_DCHECK_EQ(audio_input->samples_per_channel_, fileSamples); | 2949 RTC_DCHECK_EQ(audio_input->samples_per_channel_, fileSamples); |
| 2950 | 2950 |
| 2951 if (_mixFileWithMicrophone) { | 2951 if (_mixFileWithMicrophone) { |
| 2952 // Currently file stream is always mono. | 2952 // Currently file stream is always mono. |
| 2953 // TODO(xians): Change the code when FilePlayer supports real stereo. | 2953 // TODO(xians): Change the code when FilePlayer supports real stereo. |
| 2954 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(), | 2954 MixWithSat(audio_input->mutable_data(), audio_input->num_channels_, |
| 2955 1, fileSamples); | 2955 fileBuffer.get(), 1, fileSamples); |
| 2956 } else { | 2956 } else { |
| 2957 // Replace ACM audio with file. | 2957 // Replace ACM audio with file. |
| 2958 // Currently file stream is always mono. | 2958 // Currently file stream is always mono. |
| 2959 // TODO(xians): Change the code when FilePlayer supports real stereo. | 2959 // TODO(xians): Change the code when FilePlayer supports real stereo. |
| 2960 audio_input->UpdateFrame( | 2960 audio_input->UpdateFrame( |
| 2961 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, | 2961 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, |
| 2962 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); | 2962 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); |
| 2963 } | 2963 } |
| 2964 return 0; | 2964 return 0; |
| 2965 } | 2965 } |
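
With `data_` private, the mix-with-file path obtains its writable pointer through `mutable_data()` instead. The sketch below shows the kind of saturating mix a helper like `MixWithSat` performs; it assumes a mono source clamped into the int16 range, and the signature is illustrative rather than the actual helper's.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

// Sketch of a saturating mix: add a mono source into every channel of an
// interleaved target, clamping the sum to the int16 range.
void MixWithSatSketch(int16_t* target,
                      size_t target_channels,
                      const int16_t* mono_source,
                      size_t samples_per_channel) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    for (size_t ch = 0; ch < target_channels; ++ch) {
      int32_t sum = static_cast<int32_t>(target[i * target_channels + ch]) +
                    static_cast<int32_t>(mono_source[i]);
      sum = std::max<int32_t>(-32768, std::min<int32_t>(32767, sum));
      target[i * target_channels + ch] = static_cast<int16_t>(sum);
    }
  }
}
```

The replace branch in the hunk above skips mixing entirely and overwrites the frame with the file contents via `UpdateFrame()`, so only the mix branch needs the writable pointer.
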
| (...skipping 18 matching lines...) |
| 2984 fileBuffer.get(), &fileSamples, mixingFrequency) == -1) { | 2984 fileBuffer.get(), &fileSamples, mixingFrequency) == -1) { |
| 2985 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2985 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
| 2986 "Channel::MixAudioWithFile() file mixing failed"); | 2986 "Channel::MixAudioWithFile() file mixing failed"); |
| 2987 return -1; | 2987 return -1; |
| 2988 } | 2988 } |
| 2989 } | 2989 } |
| 2990 | 2990 |
| 2991 if (audioFrame.samples_per_channel_ == fileSamples) { | 2991 if (audioFrame.samples_per_channel_ == fileSamples) { |
| 2992 // Currently file stream is always mono. | 2992 // Currently file stream is always mono. |
| 2993 // TODO(xians): Change the code when FilePlayer supports real stereo. | 2993 // TODO(xians): Change the code when FilePlayer supports real stereo. |
| 2994 MixWithSat(audioFrame.data_, audioFrame.num_channels_, fileBuffer.get(), 1, | 2994 MixWithSat(audioFrame.mutable_data(), audioFrame.num_channels_, |
| 2995 fileSamples); | 2995 fileBuffer.get(), 1, fileSamples); |
| 2996 } else { | 2996 } else { |
| 2997 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2997 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
| 2998 "Channel::MixAudioWithFile() samples_per_channel_(%" PRIuS | 2998 "Channel::MixAudioWithFile() samples_per_channel_(%" PRIuS |
| 2999 ") != " | 2999 ") != " |
| 3000 "fileSamples(%" PRIuS ")", | 3000 "fileSamples(%" PRIuS ")", |
| 3001 audioFrame.samples_per_channel_, fileSamples); | 3001 audioFrame.samples_per_channel_, fileSamples); |
| 3002 return -1; | 3002 return -1; |
| 3003 } | 3003 } |
| 3004 | 3004 |
| 3005 return 0; | 3005 return 0; |
| (...skipping 134 matching lines...) |
| 3140 int64_t min_rtt = 0; | 3140 int64_t min_rtt = 0; |
| 3141 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != | 3141 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != |
| 3142 0) { | 3142 0) { |
| 3143 return 0; | 3143 return 0; |
| 3144 } | 3144 } |
| 3145 return rtt; | 3145 return rtt; |
| 3146 } | 3146 } |
| 3147 | 3147 |
| 3148 } // namespace voe | 3148 } // namespace voe |
| 3149 } // namespace webrtc | 3149 } // namespace webrtc |