Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(73)

Side by Side Diff: webrtc/voice_engine/channel.cc

Issue 1505253004: Support for remote audio into tracks (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: New AudioSink added Created 5 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/voice_engine/channel.h" 11 #include "webrtc/voice_engine/channel.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <utility>
14 15
15 #include "webrtc/base/checks.h" 16 #include "webrtc/base/checks.h"
16 #include "webrtc/base/format_macros.h" 17 #include "webrtc/base/format_macros.h"
17 #include "webrtc/base/logging.h" 18 #include "webrtc/base/logging.h"
18 #include "webrtc/base/thread_checker.h" 19 #include "webrtc/base/thread_checker.h"
19 #include "webrtc/base/timeutils.h" 20 #include "webrtc/base/timeutils.h"
20 #include "webrtc/common.h" 21 #include "webrtc/common.h"
21 #include "webrtc/config.h" 22 #include "webrtc/config.h"
22 #include "webrtc/modules/audio_device/include/audio_device.h" 23 #include "webrtc/modules/audio_device/include/audio_device.h"
23 #include "webrtc/modules/audio_processing/include/audio_processing.h" 24 #include "webrtc/modules/audio_processing/include/audio_processing.h"
(...skipping 529 matching lines...) Expand 10 before | Expand all | Expand 10 after
553 ChannelState::State state = channel_state_.Get(); 554 ChannelState::State state = channel_state_.Get();
554 555
555 if (state.rx_apm_is_enabled) { 556 if (state.rx_apm_is_enabled) {
556 int err = rx_audioproc_->ProcessStream(audioFrame); 557 int err = rx_audioproc_->ProcessStream(audioFrame);
557 if (err) { 558 if (err) {
558 LOG(LS_ERROR) << "ProcessStream() error: " << err; 559 LOG(LS_ERROR) << "ProcessStream() error: " << err;
559 assert(false); 560 assert(false);
560 } 561 }
561 } 562 }
562 563
564 {
565 // Pass the audio buffers to an optional sink callback, before applying
566 // scaling/panning, as that applies to the mix operation.
567 // External recipients of the audio (e.g. via AudioTrack), will do their
568 // own mixing/dynamic processing.
569 CriticalSectionScoped cs(&_callbackCritSect);
570 if (audio_sink_) {
571 AudioSink::Data data(reinterpret_cast<uint8_t*>(&audioFrame->data_[0]),
572 audioFrame->samples_per_channel_,
573 audioFrame->sample_rate_hz_,
574 audioFrame->num_channels_, audioFrame->timestamp_);
575 audio_sink_->OnData(data);
576 }
577 }
578
563 float output_gain = 1.0f; 579 float output_gain = 1.0f;
564 float left_pan = 1.0f; 580 float left_pan = 1.0f;
565 float right_pan = 1.0f; 581 float right_pan = 1.0f;
566 { 582 {
567 CriticalSectionScoped cs(&volume_settings_critsect_); 583 CriticalSectionScoped cs(&volume_settings_critsect_);
568 output_gain = _outputGain; 584 output_gain = _outputGain;
569 left_pan = _panLeft; 585 left_pan = _panLeft;
570 right_pan= _panRight; 586 right_pan= _panRight;
571 } 587 }
572 588
(...skipping 21 matching lines...) Expand all
594 // stage) 610 // stage)
595 AudioFrameOperations::Scale(left_pan, right_pan, *audioFrame); 611 AudioFrameOperations::Scale(left_pan, right_pan, *audioFrame);
596 } 612 }
597 613
598 // Mix decoded PCM output with file if file mixing is enabled 614 // Mix decoded PCM output with file if file mixing is enabled
599 if (state.output_file_playing) 615 if (state.output_file_playing)
600 { 616 {
601 MixAudioWithFile(*audioFrame, audioFrame->sample_rate_hz_); 617 MixAudioWithFile(*audioFrame, audioFrame->sample_rate_hz_);
602 } 618 }
603 619
604 // External media 620 // External media.
605 if (_outputExternalMedia) 621 if (_outputExternalMedia)
606 { 622 {
607 CriticalSectionScoped cs(&_callbackCritSect); 623 CriticalSectionScoped cs(&_callbackCritSect);
608 const bool isStereo = (audioFrame->num_channels_ == 2); 624 const bool isStereo = (audioFrame->num_channels_ == 2);
609 if (_outputExternalMediaCallbackPtr) 625 if (_outputExternalMediaCallbackPtr)
610 { 626 {
611 _outputExternalMediaCallbackPtr->Process( 627 _outputExternalMediaCallbackPtr->Process(
612 _channelId, 628 _channelId, kPlaybackPerChannel,
613 kPlaybackPerChannel, 629 reinterpret_cast<int16_t*>(audioFrame->data_),
614 (int16_t*)audioFrame->data_, 630 audioFrame->samples_per_channel_, audioFrame->sample_rate_hz_,
615 audioFrame->samples_per_channel_, 631 isStereo);
616 audioFrame->sample_rate_hz_,
617 isStereo);
618 } 632 }
619 } 633 }
620 634
621 // Record playout if enabled 635 // Record playout if enabled
622 { 636 {
623 CriticalSectionScoped cs(&_fileCritSect); 637 CriticalSectionScoped cs(&_fileCritSect);
624 638
625 if (_outputFileRecording && _outputFileRecorderPtr) 639 if (_outputFileRecording && _outputFileRecorderPtr)
626 { 640 {
627 _outputFileRecorderPtr->RecordAudioToFile(*audioFrame); 641 _outputFileRecorderPtr->RecordAudioToFile(*audioFrame);
(...skipping 537 matching lines...) Expand 10 before | Expand all | Expand 10 after
1165 } 1179 }
1166 1180
1167 int32_t 1181 int32_t
1168 Channel::UpdateLocalTimeStamp() 1182 Channel::UpdateLocalTimeStamp()
1169 { 1183 {
1170 1184
1171 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); 1185 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
1172 return 0; 1186 return 0;
1173 } 1187 }
1174 1188
1189 void Channel::SetSink(rtc::scoped_ptr<AudioSink> sink) {
1190 CriticalSectionScoped cs(&_callbackCritSect);
1191 audio_sink_ = std::move(sink);
1192 }
1193
1175 int32_t 1194 int32_t
1176 Channel::StartPlayout() 1195 Channel::StartPlayout()
1177 { 1196 {
1178 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId), 1197 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1179 "Channel::StartPlayout()"); 1198 "Channel::StartPlayout()");
1180 if (channel_state_.Get().playing) 1199 if (channel_state_.Get().playing)
1181 { 1200 {
1182 return 0; 1201 return 0;
1183 } 1202 }
1184 1203
(...skipping 2901 matching lines...) Expand 10 before | Expand all | Expand 10 after
4086 int64_t min_rtt = 0; 4105 int64_t min_rtt = 0;
4087 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) 4106 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt)
4088 != 0) { 4107 != 0) {
4089 return 0; 4108 return 0;
4090 } 4109 }
4091 return rtt; 4110 return rtt;
4092 } 4111 }
4093 4112
4094 } // namespace voe 4113 } // namespace voe
4095 } // namespace webrtc 4114 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698