/*
 * libjingle
 * Copyright 2004 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef WEBRTC_LIBJINGLE_SESSION_MEDIA_CALL_H_
#define WEBRTC_LIBJINGLE_SESSION_MEDIA_CALL_H_

#include <deque>
#include <map>
#include <string>
#include <vector>

#include "talk/media/base/mediachannel.h"
#include "talk/media/base/screencastid.h"
#include "talk/media/base/streamparams.h"
#include "talk/media/base/videocommon.h"
#include "talk/session/media/audiomonitor.h"
#include "talk/session/media/currentspeakermonitor.h"
#include "talk/session/media/mediasession.h"
#include "webrtc/base/messagequeue.h"
#include "webrtc/libjingle/session/media/mediamessages.h"
#include "webrtc/libjingle/session/sessionmanager.h"
#include "webrtc/libjingle/xmpp/jid.h"
#include "webrtc/p2p/client/socketmonitor.h"

namespace cricket {

struct AudioInfo;
class Call;
class MediaSessionClient;
class BaseChannel;
class VoiceChannel;
class VideoChannel;
class DataChannel;

// Can't typedef this easily since it's forward declared as struct elsewhere.
struct CallOptions : public MediaSessionOptions {
};
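
// Illustrative usage sketch (not part of this header's API): a client
// typically drives a Call roughly as follows. This assumes MediaSessionClient
// exposes a CreateCall() factory and a jid() accessor, that MediaSessionOptions
// carries recv_audio/recv_video flags, and that |session_client|, |listener|,
// and the callee Jid are placeholders supplied by the embedding application:
//
//   CallOptions options;
//   options.recv_video = true;  // Leave false for an audio-only call.
//   Call* call = session_client->CreateCall();
//   call->SignalSessionState.connect(&listener, &Listener::OnSessionState);
//   Session* session = call->InitiateSession(
//       buzz::Jid("callee@example.com"), session_client->jid(), options);
//   // ... later ...
//   call->TerminateSession(session);  // or call->Terminate() to end them all.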

// CurrentSpeakerMonitor used to depend on Call directly. To break that
// dependency, CurrentSpeakerMonitor now depends on AudioSourceContext, and
// AudioSourceProxy forwards Call's events to it: when SignalAudioMonitor is
// raised on the Call it is re-raised on the AudioSourceContext, and likewise
// media-stream updates observed by the Call are forwarded on to the
// AudioSourceContext.
class AudioSourceProxy: public AudioSourceContext, public sigslot::has_slots<> {
 public:
  explicit AudioSourceProxy(Call* call);

 private:
  void OnAudioMonitor(Call* call, const AudioInfo& info);
  void OnMediaStreamsUpdate(Call* call, cricket::Session*,
      const cricket::MediaStreams&, const cricket::MediaStreams&);

  Call* call_;
};
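
// Illustrative sketch of observing the dominant speaker without depending on
// Call directly; assumes CurrentSpeakerMonitor's constructor takes an
// AudioSourceContext* plus the session, and that it exposes Start() and a
// SignalUpdate(monitor, ssrc) signal. |call|, |session|, and |listener| are
// placeholders:
//
//   CurrentSpeakerMonitor monitor(call->GetAudioSourceProxy(), session);
//   monitor.SignalUpdate.connect(&listener, &Listener::OnSpeakerChanged);
//   monitor.Start();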

class Call : public rtc::MessageHandler, public sigslot::has_slots<> {
 public:
  explicit Call(MediaSessionClient* session_client);
  ~Call();

  // |initiator| can be empty.
  Session* InitiateSession(const buzz::Jid& to, const buzz::Jid& initiator,
                           const CallOptions& options);
  Session* InitiateSession(const std::string& id, const buzz::Jid& to,
                           const CallOptions& options);
  void AcceptSession(Session* session, const CallOptions& options);
  void RejectSession(Session* session);
  void TerminateSession(Session* session);
  void Terminate();
  bool SendViewRequest(Session* session,
                       const ViewRequest& view_request);
  void SetVideoRenderer(Session* session, uint32 ssrc,
                        VideoRenderer* renderer);
  void StartConnectionMonitor(Session* session, int cms);
  void StopConnectionMonitor(Session* session);
  void StartAudioMonitor(Session* session, int cms);
  void StopAudioMonitor(Session* session);
  bool IsAudioMonitorRunning(Session* session);
  void StartSpeakerMonitor(Session* session);
  void StopSpeakerMonitor(Session* session);
  void Mute(bool mute);
  void MuteVideo(bool mute);
  bool SendData(Session* session,
                const SendDataParams& params,
                const rtc::Buffer& payload,
                SendDataResult* result);
  void PressDTMF(int event);
  bool StartScreencast(Session* session,
                       const std::string& stream_name, uint32 ssrc,
                       const ScreencastId& screenid, int fps);
  bool StopScreencast(Session* session,
                      const std::string& stream_name, uint32 ssrc);

  std::vector<Session*> sessions();
  uint32 id();
  bool has_video() const { return has_video_; }
  bool has_data() const { return has_data_; }
  bool muted() const { return muted_; }
  bool video() const { return has_video_; }
  bool secure() const;
  bool video_muted() const { return video_muted_; }
  const std::vector<StreamParams>* GetDataRecvStreams(Session* session) const {
    MediaStreams* recv_streams = GetMediaStreams(session);
    return recv_streams ? &recv_streams->data() : NULL;
  }
  const std::vector<StreamParams>* GetVideoRecvStreams(Session* session) const {
    MediaStreams* recv_streams = GetMediaStreams(session);
    return recv_streams ? &recv_streams->video() : NULL;
  }
  const std::vector<StreamParams>* GetAudioRecvStreams(Session* session) const {
    MediaStreams* recv_streams = GetMediaStreams(session);
    return recv_streams ? &recv_streams->audio() : NULL;
  }
  VoiceChannel* GetVoiceChannel(Session* session) const;
  VideoChannel* GetVideoChannel(Session* session) const;
  DataChannel* GetDataChannel(Session* session) const;
  // Public just for unit tests.
  VideoContentDescription* CreateVideoStreamUpdate(const StreamParams& stream);
  // Takes ownership of |video|.
  void SendVideoStreamUpdate(Session* session, VideoContentDescription* video);

  // Setting this to false gives the call a longer timeout and prevents
  // SignalSetupToCallVoicemail from ever firing.
  void set_send_to_voicemail(bool send_to_voicemail) {
    send_to_voicemail_ = send_to_voicemail;
  }
  bool send_to_voicemail() { return send_to_voicemail_; }
  const VoiceMediaInfo& last_voice_media_info() const {
    return last_voice_media_info_;
  }

  // Sets a flag on the chat app that will redirect the call to voicemail once
  // the call has been terminated.
  sigslot::signal0<> SignalSetupToCallVoicemail;
  sigslot::signal2<Call*, Session*> SignalAddSession;
  sigslot::signal2<Call*, Session*> SignalRemoveSession;
  sigslot::signal3<Call*, Session*, Session::State>
      SignalSessionState;
  sigslot::signal3<Call*, Session*, Session::Error>
      SignalSessionError;
  sigslot::signal3<Call*, Session*, const std::string&>
      SignalReceivedTerminateReason;
  sigslot::signal2<Call*, const std::vector<ConnectionInfo>&>
      SignalConnectionMonitor;
  sigslot::signal2<Call*, const VoiceMediaInfo&> SignalMediaMonitor;
  sigslot::signal2<Call*, const AudioInfo&> SignalAudioMonitor;
  // Empty nick on StreamParams means "unknown".
  // No ssrcs in StreamParams means "no current speaker".
  sigslot::signal3<Call*,
                   Session*,
                   const StreamParams&> SignalSpeakerMonitor;
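
  // Illustrative slot for SignalSpeakerMonitor (hypothetical listener code;
  // assumes StreamParams exposes has_ssrcs()):
  //
  //   void OnSpeakerMonitor(Call* call, Session* session,
  //                         const StreamParams& speaker) {
  //     if (!speaker.has_ssrcs()) {
  //       // No current speaker.
  //     } else {
  //       // |speaker| identifies the dominant speaker's stream; an empty
  //       // nick means the speaker's identity is unknown.
  //     }
  //   }
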
  sigslot::signal2<Call*, const std::vector<ConnectionInfo>&>
      SignalVideoConnectionMonitor;
  sigslot::signal2<Call*, const VideoMediaInfo&> SignalVideoMediaMonitor;
  // Gives added streams and removed streams, in that order.
  sigslot::signal4<Call*,
                   Session*,
                   const MediaStreams&,
                   const MediaStreams&> SignalMediaStreamsUpdate;
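
  // Illustrative slot for SignalMediaStreamsUpdate (hypothetical listener
  // code), showing the added-then-removed parameter order noted above:
  //
  //   void OnMediaStreamsUpdate(Call* call, Session* session,
  //                             const MediaStreams& added,
  //                             const MediaStreams& removed) {
  //     // Attach renderers for streams in |added|; tear down streams in
  //     // |removed|.
  //   }
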
  sigslot::signal3<Call*,
                   const ReceiveDataParams&,
                   const rtc::Buffer&> SignalDataReceived;

  AudioSourceProxy* GetAudioSourceProxy();

 private:
  void OnMessage(rtc::Message* message);
  void OnSessionState(BaseSession* base_session, BaseSession::State state);
  void OnSessionError(BaseSession* base_session, BaseSession::Error error);
  void OnSessionInfoMessage(
      Session* session, const buzz::XmlElement* action_elem);
  void OnViewRequest(
      Session* session, const ViewRequest& view_request);
  void OnRemoteDescriptionUpdate(
      BaseSession* base_session, const ContentInfos& updated_contents);
  void OnReceivedTerminateReason(Session* session, const std::string& reason);
  void IncomingSession(Session* session, const SessionDescription* offer);
  // Returns true on success.
  bool AddSession(Session* session, const SessionDescription* offer);
  void RemoveSession(Session* session);
  void EnableChannels(bool enable);
  void EnableSessionChannels(Session* session, bool enable);
  void Join(Call* call, bool enable);
  void OnConnectionMonitor(VoiceChannel* channel,
                           const std::vector<ConnectionInfo>& infos);
  void OnMediaMonitor(VoiceChannel* channel, const VoiceMediaInfo& info);
  void OnAudioMonitor(VoiceChannel* channel, const AudioInfo& info);
  void OnSpeakerMonitor(CurrentSpeakerMonitor* monitor, uint32 ssrc);
  void OnConnectionMonitor(VideoChannel* channel,
                           const std::vector<ConnectionInfo>& infos);
  void OnMediaMonitor(VideoChannel* channel, const VideoMediaInfo& info);
  void OnDataReceived(DataChannel* channel,
                      const ReceiveDataParams& params,
                      const rtc::Buffer& payload);
  MediaStreams* GetMediaStreams(Session* session) const;
  void UpdateRemoteMediaStreams(Session* session,
                                const ContentInfos& updated_contents,
                                bool update_channels);
  bool UpdateVoiceChannelRemoteContent(Session* session,
                                       const AudioContentDescription* audio);
  bool UpdateVideoChannelRemoteContent(Session* session,
                                       const VideoContentDescription* video);
  bool UpdateDataChannelRemoteContent(Session* session,
                                      const DataContentDescription* data);
  void UpdateRecvStreams(const std::vector<StreamParams>& update_streams,
                         BaseChannel* channel,
                         std::vector<StreamParams>* recv_streams,
                         std::vector<StreamParams>* added_streams,
                         std::vector<StreamParams>* removed_streams);
  void AddRecvStreams(const std::vector<StreamParams>& added_streams,
                      BaseChannel* channel,
                      std::vector<StreamParams>* recv_streams);
  void AddRecvStream(const StreamParams& stream,
                     BaseChannel* channel,
                     std::vector<StreamParams>* recv_streams);
  void RemoveRecvStreams(const std::vector<StreamParams>& removed_streams,
                         BaseChannel* channel,
                         std::vector<StreamParams>* recv_streams);
  void RemoveRecvStream(const StreamParams& stream,
                        BaseChannel* channel,
                        std::vector<StreamParams>* recv_streams);
  void ContinuePlayDTMF();
  bool StopScreencastWithoutSendingUpdate(Session* session, uint32 ssrc);
  bool StopAllScreencastsWithoutSendingUpdate(Session* session);
  bool SessionDescriptionContainsCrypto(const SessionDescription* sdesc) const;
  Session* InternalInitiateSession(const std::string& id,
                                   const buzz::Jid& to,
                                   const std::string& initiator_name,
                                   const CallOptions& options);

  uint32 id_;
  MediaSessionClient* session_client_;

  struct StartedCapture {
    StartedCapture(cricket::VideoCapturer* capturer,
                   const cricket::VideoFormat& format) :
        capturer(capturer),
        format(format) {
    }
    cricket::VideoCapturer* capturer;
    cricket::VideoFormat format;
  };
  typedef std::map<uint32, StartedCapture> StartedScreencastMap;

  struct MediaSession {
    Session* session;
    VoiceChannel* voice_channel;
    VideoChannel* video_channel;
    DataChannel* data_channel;
    MediaStreams* recv_streams;
    StartedScreencastMap started_screencasts;
  };

  // Create a map of media sessions, keyed off session->id().
  typedef std::map<std::string, MediaSession> MediaSessionMap;
  MediaSessionMap media_session_map_;

  std::map<std::string, CurrentSpeakerMonitor*> speaker_monitor_map_;
  bool has_video_;
  bool has_data_;
  bool muted_;
  bool video_muted_;
  bool send_to_voicemail_;

  // DTMF tones have to be queued up so that we don't flood the call. We keep
  // a deque (double-ended queue) of them around. While one is playing, we set
  // the playing_dtmf_ bit and schedule a message in XX msec to clear that bit
  // or start the next tone playing.
  std::deque<int> queued_dtmf_;
  bool playing_dtmf_;
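  // Illustrative sketch of the pacing described above (the real logic lives
  // in the .cc file and may differ in detail):
  //
  //   void Call::PressDTMF(int event) {
  //     queued_dtmf_.push_back(event);           // Queue the tone.
  //     if (!playing_dtmf_) ContinuePlayDTMF();  // Kick off playback if idle.
  //   }
  //
  //   // ContinuePlayDTMF() pops the next tone, plays it on each voice
  //   // channel, sets playing_dtmf_, and posts a delayed message back to
  //   // this object (handled in OnMessage) to clear the bit or play the
  //   // next queued tone.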

  VoiceMediaInfo last_voice_media_info_;

  rtc::scoped_ptr<AudioSourceProxy> audio_source_proxy_;

  friend class MediaSessionClient;
};

}  // namespace cricket

#endif  // WEBRTC_LIBJINGLE_SESSION_MEDIA_CALL_H_