OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 21 matching lines...)
32 #include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h" | 32 #include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h" |
33 #endif | 33 #endif |
34 #ifdef WEBRTC_CODEC_PCM16 | 34 #ifdef WEBRTC_CODEC_PCM16 |
35 #include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h" | 35 #include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h" |
36 #endif | 36 #endif |
37 | 37 |
38 namespace webrtc { | 38 namespace webrtc { |
39 | 39 |
40 // PCMu | 40 // PCMu |
41 | 41 |
42 int AudioDecoderPcmU::Init() { | 42 void AudioDecoderPcmU::Reset() { |
43 return 0; | |
44 } | 43 } |
45 size_t AudioDecoderPcmU::Channels() const { | 44 size_t AudioDecoderPcmU::Channels() const { |
46 return 1; | 45 return 1; |
47 } | 46 } |
48 | 47 |
49 int AudioDecoderPcmU::DecodeInternal(const uint8_t* encoded, | 48 int AudioDecoderPcmU::DecodeInternal(const uint8_t* encoded, |
50 size_t encoded_len, | 49 size_t encoded_len, |
51 int sample_rate_hz, | 50 int sample_rate_hz, |
52 int16_t* decoded, | 51 int16_t* decoded, |
53 SpeechType* speech_type) { | 52 SpeechType* speech_type) { |
54 DCHECK_EQ(sample_rate_hz, 8000); | 53 DCHECK_EQ(sample_rate_hz, 8000); |
55 int16_t temp_type = 1; // Default is speech. | 54 int16_t temp_type = 1; // Default is speech. |
56 size_t ret = WebRtcG711_DecodeU(encoded, encoded_len, decoded, &temp_type); | 55 size_t ret = WebRtcG711_DecodeU(encoded, encoded_len, decoded, &temp_type); |
57 *speech_type = ConvertSpeechType(temp_type); | 56 *speech_type = ConvertSpeechType(temp_type); |
58 return static_cast<int>(ret); | 57 return static_cast<int>(ret); |
59 } | 58 } |
60 | 59 |
61 int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded, | 60 int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded, |
62 size_t encoded_len) const { | 61 size_t encoded_len) const { |
63 // One encoded byte per sample per channel. | 62 // One encoded byte per sample per channel. |
64 return static_cast<int>(encoded_len / Channels()); | 63 return static_cast<int>(encoded_len / Channels()); |
65 } | 64 } |
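(For a concrete check of the comment above: a 20 ms mono G.711 µ-law packet at 8 kHz carries 160 one-byte samples, so encoded_len is 160 and PacketDuration() returns 160.)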
66 | 65 |
67 size_t AudioDecoderPcmUMultiCh::Channels() const { | 66 size_t AudioDecoderPcmUMultiCh::Channels() const { |
68 return channels_; | 67 return channels_; |
69 } | 68 } |
70 | 69 |
71 // PCMa | 70 // PCMa |
72 | 71 |
73 int AudioDecoderPcmA::Init() { | 72 void AudioDecoderPcmA::Reset() { |
74 return 0; | |
75 } | 73 } |
76 size_t AudioDecoderPcmA::Channels() const { | 74 size_t AudioDecoderPcmA::Channels() const { |
77 return 1; | 75 return 1; |
78 } | 76 } |
79 | 77 |
80 int AudioDecoderPcmA::DecodeInternal(const uint8_t* encoded, | 78 int AudioDecoderPcmA::DecodeInternal(const uint8_t* encoded, |
81 size_t encoded_len, | 79 size_t encoded_len, |
82 int sample_rate_hz, | 80 int sample_rate_hz, |
83 int16_t* decoded, | 81 int16_t* decoded, |
84 SpeechType* speech_type) { | 82 SpeechType* speech_type) { |
(...skipping 11 matching lines...)
96 } | 94 } |
97 | 95 |
98 size_t AudioDecoderPcmAMultiCh::Channels() const { | 96 size_t AudioDecoderPcmAMultiCh::Channels() const { |
99 return channels_; | 97 return channels_; |
100 } | 98 } |
101 | 99 |
102 // PCM16B | 100 // PCM16B |
103 #ifdef WEBRTC_CODEC_PCM16 | 101 #ifdef WEBRTC_CODEC_PCM16 |
104 AudioDecoderPcm16B::AudioDecoderPcm16B() {} | 102 AudioDecoderPcm16B::AudioDecoderPcm16B() {} |
105 | 103 |
106 int AudioDecoderPcm16B::Init() { | 104 void AudioDecoderPcm16B::Reset() { |
107 return 0; | |
108 } | 105 } |
109 size_t AudioDecoderPcm16B::Channels() const { | 106 size_t AudioDecoderPcm16B::Channels() const { |
110 return 1; | 107 return 1; |
111 } | 108 } |
112 | 109 |
113 int AudioDecoderPcm16B::DecodeInternal(const uint8_t* encoded, | 110 int AudioDecoderPcm16B::DecodeInternal(const uint8_t* encoded, |
114 size_t encoded_len, | 111 size_t encoded_len, |
115 int sample_rate_hz, | 112 int sample_rate_hz, |
116 int16_t* decoded, | 113 int16_t* decoded, |
117 SpeechType* speech_type) { | 114 SpeechType* speech_type) { |
(...skipping 18 matching lines...)
136 | 133 |
137 size_t AudioDecoderPcm16BMultiCh::Channels() const { | 134 size_t AudioDecoderPcm16BMultiCh::Channels() const { |
138 return channels_; | 135 return channels_; |
139 } | 136 } |
140 #endif | 137 #endif |
141 | 138 |
142 // iLBC | 139 // iLBC |
143 #ifdef WEBRTC_CODEC_ILBC | 140 #ifdef WEBRTC_CODEC_ILBC |
144 AudioDecoderIlbc::AudioDecoderIlbc() { | 141 AudioDecoderIlbc::AudioDecoderIlbc() { |
145 WebRtcIlbcfix_DecoderCreate(&dec_state_); | 142 WebRtcIlbcfix_DecoderCreate(&dec_state_); |
| 143 WebRtcIlbcfix_Decoderinit30Ms(dec_state_); |
146 } | 144 } |
147 | 145 |
148 AudioDecoderIlbc::~AudioDecoderIlbc() { | 146 AudioDecoderIlbc::~AudioDecoderIlbc() { |
149 WebRtcIlbcfix_DecoderFree(dec_state_); | 147 WebRtcIlbcfix_DecoderFree(dec_state_); |
150 } | 148 } |
151 | 149 |
152 bool AudioDecoderIlbc::HasDecodePlc() const { | 150 bool AudioDecoderIlbc::HasDecodePlc() const { |
153 return true; | 151 return true; |
154 } | 152 } |
155 | 153 |
156 int AudioDecoderIlbc::DecodeInternal(const uint8_t* encoded, | 154 int AudioDecoderIlbc::DecodeInternal(const uint8_t* encoded, |
157 size_t encoded_len, | 155 size_t encoded_len, |
158 int sample_rate_hz, | 156 int sample_rate_hz, |
159 int16_t* decoded, | 157 int16_t* decoded, |
160 SpeechType* speech_type) { | 158 SpeechType* speech_type) { |
161 DCHECK_EQ(sample_rate_hz, 8000); | 159 DCHECK_EQ(sample_rate_hz, 8000); |
162 int16_t temp_type = 1; // Default is speech. | 160 int16_t temp_type = 1; // Default is speech. |
163 int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded, | 161 int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded, |
164 &temp_type); | 162 &temp_type); |
165 *speech_type = ConvertSpeechType(temp_type); | 163 *speech_type = ConvertSpeechType(temp_type); |
166 return ret; | 164 return ret; |
167 } | 165 } |
168 | 166 |
169 size_t AudioDecoderIlbc::DecodePlc(size_t num_frames, int16_t* decoded) { | 167 size_t AudioDecoderIlbc::DecodePlc(size_t num_frames, int16_t* decoded) { |
170 return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames); | 168 return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames); |
171 } | 169 } |
172 | 170 |
173 int AudioDecoderIlbc::Init() { | 171 void AudioDecoderIlbc::Reset() { |
174 return WebRtcIlbcfix_Decoderinit30Ms(dec_state_); | 172 WebRtcIlbcfix_Decoderinit30Ms(dec_state_); |
175 } | 173 } |
176 | 174 |
177 size_t AudioDecoderIlbc::Channels() const { | 175 size_t AudioDecoderIlbc::Channels() const { |
178 return 1; | 176 return 1; |
179 } | 177 } |
180 #endif | 178 #endif |
181 | 179 |
182 // G.722 | 180 // G.722 |
183 #ifdef WEBRTC_CODEC_G722 | 181 #ifdef WEBRTC_CODEC_G722 |
184 AudioDecoderG722::AudioDecoderG722() { | 182 AudioDecoderG722::AudioDecoderG722() { |
185 WebRtcG722_CreateDecoder(&dec_state_); | 183 WebRtcG722_CreateDecoder(&dec_state_); |
| 184 WebRtcG722_DecoderInit(dec_state_); |
186 } | 185 } |
187 | 186 |
188 AudioDecoderG722::~AudioDecoderG722() { | 187 AudioDecoderG722::~AudioDecoderG722() { |
189 WebRtcG722_FreeDecoder(dec_state_); | 188 WebRtcG722_FreeDecoder(dec_state_); |
190 } | 189 } |
191 | 190 |
192 bool AudioDecoderG722::HasDecodePlc() const { | 191 bool AudioDecoderG722::HasDecodePlc() const { |
193 return false; | 192 return false; |
194 } | 193 } |
195 | 194 |
196 int AudioDecoderG722::DecodeInternal(const uint8_t* encoded, | 195 int AudioDecoderG722::DecodeInternal(const uint8_t* encoded, |
197 size_t encoded_len, | 196 size_t encoded_len, |
198 int sample_rate_hz, | 197 int sample_rate_hz, |
199 int16_t* decoded, | 198 int16_t* decoded, |
200 SpeechType* speech_type) { | 199 SpeechType* speech_type) { |
201 DCHECK_EQ(sample_rate_hz, 16000); | 200 DCHECK_EQ(sample_rate_hz, 16000); |
202 int16_t temp_type = 1; // Default is speech. | 201 int16_t temp_type = 1; // Default is speech. |
203 size_t ret = | 202 size_t ret = |
204 WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type); | 203 WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type); |
205 *speech_type = ConvertSpeechType(temp_type); | 204 *speech_type = ConvertSpeechType(temp_type); |
206 return static_cast<int>(ret); | 205 return static_cast<int>(ret); |
207 } | 206 } |
208 | 207 |
209 int AudioDecoderG722::Init() { | 208 void AudioDecoderG722::Reset() { |
210 return WebRtcG722_DecoderInit(dec_state_); | 209 WebRtcG722_DecoderInit(dec_state_); |
211 } | 210 } |
212 | 211 |
213 int AudioDecoderG722::PacketDuration(const uint8_t* encoded, | 212 int AudioDecoderG722::PacketDuration(const uint8_t* encoded, |
214 size_t encoded_len) const { | 213 size_t encoded_len) const { |
215 // 1/2 encoded byte per sample per channel. | 214 // 1/2 encoded byte per sample per channel. |
216 return static_cast<int>(2 * encoded_len / Channels()); | 215 return static_cast<int>(2 * encoded_len / Channels()); |
217 } | 216 } |
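(For a concrete check of the comment above: a 20 ms mono G.722 packet at 64 kbps is 160 encoded bytes, and the decoder produces 2 * 160 / 1 = 320 samples at 16 kHz.)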
218 | 217 |
219 size_t AudioDecoderG722::Channels() const { | 218 size_t AudioDecoderG722::Channels() const { |
220 return 1; | 219 return 1; |
221 } | 220 } |
222 | 221 |
223 AudioDecoderG722Stereo::AudioDecoderG722Stereo() { | 222 AudioDecoderG722Stereo::AudioDecoderG722Stereo() { |
224 WebRtcG722_CreateDecoder(&dec_state_left_); | 223 WebRtcG722_CreateDecoder(&dec_state_left_); |
225 WebRtcG722_CreateDecoder(&dec_state_right_); | 224 WebRtcG722_CreateDecoder(&dec_state_right_); |
| 225 WebRtcG722_DecoderInit(dec_state_left_); |
| 226 WebRtcG722_DecoderInit(dec_state_right_); |
226 } | 227 } |
227 | 228 |
228 AudioDecoderG722Stereo::~AudioDecoderG722Stereo() { | 229 AudioDecoderG722Stereo::~AudioDecoderG722Stereo() { |
229 WebRtcG722_FreeDecoder(dec_state_left_); | 230 WebRtcG722_FreeDecoder(dec_state_left_); |
230 WebRtcG722_FreeDecoder(dec_state_right_); | 231 WebRtcG722_FreeDecoder(dec_state_right_); |
231 } | 232 } |
232 | 233 |
233 int AudioDecoderG722Stereo::DecodeInternal(const uint8_t* encoded, | 234 int AudioDecoderG722Stereo::DecodeInternal(const uint8_t* encoded, |
234 size_t encoded_len, | 235 size_t encoded_len, |
235 int sample_rate_hz, | 236 int sample_rate_hz, |
(...skipping 22 matching lines...)
258 } | 259 } |
259 *speech_type = ConvertSpeechType(temp_type); | 260 *speech_type = ConvertSpeechType(temp_type); |
260 delete [] encoded_deinterleaved; | 261 delete [] encoded_deinterleaved; |
261 return static_cast<int>(ret); | 262 return static_cast<int>(ret); |
262 } | 263 } |
263 | 264 |
264 size_t AudioDecoderG722Stereo::Channels() const { | 265 size_t AudioDecoderG722Stereo::Channels() const { |
265 return 2; | 266 return 2; |
266 } | 267 } |
267 | 268 |
268 int AudioDecoderG722Stereo::Init() { | 269 void AudioDecoderG722Stereo::Reset() { |
269 int r = WebRtcG722_DecoderInit(dec_state_left_); | 270 WebRtcG722_DecoderInit(dec_state_left_); |
270 if (r != 0) | 271 WebRtcG722_DecoderInit(dec_state_right_); |
271 return r; | |
272 return WebRtcG722_DecoderInit(dec_state_right_); | |
273 } | 272 } |
274 | 273 |
275 // Split the stereo packet and place left and right channel after each other | 274 // Split the stereo packet and place left and right channel after each other |
276 // in the output array. | 275 // in the output array. |
277 void AudioDecoderG722Stereo::SplitStereoPacket(const uint8_t* encoded, | 276 void AudioDecoderG722Stereo::SplitStereoPacket(const uint8_t* encoded, |
278 size_t encoded_len, | 277 size_t encoded_len, |
279 uint8_t* encoded_deinterleaved) { | 278 uint8_t* encoded_deinterleaved) { |
280 assert(encoded); | 279 assert(encoded); |
281 // Regroup the 4 bits/sample so |l1 l2| |r1 r2| |l3 l4| |r3 r4| ..., | 280 // Regroup the 4 bits/sample so |l1 l2| |r1 r2| |l3 l4| |r3 r4| ..., |
282 // where "lx" is 4 bits representing left sample number x, and "rx" right | 281 // where "lx" is 4 bits representing left sample number x, and "rx" right |
(...skipping 16 matching lines...)
299 } | 298 } |
300 } | 299 } |
301 #endif | 300 #endif |
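The body of SplitStereoPacket is collapsed in this view; as a rough illustration of the nibble regrouping its comments describe, here is a minimal standalone sketch. It assumes each interleaved input byte packs a 4-bit left codeword in the high nibble and the matching 4-bit right codeword in the low nibble; the function name, the assumed layout, and the single-pass structure are illustrative and not taken from the elided WebRTC code.

#include <cstddef>
#include <cstdint>

// Sketch only (assumed input layout): every byte of |encoded| holds one 4-bit
// left codeword (high nibble) and the matching 4-bit right codeword (low
// nibble). The output packs two left codewords per byte in the first half of
// |deinterleaved| and two right codewords per byte in the second half, i.e.
// |l1 l2| |l3 l4| ... |r1 r2| |r3 r4| ...
void SplitStereoNibblesSketch(const uint8_t* encoded, size_t encoded_len,
                              uint8_t* deinterleaved) {
  const size_t half = encoded_len / 2;
  for (size_t i = 0; i + 1 < encoded_len; i += 2) {
    // Left channel: high nibbles of two consecutive input bytes.
    deinterleaved[i / 2] =
        static_cast<uint8_t>((encoded[i] & 0xF0) | (encoded[i + 1] >> 4));
    // Right channel: low nibbles of the same two bytes, placed after the
    // left-channel block.
    deinterleaved[half + i / 2] = static_cast<uint8_t>(
        ((encoded[i] & 0x0F) << 4) | (encoded[i + 1] & 0x0F));
  }
}

The collapsed WebRTC code may take a different, in-place route; this sketch only shows the end result described by the surrounding comments.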
302 | 301 |
303 // Opus | 302 // Opus |
304 #ifdef WEBRTC_CODEC_OPUS | 303 #ifdef WEBRTC_CODEC_OPUS |
305 AudioDecoderOpus::AudioDecoderOpus(size_t num_channels) | 304 AudioDecoderOpus::AudioDecoderOpus(size_t num_channels) |
306 : channels_(num_channels) { | 305 : channels_(num_channels) { |
307 DCHECK(num_channels == 1 || num_channels == 2); | 306 DCHECK(num_channels == 1 || num_channels == 2); |
308 WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_)); | 307 WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_)); |
| 308 WebRtcOpus_DecoderInit(dec_state_); |
309 } | 309 } |
310 | 310 |
311 AudioDecoderOpus::~AudioDecoderOpus() { | 311 AudioDecoderOpus::~AudioDecoderOpus() { |
312 WebRtcOpus_DecoderFree(dec_state_); | 312 WebRtcOpus_DecoderFree(dec_state_); |
313 } | 313 } |
314 | 314 |
315 int AudioDecoderOpus::DecodeInternal(const uint8_t* encoded, | 315 int AudioDecoderOpus::DecodeInternal(const uint8_t* encoded, |
316 size_t encoded_len, | 316 size_t encoded_len, |
317 int sample_rate_hz, | 317 int sample_rate_hz, |
318 int16_t* decoded, | 318 int16_t* decoded, |
(...skipping 22 matching lines...)
341 DCHECK_EQ(sample_rate_hz, 48000); | 341 DCHECK_EQ(sample_rate_hz, 48000); |
342 int16_t temp_type = 1; // Default is speech. | 342 int16_t temp_type = 1; // Default is speech. |
343 int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded, | 343 int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded, |
344 &temp_type); | 344 &temp_type); |
345 if (ret > 0) | 345 if (ret > 0) |
346 ret *= static_cast<int>(channels_); // Return total number of samples. | 346 ret *= static_cast<int>(channels_); // Return total number of samples. |
347 *speech_type = ConvertSpeechType(temp_type); | 347 *speech_type = ConvertSpeechType(temp_type); |
348 return ret; | 348 return ret; |
349 } | 349 } |
350 | 350 |
351 int AudioDecoderOpus::Init() { | 351 void AudioDecoderOpus::Reset() { |
352 return WebRtcOpus_DecoderInit(dec_state_); | 352 WebRtcOpus_DecoderInit(dec_state_); |
353 } | 353 } |
354 | 354 |
355 int AudioDecoderOpus::PacketDuration(const uint8_t* encoded, | 355 int AudioDecoderOpus::PacketDuration(const uint8_t* encoded, |
356 size_t encoded_len) const { | 356 size_t encoded_len) const { |
357 return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len); | 357 return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len); |
358 } | 358 } |
359 | 359 |
360 int AudioDecoderOpus::PacketDurationRedundant(const uint8_t* encoded, | 360 int AudioDecoderOpus::PacketDurationRedundant(const uint8_t* encoded, |
361 size_t encoded_len) const { | 361 size_t encoded_len) const { |
362 if (!PacketHasFec(encoded, encoded_len)) { | 362 if (!PacketHasFec(encoded, encoded_len)) { |
(...skipping 11 matching lines...)
374 return (fec == 1); | 374 return (fec == 1); |
375 } | 375 } |
376 | 376 |
377 size_t AudioDecoderOpus::Channels() const { | 377 size_t AudioDecoderOpus::Channels() const { |
378 return channels_; | 378 return channels_; |
379 } | 379 } |
380 #endif | 380 #endif |
381 | 381 |
382 AudioDecoderCng::AudioDecoderCng() { | 382 AudioDecoderCng::AudioDecoderCng() { |
383 CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_)); | 383 CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_)); |
| 384 WebRtcCng_InitDec(dec_state_); |
384 } | 385 } |
385 | 386 |
386 AudioDecoderCng::~AudioDecoderCng() { | 387 AudioDecoderCng::~AudioDecoderCng() { |
387 WebRtcCng_FreeDec(dec_state_); | 388 WebRtcCng_FreeDec(dec_state_); |
388 } | 389 } |
389 | 390 |
390 int AudioDecoderCng::Init() { | 391 void AudioDecoderCng::Reset() { |
391 return WebRtcCng_InitDec(dec_state_); | 392 WebRtcCng_InitDec(dec_state_); |
392 } | 393 } |
393 | 394 |
394 int AudioDecoderCng::IncomingPacket(const uint8_t* payload, | 395 int AudioDecoderCng::IncomingPacket(const uint8_t* payload, |
395 size_t payload_len, | 396 size_t payload_len, |
396 uint16_t rtp_sequence_number, | 397 uint16_t rtp_sequence_number, |
397 uint32_t rtp_timestamp, | 398 uint32_t rtp_timestamp, |
398 uint32_t arrival_timestamp) { | 399 uint32_t arrival_timestamp) { |
399 return -1; | 400 return -1; |
400 } | 401 } |
401 | 402 |
(...skipping 187 matching lines...)
589 case kDecoderRED: | 590 case kDecoderRED: |
590 case kDecoderAVT: | 591 case kDecoderAVT: |
591 case kDecoderArbitrary: | 592 case kDecoderArbitrary: |
592 default: { | 593 default: { |
593 return NULL; | 594 return NULL; |
594 } | 595 } |
595 } | 596 } |
596 } | 597 } |
597 | 598 |
598 } // namespace webrtc | 599 } // namespace webrtc |