OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 326 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
337 LOG(LERROR) << "AcmReceiver::InsertPacket " | 337 LOG(LERROR) << "AcmReceiver::InsertPacket " |
338 << static_cast<int>(header->payloadType) | 338 << static_cast<int>(header->payloadType) |
339 << " Failed to insert packet"; | 339 << " Failed to insert packet"; |
340 return -1; | 340 return -1; |
341 } | 341 } |
342 return 0; | 342 return 0; |
343 } | 343 } |
344 | 344 |
345 int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) { | 345 int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) { |
346 enum NetEqOutputType type; | 346 enum NetEqOutputType type; |
347 int samples_per_channel; | 347 size_t samples_per_channel; |
348 int num_channels; | 348 int num_channels; |
349 bool return_silence = false; | 349 bool return_silence = false; |
350 | 350 |
351 { | 351 { |
352 // Accessing members, take the lock. | 352 // Accessing members, take the lock. |
353 CriticalSectionScoped lock(crit_sect_.get()); | 353 CriticalSectionScoped lock(crit_sect_.get()); |
354 | 354 |
355 if (av_sync_) { | 355 if (av_sync_) { |
356 assert(initial_delay_manager_.get()); | 356 assert(initial_delay_manager_.get()); |
357 assert(late_packets_sync_stream_.get()); | 357 assert(late_packets_sync_stream_.get()); |
(...skipping 29 matching lines...) Expand all Loading... |
387 int decoded_sequence_num = 0; | 387 int decoded_sequence_num = 0; |
388 uint32_t decoded_timestamp = 0; | 388 uint32_t decoded_timestamp = 0; |
389 bool update_nack = nack_enabled_ && // Update NACK only if it is enabled. | 389 bool update_nack = nack_enabled_ && // Update NACK only if it is enabled. |
390 neteq_->DecodedRtpInfo(&decoded_sequence_num, &decoded_timestamp); | 390 neteq_->DecodedRtpInfo(&decoded_sequence_num, &decoded_timestamp); |
391 if (update_nack) { | 391 if (update_nack) { |
392 assert(nack_.get()); | 392 assert(nack_.get()); |
393 nack_->UpdateLastDecodedPacket(decoded_sequence_num, decoded_timestamp); | 393 nack_->UpdateLastDecodedPacket(decoded_sequence_num, decoded_timestamp); |
394 } | 394 } |
395 | 395 |
396 // NetEq always returns 10 ms of audio. | 396 // NetEq always returns 10 ms of audio. |
397 current_sample_rate_hz_ = samples_per_channel * 100; | 397 current_sample_rate_hz_ = static_cast<int>(samples_per_channel * 100); |
398 | 398 |
399 // Update if resampling is required. | 399 // Update if resampling is required. |
400 bool need_resampling = (desired_freq_hz != -1) && | 400 bool need_resampling = (desired_freq_hz != -1) && |
401 (current_sample_rate_hz_ != desired_freq_hz); | 401 (current_sample_rate_hz_ != desired_freq_hz); |
402 | 402 |
403 if (need_resampling && !resampled_last_output_frame_) { | 403 if (need_resampling && !resampled_last_output_frame_) { |
404 // Prime the resampler with the last frame. | 404 // Prime the resampler with the last frame. |
405 int16_t temp_output[AudioFrame::kMaxDataSizeSamples]; | 405 int16_t temp_output[AudioFrame::kMaxDataSizeSamples]; |
406 samples_per_channel = | 406 int samples_per_channel_int = |
407 resampler_.Resample10Msec(last_audio_buffer_.get(), | 407 resampler_.Resample10Msec(last_audio_buffer_.get(), |
408 current_sample_rate_hz_, | 408 current_sample_rate_hz_, |
409 desired_freq_hz, | 409 desired_freq_hz, |
410 num_channels, | 410 num_channels, |
411 AudioFrame::kMaxDataSizeSamples, | 411 AudioFrame::kMaxDataSizeSamples, |
412 temp_output); | 412 temp_output); |
413 if (samples_per_channel < 0) { | 413 if (samples_per_channel_int < 0) { |
414 LOG(LERROR) << "AcmReceiver::GetAudio - " | 414 LOG(LERROR) << "AcmReceiver::GetAudio - " |
415 "Resampling last_audio_buffer_ failed."; | 415 "Resampling last_audio_buffer_ failed."; |
416 return -1; | 416 return -1; |
417 } | 417 } |
| 418 samples_per_channel = static_cast<size_t>(samples_per_channel_int); |
418 } | 419 } |
419 | 420 |
420 // The audio in |audio_buffer_| is transferred to |audio_frame_| below, either | 421 // The audio in |audio_buffer_| is transferred to |audio_frame_| below, either |
421 // through resampling, or through straight memcpy. | 422 // through resampling, or through straight memcpy. |
422 // TODO(henrik.lundin) Glitches in the output may appear if the output rate | 423 // TODO(henrik.lundin) Glitches in the output may appear if the output rate |
423 // from NetEq changes. See WebRTC issue 3923. | 424 // from NetEq changes. See WebRTC issue 3923. |
424 if (need_resampling) { | 425 if (need_resampling) { |
425 samples_per_channel = | 426 int samples_per_channel_int = |
426 resampler_.Resample10Msec(audio_buffer_.get(), | 427 resampler_.Resample10Msec(audio_buffer_.get(), |
427 current_sample_rate_hz_, | 428 current_sample_rate_hz_, |
428 desired_freq_hz, | 429 desired_freq_hz, |
429 num_channels, | 430 num_channels, |
430 AudioFrame::kMaxDataSizeSamples, | 431 AudioFrame::kMaxDataSizeSamples, |
431 audio_frame->data_); | 432 audio_frame->data_); |
432 if (samples_per_channel < 0) { | 433 if (samples_per_channel_int < 0) { |
433 LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed."; | 434 LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed."; |
434 return -1; | 435 return -1; |
435 } | 436 } |
| 437 samples_per_channel = static_cast<size_t>(samples_per_channel_int); |
436 resampled_last_output_frame_ = true; | 438 resampled_last_output_frame_ = true; |
437 } else { | 439 } else { |
438 resampled_last_output_frame_ = false; | 440 resampled_last_output_frame_ = false; |
439 // We might end up here ONLY if codec is changed. | 441 // We might end up here ONLY if codec is changed. |
440 memcpy(audio_frame->data_, | 442 memcpy(audio_frame->data_, |
441 audio_buffer_.get(), | 443 audio_buffer_.get(), |
442 samples_per_channel * num_channels * sizeof(int16_t)); | 444 samples_per_channel * num_channels * sizeof(int16_t)); |
443 } | 445 } |
444 | 446 |
445 // Swap buffers, so that the current audio is stored in |last_audio_buffer_| | 447 // Swap buffers, so that the current audio is stored in |last_audio_buffer_| |
446 // for next time. | 448 // for next time. |
447 audio_buffer_.swap(last_audio_buffer_); | 449 audio_buffer_.swap(last_audio_buffer_); |
448 | 450 |
449 audio_frame->num_channels_ = num_channels; | 451 audio_frame->num_channels_ = num_channels; |
450 audio_frame->samples_per_channel_ = samples_per_channel; | 452 audio_frame->samples_per_channel_ = samples_per_channel; |
451 audio_frame->sample_rate_hz_ = samples_per_channel * 100; | 453 audio_frame->sample_rate_hz_ = static_cast<int>(samples_per_channel * 100); |
452 | 454 |
453 // Should set |vad_activity| before calling SetAudioFrameActivityAndType(). | 455 // Should set |vad_activity| before calling SetAudioFrameActivityAndType(). |
454 audio_frame->vad_activity_ = previous_audio_activity_; | 456 audio_frame->vad_activity_ = previous_audio_activity_; |
455 SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame); | 457 SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame); |
456 previous_audio_activity_ = audio_frame->vad_activity_; | 458 previous_audio_activity_ = audio_frame->vad_activity_; |
457 call_stats_.DecodedByNetEq(audio_frame->speech_type_); | 459 call_stats_.DecodedByNetEq(audio_frame->speech_type_); |
458 | 460 |
459 // Computes the RTP timestamp of the first sample in |audio_frame| from | 461 // Computes the RTP timestamp of the first sample in |audio_frame| from |
460 // |GetPlayoutTimestamp|, which is the timestamp of the last sample of | 462 // |GetPlayoutTimestamp|, which is the timestamp of the last sample of |
461 // |audio_frame|. | 463 // |audio_frame|. |
(...skipping 318 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
780 frame->num_channels_ = 1; | 782 frame->num_channels_ = 1; |
781 } | 783 } |
782 | 784 |
783 // Set the audio frame's sampling frequency. | 785 // Set the audio frame's sampling frequency. |
784 if (desired_sample_rate_hz > 0) { | 786 if (desired_sample_rate_hz > 0) { |
785 frame->sample_rate_hz_ = desired_sample_rate_hz; | 787 frame->sample_rate_hz_ = desired_sample_rate_hz; |
786 } else { | 788 } else { |
787 frame->sample_rate_hz_ = current_sample_rate_hz_; | 789 frame->sample_rate_hz_ = current_sample_rate_hz_; |
788 } | 790 } |
789 | 791 |
790 frame->samples_per_channel_ = frame->sample_rate_hz_ / 100; // Always 10 ms. | 792 frame->samples_per_channel_ = |
| 793 static_cast<size_t>(frame->sample_rate_hz_ / 100); // Always 10 ms. |
791 frame->speech_type_ = AudioFrame::kCNG; | 794 frame->speech_type_ = AudioFrame::kCNG; |
792 frame->vad_activity_ = AudioFrame::kVadPassive; | 795 frame->vad_activity_ = AudioFrame::kVadPassive; |
793 int samples = frame->samples_per_channel_ * frame->num_channels_; | 796 size_t samples = frame->samples_per_channel_ * frame->num_channels_; |
794 memset(frame->data_, 0, samples * sizeof(int16_t)); | 797 memset(frame->data_, 0, samples * sizeof(int16_t)); |
795 return true; | 798 return true; |
796 } | 799 } |
797 | 800 |
798 const AcmReceiver::Decoder* AcmReceiver::RtpHeaderToDecoder( | 801 const AcmReceiver::Decoder* AcmReceiver::RtpHeaderToDecoder( |
799 const RTPHeader& rtp_header, | 802 const RTPHeader& rtp_header, |
800 const uint8_t* payload) const { | 803 const uint8_t* payload) const { |
801 auto it = decoders_.find(rtp_header.payloadType); | 804 auto it = decoders_.find(rtp_header.payloadType); |
802 if (ACMCodecDB::kRED >= 0 && // This ensures that RED is defined in WebRTC. | 805 if (ACMCodecDB::kRED >= 0 && // This ensures that RED is defined in WebRTC. |
803 it != decoders_.end() && ACMCodecDB::kRED == it->second.acm_codec_id) { | 806 it != decoders_.end() && ACMCodecDB::kRED == it->second.acm_codec_id) { |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
839 | 842 |
840 void AcmReceiver::GetDecodingCallStatistics(  // Thread-safe snapshot of decoding stats. | 843 void AcmReceiver::GetDecodingCallStatistics(  // Thread-safe snapshot of decoding stats. |
841 AudioDecodingCallStats* stats) const { | 844 AudioDecodingCallStats* stats) const { |
842 CriticalSectionScoped lock(crit_sect_.get());  // Serializes access to |call_stats_|. | 845 CriticalSectionScoped lock(crit_sect_.get());  // Serializes access to |call_stats_|. |
843 *stats = call_stats_.GetDecodingStatistics();  // Copy out the accumulated counters. | 846 *stats = call_stats_.GetDecodingStatistics();  // Copy out the accumulated counters. |
844 } | 847 } |
845 | 848 |
846 } // namespace acm2 | 849 } // namespace acm2 |
847 | 850 |
848 } // namespace webrtc | 851 } // namespace webrtc |
OLD | NEW |